comment stringlengths 22 3.02k | method_body stringlengths 46 368k | target_code stringlengths 0 181 | method_body_after stringlengths 12 368k | context_before stringlengths 11 634k | context_after stringlengths 11 632k |
|---|---|---|---|---|---|
If return true constant, which means we need to keep all partitions or prunes all partitions? | public ScalarOperator visitBinaryPredicate(BinaryPredicateOperator predicate, Void context) {
if (partitionColumnSet.containsAll(predicate.getUsedColumns())) {
ScalarOperator left = predicate.getChild(0).accept(this, null);
if (isShortCut(left)) {
return ConstantOperator.createBoolean(true);
}
ScalarOperator right = predicate.getChild(1).accept(this, null);
if (isShortCut(right)) {
return ConstantOperator.createBoolean(true);
}
if (EQ_FOR_NULL == predicate.getBinaryType() || !isConstantNull(right)) {
return predicate;
} else {
return ConstantOperator.createBoolean(false);
}
}
return ConstantOperator.createBoolean(true);
} | return ConstantOperator.createBoolean(true); | public ScalarOperator visitBinaryPredicate(BinaryPredicateOperator predicate, Void context) {
if (partitionColumnSet.containsAll(predicate.getUsedColumns())) {
ScalarOperator left = predicate.getChild(0).accept(this, null);
if (isShortCut(left)) {
return ConstantOperator.createBoolean(true);
}
ScalarOperator right = predicate.getChild(1).accept(this, null);
if (isShortCut(right)) {
return ConstantOperator.createBoolean(true);
}
if (EQ_FOR_NULL == predicate.getBinaryType() || !isConstantNull(right)) {
return predicate;
} else {
return ConstantOperator.createBoolean(false);
}
}
return ConstantOperator.createBoolean(true);
} | class PartitionColPredicateExtractor extends ScalarOperatorVisitor<ScalarOperator, Void> {
private ColumnRefSet partitionColumnSet;
public PartitionColPredicateExtractor(RangePartitionInfo rangePartitionInfo,
Map<Column, ColumnRefOperator> columnMetaToColRefMap) {
List<ColumnRefOperator> columnRefOperators = Lists.newArrayList();
Column partitionColumn = rangePartitionInfo.getPartitionColumns().get(0);
columnRefOperators.add(columnMetaToColRefMap.get(partitionColumn));
partitionColumnSet = new ColumnRefSet(columnRefOperators);
}
public ScalarOperator extract(ScalarOperator predicate) {
predicate = predicate.clone();
List<ScalarOperator> splitPredicates = Utils.extractConjuncts(predicate).stream()
.filter(e -> !e.isFromPredicateRangeDerive()).collect(Collectors.toList());
ScalarOperator scalarOperator = Utils.compoundAnd(splitPredicates);
return scalarOperator.accept(this, null);
}
@Override
public ScalarOperator visit(ScalarOperator scalarOperator, Void context) {
return null;
}
@Override
public ScalarOperator visitConstant(ConstantOperator literal, Void context) {
return literal;
}
@Override
public ScalarOperator visitVariableReference(ColumnRefOperator variable, Void context) {
if (partitionColumnSet.containsAll(variable.getUsedColumns())) {
return variable;
}
return null;
}
@Override
public ScalarOperator visitCall(CallOperator call, Void context) {
return null;
}
@Override
@Override
public ScalarOperator visitCompoundPredicate(CompoundPredicateOperator predicate, Void context) {
if (predicate.isNot()) {
return ConstantOperator.createBoolean(true);
}
ScalarOperator first = predicate.getChild(0).accept(this, null);
if (predicate.isAnd()) {
ScalarOperator second = predicate.getChild(1);
if (first.isConstantRef()) {
boolean isTrue = ((ConstantOperator) first).getBoolean();
if (isTrue) {
second = second.accept(this, null);
return second;
} else {
return ConstantOperator.createBoolean(false);
}
} else {
second = second.accept(this, null);
if (second.isConstantRef()) {
boolean isTrue = ((ConstantOperator) second).getBoolean();
if (isTrue) {
return first;
} else {
return ConstantOperator.createBoolean(false);
}
} else {
return new CompoundPredicateOperator(CompoundPredicateOperator.CompoundType.AND, first, second);
}
}
} else {
ScalarOperator second = predicate.getChild(1).accept(this, null);
if (first.isConstantRef()) {
return ((ConstantOperator) first).getBoolean() ? first : second;
} else if (second.isConstantRef()) {
return ((ConstantOperator) second).getBoolean() ? second : first;
} else {
return new CompoundPredicateOperator(CompoundPredicateOperator.CompoundType.OR, first, second);
}
}
}
@Override
public ScalarOperator visitInPredicate(InPredicateOperator predicate, Void context) {
ScalarOperator first = predicate.getChild(0).accept(this, null);
if (isShortCut(first)) {
return ConstantOperator.createBoolean(true);
} else {
if (predicate.allValuesMatch(ScalarOperator::isConstantRef)) {
if (predicate.isNotIn()) {
return ConstantOperator.createBoolean(!predicate.hasAnyNullValues());
}
return predicate;
} else {
return ConstantOperator.createBoolean(true);
}
}
}
@Override
public ScalarOperator visitIsNullPredicate(IsNullPredicateOperator predicate, Void context) {
ScalarOperator first = predicate.getChild(0).accept(this, null);
if (isShortCut(first) || predicate.isNotNull()) {
return ConstantOperator.createBoolean(true);
} else {
return ConstantOperator.createBoolean(false);
}
}
@Override
public ScalarOperator visitLikePredicateOperator(LikePredicateOperator predicate, Void context) {
return ConstantOperator.createBoolean(true);
}
private boolean isShortCut(ScalarOperator operator) {
return operator == null;
}
private boolean isConstantNull(ScalarOperator operator) {
if (operator.isConstantRef()) {
return ((ConstantOperator) operator).isNull();
}
return false;
}
} | class PartitionColPredicateExtractor extends ScalarOperatorVisitor<ScalarOperator, Void> {
private ColumnRefSet partitionColumnSet;
public PartitionColPredicateExtractor(RangePartitionInfo rangePartitionInfo,
Map<Column, ColumnRefOperator> columnMetaToColRefMap) {
List<ColumnRefOperator> columnRefOperators = Lists.newArrayList();
Column partitionColumn = rangePartitionInfo.getPartitionColumns().get(0);
columnRefOperators.add(columnMetaToColRefMap.get(partitionColumn));
partitionColumnSet = new ColumnRefSet(columnRefOperators);
}
public ScalarOperator extract(ScalarOperator predicate) {
predicate = predicate.clone();
List<ScalarOperator> splitPredicates = Utils.extractConjuncts(predicate).stream()
.filter(e -> !e.isFromPredicateRangeDerive()).collect(Collectors.toList());
ScalarOperator scalarOperator = Utils.compoundAnd(splitPredicates);
return scalarOperator.accept(this, null);
}
@Override
public ScalarOperator visit(ScalarOperator scalarOperator, Void context) {
return null;
}
@Override
public ScalarOperator visitConstant(ConstantOperator literal, Void context) {
return literal;
}
@Override
public ScalarOperator visitVariableReference(ColumnRefOperator variable, Void context) {
if (partitionColumnSet.containsAll(variable.getUsedColumns())) {
return variable;
}
return null;
}
@Override
public ScalarOperator visitCall(CallOperator call, Void context) {
return null;
}
@Override
@Override
public ScalarOperator visitCompoundPredicate(CompoundPredicateOperator predicate, Void context) {
if (predicate.isNot()) {
return ConstantOperator.createBoolean(true);
}
ScalarOperator first = predicate.getChild(0).accept(this, null);
if (predicate.isAnd()) {
ScalarOperator second = predicate.getChild(1);
if (first.isConstantRef()) {
boolean isTrue = ((ConstantOperator) first).getBoolean();
if (isTrue) {
second = second.accept(this, null);
return second;
} else {
return ConstantOperator.createBoolean(false);
}
} else {
second = second.accept(this, null);
if (second.isConstantRef()) {
boolean isTrue = ((ConstantOperator) second).getBoolean();
if (isTrue) {
return first;
} else {
return ConstantOperator.createBoolean(false);
}
} else {
return new CompoundPredicateOperator(CompoundPredicateOperator.CompoundType.AND, first, second);
}
}
} else {
ScalarOperator second = predicate.getChild(1).accept(this, null);
if (first.isConstantRef()) {
return ((ConstantOperator) first).getBoolean() ? first : second;
} else if (second.isConstantRef()) {
return ((ConstantOperator) second).getBoolean() ? second : first;
} else {
return new CompoundPredicateOperator(CompoundPredicateOperator.CompoundType.OR, first, second);
}
}
}
@Override
public ScalarOperator visitInPredicate(InPredicateOperator predicate, Void context) {
ScalarOperator first = predicate.getChild(0).accept(this, null);
if (isShortCut(first)) {
return ConstantOperator.createBoolean(true);
} else {
if (predicate.allValuesMatch(ScalarOperator::isConstantRef)) {
if (predicate.isNotIn()) {
return ConstantOperator.createBoolean(!predicate.hasAnyNullValues());
}
return predicate;
} else {
return ConstantOperator.createBoolean(true);
}
}
}
@Override
public ScalarOperator visitIsNullPredicate(IsNullPredicateOperator predicate, Void context) {
ScalarOperator first = predicate.getChild(0).accept(this, null);
if (isShortCut(first) || predicate.isNotNull()) {
return ConstantOperator.createBoolean(true);
} else {
return ConstantOperator.createBoolean(false);
}
}
@Override
public ScalarOperator visitLikePredicateOperator(LikePredicateOperator predicate, Void context) {
return ConstantOperator.createBoolean(true);
}
private boolean isShortCut(ScalarOperator operator) {
return operator == null;
}
private boolean isConstantNull(ScalarOperator operator) {
if (operator.isConstantRef()) {
return ((ConstantOperator) operator).isNull();
}
return false;
}
} |
That can be added any time when we feel comfortable with Spotless formatting the code in Monitor OpenTelemetry Exporter. I can file another PR after this for that if you want. | private static boolean isClient(String metricName) {
return metricName.contains(".client.");
} | return metricName.contains(".client."); | private static boolean isClient(String metricName) {
return metricName.contains(".client.");
} | class MetricDataMapper {
private static final ClientLogger logger = new ClientLogger(MetricDataMapper.class);
private static final Set<String> OTEL_UNSTABLE_METRICS_TO_EXCLUDE = new HashSet<>();
private static final String OTEL_INSTRUMENTATION_NAME_PREFIX = "io.opentelemetry";
private static final Set<String> OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES = new HashSet<>(4);
public static final AttributeKey<String> APPLICATIONINSIGHTS_INTERNAL_METRIC_NAME = AttributeKey.stringKey("applicationinsights.internal.metric_name");
private final BiConsumer<AbstractTelemetryBuilder, Resource> telemetryInitializer;
private final boolean captureHttpServer4xxAsError;
static {
OTEL_UNSTABLE_METRICS_TO_EXCLUDE.add("rpc.client.duration");
OTEL_UNSTABLE_METRICS_TO_EXCLUDE.add("rpc.server.duration");
OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.add("http.server.request.duration");
OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.add("http.client.request.duration");
OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.add("rpc.client.duration");
OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.add("rpc.server.duration");
}
public MetricDataMapper(
BiConsumer<AbstractTelemetryBuilder, Resource> telemetryInitializer,
boolean captureHttpServer4xxAsError) {
this.telemetryInitializer = telemetryInitializer;
this.captureHttpServer4xxAsError = captureHttpServer4xxAsError;
}
public void map(MetricData metricData, Consumer<TelemetryItem> consumer) {
MetricDataType type = metricData.getType();
if (type == DOUBLE_SUM
|| type == DOUBLE_GAUGE
|| type == LONG_SUM
|| type == LONG_GAUGE
|| type == HISTOGRAM) {
boolean isPreAggregatedStandardMetric =
OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.contains(metricData.getName());
if (isPreAggregatedStandardMetric) {
List<TelemetryItem> preAggregatedStandardMetrics =
convertOtelMetricToAzureMonitorMetric(metricData, true);
preAggregatedStandardMetrics.forEach(consumer::accept);
}
if (OTEL_UNSTABLE_METRICS_TO_EXCLUDE.contains(metricData.getName())
&& metricData.getInstrumentationScopeInfo().getName().startsWith(OTEL_INSTRUMENTATION_NAME_PREFIX)) {
return;
}
List<TelemetryItem> stableOtelMetrics = convertOtelMetricToAzureMonitorMetric(metricData, false);
stableOtelMetrics.forEach(consumer::accept);
} else {
logger.warning("metric data type {} is not supported yet.", metricData.getType());
}
}
private List<TelemetryItem> convertOtelMetricToAzureMonitorMetric(
MetricData metricData, boolean isPreAggregatedStandardMetric) {
List<TelemetryItem> telemetryItems = new ArrayList<>();
for (PointData pointData : metricData.getData().getPoints()) {
MetricTelemetryBuilder builder = MetricTelemetryBuilder.create();
telemetryInitializer.accept(builder, metricData.getResource());
builder.setTime(FormattedTime.offSetDateTimeFromEpochNanos(pointData.getEpochNanos()));
updateMetricPointBuilder(
builder,
metricData,
pointData,
captureHttpServer4xxAsError,
isPreAggregatedStandardMetric);
telemetryItems.add(builder.build());
}
return telemetryItems;
}
public static void updateMetricPointBuilder(
MetricTelemetryBuilder metricTelemetryBuilder,
MetricData metricData,
PointData pointData,
boolean captureHttpServer4xxAsError,
boolean isPreAggregatedStandardMetric) {
checkArgument(metricData != null, "MetricData cannot be null.");
MetricPointBuilder pointBuilder = new MetricPointBuilder();
MetricDataType type = metricData.getType();
double pointDataValue;
switch (type) {
case LONG_SUM:
case LONG_GAUGE:
pointDataValue = (double) ((LongPointData) pointData).getValue();
break;
case DOUBLE_SUM:
case DOUBLE_GAUGE:
pointDataValue = ((DoublePointData) pointData).getValue();
break;
case HISTOGRAM:
long histogramCount = ((HistogramPointData) pointData).getCount();
if (histogramCount <= Integer.MAX_VALUE && histogramCount >= Integer.MIN_VALUE) {
pointBuilder.setCount((int) histogramCount);
}
HistogramPointData histogramPointData = (HistogramPointData) pointData;
double min = histogramPointData.getMin();
double max = histogramPointData.getMax();
if (shouldConvertToMilliseconds(metricData.getName(), isPreAggregatedStandardMetric)) {
min = min * 1000;
max = max * 1000;
}
pointDataValue = histogramPointData.getSum();
pointBuilder.setMin(min);
pointBuilder.setMax(max);
break;
case SUMMARY:
case EXPONENTIAL_HISTOGRAM:
default:
throw new IllegalArgumentException("metric data type '" + type + "' is not supported yet");
}
if (shouldConvertToMilliseconds(metricData.getName(), isPreAggregatedStandardMetric)) {
pointDataValue = pointDataValue * 1000;
}
pointBuilder.setValue(pointDataValue);
String metricName = pointData.getAttributes().get(APPLICATIONINSIGHTS_INTERNAL_METRIC_NAME);
if (metricName != null) {
pointBuilder.setName(metricName);
} else {
pointBuilder.setName(metricData.getName());
}
metricTelemetryBuilder.setMetricPoint(pointBuilder);
Attributes attributes = pointData.getAttributes();
if (isPreAggregatedStandardMetric) {
Long statusCode = getStableOrOldAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE);
boolean success = isSuccess(metricData.getName(), statusCode, captureHttpServer4xxAsError);
Boolean isSynthetic = attributes.get(IS_SYNTHETIC);
attributes.forEach(
(key, value) ->
applyConnectionStringAndRoleNameOverrides(
metricTelemetryBuilder, value, key.getKey()));
if (isServer(metricData.getName())) {
RequestExtractor.extract(metricTelemetryBuilder, statusCode, success, isSynthetic);
} else if (isClient(metricData.getName())) {
String dependencyType;
int defaultPort;
if (metricData.getName().startsWith("http")) {
dependencyType = "Http";
defaultPort = getDefaultPortForHttpScheme(getStableOrOldAttribute(attributes, SemanticAttributes.URL_SCHEME, SemanticAttributes.HTTP_SCHEME));
} else {
dependencyType = attributes.get(SemanticAttributes.RPC_SYSTEM);
if (dependencyType == null) {
dependencyType = "Unknown";
}
defaultPort = Integer.MAX_VALUE;
}
String target = SpanDataMapper.getTargetOrDefault(attributes, defaultPort, dependencyType);
DependencyExtractor.extract(
metricTelemetryBuilder, statusCode, success, dependencyType, target, isSynthetic);
}
} else {
MappingsBuilder mappingsBuilder = new MappingsBuilder(METRIC);
mappingsBuilder.build().map(attributes, metricTelemetryBuilder);
}
}
private static boolean shouldConvertToMilliseconds(String metricName, boolean isPreAggregatedStandardMetric) {
return isPreAggregatedStandardMetric && (metricName.equals("http.server.request.duration") || metricName.equals("http.client.request.duration"));
}
private static boolean applyConnectionStringAndRoleNameOverrides(
AbstractTelemetryBuilder telemetryBuilder, Object value, String key) {
if (key.equals(AiSemanticAttributes.INTERNAL_CONNECTION_STRING.getKey())
&& value instanceof String) {
telemetryBuilder.setConnectionString(ConnectionString.parse((String) value));
return true;
}
if (key.equals(AiSemanticAttributes.INTERNAL_ROLE_NAME.getKey()) && value instanceof String) {
telemetryBuilder.addTag(ContextTagKeys.AI_CLOUD_ROLE.toString(), (String) value);
return true;
}
return false;
}
private static int getDefaultPortForHttpScheme(@Nullable String httpScheme) {
if (httpScheme == null) {
return Integer.MAX_VALUE;
}
if (httpScheme.equals("https")) {
return 443;
}
if (httpScheme.equals("http")) {
return 80;
}
return Integer.MAX_VALUE;
}
private static boolean isSuccess(String metricName, Long statusCode, boolean captureHttpServer4xxAsError) {
if (statusCode == null) {
return true;
}
if (isClient(metricName)) {
return statusCode < 400;
}
if (isServer(metricName)) {
if (captureHttpServer4xxAsError) {
return statusCode < 400;
}
return statusCode < 500;
}
return false;
}
private static boolean isServer(String metricName) {
return metricName.contains(".server.");
}
} | class MetricDataMapper {
private static final ClientLogger logger = new ClientLogger(MetricDataMapper.class);
private static final Set<String> OTEL_UNSTABLE_METRICS_TO_EXCLUDE = new HashSet<>();
private static final String OTEL_INSTRUMENTATION_NAME_PREFIX = "io.opentelemetry";
private static final Set<String> OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES = new HashSet<>(4);
public static final AttributeKey<String> APPLICATIONINSIGHTS_INTERNAL_METRIC_NAME = AttributeKey.stringKey("applicationinsights.internal.metric_name");
private final BiConsumer<AbstractTelemetryBuilder, Resource> telemetryInitializer;
private final boolean captureHttpServer4xxAsError;
static {
OTEL_UNSTABLE_METRICS_TO_EXCLUDE.add("rpc.client.duration");
OTEL_UNSTABLE_METRICS_TO_EXCLUDE.add("rpc.server.duration");
OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.add("http.server.request.duration");
OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.add("http.client.request.duration");
OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.add("rpc.client.duration");
OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.add("rpc.server.duration");
}
public MetricDataMapper(
BiConsumer<AbstractTelemetryBuilder, Resource> telemetryInitializer,
boolean captureHttpServer4xxAsError) {
this.telemetryInitializer = telemetryInitializer;
this.captureHttpServer4xxAsError = captureHttpServer4xxAsError;
}
public void map(MetricData metricData, Consumer<TelemetryItem> consumer) {
MetricDataType type = metricData.getType();
if (type == DOUBLE_SUM
|| type == DOUBLE_GAUGE
|| type == LONG_SUM
|| type == LONG_GAUGE
|| type == HISTOGRAM) {
boolean isPreAggregatedStandardMetric =
OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.contains(metricData.getName());
if (isPreAggregatedStandardMetric) {
List<TelemetryItem> preAggregatedStandardMetrics =
convertOtelMetricToAzureMonitorMetric(metricData, true);
preAggregatedStandardMetrics.forEach(consumer::accept);
}
if (OTEL_UNSTABLE_METRICS_TO_EXCLUDE.contains(metricData.getName())
&& metricData.getInstrumentationScopeInfo().getName().startsWith(OTEL_INSTRUMENTATION_NAME_PREFIX)) {
return;
}
List<TelemetryItem> stableOtelMetrics = convertOtelMetricToAzureMonitorMetric(metricData, false);
stableOtelMetrics.forEach(consumer::accept);
} else {
logger.warning("metric data type {} is not supported yet.", metricData.getType());
}
}
private List<TelemetryItem> convertOtelMetricToAzureMonitorMetric(
MetricData metricData, boolean isPreAggregatedStandardMetric) {
List<TelemetryItem> telemetryItems = new ArrayList<>();
for (PointData pointData : metricData.getData().getPoints()) {
MetricTelemetryBuilder builder = MetricTelemetryBuilder.create();
telemetryInitializer.accept(builder, metricData.getResource());
builder.setTime(FormattedTime.offSetDateTimeFromEpochNanos(pointData.getEpochNanos()));
updateMetricPointBuilder(
builder,
metricData,
pointData,
captureHttpServer4xxAsError,
isPreAggregatedStandardMetric);
telemetryItems.add(builder.build());
}
return telemetryItems;
}
public static void updateMetricPointBuilder(
MetricTelemetryBuilder metricTelemetryBuilder,
MetricData metricData,
PointData pointData,
boolean captureHttpServer4xxAsError,
boolean isPreAggregatedStandardMetric) {
checkArgument(metricData != null, "MetricData cannot be null.");
MetricPointBuilder pointBuilder = new MetricPointBuilder();
MetricDataType type = metricData.getType();
double pointDataValue;
switch (type) {
case LONG_SUM:
case LONG_GAUGE:
pointDataValue = (double) ((LongPointData) pointData).getValue();
break;
case DOUBLE_SUM:
case DOUBLE_GAUGE:
pointDataValue = ((DoublePointData) pointData).getValue();
break;
case HISTOGRAM:
long histogramCount = ((HistogramPointData) pointData).getCount();
if (histogramCount <= Integer.MAX_VALUE && histogramCount >= Integer.MIN_VALUE) {
pointBuilder.setCount((int) histogramCount);
}
HistogramPointData histogramPointData = (HistogramPointData) pointData;
double min = histogramPointData.getMin();
double max = histogramPointData.getMax();
if (shouldConvertToMilliseconds(metricData.getName(), isPreAggregatedStandardMetric)) {
min = min * 1000;
max = max * 1000;
}
pointDataValue = histogramPointData.getSum();
pointBuilder.setMin(min);
pointBuilder.setMax(max);
break;
case SUMMARY:
case EXPONENTIAL_HISTOGRAM:
default:
throw new IllegalArgumentException("metric data type '" + type + "' is not supported yet");
}
if (shouldConvertToMilliseconds(metricData.getName(), isPreAggregatedStandardMetric)) {
pointDataValue = pointDataValue * 1000;
}
pointBuilder.setValue(pointDataValue);
String metricName = pointData.getAttributes().get(APPLICATIONINSIGHTS_INTERNAL_METRIC_NAME);
if (metricName != null) {
pointBuilder.setName(metricName);
} else {
pointBuilder.setName(metricData.getName());
}
metricTelemetryBuilder.setMetricPoint(pointBuilder);
Attributes attributes = pointData.getAttributes();
if (isPreAggregatedStandardMetric) {
Long statusCode = getStableOrOldAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE);
boolean success = isSuccess(metricData.getName(), statusCode, captureHttpServer4xxAsError);
Boolean isSynthetic = attributes.get(IS_SYNTHETIC);
attributes.forEach(
(key, value) ->
applyConnectionStringAndRoleNameOverrides(
metricTelemetryBuilder, value, key.getKey()));
if (isServer(metricData.getName())) {
RequestExtractor.extract(metricTelemetryBuilder, statusCode, success, isSynthetic);
} else if (isClient(metricData.getName())) {
String dependencyType;
int defaultPort;
if (metricData.getName().startsWith("http")) {
dependencyType = "Http";
defaultPort = getDefaultPortForHttpScheme(getStableOrOldAttribute(attributes, SemanticAttributes.URL_SCHEME, SemanticAttributes.HTTP_SCHEME));
} else {
dependencyType = attributes.get(SemanticAttributes.RPC_SYSTEM);
if (dependencyType == null) {
dependencyType = "Unknown";
}
defaultPort = Integer.MAX_VALUE;
}
String target = SpanDataMapper.getTargetOrDefault(attributes, defaultPort, dependencyType);
DependencyExtractor.extract(
metricTelemetryBuilder, statusCode, success, dependencyType, target, isSynthetic);
}
} else {
MappingsBuilder mappingsBuilder = new MappingsBuilder(METRIC);
mappingsBuilder.build().map(attributes, metricTelemetryBuilder);
}
}
private static boolean shouldConvertToMilliseconds(String metricName, boolean isPreAggregatedStandardMetric) {
return isPreAggregatedStandardMetric && (metricName.equals("http.server.request.duration") || metricName.equals("http.client.request.duration"));
}
private static boolean applyConnectionStringAndRoleNameOverrides(
AbstractTelemetryBuilder telemetryBuilder, Object value, String key) {
if (key.equals(AiSemanticAttributes.INTERNAL_CONNECTION_STRING.getKey())
&& value instanceof String) {
telemetryBuilder.setConnectionString(ConnectionString.parse((String) value));
return true;
}
if (key.equals(AiSemanticAttributes.INTERNAL_ROLE_NAME.getKey()) && value instanceof String) {
telemetryBuilder.addTag(ContextTagKeys.AI_CLOUD_ROLE.toString(), (String) value);
return true;
}
return false;
}
private static int getDefaultPortForHttpScheme(@Nullable String httpScheme) {
if (httpScheme == null) {
return Integer.MAX_VALUE;
}
if (httpScheme.equals("https")) {
return 443;
}
if (httpScheme.equals("http")) {
return 80;
}
return Integer.MAX_VALUE;
}
private static boolean isSuccess(String metricName, Long statusCode, boolean captureHttpServer4xxAsError) {
if (statusCode == null) {
return true;
}
if (isClient(metricName)) {
return statusCode < 400;
}
if (isServer(metricName)) {
if (captureHttpServer4xxAsError) {
return statusCode < 400;
}
return statusCode < 500;
}
return false;
}
private static boolean isServer(String metricName) {
return metricName.contains(".server.");
}
} |
you need to modify exception message for you delete 'BinaryPredicate' | public void analyzeImpl(Analyzer analyzer) throws AnalysisException {
Type whenType = null;
Type returnType = null;
Expr lastCompatibleThenExpr = null;
Expr lastCompatibleWhenExpr = null;
int loopEnd = children.size();
if (hasElseExpr) {
--loopEnd;
}
int loopStart;
Expr caseExpr = null;
if (hasCaseExpr) {
loopStart = 1;
caseExpr = children.get(0);
caseExpr.analyze(analyzer);
if (caseExpr instanceof Subquery && !caseExpr.getType().isScalarType()) {
throw new AnalysisException("Subquery in case-when must return scala type");
}
whenType = caseExpr.getType();
lastCompatibleWhenExpr = children.get(0);
} else {
whenType = Type.BOOLEAN;
loopStart = 0;
}
for (int i = loopStart; i < loopEnd; i += 2) {
Expr whenExpr = children.get(i);
if (hasCaseExpr) {
whenType = analyzer.getCompatibleType(whenType, lastCompatibleWhenExpr, whenExpr);
lastCompatibleWhenExpr = whenExpr;
} else {
if (!Type.canCastTo(whenExpr.getType(), Type.BOOLEAN)) {
throw new AnalysisException("When expr '" + whenExpr.toSql() + "'"
+ " is not of type boolean and not castable to type boolean.");
}
if (!whenExpr.getType().isBoolean()) {
castChild(Type.BOOLEAN, i);
}
}
if (whenExpr instanceof Subquery && !whenExpr.getType().isScalarType()) {
throw new AnalysisException("Subquery in case-when must return scala type");
}
if (whenExpr.contains(Predicates.instanceOf(Subquery.class))
&& !((hasCaseExpr() && whenExpr instanceof Subquery || !checkSubquery(whenExpr)))) {
throw new AnalysisException("Only support subquery in binary predicate in case statement.");
}
Expr thenExpr = children.get(i + 1);
if (thenExpr instanceof Subquery && !thenExpr.getType().isScalarType()) {
throw new AnalysisException("Subquery in case-when must return scala type");
}
returnType = analyzer.getCompatibleType(returnType, lastCompatibleThenExpr, thenExpr);
lastCompatibleThenExpr = thenExpr;
}
if (hasElseExpr) {
Expr elseExpr = children.get(children.size() - 1);
if (elseExpr instanceof Subquery && !elseExpr.getType().isScalarType()) {
throw new AnalysisException("Subquery in case-when must return scala type");
}
returnType = analyzer.getCompatibleType(returnType, lastCompatibleThenExpr, elseExpr);
}
if (hasCaseExpr) {
if (children.get(0).type != whenType) {
castChild(whenType, 0);
}
for (int i = loopStart; i < loopEnd; i += 2) {
if (children.get(i).type != whenType) {
castChild(whenType, i);
}
}
}
for (int i = loopStart + 1; i < children.size(); i += 2) {
if (children.get(i).type != returnType) {
castChild(returnType, i);
}
}
if (hasElseExpr) {
if (children.get(children.size() - 1).type != returnType) {
castChild(returnType, children.size() - 1);
}
}
type = returnType;
} | throw new AnalysisException("Only support subquery in binary predicate in case statement."); | public void analyzeImpl(Analyzer analyzer) throws AnalysisException {
Type whenType = null;
Type returnType = null;
Expr lastCompatibleThenExpr = null;
Expr lastCompatibleWhenExpr = null;
int loopEnd = children.size();
if (hasElseExpr) {
--loopEnd;
}
int loopStart;
Expr caseExpr = null;
if (hasCaseExpr) {
loopStart = 1;
caseExpr = children.get(0);
caseExpr.analyze(analyzer);
if (caseExpr instanceof Subquery && !caseExpr.getType().isScalarType()) {
throw new AnalysisException("Subquery in case-when must return scala type");
}
whenType = caseExpr.getType();
lastCompatibleWhenExpr = children.get(0);
} else {
whenType = Type.BOOLEAN;
loopStart = 0;
}
for (int i = loopStart; i < loopEnd; i += 2) {
Expr whenExpr = children.get(i);
if (hasCaseExpr) {
whenType = analyzer.getCompatibleType(whenType, lastCompatibleWhenExpr, whenExpr);
lastCompatibleWhenExpr = whenExpr;
} else {
if (!Type.canCastTo(whenExpr.getType(), Type.BOOLEAN)) {
throw new AnalysisException("When expr '" + whenExpr.toSql() + "'"
+ " is not of type boolean and not castable to type boolean.");
}
if (!whenExpr.getType().isBoolean()) {
castChild(Type.BOOLEAN, i);
}
}
if (whenExpr instanceof Subquery && !whenExpr.getType().isScalarType()) {
throw new AnalysisException("Subquery in case-when must return scala type");
}
if (whenExpr.contains(Predicates.instanceOf(Subquery.class))
&& !((hasCaseExpr() && whenExpr instanceof Subquery || !checkSubquery(whenExpr)))) {
throw new AnalysisException("Only support subquery in binary predicate in case statement.");
}
Expr thenExpr = children.get(i + 1);
if (thenExpr instanceof Subquery && !thenExpr.getType().isScalarType()) {
throw new AnalysisException("Subquery in case-when must return scala type");
}
returnType = analyzer.getCompatibleType(returnType, lastCompatibleThenExpr, thenExpr);
lastCompatibleThenExpr = thenExpr;
}
if (hasElseExpr) {
Expr elseExpr = children.get(children.size() - 1);
if (elseExpr instanceof Subquery && !elseExpr.getType().isScalarType()) {
throw new AnalysisException("Subquery in case-when must return scala type");
}
returnType = analyzer.getCompatibleType(returnType, lastCompatibleThenExpr, elseExpr);
}
if (hasCaseExpr) {
if (children.get(0).type != whenType) {
castChild(whenType, 0);
}
for (int i = loopStart; i < loopEnd; i += 2) {
if (children.get(i).type != whenType) {
castChild(whenType, i);
}
}
}
for (int i = loopStart + 1; i < children.size(); i += 2) {
if (children.get(i).type != returnType) {
castChild(returnType, i);
}
}
if (hasElseExpr) {
if (children.get(children.size() - 1).type != returnType) {
castChild(returnType, children.size() - 1);
}
}
type = returnType;
} | class CaseExpr extends Expr {
private boolean hasCaseExpr;
private boolean hasElseExpr;
public CaseExpr(Expr caseExpr, List<CaseWhenClause> whenClauses, Expr elseExpr) {
super();
if (caseExpr != null) {
children.add(caseExpr);
hasCaseExpr = true;
}
for (CaseWhenClause whenClause : whenClauses) {
Preconditions.checkNotNull(whenClause.getWhenExpr());
children.add(whenClause.getWhenExpr());
Preconditions.checkNotNull(whenClause.getThenExpr());
children.add(whenClause.getThenExpr());
}
if (elseExpr != null) {
children.add(elseExpr);
hasElseExpr = true;
}
}
protected CaseExpr(CaseExpr other) {
super(other);
hasCaseExpr = other.hasCaseExpr;
hasElseExpr = other.hasElseExpr;
}
@Override
public Expr clone() {
return new CaseExpr(this);
}
@Override
public boolean equals(Object obj) {
if (!super.equals(obj)) {
return false;
}
CaseExpr expr = (CaseExpr) obj;
return hasCaseExpr == expr.hasCaseExpr && hasElseExpr == expr.hasElseExpr;
}
public boolean hasCaseExpr() {
return hasCaseExpr;
}
@Override
public String toSqlImpl() {
StringBuilder output = new StringBuilder("CASE");
int childIdx = 0;
if (hasCaseExpr) {
output.append(children.get(childIdx++).toSql());
}
while (childIdx + 2 <= children.size()) {
output.append(" WHEN " + children.get(childIdx++).toSql());
output.append(" THEN " + children.get(childIdx++).toSql());
}
if (hasElseExpr) {
output.append(" ELSE " + children.get(children.size() - 1).toSql());
}
output.append(" END");
return output.toString();
}
@Override
public boolean isVectorized() {
return false;
}
@Override
protected void toThrift(TExprNode msg) {
msg.node_type = TExprNodeType.CASE_EXPR;
msg.case_expr = new TCaseExpr(hasCaseExpr, hasElseExpr);
}
@Override
public List<Expr> getConditionExprs() {
List<Expr> exprs = Lists.newArrayList();
int childIdx = 0;
if (hasCaseExpr) {
exprs.add(children.get(childIdx++));
}
while (childIdx + 2 <= children.size()) {
exprs.add(children.get(childIdx++));
childIdx++;
}
return exprs;
}
public List<Expr> getReturnExprs() {
List<Expr> exprs = Lists.newArrayList();
int childIdx = 0;
if (hasCaseExpr) {
childIdx++;
}
while (childIdx + 2 <= children.size()) {
childIdx++;
exprs.add(children.get(childIdx++));
}
if (hasElseExpr) {
exprs.add(children.get(children.size() - 1));
}
return exprs;
}
public static Expr computeCaseExpr(CaseExpr expr) {
LiteralExpr caseExpr;
int startIndex = 0;
int endIndex = expr.getChildren().size();
if (expr.hasCaseExpr()) {
Expr caseChildExpr = expr.getChild(0);
if (!caseChildExpr.isLiteral()
|| caseChildExpr instanceof DecimalLiteral || caseChildExpr instanceof FloatLiteral) {
return expr;
}
caseExpr = (LiteralExpr) expr.getChild(0);
startIndex++;
} else {
caseExpr = new BoolLiteral(true);
}
if (caseExpr instanceof NullLiteral) {
if (expr.hasElseExpr) {
return expr.getChild(expr.getChildren().size() - 1);
} else {
return new NullLiteral();
}
}
if (expr.hasElseExpr) {
endIndex--;
}
Expr startExpr = expr.getChild(startIndex);
if ((!startExpr.isLiteral() || startExpr instanceof DecimalLiteral || startExpr instanceof FloatLiteral)
|| (!(startExpr instanceof NullLiteral) && !startExpr.getClass().toString().equals(caseExpr.getClass().toString()))) {
return expr;
}
for (int i = startIndex; i < endIndex; i = i + 2) {
Expr currentWhenExpr = expr.getChild(i);
if (currentWhenExpr instanceof NullLiteral) {
continue;
}
if ((!currentWhenExpr.isLiteral() || currentWhenExpr instanceof DecimalLiteral || currentWhenExpr instanceof FloatLiteral)
|| !currentWhenExpr.getClass().toString().equals(caseExpr.getClass().toString())) {
List<Expr> exprLeft = new ArrayList<>();
if (expr.hasCaseExpr()) {
exprLeft.add(caseExpr);
}
for (int j = i; j < expr.getChildren().size(); j++) {
exprLeft.add(expr.getChild(j));
}
Expr retCaseExpr = expr.clone();
retCaseExpr.getChildren().clear();
retCaseExpr.addChildren(exprLeft);
return retCaseExpr;
} else if (caseExpr.compareLiteral((LiteralExpr) currentWhenExpr) == 0) {
return expr.getChild(i + 1);
}
}
if (expr.hasElseExpr) {
return expr.getChild(expr.getChildren().size() - 1);
} else {
return new NullLiteral();
}
}
private boolean checkSubquery(Expr expr) {
for (Expr child : expr.getChildren()) {
if (child instanceof Subquery && (expr instanceof ExistsPredicate || expr instanceof InPredicate)) {
return true;
}
if (checkSubquery(child)) {
return true;
}
}
return false;
}
} | class CaseExpr extends Expr {
private boolean hasCaseExpr;
private boolean hasElseExpr;
public CaseExpr(Expr caseExpr, List<CaseWhenClause> whenClauses, Expr elseExpr) {
super();
if (caseExpr != null) {
children.add(caseExpr);
hasCaseExpr = true;
}
for (CaseWhenClause whenClause : whenClauses) {
Preconditions.checkNotNull(whenClause.getWhenExpr());
children.add(whenClause.getWhenExpr());
Preconditions.checkNotNull(whenClause.getThenExpr());
children.add(whenClause.getThenExpr());
}
if (elseExpr != null) {
children.add(elseExpr);
hasElseExpr = true;
}
}
protected CaseExpr(CaseExpr other) {
super(other);
hasCaseExpr = other.hasCaseExpr;
hasElseExpr = other.hasElseExpr;
}
@Override
public Expr clone() {
return new CaseExpr(this);
}
@Override
public boolean equals(Object obj) {
if (!super.equals(obj)) {
return false;
}
CaseExpr expr = (CaseExpr) obj;
return hasCaseExpr == expr.hasCaseExpr && hasElseExpr == expr.hasElseExpr;
}
public boolean hasCaseExpr() {
return hasCaseExpr;
}
@Override
public String toSqlImpl() {
StringBuilder output = new StringBuilder("CASE");
int childIdx = 0;
if (hasCaseExpr) {
output.append(children.get(childIdx++).toSql());
}
while (childIdx + 2 <= children.size()) {
output.append(" WHEN " + children.get(childIdx++).toSql());
output.append(" THEN " + children.get(childIdx++).toSql());
}
if (hasElseExpr) {
output.append(" ELSE " + children.get(children.size() - 1).toSql());
}
output.append(" END");
return output.toString();
}
@Override
public boolean isVectorized() {
return false;
}
@Override
protected void toThrift(TExprNode msg) {
msg.node_type = TExprNodeType.CASE_EXPR;
msg.case_expr = new TCaseExpr(hasCaseExpr, hasElseExpr);
}
@Override
public List<Expr> getConditionExprs() {
List<Expr> exprs = Lists.newArrayList();
int childIdx = 0;
if (hasCaseExpr) {
exprs.add(children.get(childIdx++));
}
while (childIdx + 2 <= children.size()) {
exprs.add(children.get(childIdx++));
childIdx++;
}
return exprs;
}
public List<Expr> getReturnExprs() {
List<Expr> exprs = Lists.newArrayList();
int childIdx = 0;
if (hasCaseExpr) {
childIdx++;
}
while (childIdx + 2 <= children.size()) {
childIdx++;
exprs.add(children.get(childIdx++));
}
if (hasElseExpr) {
exprs.add(children.get(children.size() - 1));
}
return exprs;
}
public static Expr computeCaseExpr(CaseExpr expr) {
LiteralExpr caseExpr;
int startIndex = 0;
int endIndex = expr.getChildren().size();
if (expr.hasCaseExpr()) {
Expr caseChildExpr = expr.getChild(0);
if (!caseChildExpr.isLiteral()
|| caseChildExpr instanceof DecimalLiteral || caseChildExpr instanceof FloatLiteral) {
return expr;
}
caseExpr = (LiteralExpr) expr.getChild(0);
startIndex++;
} else {
caseExpr = new BoolLiteral(true);
}
if (caseExpr instanceof NullLiteral) {
if (expr.hasElseExpr) {
return expr.getChild(expr.getChildren().size() - 1);
} else {
return new NullLiteral();
}
}
if (expr.hasElseExpr) {
endIndex--;
}
Expr startExpr = expr.getChild(startIndex);
if ((!startExpr.isLiteral() || startExpr instanceof DecimalLiteral || startExpr instanceof FloatLiteral)
|| (!(startExpr instanceof NullLiteral) && !startExpr.getClass().toString().equals(caseExpr.getClass().toString()))) {
return expr;
}
for (int i = startIndex; i < endIndex; i = i + 2) {
Expr currentWhenExpr = expr.getChild(i);
if (currentWhenExpr instanceof NullLiteral) {
continue;
}
if ((!currentWhenExpr.isLiteral() || currentWhenExpr instanceof DecimalLiteral || currentWhenExpr instanceof FloatLiteral)
|| !currentWhenExpr.getClass().toString().equals(caseExpr.getClass().toString())) {
List<Expr> exprLeft = new ArrayList<>();
if (expr.hasCaseExpr()) {
exprLeft.add(caseExpr);
}
for (int j = i; j < expr.getChildren().size(); j++) {
exprLeft.add(expr.getChild(j));
}
Expr retCaseExpr = expr.clone();
retCaseExpr.getChildren().clear();
retCaseExpr.addChildren(exprLeft);
return retCaseExpr;
} else if (caseExpr.compareLiteral((LiteralExpr) currentWhenExpr) == 0) {
return expr.getChild(i + 1);
}
}
if (expr.hasElseExpr) {
return expr.getChild(expr.getChildren().size() - 1);
} else {
return new NullLiteral();
}
}
private boolean checkSubquery(Expr expr) {
for (Expr child : expr.getChildren()) {
if (child instanceof Subquery && (expr instanceof ExistsPredicate || expr instanceof InPredicate)) {
return true;
}
if (checkSubquery(child)) {
return true;
}
}
return false;
}
} |
Junit 5 has API to assert thrown exception, ```java assertThrows( MyException.class, () -> myObject.doThing(), "Expected doThing() to throw, but it didn't" ); ``` | public void testInvalidScopeFromRequestSync(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope" + invalidCharacter);
AzureCliCredential credential = new AzureCliCredentialBuilder().build();
try {
credential.getTokenSync(request);
} catch (Exception e) {
assertTrue(e instanceof IllegalArgumentException);
}
} | credential.getTokenSync(request); | public void testInvalidScopeFromRequestSync(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope" + invalidCharacter);
AzureCliCredential credential = new AzureCliCredentialBuilder().build();
assertThrows(IllegalArgumentException.class, () -> credential.getTokenSync(request));
} | class AzureCliCredentialNegativeTest {
static Stream<String> invalidCharacters() {
return Stream.of("|", "&", ";");
}
@ParameterizedTest
@MethodSource("invalidCharacters")
public void testInvalidScopeFromRequest(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope" + invalidCharacter);
AzureCliCredential credential = new AzureCliCredentialBuilder().build();
StepVerifier.create(credential.getToken(request))
.expectErrorMatches(e -> e instanceof IllegalArgumentException)
.verify();
}
@ParameterizedTest
@MethodSource("invalidCharacters")
public void testInvalidTenantFromRequest(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope").setTenantId("tenant" + invalidCharacter);
AzureCliCredential credential = new AzureCliCredentialBuilder().build();
StepVerifier.create(credential.getToken(request))
.expectErrorMatches(e -> e instanceof IllegalArgumentException)
.verify();
}
@ParameterizedTest
@MethodSource("invalidCharacters")
@ParameterizedTest
@MethodSource("invalidCharacters")
public void testInvalidTenantFromRequestSync(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope").setTenantId("tenant" + invalidCharacter);
AzureCliCredential credential = new AzureCliCredentialBuilder().build();
try {
credential.getTokenSync(request);
} catch (Exception e) {
assertTrue(e instanceof IllegalArgumentException);
}
}
} | class AzureCliCredentialNegativeTest {
static Stream<String> invalidCharacters() {
return Stream.of("|", "&", ";");
}
@ParameterizedTest
@MethodSource("invalidCharacters")
public void testInvalidScopeFromRequest(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope" + invalidCharacter);
AzureCliCredential credential = new AzureCliCredentialBuilder().build();
StepVerifier.create(credential.getToken(request))
.expectErrorMatches(e -> e instanceof IllegalArgumentException)
.verify();
}
@ParameterizedTest
@MethodSource("invalidCharacters")
public void testInvalidTenantFromRequest(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope").setTenantId("tenant" + invalidCharacter);
AzureCliCredential credential = new AzureCliCredentialBuilder().build();
StepVerifier.create(credential.getToken(request))
.expectErrorMatches(e -> e instanceof IllegalArgumentException)
.verify();
}
@ParameterizedTest
@MethodSource("invalidCharacters")
@ParameterizedTest
@MethodSource("invalidCharacters")
public void testInvalidTenantFromRequestSync(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope").setTenantId("tenant" + invalidCharacter);
AzureCliCredential credential = new AzureCliCredentialBuilder().build();
assertThrows(IllegalArgumentException.class, () -> credential.getTokenSync(request));
}
} |
Not sure if this comment adds any value | OperationHandlerImpl createHandler() throws Exception {
VisitorSession visitorSession = mock(VisitorSession.class);
when(documentAccess.createVisitorSession(any(VisitorParameters.class))).thenAnswer(p -> {
VisitorParameters params = (VisitorParameters)p.getArguments()[0];
assignedParameters.set(params);
VisitorStatistics statistics = new VisitorStatistics();
statistics.setBucketsVisited(bucketsVisited);
params.getControlHandler().onVisitorStatistics(statistics);
ProgressToken progress = new ProgressToken();
params.getControlHandler().onProgress(progress);
params.getControlHandler().onDone(completionCode, "bork bork");
return visitorSession;
});
OperationHandlerImpl.ClusterEnumerator clusterEnumerator = () -> Arrays.asList(new ClusterDef("foo", "configId"));
return new OperationHandlerImpl(documentAccess, clusterEnumerator);
} | OperationHandlerImpl createHandler() throws Exception {
VisitorSession visitorSession = mock(VisitorSession.class);
when(documentAccess.createVisitorSession(any(VisitorParameters.class))).thenAnswer(p -> {
VisitorParameters params = (VisitorParameters)p.getArguments()[0];
assignedParameters.set(params);
VisitorStatistics statistics = new VisitorStatistics();
statistics.setBucketsVisited(bucketsVisited);
params.getControlHandler().onVisitorStatistics(statistics);
ProgressToken progress = new ProgressToken();
params.getControlHandler().onProgress(progress);
params.getControlHandler().onDone(completionCode, "bork bork");
return visitorSession;
});
OperationHandlerImpl.ClusterEnumerator clusterEnumerator = () -> Arrays.asList(new ClusterDef("foo", "configId"));
return new OperationHandlerImpl(documentAccess, clusterEnumerator);
} | class OperationHandlerImplFixture {
DocumentAccess documentAccess = mock(DocumentAccess.class);
AtomicReference<VisitorParameters> assignedParameters = new AtomicReference<>();
VisitorControlHandler.CompletionCode completionCode = VisitorControlHandler.CompletionCode.SUCCESS;
int bucketsVisited = 0;
} | class OperationHandlerImplFixture {
DocumentAccess documentAccess = mock(DocumentAccess.class);
AtomicReference<VisitorParameters> assignedParameters = new AtomicReference<>();
VisitorControlHandler.CompletionCode completionCode = VisitorControlHandler.CompletionCode.SUCCESS;
int bucketsVisited = 0;
} | |
Seems one `getSchemaLock` call is enough. | public void stopClusterWriteDB(final String schemaName, final String jobId) {
LockContext lockContext = PipelineContext.getContextManager().getLockContext();
ShardingSphereLock lock = lockContext.getSchemaLock(schemaName).orElse(lockContext.getSchemaLock(schemaName).orElse(null));
if (null == lock) {
log.info("stopClusterWriteDB, lock is null");
throw new RuntimeException("Stop source writing failed");
}
boolean tryLockSuccess = lock.tryLock(schemaName);
log.info("stopClusterWriteDB, tryLockSuccess={}", tryLockSuccess);
if (!tryLockSuccess) {
throw new RuntimeException("Stop source writing failed");
}
} | ShardingSphereLock lock = lockContext.getSchemaLock(schemaName).orElse(lockContext.getSchemaLock(schemaName).orElse(null)); | public void stopClusterWriteDB(final String schemaName, final String jobId) {
LockContext lockContext = PipelineContext.getContextManager().getLockContext();
ShardingSphereLock lock = lockContext.getSchemaLock(schemaName).orElse(null);
if (null == lock) {
log.info("stopClusterWriteDB, lock is null");
throw new RuntimeException("Stop source writing failed");
}
boolean tryLockSuccess = lock.tryLock(schemaName);
log.info("stopClusterWriteDB, tryLockSuccess={}", tryLockSuccess);
if (!tryLockSuccess) {
throw new RuntimeException("Stop source writing failed");
}
} | class RuleAlteredJobAPIImpl extends AbstractPipelineJobAPIImpl implements RuleAlteredJobAPI {
private static final Map<String, DataConsistencyCheckAlgorithm> DATA_CONSISTENCY_CHECK_ALGORITHM_MAP = new TreeMap<>(
SingletonSPIRegistry.getTypedSingletonInstancesMap(DataConsistencyCheckAlgorithm.class));
@Override
public List<JobInfo> list() {
checkModeConfig();
return getJobBriefInfos().map(each -> getJobInfo(each.getJobName())).collect(Collectors.toList());
}
private void checkModeConfig() {
ModeConfiguration modeConfig = PipelineContext.getModeConfig();
Preconditions.checkNotNull(modeConfig, "Mode configuration is required.");
Preconditions.checkArgument("Cluster".equals(modeConfig.getType()), "Mode must be `Cluster`.");
}
private Stream<JobBriefInfo> getJobBriefInfos() {
return PipelineAPIFactory.getJobStatisticsAPI().getAllJobsBriefInfo().stream().filter(each -> !each.getJobName().startsWith("_"));
}
private JobInfo getJobInfo(final String jobName) {
JobInfo result = new JobInfo(jobName);
JobConfigurationPOJO jobConfigPOJO = getElasticJobConfigPOJO(result.getJobId());
JobConfiguration jobConfig = getJobConfig(jobConfigPOJO);
result.setActive(!jobConfigPOJO.isDisabled());
result.setShardingTotalCount(jobConfig.getHandleConfig().getJobShardingCount());
result.setTables(jobConfig.getHandleConfig().getLogicTables());
result.setCreateTime(jobConfigPOJO.getProps().getProperty("create_time"));
result.setStopTime(jobConfigPOJO.getProps().getProperty("stop_time"));
result.setJobParameter(jobConfigPOJO.getJobParameter());
return result;
}
@Override
public Optional<String> start(final JobConfiguration jobConfig) {
jobConfig.buildHandleConfig();
if (jobConfig.getHandleConfig().getJobShardingCount() == 0) {
log.warn("Invalid scaling job config!");
throw new PipelineJobCreationException("handleConfig shardingTotalCount is 0");
}
log.info("Start scaling job by {}", jobConfig.getHandleConfig());
GovernanceRepositoryAPI repositoryAPI = PipelineAPIFactory.getGovernanceRepositoryAPI();
String jobId = jobConfig.getHandleConfig().getJobId();
String jobConfigKey = String.format("%s/%s/config", DataPipelineConstants.DATA_PIPELINE_ROOT, jobId);
if (repositoryAPI.isExisted(jobConfigKey)) {
log.warn("jobId already exists in registry center, ignore, jobConfigKey={}", jobConfigKey);
return Optional.of(jobId);
}
repositoryAPI.persist(String.format("%s/%s", DataPipelineConstants.DATA_PIPELINE_ROOT, jobId), RuleAlteredJob.class.getName());
repositoryAPI.persist(jobConfigKey, createJobConfig(jobConfig));
return Optional.of(jobId);
}
private String createJobConfig(final JobConfiguration jobConfig) {
JobConfigurationPOJO jobConfigPOJO = new JobConfigurationPOJO();
jobConfigPOJO.setJobName(jobConfig.getHandleConfig().getJobId());
jobConfigPOJO.setShardingTotalCount(jobConfig.getHandleConfig().getJobShardingCount());
jobConfigPOJO.setJobParameter(YamlEngine.marshal(jobConfig));
jobConfigPOJO.getProps().setProperty("create_time", LocalDateTime.now().format(DATE_TIME_FORMATTER));
return YamlEngine.marshal(jobConfigPOJO);
}
@Override
public Map<Integer, JobProgress> getProgress(final String jobId) {
checkModeConfig();
JobConfiguration jobConfig = getJobConfig(jobId);
return getProgress(jobConfig);
}
@Override
public Map<Integer, JobProgress> getProgress(final JobConfiguration jobConfig) {
String jobId = jobConfig.getHandleConfig().getJobId();
JobConfigurationPOJO jobConfigPOJO = getElasticJobConfigPOJO(jobId);
return IntStream.range(0, jobConfig.getHandleConfig().getJobShardingCount()).boxed().collect(LinkedHashMap::new, (map, each) -> {
JobProgress jobProgress = PipelineAPIFactory.getGovernanceRepositoryAPI().getJobProgress(jobId, each);
if (null != jobProgress) {
jobProgress.setActive(!jobConfigPOJO.isDisabled());
}
map.put(each, jobProgress);
}, LinkedHashMap::putAll);
}
@Override
public void stopClusterWriteDB(final String jobId) {
checkModeConfig();
log.info("stopClusterWriteDB for job {}", jobId);
JobConfiguration jobConfig = getJobConfig(jobId);
String schemaName = jobConfig.getWorkflowConfig().getSchemaName();
stopClusterWriteDB(schemaName, jobId);
}
@Override
@Override
public void restoreClusterWriteDB(final String jobId) {
checkModeConfig();
log.info("restoreClusterWriteDB for job {}", jobId);
JobConfiguration jobConfig = getJobConfig(jobId);
String schemaName = jobConfig.getWorkflowConfig().getSchemaName();
restoreClusterWriteDB(schemaName, jobId);
}
@Override
public void restoreClusterWriteDB(final String schemaName, final String jobId) {
LockContext lockContext = PipelineContext.getContextManager().getLockContext();
ShardingSphereLock lock = lockContext.getSchemaLock(schemaName).orElse(null);
if (null == lock) {
log.info("restoreClusterWriteDB, lock is null");
throw new RuntimeException("Not necessary to restore source writing");
}
boolean isLocked = lock.isLocked(schemaName);
if (!isLocked) {
log.info("restoreClusterWriteDB, isLocked false, schemaName={}", schemaName);
throw new RuntimeException("Not necessary to restore source writing");
}
lock.releaseLock(schemaName);
}
@Override
public Collection<DataConsistencyCheckAlgorithmInfo> listDataConsistencyCheckAlgorithms() {
checkModeConfig();
return DATA_CONSISTENCY_CHECK_ALGORITHM_MAP.values()
.stream().map(each -> {
DataConsistencyCheckAlgorithmInfo algorithmInfo = new DataConsistencyCheckAlgorithmInfo();
algorithmInfo.setType(each.getType());
algorithmInfo.setDescription(each.getDescription());
algorithmInfo.setSupportedDatabaseTypes(each.getSupportedDatabaseTypes());
algorithmInfo.setProvider(each.getProvider());
return algorithmInfo;
}).collect(Collectors.toList());
}
@Override
public boolean isDataConsistencyCheckNeeded(final String jobId) {
log.info("isDataConsistencyCheckNeeded for job {}", jobId);
JobConfiguration jobConfig = getJobConfig(jobId);
return isDataConsistencyCheckNeeded(jobConfig);
}
@Override
public boolean isDataConsistencyCheckNeeded(final JobConfiguration jobConfig) {
RuleAlteredContext ruleAlteredContext = RuleAlteredJobWorker.createRuleAlteredContext(jobConfig);
return isDataConsistencyCheckNeeded(ruleAlteredContext);
}
private boolean isDataConsistencyCheckNeeded(final RuleAlteredContext ruleAlteredContext) {
return null != ruleAlteredContext.getDataConsistencyCheckAlgorithm();
}
@Override
public Map<String, DataConsistencyCheckResult> dataConsistencyCheck(final String jobId) {
checkModeConfig();
log.info("Data consistency check for job {}", jobId);
JobConfiguration jobConfig = getJobConfig(jobId);
return dataConsistencyCheck(jobConfig);
}
@Override
public Map<String, DataConsistencyCheckResult> dataConsistencyCheck(final JobConfiguration jobConfig) {
RuleAlteredContext ruleAlteredContext = RuleAlteredJobWorker.createRuleAlteredContext(jobConfig);
if (!isDataConsistencyCheckNeeded(ruleAlteredContext)) {
log.info("dataConsistencyCheckAlgorithm is not configured, data consistency check is ignored.");
return Collections.emptyMap();
}
return dataConsistencyCheck0(jobConfig, ruleAlteredContext.getDataConsistencyCheckAlgorithm());
}
@Override
public Map<String, DataConsistencyCheckResult> dataConsistencyCheck(final String jobId, final String algorithmType) {
checkModeConfig();
log.info("Data consistency check for job {}, algorithmType: {}", jobId, algorithmType);
JobConfiguration jobConfig = getJobConfig(jobId);
TypedSPIConfiguration typedSPIConfig = new ShardingSphereAlgorithmConfiguration(algorithmType, new Properties());
DataConsistencyCheckAlgorithm checkAlgorithm = ShardingSphereAlgorithmFactory.createAlgorithm(typedSPIConfig, DataConsistencyCheckAlgorithm.class);
return dataConsistencyCheck0(jobConfig, checkAlgorithm);
}
private Map<String, DataConsistencyCheckResult> dataConsistencyCheck0(final JobConfiguration jobConfig, final DataConsistencyCheckAlgorithm checkAlgorithm) {
String jobId = jobConfig.getHandleConfig().getJobId();
DataConsistencyChecker dataConsistencyChecker = EnvironmentCheckerFactory.newInstance(jobConfig);
Map<String, DataConsistencyCheckResult> result = dataConsistencyChecker.checkRecordsCount();
if (result.values().stream().allMatch(DataConsistencyCheckResult::isRecordsCountMatched)) {
Map<String, Boolean> contentCheckResult = dataConsistencyChecker.checkRecordsContent(checkAlgorithm);
result.forEach((key, value) -> value.setRecordsContentMatched(contentCheckResult.getOrDefault(key, false)));
}
log.info("Scaling job {} with check algorithm '{}' data consistency checker result {}", jobId, checkAlgorithm.getClass().getName(), result);
PipelineAPIFactory.getGovernanceRepositoryAPI().persistJobCheckResult(jobId, aggregateDataConsistencyCheckResults(jobId, result));
return result;
}
@Override
public boolean aggregateDataConsistencyCheckResults(final String jobId, final Map<String, DataConsistencyCheckResult> checkResultMap) {
if (checkResultMap.isEmpty()) {
return false;
}
for (Entry<String, DataConsistencyCheckResult> entry : checkResultMap.entrySet()) {
boolean recordsCountMatched = entry.getValue().isRecordsCountMatched();
boolean recordsContentMatched = entry.getValue().isRecordsContentMatched();
if (!recordsContentMatched || !recordsCountMatched) {
log.error("Scaling job: {}, table: {} data consistency check failed, recordsContentMatched: {}, recordsCountMatched: {}",
jobId, entry.getKey(), recordsContentMatched, recordsCountMatched);
return false;
}
}
return true;
}
@Override
public void switchClusterConfiguration(final String jobId) {
checkModeConfig();
log.info("Switch cluster configuration for job {}", jobId);
JobConfiguration jobConfig = getJobConfig(jobId);
switchClusterConfiguration(jobConfig);
}
@Override
public void switchClusterConfiguration(final JobConfiguration jobConfig) {
String jobId = jobConfig.getHandleConfig().getJobId();
RuleAlteredContext ruleAlteredContext = RuleAlteredJobWorker.createRuleAlteredContext(jobConfig);
GovernanceRepositoryAPI repositoryAPI = PipelineAPIFactory.getGovernanceRepositoryAPI();
if (isDataConsistencyCheckNeeded(ruleAlteredContext)) {
Optional<Boolean> checkResultOptional = repositoryAPI.getJobCheckResult(jobId);
if (!checkResultOptional.isPresent() || !checkResultOptional.get()) {
throw new PipelineDataConsistencyCheckFailedException("Data consistency check not finished or failed.");
}
}
WorkflowConfiguration workflowConfig = jobConfig.getWorkflowConfig();
ScalingTaskFinishedEvent taskFinishedEvent = new ScalingTaskFinishedEvent(workflowConfig.getSchemaName(), workflowConfig.getActiveVersion(), workflowConfig.getNewVersion());
ShardingSphereEventBus.getInstance().post(taskFinishedEvent);
for (int each : repositoryAPI.getShardingItems(jobId)) {
repositoryAPI.updateShardingJobStatus(jobId, each, JobStatus.FINISHED);
}
stop(jobId);
try {
TimeUnit.SECONDS.sleep(1);
} catch (final InterruptedException ex) {
log.error(ex.getMessage());
}
RuleAlteredJobContext jobContext = new RuleAlteredJobContext(jobConfig);
RuleAlteredJobPreparer jobPreparer = new RuleAlteredJobPreparer();
jobPreparer.cleanup(jobContext);
jobContext.close();
}
@Override
public void reset(final String jobId) {
checkModeConfig();
log.info("Scaling job {} reset target table", jobId);
try {
new ScalingEnvironmentManager().cleanupTargetTables(getJobConfig(jobId));
} catch (final SQLException ex) {
throw new PipelineJobExecutionException("Reset target table failed for job " + jobId);
}
}
@Override
public JobConfiguration getJobConfig(final String jobId) {
return getJobConfig(getElasticJobConfigPOJO(jobId));
}
private JobConfiguration getJobConfig(final JobConfigurationPOJO elasticJobConfigPOJO) {
return YamlEngine.unmarshal(elasticJobConfigPOJO.getJobParameter(), JobConfiguration.class, true);
}
} | class RuleAlteredJobAPIImpl extends AbstractPipelineJobAPIImpl implements RuleAlteredJobAPI {
private static final Map<String, DataConsistencyCheckAlgorithm> DATA_CONSISTENCY_CHECK_ALGORITHM_MAP = new TreeMap<>(
SingletonSPIRegistry.getTypedSingletonInstancesMap(DataConsistencyCheckAlgorithm.class));
@Override
public List<JobInfo> list() {
checkModeConfig();
return getJobBriefInfos().map(each -> getJobInfo(each.getJobName())).collect(Collectors.toList());
}
private void checkModeConfig() {
ModeConfiguration modeConfig = PipelineContext.getModeConfig();
Preconditions.checkNotNull(modeConfig, "Mode configuration is required.");
Preconditions.checkArgument("Cluster".equals(modeConfig.getType()), "Mode must be `Cluster`.");
}
private Stream<JobBriefInfo> getJobBriefInfos() {
return PipelineAPIFactory.getJobStatisticsAPI().getAllJobsBriefInfo().stream().filter(each -> !each.getJobName().startsWith("_"));
}
private JobInfo getJobInfo(final String jobName) {
JobInfo result = new JobInfo(jobName);
JobConfigurationPOJO jobConfigPOJO = getElasticJobConfigPOJO(result.getJobId());
JobConfiguration jobConfig = getJobConfig(jobConfigPOJO);
result.setActive(!jobConfigPOJO.isDisabled());
result.setShardingTotalCount(jobConfig.getHandleConfig().getJobShardingCount());
result.setTables(jobConfig.getHandleConfig().getLogicTables());
result.setCreateTime(jobConfigPOJO.getProps().getProperty("create_time"));
result.setStopTime(jobConfigPOJO.getProps().getProperty("stop_time"));
result.setJobParameter(jobConfigPOJO.getJobParameter());
return result;
}
@Override
public Optional<String> start(final JobConfiguration jobConfig) {
jobConfig.buildHandleConfig();
if (jobConfig.getHandleConfig().getJobShardingCount() == 0) {
log.warn("Invalid scaling job config!");
throw new PipelineJobCreationException("handleConfig shardingTotalCount is 0");
}
log.info("Start scaling job by {}", jobConfig.getHandleConfig());
GovernanceRepositoryAPI repositoryAPI = PipelineAPIFactory.getGovernanceRepositoryAPI();
String jobId = jobConfig.getHandleConfig().getJobId();
String jobConfigKey = String.format("%s/%s/config", DataPipelineConstants.DATA_PIPELINE_ROOT, jobId);
if (repositoryAPI.isExisted(jobConfigKey)) {
log.warn("jobId already exists in registry center, ignore, jobConfigKey={}", jobConfigKey);
return Optional.of(jobId);
}
repositoryAPI.persist(String.format("%s/%s", DataPipelineConstants.DATA_PIPELINE_ROOT, jobId), RuleAlteredJob.class.getName());
repositoryAPI.persist(jobConfigKey, createJobConfig(jobConfig));
return Optional.of(jobId);
}
private String createJobConfig(final JobConfiguration jobConfig) {
JobConfigurationPOJO jobConfigPOJO = new JobConfigurationPOJO();
jobConfigPOJO.setJobName(jobConfig.getHandleConfig().getJobId());
jobConfigPOJO.setShardingTotalCount(jobConfig.getHandleConfig().getJobShardingCount());
jobConfigPOJO.setJobParameter(YamlEngine.marshal(jobConfig));
jobConfigPOJO.getProps().setProperty("create_time", LocalDateTime.now().format(DATE_TIME_FORMATTER));
return YamlEngine.marshal(jobConfigPOJO);
}
@Override
public Map<Integer, JobProgress> getProgress(final String jobId) {
checkModeConfig();
JobConfiguration jobConfig = getJobConfig(jobId);
return getProgress(jobConfig);
}
@Override
public Map<Integer, JobProgress> getProgress(final JobConfiguration jobConfig) {
String jobId = jobConfig.getHandleConfig().getJobId();
JobConfigurationPOJO jobConfigPOJO = getElasticJobConfigPOJO(jobId);
return IntStream.range(0, jobConfig.getHandleConfig().getJobShardingCount()).boxed().collect(LinkedHashMap::new, (map, each) -> {
JobProgress jobProgress = PipelineAPIFactory.getGovernanceRepositoryAPI().getJobProgress(jobId, each);
if (null != jobProgress) {
jobProgress.setActive(!jobConfigPOJO.isDisabled());
}
map.put(each, jobProgress);
}, LinkedHashMap::putAll);
}
@Override
public void stopClusterWriteDB(final String jobId) {
checkModeConfig();
log.info("stopClusterWriteDB for job {}", jobId);
JobConfiguration jobConfig = getJobConfig(jobId);
String schemaName = jobConfig.getWorkflowConfig().getSchemaName();
stopClusterWriteDB(schemaName, jobId);
}
@Override
@Override
public void restoreClusterWriteDB(final String jobId) {
checkModeConfig();
log.info("restoreClusterWriteDB for job {}", jobId);
JobConfiguration jobConfig = getJobConfig(jobId);
String schemaName = jobConfig.getWorkflowConfig().getSchemaName();
restoreClusterWriteDB(schemaName, jobId);
}
@Override
public void restoreClusterWriteDB(final String schemaName, final String jobId) {
LockContext lockContext = PipelineContext.getContextManager().getLockContext();
ShardingSphereLock lock = lockContext.getSchemaLock(schemaName).orElse(null);
if (null == lock) {
log.info("restoreClusterWriteDB, lock is null");
throw new RuntimeException("Not necessary to restore source writing");
}
boolean isLocked = lock.isLocked(schemaName);
if (!isLocked) {
log.info("restoreClusterWriteDB, isLocked false, schemaName={}", schemaName);
throw new RuntimeException("Not necessary to restore source writing");
}
lock.releaseLock(schemaName);
}
@Override
public Collection<DataConsistencyCheckAlgorithmInfo> listDataConsistencyCheckAlgorithms() {
checkModeConfig();
return DATA_CONSISTENCY_CHECK_ALGORITHM_MAP.values()
.stream().map(each -> {
DataConsistencyCheckAlgorithmInfo algorithmInfo = new DataConsistencyCheckAlgorithmInfo();
algorithmInfo.setType(each.getType());
algorithmInfo.setDescription(each.getDescription());
algorithmInfo.setSupportedDatabaseTypes(each.getSupportedDatabaseTypes());
algorithmInfo.setProvider(each.getProvider());
return algorithmInfo;
}).collect(Collectors.toList());
}
@Override
public boolean isDataConsistencyCheckNeeded(final String jobId) {
log.info("isDataConsistencyCheckNeeded for job {}", jobId);
JobConfiguration jobConfig = getJobConfig(jobId);
return isDataConsistencyCheckNeeded(jobConfig);
}
/**
 * Returns whether a data consistency check is needed for the given job configuration.
 *
 * @param jobConfig scaling job configuration
 * @return true when a check algorithm is configured
 */
@Override
public boolean isDataConsistencyCheckNeeded(final JobConfiguration jobConfig) {
    // Build the rule-altered context once and reuse the private predicate.
    return isDataConsistencyCheckNeeded(RuleAlteredJobWorker.createRuleAlteredContext(jobConfig));
}
// A check is needed exactly when a check algorithm has been configured in the context.
private boolean isDataConsistencyCheckNeeded(final RuleAlteredContext ruleAlteredContext) {
    return ruleAlteredContext.getDataConsistencyCheckAlgorithm() != null;
}
/**
 * Runs the configured data consistency check for the given job.
 *
 * @param jobId scaling job id
 * @return per-table check results; empty when no algorithm is configured
 */
@Override
public Map<String, DataConsistencyCheckResult> dataConsistencyCheck(final String jobId) {
    // Only supported in cluster mode; fails fast otherwise.
    checkModeConfig();
    log.info("Data consistency check for job {}", jobId);
    // Load the job configuration and delegate to the configuration-based overload.
    return dataConsistencyCheck(getJobConfig(jobId));
}
/**
 * Runs the configured data consistency check for the given job configuration.
 *
 * @param jobConfig scaling job configuration
 * @return per-table check results; empty map when no check algorithm is configured
 */
@Override
public Map<String, DataConsistencyCheckResult> dataConsistencyCheck(final JobConfiguration jobConfig) {
    RuleAlteredContext context = RuleAlteredJobWorker.createRuleAlteredContext(jobConfig);
    // No algorithm configured means the check is intentionally skipped, not failed.
    if (!isDataConsistencyCheckNeeded(context)) {
        log.info("dataConsistencyCheckAlgorithm is not configured, data consistency check is ignored.");
        return Collections.emptyMap();
    }
    return dataConsistencyCheck0(jobConfig, context.getDataConsistencyCheckAlgorithm());
}
/**
 * Runs a data consistency check for the given job with an explicitly chosen algorithm type,
 * overriding whatever the job configuration specifies.
 *
 * @param jobId scaling job id
 * @param algorithmType SPI type name of the check algorithm to use
 * @return per-table check results
 */
@Override
public Map<String, DataConsistencyCheckResult> dataConsistencyCheck(final String jobId, final String algorithmType) {
    // Only supported in cluster mode; fails fast otherwise.
    checkModeConfig();
    log.info("Data consistency check for job {}, algorithmType: {}", jobId, algorithmType);
    // Load the job configuration first (same side-effect order as before), then resolve the algorithm via SPI.
    JobConfiguration jobConfig = getJobConfig(jobId);
    TypedSPIConfiguration algorithmConfig = new ShardingSphereAlgorithmConfiguration(algorithmType, new Properties());
    DataConsistencyCheckAlgorithm algorithm = ShardingSphereAlgorithmFactory.createAlgorithm(algorithmConfig, DataConsistencyCheckAlgorithm.class);
    return dataConsistencyCheck0(jobConfig, algorithm);
}
/**
 * Core check routine: compares record counts first and, only when all counts match,
 * runs the (more expensive) record content comparison; finally persists the aggregated verdict.
 *
 * @param jobConfig scaling job configuration
 * @param checkAlgorithm algorithm used for the content comparison
 * @return per-table results carrying both count and content match flags
 */
private Map<String, DataConsistencyCheckResult> dataConsistencyCheck0(final JobConfiguration jobConfig, final DataConsistencyCheckAlgorithm checkAlgorithm) {
    String jobId = jobConfig.getHandleConfig().getJobId();
    DataConsistencyChecker dataConsistencyChecker = EnvironmentCheckerFactory.newInstance(jobConfig);
    // Cheap count comparison first.
    Map<String, DataConsistencyCheckResult> result = dataConsistencyChecker.checkRecordsCount();
    if (result.values().stream().allMatch(DataConsistencyCheckResult::isRecordsCountMatched)) {
        Map<String, Boolean> contentCheckResult = dataConsistencyChecker.checkRecordsContent(checkAlgorithm);
        // Tables absent from the content result default to "not matched" (false).
        result.forEach((key, value) -> value.setRecordsContentMatched(contentCheckResult.getOrDefault(key, false)));
    }
    log.info("Scaling job {} with check algorithm '{}' data consistency checker result {}", jobId, checkAlgorithm.getClass().getName(), result);
    // Persist the aggregated boolean so switchClusterConfiguration can gate on it later.
    PipelineAPIFactory.getGovernanceRepositoryAPI().persistJobCheckResult(jobId, aggregateDataConsistencyCheckResults(jobId, result));
    return result;
}
/**
 * Collapses per-table check results into a single verdict.
 * An empty result map counts as failure; otherwise every table must match on both
 * record count and record content. The first failing table is logged.
 *
 * @param jobId scaling job id (for logging)
 * @param checkResultMap per-table check results
 * @return true only when every table matched on count and content
 */
@Override
public boolean aggregateDataConsistencyCheckResults(final String jobId, final Map<String, DataConsistencyCheckResult> checkResultMap) {
    if (checkResultMap.isEmpty()) {
        return false;
    }
    for (Entry<String, DataConsistencyCheckResult> entry : checkResultMap.entrySet()) {
        DataConsistencyCheckResult checkResult = entry.getValue();
        boolean countMatched = checkResult.isRecordsCountMatched();
        boolean contentMatched = checkResult.isRecordsContentMatched();
        if (!contentMatched || !countMatched) {
            // Log the first mismatching table, then short-circuit to failure.
            log.error("Scaling job: {}, table: {} data consistency check failed, recordsContentMatched: {}, recordsCountMatched: {}",
                    jobId, entry.getKey(), contentMatched, countMatched);
            return false;
        }
    }
    return true;
}
/**
 * Switches the cluster to the new (scaled) configuration for the given job.
 *
 * @param jobId scaling job id
 */
@Override
public void switchClusterConfiguration(final String jobId) {
    // Only supported in cluster mode; fails fast otherwise.
    checkModeConfig();
    log.info("Switch cluster configuration for job {}", jobId);
    // Load the job configuration and delegate to the configuration-based overload.
    switchClusterConfiguration(getJobConfig(jobId));
}
/**
 * Switches the cluster to the new configuration for the given job:
 * verifies the persisted consistency-check verdict (when a check is configured),
 * posts the task-finished event, marks all sharding items FINISHED, stops the job,
 * and cleans up job resources.
 *
 * @param jobConfig scaling job configuration
 * @throws PipelineDataConsistencyCheckFailedException when a check is required but not finished or failed
 */
@Override
public void switchClusterConfiguration(final JobConfiguration jobConfig) {
    String jobId = jobConfig.getHandleConfig().getJobId();
    RuleAlteredContext ruleAlteredContext = RuleAlteredJobWorker.createRuleAlteredContext(jobConfig);
    GovernanceRepositoryAPI repositoryAPI = PipelineAPIFactory.getGovernanceRepositoryAPI();
    if (isDataConsistencyCheckNeeded(ruleAlteredContext)) {
        // Gate the switch on the verdict persisted by dataConsistencyCheck0.
        Optional<Boolean> checkResultOptional = repositoryAPI.getJobCheckResult(jobId);
        if (!checkResultOptional.isPresent() || !checkResultOptional.get()) {
            throw new PipelineDataConsistencyCheckFailedException("Data consistency check not finished or failed.");
        }
    }
    WorkflowConfiguration workflowConfig = jobConfig.getWorkflowConfig();
    ScalingTaskFinishedEvent taskFinishedEvent = new ScalingTaskFinishedEvent(workflowConfig.getSchemaName(), workflowConfig.getActiveVersion(), workflowConfig.getNewVersion());
    ShardingSphereEventBus.getInstance().post(taskFinishedEvent);
    for (int each : repositoryAPI.getShardingItems(jobId)) {
        repositoryAPI.updateShardingJobStatus(jobId, each, JobStatus.FINISHED);
    }
    stop(jobId);
    try {
        // Brief pause before cleanup; presumably gives the stopped job time to settle — TODO confirm.
        TimeUnit.SECONDS.sleep(1);
    } catch (final InterruptedException ex) {
        // FIX: restore the interrupt status instead of silently swallowing it,
        // so callers/executors can still observe the interruption.
        Thread.currentThread().interrupt();
        log.error(ex.getMessage());
    }
    RuleAlteredJobContext jobContext = new RuleAlteredJobContext(jobConfig);
    RuleAlteredJobPreparer jobPreparer = new RuleAlteredJobPreparer();
    jobPreparer.cleanup(jobContext);
    jobContext.close();
}
/**
 * Resets the scaling job by cleaning up its target tables.
 *
 * @param jobId scaling job id
 * @throws PipelineJobExecutionException when target-table cleanup fails
 */
@Override
public void reset(final String jobId) {
    // Only supported in cluster mode; fails fast otherwise.
    checkModeConfig();
    log.info("Scaling job {} reset target table", jobId);
    try {
        new ScalingEnvironmentManager().cleanupTargetTables(getJobConfig(jobId));
    } catch (final SQLException ex) {
        // NOTE(review): the SQLException cause is dropped here; if PipelineJobExecutionException
        // offers a (String, Throwable) constructor, pass 'ex' to preserve the root cause — TODO confirm.
        throw new PipelineJobExecutionException("Reset target table failed for job " + jobId);
    }
}
/**
 * Loads the scaling job configuration for the given job id.
 *
 * @param jobId scaling job id
 * @return deserialized job configuration
 */
@Override
public JobConfiguration getJobConfig(final String jobId) {
    // Fetch the raw elastic-job POJO, then deserialize its job parameter.
    JobConfigurationPOJO elasticJobConfigPOJO = getElasticJobConfigPOJO(jobId);
    return getJobConfig(elasticJobConfigPOJO);
}
// Deserializes the YAML job parameter carried by the elastic-job POJO into a JobConfiguration.
private JobConfiguration getJobConfig(final JobConfigurationPOJO elasticJobConfigPOJO) {
    String jobParameter = elasticJobConfigPOJO.getJobParameter();
    return YamlEngine.unmarshal(jobParameter, JobConfiguration.class, true);
}
}
// Otherwise, an application with two upgrade targets (due to previous failed deployments, or, later,
// due to an aborted change) would start tests for the other target immediately after completing them
// for the first, disregarding the delay.
private List<Job> computeReadyJobs(ApplicationId id) {
List<Job> jobs = new ArrayList<>();
applications().get(id).ifPresent(application -> {
List<Step> steps = application.deploymentSpec().steps().isEmpty()
? singletonList(new DeploymentSpec.DeclaredZone(test))
: application.deploymentSpec().steps();
List<Step> productionSteps = steps.stream().filter(step -> step.deploysTo(prod) || step.zones().isEmpty()).collect(toList());
Optional<Instant> completedAt = application.deploymentJobs().statusOf(stagingTest)
.flatMap(JobStatus::lastSuccess).map(JobRun::at);
String reason = "New change available";
List<Job> testJobs = null;
for (Step step : productionSteps) {
Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
Map<Optional<Instant>, List<JobType>> jobsByCompletion = stepJobs.stream().collect(groupingBy(job -> completedAt(application.change(), application, job)));
if (jobsByCompletion.containsKey(empty())) {
for (JobType job : jobsByCompletion.get(empty())) {
State target = targetFor(application, application.change(), deploymentFor(application, job));
if (isVerified(application, target, job)) {
if (completedAt.isPresent())
jobs.add(deploymentJob(application, target, application.change(), job, reason, completedAt.get(), stepJobs));
}
else if (testJobs == null) {
if ( ! alreadyTriggered(application, target))
testJobs = testJobsFor(application, target, "Testing deployment for " + job.jobName(), completedAt.orElse(clock.instant()));
}
}
}
else {
if (stepJobs.isEmpty()) {
Duration delay = ((DeploymentSpec.Delay) step).duration();
completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
reason += " after a delay of " + delay;
}
else {
completedAt = jobsByCompletion.keySet().stream().map(Optional::get).max(naturalOrder());
reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
}
}
}
if (testJobs == null)
testJobs = testJobsFor(application, targetFor(application, application.change(), empty()), "Testing last changes outside prod", clock.instant());
jobs.addAll(testJobs);
if (steps.stream().flatMap(step -> step.zones().stream()).map(order::toJob)
.allMatch(job -> completedAt(application.change(), application, job).isPresent()))
applications().lockIfPresent(id, lockedApplication -> applications().store(lockedApplication.withChange(Change.empty())));
});
return jobs;
}
// target_code: if ( ! alreadyTriggered(application, target))
private List<Job> computeReadyJobs(ApplicationId id) {
List<Job> jobs = new ArrayList<>();
applications().get(id).ifPresent(application -> {
List<Step> steps = application.deploymentSpec().steps().isEmpty()
? singletonList(new DeploymentSpec.DeclaredZone(test))
: application.deploymentSpec().steps();
List<Step> productionSteps = steps.stream().filter(step -> step.deploysTo(prod) || step.zones().isEmpty()).collect(toList());
Optional<Instant> completedAt = application.deploymentJobs().statusOf(stagingTest)
.flatMap(JobStatus::lastSuccess).map(JobRun::at);
String reason = "New change available";
List<Job> testJobs = null;
for (Step step : productionSteps) {
Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
Map<Optional<Instant>, List<JobType>> jobsByCompletion = stepJobs.stream().collect(groupingBy(job -> completedAt(application.change(), application, job)));
if (jobsByCompletion.containsKey(empty())) {
for (JobType job : jobsByCompletion.get(empty())) {
State target = targetFor(application, application.change(), deploymentFor(application, job));
if (isVerified(application, target, job)) {
if (completedAt.isPresent())
jobs.add(deploymentJob(application, target, application.change(), job, reason, completedAt.get(), stepJobs));
}
else if (testJobs == null) {
if ( ! alreadyTriggered(application, target))
testJobs = testJobsFor(application, target, "Testing deployment for " + job.jobName(), completedAt.orElse(clock.instant()));
else
testJobs = emptyList();
}
}
}
else {
if (stepJobs.isEmpty()) {
Duration delay = ((DeploymentSpec.Delay) step).duration();
completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
reason += " after a delay of " + delay;
}
else {
completedAt = jobsByCompletion.keySet().stream().map(Optional::get).max(naturalOrder());
reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
}
}
}
if (testJobs == null)
testJobs = testJobsFor(application, targetFor(application, application.change(), empty()), "Testing last changes outside prod", clock.instant());
jobs.addAll(testJobs);
if (steps.stream().flatMap(step -> step.zones().stream()).map(order::toJob)
.allMatch(job -> completedAt(application.change(), application, job).isPresent()))
applications().lockIfPresent(id, lockedApplication -> applications().store(lockedApplication.withChange(Change.empty())));
});
return jobs;
} | class DeploymentTrigger {
private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());
private final Controller controller;
private final Clock clock;
private final DeploymentOrder order;
private final BuildService buildService;
public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) {
Objects.requireNonNull(controller, "controller cannot be null");
Objects.requireNonNull(curator, "curator cannot be null");
Objects.requireNonNull(clock, "clock cannot be null");
this.controller = controller;
this.clock = clock;
this.order = new DeploymentOrder(controller::system);
this.buildService = buildService;
}
public DeploymentOrder deploymentOrder() {
return order;
}
/**
* Called each time a job completes (successfully or not) to record information used when deciding what to trigger.
*/
public void notifyOfCompletion(JobReport report) {
log.log(LogLevel.DEBUG, String.format("Got notified of %s for %s of %s (%d).",
report.jobError().map(JobError::toString).orElse("success"),
report.jobType(),
report.applicationId(),
report.projectId()));
if ( ! applications().get(report.applicationId()).isPresent()) {
log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
"': Unknown application '" + report.applicationId() + "'");
return;
}
applications().lockOrThrow(report.applicationId(), application -> {
ApplicationVersion applicationVersion = report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber()))
.orElse(ApplicationVersion.unknown);
application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller);
application = application.withProjectId(OptionalLong.of(report.projectId()));
if (report.jobType() == component && report.success()) {
if (acceptNewApplicationVersion(application))
application = application.withChange(application.change().with(applicationVersion));
else
application = application.withOutstandingChange(Change.of(applicationVersion));
}
applications().store(application);
});
}
/**
* Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs.
*
* Only one job is triggered each run for test jobs, since their environments have limited capacity.
*/
public long triggerReadyJobs() {
return computeReadyJobs().collect(partitioningBy(job -> job.jobType().isTest()))
.entrySet().stream()
.flatMap(entry -> (entry.getKey()
? entry.getValue().stream()
.sorted(comparing(Job::isRetry)
.thenComparing(Job::applicationUpgrade)
.reversed()
.thenComparing(Job::availableSince))
.collect(groupingBy(Job::jobType))
: entry.getValue().stream()
.collect(groupingBy(Job::applicationId)))
.values().stream()
.map(jobs -> (Supplier<Long>) jobs.stream()
.filter(job -> canTrigger(job) && trigger(job))
.limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
.parallel().map(Supplier::get).reduce(0L, Long::sum);
}
/**
* Attempts to trigger the given job for the given application and returns the outcome.
*
* If the build service can not find the given job, or claims it is illegal to trigger it,
* the project id is removed from the application owning the job, to prevent further trigger attemps.
*/
public boolean trigger(Job job) {
log.log(LogLevel.INFO, String.format("Attempting to trigger %s: %s (%s)", job, job.reason, job.target));
try {
buildService.trigger(job);
applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withJobTriggering(
job.jobType, new JobRun(-1, job.target.targetPlatform, job.target.targetApplication, job.reason, clock.instant()))));
return true;
}
catch (RuntimeException e) {
log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e);
if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
applications().lockOrThrow(job.applicationId(), application ->
applications().store(application.withProjectId(OptionalLong.empty())));
return false;
}
}
/**
* Triggers a change of this application
*
* @param applicationId the application to trigger
* @throws IllegalArgumentException if this application already has an ongoing change
*/
public void triggerChange(ApplicationId applicationId, Change change) {
applications().lockOrThrow(applicationId, application -> {
if (application.change().isPresent() && ! application.deploymentJobs().hasFailures())
throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
application.change() + " is already in progress");
application = application.withChange(change);
if (change.application().isPresent())
application = application.withOutstandingChange(Change.empty());
applications().store(application);
});
}
/** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. */
public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
applications().lockOrThrow(applicationId, application -> {
applications().store(application.withChange(application.change().application()
.map(Change::of)
.filter(change -> keepApplicationChange)
.orElse(Change.empty())));
});
}
public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() {
return computeReadyJobs().collect(groupingBy(Job::jobType));
}
/** Returns the set of all jobs which have changes to propagate from the upstream steps. */
private Stream<Job> computeReadyJobs() {
return ApplicationList.from(applications().asList())
.notPullRequest()
.withProjectId()
.deploying()
.idList().stream()
.map(this::computeReadyJobs)
.flatMap(List::stream);
}
/** Returns whether the given job is currently running; false if completed since last triggered, asking the build service othewise. */
public boolean isRunning(Application application, JobType jobType) {
return ! application.deploymentJobs().statusOf(jobType)
.flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at()))).orElse(false)
&& buildService.isRunning(BuildJob.of(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
}
public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType) {
Application application = applications().require(applicationId);
if (jobType == component) {
buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
return singletonList(component);
}
State target = targetFor(application, application.change(), deploymentFor(application, jobType));
String reason = ">:o:< Triggered by force! (-o-) |-o-| (=oo=)";
if (isVerified(application, target, jobType)) {
trigger(deploymentJob(application, target, application.change(), jobType, reason, clock.instant(), Collections.emptySet()));
return singletonList(jobType);
}
List<Job> testJobs = testJobsFor(application, target, reason, clock.instant());
testJobs.forEach(this::trigger);
return testJobs.stream().map(Job::jobType).collect(toList());
}
private Job deploymentJob(Application application, State target, Change change, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith) {
boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError)
.filter(JobError.outOfCapacity::equals).isPresent();
if (isRetry) reason += "; retrying on out of capacity";
return new Job(application, target, jobType, reason, availableSince, concurrentlyWith, isRetry, change.application().isPresent());
}
private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) {
return max(deployment.map(Deployment::version), change.platform())
.orElse(application.oldestDeployedPlatform()
.orElse(controller.systemVersion()));
}
private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) {
return max(deployment.map(Deployment::applicationVersion), change.application())
.orElse(application.oldestDeployedApplication()
.orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().applicationVersion()));
}
private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) {
return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2;
}
/**
* Finds the next step to trigger for the given application, if any, and returns these as a list.
*/
private List<Job> testJobsFor(Application application, State target, String reason, Instant availableSince) {
List<Step> steps = application.deploymentSpec().steps();
if (steps.isEmpty()) steps = singletonList(new DeploymentSpec.DeclaredZone(test));
List<Job> jobs = new ArrayList<>();
for (Step step : steps.stream().filter(step -> step.deploysTo(test) || step.deploysTo(staging)).collect(toList())) {
for (JobType jobType : step.zones().stream().map(order::toJob).collect(toList())) {
Optional<JobRun> completion = successOn(application, jobType, target);
if (completion.isPresent())
availableSince = completion.get().at();
else if (isVerified(application, target, jobType))
jobs.add(deploymentJob(application, target, application.change(), jobType, reason, availableSince, emptySet()));
}
}
return jobs;
}
private boolean isVerified(Application application, State state, JobType jobType) {
if (jobType.environment() == staging)
return successOn(application, systemTest, state).isPresent();
if (jobType.environment() == prod)
return successOn(application, stagingTest, state).isPresent()
|| ! JobList.from(application).production()
.lastTriggered().on(state.targetPlatform)
.lastTriggered().on(state.targetApplication)
.isEmpty();
return true;
}
/**
* Returns the instant when the given change is complete for the given application for the given job.
*
* Any job is complete if the given change is already successful on that job.
* A production job is also considered complete if its current change is strictly dominated by what
* is already deployed in its zone, i.e., no parts of the change are upgrades, and at least one
* part is a downgrade, regardless of the status of the job.
*/
private Optional<Instant> completedAt(Change change, Application application, JobType jobType) {
State target = targetFor(application, change, deploymentFor(application, jobType));
Optional<JobRun> lastSuccess = successOn(application, jobType, target);
if (lastSuccess.isPresent() || ! jobType.isProduction())
return lastSuccess.map(JobRun::at);
return deploymentFor(application, jobType)
.filter(deployment -> ! ( change.upgrades(deployment.version())
|| change.upgrades(deployment.applicationVersion()))
&& ( change.downgrades(deployment.version())
|| change.downgrades(deployment.applicationVersion())))
.map(Deployment::at);
}
private Optional<JobRun> successOn(Application application, JobType jobType, State target) {
return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
.filter(last -> target.targetPlatform.equals(last.version())
&& target.targetApplication.equals(last.applicationVersion()));
}
private boolean canTrigger(Job job) {
Application application = applications().require(job.applicationId());
if (isRunning(application, job.jobType))
return false;
if ( ! job.jobType.isProduction())
return true;
if ( ! job.concurrentlyWith.containsAll(runningProductionJobsFor(application)))
return false;
if ( ! application.changeAt(clock.instant()).isPresent())
return false;
return true;
}
private List<JobType> runningProductionJobsFor(Application application) {
return application.deploymentJobs().jobStatus().keySet().parallelStream()
.filter(job -> job.isProduction())
.filter(job -> isRunning(application, job))
.collect(toList());
}
private ApplicationController applications() {
return controller.applications();
}
private boolean acceptNewApplicationVersion(LockedApplication application) {
if (application.change().application().isPresent()) return true;
if (application.deploymentJobs().hasFailures()) return true;
return ! application.changeAt(clock.instant()).platform().isPresent();
}
private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get()));
}
private State targetFor(Application application, Change change, Optional<Deployment> deployment) {
return new State(targetPlatform(application, change, deployment),
targetApplication(application, change, deployment),
deployment.map(Deployment::version),
deployment.map(Deployment::applicationVersion));
}
private static class Job extends BuildJob {
private final JobType jobType;
private final String reason;
private final Instant availableSince;
private final Collection<JobType> concurrentlyWith;
private final boolean isRetry;
private final boolean isApplicationUpgrade;
private final State target;
private Job(Application application, State target, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith, boolean isRetry, boolean isApplicationUpgrade) {
super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName());
this.jobType = jobType;
this.availableSince = availableSince;
this.concurrentlyWith = concurrentlyWith;
this.reason = reason;
this.isRetry = isRetry;
this.isApplicationUpgrade = isApplicationUpgrade;
this.target = target;
}
JobType jobType() { return jobType; }
Instant availableSince() { return availableSince; }
boolean isRetry() { return isRetry; }
boolean applicationUpgrade() { return isApplicationUpgrade; }
}
private static class State {
private final Version targetPlatform;
private final ApplicationVersion targetApplication;
private final Optional<Version> sourcePlatform;
private final Optional<ApplicationVersion> sourceApplication;
public State(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication) {
this.targetPlatform = targetPlatform;
this.targetApplication = targetApplication;
this.sourcePlatform = sourcePlatform;
this.sourceApplication = sourceApplication;
}
@Override
public String toString() {
return String.format("platform %s %s, application %s %s",
targetPlatform,
sourcePlatform.map(v -> "(from " + v + ")").orElse(""),
targetApplication.id(),
sourceApplication.map(v -> "(from " + v.id() + ")").orElse(""));
}
}
} | class DeploymentTrigger {
private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());
private final Controller controller;
private final Clock clock;
private final DeploymentOrder order;
private final BuildService buildService;
public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) {
Objects.requireNonNull(controller, "controller cannot be null");
Objects.requireNonNull(curator, "curator cannot be null");
Objects.requireNonNull(clock, "clock cannot be null");
this.controller = controller;
this.clock = clock;
this.order = new DeploymentOrder(controller::system);
this.buildService = buildService;
}
public DeploymentOrder deploymentOrder() {
return order;
}
/**
* Called each time a job completes (successfully or not) to record information used when deciding what to trigger.
*/
public void notifyOfCompletion(JobReport report) {
log.log(LogLevel.DEBUG, String.format("Got notified of %s for %s of %s (%d).",
report.jobError().map(JobError::toString).orElse("success"),
report.jobType(),
report.applicationId(),
report.projectId()));
if ( ! applications().get(report.applicationId()).isPresent()) {
log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
"': Unknown application '" + report.applicationId() + "'");
return;
}
applications().lockOrThrow(report.applicationId(), application -> {
ApplicationVersion applicationVersion = report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber()))
.orElse(ApplicationVersion.unknown);
application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller);
application = application.withProjectId(OptionalLong.of(report.projectId()));
if (report.jobType() == component && report.success()) {
if (acceptNewApplicationVersion(application))
application = application.withChange(application.change().with(applicationVersion));
else
application = application.withOutstandingChange(Change.of(applicationVersion));
}
applications().store(application);
});
}
/**
* Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs.
*
* Only one job is triggered each run for test jobs, since their environments have limited capacity.
*/
public long triggerReadyJobs() {
return computeReadyJobs().collect(partitioningBy(job -> job.jobType().isTest()))
.entrySet().stream()
.flatMap(entry -> (entry.getKey()
? entry.getValue().stream()
.sorted(comparing(Job::isRetry)
.thenComparing(Job::applicationUpgrade)
.reversed()
.thenComparing(Job::availableSince))
.collect(groupingBy(Job::jobType))
: entry.getValue().stream()
.collect(groupingBy(Job::applicationId)))
.values().stream()
.map(jobs -> (Supplier<Long>) jobs.stream()
.filter(job -> canTrigger(job) && trigger(job))
.limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
.parallel().map(Supplier::get).reduce(0L, Long::sum);
}
/**
* Attempts to trigger the given job for the given application and returns the outcome.
*
* If the build service can not find the given job, or claims it is illegal to trigger it,
* the project id is removed from the application owning the job, to prevent further trigger attemps.
*/
public boolean trigger(Job job) {
log.log(LogLevel.INFO, String.format("Attempting to trigger %s: %s (%s)", job, job.reason, job.target));
try {
buildService.trigger(job);
applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withJobTriggering(
job.jobType, new JobRun(-1, job.target.targetPlatform, job.target.targetApplication, job.reason, clock.instant()))));
return true;
}
catch (RuntimeException e) {
log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e);
if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
applications().lockOrThrow(job.applicationId(), application ->
applications().store(application.withProjectId(OptionalLong.empty())));
return false;
}
}
/**
* Triggers a change of this application
*
* @param applicationId the application to trigger
* @throws IllegalArgumentException if this application already has an ongoing change
*/
public void triggerChange(ApplicationId applicationId, Change change) {
applications().lockOrThrow(applicationId, application -> {
if (application.change().isPresent() && ! application.deploymentJobs().hasFailures())
throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
application.change() + " is already in progress");
application = application.withChange(change);
if (change.application().isPresent())
application = application.withOutstandingChange(Change.empty());
applications().store(application);
});
}
/** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. */
public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
applications().lockOrThrow(applicationId, application -> {
applications().store(application.withChange(application.change().application()
.map(Change::of)
.filter(change -> keepApplicationChange)
.orElse(Change.empty())));
});
}
public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() {
return computeReadyJobs().collect(groupingBy(Job::jobType));
}
/** Returns the set of all jobs which have changes to propagate from the upstream steps. */
private Stream<Job> computeReadyJobs() {
return ApplicationList.from(applications().asList())
.notPullRequest()
.withProjectId()
.deploying()
.idList().stream()
.map(this::computeReadyJobs)
.flatMap(List::stream);
}
/** Returns whether the given job is currently running; false if completed since last triggered, asking the build service othewise. */
public boolean isRunning(Application application, JobType jobType) {
return ! application.deploymentJobs().statusOf(jobType)
.flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at()))).orElse(false)
&& buildService.isRunning(BuildJob.of(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
}
public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType) {
Application application = applications().require(applicationId);
if (jobType == component) {
buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
return singletonList(component);
}
State target = targetFor(application, application.change(), deploymentFor(application, jobType));
String reason = ">:o:< Triggered by force! (-o-) |-o-| (=oo=)";
if (isVerified(application, target, jobType)) {
trigger(deploymentJob(application, target, application.change(), jobType, reason, clock.instant(), Collections.emptySet()));
return singletonList(jobType);
}
List<Job> testJobs = testJobsFor(application, target, reason, clock.instant());
testJobs.forEach(this::trigger);
return testJobs.stream().map(Job::jobType).collect(toList());
}
private Job deploymentJob(Application application, State target, Change change, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith) {
boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError)
.filter(JobError.outOfCapacity::equals).isPresent();
if (isRetry) reason += "; retrying on out of capacity";
return new Job(application, target, jobType, reason, availableSince, concurrentlyWith, isRetry, change.application().isPresent());
}
private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) {
return max(deployment.map(Deployment::version), change.platform())
.orElse(application.oldestDeployedPlatform()
.orElse(controller.systemVersion()));
}
private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) {
return max(deployment.map(Deployment::applicationVersion), change.application())
.orElse(application.oldestDeployedApplication()
.orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().applicationVersion()));
}
private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) {
return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2;
}
/**
* Finds the next step to trigger for the given application, if any, and returns these as a list.
*/
private List<Job> testJobsFor(Application application, State target, String reason, Instant availableSince) {
List<Step> steps = application.deploymentSpec().steps();
if (steps.isEmpty()) steps = singletonList(new DeploymentSpec.DeclaredZone(test));
List<Job> jobs = new ArrayList<>();
for (Step step : steps.stream().filter(step -> step.deploysTo(test) || step.deploysTo(staging)).collect(toList())) {
for (JobType jobType : step.zones().stream().map(order::toJob).collect(toList())) {
Optional<JobRun> completion = successOn(application, jobType, target);
if (completion.isPresent())
availableSince = completion.get().at();
else if (isVerified(application, target, jobType))
jobs.add(deploymentJob(application, target, application.change(), jobType, reason, availableSince, emptySet()));
}
}
return jobs;
}
private boolean isVerified(Application application, State state, JobType jobType) {
if (jobType.environment() == staging)
return successOn(application, systemTest, state).isPresent();
if (jobType.environment() == prod)
return successOn(application, stagingTest, state).isPresent()
|| ! JobList.from(application).production()
.lastTriggered().on(state.targetPlatform)
.lastTriggered().on(state.targetApplication)
.isEmpty();
return true;
}
private Optional<Instant> testedAt(Application application, State target) {
return max(successOn(application, systemTest, target).map(JobRun::at),
successOn(application, stagingTest, target).map(JobRun::at));
}
private boolean alreadyTriggered(Application application, State target) {
return ! JobList.from(application).production()
.lastTriggered().on(target.targetPlatform)
.lastTriggered().on(target.targetApplication)
.isEmpty();
}
/**
* Returns the instant when the given change is complete for the given application for the given job.
*
* Any job is complete if the given change is already successful on that job.
* A production job is also considered complete if its current change is strictly dominated by what
* is already deployed in its zone, i.e., no parts of the change are upgrades, and at least one
* part is a downgrade, regardless of the status of the job.
*/
private Optional<Instant> completedAt(Change change, Application application, JobType jobType) {
State target = targetFor(application, change, deploymentFor(application, jobType));
Optional<JobRun> lastSuccess = successOn(application, jobType, target);
if (lastSuccess.isPresent() || ! jobType.isProduction())
return lastSuccess.map(JobRun::at);
return deploymentFor(application, jobType)
.filter(deployment -> ! ( change.upgrades(deployment.version())
|| change.upgrades(deployment.applicationVersion()))
&& ( change.downgrades(deployment.version())
|| change.downgrades(deployment.applicationVersion())))
.map(Deployment::at);
}
private Optional<JobRun> successOn(Application application, JobType jobType, State target) {
return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
.filter(last -> target.targetPlatform.equals(last.version())
&& target.targetApplication.equals(last.applicationVersion()));
}
private boolean canTrigger(Job job) {
Application application = applications().require(job.applicationId());
if (isRunning(application, job.jobType))
return false;
if ( ! job.jobType.isProduction())
return true;
if ( ! job.concurrentlyWith.containsAll(runningProductionJobsFor(application)))
return false;
if ( ! application.changeAt(clock.instant()).isPresent())
return false;
return true;
}
private List<JobType> runningProductionJobsFor(Application application) {
return application.deploymentJobs().jobStatus().keySet().parallelStream()
.filter(job -> job.isProduction())
.filter(job -> isRunning(application, job))
.collect(toList());
}
private ApplicationController applications() {
return controller.applications();
}
private boolean acceptNewApplicationVersion(LockedApplication application) {
if (application.change().application().isPresent()) return true;
if (application.deploymentJobs().hasFailures()) return true;
return ! application.changeAt(clock.instant()).platform().isPresent();
}
private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get()));
}
private State targetFor(Application application, Change change, Optional<Deployment> deployment) {
return new State(targetPlatform(application, change, deployment),
targetApplication(application, change, deployment),
deployment.map(Deployment::version),
deployment.map(Deployment::applicationVersion));
}
private static class Job extends BuildJob {
private final JobType jobType;
private final String reason;
private final Instant availableSince;
private final Collection<JobType> concurrentlyWith;
private final boolean isRetry;
private final boolean isApplicationUpgrade;
private final State target;
private Job(Application application, State target, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith, boolean isRetry, boolean isApplicationUpgrade) {
super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName());
this.jobType = jobType;
this.availableSince = availableSince;
this.concurrentlyWith = concurrentlyWith;
this.reason = reason;
this.isRetry = isRetry;
this.isApplicationUpgrade = isApplicationUpgrade;
this.target = target;
}
JobType jobType() { return jobType; }
Instant availableSince() { return availableSince; }
boolean isRetry() { return isRetry; }
boolean applicationUpgrade() { return isApplicationUpgrade; }
}
private static class State {
private final Version targetPlatform;
private final ApplicationVersion targetApplication;
private final Optional<Version> sourcePlatform;
private final Optional<ApplicationVersion> sourceApplication;
public State(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication) {
this.targetPlatform = targetPlatform;
this.targetApplication = targetApplication;
this.sourcePlatform = sourcePlatform;
this.sourceApplication = sourceApplication;
}
@Override
public String toString() {
return String.format("platform %s %s, application %s %s",
targetPlatform,
sourcePlatform.map(v -> "(from " + v + ")").orElse(""),
targetApplication.id(),
sourceApplication.map(v -> "(from " + v.id() + ")").orElse(""));
}
}
} |
Couldn't we remove the "primitive" attribute from the Read transform instead of introducing the additional logic? Runners could still decide whether they want to consider READ primitive. | static Collection<String> getPrimitiveTransformIds(RunnerApi.Components components) {
Collection<String> ids = new LinkedHashSet<>();
for (Map.Entry<String, PTransform> transformEntry : components.getTransformsMap().entrySet()) {
PTransform transform = transformEntry.getValue();
boolean isPrimitive = isPrimitiveTransform(transform);
if (isPrimitive) {
Deque<String> transforms = new ArrayDeque<>();
transforms.push(transformEntry.getKey());
while (!transforms.isEmpty()) {
String id = transforms.pop();
PTransform next = components.getTransformsMap().get(id);
List<String> subtransforms = next.getSubtransformsList();
if (subtransforms.isEmpty()) {
ids.add(id);
} else {
transforms.addAll(subtransforms);
}
}
}
}
return ids;
} | if (isPrimitive) { | static Collection<String> getPrimitiveTransformIds(RunnerApi.Components components) {
Collection<String> ids = new LinkedHashSet<>();
for (Map.Entry<String, PTransform> transformEntry : components.getTransformsMap().entrySet()) {
PTransform transform = transformEntry.getValue();
boolean isPrimitive = isPrimitiveTransform(transform);
if (isPrimitive) {
Deque<String> transforms = new ArrayDeque<>();
transforms.push(transformEntry.getKey());
while (!transforms.isEmpty()) {
String id = transforms.pop();
PTransform next = components.getTransformsMap().get(id);
List<String> subtransforms = next.getSubtransformsList();
if (subtransforms.isEmpty()) {
ids.add(id);
} else {
transforms.addAll(subtransforms);
}
}
}
}
return ids;
} | class QueryablePipeline {
/**
* Create a new {@link QueryablePipeline} based on the provided components.
*
* <p>The returned {@link QueryablePipeline} will contain only the primitive transforms present
* within the provided components.
*/
public static QueryablePipeline forPrimitivesIn(Components components) {
return new QueryablePipeline(getPrimitiveTransformIds(components), components);
}
/**
* Create a new {@link QueryablePipeline} which uses the root transform IDs and components of the
* provided {@link Pipeline}.
*/
public static QueryablePipeline forPipeline(RunnerApi.Pipeline p) {
return forTransforms(p.getRootTransformIdsList(), p.getComponents());
}
/**
* Create a new {@link QueryablePipeline} based on the provided components containing only the
* provided {@code transformIds}.
*/
public static QueryablePipeline forTransforms(
Collection<String> transformIds, Components components) {
return new QueryablePipeline(transformIds, components);
}
private final Components components;
/**
* The {@link Pipeline} represented by a {@link Network}.
*
* <p>This is a directed bipartite graph consisting of {@link PTransformNode PTransformNodes} and
* {@link PCollectionNode PCollectionNodes}. Each {@link PCollectionNode} has exactly one in edge,
* and an arbitrary number of out edges. Each {@link PTransformNode} has an arbitrary number of in
* and out edges.
*
* <p>Parallel edges are permitted, as a {@link PCollectionNode} can be consumed by a single
* {@link PTransformNode} any number of times with different local names.
*/
private final Network<PipelineNode, PipelineEdge> pipelineNetwork;
private QueryablePipeline(Collection<String> transformIds, Components components) {
this.components = components;
this.pipelineNetwork = buildNetwork(transformIds, this.components);
}
/** Produces a {@link RunnerApi.Components} which contains only primitive transforms. */
@VisibleForTesting
private static final Set<String> PRIMITIVE_URNS =
ImmutableSet.of(
PAR_DO_TRANSFORM_URN,
FLATTEN_TRANSFORM_URN,
GROUP_BY_KEY_TRANSFORM_URN,
IMPULSE_TRANSFORM_URN,
ASSIGN_WINDOWS_TRANSFORM_URN,
TEST_STREAM_TRANSFORM_URN,
MAP_WINDOWS_TRANSFORM_URN,
READ_TRANSFORM_URN,
CREATE_VIEW_TRANSFORM_URN,
SPLITTABLE_PROCESS_KEYED_URN,
SPLITTABLE_PROCESS_ELEMENTS_URN);
/** Returns true if the provided transform is a primitive. */
private static boolean isPrimitiveTransform(PTransform transform) {
String urn = PTransformTranslation.urnForTransformOrNull(transform);
return PRIMITIVE_URNS.contains(urn) || NativeTransforms.isNative(transform);
}
private MutableNetwork<PipelineNode, PipelineEdge> buildNetwork(
Collection<String> transformIds, Components components) {
MutableNetwork<PipelineNode, PipelineEdge> network =
NetworkBuilder.directed().allowsParallelEdges(true).allowsSelfLoops(false).build();
Set<PCollectionNode> unproducedCollections = new HashSet<>();
for (String transformId : transformIds) {
PTransform transform = components.getTransformsOrThrow(transformId);
PTransformNode transformNode =
PipelineNode.pTransform(transformId, this.components.getTransformsOrThrow(transformId));
network.addNode(transformNode);
for (String produced : transform.getOutputsMap().values()) {
PCollectionNode producedNode =
PipelineNode.pCollection(produced, components.getPcollectionsOrThrow(produced));
network.addNode(producedNode);
network.addEdge(transformNode, producedNode, new PerElementEdge());
checkArgument(
network.inDegree(producedNode) == 1,
"A %s should have exactly one producing %s, but found %s:\nPCollection:\n%s\nProducers:\n%s",
PCollectionNode.class.getSimpleName(),
PTransformNode.class.getSimpleName(),
network.predecessors(producedNode).size(),
producedNode,
network.predecessors(producedNode));
unproducedCollections.remove(producedNode);
}
for (Map.Entry<String, String> consumed : transform.getInputsMap().entrySet()) {
String pcollectionId = consumed.getValue();
PCollectionNode consumedNode =
PipelineNode.pCollection(
pcollectionId, this.components.getPcollectionsOrThrow(pcollectionId));
if (network.addNode(consumedNode)) {
unproducedCollections.add(consumedNode);
}
if (getLocalSideInputNames(transform).contains(consumed.getKey())) {
network.addEdge(consumedNode, transformNode, new SingletonEdge());
} else {
network.addEdge(consumedNode, transformNode, new PerElementEdge());
}
}
}
checkArgument(
unproducedCollections.isEmpty(),
"%ss %s were consumed but never produced",
PCollectionNode.class.getSimpleName(),
unproducedCollections);
return network;
}
public Collection<PTransformNode> getTransforms() {
return pipelineNetwork
.nodes()
.stream()
.filter(PTransformNode.class::isInstance)
.map(PTransformNode.class::cast)
.collect(Collectors.toList());
}
public Iterable<PTransformNode> getTopologicallyOrderedTransforms() {
return StreamSupport.stream(
Networks.topologicalOrder(pipelineNetwork, Comparator.comparing(PipelineNode::getId))
.spliterator(),
false)
.filter(PTransformNode.class::isInstance)
.map(PTransformNode.class::cast)
.collect(Collectors.toList());
}
/**
* Get the transforms that are roots of this {@link QueryablePipeline}. These are all nodes which
* have no input {@link PCollection}.
*/
public Set<PTransformNode> getRootTransforms() {
return pipelineNetwork
.nodes()
.stream()
.filter(pipelineNode -> pipelineNetwork.inEdges(pipelineNode).isEmpty())
.map(pipelineNode -> (PTransformNode) pipelineNode)
.collect(Collectors.toSet());
}
public PTransformNode getProducer(PCollectionNode pcollection) {
return (PTransformNode) Iterables.getOnlyElement(pipelineNetwork.predecessors(pcollection));
}
/**
* Get all of the {@link PTransformNode PTransforms} which consume the provided {@link
* PCollectionNode} on a per-element basis.
*
* <p>If a {@link PTransformNode} consumes a {@link PCollectionNode} on a per-element basis one or
* more times, it will appear a single time in the result.
*
* <p>In theory, a transform may consume a single {@link PCollectionNode} in both a per-element
* and singleton manner. If this is the case, the transform node is included in the result, as it
* does consume the {@link PCollectionNode} on a per-element basis.
*/
public Set<PTransformNode> getPerElementConsumers(PCollectionNode pCollection) {
return pipelineNetwork
.successors(pCollection)
.stream()
.filter(
consumer ->
pipelineNetwork
.edgesConnecting(pCollection, consumer)
.stream()
.anyMatch(PipelineEdge::isPerElement))
.map(pipelineNode -> (PTransformNode) pipelineNode)
.collect(Collectors.toSet());
}
/**
* Same as {@link
* the collection as a singleton.
*/
public Set<PTransformNode> getSingletonConsumers(PCollectionNode pCollection) {
return pipelineNetwork
.successors(pCollection)
.stream()
.filter(
consumer ->
pipelineNetwork
.edgesConnecting(pCollection, consumer)
.stream()
.anyMatch(edge -> !edge.isPerElement()))
.map(pipelineNode -> (PTransformNode) pipelineNode)
.collect(Collectors.toSet());
}
/**
* Gets each {@link PCollectionNode} that the provided {@link PTransformNode} consumes on a
* per-element basis.
*/
public Set<PCollectionNode> getPerElementInputPCollections(PTransformNode ptransform) {
return pipelineNetwork
.inEdges(ptransform)
.stream()
.filter(PipelineEdge::isPerElement)
.map(edge -> (PCollectionNode) pipelineNetwork.incidentNodes(edge).source())
.collect(Collectors.toSet());
}
public Set<PCollectionNode> getOutputPCollections(PTransformNode ptransform) {
return pipelineNetwork
.successors(ptransform)
.stream()
.map(pipelineNode -> (PCollectionNode) pipelineNode)
.collect(Collectors.toSet());
}
public Components getComponents() {
return components;
}
/**
* Returns the {@link SideInputReference SideInputReferences} that the provided transform consumes
* as side inputs.
*/
public Collection<SideInputReference> getSideInputs(PTransformNode transform) {
return getLocalSideInputNames(transform.getTransform())
.stream()
.map(
localName -> {
String transformId = transform.getId();
PTransform transformProto = components.getTransformsOrThrow(transformId);
String collectionId = transform.getTransform().getInputsOrThrow(localName);
PCollection collection = components.getPcollectionsOrThrow(collectionId);
return SideInputReference.of(
PipelineNode.pTransform(transformId, transformProto),
localName,
PipelineNode.pCollection(collectionId, collection));
})
.collect(Collectors.toSet());
}
public Collection<UserStateReference> getUserStates(PTransformNode transform) {
return getLocalUserStateNames(transform.getTransform())
.stream()
.map(
localName -> {
String transformId = transform.getId();
PTransform transformProto = components.getTransformsOrThrow(transformId);
String collectionId =
transform
.getTransform()
.getInputsOrThrow(
Iterables.getOnlyElement(
Sets.difference(
transform.getTransform().getInputsMap().keySet(),
getLocalSideInputNames(transformProto))));
PCollection collection = components.getPcollectionsOrThrow(collectionId);
return UserStateReference.of(
PipelineNode.pTransform(transformId, transformProto),
localName,
PipelineNode.pCollection(collectionId, collection));
})
.collect(Collectors.toSet());
}
public Collection<TimerReference> getTimers(PTransformNode transform) {
return getLocalTimerNames(transform.getTransform())
.stream()
.map(
localName -> {
String transformId = transform.getId();
PTransform transformProto = components.getTransformsOrThrow(transformId);
String collectionId = transform.getTransform().getInputsOrThrow(localName);
PCollection collection = components.getPcollectionsOrThrow(collectionId);
return TimerReference.of(
PipelineNode.pTransform(transformId, transformProto),
localName,
PipelineNode.pCollection(collectionId, collection));
})
.collect(Collectors.toSet());
}
private Set<String> getLocalSideInputNames(PTransform transform) {
if (PAR_DO_TRANSFORM_URN.equals(transform.getSpec().getUrn())) {
try {
return ParDoPayload.parseFrom(transform.getSpec().getPayload()).getSideInputsMap().keySet();
} catch (InvalidProtocolBufferException e) {
throw new RuntimeException(e);
}
} else {
return Collections.emptySet();
}
}
private Set<String> getLocalUserStateNames(PTransform transform) {
if (PAR_DO_TRANSFORM_URN.equals(transform.getSpec().getUrn())) {
try {
return ParDoPayload.parseFrom(transform.getSpec().getPayload()).getStateSpecsMap().keySet();
} catch (InvalidProtocolBufferException e) {
throw new RuntimeException(e);
}
} else {
return Collections.emptySet();
}
}
private Set<String> getLocalTimerNames(PTransform transform) {
if (PAR_DO_TRANSFORM_URN.equals(transform.getSpec().getUrn())) {
try {
return ParDoPayload.parseFrom(transform.getSpec().getPayload()).getTimerSpecsMap().keySet();
} catch (InvalidProtocolBufferException e) {
throw new RuntimeException(e);
}
} else {
return Collections.emptySet();
}
}
public Optional<Environment> getEnvironment(PTransformNode parDo) {
return Environments.getEnvironment(parDo.getId(), components);
}
private interface PipelineEdge {
boolean isPerElement();
}
private static class PerElementEdge implements PipelineEdge {
@Override
public boolean isPerElement() {
return true;
}
}
private static class SingletonEdge implements PipelineEdge {
@Override
public boolean isPerElement() {
return false;
}
}
} | class QueryablePipeline {
/**
* Create a new {@link QueryablePipeline} based on the provided components.
*
* <p>The returned {@link QueryablePipeline} will contain only the primitive transforms present
* within the provided components.
*/
public static QueryablePipeline forPrimitivesIn(Components components) {
return new QueryablePipeline(getPrimitiveTransformIds(components), components);
}
/**
* Create a new {@link QueryablePipeline} which uses the root transform IDs and components of the
* provided {@link Pipeline}.
*/
public static QueryablePipeline forPipeline(RunnerApi.Pipeline p) {
return forTransforms(p.getRootTransformIdsList(), p.getComponents());
}
/**
* Create a new {@link QueryablePipeline} based on the provided components containing only the
* provided {@code transformIds}.
*/
public static QueryablePipeline forTransforms(
Collection<String> transformIds, Components components) {
return new QueryablePipeline(transformIds, components);
}
private final Components components;
/**
* The {@link Pipeline} represented by a {@link Network}.
*
* <p>This is a directed bipartite graph consisting of {@link PTransformNode PTransformNodes} and
* {@link PCollectionNode PCollectionNodes}. Each {@link PCollectionNode} has exactly one in edge,
* and an arbitrary number of out edges. Each {@link PTransformNode} has an arbitrary number of in
* and out edges.
*
* <p>Parallel edges are permitted, as a {@link PCollectionNode} can be consumed by a single
* {@link PTransformNode} any number of times with different local names.
*/
private final Network<PipelineNode, PipelineEdge> pipelineNetwork;
private QueryablePipeline(Collection<String> transformIds, Components components) {
this.components = components;
this.pipelineNetwork = buildNetwork(transformIds, this.components);
}
/** Produces a {@link RunnerApi.Components} which contains only primitive transforms. */
@VisibleForTesting
private static final Set<String> PRIMITIVE_URNS =
ImmutableSet.of(
PAR_DO_TRANSFORM_URN,
FLATTEN_TRANSFORM_URN,
GROUP_BY_KEY_TRANSFORM_URN,
IMPULSE_TRANSFORM_URN,
ASSIGN_WINDOWS_TRANSFORM_URN,
TEST_STREAM_TRANSFORM_URN,
MAP_WINDOWS_TRANSFORM_URN,
READ_TRANSFORM_URN,
CREATE_VIEW_TRANSFORM_URN,
SPLITTABLE_PROCESS_KEYED_URN,
SPLITTABLE_PROCESS_ELEMENTS_URN);
/** Returns true if the provided transform is a primitive. */
private static boolean isPrimitiveTransform(PTransform transform) {
String urn = PTransformTranslation.urnForTransformOrNull(transform);
return PRIMITIVE_URNS.contains(urn) || NativeTransforms.isNative(transform);
}
private MutableNetwork<PipelineNode, PipelineEdge> buildNetwork(
Collection<String> transformIds, Components components) {
MutableNetwork<PipelineNode, PipelineEdge> network =
NetworkBuilder.directed().allowsParallelEdges(true).allowsSelfLoops(false).build();
Set<PCollectionNode> unproducedCollections = new HashSet<>();
for (String transformId : transformIds) {
PTransform transform = components.getTransformsOrThrow(transformId);
PTransformNode transformNode =
PipelineNode.pTransform(transformId, this.components.getTransformsOrThrow(transformId));
network.addNode(transformNode);
for (String produced : transform.getOutputsMap().values()) {
PCollectionNode producedNode =
PipelineNode.pCollection(produced, components.getPcollectionsOrThrow(produced));
network.addNode(producedNode);
network.addEdge(transformNode, producedNode, new PerElementEdge());
checkArgument(
network.inDegree(producedNode) == 1,
"A %s should have exactly one producing %s, but found %s:\nPCollection:\n%s\nProducers:\n%s",
PCollectionNode.class.getSimpleName(),
PTransformNode.class.getSimpleName(),
network.predecessors(producedNode).size(),
producedNode,
network.predecessors(producedNode));
unproducedCollections.remove(producedNode);
}
for (Map.Entry<String, String> consumed : transform.getInputsMap().entrySet()) {
String pcollectionId = consumed.getValue();
PCollectionNode consumedNode =
PipelineNode.pCollection(
pcollectionId, this.components.getPcollectionsOrThrow(pcollectionId));
if (network.addNode(consumedNode)) {
unproducedCollections.add(consumedNode);
}
if (getLocalSideInputNames(transform).contains(consumed.getKey())) {
network.addEdge(consumedNode, transformNode, new SingletonEdge());
} else {
network.addEdge(consumedNode, transformNode, new PerElementEdge());
}
}
}
checkArgument(
unproducedCollections.isEmpty(),
"%ss %s were consumed but never produced",
PCollectionNode.class.getSimpleName(),
unproducedCollections);
return network;
}
public Collection<PTransformNode> getTransforms() {
return pipelineNetwork
.nodes()
.stream()
.filter(PTransformNode.class::isInstance)
.map(PTransformNode.class::cast)
.collect(Collectors.toList());
}
public Iterable<PTransformNode> getTopologicallyOrderedTransforms() {
return StreamSupport.stream(
Networks.topologicalOrder(pipelineNetwork, Comparator.comparing(PipelineNode::getId))
.spliterator(),
false)
.filter(PTransformNode.class::isInstance)
.map(PTransformNode.class::cast)
.collect(Collectors.toList());
}
/**
* Get the transforms that are roots of this {@link QueryablePipeline}. These are all nodes which
* have no input {@link PCollection}.
*/
public Set<PTransformNode> getRootTransforms() {
return pipelineNetwork
.nodes()
.stream()
.filter(pipelineNode -> pipelineNetwork.inEdges(pipelineNode).isEmpty())
.map(pipelineNode -> (PTransformNode) pipelineNode)
.collect(Collectors.toSet());
}
public PTransformNode getProducer(PCollectionNode pcollection) {
return (PTransformNode) Iterables.getOnlyElement(pipelineNetwork.predecessors(pcollection));
}
/**
* Get all of the {@link PTransformNode PTransforms} which consume the provided {@link
* PCollectionNode} on a per-element basis.
*
* <p>If a {@link PTransformNode} consumes a {@link PCollectionNode} on a per-element basis one or
* more times, it will appear a single time in the result.
*
* <p>In theory, a transform may consume a single {@link PCollectionNode} in both a per-element
* and singleton manner. If this is the case, the transform node is included in the result, as it
* does consume the {@link PCollectionNode} on a per-element basis.
*/
public Set<PTransformNode> getPerElementConsumers(PCollectionNode pCollection) {
return pipelineNetwork
.successors(pCollection)
.stream()
.filter(
consumer ->
pipelineNetwork
.edgesConnecting(pCollection, consumer)
.stream()
.anyMatch(PipelineEdge::isPerElement))
.map(pipelineNode -> (PTransformNode) pipelineNode)
.collect(Collectors.toSet());
}
/**
* Same as {@link
* the collection as a singleton.
*/
public Set<PTransformNode> getSingletonConsumers(PCollectionNode pCollection) {
return pipelineNetwork
.successors(pCollection)
.stream()
.filter(
consumer ->
pipelineNetwork
.edgesConnecting(pCollection, consumer)
.stream()
.anyMatch(edge -> !edge.isPerElement()))
.map(pipelineNode -> (PTransformNode) pipelineNode)
.collect(Collectors.toSet());
}
/**
* Gets each {@link PCollectionNode} that the provided {@link PTransformNode} consumes on a
* per-element basis.
*/
public Set<PCollectionNode> getPerElementInputPCollections(PTransformNode ptransform) {
return pipelineNetwork
.inEdges(ptransform)
.stream()
.filter(PipelineEdge::isPerElement)
.map(edge -> (PCollectionNode) pipelineNetwork.incidentNodes(edge).source())
.collect(Collectors.toSet());
}
public Set<PCollectionNode> getOutputPCollections(PTransformNode ptransform) {
return pipelineNetwork
.successors(ptransform)
.stream()
.map(pipelineNode -> (PCollectionNode) pipelineNode)
.collect(Collectors.toSet());
}
public Components getComponents() {
return components;
}
/**
* Returns the {@link SideInputReference SideInputReferences} that the provided transform consumes
* as side inputs.
*/
public Collection<SideInputReference> getSideInputs(PTransformNode transform) {
return getLocalSideInputNames(transform.getTransform())
.stream()
.map(
localName -> {
String transformId = transform.getId();
PTransform transformProto = components.getTransformsOrThrow(transformId);
String collectionId = transform.getTransform().getInputsOrThrow(localName);
PCollection collection = components.getPcollectionsOrThrow(collectionId);
return SideInputReference.of(
PipelineNode.pTransform(transformId, transformProto),
localName,
PipelineNode.pCollection(collectionId, collection));
})
.collect(Collectors.toSet());
}
public Collection<UserStateReference> getUserStates(PTransformNode transform) {
return getLocalUserStateNames(transform.getTransform())
.stream()
.map(
localName -> {
String transformId = transform.getId();
PTransform transformProto = components.getTransformsOrThrow(transformId);
String collectionId =
transform
.getTransform()
.getInputsOrThrow(
Iterables.getOnlyElement(
Sets.difference(
transform.getTransform().getInputsMap().keySet(),
getLocalSideInputNames(transformProto))));
PCollection collection = components.getPcollectionsOrThrow(collectionId);
return UserStateReference.of(
PipelineNode.pTransform(transformId, transformProto),
localName,
PipelineNode.pCollection(collectionId, collection));
})
.collect(Collectors.toSet());
}
public Collection<TimerReference> getTimers(PTransformNode transform) {
return getLocalTimerNames(transform.getTransform())
.stream()
.map(
localName -> {
String transformId = transform.getId();
PTransform transformProto = components.getTransformsOrThrow(transformId);
String collectionId = transform.getTransform().getInputsOrThrow(localName);
PCollection collection = components.getPcollectionsOrThrow(collectionId);
return TimerReference.of(
PipelineNode.pTransform(transformId, transformProto),
localName,
PipelineNode.pCollection(collectionId, collection));
})
.collect(Collectors.toSet());
}
private Set<String> getLocalSideInputNames(PTransform transform) {
if (PAR_DO_TRANSFORM_URN.equals(transform.getSpec().getUrn())) {
try {
return ParDoPayload.parseFrom(transform.getSpec().getPayload()).getSideInputsMap().keySet();
} catch (InvalidProtocolBufferException e) {
throw new RuntimeException(e);
}
} else {
return Collections.emptySet();
}
}
private Set<String> getLocalUserStateNames(PTransform transform) {
if (PAR_DO_TRANSFORM_URN.equals(transform.getSpec().getUrn())) {
try {
return ParDoPayload.parseFrom(transform.getSpec().getPayload()).getStateSpecsMap().keySet();
} catch (InvalidProtocolBufferException e) {
throw new RuntimeException(e);
}
} else {
return Collections.emptySet();
}
}
private Set<String> getLocalTimerNames(PTransform transform) {
if (PAR_DO_TRANSFORM_URN.equals(transform.getSpec().getUrn())) {
try {
return ParDoPayload.parseFrom(transform.getSpec().getPayload()).getTimerSpecsMap().keySet();
} catch (InvalidProtocolBufferException e) {
throw new RuntimeException(e);
}
} else {
return Collections.emptySet();
}
}
public Optional<Environment> getEnvironment(PTransformNode parDo) {
return Environments.getEnvironment(parDo.getId(), components);
}
private interface PipelineEdge {
boolean isPerElement();
}
private static class PerElementEdge implements PipelineEdge {
@Override
public boolean isPerElement() {
return true;
}
}
private static class SingletonEdge implements PipelineEdge {
@Override
public boolean isPerElement() {
return false;
}
}
} |
We don't need a variable 'contentType' here. Check all getPayload methods | public void testGetBinaryPayloadMethod() {
BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
BStruct entity = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, entityStruct);
BStruct mediaType = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, mediaTypeStruct);
HTTPCarbonMessage cMsg = HttpUtil.createHttpCarbonMessage(true);
String payload = "ballerina";
String contentType = OCTET_STREAM;
MimeUtil.setContentType(mediaType, entity, contentType);
entity.setBlobField(BYTE_DATA_INDEX, payload.getBytes());
entity.setBooleanField(IS_IN_MEMORY_INDEX, 1);
request.addNativeData(MESSAGE_ENTITY, entity);
request.addNativeData(IS_ENTITY_BODY_PRESENT, true);
HttpUtil.addCarbonMsg(request, cMsg);
BValue[] inputArg = { request };
BValue[] returnVals = BRunUtil.invoke(result, "testGetBinaryPayload", inputArg);
Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
"Invalid Return Values.");
Assert.assertEquals(returnVals[0].stringValue(), payload);
} | String contentType = OCTET_STREAM; | public void testGetBinaryPayloadMethod() {
BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
BStruct entity = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, entityStruct);
BStruct mediaType = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, mediaTypeStruct);
HTTPCarbonMessage cMsg = HttpUtil.createHttpCarbonMessage(true);
String payload = "ballerina";
MimeUtil.setContentType(mediaType, entity, OCTET_STREAM);
entity.setBlobField(BYTE_DATA_INDEX, payload.getBytes());
entity.setBooleanField(IS_IN_MEMORY_INDEX, 1);
request.addNativeData(MESSAGE_ENTITY, entity);
request.addNativeData(IS_ENTITY_BODY_PRESENT, true);
HttpUtil.addCarbonMsg(request, cMsg);
BValue[] inputArg = { request };
BValue[] returnVals = BRunUtil.invoke(result, "testGetBinaryPayload", inputArg);
Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
"Invalid Return Values.");
Assert.assertEquals(returnVals[0].stringValue(), payload);
} | class RequestNativeFunctionSuccessTest {
private static final Logger LOG = LoggerFactory.getLogger(RequestNativeFunctionSuccessTest.class);
private CompileResult result, serviceResult;
private final String requestStruct = Constants.REQUEST;
private final String headerStruct = HEADER_VALUE_STRUCT;
private final String protocolPackageHttp = Constants.PROTOCOL_PACKAGE_HTTP;
private final String protocolPackageMime = PROTOCOL_PACKAGE_MIME;
private final String protocolPackageFile = PROTOCOL_PACKAGE_FILE;
private final String entityStruct = Constants.ENTITY;
private final String mediaTypeStruct = MEDIA_TYPE;
private String sourceFilePath = "test-src/statements/services/nativeimpl/request/request-native-function.bal";
@BeforeClass
public void setup() {
result = BCompileUtil.compile(sourceFilePath);
serviceResult = BServiceUtil.setupProgramFile(this, sourceFilePath);
}
@Test
public void testAddHeader() {
BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
HTTPCarbonMessage cMsg = HttpUtil.createHttpCarbonMessage(true);
HttpUtil.addCarbonMsg(request, cMsg);
String headerName = "header1";
String headerValue = "headerValue";
BString key = new BString(headerName);
BString value = new BString(headerValue);
BValue[] inputArg = { request, key, value };
BValue[] returnVals = BRunUtil.invoke(result, "testAddHeader", inputArg);
Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
"Invalid Return Values.");
Assert.assertTrue(returnVals[0] instanceof BStruct);
BStruct entityStruct = (BStruct) ((BStruct) returnVals[0]).getNativeData(MESSAGE_ENTITY);
BMap map = (BMap) entityStruct.getRefField(ENTITY_HEADERS_INDEX);
BRefValueArray array = (BRefValueArray) map.get(headerName);
Assert.assertEquals(((BStruct) array.get(0)).getStringField(0), headerValue);
}
@Test(description = "Test addHeader function within a service")
public void testServiceAddHeader() {
String key = "lang";
String value = "ballerina";
String path = "/hello/addheader/" + key + "/" + value;
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_GET);
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
BJSON bJson = new BJSON(getReturnValue(response));
Assert.assertEquals(bJson.value().get(key).asText(), value);
}
@Test(description = "Test req struct add Header function")
public void testStructAddHeader() {
String value = "ballerina";
String path = "/hello/addReqHeader";
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_GET);
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
BJSON bJson = new BJSON(getReturnValue(response));
Assert.assertEquals(bJson.value().get("headerValue").asText(), value);
Assert.assertEquals(bJson.value().get("paramValue").asText(), String.valueOf(6));
}
@Test(description = "Test req struct add Header function without params")
public void testStructAddHeaderWithNoParam() {
String value = "ballerina";
String path = "/hello/addReqHeaderWithoutParam";
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_GET);
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
BJSON bJson = new BJSON(getReturnValue(response));
Assert.assertEquals(bJson.value().get("headerValue").asText(), value);
Assert.assertEquals(bJson.value().get("paramValue").asText(), "param is null");
}
@Test(description = "Test req struct add Header function")
public void testAddHeaderViaBalFunction() {
String path = "/hello/addReqHeaderFunc";
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_GET);
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
BJSON bJson = new BJSON(getReturnValue(response));
Assert.assertEquals(bJson.value().get("headerValue").asText(), "chamil");
Assert.assertEquals(bJson.value().get("size").asText(), String.valueOf(3));
}
@Test(description = "Test getBinaryPayload method of the request")
@Test
public void testGetContentLength() {
BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage("", Constants.HTTP_METHOD_GET);
String payload = "ballerina";
cMsg.setHeader(Constants.HTTP_CONTENT_LENGTH, String.valueOf(payload.length()));
HttpUtil.addCarbonMsg(request, cMsg);
HttpUtil.setHeaderValueStructType(
BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, headerStruct));
BStruct entity = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, entityStruct);
BStruct mediaType = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, mediaTypeStruct);
HttpUtil.populateInboundRequest(request, entity, mediaType, cMsg);
BValue[] inputArg = { request };
BValue[] returnVals = BRunUtil.invoke(result, "testGetContentLength", inputArg);
Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
"Invalid Return Values.");
Assert.assertEquals(payload.length(), ((BInteger) returnVals[0]).intValue());
}
@Test(description = "Test GetContentLength function within a service")
public void testServiceGetContentLength() {
String key = "lang";
String value = "ballerina";
String path = "/hello/getContentLength";
String jsonString = "{\"" + key + "\":\"" + value + "\"}";
int length = jsonString.length();
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_POST, jsonString);
cMsg.setHeader(Constants.HTTP_CONTENT_LENGTH, String.valueOf(length));
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
BJSON bJson = new BJSON(new HttpMessageDataStreamer(response).getInputStream());
Assert.assertEquals(bJson.value().get("value").asText(), String.valueOf(length));
}
@Test
public void testGetHeader() {
BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage("", Constants.HTTP_METHOD_GET);
cMsg.setHeader(CONTENT_TYPE, APPLICATION_FORM);
HttpUtil.setHeaderValueStructType(
BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, headerStruct));
BStruct entity = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, entityStruct);
BStruct mediaType = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, mediaTypeStruct);
HttpUtil.populateInboundRequest(request, entity, mediaType, cMsg);
BString key = new BString(CONTENT_TYPE);
BValue[] inputArg = { request, key };
BValue[] returnVals = BRunUtil.invoke(result, "testGetHeader", inputArg);
Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
"Invalid Return Values.");
Assert.assertEquals(returnVals[0].stringValue(), APPLICATION_FORM);
}
@Test(description = "Test GetHeader function within a service")
public void testServiceGetHeader() {
String path = "/hello/getHeader";
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_GET);
cMsg.setHeader(CONTENT_TYPE, APPLICATION_FORM);
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
BJSON bJson = new BJSON(new HttpMessageDataStreamer(response).getInputStream());
Assert.assertEquals(bJson.value().get("value").asText(), APPLICATION_FORM);
}
@Test(description = "Test struct Get Header operation")
public void testStructGetHeader() {
String path = "/hello/getReqHeader";
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_GET);
cMsg.setHeader("test-header", APPLICATION_FORM);
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
BJSON bJson = new BJSON(getReturnValue(response));
Assert.assertEquals(bJson.value().get("value").asText(), APPLICATION_FORM);
}
@Test(description = "Test GetHeaders function within a function")
public void testGetHeaders() {
BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage("", Constants.HTTP_METHOD_GET);
cMsg.setHeader("test-header", APPLICATION_FORM + "," + TEXT_PLAIN + ";b=5");
HttpUtil.setHeaderValueStructType(
BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, headerStruct));
BStruct entity = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, entityStruct);
BStruct mediaType = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, mediaTypeStruct);
HttpUtil.populateInboundRequest(request, entity, mediaType, cMsg);
BString key = new BString("test-header");
BValue[] inputArg = { request, key };
BValue[] returnVals = BRunUtil.invoke(result, "testGetHeaders", inputArg);
Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
"Invalid Return Values.");
Assert.assertEquals(returnVals[0].stringValue(), TEXT_PLAIN);
}
@Test(description = "Test GetHeaders function within a service")
public void testServiceGetHeaders() {
String path = "/hello/getHeaders";
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_GET);
cMsg.setHeader("test-header", APPLICATION_FORM + "," + TEXT_PLAIN + ";b=5");
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
BJSON bJson = new BJSON(getReturnValue(response));
Assert.assertEquals(bJson.value().get("value").asText(), TEXT_PLAIN);
Assert.assertEquals(bJson.value().get("paramValue").asText(), String.valueOf(5));
}
@Test(description = "Test GetHeaders function with values of struct")
public void testStructGetHeaders() {
String path = "/hello/getReqHeaders";
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_GET);
cMsg.setHeader("test-header", APPLICATION_FORM);
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
BJSON bJson = new BJSON(getReturnValue(response));
Assert.assertEquals(bJson.value().get("value").asText(), "transport");
}
@Test
public void testGetJsonPayload() {
BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
BStruct entity = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, entityStruct);
BStruct mediaType = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, mediaTypeStruct);
HTTPCarbonMessage cMsg = HttpUtil.createHttpCarbonMessage(true);
String payload = "{'code':'123'}";
cMsg.setHeader(CONTENT_TYPE, APPLICATION_JSON);
HttpUtil.addCarbonMsg(request, cMsg);
MimeUtil.setContentType(mediaType, entity, APPLICATION_JSON);
entity.setRefField(JSON_DATA_INDEX, new BJSON(payload));
entity.setBooleanField(IS_IN_MEMORY_INDEX, 1);
request.addNativeData(MESSAGE_ENTITY, entity);
request.addNativeData(IS_ENTITY_BODY_PRESENT, true);
BValue[] inputArg = { request };
BValue[] returnVals = BRunUtil.invoke(result, "testGetJsonPayload", inputArg);
Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
"Invalid Return Values.");
Assert.assertEquals(((BJSON) returnVals[0]).value().get("code").asText(), "123");
}
@Test(description = "Test GetJsonPayload function within a service")
public void testServiceGetJsonPayload() {
String key = "lang";
String value = "ballerina";
String path = "/hello/getJsonPayload";
String jsonString = "{\"" + key + "\":\"" + value + "\"}";
List<Header> headers = new ArrayList<Header>();
headers.add(new Header("Content-Type", APPLICATION_JSON));
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_POST, headers, jsonString);
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
Assert.assertEquals(new BJSON(getReturnValue(response)).value().stringValue(), value);
}
@Test
public void testGetProperty() {
BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
HTTPCarbonMessage cMsg = HttpUtil.createHttpCarbonMessage(true);
String propertyName = "wso2";
String propertyValue = "Ballerina";
cMsg.setProperty(propertyName, propertyValue);
HttpUtil.addCarbonMsg(request, cMsg);
BString name = new BString(propertyName);
BValue[] inputArg = { request, name };
BValue[] returnVals = BRunUtil.invoke(result, "testGetProperty", inputArg);
Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
"Invalid Return Values.");
Assert.assertEquals(returnVals[0].stringValue(), propertyValue);
}
@Test(description = "Test GetProperty function within a service")
public void testServiceGetProperty() {
String propertyName = "wso2";
String propertyValue = "Ballerina";
String path = "/hello/GetProperty";
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_GET);
cMsg.setProperty(propertyName, propertyValue);
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
BJSON bJson = new BJSON(new HttpMessageDataStreamer(response).getInputStream());
Assert.assertEquals(bJson.value().get("value").asText(), propertyValue);
}
@Test
public void testGetStringPayload() {
BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
BStruct entity = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, entityStruct);
BStruct mediaType = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, mediaTypeStruct);
HTTPCarbonMessage cMsg = HttpUtil.createHttpCarbonMessage(true);
String payload = "ballerina";
String contentType = TEXT_PLAIN;
MimeUtil.setContentType(mediaType, entity, contentType);
entity.setStringField(TEXT_DATA_INDEX, payload);
entity.setBooleanField(IS_IN_MEMORY_INDEX, 1);
request.addNativeData(MESSAGE_ENTITY, entity);
request.addNativeData(IS_ENTITY_BODY_PRESENT, true);
HttpUtil.addCarbonMsg(request, cMsg);
BValue[] inputArg = { request };
BValue[] returnVals = BRunUtil.invoke(result, "testGetStringPayload", inputArg);
Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
"Invalid Return Values.");
Assert.assertEquals(returnVals[0].stringValue(), payload);
}
@Test(description = "Test GetStringPayload function within a service")
public void testServiceGetStringPayload() {
String value = "ballerina";
String path = "/hello/GetStringPayload";
List<Header> headers = new ArrayList<Header>();
headers.add(new Header("Content-Type", TEXT_PLAIN));
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_POST, headers, value);
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
Assert.assertEquals(getReturnValue(response), value);
}
@Test
public void testGetXmlPayload() {
BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
BStruct entity = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, entityStruct);
BStruct mediaType = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, mediaTypeStruct);
HTTPCarbonMessage cMsg = HttpUtil.createHttpCarbonMessage(true);
String payload = "<name>ballerina</name>";
String contentType = APPLICATION_XML;
MimeUtil.setContentType(mediaType, entity, contentType);
entity.setRefField(XML_DATA_INDEX, new BXMLItem(payload));
entity.setBooleanField(IS_IN_MEMORY_INDEX, 1);
request.addNativeData(MESSAGE_ENTITY, entity);
request.addNativeData(IS_ENTITY_BODY_PRESENT, true);
HttpUtil.addCarbonMsg(request, cMsg);
BValue[] inputArg = { request };
BValue[] returnVals = BRunUtil.invoke(result, "testGetXmlPayload", inputArg);
Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
"Invalid Return Values.");
Assert.assertEquals(((BXMLItem) returnVals[0]).getTextValue().stringValue(), "ballerina");
}
@Test(description = "Test GetXmlPayload function within a service")
public void testServiceGetXmlPayload() {
String value = "ballerina";
String path = "/hello/GetXmlPayload";
String bxmlItemString = "<name>ballerina</name>";
List<Header> headers = new ArrayList<Header>();
headers.add(new Header("Content-Type", APPLICATION_XML));
HTTPTestRequest cMsg = MessageUtils
.generateHTTPMessage(path, Constants.HTTP_METHOD_POST, headers, bxmlItemString);
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
Assert.assertEquals(getReturnValue(response), value);
}
@Test
public void testRemoveHeader() {
BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
HTTPCarbonMessage cMsg = HttpUtil.createHttpCarbonMessage(true);
String expect = "Expect";
cMsg.setHeader(expect, "100-continue");
HttpUtil.addCarbonMsg(request, cMsg);
BString key = new BString(expect);
BValue[] inputArg = { request, key };
BValue[] returnVals = BRunUtil.invoke(result, "testRemoveHeader", inputArg);
Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
"Invalid Return Values.");
Assert.assertTrue(returnVals[0] instanceof BStruct);
BStruct entityStruct = (BStruct) ((BStruct) returnVals[0]).getNativeData(MESSAGE_ENTITY);
BMap map = (BMap) entityStruct.getRefField(ENTITY_HEADERS_INDEX);
Assert.assertNull(map.get("100-continue"));
}
@Test(description = "Test RemoveHeader function within a service")
public void testServiceRemoveHeader() {
String path = "/hello/RemoveHeader";
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_GET);
cMsg.setHeader(CONTENT_TYPE, APPLICATION_FORM);
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
BJSON bJson = new BJSON(new HttpMessageDataStreamer(response).getInputStream());
Assert.assertEquals(bJson.value().get("value").asText(), "value is null");
}
@Test
public void testRemoveAllHeaders() {
BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
HTTPCarbonMessage cMsg = HttpUtil.createHttpCarbonMessage(true);
String expect = "Expect";
String range = "Range";
cMsg.setHeader(expect, "100-continue");
cMsg.setHeader(range, "bytes=500-999");
HttpUtil.addCarbonMsg(request, cMsg);
BValue[] inputArg = { request };
BValue[] returnVals = BRunUtil.invoke(result, "testRemoveAllHeaders", inputArg);
Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
"Invalid Return Values.");
Assert.assertTrue(returnVals[0] instanceof BStruct);
BStruct entityStruct = (BStruct) ((BStruct) returnVals[0]).getNativeData(MESSAGE_ENTITY);
BMap map = (BMap) entityStruct.getRefField(ENTITY_HEADERS_INDEX);
Assert.assertNull(map.get(expect));
Assert.assertNull(map.get(range));
}
@Test(description = "Test RemoveAllHeaders function within a service")
public void testServiceRemoveAllHeaders() {
String expect = "Expect";
String range = "Range";
String path = "/hello/RemoveAllHeaders";
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_GET);
cMsg.setHeader(expect, "100-continue");
cMsg.setHeader(range, "bytes=500-999");
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
BJSON bJson = new BJSON(new HttpMessageDataStreamer(response).getInputStream());
Assert.assertEquals(bJson.value().get("value").asText(), "value is null");
}
@Test
public void testSetHeader() {
BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
HTTPCarbonMessage cMsg = HttpUtil.createHttpCarbonMessage(true);
HttpUtil.addCarbonMsg(request, cMsg);
String range = "Range";
String rangeValue = "bytes=500-999";
BString key = new BString(range);
BString value = new BString(rangeValue);
BValue[] inputArg = { request, key, value };
BValue[] returnVals = BRunUtil.invoke(result, "testSetHeader", inputArg);
Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
"Invalid Return Values.");
Assert.assertTrue(returnVals[0] instanceof BStruct);
BStruct entityStruct = (BStruct) ((BStruct) returnVals[0]).getNativeData(MESSAGE_ENTITY);
BMap map = (BMap) entityStruct.getRefField(ENTITY_HEADERS_INDEX);
BRefValueArray array = (BRefValueArray) map.get(range);
Assert.assertEquals(((BStruct) array.get(0)).getStringField(0), rangeValue);
}
@Test
public void testSetHeaderStruct() {
BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage("", Constants.HTTP_METHOD_GET);
HttpUtil.addCarbonMsg(request, cMsg);
HttpUtil.setHeaderValueStructType(
BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, entityStruct));
BStruct entity = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, entityStruct);
BStruct mediaType = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, mediaTypeStruct);
HttpUtil.populateInboundRequest(request, entity, mediaType, cMsg);
String range = "Range";
String rangeValue = "bytes=500-999";
BString key = new BString(range);
BString value = new BString(rangeValue);
BValue[] inputArg = { request, key, value };
BValue[] returnVals = BRunUtil.invoke(result, "testSetHeaderStruct", inputArg);
Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
"Invalid Return Values.");
Assert.assertTrue(returnVals[0] instanceof BStruct);
BStruct entityStruct = (BStruct) ((BStruct) returnVals[0]).getNativeData(MESSAGE_ENTITY);
BMap map = (BMap) entityStruct.getRefField(ENTITY_HEADERS_INDEX);
BRefValueArray array = (BRefValueArray) map.get(range);
Assert.assertEquals(((BStruct) array.get(0)).getStringField(0), rangeValue);
}
@Test(description = "Test SetHeader function within a service")
public void testServiceSetHeader() {
String key = "lang";
String value = "ballerina";
String path = "/hello/setHeader/" + key + "/" + value;
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_GET);
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
BJSON bJson = new BJSON(new HttpMessageDataStreamer(response).getInputStream());
Assert.assertEquals(bJson.value().get("value").asText(), value);
}
@Test(description = "Test Setting Header in struct within a service")
public void testServiceSetHeaderStruct() {
String key = "lang";
String value = "ballerina";
String path = "/hello/setHeaderStruct/" + key + "/" + value;
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_GET);
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
BJSON bJson = new BJSON(new HttpMessageDataStreamer(response).getInputStream());
Assert.assertEquals(bJson.value().get("value").asText(), value);
}
@Test
public void testSetJsonPayload() {
BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
HTTPCarbonMessage requestMsg = HttpUtil.createHttpCarbonMessage(true);
HttpUtil.addCarbonMsg(request, requestMsg);
BJSON value = new BJSON("{'name':'wso2'}");
BValue[] inputArg = { request, value };
BValue[] returnVals = BRunUtil.invoke(result, "testSetJsonPayload", inputArg);
Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
"Invalid Return Values.");
Assert.assertTrue(returnVals[0] instanceof BStruct);
BStruct entity = (BStruct) ((BStruct) returnVals[0]).getNativeData(MESSAGE_ENTITY);
BJSON bJson = (BJSON) entity.getRefField(JSON_DATA_INDEX);
Assert.assertEquals(bJson.value().get("name").asText(), "wso2", "Payload is not set properly");
}
@Test(description = "Test SetJsonPayload function within a service")
public void testServiceSetJsonPayload() {
String value = "ballerina";
String path = "/hello/SetJsonPayload/" + value;
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_GET);
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
BJSON bJson = new BJSON(new HttpMessageDataStreamer(response).getInputStream());
Assert.assertEquals(bJson.value().get("lang").asText(), value);
}
@Test
public void testSetProperty() {
BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
HTTPCarbonMessage cMsg = HttpUtil.createHttpCarbonMessage(true);
HttpUtil.addCarbonMsg(request, cMsg);
String propertyName = "wso2";
String propertyValue = "Ballerina";
BString name = new BString(propertyName);
BString value = new BString(propertyValue);
BValue[] inputArg = { request, name, value };
BValue[] returnVals = BRunUtil.invoke(result, "testSetProperty", inputArg);
Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
"Invalid Return Values.");
Assert.assertTrue(returnVals[0] instanceof BStruct);
HTTPCarbonMessage response = HttpUtil.getCarbonMsg((BStruct) returnVals[0], null);
Assert.assertEquals(response.getProperty(propertyName), propertyValue);
}
@Test(description = "Test SetProperty function within a service")
public void testServiceSetProperty() {
String key = "lang";
String value = "ballerina";
String path = "/hello/SetProperty/" + key + "/" + value;
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_GET);
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
BJSON bJson = new BJSON(new HttpMessageDataStreamer(response).getInputStream());
Assert.assertEquals(bJson.value().get("value").asText(), value);
}
@Test
public void testSetStringPayload() {
BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
HTTPCarbonMessage cMsg = HttpUtil.createHttpCarbonMessage(true);
HttpUtil.addCarbonMsg(request, cMsg);
BString value = new BString("Ballerina");
BValue[] inputArg = { request, value };
BValue[] returnVals = BRunUtil.invoke(result, "testSetStringPayload", inputArg);
Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
"Invalid Return Values.");
Assert.assertTrue(returnVals[0] instanceof BStruct);
BStruct entity = (BStruct) ((BStruct) returnVals[0]).getNativeData(MESSAGE_ENTITY);
String stringValue = entity.getStringField(TEXT_DATA_INDEX);
Assert.assertEquals(stringValue, "Ballerina", "Payload is not set properly");
}
@Test(description = "Test SetStringPayload function within a service")
public void testServiceSetStringPayload() {
String value = "ballerina";
String path = "/hello/SetStringPayload/" + value;
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_GET);
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
BJSON bJson = new BJSON(new HttpMessageDataStreamer(response).getInputStream());
Assert.assertEquals(bJson.value().get("lang").asText(), value);
}
@Test
public void testSetXmlPayload() {
BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
HTTPCarbonMessage cMsg = HttpUtil.createHttpCarbonMessage(true);
HttpUtil.addCarbonMsg(request, cMsg);
BXMLItem value = new BXMLItem("<name>Ballerina</name>");
BValue[] inputArg = { request, value };
BValue[] returnVals = BRunUtil.invoke(result, "testSetXmlPayload", inputArg);
Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
"Invalid Return Values.");
Assert.assertTrue(returnVals[0] instanceof BStruct);
BStruct entity = (BStruct) ((BStruct) returnVals[0]).getNativeData(MESSAGE_ENTITY);
BXMLItem xmlValue = (BXMLItem) entity.getRefField(XML_DATA_INDEX);
Assert.assertEquals(xmlValue.getTextValue().stringValue(), "Ballerina", "Payload is not set properly");
}
@Test(description = "Test SetXmlPayload function within a service")
public void testServiceSetXmlPayload() {
String value = "Ballerina";
String path = "/hello/SetXmlPayload/";
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_GET);
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
BJSON bJson = new BJSON(new HttpMessageDataStreamer(response).getInputStream());
Assert.assertEquals(bJson.value().get("lang").asText(), value);
}
@Test
public void testGetMethod() {
String path = "/hello/11";
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_GET);
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
Assert.assertEquals(
StringUtils.getStringFromInputStream(new HttpMessageDataStreamer(response).getInputStream()),
Constants.HTTP_METHOD_GET);
}
/**
 * Calls the /hello/12 resource and expects the service to echo the request URL path.
 */
@Test
public void testGetRequestURL() {
    final String requestPath = "/hello/12";
    HTTPTestRequest requestMsg = MessageUtils.generateHTTPMessage(requestPath, Constants.HTTP_METHOD_GET);
    HTTPCarbonMessage responseMsg = Services.invokeNew(serviceResult, requestMsg);
    Assert.assertNotNull(responseMsg, "Response message not found");
    String echoedPath = StringUtils.getStringFromInputStream(
            new HttpMessageDataStreamer(responseMsg).getInputStream());
    Assert.assertEquals(echoedPath, requestPath);
}
// Exercises the SetBinaryPayload resource; the service reports the payload it set
// back as JSON under the "lang" key.
@Test(description = "Test setBinaryPayload() function within a service")
public void testServiceSetBinaryPayload() {
String value = "Ballerina";
String path = "/hello/SetBinaryPayload/";
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_GET);
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
BJSON bJson = new BJSON(new HttpMessageDataStreamer(response).getInputStream());
Assert.assertEquals(bJson.value().get("lang").asText(), value);
}
// Posts a plain payload and expects the GetBinaryPayload resource to echo it verbatim.
@Test(description = "Test getBinaryPayload() function within a service")
public void testServiceGetBinaryPayload() {
String payload = "ballerina";
String path = "/hello/GetBinaryPayload";
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_POST, payload);
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
Assert.assertEquals(
StringUtils.getStringFromInputStream(new HttpMessageDataStreamer(response).getInputStream()), payload);
}
// Invokes the bal function testSetBinaryPayload and checks the blob ends up in the
// request entity's byte-data field.
@Test(description = "Test setBinaryPayload() function")
public void testSetBinaryPayload() {
BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
HTTPCarbonMessage cMsg = HttpUtil.createHttpCarbonMessage(true);
HttpUtil.addCarbonMsg(request, cMsg);
// NOTE(review): getBytes() uses the platform default charset; harmless here since
// "Ballerina" is pure ASCII, but an explicit charset would be safer.
BBlob value = new BBlob("Ballerina".getBytes());
BValue[] inputArg = { request, value };
BValue[] returnVals = BRunUtil.invoke(result, "testSetBinaryPayload", inputArg);
Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
"Invalid Return Values.");
Assert.assertTrue(returnVals[0] instanceof BStruct);
BStruct entity = (BStruct) ((BStruct) returnVals[0]).getNativeData(MESSAGE_ENTITY);
BlobDataSource blobDataSource = new BlobDataSource(entity.getBlobField(BYTE_DATA_INDEX));
Assert.assertEquals(blobDataSource.getMessageAsString(), "Ballerina", "Payload is not set properly");
}
/**
 * Verifies setEntityBody(): writes a JSON payload to a temp file, hands the file struct to
 * the bal function, and checks that the overflow file referenced by the returned entity
 * still holds the payload.
 */
@Test (description = "Test setEntityBody() function")
public void testSetEntityBody() {
    BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
    HTTPCarbonMessage requestMsg = HttpUtil.createHttpCarbonMessage(true);
    HttpUtil.addCarbonMsg(request, requestMsg);
    try {
        File file = File.createTempFile("test", ".json");
        file.deleteOnExit();
        // try-with-resources guarantees the writer is closed even if write() throws
        // (the original leaked the writer on a write failure).
        try (BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(file))) {
            bufferedWriter.write("{'name':'wso2'}");
        }
        BStruct fileStruct = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageFile, FILE);
        fileStruct.setStringField(0, file.getAbsolutePath());
        BValue[] inputArg = { request, fileStruct, new BString(APPLICATION_JSON) };
        BValue[] returnVals = BRunUtil.invoke(result, "testSetEntityBody", inputArg);
        Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
                "Invalid Return Values.");
        Assert.assertTrue(returnVals[0] instanceof BStruct);
        BStruct entity = (BStruct) ((BStruct) returnVals[0]).getNativeData(MESSAGE_ENTITY);
        BStruct returnFileStruct = (BStruct) entity.getRefField(OVERFLOW_DATA_INDEX);
        String returnJsonValue = new String(Files.readAllBytes(Paths.get(returnFileStruct.getStringField(0))),
                UTF_8);
        BJSON bJson = new BJSON(returnJsonValue);
        Assert.assertEquals(bJson.value().get("name").asText(), "wso2", "Payload is not set properly");
    } catch (IOException e) {
        // Pass the throwable itself: the previous call passed e.getMessage() as a
        // parameter to a message with no {} placeholder, so it was silently dropped.
        LOG.error("Error occurred while creating a temporary file in testSetEntityBody", e);
    }
}
/**
 * Drains the response body input stream into a string.
 *
 * @param response carbon response message whose body is read
 * @return the body decoded as UTF-8 (whatever was read before an I/O error, if one occurs)
 */
private String getReturnValue(HTTPCarbonMessage response) {
    final int bufferSize = 1024;
    final char[] buffer = new char[bufferSize];
    final StringBuilder out = new StringBuilder();
    // try-with-resources closes the reader; the original never closed it (resource leak).
    try (Reader reader =
            new InputStreamReader(new HttpMessageDataStreamer(response).getInputStream(), UTF_8)) {
        int size;
        while ((size = reader.read(buffer, 0, buffer.length)) >= 0) {
            out.append(buffer, 0, size);
        }
    } catch (IOException e) {
        // UnsupportedEncodingException is a subclass of IOException, so the original's
        // two identical catch blocks collapse into one. Pass the throwable itself so the
        // stack trace is logged; the String arg had no {} placeholder and was dropped.
        LOG.error("Error occurred while reading the response value in getReturnValue", e);
    }
    return out.toString();
}
} | class RequestNativeFunctionSuccessTest {
// Logger used by helper methods of this test class to report I/O problems.
private static final Logger LOG = LoggerFactory.getLogger(RequestNativeFunctionSuccessTest.class);
// Compiled bal program for direct function invocation, and its deployed-service counterpart.
private CompileResult result, serviceResult;
// Struct and package names used when instantiating http/mime/file structs in the tests.
private final String requestStruct = Constants.REQUEST;
private final String headerStruct = HEADER_VALUE_STRUCT;
private final String protocolPackageHttp = Constants.PROTOCOL_PACKAGE_HTTP;
private final String protocolPackageMime = PROTOCOL_PACKAGE_MIME;
private final String protocolPackageFile = PROTOCOL_PACKAGE_FILE;
private final String entityStruct = Constants.ENTITY;
private final String mediaTypeStruct = MEDIA_TYPE;
// Single bal source containing both the functions and the services under test.
private String sourceFilePath = "test-src/statements/services/nativeimpl/request/request-native-function.bal";
// Compiles the bal source once and deploys its services before any test runs.
@BeforeClass
public void setup() {
result = BCompileUtil.compile(sourceFilePath);
serviceResult = BServiceUtil.setupProgramFile(this, sourceFilePath);
}
// Invokes the bal function testAddHeader and verifies the header appears as the first
// value under headerName in the entity's header map.
@Test
public void testAddHeader() {
BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
HTTPCarbonMessage cMsg = HttpUtil.createHttpCarbonMessage(true);
HttpUtil.addCarbonMsg(request, cMsg);
String headerName = "header1";
String headerValue = "headerValue";
BString key = new BString(headerName);
BString value = new BString(headerValue);
BValue[] inputArg = { request, key, value };
BValue[] returnVals = BRunUtil.invoke(result, "testAddHeader", inputArg);
Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
"Invalid Return Values.");
Assert.assertTrue(returnVals[0] instanceof BStruct);
// Header values are stored as an array of header structs keyed by header name.
BStruct entityStruct = (BStruct) ((BStruct) returnVals[0]).getNativeData(MESSAGE_ENTITY);
BMap map = (BMap) entityStruct.getRefField(ENTITY_HEADERS_INDEX);
BRefValueArray array = (BRefValueArray) map.get(headerName);
Assert.assertEquals(((BStruct) array.get(0)).getStringField(0), headerValue);
}
/**
 * Hits the addheader resource with key/value encoded in the path and expects the
 * service to echo the added header back as JSON.
 */
@Test(description = "Test addHeader function within a service")
public void testServiceAddHeader() {
    final String headerKey = "lang";
    final String headerValue = "ballerina";
    HTTPTestRequest requestMsg = MessageUtils.generateHTTPMessage(
            "/hello/addheader/" + headerKey + "/" + headerValue, Constants.HTTP_METHOD_GET);
    HTTPCarbonMessage responseMsg = Services.invokeNew(serviceResult, requestMsg);
    Assert.assertNotNull(responseMsg, "Response message not found");
    BJSON responseJson = new BJSON(getReturnValue(responseMsg));
    Assert.assertEquals(responseJson.value().get(headerKey).asText(), headerValue);
}
// Calls the addReqHeader resource; the service adds a header via the request struct and
// reports both the header value and a parsed header param.
@Test(description = "Test req struct add Header function")
public void testStructAddHeader() {
String value = "ballerina";
String path = "/hello/addReqHeader";
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_GET);
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
BJSON bJson = new BJSON(getReturnValue(response));
Assert.assertEquals(bJson.value().get("headerValue").asText(), value);
Assert.assertEquals(bJson.value().get("paramValue").asText(), String.valueOf(6));
}
// Same as testStructAddHeader but the added header carries no params; the service
// reports "param is null" for the missing param.
@Test(description = "Test req struct add Header function without params")
public void testStructAddHeaderWithNoParam() {
String value = "ballerina";
String path = "/hello/addReqHeaderWithoutParam";
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_GET);
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
BJSON bJson = new BJSON(getReturnValue(response));
Assert.assertEquals(bJson.value().get("headerValue").asText(), value);
Assert.assertEquals(bJson.value().get("paramValue").asText(), "param is null");
}
/**
 * Invokes the addReqHeaderFunc resource, which adds headers through a bal helper
 * function, and checks the reported header value and header count.
 */
@Test(description = "Test req struct add Header function")
public void testAddHeaderViaBalFunction() {
    HTTPTestRequest requestMsg =
            MessageUtils.generateHTTPMessage("/hello/addReqHeaderFunc", Constants.HTTP_METHOD_GET);
    HTTPCarbonMessage responseMsg = Services.invokeNew(serviceResult, requestMsg);
    Assert.assertNotNull(responseMsg, "Response message not found");
    BJSON responseJson = new BJSON(getReturnValue(responseMsg));
    Assert.assertEquals(responseJson.value().get("headerValue").asText(), "chamil");
    Assert.assertEquals(responseJson.value().get("size").asText(), String.valueOf(3));
}
/**
 * Verifies getContentLength(): sets a Content-Length header on an inbound request and
 * expects the bal function to return the same value.
 *
 * The original code stacked two @Test annotations here; @Test is not repeatable, so that
 * is a compile error, and the first annotation's description referred to a different
 * method (getBinaryPayload) — it was leftover copy-paste and has been removed.
 */
@Test
public void testGetContentLength() {
    BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
    HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage("", Constants.HTTP_METHOD_GET);
    String payload = "ballerina";
    cMsg.setHeader(Constants.HTTP_CONTENT_LENGTH, String.valueOf(payload.length()));
    HttpUtil.addCarbonMsg(request, cMsg);
    // Register the header-value struct type before populating the inbound request.
    HttpUtil.setHeaderValueStructType(
            BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, headerStruct));
    BStruct entity = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, entityStruct);
    BStruct mediaType = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, mediaTypeStruct);
    HttpUtil.populateInboundRequest(request, entity, mediaType, cMsg);
    BValue[] inputArg = { request };
    BValue[] returnVals = BRunUtil.invoke(result, "testGetContentLength", inputArg);
    Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
            "Invalid Return Values.");
    Assert.assertEquals(payload.length(), ((BInteger) returnVals[0]).intValue());
}
// Posts a JSON body with an explicit Content-Length header; the service reports the
// length it read back under "value".
@Test(description = "Test GetContentLength function within a service")
public void testServiceGetContentLength() {
String key = "lang";
String value = "ballerina";
String path = "/hello/getContentLength";
String jsonString = "{\"" + key + "\":\"" + value + "\"}";
int length = jsonString.length();
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_POST, jsonString);
cMsg.setHeader(Constants.HTTP_CONTENT_LENGTH, String.valueOf(length));
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
BJSON bJson = new BJSON(new HttpMessageDataStreamer(response).getInputStream());
Assert.assertEquals(bJson.value().get("value").asText(), String.valueOf(length));
}
// Invokes the bal function testGetHeader on an inbound request carrying a Content-Type
// header and expects the header value back.
@Test
public void testGetHeader() {
BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage("", Constants.HTTP_METHOD_GET);
cMsg.setHeader(CONTENT_TYPE, APPLICATION_FORM);
// Register the header-value struct type before populating the inbound request.
HttpUtil.setHeaderValueStructType(
BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, headerStruct));
BStruct entity = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, entityStruct);
BStruct mediaType = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, mediaTypeStruct);
HttpUtil.populateInboundRequest(request, entity, mediaType, cMsg);
BString key = new BString(CONTENT_TYPE);
BValue[] inputArg = { request, key };
BValue[] returnVals = BRunUtil.invoke(result, "testGetHeader", inputArg);
Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
"Invalid Return Values.");
Assert.assertEquals(returnVals[0].stringValue(), APPLICATION_FORM);
}
/**
 * Sends a request with a Content-Type header to the getHeader resource and expects
 * the service to report that header value as JSON.
 */
@Test(description = "Test GetHeader function within a service")
public void testServiceGetHeader() {
    HTTPTestRequest requestMsg =
            MessageUtils.generateHTTPMessage("/hello/getHeader", Constants.HTTP_METHOD_GET);
    requestMsg.setHeader(CONTENT_TYPE, APPLICATION_FORM);
    HTTPCarbonMessage responseMsg = Services.invokeNew(serviceResult, requestMsg);
    Assert.assertNotNull(responseMsg, "Response message not found");
    BJSON responseJson = new BJSON(new HttpMessageDataStreamer(responseMsg).getInputStream());
    Assert.assertEquals(responseJson.value().get("value").asText(), APPLICATION_FORM);
}
// Reads a custom header through the request struct inside the getReqHeader resource.
@Test(description = "Test struct Get Header operation")
public void testStructGetHeader() {
String path = "/hello/getReqHeader";
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_GET);
cMsg.setHeader("test-header", APPLICATION_FORM);
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
BJSON bJson = new BJSON(getReturnValue(response));
Assert.assertEquals(bJson.value().get("value").asText(), APPLICATION_FORM);
}
// Invokes the bal function testGetHeaders on a request whose test-header carries two
// comma-separated values; the function is expected to return the second one (TEXT_PLAIN).
@Test(description = "Test GetHeaders function within a function")
public void testGetHeaders() {
BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage("", Constants.HTTP_METHOD_GET);
cMsg.setHeader("test-header", APPLICATION_FORM + "," + TEXT_PLAIN + ";b=5");
HttpUtil.setHeaderValueStructType(
BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, headerStruct));
BStruct entity = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, entityStruct);
BStruct mediaType = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, mediaTypeStruct);
HttpUtil.populateInboundRequest(request, entity, mediaType, cMsg);
BString key = new BString("test-header");
BValue[] inputArg = { request, key };
BValue[] returnVals = BRunUtil.invoke(result, "testGetHeaders", inputArg);
Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
"Invalid Return Values.");
Assert.assertEquals(returnVals[0].stringValue(), TEXT_PLAIN);
}
// Service-side variant of testGetHeaders: the resource reports the second header value
// and its parsed ";b=5" parameter.
@Test(description = "Test GetHeaders function within a service")
public void testServiceGetHeaders() {
String path = "/hello/getHeaders";
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_GET);
cMsg.setHeader("test-header", APPLICATION_FORM + "," + TEXT_PLAIN + ";b=5");
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
BJSON bJson = new BJSON(getReturnValue(response));
Assert.assertEquals(bJson.value().get("value").asText(), TEXT_PLAIN);
Assert.assertEquals(bJson.value().get("paramValue").asText(), String.valueOf(5));
}
// Exercises getHeaders through the request struct in the getReqHeaders resource.
// NOTE(review): the expected value "transport" comes from whatever the bal resource
// writes, not from the header set here — confirm against the bal source if it changes.
@Test(description = "Test GetHeaders function with values of struct")
public void testStructGetHeaders() {
String path = "/hello/getReqHeaders";
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_GET);
cMsg.setHeader("test-header", APPLICATION_FORM);
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
BJSON bJson = new BJSON(getReturnValue(response));
Assert.assertEquals(bJson.value().get("value").asText(), "transport");
}
// Invokes the bal function testGetJsonPayload on a request whose entity already holds an
// in-memory JSON body, and verifies the payload comes back intact.
@Test
public void testGetJsonPayload() {
BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
BStruct entity = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, entityStruct);
BStruct mediaType = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, mediaTypeStruct);
HTTPCarbonMessage cMsg = HttpUtil.createHttpCarbonMessage(true);
String payload = "{'code':'123'}";
cMsg.setHeader(CONTENT_TYPE, APPLICATION_JSON);
HttpUtil.addCarbonMsg(request, cMsg);
// Stage the JSON directly in the entity and mark it as an in-memory body.
MimeUtil.setContentType(mediaType, entity, APPLICATION_JSON);
entity.setRefField(JSON_DATA_INDEX, new BJSON(payload));
entity.setBooleanField(IS_IN_MEMORY_INDEX, 1);
request.addNativeData(MESSAGE_ENTITY, entity);
request.addNativeData(IS_ENTITY_BODY_PRESENT, true);
BValue[] inputArg = { request };
BValue[] returnVals = BRunUtil.invoke(result, "testGetJsonPayload", inputArg);
Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
"Invalid Return Values.");
Assert.assertEquals(((BJSON) returnVals[0]).value().get("code").asText(), "123");
}
// Posts JSON with the matching Content-Type header; the getJsonPayload resource returns
// the extracted "lang" value.
@Test(description = "Test GetJsonPayload function within a service")
public void testServiceGetJsonPayload() {
String key = "lang";
String value = "ballerina";
String path = "/hello/getJsonPayload";
String jsonString = "{\"" + key + "\":\"" + value + "\"}";
List<Header> headers = new ArrayList<Header>();
headers.add(new Header("Content-Type", APPLICATION_JSON));
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_POST, headers, jsonString);
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
Assert.assertEquals(new BJSON(getReturnValue(response)).value().stringValue(), value);
}
// Invokes the bal function testGetProperty and expects the carbon message property value
// that was set on the underlying message.
@Test
public void testGetProperty() {
BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
HTTPCarbonMessage cMsg = HttpUtil.createHttpCarbonMessage(true);
String propertyName = "wso2";
String propertyValue = "Ballerina";
cMsg.setProperty(propertyName, propertyValue);
HttpUtil.addCarbonMsg(request, cMsg);
BString name = new BString(propertyName);
BValue[] inputArg = { request, name };
BValue[] returnVals = BRunUtil.invoke(result, "testGetProperty", inputArg);
Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
"Invalid Return Values.");
Assert.assertEquals(returnVals[0].stringValue(), propertyValue);
}
/**
 * Sets a property on the carbon message and expects the GetProperty resource to
 * report the same value back as JSON.
 */
@Test(description = "Test GetProperty function within a service")
public void testServiceGetProperty() {
    final String propertyName = "wso2";
    final String propertyValue = "Ballerina";
    HTTPTestRequest requestMsg =
            MessageUtils.generateHTTPMessage("/hello/GetProperty", Constants.HTTP_METHOD_GET);
    requestMsg.setProperty(propertyName, propertyValue);
    HTTPCarbonMessage responseMsg = Services.invokeNew(serviceResult, requestMsg);
    Assert.assertNotNull(responseMsg, "Response message not found");
    BJSON responseJson = new BJSON(new HttpMessageDataStreamer(responseMsg).getInputStream());
    Assert.assertEquals(responseJson.value().get("value").asText(), propertyValue);
}
// Invokes the bal function testGetStringPayload on a request whose entity holds an
// in-memory text body, and expects the same text back.
@Test
public void testGetStringPayload() {
BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
BStruct entity = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, entityStruct);
BStruct mediaType = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, mediaTypeStruct);
HTTPCarbonMessage cMsg = HttpUtil.createHttpCarbonMessage(true);
String payload = "ballerina";
// Stage the text directly in the entity and mark it as an in-memory body.
MimeUtil.setContentType(mediaType, entity, TEXT_PLAIN);
entity.setStringField(TEXT_DATA_INDEX, payload);
entity.setBooleanField(IS_IN_MEMORY_INDEX, 1);
request.addNativeData(MESSAGE_ENTITY, entity);
request.addNativeData(IS_ENTITY_BODY_PRESENT, true);
HttpUtil.addCarbonMsg(request, cMsg);
BValue[] inputArg = { request };
BValue[] returnVals = BRunUtil.invoke(result, "testGetStringPayload", inputArg);
Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
"Invalid Return Values.");
Assert.assertEquals(returnVals[0].stringValue(), payload);
}
// Posts a text/plain body; the GetStringPayload resource echoes it verbatim.
@Test(description = "Test GetStringPayload function within a service")
public void testServiceGetStringPayload() {
String value = "ballerina";
String path = "/hello/GetStringPayload";
List<Header> headers = new ArrayList<Header>();
headers.add(new Header("Content-Type", TEXT_PLAIN));
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_POST, headers, value);
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
Assert.assertEquals(getReturnValue(response), value);
}
// Invokes the bal function testGetXmlPayload on a request whose entity holds an
// in-memory XML body, and expects the element text back.
@Test
public void testGetXmlPayload() {
BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
BStruct entity = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, entityStruct);
BStruct mediaType = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, mediaTypeStruct);
HTTPCarbonMessage cMsg = HttpUtil.createHttpCarbonMessage(true);
String payload = "<name>ballerina</name>";
// Stage the XML directly in the entity and mark it as an in-memory body.
MimeUtil.setContentType(mediaType, entity, APPLICATION_XML);
entity.setRefField(XML_DATA_INDEX, new BXMLItem(payload));
entity.setBooleanField(IS_IN_MEMORY_INDEX, 1);
request.addNativeData(MESSAGE_ENTITY, entity);
request.addNativeData(IS_ENTITY_BODY_PRESENT, true);
HttpUtil.addCarbonMsg(request, cMsg);
BValue[] inputArg = { request };
BValue[] returnVals = BRunUtil.invoke(result, "testGetXmlPayload", inputArg);
Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
"Invalid Return Values.");
Assert.assertEquals(((BXMLItem) returnVals[0]).getTextValue().stringValue(), "ballerina");
}
// Posts an XML body; the GetXmlPayload resource returns the element's text content.
@Test(description = "Test GetXmlPayload function within a service")
public void testServiceGetXmlPayload() {
String value = "ballerina";
String path = "/hello/GetXmlPayload";
String bxmlItemString = "<name>ballerina</name>";
List<Header> headers = new ArrayList<Header>();
headers.add(new Header("Content-Type", APPLICATION_XML));
HTTPTestRequest cMsg = MessageUtils
.generateHTTPMessage(path, Constants.HTTP_METHOD_POST, headers, bxmlItemString);
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
Assert.assertEquals(getReturnValue(response), value);
}
/**
 * Verifies removeHeader(): sets an Expect header, invokes the bal function to remove it,
 * and checks the header key is gone from the entity's header map.
 *
 * Fixed: the original asserted {@code map.get("100-continue")} — a lookup by the header
 * VALUE, which is null whether or not removal worked. The assertion now looks up the
 * removed header's key.
 */
@Test
public void testRemoveHeader() {
    BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
    HTTPCarbonMessage cMsg = HttpUtil.createHttpCarbonMessage(true);
    String expect = "Expect";
    cMsg.setHeader(expect, "100-continue");
    HttpUtil.addCarbonMsg(request, cMsg);
    BString key = new BString(expect);
    BValue[] inputArg = { request, key };
    BValue[] returnVals = BRunUtil.invoke(result, "testRemoveHeader", inputArg);
    Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
            "Invalid Return Values.");
    Assert.assertTrue(returnVals[0] instanceof BStruct);
    BStruct entityStruct = (BStruct) ((BStruct) returnVals[0]).getNativeData(MESSAGE_ENTITY);
    BMap map = (BMap) entityStruct.getRefField(ENTITY_HEADERS_INDEX);
    // The removed header's key must no longer be present in the header map.
    Assert.assertNull(map.get(expect));
}
// The RemoveHeader resource deletes the Content-Type header and reports "value is null"
// when it can no longer read it.
@Test(description = "Test RemoveHeader function within a service")
public void testServiceRemoveHeader() {
String path = "/hello/RemoveHeader";
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_GET);
cMsg.setHeader(CONTENT_TYPE, APPLICATION_FORM);
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
BJSON bJson = new BJSON(new HttpMessageDataStreamer(response).getInputStream());
Assert.assertEquals(bJson.value().get("value").asText(), "value is null");
}
// Invokes the bal function testRemoveAllHeaders and verifies both previously-set header
// keys are gone from the entity's header map.
@Test
public void testRemoveAllHeaders() {
BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
HTTPCarbonMessage cMsg = HttpUtil.createHttpCarbonMessage(true);
String expect = "Expect";
String range = "Range";
cMsg.setHeader(expect, "100-continue");
cMsg.setHeader(range, "bytes=500-999");
HttpUtil.addCarbonMsg(request, cMsg);
BValue[] inputArg = { request };
BValue[] returnVals = BRunUtil.invoke(result, "testRemoveAllHeaders", inputArg);
Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
"Invalid Return Values.");
Assert.assertTrue(returnVals[0] instanceof BStruct);
BStruct entityStruct = (BStruct) ((BStruct) returnVals[0]).getNativeData(MESSAGE_ENTITY);
BMap map = (BMap) entityStruct.getRefField(ENTITY_HEADERS_INDEX);
Assert.assertNull(map.get(expect));
Assert.assertNull(map.get(range));
}
// The RemoveAllHeaders resource wipes every header and reports "value is null" when it
// subsequently fails to read one.
@Test(description = "Test RemoveAllHeaders function within a service")
public void testServiceRemoveAllHeaders() {
String expect = "Expect";
String range = "Range";
String path = "/hello/RemoveAllHeaders";
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_GET);
cMsg.setHeader(expect, "100-continue");
cMsg.setHeader(range, "bytes=500-999");
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
BJSON bJson = new BJSON(new HttpMessageDataStreamer(response).getInputStream());
Assert.assertEquals(bJson.value().get("value").asText(), "value is null");
}
// Invokes the bal function testSetHeader and verifies the Range header appears as the
// first value under its key in the entity's header map.
@Test
public void testSetHeader() {
BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
HTTPCarbonMessage cMsg = HttpUtil.createHttpCarbonMessage(true);
HttpUtil.addCarbonMsg(request, cMsg);
String range = "Range";
String rangeValue = "bytes=500-999";
BString key = new BString(range);
BString value = new BString(rangeValue);
BValue[] inputArg = { request, key, value };
BValue[] returnVals = BRunUtil.invoke(result, "testSetHeader", inputArg);
Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
"Invalid Return Values.");
Assert.assertTrue(returnVals[0] instanceof BStruct);
BStruct entityStruct = (BStruct) ((BStruct) returnVals[0]).getNativeData(MESSAGE_ENTITY);
BMap map = (BMap) entityStruct.getRefField(ENTITY_HEADERS_INDEX);
BRefValueArray array = (BRefValueArray) map.get(range);
Assert.assertEquals(((BStruct) array.get(0)).getStringField(0), rangeValue);
}
/**
 * Verifies setHeader() on a populated inbound request struct: the Range header set by
 * the bal function must appear in the entity's header map.
 *
 * Fixed: setHeaderValueStructType was passed the ENTITY struct type; every sibling test
 * (e.g. testGetContentLength, testGetHeader, testGetHeaders) registers the header-value
 * struct ({@code headerStruct}) here — the entity type was a copy-paste slip.
 */
@Test
public void testSetHeaderStruct() {
    BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
    HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage("", Constants.HTTP_METHOD_GET);
    HttpUtil.addCarbonMsg(request, cMsg);
    // Register the header-value struct type (not the entity type) before populating.
    HttpUtil.setHeaderValueStructType(
            BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, headerStruct));
    BStruct entity = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, entityStruct);
    BStruct mediaType = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageMime, mediaTypeStruct);
    HttpUtil.populateInboundRequest(request, entity, mediaType, cMsg);
    String range = "Range";
    String rangeValue = "bytes=500-999";
    BString key = new BString(range);
    BString value = new BString(rangeValue);
    BValue[] inputArg = { request, key, value };
    BValue[] returnVals = BRunUtil.invoke(result, "testSetHeaderStruct", inputArg);
    Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
            "Invalid Return Values.");
    Assert.assertTrue(returnVals[0] instanceof BStruct);
    BStruct entityStruct = (BStruct) ((BStruct) returnVals[0]).getNativeData(MESSAGE_ENTITY);
    BMap map = (BMap) entityStruct.getRefField(ENTITY_HEADERS_INDEX);
    BRefValueArray array = (BRefValueArray) map.get(range);
    Assert.assertEquals(((BStruct) array.get(0)).getStringField(0), rangeValue);
}
/**
 * Hits the setHeader resource with key/value encoded in the path and expects the
 * service to echo the header value it set, as JSON under "value".
 */
@Test(description = "Test SetHeader function within a service")
public void testServiceSetHeader() {
    final String headerKey = "lang";
    final String headerValue = "ballerina";
    HTTPTestRequest requestMsg = MessageUtils.generateHTTPMessage(
            "/hello/setHeader/" + headerKey + "/" + headerValue, Constants.HTTP_METHOD_GET);
    HTTPCarbonMessage responseMsg = Services.invokeNew(serviceResult, requestMsg);
    Assert.assertNotNull(responseMsg, "Response message not found");
    BJSON responseJson = new BJSON(new HttpMessageDataStreamer(responseMsg).getInputStream());
    Assert.assertEquals(responseJson.value().get("value").asText(), headerValue);
}
// Struct-based variant of testServiceSetHeader: the resource sets the header through the
// request struct and echoes the value back.
@Test(description = "Test Setting Header in struct within a service")
public void testServiceSetHeaderStruct() {
String key = "lang";
String value = "ballerina";
String path = "/hello/setHeaderStruct/" + key + "/" + value;
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_GET);
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
BJSON bJson = new BJSON(new HttpMessageDataStreamer(response).getInputStream());
Assert.assertEquals(bJson.value().get("value").asText(), value);
}
// Invokes the bal function testSetJsonPayload and verifies the JSON value is stored in
// the request entity's JSON data slot.
@Test
public void testSetJsonPayload() {
BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
HTTPCarbonMessage requestMsg = HttpUtil.createHttpCarbonMessage(true);
HttpUtil.addCarbonMsg(request, requestMsg);
BJSON value = new BJSON("{'name':'wso2'}");
BValue[] inputArg = { request, value };
BValue[] returnVals = BRunUtil.invoke(result, "testSetJsonPayload", inputArg);
Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
"Invalid Return Values.");
Assert.assertTrue(returnVals[0] instanceof BStruct);
BStruct entity = (BStruct) ((BStruct) returnVals[0]).getNativeData(MESSAGE_ENTITY);
BJSON bJson = (BJSON) entity.getRefField(JSON_DATA_INDEX);
Assert.assertEquals(bJson.value().get("name").asText(), "wso2", "Payload is not set properly");
}
/**
 * Invokes the SetJsonPayload resource with the value in the path; the service sets it
 * as a JSON payload and echoes it back under "lang".
 */
@Test(description = "Test SetJsonPayload function within a service")
public void testServiceSetJsonPayload() {
    final String expectedLang = "ballerina";
    HTTPTestRequest requestMsg = MessageUtils.generateHTTPMessage(
            "/hello/SetJsonPayload/" + expectedLang, Constants.HTTP_METHOD_GET);
    HTTPCarbonMessage responseMsg = Services.invokeNew(serviceResult, requestMsg);
    Assert.assertNotNull(responseMsg, "Response message not found");
    BJSON responseJson = new BJSON(new HttpMessageDataStreamer(responseMsg).getInputStream());
    Assert.assertEquals(responseJson.value().get("lang").asText(), expectedLang);
}
// Invokes the bal function testSetProperty and verifies the property is visible on the
// carbon message attached to the returned request struct.
@Test
public void testSetProperty() {
BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
HTTPCarbonMessage cMsg = HttpUtil.createHttpCarbonMessage(true);
HttpUtil.addCarbonMsg(request, cMsg);
String propertyName = "wso2";
String propertyValue = "Ballerina";
BString name = new BString(propertyName);
BString value = new BString(propertyValue);
BValue[] inputArg = { request, name, value };
BValue[] returnVals = BRunUtil.invoke(result, "testSetProperty", inputArg);
Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
"Invalid Return Values.");
Assert.assertTrue(returnVals[0] instanceof BStruct);
HTTPCarbonMessage response = HttpUtil.getCarbonMsg((BStruct) returnVals[0], null);
Assert.assertEquals(response.getProperty(propertyName), propertyValue);
}
// The SetProperty resource sets a property from path segments and reports the value it
// can read back.
@Test(description = "Test SetProperty function within a service")
public void testServiceSetProperty() {
String key = "lang";
String value = "ballerina";
String path = "/hello/SetProperty/" + key + "/" + value;
HTTPTestRequest cMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_GET);
HTTPCarbonMessage response = Services.invokeNew(serviceResult, cMsg);
Assert.assertNotNull(response, "Response message not found");
BJSON bJson = new BJSON(new HttpMessageDataStreamer(response).getInputStream());
Assert.assertEquals(bJson.value().get("value").asText(), value);
}
// Invokes the bal function testSetStringPayload and verifies the text is stored in the
// request entity's text data slot.
@Test
public void testSetStringPayload() {
BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
HTTPCarbonMessage cMsg = HttpUtil.createHttpCarbonMessage(true);
HttpUtil.addCarbonMsg(request, cMsg);
BString value = new BString("Ballerina");
BValue[] inputArg = { request, value };
BValue[] returnVals = BRunUtil.invoke(result, "testSetStringPayload", inputArg);
Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
"Invalid Return Values.");
Assert.assertTrue(returnVals[0] instanceof BStruct);
BStruct entity = (BStruct) ((BStruct) returnVals[0]).getNativeData(MESSAGE_ENTITY);
String stringValue = entity.getStringField(TEXT_DATA_INDEX);
Assert.assertEquals(stringValue, "Ballerina", "Payload is not set properly");
}
@Test(description = "Test SetStringPayload function within a service")
public void testServiceSetStringPayload() {
    String value = "ballerina";
    HTTPTestRequest requestMsg = MessageUtils.generateHTTPMessage("/hello/SetStringPayload/" + value,
            Constants.HTTP_METHOD_GET);
    HTTPCarbonMessage responseMsg = Services.invokeNew(serviceResult, requestMsg);
    Assert.assertNotNull(responseMsg, "Response message not found");
    // The service sets the string payload, reads it back, and replies with it under "lang".
    BJSON payload = new BJSON(new HttpMessageDataStreamer(responseMsg).getInputStream());
    Assert.assertEquals(payload.value().get("lang").asText(), value);
}
@Test
public void testSetXmlPayload() {
    BStruct req = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
    HTTPCarbonMessage carbonMsg = HttpUtil.createHttpCarbonMessage(true);
    HttpUtil.addCarbonMsg(req, carbonMsg);
    BXMLItem xmlPayload = new BXMLItem("<name>Ballerina</name>");
    BValue[] args = { req, xmlPayload };
    BValue[] results = BRunUtil.invoke(result, "testSetXmlPayload", args);
    Assert.assertFalse(results == null || results.length == 0 || results[0] == null,
            "Invalid Return Values.");
    Assert.assertTrue(results[0] instanceof BStruct);
    // Verify the XML payload was stored on the entity and its text content survived.
    BStruct entity = (BStruct) ((BStruct) results[0]).getNativeData(MESSAGE_ENTITY);
    BXMLItem storedXml = (BXMLItem) entity.getRefField(XML_DATA_INDEX);
    Assert.assertEquals(storedXml.getTextValue().stringValue(), "Ballerina", "Payload is not set properly");
}
@Test(description = "Test SetXmlPayload function within a service")
public void testServiceSetXmlPayload() {
    String expected = "Ballerina";
    HTTPTestRequest requestMsg = MessageUtils.generateHTTPMessage("/hello/SetXmlPayload/",
            Constants.HTTP_METHOD_GET);
    HTTPCarbonMessage responseMsg = Services.invokeNew(serviceResult, requestMsg);
    Assert.assertNotNull(responseMsg, "Response message not found");
    BJSON payload = new BJSON(new HttpMessageDataStreamer(responseMsg).getInputStream());
    Assert.assertEquals(payload.value().get("lang").asText(), expected);
}
@Test
public void testGetMethod() {
    // The resource behind /hello/11 echoes the HTTP method of the incoming request.
    HTTPTestRequest requestMsg = MessageUtils.generateHTTPMessage("/hello/11", Constants.HTTP_METHOD_GET);
    HTTPCarbonMessage responseMsg = Services.invokeNew(serviceResult, requestMsg);
    Assert.assertNotNull(responseMsg, "Response message not found");
    String echoedMethod =
            StringUtils.getStringFromInputStream(new HttpMessageDataStreamer(responseMsg).getInputStream());
    Assert.assertEquals(echoedMethod, Constants.HTTP_METHOD_GET);
}
@Test
public void testGetRequestURL() {
    // The resource behind /hello/12 echoes the request URL it received.
    String path = "/hello/12";
    HTTPTestRequest requestMsg = MessageUtils.generateHTTPMessage(path, Constants.HTTP_METHOD_GET);
    HTTPCarbonMessage responseMsg = Services.invokeNew(serviceResult, requestMsg);
    Assert.assertNotNull(responseMsg, "Response message not found");
    String echoedUrl =
            StringUtils.getStringFromInputStream(new HttpMessageDataStreamer(responseMsg).getInputStream());
    Assert.assertEquals(echoedUrl, path);
}
@Test(description = "Test setBinaryPayload() function within a service")
public void testServiceSetBinaryPayload() {
    String expected = "Ballerina";
    HTTPTestRequest requestMsg = MessageUtils.generateHTTPMessage("/hello/SetBinaryPayload/",
            Constants.HTTP_METHOD_GET);
    HTTPCarbonMessage responseMsg = Services.invokeNew(serviceResult, requestMsg);
    Assert.assertNotNull(responseMsg, "Response message not found");
    // The service sets a binary payload, reads it back, and replies with it under "lang".
    BJSON payload = new BJSON(new HttpMessageDataStreamer(responseMsg).getInputStream());
    Assert.assertEquals(payload.value().get("lang").asText(), expected);
}
@Test(description = "Test getBinaryPayload() function within a service")
public void testServiceGetBinaryPayload() {
    // POST a body and expect the service to read it via getBinaryPayload() and echo it.
    String payload = "ballerina";
    HTTPTestRequest requestMsg = MessageUtils.generateHTTPMessage("/hello/GetBinaryPayload",
            Constants.HTTP_METHOD_POST, payload);
    HTTPCarbonMessage responseMsg = Services.invokeNew(serviceResult, requestMsg);
    Assert.assertNotNull(responseMsg, "Response message not found");
    String echoed =
            StringUtils.getStringFromInputStream(new HttpMessageDataStreamer(responseMsg).getInputStream());
    Assert.assertEquals(echoed, payload);
}
@Test(description = "Test setBinaryPayload() function")
public void testSetBinaryPayload() {
    BStruct req = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
    HTTPCarbonMessage carbonMsg = HttpUtil.createHttpCarbonMessage(true);
    HttpUtil.addCarbonMsg(req, carbonMsg);
    BValue[] args = { req, new BBlob("Ballerina".getBytes()) };
    BValue[] results = BRunUtil.invoke(result, "testSetBinaryPayload", args);
    Assert.assertFalse(results == null || results.length == 0 || results[0] == null,
            "Invalid Return Values.");
    Assert.assertTrue(results[0] instanceof BStruct);
    // The binary payload ends up as a blob field on the entity struct.
    BStruct entity = (BStruct) ((BStruct) results[0]).getNativeData(MESSAGE_ENTITY);
    BlobDataSource blobDataSource = new BlobDataSource(entity.getBlobField(BYTE_DATA_INDEX));
    Assert.assertEquals(blobDataSource.getMessageAsString(), "Ballerina", "Payload is not set properly");
}
// Writes a JSON body to a temp file, hands it to the ballerina setEntityBody() function,
// and verifies the returned entity points at a file containing the same JSON.
@Test (description = "Test setEntityBody() function")
public void testSetEntityBody() {
    BStruct request = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageHttp, requestStruct);
    HTTPCarbonMessage requestMsg = HttpUtil.createHttpCarbonMessage(true);
    HttpUtil.addCarbonMsg(request, requestMsg);
    try {
        File file = File.createTempFile("test", ".json");
        file.deleteOnExit();
        // try-with-resources: the writer was previously leaked when write() threw.
        try (BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(file))) {
            bufferedWriter.write("{'name':'wso2'}");
        }
        BStruct fileStruct = BCompileUtil.createAndGetStruct(result.getProgFile(), protocolPackageFile, FILE);
        fileStruct.setStringField(0, file.getAbsolutePath());
        BValue[] inputArg = { request, fileStruct, new BString(APPLICATION_JSON) };
        BValue[] returnVals = BRunUtil.invoke(result, "testSetEntityBody", inputArg);
        Assert.assertFalse(returnVals == null || returnVals.length == 0 || returnVals[0] == null,
                "Invalid Return Values.");
        Assert.assertTrue(returnVals[0] instanceof BStruct);
        BStruct entity = (BStruct) ((BStruct) returnVals[0]).getNativeData(MESSAGE_ENTITY);
        BStruct returnFileStruct = (BStruct) entity.getRefField(OVERFLOW_DATA_INDEX);
        String returnJsonValue = new String(Files.readAllBytes(Paths.get(returnFileStruct.getStringField(0))),
                UTF_8);
        BJSON bJson = new BJSON(returnJsonValue);
        Assert.assertEquals(bJson.value().get("name").asText(), "wso2", "Payload is not set properly");
    } catch (IOException e) {
        // Pass the exception itself so the stack trace is preserved; previously e.getMessage()
        // was passed as an unused format argument and the failure detail was lost.
        // NOTE(review): swallowing the IOException lets the test pass vacuously — consider Assert.fail.
        LOG.error("Error occurred while creating a temporary file in testSetEntityBody", e);
    }
}
/**
 * Reads the full response payload from the carbon message's input stream.
 *
 * @param response carbon response whose payload stream is consumed
 * @return the payload decoded as a UTF-8 string; empty if reading fails
 */
private String getReturnValue(HTTPCarbonMessage response) {
    final int bufferSize = 1024;
    final char[] buffer = new char[bufferSize];
    final StringBuilder out = new StringBuilder();
    // try-with-resources: the reader was previously never closed, leaking the underlying stream.
    try (Reader reader = new InputStreamReader(new HttpMessageDataStreamer(response).getInputStream(), UTF_8)) {
        int size;
        while ((size = reader.read(buffer, 0, buffer.length)) >= 0) {
            out.append(buffer, 0, size);
        }
    } catch (IOException e) {
        // Log the throwable itself so the stack trace is preserved; previously e.getMessage()
        // was passed as an unused format argument and dropped.
        LOG.error("Error occurred while reading the response value in getReturnValue", e);
    }
    return out.toString();
}
} |
No need for null check here | public void listExtensions(boolean all, String format, String search) throws IOException {
// Prints the Quarkus extensions matching `search` in the requested format
// ("name", "full", or "concise"/default), skipping installed ones unless `all` is set.
final Map<String, Dependency> installed = findInstalled();
Stream<Extension> extensionsStream = loadExtensions().stream();
// "*" (or null) means no filtering; anything else is a case-insensitive substring match.
if (search != null && !"*".equalsIgnoreCase(search)) {
final Pattern searchPattern = Pattern.compile(".*" + search + ".*", Pattern.CASE_INSENSITIVE);
extensionsStream = extensionsStream.filter(e -> filterBySearch(searchPattern, e));
}
List<Extension> loadedExtensions = extensionsStream.collect(Collectors.toList());
if (loadedExtensions.isEmpty()) {
System.out.println("No extension found with this pattern");
} else {
String extensionStatus = all ? "available" : "installable";
System.out.println(String.format("%nCurrent Quarkus extensions %s: ", extensionStatus));
Consumer<String[]> currentFormatter;
switch (format.toLowerCase()) {
case "name":
currentFormatter = this::nameFormatter;
break;
case "full":
// The full format prints a header row before the extension rows.
currentFormatter = this::fullFormatter;
currentFormatter.accept(new String[] { "Status", "Extension", "ArtifactId", "Updated Version", "Guide" });
break;
case "concise":
default:
currentFormatter = this::conciseFormatter;
}
loadedExtensions.forEach(extension -> display(extension, installed, all, currentFormatter));
if ("concise".equalsIgnoreCase(format)) {
System.out.println("\nTo get more information, append -Dquarkus.extension.format=full to your command line.");
}
// NOTE(review): the null check is redundant — `instanceof` already yields false for null.
if (this.buildFile != null && this.buildFile instanceof GradleBuildFile) {
System.out.println("\nAdd an extension to your project by adding the dependency to your " +
"build.gradle or use `./gradlew addExtension --extensions=\"artifactId\"`");
}
else {
System.out.println("\nAdd an extension to your project by adding the dependency to your " +
"pom.xml or use `./mvnw quarkus:add-extension -Dextensions=\"artifactId\"`");
}
}
} | if (this.buildFile != null && this.buildFile instanceof GradleBuildFile) { | public void listExtensions(boolean all, String format, String search) throws IOException {
// Prints the Quarkus extensions matching `search` in the requested format, with
// build-tool-specific hints (Gradle vs Maven) for both the "full format" tip and the install hint.
final Map<String, Dependency> installed = findInstalled();
Stream<Extension> extensionsStream = loadExtensions().stream();
if (search != null && !"*".equalsIgnoreCase(search)) {
final Pattern searchPattern = Pattern.compile(".*" + search + ".*", Pattern.CASE_INSENSITIVE);
extensionsStream = extensionsStream.filter(e -> filterBySearch(searchPattern, e));
}
List<Extension> loadedExtensions = extensionsStream.collect(Collectors.toList());
if (loadedExtensions.isEmpty()) {
System.out.println("No extension found with this pattern");
} else {
String extensionStatus = all ? "available" : "installable";
System.out.println(String.format("%nCurrent Quarkus extensions %s: ", extensionStatus));
Consumer<String[]> currentFormatter;
switch (format.toLowerCase()) {
case "name":
currentFormatter = this::nameFormatter;
break;
case "full":
currentFormatter = this::fullFormatter;
currentFormatter.accept(new String[] { "Status", "Extension", "ArtifactId", "Updated Version", "Guide" });
break;
case "concise":
default:
currentFormatter = this::conciseFormatter;
}
loadedExtensions.forEach(extension -> display(extension, installed, all, currentFormatter));
if ("concise".equalsIgnoreCase(format)) {
// Gradle takes the flag on the task invocation; Maven via a system property.
if (this.buildFile instanceof GradleBuildFile) {
System.out.println("\nTo get more information, append --format=full to your command line.");
}
else {
System.out.println("\nTo get more information, append -Dquarkus.extension.format=full to your command line.");
}
}
if (this.buildFile instanceof GradleBuildFile) {
System.out.println("\nAdd an extension to your project by adding the dependency to your " +
"build.gradle or use `./gradlew addExtension --extensions=\"artifactId\"`");
}
else {
System.out.println("\nAdd an extension to your project by adding the dependency to your " +
"pom.xml or use `./mvnw quarkus:add-extension -Dextensions=\"artifactId\"`");
}
}
} | class ListExtensions {
// Row template for the "full" listing: status, name, artifactId, version — guide on its own line.
private static final String FULL_FORMAT = "%-8s %-50s %-50s %-25s%n%s";
// NOTE(review): only two specifiers, but conciseFormatter supplies three values — the third
// (the guide) is silently ignored by String.format; confirm whether it should be printed.
private static final String CONCISE_FORMAT = "%-50s %-50s";
private static final String NAME_FORMAT = "%-50s";
// Backing build descriptor (Maven or Gradle); stays null when none was supplied.
private BuildFile buildFile = null;
public ListExtensions(final BuildFile projectBuildFile) throws IOException {
    // Keep the field's default (null) when no build file was provided.
    if (projectBuildFile != null) {
        this.buildFile = projectBuildFile;
    }
}
/**
 * Lists the dependencies currently present in the project's build file,
 * or an empty map when no build file is associated.
 */
public Map<String, Dependency> findInstalled() throws IOException {
    return buildFile == null ? Collections.<String, Dependency>emptyMap() : buildFile.findInstalled();
}
// True when the extension's display name satisfies the (case-insensitive) search pattern.
private boolean filterBySearch(final Pattern searchPattern, Extension e) {
    return searchPattern.matcher(e.getName()).matches();
}
// Prints the name and artifactId columns of one row.
// NOTE(review): cols[4] (the guide) has no matching specifier in CONCISE_FORMAT and is
// silently discarded by String.format — confirm whether the guide should be shown here.
private void conciseFormatter(String[] cols) {
System.out.println(String.format(CONCISE_FORMAT, cols[1], cols[2], cols[4]));
}
private void fullFormatter(String[] cols) {
    // Status, name, artifactId and version on one line; the guide follows on the next line.
    final String row = String.format(FULL_FORMAT, cols[0], cols[1], cols[2], cols[3], cols[4]);
    System.out.println(row);
}
private void nameFormatter(String[] cols) {
    // Artifact id column only.
    final String row = String.format(NAME_FORMAT, cols[2]);
    System.out.println(row);
}
/**
 * Emits one listing row for the given extension, unless it is already installed and
 * {@code all} is false. The row is: status label, name, artifactId, version info, guide.
 */
private void display(Extension extension, final Map<String, Dependency> installed, boolean all,
Consumer<String[]> formatter) {
    // Key format used by findInstalled(): "groupId:artifactId". Computed once instead of twice.
    final String extensionKey = String.format("%s:%s", extension.getGroupId(), extension.getArtifactId());
    if (!all && installed.containsKey(extensionKey)) {
        return;
    }
    final Dependency dependency = installed.get(extensionKey);
    String label = "";
    String version = "";
    final String extracted = extractVersion(dependency);
    if (extracted != null) {
        final String pluginVersion = getPluginVersion();
        if (pluginVersion.equalsIgnoreCase(extracted)) {
            label = "current";
            version = extracted; // was String.format("%s", extracted) — a no-op format call
        } else {
            label = "update";
            version = String.format("%s <> %s", extracted, pluginVersion);
        }
    }
    String guide = StringUtils.defaultString(extension.getGuide(), "");
    formatter.accept(new String[] { label, extension.getName(), extension.getArtifactId(), version, guide });
}
/**
 * Resolves the version of an installed dependency, expanding "${property}" placeholders
 * against the build file where possible.
 *
 * @param dependency installed dependency, may be null
 * @return the literal or resolved version, or null when the dependency is null/versionless
 */
private String extractVersion(final Dependency dependency) {
    String version = dependency != null ? dependency.getVersion() : null;
    // Only well-formed "${name}" placeholders are resolvable; the previous "$"-only check let
    // malformed values (e.g. "$x") reach propertyName() and throw StringIndexOutOfBoundsException.
    if (version != null && version.startsWith("${") && version.endsWith("}")) {
        String value = null;
        try {
            value = (String) buildFile.getProperty(propertyName(version));
        } catch (IOException ignored) {
            // Best-effort: fall back to the raw placeholder when the property cannot be read.
        }
        if (value != null) {
            version = value;
        }
    }
    return version;
}
// Strips the "${" prefix and "}" suffix from a property placeholder,
// e.g. "${quarkus.version}" -> "quarkus.version". Assumes a well-formed placeholder;
// shorter inputs would throw StringIndexOutOfBoundsException.
private String propertyName(final String variable) {
return variable.substring(2, variable.length() - 1);
}
} | class ListExtensions {
// Row template for the "full" listing: status, name, artifactId, version — guide on its own line.
private static final String FULL_FORMAT = "%-8s %-50s %-50s %-25s%n%s";
// NOTE(review): two specifiers, but conciseFormatter passes three values — the third is
// silently dropped by String.format; confirm whether the guide should be printed.
private static final String CONCISE_FORMAT = "%-50s %-50s";
private static final String NAME_FORMAT = "%-50s";
// Backing build descriptor (Maven or Gradle); stays null when none was supplied.
private BuildFile buildFile = null;
public ListExtensions(final BuildFile buildFile) throws IOException {
if (buildFile != null) {
this.buildFile = buildFile;
}
}
// Lists the project's installed dependencies; empty map when no build file is associated.
public Map<String, Dependency> findInstalled() throws IOException {
if (buildFile != null) {
return buildFile.findInstalled();
} else {
return Collections.emptyMap();
}
}
// True when the extension's display name satisfies the search pattern.
private boolean filterBySearch(final Pattern searchPattern, Extension e) {
return searchPattern.matcher(e.getName()).matches();
}
private void conciseFormatter(String[] cols) {
System.out.println(String.format(CONCISE_FORMAT, cols[1], cols[2], cols[4]));
}
private void fullFormatter(String[] cols) {
System.out.println(String.format(FULL_FORMAT, cols[0], cols[1], cols[2], cols[3], cols[4]));
}
private void nameFormatter(String[] cols) {
System.out.println(String.format(NAME_FORMAT, cols[2]));
}
// Emits one listing row unless the extension is installed and `all` is false.
// NOTE(review): the "groupId:artifactId" key is formatted twice — could be hoisted.
private void display(Extension extension, final Map<String, Dependency> installed, boolean all,
Consumer<String[]> formatter) {
if (!all && installed.containsKey(String.format("%s:%s", extension.getGroupId(), extension.getArtifactId()))) {
return;
}
final Dependency dependency = installed.get(String.format("%s:%s", extension.getGroupId(), extension.getArtifactId()));
String label = "";
String version = "";
final String extracted = extractVersion(dependency);
if (extracted != null) {
if (getPluginVersion().equalsIgnoreCase(extracted)) {
label = "current";
version = String.format("%s", extracted);
} else {
label = "update";
version = String.format("%s <> %s", extracted, getPluginVersion());
}
}
String guide = StringUtils.defaultString(extension.getGuide(), "");
formatter.accept(new String[] { label, extension.getName(), extension.getArtifactId(), version, guide });
}
// Resolves "${property}" version placeholders against the build file; IOExceptions are
// deliberately swallowed (best-effort) and the raw placeholder is returned instead.
private String extractVersion(final Dependency dependency) {
String version = dependency != null ? dependency.getVersion() : null;
if (version != null && version.startsWith("$")) {
String value = null;
try {
value = (String) buildFile.getProperty(propertyName(version));
} catch (IOException e) {
}
if (value != null) {
version = value;
}
}
return version;
}
// Strips "${" and "}"; assumes a well-formed "${name}" placeholder.
private String propertyName(final String variable) {
return variable.substring(2, variable.length() - 1);
}
}
I'd just name this variable "errors". Feel like naming is not consistent with the previous variable (responses) :) | public void testCircuitBreaker() {
// Expected status codes per request; the indices matched below are expected to be errors.
int[] expectedStatusCodes = new int[] { 200, 200, 500, 503, 503, 200, 200, 200 };
BValue[] returnVals = BRunUtil.invoke(compileResult, "testTypicalScenario");
Assert.assertEquals(returnVals.length, 2);
BRefValueArray responses = (BRefValueArray) returnVals[0];
// NOTE(review): consider naming this "errors" for consistency with "responses".
BRefValueArray errorsArray = (BRefValueArray) returnVals[1];
for (int i = 0; i < responses.size(); i++) {
long statusCode;
if (i != CB_CLIENT_FIRST_ERROR_INDEX && i != CB_CLIENT_SECOND_ERROR_INDEX) {
BMap<String, BValue> res = (BMap<String, BValue>) responses.get(i);
statusCode = ((BInteger) res.get(STATUS_CODE_FIELD)).intValue();
Assert.assertEquals(statusCode, expectedStatusCodes[i], "Status code does not match.");
} else {
// At these indices an error (not a response) is expected from the circuit breaker.
Assert.assertNotNull(errorsArray.get(i));
BError error = (BError) errorsArray.get(i);
String errMsg = error.getReason();
Assert.assertTrue(errMsg != null && errMsg.startsWith(CB_ERROR_MSG),
"Invalid error message from circuit breaker.");
}
}
} | BRefValueArray errorsArray = (BRefValueArray) returnVals[1]; | public void testCircuitBreaker() {
// Expected status codes per request; the indices matched below are expected to be errors.
int[] expectedStatusCodes = new int[] { 200, 200, 500, 503, 503, 200, 200, 200 };
BValue[] returnVals = BRunUtil.invoke(compileResult, "testTypicalScenario");
Assert.assertEquals(returnVals.length, 2);
BRefValueArray responses = (BRefValueArray) returnVals[0];
BRefValueArray errors = (BRefValueArray) returnVals[1];
for (int i = 0; i < responses.size(); i++) {
long statusCode;
if (i != CB_CLIENT_FIRST_ERROR_INDEX && i != CB_CLIENT_SECOND_ERROR_INDEX) {
BMap<String, BValue> res = (BMap<String, BValue>) responses.get(i);
statusCode = ((BInteger) res.get(STATUS_CODE_FIELD)).intValue();
Assert.assertEquals(statusCode, expectedStatusCodes[i], "Status code does not match.");
} else {
// At these indices an error (not a response) is expected from the circuit breaker.
Assert.assertNotNull(errors.get(i));
BError error = (BError) errors.get(i);
String errMsg = error.getReason();
Assert.assertTrue(errMsg != null && errMsg.startsWith(CB_ERROR_MSG),
"Invalid error message from circuit breaker.");
}
}
} | class CircuitBreakerTest {
// Prefix expected on error messages produced by the circuit breaker.
private static final String CB_ERROR_MSG = "Upstream service unavailable.";
private static final String MOCK_ENDPOINT_NAME = "mockEP";
// Request sequence positions at which the individual scenarios expect errors/successes.
private static final int CB_CLIENT_FIRST_ERROR_INDEX = 3;
private static final int CB_CLIENT_SECOND_ERROR_INDEX = 4;
private static final int CB_CLIENT_TOP_MOST_SUCCESS_INDEX = 2;
private static final int CB_CLIENT_FAILURE_CASE_ERROR_INDEX = 5;
private static final int CB_CLIENT_FORCE_OPEN_INDEX = 4;
// JSON field holding the HTTP status code in mock responses.
private static final String STATUS_CODE_FIELD = "statusCode";
private CompileResult compileResult, serviceResult;
@BeforeClass
public void setup() {
    // Compile the resiliency test source once and deploy its services for every test in this class.
    final String balFilePath = "test-src/net/http/resiliency/circuit-breaker-test.bal";
    compileResult = BCompileUtil.compile(balFilePath);
    serviceResult = BServiceUtil.setupProgramFile(this, balFilePath);
}
/**
 * Test case scenario:
 * - Initially the circuit is healthy and functioning normally.
 * - Backend service becomes unavailable and eventually, the failure threshold is exceeded.
 * - Requests afterwards are immediately failed, with a 503 response.
 * - After the reset timeout expires, the circuit goes to HALF_OPEN state and a trial request is sent.
 * - The backend service is not available and therefore, the request fails again and the circuit goes back to OPEN.
 */
@Test
public void testTrialRunFailure() {
    // Fix: a dangling javadoc plus a second @Test annotation preceded this method (leftover from
    // a removed sibling test); duplicating the non-repeatable @Test annotation does not compile.
    int[] expectedStatusCodes = new int[] { 200, 500, 503, 500, 503, 500 };
    BValue[] returnVals = BRunUtil.invoke(compileResult, "testTrialRunFailure");
    Assert.assertEquals(returnVals.length, 2);
    BRefValueArray responses = (BRefValueArray) returnVals[0];
    // Renamed from "errorsArray" for consistency with the sibling tests.
    BRefValueArray errors = (BRefValueArray) returnVals[1];
    for (int i = 0; i < responses.size(); i++) {
        // Plain responses are expected before the breaker trips and for the final failure case;
        // everything in between must surface as a circuit-breaker error.
        if (i < CB_CLIENT_TOP_MOST_SUCCESS_INDEX || i == CB_CLIENT_FAILURE_CASE_ERROR_INDEX) {
            BMap<String, BValue> res = (BMap<String, BValue>) responses.get(i);
            long statusCode = ((BInteger) res.get(STATUS_CODE_FIELD)).intValue();
            Assert.assertEquals(statusCode, expectedStatusCodes[i], "Status code does not match.");
        } else {
            Assert.assertNotNull(errors.get(i));
            BError error = (BError) errors.get(i);
            String msg = error.getReason();
            Assert.assertTrue(msg != null && msg.startsWith(CB_ERROR_MSG),
                    "Invalid error message from circuit breaker.");
        }
    }
}
/**
 * Test case scenario:
 * - The circuit starts healthy and serves requests normally.
 * - The backend replies with HTTP status codes that are configured to count as failures,
 *   until the failure threshold is exceeded and the circuit opens.
 * - Subsequent requests fail immediately with a 503.
 * - After the reset timeout the circuit goes HALF_OPEN and sends a trial request, which
 *   fails again, sending the circuit back to OPEN.
 */
@Test(description = "Test case for Circuit Breaker HTTP status codes.")
public void testHttpStatusCodeFailure() {
    int[] expectedStatusCodes = { 200, 500, 503, 500, 503, 503 };
    BValue[] results = BRunUtil.invoke(compileResult, "testHttpStatusCodeFailure");
    Assert.assertEquals(results.length, 2);
    BRefValueArray responses = (BRefValueArray) results[0];
    BRefValueArray errors = (BRefValueArray) results[1];
    validateCBResponses(responses, errors, CB_CLIENT_TOP_MOST_SUCCESS_INDEX, expectedStatusCodes);
}
/**
 * Test case scenario:
 * - The circuit starts healthy and serves requests normally.
 * - Mid-run the circuit is forcefully switched to OPEN.
 * - Every request from that point on must fail immediately.
 */
@Test(description = "Verify the functionality of circuit breaker force open implementation")
public void testCBForceOpenScenario() {
    int[] expectedStatusCodes = { 200, 200, 200, 200, 503, 503, 503, 503 };
    BValue[] results = BRunUtil.invoke(compileResult, "testForceOpenScenario");
    Assert.assertEquals(results.length, 2);
    BRefValueArray responses = (BRefValueArray) results[0];
    BRefValueArray errors = (BRefValueArray) results[1];
    validateCBResponses(responses, errors, CB_CLIENT_FORCE_OPEN_INDEX, expectedStatusCodes);
}
/**
 * Test case scenario:
 * - The circuit trips after the backend failure threshold is exceeded.
 * - The circuit is then forcefully switched back to CLOSED.
 * - Subsequent requests must succeed again.
 */
@Test(description = "Verify the functionality of circuit breaker force close implementation")
public void testCBForceCloseScenario() {
    int[] expectedStatusCodes = { 200, 200, 500, 200, 200, 200, 200, 200 };
    BValue[] results = BRunUtil.invoke(compileResult, "testForceCloseScenario");
    Assert.assertEquals(results.length, 2);
    BRefValueArray responses = (BRefValueArray) results[0];
    for (int i = 0; i < responses.size(); i++) {
        BMap<String, BValue> response = (BMap<String, BValue>) responses.get(i);
        long statusCode = ((BInteger) response.get(STATUS_CODE_FIELD)).intValue();
        Assert.assertEquals(statusCode, expectedStatusCodes[i], "Status code does not match.");
    }
}
/**
 * Test case scenario:
 * - The breaker is configured with a requestVolumeThreshold.
 * - Until that many requests have been observed, the circuit state must not change,
 *   so every request here succeeds.
 */
@Test(description = "Verify the functionality of circuit breaker request volume threshold implementation")
public void testCBRequestVolumeThresholdSuccessResponseScenario() {
    int[] expectedStatusCodes = { 200, 200, 200, 200, 200, 200 };
    BValue[] results = BRunUtil.invoke(compileResult, "testRequestVolumeThresholdSuccessResponseScenario");
    Assert.assertEquals(results.length, 2);
    BRefValueArray responses = (BRefValueArray) results[0];
    for (int i = 0; i < responses.size(); i++) {
        BMap<String, BValue> response = (BMap<String, BValue>) responses.get(i);
        long statusCode = ((BInteger) response.get(STATUS_CODE_FIELD)).intValue();
        Assert.assertEquals(statusCode, expectedStatusCodes[i], "Status code does not match.");
    }
}
/**
 * Test case scenario:
 * - The breaker is configured with a requestVolumeThreshold.
 * - Until that many requests have been observed, the circuit state must not change,
 *   so the backend's 500 responses pass through without tripping the breaker.
 */
@Test(description = "Verify the functionality of circuit breaker request volume threshold implementation")
public void testCBRequestVolumeThresholdFailureResponseScenario() {
    int[] expectedStatusCodes = { 500, 500, 500, 500, 500, 500 };
    BValue[] results = BRunUtil.invoke(compileResult, "testRequestVolumeThresholdFailureResponseScenario");
    Assert.assertEquals(results.length, 2);
    BRefValueArray responses = (BRefValueArray) results[0];
    for (int i = 0; i < responses.size(); i++) {
        BMap<String, BValue> response = (BMap<String, BValue>) responses.get(i);
        long statusCode = ((BInteger) response.get(STATUS_CODE_FIELD)).intValue();
        Assert.assertEquals(statusCode, expectedStatusCodes[i], "Status code does not match.");
    }
}
@Test(description = "Test the getCurrentState function of circuit breaker")
public void testCBGetCurrentStatausScenario() {
    // NOTE(review): "Stataus" in the method name is a typo; kept to avoid breaking test reports/listeners.
    String expected = "Circuit Breaker is in CLOSED state";
    HTTPTestRequest requestMsg = MessageUtils.generateHTTPMessage("/cb/getState", HttpConstants.HTTP_METHOD_GET);
    HttpCarbonMessage responseMsg = Services.invokeNew(serviceResult, MOCK_ENDPOINT_NAME, requestMsg);
    Assert.assertNotNull(responseMsg, "Response message not found");
    String actual =
            StringUtils.getStringFromInputStream(new HttpMessageDataStreamer(responseMsg).getInputStream());
    Assert.assertEquals(actual, expected);
}
/**
 * Asserts that the first {@code index} entries are plain responses carrying the expected
 * status codes and that every later entry is a circuit-breaker error.
 *
 * @param responses           responses returned by the ballerina test function
 * @param errors              errors returned by the ballerina test function (parallel to responses)
 * @param index               first position at which an error (instead of a response) is expected
 * @param expectedStatusCodes expected HTTP status codes for the response entries
 */
private void validateCBResponses(BRefValueArray responses, BRefValueArray errors,
int index, int[] expectedStatusCodes) {
    for (int i = 0; i < responses.size(); i++) {
        // Fix: honour the index parameter — it was previously ignored in favour of the
        // hard-coded CB_CLIENT_FORCE_OPEN_INDEX constant, although callers pass different values.
        if (i < index) {
            BMap<String, BValue> res = (BMap<String, BValue>) responses.get(i);
            long statusCode = ((BInteger) res.get(STATUS_CODE_FIELD)).intValue();
            Assert.assertEquals(statusCode, expectedStatusCodes[i], "Status code does not match.");
        } else {
            Assert.assertNotNull(errors.get(i));
            BMap<String, BValue> err = (BMap<String, BValue>) errors.get(i);
            String msg = err.get(BLangVMErrors.ERROR_MESSAGE_FIELD).stringValue();
            Assert.assertTrue(msg != null && msg.startsWith(CB_ERROR_MSG),
                    "Invalid error message from circuit breaker.");
        }
    }
}
} | class CircuitBreakerTest {
private static final String CB_ERROR_MSG = "Upstream service unavailable.";
private static final String MOCK_ENDPOINT_NAME = "mockEP";
private static final int CB_CLIENT_FIRST_ERROR_INDEX = 3;
private static final int CB_CLIENT_SECOND_ERROR_INDEX = 4;
private static final int CB_CLIENT_TOP_MOST_SUCCESS_INDEX = 2;
private static final int CB_CLIENT_FAILURE_CASE_ERROR_INDEX = 5;
private static final int CB_CLIENT_FORCE_OPEN_INDEX = 4;
private static final String STATUS_CODE_FIELD = "statusCode";
private CompileResult compileResult, serviceResult;
@BeforeClass
public void setup() {
String sourceFilePath = "test-src/net/http/resiliency/circuit-breaker-test.bal";
compileResult = BCompileUtil.compile(sourceFilePath);
serviceResult = BServiceUtil.setupProgramFile(this, sourceFilePath);
}
/**
* Test case for a typical scenario where an upstream service may become unavailable temporarily.
*/
@Test
/**
* Test case scenario:
* - Initially the circuit is healthy and functioning normally.
* - Backend service becomes unavailable and eventually, the failure threshold is exceeded.
* - Requests afterwards are immediately failed, with a 503 response.
* - After the reset timeout expires, the circuit goes to HALF_OPEN state and a trial request is sent.
* - The backend service is not available and therefore, the request fails again and the circuit goes back to OPEN.
*/
@Test
public void testTrialRunFailure() {
int[] expectedStatusCodes = new int[] { 200, 500, 503, 500, 503, 500 };
BValue[] returnVals = BRunUtil.invoke(compileResult, "testTrialRunFailure");
Assert.assertEquals(returnVals.length, 2);
BRefValueArray responses = (BRefValueArray) returnVals[0];
BRefValueArray errors = (BRefValueArray) returnVals[1];
for (int i = 0; i < responses.size(); i++) {
long statusCode;
if (i < CB_CLIENT_TOP_MOST_SUCCESS_INDEX || i == CB_CLIENT_FAILURE_CASE_ERROR_INDEX) {
BMap<String, BValue> res = (BMap<String, BValue>) responses.get(i);
statusCode = ((BInteger) res.get(STATUS_CODE_FIELD)).intValue();
Assert.assertEquals(statusCode, expectedStatusCodes[i], "Status code does not match.");
} else {
Assert.assertNotNull(errors.get(i));
BError error = (BError) errors.get(i);
String msg = error.getReason();
Assert.assertTrue(msg != null && msg.startsWith(CB_ERROR_MSG),
"Invalid error message from circuit breaker.");
}
}
}
/**
* Test case scenario:
* - Initially the circuit is healthy and functioning normally.
* - Backend service respond with HTTP status code configured to consider as failures responses.
* eventually the failure threshold is exceeded.
* - Requests afterwards are immediately failed, with a 503 response.
* - After the reset timeout expires, the circuit goes to HALF_OPEN state and a trial request is sent.
* - The backend service is not available and therefore, the request fails again and the circuit goes back to OPEN.
*/
@Test(description = "Test case for Circuit Breaker HTTP status codes.")
public void testHttpStatusCodeFailure() {
int[] expectedStatusCodes = new int[] { 200, 500, 503, 500, 503, 503 };
BValue[] returnVals = BRunUtil.invoke(compileResult, "testHttpStatusCodeFailure");
Assert.assertEquals(returnVals.length, 2);
BRefValueArray responses = (BRefValueArray) returnVals[0];
BRefValueArray errs = (BRefValueArray) returnVals[1];
validateCBResponses(responses, errs, CB_CLIENT_TOP_MOST_SUCCESS_INDEX, expectedStatusCodes);
}
/**
* Test case scenario:
* - Initially the circuit is healthy and functioning normally.
* - during the middle of execution circuit will be force fully opened.
* - Afterward requests should immediately fail.
*/
@Test(description = "Verify the functionality of circuit breaker force open implementation")
public void testCBForceOpenScenario() {
int[] expectedStatusCodes = new int[] { 200, 200, 200, 200, 503, 503, 503, 503 };
BValue[] returnVals = BRunUtil.invoke(compileResult, "testForceOpenScenario");
Assert.assertEquals(returnVals.length, 2);
BRefValueArray responses = (BRefValueArray) returnVals[0];
BRefValueArray errs = (BRefValueArray) returnVals[1];
validateCBResponses(responses, errs, CB_CLIENT_FORCE_OPEN_INDEX, expectedStatusCodes);
}
/**
* Test case scenario:
* - Initially the circuit is healthy and functioning normally.
* - Backend service becomes unavailable and eventually, the failure threshold is exceeded.
* - After that circuit will be force fully closed.
* - Afterward success responses should received.
*/
@Test(description = "Verify the functionality of circuit breaker force close implementation")
public void testCBForceCloseScenario() {
int[] expectedStatusCodes = new int[] { 200, 200, 500, 200, 200, 200, 200, 200 };
BValue[] returnVals = BRunUtil.invoke(compileResult, "testForceCloseScenario");
Assert.assertEquals(returnVals.length, 2);
BRefValueArray responses = (BRefValueArray) returnVals[0];
for (int i = 0; i < responses.size(); i++) {
BMap<String, BValue> res = (BMap<String, BValue>) responses.get(i);
long statusCode = ((BInteger) res.get(STATUS_CODE_FIELD)).intValue();
Assert.assertEquals(statusCode, expectedStatusCodes[i], "Status code does not match.");
}
}
/**
 * Test case scenario:
 * - Circuit breaker configured with a requestVolumeThreshold.
 * - The circuit state must not change until the configured threshold is exceeded,
 *   so all requests succeed.
 */
@Test(description = "Verify the functionality of circuit breaker request volume threshold implementation")
public void testCBRequestVolumeThresholdSuccessResponseScenario() {
    int[] expectedStatusCodes = {200, 200, 200, 200, 200, 200};
    BValue[] results = BRunUtil.invoke(compileResult, "testRequestVolumeThresholdSuccessResponseScenario");
    Assert.assertEquals(results.length, 2);
    BRefValueArray responses = (BRefValueArray) results[0];
    for (int i = 0; i < responses.size(); i++) {
        BMap<String, BValue> response = (BMap<String, BValue>) responses.get(i);
        long statusCode = ((BInteger) response.get(STATUS_CODE_FIELD)).intValue();
        Assert.assertEquals(statusCode, expectedStatusCodes[i], "Status code does not match.");
    }
}
/**
 * Test case scenario:
 * - Circuit breaker configured with a requestVolumeThreshold.
 * - The circuit state must not change until the configured threshold is exceeded,
 *   so backend failures (500) are passed through rather than short-circuited.
 */
@Test(description = "Verify the functionality of circuit breaker request volume threshold implementation")
public void testCBRequestVolumeThresholdFailureResponseScenario() {
    int[] expectedStatusCodes = {500, 500, 500, 500, 500, 500};
    BValue[] results = BRunUtil.invoke(compileResult, "testRequestVolumeThresholdFailureResponseScenario");
    Assert.assertEquals(results.length, 2);
    BRefValueArray responses = (BRefValueArray) results[0];
    for (int i = 0; i < responses.size(); i++) {
        BMap<String, BValue> response = (BMap<String, BValue>) responses.get(i);
        long statusCode = ((BInteger) response.get(STATUS_CODE_FIELD)).intValue();
        Assert.assertEquals(statusCode, expectedStatusCodes[i], "Status code does not match.");
    }
}
// Verifies that the circuit breaker reports CLOSED state via its getCurrentState
// endpoint when no failures have occurred.
// NOTE(review): method name contains a typo ("Stataus" -> "Status") — renaming is a
// public-interface change, so it is only flagged here rather than fixed.
@Test(description = "Test the getCurrentState function of circuit breaker")
public void testCBGetCurrentStatausScenario() {
String value = "Circuit Breaker is in CLOSED state";
String path = "/cb/getState";
// Issue a GET against the mock service and compare the full response payload.
HTTPTestRequest inRequestMsg = MessageUtils.generateHTTPMessage(path, HttpConstants.HTTP_METHOD_GET);
HttpCarbonMessage responseMsg = Services.invokeNew(serviceResult, MOCK_ENDPOINT_NAME, inRequestMsg);
Assert.assertNotNull(responseMsg, "Response message not found");
Assert.assertEquals(
StringUtils.getStringFromInputStream(new HttpMessageDataStreamer(responseMsg).getInputStream()), value);
}
/**
 * Validates circuit breaker responses against the expected status codes.
 * Positions below {@code index} must be real backend responses carrying the
 * expected status code; positions at or above {@code index} must carry a
 * circuit-breaker error whose message starts with {@code CB_ERROR_MSG}.
 *
 * @param responses           responses returned by the client (parallel to {@code errors})
 * @param errors              errors returned by the client
 * @param index               first position at which the circuit is expected to be open
 * @param expectedStatusCodes expected HTTP status code per request
 */
private void validateCBResponses(BRefValueArray responses, BRefValueArray errors,
                                 int index, int[] expectedStatusCodes) {
    for (int i = 0; i < responses.size(); i++) {
        // Bug fix: honor the index parameter — previously the hard-coded
        // CB_CLIENT_FORCE_OPEN_INDEX constant was used, silently ignoring the argument.
        if (i < index) {
            BMap<String, BValue> res = (BMap<String, BValue>) responses.get(i);
            long statusCode = ((BInteger) res.get(STATUS_CODE_FIELD)).intValue();
            Assert.assertEquals(statusCode, expectedStatusCodes[i], "Status code does not match.");
        } else {
            Assert.assertNotNull(errors.get(i));
            BMap<String, BValue> err = (BMap<String, BValue>) errors.get(i);
            String msg = err.get(BLangVMErrors.ERROR_MESSAGE_FIELD).stringValue();
            Assert.assertTrue(msg != null && msg.startsWith(CB_ERROR_MSG),
                    "Invalid error message from circuit breaker.");
        }
    }
}
} |
```suggestion assertThat(group.getScopeComponents()).containsExactly("constant", "host", "foo", "host"); ``` | void testGenerateScopeCustom() throws Exception {
Configuration cfg = new Configuration();
cfg.setString(MetricOptions.SCOPE_NAMING_TM, "constant.<host>.foo.<host>");
MetricRegistryImpl registry =
new MetricRegistryImpl(MetricRegistryTestUtils.fromConfiguration(cfg));
TaskManagerMetricGroup group =
TaskManagerMetricGroup.createTaskManagerMetricGroup(
registry, "host", new ResourceID("id"));
assertThat(group.getScopeComponents()).containsAnyOf("constant", "host", "foo", "host");
assertThat(group.getMetricIdentifier("name")).isEqualTo("constant.host.foo.host.name");
registry.closeAsync().get();
} | assertThat(group.getScopeComponents()).containsAnyOf("constant", "host", "foo", "host"); | void testGenerateScopeCustom() throws Exception {
Configuration cfg = new Configuration();
cfg.setString(MetricOptions.SCOPE_NAMING_TM, "constant.<host>.foo.<host>");
MetricRegistryImpl registry =
new MetricRegistryImpl(MetricRegistryTestUtils.fromConfiguration(cfg));
TaskManagerMetricGroup group =
TaskManagerMetricGroup.createTaskManagerMetricGroup(
registry, "host", new ResourceID("id"));
assertThat(group.getScopeComponents()).containsExactly("constant", "host", "foo", "host");
assertThat(group.getMetricIdentifier("name")).isEqualTo("constant.host.foo.host.name");
registry.closeAsync().get();
} | class TaskManagerGroupTest {
private MetricRegistryImpl registry;
@BeforeEach
void setup() {
registry =
new MetricRegistryImpl(
MetricRegistryTestUtils.defaultMetricRegistryConfiguration());
}
@AfterEach
void teardown() throws Exception {
if (registry != null) {
registry.closeAsync().get();
}
}
@Test
void addAndRemoveJobs() {
final TaskManagerMetricGroup group =
TaskManagerMetricGroup.createTaskManagerMetricGroup(
registry, "localhost", ResourceID.generate());
final JobID jid1 = new JobID();
final JobID jid2 = new JobID();
final String jobName1 = "testjob";
final String jobName2 = "anotherJob";
final JobVertexID vertex11 = new JobVertexID();
final JobVertexID vertex12 = new JobVertexID();
final JobVertexID vertex13 = new JobVertexID();
final JobVertexID vertex21 = new JobVertexID();
final ExecutionAttemptID execution11 = createExecutionAttemptId(vertex11, 17, 0);
final ExecutionAttemptID execution12 = createExecutionAttemptId(vertex12, 13, 1);
final ExecutionAttemptID execution13 = createExecutionAttemptId(vertex13, 0, 0);
final ExecutionAttemptID execution21 = createExecutionAttemptId(vertex21, 7, 2);
TaskMetricGroup tmGroup11 = group.addJob(jid1, jobName1).addTask(execution11, "test");
TaskMetricGroup tmGroup12 = group.addJob(jid1, jobName1).addTask(execution12, "test");
TaskMetricGroup tmGroup21 = group.addJob(jid2, jobName2).addTask(execution21, "test");
assertThat(group.numRegisteredJobMetricGroups()).isEqualTo(2);
assertThat(tmGroup11.parent().isClosed()).isFalse();
assertThat(tmGroup12.parent().isClosed()).isFalse();
assertThat(tmGroup21.parent().isClosed()).isFalse();
tmGroup11.close();
tmGroup21.close();
assertThat(tmGroup11.isClosed()).isTrue();
assertThat(tmGroup21.isClosed()).isTrue();
assertThat(tmGroup11.parent().isClosed()).isFalse();
assertThat(tmGroup12.parent().isClosed()).isFalse();
assertThat(tmGroup21.parent().isClosed()).isFalse();
assertThat(group.numRegisteredJobMetricGroups()).isEqualTo(2);
TaskMetricGroup tmGroup13 = group.addJob(jid1, jobName1).addTask(execution13, "test");
assertThat(tmGroup11.parent())
.isSameAs(tmGroup13.parent());
tmGroup12.close();
tmGroup13.close();
}
@Test
void testCloseClosesAll() {
final TaskManagerMetricGroup group =
TaskManagerMetricGroup.createTaskManagerMetricGroup(
registry, "localhost", new ResourceID(new AbstractID().toString()));
final JobID jid1 = new JobID();
final JobID jid2 = new JobID();
final String jobName1 = "testjob";
final String jobName2 = "anotherJob";
final JobVertexID vertex11 = new JobVertexID();
final JobVertexID vertex12 = new JobVertexID();
final JobVertexID vertex21 = new JobVertexID();
final ExecutionAttemptID execution11 = createExecutionAttemptId(vertex11, 17, 0);
final ExecutionAttemptID execution12 = createExecutionAttemptId(vertex12, 13, 1);
final ExecutionAttemptID execution21 = createExecutionAttemptId(vertex21, 7, 1);
TaskMetricGroup tmGroup11 = group.addJob(jid1, jobName1).addTask(execution11, "test");
TaskMetricGroup tmGroup12 = group.addJob(jid1, jobName1).addTask(execution12, "test");
TaskMetricGroup tmGroup21 = group.addJob(jid2, jobName2).addTask(execution21, "test");
group.close();
assertThat(tmGroup11.isClosed()).isTrue();
assertThat(tmGroup12.isClosed()).isTrue();
assertThat(tmGroup21.isClosed()).isTrue();
}
/** Verifies the default scope format {@code <host>.taskmanager.<tm_id>}. */
@Test
void testGenerateScopeDefault() {
    TaskManagerMetricGroup group =
            TaskManagerMetricGroup.createTaskManagerMetricGroup(
                    registry, "localhost", new ResourceID("id"));
    // Bug fix: containsAnyOf passes if even a single component matches; the scope is
    // fully deterministic here, so assert the exact components and their order.
    assertThat(group.getScopeComponents()).containsExactly("localhost", "taskmanager", "id");
    assertThat(group.getMetricIdentifier("name")).isEqualTo("localhost.taskmanager.id.name");
}
/** Verifies that the query-service metric info carries the TM ID and an empty scope. */
// Bug fix: the @Test annotation was duplicated, which does not compile
// (org.junit.jupiter.api.Test is not @Repeatable).
@Test
void testCreateQueryServiceMetricInfo() {
    TaskManagerMetricGroup tm =
            TaskManagerMetricGroup.createTaskManagerMetricGroup(
                    registry, "host", new ResourceID("id"));
    QueryScopeInfo.TaskManagerQueryScopeInfo info =
            tm.createQueryServiceMetricInfo(new DummyCharacterFilter());
    assertThat(info.scope).isEmpty();
    assertThat(info.taskManagerID).isEqualTo("id");
}
} | class TaskManagerGroupTest {
private MetricRegistryImpl registry;
@BeforeEach
void setup() {
registry =
new MetricRegistryImpl(
MetricRegistryTestUtils.defaultMetricRegistryConfiguration());
}
@AfterEach
void teardown() throws Exception {
if (registry != null) {
registry.closeAsync().get();
}
}
@Test
void addAndRemoveJobs() {
final TaskManagerMetricGroup group =
TaskManagerMetricGroup.createTaskManagerMetricGroup(
registry, "localhost", ResourceID.generate());
final JobID jid1 = new JobID();
final JobID jid2 = new JobID();
final String jobName1 = "testjob";
final String jobName2 = "anotherJob";
final JobVertexID vertex11 = new JobVertexID();
final JobVertexID vertex12 = new JobVertexID();
final JobVertexID vertex13 = new JobVertexID();
final JobVertexID vertex21 = new JobVertexID();
final ExecutionAttemptID execution11 = createExecutionAttemptId(vertex11, 17, 0);
final ExecutionAttemptID execution12 = createExecutionAttemptId(vertex12, 13, 1);
final ExecutionAttemptID execution13 = createExecutionAttemptId(vertex13, 0, 0);
final ExecutionAttemptID execution21 = createExecutionAttemptId(vertex21, 7, 2);
TaskMetricGroup tmGroup11 = group.addJob(jid1, jobName1).addTask(execution11, "test");
TaskMetricGroup tmGroup12 = group.addJob(jid1, jobName1).addTask(execution12, "test");
TaskMetricGroup tmGroup21 = group.addJob(jid2, jobName2).addTask(execution21, "test");
assertThat(group.numRegisteredJobMetricGroups()).isEqualTo(2);
assertThat(tmGroup11.parent().isClosed()).isFalse();
assertThat(tmGroup12.parent().isClosed()).isFalse();
assertThat(tmGroup21.parent().isClosed()).isFalse();
tmGroup11.close();
tmGroup21.close();
assertThat(tmGroup11.isClosed()).isTrue();
assertThat(tmGroup21.isClosed()).isTrue();
assertThat(tmGroup11.parent().isClosed()).isFalse();
assertThat(tmGroup12.parent().isClosed()).isFalse();
assertThat(tmGroup21.parent().isClosed()).isFalse();
assertThat(group.numRegisteredJobMetricGroups()).isEqualTo(2);
TaskMetricGroup tmGroup13 = group.addJob(jid1, jobName1).addTask(execution13, "test");
assertThat(tmGroup11.parent())
.isSameAs(tmGroup13.parent());
tmGroup12.close();
tmGroup13.close();
}
@Test
void testCloseClosesAll() {
final TaskManagerMetricGroup group =
TaskManagerMetricGroup.createTaskManagerMetricGroup(
registry, "localhost", new ResourceID(new AbstractID().toString()));
final JobID jid1 = new JobID();
final JobID jid2 = new JobID();
final String jobName1 = "testjob";
final String jobName2 = "anotherJob";
final JobVertexID vertex11 = new JobVertexID();
final JobVertexID vertex12 = new JobVertexID();
final JobVertexID vertex21 = new JobVertexID();
final ExecutionAttemptID execution11 = createExecutionAttemptId(vertex11, 17, 0);
final ExecutionAttemptID execution12 = createExecutionAttemptId(vertex12, 13, 1);
final ExecutionAttemptID execution21 = createExecutionAttemptId(vertex21, 7, 1);
TaskMetricGroup tmGroup11 = group.addJob(jid1, jobName1).addTask(execution11, "test");
TaskMetricGroup tmGroup12 = group.addJob(jid1, jobName1).addTask(execution12, "test");
TaskMetricGroup tmGroup21 = group.addJob(jid2, jobName2).addTask(execution21, "test");
group.close();
assertThat(tmGroup11.isClosed()).isTrue();
assertThat(tmGroup12.isClosed()).isTrue();
assertThat(tmGroup21.isClosed()).isTrue();
}
@Test
void testGenerateScopeDefault() {
TaskManagerMetricGroup group =
TaskManagerMetricGroup.createTaskManagerMetricGroup(
registry, "localhost", new ResourceID("id"));
assertThat(group.getScopeComponents()).containsExactly("localhost", "taskmanager", "id");
assertThat(group.getMetricIdentifier("name")).isEqualTo("localhost.taskmanager.id.name");
}
/** Verifies that the query-service metric info carries the TM ID and an empty scope. */
// Bug fix: the @Test annotation was duplicated, which does not compile
// (org.junit.jupiter.api.Test is not @Repeatable).
@Test
void testCreateQueryServiceMetricInfo() {
    TaskManagerMetricGroup tm =
            TaskManagerMetricGroup.createTaskManagerMetricGroup(
                    registry, "host", new ResourceID("id"));
    QueryScopeInfo.TaskManagerQueryScopeInfo info =
            tm.createQueryServiceMetricInfo(new DummyCharacterFilter());
    assertThat(info.scope).isEmpty();
    assertThat(info.taskManagerID).isEqualTo("id");
}
} |
This seems like it should be a subtype check. What was wrong with the prior code? | private static void verifySplittableMethods(DoFnSignature signature, ErrorReporter errors) {
DoFnSignature.ProcessElementMethod processElement = signature.processElement();
DoFnSignature.GetInitialRestrictionMethod getInitialRestriction =
signature.getInitialRestriction();
DoFnSignature.NewTrackerMethod newTracker = signature.newTracker();
DoFnSignature.GetRestrictionCoderMethod getRestrictionCoder = signature.getRestrictionCoder();
DoFnSignature.SplitRestrictionMethod splitRestriction = signature.splitRestriction();
ErrorReporter processElementErrors =
errors.forMethod(DoFn.ProcessElement.class, processElement.targetMethod());
List<String> missingRequiredMethods = new ArrayList<>();
if (getInitialRestriction == null) {
missingRequiredMethods.add("@" + DoFn.GetInitialRestriction.class.getSimpleName());
}
if (newTracker == null) {
if (getInitialRestriction != null
&& getInitialRestriction
.restrictionT()
.isSubtypeOf(TypeDescriptor.of(HasDefaultTracker.class))) {
} else {
missingRequiredMethods.add("@" + DoFn.NewTracker.class.getSimpleName());
}
} else {
ErrorReporter getInitialRestrictionErrors =
errors.forMethod(DoFn.GetInitialRestriction.class, getInitialRestriction.targetMethod());
TypeDescriptor<?> restrictionT = getInitialRestriction.restrictionT();
getInitialRestrictionErrors.checkArgument(
restrictionT.equals(newTracker.restrictionT()),
"Uses restriction type %s, but @%s method %s uses restriction type %s",
formatType(restrictionT),
DoFn.NewTracker.class.getSimpleName(),
format(newTracker.targetMethod()),
formatType(newTracker.restrictionT()));
}
if (!missingRequiredMethods.isEmpty()) {
processElementErrors.throwIllegalArgument(
"Splittable, but does not define the following required methods: %s",
missingRequiredMethods);
}
ErrorReporter getInitialRestrictionErrors =
errors.forMethod(DoFn.GetInitialRestriction.class, getInitialRestriction.targetMethod());
TypeDescriptor<?> restrictionT = getInitialRestriction.restrictionT();
processElementErrors.checkArgument(
processElement.trackerT().getRawType().equals(RestrictionTracker.class),
"Has tracker type %s, but the DoFn's tracker type must be of type RestrictionTracker.",
formatType(processElement.trackerT()));
if (getRestrictionCoder != null) {
getInitialRestrictionErrors.checkArgument(
getRestrictionCoder.coderT().isSubtypeOf(coderTypeOf(restrictionT)),
"Uses restriction type %s, but @%s method %s returns %s "
+ "which is not a subtype of %s",
formatType(restrictionT),
DoFn.GetRestrictionCoder.class.getSimpleName(),
format(getRestrictionCoder.targetMethod()),
formatType(getRestrictionCoder.coderT()),
formatType(coderTypeOf(restrictionT)));
}
if (splitRestriction != null) {
getInitialRestrictionErrors.checkArgument(
splitRestriction.restrictionT().equals(restrictionT),
"Uses restriction type %s, but @%s method %s uses restriction type %s",
formatType(restrictionT),
DoFn.SplitRestriction.class.getSimpleName(),
format(splitRestriction.targetMethod()),
formatType(splitRestriction.restrictionT()));
}
} | processElement.trackerT().getRawType().equals(RestrictionTracker.class), | private static void verifySplittableMethods(DoFnSignature signature, ErrorReporter errors) {
DoFnSignature.ProcessElementMethod processElement = signature.processElement();
DoFnSignature.GetInitialRestrictionMethod getInitialRestriction =
signature.getInitialRestriction();
DoFnSignature.NewTrackerMethod newTracker = signature.newTracker();
DoFnSignature.GetRestrictionCoderMethod getRestrictionCoder = signature.getRestrictionCoder();
DoFnSignature.SplitRestrictionMethod splitRestriction = signature.splitRestriction();
ErrorReporter processElementErrors =
errors.forMethod(DoFn.ProcessElement.class, processElement.targetMethod());
List<String> missingRequiredMethods = new ArrayList<>();
if (getInitialRestriction == null) {
missingRequiredMethods.add("@" + DoFn.GetInitialRestriction.class.getSimpleName());
}
if (newTracker == null) {
if (getInitialRestriction != null
&& getInitialRestriction
.restrictionT()
.isSubtypeOf(TypeDescriptor.of(HasDefaultTracker.class))) {
} else {
missingRequiredMethods.add("@" + DoFn.NewTracker.class.getSimpleName());
}
} else {
ErrorReporter getInitialRestrictionErrors =
errors.forMethod(DoFn.GetInitialRestriction.class, getInitialRestriction.targetMethod());
TypeDescriptor<?> restrictionT = getInitialRestriction.restrictionT();
getInitialRestrictionErrors.checkArgument(
restrictionT.equals(newTracker.restrictionT()),
"Uses restriction type %s, but @%s method %s uses restriction type %s",
formatType(restrictionT),
DoFn.NewTracker.class.getSimpleName(),
format(newTracker.targetMethod()),
formatType(newTracker.restrictionT()));
}
if (!missingRequiredMethods.isEmpty()) {
processElementErrors.throwIllegalArgument(
"Splittable, but does not define the following required methods: %s",
missingRequiredMethods);
}
ErrorReporter getInitialRestrictionErrors =
errors.forMethod(DoFn.GetInitialRestriction.class, getInitialRestriction.targetMethod());
TypeDescriptor<?> restrictionT = getInitialRestriction.restrictionT();
processElementErrors.checkArgument(
processElement.trackerT().getRawType().equals(RestrictionTracker.class),
"Has tracker type %s, but the DoFn's tracker type must be of type RestrictionTracker.",
formatType(processElement.trackerT()));
if (getRestrictionCoder != null) {
getInitialRestrictionErrors.checkArgument(
getRestrictionCoder.coderT().isSubtypeOf(coderTypeOf(restrictionT)),
"Uses restriction type %s, but @%s method %s returns %s "
+ "which is not a subtype of %s",
formatType(restrictionT),
DoFn.GetRestrictionCoder.class.getSimpleName(),
format(getRestrictionCoder.targetMethod()),
formatType(getRestrictionCoder.coderT()),
formatType(coderTypeOf(restrictionT)));
}
if (splitRestriction != null) {
getInitialRestrictionErrors.checkArgument(
splitRestriction.restrictionT().equals(restrictionT),
"Uses restriction type %s, but @%s method %s uses restriction type %s",
formatType(restrictionT),
DoFn.SplitRestriction.class.getSimpleName(),
format(splitRestriction.targetMethod()),
formatType(splitRestriction.restrictionT()));
}
} | class %s."
+ " Timer callbacks must be declared in the same lexical scope as their timer",
onTimerMethod,
id,
timerDecl.field().getDeclaringClass().getCanonicalName());
onTimerMethodMap.put(
id, analyzeOnTimerMethod(errors, fnT, onTimerMethod, id, inputT, outputT, fnContext));
}
signatureBuilder.setOnTimerMethods(onTimerMethodMap);
for (TimerDeclaration decl : fnContext.getTimerDeclarations().values()) {
errors.checkArgument(
onTimerMethodMap.containsKey(decl.id()),
"No callback registered via %s for timer %s",
DoFn.OnTimer.class.getSimpleName(),
decl.id());
} | class %s."
+ " Timer callbacks must be declared in the same lexical scope as their timer",
onTimerMethod,
id,
timerDecl.field().getDeclaringClass().getCanonicalName());
onTimerMethodMap.put(
id, analyzeOnTimerMethod(errors, fnT, onTimerMethod, id, inputT, outputT, fnContext));
}
signatureBuilder.setOnTimerMethods(onTimerMethodMap);
for (TimerDeclaration decl : fnContext.getTimerDeclarations().values()) {
errors.checkArgument(
onTimerMethodMap.containsKey(decl.id()),
"No callback registered via %s for timer %s",
DoFn.OnTimer.class.getSimpleName(),
decl.id());
} |
Shouldn't we suspend the `mailboxProcessor` here? It was being done before when the future completed. | protected void processInput(MailboxDefaultAction.Controller controller) throws Exception {
controller.suspendDefaultAction();
sourceThread.setTaskDescription(getName());
if (operatorChain.isFinishedOnRestore()) {
LOG.debug(
"Legacy source {} skip execution since the task is finished on restore",
getTaskNameWithSubtaskAndId());
sourceThread.getCompletionFuture().complete(null);
} else {
sourceThread.start();
}
sourceThread
.getCompletionFuture()
.whenComplete(
(Void ignore, Throwable sourceThreadThrowable) -> {
if (isCanceled()
&& ExceptionUtils.findThrowable(
sourceThreadThrowable,
InterruptedException.class)
.isPresent()) {
mailboxProcessor.reportThrowable(
new CancelTaskException(sourceThreadThrowable));
} else if (!wasStoppedExternally && sourceThreadThrowable != null) {
mailboxProcessor.reportThrowable(sourceThreadThrowable);
} else {
mailboxProcessor.suspend();
}
});
} | sourceThread.getCompletionFuture().complete(null); | protected void processInput(MailboxDefaultAction.Controller controller) throws Exception {
controller.suspendDefaultAction();
sourceThread.setTaskDescription(getName());
if (operatorChain.isFinishedOnRestore()) {
LOG.debug(
"Legacy source {} skip execution since the task is finished on restore",
getTaskNameWithSubtaskAndId());
sourceThread.getCompletionFuture().complete(null);
} else {
sourceThread.start();
}
sourceThread
.getCompletionFuture()
.whenComplete(
(Void ignore, Throwable sourceThreadThrowable) -> {
if (isCanceled()
&& ExceptionUtils.findThrowable(
sourceThreadThrowable,
InterruptedException.class)
.isPresent()) {
mailboxProcessor.reportThrowable(
new CancelTaskException(sourceThreadThrowable));
} else if (!wasStoppedExternally && sourceThreadThrowable != null) {
mailboxProcessor.reportThrowable(sourceThreadThrowable);
} else {
mailboxProcessor.suspend();
}
});
} | class SourceStreamTask<
OUT, SRC extends SourceFunction<OUT>, OP extends StreamSource<OUT, SRC>>
extends StreamTask<OUT, OP> {
private final LegacySourceFunctionThread sourceThread;
private final Object lock;
private volatile boolean externallyInducedCheckpoints;
/**
* Indicates whether this Task was purposefully finished (by finishTask()), in this case we want
* to ignore exceptions thrown after finishing, to ensure shutdown works smoothly.
*/
private volatile boolean wasStoppedExternally = false;
public SourceStreamTask(Environment env) throws Exception {
this(env, new Object());
}
private SourceStreamTask(Environment env, Object lock) throws Exception {
super(
env,
null,
FatalExitExceptionHandler.INSTANCE,
StreamTaskActionExecutor.synchronizedExecutor(lock));
this.lock = Preconditions.checkNotNull(lock);
this.sourceThread = new LegacySourceFunctionThread();
getEnvironment().getMetricGroup().getIOMetricGroup().setEnableBusyTime(false);
}
@Override
protected void init() {
SourceFunction<?> source = mainOperator.getUserFunction();
if (source instanceof ExternallyInducedSource) {
externallyInducedCheckpoints = true;
ExternallyInducedSource.CheckpointTrigger triggerHook =
new ExternallyInducedSource.CheckpointTrigger() {
@Override
public void triggerCheckpoint(long checkpointId) throws FlinkException {
final CheckpointOptions checkpointOptions =
CheckpointOptions.forConfig(
CheckpointType.CHECKPOINT,
CheckpointStorageLocationReference.getDefault(),
configuration.isExactlyOnceCheckpointMode(),
configuration.isUnalignedCheckpointsEnabled(),
configuration.getAlignedCheckpointTimeout().toMillis());
final long timestamp = System.currentTimeMillis();
final CheckpointMetaData checkpointMetaData =
new CheckpointMetaData(checkpointId, timestamp, timestamp);
try {
SourceStreamTask.super
.triggerCheckpointAsync(
checkpointMetaData, checkpointOptions)
.get();
} catch (RuntimeException e) {
throw e;
} catch (Exception e) {
throw new FlinkException(e.getMessage(), e);
}
}
};
((ExternallyInducedSource<?, ?>) source).setCheckpointTrigger(triggerHook);
}
getEnvironment()
.getMetricGroup()
.getIOMetricGroup()
.gauge(
MetricNames.CHECKPOINT_START_DELAY_TIME,
this::getAsyncCheckpointStartDelayNanos);
}
@Override
protected void advanceToEndOfEventTime() throws Exception {
mainOperator.advanceToEndOfEventTime();
}
@Override
protected void cleanup() {
}
@Override
@Override
protected void cleanUpInvoke() throws Exception {
if (isFailing()) {
interruptSourceThread(true);
}
super.cleanUpInvoke();
}
@Override
protected void cancelTask() {
cancelTask(true);
}
@Override
protected void finishTask() {
wasStoppedExternally = true;
/**
* Currently stop with savepoint relies on the EndOfPartitionEvents propagation and performs
* clean shutdown after the stop with savepoint (which can produce some records to process
* after the savepoint while stopping). If we interrupt source thread, we might leave the
* network stack in an inconsistent state. So, if we want to relay on the clean shutdown, we
* can not interrupt the source thread.
*/
cancelTask(false);
}
private void cancelTask(boolean interrupt) {
try {
if (mainOperator != null) {
mainOperator.cancel();
}
} finally {
interruptSourceThread(interrupt);
}
}
/**
 * Interrupts or unblocks the legacy source thread, unless the operator chain was
 * restored in a finished state (in which case the thread never ran user code).
 *
 * @param interrupt whether a live source thread should actually be interrupted
 */
private void interruptSourceThread(boolean interrupt) {
    // Bug fix: guard against operatorChain being null — cancellation can race with
    // task initialization before the operator chain has been constructed, which
    // would otherwise throw an NPE here.
    if (operatorChain != null && operatorChain.isFinishedOnRestore()) {
        return;
    }
    if (sourceThread.isAlive()) {
        if (interrupt) {
            sourceThread.interrupt();
        }
    } else if (!sourceThread.getCompletionFuture().isDone()) {
        // The thread was never started: complete the future so that waiters
        // (e.g. getCompletionFuture()) are released.
        sourceThread.getCompletionFuture().complete(null);
    }
}
@Override
protected CompletableFuture<Void> getCompletionFuture() {
return sourceThread.getCompletionFuture();
}
@Override
public Future<Boolean> triggerCheckpointAsync(
CheckpointMetaData checkpointMetaData, CheckpointOptions checkpointOptions) {
if (!externallyInducedCheckpoints) {
return super.triggerCheckpointAsync(checkpointMetaData, checkpointOptions);
} else {
synchronized (lock) {
return CompletableFuture.completedFuture(isRunning());
}
}
}
@Override
protected void declineCheckpoint(long checkpointId) {
if (!externallyInducedCheckpoints) {
super.declineCheckpoint(checkpointId);
}
}
/** Runnable that executes the the source function in the head operator. */
private class LegacySourceFunctionThread extends Thread {
private final CompletableFuture<Void> completionFuture;
LegacySourceFunctionThread() {
this.completionFuture = new CompletableFuture<>();
}
@Override
public void run() {
try {
mainOperator.run(lock, operatorChain);
if (!wasStoppedExternally && !isCanceled()) {
synchronized (lock) {
operatorChain.setIgnoreEndOfInput(false);
}
}
completionFuture.complete(null);
} catch (Throwable t) {
completionFuture.completeExceptionally(t);
}
}
public void setTaskDescription(final String taskDescription) {
setName("Legacy Source Thread - " + taskDescription);
}
/**
* @return future that is completed once this thread completes. If this task {@link
*
* completed future.
*/
CompletableFuture<Void> getCompletionFuture() {
return isFailing() && !isAlive()
? CompletableFuture.completedFuture(null)
: completionFuture;
}
}
} | class SourceStreamTask<
OUT, SRC extends SourceFunction<OUT>, OP extends StreamSource<OUT, SRC>>
extends StreamTask<OUT, OP> {
private final LegacySourceFunctionThread sourceThread;
private final Object lock;
private volatile boolean externallyInducedCheckpoints;
/**
* Indicates whether this Task was purposefully finished (by finishTask()), in this case we want
* to ignore exceptions thrown after finishing, to ensure shutdown works smoothly.
*/
private volatile boolean wasStoppedExternally = false;
public SourceStreamTask(Environment env) throws Exception {
this(env, new Object());
}
private SourceStreamTask(Environment env, Object lock) throws Exception {
super(
env,
null,
FatalExitExceptionHandler.INSTANCE,
StreamTaskActionExecutor.synchronizedExecutor(lock));
this.lock = Preconditions.checkNotNull(lock);
this.sourceThread = new LegacySourceFunctionThread();
getEnvironment().getMetricGroup().getIOMetricGroup().setEnableBusyTime(false);
}
@Override
protected void init() {
SourceFunction<?> source = mainOperator.getUserFunction();
if (source instanceof ExternallyInducedSource) {
externallyInducedCheckpoints = true;
ExternallyInducedSource.CheckpointTrigger triggerHook =
new ExternallyInducedSource.CheckpointTrigger() {
@Override
public void triggerCheckpoint(long checkpointId) throws FlinkException {
final CheckpointOptions checkpointOptions =
CheckpointOptions.forConfig(
CheckpointType.CHECKPOINT,
CheckpointStorageLocationReference.getDefault(),
configuration.isExactlyOnceCheckpointMode(),
configuration.isUnalignedCheckpointsEnabled(),
configuration.getAlignedCheckpointTimeout().toMillis());
final long timestamp = System.currentTimeMillis();
final CheckpointMetaData checkpointMetaData =
new CheckpointMetaData(checkpointId, timestamp, timestamp);
try {
SourceStreamTask.super
.triggerCheckpointAsync(
checkpointMetaData, checkpointOptions)
.get();
} catch (RuntimeException e) {
throw e;
} catch (Exception e) {
throw new FlinkException(e.getMessage(), e);
}
}
};
((ExternallyInducedSource<?, ?>) source).setCheckpointTrigger(triggerHook);
}
getEnvironment()
.getMetricGroup()
.getIOMetricGroup()
.gauge(
MetricNames.CHECKPOINT_START_DELAY_TIME,
this::getAsyncCheckpointStartDelayNanos);
}
@Override
protected void advanceToEndOfEventTime() throws Exception {
mainOperator.advanceToEndOfEventTime();
}
@Override
protected void cleanup() {
}
@Override
@Override
protected void cleanUpInvoke() throws Exception {
if (isFailing()) {
interruptSourceThread(true);
}
super.cleanUpInvoke();
}
@Override
protected void cancelTask() {
cancelTask(true);
}
@Override
protected void finishTask() {
wasStoppedExternally = true;
/**
* Currently stop with savepoint relies on the EndOfPartitionEvents propagation and performs
* clean shutdown after the stop with savepoint (which can produce some records to process
* after the savepoint while stopping). If we interrupt source thread, we might leave the
* network stack in an inconsistent state. So, if we want to relay on the clean shutdown, we
* can not interrupt the source thread.
*/
cancelTask(false);
}
private void cancelTask(boolean interrupt) {
try {
if (mainOperator != null) {
mainOperator.cancel();
}
} finally {
interruptSourceThread(interrupt);
}
}
// Interrupts or unblocks the legacy source thread. The null check covers a race where
// cancellation arrives before the operator chain has been constructed; a chain that
// finished on restore never started the source thread, so there is nothing to do.
private void interruptSourceThread(boolean interrupt) {
if (operatorChain != null && operatorChain.isFinishedOnRestore()) {
return;
}
if (sourceThread.isAlive()) {
if (interrupt) {
sourceThread.interrupt();
}
} else if (!sourceThread.getCompletionFuture().isDone()) {
// The thread never ran: complete the future so waiters are released.
sourceThread.getCompletionFuture().complete(null);
}
}
@Override
protected CompletableFuture<Void> getCompletionFuture() {
return sourceThread.getCompletionFuture();
}
@Override
public Future<Boolean> triggerCheckpointAsync(
CheckpointMetaData checkpointMetaData, CheckpointOptions checkpointOptions) {
if (!externallyInducedCheckpoints) {
return super.triggerCheckpointAsync(checkpointMetaData, checkpointOptions);
} else {
synchronized (lock) {
return CompletableFuture.completedFuture(isRunning());
}
}
}
@Override
protected void declineCheckpoint(long checkpointId) {
if (!externallyInducedCheckpoints) {
super.declineCheckpoint(checkpointId);
}
}
    /** Runnable that executes the source function in the head operator. */
    private class LegacySourceFunctionThread extends Thread {
        // Completed normally when the source function returns, exceptionally when it throws.
        private final CompletableFuture<Void> completionFuture;
        LegacySourceFunctionThread() {
            this.completionFuture = new CompletableFuture<>();
        }
        @Override
        public void run() {
            try {
                // Blocks until the legacy source function finishes or fails.
                mainOperator.run(lock, operatorChain);
                if (!wasStoppedExternally && !isCanceled()) {
                    synchronized (lock) {
                        // NOTE(review): presumably re-enables end-of-input propagation after a
                        // natural source finish — confirm against OperatorChain semantics.
                        operatorChain.setIgnoreEndOfInput(false);
                    }
                }
                completionFuture.complete(null);
            } catch (Throwable t) {
                completionFuture.completeExceptionally(t);
            }
        }
        public void setTaskDescription(final String taskDescription) {
            setName("Legacy Source Thread - " + taskDescription);
        }
        /**
         * @return future that is completed once this thread completes. If this task is failing
         *     and the thread is no longer alive, returns an already completed future instead.
         */
        CompletableFuture<Void> getCompletionFuture() {
            return isFailing() && !isAlive()
                    ? CompletableFuture.completedFuture(null)
                    : completionFuture;
        }
    }
} |
``` // instead of completing stop with savepoint via `notifyCheckpointCompleted` call // we simulate that source has finished first. As a result we expect that the endOfInput // should have been issued ``` ? | public void testInputEndedBeforeStopWithSavepointConfirmed() throws Exception {
CancelTestSource source =
new CancelTestSource(
STRING_TYPE_INFO.createSerializer(new ExecutionConfig()), "src");
TestBoundedOneInputStreamOperator chainTail = new TestBoundedOneInputStreamOperator("t");
StreamTaskMailboxTestHarness<String> harness =
new StreamTaskMailboxTestHarnessBuilder<>(SourceStreamTask::new, STRING_TYPE_INFO)
.setupOperatorChain(
new OperatorID(),
new StreamSource<String, CancelTestSource>(source))
.chain(
new OperatorID(),
chainTail,
STRING_TYPE_INFO.createSerializer(new ExecutionConfig()))
.finish()
.build();
Future<Boolean> triggerFuture =
harness.streamTask.triggerCheckpointAsync(
new CheckpointMetaData(1, 1),
new CheckpointOptions(SYNC_SAVEPOINT, getDefault()),
false);
while (!triggerFuture.isDone()) {
harness.streamTask.runMailboxStep();
}
source.cancel();
harness.streamTask.invoke();
harness.waitForTaskCompletion();
assertTrue(TestBoundedOneInputStreamOperator.isInputEnded());
} | public void testInputEndedBeforeStopWithSavepointConfirmed() throws Exception {
CancelTestSource source =
new CancelTestSource(
STRING_TYPE_INFO.createSerializer(new ExecutionConfig()), "src");
TestBoundedOneInputStreamOperator chainTail = new TestBoundedOneInputStreamOperator("t");
StreamTaskMailboxTestHarness<String> harness =
new StreamTaskMailboxTestHarnessBuilder<>(SourceStreamTask::new, STRING_TYPE_INFO)
.setupOperatorChain(
new OperatorID(),
new StreamSource<String, CancelTestSource>(source))
.chain(
new OperatorID(),
chainTail,
STRING_TYPE_INFO.createSerializer(new ExecutionConfig()))
.finish()
.build();
Future<Boolean> triggerFuture =
harness.streamTask.triggerCheckpointAsync(
new CheckpointMetaData(1, 1),
new CheckpointOptions(SYNC_SAVEPOINT, getDefault()),
false);
while (!triggerFuture.isDone()) {
harness.streamTask.runMailboxStep();
}
source.cancel();
harness.streamTask.invoke();
harness.waitForTaskCompletion();
assertTrue(TestBoundedOneInputStreamOperator.isInputEnded());
} | class SourceStreamTaskTest {
@Test
/** This test verifies that open() and close() are correctly called by the StreamTask. */
@Test
public void testOpenClose() throws Exception {
final StreamTaskTestHarness<String> testHarness =
new StreamTaskTestHarness<>(SourceStreamTask::new, STRING_TYPE_INFO);
testHarness.setupOutputForSingletonOperatorChain();
StreamConfig streamConfig = testHarness.getStreamConfig();
StreamSource<String, ?> sourceOperator = new StreamSource<>(new OpenCloseTestSource());
streamConfig.setStreamOperator(sourceOperator);
streamConfig.setOperatorID(new OperatorID());
testHarness.invoke();
testHarness.waitForTaskCompletion();
assertTrue("RichFunction methods where not called.", OpenCloseTestSource.closeCalled);
List<String> resultElements =
TestHarnessUtil.getRawElementsFromOutput(testHarness.getOutput());
Assert.assertEquals(10, resultElements.size());
}
    // Verifies that the checkpoint-start-delay metric reflects the time a trigger request waited
    // in the mailbox, and that busy time is NaN for a legacy source task.
    @Test(timeout = 60_000)
    public void testMetrics() throws Exception {
        long sleepTime = 42;
        StreamTaskMailboxTestHarnessBuilder<String> builder =
                new StreamTaskMailboxTestHarnessBuilder<>(SourceStreamTask::new, STRING_TYPE_INFO);
        final Map<String, Metric> metrics = new ConcurrentHashMap<>();
        final TaskMetricGroup taskMetricGroup =
                new StreamTaskTestHarness.TestTaskMetricGroup(metrics);
        StreamTaskMailboxTestHarness<String> harness =
                builder.setupOutputForSingletonOperatorChain(
                                new StreamSource<>(
                                        new CancelTestSource(
                                                STRING_TYPE_INFO.createSerializer(
                                                        new ExecutionConfig()),
                                                "Hello")))
                        .setTaskMetricGroup(taskMetricGroup)
                        .build();
        Future<Boolean> triggerFuture =
                harness.streamTask.triggerCheckpointAsync(
                        new CheckpointMetaData(1L, System.currentTimeMillis()),
                        CheckpointOptions.forCheckpointWithDefaultLocation(),
                        false);
        assertFalse(triggerFuture.isDone());
        // Let the trigger request sit in the mailbox so a measurable start delay accumulates.
        Thread.sleep(sleepTime);
        while (!triggerFuture.isDone()) {
            harness.streamTask.runMailboxStep();
        }
        // sleepTime is in ms, the metric is reported in nanoseconds.
        Gauge<Long> checkpointStartDelayGauge =
                (Gauge<Long>) metrics.get(MetricNames.CHECKPOINT_START_DELAY_TIME);
        assertThat(
                checkpointStartDelayGauge.getValue(), greaterThanOrEqualTo(sleepTime * 1_000_000));
        Gauge<Double> busyTimeGauge = (Gauge<Double>) metrics.get(MetricNames.TASK_BUSY_TIME);
        assertTrue(Double.isNaN(busyTimeGauge.getValue()));
    }
    /**
     * This test ensures that the SourceStreamTask properly serializes checkpointing and element
     * emission. This also verifies that there are no concurrent invocations of the checkpoint
     * method on the source operator.
     *
     * <p>The source emits elements and performs checkpoints. We have several checkpointer threads
     * that fire checkpoint requests at the source task.
     *
     * <p>If element emission and checkpointing are not in series the count of elements at the
     * beginning of a checkpoint and at the end of a checkpoint are not the same because the source
     * kept emitting elements while the checkpoint was ongoing.
     */
    @Test
    @SuppressWarnings("unchecked")
    public void testCheckpointing() throws Exception {
        final int numElements = 100;
        final int numCheckpoints = 100;
        final int numCheckpointers = 1;
        final int checkpointInterval = 5;
        // Number of busy-work iterations performed inside snapshotState (see MockSource).
        final int sourceCheckpointDelay =
                1000;
        final int sourceReadDelay = 1;
        ExecutorService executor = Executors.newFixedThreadPool(10);
        try {
            final TupleTypeInfo<Tuple2<Long, Integer>> typeInfo =
                    new TupleTypeInfo<>(BasicTypeInfo.LONG_TYPE_INFO, BasicTypeInfo.INT_TYPE_INFO);
            final StreamTaskTestHarness<Tuple2<Long, Integer>> testHarness =
                    new StreamTaskTestHarness<>(SourceStreamTask::new, typeInfo);
            testHarness.setupOutputForSingletonOperatorChain();
            StreamConfig streamConfig = testHarness.getStreamConfig();
            StreamSource<Tuple2<Long, Integer>, ?> sourceOperator =
                    new StreamSource<>(
                            new MockSource(numElements, sourceCheckpointDelay, sourceReadDelay));
            streamConfig.setStreamOperator(sourceOperator);
            streamConfig.setOperatorID(new OperatorID());
            Future<Boolean>[] checkpointerResults = new Future[numCheckpointers];
            testHarness.invoke();
            testHarness.waitForTaskRunning();
            final StreamTask<Tuple2<Long, Integer>, ?> sourceTask = testHarness.getTask();
            // Fire checkpoint requests concurrently with element emission.
            for (int i = 0; i < numCheckpointers; i++) {
                checkpointerResults[i] =
                        executor.submit(
                                new Checkpointer(numCheckpoints, checkpointInterval, sourceTask));
            }
            testHarness.waitForTaskCompletion();
            // Cancel stragglers; get() re-throws any checkpointer failure.
            for (int i = 0; i < numCheckpointers; i++) {
                if (!checkpointerResults[i].isDone()) {
                    checkpointerResults[i].cancel(true);
                }
                if (!checkpointerResults[i].isCancelled()) {
                    checkpointerResults[i].get();
                }
            }
            List<Tuple2<Long, Integer>> resultElements =
                    TestHarnessUtil.getRawElementsFromOutput(testHarness.getOutput());
            Assert.assertEquals(numElements, resultElements.size());
        } finally {
            executor.shutdown();
        }
    }
    // Verifies the shutdown order of a chained source task: endInput() then close() on the
    // source, followed by endInput() then close() on the downstream operator.
    @Test
    public void testClosingAllOperatorsOnChainProperly() throws Exception {
        final StreamTaskTestHarness<String> testHarness =
                new StreamTaskTestHarness<>(SourceStreamTask::new, STRING_TYPE_INFO);
        testHarness
                .setupOperatorChain(
                        new OperatorID(),
                        new OutputRecordInCloseTestSource<>(
                                "Source0",
                                new FromElementsFunction<>(StringSerializer.INSTANCE, "Hello")))
                .chain(
                        new OperatorID(),
                        new TestBoundedOneInputStreamOperator("Operator1"),
                        STRING_TYPE_INFO.createSerializer(new ExecutionConfig()))
                .finish();
        StreamConfig streamConfig = testHarness.getStreamConfig();
        streamConfig.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);
        testHarness.invoke();
        testHarness.waitForTaskCompletion();
        // Expected records reflect the strict chain shutdown ordering.
        ArrayList<StreamRecord<String>> expected = new ArrayList<>();
        Collections.addAll(
                expected,
                new StreamRecord<>("Hello"),
                new StreamRecord<>("[Source0]: End of input"),
                new StreamRecord<>("[Source0]: Bye"),
                new StreamRecord<>("[Operator1]: End of input"),
                new StreamRecord<>("[Operator1]: Bye"));
        final Object[] output = testHarness.getOutput().toArray();
        assertArrayEquals("Output was not correct.", expected.toArray(), output);
    }
    // Verifies that cancelling the task does NOT emit end-of-input markers downstream: only the
    // record emitted before cancellation must appear in the output.
    @Test
    public void testNotMarkingEndOfInputWhenTaskCancelled() throws Exception {
        final StreamTaskTestHarness<String> testHarness =
                new StreamTaskTestHarness<>(SourceStreamTask::new, STRING_TYPE_INFO);
        testHarness
                .setupOperatorChain(
                        new OperatorID(),
                        new StreamSource<>(
                                new CancelTestSource(
                                        STRING_TYPE_INFO.createSerializer(new ExecutionConfig()),
                                        "Hello")))
                .chain(
                        new OperatorID(),
                        new TestBoundedOneInputStreamOperator("Operator1"),
                        STRING_TYPE_INFO.createSerializer(new ExecutionConfig()))
                .finish();
        StreamConfig streamConfig = testHarness.getStreamConfig();
        streamConfig.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);
        ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();
        testHarness.invoke();
        // Wait until the source has emitted its data, then cancel mid-flight.
        CancelTestSource.getDataProcessing().await();
        testHarness.getTask().cancel();
        try {
            testHarness.waitForTaskCompletion();
        } catch (Throwable t) {
            // CancelTaskException is the expected outcome of cancellation.
            if (!ExceptionUtils.findThrowable(t, CancelTaskException.class).isPresent()) {
                throw t;
            }
        }
        expectedOutput.add(new StreamRecord<>("Hello"));
        TestHarnessUtil.assertOutputEquals(
                "Output was not correct.", expectedOutput, testHarness.getOutput());
    }
    // The four variants of the cancellation-with-blocked-source scenario; the two booleans are
    // (withPendingMail, throwInCancel) — see testCancellationWithSourceBlockedOnLock below.
    @Test
    public void testCancellationWithSourceBlockedOnLock() throws Exception {
        testCancellationWithSourceBlockedOnLock(false, false);
    }
    @Test
    public void testCancellationWithSourceBlockedOnLockWithPendingMail() throws Exception {
        testCancellationWithSourceBlockedOnLock(true, false);
    }
    @Test
    public void testCancellationWithSourceBlockedOnLockAndThrowingOnError() throws Exception {
        testCancellationWithSourceBlockedOnLock(false, true);
    }
    @Test
    public void testCancellationWithSourceBlockedOnLockWithPendingMailAndThrowingOnError()
            throws Exception {
        testCancellationWithSourceBlockedOnLock(true, true);
    }
    /**
     * Note that this test is testing also for the shared cancellation logic inside {@link
     * StreamTask} which, as of the time this test is being written, is not tested anywhere else
     * (like {@link StreamTaskTest} or {@link OneInputStreamTaskTest}).
     *
     * @param withPendingMail whether to enqueue a mail that must only run after cancellation
     * @param throwInCancel whether the source's cancel() throws an ExpectedTestException
     */
    public void testCancellationWithSourceBlockedOnLock(
            boolean withPendingMail, boolean throwInCancel) throws Exception {
        final StreamTaskTestHarness<String> testHarness =
                new StreamTaskTestHarness<>(SourceStreamTask::new, STRING_TYPE_INFO);
        CancelLockingSource.reset();
        testHarness
                .setupOperatorChain(
                        new OperatorID(),
                        new StreamSource<>(new CancelLockingSource(throwInCancel)))
                .chain(
                        new OperatorID(),
                        new TestBoundedOneInputStreamOperator("Operator1"),
                        STRING_TYPE_INFO.createSerializer(new ExecutionConfig()))
                .finish();
        StreamConfig streamConfig = testHarness.getStreamConfig();
        streamConfig.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);
        testHarness.invoke();
        // Wait until the source holds the checkpoint lock and is blocked.
        CancelLockingSource.awaitRunning();
        if (withPendingMail) {
            // A pending mail must not be executed while the source still blocks the task.
            testHarness
                    .getTask()
                    .getMailboxExecutorFactory()
                    .createExecutor(0)
                    .execute(
                            () ->
                                    assertFalse(
                                            "This should never execute before task cancelation",
                                            testHarness.getTask().isRunning()),
                            "Test");
        }
        try {
            testHarness.getTask().cancel();
        } catch (ExpectedTestException e) {
            // Only legal when the source was configured to throw from cancel().
            checkState(throwInCancel);
        }
        try {
            testHarness.waitForTaskCompletion();
        } catch (Throwable t) {
            // Both interruption and cancellation outcomes are acceptable here.
            if (!ExceptionUtils.findThrowable(t, InterruptedException.class).isPresent()
                    && !ExceptionUtils.findThrowable(t, CancelTaskException.class).isPresent()) {
                throw t;
            }
        }
    }
    /** A source that locks if cancellation attempts to cleanly shut down. */
    public static class CancelLockingSource implements SourceFunction<String> {
        private static final long serialVersionUID = 8713065281092996042L;
        // Completed once run() holds the checkpoint lock; reset() renews it between tests.
        private static CompletableFuture<Void> isRunning = new CompletableFuture<>();
        private final boolean throwOnCancel;
        private volatile boolean cancelled = false;
        public CancelLockingSource(boolean throwOnCancel) {
            this.throwOnCancel = throwOnCancel;
        }
        public static void reset() {
            isRunning = new CompletableFuture<>();
        }
        public static void awaitRunning() throws ExecutionException, InterruptedException {
            isRunning.get();
        }
        @Override
        public void run(SourceContext<String> ctx) throws Exception {
            // Holds the checkpoint lock while sleeping, so a clean shutdown cannot proceed.
            synchronized (ctx.getCheckpointLock()) {
                while (!cancelled) {
                    isRunning.complete(null);
                    if (throwOnCancel) {
                        Thread.sleep(1000000000);
                    } else {
                        try {
                            Thread.sleep(1000000000);
                        } catch (InterruptedException ignored) {
                            // Deliberately swallowed: the source must stay blocked until
                            // cancel() flips the flag, to exercise forced cancellation.
                        }
                    }
                }
            }
        }
        @Override
        public void cancel() {
            if (throwOnCancel) {
                throw new ExpectedTestException();
            }
            cancelled = true;
        }
    }
    // An InterruptedException thrown by the source must surface from the task, whether raw...
    @Test
    public void testInterruptionExceptionNotSwallowed() throws Exception {
        testInterruptionExceptionNotSwallowed(InterruptedException::new);
    }
    // ...or wrapped in other exceptions.
    @Test
    public void testWrappedInterruptionExceptionNotSwallowed() throws Exception {
        testInterruptionExceptionNotSwallowed(
                () -> new RuntimeException(new FlinkRuntimeException(new InterruptedException())));
    }
    private void testInterruptionExceptionNotSwallowed(
            InterruptedSource.ExceptionGenerator exceptionGenerator) throws Exception {
        final StreamTaskTestHarness<String> testHarness =
                new StreamTaskTestHarness<>(SourceStreamTask::new, STRING_TYPE_INFO);
        // NOTE(review): this resets CancelLockingSource state although the test uses
        // InterruptedSource — looks like copy-paste residue; harmless but worth confirming.
        CancelLockingSource.reset();
        testHarness
                .setupOperatorChain(
                        new OperatorID(),
                        new StreamSource<>(new InterruptedSource(exceptionGenerator)))
                .chain(
                        new OperatorID(),
                        new TestBoundedOneInputStreamOperator("Operator1"),
                        STRING_TYPE_INFO.createSerializer(new ExecutionConfig()))
                .finish();
        StreamConfig streamConfig = testHarness.getStreamConfig();
        streamConfig.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);
        testHarness.invoke();
        try {
            testHarness.waitForTaskCompletion();
        } catch (Exception e) {
            // The InterruptedException must be findable in the failure cause chain.
            if (!ExceptionUtils.findThrowable(e, InterruptedException.class).isPresent()) {
                throw e;
            }
        }
    }
/** A source that locks if cancellation attempts to cleanly shut down. */
public static class InterruptedSource implements SourceFunction<String> {
interface ExceptionGenerator extends CheckedSupplier<Exception>, Serializable {}
private static final long serialVersionUID = 8713065281092996042L;
private ExceptionGenerator exceptionGenerator;
public InterruptedSource(final ExceptionGenerator exceptionGenerator) {
this.exceptionGenerator = exceptionGenerator;
}
@Override
public void run(SourceContext<String> ctx) throws Exception {
synchronized (ctx.getCheckpointLock()) {
Thread.currentThread().interrupt();
throw exceptionGenerator.get();
}
}
@Override
public void cancel() {}
}
    /** If finishing a task doesn't swallow exceptions this test would fail with an exception. */
    @Test
    public void finishingIgnoresExceptions() throws Exception {
        final StreamTaskTestHarness<String> testHarness =
                new StreamTaskTestHarness<>(SourceStreamTask::new, STRING_TYPE_INFO);
        final CompletableFuture<Void> operatorRunningWaitingFuture = new CompletableFuture<>();
        ExceptionThrowingSource.setIsInRunLoopFuture(operatorRunningWaitingFuture);
        testHarness.setupOutputForSingletonOperatorChain();
        StreamConfig streamConfig = testHarness.getStreamConfig();
        streamConfig.setStreamOperator(new StreamSource<>(new ExceptionThrowingSource()));
        streamConfig.setOperatorID(new OperatorID());
        testHarness.invoke();
        // Wait until the source is inside its emit loop before finishing the task.
        operatorRunningWaitingFuture.get();
        testHarness.getTask().finishTask();
        testHarness.waitForTaskCompletion();
    }
    // Cancellation must block until the (uncooperative) source thread actually terminates.
    @Test
    public void testWaitsForSourceThreadOnCancel() throws Exception {
        StreamTaskTestHarness<String> harness =
                new StreamTaskTestHarness<>(SourceStreamTask::new, STRING_TYPE_INFO);
        harness.setupOutputForSingletonOperatorChain();
        harness.getStreamConfig().setStreamOperator(new StreamSource<>(new NonStoppingSource()));
        harness.invoke();
        NonStoppingSource.waitForStart();
        harness.getTask().cancel();
        // The task thread must still be alive while the source refuses to stop.
        harness.waitForTaskCompletion(500, true);
        assertTrue(harness.taskThread.isAlive());
        NonStoppingSource.forceCancel();
        harness.waitForTaskCompletion(Long.MAX_VALUE, true);
    }
private static class MockSource
implements SourceFunction<Tuple2<Long, Integer>>, ListCheckpointed<Serializable> {
private static final long serialVersionUID = 1;
private int maxElements;
private int checkpointDelay;
private int readDelay;
private volatile int count;
private volatile long lastCheckpointId = -1;
private Semaphore semaphore;
private volatile boolean isRunning = true;
public MockSource(int maxElements, int checkpointDelay, int readDelay) {
this.maxElements = maxElements;
this.checkpointDelay = checkpointDelay;
this.readDelay = readDelay;
this.count = 0;
semaphore = new Semaphore(1);
}
@Override
public void run(SourceContext<Tuple2<Long, Integer>> ctx) {
while (isRunning && count < maxElements) {
try {
Thread.sleep(readDelay);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
synchronized (ctx.getCheckpointLock()) {
ctx.collect(new Tuple2<>(lastCheckpointId, count));
count++;
}
}
}
@Override
public void cancel() {
isRunning = false;
}
@Override
public List<Serializable> snapshotState(long checkpointId, long timestamp)
throws Exception {
if (!semaphore.tryAcquire()) {
Assert.fail("Concurrent invocation of snapshotState.");
}
int startCount = count;
lastCheckpointId = checkpointId;
long sum = 0;
for (int i = 0; i < checkpointDelay; i++) {
sum += new Random().nextLong();
}
if (startCount != count) {
semaphore.release();
Assert.fail("Count is different at start end end of snapshot.");
}
semaphore.release();
return Collections.singletonList(sum);
}
@Override
public void restoreState(List<Serializable> state) throws Exception {}
}
    /** This calls triggerCheckpointAsync on the given task with the given interval. */
    private static class Checkpointer implements Callable<Boolean> {
        private final int numCheckpoints;
        private final int checkpointInterval;
        // Monotonically increasing checkpoint id shared across invocations.
        private final AtomicLong checkpointId;
        private final StreamTask<Tuple2<Long, Integer>, ?> sourceTask;
        Checkpointer(
                int numCheckpoints,
                int checkpointInterval,
                StreamTask<Tuple2<Long, Integer>, ?> task) {
            this.numCheckpoints = numCheckpoints;
            checkpointId = new AtomicLong(0);
            sourceTask = task;
            this.checkpointInterval = checkpointInterval;
        }
        /**
         * @return true if all checkpoints were triggered; false if the task stopped accepting
         *     triggers (RejectedExecutionException) before we were done.
         */
        @Override
        public Boolean call() throws Exception {
            for (int i = 0; i < numCheckpoints; i++) {
                long currentCheckpointId = checkpointId.getAndIncrement();
                try {
                    sourceTask.triggerCheckpointAsync(
                            new CheckpointMetaData(currentCheckpointId, 0L),
                            CheckpointOptions.forCheckpointWithDefaultLocation(),
                            false);
                } catch (RejectedExecutionException e) {
                    // Task already finished/shut down — stop triggering.
                    return false;
                }
                Thread.sleep(checkpointInterval);
            }
            return true;
        }
    }
    /**
     * A source that ignores both cancel() and interrupts; it only stops when forceCancel() is
     * called, letting tests observe that cancellation waits for the source thread.
     */
    private static class NonStoppingSource implements SourceFunction<String> {
        private static final long serialVersionUID = 1L;
        private static boolean running = true;
        private static CompletableFuture<Void> startFuture = new CompletableFuture<>();
        @Override
        public void run(SourceContext<String> ctx) throws Exception {
            startFuture.complete(null);
            while (running) {
                try {
                    Thread.sleep(500);
                } catch (InterruptedException e) {
                    // Deliberately ignored: this source must not stop on interrupt.
                }
            }
        }
        @Override
        public void cancel() {
            // Deliberately empty: only forceCancel() stops this source.
        }
        static void forceCancel() {
            running = false;
        }
        static void waitForStart() {
            startFuture.join();
        }
    }
    /**
     * Source that records open()/close() invocations in static flags and fails if the lifecycle
     * methods are invoked out of order (close before open, run before open).
     */
    private static class OpenCloseTestSource extends RichSourceFunction<String> {
        private static final long serialVersionUID = 1L;
        public static boolean openCalled = false;
        public static boolean closeCalled = false;
        OpenCloseTestSource() {
            // Reset the static flags so each test instance starts clean.
            openCalled = false;
            closeCalled = false;
        }
        @Override
        public void open(Configuration parameters) throws Exception {
            super.open(parameters);
            if (closeCalled) {
                Assert.fail("Close called before open.");
            }
            openCalled = true;
        }
        @Override
        public void close() throws Exception {
            super.close();
            if (!openCalled) {
                Assert.fail("Open was not called before close.");
            }
            closeCalled = true;
        }
        @Override
        public void run(SourceContext<String> ctx) throws Exception {
            if (!openCalled) {
                Assert.fail("Open was not called before run.");
            }
            // Emits exactly 10 elements; tests assert on this count.
            for (int i = 0; i < 10; i++) {
                ctx.collect("Hello" + i);
            }
        }
        @Override
        public void cancel() {}
    }
    /**
     * A {@link FromElementsFunction} that, after emitting its elements, signals via
     * {@link #getDataProcessing()} and then blocks until {@link #cancel()} is called.
     */
    private static class CancelTestSource extends FromElementsFunction<String> {
        private static final long serialVersionUID = 8713065281092996067L;
        // Triggered once all elements have been emitted.
        private static MultiShotLatch dataProcessing = new MultiShotLatch();
        // Released by cancel(); keeps run() blocked until then.
        private static MultiShotLatch cancellationWaiting = new MultiShotLatch();
        public CancelTestSource(TypeSerializer<String> serializer, String... elements)
                throws IOException {
            super(serializer, elements);
        }
        @Override
        public void run(SourceContext<String> ctx) throws Exception {
            super.run(ctx);
            dataProcessing.trigger();
            cancellationWaiting.await();
        }
        @Override
        public void cancel() {
            super.cancel();
            cancellationWaiting.trigger();
        }
        public static MultiShotLatch getDataProcessing() {
            return dataProcessing;
        }
    }
    /**
     * A {@link SourceFunction} that keeps emitting and, once {@link #cancel()} flips the running
     * flag, throws a {@link TestException} from {@link #run}.
     */
    private static class ExceptionThrowingSource implements SourceFunction<String> {
        // Completed once run() has entered its emit loop; set per test via the static setter.
        private static volatile CompletableFuture<Void> isInRunLoop;
        private volatile boolean running = true;
        public static class TestException extends RuntimeException {
            public TestException(String message) {
                super(message);
            }
        }
        public static void setIsInRunLoopFuture(
                @Nonnull final CompletableFuture<Void> waitingLatch) {
            ExceptionThrowingSource.isInRunLoop = waitingLatch;
        }
        @Override
        public void run(SourceContext<String> ctx) throws TestException {
            checkState(isInRunLoop != null && !isInRunLoop.isDone());
            while (running) {
                if (!isInRunLoop.isDone()) {
                    isInRunLoop.complete(null);
                }
                ctx.collect("hello");
            }
            // Reached only after cancel(); the exception must be swallowed by finishTask paths.
            throw new TestException("Oh no, we're failing.");
        }
        @Override
        public void cancel() {
            running = false;
        }
    }
    /**
     * A {@link StreamSource} that emits marker records from endInput() and close(), and registers
     * a processing-time timer during close(), so tests can assert the shutdown ordering.
     */
    private static final class OutputRecordInCloseTestSource<SRC extends SourceFunction<String>>
            extends StreamSource<String, SRC> implements BoundedOneInput {
        private final String name;
        public OutputRecordInCloseTestSource(String name, SRC sourceFunction) {
            super(sourceFunction);
            this.name = name;
        }
        @Override
        public void endInput() {
            output("[" + name + "]: End of input");
        }
        @Override
        public void close() throws Exception {
            ProcessingTimeService timeService = getProcessingTimeService();
            // Timer registered during close(); firing it after shutdown must be safe.
            timeService.registerTimer(
                    timeService.getCurrentProcessingTime(),
                    t -> output("[" + name + "]: Timer registered in close"));
            output("[" + name + "]: Bye");
            super.close();
        }
        private void output(String record) {
            output.collect(new StreamRecord<>(record));
        }
    }
} | class SourceStreamTaskTest {
@Test
/** This test verifies that open() and close() are correctly called by the StreamTask. */
@Test
public void testOpenClose() throws Exception {
final StreamTaskTestHarness<String> testHarness =
new StreamTaskTestHarness<>(SourceStreamTask::new, STRING_TYPE_INFO);
testHarness.setupOutputForSingletonOperatorChain();
StreamConfig streamConfig = testHarness.getStreamConfig();
StreamSource<String, ?> sourceOperator = new StreamSource<>(new OpenCloseTestSource());
streamConfig.setStreamOperator(sourceOperator);
streamConfig.setOperatorID(new OperatorID());
testHarness.invoke();
testHarness.waitForTaskCompletion();
assertTrue("RichFunction methods where not called.", OpenCloseTestSource.closeCalled);
List<String> resultElements =
TestHarnessUtil.getRawElementsFromOutput(testHarness.getOutput());
Assert.assertEquals(10, resultElements.size());
}
@Test(timeout = 60_000)
public void testMetrics() throws Exception {
long sleepTime = 42;
StreamTaskMailboxTestHarnessBuilder<String> builder =
new StreamTaskMailboxTestHarnessBuilder<>(SourceStreamTask::new, STRING_TYPE_INFO);
final Map<String, Metric> metrics = new ConcurrentHashMap<>();
final TaskMetricGroup taskMetricGroup =
new StreamTaskTestHarness.TestTaskMetricGroup(metrics);
StreamTaskMailboxTestHarness<String> harness =
builder.setupOutputForSingletonOperatorChain(
new StreamSource<>(
new CancelTestSource(
STRING_TYPE_INFO.createSerializer(
new ExecutionConfig()),
"Hello")))
.setTaskMetricGroup(taskMetricGroup)
.build();
Future<Boolean> triggerFuture =
harness.streamTask.triggerCheckpointAsync(
new CheckpointMetaData(1L, System.currentTimeMillis()),
CheckpointOptions.forCheckpointWithDefaultLocation(),
false);
assertFalse(triggerFuture.isDone());
Thread.sleep(sleepTime);
while (!triggerFuture.isDone()) {
harness.streamTask.runMailboxStep();
}
Gauge<Long> checkpointStartDelayGauge =
(Gauge<Long>) metrics.get(MetricNames.CHECKPOINT_START_DELAY_TIME);
assertThat(
checkpointStartDelayGauge.getValue(), greaterThanOrEqualTo(sleepTime * 1_000_000));
Gauge<Double> busyTimeGauge = (Gauge<Double>) metrics.get(MetricNames.TASK_BUSY_TIME);
assertTrue(Double.isNaN(busyTimeGauge.getValue()));
}
/**
* This test ensures that the SourceStreamTask properly serializes checkpointing and element
* emission. This also verifies that there are no concurrent invocations of the checkpoint
* method on the source operator.
*
* <p>The source emits elements and performs checkpoints. We have several checkpointer threads
* that fire checkpoint requests at the source task.
*
* <p>If element emission and checkpointing are not in series the count of elements at the
* beginning of a checkpoint and at the end of a checkpoint are not the same because the source
* kept emitting elements while the checkpoint was ongoing.
*/
@Test
@SuppressWarnings("unchecked")
public void testCheckpointing() throws Exception {
final int numElements = 100;
final int numCheckpoints = 100;
final int numCheckpointers = 1;
final int checkpointInterval = 5;
final int sourceCheckpointDelay =
1000;
final int sourceReadDelay = 1;
ExecutorService executor = Executors.newFixedThreadPool(10);
try {
final TupleTypeInfo<Tuple2<Long, Integer>> typeInfo =
new TupleTypeInfo<>(BasicTypeInfo.LONG_TYPE_INFO, BasicTypeInfo.INT_TYPE_INFO);
final StreamTaskTestHarness<Tuple2<Long, Integer>> testHarness =
new StreamTaskTestHarness<>(SourceStreamTask::new, typeInfo);
testHarness.setupOutputForSingletonOperatorChain();
StreamConfig streamConfig = testHarness.getStreamConfig();
StreamSource<Tuple2<Long, Integer>, ?> sourceOperator =
new StreamSource<>(
new MockSource(numElements, sourceCheckpointDelay, sourceReadDelay));
streamConfig.setStreamOperator(sourceOperator);
streamConfig.setOperatorID(new OperatorID());
Future<Boolean>[] checkpointerResults = new Future[numCheckpointers];
testHarness.invoke();
testHarness.waitForTaskRunning();
final StreamTask<Tuple2<Long, Integer>, ?> sourceTask = testHarness.getTask();
for (int i = 0; i < numCheckpointers; i++) {
checkpointerResults[i] =
executor.submit(
new Checkpointer(numCheckpoints, checkpointInterval, sourceTask));
}
testHarness.waitForTaskCompletion();
for (int i = 0; i < numCheckpointers; i++) {
if (!checkpointerResults[i].isDone()) {
checkpointerResults[i].cancel(true);
}
if (!checkpointerResults[i].isCancelled()) {
checkpointerResults[i].get();
}
}
List<Tuple2<Long, Integer>> resultElements =
TestHarnessUtil.getRawElementsFromOutput(testHarness.getOutput());
Assert.assertEquals(numElements, resultElements.size());
} finally {
executor.shutdown();
}
}
@Test
public void testClosingAllOperatorsOnChainProperly() throws Exception {
final StreamTaskTestHarness<String> testHarness =
new StreamTaskTestHarness<>(SourceStreamTask::new, STRING_TYPE_INFO);
testHarness
.setupOperatorChain(
new OperatorID(),
new OutputRecordInCloseTestSource<>(
"Source0",
new FromElementsFunction<>(StringSerializer.INSTANCE, "Hello")))
.chain(
new OperatorID(),
new TestBoundedOneInputStreamOperator("Operator1"),
STRING_TYPE_INFO.createSerializer(new ExecutionConfig()))
.finish();
StreamConfig streamConfig = testHarness.getStreamConfig();
streamConfig.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);
testHarness.invoke();
testHarness.waitForTaskCompletion();
ArrayList<StreamRecord<String>> expected = new ArrayList<>();
Collections.addAll(
expected,
new StreamRecord<>("Hello"),
new StreamRecord<>("[Source0]: End of input"),
new StreamRecord<>("[Source0]: Bye"),
new StreamRecord<>("[Operator1]: End of input"),
new StreamRecord<>("[Operator1]: Bye"));
final Object[] output = testHarness.getOutput().toArray();
assertArrayEquals("Output was not correct.", expected.toArray(), output);
}
@Test
public void testNotMarkingEndOfInputWhenTaskCancelled() throws Exception {
final StreamTaskTestHarness<String> testHarness =
new StreamTaskTestHarness<>(SourceStreamTask::new, STRING_TYPE_INFO);
testHarness
.setupOperatorChain(
new OperatorID(),
new StreamSource<>(
new CancelTestSource(
STRING_TYPE_INFO.createSerializer(new ExecutionConfig()),
"Hello")))
.chain(
new OperatorID(),
new TestBoundedOneInputStreamOperator("Operator1"),
STRING_TYPE_INFO.createSerializer(new ExecutionConfig()))
.finish();
StreamConfig streamConfig = testHarness.getStreamConfig();
streamConfig.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);
ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();
testHarness.invoke();
CancelTestSource.getDataProcessing().await();
testHarness.getTask().cancel();
try {
testHarness.waitForTaskCompletion();
} catch (Throwable t) {
if (!ExceptionUtils.findThrowable(t, CancelTaskException.class).isPresent()) {
throw t;
}
}
expectedOutput.add(new StreamRecord<>("Hello"));
TestHarnessUtil.assertOutputEquals(
"Output was not correct.", expectedOutput, testHarness.getOutput());
}
@Test
public void testCancellationWithSourceBlockedOnLock() throws Exception {
testCancellationWithSourceBlockedOnLock(false, false);
}
@Test
public void testCancellationWithSourceBlockedOnLockWithPendingMail() throws Exception {
testCancellationWithSourceBlockedOnLock(true, false);
}
@Test
public void testCancellationWithSourceBlockedOnLockAndThrowingOnError() throws Exception {
testCancellationWithSourceBlockedOnLock(false, true);
}
@Test
public void testCancellationWithSourceBlockedOnLockWithPendingMailAndThrowingOnError()
throws Exception {
testCancellationWithSourceBlockedOnLock(true, true);
}
/**
* Note that this test is testing also for the shared cancellation logic inside {@link
* StreamTask} which, as of the time this test is being written, is not tested anywhere else
* (like {@link StreamTaskTest} or {@link OneInputStreamTaskTest}).
*/
public void testCancellationWithSourceBlockedOnLock(
boolean withPendingMail, boolean throwInCancel) throws Exception {
final StreamTaskTestHarness<String> testHarness =
new StreamTaskTestHarness<>(SourceStreamTask::new, STRING_TYPE_INFO);
CancelLockingSource.reset();
testHarness
.setupOperatorChain(
new OperatorID(),
new StreamSource<>(new CancelLockingSource(throwInCancel)))
.chain(
new OperatorID(),
new TestBoundedOneInputStreamOperator("Operator1"),
STRING_TYPE_INFO.createSerializer(new ExecutionConfig()))
.finish();
StreamConfig streamConfig = testHarness.getStreamConfig();
streamConfig.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);
testHarness.invoke();
CancelLockingSource.awaitRunning();
if (withPendingMail) {
testHarness
.getTask()
.getMailboxExecutorFactory()
.createExecutor(0)
.execute(
() ->
assertFalse(
"This should never execute before task cancelation",
testHarness.getTask().isRunning()),
"Test");
}
try {
testHarness.getTask().cancel();
} catch (ExpectedTestException e) {
checkState(throwInCancel);
}
try {
testHarness.waitForTaskCompletion();
} catch (Throwable t) {
if (!ExceptionUtils.findThrowable(t, InterruptedException.class).isPresent()
&& !ExceptionUtils.findThrowable(t, CancelTaskException.class).isPresent()) {
throw t;
}
}
}
    /**
     * A source that blocks inside {@code run()} while holding the checkpoint lock, so
     * cancellation has to cope with a source that does not shut down cleanly. Depending
     * on {@code throwOnCancel} it either throws from {@code cancel()} or swallows the
     * interrupt and keeps sleeping until {@code cancel()} flips the flag.
     */
    public static class CancelLockingSource implements SourceFunction<String> {
        private static final long serialVersionUID = 8713065281092996042L;
        // Completed once run() has entered its loop. Static because the task runs a
        // deserialized copy of this function; reset per test via reset().
        private static CompletableFuture<Void> isRunning = new CompletableFuture<>();
        private final boolean throwOnCancel;
        private volatile boolean cancelled = false;
        public CancelLockingSource(boolean throwOnCancel) {
            this.throwOnCancel = throwOnCancel;
        }
        public static void reset() {
            isRunning = new CompletableFuture<>();
        }
        public static void awaitRunning() throws ExecutionException, InterruptedException {
            isRunning.get();
        }
        @Override
        public void run(SourceContext<String> ctx) throws Exception {
            // Deliberately holds the checkpoint lock for the whole run loop.
            synchronized (ctx.getCheckpointLock()) {
                while (!cancelled) {
                    isRunning.complete(null);
                    if (throwOnCancel) {
                        // Let an interrupt propagate: the sleep throws and run() exits.
                        Thread.sleep(1000000000);
                    } else {
                        try {
                            Thread.sleep(1000000000);
                        } catch (InterruptedException ignored) {
                            // Swallowed on purpose: simulates a source that ignores
                            // interrupts and only stops once cancel() sets the flag.
                        }
                    }
                }
            }
        }
        @Override
        public void cancel() {
            if (throwOnCancel) {
                throw new ExpectedTestException();
            }
            cancelled = true;
        }
    }
    @Test
    public void testInterruptionExceptionNotSwallowed() throws Exception {
        // Plain InterruptedException thrown directly by the source.
        testInterruptionExceptionNotSwallowed(InterruptedException::new);
    }
    @Test
    public void testWrappedInterruptionExceptionNotSwallowed() throws Exception {
        // An InterruptedException buried under two wrappers must still surface.
        testInterruptionExceptionNotSwallowed(
                () -> new RuntimeException(new FlinkRuntimeException(new InterruptedException())));
    }
    // Runs a source task whose source interrupts its own thread and then throws the
    // generated exception; verifies the interruption-related failure is not swallowed
    // by the task's shutdown path.
    private void testInterruptionExceptionNotSwallowed(
            InterruptedSource.ExceptionGenerator exceptionGenerator) throws Exception {
        final StreamTaskTestHarness<String> testHarness =
                new StreamTaskTestHarness<>(SourceStreamTask::new, STRING_TYPE_INFO);
        CancelLockingSource.reset();
        testHarness
                .setupOperatorChain(
                        new OperatorID(),
                        new StreamSource<>(new InterruptedSource(exceptionGenerator)))
                .chain(
                        new OperatorID(),
                        new TestBoundedOneInputStreamOperator("Operator1"),
                        STRING_TYPE_INFO.createSerializer(new ExecutionConfig()))
                .finish();
        StreamConfig streamConfig = testHarness.getStreamConfig();
        streamConfig.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);
        testHarness.invoke();
        try {
            testHarness.waitForTaskCompletion();
        } catch (Exception e) {
            // An InterruptedException anywhere in the cause chain is the expected outcome.
            if (!ExceptionUtils.findThrowable(e, InterruptedException.class).isPresent()) {
                throw e;
            }
        }
    }
/** A source that locks if cancellation attempts to cleanly shut down. */
public static class InterruptedSource implements SourceFunction<String> {
interface ExceptionGenerator extends CheckedSupplier<Exception>, Serializable {}
private static final long serialVersionUID = 8713065281092996042L;
private ExceptionGenerator exceptionGenerator;
public InterruptedSource(final ExceptionGenerator exceptionGenerator) {
this.exceptionGenerator = exceptionGenerator;
}
@Override
public void run(SourceContext<String> ctx) throws Exception {
synchronized (ctx.getCheckpointLock()) {
Thread.currentThread().interrupt();
throw exceptionGenerator.get();
}
}
@Override
public void cancel() {}
}
    /** If finishing a task doesn't swallow exceptions this test would fail with an exception. */
    @Test
    public void finishingIgnoresExceptions() throws Exception {
        final StreamTaskTestHarness<String> testHarness =
                new StreamTaskTestHarness<>(SourceStreamTask::new, STRING_TYPE_INFO);
        final CompletableFuture<Void> operatorRunningWaitingFuture = new CompletableFuture<>();
        ExceptionThrowingSource.setIsInRunLoopFuture(operatorRunningWaitingFuture);
        testHarness.setupOutputForSingletonOperatorChain();
        StreamConfig streamConfig = testHarness.getStreamConfig();
        streamConfig.setStreamOperator(new StreamSource<>(new ExceptionThrowingSource()));
        streamConfig.setOperatorID(new OperatorID());
        testHarness.invoke();
        // Wait until the source is in its run loop, then "finish" the task; the
        // TestException the source throws on shutdown must be ignored.
        operatorRunningWaitingFuture.get();
        testHarness.getTask().finishTask();
        testHarness.waitForTaskCompletion();
    }
    @Test
    public void testWaitsForSourceThreadOnCancel() throws Exception {
        StreamTaskTestHarness<String> harness =
                new StreamTaskTestHarness<>(SourceStreamTask::new, STRING_TYPE_INFO);
        harness.setupOutputForSingletonOperatorChain();
        harness.getStreamConfig().setStreamOperator(new StreamSource<>(new NonStoppingSource()));
        harness.invoke();
        NonStoppingSource.waitForStart();
        harness.getTask().cancel();
        // The source ignores cancellation, so after a bounded wait the task thread
        // must still be alive.
        harness.waitForTaskCompletion(500, true);
        assertTrue(harness.taskThread.isAlive());
        // Only after forcing the source to stop may the task actually terminate.
        NonStoppingSource.forceCancel();
        harness.waitForTaskCompletion(Long.MAX_VALUE, true);
    }
private static class MockSource
implements SourceFunction<Tuple2<Long, Integer>>, ListCheckpointed<Serializable> {
private static final long serialVersionUID = 1;
private int maxElements;
private int checkpointDelay;
private int readDelay;
private volatile int count;
private volatile long lastCheckpointId = -1;
private Semaphore semaphore;
private volatile boolean isRunning = true;
public MockSource(int maxElements, int checkpointDelay, int readDelay) {
this.maxElements = maxElements;
this.checkpointDelay = checkpointDelay;
this.readDelay = readDelay;
this.count = 0;
semaphore = new Semaphore(1);
}
@Override
public void run(SourceContext<Tuple2<Long, Integer>> ctx) {
while (isRunning && count < maxElements) {
try {
Thread.sleep(readDelay);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
synchronized (ctx.getCheckpointLock()) {
ctx.collect(new Tuple2<>(lastCheckpointId, count));
count++;
}
}
}
@Override
public void cancel() {
isRunning = false;
}
@Override
public List<Serializable> snapshotState(long checkpointId, long timestamp)
throws Exception {
if (!semaphore.tryAcquire()) {
Assert.fail("Concurrent invocation of snapshotState.");
}
int startCount = count;
lastCheckpointId = checkpointId;
long sum = 0;
for (int i = 0; i < checkpointDelay; i++) {
sum += new Random().nextLong();
}
if (startCount != count) {
semaphore.release();
Assert.fail("Count is different at start end end of snapshot.");
}
semaphore.release();
return Collections.singletonList(sum);
}
@Override
public void restoreState(List<Serializable> state) throws Exception {}
}
    /** This calls triggerCheckpointAsync on the given task with the given interval. */
    private static class Checkpointer implements Callable<Boolean> {
        private final int numCheckpoints;
        private final int checkpointInterval;
        // Monotonically increasing checkpoint id.
        private final AtomicLong checkpointId;
        private final StreamTask<Tuple2<Long, Integer>, ?> sourceTask;
        Checkpointer(
                int numCheckpoints,
                int checkpointInterval,
                StreamTask<Tuple2<Long, Integer>, ?> task) {
            this.numCheckpoints = numCheckpoints;
            checkpointId = new AtomicLong(0);
            sourceTask = task;
            this.checkpointInterval = checkpointInterval;
        }
        // Returns false as soon as the task stops accepting checkpoints, true otherwise.
        @Override
        public Boolean call() throws Exception {
            for (int i = 0; i < numCheckpoints; i++) {
                long currentCheckpointId = checkpointId.getAndIncrement();
                try {
                    sourceTask.triggerCheckpointAsync(
                            new CheckpointMetaData(currentCheckpointId, 0L),
                            CheckpointOptions.forCheckpointWithDefaultLocation(),
                            false);
                } catch (RejectedExecutionException e) {
                    // The task has shut down its mailbox; no further checkpoints possible.
                    return false;
                }
                Thread.sleep(checkpointInterval);
            }
            return true;
        }
    }
private static class NonStoppingSource implements SourceFunction<String> {
private static final long serialVersionUID = 1L;
private static boolean running = true;
private static CompletableFuture<Void> startFuture = new CompletableFuture<>();
@Override
public void run(SourceContext<String> ctx) throws Exception {
startFuture.complete(null);
while (running) {
try {
Thread.sleep(500);
} catch (InterruptedException e) {
}
}
}
@Override
public void cancel() {
}
static void forceCancel() {
running = false;
}
static void waitForStart() {
startFuture.join();
}
}
private static class OpenCloseTestSource extends RichSourceFunction<String> {
private static final long serialVersionUID = 1L;
public static boolean openCalled = false;
public static boolean closeCalled = false;
OpenCloseTestSource() {
openCalled = false;
closeCalled = false;
}
@Override
public void open(Configuration parameters) throws Exception {
super.open(parameters);
if (closeCalled) {
Assert.fail("Close called before open.");
}
openCalled = true;
}
@Override
public void close() throws Exception {
super.close();
if (!openCalled) {
Assert.fail("Open was not called before close.");
}
closeCalled = true;
}
@Override
public void run(SourceContext<String> ctx) throws Exception {
if (!openCalled) {
Assert.fail("Open was not called before run.");
}
for (int i = 0; i < 10; i++) {
ctx.collect("Hello" + i);
}
}
@Override
public void cancel() {}
}
    /**
     * A {@link FromElementsFunction} that, after emitting all elements, signals the
     * test via {@code dataProcessing} and then blocks until {@code cancel()} releases it.
     */
    private static class CancelTestSource extends FromElementsFunction<String> {
        private static final long serialVersionUID = 8713065281092996067L;
        // Triggered once all elements have been emitted.
        private static MultiShotLatch dataProcessing = new MultiShotLatch();
        // Keeps run() alive until cancel() fires it.
        private static MultiShotLatch cancellationWaiting = new MultiShotLatch();
        public CancelTestSource(TypeSerializer<String> serializer, String... elements)
                throws IOException {
            super(serializer, elements);
        }
        @Override
        public void run(SourceContext<String> ctx) throws Exception {
            super.run(ctx);
            // All data emitted: signal the test, then block until cancelled.
            dataProcessing.trigger();
            cancellationWaiting.await();
        }
        @Override
        public void cancel() {
            super.cancel();
            cancellationWaiting.trigger();
        }
        public static MultiShotLatch getDataProcessing() {
            return dataProcessing;
        }
    }
    /**
     * A {@link SourceFunction} that emits records until cancelled and then throws a
     * {@link TestException} from {@code run()}. (Original javadoc was garbled; this
     * description follows the visible behavior of {@code run()}/{@code cancel()}.)
     */
    private static class ExceptionThrowingSource implements SourceFunction<String> {
        // Completed as soon as run() enters its loop; injected by the test beforehand.
        private static volatile CompletableFuture<Void> isInRunLoop;
        private volatile boolean running = true;
        public static class TestException extends RuntimeException {
            public TestException(String message) {
                super(message);
            }
        }
        public static void setIsInRunLoopFuture(
                @Nonnull final CompletableFuture<Void> waitingLatch) {
            ExceptionThrowingSource.isInRunLoop = waitingLatch;
        }
        @Override
        public void run(SourceContext<String> ctx) throws TestException {
            // The test must have installed a fresh, uncompleted future before invoke().
            checkState(isInRunLoop != null && !isInRunLoop.isDone());
            while (running) {
                if (!isInRunLoop.isDone()) {
                    isInRunLoop.complete(null);
                }
                ctx.collect("hello");
            }
            // Thrown after cancel(); finishing the task is expected to tolerate this.
            throw new TestException("Oh no, we're failing.");
        }
        @Override
        public void cancel() {
            running = false;
        }
    }
    /**
     * A {@link StreamSource} that emits marker records from {@code endInput()} and
     * {@code close()}, including one from a processing-time timer registered during
     * close, to verify the ordering of shutdown-time output.
     */
    private static final class OutputRecordInCloseTestSource<SRC extends SourceFunction<String>>
            extends StreamSource<String, SRC> implements BoundedOneInput {
        private final String name;
        public OutputRecordInCloseTestSource(String name, SRC sourceFunction) {
            super(sourceFunction);
            this.name = name;
        }
        @Override
        public void endInput() {
            output("[" + name + "]: End of input");
        }
        @Override
        public void close() throws Exception {
            // Register a timer for "now" to check that timers set during close()
            // can still emit records before the operator shuts down.
            ProcessingTimeService timeService = getProcessingTimeService();
            timeService.registerTimer(
                    timeService.getCurrentProcessingTime(),
                    t -> output("[" + name + "]: Timer registered in close"));
            output("[" + name + "]: Bye");
            super.close();
        }
        // Emits a marker record tagged with this source's name.
        private void output(String record) {
            output.collect(new StreamRecord<>(record));
        }
    }
} | |
What do you think about making `ModuleManager.loadedModules` to be `LinkedHashMap`? This can provide deterministic result for `listFullModules` and make the testing easier. If we do this, we should add a comment on the `loadedModules` to explain why we use `LinkedHashMap`. | public void testListFullModules() {
ModuleMock.ModuleZ x = new ModuleMock.ModuleZ("x");
ModuleMock.ModuleY y = new ModuleMock.ModuleY("y");
ModuleMock.ModuleZ z = new ModuleMock.ModuleZ("z");
manager.loadModule("y", y);
manager.loadModule("x", x);
manager.loadModule("z", z);
manager.useModules("z", "y");
assertEquals(
getExpectedModuleEntries(2, "z", "y", MODULE_TYPE_CORE, "x"),
getActualModuleEntries());
} | getActualModuleEntries()); | public void testListFullModules() {
ModuleMock x = new ModuleMock("x");
ModuleMock y = new ModuleMock("y");
ModuleMock z = new ModuleMock("z");
manager.loadModule("y", y);
manager.loadModule("x", x);
manager.loadModule("z", z);
manager.useModules("z", "y");
assertEquals(
getExpectedModuleEntries(2, "z", "y", MODULE_TYPE_CORE, "x"),
manager.listFullModules());
} | class ModuleManagerTest extends TestLogger {
private static final Comparator<ModuleEntry> COMPARATOR =
Comparator.comparing(ModuleEntry::name);
private ModuleManager manager;
@Rule public ExpectedException thrown = ExpectedException.none();
@Before
public void before() {
manager = new ModuleManager();
}
@Test
public void testLoadModuleTwice() {
assertEquals(Collections.singletonList(MODULE_TYPE_CORE), manager.getUsedModules());
assertEquals(CoreModule.INSTANCE, manager.getLoadedModules().get(MODULE_TYPE_CORE));
thrown.expect(ValidationException.class);
thrown.expectMessage("A module with name core already exists");
manager.loadModule(MODULE_TYPE_CORE, CoreModule.INSTANCE);
}
@Test
public void testLoadModuleWithoutUnusedModulesExist() {
ModuleMock.ModuleX x = new ModuleMock.ModuleX("x");
ModuleMock.ModuleY y = new ModuleMock.ModuleY("y");
ModuleMock.ModuleZ z = new ModuleMock.ModuleZ("z");
manager.loadModule(x.getType(), x);
manager.loadModule(y.getType(), y);
manager.loadModule(z.getType(), z);
Map<String, Module> expectedLoadedModules = new HashMap<>();
expectedLoadedModules.put(MODULE_TYPE_CORE, CoreModule.INSTANCE);
expectedLoadedModules.put("x", x);
expectedLoadedModules.put("y", y);
expectedLoadedModules.put("z", z);
assertEquals(Arrays.asList(MODULE_TYPE_CORE, "x", "y", "z"), manager.getUsedModules());
assertEquals(expectedLoadedModules, manager.getLoadedModules());
}
@Test
public void testLoadModuleWithUnusedModulesExist() {
ModuleMock.ModuleY y = new ModuleMock.ModuleY("y");
ModuleMock.ModuleZ z = new ModuleMock.ModuleZ("z");
manager.loadModule(y.getType(), y);
manager.loadModule(z.getType(), z);
Map<String, Module> expectedLoadedModules = new HashMap<>();
expectedLoadedModules.put(MODULE_TYPE_CORE, CoreModule.INSTANCE);
expectedLoadedModules.put("y", y);
expectedLoadedModules.put("z", z);
assertEquals(Arrays.asList(MODULE_TYPE_CORE, "y", "z"), manager.getUsedModules());
assertEquals(expectedLoadedModules, manager.getLoadedModules());
manager.getUsedModules().remove("z");
manager.getUsedModules().remove("y");
ModuleMock.ModuleX x = new ModuleMock.ModuleX("x");
manager.loadModule(x.getType(), x);
expectedLoadedModules.put("x", x);
assertEquals(Arrays.asList(MODULE_TYPE_CORE, "x"), manager.getUsedModules());
assertEquals(expectedLoadedModules, manager.getLoadedModules());
}
    @Test
    public void testUnloadModuleTwice() {
        assertEquals(Collections.singletonList(MODULE_TYPE_CORE), manager.getUsedModules());
        manager.unloadModule(MODULE_TYPE_CORE);
        assertEquals(Collections.emptyList(), manager.getUsedModules());
        assertEquals(Collections.emptyMap(), manager.getLoadedModules());
        // Unloading the same module a second time must fail loudly.
        thrown.expect(ValidationException.class);
        thrown.expectMessage("No module with name core exists");
        manager.unloadModule(MODULE_TYPE_CORE);
    }
@Test
public void testUseUnloadedModules() {
thrown.expect(ValidationException.class);
thrown.expectMessage("No module with name x exists");
manager.useModules(MODULE_TYPE_CORE, "x");
}
@Test
public void testUseModulesWithDuplicateModuleName() {
thrown.expect(ValidationException.class);
thrown.expectMessage("Module core appears more than once");
manager.useModules(MODULE_TYPE_CORE, MODULE_TYPE_CORE);
}
@Test
public void testUseModules() {
ModuleMock.ModuleX x = new ModuleMock.ModuleX("x");
ModuleMock.ModuleY y = new ModuleMock.ModuleY("y");
ModuleMock.ModuleZ z = new ModuleMock.ModuleZ("z");
manager.loadModule(x.getType(), x);
manager.loadModule(y.getType(), y);
manager.loadModule(z.getType(), z);
assertEquals(Arrays.asList(MODULE_TYPE_CORE, "x", "y", "z"), manager.getUsedModules());
manager.useModules("z", MODULE_TYPE_CORE);
assertEquals(Arrays.asList("z", MODULE_TYPE_CORE), manager.getUsedModules());
Map<String, Module> expectedLoadedModules = new HashMap<>();
expectedLoadedModules.put(MODULE_TYPE_CORE, CoreModule.INSTANCE);
expectedLoadedModules.put("x", x);
expectedLoadedModules.put("y", y);
expectedLoadedModules.put("z", z);
assertEquals(expectedLoadedModules, manager.getLoadedModules());
}
    @Test
    public void testListModules() {
        ModuleMock.ModuleY y = new ModuleMock.ModuleY("y");
        ModuleMock.ModuleZ z = new ModuleMock.ModuleZ("z");
        manager.loadModule("y", y);
        manager.loadModule("z", z);
        manager.useModules("z", "y");
        // listModules() reflects the declared use order, not the load order.
        assertEquals(Arrays.asList("z", "y"), manager.listModules());
    }
@Test
@Test
public void testListFunctions() {
ModuleMock.ModuleX x = new ModuleMock.ModuleX("x");
manager.loadModule(x.getType(), x);
assertTrue(manager.listFunctions().contains("dummy"));
manager.useModules(MODULE_TYPE_CORE);
assertFalse(manager.listFunctions().contains("dummy"));
}
@Test
public void testGetFunctionDefinition() {
ModuleMock.ModuleX x = new ModuleMock.ModuleX("x");
manager.loadModule(x.getType(), x);
assertTrue(manager.getFunctionDefinition("dummy").isPresent());
manager.useModules(MODULE_TYPE_CORE);
assertFalse(manager.getFunctionDefinition("dummy").isPresent());
}
    // Builds the expected listFullModules() result: the first `index` names are the
    // used modules (kept in order), the remaining names are unused entries sorted by
    // name for a deterministic comparison.
    private static List<ModuleEntry> getExpectedModuleEntries(int index, String... names) {
        List<ModuleEntry> expected = new ArrayList<>();
        IntStream.range(0, index).forEach(i -> expected.add(new ModuleEntry(names[i], true)));
        expected.addAll(
                IntStream.range(index, names.length)
                        .mapToObj(i -> new ModuleEntry(names[i], false))
                        .sorted(COMPARATOR)
                        .collect(Collectors.toList()));
        return expected;
    }
private List<ModuleEntry> getActualModuleEntries() {
List<ModuleEntry> actual = manager.listFullModules();
List<ModuleEntry> sortedActual = actual.subList(0, manager.listModules().size());
sortedActual.addAll(
actual.subList(manager.listModules().size(), actual.size()).stream()
.sorted(COMPARATOR)
.collect(Collectors.toList()));
return sortedActual;
}
} | class ModuleManagerTest extends TestLogger {
private ModuleManager manager;
@Rule public ExpectedException thrown = ExpectedException.none();
@Before
public void before() {
manager = new ModuleManager();
}
@Test
public void testLoadModuleTwice() {
assertEquals(Collections.singletonList(MODULE_TYPE_CORE), manager.getUsedModules());
assertEquals(CoreModule.INSTANCE, manager.getLoadedModules().get(MODULE_TYPE_CORE));
thrown.expect(ValidationException.class);
thrown.expectMessage("A module with name 'core' already exists");
manager.loadModule(MODULE_TYPE_CORE, CoreModule.INSTANCE);
}
@Test
public void testLoadModuleWithoutUnusedModulesExist() {
ModuleMock x = new ModuleMock("x");
ModuleMock y = new ModuleMock("y");
ModuleMock z = new ModuleMock("z");
manager.loadModule(x.getType(), x);
manager.loadModule(y.getType(), y);
manager.loadModule(z.getType(), z);
Map<String, Module> expectedLoadedModules = new HashMap<>();
expectedLoadedModules.put(MODULE_TYPE_CORE, CoreModule.INSTANCE);
expectedLoadedModules.put("x", x);
expectedLoadedModules.put("y", y);
expectedLoadedModules.put("z", z);
assertEquals(Arrays.asList(MODULE_TYPE_CORE, "x", "y", "z"), manager.getUsedModules());
assertEquals(expectedLoadedModules, manager.getLoadedModules());
}
@Test
public void testLoadModuleWithUnusedModulesExist() {
ModuleMock y = new ModuleMock("y");
ModuleMock z = new ModuleMock("z");
manager.loadModule(y.getType(), y);
manager.loadModule(z.getType(), z);
Map<String, Module> expectedLoadedModules = new HashMap<>();
expectedLoadedModules.put(MODULE_TYPE_CORE, CoreModule.INSTANCE);
expectedLoadedModules.put("y", y);
expectedLoadedModules.put("z", z);
assertEquals(Arrays.asList(MODULE_TYPE_CORE, "y", "z"), manager.getUsedModules());
assertEquals(expectedLoadedModules, manager.getLoadedModules());
manager.useModules(MODULE_TYPE_CORE);
ModuleMock x = new ModuleMock("x");
manager.loadModule(x.getType(), x);
expectedLoadedModules.put("x", x);
assertEquals(Arrays.asList(MODULE_TYPE_CORE, "x"), manager.getUsedModules());
assertEquals(expectedLoadedModules, manager.getLoadedModules());
}
@Test
public void testUnloadModuleTwice() {
assertEquals(Collections.singletonList(MODULE_TYPE_CORE), manager.getUsedModules());
manager.unloadModule(MODULE_TYPE_CORE);
assertEquals(Collections.emptyList(), manager.getUsedModules());
assertEquals(Collections.emptyMap(), manager.getLoadedModules());
thrown.expect(ValidationException.class);
thrown.expectMessage("No module with name 'core' exists");
manager.unloadModule(MODULE_TYPE_CORE);
}
@Test
public void testUseUnloadedModules() {
thrown.expect(ValidationException.class);
thrown.expectMessage("No module with name 'x' exists");
manager.useModules(MODULE_TYPE_CORE, "x");
}
@Test
public void testUseModulesWithDuplicateModuleName() {
thrown.expect(ValidationException.class);
thrown.expectMessage("Module 'core' appears more than once");
manager.useModules(MODULE_TYPE_CORE, MODULE_TYPE_CORE);
}
@Test
public void testUseModules() {
ModuleMock x = new ModuleMock("x");
ModuleMock y = new ModuleMock("y");
ModuleMock z = new ModuleMock("z");
manager.loadModule(x.getType(), x);
manager.loadModule(y.getType(), y);
manager.loadModule(z.getType(), z);
assertEquals(Arrays.asList(MODULE_TYPE_CORE, "x", "y", "z"), manager.getUsedModules());
manager.useModules("z", MODULE_TYPE_CORE);
assertEquals(Arrays.asList("z", MODULE_TYPE_CORE), manager.getUsedModules());
Map<String, Module> expectedLoadedModules = new HashMap<>();
expectedLoadedModules.put(MODULE_TYPE_CORE, CoreModule.INSTANCE);
expectedLoadedModules.put("x", x);
expectedLoadedModules.put("y", y);
expectedLoadedModules.put("z", z);
assertEquals(expectedLoadedModules, manager.getLoadedModules());
}
@Test
public void testListModules() {
ModuleMock y = new ModuleMock("y");
ModuleMock z = new ModuleMock("z");
manager.loadModule("y", y);
manager.loadModule("z", z);
manager.useModules("z", "y");
assertEquals(Arrays.asList("z", "y"), manager.listModules());
}
@Test
@Test
public void testListFunctions() {
ModuleMock x = new ModuleMock("x");
manager.loadModule(x.getType(), x);
assertTrue(manager.listFunctions().contains("dummy"));
manager.useModules(MODULE_TYPE_CORE);
assertFalse(manager.listFunctions().contains("dummy"));
}
@Test
public void testGetFunctionDefinition() {
ModuleMock x = new ModuleMock("x");
manager.loadModule(x.getType(), x);
assertTrue(manager.getFunctionDefinition("dummy").isPresent());
manager.useModules(MODULE_TYPE_CORE);
assertFalse(manager.getFunctionDefinition("dummy").isPresent());
}
private static List<ModuleEntry> getExpectedModuleEntries(int index, String... names) {
return IntStream.range(0, names.length)
.mapToObj(i -> new ModuleEntry(names[i], i < index))
.collect(Collectors.toList());
}
} |
I'm not familiar with this API, but is it safe to expect the input stream to contain the full output when the process is already terminated? | public void provideOutcome(AppCreator ctx) throws AppCreatorException {
outputDir = outputDir == null ? ctx.getWorkPath() : IoUtils.mkdirs(outputDir);
final RunnerJarOutcome runnerJarOutcome = ctx.resolveOutcome(RunnerJarOutcome.class);
Path runnerJar = runnerJarOutcome.getRunnerJar();
boolean runnerJarCopied = false;
if (!runnerJar.getParent().equals(outputDir)) {
try {
runnerJar = IoUtils.copy(runnerJar, outputDir.resolve(runnerJar.getFileName()));
} catch (IOException e) {
throw new AppCreatorException("Failed to copy the runnable jar to the output dir", e);
}
runnerJarCopied = true;
}
final String runnerJarName = runnerJar.getFileName().toString();
Path outputLibDir = outputDir.resolve(runnerJarOutcome.getLibDir().getFileName());
boolean outputLibDirCopied = false;
if (Files.exists(outputLibDir)) {
outputLibDir = null;
} else {
try {
IoUtils.copy(runnerJarOutcome.getLibDir(), outputLibDir);
} catch (IOException e) {
throw new AppCreatorException("Failed to copy the runnable jar and the lib to the docker project dir", e);
}
outputLibDirCopied = true;
}
final Config config = SmallRyeConfigProviderResolver.instance().getConfig();
boolean vmVersionOutOfDate = isThisGraalVMRCObsolete();
HashMap<String, String> env = new HashMap<>(System.getenv());
List<String> nativeImage;
String noPIE = "";
if (!"".equals(containerRuntime)) {
nativeImage = new ArrayList<>();
Collections.addAll(nativeImage, containerRuntime, "run", "-v", outputDir.toAbsolutePath() + ":/project:z", "--rm");
if (IS_LINUX & "docker".equals(containerRuntime)) {
try {
BufferedReader reader;
StringBuilder builder;
String uid = null;
String gid = null;
ProcessBuilder idPB = new ProcessBuilder().command("id", "-ur");
Process process = idPB.start();
if (process.waitFor() == 0) {
reader = new BufferedReader(new InputStreamReader(process.getInputStream()));
builder = new StringBuilder();
String line;
while ((line = reader.readLine()) != null) {
builder.append(line);
}
uid = builder.toString();
}
idPB = new ProcessBuilder().command("id", "-gr");
process = idPB.start();
if (process.waitFor() == 0) {
reader = new BufferedReader(new InputStreamReader(process.getInputStream()));
builder = new StringBuilder();
String line = null;
while ((line = reader.readLine()) != null) {
builder.append(line);
}
gid = builder.toString();
}
if (uid != null & gid != null & !"".equals(uid) & !"".equals(gid)) {
Collections.addAll(nativeImage, "--user", uid.concat(":").concat(gid));
}
} catch (Exception e) {
}
}
nativeImage.addAll(containerRuntimeOptions);
nativeImage.add(this.builderImage);
} else {
if (IS_LINUX) {
noPIE = detectNoPIE();
}
String graalvmHome = this.graalvmHome;
if (graalvmHome != null) {
env.put(GRAALVM_HOME, graalvmHome);
} else {
graalvmHome = env.get(GRAALVM_HOME);
if (graalvmHome == null) {
throw new AppCreatorException("GRAALVM_HOME was not set");
}
}
nativeImage = Collections.singletonList(graalvmHome + File.separator + "bin" + File.separator + "native-image");
}
try {
List<String> command = new ArrayList<>();
command.addAll(nativeImage);
if (cleanupServer) {
List<String> cleanup = new ArrayList<>(nativeImage);
cleanup.add("--server-shutdown");
ProcessBuilder pb = new ProcessBuilder(cleanup.toArray(new String[0]));
pb.directory(outputDir.toFile());
pb.redirectInput(ProcessBuilder.Redirect.INHERIT);
pb.redirectOutput(ProcessBuilder.Redirect.INHERIT);
pb.redirectError(ProcessBuilder.Redirect.INHERIT);
Process process = pb.start();
process.waitFor();
}
final Path propsFile = ctx.resolveOutcome(AugmentOutcome.class).getAppClassesDir()
.resolve("native-image.properties");
boolean enableSslNative = false;
if (Files.exists(propsFile)) {
final Properties properties = new Properties();
try (BufferedReader reader = Files.newBufferedReader(propsFile, StandardCharsets.UTF_8)) {
properties.load(reader);
}
for (String propertyName : properties.stringPropertyNames()) {
if (propertyName.startsWith(QUARKUS_PREFIX)) {
continue;
}
final String propertyValue = properties.getProperty(propertyName);
if (propertyValue == null) {
command.add("-J-D" + propertyName);
} else {
command.add("-J-D" + propertyName + "=" + propertyValue);
}
}
enableSslNative = properties.getProperty("quarkus.ssl.native") != null
? Boolean.parseBoolean(properties.getProperty("quarkus.ssl.native"))
: false;
}
if (enableSslNative) {
enableHttpsUrlHandler = true;
enableJni = true;
enableAllSecurityServices = true;
}
if (additionalBuildArgs != null) {
additionalBuildArgs.forEach(command::add);
}
command.add("-H:InitialCollectionPolicy=com.oracle.svm.core.genscavenge.CollectionPolicy$BySpaceAndTime");
command.add("-jar");
command.add(runnerJarName);
command.add("-J-Djava.util.concurrent.ForkJoinPool.common.parallelism=1");
if (reportErrorsAtRuntime) {
command.add("-H:+ReportUnsupportedElementsAtRuntime");
}
if (debugSymbols) {
command.add("-g");
}
if (debugBuildProcess) {
command.add("-J-Xrunjdwp:transport=dt_socket,address=5005,server=y,suspend=y");
}
if (!disableReports) {
command.add("-H:+PrintAnalysisCallTree");
}
if (dumpProxies) {
command.add("-Dsun.misc.ProxyGenerator.saveGeneratedFiles=true");
if (enableServer) {
log.warn(
"Options dumpProxies and enableServer are both enabled: this will get the proxies dumped in an unknown external working directory");
}
}
if (nativeImageXmx != null) {
command.add("-J-Xmx" + nativeImageXmx);
}
List<String> protocols = new ArrayList<>(2);
if (enableHttpUrlHandler) {
protocols.add("http");
}
if (enableHttpsUrlHandler) {
protocols.add("https");
}
if (addAllCharsets) {
command.add("-H:+AddAllCharsets");
} else {
command.add("-H:-AddAllCharsets");
}
if (!protocols.isEmpty()) {
command.add("-H:EnableURLProtocols=" + String.join(",", protocols));
}
if (enableAllSecurityServices) {
command.add("--enable-all-security-services");
}
if (!noPIE.isEmpty()) {
command.add("-H:NativeLinkerOption=" + noPIE);
}
if (enableRetainedHeapReporting) {
command.add("-H:+PrintRetainedHeapHistogram");
}
if (enableCodeSizeReporting) {
command.add("-H:+PrintCodeSizeReport");
}
if (!enableIsolates) {
command.add("-H:-SpawnIsolates");
}
if (enableJni) {
command.add("-H:+JNI");
} else {
command.add("-H:-JNI");
}
if (!enableServer) {
command.add("--no-server");
}
if (enableVMInspection) {
command.add("-H:+AllowVMInspection");
}
if (autoServiceLoaderRegistration) {
command.add("-H:+UseServiceLoaderFeature");
command.add("-H:+TraceServiceLoaderFeature");
} else {
command.add("-H:-UseServiceLoaderFeature");
}
if (fullStackTraces) {
command.add("-H:+StackTrace");
} else {
command.add("-H:-StackTrace");
}
log.info(command.stream().collect(Collectors.joining(" ")));
CountDownLatch errorReportLatch = new CountDownLatch(1);
ProcessBuilder pb = new ProcessBuilder(command.toArray(new String[0]));
pb.directory(outputDir.toFile());
pb.redirectInput(ProcessBuilder.Redirect.INHERIT);
pb.redirectOutput(ProcessBuilder.Redirect.INHERIT);
Process process = pb.start();
new Thread(new ErrorReplacingProcessReader(process.getErrorStream(), outputDir.resolve("reports").toFile(),
errorReportLatch)).start();
errorReportLatch.await();
if (process.waitFor() != 0) {
throw new RuntimeException("Image generation failed");
}
System.setProperty("native.image.path", runnerJarName.substring(0, runnerJarName.lastIndexOf('.')));
ctx.pushOutcome(NativeImageOutcome.class, this);
} catch (Exception e) {
throw new AppCreatorException("Failed to build native image", e);
} finally {
if (runnerJarCopied) {
IoUtils.recursiveDelete(runnerJar);
}
if (outputLibDirCopied) {
IoUtils.recursiveDelete(outputLibDir);
}
}
} | reader = new BufferedReader(new InputStreamReader(process.getInputStream())); | public void provideOutcome(AppCreator ctx) throws AppCreatorException {
outputDir = outputDir == null ? ctx.getWorkPath() : IoUtils.mkdirs(outputDir);
final RunnerJarOutcome runnerJarOutcome = ctx.resolveOutcome(RunnerJarOutcome.class);
Path runnerJar = runnerJarOutcome.getRunnerJar();
boolean runnerJarCopied = false;
if (!runnerJar.getParent().equals(outputDir)) {
try {
runnerJar = IoUtils.copy(runnerJar, outputDir.resolve(runnerJar.getFileName()));
} catch (IOException e) {
throw new AppCreatorException("Failed to copy the runnable jar to the output dir", e);
}
runnerJarCopied = true;
}
final String runnerJarName = runnerJar.getFileName().toString();
Path outputLibDir = outputDir.resolve(runnerJarOutcome.getLibDir().getFileName());
boolean outputLibDirCopied = false;
if (Files.exists(outputLibDir)) {
outputLibDir = null;
} else {
try {
IoUtils.copy(runnerJarOutcome.getLibDir(), outputLibDir);
} catch (IOException e) {
throw new AppCreatorException("Failed to copy the runnable jar and the lib to the docker project dir", e);
}
outputLibDirCopied = true;
}
final Config config = SmallRyeConfigProviderResolver.instance().getConfig();
boolean vmVersionOutOfDate = isThisGraalVMRCObsolete();
HashMap<String, String> env = new HashMap<>(System.getenv());
List<String> nativeImage;
String noPIE = "";
if (!"".equals(containerRuntime)) {
nativeImage = new ArrayList<>();
Collections.addAll(nativeImage, containerRuntime, "run", "-v", outputDir.toAbsolutePath() + ":/project:z", "--rm");
if (IS_LINUX & "docker".equals(containerRuntime)) {
String uid = getLinuxID("-ur");
String gid = getLinuxID("-gr");
if (uid != null & gid != null & !"".equals(uid) & !"".equals(gid)) {
Collections.addAll(nativeImage, "--user", uid.concat(":").concat(gid));
}
}
nativeImage.addAll(containerRuntimeOptions);
nativeImage.add(this.builderImage);
} else {
if (IS_LINUX) {
noPIE = detectNoPIE();
}
String graalvmHome = this.graalvmHome;
if (graalvmHome != null) {
env.put(GRAALVM_HOME, graalvmHome);
} else {
graalvmHome = env.get(GRAALVM_HOME);
if (graalvmHome == null) {
throw new AppCreatorException("GRAALVM_HOME was not set");
}
}
nativeImage = Collections.singletonList(graalvmHome + File.separator + "bin" + File.separator + "native-image");
}
try {
List<String> command = new ArrayList<>();
command.addAll(nativeImage);
if (cleanupServer) {
List<String> cleanup = new ArrayList<>(nativeImage);
cleanup.add("--server-shutdown");
ProcessBuilder pb = new ProcessBuilder(cleanup.toArray(new String[0]));
pb.directory(outputDir.toFile());
pb.redirectInput(ProcessBuilder.Redirect.INHERIT);
pb.redirectOutput(ProcessBuilder.Redirect.INHERIT);
pb.redirectError(ProcessBuilder.Redirect.INHERIT);
Process process = pb.start();
process.waitFor();
}
final Path propsFile = ctx.resolveOutcome(AugmentOutcome.class).getAppClassesDir()
.resolve("native-image.properties");
boolean enableSslNative = false;
if (Files.exists(propsFile)) {
final Properties properties = new Properties();
try (BufferedReader reader = Files.newBufferedReader(propsFile, StandardCharsets.UTF_8)) {
properties.load(reader);
}
for (String propertyName : properties.stringPropertyNames()) {
if (propertyName.startsWith(QUARKUS_PREFIX)) {
continue;
}
final String propertyValue = properties.getProperty(propertyName);
if (propertyValue == null) {
command.add("-J-D" + propertyName);
} else {
command.add("-J-D" + propertyName + "=" + propertyValue);
}
}
enableSslNative = properties.getProperty("quarkus.ssl.native") != null
? Boolean.parseBoolean(properties.getProperty("quarkus.ssl.native"))
: false;
}
if (enableSslNative) {
enableHttpsUrlHandler = true;
enableJni = true;
enableAllSecurityServices = true;
}
if (additionalBuildArgs != null) {
additionalBuildArgs.forEach(command::add);
}
command.add("-H:InitialCollectionPolicy=com.oracle.svm.core.genscavenge.CollectionPolicy$BySpaceAndTime");
command.add("-jar");
command.add(runnerJarName);
command.add("-J-Djava.util.concurrent.ForkJoinPool.common.parallelism=1");
if (reportErrorsAtRuntime) {
command.add("-H:+ReportUnsupportedElementsAtRuntime");
}
if (debugSymbols) {
command.add("-g");
}
if (debugBuildProcess) {
command.add("-J-Xrunjdwp:transport=dt_socket,address=5005,server=y,suspend=y");
}
if (!disableReports) {
command.add("-H:+PrintAnalysisCallTree");
}
if (dumpProxies) {
command.add("-Dsun.misc.ProxyGenerator.saveGeneratedFiles=true");
if (enableServer) {
log.warn(
"Options dumpProxies and enableServer are both enabled: this will get the proxies dumped in an unknown external working directory");
}
}
if (nativeImageXmx != null) {
command.add("-J-Xmx" + nativeImageXmx);
}
List<String> protocols = new ArrayList<>(2);
if (enableHttpUrlHandler) {
protocols.add("http");
}
if (enableHttpsUrlHandler) {
protocols.add("https");
}
if (addAllCharsets) {
command.add("-H:+AddAllCharsets");
} else {
command.add("-H:-AddAllCharsets");
}
if (!protocols.isEmpty()) {
command.add("-H:EnableURLProtocols=" + String.join(",", protocols));
}
if (enableAllSecurityServices) {
command.add("--enable-all-security-services");
}
if (!noPIE.isEmpty()) {
command.add("-H:NativeLinkerOption=" + noPIE);
}
if (enableRetainedHeapReporting) {
command.add("-H:+PrintRetainedHeapHistogram");
}
if (enableCodeSizeReporting) {
command.add("-H:+PrintCodeSizeReport");
}
if (!enableIsolates) {
command.add("-H:-SpawnIsolates");
}
if (enableJni) {
command.add("-H:+JNI");
} else {
command.add("-H:-JNI");
}
if (!enableServer) {
command.add("--no-server");
}
if (enableVMInspection) {
command.add("-H:+AllowVMInspection");
}
if (autoServiceLoaderRegistration) {
command.add("-H:+UseServiceLoaderFeature");
command.add("-H:+TraceServiceLoaderFeature");
} else {
command.add("-H:-UseServiceLoaderFeature");
}
if (fullStackTraces) {
command.add("-H:+StackTrace");
} else {
command.add("-H:-StackTrace");
}
log.info(command.stream().collect(Collectors.joining(" ")));
CountDownLatch errorReportLatch = new CountDownLatch(1);
ProcessBuilder pb = new ProcessBuilder(command.toArray(new String[0]));
pb.directory(outputDir.toFile());
pb.redirectInput(ProcessBuilder.Redirect.INHERIT);
pb.redirectOutput(ProcessBuilder.Redirect.INHERIT);
Process process = pb.start();
new Thread(new ErrorReplacingProcessReader(process.getErrorStream(), outputDir.resolve("reports").toFile(),
errorReportLatch)).start();
errorReportLatch.await();
if (process.waitFor() != 0) {
throw new RuntimeException("Image generation failed");
}
System.setProperty("native.image.path", runnerJarName.substring(0, runnerJarName.lastIndexOf('.')));
ctx.pushOutcome(NativeImageOutcome.class, this);
} catch (Exception e) {
throw new AppCreatorException("Failed to build native image", e);
} finally {
if (runnerJarCopied) {
IoUtils.recursiveDelete(runnerJar);
}
if (outputLibDirCopied) {
IoUtils.recursiveDelete(outputLibDir);
}
}
}
class NativeImagePhase implements AppCreationPhase<NativeImagePhase>, NativeImageOutcome {
private static final Logger log = Logger.getLogger(NativeImagePhase.class);
private static final String GRAALVM_HOME = "GRAALVM_HOME";
private static final String QUARKUS_PREFIX = "quarkus.";
private static final boolean IS_LINUX = System.getProperty("os.name").toLowerCase(Locale.ROOT).contains("linux");
private Path outputDir;
private boolean reportErrorsAtRuntime;
private boolean debugSymbols;
private boolean debugBuildProcess;
private boolean cleanupServer;
private boolean enableHttpUrlHandler;
private boolean enableHttpsUrlHandler;
private boolean enableAllSecurityServices;
private boolean enableRetainedHeapReporting;
private boolean enableCodeSizeReporting;
private boolean enableIsolates;
private String graalvmHome;
private boolean enableServer;
private boolean enableJni;
private boolean autoServiceLoaderRegistration;
private boolean dumpProxies;
private String nativeImageXmx;
private String builderImage = "quay.io/quarkus/centos-quarkus-native-image:graalvm-1.0.0-rc14";
private String containerRuntime = "";
private List<String> containerRuntimeOptions = new ArrayList<>();
private boolean enableVMInspection;
private boolean fullStackTraces;
private boolean disableReports;
private List<String> additionalBuildArgs;
private boolean addAllCharsets;
public NativeImagePhase setAddAllCharsets(boolean addAllCharsets) {
this.addAllCharsets = addAllCharsets;
return this;
}
public NativeImagePhase setOutputDir(Path outputDir) {
this.outputDir = outputDir;
return this;
}
public NativeImagePhase setReportErrorsAtRuntime(boolean reportErrorsAtRuntime) {
this.reportErrorsAtRuntime = reportErrorsAtRuntime;
return this;
}
public NativeImagePhase setDebugSymbols(boolean debugSymbols) {
this.debugSymbols = debugSymbols;
return this;
}
public NativeImagePhase setDebugBuildProcess(boolean debugBuildProcess) {
this.debugBuildProcess = debugBuildProcess;
return this;
}
public NativeImagePhase setCleanupServer(boolean cleanupServer) {
this.cleanupServer = cleanupServer;
return this;
}
public NativeImagePhase setEnableHttpUrlHandler(boolean enableHttpUrlHandler) {
this.enableHttpUrlHandler = enableHttpUrlHandler;
return this;
}
public NativeImagePhase setEnableHttpsUrlHandler(boolean enableHttpsUrlHandler) {
this.enableHttpsUrlHandler = enableHttpsUrlHandler;
return this;
}
public NativeImagePhase setEnableAllSecurityServices(boolean enableAllSecurityServices) {
this.enableAllSecurityServices = enableAllSecurityServices;
return this;
}
public NativeImagePhase setEnableRetainedHeapReporting(boolean enableRetainedHeapReporting) {
this.enableRetainedHeapReporting = enableRetainedHeapReporting;
return this;
}
public NativeImagePhase setEnableCodeSizeReporting(boolean enableCodeSizeReporting) {
this.enableCodeSizeReporting = enableCodeSizeReporting;
return this;
}
public NativeImagePhase setEnableIsolates(boolean enableIsolates) {
this.enableIsolates = enableIsolates;
return this;
}
public NativeImagePhase setGraalvmHome(String graalvmHome) {
this.graalvmHome = graalvmHome;
return this;
}
public NativeImagePhase setEnableServer(boolean enableServer) {
this.enableServer = enableServer;
return this;
}
public NativeImagePhase setEnableJni(boolean enableJni) {
this.enableJni = enableJni;
return this;
}
public NativeImagePhase setAutoServiceLoaderRegistration(boolean autoServiceLoaderRegistration) {
this.autoServiceLoaderRegistration = autoServiceLoaderRegistration;
return this;
}
public NativeImagePhase setDumpProxies(boolean dumpProxies) {
this.dumpProxies = dumpProxies;
return this;
}
public NativeImagePhase setNativeImageXmx(String nativeImageXmx) {
this.nativeImageXmx = nativeImageXmx;
return this;
}
public NativeImagePhase setDockerBuild(String dockerBuild) {
if (dockerBuild == null) {
return this;
}
if ("false".equals(dockerBuild.toLowerCase())) {
this.containerRuntime = "";
} else {
this.containerRuntime = "docker";
if (!"true".equals(dockerBuild.toLowerCase())) {
this.builderImage = dockerBuild;
}
}
return this;
}
public NativeImagePhase setContainerRuntime(String containerRuntime) {
if (containerRuntime == null) {
return this;
}
if ("podman".equals(containerRuntime) || "docker".equals(containerRuntime)) {
this.containerRuntime = containerRuntime;
} else {
log.warn("container runtime is not docker or podman. fallback to docker");
this.containerRuntime = "docker";
}
return this;
}
public NativeImagePhase setContainerRuntimeOptions(String containerRuntimeOptions) {
if (containerRuntimeOptions != null) {
this.containerRuntimeOptions = Arrays.asList(containerRuntimeOptions.split(","));
}
return this;
}
public NativeImagePhase setEnableVMInspection(boolean enableVMInspection) {
this.enableVMInspection = enableVMInspection;
return this;
}
public NativeImagePhase setFullStackTraces(boolean fullStackTraces) {
this.fullStackTraces = fullStackTraces;
return this;
}
public NativeImagePhase setDisableReports(boolean disableReports) {
this.disableReports = disableReports;
return this;
}
public NativeImagePhase setAdditionalBuildArgs(List<String> additionalBuildArgs) {
this.additionalBuildArgs = additionalBuildArgs;
return this;
}
@Override
public void register(OutcomeProviderRegistration registration) throws AppCreatorException {
registration.provides(NativeImageOutcome.class);
}
/**
 * Detects whether the JVM running this build is one of the known-obsolete
 * GraalVM release candidates (rc9..rc13), based on the {@code java.vm.name}
 * system property.
 *
 * <p>Fix: the stray {@code @Override} annotation was removed — a
 * {@code private} method cannot override anything, so the annotation is a
 * compile error here.
 *
 * @return true if an out-of-date GraalVM RC build was detected
 */
private boolean isThisGraalVMRCObsolete() {
    final String vmName = System.getProperty("java.vm.name");
    log.info("Running Quarkus native-image plugin on " + vmName);
    final List<String> obsoleteGraalVmVersions = Arrays.asList("-rc9", "-rc10", "-rc11", "-rc12", "-rc13");
    final boolean vmVersionIsObsolete = obsoleteGraalVmVersions.stream().anyMatch(vmName::contains);
    if (vmVersionIsObsolete) {
        log.error("Out of date RC build of GraalVM detected! Please upgrade to RC14");
        return true;
    }
    return false;
}
private static String detectNoPIE() {
String argument = testGCCArgument("-no-pie");
return argument.length() == 0 ? testGCCArgument("-nopie") : argument;
}
/**
 * Probes whether the system C compiler ({@code cc}) accepts the given flag by
 * running a no-op preprocessor invocation with it.
 *
 * <p>Fix: the original empty multi-catch silently swallowed
 * {@link InterruptedException}; the interrupt status is now restored.
 *
 * @param argument the compiler flag to probe, e.g. "-no-pie"
 * @return the flag itself if cc accepts it, or "" if it is rejected or cc
 *         cannot be run at all
 */
private static String testGCCArgument(String argument) {
    try {
        Process gcc = new ProcessBuilder("cc", "-v", "-E", argument, "-").start();
        gcc.getOutputStream().close();
        if (gcc.waitFor() == 0) {
            return argument;
        }
    } catch (IOException ignored) {
        // cc is not installed or not runnable: treat the flag as unsupported.
    } catch (InterruptedException e) {
        // Preserve the thread's interrupt status instead of discarding it.
        Thread.currentThread().interrupt();
    }
    return "";
}
@Override
public String getConfigPropertyName() {
return "native-image";
}
@Override
public PropertiesHandler<NativeImagePhase> getPropertiesHandler() {
return new PropertiesHandler<NativeImagePhase>() {
@Override
public NativeImagePhase getTarget() {
return NativeImagePhase.this;
}
@Override
public boolean set(NativeImagePhase t, PropertyContext ctx) {
final String value = ctx.getValue();
switch (ctx.getRelativeName()) {
case "output":
t.setOutputDir(Paths.get(value));
break;
case "report-errors-at-runtime":
t.setReportErrorsAtRuntime(Boolean.parseBoolean(value));
break;
case "debug-symbols":
t.setDebugSymbols(Boolean.parseBoolean(value));
break;
case "debug-build-process":
t.setDebugBuildProcess(Boolean.parseBoolean(value));
break;
case "cleanup-server":
t.setCleanupServer(Boolean.parseBoolean(value));
break;
case "enable-http-url-handler":
t.setEnableHttpUrlHandler(Boolean.parseBoolean(value));
break;
case "enable-https-url-handler":
t.setEnableHttpsUrlHandler(Boolean.parseBoolean(value));
break;
case "enable-all-security-services":
t.setEnableAllSecurityServices(Boolean.parseBoolean(value));
break;
case "enable-retained-heap-reporting":
t.setEnableRetainedHeapReporting(Boolean.parseBoolean(value));
break;
case "enable-code-size-reporting":
t.setEnableCodeSizeReporting(Boolean.parseBoolean(value));
break;
case "enable-isolates":
t.setEnableIsolates(Boolean.parseBoolean(value));
break;
case "graalvm-home":
t.setGraalvmHome(value);
break;
case "enable-server":
t.setEnableServer(Boolean.parseBoolean(value));
break;
case "enable-jni":
t.setEnableJni(Boolean.parseBoolean(value));
break;
case "auto-service-loader-registration":
t.setAutoServiceLoaderRegistration(Boolean.parseBoolean(value));
break;
case "dump-proxies":
t.setDumpProxies(Boolean.parseBoolean(value));
break;
case "native-image-xmx":
t.setNativeImageXmx(value);
break;
case "docker-build":
t.setDockerBuild(value);
break;
case "enable-vm-inspection":
t.setEnableVMInspection(Boolean.parseBoolean(value));
break;
case "full-stack-traces":
t.setFullStackTraces(Boolean.parseBoolean(value));
break;
case "disable-reports":
t.setDisableReports(Boolean.parseBoolean(value));
break;
case "additional-build-args":
t.setAdditionalBuildArgs(Arrays.asList(value.split(",")));
break;
default:
return false;
}
return true;
}
};
}
}
class NativeImagePhase implements AppCreationPhase<NativeImagePhase>, NativeImageOutcome {
private static final Logger log = Logger.getLogger(NativeImagePhase.class);
private static final String GRAALVM_HOME = "GRAALVM_HOME";
private static final String QUARKUS_PREFIX = "quarkus.";
private static final boolean IS_LINUX = System.getProperty("os.name").toLowerCase(Locale.ROOT).contains("linux");
private Path outputDir;
private boolean reportErrorsAtRuntime;
private boolean debugSymbols;
private boolean debugBuildProcess;
private boolean cleanupServer;
private boolean enableHttpUrlHandler;
private boolean enableHttpsUrlHandler;
private boolean enableAllSecurityServices;
private boolean enableRetainedHeapReporting;
private boolean enableCodeSizeReporting;
private boolean enableIsolates;
private String graalvmHome;
private boolean enableServer;
private boolean enableJni;
private boolean autoServiceLoaderRegistration;
private boolean dumpProxies;
private String nativeImageXmx;
private String builderImage = "quay.io/quarkus/centos-quarkus-native-image:graalvm-1.0.0-rc14";
private String containerRuntime = "";
private List<String> containerRuntimeOptions = new ArrayList<>();
private boolean enableVMInspection;
private boolean fullStackTraces;
private boolean disableReports;
private List<String> additionalBuildArgs;
private boolean addAllCharsets;
public NativeImagePhase setAddAllCharsets(boolean addAllCharsets) {
this.addAllCharsets = addAllCharsets;
return this;
}
public NativeImagePhase setOutputDir(Path outputDir) {
this.outputDir = outputDir;
return this;
}
public NativeImagePhase setReportErrorsAtRuntime(boolean reportErrorsAtRuntime) {
this.reportErrorsAtRuntime = reportErrorsAtRuntime;
return this;
}
public NativeImagePhase setDebugSymbols(boolean debugSymbols) {
this.debugSymbols = debugSymbols;
return this;
}
public NativeImagePhase setDebugBuildProcess(boolean debugBuildProcess) {
this.debugBuildProcess = debugBuildProcess;
return this;
}
public NativeImagePhase setCleanupServer(boolean cleanupServer) {
this.cleanupServer = cleanupServer;
return this;
}
public NativeImagePhase setEnableHttpUrlHandler(boolean enableHttpUrlHandler) {
this.enableHttpUrlHandler = enableHttpUrlHandler;
return this;
}
public NativeImagePhase setEnableHttpsUrlHandler(boolean enableHttpsUrlHandler) {
this.enableHttpsUrlHandler = enableHttpsUrlHandler;
return this;
}
public NativeImagePhase setEnableAllSecurityServices(boolean enableAllSecurityServices) {
this.enableAllSecurityServices = enableAllSecurityServices;
return this;
}
public NativeImagePhase setEnableRetainedHeapReporting(boolean enableRetainedHeapReporting) {
this.enableRetainedHeapReporting = enableRetainedHeapReporting;
return this;
}
public NativeImagePhase setEnableCodeSizeReporting(boolean enableCodeSizeReporting) {
this.enableCodeSizeReporting = enableCodeSizeReporting;
return this;
}
public NativeImagePhase setEnableIsolates(boolean enableIsolates) {
this.enableIsolates = enableIsolates;
return this;
}
public NativeImagePhase setGraalvmHome(String graalvmHome) {
this.graalvmHome = graalvmHome;
return this;
}
public NativeImagePhase setEnableServer(boolean enableServer) {
this.enableServer = enableServer;
return this;
}
public NativeImagePhase setEnableJni(boolean enableJni) {
this.enableJni = enableJni;
return this;
}
public NativeImagePhase setAutoServiceLoaderRegistration(boolean autoServiceLoaderRegistration) {
this.autoServiceLoaderRegistration = autoServiceLoaderRegistration;
return this;
}
public NativeImagePhase setDumpProxies(boolean dumpProxies) {
this.dumpProxies = dumpProxies;
return this;
}
public NativeImagePhase setNativeImageXmx(String nativeImageXmx) {
this.nativeImageXmx = nativeImageXmx;
return this;
}
/**
 * Configures container-based builds from the "docker-build" property value:
 * "false" disables it, "true" enables it with the default builder image, and
 * any other value enables it using that value as the builder image name.
 *
 * <p>Fix: comparisons now use {@code equalsIgnoreCase}, which is
 * locale-independent — the previous {@code toLowerCase()} can misbehave under
 * locales such as Turkish (dotless i).
 *
 * @param dockerBuild raw property value; null leaves the configuration unchanged
 * @return this phase, for chaining
 */
public NativeImagePhase setDockerBuild(String dockerBuild) {
    if (dockerBuild == null) {
        return this;
    }
    if ("false".equalsIgnoreCase(dockerBuild)) {
        this.containerRuntime = "";
    } else {
        this.containerRuntime = "docker";
        if (!"true".equalsIgnoreCase(dockerBuild)) {
            this.builderImage = dockerBuild;
        }
    }
    return this;
}
public NativeImagePhase setContainerRuntime(String containerRuntime) {
if (containerRuntime == null) {
return this;
}
if ("podman".equals(containerRuntime) || "docker".equals(containerRuntime)) {
this.containerRuntime = containerRuntime;
} else {
log.warn("container runtime is not docker or podman. fallback to docker");
this.containerRuntime = "docker";
}
return this;
}
public NativeImagePhase setContainerRuntimeOptions(String containerRuntimeOptions) {
if (containerRuntimeOptions != null) {
this.containerRuntimeOptions = Arrays.asList(containerRuntimeOptions.split(","));
}
return this;
}
public NativeImagePhase setEnableVMInspection(boolean enableVMInspection) {
this.enableVMInspection = enableVMInspection;
return this;
}
public NativeImagePhase setFullStackTraces(boolean fullStackTraces) {
this.fullStackTraces = fullStackTraces;
return this;
}
public NativeImagePhase setDisableReports(boolean disableReports) {
this.disableReports = disableReports;
return this;
}
public NativeImagePhase setAdditionalBuildArgs(List<String> additionalBuildArgs) {
this.additionalBuildArgs = additionalBuildArgs;
return this;
}
@Override
public void register(OutcomeProviderRegistration registration) throws AppCreatorException {
registration.provides(NativeImageOutcome.class);
}
/**
 * Detects whether the JVM running this build is one of the known-obsolete
 * GraalVM release candidates (rc9..rc13), based on the {@code java.vm.name}
 * system property.
 *
 * <p>Fix: the stray {@code @Override} annotation was removed — a
 * {@code private} method cannot override anything, so the annotation is a
 * compile error here.
 *
 * @return true if an out-of-date GraalVM RC build was detected
 */
private boolean isThisGraalVMRCObsolete() {
    final String vmName = System.getProperty("java.vm.name");
    log.info("Running Quarkus native-image plugin on " + vmName);
    final List<String> obsoleteGraalVmVersions = Arrays.asList("-rc9", "-rc10", "-rc11", "-rc12", "-rc13");
    final boolean vmVersionIsObsolete = obsoleteGraalVmVersions.stream().anyMatch(vmName::contains);
    if (vmVersionIsObsolete) {
        log.error("Out of date RC build of GraalVM detected! Please upgrade to RC14");
        return true;
    }
    return false;
}
/**
 * Runs {@code id <option>} (e.g. "-ur" / "-gr") and returns its stdout, or
 * null if the process could not be started.
 *
 * <p>Fix: the original redirected the child's stdout to /dev/null and then
 * tried to read it via {@code process.getInputStream()}, which therefore
 * always yielded an empty string. Only stderr is discarded now, so the id
 * value is actually captured.
 */
private static String getLinuxID(String option) {
    Process process;
    try {
        StringBuilder responseBuilder = new StringBuilder();
        String line;
        ProcessBuilder idPB = new ProcessBuilder().command("id", option);
        // Discard diagnostics, but keep stdout readable: it is consumed below.
        idPB.redirectError(new File("/dev/null"));
        process = idPB.start();
        try (InputStream inputStream = process.getInputStream()) {
            try (BufferedReader reader = new BufferedReader(new InputStreamReader(inputStream))) {
                while ((line = reader.readLine()) != null) {
                    responseBuilder.append(line);
                }
                safeWaitFor(process);
                return responseBuilder.toString();
            }
        } catch (Throwable t) {
            // Always reap the child before propagating, to avoid a zombie.
            safeWaitFor(process);
            throw t;
        }
    } catch (IOException e) {
        // "id" is unavailable (non-POSIX host); caller treats null as unknown.
        return null;
    }
}
/**
 * Blocks until the given process exits, retrying through interrupts and
 * restoring the calling thread's interrupt flag afterwards if any occurred.
 */
static void safeWaitFor(Process process) {
    boolean interrupted = false;
    try {
        while (true) {
            try {
                process.waitFor();
                return;
            } catch (InterruptedException e) {
                // Remember the interrupt; we re-assert it once the wait ends.
                interrupted = true;
            }
        }
    } finally {
        if (interrupted) {
            Thread.currentThread().interrupt();
        }
    }
}
/**
 * Returns the no-PIE linker flag supported by the local C compiler
 * ("-no-pie" first, then "-nopie" as a fallback), or "" if neither works.
 */
private static String detectNoPIE() {
    String flag = testGCCArgument("-no-pie");
    if (flag.length() == 0) {
        flag = testGCCArgument("-nopie");
    }
    return flag;
}
/**
 * Probes whether the system C compiler ({@code cc}) accepts the given flag by
 * running a no-op preprocessor invocation with it.
 *
 * <p>Fix: the original empty multi-catch silently swallowed
 * {@link InterruptedException}; the interrupt status is now restored.
 *
 * @param argument the compiler flag to probe, e.g. "-no-pie"
 * @return the flag itself if cc accepts it, or "" if it is rejected or cc
 *         cannot be run at all
 */
private static String testGCCArgument(String argument) {
    try {
        Process gcc = new ProcessBuilder("cc", "-v", "-E", argument, "-").start();
        gcc.getOutputStream().close();
        if (gcc.waitFor() == 0) {
            return argument;
        }
    } catch (IOException ignored) {
        // cc is not installed or not runnable: treat the flag as unsupported.
    } catch (InterruptedException e) {
        // Preserve the thread's interrupt status instead of discarding it.
        Thread.currentThread().interrupt();
    }
    return "";
}
@Override
public String getConfigPropertyName() {
return "native-image";
}
@Override
public PropertiesHandler<NativeImagePhase> getPropertiesHandler() {
return new PropertiesHandler<NativeImagePhase>() {
@Override
public NativeImagePhase getTarget() {
return NativeImagePhase.this;
}
@Override
public boolean set(NativeImagePhase t, PropertyContext ctx) {
final String value = ctx.getValue();
switch (ctx.getRelativeName()) {
case "output":
t.setOutputDir(Paths.get(value));
break;
case "report-errors-at-runtime":
t.setReportErrorsAtRuntime(Boolean.parseBoolean(value));
break;
case "debug-symbols":
t.setDebugSymbols(Boolean.parseBoolean(value));
break;
case "debug-build-process":
t.setDebugBuildProcess(Boolean.parseBoolean(value));
break;
case "cleanup-server":
t.setCleanupServer(Boolean.parseBoolean(value));
break;
case "enable-http-url-handler":
t.setEnableHttpUrlHandler(Boolean.parseBoolean(value));
break;
case "enable-https-url-handler":
t.setEnableHttpsUrlHandler(Boolean.parseBoolean(value));
break;
case "enable-all-security-services":
t.setEnableAllSecurityServices(Boolean.parseBoolean(value));
break;
case "enable-retained-heap-reporting":
t.setEnableRetainedHeapReporting(Boolean.parseBoolean(value));
break;
case "enable-code-size-reporting":
t.setEnableCodeSizeReporting(Boolean.parseBoolean(value));
break;
case "enable-isolates":
t.setEnableIsolates(Boolean.parseBoolean(value));
break;
case "graalvm-home":
t.setGraalvmHome(value);
break;
case "enable-server":
t.setEnableServer(Boolean.parseBoolean(value));
break;
case "enable-jni":
t.setEnableJni(Boolean.parseBoolean(value));
break;
case "auto-service-loader-registration":
t.setAutoServiceLoaderRegistration(Boolean.parseBoolean(value));
break;
case "dump-proxies":
t.setDumpProxies(Boolean.parseBoolean(value));
break;
case "native-image-xmx":
t.setNativeImageXmx(value);
break;
case "docker-build":
t.setDockerBuild(value);
break;
case "enable-vm-inspection":
t.setEnableVMInspection(Boolean.parseBoolean(value));
break;
case "full-stack-traces":
t.setFullStackTraces(Boolean.parseBoolean(value));
break;
case "disable-reports":
t.setDisableReports(Boolean.parseBoolean(value));
break;
case "additional-build-args":
t.setAdditionalBuildArgs(Arrays.asList(value.split(",")));
break;
default:
return false;
}
return true;
}
};
}
} |
it will be used only with Write which already does the PDone.in | public boolean start() throws IOException {
restClient = source.spec.getConnectionConfiguration().createClient();
String query = source.spec.getQuery() != null ? source.spec.getQuery().get() : null;
if (query == null) {
query = "{\"query\": { \"match_all\": {} }}";
}
if ((source.backendVersion >= 5) && source.numSlices != null && source.numSlices > 1) {
String sliceQuery =
String.format("\"slice\": {\"id\": %s,\"max\": %s}", source.sliceId, source.numSlices);
query = query.replaceFirst("\\{", "{" + sliceQuery + ",");
}
String endPoint =
String.format(
"/%s/%s/_search",
source.spec.getConnectionConfiguration().getIndex(),
source.spec.getConnectionConfiguration().getType());
Map<String, String> params = new HashMap<>();
params.put("scroll", source.spec.getScrollKeepalive());
if (source.backendVersion == 2) {
params.put("size", String.valueOf(source.spec.getBatchSize()));
if (source.shardPreference != null) {
params.put("preference", "_shards:" + source.shardPreference);
}
}
HttpEntity queryEntity = new NStringEntity(query, ContentType.APPLICATION_JSON);
Request request = new Request("GET", endPoint);
request.addParameters(params);
request.setEntity(queryEntity);
Response response = restClient.performRequest(request);
JsonNode searchResult = parseResponse(response.getEntity());
updateScrollId(searchResult);
return readNextBatchAndReturnFirstDocument(searchResult);
}
/** Caches the scroll cursor id carried by the last search/scroll response. */
private void updateScrollId(JsonNode result) {
    this.scrollId = result.path("_scroll_id").asText();
}
/**
 * Advances to the next buffered document, or fetches the next scroll page
 * from Elasticsearch once the current batch is exhausted.
 *
 * @return true if a document is available via {@code getCurrent()}
 */
@Override
public boolean advance() throws IOException {
    if (batchIterator.hasNext()) {
        current = batchIterator.next();
        return true;
    }
    // Batch exhausted: pull the next page of results for our scroll cursor.
    String requestBody =
        String.format(
            "{\"scroll\" : \"%s\",\"scroll_id\" : \"%s\"}",
            source.spec.getScrollKeepalive(), scrollId);
    HttpEntity scrollEntity = new NStringEntity(requestBody, ContentType.APPLICATION_JSON);
    Request scrollRequest = new Request("GET", "/_search/scroll");
    scrollRequest.addParameters(Collections.emptyMap());
    scrollRequest.setEntity(scrollEntity);
    Response response = restClient.performRequest(scrollRequest);
    JsonNode searchResult = parseResponse(response.getEntity());
    updateScrollId(searchResult);
    return readNextBatchAndReturnFirstDocument(searchResult);
}
/**
 * Buffers the documents of the given search response and positions the
 * reader on the first one.
 *
 * @return false (clearing reader state) when the response carries no hits
 */
private boolean readNextBatchAndReturnFirstDocument(JsonNode searchResult) {
    JsonNode hits = searchResult.path("hits").path("hits");
    if (hits.size() == 0) {
        current = null;
        batchIterator = null;
        return false;
    }
    boolean withMetadata = source.spec.isWithMetadata();
    List<String> batch = new ArrayList<>();
    for (JsonNode hit : hits) {
        // With metadata we keep the whole hit envelope; otherwise only _source.
        batch.add(withMetadata ? hit.toString() : hit.path("_source").toString());
    }
    batchIterator = batch.listIterator();
    current = batchIterator.next();
    return true;
}
/**
 * Returns the document the reader is currently positioned on.
 *
 * @throws NoSuchElementException if no document is available
 */
@Override
public String getCurrent() throws NoSuchElementException {
    String document = current;
    if (document == null) {
        throw new NoSuchElementException();
    }
    return document;
}
/**
 * Releases the server-side scroll context, then closes the REST client —
 * the client is closed in a finally block even if the DELETE fails.
 *
 * NOTE(review): if start() failed before a scroll id was obtained, scrollId
 * is null and the request body carries a literal "null" id — presumably the
 * server just ignores an unknown id; confirm.
 */
@Override
public void close() throws IOException {
    String requestBody = String.format("{\"scroll_id\" : [\"%s\"]}", scrollId);
    HttpEntity entity = new NStringEntity(requestBody, ContentType.APPLICATION_JSON);
    try {
        Request request = new Request("DELETE", "/_search/scroll");
        request.addParameters(Collections.emptyMap());
        request.setEntity(entity);
        restClient.performRequest(request);
    } finally {
        // Close the client regardless of whether the scroll cleanup succeeded.
        if (restClient != null) {
            restClient.close();
        }
    }
}
/** Returns the bounded source this reader was created from. */
@Override
public BoundedSource<String> getCurrentSource() {
    return source;
}
}
/**
* A POJO encapsulating a configuration for retry behavior when issuing requests to ES. A retry
* will be attempted until the maxAttempts or maxDuration is exceeded, whichever comes first, for
* 429 TOO_MANY_REQUESTS error.
*/
@AutoValue
public abstract static class RetryConfiguration implements Serializable {
@VisibleForTesting
static final RetryPredicate DEFAULT_RETRY_PREDICATE = new DefaultRetryPredicate();
abstract int getMaxAttempts();
abstract Duration getMaxDuration();
abstract RetryPredicate getRetryPredicate();
abstract Builder builder();
@AutoValue.Builder
abstract static class Builder {
abstract ElasticsearchIO.RetryConfiguration.Builder setMaxAttempts(int maxAttempts);
abstract ElasticsearchIO.RetryConfiguration.Builder setMaxDuration(Duration maxDuration);
abstract ElasticsearchIO.RetryConfiguration.Builder setRetryPredicate(
RetryPredicate retryPredicate);
abstract ElasticsearchIO.RetryConfiguration build();
}
/**
* Creates RetryConfiguration for {@link ElasticsearchIO} with provided maxAttempts,
* maxDurations and exponential backoff based retries.
*
* @param maxAttempts max number of attempts.
* @param maxDuration maximum duration for retries.
* @return {@link RetryConfiguration} object with provided settings.
*/
public static RetryConfiguration create(int maxAttempts, Duration maxDuration) {
checkArgument(maxAttempts > 0, "maxAttempts must be greater than 0");
checkArgument(
maxDuration != null && maxDuration.isLongerThan(Duration.ZERO),
"maxDuration must be greater than 0");
return new AutoValue_ElasticsearchIO_RetryConfiguration.Builder()
.setMaxAttempts(maxAttempts)
.setMaxDuration(maxDuration)
.setRetryPredicate(DEFAULT_RETRY_PREDICATE)
.build();
}
@VisibleForTesting
RetryConfiguration withRetryPredicate(RetryPredicate predicate) {
checkArgument(predicate != null, "predicate must be provided");
return builder().setRetryPredicate(predicate).build();
}
/**
 * An interface used to control if we retry the Elasticsearch call when a {@link Response} is
 * obtained. If {@link RetryPredicate#test} returns true, {@link Write} tries to resend
 * the requests to the Elasticsearch server if the {@link RetryConfiguration} permits it.
 */
@FunctionalInterface
interface RetryPredicate extends Predicate<HttpEntity>, Serializable {}
/**
* This is the default predicate used to test if a failed ES operation should be retried. A
* retry will be attempted until the maxAttempts or maxDuration is exceeded, whichever comes
* first, for TOO_MANY_REQUESTS(429) error.
*/
@VisibleForTesting
static class DefaultRetryPredicate implements RetryPredicate {
private int errorCode;
DefaultRetryPredicate(int code) {
this.errorCode = code;
}
DefaultRetryPredicate() {
this(429);
}
/** Returns true if the response has the error code for any mutation. */
private static boolean errorCodePresent(HttpEntity responseEntity, int errorCode) {
try {
JsonNode json = parseResponse(responseEntity);
if (json.path("errors").asBoolean()) {
for (JsonNode item : json.path("items")) {
if (item.findValue("status").asInt() == errorCode) {
return true;
}
}
}
} catch (IOException e) {
LOG.warn("Could not extract error codes from responseEntity {}", responseEntity);
}
return false;
}
@Override
public boolean test(HttpEntity responseEntity) {
return errorCodePresent(responseEntity, errorCode);
}
}
}
/** A {@link PTransform} converting docs to their Bulk API counterparts. */
@AutoValue
public abstract static class DocToBulk
extends PTransform<PCollection<String>, PCollection<String>> {
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
private static final int DEFAULT_RETRY_ON_CONFLICT = 5;
static {
SimpleModule module = new SimpleModule();
module.addSerializer(DocumentMetadata.class, new DocumentMetadataSerializer());
OBJECT_MAPPER.registerModule(module);
}
abstract @Nullable ConnectionConfiguration getConnectionConfiguration();
abstract Write.@Nullable FieldValueExtractFn getIdFn();
abstract Write.@Nullable FieldValueExtractFn getIndexFn();
abstract Write.@Nullable FieldValueExtractFn getRoutingFn();
abstract Write.@Nullable FieldValueExtractFn getTypeFn();
abstract Write.@Nullable FieldValueExtractFn getDocVersionFn();
abstract @Nullable String getDocVersionType();
abstract @Nullable String getUpsertScript();
abstract @Nullable Boolean getUsePartialUpdate();
abstract Write.@Nullable BooleanFieldValueExtractFn getIsDeleteFn();
abstract @Nullable Integer getBackendVersion();
abstract Builder builder();
@AutoValue.Builder
abstract static class Builder {
abstract Builder setConnectionConfiguration(ConnectionConfiguration connectionConfiguration);
abstract Builder setIdFn(Write.FieldValueExtractFn idFunction);
abstract Builder setIndexFn(Write.FieldValueExtractFn indexFn);
abstract Builder setRoutingFn(Write.FieldValueExtractFn routingFunction);
abstract Builder setTypeFn(Write.FieldValueExtractFn typeFn);
abstract Builder setDocVersionFn(Write.FieldValueExtractFn docVersionFn);
abstract Builder setDocVersionType(String docVersionType);
abstract Builder setIsDeleteFn(Write.BooleanFieldValueExtractFn isDeleteFn);
abstract Builder setUsePartialUpdate(Boolean usePartialUpdate);
abstract Builder setUpsertScript(String source);
abstract Builder setBackendVersion(Integer assumedBackendVersion);
abstract DocToBulk build();
}
/**
* Provide the Elasticsearch connection configuration object. Only required if
* withBackendVersion was not used i.e. getBackendVersion() returns null.
*
* @param connectionConfiguration the Elasticsearch {@link ConnectionConfiguration} object
* @return the {@link DocToBulk} with connection configuration set
*/
public DocToBulk withConnectionConfiguration(ConnectionConfiguration connectionConfiguration) {
checkArgument(connectionConfiguration != null, "connectionConfiguration can not be null");
return builder().setConnectionConfiguration(connectionConfiguration).build();
}
/**
* Provide a function to extract the id from the document. This id will be used as the document
* id in Elasticsearch. Should the function throw an Exception then the batch will fail and the
* exception propagated.
*
* @param idFn to extract the document ID
* @return the {@link DocToBulk} with the function set
*/
public DocToBulk withIdFn(Write.FieldValueExtractFn idFn) {
checkArgument(idFn != null, "idFn must not be null");
return builder().setIdFn(idFn).build();
}
/**
 * Supplies a function that derives the destination index from each document, enabling dynamic
 * document routing. Any exception thrown by the function fails the batch and is propagated.
 *
 * @param indexFn to extract the destination index from
 * @return a {@link DocToBulk} carrying the index-extraction function
 */
public DocToBulk withIndexFn(Write.FieldValueExtractFn indexFn) {
  checkArgument(indexFn != null, "indexFn must not be null");
  Builder b = builder();
  b.setIndexFn(indexFn);
  return b.build();
}
/**
 * Supplies a function that derives the routing value from each document, enabling dynamic
 * document routing. Any exception thrown by the function fails the batch and is propagated.
 *
 * @param routingFn to extract the routing value from
 * @return a {@link DocToBulk} carrying the routing-extraction function
 */
public DocToBulk withRoutingFn(Write.FieldValueExtractFn routingFn) {
  checkArgument(routingFn != null, "routingFn must not be null");
  Builder b = builder();
  b.setRoutingFn(routingFn);
  return b.build();
}
/**
 * Provide a function to extract the target type from the document allowing for dynamic document
 * routing. Should the function throw an Exception then the batch will fail and the exception
 * propagated. Users are encouraged to consider carefully if multiple types are a sensible model
 * as discussed in Elastic's index-vs-type guidance (the original link here was truncated;
 * presumably https://www.elastic.co/blog/index-vs-type — TODO confirm against upstream).
 *
 * @param typeFn to extract the destination type from
 * @return the {@link DocToBulk} with the function set
 */
public DocToBulk withTypeFn(Write.FieldValueExtractFn typeFn) {
  checkArgument(typeFn != null, "typeFn must not be null");
  return builder().setTypeFn(typeFn).build();
}
/**
 * Controls whether partial updates (true) or inserts (false, the default) are issued to
 * Elasticsearch.
 *
 * @param usePartialUpdate set to true to issue partial updates
 * @return a {@link DocToBulk} carrying the partial-update setting
 */
public DocToBulk withUsePartialUpdate(boolean usePartialUpdate) {
  Builder updated = builder();
  updated.setUsePartialUpdate(usePartialUpdate);
  return updated.build();
}
/**
 * Whether to use scripted updates and what script to use.
 *
 * <p>Note: enabling a script disables partial updates (usePartialUpdate is reset to false);
 * the two write modes are mutually exclusive.
 *
 * @param source set to the value of the script source, painless lang
 * @return the {@link DocToBulk} with the scripted updates set
 */
public DocToBulk withUpsertScript(String source) {
  return builder().setUsePartialUpdate(false).setUpsertScript(source).build();
}
/**
 * Supplies a function that derives the document version from each document; the extracted value
 * is sent as the document version to Elasticsearch. Any exception thrown by the function fails
 * the batch and is propagated. Incompatible with update operations — use only together with
 * withUsePartialUpdate(false).
 *
 * @param docVersionFn to extract the document version
 * @return a {@link DocToBulk} carrying the version-extraction function
 */
public DocToBulk withDocVersionFn(Write.FieldValueExtractFn docVersionFn) {
  checkArgument(docVersionFn != null, "docVersionFn must not be null");
  Builder updated = builder();
  updated.setDocVersionFn(docVersionFn);
  return updated.build();
}
/**
 * Supplies a function that decides, per document, whether the bulk operation is an upsert or a
 * delete. When using withIsDeleteFn the document id extraction must also be configured via
 * withIdFn, otherwise an IllegalArgumentException is thrown (see expand()). Any exception
 * thrown by the function fails the batch and is propagated.
 *
 * @param isDeleteFn returns true when the specific document must be deleted
 * @return a {@link DocToBulk} carrying the delete-decision function
 */
public DocToBulk withIsDeleteFn(Write.BooleanFieldValueExtractFn isDeleteFn) {
  checkArgument(isDeleteFn != null, "deleteFn is required");
  Builder updated = builder();
  updated.setIsDeleteFn(isDeleteFn);
  return updated.build();
}
/**
 * Provide a version type for the documents. The value is serialized as the {@code version_type}
 * field of each bulk action's metadata (see DocumentMetadataSerializer) and must be one of the
 * entries in {@code VERSION_TYPES}, otherwise an IllegalArgumentException is thrown. Typically
 * used together with {@link DocToBulk#withDocVersionFn} for externally-managed versions.
 *
 * @param docVersionType the version type to use; must be one of {@code VERSION_TYPES}
 * @return the {@link DocToBulk} with the doc version type set
 */
public DocToBulk withDocVersionType(String docVersionType) {
  checkArgument(
      VERSION_TYPES.contains(docVersionType),
      "docVersionType must be one of " + "%s",
      String.join(", ", VERSION_TYPES));
  return builder().setDocVersionType(docVersionType).build();
}
/**
 * Use to set explicitly which version of Elasticsearch the destination cluster is running.
 * Providing this hint means there is no need for a connection configuration purely to discover
 * the cluster version at pipeline construction time.
 *
 * @param backendVersion the major version number of the version of Elasticsearch being run in
 *     the cluster where documents will be indexed.
 * @return the {@link DocToBulk} with the Elasticsearch major version number set
 */
public DocToBulk withBackendVersion(int backendVersion) {
  // Bug fix: the failure message previously listed VERSION_TYPES (document version-type
  // strings) instead of the set of supported cluster versions, producing a misleading error.
  checkArgument(
      VALID_CLUSTER_VERSIONS.contains(backendVersion),
      "Backend version may only be one of %s",
      VALID_CLUSTER_VERSIONS);
  return builder().setBackendVersion(backendVersion).build();
}
/**
 * Validates the configuration and expands to a per-element serialization step.
 *
 * <p>Either an explicit backend version or a connection configuration (used to discover the
 * version) must be present; deletes are addressed by id, so an id extractor is mandatory when
 * a delete function is configured.
 */
@Override
public PCollection<String> expand(PCollection<String> docs) {
  checkState(
      (getBackendVersion() != null || getConnectionConfiguration() != null),
      "withBackendVersion() or withConnectionConfiguration() is required");
  checkArgument(
      getIsDeleteFn() == null || getIdFn() != null,
      "Id needs to be specified by withIdFn for delete operation");
  return docs.apply(ParDo.of(new DocToBulkFn(this)));
}
/**
 * Immutable value object holding the per-document addressing/versioning fields that are
 * serialized into the metadata portion of a Bulk API action line (see
 * DocumentMetadataSerializer). Any field may be null, in which case it is omitted from the
 * serialized output.
 */
private static class DocumentMetadata implements Serializable {
  final String index;
  final String type;
  final String id;
  // Only set for update-style operations; serialized under a version-dependent key.
  final Integer retryOnConflict;
  final String routing;
  // Major version of the target cluster; drives key naming during serialization, and is
  // itself never serialized.
  final Integer backendVersion;
  final String version;
  final String versionType;
  DocumentMetadata(
      String index,
      String type,
      String id,
      Integer retryOnConflict,
      String routing,
      Integer backendVersion,
      String version,
      String versionType) {
    this.index = index;
    this.id = id;
    this.type = type;
    this.retryOnConflict = retryOnConflict;
    this.routing = routing;
    this.backendVersion = backendVersion;
    this.version = version;
    this.versionType = versionType;
  }
}
/**
 * Jackson serializer rendering a {@link DocumentMetadata} as the JSON metadata object of a Bulk
 * API action line. Null fields are omitted entirely rather than written as JSON null.
 */
private static class DocumentMetadataSerializer extends StdSerializer<DocumentMetadata> {
  private DocumentMetadataSerializer() {
    super(DocumentMetadata.class);
  }
  @Override
  public void serialize(DocumentMetadata value, JsonGenerator gen, SerializerProvider provider)
      throws IOException {
    gen.writeStartObject();
    if (value.index != null) {
      gen.writeStringField("_index", value.index);
    }
    if (value.type != null) {
      gen.writeStringField("_type", value.type);
    }
    if (value.id != null) {
      gen.writeStringField("_id", value.id);
    }
    if (value.routing != null) {
      gen.writeStringField("routing", value.routing);
    }
    // The retry-on-conflict key differs by cluster version: clusters <= v6 use the
    // underscore-prefixed name, clusters >= v7 use the plain name.
    if (value.retryOnConflict != null && value.backendVersion <= 6) {
      gen.writeNumberField("_retry_on_conflict", value.retryOnConflict);
    }
    if (value.retryOnConflict != null && value.backendVersion >= 7) {
      gen.writeNumberField("retry_on_conflict", value.retryOnConflict);
    }
    if (value.version != null) {
      gen.writeStringField("version", value.version);
    }
    if (value.versionType != null) {
      gen.writeStringField("version_type", value.versionType);
    }
    gen.writeEndObject();
  }
}
/**
 * Renders one input document as a Bulk API entity: the action metadata line plus, for
 * non-delete actions, the document payload. The action kind depends on the configured write
 * mode: delete, partial update ("doc_as_upsert"), scripted upsert, or plain index.
 *
 * @param spec the {@link DocToBulk} configuration in effect
 * @param document the JSON document to write
 * @param backendVersion major version of the destination cluster (affects metadata keys)
 * @return the newline-terminated Bulk API entity
 * @throws IOException if the document cannot be parsed as JSON
 */
@VisibleForTesting
static String createBulkApiEntity(DocToBulk spec, String document, int backendVersion)
    throws IOException {
  String documentMetadata = "{}";
  boolean isDelete = false;
  // Parsing the document is only needed when some extraction function will be applied to it.
  if (spec.getIndexFn() != null || spec.getTypeFn() != null || spec.getIdFn() != null) {
    JsonNode parsedDocument = OBJECT_MAPPER.readTree(document);
    documentMetadata = getDocumentMetadata(spec, parsedDocument, backendVersion);
    if (spec.getIsDeleteFn() != null) {
      isDelete = spec.getIsDeleteFn().apply(parsedDocument);
    }
  }
  if (isDelete) {
    // Delete actions carry no document payload, only the metadata line.
    return String.format("{ \"delete\" : %s }%n", documentMetadata);
  } else {
    if (spec.getUsePartialUpdate()) {
      return String.format(
          "{ \"update\" : %s }%n{ \"doc\" : %s, " + "\"doc_as_upsert\" : true }%n",
          documentMetadata, document);
    } else if (spec.getUpsertScript() != null) {
      // The document doubles as both the script params and the upsert body.
      return String.format(
          "{ \"update\" : %s }%n{ \"script\" : {\"source\": \"%s\", "
              + "\"params\": %s}, \"upsert\" : %s }%n",
          documentMetadata, spec.getUpsertScript(), document, document);
    } else {
      return String.format("{ \"index\" : %s }%n%s%n", documentMetadata, document);
    }
  }
}
/**
 * Lower-cases {@code input} (Elasticsearch index names must be lower case), passing null
 * through unchanged.
 *
 * <p>Uses {@link java.util.Locale#ROOT} so the result does not depend on the JVM's default
 * locale — e.g. under the Turkish locale {@code "I".toLowerCase()} yields a dotless 'ı',
 * which would corrupt index names.
 *
 * @param input the string to lower-case, possibly null
 * @return the lower-cased string, or null when input was null
 */
private static String lowerCaseOrNull(String input) {
  return input == null ? null : input.toLowerCase(java.util.Locale.ROOT);
}
/**
 * Extracts the components that comprise the document address from the document using the {@link
 * Write.FieldValueExtractFn} configured. This allows any or all of the index, type and document
 * id to be controlled on a per document basis. If none are provided then an empty default of
 * {@code {}} is returned. Sanitization of the index is performed, automatically lower-casing
 * the value as required by Elasticsearch.
 *
 * @param spec the {@link DocToBulk} configuration supplying the extraction functions
 * @param parsedDocument the json from which the index, type and id may be extracted
 * @param backendVersion major version of the destination cluster, recorded for serialization
 * @return the document address as JSON or the default
 * @throws IOException if the metadata cannot be serialized as JSON
 */
private static String getDocumentMetadata(
    DocToBulk spec, JsonNode parsedDocument, int backendVersion) throws IOException {
  DocumentMetadata metadata =
      new DocumentMetadata(
          spec.getIndexFn() != null
              ? lowerCaseOrNull(spec.getIndexFn().apply(parsedDocument))
              : null,
          spec.getTypeFn() != null ? spec.getTypeFn().apply(parsedDocument) : null,
          spec.getIdFn() != null ? spec.getIdFn().apply(parsedDocument) : null,
          // retry-on-conflict is only meaningful for update-style actions (partial update or
          // scripted upsert); plain index/delete actions leave it unset.
          (spec.getUsePartialUpdate()
              || (spec.getUpsertScript() != null && !spec.getUpsertScript().isEmpty()))
              ? DEFAULT_RETRY_ON_CONFLICT
              : null,
          spec.getRoutingFn() != null ? spec.getRoutingFn().apply(parsedDocument) : null,
          backendVersion,
          spec.getDocVersionFn() != null ? spec.getDocVersionFn().apply(parsedDocument) : null,
          spec.getDocVersionType());
  return OBJECT_MAPPER.writeValueAsString(metadata);
}
/** {@link DoFn} for the {@link DocToBulk} transform: maps each document to a Bulk API entity. */
@VisibleForTesting
static class DocToBulkFn extends DoFn<String, String> {
  private final DocToBulk spec;
  // Resolved once in setup(): the user-declared version when present, otherwise discovered by
  // querying the cluster through the connection configuration.
  private int backendVersion;
  public DocToBulkFn(DocToBulk spec) {
    this.spec = spec;
  }
  @Setup
  public void setup() throws IOException {
    ConnectionConfiguration connectionConfiguration = spec.getConnectionConfiguration();
    if (spec.getBackendVersion() == null) {
      backendVersion = ElasticsearchIO.getBackendVersion(connectionConfiguration);
    } else {
      backendVersion = spec.getBackendVersion();
    }
  }
  @ProcessElement
  public void processElement(ProcessContext c) throws IOException {
    c.output(createBulkApiEntity(spec, c.element(), backendVersion));
  }
}
}
/**
* A {@link PTransform} convenience wrapper for doing both document to bulk API serialization as
* well as batching those Bulk API entities and writing them to an Elasticsearch cluster. This
* class is effectively a thin proxy for DocToBulk->BulkIO all-in-one for convenience and backward
* compatibility.
*/
@AutoValue
public abstract static class Write extends PTransform<PCollection<String>, PDone> {
public interface FieldValueExtractFn extends SerializableFunction<JsonNode, String> {}
public interface BooleanFieldValueExtractFn extends SerializableFunction<JsonNode, Boolean> {}
public abstract DocToBulk getDocToBulk();
public abstract BulkIO getBulkIO();
abstract Builder writeBuilder();
@AutoValue.Builder
abstract static class Builder {
abstract Builder setDocToBulk(DocToBulk docToBulk);
abstract Builder setBulkIO(BulkIO bulkIO);
abstract Write build();
}
/** Refer to {@link DocToBulk
public Write withIdFn(FieldValueExtractFn idFn) {
return writeBuilder().setDocToBulk(getDocToBulk().withIdFn(idFn)).build();
}
/** Refer to {@link DocToBulk
public Write withIndexFn(FieldValueExtractFn indexFn) {
return writeBuilder().setDocToBulk(getDocToBulk().withIndexFn(indexFn)).build();
}
/** Refer to {@link DocToBulk
public Write withRoutingFn(FieldValueExtractFn routingFn) {
return writeBuilder().setDocToBulk(getDocToBulk().withRoutingFn(routingFn)).build();
}
/** Refer to {@link DocToBulk
public Write withTypeFn(FieldValueExtractFn typeFn) {
return writeBuilder().setDocToBulk(getDocToBulk().withTypeFn(typeFn)).build();
}
/** Refer to {@link DocToBulk
public Write withDocVersionFn(FieldValueExtractFn docVersionFn) {
return writeBuilder().setDocToBulk(getDocToBulk().withDocVersionFn(docVersionFn)).build();
}
/** Refer to {@link DocToBulk
public Write withDocVersionType(String docVersionType) {
return writeBuilder().setDocToBulk(getDocToBulk().withDocVersionType(docVersionType)).build();
}
/** Refer to {@link DocToBulk
public Write withUsePartialUpdate(boolean usePartialUpdate) {
return writeBuilder()
.setDocToBulk(getDocToBulk().withUsePartialUpdate(usePartialUpdate))
.build();
}
/** Refer to {@link DocToBulk
public Write withUpsertScript(String source) {
return writeBuilder().setDocToBulk(getDocToBulk().withUpsertScript(source)).build();
}
/** Refer to {@link DocToBulk
public Write withBackendVersion(int backendVersion) {
return writeBuilder().setDocToBulk(getDocToBulk().withBackendVersion(backendVersion)).build();
}
/** Refer to {@link DocToBulk
public Write withIsDeleteFn(Write.BooleanFieldValueExtractFn isDeleteFn) {
return writeBuilder().setDocToBulk(getDocToBulk().withIsDeleteFn(isDeleteFn)).build();
}
/** Refer to {@link BulkIO
public Write withConnectionConfiguration(ConnectionConfiguration connectionConfiguration) {
checkArgument(connectionConfiguration != null, "connectionConfiguration can not be null");
return writeBuilder()
.setDocToBulk(getDocToBulk().withConnectionConfiguration(connectionConfiguration))
.setBulkIO(getBulkIO().withConnectionConfiguration(connectionConfiguration))
.build();
}
/** Refer to {@link BulkIO
public Write withMaxBatchSize(long batchSize) {
return writeBuilder().setBulkIO(getBulkIO().withMaxBatchSize(batchSize)).build();
}
/** Refer to {@link BulkIO
public Write withMaxBatchSizeBytes(long batchSizeBytes) {
return writeBuilder().setBulkIO(getBulkIO().withMaxBatchSizeBytes(batchSizeBytes)).build();
}
/** Refer to {@link BulkIO
public Write withRetryConfiguration(RetryConfiguration retryConfiguration) {
return writeBuilder()
.setBulkIO(getBulkIO().withRetryConfiguration(retryConfiguration))
.build();
}
/** Refer to {@link BulkIO
public Write withIgnoreVersionConflicts(boolean ignoreVersionConflicts) {
return writeBuilder()
.setBulkIO(getBulkIO().withIgnoreVersionConflicts(ignoreVersionConflicts))
.build();
}
/** Refer to {@link BulkIO
public Write withUseStatefulBatches(boolean useStatefulBatches) {
return writeBuilder()
.setBulkIO(getBulkIO().withUseStatefulBatches(useStatefulBatches))
.build();
}
/** Refer to {@link BulkIO
public Write withMaxBufferingDuration(Duration maxBufferingDuration) {
return writeBuilder()
.setBulkIO(getBulkIO().withMaxBufferingDuration(maxBufferingDuration))
.build();
}
/** Refer to {@link BulkIO
public Write withMaxParallelRquestsPerWindow(int maxParallelRquestsPerWindow) {
return writeBuilder()
.setBulkIO(getBulkIO().withMaxParallelRequestsPerWindow(maxParallelRquestsPerWindow))
.build();
}
/** Refer to {@link BulkIO
public Write withAllowableResponseErrors(@Nullable Set<String> allowableResponseErrors) {
if (allowableResponseErrors == null) {
allowableResponseErrors = new HashSet<>();
}
return writeBuilder()
.setBulkIO(getBulkIO().withAllowableResponseErrors(allowableResponseErrors))
.build();
}
@Override
public PDone expand(PCollection<String> input) {
input.apply(getDocToBulk()).apply(getBulkIO());
return PDone.in(input.getPipeline());
}
}
/** A {@link PTransform} writing data (pre-serialized Bulk API entities) to Elasticsearch. */
@AutoValue
public abstract static class BulkIO extends PTransform<PCollection<String>, PDone> {
  @VisibleForTesting
  static final String RETRY_ATTEMPT_LOG = "Error writing to Elasticsearch. Retry attempt[%d]";
  @VisibleForTesting
  static final String RETRY_FAILED_LOG =
      "Error writing to ES after %d attempt(s). No more attempts allowed";
  abstract @Nullable ConnectionConfiguration getConnectionConfiguration();
  abstract long getMaxBatchSize();
  abstract long getMaxBatchSizeBytes();
  abstract @Nullable Duration getMaxBufferingDuration();
  abstract boolean getUseStatefulBatches();
  abstract int getMaxParallelRequestsPerWindow();
  abstract @Nullable RetryConfiguration getRetryConfiguration();
  abstract @Nullable Set<String> getAllowedResponseErrors();
  abstract Builder builder();
  @AutoValue.Builder
  abstract static class Builder {
    abstract Builder setConnectionConfiguration(ConnectionConfiguration connectionConfiguration);
    abstract Builder setMaxBatchSize(long maxBatchSize);
    abstract Builder setMaxBatchSizeBytes(long maxBatchSizeBytes);
    abstract Builder setRetryConfiguration(RetryConfiguration retryConfiguration);
    abstract Builder setAllowedResponseErrors(Set<String> allowedResponseErrors);
    abstract Builder setMaxBufferingDuration(Duration maxBufferingDuration);
    abstract Builder setUseStatefulBatches(boolean useStatefulBatches);
    abstract Builder setMaxParallelRequestsPerWindow(int maxParallelRequestsPerWindow);
    abstract BulkIO build();
  }
  /**
   * Provide the Elasticsearch connection configuration object.
   *
   * @param connectionConfiguration the Elasticsearch {@link ConnectionConfiguration} object
   * @return the {@link BulkIO} with connection configuration set
   */
  public BulkIO withConnectionConfiguration(ConnectionConfiguration connectionConfiguration) {
    checkArgument(connectionConfiguration != null, "connectionConfiguration can not be null");
    return builder().setConnectionConfiguration(connectionConfiguration).build();
  }
  /**
   * Provide a maximum size in number of documents for the batch, see the Bulk API
   * (https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html).
   * Depending on the execution engine, size of bundles may vary; this sets the maximum size.
   * Change this if you need to have smaller ElasticSearch bulks.
   *
   * @param batchSize maximum batch size in number of documents
   * @return the {@link BulkIO} with connection batch size set
   */
  public BulkIO withMaxBatchSize(long batchSize) {
    checkArgument(batchSize > 0, "batchSize must be > 0, but was %s", batchSize);
    return builder().setMaxBatchSize(batchSize).build();
  }
  /**
   * Provide a maximum size in bytes for the batch, see the Bulk API
   * (https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html).
   * Depending on the execution engine, size of bundles may vary; this sets the maximum size.
   * Change this if you need to have smaller ElasticSearch bulks.
   *
   * @param batchSizeBytes maximum batch size in bytes
   * @return the {@link BulkIO} with connection batch size in bytes set
   */
  public BulkIO withMaxBatchSizeBytes(long batchSizeBytes) {
    checkArgument(batchSizeBytes > 0, "batchSizeBytes must be > 0, but was %s", batchSizeBytes);
    return builder().setMaxBatchSizeBytes(batchSizeBytes).build();
  }
  /**
   * Provides configuration to retry a failed batch call to Elasticsearch. A batch is considered
   * as failed if the underlying {@link RestClient} surfaces 429 HTTP status code as error for one
   * or more of the items in the {@link Response}. Users should consider that retrying might
   * compound the underlying problem which caused the initial failure. Users should also be aware
   * that once retrying is exhausted the error is surfaced to the runner which <em>may</em> then
   * opt to retry the current bundle in entirety or abort if the max number of retries of the
   * runner is completed. Retrying uses an exponential backoff algorithm, with minimum backoff of
   * 5 seconds and then surfacing the error once the maximum number of retries or maximum
   * configuration duration is exceeded.
   *
   * <p>Example use:
   *
   * <pre>{@code
   * ElasticsearchIO.write()
   *   .withRetryConfiguration(ElasticsearchIO.RetryConfiguration.create(10, Duration.standardMinutes(3))
   *   ...
   * }</pre>
   *
   * @param retryConfiguration the rules which govern the retry behavior
   * @return the {@link BulkIO} with retrying configured
   */
  public BulkIO withRetryConfiguration(RetryConfiguration retryConfiguration) {
    checkArgument(retryConfiguration != null, "retryConfiguration is required");
    return builder().setRetryConfiguration(retryConfiguration).build();
  }
  /**
   * Whether or not to suppress version conflict errors in a Bulk API response. This can be useful
   * if your use case involves using external version types.
   *
   * @param ignoreVersionConflicts true to suppress version conflicts, false to surface version
   *     conflict errors.
   * @return the {@link BulkIO} with version conflict handling configured
   */
  public BulkIO withIgnoreVersionConflicts(boolean ignoreVersionConflicts) {
    Set<String> allowedResponseErrors = getAllowedResponseErrors();
    if (allowedResponseErrors == null) {
      allowedResponseErrors = new HashSet<>();
    } else {
      // Bug fix: copy before mutating — the previous code added into the set held by this
      // (immutable) AutoValue instance, silently changing the original transform's config.
      allowedResponseErrors = new HashSet<>(allowedResponseErrors);
    }
    if (ignoreVersionConflicts) {
      allowedResponseErrors.add(VERSION_CONFLICT_ERROR);
    }
    return builder().setAllowedResponseErrors(allowedResponseErrors).build();
  }
  /**
   * Provide a set of textual error types which can be contained in Bulk API response
   * items[].error.type field. Any element in {@code allowableResponseErrorTypes} will suppress
   * errors of the same type in Bulk responses.
   *
   * <p>See the Bulk API response format in the Elasticsearch reference documentation.
   *
   * @param allowableResponseErrorTypes error types to tolerate in Bulk responses; null means none
   * @return the {@link BulkIO} with allowable response errors set
   */
  public BulkIO withAllowableResponseErrors(@Nullable Set<String> allowableResponseErrorTypes) {
    if (allowableResponseErrorTypes == null) {
      allowableResponseErrorTypes = new HashSet<>();
    }
    return builder().setAllowedResponseErrors(allowableResponseErrorTypes).build();
  }
  /**
   * If using {@link BulkIO#withUseStatefulBatches}, this can be used to set a maximum elapsed
   * time before buffered elements are emitted to Elasticsearch as a Bulk API request. If this
   * config is not set, Bulk requests will not be issued until {@link BulkIO#getMaxBatchSize}
   * number of documents have been buffered. This may result in higher latency in particular if
   * your max batch size is set to a large value and your pipeline input is low volume.
   *
   * @param maxBufferingDuration the maximum duration to wait before sending any buffered
   *     documents to Elasticsearch, regardless of maxBatchSize.
   * @return the {@link BulkIO} with maximum buffering duration set
   */
  public BulkIO withMaxBufferingDuration(Duration maxBufferingDuration) {
    LOG.warn(
        "Use of withMaxBufferingDuration requires withUseStatefulBatches(true). "
            + "Setting that automatically.");
    return builder()
        .setUseStatefulBatches(true)
        .setMaxBufferingDuration(maxBufferingDuration)
        .build();
  }
  /**
   * Whether or not to use Stateful Processing to ensure bulk requests have the desired number of
   * entities i.e. as close to the maxBatchSize as possible. By default without this feature
   * enabled, Bulk requests will not contain more than maxBatchSize entities, but the lower bound
   * of batch size is determined by Beam Runner bundle sizes, which may be as few as 1.
   *
   * @param useStatefulBatches true enables the use of Stateful Processing to ensure that batches
   *     are as close to the maxBatchSize as possible.
   * @return the {@link BulkIO} with Stateful Processing enabled or disabled
   */
  public BulkIO withUseStatefulBatches(boolean useStatefulBatches) {
    return builder().setUseStatefulBatches(useStatefulBatches).build();
  }
  /**
   * When using {@link BulkIO#withUseStatefulBatches}, states and therefore batches are
   * maintained per-key-per-window. If data is globally windowed and this configuration is set to
   * 1, there will only ever be 1 request in flight. Having only a single request in flight can
   * be beneficial for ensuring an Elasticsearch cluster is not overwhelmed by parallel requests,
   * but may not work for all use cases. If this number is less than the number of maximum
   * workers in your pipeline, the IO work may not be distributed across all workers.
   *
   * @param maxParallelRequestsPerWindow the maximum number of parallel bulk requests for a
   *     window of data
   * @return the {@link BulkIO} with maximum parallel bulk requests per window set
   */
  public BulkIO withMaxParallelRequestsPerWindow(int maxParallelRequestsPerWindow) {
    // Message fix: previous text read "must be positive a integer".
    checkArgument(
        maxParallelRequestsPerWindow > 0, "parameter value must be a positive integer");
    return builder().setMaxParallelRequestsPerWindow(maxParallelRequestsPerWindow).build();
  }
  @Override
  public PDone expand(PCollection<String> input) {
    ConnectionConfiguration connectionConfiguration = getConnectionConfiguration();
    checkState(connectionConfiguration != null, "withConnectionConfiguration() is required");
    if (getUseStatefulBatches()) {
      // Shard elements across at most maxParallelRequestsPerWindow keys, group them into
      // batches of up to maxBatchSize, then drop the sharding key before issuing bulk writes.
      GroupIntoBatches<Integer, String> groupIntoBatches =
          GroupIntoBatches.ofSize(getMaxBatchSize());
      if (getMaxBufferingDuration() != null) {
        groupIntoBatches = groupIntoBatches.withMaxBufferingDuration(getMaxBufferingDuration());
      }
      input
          .apply(ParDo.of(new AssignShardFn<>(getMaxParallelRequestsPerWindow())))
          .apply(groupIntoBatches)
          .apply(
              "Remove key no longer needed",
              MapElements.into(TypeDescriptors.iterables(TypeDescriptors.strings()))
                  .via(KV::getValue))
          .apply(ParDo.of(new BulkIOFn(this)));
    } else {
      // Without stateful batching, batch boundaries follow runner bundle boundaries.
      input
          .apply(
              "Make elements iterable",
              MapElements.into(TypeDescriptors.iterables(TypeDescriptors.strings()))
                  .via(Collections::singletonList))
          .apply(ParDo.of(new BulkIOFn(this)));
    }
    return PDone.in(input.getPipeline());
  }
  /** {@link DoFn} for the {@link BulkIO} transform: buffers entities and issues bulk requests. */
  @VisibleForTesting
  static class BulkIOFn extends DoFn<Iterable<String>, Void> {
    private static final Duration RETRY_INITIAL_BACKOFF = Duration.standardSeconds(5);
    private transient FluentBackoff retryBackoff;
    protected BulkIO spec;
    private transient RestClient restClient;
    // Entities buffered since the last flush, and their accumulated UTF-8 byte size.
    protected ArrayList<String> batch;
    long currentBatchSizeBytes;
    @VisibleForTesting
    BulkIOFn(BulkIO bulkSpec) {
      this.spec = bulkSpec;
    }
    @Setup
    public void setup() throws IOException {
      ConnectionConfiguration connectionConfiguration = spec.getConnectionConfiguration();
      restClient = connectionConfiguration.createClient();
      // Default: no retries unless a RetryConfiguration was supplied.
      retryBackoff =
          FluentBackoff.DEFAULT.withMaxRetries(0).withInitialBackoff(RETRY_INITIAL_BACKOFF);
      if (spec.getRetryConfiguration() != null) {
        retryBackoff =
            FluentBackoff.DEFAULT
                .withInitialBackoff(RETRY_INITIAL_BACKOFF)
                .withMaxRetries(spec.getRetryConfiguration().getMaxAttempts() - 1)
                .withMaxCumulativeBackoff(spec.getRetryConfiguration().getMaxDuration());
      }
    }
    @StartBundle
    public void startBundle(StartBundleContext context) {
      batch = new ArrayList<>();
      currentBatchSizeBytes = 0;
    }
    @FinishBundle
    public void finishBundle(FinishBundleContext context)
        throws IOException, InterruptedException {
      // Whatever remains buffered at bundle end must still be written.
      flushBatch();
    }
    @ProcessElement
    public void processElement(@Element @NonNull Iterable<String> bulkApiEntities)
        throws Exception {
      for (String bulkApiEntity : bulkApiEntities) {
        addAndMaybeFlush(bulkApiEntity);
      }
    }
    protected void addAndMaybeFlush(String bulkApiEntity)
        throws IOException, InterruptedException {
      batch.add(bulkApiEntity);
      currentBatchSizeBytes += bulkApiEntity.getBytes(StandardCharsets.UTF_8).length;
      // Flush once either the document-count or byte-size threshold is reached.
      if (batch.size() >= spec.getMaxBatchSize()
          || currentBatchSizeBytes >= spec.getMaxBatchSizeBytes()) {
        flushBatch();
      }
    }
    private void flushBatch() throws IOException, InterruptedException {
      if (batch.isEmpty()) {
        return;
      }
      LOG.info(
          "ElasticsearchIO batch size: {}, batch size bytes: {}",
          batch.size(),
          currentBatchSizeBytes);
      StringBuilder bulkRequest = new StringBuilder();
      for (String json : batch) {
        bulkRequest.append(json);
      }
      batch.clear();
      currentBatchSizeBytes = 0L;
      Response response = null;
      HttpEntity responseEntity = null;
      String endPoint = spec.getConnectionConfiguration().getBulkEndPoint();
      HttpEntity requestBody =
          new NStringEntity(bulkRequest.toString(), ContentType.APPLICATION_JSON);
      try {
        Request request = new Request("POST", endPoint);
        request.addParameters(Collections.emptyMap());
        request.setEntity(requestBody);
        response = restClient.performRequest(request);
        responseEntity = new BufferedHttpEntity(response.getEntity());
      } catch (java.io.IOException ex) {
        // Without a retry configuration an I/O failure is fatal; with one, fall through
        // to the retry path below.
        if (spec.getRetryConfiguration() == null) {
          throw ex;
        }
        LOG.error("Caught ES timeout, retrying", ex);
      }
      if (spec.getRetryConfiguration() != null
          && (response == null
              || responseEntity == null
              || spec.getRetryConfiguration().getRetryPredicate().test(responseEntity))) {
        if (responseEntity != null
            && spec.getRetryConfiguration().getRetryPredicate().test(responseEntity)) {
          // Typo fix: "HTP" -> "HTTP".
          LOG.warn("ES Cluster is responding with HTTP 429 - TOO_MANY_REQUESTS.");
        }
        responseEntity = handleRetry("POST", endPoint, Collections.emptyMap(), requestBody);
      }
      checkForErrors(responseEntity, spec.getAllowedResponseErrors());
    }
    /** retry request based on retry configuration policy. */
    private HttpEntity handleRetry(
        String method, String endpoint, Map<String, String> params, HttpEntity requestBody)
        throws IOException, InterruptedException {
      Response response;
      HttpEntity responseEntity;
      Sleeper sleeper = Sleeper.DEFAULT;
      BackOff backoff = retryBackoff.backoff();
      int attempt = 0;
      while (BackOffUtils.next(sleeper, backoff)) {
        LOG.warn(String.format(RETRY_ATTEMPT_LOG, ++attempt));
        try {
          Request request = new Request(method, endpoint);
          request.addParameters(params);
          request.setEntity(requestBody);
          response = restClient.performRequest(request);
          responseEntity = new BufferedHttpEntity(response.getEntity());
        } catch (java.io.IOException ex) {
          // Transient I/O failures consume an attempt and continue backing off.
          LOG.error("Caught ES timeout, retrying", ex);
          continue;
        }
        if (!Objects.requireNonNull(spec.getRetryConfiguration())
            .getRetryPredicate()
            .test(responseEntity)) {
          return responseEntity;
        } else {
          // Typo fix: "HTP" -> "HTTP".
          LOG.warn("ES Cluster is responding with HTTP 429 - TOO_MANY_REQUESTS.");
        }
      }
      throw new IOException(String.format(RETRY_FAILED_LOG, attempt));
    }
    @Teardown
    public void closeClient() throws IOException {
      if (restClient != null) {
        restClient.close();
      }
    }
  }
}
/**
 * Queries the cluster root endpoint and returns the major version of the Elasticsearch backend.
 *
 * @param connectionConfiguration connection to the cluster to probe
 * @return the cluster's major version number
 * @throws IllegalArgumentException if the cluster cannot be reached or its version is
 *     unsupported
 */
static int getBackendVersion(ConnectionConfiguration connectionConfiguration) {
  try (RestClient restClient = connectionConfiguration.createClient()) {
    Request request = new Request("GET", "");
    Response response = restClient.performRequest(request);
    JsonNode jsonNode = parseResponse(response.getEntity());
    // Bug fix: parse everything before the first '.' rather than only the first character,
    // so a multi-digit major version (e.g. "10.0.0") is reported accurately instead of being
    // silently truncated to its first digit.
    String versionNumber = jsonNode.path("version").path("number").asText();
    int dot = versionNumber.indexOf('.');
    int backendVersion =
        Integer.parseInt(dot < 0 ? versionNumber : versionNumber.substring(0, dot));
    checkArgument(
        (VALID_CLUSTER_VERSIONS.contains(backendVersion)),
        "The Elasticsearch version to connect to is %s.x. "
            + "This version of the ElasticsearchIO is only compatible with "
            + "Elasticsearch v7.x, v6.x, v5.x and v2.x",
        backendVersion);
    return backendVersion;
  } catch (IOException e) {
    throw new IllegalArgumentException("Cannot get Elasticsearch version", e);
  }
}
} | return PDone.in(input.getPipeline()); | public boolean start() throws IOException {
restClient = source.spec.getConnectionConfiguration().createClient();
String query = source.spec.getQuery() != null ? source.spec.getQuery().get() : null;
if (query == null) {
query = "{\"query\": { \"match_all\": {} }}";
}
if ((source.backendVersion >= 5) && source.numSlices != null && source.numSlices > 1) {
String sliceQuery =
String.format("\"slice\": {\"id\": %s,\"max\": %s}", source.sliceId, source.numSlices);
query = query.replaceFirst("\\{", "{" + sliceQuery + ",");
}
String endPoint =
String.format(
"/%s/%s/_search",
source.spec.getConnectionConfiguration().getIndex(),
source.spec.getConnectionConfiguration().getType());
Map<String, String> params = new HashMap<>();
params.put("scroll", source.spec.getScrollKeepalive());
if (source.backendVersion == 2) {
params.put("size", String.valueOf(source.spec.getBatchSize()));
if (source.shardPreference != null) {
params.put("preference", "_shards:" + source.shardPreference);
}
}
HttpEntity queryEntity = new NStringEntity(query, ContentType.APPLICATION_JSON);
Request request = new Request("GET", endPoint);
request.addParameters(params);
request.setEntity(queryEntity);
Response response = restClient.performRequest(request);
JsonNode searchResult = parseResponse(response.getEntity());
updateScrollId(searchResult);
return readNextBatchAndReturnFirstDocument(searchResult);
}
// Remember the scroll id from the latest search response; subsequent scroll and
// clear-scroll calls (advance()/close()) need it.
private void updateScrollId(JsonNode searchResult) {
  scrollId = searchResult.path("_scroll_id").asText();
}
/**
 * Moves to the next document: serves from the locally buffered batch when possible, otherwise
 * fetches the next page of results via the Elasticsearch scroll API.
 *
 * @return true if positioned on a document, false when the scroll is exhausted
 */
@Override
public boolean advance() throws IOException {
  if (batchIterator.hasNext()) {
    current = batchIterator.next();
    return true;
  } else {
    // Buffered page drained: request the next one using the saved scroll id.
    String requestBody =
        String.format(
            "{\"scroll\" : \"%s\",\"scroll_id\" : \"%s\"}",
            source.spec.getScrollKeepalive(), scrollId);
    HttpEntity scrollEntity = new NStringEntity(requestBody, ContentType.APPLICATION_JSON);
    Request request = new Request("GET", "/_search/scroll");
    request.addParameters(Collections.emptyMap());
    request.setEntity(scrollEntity);
    Response response = restClient.performRequest(request);
    JsonNode searchResult = parseResponse(response.getEntity());
    updateScrollId(searchResult);
    return readNextBatchAndReturnFirstDocument(searchResult);
  }
}
/**
 * Materializes the hits of {@code searchResult} into a fresh batch, positions the iterator on
 * the first document, and reports whether any document was read.
 */
private boolean readNextBatchAndReturnFirstDocument(JsonNode searchResult) {
  JsonNode hits = searchResult.path("hits").path("hits");
  if (hits.size() == 0) {
    // No more results: clear reader state and signal end of input.
    current = null;
    batchIterator = null;
    return false;
  }
  boolean includeMetadata = source.spec.isWithMetadata();
  List<String> documents = new ArrayList<>();
  for (JsonNode hit : hits) {
    // Either the full hit (with ES metadata) or just the "_source" payload.
    documents.add(includeMetadata ? hit.toString() : hit.path("_source").toString());
  }
  batchIterator = documents.listIterator();
  current = batchIterator.next();
  return true;
}
@Override
public String getCurrent() throws NoSuchElementException {
  String document = current;
  if (document == null) {
    throw new NoSuchElementException();
  }
  return document;
}
@Override
public void close() throws IOException {
  // The original null-checked restClient only in the finally block but dereferenced it
  // unconditionally in the try, so closing a reader whose start() never created a client
  // threw NPE. Guard up front instead: nothing to clean up without a client.
  if (restClient == null) {
    return;
  }
  // Best-effort clear of the server-side scroll context; always release the client afterwards.
  String requestBody = String.format("{\"scroll_id\" : [\"%s\"]}", scrollId);
  HttpEntity entity = new NStringEntity(requestBody, ContentType.APPLICATION_JSON);
  try {
    Request request = new Request("DELETE", "/_search/scroll");
    request.addParameters(Collections.emptyMap());
    request.setEntity(entity);
    restClient.performRequest(request);
  } finally {
    restClient.close();
  }
}
/** Returns the {@code BoundedElasticsearchSource} this reader was created from. */
@Override
public BoundedSource<String> getCurrentSource() {
  return source;
}
}
/**
 * A POJO encapsulating a configuration for retry behavior when issuing requests to ES. A retry
 * will be attempted until the maxAttempts or maxDuration is exceeded, whichever comes first, for
 * 429 TOO_MANY_REQUESTS error.
 */
@AutoValue
public abstract static class RetryConfiguration implements Serializable {
  @VisibleForTesting
  static final RetryPredicate DEFAULT_RETRY_PREDICATE = new DefaultRetryPredicate();

  abstract int getMaxAttempts();

  abstract Duration getMaxDuration();

  abstract RetryPredicate getRetryPredicate();

  abstract Builder builder();

  @AutoValue.Builder
  abstract static class Builder {
    abstract ElasticsearchIO.RetryConfiguration.Builder setMaxAttempts(int maxAttempts);

    abstract ElasticsearchIO.RetryConfiguration.Builder setMaxDuration(Duration maxDuration);

    abstract ElasticsearchIO.RetryConfiguration.Builder setRetryPredicate(
        RetryPredicate retryPredicate);

    abstract ElasticsearchIO.RetryConfiguration build();
  }

  /**
   * Creates RetryConfiguration for {@link ElasticsearchIO} with provided maxAttempts,
   * maxDurations and exponential backoff based retries.
   *
   * @param maxAttempts max number of attempts.
   * @param maxDuration maximum duration for retries.
   * @return {@link RetryConfiguration} object with provided settings.
   */
  public static RetryConfiguration create(int maxAttempts, Duration maxDuration) {
    checkArgument(maxAttempts > 0, "maxAttempts must be greater than 0");
    checkArgument(
        maxDuration != null && maxDuration.isLongerThan(Duration.ZERO),
        "maxDuration must be greater than 0");
    return new AutoValue_ElasticsearchIO_RetryConfiguration.Builder()
        .setMaxAttempts(maxAttempts)
        .setMaxDuration(maxDuration)
        .setRetryPredicate(DEFAULT_RETRY_PREDICATE)
        .build();
  }

  /** Replaces the retry predicate; test hook only. */
  @VisibleForTesting
  RetryConfiguration withRetryPredicate(RetryPredicate predicate) {
    checkArgument(predicate != null, "predicate must be provided");
    return builder().setRetryPredicate(predicate).build();
  }

  /**
   * An interface used to control if we retry the Elasticsearch call when a {@link Response} is
   * obtained. If the predicate returns true, {@link Write} retries
   * the requests to the Elasticsearch server if the {@link RetryConfiguration} permits it.
   */
  @FunctionalInterface
  interface RetryPredicate extends Predicate<HttpEntity>, Serializable {}

  /**
   * This is the default predicate used to test if a failed ES operation should be retried. A
   * retry will be attempted until the maxAttempts or maxDuration is exceeded, whichever comes
   * first, for TOO_MANY_REQUESTS(429) error.
   */
  @VisibleForTesting
  static class DefaultRetryPredicate implements RetryPredicate {
    // Fixed at construction (made final for immutability): the HTTP status code whose
    // presence in a bulk item triggers a retry.
    private final int errorCode;

    DefaultRetryPredicate(int code) {
      this.errorCode = code;
    }

    /** Defaults to retrying on HTTP 429 (TOO_MANY_REQUESTS). */
    DefaultRetryPredicate() {
      this(429);
    }

    /** Returns true if the response has the error code for any mutation. */
    private static boolean errorCodePresent(HttpEntity responseEntity, int errorCode) {
      try {
        JsonNode json = parseResponse(responseEntity);
        if (json.path("errors").asBoolean()) {
          for (JsonNode item : json.path("items")) {
            if (item.findValue("status").asInt() == errorCode) {
              return true;
            }
          }
        }
      } catch (IOException e) {
        // Unparseable response: treat as non-retryable rather than failing the predicate.
        LOG.warn("Could not extract error codes from responseEntity {}", responseEntity);
      }
      return false;
    }

    @Override
    public boolean test(HttpEntity responseEntity) {
      return errorCodePresent(responseEntity, errorCode);
    }
  }
}
/** A {@link PTransform} converting docs to their Bulk API counterparts. */
@AutoValue
public abstract static class DocToBulk
    extends PTransform<PCollection<String>, PCollection<String>> {

  private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
  private static final int DEFAULT_RETRY_ON_CONFLICT = 5;

  static {
    // Register the custom serializer that renders DocumentMetadata as a Bulk API action header.
    SimpleModule module = new SimpleModule();
    module.addSerializer(DocumentMetadata.class, new DocumentMetadataSerializer());
    OBJECT_MAPPER.registerModule(module);
  }

  abstract @Nullable ConnectionConfiguration getConnectionConfiguration();

  abstract Write.@Nullable FieldValueExtractFn getIdFn();

  abstract Write.@Nullable FieldValueExtractFn getIndexFn();

  abstract Write.@Nullable FieldValueExtractFn getRoutingFn();

  abstract Write.@Nullable FieldValueExtractFn getTypeFn();

  abstract Write.@Nullable FieldValueExtractFn getDocVersionFn();

  abstract @Nullable String getDocVersionType();

  abstract @Nullable String getUpsertScript();

  abstract @Nullable Boolean getUsePartialUpdate();

  abstract Write.@Nullable BooleanFieldValueExtractFn getIsDeleteFn();

  abstract @Nullable Integer getBackendVersion();

  abstract Builder builder();

  @AutoValue.Builder
  abstract static class Builder {
    abstract Builder setConnectionConfiguration(ConnectionConfiguration connectionConfiguration);

    abstract Builder setIdFn(Write.FieldValueExtractFn idFunction);

    abstract Builder setIndexFn(Write.FieldValueExtractFn indexFn);

    abstract Builder setRoutingFn(Write.FieldValueExtractFn routingFunction);

    abstract Builder setTypeFn(Write.FieldValueExtractFn typeFn);

    abstract Builder setDocVersionFn(Write.FieldValueExtractFn docVersionFn);

    abstract Builder setDocVersionType(String docVersionType);

    abstract Builder setIsDeleteFn(Write.BooleanFieldValueExtractFn isDeleteFn);

    abstract Builder setUsePartialUpdate(Boolean usePartialUpdate);

    abstract Builder setUpsertScript(String source);

    abstract Builder setBackendVersion(Integer assumedBackendVersion);

    abstract DocToBulk build();
  }

  /**
   * Provide the Elasticsearch connection configuration object. Only required if
   * withBackendVersion was not used i.e. getBackendVersion() returns null.
   *
   * @param connectionConfiguration the Elasticsearch {@link ConnectionConfiguration} object
   * @return the {@link DocToBulk} with connection configuration set
   */
  public DocToBulk withConnectionConfiguration(ConnectionConfiguration connectionConfiguration) {
    checkArgument(connectionConfiguration != null, "connectionConfiguration can not be null");
    return builder().setConnectionConfiguration(connectionConfiguration).build();
  }

  /**
   * Provide a function to extract the id from the document. This id will be used as the document
   * id in Elasticsearch. Should the function throw an Exception then the batch will fail and the
   * exception propagated.
   *
   * @param idFn to extract the document ID
   * @return the {@link DocToBulk} with the function set
   */
  public DocToBulk withIdFn(Write.FieldValueExtractFn idFn) {
    checkArgument(idFn != null, "idFn must not be null");
    return builder().setIdFn(idFn).build();
  }

  /**
   * Provide a function to extract the target index from the document allowing for dynamic
   * document routing. Should the function throw an Exception then the batch will fail and the
   * exception propagated.
   *
   * @param indexFn to extract the destination index from
   * @return the {@link DocToBulk} with the function set
   */
  public DocToBulk withIndexFn(Write.FieldValueExtractFn indexFn) {
    checkArgument(indexFn != null, "indexFn must not be null");
    return builder().setIndexFn(indexFn).build();
  }

  /**
   * Provide a function to extract the target routing from the document allowing for dynamic
   * document routing. Should the function throw an Exception then the batch will fail and the
   * exception propagated.
   *
   * @param routingFn to extract the destination index from
   * @return the {@link DocToBulk} with the function set
   */
  public DocToBulk withRoutingFn(Write.FieldValueExtractFn routingFn) {
    checkArgument(routingFn != null, "routingFn must not be null");
    return builder().setRoutingFn(routingFn).build();
  }

  /**
   * Provide a function to extract the target type from the document allowing for dynamic document
   * routing. Should the function throw an Exception then the batch will fail and the exception
   * propagated. Users are encouraged to consider carefully if multiple types are a sensible model
   * (see the Elasticsearch "removal of mapping types" documentation).
   *
   * @param typeFn to extract the destination index from
   * @return the {@link DocToBulk} with the function set
   */
  public DocToBulk withTypeFn(Write.FieldValueExtractFn typeFn) {
    checkArgument(typeFn != null, "typeFn must not be null");
    return builder().setTypeFn(typeFn).build();
  }

  /**
   * Provide an instruction to control whether partial updates or inserts (default) are issued to
   * Elasticsearch.
   *
   * @param usePartialUpdate set to true to issue partial updates
   * @return the {@link DocToBulk} with the partial update control set
   */
  public DocToBulk withUsePartialUpdate(boolean usePartialUpdate) {
    return builder().setUsePartialUpdate(usePartialUpdate).build();
  }

  /**
   * Whether to use scripted updates and what script to use.
   *
   * @param source set to the value of the script source, painless lang
   * @return the {@link DocToBulk} with the scripted updates set
   */
  public DocToBulk withUpsertScript(String source) {
    if (getBackendVersion() == null || getBackendVersion() == 2) {
      LOG.warn("Painless scripts are not supported on Elasticsearch clusters before version 5.0");
    }
    // Scripted upserts and partial updates are mutually exclusive.
    return builder().setUsePartialUpdate(false).setUpsertScript(source).build();
  }

  /**
   * Provide a function to extract the doc version from the document. This version number will be
   * used as the document version in Elasticsearch. Should the function throw an Exception then
   * the batch will fail and the exception propagated. Incompatible with update operations and
   * should only be used with withUsePartialUpdate(false)
   *
   * @param docVersionFn to extract the document version
   * @return the {@link DocToBulk} with the function set
   */
  public DocToBulk withDocVersionFn(Write.FieldValueExtractFn docVersionFn) {
    checkArgument(docVersionFn != null, "docVersionFn must not be null");
    return builder().setDocVersionFn(docVersionFn).build();
  }

  /**
   * Provide a function to extract the target operation either upsert or delete from the document
   * fields allowing dynamic bulk operation decision. While using withIsDeleteFn, it should be
   * taken care that the document's id extraction is defined using the withIdFn function or else
   * IllegalArgumentException is thrown. Should the function throw an Exception then the batch
   * will fail and the exception propagated.
   *
   * @param isDeleteFn set to true for deleting the specific document
   * @return the {@link Write} with the function set
   */
  public DocToBulk withIsDeleteFn(Write.BooleanFieldValueExtractFn isDeleteFn) {
    checkArgument(isDeleteFn != null, "deleteFn is required");
    return builder().setIsDeleteFn(isDeleteFn).build();
  }

  /**
   * Provide a version type for documents written to Elasticsearch. Incompatible with update
   * operations and should only be used with withUsePartialUpdate(false).
   *
   * @param docVersionType the version type to use, one of {@value VERSION_TYPES}
   * @return the {@link DocToBulk} with the doc version type set
   */
  public DocToBulk withDocVersionType(String docVersionType) {
    checkArgument(
        VERSION_TYPES.contains(docVersionType),
        "docVersionType must be one of " + "%s",
        String.join(", ", VERSION_TYPES));
    return builder().setDocVersionType(docVersionType).build();
  }

  /**
   * Use to set explicitly which version of Elasticsearch the destination cluster is running.
   * Providing this hint means there is no need for setting {@link
   * DocToBulk#withConnectionConfiguration(ConnectionConfiguration)}.
   *
   * <p>Note: if the value of @param backendVersion differs from the version the destination
   * cluster is running, behavior is undefined and likely to yield errors.
   *
   * @param backendVersion the major version number of the version of Elasticsearch being run in
   *     the cluster where documents will be indexed.
   * @return the {@link DocToBulk} with the Elasticsearch major version number set
   */
  public DocToBulk withBackendVersion(int backendVersion) {
    // BUG FIX: the error message previously listed VERSION_TYPES (document version type names)
    // instead of the set of supported cluster versions being validated against.
    checkArgument(
        VALID_CLUSTER_VERSIONS.contains(backendVersion),
        "Backend version may only be one of %s",
        VALID_CLUSTER_VERSIONS);
    return builder().setBackendVersion(backendVersion).build();
  }

  @Override
  public PCollection<String> expand(PCollection<String> docs) {
    ConnectionConfiguration connectionConfiguration = getConnectionConfiguration();
    Integer backendVersion = getBackendVersion();
    Write.FieldValueExtractFn idFn = getIdFn();
    Write.BooleanFieldValueExtractFn isDeleteFn = getIsDeleteFn();
    // The backend version must be discoverable: either given explicitly or via a connection.
    checkState(
        (backendVersion != null || connectionConfiguration != null),
        "withBackendVersion() or withConnectionConfiguration() is required");
    checkArgument(
        isDeleteFn == null || idFn != null,
        "Id needs to be specified by withIdFn for delete operation");
    return docs.apply(ParDo.of(new DocToBulkFn(this)));
  }

  /** Value object holding the fields serialized into a Bulk API action metadata line. */
  private static class DocumentMetadata implements Serializable {
    final String index;
    final String type;
    final String id;
    final Integer retryOnConflict;
    final String routing;
    final Integer backendVersion;
    final String version;
    final String versionType;

    DocumentMetadata(
        String index,
        String type,
        String id,
        Integer retryOnConflict,
        String routing,
        Integer backendVersion,
        String version,
        String versionType) {
      this.index = index;
      this.id = id;
      this.type = type;
      this.retryOnConflict = retryOnConflict;
      this.routing = routing;
      this.backendVersion = backendVersion;
      this.version = version;
      this.versionType = versionType;
    }
  }

  /** Jackson serializer emitting only the non-null metadata fields, version-dependent keys. */
  private static class DocumentMetadataSerializer extends StdSerializer<DocumentMetadata> {
    private DocumentMetadataSerializer() {
      super(DocumentMetadata.class);
    }

    @Override
    public void serialize(DocumentMetadata value, JsonGenerator gen, SerializerProvider provider)
        throws IOException {
      gen.writeStartObject();
      if (value.index != null) {
        gen.writeStringField("_index", value.index);
      }
      if (value.type != null) {
        gen.writeStringField("_type", value.type);
      }
      if (value.id != null) {
        gen.writeStringField("_id", value.id);
      }
      if (value.routing != null) {
        gen.writeStringField("routing", value.routing);
      }
      // ES renamed "_retry_on_conflict" to "retry_on_conflict" in major version 7.
      if (value.retryOnConflict != null && value.backendVersion <= 6) {
        gen.writeNumberField("_retry_on_conflict", value.retryOnConflict);
      }
      if (value.retryOnConflict != null && value.backendVersion >= 7) {
        gen.writeNumberField("retry_on_conflict", value.retryOnConflict);
      }
      if (value.version != null) {
        gen.writeStringField("version", value.version);
      }
      if (value.versionType != null) {
        gen.writeStringField("version_type", value.versionType);
      }
      gen.writeEndObject();
    }
  }

  /**
   * Converts one JSON document into its newline-delimited Bulk API representation (action
   * metadata line plus, for non-deletes, the document payload).
   */
  @VisibleForTesting
  static String createBulkApiEntity(DocToBulk spec, String document, int backendVersion)
      throws IOException {
    String documentMetadata = "{}";
    boolean isDelete = false;
    if (spec.getIndexFn() != null
        || spec.getTypeFn() != null
        || spec.getIdFn() != null
        || spec.getRoutingFn() != null) {
      // Parse the document only when a metadata extraction function is configured.
      JsonNode parsedDocument = OBJECT_MAPPER.readTree(document);
      documentMetadata = getDocumentMetadata(spec, parsedDocument, backendVersion);
      if (spec.getIsDeleteFn() != null) {
        isDelete = spec.getIsDeleteFn().apply(parsedDocument);
      }
    }

    if (isDelete) {
      // Delete operation. Documents are passed to the request as-is.
      return String.format("{ \"delete\" : %s }%n", documentMetadata);
    } else {
      // BUG FIX: getUsePartialUpdate() is @Nullable Boolean; null-safe comparison avoids an NPE
      // when the spec was built without setUsePartialUpdate (null is treated as false).
      if (Boolean.TRUE.equals(spec.getUsePartialUpdate())) {
        return String.format(
            "{ \"update\" : %s }%n{ \"doc\" : %s, " + "\"doc_as_upsert\" : true }%n",
            documentMetadata, document);
      } else if (spec.getUpsertScript() != null) {
        return String.format(
            "{ \"update\" : %s }%n{ \"script\" : {\"source\": \"%s\", "
                + "\"params\": %s}, \"upsert\" : %s, \"scripted_upsert\": true}%n",
            documentMetadata, spec.getUpsertScript(), document, document);
      } else {
        return String.format("{ \"index\" : %s }%n%s%n", documentMetadata, document);
      }
    }
  }

  // NOTE(review): uses the default locale; Locale.ROOT would avoid locale-sensitive lowercasing
  // (e.g. Turkish dotted/dotless I) — confirm cluster index naming expectations before changing.
  private static String lowerCaseOrNull(String input) {
    return input == null ? null : input.toLowerCase();
  }

  /**
   * Extracts the components that comprise the document address from the document using the {@link
   * Write.FieldValueExtractFn} configured. This allows any or all of the index, type and document
   * id to be controlled on a per document basis. If none are provided then an empty default of
   * {@code {}} is returned. Sanitization of the index is performed, automatically lower-casing
   * the value as required by Elasticsearch.
   *
   * @param parsedDocument the json from which the index, type and id may be extracted
   * @return the document address as JSON or the default
   * @throws IOException if the document cannot be parsed as JSON
   */
  private static String getDocumentMetadata(
      DocToBulk spec, JsonNode parsedDocument, int backendVersion) throws IOException {
    DocumentMetadata metadata =
        new DocumentMetadata(
            spec.getIndexFn() != null
                ? lowerCaseOrNull(spec.getIndexFn().apply(parsedDocument))
                : null,
            spec.getTypeFn() != null ? spec.getTypeFn().apply(parsedDocument) : null,
            spec.getIdFn() != null ? spec.getIdFn().apply(parsedDocument) : null,
            // Null-safe unboxing of the @Nullable usePartialUpdate flag (see createBulkApiEntity).
            (Boolean.TRUE.equals(spec.getUsePartialUpdate())
                    || (spec.getUpsertScript() != null && !spec.getUpsertScript().isEmpty()))
                ? DEFAULT_RETRY_ON_CONFLICT
                : null,
            spec.getRoutingFn() != null ? spec.getRoutingFn().apply(parsedDocument) : null,
            backendVersion,
            spec.getDocVersionFn() != null ? spec.getDocVersionFn().apply(parsedDocument) : null,
            spec.getDocVersionType());
    return OBJECT_MAPPER.writeValueAsString(metadata);
  }

  /** {@link DoFn} to for the {@link DocToBulk} transform. */
  @VisibleForTesting
  static class DocToBulkFn extends DoFn<String, String> {
    private final DocToBulk spec;
    private int backendVersion;

    public DocToBulkFn(DocToBulk spec) {
      this.spec = spec;
    }

    @Setup
    public void setup() throws IOException {
      // Prefer the user-supplied version hint; otherwise discover it from the cluster once.
      if (spec.getBackendVersion() != null) {
        backendVersion = spec.getBackendVersion();
      } else {
        backendVersion = ElasticsearchIO.getBackendVersion(spec.getConnectionConfiguration());
      }
    }

    @ProcessElement
    public void processElement(ProcessContext c) throws IOException {
      c.output(createBulkApiEntity(spec, c.element(), backendVersion));
    }
  }
}
/**
 * A {@link PTransform} writing data to Elasticsearch.
 *
 * <p>This {@link PTransform} acts as a convenience wrapper for doing both document to bulk API
 * serialization as well as batching those Bulk API entities and writing them to an Elasticsearch
 * cluster. This class is effectively a thin proxy for DocToBulk->BulkIO all-in-one for
 * convenience and backward compatibility.
 */
public static class Write extends PTransform<PCollection<String>, PDone> {
  /** Extracts a String-valued field (e.g. id, index, routing) from a parsed JSON document. */
  public interface FieldValueExtractFn extends SerializableFunction<JsonNode, String> {}

  /** Extracts a boolean decision (e.g. whether to delete) from a parsed JSON document. */
  public interface BooleanFieldValueExtractFn extends SerializableFunction<JsonNode, Boolean> {}

  // Delegate performing document -> Bulk API entity serialization.
  private DocToBulk docToBulk =
      new AutoValue_ElasticsearchIO_DocToBulk.Builder()
          .setUsePartialUpdate(false)
          .build();

  // Delegate performing the batched Bulk API writes.
  private BulkIO bulkIO =
      new AutoValue_ElasticsearchIO_BulkIO.Builder()
          .setMaxBatchSize(1000L)
          .setMaxBatchSizeBytes(5L * 1024L * 1024L)
          .setUseStatefulBatches(false)
          .setMaxParallelRequestsPerWindow(1)
          .build();

  public DocToBulk getDocToBulk() {
    return docToBulk;
  }

  public BulkIO getBulkIO() {
    return bulkIO;
  }

  /** Refer to {@link DocToBulk#withIdFn(Write.FieldValueExtractFn)}. */
  public Write withIdFn(FieldValueExtractFn idFn) {
    docToBulk = docToBulk.withIdFn(idFn);
    return this;
  }

  /** Refer to {@link DocToBulk#withIndexFn(Write.FieldValueExtractFn)}. */
  public Write withIndexFn(FieldValueExtractFn indexFn) {
    docToBulk = docToBulk.withIndexFn(indexFn);
    return this;
  }

  /** Refer to {@link DocToBulk#withRoutingFn(Write.FieldValueExtractFn)}. */
  public Write withRoutingFn(FieldValueExtractFn routingFn) {
    docToBulk = docToBulk.withRoutingFn(routingFn);
    return this;
  }

  /** Refer to {@link DocToBulk#withTypeFn(Write.FieldValueExtractFn)}. */
  public Write withTypeFn(FieldValueExtractFn typeFn) {
    docToBulk = docToBulk.withTypeFn(typeFn);
    return this;
  }

  /** Refer to {@link DocToBulk#withDocVersionFn(Write.FieldValueExtractFn)}. */
  public Write withDocVersionFn(FieldValueExtractFn docVersionFn) {
    docToBulk = docToBulk.withDocVersionFn(docVersionFn);
    return this;
  }

  /** Refer to {@link DocToBulk#withDocVersionType(String)}. */
  public Write withDocVersionType(String docVersionType) {
    docToBulk = docToBulk.withDocVersionType(docVersionType);
    return this;
  }

  /** Refer to {@link DocToBulk#withUsePartialUpdate(boolean)}. */
  public Write withUsePartialUpdate(boolean usePartialUpdate) {
    docToBulk = docToBulk.withUsePartialUpdate(usePartialUpdate);
    return this;
  }

  /** Refer to {@link DocToBulk#withUpsertScript(String)}. */
  public Write withUpsertScript(String source) {
    docToBulk = docToBulk.withUpsertScript(source);
    return this;
  }

  /** Refer to {@link DocToBulk#withBackendVersion(int)}. */
  public Write withBackendVersion(int backendVersion) {
    docToBulk = docToBulk.withBackendVersion(backendVersion);
    return this;
  }

  /** Refer to {@link DocToBulk#withIsDeleteFn(Write.BooleanFieldValueExtractFn)}. */
  public Write withIsDeleteFn(Write.BooleanFieldValueExtractFn isDeleteFn) {
    docToBulk = docToBulk.withIsDeleteFn(isDeleteFn);
    return this;
  }

  /** Refer to {@link BulkIO#withConnectionConfiguration(ConnectionConfiguration)}. */
  public Write withConnectionConfiguration(ConnectionConfiguration connectionConfiguration) {
    checkArgument(connectionConfiguration != null, "connectionConfiguration can not be null");
    // Both delegates need the connection: DocToBulk for version discovery, BulkIO for writes.
    docToBulk = docToBulk.withConnectionConfiguration(connectionConfiguration);
    bulkIO = bulkIO.withConnectionConfiguration(connectionConfiguration);
    return this;
  }

  /** Refer to {@link BulkIO#withMaxBatchSize(long)}. */
  public Write withMaxBatchSize(long batchSize) {
    bulkIO = bulkIO.withMaxBatchSize(batchSize);
    return this;
  }

  /** Refer to {@link BulkIO#withMaxBatchSizeBytes(long)}. */
  public Write withMaxBatchSizeBytes(long batchSizeBytes) {
    bulkIO = bulkIO.withMaxBatchSizeBytes(batchSizeBytes);
    return this;
  }

  /** Refer to {@link BulkIO#withRetryConfiguration(RetryConfiguration)}. */
  public Write withRetryConfiguration(RetryConfiguration retryConfiguration) {
    bulkIO = bulkIO.withRetryConfiguration(retryConfiguration);
    return this;
  }

  /** Refer to {@link BulkIO#withIgnoreVersionConflicts(boolean)}. */
  public Write withIgnoreVersionConflicts(boolean ignoreVersionConflicts) {
    bulkIO = bulkIO.withIgnoreVersionConflicts(ignoreVersionConflicts);
    return this;
  }

  /** Refer to {@link BulkIO#withUseStatefulBatches(boolean)}. */
  public Write withUseStatefulBatches(boolean useStatefulBatches) {
    bulkIO = bulkIO.withUseStatefulBatches(useStatefulBatches);
    return this;
  }

  /** Refer to {@link BulkIO#withMaxBufferingDuration(Duration)}. */
  public Write withMaxBufferingDuration(Duration maxBufferingDuration) {
    bulkIO = bulkIO.withMaxBufferingDuration(maxBufferingDuration);
    return this;
  }

  /** Refer to {@link BulkIO#withMaxParallelRequestsPerWindow(int)}. */
  public Write withMaxParallelRequestsPerWindow(int maxParallelRequestsPerWindow) {
    bulkIO = bulkIO.withMaxParallelRequestsPerWindow(maxParallelRequestsPerWindow);
    return this;
  }

  /** Refer to {@link BulkIO#withAllowableResponseErrors(Set)}. */
  public Write withAllowableResponseErrors(@Nullable Set<String> allowableResponseErrors) {
    if (allowableResponseErrors == null) {
      allowableResponseErrors = new HashSet<>();
    }
    bulkIO = bulkIO.withAllowableResponseErrors(allowableResponseErrors);
    return this;
  }

  @Override
  public PDone expand(PCollection<String> input) {
    // Serialize documents to Bulk API entities, then batch-write them.
    return input.apply(docToBulk).apply(bulkIO);
  }
}
/**
* A {@link PTransform} writing Bulk API entities created by {@link ElasticsearchIO.DocToBulk} to
* an Elasticsearch cluster. Typically, using {@link ElasticsearchIO.Write} is preferred, whereas
* using {@link ElasticsearchIO.DocToBulk} and BulkIO separately is for advanced use cases such as
* mirroring data to multiple clusters or data lakes without recomputation.
*/
@AutoValue
public abstract static class BulkIO extends PTransform<PCollection<String>, PDone> {
// Log message templates used when a bulk request fails and is retried, or retries are exhausted.
@VisibleForTesting
static final String RETRY_ATTEMPT_LOG = "Error writing to Elasticsearch. Retry attempt[%d]";

@VisibleForTesting
static final String RETRY_FAILED_LOG =
    "Error writing to ES after %d attempt(s). No more attempts allowed";

// Nullable only until expand() runs, where checkState enforces it was configured.
abstract @Nullable ConnectionConfiguration getConnectionConfiguration();

abstract long getMaxBatchSize();

abstract long getMaxBatchSizeBytes();

abstract @Nullable Duration getMaxBufferingDuration();

abstract boolean getUseStatefulBatches();

abstract int getMaxParallelRequestsPerWindow();

abstract @Nullable RetryConfiguration getRetryConfiguration();

// Error "type" strings from Bulk response items that should be suppressed rather than surfaced.
abstract @Nullable Set<String> getAllowedResponseErrors();

abstract Builder builder();

@AutoValue.Builder
abstract static class Builder {
  abstract Builder setConnectionConfiguration(ConnectionConfiguration connectionConfiguration);

  abstract Builder setMaxBatchSize(long maxBatchSize);

  abstract Builder setMaxBatchSizeBytes(long maxBatchSizeBytes);

  abstract Builder setRetryConfiguration(RetryConfiguration retryConfiguration);

  abstract Builder setAllowedResponseErrors(Set<String> allowedResponseErrors);

  abstract Builder setMaxBufferingDuration(Duration maxBufferingDuration);

  abstract Builder setUseStatefulBatches(boolean useStatefulBatches);

  abstract Builder setMaxParallelRequestsPerWindow(int maxParallelRequestsPerWindow);

  abstract BulkIO build();
}
/**
 * Provide the Elasticsearch connection configuration object.
 *
 * @param connectionConfiguration the Elasticsearch {@link ConnectionConfiguration} object
 * @return the {@link BulkIO} with connection configuration set
 */
public BulkIO withConnectionConfiguration(ConnectionConfiguration connectionConfiguration) {
  checkArgument(connectionConfiguration != null, "connectionConfiguration can not be null");
  Builder updated = builder().setConnectionConfiguration(connectionConfiguration);
  return updated.build();
}
/**
 * Provide a maximum size in number of documents for the Bulk API batch. Default is 1000 docs
 * (in line with common Elasticsearch bulk sizing advice). Depending on the execution engine,
 * size of bundles may vary; this sets the maximum size. Change this if you need to have smaller
 * ElasticSearch bulks.
 *
 * @param batchSize maximum batch size in number of documents
 * @return the {@link BulkIO} with connection batch size set
 */
public BulkIO withMaxBatchSize(long batchSize) {
  checkArgument(batchSize > 0, "batchSize must be > 0, but was %s", batchSize);
  Builder updated = builder().setMaxBatchSize(batchSize);
  return updated.build();
}
/**
 * Provide a maximum size in bytes for the Bulk API batch. Default is 5MB (in line with common
 * Elasticsearch bulk sizing advice). Depending on the execution engine, size of bundles may
 * vary; this sets the maximum size. Change this if you need to have smaller ElasticSearch bulks.
 *
 * @param batchSizeBytes maximum batch size in bytes
 * @return the {@link BulkIO} with connection batch size in bytes set
 */
public BulkIO withMaxBatchSizeBytes(long batchSizeBytes) {
  checkArgument(batchSizeBytes > 0, "batchSizeBytes must be > 0, but was %s", batchSizeBytes);
  Builder updated = builder().setMaxBatchSizeBytes(batchSizeBytes);
  return updated.build();
}
/**
 * Provides configuration to retry a failed batch call to Elasticsearch. A batch is considered
 * as failed if the underlying {@link RestClient} surfaces 429 HTTP status code as error for one
 * or more of the items in the {@link Response}. Users should consider that retrying might
 * compound the underlying problem which caused the initial failure. Users should also be aware
 * that once retrying is exhausted the error is surfaced to the runner which <em>may</em> then
 * opt to retry the current bundle in entirety or abort if the max number of retries of the
 * runner is completed. Retrying uses an exponential backoff algorithm, with minimum backoff of
 * 5 seconds and then surfacing the error once the maximum number of retries or maximum
 * configuration duration is exceeded.
 *
 * <p>Example use:
 *
 * <pre>{@code
 * ElasticsearchIO.write()
 *   .withRetryConfiguration(ElasticsearchIO.RetryConfiguration.create(10, Duration.standardMinutes(3))
 *   ...
 * }</pre>
 *
 * @param retryConfiguration the rules which govern the retry behavior
 * @return the {@link BulkIO} with retrying configured
 */
public BulkIO withRetryConfiguration(RetryConfiguration retryConfiguration) {
  checkArgument(retryConfiguration != null, "retryConfiguration is required");
  Builder updated = builder().setRetryConfiguration(retryConfiguration);
  return updated.build();
}
/**
 * Whether or not to suppress version conflict errors in a Bulk API response. This can be useful
 * if your use case involves using external version types.
 *
 * @param ignoreVersionConflicts true to suppress version conflicts, false to surface version
 *     conflict errors.
 * @return the {@link BulkIO} with version conflict handling configured
 */
public BulkIO withIgnoreVersionConflicts(boolean ignoreVersionConflicts) {
  // BUG FIX: the original added to the Set returned by getAllowedResponseErrors() in place,
  // mutating state held by this (supposedly immutable) AutoValue spec and any transform sharing
  // that Set. Copy into a fresh Set instead.
  Set<String> allowedResponseErrors = new HashSet<>();
  if (getAllowedResponseErrors() != null) {
    allowedResponseErrors.addAll(getAllowedResponseErrors());
  }
  if (ignoreVersionConflicts) {
    allowedResponseErrors.add(VERSION_CONFLICT_ERROR);
  }
  return builder().setAllowedResponseErrors(allowedResponseErrors).build();
}
/**
 * Provide a set of textual error types which can be contained in Bulk API response
 * items[].error.type field. Any element in @param allowableResponseErrorTypes will suppress
 * errors of the same type in Bulk responses.
 *
 * @param allowableResponseErrorTypes error types to suppress
 * @return the {@link BulkIO} with allowable response errors set
 */
public BulkIO withAllowableResponseErrors(@Nullable Set<String> allowableResponseErrorTypes) {
  Set<String> allowed =
      allowableResponseErrorTypes == null ? new HashSet<>() : allowableResponseErrorTypes;
  return builder().setAllowedResponseErrors(allowed).build();
}
/**
 * If using {@link BulkIO#withUseStatefulBatches(boolean)}, this can be used to set the maximum
 * time before buffered elements are emitted to Elasticsearch as a Bulk API request. If this
 * config is not set, Bulk requests will not be issued until the configured maxBatchSize
 * number of documents have been buffered. This may result in higher latency in particular if
 * your max batch size is set to a large value and your pipeline input is low volume.
 *
 * @param maxBufferingDuration the maximum duration to wait before sending any buffered
 *     documents to Elasticsearch, regardless of maxBatchSize.
 * @return the {@link BulkIO} with maximum buffering duration set
 */
public BulkIO withMaxBufferingDuration(Duration maxBufferingDuration) {
  // Validate the duration up front (consistent with the other precondition checks in this
  // class); previously a null or non-positive value was silently accepted and ignored later.
  checkArgument(
      maxBufferingDuration != null && maxBufferingDuration.isLongerThan(Duration.ZERO),
      "maxBufferingDuration must be non-null and greater than 0");
  LOG.warn(
      "Use of withMaxBufferingDuration requires withUseStatefulBatches(true). "
          + "Setting that automatically.");
  return builder()
      .setUseStatefulBatches(true)
      .setMaxBufferingDuration(maxBufferingDuration)
      .build();
}
/**
 * Whether or not to use Stateful Processing to ensure bulk requests have the desired number of
 * entities i.e. as close to the maxBatchSize as possible. By default without this feature
 * enabled, Bulk requests will not contain more than maxBatchSize entities, but the lower bound
 * of batch size is determined by Beam Runner bundle sizes, which may be as few as 1.
 *
 * @param useStatefulBatches true enables the use of Stateful Processing to ensure that batches
 *     are as close to the maxBatchSize as possible.
 * @return the {@link BulkIO} with Stateful Processing enabled or disabled
 */
public BulkIO withUseStatefulBatches(boolean useStatefulBatches) {
  Builder updated = builder().setUseStatefulBatches(useStatefulBatches);
  return updated.build();
}
/**
 * When using stateful batching, states and therefore batches are maintained per-key-per-window.
 * BE AWARE that low values for @param maxParallelRequestsPerWindow, in particular if the input
 * data has a finite number of windows, can reduce parallelism greatly. If data is globally
 * windowed and @param maxParallelRequestsPerWindow is set to 1, there will only ever be 1
 * request in flight. Having only a single request in flight can be beneficial for ensuring an
 * Elasticsearch cluster is not overwhelmed by parallel requests, but may not work for all use
 * cases. If this number is less than the number of maximum workers in your pipeline, the IO
 * work will result in a sub-distribution of the last write step with most of the runners.
 *
 * @param maxParallelRequestsPerWindow the maximum number of parallel bulk requests for a window
 *     of data
 * @return the {@link BulkIO} with maximum parallel bulk requests per window set
 */
public BulkIO withMaxParallelRequestsPerWindow(int maxParallelRequestsPerWindow) {
  // BUG FIX: error message previously rendered as "parameter value must be positive a integer".
  checkArgument(
      maxParallelRequestsPerWindow > 0,
      "maxParallelRequestsPerWindow must be a positive integer");
  return builder().setMaxParallelRequestsPerWindow(maxParallelRequestsPerWindow).build();
}
/**
 * Groups documents into batches via Stateful Processing, honoring the user-configured
 * withMaxBufferingDuration and withMaxParallelRequestsPerWindow settings.
 *
 * <p>Mostly exists for testability of withMaxParallelRequestsPerWindow.
 */
@VisibleForTesting
static class StatefulBatching
    extends PTransform<PCollection<String>, PCollection<KV<Integer, Iterable<String>>>> {
  final BulkIO spec;

  private StatefulBatching(BulkIO bulkSpec) {
    spec = bulkSpec;
  }

  public static StatefulBatching fromSpec(BulkIO spec) {
    return new StatefulBatching(spec);
  }

  @Override
  public PCollection<KV<Integer, Iterable<String>>> expand(PCollection<String> input) {
    Duration bufferingDuration = spec.getMaxBufferingDuration();
    GroupIntoBatches<Integer, String> batching = GroupIntoBatches.ofSize(spec.getMaxBatchSize());
    if (bufferingDuration != null) {
      batching = batching.withMaxBufferingDuration(bufferingDuration);
    }
    // Assign shard keys first so that at most maxParallelRequestsPerWindow
    // groups (and therefore parallel bulk requests) exist per window.
    PCollection<KV<Integer, String>> sharded =
        input.apply(
            ParDo.of(new Reshuffle.AssignShardFn<>(spec.getMaxParallelRequestsPerWindow())));
    return sharded.apply(batching);
  }
}
@Override
public PDone expand(PCollection<String> input) {
  ConnectionConfiguration connectionConfiguration = getConnectionConfiguration();
  checkState(connectionConfiguration != null, "withConnectionConfiguration() is required");
  // Stateful batching path only when explicitly enabled; otherwise batch size
  // is bounded by whatever bundle sizes the runner produces.
  if (getUseStatefulBatches()) {
    input.apply(StatefulBatching.fromSpec(this)).apply(ParDo.of(new BulkIOStatefulFn(this)));
  } else {
    input.apply(ParDo.of(new BulkIOBundleFn(this)));
  }
  return PDone.in(input.getPipeline());
}
/** Non-stateful writer: buffers each incoming element and flushes per bundle. */
static class BulkIOBundleFn extends BulkIOBaseFn<String> {
  @VisibleForTesting
  BulkIOBundleFn(BulkIO bulkSpec) {
    super(bulkSpec);
  }

  @ProcessElement
  public void processElement(ProcessContext context) throws Exception {
    // Each element is an already-serialized bulk-API entity (one JSON action line).
    String bulkApiEntity = context.element();
    addAndMaybeFlush(bulkApiEntity);
  }
}
/*
Intended for use in conjunction with {@link GroupIntoBatches}: consumes the
pre-grouped batches produced by {@link StatefulBatching} and feeds each entity
into the shared buffering/flush logic of {@link BulkIOBaseFn}.
*/
static class BulkIOStatefulFn extends BulkIOBaseFn<KV<Integer, Iterable<String>>> {
  @VisibleForTesting
  BulkIOStatefulFn(BulkIO bulkSpec) {
    super(bulkSpec);
  }

  @ProcessElement
  public void processElement(ProcessContext context) throws Exception {
    // The key is only a shard id assigned upstream; only the grouped values matter here.
    Iterable<String> bulkApiEntities = context.element().getValue();
    for (String bulkApiEntity : bulkApiEntities) {
      addAndMaybeFlush(bulkApiEntity);
    }
  }
}
/**
 * Base {@link DoFn} for the {@link BulkIO} transform: buffers serialized bulk-API entities and
 * flushes them to Elasticsearch when the batch size/byte thresholds are reached or the bundle
 * finishes. Retries are governed by the spec's RetryConfiguration, if any.
 */
@VisibleForTesting
private abstract static class BulkIOBaseFn<T> extends DoFn<T, Void> {
  private static final Duration RETRY_INITIAL_BACKOFF = Duration.standardSeconds(5);

  private transient FluentBackoff retryBackoff;
  // Assigned once in the constructor; never mutated afterwards.
  private final BulkIO spec;
  private transient RestClient restClient;
  private ArrayList<String> batch;
  long currentBatchSizeBytes;

  protected BulkIOBaseFn(BulkIO bulkSpec) {
    this.spec = bulkSpec;
  }

  @Setup
  public void setup() throws IOException {
    ConnectionConfiguration connectionConfiguration = spec.getConnectionConfiguration();
    restClient = connectionConfiguration.createClient();
    // Default: no retries at all; replaced below when a retry policy is configured.
    retryBackoff =
        FluentBackoff.DEFAULT.withMaxRetries(0).withInitialBackoff(RETRY_INITIAL_BACKOFF);
    if (spec.getRetryConfiguration() != null) {
      // MaxRetries is attempts - 1 because the initial request is not a retry.
      retryBackoff =
          FluentBackoff.DEFAULT
              .withInitialBackoff(RETRY_INITIAL_BACKOFF)
              .withMaxRetries(spec.getRetryConfiguration().getMaxAttempts() - 1)
              .withMaxCumulativeBackoff(spec.getRetryConfiguration().getMaxDuration());
    }
  }

  @StartBundle
  public void startBundle(StartBundleContext context) {
    batch = new ArrayList<>();
    currentBatchSizeBytes = 0;
  }

  @FinishBundle
  public void finishBundle(FinishBundleContext context)
      throws IOException, InterruptedException {
    // Flush whatever remains buffered so no entities are lost at bundle boundaries.
    flushBatch();
  }

  /** Buffers one entity and flushes if either the count or byte-size threshold is reached. */
  protected void addAndMaybeFlush(String bulkApiEntity)
      throws IOException, InterruptedException {
    batch.add(bulkApiEntity);
    currentBatchSizeBytes += bulkApiEntity.getBytes(StandardCharsets.UTF_8).length;
    if (batch.size() >= spec.getMaxBatchSize()
        || currentBatchSizeBytes >= spec.getMaxBatchSizeBytes()) {
      flushBatch();
    }
  }

  /** Returns true when the failure is a transient connection problem worth retrying. */
  private boolean isRetryableClientException(Throwable t) {
    return t.getCause() instanceof ConnectTimeoutException
        || t.getCause() instanceof SocketTimeoutException
        || t.getCause() instanceof ConnectionClosedException
        || t.getCause() instanceof ConnectException;
  }

  /** Sends the buffered batch as a single bulk request, retrying per the configured policy. */
  private void flushBatch() throws IOException, InterruptedException {
    if (batch.isEmpty()) {
      return;
    }
    LOG.info(
        "ElasticsearchIO batch size: {}, batch size bytes: {}",
        batch.size(),
        currentBatchSizeBytes);
    StringBuilder bulkRequest = new StringBuilder();
    for (String json : batch) {
      bulkRequest.append(json);
    }
    // Reset the buffer before performing I/O so a retry path never re-appends.
    batch.clear();
    currentBatchSizeBytes = 0L;
    Response response = null;
    HttpEntity responseEntity = null;
    String endPoint = spec.getConnectionConfiguration().getBulkEndPoint();
    HttpEntity requestBody =
        new NStringEntity(bulkRequest.toString(), ContentType.APPLICATION_JSON);
    try {
      Request request = new Request("POST", endPoint);
      request.addParameters(Collections.emptyMap());
      request.setEntity(requestBody);
      response = restClient.performRequest(request);
      responseEntity = new BufferedHttpEntity(response.getEntity());
    } catch (java.io.IOException ex) {
      // Without a retry policy, or for non-retryable failures, propagate immediately.
      if (spec.getRetryConfiguration() == null || !isRetryableClientException(ex)) {
        throw ex;
      }
      LOG.error("Caught ES timeout, retrying", ex);
    }
    if (spec.getRetryConfiguration() != null
        && (response == null
            || responseEntity == null
            || spec.getRetryConfiguration().getRetryPredicate().test(responseEntity))) {
      if (responseEntity != null
          && spec.getRetryConfiguration().getRetryPredicate().test(responseEntity)) {
        LOG.warn("ES Cluster is responding with HTTP 429 - TOO_MANY_REQUESTS.");
      }
      responseEntity = handleRetry("POST", endPoint, Collections.emptyMap(), requestBody);
    }
    checkForErrors(responseEntity, spec.getAllowedResponseErrors());
  }

  /** Retries the request according to the configured retry policy. */
  private HttpEntity handleRetry(
      String method, String endpoint, Map<String, String> params, HttpEntity requestBody)
      throws IOException, InterruptedException {
    Response response;
    HttpEntity responseEntity = null;
    Sleeper sleeper = Sleeper.DEFAULT;
    BackOff backoff = retryBackoff.backoff();
    int attempt = 0;
    while (BackOffUtils.next(sleeper, backoff)) {
      LOG.warn(String.format(RETRY_ATTEMPT_LOG, ++attempt));
      try {
        Request request = new Request(method, endpoint);
        request.addParameters(params);
        request.setEntity(requestBody);
        response = restClient.performRequest(request);
        responseEntity = new BufferedHttpEntity(response.getEntity());
      } catch (java.io.IOException ex) {
        if (isRetryableClientException(ex)) {
          LOG.error("Caught ES timeout, retrying", ex);
          continue;
        }
        // Non-retryable failure: propagate instead of silently falling through and
        // testing a stale (or null) response entity from a previous attempt.
        throw ex;
      }
      if (!Objects.requireNonNull(spec.getRetryConfiguration())
          .getRetryPredicate()
          .test(responseEntity)) {
        return responseEntity;
      } else {
        LOG.warn("ES Cluster is responding with HTTP 429 - TOO_MANY_REQUESTS.");
      }
    }
    throw new IOException(String.format(RETRY_FAILED_LOG, attempt));
  }

  @Teardown
  public void closeClient() throws IOException {
    if (restClient != null) {
      restClient.close();
    }
  }
}
}
/**
 * Queries the cluster root endpoint and returns the Elasticsearch major version.
 *
 * @throws IllegalArgumentException if the cluster cannot be reached or the version is unsupported
 */
static int getBackendVersion(ConnectionConfiguration connectionConfiguration) {
  try (RestClient restClient = connectionConfiguration.createClient()) {
    Request request = new Request("GET", "");
    Response response = restClient.performRequest(request);
    JsonNode jsonNode = parseResponse(response.getEntity());
    // Parse the whole major-version component rather than the first character so that
    // multi-digit majors (e.g. "10.1.0") are not silently mis-parsed as "1".
    String versionNumber = jsonNode.path("version").path("number").asText();
    int backendVersion = Integer.parseInt(versionNumber.split("\\.", 2)[0]);
    checkArgument(
        (VALID_CLUSTER_VERSIONS.contains(backendVersion)),
        "The Elasticsearch version to connect to is %s.x. "
            + "This version of the ElasticsearchIO is only compatible with "
            + "Elasticsearch v7.x, v6.x, v5.x and v2.x",
        backendVersion);
    return backendVersion;
  } catch (IOException e) {
    throw new IllegalArgumentException("Cannot get Elasticsearch version", e);
  }
}
} | class BoundedElasticsearchReader extends BoundedSource.BoundedReader<String> {
private final BoundedElasticsearchSource source;
private RestClient restClient;
private String current;
private String scrollId;
private ListIterator<String> batchIterator;
private BoundedElasticsearchReader(BoundedElasticsearchSource source) {
this.source = source;
}
@Override | class BoundedElasticsearchReader extends BoundedSource.BoundedReader<String> {
private final BoundedElasticsearchSource source;
private RestClient restClient;
private String current;
private String scrollId;
private ListIterator<String> batchIterator;
private BoundedElasticsearchReader(BoundedElasticsearchSource source) {
this.source = source;
}
@Override |
```suggestion deployLogger.logApplicationPackage(Level.WARNING, msg); ``` To show this as a notification in console, e.g.:  | private RankProfile getInherited() {
if (inheritedName == null) return null;
if (inherited == null) {
inherited = resolveInherited();
if (inherited == null) {
String msg = "rank-profile '" + getName() + "' inherits '" + inheritedName +
"', but it does not exist anywhere in the inheritance of search '" +
((getSearch() != null) ? getSearch().getName() : " global rank profiles") + "'.";
if (search.getDeployProperties().featureFlags().enforceRankProfileInheritance()) {
throw new IllegalArgumentException(msg);
} else {
deployLogger.log(Level.WARNING, msg);
inherited = resolveIndependentOfInheritance();
}
} else {
List<String> children = new ArrayList<>();
children.add(createFullyQualifiedName());
verifyNoInheritanceCycle(children, inherited);
}
}
return inherited;
} | deployLogger.log(Level.WARNING, msg); | private RankProfile getInherited() {
if (inheritedName == null) return null;
if (inherited == null) {
inherited = resolveInherited();
if (inherited == null) {
String msg = "rank-profile '" + getName() + "' inherits '" + inheritedName +
"', but it does not exist anywhere in the inheritance of search '" +
((getSearch() != null) ? getSearch().getName() : " global rank profiles") + "'.";
if (search.getDeployProperties().featureFlags().enforceRankProfileInheritance()) {
throw new IllegalArgumentException(msg);
} else {
deployLogger.log(Level.WARNING, msg);
inherited = resolveIndependentOfInheritance();
}
} else {
List<String> children = new ArrayList<>();
children.add(createFullyQualifiedName());
verifyNoInheritanceCycle(children, inherited);
}
}
return inherited;
} | class RankProfile implements Cloneable {
    public final static String FIRST_PHASE = "firstphase";
    public final static String SECOND_PHASE = "secondphase";

    /** The search definition-unique name of this rank profile */
    private final String name;

    /** The search definition owning this profile, or null if global (owned by a model) */
    private final ImmutableSearch search;

    /** The model owning this profile if it is global, or null if it is owned by a search definition */
    private final VespaModel model;

    /** The name of the rank profile inherited by this */
    private String inheritedName = null;

    /** The resolved inherited profile; lazily populated by getInherited() */
    private RankProfile inherited = null;

    /** The match settings of this profile */
    private MatchPhaseSettings matchPhaseSettings = null;

    /** The rank settings of this profile */
    protected Set<RankSetting> rankSettings = new java.util.LinkedHashSet<>();

    /** The ranking expression to be used for first phase */
    private RankingExpressionFunction firstPhaseRanking = null;

    /** The ranking expression to be used for second phase */
    private RankingExpressionFunction secondPhaseRanking = null;

    /** Number of hits to be reranked in second phase, -1 means use default */
    private int rerankCount = -1;

    /** Rerank array size, per setKeepRankCount; -1 means unset/inherited — semantics inferred from the setter name, confirm */
    private int keepRankCount = -1;

    private int numThreadsPerSearch = -1;
    private int minHitsPerThread = -1;
    private int numSearchPartitions = -1;
    private Double termwiseLimit = null;

    /** The drop limit used to drop hits with rank score less than or equal to this value */
    private double rankScoreDropLimit = -Double.MAX_VALUE;

    private Set<ReferenceNode> summaryFeatures;
    private String inheritedSummaryFeatures;
    private Set<ReferenceNode> rankFeatures;

    /** The properties of this - a multimap */
    private Map<String, List<RankProperty>> rankProperties = new LinkedHashMap<>();

    private Boolean ignoreDefaultRankFeatures = null;
    private Map<String, RankingExpressionFunction> functions = new LinkedHashMap<>();
    // Cache over functions + all inherited functions; invalidated when functions change.
    private Map<String, RankingExpressionFunction> allFunctionsCached = null;
    private Map<Reference, TensorType> inputFeatures = new LinkedHashMap<>();
    private Set<String> filterFields = new HashSet<>();
    private final RankProfileRegistry rankProfileRegistry;

    /** Constants in ranking expressions */
    private Map<String, Value> constants = new HashMap<>();

    private final TypeSettings attributeTypes = new TypeSettings();
    private final TypeSettings queryFeatureTypes = new TypeSettings();
    private List<ImmutableSDField> allFieldsList;

    /** Global onnx models not tied to a search definition */
    private final OnnxModels onnxModels;

    private final DeployLogger deployLogger;
/**
* Creates a new rank profile for a particular search definition
*
* @param name the name of the new profile
* @param search the search definition owning this profile
* @param rankProfileRegistry the {@link com.yahoo.searchdefinition.RankProfileRegistry} to use for storing
* and looking up rank profiles.
*/
public RankProfile(String name, Search search, RankProfileRegistry rankProfileRegistry) {
this.name = Objects.requireNonNull(name, "name cannot be null");
this.search = Objects.requireNonNull(search, "search cannot be null");
this.model = null;
this.onnxModels = null;
this.rankProfileRegistry = rankProfileRegistry;
this.deployLogger = search.getDeployLogger();
}
/**
* Creates a global rank profile
*
* @param name the name of the new profile
* @param model the model owning this profile
*/
public RankProfile(String name, VespaModel model, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, OnnxModels onnxModels) {
this.name = Objects.requireNonNull(name, "name cannot be null");
this.search = null;
this.model = Objects.requireNonNull(model, "model cannot be null");
this.rankProfileRegistry = rankProfileRegistry;
this.onnxModels = onnxModels;
this.deployLogger = deployLogger;
}
public String getName() { return name; }
/** Returns the search definition owning this, or null if it is global */
public ImmutableSearch getSearch() { return search; }
/** Returns the application this is part of */
public ApplicationPackage applicationPackage() {
return search != null ? search.applicationPackage() : model.applicationPackage();
}
/** Returns the ranking constants of the owner of this */
public RankingConstants rankingConstants() {
return search != null ? search.rankingConstants() : model.rankingConstants();
}
public Map<String, OnnxModel> onnxModels() {
return search != null ? search.onnxModels().asMap() : onnxModels.asMap();
}
private Stream<ImmutableSDField> allFields() {
if (search == null) return Stream.empty();
if (allFieldsList == null) {
allFieldsList = search.allFieldsList();
}
return allFieldsList.stream();
}
private Stream<ImmutableSDField> allImportedFields() {
return search != null ? search.allImportedFields() : Stream.empty();
}
/**
* Sets the name of the rank profile this inherits. Both rank profiles must be present in the same search
* definition
*/
public void setInherited(String inheritedName) {
this.inheritedName = inheritedName;
}
/** Returns the name of the profile this one inherits, or null if none is inherited */
public String getInheritedName() { return inheritedName; }
/** Returns the inherited rank profile, or null if there is none */
private RankProfile resolveIndependentOfInheritance() {
for (RankProfile rankProfile : rankProfileRegistry.all()) {
if (rankProfile.getName().equals(inheritedName)) return rankProfile;
}
return null;
}
private String createFullyQualifiedName() {
return (search != null)
? (search.getName() + "." + getName())
: getName();
}
private void verifyNoInheritanceCycle(List<String> children, RankProfile parent) {
children.add(parent.createFullyQualifiedName());
String root = children.get(0);
if (root.equals(parent.createFullyQualifiedName())) {
throw new IllegalArgumentException("There is a cycle in the inheritance for rank-profile '" + root + "' = " + children);
}
if (parent.getInherited() != null) {
verifyNoInheritanceCycle(children, parent.getInherited());
}
}
private RankProfile resolveInherited(ImmutableSearch search) {
SDDocumentType documentType = search.getDocument();
if (documentType != null) {
if (name.equals(inheritedName)) {
for (SDDocumentType baseType : documentType.getInheritedTypes()) {
RankProfile resolvedFromBase = rankProfileRegistry.resolve(baseType, inheritedName);
if (resolvedFromBase != null) return resolvedFromBase;
}
}
return rankProfileRegistry.resolve(documentType, inheritedName);
}
return rankProfileRegistry.get(search.getName(), inheritedName);
}
private RankProfile resolveInherited() {
if (inheritedName == null) return null;
return (getSearch() != null)
? resolveInherited(search)
: rankProfileRegistry.getGlobal(inheritedName);
}
/**
* Returns whether this profile inherits (directly or indirectly) the given profile
*
* @param name the profile name to compare this to.
* @return whether or not this inherits from the named profile.
*/
public boolean inherits(String name) {
RankProfile parent = getInherited();
while (parent != null) {
if (parent.getName().equals(name))
return true;
parent = parent.getInherited();
}
return false;
}
public void setMatchPhaseSettings(MatchPhaseSettings settings) {
settings.checkValid();
this.matchPhaseSettings = settings;
}
public MatchPhaseSettings getMatchPhaseSettings() {
MatchPhaseSettings settings = this.matchPhaseSettings;
if (settings != null) return settings;
if (getInherited() != null) return getInherited().getMatchPhaseSettings();
return null;
}
public void addRankSetting(RankSetting rankSetting) {
rankSettings.add(rankSetting);
}
public void addRankSetting(String fieldName, RankSetting.Type type, Object value) {
addRankSetting(new RankSetting(fieldName, type, value));
}
/**
* Returns the a rank setting of a field, or null if there is no such rank setting in this profile
*
* @param field the field whose settings to return.
* @param type the type that the field is required to be.
* @return the rank setting found, or null.
*/
RankSetting getDeclaredRankSetting(String field, RankSetting.Type type) {
for (Iterator<RankSetting> i = declaredRankSettingIterator(); i.hasNext(); ) {
RankSetting setting = i.next();
if (setting.getFieldName().equals(field) &&
setting.getType().equals(type)) {
return setting;
}
}
return null;
}
/**
* Returns a rank setting of field or index, or null if there is no such rank setting in this profile or one it
* inherits
*
* @param field the field whose settings to return
* @param type the type that the field is required to be
* @return the rank setting found, or null
*/
public RankSetting getRankSetting(String field, RankSetting.Type type) {
RankSetting rankSetting = getDeclaredRankSetting(field, type);
if (rankSetting != null) return rankSetting;
if (getInherited() != null) return getInherited().getRankSetting(field, type);
return null;
}
/**
* Returns the rank settings in this rank profile
*
* @return an iterator for the declared rank setting
*/
public Iterator<RankSetting> declaredRankSettingIterator() {
return Collections.unmodifiableSet(rankSettings).iterator();
}
/**
* Returns all settings in this profile or any profile it inherits
*
* @return an iterator for all rank settings of this
*/
public Iterator<RankSetting> rankSettingIterator() {
return rankSettings().iterator();
}
/**
* Returns a snapshot of the rank settings of this and everything it inherits.
* Changes to the returned set will not be reflected in this rank profile.
*/
public Set<RankSetting> rankSettings() {
Set<RankSetting> allSettings = new LinkedHashSet<>(rankSettings);
RankProfile parent = getInherited();
if (parent != null)
allSettings.addAll(parent.rankSettings());
return allSettings;
}
public void addConstant(String name, Value value) {
if (value instanceof TensorValue) {
TensorType type = value.type();
if (type.dimensions().stream().anyMatch(d -> d.isIndexed() && d.size().isEmpty()))
throw new IllegalArgumentException("Illegal type of constant " + name + " type " + type +
": Dense tensor dimensions must have a size");
}
constants.put(name, value.freeze());
}
public void addConstantTensor(String name, TensorValue value) {
addConstant(name, value);
}
/** Returns an unmodifiable view of the constants available in this */
public Map<String, Value> getConstants() {
if (constants.isEmpty())
return getInherited() != null ? getInherited().getConstants() : Collections.emptyMap();
if (getInherited() == null || getInherited().getConstants().isEmpty())
return Collections.unmodifiableMap(constants);
Map<String, Value> combinedConstants = new HashMap<>(getInherited().getConstants());
combinedConstants.putAll(constants);
return combinedConstants;
}
public void addAttributeType(String attributeName, String attributeType) {
attributeTypes.addType(attributeName, attributeType);
}
public Map<String, String> getAttributeTypes() {
return attributeTypes.getTypes();
}
public void addQueryFeatureType(String queryFeature, String queryFeatureType) {
queryFeatureTypes.addType(queryFeature, queryFeatureType);
}
public Map<String, String> getQueryFeatureTypes() {
return queryFeatureTypes.getTypes();
}
/**
* Returns the ranking expression to use by this. This expression must not be edited.
* Returns null if no expression is set.
*/
public RankingExpression getFirstPhaseRanking() {
RankingExpressionFunction function = getFirstPhase();
if (function == null) return null;
return function.function.getBody();
}
public RankingExpressionFunction getFirstPhase() {
if (firstPhaseRanking != null) return firstPhaseRanking;
RankProfile inherited = getInherited();
if (inherited != null) return inherited.getFirstPhase();
return null;
}
void setFirstPhaseRanking(RankingExpression rankingExpression) {
this.firstPhaseRanking = new RankingExpressionFunction(new ExpressionFunction(FIRST_PHASE, Collections.emptyList(), rankingExpression), false);
}
public void setFirstPhaseRanking(String expression) {
try {
firstPhaseRanking = new RankingExpressionFunction(parseRankingExpression(FIRST_PHASE, Collections.emptyList(), expression), false);
} catch (ParseException e) {
throw new IllegalArgumentException("Illegal first phase ranking function", e);
}
}
/**
* Returns the ranking expression to use by this. This expression must not be edited.
* Returns null if no expression is set.
*/
public RankingExpression getSecondPhaseRanking() {
RankingExpressionFunction function = getSecondPhase();
if (function == null) return null;
return function.function().getBody();
}
public RankingExpressionFunction getSecondPhase() {
if (secondPhaseRanking != null) return secondPhaseRanking;
RankProfile inherited = getInherited();
if (inherited != null) return inherited.getSecondPhase();
return null;
}
public void setSecondPhaseRanking(String expression) {
try {
secondPhaseRanking = new RankingExpressionFunction(parseRankingExpression(SECOND_PHASE, Collections.emptyList(), expression), false);
}
catch (ParseException e) {
throw new IllegalArgumentException("Illegal second phase ranking function", e);
}
}
/** Returns a read-only view of the summary features to use in this profile. This is never null */
public Set<ReferenceNode> getSummaryFeatures() {
if (inheritedSummaryFeatures != null && summaryFeatures != null) {
Set<ReferenceNode> combined = new HashSet<>();
combined.addAll(getInherited().getSummaryFeatures());
combined.addAll(summaryFeatures);
return Collections.unmodifiableSet(combined);
}
if (summaryFeatures != null) return Collections.unmodifiableSet(summaryFeatures);
if (getInherited() != null) return getInherited().getSummaryFeatures();
return Set.of();
}
private void addSummaryFeature(ReferenceNode feature) {
if (summaryFeatures == null)
summaryFeatures = new LinkedHashSet<>();
summaryFeatures.add(feature);
}
/** Adds the content of the given feature list to the internal list of summary features. */
public void addSummaryFeatures(FeatureList features) {
for (ReferenceNode feature : features) {
addSummaryFeature(feature);
}
}
/**
* Sets the name this should inherit the summary features of.
* Without setting this, this will either have the summary features of the parent,
* or if summary features are set in this, only have the summary features in this.
* With this set the resulting summary features of this will be the superset of those defined in this and
* the final (with inheritance included) summary features of the given parent.
* The profile must be the profile which is directly inherited by this.
*
*/
public void setInheritedSummaryFeatures(String parentProfile) {
if ( ! parentProfile.equals(inheritedName))
throw new IllegalArgumentException("This can only inherit the summary features of its parent, '" +
inheritedName + ", but attemtping to inherit '" + parentProfile);
this.inheritedSummaryFeatures = parentProfile;
}
/** Returns a read-only view of the rank features to use in this profile. This is never null */
public Set<ReferenceNode> getRankFeatures() {
if (rankFeatures != null) return Collections.unmodifiableSet(rankFeatures);
if (getInherited() != null) return getInherited().getRankFeatures();
return Collections.emptySet();
}
private void addRankFeature(ReferenceNode feature) {
if (rankFeatures == null)
rankFeatures = new LinkedHashSet<>();
rankFeatures.add(feature);
}
/**
* Adds the content of the given feature list to the internal list of rank features.
*
* @param features The features to add.
*/
public void addRankFeatures(FeatureList features) {
for (ReferenceNode feature : features) {
addRankFeature(feature);
}
}
/** Returns a read only flattened list view of the rank properties to use in this profile. This is never null. */
public List<RankProperty> getRankProperties() {
List<RankProperty> properties = new ArrayList<>();
for (List<RankProperty> propertyList : getRankPropertyMap().values()) {
properties.addAll(propertyList);
}
return Collections.unmodifiableList(properties);
}
/** Returns a read only map view of the rank properties to use in this profile. This is never null. */
public Map<String, List<RankProperty>> getRankPropertyMap() {
if (rankProperties.size() == 0 && getInherited() == null) return Collections.emptyMap();
if (rankProperties.size() == 0) return getInherited().getRankPropertyMap();
if (getInherited() == null) return Collections.unmodifiableMap(rankProperties);
Map<String, List<RankProperty>> combined = new LinkedHashMap<>(getInherited().getRankPropertyMap());
combined.putAll(rankProperties);
return Collections.unmodifiableMap(combined);
}
public void addRankProperty(String name, String parameter) {
addRankProperty(new RankProperty(name, parameter));
}
private void addRankProperty(RankProperty rankProperty) {
rankProperties.computeIfAbsent(rankProperty.getName(), (String key) -> new ArrayList<>(1)).add(rankProperty);
}
@Override
public String toString() {
return "rank profile '" + getName() + "'";
}
public int getRerankCount() {
return (rerankCount < 0 && (getInherited() != null))
? getInherited().getRerankCount()
: rerankCount;
}
public int getNumThreadsPerSearch() {
return (numThreadsPerSearch < 0 && (getInherited() != null))
? getInherited().getNumThreadsPerSearch()
: numThreadsPerSearch;
}
public void setNumThreadsPerSearch(int numThreads) {
this.numThreadsPerSearch = numThreads;
}
public int getMinHitsPerThread() {
return (minHitsPerThread < 0 && (getInherited() != null))
? getInherited().getMinHitsPerThread()
: minHitsPerThread;
}
public void setMinHitsPerThread(int minHits) {
this.minHitsPerThread = minHits;
}
public void setNumSearchPartitions(int numSearchPartitions) {
this.numSearchPartitions = numSearchPartitions;
}
public int getNumSearchPartitions() {
return (numSearchPartitions < 0 && (getInherited() != null))
? getInherited().getNumSearchPartitions()
: numSearchPartitions;
}
public OptionalDouble getTermwiseLimit() {
return ((termwiseLimit == null) && (getInherited() != null))
? getInherited().getTermwiseLimit()
: (termwiseLimit != null) ? OptionalDouble.of(termwiseLimit) : OptionalDouble.empty();
}
public void setTermwiseLimit(double termwiseLimit) { this.termwiseLimit = termwiseLimit; }
/** Sets the rerank count. Set to -1 to use inherited */
public void setRerankCount(int rerankCount) {
this.rerankCount = rerankCount;
}
/** Whether we should ignore the default rank features. Set to null to use inherited */
public void setIgnoreDefaultRankFeatures(Boolean ignoreDefaultRankFeatures) {
this.ignoreDefaultRankFeatures = ignoreDefaultRankFeatures;
}
public boolean getIgnoreDefaultRankFeatures() {
if (ignoreDefaultRankFeatures != null) return ignoreDefaultRankFeatures;
return (getInherited() != null) && getInherited().getIgnoreDefaultRankFeatures();
}
/** Adds a function */
public void addFunction(String name, List<String> arguments, String expression, boolean inline) {
try {
addFunction(parseRankingExpression(name, arguments, expression), inline);
}
catch (ParseException e) {
throw new IllegalArgumentException("Could not parse function '" + name + "'", e);
}
}
/** Adds a function and returns it */
public RankingExpressionFunction addFunction(ExpressionFunction function, boolean inline) {
RankingExpressionFunction rankingExpressionFunction = new RankingExpressionFunction(function, inline);
functions.put(function.getName(), rankingExpressionFunction);
allFunctionsCached = null;
return rankingExpressionFunction;
}
/**
* Use for rank profiles representing a model evaluation; it will assume
* that a input is provided with the declared type (for the purpose of
* type resolving).
**/
public void addInputFeature(String name, TensorType declaredType) {
Reference ref = Reference.fromIdentifier(name);
if (inputFeatures.containsKey(ref)) {
TensorType hadType = inputFeatures.get(ref);
if (! declaredType.equals(hadType)) {
throw new IllegalArgumentException("Tried to replace input feature "+name+" with different type: "+
hadType+" -> "+declaredType);
}
}
inputFeatures.put(ref, declaredType);
}
public RankingExpressionFunction findFunction(String name) {
RankingExpressionFunction function = functions.get(name);
return ((function == null) && (getInherited() != null))
? getInherited().findFunction(name)
: function;
}
/** Returns an unmodifiable snapshot of the functions in this */
public Map<String, RankingExpressionFunction> getFunctions() {
if (needToUpdateFunctionCache()) {
allFunctionsCached = gatherAllFunctions();
}
return allFunctionsCached;
}
private Map<String, RankingExpressionFunction> gatherAllFunctions() {
if (functions.isEmpty() && getInherited() == null) return Collections.emptyMap();
if (functions.isEmpty()) return getInherited().getFunctions();
if (getInherited() == null) return Collections.unmodifiableMap(new LinkedHashMap<>(functions));
Map<String, RankingExpressionFunction> allFunctions = new LinkedHashMap<>(getInherited().getFunctions());
allFunctions.putAll(functions);
return Collections.unmodifiableMap(allFunctions);
}
private boolean needToUpdateFunctionCache() {
if (getInherited() != null)
return (allFunctionsCached == null) || getInherited().needToUpdateFunctionCache();
return allFunctionsCached == null;
}
public int getKeepRankCount() {
if (keepRankCount >= 0) return keepRankCount;
if (getInherited() != null) return getInherited().getKeepRankCount();
return -1;
}
public void setKeepRankCount(int rerankArraySize) {
this.keepRankCount = rerankArraySize;
}
public double getRankScoreDropLimit() {
if (rankScoreDropLimit >- Double.MAX_VALUE) return rankScoreDropLimit;
if (getInherited() != null) return getInherited().getRankScoreDropLimit();
return rankScoreDropLimit;
}
public void setRankScoreDropLimit(double rankScoreDropLimit) {
this.rankScoreDropLimit = rankScoreDropLimit;
}
public Set<String> filterFields() {
return filterFields;
}
/**
 * Returns all filter fields in this profile and any profile it inherits.
 *
 * @return the set of all filter fields
 */
public Set<String> allFilterFields() {
    Set<String> fields = new LinkedHashSet<>();
    RankProfile parent = getInherited();
    if (parent != null)
        fields.addAll(parent.allFilterFields());
    fields.addAll(filterFields());
    return fields;
}
/**
 * Parses the given expression text (or "file:" reference) into a named function.
 *
 * @throws ParseException if the expression is empty or cannot be parsed
 */
private ExpressionFunction parseRankingExpression(String name, List<String> arguments, String expression) throws ParseException {
    String trimmed = expression.trim();
    if (trimmed.length() == 0)
        throw new ParseException("Encountered an empty ranking expression in " + getName()+ ", " + name + ".");

    try (Reader rankingExpressionReader = openRankingExpressionReader(name, trimmed)) {
        return new ExpressionFunction(name, arguments, new RankingExpression(name, rankingExpressionReader));
    }
    catch (com.yahoo.searchlib.rankingexpression.parser.ParseException e) {
        ParseException exception = new ParseException("Could not parse ranking expression '" + trimmed +
                                                      "' in " + getName()+ ", " + name + ".");
        throw (ParseException)exception.initCause(e);
    }
    catch (IOException e) {
        // Fix: previously the cause was dropped, hiding the underlying I/O failure
        throw new RuntimeException("IOException parsing ranking expression '" + name + "'", e);
    }
}
/** Strips the "file:" prefix and appends the ranking expression file suffix when missing. */
private static String extractFileName(String expression) {
    String fileName = expression.substring("file:".length()).trim();
    return fileName.endsWith(ApplicationPackage.RANKEXPRESSION_NAME_SUFFIX)
           ? fileName
           : fileName + ApplicationPackage.RANKEXPRESSION_NAME_SUFFIX;
}
// Returns a reader over the expression text itself, or over the referenced file when the
// expression has the form "file:<name>". File references into subdirectories are rejected.
private Reader openRankingExpressionReader(String expName, String expression) {
    if (!expression.startsWith("file:")) return new StringReader(expression);

    String fileName = extractFileName(expression);
    File file = new File(fileName);
    if (!file.isAbsolute() && file.getPath().contains("/"))
        throw new IllegalArgumentException("In " + getName() + ", " + expName + ", ranking references file '" + file +
                                           "' in subdirectory, which is not supported.");
    return search.getRankingExpression(fileName);
}
/** Shallow clones this */
@Override
public RankProfile clone() {
    try {
        RankProfile clone = (RankProfile)super.clone();
        // Copy collections so the clone can be modified independently.
        // Note: matchPhaseSettings and the elements/values inside the copied collections
        // (including the lists held by rankProperties) remain shared with this.
        clone.rankSettings = new LinkedHashSet<>(this.rankSettings);
        clone.matchPhaseSettings = this.matchPhaseSettings;
        clone.summaryFeatures = summaryFeatures != null ? new LinkedHashSet<>(this.summaryFeatures) : null;
        clone.rankFeatures = rankFeatures != null ? new LinkedHashSet<>(this.rankFeatures) : null;
        clone.rankProperties = new LinkedHashMap<>(this.rankProperties);
        clone.inputFeatures = new LinkedHashMap<>(this.inputFeatures);
        clone.functions = new LinkedHashMap<>(this.functions);
        clone.allFunctionsCached = null; // recompute the merged-function cache lazily for the clone
        clone.filterFields = new HashSet<>(this.filterFields);
        clone.constants = new HashMap<>(this.constants);
        return clone;
    }
    catch (CloneNotSupportedException e) {
        throw new RuntimeException("Won't happen", e);
    }
}
/**
 * Returns a copy of this where the content is optimized for execution.
 * Compiled profiles should never be modified.
 */
public RankProfile compile(QueryProfileRegistry queryProfiles, ImportedMlModels importedModels) {
    try {
        RankProfile compiled = this.clone();
        compiled.compileThis(queryProfiles, importedModels);
        return compiled;
    }
    catch (IllegalArgumentException e) {
        throw new IllegalArgumentException("Rank profile '" + getName() + "' is invalid", e);
    }
}

// Compiles this in place. Order matters: inline functions are compiled first, against an
// empty function map, and then supplied to the phase expressions and remaining functions.
private void compileThis(QueryProfileRegistry queryProfiles, ImportedMlModels importedModels) {
    checkNameCollisions(getFunctions(), getConstants());
    ExpressionTransforms expressionTransforms = new ExpressionTransforms();
    Map<Reference, TensorType> featureTypes = collectFeatureTypes();
    Map<String, RankingExpressionFunction> inlineFunctions =
            compileFunctions(this::getInlineFunctions, queryProfiles, featureTypes, importedModels, Collections.emptyMap(), expressionTransforms);
    firstPhaseRanking = compile(this.getFirstPhase(), queryProfiles, featureTypes, importedModels, getConstants(), inlineFunctions, expressionTransforms);
    secondPhaseRanking = compile(this.getSecondPhase(), queryProfiles, featureTypes, importedModels, getConstants(), inlineFunctions, expressionTransforms);
    functions = compileFunctions(this::getFunctions, queryProfiles, featureTypes, importedModels, inlineFunctions, expressionTransforms);
    allFunctionsCached = null; // functions changed; invalidate the merged cache
}

// A name may not denote both a function and a constant.
private void checkNameCollisions(Map<String, RankingExpressionFunction> functions, Map<String, Value> constants) {
    for (Map.Entry<String, RankingExpressionFunction> functionEntry : functions.entrySet()) {
        if (constants.containsKey(functionEntry.getKey()))
            throw new IllegalArgumentException("Cannot have both a constant and function named '" +
                                               functionEntry.getKey() + "'");
    }
}
// Returns the subset of functions flagged inline (substituted into calling expressions).
private Map<String, RankingExpressionFunction> getInlineFunctions() {
    return getFunctions().entrySet().stream().filter(x -> x.getValue().inline())
                         .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
}

// Compiles the supplied functions one at a time until none remain uncompiled.
// The supplier is re-invoked each iteration because compiling a function can change
// this profile's state — NOTE(review): inferred from the repeated fetch; confirm with callers.
private Map<String, RankingExpressionFunction> compileFunctions(Supplier<Map<String, RankingExpressionFunction>> functions,
                                                                QueryProfileRegistry queryProfiles,
                                                                Map<Reference, TensorType> featureTypes,
                                                                ImportedMlModels importedModels,
                                                                Map<String, RankingExpressionFunction> inlineFunctions,
                                                                ExpressionTransforms expressionTransforms) {
    Map<String, RankingExpressionFunction> compiledFunctions = new LinkedHashMap<>();
    Map.Entry<String, RankingExpressionFunction> entry;
    while (null != (entry = findUncompiledFunction(functions.get(), compiledFunctions.keySet()))) {
        RankingExpressionFunction rankingExpressionFunction = entry.getValue();
        RankingExpressionFunction compiled = compile(rankingExpressionFunction, queryProfiles, featureTypes,
                                                     importedModels, getConstants(), inlineFunctions, expressionTransforms);
        compiledFunctions.put(entry.getKey(), compiled);
    }
    return compiledFunctions;
}

// Returns the first function entry not yet in compiledFunctionNames, or null when all are compiled.
private static Map.Entry<String, RankingExpressionFunction> findUncompiledFunction(Map<String, RankingExpressionFunction> functions,
                                                                                   Set<String> compiledFunctionNames) {
    for (Map.Entry<String, RankingExpressionFunction> entry : functions.entrySet()) {
        if ( ! compiledFunctionNames.contains(entry.getKey()))
            return entry;
    }
    return null;
}
// Transforms the body of the given function and records on this profile any rank properties
// the transforms produced. Returns null when given null (i.e. an unset phase expression).
private RankingExpressionFunction compile(RankingExpressionFunction function,
                                          QueryProfileRegistry queryProfiles,
                                          Map<Reference, TensorType> featureTypes,
                                          ImportedMlModels importedModels,
                                          Map<String, Value> constants,
                                          Map<String, RankingExpressionFunction> inlineFunctions,
                                          ExpressionTransforms expressionTransforms) {
    if (function == null) return null;
    RankProfileTransformContext context = new RankProfileTransformContext(this,
                                                                          queryProfiles,
                                                                          featureTypes,
                                                                          importedModels,
                                                                          constants,
                                                                          inlineFunctions);
    RankingExpression expression = expressionTransforms.transform(function.function().getBody(), context);
    // Keep any rank properties the transforms emitted as part of this profile
    for (Map.Entry<String, String> rankProperty : context.rankProperties().entrySet()) {
        addRankProperty(rankProperty.getKey(), rankProperty.getValue());
    }
    return function.withExpression(expression);
}
/**
 * Creates a context containing the type information of all constants, attributes and query profiles
 * referable from this rank profile.
 */
public MapEvaluationTypeContext typeContext(QueryProfileRegistry queryProfiles) {
    return typeContext(queryProfiles, collectFeatureTypes());
}

public MapEvaluationTypeContext typeContext() { return typeContext(new QueryProfileRegistry()); }

/** Collects the types of all declared input features and of all (imported) attribute fields. */
private Map<Reference, TensorType> collectFeatureTypes() {
    // Idiom fix: copy-construct instead of copying entry by entry with forEach(put)
    Map<Reference, TensorType> featureTypes = new HashMap<>(inputFeatures);
    allFields().forEach(field -> addAttributeFeatureTypes(field, featureTypes));
    allImportedFields().forEach(field -> addAttributeFeatureTypes(field, featureTypes));
    return featureTypes;
}
/** Builds the evaluation type context: functions, feature types, constants, query features and ONNX outputs. */
public MapEvaluationTypeContext typeContext(QueryProfileRegistry queryProfiles, Map<Reference, TensorType> featureTypes) {
    MapEvaluationTypeContext context = new MapEvaluationTypeContext(getFunctions().values().stream()
                                                                                  .map(RankingExpressionFunction::function)
                                                                                  .collect(Collectors.toList()),
                                                                    featureTypes);

    // Register both profile-local and globally registered constants
    getConstants().forEach((k, v) -> context.setType(FeatureNames.asConstantFeature(k), v.type()));
    rankingConstants().asMap().forEach((k, v) -> context.setType(FeatureNames.asConstantFeature(k), v.getTensorType()));

    // Register "query(...)" feature types from query profile types. The same feature may be
    // declared in multiple types; then the declarations must have a dimensionwise generalization.
    for (QueryProfileType queryProfileType : queryProfiles.getTypeRegistry().allComponents()) {
        for (FieldDescription field : queryProfileType.declaredFields().values()) {
            TensorType type = field.getType().asTensorType();
            Optional<Reference> feature = Reference.simple(field.getName());
            if ( feature.isEmpty() || ! feature.get().name().equals("query")) continue;

            // Non-default existing type means another query profile already declared this feature
            TensorType existingType = context.getType(feature.get());
            if ( ! Objects.equals(existingType, context.defaultTypeOf(feature.get())))
                type = existingType.dimensionwiseGeneralizationWith(type).orElseThrow( () ->
                    new IllegalArgumentException(queryProfileType + " contains query feature " + feature.get() +
                                                 " with type " + field.getType().asTensorType() +
                                                 ", but this is already defined in another query profile with type " +
                                                 context.getType(feature.get())));
            context.setType(feature.get(), type);
        }
    }

    // Register the output types of ONNX models: the default output plus every mapped output
    for (Map.Entry<String, OnnxModel> entry : onnxModels().entrySet()) {
        String modelName = entry.getKey();
        OnnxModel model = entry.getValue();
        Arguments args = new Arguments(new ReferenceNode(modelName));
        Map<String, TensorType> inputTypes = resolveOnnxInputTypes(model, context);

        TensorType defaultOutputType = model.getTensorType(model.getDefaultOutput(), inputTypes);
        context.setType(new Reference("onnxModel", args, null), defaultOutputType);

        for (Map.Entry<String, String> mapping : model.getOutputMap().entrySet()) {
            TensorType type = model.getTensorType(mapping.getKey(), inputTypes);
            context.setType(new Reference("onnxModel", args, mapping.getValue()), type);
        }
    }
    return context;
}
// Resolves the tensor type of each ONNX model input that can be determined from the context;
// inputs whose type cannot be resolved are simply left out of the returned map.
private Map<String, TensorType> resolveOnnxInputTypes(OnnxModel model, MapEvaluationTypeContext context) {
    Map<String, TensorType> inputTypes = new HashMap<>();
    for (String onnxInputName : model.getInputMap().keySet()) {
        resolveOnnxInputType(onnxInputName, model, context).ifPresent(type -> inputTypes.put(onnxInputName, type));
    }
    return inputTypes;
}

// Resolves the type of a single ONNX input. The mapped source is either a simple feature
// reference — where rankingExpression(name) is unwrapped to the named function — or a
// function name looked up in the context. Empty if neither resolves.
private Optional<TensorType> resolveOnnxInputType(String onnxInputName, OnnxModel model, MapEvaluationTypeContext context) {
    String source = model.getInputMap().get(onnxInputName);
    if (source != null) {
        Optional<Reference> reference = Reference.simple(source);
        if (reference.isPresent()) {
            if (reference.get().name().equals("rankingExpression") && reference.get().simpleArgument().isPresent()) {
                source = reference.get().simpleArgument().get();  // fall through to the function lookup below
            } else {
                return Optional.of(context.getType(reference.get()));
            }
        }
        ExpressionFunction func = context.getFunction(source);
        if (func != null) {
            return Optional.of(func.getBody().type(context));
        }
    }
    return Optional.empty();
}
/** Registers the tensor type of every attribute of the given field as an attribute feature. */
private void addAttributeFeatureTypes(ImmutableSDField field, Map<Reference, TensorType> featureTypes) {
    Attribute primaryAttribute = field.getAttribute();
    field.getAttributes().forEach((attributeName, attribute) -> {
        // The field's own attribute is exposed under the field name rather than the attribute name
        String featureName = (attribute == primaryAttribute) ? field.getName() : attributeName;
        TensorType type = attribute.tensorType().orElse(TensorType.empty);
        featureTypes.put(FeatureNames.asAttributeFeature(featureName), type);
    });
}
/**
 * A rank setting. The identity of a rank setting is its field name and type (not value).
 * A rank setting is immutable.
 */
public static class RankSetting implements Serializable {

    private final String fieldName;

    private final Type type;

    /** The rank value */
    private final Object value;

    /** The kinds of rank settings a field (or index) can carry. */
    public enum Type {

        RANKTYPE("rank-type"),
        LITERALBOOST("literal-boost"),
        WEIGHT("weight"),
        PREFERBITVECTOR("preferbitvector",true);

        private final String name;

        /** True if this setting really pertains to an index, not a field within an index */
        private final boolean isIndexLevel;

        Type(String name) {
            this(name,false);
        }

        Type(String name,boolean isIndexLevel) {
            this.name = name;
            this.isIndexLevel=isIndexLevel;
        }

        /** True if this setting really pertains to an index, not a field within an index */
        public boolean isIndexLevel() { return isIndexLevel; }

        /** Returns the name of this type */
        public String getName() {
            return name;
        }

        public String toString() {
            return "type: " + name;
        }

    }

    public RankSetting(String fieldName, RankSetting.Type type, Object value) {
        this.fieldName = fieldName;
        this.type = type;
        this.value = value;
    }

    public String getFieldName() { return fieldName; }

    public Type getType() { return type; }

    public Object getValue() { return value; }

    /** Returns the value as an int, or a negative value if it is not an integer */
    public int getIntValue() {
        if (value instanceof Integer) {
            return ((Integer)value);
        }
        else {
            return -1;
        }
    }

    // Identity deliberately ignores value: a profile holds at most one setting per (field, type)
    @Override
    public int hashCode() {
        return fieldName.hashCode() + 17 * type.hashCode();
    }

    @Override
    public boolean equals(Object object) {
        if (!(object instanceof RankSetting)) {
            return false;
        }

        RankSetting other = (RankSetting)object;
        return
            fieldName.equals(other.fieldName) &&
            type.equals(other.type);
    }

    @Override
    public String toString() {
        return type + " setting " + fieldName + ": " + value;
    }

}
/** A rank property. Rank properties are Value Objects */
public static class RankProperty implements Serializable {

    private final String name;
    private final String value;

    public RankProperty(String name, String value) {
        this.name = name;
        this.value = value;
    }

    public String getName() { return name; }

    public String getValue() { return value; }

    @Override
    public int hashCode() {
        // Same combination formula as always used for this class; kept stable
        return name.hashCode() + 17 * value.hashCode();
    }

    @Override
    public boolean equals(Object object) {
        if ( ! (object instanceof RankProperty)) return false;
        RankProperty other = (RankProperty)object;
        return name.equals(other.name) && value.equals(other.value);
    }

    @Override
    public String toString() {
        return name + " = " + value;
    }

}
/** A function in a rank profile */
public static class RankingExpressionFunction {

    private ExpressionFunction function;

    /** True if this should be inlined into calling expressions. Useful for very cheap functions. */
    private final boolean inline;

    RankingExpressionFunction(ExpressionFunction function, boolean inline) {
        this.function = function;
        this.inline = inline;
    }

    /** Replaces the return type of the wrapped function; mutates this holder. */
    public void setReturnType(TensorType type) {
        this.function = function.withReturnType(type);
    }

    public ExpressionFunction function() { return function; }

    /** Inlining only applies to functions without arguments. */
    public boolean inline() {
        return inline && function.arguments().isEmpty();
    }

    /** Returns a copy of this whose function body is the given expression. */
    RankingExpressionFunction withExpression(RankingExpression expression) {
        return new RankingExpressionFunction(function.withBody(expression), inline);
    }

    @Override
    public String toString() {
        return "function " + function;
    }

}
/** Diversity settings for the match phase: attribute to diversify on, group count and cutoff tuning. */
public static final class DiversitySettings {

    private String attribute = null;
    private int minGroups = 0;
    private double cutoffFactor = 10;
    private Diversity.CutoffStrategy cutoffStrategy = Diversity.CutoffStrategy.loose;

    public void setAttribute(String value) { attribute = value; }
    public void setMinGroups(int value) { minGroups = value; }
    public void setCutoffFactor(double value) { cutoffFactor = value; }
    public void setCutoffStrategy(Diversity.CutoffStrategy strategy) { cutoffStrategy = strategy; }
    public String getAttribute() { return attribute; }
    public int getMinGroups() { return minGroups; }
    public double getCutoffFactor() { return cutoffFactor; }
    public Diversity.CutoffStrategy getCutoffStrategy() { return cutoffStrategy; }

    /** Throws IllegalArgumentException unless attribute, min-groups and cutoff factor are all valid. */
    void checkValid() {
        if (attribute == null || attribute.isEmpty()) {
            throw new IllegalArgumentException("'diversity' did not set non-empty diversity attribute name.");
        }
        if (minGroups <= 0) {
            throw new IllegalArgumentException("'diversity' did not set min-groups > 0");
        }
        if (cutoffFactor < 1.0) {
            throw new IllegalArgumentException("diversity.cutoff.factor must be larger or equal to 1.0.");
        }
    }
}
/**
 * Match-phase settings: the attribute to limit matching by, the target hit count,
 * and related tuning values. NOTE(review): the exact semantics of the tuning values
 * (maxFilterCoverage, evaluationPoint, prePostFilterTippingPoint) are not evident from this file.
 */
public static class MatchPhaseSettings {

    private String attribute = null;
    private boolean ascending = false;
    private int maxHits = 0;
    private double maxFilterCoverage = 0.2;
    private DiversitySettings diversity = null;
    private double evaluationPoint = 0.20;
    private double prePostFilterTippingPoint = 1.0;

    public void setDiversity(DiversitySettings value) {
        value.checkValid(); // fail at set time rather than at config production time
        diversity = value;
    }

    public void setAscending(boolean value) { ascending = value; }
    public void setAttribute(String value) { attribute = value; }
    public void setMaxHits(int value) { maxHits = value; }
    public void setMaxFilterCoverage(double value) { maxFilterCoverage = value; }
    public void setEvaluationPoint(double evaluationPoint) { this.evaluationPoint = evaluationPoint; }
    public void setPrePostFilterTippingPoint(double prePostFilterTippingPoint) { this.prePostFilterTippingPoint = prePostFilterTippingPoint; }

    public boolean getAscending() { return ascending; }
    public String getAttribute() { return attribute; }
    public int getMaxHits() { return maxHits; }
    public double getMaxFilterCoverage() { return maxFilterCoverage; }
    public DiversitySettings getDiversity() { return diversity; }
    public double getEvaluationPoint() { return evaluationPoint; }
    public double getPrePostFilterTippingPoint() { return prePostFilterTippingPoint; }

    /** Throws IllegalArgumentException unless an attribute and a positive max-hits are set. */
    public void checkValid() {
        if (attribute == null) {
            throw new IllegalArgumentException("match-phase did not set any attribute");
        }
        if (! (maxHits > 0)) {
            throw new IllegalArgumentException("match-phase did not set max-hits > 0");
        }
    }

}
/** A simple mutable name-to-type-spec mapping, exposed read-only through {@link #getTypes()}. */
public static class TypeSettings {

    private final Map<String, String> types = new HashMap<>();

    void addType(String name, String type) {
        types.put(name, type);
    }

    public Map<String, String> getTypes() {
        return Collections.unmodifiableMap(types);
    }

}
} | class RankProfile implements Cloneable {
public final static String FIRST_PHASE = "firstphase";
public final static String SECOND_PHASE = "secondphase";

/** The search definition-unique name of this rank profile */
private final String name;

/** The search definition owning this profile, or null if global (owned by a model) */
private final ImmutableSearch search;

/** The model owning this profile if it is global, or null if it is owned by a search definition */
private final VespaModel model;

/** The name of the rank profile inherited by this */
private String inheritedName = null;

/** The resolved inherited profile, if any */
private RankProfile inherited = null;

/** The match settings of this profile */
private MatchPhaseSettings matchPhaseSettings = null;

/** The rank settings of this profile */
protected Set<RankSetting> rankSettings = new java.util.LinkedHashSet<>();

/** The ranking expression to be used for first phase */
private RankingExpressionFunction firstPhaseRanking = null;

/** The ranking expression to be used for second phase */
private RankingExpressionFunction secondPhaseRanking = null;

/** Number of hits to be reranked in second phase, -1 means use default */
private int rerankCount = -1;

/** NOTE(review): purpose unclear from this file (historically commented "Mysterious attribute"); -1 means unset */
private int keepRankCount = -1;

private int numThreadsPerSearch = -1;
private int minHitsPerThread = -1;
private int numSearchPartitions = -1;

private Double termwiseLimit = null;

/** The drop limit used to drop hits with rank score less than or equal to this value */
private double rankScoreDropLimit = -Double.MAX_VALUE;

private Set<ReferenceNode> summaryFeatures;

/** Name of the directly inherited profile whose summary features are merged into this, or null */
private String inheritedSummaryFeatures;

private Set<ReferenceNode> rankFeatures;

/** The properties of this - a multimap */
private Map<String, List<RankProperty>> rankProperties = new LinkedHashMap<>();

private Boolean ignoreDefaultRankFeatures = null;

/** Functions declared directly in this profile, by name */
private Map<String, RankingExpressionFunction> functions = new LinkedHashMap<>();

/** Lazily computed merge of declared and inherited functions; null when stale */
private Map<String, RankingExpressionFunction> allFunctionsCached = null;

/** Declared input features and their tensor types */
private Map<Reference, TensorType> inputFeatures = new LinkedHashMap<>();

private Set<String> filterFields = new HashSet<>();

private final RankProfileRegistry rankProfileRegistry;

/** Constants in ranking expressions */
private Map<String, Value> constants = new HashMap<>();

private final TypeSettings attributeTypes = new TypeSettings();

private final TypeSettings queryFeatureTypes = new TypeSettings();

/** Cached list of all fields of the owning search definition */
private List<ImmutableSDField> allFieldsList;

/** Global onnx models not tied to a search definition */
private final OnnxModels onnxModels;

private final DeployLogger deployLogger;
/**
 * Creates a new rank profile for a particular search definition
 *
 * @param name                the name of the new profile
 * @param search              the search definition owning this profile
 * @param rankProfileRegistry the {@link com.yahoo.searchdefinition.RankProfileRegistry} to use for storing
 *                            and looking up rank profiles.
 */
public RankProfile(String name, Search search, RankProfileRegistry rankProfileRegistry) {
    this.name = Objects.requireNonNull(name, "name cannot be null");
    this.search = Objects.requireNonNull(search, "search cannot be null");
    this.model = null;      // owned by a search definition, not by a model
    this.onnxModels = null; // ONNX models are then reached through the search definition
    this.rankProfileRegistry = rankProfileRegistry;
    this.deployLogger = search.getDeployLogger();
}

/**
 * Creates a global rank profile
 *
 * @param name  the name of the new profile
 * @param model the model owning this profile
 */
public RankProfile(String name, VespaModel model, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, OnnxModels onnxModels) {
    this.name = Objects.requireNonNull(name, "name cannot be null");
    this.search = null;     // global profile: no owning search definition
    this.model = Objects.requireNonNull(model, "model cannot be null");
    this.rankProfileRegistry = rankProfileRegistry;
    this.onnxModels = onnxModels;
    this.deployLogger = deployLogger;
}
/** Returns the name of this rank profile */
public String getName() { return name; }

/** Returns the search definition owning this, or null if it is global */
public ImmutableSearch getSearch() { return search; }

/** Returns the application this is part of */
public ApplicationPackage applicationPackage() {
    if (search != null) return search.applicationPackage();
    return model.applicationPackage();
}

/** Returns the ranking constants of the owner of this */
public RankingConstants rankingConstants() {
    if (search != null) return search.rankingConstants();
    return model.rankingConstants();
}

/** Returns the ONNX models of the owner of this */
public Map<String, OnnxModel> onnxModels() {
    if (search != null) return search.onnxModels().asMap();
    return onnxModels.asMap();
}
// Streams every field of the owning search definition; empty for global profiles.
// The field list is fetched once and cached in allFieldsList.
private Stream<ImmutableSDField> allFields() {
    if (search == null) return Stream.empty();
    if (allFieldsList == null)
        allFieldsList = search.allFieldsList();
    return allFieldsList.stream();
}

// Streams the imported fields of the owning search definition; empty for global profiles.
private Stream<ImmutableSDField> allImportedFields() {
    if (search == null) return Stream.empty();
    return search.allImportedFields();
}
/**
 * Sets the name of the rank profile this inherits. Both rank profiles must be present in the same search
 * definition
 */
public void setInherited(String inheritedName) {
    this.inheritedName = inheritedName;
}

/** Returns the name of the profile this one inherits, or null if none is inherited */
public String getInheritedName() { return inheritedName; }

/** Looks the inherited profile up by name across all registered profiles, or returns null if not found */
private RankProfile resolveIndependentOfInheritance() {
    for (RankProfile candidate : rankProfileRegistry.all()) {
        if (candidate.getName().equals(inheritedName))
            return candidate;
    }
    return null;
}

/** Returns "searchName.profileName", or just the profile name for global profiles */
private String createFullyQualifiedName() {
    if (search == null) return getName();
    return search.getName() + "." + getName();
}
// Walks up the inheritance chain, accumulating each ancestor's fully qualified name, and fails
// when the chain returns to the starting profile (children.get(0)).
// NOTE(review): this assumes the caller seeds 'children' with the starting profile's name before
// the first call — with an empty list the very first ancestor added would trip the check. Confirm at call site.
private void verifyNoInheritanceCycle(List<String> children, RankProfile parent) {
    children.add(parent.createFullyQualifiedName());
    String root = children.get(0);
    if (root.equals(parent.createFullyQualifiedName())) {
        throw new IllegalArgumentException("There is a cycle in the inheritance for rank-profile '" + root + "' = " + children);
    }
    if (parent.getInherited() != null) {
        verifyNoInheritanceCycle(children, parent.getInherited());
    }
}
// Resolves the inherited profile within the given search definition. When a profile inherits
// one with its own name, the lookup prefers the inherited document types so that the parent's
// same-named profile (not this one) is found.
private RankProfile resolveInherited(ImmutableSearch search) {
    SDDocumentType documentType = search.getDocument();
    if (documentType != null) {
        if (name.equals(inheritedName)) {
            // Prefer a same-named profile declared for an inherited document type
            for (SDDocumentType baseType : documentType.getInheritedTypes()) {
                RankProfile resolvedFromBase = rankProfileRegistry.resolve(baseType, inheritedName);
                if (resolvedFromBase != null) return resolvedFromBase;
            }
        }
        return rankProfileRegistry.resolve(documentType, inheritedName);
    }
    return rankProfileRegistry.get(search.getName(), inheritedName);
}

// Resolves the profile this inherits, or returns null if this inherits nothing.
// Global profiles (no owning search) resolve among the globally registered profiles.
private RankProfile resolveInherited() {
    if (inheritedName == null) return null;
    return (getSearch() != null)
            ? resolveInherited(search)
            : rankProfileRegistry.getGlobal(inheritedName);
}
/**
 * Returns whether this profile inherits (directly or indirectly) the given profile
 *
 * @param name the profile name to compare this to.
 * @return whether or not this inherits from the named profile.
 */
public boolean inherits(String name) {
    for (RankProfile ancestor = getInherited(); ancestor != null; ancestor = ancestor.getInherited()) {
        if (ancestor.getName().equals(name))
            return true;
    }
    return false;
}
public void setMatchPhaseSettings(MatchPhaseSettings settings) {
    settings.checkValid(); // reject incomplete settings immediately
    this.matchPhaseSettings = settings;
}

/** Returns the match phase settings of this or an inherited profile, or null if none are set */
public MatchPhaseSettings getMatchPhaseSettings() {
    if (matchPhaseSettings != null) return matchPhaseSettings;
    RankProfile parent = getInherited();
    return parent != null ? parent.getMatchPhaseSettings() : null;
}

public void addRankSetting(RankSetting rankSetting) {
    rankSettings.add(rankSetting);
}

public void addRankSetting(String fieldName, RankSetting.Type type, Object value) {
    addRankSetting(new RankSetting(fieldName, type, value));
}
/**
 * Returns a rank setting declared directly in this profile, or null if there is no such setting.
 *
 * @param field the field whose setting to return
 * @param type the type the setting is required to have
 * @return the rank setting found, or null
 */
RankSetting getDeclaredRankSetting(String field, RankSetting.Type type) {
    for (RankSetting setting : rankSettings) {
        if (setting.getFieldName().equals(field) && setting.getType().equals(type))
            return setting;
    }
    return null;
}

/**
 * Returns a rank setting of field or index, or null if there is no such rank setting in this profile or one it
 * inherits
 *
 * @param field the field whose settings to return
 * @param type the type that the field is required to be
 * @return the rank setting found, or null
 */
public RankSetting getRankSetting(String field, RankSetting.Type type) {
    RankSetting declared = getDeclaredRankSetting(field, type);
    if (declared != null) return declared;
    RankProfile parent = getInherited();
    return parent != null ? parent.getRankSetting(field, type) : null;
}
/**
 * Returns the rank settings declared in this rank profile
 *
 * @return an iterator over the declared rank settings
 */
public Iterator<RankSetting> declaredRankSettingIterator() {
    return Collections.unmodifiableSet(rankSettings).iterator();
}

/**
 * Returns all settings in this profile or any profile it inherits
 *
 * @return an iterator for all rank settings of this
 */
public Iterator<RankSetting> rankSettingIterator() {
    return rankSettings().iterator();
}

/**
 * Returns a snapshot of the rank settings of this and everything it inherits.
 * Changes to the returned set will not be reflected in this rank profile.
 */
public Set<RankSetting> rankSettings() {
    Set<RankSetting> combined = new LinkedHashSet<>(rankSettings); // local settings first, shadowing inherited ones
    RankProfile parent = getInherited();
    if (parent != null)
        combined.addAll(parent.rankSettings());
    return combined;
}
/** Adds a constant usable in ranking expressions of this profile. The value is frozen on addition. */
public void addConstant(String name, Value value) {
    if (value instanceof TensorValue) {
        // Indexed (dense) dimensions of a constant must have a declared size
        TensorType type = value.type();
        boolean hasUnboundIndexed = type.dimensions().stream()
                                        .anyMatch(d -> d.isIndexed() && d.size().isEmpty());
        if (hasUnboundIndexed)
            throw new IllegalArgumentException("Illegal type of constant " + name + " type " + type +
                                               ": Dense tensor dimensions must have a size");
    }
    constants.put(name, value.freeze());
}

public void addConstantTensor(String name, TensorValue value) {
    addConstant(name, value);
}

/** Returns an unmodifiable view of the constants available in this */
public Map<String, Value> getConstants() {
    Map<String, Value> inheritedConstants =
            getInherited() != null ? getInherited().getConstants() : Collections.emptyMap();
    if (constants.isEmpty()) return inheritedConstants;
    if (inheritedConstants.isEmpty()) return Collections.unmodifiableMap(constants);
    Map<String, Value> combined = new HashMap<>(inheritedConstants);
    combined.putAll(constants); // local constants shadow inherited ones
    return combined;
}
/** Adds an attribute name to type-spec mapping for this profile */
public void addAttributeType(String attributeName, String attributeType) {
    attributeTypes.addType(attributeName, attributeType);
}

public Map<String, String> getAttributeTypes() {
    return attributeTypes.getTypes();
}

/** Adds a query feature name to type-spec mapping for this profile */
public void addQueryFeatureType(String queryFeature, String queryFeatureType) {
    queryFeatureTypes.addType(queryFeature, queryFeatureType);
}

public Map<String, String> getQueryFeatureTypes() {
    return queryFeatureTypes.getTypes();
}
/**
 * Returns the ranking expression to use by this. This expression must not be edited.
 * Returns null if no expression is set.
 */
public RankingExpression getFirstPhaseRanking() {
    RankingExpressionFunction function = getFirstPhase();
    if (function == null) return null;
    return function.function().getBody(); // consistency: use the accessor, as getSecondPhaseRanking does
}

/** Returns the first-phase function of this or an inherited profile, or null if none is set */
public RankingExpressionFunction getFirstPhase() {
    if (firstPhaseRanking != null) return firstPhaseRanking;
    RankProfile inherited = getInherited();
    if (inherited != null) return inherited.getFirstPhase();
    return null;
}
void setFirstPhaseRanking(RankingExpression rankingExpression) {
    this.firstPhaseRanking = new RankingExpressionFunction(
            new ExpressionFunction(FIRST_PHASE, Collections.emptyList(), rankingExpression), false);
}

public void setFirstPhaseRanking(String expression) {
    try {
        this.firstPhaseRanking = new RankingExpressionFunction(
                parseRankingExpression(FIRST_PHASE, Collections.emptyList(), expression), false);
    }
    catch (ParseException e) {
        throw new IllegalArgumentException("Illegal first phase ranking function", e);
    }
}
/**
 * Returns the ranking expression to use by this. This expression must not be edited.
 * Returns null if no expression is set.
 */
public RankingExpression getSecondPhaseRanking() {
    RankingExpressionFunction function = getSecondPhase();
    return function == null ? null : function.function().getBody();
}

/** Returns the second-phase function of this or an inherited profile, or null if none is set */
public RankingExpressionFunction getSecondPhase() {
    if (secondPhaseRanking != null) return secondPhaseRanking;
    RankProfile parent = getInherited();
    return parent == null ? null : parent.getSecondPhase();
}

public void setSecondPhaseRanking(String expression) {
    try {
        this.secondPhaseRanking = new RankingExpressionFunction(
                parseRankingExpression(SECOND_PHASE, Collections.emptyList(), expression), false);
    }
    catch (ParseException e) {
        throw new IllegalArgumentException("Illegal second phase ranking function", e);
    }
}
/** Returns a read-only view of the summary features to use in this profile. This is never null */
public Set<ReferenceNode> getSummaryFeatures() {
    if (inheritedSummaryFeatures != null && summaryFeatures != null) {
        // Fix: use LinkedHashSet (not HashSet) so the combined feature order is deterministic,
        // matching how summaryFeatures itself is maintained
        Set<ReferenceNode> combined = new LinkedHashSet<>();
        combined.addAll(getInherited().getSummaryFeatures());
        combined.addAll(summaryFeatures);
        return Collections.unmodifiableSet(combined);
    }
    if (summaryFeatures != null) return Collections.unmodifiableSet(summaryFeatures);
    if (getInherited() != null) return getInherited().getSummaryFeatures();
    return Set.of();
}
private void addSummaryFeature(ReferenceNode feature) {
    if (summaryFeatures == null)
        summaryFeatures = new LinkedHashSet<>();
    summaryFeatures.add(feature);
}

/** Adds the content of the given feature list to the internal list of summary features. */
public void addSummaryFeatures(FeatureList features) {
    features.forEach(this::addSummaryFeature);
}
/**
 * Sets the name this should inherit the summary features of.
 * Without setting this, this will either have the summary features of the parent,
 * or if summary features are set in this, only have the summary features in this.
 * With this set the resulting summary features of this will be the superset of those defined in this and
 * the final (with inheritance included) summary features of the given parent.
 * The profile must be the profile which is directly inherited by this.
 */
public void setInheritedSummaryFeatures(String parentProfile) {
    if ( ! parentProfile.equals(inheritedName))
        // Fix: corrected "attemtping" typo and balanced the quotes in the message
        throw new IllegalArgumentException("This can only inherit the summary features of its parent, '" +
                                           inheritedName + "', but attempting to inherit '" + parentProfile + "'");
    this.inheritedSummaryFeatures = parentProfile;
}
/** Returns a read-only view of the rank features to use in this profile. This is never null */
public Set<ReferenceNode> getRankFeatures() {
    if (rankFeatures != null) return Collections.unmodifiableSet(rankFeatures);
    if (getInherited() != null) return getInherited().getRankFeatures(); // fall back to the parent's features
    return Collections.emptySet();
}
private void addRankFeature(ReferenceNode feature) {
if (rankFeatures == null)
rankFeatures = new LinkedHashSet<>();
rankFeatures.add(feature);
}
/**
 * Adds the content of the given feature list to the internal list of rank features.
 *
 * @param features the features to add
 */
public void addRankFeatures(FeatureList features) {
    features.forEach(this::addRankFeature);
}
/** Returns a read only flattened list view of the rank properties to use in this profile. This is never null. */
public List<RankProperty> getRankProperties() {
    List<RankProperty> flattened = new ArrayList<>();
    getRankPropertyMap().values().forEach(flattened::addAll);
    return Collections.unmodifiableList(flattened);
}
/** Returns a read only map view of the rank properties to use in this profile. This is never null. */
public Map<String, List<RankProperty>> getRankPropertyMap() {
    // Fast paths: nothing local and/or nothing inherited
    if (rankProperties.size() == 0 && getInherited() == null) return Collections.emptyMap();
    if (rankProperties.size() == 0) return getInherited().getRankPropertyMap();
    if (getInherited() == null) return Collections.unmodifiableMap(rankProperties);
    // Both present: local properties override inherited ones with the same name
    Map<String, List<RankProperty>> combined = new LinkedHashMap<>(getInherited().getRankPropertyMap());
    combined.putAll(rankProperties);
    return Collections.unmodifiableMap(combined);
}
/** Adds a rank property to this profile; a name may map to several values. */
public void addRankProperty(String name, String parameter) {
    addRankProperty(new RankProperty(name, parameter));
}
// Appends the property to the list registered under its name, creating the list on first use
private void addRankProperty(RankProperty rankProperty) {
    rankProperties.computeIfAbsent(rankProperty.getName(), (String key) -> new ArrayList<>(1)).add(rankProperty);
}
@Override
public String toString() {
return "rank profile '" + getName() + "'";
}
public int getRerankCount() {
return (rerankCount < 0 && (getInherited() != null))
? getInherited().getRerankCount()
: rerankCount;
}
public int getNumThreadsPerSearch() {
return (numThreadsPerSearch < 0 && (getInherited() != null))
? getInherited().getNumThreadsPerSearch()
: numThreadsPerSearch;
}
public void setNumThreadsPerSearch(int numThreads) {
this.numThreadsPerSearch = numThreads;
}
public int getMinHitsPerThread() {
return (minHitsPerThread < 0 && (getInherited() != null))
? getInherited().getMinHitsPerThread()
: minHitsPerThread;
}
public void setMinHitsPerThread(int minHits) {
this.minHitsPerThread = minHits;
}
public void setNumSearchPartitions(int numSearchPartitions) {
this.numSearchPartitions = numSearchPartitions;
}
public int getNumSearchPartitions() {
return (numSearchPartitions < 0 && (getInherited() != null))
? getInherited().getNumSearchPartitions()
: numSearchPartitions;
}
public OptionalDouble getTermwiseLimit() {
return ((termwiseLimit == null) && (getInherited() != null))
? getInherited().getTermwiseLimit()
: (termwiseLimit != null) ? OptionalDouble.of(termwiseLimit) : OptionalDouble.empty();
}
public void setTermwiseLimit(double termwiseLimit) { this.termwiseLimit = termwiseLimit; }
/** Sets the rerank count. Set to -1 to use inherited */
public void setRerankCount(int rerankCount) {
this.rerankCount = rerankCount;
}
/** Whether we should ignore the default rank features. Set to null to use inherited */
public void setIgnoreDefaultRankFeatures(Boolean ignoreDefaultRankFeatures) {
this.ignoreDefaultRankFeatures = ignoreDefaultRankFeatures;
}
public boolean getIgnoreDefaultRankFeatures() {
if (ignoreDefaultRankFeatures != null) return ignoreDefaultRankFeatures;
return (getInherited() != null) && getInherited().getIgnoreDefaultRankFeatures();
}
/** Adds a function */
public void addFunction(String name, List<String> arguments, String expression, boolean inline) {
try {
addFunction(parseRankingExpression(name, arguments, expression), inline);
}
catch (ParseException e) {
throw new IllegalArgumentException("Could not parse function '" + name + "'", e);
}
}
/** Adds a function and returns it */
public RankingExpressionFunction addFunction(ExpressionFunction function, boolean inline) {
RankingExpressionFunction rankingExpressionFunction = new RankingExpressionFunction(function, inline);
functions.put(function.getName(), rankingExpressionFunction);
allFunctionsCached = null;
return rankingExpressionFunction;
}
/**
* Use for rank profiles representing a model evaluation; it will assume
* that a input is provided with the declared type (for the purpose of
* type resolving).
**/
public void addInputFeature(String name, TensorType declaredType) {
Reference ref = Reference.fromIdentifier(name);
if (inputFeatures.containsKey(ref)) {
TensorType hadType = inputFeatures.get(ref);
if (! declaredType.equals(hadType)) {
throw new IllegalArgumentException("Tried to replace input feature "+name+" with different type: "+
hadType+" -> "+declaredType);
}
}
inputFeatures.put(ref, declaredType);
}
/** Returns the function of the given name, declared in this or an inherited profile, or null if none. */
public RankingExpressionFunction findFunction(String name) {
    RankingExpressionFunction local = functions.get(name);
    if (local != null) return local;
    return (getInherited() != null) ? getInherited().findFunction(name) : null;
}
/** Returns an unmodifiable snapshot of the functions in this */
public Set getFunctions_DOC_PLACEHOLDER();
// Computes the union of this profile's functions and the inherited ones,
// with local definitions overriding inherited definitions of the same name.
private Map<String, RankingExpressionFunction> gatherAllFunctions() {
    if (functions.isEmpty() && getInherited() == null) return Collections.emptyMap();
    if (functions.isEmpty()) return getInherited().getFunctions();
    if (getInherited() == null) return Collections.unmodifiableMap(new LinkedHashMap<>(functions));
    // Inherited first, then local overrides
    Map<String, RankingExpressionFunction> allFunctions = new LinkedHashMap<>(getInherited().getFunctions());
    allFunctions.putAll(functions);
    return Collections.unmodifiableMap(allFunctions);
}
// True if the function cache of this, or of any ancestor profile, is stale
private boolean needToUpdateFunctionCache() {
    if (allFunctionsCached == null) return true;
    return (getInherited() != null) && getInherited().needToUpdateFunctionCache();
}
public int getKeepRankCount() {
if (keepRankCount >= 0) return keepRankCount;
if (getInherited() != null) return getInherited().getKeepRankCount();
return -1;
}
public void setKeepRankCount(int rerankArraySize) {
this.keepRankCount = rerankArraySize;
}
// -Double.MAX_VALUE is the "unset" sentinel value for rankScoreDropLimit
public double getRankScoreDropLimit() {
    if (rankScoreDropLimit > -Double.MAX_VALUE) return rankScoreDropLimit;
    return (getInherited() != null) ? getInherited().getRankScoreDropLimit() : rankScoreDropLimit;
}
public void setRankScoreDropLimit(double rankScoreDropLimit) {
this.rankScoreDropLimit = rankScoreDropLimit;
}
public Set<String> filterFields() {
return filterFields;
}
/**
 * Returns all filter fields in this profile and any profile it inherits.
 *
 * @return the set of all filter fields
 */
public Set<String> allFilterFields() {
    Set<String> all = new LinkedHashSet<>();
    if (getInherited() != null)
        all.addAll(getInherited().allFilterFields());
    all.addAll(filterFields());
    return all;
}
/**
 * Parses the given expression text (or "file:" reference) into a named function.
 *
 * @throws ParseException if the expression is empty or fails to parse
 */
private ExpressionFunction parseRankingExpression(String name, List<String> arguments, String expression) throws ParseException {
    String trimmedExpression = expression.trim(); // hoisted: was trimmed three separate times
    if (trimmedExpression.isEmpty())
        throw new ParseException("Encountered an empty ranking expression in " + getName()+ ", " + name + ".");

    try (Reader rankingExpressionReader = openRankingExpressionReader(name, trimmedExpression)) {
        return new ExpressionFunction(name, arguments, new RankingExpression(name, rankingExpressionReader));
    }
    catch (com.yahoo.searchlib.rankingexpression.parser.ParseException e) {
        ParseException exception = new ParseException("Could not parse ranking expression '" + trimmedExpression +
                                                      "' in " + getName()+ ", " + name + ".");
        throw (ParseException)exception.initCause(e);
    }
    catch (IOException e) {
        // Fixed: preserve the cause - previously the IOException was dropped, hiding the underlying failure
        throw new RuntimeException("IOException parsing ranking expression '" + name + "'", e);
    }
}
// Strips the "file:" prefix and ensures the ranking expression file suffix is present
private static String extractFileName(String expression) {
    String name = expression.substring("file:".length()).trim();
    return name.endsWith(ApplicationPackage.RANKEXPRESSION_NAME_SUFFIX)
           ? name
           : name + ApplicationPackage.RANKEXPRESSION_NAME_SUFFIX;
}
// Returns a reader over the expression text itself, or over the referenced expression file
// when the expression is of the form "file:<name>".
private Reader openRankingExpressionReader(String expName, String expression) {
    if (!expression.startsWith("file:")) return new StringReader(expression);

    String fileName = extractFileName(expression);
    File file = new File(fileName);
    // Relative paths must point directly into the schema directory; subdirectories are unsupported
    if (!file.isAbsolute() && file.getPath().contains("/"))
        throw new IllegalArgumentException("In " + getName() + ", " + expName + ", ranking references file '" + file +
                                           "' in subdirectory, which is not supported.");
    return search.getRankingExpression(fileName);
}
/** Shallow clones this */
@Override
public RankProfile clone() {
    try {
        RankProfile clone = (RankProfile)super.clone();
        // Copy each mutable container so the clone can be modified independently;
        // the contained elements themselves remain shared (this is a shallow clone).
        clone.rankSettings = new LinkedHashSet<>(this.rankSettings);
        clone.matchPhaseSettings = this.matchPhaseSettings; // shared reference, intentionally not copied
        clone.summaryFeatures = summaryFeatures != null ? new LinkedHashSet<>(this.summaryFeatures) : null;
        clone.rankFeatures = rankFeatures != null ? new LinkedHashSet<>(this.rankFeatures) : null;
        clone.rankProperties = new LinkedHashMap<>(this.rankProperties);
        clone.inputFeatures = new LinkedHashMap<>(this.inputFeatures);
        clone.functions = new LinkedHashMap<>(this.functions);
        clone.allFunctionsCached = null; // force the clone to rebuild its own function cache
        clone.filterFields = new HashSet<>(this.filterFields);
        clone.constants = new HashMap<>(this.constants);
        return clone;
    }
    catch (CloneNotSupportedException e) {
        throw new RuntimeException("Won't happen", e);
    }
}
/**
* Returns a copy of this where the content is optimized for execution.
* Compiled profiles should never be modified.
*/
public RankProfile compile(QueryProfileRegistry queryProfiles, ImportedMlModels importedModels) {
try {
RankProfile compiled = this.clone();
compiled.compileThis(queryProfiles, importedModels);
return compiled;
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Rank profile '" + getName() + "' is invalid", e);
}
}
// Compiles this profile in place: applies the full set of expression transforms to the
// first/second phase expressions and all functions, with feature types resolved.
private void compileThis(QueryProfileRegistry queryProfiles, ImportedMlModels importedModels) {
    checkNameCollisions(getFunctions(), getConstants());
    ExpressionTransforms expressionTransforms = new ExpressionTransforms();

    Map<Reference, TensorType> featureTypes = collectFeatureTypes();
    // Inline functions are compiled first so they can be inlined into the expressions below
    Map<String, RankingExpressionFunction> inlineFunctions =
            compileFunctions(this::getInlineFunctions, queryProfiles, featureTypes, importedModels, Collections.emptyMap(), expressionTransforms);

    firstPhaseRanking = compile(this.getFirstPhase(), queryProfiles, featureTypes, importedModels, getConstants(), inlineFunctions, expressionTransforms);
    secondPhaseRanking = compile(this.getSecondPhase(), queryProfiles, featureTypes, importedModels, getConstants(), inlineFunctions, expressionTransforms);
    functions = compileFunctions(this::getFunctions, queryProfiles, featureTypes, importedModels, inlineFunctions, expressionTransforms);
    allFunctionsCached = null; // invalidate: functions were just replaced by their compiled forms
}
// Verifies that no name is used for both a function and a constant
private void checkNameCollisions(Map<String, RankingExpressionFunction> functions, Map<String, Value> constants) {
    for (String functionName : functions.keySet()) {
        if (constants.containsKey(functionName))
            throw new IllegalArgumentException("Cannot have both a constant and function named '" +
                                               functionName + "'");
    }
}
// Returns the subset of functions marked inline.
// Fixed: collect into a LinkedHashMap - the plain toMap collector returns a HashMap,
// which loses the deterministic declaration order used for functions everywhere else.
private Map<String, RankingExpressionFunction> getInlineFunctions() {
    return getFunctions().entrySet().stream()
                         .filter(entry -> entry.getValue().inline())
                         .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue,
                                                   (first, second) -> first, LinkedHashMap::new));
}
private Map<String, RankingExpressionFunction> compileFunctions(Supplier<Map<String, RankingExpressionFunction>> functions,
QueryProfileRegistry queryProfiles,
Map<Reference, TensorType> featureTypes,
ImportedMlModels importedModels,
Map<String, RankingExpressionFunction> inlineFunctions,
ExpressionTransforms expressionTransforms) {
Map<String, RankingExpressionFunction> compiledFunctions = new LinkedHashMap<>();
Map.Entry<String, RankingExpressionFunction> entry;
while (null != (entry = findUncompiledFunction(functions.get(), compiledFunctions.keySet()))) {
RankingExpressionFunction rankingExpressionFunction = entry.getValue();
RankingExpressionFunction compiled = compile(rankingExpressionFunction, queryProfiles, featureTypes,
importedModels, getConstants(), inlineFunctions, expressionTransforms);
compiledFunctions.put(entry.getKey(), compiled);
}
return compiledFunctions;
}
// Returns the first function (in iteration order) not yet compiled, or null if all are done
private static Map.Entry<String, RankingExpressionFunction> findUncompiledFunction(Map<String, RankingExpressionFunction> functions,
                                                                                   Set<String> compiledFunctionNames) {
    return functions.entrySet().stream()
                    .filter(entry -> ! compiledFunctionNames.contains(entry.getKey()))
                    .findFirst()
                    .orElse(null);
}
private RankingExpressionFunction compile(RankingExpressionFunction function,
QueryProfileRegistry queryProfiles,
Map<Reference, TensorType> featureTypes,
ImportedMlModels importedModels,
Map<String, Value> constants,
Map<String, RankingExpressionFunction> inlineFunctions,
ExpressionTransforms expressionTransforms) {
if (function == null) return null;
RankProfileTransformContext context = new RankProfileTransformContext(this,
queryProfiles,
featureTypes,
importedModels,
constants,
inlineFunctions);
RankingExpression expression = expressionTransforms.transform(function.function().getBody(), context);
for (Map.Entry<String, String> rankProperty : context.rankProperties().entrySet()) {
addRankProperty(rankProperty.getKey(), rankProperty.getValue());
}
return function.withExpression(expression);
}
/**
* Creates a context containing the type information of all constants, attributes and query profiles
* referable from this rank profile.
*/
public MapEvaluationTypeContext typeContext(QueryProfileRegistry queryProfiles) {
return typeContext(queryProfiles, collectFeatureTypes());
}
public MapEvaluationTypeContext typeContext() { return typeContext(new QueryProfileRegistry()); }
// Collects the resolvable feature types: declared model inputs first, then attribute
// types from regular and imported fields (which may overwrite colliding input entries,
// preserving the original ordering semantics).
private Map<Reference, TensorType> collectFeatureTypes() {
    // Idiom fix: copy-construct instead of a per-entry forEach/put loop
    Map<Reference, TensorType> featureTypes = new HashMap<>(inputFeatures);
    allFields().forEach(field -> addAttributeFeatureTypes(field, featureTypes));
    allImportedFields().forEach(field -> addAttributeFeatureTypes(field, featureTypes));
    return featureTypes;
}
public MapEvaluationTypeContext typeContext(QueryProfileRegistry queryProfiles, Map<Reference, TensorType> featureTypes) {
MapEvaluationTypeContext context = new MapEvaluationTypeContext(getFunctions().values().stream()
.map(RankingExpressionFunction::function)
.collect(Collectors.toList()),
featureTypes);
getConstants().forEach((k, v) -> context.setType(FeatureNames.asConstantFeature(k), v.type()));
rankingConstants().asMap().forEach((k, v) -> context.setType(FeatureNames.asConstantFeature(k), v.getTensorType()));
for (QueryProfileType queryProfileType : queryProfiles.getTypeRegistry().allComponents()) {
for (FieldDescription field : queryProfileType.declaredFields().values()) {
TensorType type = field.getType().asTensorType();
Optional<Reference> feature = Reference.simple(field.getName());
if ( feature.isEmpty() || ! feature.get().name().equals("query")) continue;
TensorType existingType = context.getType(feature.get());
if ( ! Objects.equals(existingType, context.defaultTypeOf(feature.get())))
type = existingType.dimensionwiseGeneralizationWith(type).orElseThrow( () ->
new IllegalArgumentException(queryProfileType + " contains query feature " + feature.get() +
" with type " + field.getType().asTensorType() +
", but this is already defined in another query profile with type " +
context.getType(feature.get())));
context.setType(feature.get(), type);
}
}
for (Map.Entry<String, OnnxModel> entry : onnxModels().entrySet()) {
String modelName = entry.getKey();
OnnxModel model = entry.getValue();
Arguments args = new Arguments(new ReferenceNode(modelName));
Map<String, TensorType> inputTypes = resolveOnnxInputTypes(model, context);
TensorType defaultOutputType = model.getTensorType(model.getDefaultOutput(), inputTypes);
context.setType(new Reference("onnxModel", args, null), defaultOutputType);
for (Map.Entry<String, String> mapping : model.getOutputMap().entrySet()) {
TensorType type = model.getTensorType(mapping.getKey(), inputTypes);
context.setType(new Reference("onnxModel", args, mapping.getValue()), type);
}
}
return context;
}
// Resolves the tensor type of every ONNX model input that can be typed; unresolvable inputs are omitted
private Map<String, TensorType> resolveOnnxInputTypes(OnnxModel model, MapEvaluationTypeContext context) {
    Map<String, TensorType> resolved = new HashMap<>();
    model.getInputMap().keySet().forEach(inputName ->
            resolveOnnxInputType(inputName, model, context).ifPresent(type -> resolved.put(inputName, type)));
    return resolved;
}
private Optional<TensorType> resolveOnnxInputType(String onnxInputName, OnnxModel model, MapEvaluationTypeContext context) {
String source = model.getInputMap().get(onnxInputName);
if (source != null) {
Optional<Reference> reference = Reference.simple(source);
if (reference.isPresent()) {
if (reference.get().name().equals("rankingExpression") && reference.get().simpleArgument().isPresent()) {
source = reference.get().simpleArgument().get();
} else {
return Optional.of(context.getType(reference.get()));
}
}
ExpressionFunction func = context.getFunction(source);
if (func != null) {
return Optional.of(func.getBody().type(context));
}
}
return Optional.empty();
}
// Registers each attribute of the field under an attribute(...) feature name with its tensor type.
private void addAttributeFeatureTypes(ImmutableSDField field, Map<Reference, TensorType> featureTypes) {
    Attribute attribute = field.getAttribute();
    field.getAttributes().forEach((k, a) -> {
        String name = k;
        if (attribute == a)
            // The field's primary attribute is exposed under the field name itself
            name = field.getName();
        featureTypes.put(FeatureNames.asAttributeFeature(name),
                         a.tensorType().orElse(TensorType.empty)); // non-tensor attributes map to the empty (scalar) type
    });
}
/**
* A rank setting. The identity of a rank setting is its field name and type (not value).
* A rank setting is immutable.
*/
public static class RankSetting implements Serializable {
private final String fieldName;
private final Type type;
/** The rank value */
private final Object value;
public enum Type {
RANKTYPE("rank-type"),
LITERALBOOST("literal-boost"),
WEIGHT("weight"),
PREFERBITVECTOR("preferbitvector",true);
private final String name;
/** True if this setting really pertains to an index, not a field within an index */
private final boolean isIndexLevel;
Type(String name) {
this(name,false);
}
Type(String name,boolean isIndexLevel) {
this.name = name;
this.isIndexLevel=isIndexLevel;
}
/** True if this setting really pertains to an index, not a field within an index */
public boolean isIndexLevel() { return isIndexLevel; }
/** Returns the name of this type */
public String getName() {
return name;
}
public String toString() {
return "type: " + name;
}
}
public RankSetting(String fieldName, RankSetting.Type type, Object value) {
this.fieldName = fieldName;
this.type = type;
this.value = value;
}
public String getFieldName() { return fieldName; }
public Type getType() { return type; }
public Object getValue() { return value; }
/** Returns the value as an int, or a negative value if it is not an integer */
public int getIntValue() {
if (value instanceof Integer) {
return ((Integer)value);
}
else {
return -1;
}
}
@Override
public int hashCode() {
return fieldName.hashCode() + 17 * type.hashCode();
}
@Override
public boolean equals(Object object) {
if (!(object instanceof RankSetting)) {
return false;
}
RankSetting other = (RankSetting)object;
return
fieldName.equals(other.fieldName) &&
type.equals(other.type);
}
@Override
public String toString() {
return type + " setting " + fieldName + ": " + value;
}
}
/** A rank property. Rank properties are Value Objects */
public static class RankProperty implements Serializable {

    private final String name;
    private final String value;

    public RankProperty(String name, String value) {
        this.name = name;
        this.value = value;
    }

    public String getName() { return name; }

    public String getValue() { return value; }

    @Override
    public int hashCode() {
        // Derived from the same fields as equals, as required by the contract
        return name.hashCode() + 17 * value.hashCode();
    }

    @Override
    public boolean equals(Object object) {
        if (object == this) return true;
        if ( ! (object instanceof RankProperty)) return false;
        RankProperty other = (RankProperty) object;
        return other.name.equals(name) && other.value.equals(value);
    }

    @Override
    public String toString() {
        return name + " = " + value;
    }

}
/** A function in a rank profile */
public static class RankingExpressionFunction {
private ExpressionFunction function;
/** True if this should be inlined into calling expressions. Useful for very cheap functions. */
private final boolean inline;
RankingExpressionFunction(ExpressionFunction function, boolean inline) {
this.function = function;
this.inline = inline;
}
public void setReturnType(TensorType type) {
this.function = function.withReturnType(type);
}
public ExpressionFunction function() { return function; }
public boolean inline() {
return inline && function.arguments().isEmpty();
}
RankingExpressionFunction withExpression(RankingExpression expression) {
return new RankingExpressionFunction(function.withBody(expression), inline);
}
@Override
public String toString() {
return "function " + function;
}
}
public static final class DiversitySettings {
private String attribute = null;
private int minGroups = 0;
private double cutoffFactor = 10;
private Diversity.CutoffStrategy cutoffStrategy = Diversity.CutoffStrategy.loose;
public void setAttribute(String value) { attribute = value; }
public void setMinGroups(int value) { minGroups = value; }
public void setCutoffFactor(double value) { cutoffFactor = value; }
public void setCutoffStrategy(Diversity.CutoffStrategy strategy) { cutoffStrategy = strategy; }
public String getAttribute() { return attribute; }
public int getMinGroups() { return minGroups; }
public double getCutoffFactor() { return cutoffFactor; }
public Diversity.CutoffStrategy getCutoffStrategy() { return cutoffStrategy; }
void checkValid() {
if (attribute == null || attribute.isEmpty()) {
throw new IllegalArgumentException("'diversity' did not set non-empty diversity attribute name.");
}
if (minGroups <= 0) {
throw new IllegalArgumentException("'diversity' did not set min-groups > 0");
}
if (cutoffFactor < 1.0) {
throw new IllegalArgumentException("diversity.cutoff.factor must be larger or equal to 1.0.");
}
}
}
public static class MatchPhaseSettings {
private String attribute = null;
private boolean ascending = false;
private int maxHits = 0;
private double maxFilterCoverage = 0.2;
private DiversitySettings diversity = null;
private double evaluationPoint = 0.20;
private double prePostFilterTippingPoint = 1.0;
public void setDiversity(DiversitySettings value) {
value.checkValid();
diversity = value;
}
public void setAscending(boolean value) { ascending = value; }
public void setAttribute(String value) { attribute = value; }
public void setMaxHits(int value) { maxHits = value; }
public void setMaxFilterCoverage(double value) { maxFilterCoverage = value; }
public void setEvaluationPoint(double evaluationPoint) { this.evaluationPoint = evaluationPoint; }
public void setPrePostFilterTippingPoint(double prePostFilterTippingPoint) { this.prePostFilterTippingPoint = prePostFilterTippingPoint; }
public boolean getAscending() { return ascending; }
public String getAttribute() { return attribute; }
public int getMaxHits() { return maxHits; }
public double getMaxFilterCoverage() { return maxFilterCoverage; }
public DiversitySettings getDiversity() { return diversity; }
public double getEvaluationPoint() { return evaluationPoint; }
public double getPrePostFilterTippingPoint() { return prePostFilterTippingPoint; }
public void checkValid() {
if (attribute == null) {
throw new IllegalArgumentException("match-phase did not set any attribute");
}
if (! (maxHits > 0)) {
throw new IllegalArgumentException("match-phase did not set max-hits > 0");
}
}
}
/** A mutable collection of name-to-type declarations. */
public static class TypeSettings {

    private final Map<String, String> types = new HashMap<>();

    // Registers (or replaces) the type declared for the given name
    void addType(String name, String type) {
        types.put(name, type);
    }

    /** Returns an unmodifiable live view of the declared types. */
    public Map<String, String> getTypes() {
        return Collections.unmodifiableMap(types);
    }

}
} |
why we are passing in a null value here? | private void createReport(final IBundleCoverage bundleCoverage, ModuleCoverage moduleCoverage) {
boolean containsSourceFiles = true;
for (IPackageCoverage packageCoverage : bundleCoverage.getPackages()) {
if (TesterinaConstants.DOT.equals(this.module.moduleName())) {
containsSourceFiles = packageCoverage.getName().isEmpty();
}
if (containsSourceFiles) {
for (ISourceFileCoverage sourceFileCoverage : packageCoverage.getSourceFiles()) {
String sourceFileModule = decodeIdentifier(sourceFileCoverage.getPackageName().split("/")[1]);
if (sourceFileModule.equals(this.module.moduleName().toString())
&& sourceFileCoverage.getName().contains(BLANG_SRC_FILE_SUFFIX)
&& !sourceFileCoverage.getName().contains("tests/")) {
List<Integer> coveredLines = new ArrayList<>();
List<Integer> missedLines = new ArrayList<>();
for (int i = sourceFileCoverage.getFirstLine(); i <= sourceFileCoverage.getLastLine(); i++) {
ILine line = sourceFileCoverage.getLine(i);
if (line.getStatus() == NOT_COVERED) {
missedLines.add(i);
} else if (line.getStatus() == PARTLY_COVERED || line.getStatus() == FULLY_COVERED) {
coveredLines.add(i);
}
}
Document document = null;
for (DocumentId documentId : module.documentIds()) {
if (module.document(documentId).name().equals(sourceFileCoverage.getName())) {
document = module.document(documentId);
}
}
moduleCoverage.addSourceFileCoverage(document, coveredLines, missedLines);
}
}
}
}
} | boolean containsSourceFiles = true; | private void createReport(final IBundleCoverage bundleCoverage, ModuleCoverage moduleCoverage) {
boolean containsSourceFiles = true;
for (IPackageCoverage packageCoverage : bundleCoverage.getPackages()) {
if (TesterinaConstants.DOT.equals(this.module.moduleName())) {
containsSourceFiles = packageCoverage.getName().isEmpty();
}
if (containsSourceFiles) {
for (ISourceFileCoverage sourceFileCoverage : packageCoverage.getSourceFiles()) {
String sourceFileModule = decodeIdentifier(sourceFileCoverage.getPackageName().split("/")[1]);
if (sourceFileModule.equals(this.module.moduleName().toString())
&& sourceFileCoverage.getName().contains(BLANG_SRC_FILE_SUFFIX)
&& !sourceFileCoverage.getName().contains("tests/")) {
List<Integer> coveredLines = new ArrayList<>();
List<Integer> missedLines = new ArrayList<>();
for (int i = sourceFileCoverage.getFirstLine(); i <= sourceFileCoverage.getLastLine(); i++) {
ILine line = sourceFileCoverage.getLine(i);
if (line.getStatus() == NOT_COVERED) {
missedLines.add(i);
} else if (line.getStatus() == PARTLY_COVERED || line.getStatus() == FULLY_COVERED) {
coveredLines.add(i);
}
}
Document document = null;
for (DocumentId documentId : module.documentIds()) {
if (module.document(documentId).name().equals(sourceFileCoverage.getName())) {
document = module.document(documentId);
}
}
moduleCoverage.addSourceFileCoverage(document, coveredLines, missedLines);
}
}
}
}
} | class per module
CodeCoverageUtils.unzipCompiledSource(jarPath, coverageDir, orgName, packageName, version);
} catch (NoSuchFileException e) {
if (Files.exists(coverageDir.resolve(BIN_DIR))) {
CodeCoverageUtils.deleteDirectory(coverageDir.resolve(BIN_DIR).toFile());
}
return null;
} | class per module
CodeCoverageUtils.unzipCompiledSource(jarPath, coverageDir, orgName, packageName, version);
} catch (NoSuchFileException e) {
if (Files.exists(coverageDir.resolve(BIN_DIR))) {
CodeCoverageUtils.deleteDirectory(coverageDir.resolve(BIN_DIR).toFile());
}
return null;
} |
shouldn't we check `containsKey` here as well? | private Optional<ExecutionVertex> updateAndGet(ExecutionAttemptID id) {
synchronized (tasks) {
ExecutionVertex vertex = cachedTasksById.get(id);
if (vertex != null) {
return Optional.of(vertex);
}
Map<ExecutionAttemptID, ExecutionVertex> mappings = getCurrentAttemptMappings();
if (!mappings.containsKey(id)) {
mappings.put(id, null);
}
cachedTasksById = mappings;
return Optional.ofNullable(cachedTasksById.get(id));
}
} | if (vertex != null) { | private Optional<ExecutionVertex> updateAndGet(ExecutionAttemptID id) {
synchronized (tasks) {
ExecutionVertex vertex = cachedTasksById.get(id);
if (vertex != null || cachedTasksById.containsKey(id)) {
return Optional.ofNullable(vertex);
}
Map<ExecutionAttemptID, ExecutionVertex> mappings = getCurrentAttemptMappings();
if (!mappings.containsKey(id)) {
mappings.put(id, null);
}
cachedTasksById = mappings;
return Optional.ofNullable(cachedTasksById.get(id));
}
} | class ExecutionAttemptMappingProvider {
/** A full list of tasks. */
private final List<ExecutionVertex> tasks;
/** The cached mapping, which would only be updated on miss. */
private Map<ExecutionAttemptID, ExecutionVertex> cachedTasksById;
public ExecutionAttemptMappingProvider(Iterable<ExecutionVertex> tasksIterable) {
this.tasks = new ArrayList<>();
tasksIterable.forEach(this.tasks::add);
this.cachedTasksById = new HashMap<>(tasks.size());
}
/**
 * Returns the vertex for the given attempt id, if known.
 * Served from the cache when the id was looked up before; the cache may map an id to
 * null (attempt not found), which containsKey distinguishes from a genuine cache miss.
 */
public Optional<ExecutionVertex> getVertex(ExecutionAttemptID id) {
    ExecutionVertex vertex = cachedTasksById.get(id);
    if (vertex != null || cachedTasksById.containsKey(id)) {
        return Optional.ofNullable(vertex);
    }
    return updateAndGet(id); // cache miss: rebuild the attempt-to-vertex mapping
}
// Builds a fresh snapshot mapping each task's current execution attempt id to its vertex
private Map<ExecutionAttemptID, ExecutionVertex> getCurrentAttemptMappings() {
    Map<ExecutionAttemptID, ExecutionVertex> mappings = new HashMap<>(tasks.size());
    tasks.forEach(task -> mappings.put(task.getCurrentExecutionAttempt().getAttemptId(), task));
    return mappings;
}
} | class ExecutionAttemptMappingProvider {
/** A full list of tasks. */
private final List<ExecutionVertex> tasks;
/** The cached mapping, which would only be updated on miss. */
private Map<ExecutionAttemptID, ExecutionVertex> cachedTasksById;
public ExecutionAttemptMappingProvider(Iterable<ExecutionVertex> tasksIterable) {
this.tasks = new ArrayList<>();
tasksIterable.forEach(this.tasks::add);
this.cachedTasksById = new HashMap<>(tasks.size());
}
public Optional<ExecutionVertex> getVertex(ExecutionAttemptID id) {
ExecutionVertex vertex = cachedTasksById.get(id);
if (vertex != null || cachedTasksById.containsKey(id)) {
return Optional.ofNullable(vertex);
}
return updateAndGet(id);
}
private Map<ExecutionAttemptID, ExecutionVertex> getCurrentAttemptMappings() {
Map<ExecutionAttemptID, ExecutionVertex> attemptMappings = new HashMap<>(tasks.size());
for (ExecutionVertex task : tasks) {
attemptMappings.put(task.getCurrentExecutionAttempt().getAttemptId(), task);
}
return attemptMappings;
}
} |
Any reason to not use `Text.format`? | private Mail mailOf(Notification n, Collection<String> recipients) {
var subject = new Formatter().format("[%s] Vespa Notification for %s", n.level().toString().toUpperCase(), n.type().name());
var body = new StringBuilder();
body.append("Source: ").append(n.source().toString()).append("\n")
.append("\n")
.append(String.join("\n", n.messages()));
return new Mail(recipients, subject.toString(), body.toString());
} | var subject = new Formatter().format("[%s] Vespa Notification for %s", n.level().toString().toUpperCase(), n.type().name()); | private Mail mailOf(Notification n, Collection<String> recipients) {
var subject = Text.format("[%s] Vespa Notification for %s", n.level().toString().toUpperCase(), n.type().name());
var body = new StringBuilder();
body.append("Source: ").append(n.source().toString()).append("\n")
.append("\n")
.append(String.join("\n", n.messages()));
return new Mail(recipients, subject.toString(), body.toString());
} | class Notifier {
private final CuratorDb curatorDb;
private final Mailer mailer;
private static final Logger log = Logger.getLogger(Notifier.class.getName());
public Notifier(CuratorDb curatorDb, Mailer mailer) {
this.curatorDb = curatorDb;
this.mailer = mailer;
}
public void dispatch(Notification notification) {
var tenant = curatorDb.readTenant(notification.source().tenant());
tenant.stream().forEach(t -> {
if (t instanceof CloudTenant) {
var ct = (CloudTenant) t;
ct.info().contacts().all().stream()
.filter(c -> c.audiences().contains(TenantContacts.Audience.NOTIFICATIONS))
.collect(Collectors.groupingBy(TenantContacts.Contact::type, Collectors.toList()))
.entrySet()
.forEach(e -> dispatch(notification, e.getKey(), e.getValue()));
}
});
}
private void dispatch(Notification notification, TenantContacts.Type type, Collection<? extends TenantContacts.Contact> contacts) {
switch (type) {
case EMAIL:
dispatch(notification, contacts.stream().map(c -> (TenantContacts.EmailContact) c).collect(Collectors.toList()));
break;
default:
log.fine("Unknown TenantContact Type: " + type);
}
}
private void dispatch(Notification notification, Collection<TenantContacts.EmailContact> contacts) {
mailer.send(mailOf(notification, contacts.stream().map(c -> c.email()).collect(Collectors.toList())));
}
} | class Notifier {
private final CuratorDb curatorDb;
private final Mailer mailer;
private static final Logger log = Logger.getLogger(Notifier.class.getName());
public Notifier(CuratorDb curatorDb, Mailer mailer) {
this.curatorDb = Objects.requireNonNull(curatorDb);
this.mailer = Objects.requireNonNull(mailer);
}
public void dispatch(Notification notification) {
var tenant = curatorDb.readTenant(notification.source().tenant());
tenant.stream().forEach(t -> {
if (t instanceof CloudTenant) {
var ct = (CloudTenant) t;
ct.info().contacts().all().stream()
.filter(c -> c.audiences().contains(TenantContacts.Audience.NOTIFICATIONS))
.collect(Collectors.groupingBy(TenantContacts.Contact::type, Collectors.toList()))
.entrySet()
.forEach(e -> dispatch(notification, e.getKey(), e.getValue()));
}
});
}
private void dispatch(Notification notification, TenantContacts.Type type, Collection<? extends TenantContacts.Contact> contacts) {
switch (type) {
case EMAIL:
dispatch(notification, contacts.stream().map(c -> (TenantContacts.EmailContact) c).collect(Collectors.toList()));
break;
default:
throw new IllegalArgumentException("Unknown TenantContacts type " + type.name());
}
}
private void dispatch(Notification notification, Collection<TenantContacts.EmailContact> contacts) {
try {
mailer.send(mailOf(notification, contacts.stream().map(c -> c.email()).collect(Collectors.toList())));
} catch (MailerException e) {
log.log(Level.SEVERE, "Failed sending email", e);
}
}
} |
also mark it final for consistency | public void testQueuedBuffers() throws Exception {
final NettyShuffleEnvironment network = createNettyShuffleEnvironment();
final ResultPartition localResultPartition = new ResultPartitionBuilder()
.setResultPartitionManager(network.getResultPartitionManager())
.setupBufferPoolFactoryFromNettyShuffleEnvironment(network)
.build();
final ResultPartition remoteResultPartition = new ResultPartitionBuilder()
.setResultPartitionManager(network.getResultPartitionManager())
.setupBufferPoolFactoryFromNettyShuffleEnvironment(network)
.build();
localResultPartition.setup();
remoteResultPartition.setup();
final SingleInputGate inputGate = createInputGate(network, 2, ResultPartitionType.PIPELINED);
try {
final ResultPartitionID localResultPartitionId = localResultPartition.getPartitionId();
addUnknownInputChannel(network, inputGate, localResultPartitionId, 0);
LocalInputChannel localInputChannel = InputChannelBuilder.newBuilder()
.setChannelIndex(0)
.setPartitionId(localResultPartitionId)
.setupFromNettyShuffleEnvironment(network)
.setConnectionManager(new TestingConnectionManager())
.buildLocalAndSetToGate(inputGate);
final ResultPartitionID remoteResultPartitionId = remoteResultPartition.getPartitionId();
RemoteInputChannel remoteInputChannel = InputChannelBuilder.newBuilder()
.setChannelIndex(1)
.setPartitionId(remoteResultPartitionId)
.setupFromNettyShuffleEnvironment(network)
.setConnectionManager(new TestingConnectionManager())
.buildRemoteAndSetToGate(inputGate);
inputGate.setup();
remoteInputChannel.onBuffer(TestBufferFactory.createBuffer(1), 0, 0);
assertEquals(1, inputGate.getNumberOfQueuedBuffers());
localResultPartition.addBufferConsumer(BufferBuilderTestUtils.createBufferBuilder(1).createBufferConsumer(), 0);
assertEquals(2, inputGate.getNumberOfQueuedBuffers());
} finally {
inputGate.close();
network.close();
}
} | RemoteInputChannel remoteInputChannel = InputChannelBuilder.newBuilder() | public void testQueuedBuffers() throws Exception {
final NettyShuffleEnvironment network = createNettyShuffleEnvironment();
final ResultPartition resultPartition = new ResultPartitionBuilder()
.setResultPartitionManager(network.getResultPartitionManager())
.setupBufferPoolFactoryFromNettyShuffleEnvironment(network)
.build();
final SingleInputGate inputGate = createInputGate(network, 2, ResultPartitionType.PIPELINED);
final ResultPartitionID localResultPartitionId = resultPartition.getPartitionId();
final RemoteInputChannel remoteInputChannel = InputChannelBuilder.newBuilder()
.setChannelIndex(1)
.setupFromNettyShuffleEnvironment(network)
.setConnectionManager(new TestingConnectionManager())
.buildRemoteAndSetToGate(inputGate);
InputChannelBuilder.newBuilder()
.setChannelIndex(0)
.setPartitionId(localResultPartitionId)
.setupFromNettyShuffleEnvironment(network)
.setConnectionManager(new TestingConnectionManager())
.buildLocalAndSetToGate(inputGate);
try {
resultPartition.setup();
inputGate.setup();
remoteInputChannel.onBuffer(TestBufferFactory.createBuffer(1), 0, 0);
assertEquals(1, inputGate.getNumberOfQueuedBuffers());
resultPartition.addBufferConsumer(BufferBuilderTestUtils.createFilledBufferConsumer(1), 0);
assertEquals(2, inputGate.getNumberOfQueuedBuffers());
} finally {
resultPartition.release();
inputGate.close();
network.close();
}
} | class SingleInputGateTest extends InputGateTestBase {
/**
* Tests basic correctness of buffer-or-event interleaving and correct <code>null</code> return
* value after receiving all end-of-partition events.
*/
@Test
public void testBasicGetNextLogic() throws Exception {
final SingleInputGate inputGate = createInputGate();
final TestInputChannel[] inputChannels = new TestInputChannel[]{
new TestInputChannel(inputGate, 0),
new TestInputChannel(inputGate, 1)
};
inputGate.setInputChannel(
new IntermediateResultPartitionID(), inputChannels[0]);
inputGate.setInputChannel(
new IntermediateResultPartitionID(), inputChannels[1]);
inputChannels[0].readBuffer();
inputChannels[0].readBuffer();
inputChannels[1].readBuffer();
inputChannels[1].readEndOfPartitionEvent();
inputChannels[0].readEndOfPartitionEvent();
inputGate.notifyChannelNonEmpty(inputChannels[0]);
inputGate.notifyChannelNonEmpty(inputChannels[1]);
verifyBufferOrEvent(inputGate, true, 0, true);
verifyBufferOrEvent(inputGate, true, 1, true);
verifyBufferOrEvent(inputGate, true, 0, true);
verifyBufferOrEvent(inputGate, false, 1, true);
verifyBufferOrEvent(inputGate, false, 0, false);
assertTrue(inputGate.isFinished());
for (TestInputChannel ic : inputChannels) {
ic.assertReturnedEventsAreRecycled();
}
}
@Test
public void testIsAvailable() throws Exception {
final SingleInputGate inputGate = createInputGate(1);
TestInputChannel inputChannel = new TestInputChannel(inputGate, 0);
inputGate.setInputChannel(new IntermediateResultPartitionID(), inputChannel);
testIsAvailable(inputGate, inputGate, inputChannel);
}
@Test
public void testIsAvailableAfterFinished() throws Exception {
final SingleInputGate inputGate = createInputGate(1);
TestInputChannel inputChannel = new TestInputChannel(inputGate, 0);
inputGate.setInputChannel(new IntermediateResultPartitionID(), inputChannel);
testIsAvailableAfterFinished(
inputGate,
() -> {
inputChannel.readEndOfPartitionEvent();
inputGate.notifyChannelNonEmpty(inputChannel);
});
}
@Test
public void testIsMoreAvailableReadingFromSingleInputChannel() throws Exception {
final SingleInputGate inputGate = createInputGate();
final TestInputChannel[] inputChannels = new TestInputChannel[]{
new TestInputChannel(inputGate, 0),
new TestInputChannel(inputGate, 1)
};
inputGate.setInputChannel(
new IntermediateResultPartitionID(), inputChannels[0]);
inputGate.setInputChannel(
new IntermediateResultPartitionID(), inputChannels[1]);
inputChannels[0].readBuffer();
inputChannels[0].readBuffer(false);
inputGate.notifyChannelNonEmpty(inputChannels[0]);
verifyBufferOrEvent(inputGate, true, 0, true);
verifyBufferOrEvent(inputGate, true, 0, false);
}
@Test
public void testBackwardsEventWithUninitializedChannel() throws Exception {
final TaskEventDispatcher taskEventDispatcher = mock(TaskEventDispatcher.class);
when(taskEventDispatcher.publish(any(ResultPartitionID.class), any(TaskEvent.class))).thenReturn(true);
final ResultSubpartitionView iterator = mock(ResultSubpartitionView.class);
when(iterator.getNextBuffer()).thenReturn(
new BufferAndBacklog(new NetworkBuffer(MemorySegmentFactory.allocateUnpooledSegment(1024), FreeingBufferRecycler.INSTANCE), false, 0, false));
final ResultPartitionManager partitionManager = mock(ResultPartitionManager.class);
when(partitionManager.createSubpartitionView(
any(ResultPartitionID.class),
anyInt(),
any(BufferAvailabilityListener.class))).thenReturn(iterator);
NettyShuffleEnvironment environment = createNettyShuffleEnvironment();
final SingleInputGate inputGate = createInputGate(environment, 2, ResultPartitionType.PIPELINED);
try {
ResultPartitionID localPartitionId = new ResultPartitionID();
InputChannelBuilder.newBuilder()
.setPartitionId(localPartitionId)
.setPartitionManager(partitionManager)
.setTaskEventPublisher(taskEventDispatcher)
.buildLocalAndSetToGate(inputGate);
ResultPartitionID unknownPartitionId = new ResultPartitionID();
InputChannelBuilder.newBuilder()
.setChannelIndex(1)
.setPartitionId(unknownPartitionId)
.setPartitionManager(partitionManager)
.setTaskEventPublisher(taskEventDispatcher)
.buildUnknownAndSetToGate(inputGate);
inputGate.setup();
verify(partitionManager, times(1)).createSubpartitionView(any(ResultPartitionID.class), anyInt(), any(BufferAvailabilityListener.class));
final TaskEvent event = new TestTaskEvent();
inputGate.sendTaskEvent(event);
verify(taskEventDispatcher, times(1)).publish(any(ResultPartitionID.class), any(TaskEvent.class));
ResourceID location = ResourceID.generate();
inputGate.updateInputChannel(location, createRemoteWithIdAndLocation(unknownPartitionId.getPartitionId(), location));
verify(partitionManager, times(2)).createSubpartitionView(any(ResultPartitionID.class), anyInt(), any(BufferAvailabilityListener.class));
verify(taskEventDispatcher, times(2)).publish(any(ResultPartitionID.class), any(TaskEvent.class));
}
finally {
inputGate.close();
environment.close();
}
}
/**
* Tests that an update channel does not trigger a partition request before the UDF has
* requested any partitions. Otherwise, this can lead to races when registering a listener at
* the gate (e.g. in UnionInputGate), which can result in missed buffer notifications at the
* listener.
*/
@Test
public void testUpdateChannelBeforeRequest() throws Exception {
SingleInputGate inputGate = createInputGate(1);
ResultPartitionManager partitionManager = mock(ResultPartitionManager.class);
InputChannel unknown = InputChannelBuilder.newBuilder()
.setPartitionManager(partitionManager)
.buildUnknownAndSetToGate(inputGate);
ResultPartitionID resultPartitionID = unknown.getPartitionId();
ResourceID location = ResourceID.generate();
inputGate.updateInputChannel(location, createRemoteWithIdAndLocation(resultPartitionID.getPartitionId(), location));
verify(partitionManager, never()).createSubpartitionView(
any(ResultPartitionID.class), anyInt(), any(BufferAvailabilityListener.class));
}
/**
* Tests that the release of the input gate is noticed while polling the
* channels for available data.
*/
@Test
public void testReleaseWhilePollingChannel() throws Exception {
final AtomicReference<Exception> asyncException = new AtomicReference<>();
final SingleInputGate inputGate = createInputGate(1);
InputChannelBuilder.newBuilder().buildUnknownAndSetToGate(inputGate);
Thread asyncConsumer = new Thread() {
@Override
public void run() {
try {
inputGate.getNext();
} catch (Exception e) {
asyncException.set(e);
}
}
};
asyncConsumer.start();
boolean success = false;
for (int i = 0; i < 50; i++) {
if (asyncConsumer.isAlive()) {
success = asyncConsumer.getState() == Thread.State.WAITING;
}
if (success) {
break;
} else {
Thread.sleep(100);
}
}
assertTrue("Did not trigger blocking buffer request.", success);
inputGate.close();
asyncConsumer.join();
assertNotNull(asyncException.get());
assertEquals(IllegalStateException.class, asyncException.get().getClass());
}
/**
* Tests request back off configuration is correctly forwarded to the channels.
*/
@Test
public void testRequestBackoffConfiguration() throws Exception {
IntermediateResultPartitionID[] partitionIds = new IntermediateResultPartitionID[] {
new IntermediateResultPartitionID(),
new IntermediateResultPartitionID(),
new IntermediateResultPartitionID()
};
ResourceID localLocation = ResourceID.generate();
ShuffleDescriptor[] channelDescs = new ShuffleDescriptor[]{
createRemoteWithIdAndLocation(partitionIds[0], localLocation),
createRemoteWithIdAndLocation(partitionIds[1], ResourceID.generate()),
new UnknownShuffleDescriptor(new ResultPartitionID(partitionIds[2], new ExecutionAttemptID()))};
InputGateDeploymentDescriptor gateDesc = new InputGateDeploymentDescriptor(
new IntermediateDataSetID(),
ResultPartitionType.PIPELINED,
0,
channelDescs);
int initialBackoff = 137;
int maxBackoff = 1001;
final NettyShuffleEnvironment netEnv = new NettyShuffleEnvironmentBuilder()
.setPartitionRequestInitialBackoff(initialBackoff)
.setPartitionRequestMaxBackoff(maxBackoff)
.setIsCreditBased(enableCreditBasedFlowControl)
.build();
SingleInputGate gate = new SingleInputGateFactory(
localLocation,
netEnv.getConfiguration(),
netEnv.getConnectionManager(),
netEnv.getResultPartitionManager(),
new TaskEventDispatcher(),
netEnv.getNetworkBufferPool())
.create(
"TestTask",
gateDesc,
SingleInputGateBuilder.NO_OP_PRODUCER_CHECKER,
InputChannelTestUtils.newUnregisteredInputChannelMetrics());
try {
assertEquals(gateDesc.getConsumedPartitionType(), gate.getConsumedPartitionType());
Map<IntermediateResultPartitionID, InputChannel> channelMap = gate.getInputChannels();
assertEquals(3, channelMap.size());
InputChannel localChannel = channelMap.get(partitionIds[0]);
assertEquals(LocalInputChannel.class, localChannel.getClass());
InputChannel remoteChannel = channelMap.get(partitionIds[1]);
assertEquals(RemoteInputChannel.class, remoteChannel.getClass());
InputChannel unknownChannel = channelMap.get(partitionIds[2]);
assertEquals(UnknownInputChannel.class, unknownChannel.getClass());
InputChannel[] channels =
new InputChannel[] {localChannel, remoteChannel, unknownChannel};
for (InputChannel ch : channels) {
assertEquals(0, ch.getCurrentBackoff());
assertTrue(ch.increaseBackoff());
assertEquals(initialBackoff, ch.getCurrentBackoff());
assertTrue(ch.increaseBackoff());
assertEquals(initialBackoff * 2, ch.getCurrentBackoff());
assertTrue(ch.increaseBackoff());
assertEquals(initialBackoff * 2 * 2, ch.getCurrentBackoff());
assertTrue(ch.increaseBackoff());
assertEquals(maxBackoff, ch.getCurrentBackoff());
assertFalse(ch.increaseBackoff());
}
} finally {
gate.close();
netEnv.close();
}
}
/**
* Tests that input gate requests and assigns network buffers for remote input channel.
*/
@Test
public void testRequestBuffersWithRemoteInputChannel() throws Exception {
final NettyShuffleEnvironment network = createNettyShuffleEnvironment();
final SingleInputGate inputGate = createInputGate(network, 1, ResultPartitionType.PIPELINED_BOUNDED);
int buffersPerChannel = 2;
int extraNetworkBuffersPerGate = 8;
try {
RemoteInputChannel remote =
InputChannelBuilder.newBuilder()
.setupFromNettyShuffleEnvironment(network)
.setConnectionManager(new TestingConnectionManager())
.buildRemoteAndSetToGate(inputGate);
inputGate.setup();
NetworkBufferPool bufferPool = network.getNetworkBufferPool();
if (enableCreditBasedFlowControl) {
assertEquals(buffersPerChannel, remote.getNumberOfAvailableBuffers());
assertEquals(bufferPool.getTotalNumberOfMemorySegments() - buffersPerChannel,
bufferPool.getNumberOfAvailableMemorySegments());
assertEquals(extraNetworkBuffersPerGate, bufferPool.countBuffers());
} else {
assertEquals(buffersPerChannel + extraNetworkBuffersPerGate, bufferPool.countBuffers());
}
} finally {
inputGate.close();
network.close();
}
}
/**
* Tests that input gate requests and assigns network buffers when unknown input channel
* updates to remote input channel.
*/
@Test
public void testRequestBuffersWithUnknownInputChannel() throws Exception {
final NettyShuffleEnvironment network = createNettyShuffleEnvironment();
final SingleInputGate inputGate = createInputGate(network, 1, ResultPartitionType.PIPELINED_BOUNDED);
int buffersPerChannel = 2;
int extraNetworkBuffersPerGate = 8;
try {
final ResultPartitionID resultPartitionId = new ResultPartitionID();
addUnknownInputChannel(network, inputGate, resultPartitionId, 0);
inputGate.setup();
NetworkBufferPool bufferPool = network.getNetworkBufferPool();
if (enableCreditBasedFlowControl) {
assertEquals(bufferPool.getTotalNumberOfMemorySegments(),
bufferPool.getNumberOfAvailableMemorySegments());
assertEquals(extraNetworkBuffersPerGate, bufferPool.countBuffers());
} else {
assertEquals(buffersPerChannel + extraNetworkBuffersPerGate, bufferPool.countBuffers());
}
inputGate.updateInputChannel(
ResourceID.generate(),
createRemoteWithIdAndLocation(resultPartitionId.getPartitionId(), ResourceID.generate()));
if (enableCreditBasedFlowControl) {
RemoteInputChannel remote = (RemoteInputChannel) inputGate.getInputChannels()
.get(resultPartitionId.getPartitionId());
assertEquals(buffersPerChannel, remote.getNumberOfAvailableBuffers());
assertEquals(bufferPool.getTotalNumberOfMemorySegments() - buffersPerChannel,
bufferPool.getNumberOfAvailableMemorySegments());
assertEquals(extraNetworkBuffersPerGate, bufferPool.countBuffers());
} else {
assertEquals(buffersPerChannel + extraNetworkBuffersPerGate, bufferPool.countBuffers());
}
} finally {
inputGate.close();
network.close();
}
}
/**
* Tests that input gate can successfully convert unknown input channels into local and remote
* channels.
*/
@Test
public void testUpdateUnknownInputChannel() throws Exception {
final NettyShuffleEnvironment network = createNettyShuffleEnvironment();
final ResultPartition localResultPartition = new ResultPartitionBuilder()
.setResultPartitionManager(network.getResultPartitionManager())
.setupBufferPoolFactoryFromNettyShuffleEnvironment(network)
.build();
final ResultPartition remoteResultPartition = new ResultPartitionBuilder()
.setResultPartitionManager(network.getResultPartitionManager())
.setupBufferPoolFactoryFromNettyShuffleEnvironment(network)
.build();
localResultPartition.setup();
remoteResultPartition.setup();
final SingleInputGate inputGate = createInputGate(network, 2, ResultPartitionType.PIPELINED);
try {
final ResultPartitionID localResultPartitionId = localResultPartition.getPartitionId();
addUnknownInputChannel(network, inputGate, localResultPartitionId, 0);
final ResultPartitionID remoteResultPartitionId = remoteResultPartition.getPartitionId();
addUnknownInputChannel(network, inputGate, remoteResultPartitionId, 1);
inputGate.setup();
assertThat(inputGate.getInputChannels().get(remoteResultPartitionId.getPartitionId()),
is(instanceOf((UnknownInputChannel.class))));
assertThat(inputGate.getInputChannels().get(localResultPartitionId.getPartitionId()),
is(instanceOf((UnknownInputChannel.class))));
ResourceID localLocation = ResourceID.generate();
inputGate.updateInputChannel(
localLocation,
createRemoteWithIdAndLocation(remoteResultPartitionId.getPartitionId(), ResourceID.generate()));
assertThat(inputGate.getInputChannels().get(remoteResultPartitionId.getPartitionId()),
is(instanceOf((RemoteInputChannel.class))));
assertThat(inputGate.getInputChannels().get(localResultPartitionId.getPartitionId()),
is(instanceOf((UnknownInputChannel.class))));
inputGate.updateInputChannel(
localLocation,
createRemoteWithIdAndLocation(localResultPartitionId.getPartitionId(), localLocation));
assertThat(inputGate.getInputChannels().get(remoteResultPartitionId.getPartitionId()),
is(instanceOf((RemoteInputChannel.class))));
assertThat(inputGate.getInputChannels().get(localResultPartitionId.getPartitionId()),
is(instanceOf((LocalInputChannel.class))));
} finally {
inputGate.close();
network.close();
}
}
@Test
/**
* Tests that if the {@link PartitionNotFoundException} is set onto one {@link InputChannel},
* then it would be thrown directly via {@link SingleInputGate
* could confirm the {@link SingleInputGate} would not swallow or transform the original exception.
*/
@Test
public void testPartitionNotFoundExceptionWhileGetNextBuffer() throws Exception {
final SingleInputGate inputGate = createSingleInputGate(1);
final LocalInputChannel localChannel = createLocalInputChannel(inputGate, new ResultPartitionManager());
final ResultPartitionID partitionId = localChannel.getPartitionId();
inputGate.setInputChannel(partitionId.getPartitionId(), localChannel);
localChannel.setError(new PartitionNotFoundException(partitionId));
try {
inputGate.getNext();
fail("Should throw a PartitionNotFoundException.");
} catch (PartitionNotFoundException notFound) {
assertThat(partitionId, is(notFound.getPartitionId()));
}
}
@Test
public void testInputGateRemovalFromNettyShuffleEnvironment() throws Exception {
NettyShuffleEnvironment network = createNettyShuffleEnvironment();
try {
int numberOfGates = 10;
Map<InputGateID, SingleInputGate> createdInputGatesById =
createInputGateWithLocalChannels(network, numberOfGates, 1);
assertEquals(numberOfGates, createdInputGatesById.size());
for (InputGateID id : createdInputGatesById.keySet()) {
assertThat(network.getInputGate(id).isPresent(), is(true));
createdInputGatesById.get(id).close();
assertThat(network.getInputGate(id).isPresent(), is(false));
}
} finally {
network.close();
}
}
private static Map<InputGateID, SingleInputGate> createInputGateWithLocalChannels(
NettyShuffleEnvironment network,
int numberOfGates,
@SuppressWarnings("SameParameterValue") int numberOfLocalChannels) {
ShuffleDescriptor[] channelDescs = new NettyShuffleDescriptor[numberOfLocalChannels];
for (int i = 0; i < numberOfLocalChannels; i++) {
channelDescs[i] = createRemoteWithIdAndLocation(new IntermediateResultPartitionID(), ResourceID.generate());
}
InputGateDeploymentDescriptor[] gateDescs = new InputGateDeploymentDescriptor[numberOfGates];
IntermediateDataSetID[] ids = new IntermediateDataSetID[numberOfGates];
for (int i = 0; i < numberOfGates; i++) {
ids[i] = new IntermediateDataSetID();
gateDescs[i] = new InputGateDeploymentDescriptor(
ids[i],
ResultPartitionType.PIPELINED,
0,
channelDescs);
}
ExecutionAttemptID consumerID = new ExecutionAttemptID();
SingleInputGate[] gates = network.createInputGates(
network.createShuffleIOOwnerContext("", consumerID, new UnregisteredMetricsGroup()),
SingleInputGateBuilder.NO_OP_PRODUCER_CHECKER,
Arrays.asList(gateDescs)).toArray(new SingleInputGate[] {});
Map<InputGateID, SingleInputGate> inputGatesById = new HashMap<>();
for (int i = 0; i < numberOfGates; i++) {
inputGatesById.put(new InputGateID(ids[i], consumerID), gates[i]);
}
return inputGatesById;
}
private void addUnknownInputChannel(
NettyShuffleEnvironment network,
SingleInputGate inputGate,
ResultPartitionID partitionId,
int channelIndex) {
InputChannelBuilder.newBuilder()
.setChannelIndex(channelIndex)
.setPartitionId(partitionId)
.setupFromNettyShuffleEnvironment(network)
.setConnectionManager(new TestingConnectionManager())
.buildUnknownAndSetToGate(inputGate);
}
private NettyShuffleEnvironment createNettyShuffleEnvironment() {
return new NettyShuffleEnvironmentBuilder()
.setIsCreditBased(enableCreditBasedFlowControl)
.build();
}
static void verifyBufferOrEvent(
InputGate inputGate,
boolean expectedIsBuffer,
int expectedChannelIndex,
boolean expectedMoreAvailable) throws IOException, InterruptedException {
final Optional<BufferOrEvent> bufferOrEvent = inputGate.getNext();
assertTrue(bufferOrEvent.isPresent());
assertEquals(expectedIsBuffer, bufferOrEvent.get().isBuffer());
assertEquals(expectedChannelIndex, bufferOrEvent.get().getChannelIndex());
assertEquals(expectedMoreAvailable, bufferOrEvent.get().moreAvailable());
if (!expectedMoreAvailable) {
assertFalse(inputGate.pollNext().isPresent());
}
}
} | class SingleInputGateTest extends InputGateTestBase {
/**
* Tests basic correctness of buffer-or-event interleaving and correct <code>null</code> return
* value after receiving all end-of-partition events.
*/
@Test
public void testBasicGetNextLogic() throws Exception {
final SingleInputGate inputGate = createInputGate();
final TestInputChannel[] inputChannels = new TestInputChannel[]{
new TestInputChannel(inputGate, 0),
new TestInputChannel(inputGate, 1)
};
inputGate.setInputChannel(
new IntermediateResultPartitionID(), inputChannels[0]);
inputGate.setInputChannel(
new IntermediateResultPartitionID(), inputChannels[1]);
inputChannels[0].readBuffer();
inputChannels[0].readBuffer();
inputChannels[1].readBuffer();
inputChannels[1].readEndOfPartitionEvent();
inputChannels[0].readEndOfPartitionEvent();
inputGate.notifyChannelNonEmpty(inputChannels[0]);
inputGate.notifyChannelNonEmpty(inputChannels[1]);
verifyBufferOrEvent(inputGate, true, 0, true);
verifyBufferOrEvent(inputGate, true, 1, true);
verifyBufferOrEvent(inputGate, true, 0, true);
verifyBufferOrEvent(inputGate, false, 1, true);
verifyBufferOrEvent(inputGate, false, 0, false);
assertTrue(inputGate.isFinished());
for (TestInputChannel ic : inputChannels) {
ic.assertReturnedEventsAreRecycled();
}
}
@Test
public void testIsAvailable() throws Exception {
final SingleInputGate inputGate = createInputGate(1);
TestInputChannel inputChannel = new TestInputChannel(inputGate, 0);
inputGate.setInputChannel(new IntermediateResultPartitionID(), inputChannel);
testIsAvailable(inputGate, inputGate, inputChannel);
}
@Test
public void testIsAvailableAfterFinished() throws Exception {
final SingleInputGate inputGate = createInputGate(1);
TestInputChannel inputChannel = new TestInputChannel(inputGate, 0);
inputGate.setInputChannel(new IntermediateResultPartitionID(), inputChannel);
testIsAvailableAfterFinished(
inputGate,
() -> {
inputChannel.readEndOfPartitionEvent();
inputGate.notifyChannelNonEmpty(inputChannel);
});
}
@Test
public void testIsMoreAvailableReadingFromSingleInputChannel() throws Exception {
final SingleInputGate inputGate = createInputGate();
final TestInputChannel[] inputChannels = new TestInputChannel[]{
new TestInputChannel(inputGate, 0),
new TestInputChannel(inputGate, 1)
};
inputGate.setInputChannel(
new IntermediateResultPartitionID(), inputChannels[0]);
inputGate.setInputChannel(
new IntermediateResultPartitionID(), inputChannels[1]);
inputChannels[0].readBuffer();
inputChannels[0].readBuffer(false);
inputGate.notifyChannelNonEmpty(inputChannels[0]);
verifyBufferOrEvent(inputGate, true, 0, true);
verifyBufferOrEvent(inputGate, true, 0, false);
}
@Test
public void testBackwardsEventWithUninitializedChannel() throws Exception {
final TaskEventDispatcher taskEventDispatcher = mock(TaskEventDispatcher.class);
when(taskEventDispatcher.publish(any(ResultPartitionID.class), any(TaskEvent.class))).thenReturn(true);
final ResultSubpartitionView iterator = mock(ResultSubpartitionView.class);
when(iterator.getNextBuffer()).thenReturn(
new BufferAndBacklog(new NetworkBuffer(MemorySegmentFactory.allocateUnpooledSegment(1024), FreeingBufferRecycler.INSTANCE), false, 0, false));
final ResultPartitionManager partitionManager = mock(ResultPartitionManager.class);
when(partitionManager.createSubpartitionView(
any(ResultPartitionID.class),
anyInt(),
any(BufferAvailabilityListener.class))).thenReturn(iterator);
NettyShuffleEnvironment environment = createNettyShuffleEnvironment();
final SingleInputGate inputGate = createInputGate(environment, 2, ResultPartitionType.PIPELINED);
try {
ResultPartitionID localPartitionId = new ResultPartitionID();
InputChannelBuilder.newBuilder()
.setPartitionId(localPartitionId)
.setPartitionManager(partitionManager)
.setTaskEventPublisher(taskEventDispatcher)
.buildLocalAndSetToGate(inputGate);
ResultPartitionID unknownPartitionId = new ResultPartitionID();
InputChannelBuilder.newBuilder()
.setChannelIndex(1)
.setPartitionId(unknownPartitionId)
.setPartitionManager(partitionManager)
.setTaskEventPublisher(taskEventDispatcher)
.buildUnknownAndSetToGate(inputGate);
inputGate.setup();
verify(partitionManager, times(1)).createSubpartitionView(any(ResultPartitionID.class), anyInt(), any(BufferAvailabilityListener.class));
final TaskEvent event = new TestTaskEvent();
inputGate.sendTaskEvent(event);
verify(taskEventDispatcher, times(1)).publish(any(ResultPartitionID.class), any(TaskEvent.class));
ResourceID location = ResourceID.generate();
inputGate.updateInputChannel(location, createRemoteWithIdAndLocation(unknownPartitionId.getPartitionId(), location));
verify(partitionManager, times(2)).createSubpartitionView(any(ResultPartitionID.class), anyInt(), any(BufferAvailabilityListener.class));
verify(taskEventDispatcher, times(2)).publish(any(ResultPartitionID.class), any(TaskEvent.class));
}
finally {
inputGate.close();
environment.close();
}
}
/**
* Tests that an update channel does not trigger a partition request before the UDF has
* requested any partitions. Otherwise, this can lead to races when registering a listener at
* the gate (e.g. in UnionInputGate), which can result in missed buffer notifications at the
* listener.
*/
@Test
public void testUpdateChannelBeforeRequest() throws Exception {
SingleInputGate inputGate = createInputGate(1);
ResultPartitionManager partitionManager = mock(ResultPartitionManager.class);
InputChannel unknown = InputChannelBuilder.newBuilder()
.setPartitionManager(partitionManager)
.buildUnknownAndSetToGate(inputGate);
ResultPartitionID resultPartitionID = unknown.getPartitionId();
ResourceID location = ResourceID.generate();
inputGate.updateInputChannel(location, createRemoteWithIdAndLocation(resultPartitionID.getPartitionId(), location));
verify(partitionManager, never()).createSubpartitionView(
any(ResultPartitionID.class), anyInt(), any(BufferAvailabilityListener.class));
}
/**
* Tests that the release of the input gate is noticed while polling the
* channels for available data.
*/
@Test
public void testReleaseWhilePollingChannel() throws Exception {
final AtomicReference<Exception> asyncException = new AtomicReference<>();
final SingleInputGate inputGate = createInputGate(1);
InputChannelBuilder.newBuilder().buildUnknownAndSetToGate(inputGate);
Thread asyncConsumer = new Thread() {
@Override
public void run() {
try {
inputGate.getNext();
} catch (Exception e) {
asyncException.set(e);
}
}
};
asyncConsumer.start();
boolean success = false;
for (int i = 0; i < 50; i++) {
if (asyncConsumer.isAlive()) {
success = asyncConsumer.getState() == Thread.State.WAITING;
}
if (success) {
break;
} else {
Thread.sleep(100);
}
}
assertTrue("Did not trigger blocking buffer request.", success);
inputGate.close();
asyncConsumer.join();
assertNotNull(asyncException.get());
assertEquals(IllegalStateException.class, asyncException.get().getClass());
}
/**
 * Verifies that the configured partition request back off values are forwarded
 * to every created input channel, regardless of its flavor (local, remote, unknown).
 */
@Test
public void testRequestBackoffConfiguration() throws Exception {
    IntermediateResultPartitionID[] partitionIds = new IntermediateResultPartitionID[] {
        new IntermediateResultPartitionID(),
        new IntermediateResultPartitionID(),
        new IntermediateResultPartitionID()
    };
    ResourceID localLocation = ResourceID.generate();
    // One descriptor per channel flavor: local (producer co-located), remote, and unknown.
    ShuffleDescriptor[] channelDescriptors = new ShuffleDescriptor[]{
        createRemoteWithIdAndLocation(partitionIds[0], localLocation),
        createRemoteWithIdAndLocation(partitionIds[1], ResourceID.generate()),
        new UnknownShuffleDescriptor(new ResultPartitionID(partitionIds[2], new ExecutionAttemptID()))};
    InputGateDeploymentDescriptor gateDescriptor = new InputGateDeploymentDescriptor(
        new IntermediateDataSetID(),
        ResultPartitionType.PIPELINED,
        0,
        channelDescriptors);
    int initialBackoff = 137;
    int maxBackoff = 1001;
    final NettyShuffleEnvironment shuffleEnvironment = new NettyShuffleEnvironmentBuilder()
        .setPartitionRequestInitialBackoff(initialBackoff)
        .setPartitionRequestMaxBackoff(maxBackoff)
        .setIsCreditBased(enableCreditBasedFlowControl)
        .build();
    SingleInputGate gate = new SingleInputGateFactory(
        localLocation,
        shuffleEnvironment.getConfiguration(),
        shuffleEnvironment.getConnectionManager(),
        shuffleEnvironment.getResultPartitionManager(),
        new TaskEventDispatcher(),
        shuffleEnvironment.getNetworkBufferPool())
        .create(
            "TestTask",
            gateDescriptor,
            SingleInputGateBuilder.NO_OP_PRODUCER_CHECKER,
            InputChannelTestUtils.newUnregisteredInputChannelMetrics());
    try {
        assertEquals(gateDescriptor.getConsumedPartitionType(), gate.getConsumedPartitionType());
        Map<IntermediateResultPartitionID, InputChannel> channelsById = gate.getInputChannels();
        assertEquals(3, channelsById.size());
        InputChannel localChannel = channelsById.get(partitionIds[0]);
        assertEquals(LocalInputChannel.class, localChannel.getClass());
        InputChannel remoteChannel = channelsById.get(partitionIds[1]);
        assertEquals(RemoteInputChannel.class, remoteChannel.getClass());
        InputChannel unknownChannel = channelsById.get(partitionIds[2]);
        assertEquals(UnknownInputChannel.class, unknownChannel.getClass());
        // Each channel starts without back off and doubles it until the cap is reached.
        for (InputChannel channel : new InputChannel[] {localChannel, remoteChannel, unknownChannel}) {
            assertEquals(0, channel.getCurrentBackoff());
            assertTrue(channel.increaseBackoff());
            assertEquals(initialBackoff, channel.getCurrentBackoff());
            assertTrue(channel.increaseBackoff());
            assertEquals(initialBackoff * 2, channel.getCurrentBackoff());
            assertTrue(channel.increaseBackoff());
            assertEquals(initialBackoff * 2 * 2, channel.getCurrentBackoff());
            // The next doubling would exceed the maximum, so it is clamped ...
            assertTrue(channel.increaseBackoff());
            assertEquals(maxBackoff, channel.getCurrentBackoff());
            // ... and once at the maximum, no further increase is possible.
            assertFalse(channel.increaseBackoff());
        }
    } finally {
        gate.close();
        shuffleEnvironment.close();
    }
}
/**
 * Tests that the input gate requests and assigns network buffers for a remote
 * input channel during {@code setup()}.
 */
@Test
public void testRequestBuffersWithRemoteInputChannel() throws Exception {
    final NettyShuffleEnvironment shuffleEnvironment = createNettyShuffleEnvironment();
    final SingleInputGate gate = createInputGate(shuffleEnvironment, 1, ResultPartitionType.PIPELINED_BOUNDED);
    int buffersPerChannel = 2;
    int extraNetworkBuffersPerGate = 8;
    try {
        RemoteInputChannel remoteChannel =
            InputChannelBuilder.newBuilder()
                .setupFromNettyShuffleEnvironment(shuffleEnvironment)
                .setConnectionManager(new TestingConnectionManager())
                .buildRemoteAndSetToGate(gate);
        gate.setup();
        NetworkBufferPool globalPool = shuffleEnvironment.getNetworkBufferPool();
        if (enableCreditBasedFlowControl) {
            // Exclusive buffers go straight to the remote channel ...
            assertEquals(buffersPerChannel, remoteChannel.getNumberOfAvailableBuffers());
            assertEquals(globalPool.getTotalNumberOfMemorySegments() - buffersPerChannel,
                globalPool.getNumberOfAvailableMemorySegments());
            // ... while only the floating buffers are accounted to the gate's local pool.
            assertEquals(extraNetworkBuffersPerGate, globalPool.countBuffers());
        } else {
            assertEquals(buffersPerChannel + extraNetworkBuffersPerGate, globalPool.countBuffers());
        }
    } finally {
        gate.close();
        shuffleEnvironment.close();
    }
}
/**
 * Tests that network buffers are requested and assigned once an unknown input
 * channel is updated to a remote input channel.
 */
@Test
public void testRequestBuffersWithUnknownInputChannel() throws Exception {
    final NettyShuffleEnvironment shuffleEnvironment = createNettyShuffleEnvironment();
    final SingleInputGate gate = createInputGate(shuffleEnvironment, 1, ResultPartitionType.PIPELINED_BOUNDED);
    int buffersPerChannel = 2;
    int extraNetworkBuffersPerGate = 8;
    try {
        final ResultPartitionID partitionId = new ResultPartitionID();
        addUnknownInputChannel(shuffleEnvironment, gate, partitionId, 0);
        gate.setup();
        NetworkBufferPool globalPool = shuffleEnvironment.getNetworkBufferPool();
        if (enableCreditBasedFlowControl) {
            // An unknown channel holds no exclusive buffers yet.
            assertEquals(globalPool.getTotalNumberOfMemorySegments(),
                globalPool.getNumberOfAvailableMemorySegments());
            assertEquals(extraNetworkBuffersPerGate, globalPool.countBuffers());
        } else {
            assertEquals(buffersPerChannel + extraNetworkBuffersPerGate, globalPool.countBuffers());
        }
        gate.updateInputChannel(
            ResourceID.generate(),
            createRemoteWithIdAndLocation(partitionId.getPartitionId(), ResourceID.generate()));
        if (enableCreditBasedFlowControl) {
            // After the update, the now-remote channel owns its exclusive buffers.
            RemoteInputChannel remoteChannel = (RemoteInputChannel) gate.getInputChannels()
                .get(partitionId.getPartitionId());
            assertEquals(buffersPerChannel, remoteChannel.getNumberOfAvailableBuffers());
            assertEquals(globalPool.getTotalNumberOfMemorySegments() - buffersPerChannel,
                globalPool.getNumberOfAvailableMemorySegments());
            assertEquals(extraNetworkBuffersPerGate, globalPool.countBuffers());
        } else {
            assertEquals(buffersPerChannel + extraNetworkBuffersPerGate, globalPool.countBuffers());
        }
    } finally {
        gate.close();
        shuffleEnvironment.close();
    }
}
/**
 * Tests that the input gate can successfully convert unknown input channels into
 * local and remote channels, depending on where the producer runs.
 */
@Test
public void testUpdateUnknownInputChannel() throws Exception {
    final NettyShuffleEnvironment shuffleEnvironment = createNettyShuffleEnvironment();
    final ResultPartition localPartition = new ResultPartitionBuilder()
        .setResultPartitionManager(shuffleEnvironment.getResultPartitionManager())
        .setupBufferPoolFactoryFromNettyShuffleEnvironment(shuffleEnvironment)
        .build();
    final ResultPartition remotePartition = new ResultPartitionBuilder()
        .setResultPartitionManager(shuffleEnvironment.getResultPartitionManager())
        .setupBufferPoolFactoryFromNettyShuffleEnvironment(shuffleEnvironment)
        .build();
    localPartition.setup();
    remotePartition.setup();
    final SingleInputGate gate = createInputGate(shuffleEnvironment, 2, ResultPartitionType.PIPELINED);
    try {
        final ResultPartitionID localPartitionId = localPartition.getPartitionId();
        addUnknownInputChannel(shuffleEnvironment, gate, localPartitionId, 0);
        final ResultPartitionID remotePartitionId = remotePartition.getPartitionId();
        addUnknownInputChannel(shuffleEnvironment, gate, remotePartitionId, 1);
        gate.setup();
        // Both channels start out unknown.
        assertThat(gate.getInputChannels().get(remotePartitionId.getPartitionId()),
            is(instanceOf(UnknownInputChannel.class)));
        assertThat(gate.getInputChannels().get(localPartitionId.getPartitionId()),
            is(instanceOf(UnknownInputChannel.class)));
        ResourceID localLocation = ResourceID.generate();
        // A producer on a different task executor turns the channel into a remote one ...
        gate.updateInputChannel(
            localLocation,
            createRemoteWithIdAndLocation(remotePartitionId.getPartitionId(), ResourceID.generate()));
        assertThat(gate.getInputChannels().get(remotePartitionId.getPartitionId()),
            is(instanceOf(RemoteInputChannel.class)));
        assertThat(gate.getInputChannels().get(localPartitionId.getPartitionId()),
            is(instanceOf(UnknownInputChannel.class)));
        // ... while a producer on the consumer's own task executor yields a local channel.
        gate.updateInputChannel(
            localLocation,
            createRemoteWithIdAndLocation(localPartitionId.getPartitionId(), localLocation));
        assertThat(gate.getInputChannels().get(remotePartitionId.getPartitionId()),
            is(instanceOf(RemoteInputChannel.class)));
        assertThat(gate.getInputChannels().get(localPartitionId.getPartitionId()),
            is(instanceOf(LocalInputChannel.class)));
    } finally {
        gate.close();
        shuffleEnvironment.close();
    }
}
/**
 * Tests that if a {@link PartitionNotFoundException} is set onto one {@link InputChannel},
 * then it is thrown directly via {@link SingleInputGate#getNext()}. This confirms that
 * the {@link SingleInputGate} does not swallow or transform the original exception.
 */
@Test
public void testPartitionNotFoundExceptionWhileGetNextBuffer() throws Exception {
    final SingleInputGate inputGate = createSingleInputGate(1);
    final LocalInputChannel localChannel = createLocalInputChannel(inputGate, new ResultPartitionManager());
    final ResultPartitionID partitionId = localChannel.getPartitionId();
    inputGate.setInputChannel(partitionId.getPartitionId(), localChannel);
    // Plant the failure on the channel before consuming from the gate.
    localChannel.setError(new PartitionNotFoundException(partitionId));
    try {
        inputGate.getNext();
        fail("Should throw a PartitionNotFoundException.");
    } catch (PartitionNotFoundException notFound) {
        // The original exception surfaces unchanged, still carrying the failing partition id.
        assertThat(partitionId, is(notFound.getPartitionId()));
    }
}
/** Tests that closing an input gate deregisters it from the shuffle environment. */
@Test
public void testInputGateRemovalFromNettyShuffleEnvironment() throws Exception {
    NettyShuffleEnvironment shuffleEnvironment = createNettyShuffleEnvironment();
    try {
        int numberOfGates = 10;
        Map<InputGateID, SingleInputGate> createdInputGatesById =
            createInputGateWithLocalChannels(shuffleEnvironment, numberOfGates, 1);
        assertEquals(numberOfGates, createdInputGatesById.size());
        for (Map.Entry<InputGateID, SingleInputGate> entry : createdInputGatesById.entrySet()) {
            // Registered while open ...
            assertThat(shuffleEnvironment.getInputGate(entry.getKey()).isPresent(), is(true));
            entry.getValue().close();
            // ... and gone immediately after close().
            assertThat(shuffleEnvironment.getInputGate(entry.getKey()).isPresent(), is(false));
        }
    } finally {
        shuffleEnvironment.close();
    }
}
/**
 * Creates {@code numberOfGates} input gates, each consuming the same set of
 * {@code numberOfLocalChannels} channel descriptors, and indexes them by their
 * {@link InputGateID}.
 */
private static Map<InputGateID, SingleInputGate> createInputGateWithLocalChannels(
        NettyShuffleEnvironment network,
        int numberOfGates,
        @SuppressWarnings("SameParameterValue") int numberOfLocalChannels) {
    // All gates share one set of channel descriptors.
    ShuffleDescriptor[] channelDescriptors = new NettyShuffleDescriptor[numberOfLocalChannels];
    for (int channel = 0; channel < numberOfLocalChannels; channel++) {
        channelDescriptors[channel] =
            createRemoteWithIdAndLocation(new IntermediateResultPartitionID(), ResourceID.generate());
    }
    IntermediateDataSetID[] dataSetIds = new IntermediateDataSetID[numberOfGates];
    InputGateDeploymentDescriptor[] gateDescriptors = new InputGateDeploymentDescriptor[numberOfGates];
    for (int gateIndex = 0; gateIndex < numberOfGates; gateIndex++) {
        dataSetIds[gateIndex] = new IntermediateDataSetID();
        gateDescriptors[gateIndex] = new InputGateDeploymentDescriptor(
            dataSetIds[gateIndex],
            ResultPartitionType.PIPELINED,
            0,
            channelDescriptors);
    }
    ExecutionAttemptID consumerID = new ExecutionAttemptID();
    SingleInputGate[] gates = network.createInputGates(
        network.createShuffleIOOwnerContext("", consumerID, new UnregisteredMetricsGroup()),
        SingleInputGateBuilder.NO_OP_PRODUCER_CHECKER,
        Arrays.asList(gateDescriptors)).toArray(new SingleInputGate[] {});
    Map<InputGateID, SingleInputGate> inputGatesById = new HashMap<>();
    for (int gateIndex = 0; gateIndex < numberOfGates; gateIndex++) {
        inputGatesById.put(new InputGateID(dataSetIds[gateIndex], consumerID), gates[gateIndex]);
    }
    return inputGatesById;
}
/**
 * Registers an unknown input channel for the given partition at the given
 * channel index of the input gate.
 */
private void addUnknownInputChannel(
        NettyShuffleEnvironment network,
        SingleInputGate inputGate,
        ResultPartitionID partitionId,
        int channelIndex) {
    InputChannelBuilder.newBuilder()
        .setChannelIndex(channelIndex)
        .setPartitionId(partitionId)
        .setupFromNettyShuffleEnvironment(network)
        // Avoid opening real network connections in tests.
        .setConnectionManager(new TestingConnectionManager())
        .buildUnknownAndSetToGate(inputGate);
}
/**
 * Creates a shuffle environment that honors this test's parameterized
 * credit-based flow control setting.
 */
private NettyShuffleEnvironment createNettyShuffleEnvironment() {
    return new NettyShuffleEnvironmentBuilder()
        .setIsCreditBased(enableCreditBasedFlowControl)
        .build();
}
/**
 * Polls the next buffer-or-event from the gate and checks its type, channel index
 * and more-available flag; when no more data is announced, also verifies that a
 * subsequent poll yields nothing.
 */
static void verifyBufferOrEvent(
        InputGate inputGate,
        boolean expectedIsBuffer,
        int expectedChannelIndex,
        boolean expectedMoreAvailable) throws IOException, InterruptedException {
    final Optional<BufferOrEvent> next = inputGate.getNext();
    assertTrue(next.isPresent());
    final BufferOrEvent actual = next.get();
    assertEquals(expectedIsBuffer, actual.isBuffer());
    assertEquals(expectedChannelIndex, actual.getChannelIndex());
    assertEquals(expectedMoreAvailable, actual.moreAvailable());
    if (!expectedMoreAvailable) {
        assertFalse(inputGate.pollNext().isPresent());
    }
}
} |
Sorry for the confusion. No, there is no case where `preposition` is not null and `catalog` is null. In fact, based on our discussion, I have already removed the `preposition` field from `ShowDatabasesOperation`. `catalog` (and thus the `preposition` that no longer exists) is null only when there is no `FROM/IN` clause. Do the changes in `ShowDatabasesOperation` reflect your comments, or did I miss something? | public Operation convertSqlNode(SqlShowDatabases sqlShowDatabases, ConvertContext context) {
if (sqlShowDatabases.getPreposition() == null) {
return new ShowDatabasesOperation(
sqlShowDatabases.getLikeType(),
sqlShowDatabases.getLikeSqlPattern(),
sqlShowDatabases.isNotLike());
} else {
CatalogManager catalogManager = context.getCatalogManager();
String[] fullCatalogName = sqlShowDatabases.getCatalog();
String catalogName =
fullCatalogName.length == 0
? catalogManager.getCurrentCatalog()
: fullCatalogName[0];
return new ShowDatabasesOperation(
sqlShowDatabases.getPreposition(),
catalogName,
sqlShowDatabases.getLikeType(),
sqlShowDatabases.getLikeSqlPattern(),
sqlShowDatabases.isNotLike());
}
} | : fullCatalogName[0]; | public Operation convertSqlNode(SqlShowDatabases sqlShowDatabases, ConvertContext context) {
if (sqlShowDatabases.getPreposition() == null) {
return new ShowDatabasesOperation(
sqlShowDatabases.getLikeType(),
sqlShowDatabases.getLikeSqlPattern(),
sqlShowDatabases.isNotLike());
} else {
return new ShowDatabasesOperation(
sqlShowDatabases.getCatalog()[0],
sqlShowDatabases.getLikeType(),
sqlShowDatabases.getLikeSqlPattern(),
sqlShowDatabases.isNotLike());
}
} | class SqlShowDatabasesConverter implements SqlNodeConverter<SqlShowDatabases> {
@Override
} | class SqlShowDatabasesConverter implements SqlNodeConverter<SqlShowDatabases> {
@Override
} |
Can we add a TODO here? Actually the record should pass through the Table API unmodified, but this will come with FLIP-136. | public void testAvroToRow() throws Exception {
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.getConfig().registerTypeWithKryoSerializer(LocalDate.class, AvroKryoSerializerUtils.JodaLocalDateSerializer.class);
env.getConfig().registerTypeWithKryoSerializer(LocalTime.class, AvroKryoSerializerUtils.JodaLocalTimeSerializer.class);
StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, EnvironmentSettings.newInstance().useBlinkPlanner().build());
Table t = tEnv.fromDataStream(testData(env));
Table result = t.select($("*"));
Iterable<Row> users = () -> DataStreamUtils.collect(tEnv.toAppendStream(result, Row.class));
List<Row> results = StreamSupport
.stream(users.spliterator(), false)
.collect(Collectors.toList());
String expected =
"black,null,Whatever,[true],[hello],true,java.nio.HeapByteBuffer[pos=0 lim=10 cap=10]," +
"2014-03-01,java.nio.HeapByteBuffer[pos=0 lim=2 cap=2],[7, -48],0.0,GREEN," +
"[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],42,{},null,null,null,123456," +
"12:12:12.000,123456,2014-03-01T12:12:12.321Z,null\n" +
"blue,null,Charlie,[],[],false,java.nio.HeapByteBuffer[pos=0 lim=10 cap=10],2014-03-01," +
"java.nio.HeapByteBuffer[pos=0 lim=2 cap=2],[7, -48],1.337,RED,null,1337,{}," +
"Berlin,42,Berlin,Bakerstreet,12049,null,null,123456,12:12:12.000,123456," +
"2014-03-01T12:12:12.321Z,null\n" +
"yellow,null,Terminator,[false],[world],false," +
"java.nio.HeapByteBuffer[pos=0 lim=10 cap=10],2014-03-01," +
"java.nio.HeapByteBuffer[pos=0 lim=2 cap=2],[7, -48],0.0,GREEN," +
"[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],1,{},null,null,null,123456," +
"12:12:12.000,123456,2014-03-01T12:12:12.321Z,null";
TestBaseUtils.compareResultAsText(results, expected);
} | "Berlin,42,Berlin,Bakerstreet,12049,null,null,123456,12:12:12.000,123456," + | public void testAvroToRow() throws Exception {
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
StreamTableEnvironment tEnv = StreamTableEnvironment.create(
env,
EnvironmentSettings.newInstance().useBlinkPlanner().build());
Table t = tEnv.fromDataStream(testData(env));
Table result = t.select($("*"));
List<Row> results = CollectionUtil.iteratorToList(
DataStreamUtils.collect(
tEnv.toAppendStream(
result,
Row.class)));
String expected =
"black,null,Whatever,[true],[hello],true,java.nio.HeapByteBuffer[pos=0 lim=10 cap=10]," +
"2014-03-01,java.nio.HeapByteBuffer[pos=0 lim=2 cap=2],[7, -48],0.0,GREEN," +
"[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],42,{},null,null,null,00:00:00.123456," +
"12:12:12,1970-01-01T00:00:00.123456Z,2014-03-01T12:12:12.321Z,null\n" +
"blue,null,Charlie,[],[],false,java.nio.HeapByteBuffer[pos=0 lim=10 cap=10],2014-03-01," +
"java.nio.HeapByteBuffer[pos=0 lim=2 cap=2],[7, -48],1.337,RED,null,1337,{}," +
"Berlin,42,Berlin,Bakerstreet,12049,null,null,00:00:00.123456,12:12:12,1970-01-01T00:00:00.123456Z," +
"2014-03-01T12:12:12.321Z,null\n" +
"yellow,null,Terminator,[false],[world],false," +
"java.nio.HeapByteBuffer[pos=0 lim=10 cap=10],2014-03-01," +
"java.nio.HeapByteBuffer[pos=0 lim=2 cap=2],[7, -48],0.0,GREEN," +
"[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],1,{},null,null,null,00:00:00.123456," +
"12:12:12,1970-01-01T00:00:00.123456Z,2014-03-01T12:12:12.321Z,null";
TestBaseUtils.compareResultAsText(results, expected);
} | class AvroTypesITCase extends TableProgramsClusterTestBase {
private static final User USER_1 = User.newBuilder()
.setName("Charlie")
.setFavoriteColor("blue")
.setFavoriteNumber(null)
.setTypeBoolTest(false)
.setTypeDoubleTest(1.337d)
.setTypeNullTest(null)
.setTypeLongTest(1337L)
.setTypeArrayString(new ArrayList<>())
.setTypeArrayBoolean(new ArrayList<>())
.setTypeNullableArray(null)
.setTypeEnum(Colors.RED)
.setTypeMap(new HashMap<>())
.setTypeFixed(null)
.setTypeUnion(null)
.setTypeNested(
Address.newBuilder()
.setNum(42)
.setStreet("Bakerstreet")
.setCity("Berlin")
.setState("Berlin")
.setZip("12049").build())
.setTypeBytes(ByteBuffer.allocate(10))
.setTypeDate(LocalDate.parse("2014-03-01"))
.setTypeTimeMillis(LocalTime.parse("12:12:12"))
.setTypeTimeMicros(123456)
.setTypeTimestampMillis(DateTime.parse("2014-03-01T12:12:12.321Z"))
.setTypeTimestampMicros(123456L)
.setTypeDecimalBytes(ByteBuffer.wrap(BigDecimal.valueOf(2000, 2).unscaledValue().toByteArray()))
.setTypeDecimalFixed(new Fixed2(BigDecimal.valueOf(2000, 2).unscaledValue().toByteArray()))
.build();
private static final User USER_2 = User.newBuilder()
.setName("Whatever")
.setFavoriteNumber(null)
.setFavoriteColor("black")
.setTypeLongTest(42L)
.setTypeDoubleTest(0.0)
.setTypeNullTest(null)
.setTypeBoolTest(true)
.setTypeArrayString(Collections.singletonList("hello"))
.setTypeArrayBoolean(Collections.singletonList(true))
.setTypeEnum(Colors.GREEN)
.setTypeMap(new HashMap<>())
.setTypeFixed(new Fixed16())
.setTypeUnion(null)
.setTypeNested(null).setTypeDate(LocalDate.parse("2014-03-01"))
.setTypeBytes(ByteBuffer.allocate(10))
.setTypeTimeMillis(LocalTime.parse("12:12:12"))
.setTypeTimeMicros(123456)
.setTypeTimestampMillis(DateTime.parse("2014-03-01T12:12:12.321Z"))
.setTypeTimestampMicros(123456L)
.setTypeDecimalBytes(ByteBuffer.wrap(BigDecimal.valueOf(2000, 2).unscaledValue().toByteArray()))
.setTypeDecimalFixed(new Fixed2(BigDecimal.valueOf(2000, 2).unscaledValue().toByteArray()))
.build();
private static final User USER_3 = User.newBuilder()
.setName("Terminator")
.setFavoriteNumber(null)
.setFavoriteColor("yellow")
.setTypeLongTest(1L)
.setTypeDoubleTest(0.0)
.setTypeNullTest(null)
.setTypeBoolTest(false)
.setTypeArrayString(Collections.singletonList("world"))
.setTypeArrayBoolean(Collections.singletonList(false))
.setTypeEnum(Colors.GREEN)
.setTypeMap(new HashMap<>())
.setTypeFixed(new Fixed16())
.setTypeUnion(null)
.setTypeNested(null)
.setTypeBytes(ByteBuffer.allocate(10))
.setTypeDate(LocalDate.parse("2014-03-01"))
.setTypeTimeMillis(LocalTime.parse("12:12:12"))
.setTypeTimeMicros(123456)
.setTypeTimestampMillis(DateTime.parse("2014-03-01T12:12:12.321Z"))
.setTypeTimestampMicros(123456L)
.setTypeDecimalBytes(ByteBuffer.wrap(BigDecimal.valueOf(2000, 2).unscaledValue().toByteArray()))
.setTypeDecimalFixed(new Fixed2(BigDecimal.valueOf(2000, 2).unscaledValue().toByteArray()))
.build();
public AvroTypesITCase(
TestExecutionMode executionMode,
TableConfigMode tableConfigMode) {
super(executionMode, tableConfigMode);
}
@Test
@Test
public void testAvroStringAccess() throws Exception {
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, EnvironmentSettings.newInstance().useBlinkPlanner().build());
Table t = tEnv.fromDataStream(testData(env));
Table result = t.select($("name"));
Iterable<Row> rows = () -> result.execute().collect();
List<Utf8> results = StreamSupport
.stream(rows.spliterator(), false)
.map(row -> (Utf8) row.getField(0))
.collect(Collectors.toList());
String expected = "Charlie\n" +
"Terminator\n" +
"Whatever";
TestBaseUtils.compareResultAsText(results, expected);
}
@Test
public void testAvroObjectAccess() throws Exception {
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
StreamTableEnvironment tEnv = StreamTableEnvironment.create(
env,
EnvironmentSettings.newInstance().useBlinkPlanner().build());
Table t = tEnv.fromDataStream(testData(env));
Table result = t
.filter($("type_nested").isNotNull())
.select($("type_nested").flatten())
.as("city", "num", "state", "street", "zip");
Iterable<Address> users = () -> DataStreamUtils.collect(tEnv.toAppendStream(result, Address.class));
List<Address> results = StreamSupport
.stream(users.spliterator(), false)
.collect(Collectors.toList());
String expected = USER_1.getTypeNested().toString();
TestBaseUtils.compareResultAsText(results, expected);
}
@Test
public void testAvroToAvro() throws Exception {
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
StreamTableEnvironment tEnv = StreamTableEnvironment.create(
env,
EnvironmentSettings.newInstance().useBlinkPlanner().build());
Table t = tEnv.fromDataStream(testData(env));
Table result = t.select($("*"));
Iterable<User> users = () -> DataStreamUtils.collect(tEnv.toAppendStream(result, User.class));
List<User> results = StreamSupport
.stream(users.spliterator(), false)
.collect(Collectors.toList());
List<User> expected = Arrays.asList(USER_1, USER_2, USER_3);
assertEquals(expected, results);
}
private DataStream<User> testData(StreamExecutionEnvironment env) {
return env.fromElements(
USER_1,
USER_2,
USER_3
);
}
} | class AvroTypesITCase extends TableProgramsClusterTestBase {
private static final User USER_1 = User.newBuilder()
.setName("Charlie")
.setFavoriteColor("blue")
.setFavoriteNumber(null)
.setTypeBoolTest(false)
.setTypeDoubleTest(1.337d)
.setTypeNullTest(null)
.setTypeLongTest(1337L)
.setTypeArrayString(new ArrayList<>())
.setTypeArrayBoolean(new ArrayList<>())
.setTypeNullableArray(null)
.setTypeEnum(Colors.RED)
.setTypeMap(new HashMap<>())
.setTypeFixed(null)
.setTypeUnion(null)
.setTypeNested(
Address.newBuilder()
.setNum(42)
.setStreet("Bakerstreet")
.setCity("Berlin")
.setState("Berlin")
.setZip("12049").build())
.setTypeBytes(ByteBuffer.allocate(10))
.setTypeDate(LocalDate.parse("2014-03-01"))
.setTypeTimeMillis(LocalTime.parse("12:12:12"))
.setTypeTimeMicros(LocalTime.ofSecondOfDay(0).plus(123456L, ChronoUnit.MICROS))
.setTypeTimestampMillis(Instant.parse("2014-03-01T12:12:12.321Z"))
.setTypeTimestampMicros(Instant.ofEpochSecond(0).plus(123456L, ChronoUnit.MICROS))
.setTypeDecimalBytes(ByteBuffer.wrap(BigDecimal.valueOf(2000, 2).unscaledValue().toByteArray()))
.setTypeDecimalFixed(new Fixed2(BigDecimal.valueOf(2000, 2).unscaledValue().toByteArray()))
.build();
private static final User USER_2 = User.newBuilder()
.setName("Whatever")
.setFavoriteNumber(null)
.setFavoriteColor("black")
.setTypeLongTest(42L)
.setTypeDoubleTest(0.0)
.setTypeNullTest(null)
.setTypeBoolTest(true)
.setTypeArrayString(Collections.singletonList("hello"))
.setTypeArrayBoolean(Collections.singletonList(true))
.setTypeEnum(Colors.GREEN)
.setTypeMap(new HashMap<>())
.setTypeFixed(new Fixed16())
.setTypeUnion(null)
.setTypeNested(null)
.setTypeDate(LocalDate.parse("2014-03-01"))
.setTypeBytes(ByteBuffer.allocate(10))
.setTypeTimeMillis(LocalTime.parse("12:12:12"))
.setTypeTimeMicros(LocalTime.ofSecondOfDay(0).plus(123456L, ChronoUnit.MICROS))
.setTypeTimestampMillis(Instant.parse("2014-03-01T12:12:12.321Z"))
.setTypeTimestampMicros(Instant.ofEpochSecond(0).plus(123456L, ChronoUnit.MICROS))
.setTypeDecimalBytes(ByteBuffer.wrap(BigDecimal.valueOf(2000, 2).unscaledValue().toByteArray()))
.setTypeDecimalFixed(new Fixed2(BigDecimal.valueOf(2000, 2).unscaledValue().toByteArray()))
.build();
private static final User USER_3 = User.newBuilder()
.setName("Terminator")
.setFavoriteNumber(null)
.setFavoriteColor("yellow")
.setTypeLongTest(1L)
.setTypeDoubleTest(0.0)
.setTypeNullTest(null)
.setTypeBoolTest(false)
.setTypeArrayString(Collections.singletonList("world"))
.setTypeArrayBoolean(Collections.singletonList(false))
.setTypeEnum(Colors.GREEN)
.setTypeMap(new HashMap<>())
.setTypeFixed(new Fixed16())
.setTypeUnion(null)
.setTypeNested(null)
.setTypeBytes(ByteBuffer.allocate(10))
.setTypeDate(LocalDate.parse("2014-03-01"))
.setTypeTimeMillis(LocalTime.parse("12:12:12"))
.setTypeTimeMicros(LocalTime.ofSecondOfDay(0).plus(123456L, ChronoUnit.MICROS))
.setTypeTimestampMillis(Instant.parse("2014-03-01T12:12:12.321Z"))
.setTypeTimestampMicros(Instant.ofEpochSecond(0).plus(123456L, ChronoUnit.MICROS))
.setTypeDecimalBytes(ByteBuffer.wrap(BigDecimal.valueOf(2000, 2).unscaledValue().toByteArray()))
.setTypeDecimalFixed(new Fixed2(BigDecimal.valueOf(2000, 2).unscaledValue().toByteArray()))
.build();
public AvroTypesITCase(
TestExecutionMode executionMode,
TableConfigMode tableConfigMode) {
super(executionMode, tableConfigMode);
}
@Test
@Test
public void testAvroStringAccess() throws Exception {
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, EnvironmentSettings.newInstance().useBlinkPlanner().build());
Table t = tEnv.fromDataStream(testData(env));
Table result = t.select($("name"));
List<Utf8> results = CollectionUtil.iteratorToList(result.execute().collect())
.stream()
.map(row -> (Utf8) row.getField(0))
.collect(Collectors.toList());
String expected = "Charlie\n" +
"Terminator\n" +
"Whatever";
TestBaseUtils.compareResultAsText(results, expected);
}
@Test
public void testAvroObjectAccess() throws Exception {
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
StreamTableEnvironment tEnv = StreamTableEnvironment.create(
env,
EnvironmentSettings.newInstance().useBlinkPlanner().build());
Table t = tEnv.fromDataStream(testData(env));
Table result = t
.filter($("type_nested").isNotNull())
.select($("type_nested").flatten())
.as("city", "num", "state", "street", "zip");
List<Address> results = CollectionUtil.iteratorToList(
DataStreamUtils.collect(
tEnv.toAppendStream(
result,
Address.class)));
String expected = USER_1.getTypeNested().toString();
TestBaseUtils.compareResultAsText(results, expected);
}
@Test
public void testAvroToAvro() throws Exception {
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
StreamTableEnvironment tEnv = StreamTableEnvironment.create(
env,
EnvironmentSettings.newInstance().useBlinkPlanner().build());
Table t = tEnv.fromDataStream(testData(env));
Table result = t.select($("*"));
List<User> results = CollectionUtil.iteratorToList(
DataStreamUtils.collect(
tEnv.toAppendStream(result, User.class)));
List<User> expected = Arrays.asList(USER_1, USER_2, USER_3);
assertEquals(expected, results);
}
private DataStream<User> testData(StreamExecutionEnvironment env) {
return env.fromElements(
USER_1,
USER_2,
USER_3
);
}
} |
HBase and JDBC DataSource should be processed in a pluggable way. I will refactor this after the PR is merged. | public static YamlProxyConfiguration load(final String path) throws IOException {
YamlProxyServerConfiguration serverConfig = loadServerConfiguration(getResourceFile(String.join("/", path, SERVER_CONFIG_FILE)));
File configPath = getResourceFile(path);
Collection<YamlProxyDatabaseConfiguration> databaseConfigs = loadDatabaseConfigurations(configPath);
Collection<YamlHBaseConfiguration> hbaseConfigs = loadHBaseConfigurations(configPath);
return new YamlProxyConfiguration(serverConfig, databaseConfigs.stream().collect(Collectors.toMap(
YamlProxyDatabaseConfiguration::getDatabaseName, each -> each, (oldValue, currentValue) -> oldValue,
LinkedHashMap::new)), hbaseConfigs.stream().collect(
Collectors.toMap(
YamlHBaseConfiguration::getDatabaseName, each -> each, (oldValue, currentValue) -> oldValue,
LinkedHashMap::new)));
} | LinkedHashMap::new))); | public static YamlProxyConfiguration load(final String path) throws IOException {
YamlProxyServerConfiguration serverConfig = loadServerConfiguration(getResourceFile(String.join("/", path, SERVER_CONFIG_FILE)));
File configPath = getResourceFile(path);
Collection<YamlProxyDatabaseConfiguration> databaseConfigs = loadDatabaseConfigurations(configPath);
return new YamlProxyConfiguration(serverConfig, databaseConfigs.stream().collect(Collectors.toMap(
YamlProxyDatabaseConfiguration::getDatabaseName, each -> each, (oldValue, currentValue) -> oldValue,
LinkedHashMap::new)));
} | class ProxyConfigurationLoader {
private static final String SERVER_CONFIG_FILE = "server.yaml";
private static final Pattern SCHEMA_CONFIG_FILE_PATTERN = Pattern.compile("config-.+\\.yaml");
private static final Pattern HBASE_CONFIG_FILE = Pattern.compile("hbase-.+\\.yaml");
/**
* Load configuration of ShardingSphere-Proxy.
*
* @param path configuration path of ShardingSphere-Proxy
* @return configuration of ShardingSphere-Proxy
* @throws IOException IO exception
*/
@SneakyThrows(URISyntaxException.class)
private static File getResourceFile(final String path) {
URL url = ProxyConfigurationLoader.class.getResource(path);
return null == url ? new File(path) : new File(url.toURI().getPath());
}
private static YamlProxyServerConfiguration loadServerConfiguration(final File yamlFile) throws IOException {
YamlProxyServerConfiguration result = YamlEngine.unmarshal(yamlFile, YamlProxyServerConfiguration.class);
return null == result ? new YamlProxyServerConfiguration() : rebuildGlobalRuleConfiguration(result);
}
private static YamlProxyServerConfiguration rebuildGlobalRuleConfiguration(final YamlProxyServerConfiguration serverConfiguration) {
serverConfiguration.getRules().removeIf(each -> each instanceof YamlGlobalRuleConfiguration);
if (null != serverConfiguration.getAuthority()) {
serverConfiguration.getRules().add(serverConfiguration.getAuthority());
}
if (null != serverConfiguration.getTransaction()) {
serverConfiguration.getRules().add(serverConfiguration.getTransaction());
}
if (null != serverConfiguration.getSqlParser()) {
serverConfiguration.getRules().add(serverConfiguration.getSqlParser());
}
if (null != serverConfiguration.getSqlTranslator()) {
serverConfiguration.getRules().add(serverConfiguration.getSqlTranslator());
}
if (null != serverConfiguration.getTraffic()) {
serverConfiguration.getRules().add(serverConfiguration.getTraffic());
}
if (null != serverConfiguration.getLogging()) {
serverConfiguration.getRules().add(serverConfiguration.getLogging());
}
return serverConfiguration;
}
private static Collection<YamlProxyDatabaseConfiguration> loadDatabaseConfigurations(final File configPath) throws IOException {
Collection<String> loadedDatabaseNames = new HashSet<>();
Collection<YamlProxyDatabaseConfiguration> result = new LinkedList<>();
for (File each : findRuleConfigurationFiles(configPath)) {
loadDatabaseConfiguration(each).ifPresent(optional -> {
Preconditions.checkState(loadedDatabaseNames.add(optional.getDatabaseName()), "Database name `%s` must unique at all database configurations.", optional.getDatabaseName());
result.add(optional);
});
}
return result;
}
private static Optional<YamlProxyDatabaseConfiguration> loadDatabaseConfiguration(final File yamlFile) throws IOException {
YamlProxyDatabaseConfiguration result = YamlEngine.unmarshal(yamlFile, YamlProxyDatabaseConfiguration.class);
if (null == result) {
return Optional.empty();
}
if (null == result.getDatabaseName()) {
result.setDatabaseName(result.getSchemaName());
}
Preconditions.checkNotNull(result.getDatabaseName(), "Property `databaseName` in file `%s` is required.", yamlFile.getName());
checkDuplicateRule(result.getRules(), yamlFile);
return Optional.of(result);
}
private static void checkDuplicateRule(final Collection<YamlRuleConfiguration> ruleConfigs, final File yamlFile) {
if (ruleConfigs.isEmpty()) {
return;
}
Map<Class<? extends RuleConfiguration>, Long> ruleConfigTypeCountMap = ruleConfigs.stream()
.collect(Collectors.groupingBy(YamlRuleConfiguration::getRuleConfigurationType, Collectors.counting()));
Optional<Entry<Class<? extends RuleConfiguration>, Long>> duplicateRuleConfig = ruleConfigTypeCountMap.entrySet().stream().filter(each -> each.getValue() > 1).findFirst();
if (duplicateRuleConfig.isPresent()) {
throw new IllegalStateException(String.format("Duplicate rule tag `!%s` in file `%s`", getDuplicateRuleTagName(duplicateRuleConfig.get().getKey()), yamlFile.getName()));
}
}
@SuppressWarnings("rawtypes")
private static Object getDuplicateRuleTagName(final Class<? extends RuleConfiguration> ruleConfigClass) {
Optional<YamlRuleConfigurationSwapper> result = ShardingSphereServiceLoader.getServiceInstances(YamlRuleConfigurationSwapper.class)
.stream().filter(each -> ruleConfigClass.equals(each.getTypeClass())).findFirst();
return result.orElseThrow(() -> new IllegalStateException("Not find rule tag name of class " + ruleConfigClass));
}
private static File[] findRuleConfigurationFiles(final File path) {
return path.listFiles(each -> SCHEMA_CONFIG_FILE_PATTERN.matcher(each.getName()).matches());
}
private static File[] findHBaseConfigurationFiles(final File path) {
return path.listFiles(each -> HBASE_CONFIG_FILE.matcher(each.getName()).matches());
}
private static Collection<YamlHBaseConfiguration> loadHBaseConfigurations(final File configPath) throws IOException {
Collection<String> loadedHBaseDatabaseNames = new HashSet<>();
Collection<YamlHBaseConfiguration> result = new LinkedList<>();
for (File each : findHBaseConfigurationFiles(configPath)) {
loadHBaseConfiguration(each).ifPresent(optional -> {
Preconditions.checkState(loadedHBaseDatabaseNames.add(optional.getDatabaseName()), "HBase Database name `%s` must unique at all database configurations.", optional.getDatabaseName());
result.add(optional);
});
}
return result;
}
private static Optional<YamlHBaseConfiguration> loadHBaseConfiguration(final File yamlFile) throws IOException {
YamlHBaseConfiguration result = YamlEngine.unmarshal(yamlFile, YamlHBaseConfiguration.class);
if (null == result) {
return Optional.empty();
}
Preconditions.checkNotNull(result.getDatabaseName(), "Property `databaseName` in file `%s` is required.", yamlFile.getName());
return Optional.of(result);
}
} | class ProxyConfigurationLoader {
private static final String SERVER_CONFIG_FILE = "server.yaml";
private static final Pattern SCHEMA_CONFIG_FILE_PATTERN = Pattern.compile("config-.+\\.yaml");
/**
* Load configuration of ShardingSphere-Proxy.
*
* @param path configuration path of ShardingSphere-Proxy
* @return configuration of ShardingSphere-Proxy
* @throws IOException IO exception
*/
@SneakyThrows(URISyntaxException.class)
private static File getResourceFile(final String path) {
URL url = ProxyConfigurationLoader.class.getResource(path);
return null == url ? new File(path) : new File(url.toURI().getPath());
}
private static YamlProxyServerConfiguration loadServerConfiguration(final File yamlFile) throws IOException {
YamlProxyServerConfiguration result = YamlEngine.unmarshal(yamlFile, YamlProxyServerConfiguration.class);
return null == result ? new YamlProxyServerConfiguration() : rebuildGlobalRuleConfiguration(result);
}
private static YamlProxyServerConfiguration rebuildGlobalRuleConfiguration(final YamlProxyServerConfiguration serverConfiguration) {
serverConfiguration.getRules().removeIf(each -> each instanceof YamlGlobalRuleConfiguration);
if (null != serverConfiguration.getAuthority()) {
serverConfiguration.getRules().add(serverConfiguration.getAuthority());
}
if (null != serverConfiguration.getTransaction()) {
serverConfiguration.getRules().add(serverConfiguration.getTransaction());
}
if (null != serverConfiguration.getSqlParser()) {
serverConfiguration.getRules().add(serverConfiguration.getSqlParser());
}
if (null != serverConfiguration.getSqlTranslator()) {
serverConfiguration.getRules().add(serverConfiguration.getSqlTranslator());
}
if (null != serverConfiguration.getTraffic()) {
serverConfiguration.getRules().add(serverConfiguration.getTraffic());
}
if (null != serverConfiguration.getLogging()) {
serverConfiguration.getRules().add(serverConfiguration.getLogging());
}
return serverConfiguration;
}
private static Collection<YamlProxyDatabaseConfiguration> loadDatabaseConfigurations(final File configPath) throws IOException {
Collection<String> loadedDatabaseNames = new HashSet<>();
Collection<YamlProxyDatabaseConfiguration> result = new LinkedList<>();
for (File each : findRuleConfigurationFiles(configPath)) {
loadDatabaseConfiguration(each).ifPresent(optional -> {
Preconditions.checkState(loadedDatabaseNames.add(optional.getDatabaseName()), "Database name `%s` must unique at all database configurations.", optional.getDatabaseName());
result.add(optional);
});
}
return result;
}
private static Optional<YamlProxyDatabaseConfiguration> loadDatabaseConfiguration(final File yamlFile) throws IOException {
YamlProxyDatabaseConfiguration result = YamlEngine.unmarshal(yamlFile, YamlProxyDatabaseConfiguration.class);
if (null == result) {
return Optional.empty();
}
if (null == result.getDatabaseName()) {
result.setDatabaseName(result.getSchemaName());
}
Preconditions.checkNotNull(result.getDatabaseName(), "Property `databaseName` in file `%s` is required.", yamlFile.getName());
checkDuplicateRule(result.getRules(), yamlFile);
return Optional.of(result);
}
private static void checkDuplicateRule(final Collection<YamlRuleConfiguration> ruleConfigs, final File yamlFile) {
if (ruleConfigs.isEmpty()) {
return;
}
Map<Class<? extends RuleConfiguration>, Long> ruleConfigTypeCountMap = ruleConfigs.stream()
.collect(Collectors.groupingBy(YamlRuleConfiguration::getRuleConfigurationType, Collectors.counting()));
Optional<Entry<Class<? extends RuleConfiguration>, Long>> duplicateRuleConfig = ruleConfigTypeCountMap.entrySet().stream().filter(each -> each.getValue() > 1).findFirst();
if (duplicateRuleConfig.isPresent()) {
throw new IllegalStateException(String.format("Duplicate rule tag `!%s` in file `%s`", getDuplicateRuleTagName(duplicateRuleConfig.get().getKey()), yamlFile.getName()));
}
}
@SuppressWarnings("rawtypes")
private static Object getDuplicateRuleTagName(final Class<? extends RuleConfiguration> ruleConfigClass) {
Optional<YamlRuleConfigurationSwapper> result = ShardingSphereServiceLoader.getServiceInstances(YamlRuleConfigurationSwapper.class)
.stream().filter(each -> ruleConfigClass.equals(each.getTypeClass())).findFirst();
return result.orElseThrow(() -> new IllegalStateException("Not find rule tag name of class " + ruleConfigClass));
}
private static File[] findRuleConfigurationFiles(final File path) {
return path.listFiles(each -> SCHEMA_CONFIG_FILE_PATTERN.matcher(each.getName()).matches());
}
} |
Thinking about this a bit more, perhaps always having an empty PCollection here would be good. Then people can unconditionally do things like add up all the errors or check for them etc. | public PCollectionRowTuple expand(PCollectionRowTuple input) {
String queryString = config.getString("query");
if (queryString == null) {
throw new IllegalArgumentException("Configuration must provide a query string.");
}
SqlTransform transform = SqlTransform.query(queryString);
EnumerationType.Value dialect =
config.getLogicalTypeValue("dialect", EnumerationType.Value.class);
if (dialect != null) {
Class<? extends QueryPlanner> queryPlannerClass =
QUERY_PLANNERS.get(QUERY_ENUMERATION.toString(dialect));
if (queryPlannerClass != null) {
transform = transform.withQueryPlannerClass(queryPlannerClass);
}
}
String ddl = config.getString("ddl");
if (ddl != null) {
transform = transform.withDdlString(ddl);
}
Boolean autoload = config.getBoolean("autoload");
if (autoload != null && autoload) {
transform = transform.withAutoLoading(true);
} else {
transform = transform.withAutoLoading(false);
Map<String, TableProvider> tableProviders = new HashMap<>();
ServiceLoader.load(TableProvider.class)
.forEach(
(provider) -> {
tableProviders.put(provider.getTableType(), provider);
});
Collection<?> tableproviderList = config.getArray("tableproviders");
if (tableproviderList != null) {
for (Object nameObj : tableproviderList) {
if (nameObj != null) {
TableProvider p = tableProviders.get(nameObj);
if (p
!= null) {
transform = transform.withTableProvider(p.getTableType(), p);
}
}
}
}
}
ErrorCapture errors = new ErrorCapture();
PCollection<Row> output = input.apply(transform.withErrorsTransformer(errors));
List<PCollection<Row>> errorList = errors.getInputs();
if (errorList.size() == 0) {
return PCollectionRowTuple.of("output", output);
} else if (errorList.size() == 1) {
return PCollectionRowTuple.of("output", output, "errors", errorList.get(0));
} else {
throw new UnsupportedOperationException(
"SqlTransform currently only supports a single dead letter queue collection");
}
} | if (errorList.size() == 0) { | public PCollectionRowTuple expand(PCollectionRowTuple input) {
String queryString = config.getString("query");
if (queryString == null) {
throw new IllegalArgumentException("Configuration must provide a query string.");
}
SqlTransform transform = SqlTransform.query(queryString);
EnumerationType.Value dialect =
config.getLogicalTypeValue("dialect", EnumerationType.Value.class);
if (dialect != null) {
Class<? extends QueryPlanner> queryPlannerClass =
QUERY_PLANNERS.get(QUERY_ENUMERATION.toString(dialect));
if (queryPlannerClass != null) {
transform = transform.withQueryPlannerClass(queryPlannerClass);
}
}
String ddl = config.getString("ddl");
if (ddl != null) {
transform = transform.withDdlString(ddl);
}
Boolean autoload = config.getBoolean("autoload");
if (autoload != null && autoload) {
transform = transform.withAutoLoading(true);
} else {
transform = transform.withAutoLoading(false);
Map<String, TableProvider> tableProviders = new HashMap<>();
ServiceLoader.load(TableProvider.class)
.forEach(
(provider) -> {
tableProviders.put(provider.getTableType(), provider);
});
Collection<?> tableproviderList = config.getArray("tableproviders");
if (tableproviderList != null) {
for (Object nameObj : tableproviderList) {
if (nameObj != null) {
TableProvider p = tableProviders.get(nameObj);
if (p
!= null) {
transform = transform.withTableProvider(p.getTableType(), p);
}
}
}
}
}
ErrorCapture errors = new ErrorCapture();
PCollection<Row> output = input.apply(transform.withErrorsTransformer(errors));
List<PCollection<Row>> errorList = errors.getInputs();
if (errorList.size() == 0) {
PCollection<Row> emptyErrors =
input
.getPipeline()
.apply(Create.empty(BeamSqlRelUtils.getErrorRowSchema(Schema.of())));
return PCollectionRowTuple.of("output", output, "errors", emptyErrors);
} else if (errorList.size() == 1) {
return PCollectionRowTuple.of("output", output, "errors", errorList.get(0));
} else {
throw new UnsupportedOperationException(
"SqlTransform currently only supports a single dead letter queue collection");
}
} | class SqlSchemaTransform implements SchemaTransform {
final Row config;
public SqlSchemaTransform(Row config) {
this.config = config;
}
@Override
public PTransform<PCollectionRowTuple, PCollectionRowTuple> buildTransform() {
return new PTransform<PCollectionRowTuple, PCollectionRowTuple>() {
@Override
};
}
} | class SqlSchemaTransform implements SchemaTransform {
final Row config;
public SqlSchemaTransform(Row config) {
this.config = config;
}
@Override
public PTransform<PCollectionRowTuple, PCollectionRowTuple> buildTransform() {
return new PTransform<PCollectionRowTuple, PCollectionRowTuple>() {
@Override
};
}
} |
```suggestion this.deploymentMetricsMaintainer = duration(10, MINUTES); ``` With so many collisions already, and at such a low rate, this one probably needs more bump? | public Intervals(SystemName system) {
this.system = Objects.requireNonNull(system);
this.defaultInterval = duration(system.isCd() || system == SystemName.dev ? 1 : 5, MINUTES);
this.outstandingChangeDeployer = duration(3, MINUTES);
this.versionStatusUpdater = duration(3, MINUTES);
this.readyJobsTrigger = duration(1, MINUTES);
this.deploymentMetricsMaintainer = duration(6, MINUTES);
this.applicationOwnershipConfirmer = duration(12, HOURS);
this.systemUpgrader = duration(90, SECONDS);
this.jobRunner = duration(105, SECONDS);
this.osUpgrader = duration(1, MINUTES);
this.contactInformationMaintainer = duration(12, HOURS);
this.nameServiceDispatcher = duration(30, SECONDS);
this.costReportMaintainer = duration(2, HOURS);
this.resourceMeterMaintainer = duration(1, MINUTES);
this.cloudEventReporter = duration(30, MINUTES);
this.resourceTagMaintainer = duration(30, MINUTES);
this.systemRoutingPolicyMaintainer = duration(10, MINUTES);
this.applicationMetaDataGarbageCollector = duration(12, HOURS);
this.containerImageExpirer = duration(2, HOURS);
this.hostSwitchUpdater = duration(12, HOURS);
this.reindexingTriggerer = duration(1, HOURS);
this.endpointCertificateMaintainer = duration(12, HOURS);
} | this.deploymentMetricsMaintainer = duration(6, MINUTES); | public Intervals(SystemName system) {
this.system = Objects.requireNonNull(system);
this.defaultInterval = duration(system.isCd() || system == SystemName.dev ? 1 : 5, MINUTES);
this.outstandingChangeDeployer = duration(3, MINUTES);
this.versionStatusUpdater = duration(3, MINUTES);
this.readyJobsTrigger = duration(1, MINUTES);
this.deploymentMetricsMaintainer = duration(10, MINUTES);
this.applicationOwnershipConfirmer = duration(12, HOURS);
this.systemUpgrader = duration(90, SECONDS);
this.jobRunner = duration(90, SECONDS);
this.osUpgrader = duration(1, MINUTES);
this.contactInformationMaintainer = duration(12, HOURS);
this.nameServiceDispatcher = duration(30, SECONDS);
this.costReportMaintainer = duration(2, HOURS);
this.resourceMeterMaintainer = duration(1, MINUTES);
this.cloudEventReporter = duration(30, MINUTES);
this.resourceTagMaintainer = duration(30, MINUTES);
this.systemRoutingPolicyMaintainer = duration(10, MINUTES);
this.applicationMetaDataGarbageCollector = duration(12, HOURS);
this.containerImageExpirer = duration(2, HOURS);
this.hostSwitchUpdater = duration(12, HOURS);
this.reindexingTriggerer = duration(1, HOURS);
this.endpointCertificateMaintainer = duration(12, HOURS);
} | class Intervals {
private static final Duration MAX_CD_INTERVAL = Duration.ofHours(1);
private final SystemName system;
private final Duration defaultInterval;
private final Duration outstandingChangeDeployer;
private final Duration versionStatusUpdater;
private final Duration readyJobsTrigger;
private final Duration deploymentMetricsMaintainer;
private final Duration applicationOwnershipConfirmer;
private final Duration systemUpgrader;
private final Duration jobRunner;
private final Duration osUpgrader;
private final Duration contactInformationMaintainer;
private final Duration nameServiceDispatcher;
private final Duration costReportMaintainer;
private final Duration resourceMeterMaintainer;
private final Duration cloudEventReporter;
private final Duration resourceTagMaintainer;
private final Duration systemRoutingPolicyMaintainer;
private final Duration applicationMetaDataGarbageCollector;
private final Duration containerImageExpirer;
private final Duration hostSwitchUpdater;
private final Duration reindexingTriggerer;
private final Duration endpointCertificateMaintainer;
private Duration duration(long amount, TemporalUnit unit) {
Duration duration = Duration.of(amount, unit);
if (system.isCd() && duration.compareTo(MAX_CD_INTERVAL) > 0) {
return MAX_CD_INTERVAL;
}
return duration;
}
} | class Intervals {
private static final Duration MAX_CD_INTERVAL = Duration.ofHours(1);
private final SystemName system;
private final Duration defaultInterval;
private final Duration outstandingChangeDeployer;
private final Duration versionStatusUpdater;
private final Duration readyJobsTrigger;
private final Duration deploymentMetricsMaintainer;
private final Duration applicationOwnershipConfirmer;
private final Duration systemUpgrader;
private final Duration jobRunner;
private final Duration osUpgrader;
private final Duration contactInformationMaintainer;
private final Duration nameServiceDispatcher;
private final Duration costReportMaintainer;
private final Duration resourceMeterMaintainer;
private final Duration cloudEventReporter;
private final Duration resourceTagMaintainer;
private final Duration systemRoutingPolicyMaintainer;
private final Duration applicationMetaDataGarbageCollector;
private final Duration containerImageExpirer;
private final Duration hostSwitchUpdater;
private final Duration reindexingTriggerer;
private final Duration endpointCertificateMaintainer;
private Duration duration(long amount, TemporalUnit unit) {
Duration duration = Duration.of(amount, unit);
if (system.isCd() && duration.compareTo(MAX_CD_INTERVAL) > 0) {
return MAX_CD_INTERVAL;
}
return duration;
}
} |
Simplified the logic. Currently, we are allowing empty application Id. @JonathanGiles do we want to disallow empty application Id? | public HttpLogOptions setApplicationId(final String applicationId) {
if (applicationId != null
&& (applicationId.length() > MAX_APPLICATION_ID_LENGTH || applicationId.contains(" "))) {
if (applicationId.contains(" ")) {
throw logger
.logExceptionAsError(new IllegalArgumentException("'applicationId' must not contain a space."));
} else {
throw logger
.logExceptionAsError(new IllegalArgumentException("'applicationId' length cannot be greater than "
+ MAX_APPLICATION_ID_LENGTH));
}
} else {
this.applicationId = applicationId;
}
return this;
} | if (applicationId != null | public HttpLogOptions setApplicationId(final String applicationId) {
if (!CoreUtils.isNullOrEmpty(applicationId)) {
if (applicationId.length() > MAX_APPLICATION_ID_LENGTH) {
throw logger
.logExceptionAsError(new IllegalArgumentException("'applicationId' length cannot be greater than "
+ MAX_APPLICATION_ID_LENGTH));
} else if (applicationId.contains(" ")) {
throw logger
.logExceptionAsError(new IllegalArgumentException("'applicationId' must not contain a space."));
} else {
this.applicationId = applicationId;
}
}
return this;
} | class HttpLogOptions {
private String applicationId;
private HttpLogDetailLevel logLevel;
private Set<String> allowedHeaderNames;
private Set<String> allowedQueryParamNames;
private final ClientLogger logger = new ClientLogger(HttpLogOptions.class);
private static final int MAX_APPLICATION_ID_LENGTH = 24;
private static final List<String> DEFAULT_HEADERS_WHITELIST = Arrays.asList(
"x-ms-client-request-id",
"x-ms-return-client-request-id",
"traceparent",
"Accept",
"Cache-Control",
"Connection",
"Content-Length",
"Content-Type",
"Date",
"ETag",
"Expires",
"If-Match",
"If-Modified-Since",
"If-None-Match",
"If-Unmodified-Since",
"Last-Modified",
"Pragma",
"Request-Id",
"Retry-After",
"Server",
"Transfer-Encoding",
"User-Agent"
);
/**
* Creates a new instance that does not log any information about HTTP requests or responses.
*/
public HttpLogOptions() {
logLevel = HttpLogDetailLevel.NONE;
allowedHeaderNames = new HashSet<>(DEFAULT_HEADERS_WHITELIST);
allowedQueryParamNames = new HashSet<>();
applicationId = null;
}
/**
* Gets the level of detail to log on HTTP messages.
*
* @return The {@link HttpLogDetailLevel}.
*/
public HttpLogDetailLevel getLogLevel() {
return logLevel;
}
/**
* Sets the level of detail to log on Http messages.
*
* <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param logLevel The {@link HttpLogDetailLevel}.
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setLogLevel(final HttpLogDetailLevel logLevel) {
this.logLevel = logLevel == null ? HttpLogDetailLevel.NONE : logLevel;
return this;
}
/**
* Gets the application specific id.
*
* @return The application specific id.
*/
public String getApplicationId() {
return applicationId;
}
/**
* Sets the custom application specific id supplied by the user of the client library.
*
* @param applicationId The user specified application id.
* @return The updated HttpLogOptions object.
*/
/**
* Gets the whitelisted headers that should be logged.
*
* @return The list of whitelisted headers.
*/
public Set<String> getAllowedHeaderNames() {
return allowedHeaderNames;
}
/**
* Sets the given whitelisted headers that should be logged.
*
* <p>
* This method sets the provided header names to be the whitelisted header names which will be logged for all HTTP
* requests and responses, overwriting any previously configured headers, including the default set. Additionally,
* users can use {@link HttpLogOptions
* {@link HttpLogOptions
* allowed header names.
* </p>
*
* @param allowedHeaderNames The list of whitelisted header names from the user.
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setAllowedHeaderNames(final Set<String> allowedHeaderNames) {
this.allowedHeaderNames = allowedHeaderNames == null ? new HashSet<>() : allowedHeaderNames;
return this;
}
/**
* Sets the given whitelisted header to the default header set that should be logged.
*
* @param allowedHeaderName The whitelisted header name from the user.
* @return The updated HttpLogOptions object.
* @throws NullPointerException If {@code allowedHeaderName} is {@code null}.
*/
public HttpLogOptions addAllowedHeaderName(final String allowedHeaderName) {
Objects.requireNonNull(allowedHeaderName);
this.allowedHeaderNames.add(allowedHeaderName);
return this;
}
/**
* Gets the whitelisted query parameters.
*
* @return The list of whitelisted query parameters.
*/
public Set<String> getAllowedQueryParamNames() {
return allowedQueryParamNames;
}
/**
* Sets the given whitelisted query params to be displayed in the logging info.
*
* @param allowedQueryParamNames The list of whitelisted query params from the user.
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setAllowedQueryParamNames(final Set<String> allowedQueryParamNames) {
this.allowedQueryParamNames = allowedQueryParamNames == null ? new HashSet<>() : allowedQueryParamNames;
return this;
}
/**
* Sets the given whitelisted query param that should be logged.
*
* @param allowedQueryParamName The whitelisted query param name from the user.
* @return The updated HttpLogOptions object.
* @throws NullPointerException If {@code allowedQueryParamName} is {@code null}.
*/
public HttpLogOptions addAllowedQueryParamName(final String allowedQueryParamName) {
this.allowedQueryParamNames.add(allowedQueryParamName);
return this;
}
} | class HttpLogOptions {
private String applicationId;
private HttpLogDetailLevel logLevel;
private Set<String> allowedHeaderNames;
private Set<String> allowedQueryParamNames;
private final ClientLogger logger = new ClientLogger(HttpLogOptions.class);
private static final int MAX_APPLICATION_ID_LENGTH = 24;
private static final List<String> DEFAULT_HEADERS_WHITELIST = Arrays.asList(
"x-ms-client-request-id",
"x-ms-return-client-request-id",
"traceparent",
"Accept",
"Cache-Control",
"Connection",
"Content-Length",
"Content-Type",
"Date",
"ETag",
"Expires",
"If-Match",
"If-Modified-Since",
"If-None-Match",
"If-Unmodified-Since",
"Last-Modified",
"Pragma",
"Request-Id",
"Retry-After",
"Server",
"Transfer-Encoding",
"User-Agent"
);
/**
* Creates a new instance that does not log any information about HTTP requests or responses.
*/
public HttpLogOptions() {
logLevel = HttpLogDetailLevel.NONE;
allowedHeaderNames = new HashSet<>(DEFAULT_HEADERS_WHITELIST);
allowedQueryParamNames = new HashSet<>();
applicationId = null;
}
/**
* Gets the level of detail to log on HTTP messages.
*
* @return The {@link HttpLogDetailLevel}.
*/
public HttpLogDetailLevel getLogLevel() {
return logLevel;
}
/**
* Sets the level of detail to log on Http messages.
*
* <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param logLevel The {@link HttpLogDetailLevel}.
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setLogLevel(final HttpLogDetailLevel logLevel) {
this.logLevel = logLevel == null ? HttpLogDetailLevel.NONE : logLevel;
return this;
}
/**
* Gets the whitelisted headers that should be logged.
*
* @return The list of whitelisted headers.
*/
public Set<String> getAllowedHeaderNames() {
return allowedHeaderNames;
}
/**
* Sets the given whitelisted headers that should be logged.
*
* <p>
* This method sets the provided header names to be the whitelisted header names which will be logged for all HTTP
* requests and responses, overwriting any previously configured headers, including the default set. Additionally,
* users can use {@link HttpLogOptions
* {@link HttpLogOptions
* allowed header names.
* </p>
*
* @param allowedHeaderNames The list of whitelisted header names from the user.
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setAllowedHeaderNames(final Set<String> allowedHeaderNames) {
this.allowedHeaderNames = allowedHeaderNames == null ? new HashSet<>() : allowedHeaderNames;
return this;
}
/**
* Sets the given whitelisted header to the default header set that should be logged.
*
* @param allowedHeaderName The whitelisted header name from the user.
* @return The updated HttpLogOptions object.
* @throws NullPointerException If {@code allowedHeaderName} is {@code null}.
*/
public HttpLogOptions addAllowedHeaderName(final String allowedHeaderName) {
Objects.requireNonNull(allowedHeaderName);
this.allowedHeaderNames.add(allowedHeaderName);
return this;
}
/**
* Gets the whitelisted query parameters.
*
* @return The list of whitelisted query parameters.
*/
public Set<String> getAllowedQueryParamNames() {
return allowedQueryParamNames;
}
/**
* Sets the given whitelisted query params to be displayed in the logging info.
*
* @param allowedQueryParamNames The list of whitelisted query params from the user.
* @return The updated HttpLogOptions object.
*/
public HttpLogOptions setAllowedQueryParamNames(final Set<String> allowedQueryParamNames) {
this.allowedQueryParamNames = allowedQueryParamNames == null ? new HashSet<>() : allowedQueryParamNames;
return this;
}
/**
* Sets the given whitelisted query param that should be logged.
*
* @param allowedQueryParamName The whitelisted query param name from the user.
* @return The updated HttpLogOptions object.
* @throws NullPointerException If {@code allowedQueryParamName} is {@code null}.
*/
public HttpLogOptions addAllowedQueryParamName(final String allowedQueryParamName) {
this.allowedQueryParamNames.add(allowedQueryParamName);
return this;
}
/**
* Gets the application specific id.
*
* @return The application specific id.
*/
public String getApplicationId() {
return applicationId;
}
/**
* Sets the custom application specific id supplied by the user of the client library.
*
* @param applicationId The user specified application id.
* @return The updated HttpLogOptions object.
*/
} |
Please 1. Check the valid of metadata 2. Check the permission The logic of the two should not be interspersed and executed together | public void analyze(Analyzer analyzer) throws UserException {
super.analyze(analyzer);
if (this.dbTableName != null) {
String dbName;
if (Strings.isNullOrEmpty(this.dbTableName.getDb())) {
dbName = analyzer.getDefaultDb();
} else {
dbName = ClusterNamespace.getFullName(analyzer.getClusterName(), this.dbTableName.getDb());
}
if (Strings.isNullOrEmpty(dbName)) {
ErrorReport.reportAnalysisException(ErrorCode.ERR_NO_DB_ERROR);
}
this.db = analyzer.getCatalog().getDbOrAnalysisException(dbName);
String tblName = this.dbTableName.getTbl();
if (Strings.isNullOrEmpty(tblName)) {
this.tables = this.db.getTables();
for (Table table : this.tables) {
checkAnalyzePriv(dbName, table.getName());
}
} else {
Table table = this.db.getTableOrAnalysisException(tblName);
this.tables = Collections.singletonList(table);
checkAnalyzePriv(dbName, table.getName());
}
if (this.columnNames == null || this.columnNames.isEmpty()) {
setTableIdToColumnName();
} else {
Table table = this.db.getTableOrAnalysisException(tblName);
for (String columnName : this.columnNames) {
Column column = table.getColumn(columnName);
if (column == null) {
ErrorReport.reportAnalysisException(ErrorCode.ERR_WRONG_COLUMN_NAME, columnName);
}
}
this.tableIdToColumnName.put(table.getId(), this.columnNames);
}
} else {
String dbName = analyzer.getDefaultDb();
if (Strings.isNullOrEmpty(dbName)) {
ErrorReport.reportAnalysisException(ErrorCode.ERR_NO_DB_ERROR);
}
this.db = analyzer.getCatalog().getDbOrAnalysisException(dbName);
this.tables = this.db.getTables();
for (Table table : this.tables) {
checkAnalyzePriv(dbName, table.getName());
}
setTableIdToColumnName();
}
if (this.properties != null) {
for (Map.Entry<String, String> pros : this.properties.entrySet()) {
if (!"cbo_statistics_task_timeout_sec".equals(pros.getKey())) {
throw new AnalysisException("Unsupported property: " + pros.getKey());
}
if (!StringUtils.isNumeric(pros.getValue()) || Integer.parseInt(pros.getValue()) <= 0) {
throw new AnalysisException("Invalid property value: " + pros.getValue());
}
}
} else {
this.properties = Maps.newHashMap();
this.properties.put("cbo_statistics_task_timeout_sec", String.valueOf(Config.cbo_statistics_task_timeout_sec));
}
} | if (Strings.isNullOrEmpty(dbName)) { | public void analyze(Analyzer analyzer) throws UserException {
super.analyze(analyzer);
if (this.dbTableName != null) {
this.dbTableName.analyze(analyzer);
String dbName = this.dbTableName.getDb();
String tblName = this.dbTableName.getTbl();
checkAnalyzePriv(dbName, tblName);
Database db = analyzer.getCatalog().getDbOrAnalysisException(dbName);
Table table = db.getTableOrAnalysisException(tblName);
if (this.columnNames != null && !this.columnNames.isEmpty()) {
table.readLock();
try {
List<String> baseSchema = table.getBaseSchema(false)
.stream().map(Column::getName).collect(Collectors.toList());
Optional<String> optional = this.columnNames.stream()
.filter(entity -> !baseSchema.contains(entity)).findFirst();
if (optional.isPresent()) {
String columnName = optional.get();
ErrorReport.reportAnalysisException(ErrorCode.ERR_WRONG_COLUMN_NAME, columnName);
}
} finally {
table.readUnlock();
}
}
this.dbId = db.getId();
this.tblIds.add(table.getId());
} else {
String dbName = analyzer.getDefaultDb();
if (Strings.isNullOrEmpty(dbName)) {
ErrorReport.reportAnalysisException(ErrorCode.ERR_NO_DB_ERROR);
}
Database db = analyzer.getCatalog().getDbOrAnalysisException(dbName);
db.readLock();
try {
List<Table> tables = db.getTables();
for (Table table : tables) {
checkAnalyzePriv(dbName, table.getName());
}
this.dbId = db.getId();
for (Table table : tables) {
long tblId = table.getId();
this.tblIds.add(tblId);
}
} finally {
db.readUnlock();
}
}
checkProperties();
} | class AnalyzeStmt extends DdlStmt {
private final TableName dbTableName;
private final List<String> columnNames;
private Map<String, String> properties;
private Database db;
private List<Table> tables;
private final Map<Long, List<String>> tableIdToColumnName = Maps.newHashMap();
public AnalyzeStmt(TableName dbTableName, List<String> columns, Map<String, String> properties) {
this.dbTableName = dbTableName;
this.columnNames = columns;
this.properties = properties;
}
public Database getDb() {
Preconditions.checkArgument(isAnalyzed(),
"The db name must be obtained after the parsing is complete");
return this.db;
}
public List<Table> getTables() {
Preconditions.checkArgument(isAnalyzed(),
"The db name must be obtained after the parsing is complete");
return this.tables;
}
public Map<Long, List<String>> getTableIdToColumnName() {
Preconditions.checkArgument(isAnalyzed(),
"The db name must be obtained after the parsing is complete");
return this.tableIdToColumnName;
}
public Map<String, String> getProperties() {
return this.properties;
}
@Override
@Override
public RedirectStatus getRedirectStatus() {
return RedirectStatus.FORWARD_NO_SYNC;
}
private void checkAnalyzePriv(String dbName, String tblName) throws AnalysisException {
PaloAuth auth = Catalog.getCurrentCatalog().getAuth();
if (!auth.checkTblPriv(ConnectContext.get(), dbName, tblName, PrivPredicate.SELECT)) {
ErrorReport.reportAnalysisException(
ErrorCode.ERR_TABLEACCESS_DENIED_ERROR,
"ANALYZE",
ConnectContext.get().getQualifiedUser(),
ConnectContext.get().getRemoteIP(),
dbName + ": " + tblName);
}
}
private void setTableIdToColumnName() {
for (Table table : this.tables) {
long tableId = table.getId();
List<Column> baseSchema = table.getBaseSchema();
List<String> colNames = Lists.newArrayList();
baseSchema.stream().map(Column::getName).forEach(colNames::add);
this.tableIdToColumnName.put(tableId, colNames);
}
}
} | class AnalyzeStmt extends DdlStmt {
private static final Logger LOG = LogManager.getLogger(AnalyzeStmt.class);
public static final String CBO_STATISTICS_TASK_TIMEOUT_SEC = "cbo_statistics_task_timeout_sec";
private static final ImmutableSet<String> PROPERTIES_SET = new ImmutableSet.Builder<String>()
.add(CBO_STATISTICS_TASK_TIMEOUT_SEC)
.build();
public static final Predicate<Long> DESIRED_TASK_TIMEOUT_SEC = (v) -> v > 0L;
private final TableName dbTableName;
private final List<String> columnNames;
private final Map<String, String> properties;
private long dbId;
private final Set<Long> tblIds = Sets.newHashSet();
public AnalyzeStmt(TableName dbTableName, List<String> columns, Map<String, String> properties) {
this.dbTableName = dbTableName;
this.columnNames = columns;
this.properties = properties == null ? Maps.newHashMap() : properties;
}
public long getDbId() {
Preconditions.checkArgument(isAnalyzed(),
"The dbId must be obtained after the parsing is complete");
return this.dbId;
}
public Set<Long> getTblIds() {
Preconditions.checkArgument(isAnalyzed(),
"The tblIds must be obtained after the parsing is complete");
return this.tblIds;
}
public Database getDb() throws AnalysisException {
Preconditions.checkArgument(isAnalyzed(),
"The db must be obtained after the parsing is complete");
return this.analyzer.getCatalog().getDbOrAnalysisException(this.dbId);
}
public List<Table> getTables() throws AnalysisException {
Preconditions.checkArgument(isAnalyzed(),
"The tables must be obtained after the parsing is complete");
Database db = getDb();
List<Table> tables = Lists.newArrayList();
db.readLock();
try {
for (Long tblId : this.tblIds) {
Table table = db.getTableOrAnalysisException(tblId);
tables.add(table);
}
} finally {
db.readUnlock();
}
return tables;
}
public Map<Long, List<String>> getTableIdToColumnName() throws AnalysisException {
Preconditions.checkArgument(isAnalyzed(),
"The db name must be obtained after the parsing is complete");
Map<Long, List<String>> tableIdToColumnName = Maps.newHashMap();
List<Table> tables = getTables();
if (this.columnNames == null || this.columnNames.isEmpty()) {
for (Table table : tables) {
table.readLock();
try {
long tblId = table.getId();
List<Column> baseSchema = table.getBaseSchema();
List<String> colNames = Lists.newArrayList();
baseSchema.stream().map(Column::getName).forEach(colNames::add);
tableIdToColumnName.put(tblId, colNames);
} finally {
table.readUnlock();
}
}
} else {
for (Long tblId : this.tblIds) {
tableIdToColumnName.put(tblId, this.columnNames);
}
}
return tableIdToColumnName;
}
public Map<String, String> getProperties() {
return this.properties;
}
@Override
@Override
public RedirectStatus getRedirectStatus() {
return RedirectStatus.FORWARD_NO_SYNC;
}
private void checkAnalyzePriv(String dbName, String tblName) throws AnalysisException {
PaloAuth auth = Catalog.getCurrentCatalog().getAuth();
if (!auth.checkTblPriv(ConnectContext.get(), dbName, tblName, PrivPredicate.SELECT)) {
ErrorReport.reportAnalysisException(
ErrorCode.ERR_TABLEACCESS_DENIED_ERROR,
"ANALYZE",
ConnectContext.get().getQualifiedUser(),
ConnectContext.get().getRemoteIP(),
dbName + ": " + tblName);
}
}
private void checkProperties() throws UserException {
Optional<String> optional = this.properties.keySet().stream().filter(
entity -> !PROPERTIES_SET.contains(entity)).findFirst();
if (optional.isPresent()) {
throw new AnalysisException(optional.get() + " is invalid property");
}
long taskTimeout = ((Long) Util.getLongPropertyOrDefault(this.properties.get(CBO_STATISTICS_TASK_TIMEOUT_SEC),
Config.max_cbo_statistics_task_timeout_sec, DESIRED_TASK_TIMEOUT_SEC,
CBO_STATISTICS_TASK_TIMEOUT_SEC + " should > 0")).intValue();
this.properties.put(CBO_STATISTICS_TASK_TIMEOUT_SEC, String.valueOf(taskTimeout));
}
} |
We can use a constant for ".balo". There is already one in ProjectDirConstants. We can copy it to ProjectConstants and use it. | private static Path validateBaloPath(String baloPath) {
if (baloPath == null) {
throw new IllegalArgumentException("baloPath cannot be null");
}
Path absBaloPath = Paths.get(baloPath).toAbsolutePath();
if (!absBaloPath.toFile().canRead()) {
throw new RuntimeException("insufficient privileges to balo: " + absBaloPath);
}
if (!absBaloPath.toFile().exists()) {
throw new RuntimeException("balo does not exists: " + baloPath);
}
if (!absBaloPath.toString().endsWith(".balo")) {
throw new RuntimeException("Not a balo: " + baloPath);
}
return absBaloPath;
} | if (!absBaloPath.toString().endsWith(".balo")) { | private static Path validateBaloPath(String baloPath) {
if (baloPath == null) {
throw new IllegalArgumentException("baloPath cannot be null");
}
Path absBaloPath = Paths.get(baloPath).toAbsolutePath();
if (!absBaloPath.toFile().canRead()) {
throw new RuntimeException("insufficient privileges to balo: " + absBaloPath);
}
if (!absBaloPath.toFile().exists()) {
throw new RuntimeException("balo does not exists: " + baloPath);
}
if (!absBaloPath.toString().endsWith(BLANG_COMPILED_PKG_BINARY_EXT)) {
throw new RuntimeException("Not a balo: " + baloPath);
}
return absBaloPath;
} | class BaloFiles {
private static final PathMatcher matcher = FileSystems.getDefault().getPathMatcher("glob:**.bal");
private static Gson gson = new Gson();
private BaloFiles() {
}
public static PackageData loadPackageData(String baloPath) {
Path absBaloPath = validateBaloPath(baloPath);
URI zipURI = URI.create("jar:" + absBaloPath.toUri().toString());
try (FileSystem zipFileSystem = FileSystems.newFileSystem(zipURI, new HashMap<>())) {
Path packageJsonPathInBalo = zipFileSystem.getPath("package.json");
BallerinaToml ballerinaToml = loadBallerinaToml(packageJsonPathInBalo);
Path defaultModulePathInBalo = zipFileSystem.getPath(MODULES_ROOT, ballerinaToml.getPackage().getName());
ModuleData defaultModule = loadModule(defaultModulePathInBalo);
Path modulesPathInBalo = zipFileSystem.getPath(MODULES_ROOT);
List<ModuleData> otherModules = loadOtherModules(modulesPathInBalo, defaultModulePathInBalo);
return PackageData.from(absBaloPath, defaultModule, otherModules);
} catch (IOException e) {
throw new RuntimeException("cannot read balo:" + baloPath);
}
}
private static BallerinaToml loadBallerinaToml(Path packageJsonPath) {
BallerinaToml ballerinaToml = new BallerinaToml();
if (!Files.exists(packageJsonPath)) {
throw new RuntimeException("package.json does not exists:" + packageJsonPath);
}
PackageJson packageJson;
try {
packageJson = gson.fromJson(Files.newBufferedReader(packageJsonPath), PackageJson.class);
} catch (IOException e) {
throw new RuntimeException("package.json does not exists:" + packageJsonPath);
}
validatePackageJson(packageJson);
Package tomlPackage = new Package();
tomlPackage.setOrg(packageJson.getOrganization());
tomlPackage.setName(packageJson.getName());
tomlPackage.setVersion(packageJson.getVersion());
tomlPackage.setLicense(packageJson.getLicenses());
tomlPackage.setAuthors(packageJson.getAuthors());
tomlPackage.setRepository(packageJson.getSourceRepository());
tomlPackage.setKeywords(packageJson.getKeywords());
ballerinaToml.setPkg(tomlPackage);
return ballerinaToml;
}
private static void validatePackageJson(PackageJson packageJson) {
if (packageJson.getOrganization() == null || "".equals(packageJson.getOrganization())) {
throw new RuntimeException("'organization' does not exists in 'package.json'");
}
if (packageJson.getName() == null || "".equals(packageJson.getName())) {
throw new RuntimeException("'name' does not exists in 'package.json'");
}
if (packageJson.getVersion() == null || "".equals(packageJson.getVersion())) {
throw new RuntimeException("'version' does not exists in 'package.json'");
}
}
private static ModuleData loadModule(Path modulePath) {
if (!Files.exists(modulePath)) {
throw new RuntimeException("module does not exists:" + modulePath);
}
String moduleName = String.valueOf(modulePath.getFileName());
if (moduleName.contains(".")) {
moduleName = moduleName.split("\\.")[1];
moduleName = moduleName.replace("/", "");
}
if (!ProjectUtils.validateModuleName(moduleName)) {
throw new RuntimeException("Invalid module name : '" + moduleName + "' :\n" +
"Module name can only contain alphanumerics, underscores and periods " +
"and the maximum length is 256 characters");
}
List<DocumentData> srcDocs = loadDocuments(modulePath);
List<DocumentData> testSrcDocs = Collections.emptyList();
return ModuleData.from(modulePath, srcDocs, testSrcDocs);
}
private static List<DocumentData> loadDocuments(Path dirPath) {
try (Stream<Path> pathStream = Files.walk(dirPath, 1)) {
return pathStream
.filter(matcher::matches)
.map(BaloFiles::loadDocument)
.collect(Collectors.toList());
} catch (IOException e) {
throw new RuntimeException(e);
}
}
private static DocumentData loadDocument(Path documentFilePath) {
Path fileNamePath = documentFilePath.getFileName();
String name = fileNamePath != null ? fileNamePath.toString() : "";
return DocumentData.from(name, String.valueOf(documentFilePath));
}
private static List<ModuleData> loadOtherModules(Path modulesDirPath, Path defaultModulePath) {
if (!Files.isDirectory(modulesDirPath)) {
throw new RuntimeException("'modules' directory does not exists:" + modulesDirPath);
}
try (Stream<Path> pathStream = Files.walk(modulesDirPath, 1)) {
return pathStream
.filter(path -> !path.equals(modulesDirPath))
.filter(path -> !String.valueOf(path).equals("/" + defaultModulePath + "/"))
.filter(Files::isDirectory)
.map(BaloFiles::loadModule)
.collect(Collectors.toList());
} catch (IOException e) {
throw new RuntimeException(e);
}
}
} | class name to utils
private BaloFiles() {
} |
Could `3L` be replaced to `columnNames.size()`? So if `columnNames` value changed, it's not necessary to change the hard coded `3L`. | public void assertCalculateSuccess() {
Iterable<Object> calculate = new CRC32MatchMySQLSingleTableDataCalculator().calculate(dataCalculateParameter);
long calculateSize = StreamSupport.stream(calculate.spliterator(), false).count();
assertThat(calculateSize, is(3L));
} | assertThat(calculateSize, is(3L)); | public void assertCalculateSuccess() {
Iterable<Object> calculate = new CRC32MatchMySQLSingleTableDataCalculator().calculate(dataCalculateParameter);
long actualDatabaseTypesSize = StreamSupport.stream(calculate.spliterator(), false).count();
long expectedDatabaseTypesSize = dataCalculateParameter.getColumnNames().size();
assertThat(actualDatabaseTypesSize, is(expectedDatabaseTypesSize));
} | class CRC32MatchMySQLSingleTableDataCalculatorTest {
@Mock
private DataCalculateParameter dataCalculateParameter;
private PipelineDataSourceWrapper pipelineJobPrepareFailedException;
@Before
public void setUp() throws SQLException {
pipelineJobPrepareFailedException = mock(PipelineDataSourceWrapper.class, RETURNS_DEEP_STUBS);
Collection<String> columnNames = Arrays.asList("fieldOne", "fieldTwo", "fieldThree");
when(dataCalculateParameter.getLogicTableName()).thenReturn("tableName");
when(dataCalculateParameter.getColumnNames()).thenReturn(columnNames);
when(dataCalculateParameter.getDataSource()).thenReturn(pipelineJobPrepareFailedException);
}
@Test
public void assertCRC32MatchMySQLSingleTableDataCalculatorSuccess() {
String algorithmType = new CRC32MatchMySQLSingleTableDataCalculator().getAlgorithmType();
assertThat(algorithmType, is("CRC32_MATCH"));
}
@Test
public void assertGetDatabaseTypesSuccess() {
Collection<String> databaseTypes = new CRC32MatchMySQLSingleTableDataCalculator().getDatabaseTypes();
assertThat(databaseTypes.size(), is(1));
assertThat(databaseTypes.stream().findFirst().get(), is("MySQL"));
}
@Test
@Test(expected = PipelineDataConsistencyCheckFailedException.class)
public void assertCalculateFailed() throws SQLException {
Connection connection = mock(Connection.class, RETURNS_DEEP_STUBS);
when(pipelineJobPrepareFailedException.getConnection()).thenReturn(connection);
when(connection.prepareStatement(anyString())).thenThrow(new SQLException());
new CRC32MatchMySQLSingleTableDataCalculator().calculate(dataCalculateParameter);
}
} | class CRC32MatchMySQLSingleTableDataCalculatorTest {
@Mock
private DataCalculateParameter dataCalculateParameter;
private PipelineDataSourceWrapper pipelineDataSource;
private Connection connection;
@Mock
private PreparedStatement preparedStatement;
@Mock
private ResultSet resultSet;
@Before
public void setUp() throws SQLException {
pipelineDataSource = mock(PipelineDataSourceWrapper.class, RETURNS_DEEP_STUBS);
connection = mock(Connection.class, RETURNS_DEEP_STUBS);
Collection<String> columnNames = Arrays.asList("fieldOne", "fieldTwo", "fieldThree");
when(dataCalculateParameter.getLogicTableName()).thenReturn("tableName");
when(dataCalculateParameter.getColumnNames()).thenReturn(columnNames);
when(dataCalculateParameter.getDataSource()).thenReturn(pipelineDataSource);
}
@Test
public void assertCRC32MatchMySQLSingleTableDataCalculatorSuccess() {
String actualAlgorithmType = new CRC32MatchMySQLSingleTableDataCalculator().getAlgorithmType();
String expectedAlgorithmType = "CRC32_MATCH";
assertThat(actualAlgorithmType, is(expectedAlgorithmType));
}
@Test
public void assertGetDatabaseTypesSuccess() {
Collection<String> actualDatabaseTypes = new CRC32MatchMySQLSingleTableDataCalculator().getDatabaseTypes();
long actualDatabaseTypesSize = actualDatabaseTypes.size();
long expectedDatabaseTypesSize = new Long(1);
String actualDatabaseTypesFirstElement = actualDatabaseTypes.stream().findFirst().get();
String expectedDatabaseTypesFirstElement = "MySQL";
assertThat(actualDatabaseTypesSize, is(expectedDatabaseTypesSize));
assertThat(actualDatabaseTypesFirstElement, is(expectedDatabaseTypesFirstElement));
}
@Test
@Test
public void assertCalculateWithQuerySuccess() throws SQLException {
String sqlCommandForFieldOne = "SELECT BIT_XOR(CAST(CRC32(`fieldOne`) AS UNSIGNED)) AS checksum FROM `tableName`";
String sqlCommandForFieldTwo = "SELECT BIT_XOR(CAST(CRC32(`fieldTwo`) AS UNSIGNED)) AS checksum FROM `tableName`";
String sqlCommandForFieldThree = "SELECT BIT_XOR(CAST(CRC32(`fieldThree`) AS UNSIGNED)) AS checksum FROM `tableName`";
when(pipelineDataSource.getConnection()).thenReturn(connection);
when(connection.prepareStatement(sqlCommandForFieldOne)).thenReturn(preparedStatement);
when(connection.prepareStatement(sqlCommandForFieldTwo)).thenReturn(preparedStatement);
when(connection.prepareStatement(sqlCommandForFieldThree)).thenReturn(preparedStatement);
when(preparedStatement.executeQuery()).thenReturn(resultSet);
Iterable<Object> calculate = new CRC32MatchMySQLSingleTableDataCalculator().calculate(dataCalculateParameter);
long actualDatabaseTypesSize = StreamSupport.stream(calculate.spliterator(), false).count();
long expectedDatabaseTypesSize = dataCalculateParameter.getColumnNames().size();
assertThat(actualDatabaseTypesSize, is(expectedDatabaseTypesSize));
}
@Test(expected = PipelineDataConsistencyCheckFailedException.class)
public void assertCalculateFailed() throws SQLException {
when(pipelineDataSource.getConnection()).thenReturn(connection);
when(connection.prepareStatement(anyString())).thenThrow(new SQLException());
new CRC32MatchMySQLSingleTableDataCalculator().calculate(dataCalculateParameter);
}
} |
You can use the constant for "UTF-8" from StandardCharsets class[1] [1] https://docs.oracle.com/javase/8/docs/api/index.html?java/nio/charset/StandardCharsets.html | public void execute(Context context) {
BMap<String, BValue> channelObject = (BMap<String, BValue>) context.getRefArgument(0);
Channel channel = RabbitMQUtils.getNativeObject(channelObject, RabbitMQConstants.CHANNEL_NATIVE_OBJECT,
Channel.class, context);
BValue msgContent = context.getRefArgument(1);
String routingKey = context.getStringArgument(0);
String exchange = context.getStringArgument(1);
try {
ChannelUtils.basicPublish(channel, routingKey, msgContent.stringValue().getBytes(Charset.forName("UTF-8")),
exchange);
} catch (BallerinaException exception) {
LOGGER.error("I/O exception while publishing a message", exception);
RabbitMQUtils.returnError("RabbitMQ Client Error:", context, exception);
}
} | ChannelUtils.basicPublish(channel, routingKey, msgContent.stringValue().getBytes(Charset.forName("UTF-8")), | public void execute(Context context) {
BMap<String, BValue> channelObject = (BMap<String, BValue>) context.getRefArgument(0);
Channel channel = RabbitMQUtils.getNativeObject(channelObject, RabbitMQConstants.CHANNEL_NATIVE_OBJECT,
Channel.class, context);
BValue msgContent = context.getRefArgument(1);
String routingKey = context.getStringArgument(0);
String exchange = context.getStringArgument(1);
try {
ChannelUtils.basicPublish(channel, routingKey, msgContent.stringValue().getBytes(StandardCharsets.UTF_8),
exchange);
} catch (BallerinaException exception) {
LOGGER.error("I/O exception while publishing a message", exception);
RabbitMQUtils.returnError("RabbitMQ Client Error:", context, exception);
}
} | class BasicPublish extends BlockingNativeCallableUnit {
private static final Logger LOGGER = LoggerFactory.getLogger(BasicPublish.class);
@Override
} | class BasicPublish extends BlockingNativeCallableUnit {
private static final Logger LOGGER = LoggerFactory.getLogger(BasicPublish.class);
@Override
} |
This is a nice one-liner! | public void updateKeyStore(KeyStore keyStore, String password) {
updateKeyStore(sslContextFactory -> {
sslContextFactory.setKeyStore(keyStore);
if (password != null) {
sslContextFactory.setKeyStorePassword(null);
}
});
} | if (password != null) { | public void updateKeyStore(KeyStore keyStore, String password) {
updateKeyStore(sslContextFactory -> {
sslContextFactory.setKeyStore(keyStore);
if (password != null) {
sslContextFactory.setKeyStorePassword(null);
}
});
} | class DefaultSslKeyStoreContext implements SslKeyStoreContext {
private final SslContextFactory sslContextFactory;
public DefaultSslKeyStoreContext(SslContextFactory sslContextFactory) {
this.sslContextFactory = sslContextFactory;
}
@Override
public void updateKeyStore(KeyStore keyStore) {
updateKeyStore(keyStore, null);
}
@Override
@Override
public void updateKeyStore(String keyStorePath, String keyStoreType, String keyStorePassword) {
updateKeyStore(sslContextFactory -> {
sslContextFactory.setKeyStorePath(keyStorePath);
sslContextFactory.setKeyStoreType(keyStoreType);
sslContextFactory.setKeyStorePassword(keyStorePassword);
});
}
private void updateKeyStore(Consumer<SslContextFactory> reloader) {
try {
sslContextFactory.reload(reloader);
} catch (Exception e) {
throw new RuntimeException("Could not update keystore: " + e.getMessage(), e);
}
}
} | class DefaultSslKeyStoreContext implements SslKeyStoreContext {
private final SslContextFactory sslContextFactory;
public DefaultSslKeyStoreContext(SslContextFactory sslContextFactory) {
this.sslContextFactory = sslContextFactory;
}
@Override
public void updateKeyStore(KeyStore keyStore) {
updateKeyStore(keyStore, null);
}
@Override
@Override
public void updateKeyStore(String keyStorePath, String keyStoreType, String keyStorePassword) {
updateKeyStore(sslContextFactory -> {
sslContextFactory.setKeyStorePath(keyStorePath);
sslContextFactory.setKeyStoreType(keyStoreType);
sslContextFactory.setKeyStorePassword(keyStorePassword);
});
}
private void updateKeyStore(Consumer<SslContextFactory> reloader) {
try {
sslContextFactory.reload(reloader);
} catch (Exception e) {
throw new RuntimeException("Could not update keystore: " + e.getMessage(), e);
}
}
} |
@simplynaveen20 In Spring, you can have duplicate parameters in query which are of the same name - for example this query - `Iterable<Project> findByNameAndCreatorOrNameAndCreator(String name, String creator, String name2, String creator2);` This would get converted to this - `select * from c where c.name = @name and c.creator = @creator OR c.name = @name and c.creator = @creator` This cause problems when running the query - as Gateway throws exception saying 400 bad request. To avoid this issue, when we are generating the query params, we were appending random UUID to have different query param names for same fields. However, that was resulting in a different query text for the same query if executed twice in the same JVM. To solve the query text issue, now I am using a counter - which gets initialized when generating a query text. That way, same queries (conceptually) now will have same query text. Which is what we want. | private String generateQueryParameter(@NonNull String subject, int counter) {
return subject.replaceAll("[^a-zA-Z\\d]", "_") + counter;
} | return subject.replaceAll("[^a-zA-Z\\d]", "_") + counter; | private String generateQueryParameter(@NonNull String subject, int counter) {
return subject.replaceAll("[^a-zA-Z\\d]", "_") + counter;
} | class AbstractQueryGenerator {
protected AbstractQueryGenerator() {
}
private String generateUnaryQuery(@NonNull Criteria criteria) {
Assert.isTrue(criteria.getSubjectValues().isEmpty(), "Unary criteria should have no one subject value");
Assert.isTrue(CriteriaType.isUnary(criteria.getType()), "Criteria type should be unary operation");
final String subject = criteria.getSubject();
if (CriteriaType.isFunction(criteria.getType())) {
return String.format("%s(r.%s)", criteria.getType().getSqlKeyword(), subject);
} else {
return String.format("r.%s %s", subject, criteria.getType().getSqlKeyword());
}
}
private String generateBinaryQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Binary criteria should have only one subject value");
Assert.isTrue(CriteriaType.isBinary(criteria.getType()), "Criteria type should be binary operation");
final String subject = criteria.getSubject();
final Object subjectValue = toCosmosDbValue(criteria.getSubjectValues().get(0));
final String parameter = generateQueryParameter(subject, counter);
final Part.IgnoreCaseType ignoreCase = criteria.getIgnoreCase();
final String sqlKeyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter, subjectValue));
if (CriteriaType.isFunction(criteria.getType())) {
return getFunctionCondition(ignoreCase, sqlKeyword, subject, parameter);
} else {
return getCondition(ignoreCase, sqlKeyword, subject, parameter);
}
}
/**
* Get condition string with function
*
* @param ignoreCase ignore case flag
* @param sqlKeyword sql key word, operation name
* @param subject sql column name
* @param parameter sql filter value
* @return condition string
*/
private String getCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("r.%s %s @%s", subject, sqlKeyword, parameter);
} else {
return String.format("UPPER(r.%s) %s UPPER(@%s)", subject, sqlKeyword, parameter);
}
}
/**
* Get condition string without function
*
* @param ignoreCase ignore case flag
* @param sqlKeyword sql key word, operation name
* @param subject sql column name
* @param parameter sql filter value
* @return condition string
*/
private String getFunctionCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("%s(r.%s, @%s)", sqlKeyword, subject, parameter);
} else {
return String.format("%s(UPPER(r.%s), UPPER(@%s))", sqlKeyword, subject, parameter);
}
}
private String generateBetween(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
final String subject = criteria.getSubject();
final Object value1 = toCosmosDbValue(criteria.getSubjectValues().get(0));
final Object value2 = toCosmosDbValue(criteria.getSubjectValues().get(1));
final String subject1 = subject + "start";
final String subject2 = subject + "end";
final String parameter1 = generateQueryParameter(subject1, counter);
final String parameter2 = generateQueryParameter(subject2, counter);
final String keyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter1, value1));
parameters.add(Pair.of(parameter2, value2));
return String.format("(r.%s %s @%s AND @%s)", subject, keyword, parameter1, parameter2);
}
private String generateClosedQuery(@NonNull String left, @NonNull String right, CriteriaType type) {
Assert.isTrue(CriteriaType.isClosed(type)
&& CriteriaType.isBinary(type),
"Criteria type should be binary and closure operation");
return String.join(" ", left, type.getSqlKeyword(), right);
}
@SuppressWarnings("unchecked")
private String generateInQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Criteria should have only one subject value");
if (!(criteria.getSubjectValues().get(0) instanceof Collection)) {
throw new IllegalQueryException("IN keyword requires Collection type in parameters");
}
final Collection<Object> values = (Collection<Object>) criteria.getSubjectValues().get(0);
final List<String> paras = new ArrayList<>();
for (Object o : values) {
if (o instanceof String || o instanceof Integer || o instanceof Long || o instanceof Boolean) {
String key = "p" + parameters.size();
paras.add("@" + key);
parameters.add(Pair.of(key, o));
} else {
throw new IllegalQueryException("IN keyword Range only support Number and String type.");
}
}
return String.format("r.%s %s (%s)", criteria.getSubject(), criteria.getType().getSqlKeyword(),
String.join(",", paras));
}
private String generateQueryBody(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, @NonNull final AtomicInteger counter) {
final CriteriaType type = criteria.getType();
switch (type) {
case ALL:
return "";
case IN:
case NOT_IN:
return generateInQuery(criteria, parameters);
case BETWEEN:
return generateBetween(criteria, parameters, counter.getAndIncrement());
case IS_NULL:
case IS_NOT_NULL:
case FALSE:
case TRUE:
return generateUnaryQuery(criteria);
case IS_EQUAL:
case NOT:
case BEFORE:
case AFTER:
case LESS_THAN:
case LESS_THAN_EQUAL:
case GREATER_THAN:
case GREATER_THAN_EQUAL:
case CONTAINING:
case ENDS_WITH:
case STARTS_WITH:
case ARRAY_CONTAINS:
return generateBinaryQuery(criteria, parameters, counter.getAndIncrement());
case AND:
case OR:
Assert.isTrue(criteria.getSubCriteria().size() == 2,
"criteria should have two SubCriteria");
final String left = generateQueryBody(criteria.getSubCriteria().get(0), parameters, counter);
final String right = generateQueryBody(criteria.getSubCriteria().get(1), parameters, counter);
return generateClosedQuery(left, right, type);
default:
throw new UnsupportedOperationException("unsupported Criteria type: "
+ type);
}
}
/**
* Generate a query body for interface QuerySpecGenerator. The query body compose of Sql query String and its'
* parameters. The parameters organized as a list of Pair, for each pair compose parameter name and value.
*
* @param query the representation for query method.
* @return A pair tuple compose of Sql query.
*/
@NonNull
private Pair<String, List<Pair<String, Object>>> generateQueryBody(@NonNull CosmosQuery query, @NonNull final AtomicInteger counter) {
final List<Pair<String, Object>> parameters = new ArrayList<>();
String queryString = this.generateQueryBody(query.getCriteria(), parameters, counter);
if (StringUtils.hasText(queryString)) {
queryString = String.join(" ", "WHERE", queryString);
}
return Pair.of(queryString, parameters);
}
private static String getParameter(@NonNull Sort.Order order) {
Assert.isTrue(!order.isIgnoreCase(), "Ignore case is not supported");
final String direction = order.isDescending() ? "DESC" : "ASC";
return String.format("r.%s %s", order.getProperty(), direction);
}
static String generateQuerySort(@NonNull Sort sort) {
if (sort.isUnsorted()) {
return "";
}
final String queryTail = "ORDER BY";
final List<String> subjects = sort.stream().map(AbstractQueryGenerator::getParameter).collect(Collectors.toList());
return queryTail
+ " "
+ String.join(",", subjects);
}
@NonNull
private String generateQueryTail(@NonNull CosmosQuery query) {
final List<String> queryTails = new ArrayList<>();
queryTails.add(generateQuerySort(query.getSort()));
return String.join(" ", queryTails.stream().filter(StringUtils::hasText).collect(Collectors.toList()));
}
protected SqlQuerySpec generateCosmosQuery(@NonNull CosmosQuery query,
@NonNull String queryHead) {
final AtomicInteger counter = new AtomicInteger();
final Pair<String, List<Pair<String, Object>>> queryBody = generateQueryBody(query, counter);
String queryString = String.join(" ", queryHead, queryBody.getFirst(), generateQueryTail(query));
final List<Pair<String, Object>> parameters = queryBody.getSecond();
List<SqlParameter> sqlParameters = parameters.stream()
.map(p -> new SqlParameter("@" + p.getFirst(),
toCosmosDbValue(p.getSecond())))
.collect(Collectors.toList());
if (query.getLimit() > 0) {
queryString = new StringBuilder(queryString)
.append(" OFFSET 0 LIMIT ")
.append(query.getLimit()).toString();
}
return new SqlQuerySpec(queryString, sqlParameters);
}
} | class AbstractQueryGenerator {
protected AbstractQueryGenerator() {
}
private String generateUnaryQuery(@NonNull Criteria criteria) {
Assert.isTrue(criteria.getSubjectValues().isEmpty(), "Unary criteria should have no one subject value");
Assert.isTrue(CriteriaType.isUnary(criteria.getType()), "Criteria type should be unary operation");
final String subject = criteria.getSubject();
if (CriteriaType.isFunction(criteria.getType())) {
return String.format("%s(r.%s)", criteria.getType().getSqlKeyword(), subject);
} else {
return String.format("r.%s %s", subject, criteria.getType().getSqlKeyword());
}
}
private String generateBinaryQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
Assert.isTrue(criteria.getSubjectValues().size() == 1,
"Binary criteria should have only one subject value");
Assert.isTrue(CriteriaType.isBinary(criteria.getType()), "Criteria type should be binary operation");
final String subject = criteria.getSubject();
final Object subjectValue = toCosmosDbValue(criteria.getSubjectValues().get(0));
final String parameter = generateQueryParameter(subject, counter);
final Part.IgnoreCaseType ignoreCase = criteria.getIgnoreCase();
final String sqlKeyword = criteria.getType().getSqlKeyword();
parameters.add(Pair.of(parameter, subjectValue));
if (CriteriaType.isFunction(criteria.getType())) {
return getFunctionCondition(ignoreCase, sqlKeyword, subject, parameter);
} else {
return getCondition(ignoreCase, sqlKeyword, subject, parameter);
}
}
/**
* Get condition string with function
*
* @param ignoreCase ignore case flag
* @param sqlKeyword sql key word, operation name
* @param subject sql column name
* @param parameter sql filter value
* @return condition string
*/
private String getCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("r.%s %s @%s", subject, sqlKeyword, parameter);
} else {
return String.format("UPPER(r.%s) %s UPPER(@%s)", subject, sqlKeyword, parameter);
}
}
/**
* Get condition string without function
*
* @param ignoreCase ignore case flag
* @param sqlKeyword sql key word, operation name
* @param subject sql column name
* @param parameter sql filter value
* @return condition string
*/
private String getFunctionCondition(final Part.IgnoreCaseType ignoreCase, final String sqlKeyword,
final String subject, final String parameter) {
if (Part.IgnoreCaseType.NEVER == ignoreCase) {
return String.format("%s(r.%s, @%s)", sqlKeyword, subject, parameter);
} else {
return String.format("%s(UPPER(r.%s), UPPER(@%s))", sqlKeyword, subject, parameter);
}
}
/**
 * Renders a BETWEEN criteria into a condition such as
 * {@code (r.age BETWEEN @agestart0 AND @ageend0)}, registering both boundary
 * values as bound parameters (lower bound first).
 *
 * @param criteria   criteria holding the two boundary subject values
 * @param parameters accumulator for (parameter name, value) pairs; two entries are appended
 * @param counter    sequence number used to build unique parameter names
 * @return the SQL condition fragment
 */
private String generateBetween(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, int counter) {
    final String columnName = criteria.getSubject();
    final String lowerParameter = generateQueryParameter(columnName + "start", counter);
    final String upperParameter = generateQueryParameter(columnName + "end", counter);
    parameters.add(Pair.of(lowerParameter, toCosmosDbValue(criteria.getSubjectValues().get(0))));
    parameters.add(Pair.of(upperParameter, toCosmosDbValue(criteria.getSubjectValues().get(1))));
    return String.format("(r.%s %s @%s AND @%s)", columnName,
        criteria.getType().getSqlKeyword(), lowerParameter, upperParameter);
}
/**
 * Joins two already-rendered condition fragments with a closure operator (AND/OR).
 *
 * @param left  rendered left-hand condition
 * @param right rendered right-hand condition
 * @param type  the closure criteria type supplying the SQL keyword
 * @return {@code left <keyword> right}
 */
private String generateClosedQuery(@NonNull String left, @NonNull String right, CriteriaType type) {
    Assert.isTrue(CriteriaType.isClosed(type)
        && CriteriaType.isBinary(type),
        "Criteria type should be binary and closure operation");
    return left + " " + type.getSqlKeyword() + " " + right;
}
/**
 * Renders an IN / NOT IN criteria into a condition such as
 * {@code r.name IN (@p0,@p1)}, registering one bound parameter per element.
 * Only String, Integer, Long and Boolean elements are supported.
 *
 * @param criteria   criteria whose single subject value is the element collection
 * @param parameters accumulator for (parameter name, value) pairs
 * @return the SQL condition fragment
 * @throws IllegalQueryException if the subject value is not a Collection or an
 *                               element has an unsupported type
 */
@SuppressWarnings("unchecked")
private String generateInQuery(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters) {
    Assert.isTrue(criteria.getSubjectValues().size() == 1,
        "Criteria should have only one subject value");
    if (!(criteria.getSubjectValues().get(0) instanceof Collection)) {
        throw new IllegalQueryException("IN keyword requires Collection type in parameters");
    }
    final Collection<Object> values = (Collection<Object>) criteria.getSubjectValues().get(0);
    final List<String> paras = new ArrayList<>();
    for (Object o : values) {
        if (o instanceof String || o instanceof Integer || o instanceof Long || o instanceof Boolean) {
            // Parameter names are keyed off the running parameter count to stay unique.
            String key = "p" + parameters.size();
            paras.add("@" + key);
            parameters.add(Pair.of(key, o));
        } else {
            // Fixed: the old message claimed "Number and String" support, but Boolean is
            // accepted while e.g. Double is not — list the actually supported types.
            throw new IllegalQueryException(
                "IN keyword only supports String, Integer, Long and Boolean element types.");
        }
    }
    return String.format("r.%s %s (%s)", criteria.getSubject(), criteria.getType().getSqlKeyword(),
        String.join(",", paras));
}
/**
 * Recursively renders a criteria tree into a SQL condition string, accumulating
 * the bound parameters along the way.
 *
 * @param criteria   node of the criteria tree to render.
 * @param parameters accumulator for (parameter name, value) pairs.
 * @param counter    shared sequence making generated parameter names unique across the tree.
 * @return the SQL fragment for this node; empty string for {@code ALL} (no filtering).
 */
private String generateQueryBody(@NonNull Criteria criteria, @NonNull List<Pair<String, Object>> parameters, @NonNull final AtomicInteger counter) {
final CriteriaType type = criteria.getType();
switch (type) {
case ALL:
// No filtering: the caller omits the WHERE clause entirely.
return "";
case IN:
case NOT_IN:
return generateInQuery(criteria, parameters);
case BETWEEN:
return generateBetween(criteria, parameters, counter.getAndIncrement());
case IS_NULL:
case IS_NOT_NULL:
case FALSE:
case TRUE:
// Unary operators take no bound parameter.
return generateUnaryQuery(criteria);
case IS_EQUAL:
case NOT:
case BEFORE:
case AFTER:
case LESS_THAN:
case LESS_THAN_EQUAL:
case GREATER_THAN:
case GREATER_THAN_EQUAL:
case CONTAINING:
case ENDS_WITH:
case STARTS_WITH:
case ARRAY_CONTAINS:
return generateBinaryQuery(criteria, parameters, counter.getAndIncrement());
case AND:
case OR:
// Closure node: render both sub-trees, then join them with AND/OR.
Assert.isTrue(criteria.getSubCriteria().size() == 2,
"criteria should have two SubCriteria");
final String left = generateQueryBody(criteria.getSubCriteria().get(0), parameters, counter);
final String right = generateQueryBody(criteria.getSubCriteria().get(1), parameters, counter);
return generateClosedQuery(left, right, type);
default:
throw new UnsupportedOperationException("unsupported Criteria type: "
+ type);
}
}
/**
 * Generates the query body (the WHERE clause) for a {@link CosmosQuery}. The result
 * pairs the SQL fragment with its bound parameters; each parameter is a
 * (name, value) pair.
 *
 * @param query the representation for the query method.
 * @param counter shared sequence used to generate unique parameter names.
 * @return a pair of the WHERE clause (empty string when no filtering applies) and its parameters.
 */
@NonNull
private Pair<String, List<Pair<String, Object>>> generateQueryBody(@NonNull CosmosQuery query, @NonNull final AtomicInteger counter) {
final List<Pair<String, Object>> parameters = new ArrayList<>();
String queryString = this.generateQueryBody(query.getCriteria(), parameters, counter);
// An empty body (CriteriaType.ALL) means no WHERE keyword is emitted at all.
if (StringUtils.hasText(queryString)) {
queryString = String.join(" ", "WHERE", queryString);
}
return Pair.of(queryString, parameters);
}
/**
 * Renders a single sort order as an ORDER BY operand, e.g. {@code r.name DESC}.
 * Ignore-case sorting is not supported by this generator.
 */
private static String getParameter(@NonNull Sort.Order order) {
    Assert.isTrue(!order.isIgnoreCase(), "Ignore case is not supported");
    return String.format("r.%s %s", order.getProperty(), order.isDescending() ? "DESC" : "ASC");
}
/**
 * Builds the ORDER BY clause for the given sort, or an empty string when unsorted.
 */
static String generateQuerySort(@NonNull Sort sort) {
    if (sort.isUnsorted()) {
        return "";
    }
    final String orderExpressions = sort.stream()
        .map(AbstractQueryGenerator::getParameter)
        .collect(Collectors.joining(","));
    return "ORDER BY " + orderExpressions;
}
/**
 * Builds the trailing clauses of the query. Currently the only tail clause is the
 * ORDER BY segment; an unsorted query yields an empty string.
 */
@NonNull
private String generateQueryTail(@NonNull CosmosQuery query) {
    final String sortClause = generateQuerySort(query.getSort());
    return StringUtils.hasText(sortClause) ? sortClause : "";
}
/**
 * Assembles the final {@link SqlQuerySpec}: query head + WHERE body + ORDER BY tail,
 * optionally capped with an OFFSET/LIMIT clause, together with the bound parameters.
 *
 * @param query     the query to render
 * @param queryHead the leading SELECT fragment supplied by the caller
 * @return an executable query spec
 */
protected SqlQuerySpec generateCosmosQuery(@NonNull CosmosQuery query,
    @NonNull String queryHead) {
    final AtomicInteger counter = new AtomicInteger();
    final Pair<String, List<Pair<String, Object>>> queryBody = generateQueryBody(query, counter);
    String queryString = String.join(" ", queryHead, queryBody.getFirst(), generateQueryTail(query));
    final List<SqlParameter> sqlParameters = new ArrayList<>();
    for (Pair<String, Object> pair : queryBody.getSecond()) {
        sqlParameters.add(new SqlParameter("@" + pair.getFirst(), toCosmosDbValue(pair.getSecond())));
    }
    if (query.getLimit() > 0) {
        // Cosmos DB requires OFFSET together with LIMIT; page start is always 0 here.
        queryString = queryString + " OFFSET 0 LIMIT " + query.getLimit();
    }
    return new SqlQuerySpec(queryString, sqlParameters);
}
} |
This is a test, so it doesn't matter much here — but in general you should not create an ObjectMapper per method invocation; it is costly time-wise. | private void validateJson(String jsonInString) {
try {
ObjectMapper mapper = new ObjectMapper();
mapper.readTree(jsonInString);
} catch(JsonProcessingException ex) {
fail("Diagnostic string is not in json format");
}
} | ObjectMapper mapper = new ObjectMapper(); | private void validateJson(String jsonInString) {
try {
OBJECT_MAPPER.readTree(jsonInString);
} catch(JsonProcessingException ex) {
fail("Diagnostic string is not in json format");
}
} | class CosmosDiagnosticsTest extends TestSuiteBase {
private CosmosClient gatewayClient;
private CosmosClient directClient;
private CosmosContainer container;
private CosmosAsyncContainer cosmosAsyncContainer;
private CosmosClientBuilder cosmosClientBuilder;
/**
 * Builds one gateway-mode and one direct-mode client against the emulator and resolves
 * the shared multi-partition test container. The duplicated builder chains are factored
 * into {@link #buildEmulatorClient(boolean)}.
 */
@BeforeClass(groups = {"emulator"}, timeOut = SETUP_TIMEOUT)
public void beforeClass() throws Exception {
    assertThat(this.gatewayClient).isNull();
    gatewayClient = buildEmulatorClient(false);
    directClient = buildEmulatorClient(true);
    cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient());
    container = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
}

/**
 * Builds a client against the test emulator credentials.
 *
 * @param directMode true for direct connection mode, false for gateway mode
 */
private static CosmosClient buildEmulatorClient(boolean directMode) {
    CosmosClientBuilder builder = new CosmosClientBuilder()
        .endpoint(TestConfigurations.HOST)
        .key(TestConfigurations.MASTER_KEY)
        .contentResponseOnWriteEnabled(true);
    return (directMode ? builder.directMode() : builder.gatewayMode()).buildClient();
}
/**
 * Closes the clients created in beforeClass; alwaysRun ensures cleanup is attempted
 * even when a test in the class failed.
 */
@AfterClass(groups = {"emulator"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
    assertThat(this.gatewayClient).isNotNull();
    gatewayClient.close();
    final CosmosClient direct = this.directClient;
    if (direct != null) {
        direct.close();
    }
}
/**
 * Creates an item through the gateway-mode client and verifies the response's
 * diagnostics string: connection mode, populated gateway statistics, operation type,
 * metadata lookup, serialization info, user agent, duration, request timeline and
 * overall JSON well-formedness.
 */
@Test(groups = {"emulator"})
public void gatewayDiagnostics() {
InternalObjectNode internalObjectNode = getInternalObjectNode();
CosmosItemResponse<InternalObjectNode> createResponse = this.container.createItem(internalObjectNode);
String diagnostics = createResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\"");
// Gateway mode must populate gatewayStatistics (i.e. it must not serialize as null).
assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null"));
assertThat(diagnostics).contains("\"operationType\":\"Create\"");
assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\"");
assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
assertThat(createResponse.getDiagnostics().getDuration()).isNotNull();
validateTransportRequestTimelineGateway(diagnostics);
validateJson(diagnostics);
}
/**
 * Issues a gateway-mode point read with a deliberately wrong partition key and asserts
 * that the resulting 404 CosmosException carries GATEWAY-mode diagnostics with a
 * populated request timeline. A dedicated client is built so its lifecycle can be
 * controlled in the finally block.
 */
@Test(groups = {"emulator"})
public void gatewayDiagnosticsOnException() {
InternalObjectNode internalObjectNode = getInternalObjectNode();
CosmosItemResponse<InternalObjectNode> createResponse = null;
CosmosClient client = null;
try {
client = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.gatewayMode()
.buildClient();
CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
createResponse = container.createItem(internalObjectNode);
CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions();
ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey"));
// Reading with the wrong partition key must raise a 404 CosmosException.
CosmosItemResponse<InternalObjectNode> readResponse =
container.readItem(BridgeInternal.getProperties(createResponse).getId(),
new PartitionKey("wrongPartitionKey"),
InternalObjectNode.class);
fail("request should fail as partition key is wrong");
} catch (CosmosException exception) {
String diagnostics = exception.getDiagnostics().toString();
assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\"");
assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null"));
assertThat(diagnostics).contains("\"statusCode\":404");
assertThat(diagnostics).contains("\"operationType\":\"Read\"");
assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
assertThat(exception.getDiagnostics().getDuration()).isNotNull();
validateTransportRequestTimelineGateway(diagnostics);
validateJson(diagnostics);
} finally {
// Close the dedicated client regardless of the outcome.
if (client != null) {
client.close();
}
}
}
/**
 * Verifies that the diagnostics string of a successful create contains the
 * system-state section (memory and CPU load figures) plus the user agent.
 */
@Test(groups = {"emulator"})
public void systemDiagnosticsForSystemStateInformation() {
InternalObjectNode internalObjectNode = getInternalObjectNode();
CosmosItemResponse<InternalObjectNode> createResponse = this.container.createItem(internalObjectNode);
String diagnostics = createResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("systemInformation");
assertThat(diagnostics).contains("usedMemory");
assertThat(diagnostics).contains("availableMemory");
assertThat(diagnostics).contains("processCpuLoad");
assertThat(diagnostics).contains("systemCpuLoad");
assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
assertThat(createResponse.getDiagnostics().getDuration()).isNotNull();
}
/**
 * Creates an item through the direct-mode client and verifies the diagnostics string:
 * DIRECT connection mode, store/address resolution statistics, metadata lookups,
 * serialization info, user agent, duration, transport timeline and JSON
 * well-formedness. gatewayStatistics must be null in direct mode.
 */
@Test(groups = {"emulator"})
public void directDiagnostics() {
CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
InternalObjectNode internalObjectNode = getInternalObjectNode();
CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode);
String diagnostics = createResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\"");
assertThat(diagnostics).contains("supplementalResponseStatisticsList");
// Direct mode bypasses the gateway, so gatewayStatistics serializes as null.
assertThat(diagnostics).contains("\"gatewayStatistics\":null");
assertThat(diagnostics).contains("addressResolutionStatistics");
assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\"");
assertThat(diagnostics).contains("\"metaDataName\":\"PARTITION_KEY_RANGE_LOOK_UP\"");
assertThat(diagnostics).contains("\"metaDataName\":\"SERVER_ADDRESS_LOOKUP\"");
assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
assertThat(createResponse.getDiagnostics().getDuration()).isNotNull();
validateTransportRequestTimelineDirect(diagnostics);
validateJson(diagnostics);
}
/**
 * Issues a direct-mode point read with a deliberately wrong partition key and asserts
 * that the resulting 404 CosmosException carries DIRECT-mode diagnostics.
 * Fixed: the read now goes through the locally built {@code container} (mirroring
 * gatewayDiagnosticsOnException); previously it went through the shared
 * directClient's container, leaving the local client half-unused.
 */
@Test(groups = {"emulator"})
public void directDiagnosticsOnException() {
    InternalObjectNode internalObjectNode = getInternalObjectNode();
    CosmosItemResponse<InternalObjectNode> createResponse = null;
    CosmosClient client = null;
    try {
        client = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .contentResponseOnWriteEnabled(true)
            .directMode()
            .buildClient();
        CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
        createResponse = container.createItem(internalObjectNode);
        CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions();
        ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey"));
        CosmosItemResponse<InternalObjectNode> readResponse =
            container.readItem(BridgeInternal.getProperties(createResponse).getId(),
                new PartitionKey("wrongPartitionKey"),
                InternalObjectNode.class);
        fail("request should fail as partition key is wrong");
    } catch (CosmosException exception) {
        String diagnostics = exception.getDiagnostics().toString();
        assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
        assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\"");
        assertThat(exception.getDiagnostics().getDuration()).isNotNull();
        validateJson(diagnostics);
    } finally {
        // Close the dedicated client regardless of the outcome.
        if (client != null) {
            client.close();
        }
    }
}
/**
 * Verifies that the serialized supplementalResponseStatisticsList is capped at 10
 * entries while the in-memory list keeps every recorded response, and that below
 * the cap all entries survive serialization.
 * Fixed: a single ObjectMapper is reused for both serialization rounds instead of
 * instantiating a second one mid-test (ObjectMapper creation is costly).
 */
@Test(groups = {"emulator"})
public void supplementalResponseStatisticsList() throws Exception {
    ClientSideRequestStatistics clientSideRequestStatistics = new ClientSideRequestStatistics();
    for (int i = 0; i < 15; i++) {
        RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(OperationType.Head, ResourceType.Document);
        clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null);
    }
    List<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics);
    ObjectMapper objectMapper = new ObjectMapper();
    String diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics);
    JsonNode jsonNode = objectMapper.readTree(diagnostics);
    ArrayNode supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList");
    // 15 responses recorded in memory, but serialization keeps only 10.
    assertThat(storeResponseStatistics.size()).isEqualTo(15);
    assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(10);
    clearStoreResponseStatistics(clientSideRequestStatistics);
    storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics);
    assertThat(storeResponseStatistics.size()).isEqualTo(0);
    for (int i = 0; i < 7; i++) {
        RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(OperationType.Head, ResourceType.Document);
        clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null);
    }
    storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics);
    diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics);
    jsonNode = objectMapper.readTree(diagnostics);
    supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList");
    // Below the cap: all 7 entries survive serialization.
    assertThat(storeResponseStatistics.size()).isEqualTo(7);
    assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(7);
}
/**
 * Walks through database/container reads and item create/read round trips, asserting
 * which serializationType markers show up (or are absent) in the diagnostics at each
 * stage. Note the diagnostics string is re-read after getItem(), since item
 * deserialization happens lazily on first access.
 */
@Test(groups = {"emulator"})
public void serializationOnVariousScenarios() {
// Database read -> database deserialization marker.
CosmosDatabaseResponse cosmosDatabase = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).read();
String diagnostics = cosmosDatabase.getDiagnostics().toString();
assertThat(diagnostics).contains("\"serializationType\":\"DATABASE_DESERIALIZATION\"");
// Container read -> container deserialization marker.
CosmosContainerResponse containerResponse = this.container.read();
diagnostics = containerResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("\"serializationType\":\"CONTAINER_DESERIALIZATION\"");
TestItem testItem = new TestItem();
testItem.id = "TestId";
testItem.mypk = "TestPk";
// Create without an explicit partition key -> the key is fetched by serializing the item.
CosmosItemResponse<TestItem> itemResponse = this.container.createItem(testItem);
diagnostics = itemResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
testItem.id = "TestId2";
testItem.mypk = "TestPk";
// Create with an explicit partition key -> no fetch serialization, and no item
// deserialization yet (getItem() has not been called).
itemResponse = this.container.createItem(testItem, new PartitionKey("TestPk"), null);
diagnostics = itemResponse.getDiagnostics().toString();
assertThat(diagnostics).doesNotContain("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
assertThat(diagnostics).doesNotContain("\"serializationType\":\"ITEM_DESERIALIZATION\"");
// Accessing the item triggers (lazy) deserialization, which then shows in diagnostics.
TestItem readTestItem = itemResponse.getItem();
diagnostics = itemResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\"");
CosmosItemResponse<InternalObjectNode> readItemResponse = this.container.readItem(testItem.id, new PartitionKey(testItem.mypk), null, InternalObjectNode.class);
InternalObjectNode properties = readItemResponse.getItem();
diagnostics = readItemResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\"");
assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
}
/** Creates a minimal document with a random id and the fixed partition-key value "test". */
private InternalObjectNode getInternalObjectNode() {
    final InternalObjectNode node = new InternalObjectNode();
    node.setId(UUID.randomUUID().toString());
    BridgeInternal.setProperty(node, "mypk", "test");
    return node;
}
/**
 * Reads the private supplementalResponseStatisticsList field via reflection so the
 * test can observe the raw in-memory list behind the serialized diagnostics.
 */
private List<ClientSideRequestStatistics.StoreResponseStatistics> getStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception {
    final Field field = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList");
    field.setAccessible(true);
    @SuppressWarnings({"unchecked"})
    List<ClientSideRequestStatistics.StoreResponseStatistics> statistics =
        (List<ClientSideRequestStatistics.StoreResponseStatistics>) field.get(requestStatistics);
    return statistics;
}
/** Resets the private supplementalResponseStatisticsList field to an empty list via reflection. */
private void clearStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception {
    final Field field = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList");
    field.setAccessible(true);
    field.set(requestStatistics, new ArrayList<ClientSideRequestStatistics.StoreResponseStatistics>());
}
/**
 * Asserts the gateway transport timeline contains each expected event.
 * Fixed: the "connectionConfigured" assertion was duplicated (copy-paste); the
 * redundant line is removed.
 */
private void validateTransportRequestTimelineGateway(String diagnostics) {
    assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\"");
    assertThat(diagnostics).contains("\"eventName\":\"requestSent\"");
    assertThat(diagnostics).contains("\"eventName\":\"transitTime\"");
    assertThat(diagnostics).contains("\"eventName\":\"received\"");
}
/** Asserts the direct-mode transport timeline contains each expected event. */
private void validateTransportRequestTimelineDirect(String diagnostics) {
    for (String event : new String[]{"created", "queued", "pipelined", "transitTime", "received", "completed"}) {
        assertThat(diagnostics).contains("\"eventName\":\"" + event + "\"");
    }
}
// Minimal serializable test document; public fields map directly to document properties.
public static class TestItem {
public String id;
public String mypk; // used as the partition key value in the tests above
public TestItem() {
}
}
} | class CosmosDiagnosticsTest extends TestSuiteBase {
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
private CosmosClient gatewayClient;
private CosmosClient directClient;
private CosmosContainer container;
private CosmosAsyncContainer cosmosAsyncContainer;
@BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT)
public void beforeClass() throws Exception {
assertThat(this.gatewayClient).isNull();
gatewayClient = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.gatewayMode()
.buildClient();
directClient = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.directMode()
.buildClient();
cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient());
container = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
}
@AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
assertThat(this.gatewayClient).isNotNull();
this.gatewayClient.close();
if (this.directClient != null) {
this.directClient.close();
}
}
@Test(groups = {"simple"})
public void gatewayDiagnostics() {
InternalObjectNode internalObjectNode = getInternalObjectNode();
CosmosItemResponse<InternalObjectNode> createResponse = this.container.createItem(internalObjectNode);
String diagnostics = createResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\"");
assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null"));
assertThat(diagnostics).contains("\"operationType\":\"Create\"");
assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\"");
assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
assertThat(createResponse.getDiagnostics().getDuration()).isNotNull();
validateTransportRequestTimelineGateway(diagnostics);
validateJson(diagnostics);
}
@Test(groups = {"simple"})
public void gatewayDiagnosticsOnException() {
InternalObjectNode internalObjectNode = getInternalObjectNode();
CosmosItemResponse<InternalObjectNode> createResponse = null;
CosmosClient client = null;
try {
client = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.gatewayMode()
.buildClient();
CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
createResponse = container.createItem(internalObjectNode);
CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions();
ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey"));
CosmosItemResponse<InternalObjectNode> readResponse =
container.readItem(BridgeInternal.getProperties(createResponse).getId(),
new PartitionKey("wrongPartitionKey"),
InternalObjectNode.class);
fail("request should fail as partition key is wrong");
} catch (CosmosException exception) {
String diagnostics = exception.getDiagnostics().toString();
assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\"");
assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null"));
assertThat(diagnostics).contains("\"statusCode\":404");
assertThat(diagnostics).contains("\"operationType\":\"Read\"");
assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
assertThat(exception.getDiagnostics().getDuration()).isNotNull();
validateTransportRequestTimelineGateway(diagnostics);
validateJson(diagnostics);
} finally {
if (client != null) {
client.close();
}
}
}
@Test(groups = {"simple"})
public void systemDiagnosticsForSystemStateInformation() {
InternalObjectNode internalObjectNode = getInternalObjectNode();
CosmosItemResponse<InternalObjectNode> createResponse = this.container.createItem(internalObjectNode);
String diagnostics = createResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("systemInformation");
assertThat(diagnostics).contains("usedMemory");
assertThat(diagnostics).contains("availableMemory");
assertThat(diagnostics).contains("processCpuLoad");
assertThat(diagnostics).contains("systemCpuLoad");
assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
assertThat(createResponse.getDiagnostics().getDuration()).isNotNull();
}
@Test(groups = {"simple"})
public void directDiagnostics() {
CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
InternalObjectNode internalObjectNode = getInternalObjectNode();
CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode);
String diagnostics = createResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\"");
assertThat(diagnostics).contains("supplementalResponseStatisticsList");
assertThat(diagnostics).contains("\"gatewayStatistics\":null");
assertThat(diagnostics).contains("addressResolutionStatistics");
assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\"");
assertThat(diagnostics).contains("\"metaDataName\":\"PARTITION_KEY_RANGE_LOOK_UP\"");
assertThat(diagnostics).contains("\"metaDataName\":\"SERVER_ADDRESS_LOOKUP\"");
assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
assertThat(createResponse.getDiagnostics().getDuration()).isNotNull();
validateTransportRequestTimelineDirect(diagnostics);
validateJson(diagnostics);
}
/**
 * Issues a direct-mode point read with a deliberately wrong partition key and asserts
 * that the resulting 404 CosmosException carries DIRECT-mode diagnostics.
 * Fixed: the read now goes through the locally built {@code container} (mirroring
 * gatewayDiagnosticsOnException); previously it went through the shared
 * directClient's container, leaving the local client half-unused.
 */
@Test(groups = {"simple"})
public void directDiagnosticsOnException() {
    InternalObjectNode internalObjectNode = getInternalObjectNode();
    CosmosItemResponse<InternalObjectNode> createResponse = null;
    CosmosClient client = null;
    try {
        client = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .contentResponseOnWriteEnabled(true)
            .directMode()
            .buildClient();
        CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
        createResponse = container.createItem(internalObjectNode);
        CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions();
        ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey"));
        CosmosItemResponse<InternalObjectNode> readResponse =
            container.readItem(BridgeInternal.getProperties(createResponse).getId(),
                new PartitionKey("wrongPartitionKey"),
                InternalObjectNode.class);
        fail("request should fail as partition key is wrong");
    } catch (CosmosException exception) {
        String diagnostics = exception.getDiagnostics().toString();
        assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND);
        assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\"");
        assertThat(exception.getDiagnostics().getDuration()).isNotNull();
        validateJson(diagnostics);
    } finally {
        // Close the dedicated client regardless of the outcome.
        if (client != null) {
            client.close();
        }
    }
}
/**
 * Verifies that the serialized supplementalResponseStatisticsList is capped at 10
 * entries while the in-memory list keeps every recorded response, and that below
 * the cap all entries survive serialization.
 * Fixed: reuses the class's shared static OBJECT_MAPPER instead of creating two
 * ObjectMapper instances per test run (consistent with validateJson; ObjectMapper
 * creation is costly).
 */
@Test(groups = {"simple"})
public void supplementalResponseStatisticsList() throws Exception {
    ClientSideRequestStatistics clientSideRequestStatistics = new ClientSideRequestStatistics();
    for (int i = 0; i < 15; i++) {
        RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(OperationType.Head, ResourceType.Document);
        clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null);
    }
    List<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics);
    String diagnostics = OBJECT_MAPPER.writeValueAsString(clientSideRequestStatistics);
    JsonNode jsonNode = OBJECT_MAPPER.readTree(diagnostics);
    ArrayNode supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList");
    // 15 responses recorded in memory, but serialization keeps only 10.
    assertThat(storeResponseStatistics.size()).isEqualTo(15);
    assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(10);
    clearStoreResponseStatistics(clientSideRequestStatistics);
    storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics);
    assertThat(storeResponseStatistics.size()).isEqualTo(0);
    for (int i = 0; i < 7; i++) {
        RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(OperationType.Head, ResourceType.Document);
        clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null);
    }
    storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics);
    diagnostics = OBJECT_MAPPER.writeValueAsString(clientSideRequestStatistics);
    jsonNode = OBJECT_MAPPER.readTree(diagnostics);
    supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList");
    // Below the cap: all 7 entries survive serialization.
    assertThat(storeResponseStatistics.size()).isEqualTo(7);
    assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(7);
}
@Test(groups = {"simple"})
public void serializationOnVariousScenarios() {
CosmosDatabaseResponse cosmosDatabase = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).read();
String diagnostics = cosmosDatabase.getDiagnostics().toString();
assertThat(diagnostics).contains("\"serializationType\":\"DATABASE_DESERIALIZATION\"");
CosmosContainerResponse containerResponse = this.container.read();
diagnostics = containerResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("\"serializationType\":\"CONTAINER_DESERIALIZATION\"");
TestItem testItem = new TestItem();
testItem.id = "TestId";
testItem.mypk = "TestPk";
CosmosItemResponse<TestItem> itemResponse = this.container.createItem(testItem);
diagnostics = itemResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
testItem.id = "TestId2";
testItem.mypk = "TestPk";
itemResponse = this.container.createItem(testItem, new PartitionKey("TestPk"), null);
diagnostics = itemResponse.getDiagnostics().toString();
assertThat(diagnostics).doesNotContain("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
assertThat(diagnostics).doesNotContain("\"serializationType\":\"ITEM_DESERIALIZATION\"");
TestItem readTestItem = itemResponse.getItem();
diagnostics = itemResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\"");
CosmosItemResponse<InternalObjectNode> readItemResponse = this.container.readItem(testItem.id, new PartitionKey(testItem.mypk), null, InternalObjectNode.class);
InternalObjectNode properties = readItemResponse.getItem();
diagnostics = readItemResponse.getDiagnostics().toString();
assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\"");
assertThat(diagnostics).contains("\"userAgent\":\"" + Utils.getUserAgent() + "\"");
}
private InternalObjectNode getInternalObjectNode() {
InternalObjectNode internalObjectNode = new InternalObjectNode();
internalObjectNode.setId(UUID.randomUUID().toString());
BridgeInternal.setProperty(internalObjectNode, "mypk", "test");
return internalObjectNode;
}
private List<ClientSideRequestStatistics.StoreResponseStatistics> getStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception {
Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList");
storeResponseStatisticsField.setAccessible(true);
@SuppressWarnings({"unchecked"})
List<ClientSideRequestStatistics.StoreResponseStatistics> list
= (List<ClientSideRequestStatistics.StoreResponseStatistics>) storeResponseStatisticsField.get(requestStatistics);
return list;
}
/**
 * Resets the private {@code supplementalResponseStatisticsList} field of the given
 * request statistics to a fresh empty list via reflection.
 *
 * @param requestStatistics the statistics instance whose list is cleared
 * @throws Exception if the private field cannot be located or accessed
 */
private void clearStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception {
    Field statisticsField =
        ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList");
    statisticsField.setAccessible(true);
    statisticsField.set(requestStatistics, new ArrayList<ClientSideRequestStatistics.StoreResponseStatistics>());
}
/**
 * Asserts that a gateway-mode diagnostics string contains every expected
 * transport-request timeline event.
 *
 * @param diagnostics the serialized diagnostics JSON to check
 */
private void validateTransportRequestTimelineGateway(String diagnostics) {
    // Fix: the original asserted "connectionConfigured" twice (copy-paste
    // duplicate); the redundant assertion is removed.
    assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\"");
    assertThat(diagnostics).contains("\"eventName\":\"requestSent\"");
    assertThat(diagnostics).contains("\"eventName\":\"transitTime\"");
    assertThat(diagnostics).contains("\"eventName\":\"received\"");
}
/**
 * Asserts that a direct-mode diagnostics string contains every expected
 * transport-request timeline event, in the order the events occur.
 *
 * @param diagnostics the serialized diagnostics JSON to check
 */
private void validateTransportRequestTimelineDirect(String diagnostics) {
    String[] expectedEvents = {"created", "queued", "pipelined", "transitTime", "received", "completed"};
    for (String eventName : expectedEvents) {
        assertThat(diagnostics).contains("\"eventName\":\"" + eventName + "\"");
    }
}
// Minimal document POJO used as the payload in the diagnostics tests above.
public static class TestItem {
// Document id.
public String id;
// Partition-key property; the tests pass its value as the PartitionKey
// when creating/reading items.
public String mypk;
// No-arg constructor — presumably required for JSON (de)serialization; confirm.
public TestItem() {
}
}
} |
Wouldn't it be more suitable to represent these strings (e.g. "Receiver") as constants? | private void annotateLeafPsiElementNodes(@NotNull PsiElement element, @NotNull AnnotationHolder holder) {
IElementType elementType = ((LeafPsiElement) element).getElementType();
if (elementType == BallerinaTypes.AT && element.getParent() instanceof AnnotationAttachmentNode) {
Annotation annotation = holder.createInfoAnnotation(element, null);
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.ANNOTATION);
} else if (elementType == BallerinaTypes.QUOTED_STRING) {
String text = element.getText();
Matcher matcher = VALID_ESCAPE_CHAR_PATTERN.matcher(text);
int startOffset = ((LeafPsiElement) element).getStartOffset();
while (matcher.find()) {
String group = matcher.group(0);
TextRange range = new TextRange(startOffset + matcher.start(),
startOffset + matcher.start() + group.length());
Annotation annotation = holder.createInfoAnnotation(range, null);
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.VALID_STRING_ESCAPE);
}
matcher = INVALID_ESCAPE_CHAR_PATTERN.matcher(text);
startOffset = ((LeafPsiElement) element).getStartOffset();
while (matcher.find()) {
String group = matcher.group(3);
if (group != null) {
TextRange range = new TextRange(startOffset + matcher.start(3),
startOffset + matcher.start(3) + group.length());
Annotation annotation = holder.createInfoAnnotation(range, "Invalid string escape");
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.INVALID_STRING_ESCAPE);
}
}
AnnotationAttributeNode annotationAttributeNode = PsiTreeUtil.getParentOfType(element,
AnnotationAttributeNode.class);
boolean canHighlightParameters = canHighlightParameters(element);
if (canHighlightParameters && annotationAttributeNode != null) {
matcher = PATH_PARAMETERS_PATTERN.matcher(text);
startOffset = ((LeafPsiElement) element).getStartOffset();
while (matcher.find()) {
String value = matcher.group(2);
if (value == null) {
continue;
}
TextRange range = new TextRange(startOffset + matcher.start(1),
startOffset + matcher.start(1) + value.length() + 2);
boolean isMatchAvailable = isMatchingParamAvailable(annotationAttributeNode, value);
if (isMatchAvailable) {
Annotation annotation = holder.createInfoAnnotation(range,
"Path parameter '" + value + "'");
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.TEMPLATE_LANGUAGE_COLOR);
} else {
Annotation annotation = holder.createErrorAnnotation(range,
"Path parameter '" + value + "' not found in the resource signature");
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.INVALID_STRING_ESCAPE);
}
}
}
} else if (elementType == BallerinaTypes.STRING_TEMPLATE_LITERAL_START
|| elementType == BallerinaTypes.XML_START) {
annotateKeyword(element, holder);
} else if (elementType == BallerinaTypes.DOCUMENTATION_TEMPLATE_START ||
elementType == BallerinaTypes.DEPRECATED_TEMPLATE_START) {
annotateKeyword(element, holder, BallerinaSyntaxHighlightingColors.KEYWORD);
} else if (elementType == BallerinaTypes.STRING_TEMPLATE_EXPRESSION_START
|| elementType == BallerinaTypes.XML_EXPRESSION_START) {
annotateExpressionTemplateStart(element, holder);
} else if (elementType == BallerinaTypes.STRING_TEMPLATE_TEXT || elementType == BallerinaTypes.XML_TEXT) {
annotateText(element, holder);
} else if (elementType == BallerinaTypes.EXPRESSION_END) {
annotateStringLiteralTemplateEnd(element, holder);
} else if (elementType == BallerinaTypes.DOCUMENTATION_TEMPLATE_ATTRIBUTE_START) {
String msg = null;
switch (element.getText().charAt(0)) {
case 'T':
msg = "Receiver";
break;
case 'P':
msg = "Parameter";
break;
case 'R':
msg = "Return Value";
break;
case 'F':
msg = "Field";
break;
case 'V':
msg = "Variable";
break;
}
TextRange textRange = element.getTextRange();
TextRange newTextRange = new TextRange(textRange.getStartOffset(), textRange.getEndOffset() - 2);
Annotation annotation = holder.createInfoAnnotation(newTextRange, msg);
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.DOCUMENTATION_INLINE_CODE);
} else if (element instanceof IdentifierPSINode) {
if (element.getParent() instanceof DocumentationTemplateAttributeDescriptionNode) {
Annotation annotation = holder.createInfoAnnotation(element, null);
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.DOCUMENTATION_INLINE_CODE);
}
PsiReference reference = element.getReference();
if (reference == null || reference instanceof RecordKeyReference) {
return;
}
PsiElement resolvedElement = reference.resolve();
if (resolvedElement == null) {
return;
}
PsiElement parent = resolvedElement.getParent();
if (parent instanceof ConstantDefinitionNode) {
Annotation annotation = holder.createInfoAnnotation(element, null);
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.CONSTANT);
} else if (parent instanceof GlobalVariableDefinitionNode) {
Annotation annotation = holder.createInfoAnnotation(element, null);
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.GLOBAL_VARIABLE);
}
}
} | msg = "Receiver"; | private void annotateLeafPsiElementNodes(@NotNull PsiElement element, @NotNull AnnotationHolder holder) {
IElementType elementType = ((LeafPsiElement) element).getElementType();
if (elementType == BallerinaTypes.AT && element.getParent() instanceof AnnotationAttachmentNode) {
Annotation annotation = holder.createInfoAnnotation(element, null);
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.ANNOTATION);
} else if (elementType == BallerinaTypes.QUOTED_STRING) {
String text = element.getText();
Matcher matcher = VALID_ESCAPE_CHAR_PATTERN.matcher(text);
int startOffset = ((LeafPsiElement) element).getStartOffset();
while (matcher.find()) {
String group = matcher.group(0);
TextRange range = new TextRange(startOffset + matcher.start(),
startOffset + matcher.start() + group.length());
Annotation annotation = holder.createInfoAnnotation(range, null);
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.VALID_STRING_ESCAPE);
}
matcher = INVALID_ESCAPE_CHAR_PATTERN.matcher(text);
startOffset = ((LeafPsiElement) element).getStartOffset();
while (matcher.find()) {
String group = matcher.group(3);
if (group != null) {
TextRange range = new TextRange(startOffset + matcher.start(3),
startOffset + matcher.start(3) + group.length());
Annotation annotation = holder.createInfoAnnotation(range, "Invalid string escape");
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.INVALID_STRING_ESCAPE);
}
}
AnnotationAttributeNode annotationAttributeNode = PsiTreeUtil.getParentOfType(element,
AnnotationAttributeNode.class);
boolean canHighlightParameters = canHighlightParameters(element);
if (canHighlightParameters && annotationAttributeNode != null) {
matcher = PATH_PARAMETERS_PATTERN.matcher(text);
startOffset = ((LeafPsiElement) element).getStartOffset();
while (matcher.find()) {
String value = matcher.group(2);
if (value == null) {
continue;
}
TextRange range = new TextRange(startOffset + matcher.start(1),
startOffset + matcher.start(1) + value.length() + 2);
boolean isMatchAvailable = isMatchingParamAvailable(annotationAttributeNode, value);
if (isMatchAvailable) {
Annotation annotation = holder.createInfoAnnotation(range,
"Path parameter '" + value + "'");
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.TEMPLATE_LANGUAGE_COLOR);
} else {
Annotation annotation = holder.createErrorAnnotation(range,
"Path parameter '" + value + "' not found in the resource signature");
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.INVALID_STRING_ESCAPE);
}
}
}
} else if (elementType == BallerinaTypes.STRING_TEMPLATE_LITERAL_START
|| elementType == BallerinaTypes.XML_START) {
annotateKeyword(element, holder);
} else if (elementType == BallerinaTypes.DOCUMENTATION_TEMPLATE_START ||
elementType == BallerinaTypes.DEPRECATED_TEMPLATE_START) {
annotateKeyword(element, holder, BallerinaSyntaxHighlightingColors.KEYWORD);
} else if (elementType == BallerinaTypes.STRING_TEMPLATE_EXPRESSION_START
|| elementType == BallerinaTypes.XML_EXPRESSION_START) {
annotateExpressionTemplateStart(element, holder);
} else if (elementType == BallerinaTypes.STRING_TEMPLATE_TEXT || elementType == BallerinaTypes.XML_TEXT) {
annotateText(element, holder);
} else if (elementType == BallerinaTypes.EXPRESSION_END) {
annotateStringLiteralTemplateEnd(element, holder);
} else if (elementType == BallerinaTypes.DOCUMENTATION_TEMPLATE_ATTRIBUTE_START) {
String msg = null;
switch (element.getText().charAt(0)) {
case 'T':
msg = "Receiver";
break;
case 'P':
msg = "Parameter";
break;
case 'R':
msg = "Return Value";
break;
case 'F':
msg = "Field";
break;
case 'V':
msg = "Variable";
break;
}
TextRange textRange = element.getTextRange();
TextRange newTextRange = new TextRange(textRange.getStartOffset(), textRange.getEndOffset() - 2);
Annotation annotation = holder.createInfoAnnotation(newTextRange, msg);
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.DOCUMENTATION_INLINE_CODE);
} else if (element instanceof IdentifierPSINode) {
if (element.getParent() instanceof DocumentationTemplateAttributeDescriptionNode) {
Annotation annotation = holder.createInfoAnnotation(element, null);
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.DOCUMENTATION_INLINE_CODE);
}
PsiReference reference = element.getReference();
if (reference == null || reference instanceof RecordKeyReference) {
return;
}
PsiElement resolvedElement = reference.resolve();
if (resolvedElement == null) {
return;
}
PsiElement parent = resolvedElement.getParent();
if (parent instanceof ConstantDefinitionNode) {
Annotation annotation = holder.createInfoAnnotation(element, null);
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.CONSTANT);
} else if (parent instanceof GlobalVariableDefinitionNode) {
Annotation annotation = holder.createInfoAnnotation(element, null);
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.GLOBAL_VARIABLE);
}
}
} | class BallerinaAnnotator implements Annotator {
// Matches legal string escapes: \b \t \n \f \r \" \' \\ sequences, \uXXXX
// unicode escapes, and octal escapes. Compiled once as a static final
// (Pattern compilation is comparatively expensive; Pattern is thread-safe).
private static final String VALID_ESCAPE_CHARACTERS = "\\\\[btnfr\"'\\\\]|\\\\u[0-f]{4}|\\\\[0-3][0-7]{2}" +
"|\\\\[0-7]{1,2}";
private static final Pattern VALID_ESCAPE_CHAR_PATTERN = Pattern.compile(VALID_ESCAPE_CHARACTERS);
// Matches backslash sequences that are NOT legal escapes, including a dangling
// trailing backslash; group 3 carries the offending span (see usage below).
private static final String INVALID_ESCAPE_CHARACTERS = "((\\\\\\\\)+|(\\\\([^btnfru\"'\\\\0-7]" +
"|(u[0-f]{0,3}[^0-f]))))|(\\\\(?!.))";
private static final Pattern INVALID_ESCAPE_CHAR_PATTERN = Pattern.compile(INVALID_ESCAPE_CHARACTERS);
// Matches "{name}" path parameters, skipping "={...}" occurrences via the
// negative look-behind; group 2 is the bare parameter name.
private static final String PATH_PARAMETERS = "(?<!=)(\\{(\\w+?)})";
private static final Pattern PATH_PARAMETERS_PATTERN = Pattern.compile(PATH_PARAMETERS);
// IDE entry point, invoked for every PSI element; dispatches to the
// specialized annotate* helper matching the element's (or its parent's) type.
// NOTE: the instanceof chain is order-sensitive (e.g. LeafPsiElement is
// handled before the parent-based cases), so branches must not be reordered.
@Override
public void annotate(@NotNull PsiElement element, @NotNull AnnotationHolder holder) {
PsiElement parent = element.getParent();
if (element instanceof AnnotationReferenceNode) {
annotateNameReferenceNodes(element, holder);
} else if (element instanceof LeafPsiElement) {
annotateLeafPsiElementNodes(element, holder);
} else if (element instanceof ConstantDefinitionNode) {
annotateConstants(element, holder);
} else if (parent instanceof ConstantDefinitionNode) {
annotateConstants(parent, holder);
} else if (element instanceof VariableReferenceNode) {
annotateVariableReferenceNodes((VariableReferenceNode) element, holder);
} else if (element instanceof AnnotationDefinitionNode) {
annotateAnnotationDefinitionNodes((AnnotationDefinitionNode) element, holder);
} else if (element instanceof ImportDeclarationNode) {
annotateImportDeclarations(element, holder);
} else if (element instanceof PackageNameNode) {
annotatePackageNameNodes(element, holder);
} else if (element instanceof GlobalVariableDefinitionNode) {
annotateGlobalVariable(element, holder);
} else if (parent instanceof GlobalVariableDefinitionNode) {
annotateGlobalVariable(parent, holder);
} else if (parent instanceof DocumentationAttachmentNode || parent instanceof DeprecatedTextNode) {
annotateDocumentation(parent, holder);
} else if (element instanceof SingleBackTickDocInlineCodeNode || element instanceof DoubleBackTickInlineCodeNode
|| element instanceof TripleBackTickInlineCodeNode
|| element instanceof SingleBackTickDeprecatedInlineCodeNode
|| element instanceof DoubleBackTickDeprecatedInlineCodeNode
|| element instanceof TripleBackTickDeprecatedInlineCodeNode) {
annotateInlineCode(element, holder);
}
}
// Highlights an annotation reference with the ANNOTATION color.
private void annotateNameReferenceNodes(@NotNull PsiElement element, @NotNull AnnotationHolder holder) {
    holder.createInfoAnnotation(element, null)
            .setTextAttributes(BallerinaSyntaxHighlightingColors.ANNOTATION);
}
/**
 * Decides whether query/path parameters inside the given element may be
 * highlighted. Highlighting applies only inside attribute values of a
 * {@code resourceConfig} annotation attachment.
 *
 * @param element element which needs to be checked
 * @return {@code true} if parameters can be highlighted, {@code false} otherwise.
 */
private boolean canHighlightParameters(@NotNull PsiElement element) {
    // Walk up: element -> attribute -> attachment; then down to the
    // annotation reference to read its identifier.
    AnnotationAttributeNode attributeNode =
            PsiTreeUtil.getParentOfType(element, AnnotationAttributeNode.class);
    if (attributeNode == null) {
        return false;
    }
    AnnotationAttachmentNode attachmentNode =
            PsiTreeUtil.getParentOfType(attributeNode, AnnotationAttachmentNode.class);
    if (attachmentNode == null) {
        return false;
    }
    AnnotationReferenceNode referenceNode =
            PsiTreeUtil.getChildOfType(attachmentNode, AnnotationReferenceNode.class);
    if (referenceNode == null) {
        return false;
    }
    IdentifierPSINode annotationName =
            PsiTreeUtil.getChildOfType(referenceNode, IdentifierPSINode.class);
    return annotationName != null && "resourceConfig".equals(annotationName.getText());
}
// Checks whether a path-parameter name used in a resource path (e.g. "{id}")
// matches a parameter declared in the enclosing resource's signature.
// Two sources are checked, in order:
//   1) parameters annotated @PathParam("value") — compared against the quoted
//      annotation value (quotes stripped);
//   2) un-annotated parameters — compared against the parameter's own name.
// Returns true on the first match, false when no parameter matches.
private boolean isMatchingParamAvailable(@NotNull AnnotationAttributeNode annotationAttributeNode,
@NotNull String value) {
ResourceDefinitionNode resourceDefinitionNode = PsiTreeUtil.getParentOfType(annotationAttributeNode,
ResourceDefinitionNode.class);
if (resourceDefinitionNode == null) {
return false;
}
ParameterListNode parameterListNode = PsiTreeUtil.getChildOfType(resourceDefinitionNode,
ParameterListNode.class);
if (parameterListNode == null) {
return false;
}
ParameterNode[] parameterNodes = PsiTreeUtil.getChildrenOfType(parameterListNode, ParameterNode.class);
if (parameterNodes == null) {
return false;
}
// Pass 1: annotated parameters — only @PathParam annotations qualify.
for (ParameterNode parameterNode : parameterNodes) {
AnnotationAttachmentNode annotationAttachmentNode = PsiTreeUtil.getChildOfType(parameterNode,
AnnotationAttachmentNode.class);
if (annotationAttachmentNode == null) {
continue;
}
AnnotationReferenceNode annotationReferenceNode = PsiTreeUtil.getChildOfType(annotationAttachmentNode,
AnnotationReferenceNode.class);
if (annotationReferenceNode == null) {
continue;
}
PsiElement paramType = annotationReferenceNode.getNameIdentifier();
if (paramType == null) {
continue;
}
if (!"PathParam".equals(paramType.getText())) {
continue;
}
Collection<AnnotationAttributeValueNode> annotationAttributeValueNodes =
PsiTreeUtil.findChildrenOfType(annotationAttachmentNode, AnnotationAttributeValueNode.class);
for (AnnotationAttributeValueNode annotationAttributeValueNode : annotationAttributeValueNodes) {
SimpleLiteralNode simpleLiteralNode = PsiTreeUtil.getChildOfType(annotationAttributeValueNode,
SimpleLiteralNode.class);
if (simpleLiteralNode == null || simpleLiteralNode.getFirstChild() == null) {
continue;
}
PsiElement firstChild = simpleLiteralNode.getFirstChild();
if (!(firstChild instanceof LeafPsiElement)) {
continue;
}
// Only quoted string literal values are considered.
if (((LeafPsiElement) firstChild).getElementType() != BallerinaTypes.QUOTED_STRING) {
continue;
}
String text = firstChild.getText();
// Strip the surrounding quotes before comparing.
text = text.substring(1, text.length() - 1);
if (value.equals(text)) {
return true;
}
}
}
// Pass 2: un-annotated parameters, matched by their declared name.
for (ParameterNode parameterNode : parameterNodes) {
AnnotationAttachmentNode annotationAttachmentNode = PsiTreeUtil.getChildOfType(parameterNode,
AnnotationAttachmentNode.class);
if (annotationAttachmentNode != null) {
continue;
}
PsiElement nameIdentifier = parameterNode.getNameIdentifier();
if (nameIdentifier == null) {
continue;
}
if (value.equals(nameIdentifier.getText())) {
return true;
}
}
return false;
}
// Highlights the name of a constant definition with the CONSTANT color.
// Definitions whose value-type node is missing or empty are skipped.
private void annotateConstants(@NotNull PsiElement element, @NotNull AnnotationHolder holder) {
    ValueTypeNameNode typeNode = PsiTreeUtil.findChildOfType(element, ValueTypeNameNode.class);
    if (typeNode != null && !typeNode.getText().isEmpty()) {
        PsiElement name = ((ConstantDefinitionNode) element).getNameIdentifier();
        if (name != null) {
            holder.createInfoAnnotation(name, null)
                    .setTextAttributes(BallerinaSyntaxHighlightingColors.CONSTANT);
        }
    }
}
// Highlights the name of a global variable definition with the
// GLOBAL_VARIABLE color; skipped when the value-type node is missing or empty.
private void annotateGlobalVariable(@NotNull PsiElement element, @NotNull AnnotationHolder holder) {
    ValueTypeNameNode typeNode = PsiTreeUtil.findChildOfType(element, ValueTypeNameNode.class);
    if (typeNode != null && !typeNode.getText().isEmpty()) {
        PsiElement name = ((GlobalVariableDefinitionNode) element).getNameIdentifier();
        if (name != null) {
            holder.createInfoAnnotation(name, null)
                    .setTextAttributes(BallerinaSyntaxHighlightingColors.GLOBAL_VARIABLE);
        }
    }
}
// Colors an entire documentation/deprecated attachment with DOCUMENTATION.
private void annotateDocumentation(@NotNull PsiElement element, @NotNull AnnotationHolder holder) {
    holder.createInfoAnnotation(element, null)
            .setTextAttributes(BallerinaSyntaxHighlightingColors.DOCUMENTATION);
}
// Highlights inline-code snippets in documentation with
// DOCUMENTATION_INLINE_CODE. For the single-backtick variants the highlighted
// range excludes the first and last character of the element.
private void annotateInlineCode(@NotNull PsiElement element, @NotNull AnnotationHolder holder) {
    boolean singleBackTick = element instanceof SingleBackTickDocInlineCodeNode
            || element instanceof SingleBackTickDeprecatedInlineCodeNode;
    Annotation annotation;
    if (singleBackTick) {
        TextRange range = element.getTextRange();
        TextRange trimmed = new TextRange(range.getStartOffset() + 1, range.getEndOffset() - 1);
        annotation = holder.createInfoAnnotation(trimmed, null);
    } else {
        annotation = holder.createInfoAnnotation(element, null);
    }
    annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.DOCUMENTATION_INLINE_CODE);
}
// Highlights a variable reference that resolves to a constant or global
// variable definition; references without a name identifier are checked for
// the array ".length" special case instead.
private void annotateVariableReferenceNodes(@NotNull VariableReferenceNode element,
@NotNull AnnotationHolder holder) {
PsiElement nameIdentifier = element.getNameIdentifier();
if (nameIdentifier == null) {
// No plain identifier — may still be an array ".length" access.
annotateArrayLengthField(element, holder);
return;
}
PsiReference[] references = nameIdentifier.getReferences();
for (PsiReference reference : references) {
PsiElement resolvedElement = reference.resolve();
if (resolvedElement == null) {
// NOTE(review): this aborts the whole loop on the first unresolved
// reference; if later references could still resolve, `continue`
// may have been intended — confirm.
return;
}
PsiElement parent = resolvedElement.getParent();
if (parent instanceof ConstantDefinitionNode) {
Annotation annotation = holder.createInfoAnnotation(nameIdentifier, null);
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.CONSTANT);
}
if (parent instanceof GlobalVariableDefinitionNode) {
Annotation annotation = holder.createInfoAnnotation(nameIdentifier, null);
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.GLOBAL_VARIABLE);
}
}
}
// Highlights the trailing ".length" of an array variable reference with the
// STATIC_FIELD color. Applies only when the receiver resolves to a local
// variable, parameter, or global variable that is an array definition.
private void annotateArrayLengthField(@NotNull VariableReferenceNode element, @NotNull AnnotationHolder holder) {
PsiElement lastChild = element.getLastChild();
if (lastChild == null) {
return;
}
String text = lastChild.getText();
// Only the exact ".length" suffix qualifies.
if (!".length".equals(text)) {
return;
}
PsiElement firstChild = element.getFirstChild();
if (firstChild == null) {
return;
}
PsiFile containingFile = element.getContainingFile();
if (containingFile == null) {
return;
}
// Resolve the receiver through the file's reference at its offset.
PsiReference reference = containingFile.findReferenceAt(firstChild.getTextOffset());
if (reference == null) {
return;
}
PsiElement resolvedElement = reference.resolve();
if (resolvedElement == null) {
return;
}
PsiElement parent = resolvedElement.getParent();
if (!(parent instanceof VariableDefinitionNode || parent instanceof ParameterNode
|| parent instanceof GlobalVariableDefinitionNode)) {
return;
}
boolean isArrayDefinition = BallerinaPsiImplUtil.isArrayDefinition(parent);
if (isArrayDefinition) {
Annotation annotation = holder.createInfoAnnotation(lastChild, null);
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.STATIC_FIELD);
}
}
// Highlights the name of an annotation definition with the ANNOTATION color.
private void annotateAnnotationDefinitionNodes(@NotNull AnnotationDefinitionNode element,
                                               @NotNull AnnotationHolder holder) {
    PsiElement name = element.getNameIdentifier();
    if (name != null) {
        holder.createInfoAnnotation(name, null)
                .setTextAttributes(BallerinaSyntaxHighlightingColors.ANNOTATION);
    }
}
/**
 * Highlights the effective package name of an import declaration with the
 * PACKAGE color: the alias when one is declared, otherwise the last component
 * of the imported package path.
 */
private void annotateImportDeclarations(@NotNull PsiElement element, @NotNull AnnotationHolder holder) {
    AliasNode aliasNode = PsiTreeUtil.findChildOfType(element, AliasNode.class);
    if (aliasNode != null) {
        Annotation annotation = holder.createInfoAnnotation(aliasNode.getTextRange(), null);
        annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.PACKAGE);
        return;
    }
    Collection<PackageNameNode> packageNameNodes = PsiTreeUtil.findChildrenOfType(element,
            PackageNameNode.class);
    // Idiom fix: iterate to the last element instead of copying the collection
    // with toArray() and performing an unchecked cast on the final slot.
    PackageNameNode lastPackageName = null;
    for (PackageNameNode packageNameNode : packageNameNodes) {
        lastPackageName = packageNameNode;
    }
    if (lastPackageName != null) {
        Annotation annotation = holder.createInfoAnnotation(lastPackageName.getTextRange(), null);
        annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.PACKAGE);
    }
}
// Highlights a package-name reference with the PACKAGE color, except when it
// sits inside an import declaration, a package declaration, an annotation
// attachment, or an XML attribute — those contexts are handled elsewhere.
private void annotatePackageNameNodes(@NotNull PsiElement element, @NotNull AnnotationHolder holder) {
    boolean excludedContext =
            PsiTreeUtil.getParentOfType(element, ImportDeclarationNode.class) != null
            || PsiTreeUtil.getParentOfType(element, PackageDeclarationNode.class) != null
            || PsiTreeUtil.getParentOfType(element, AnnotationAttachmentNode.class) != null
            || PsiTreeUtil.getParentOfType(element, XmlAttribNode.class) != null;
    if (!excludedContext) {
        holder.createInfoAnnotation(element.getTextRange(), null)
                .setTextAttributes(BallerinaSyntaxHighlightingColors.PACKAGE);
    }
}
/**
 * Highlights the element (excluding its final character) with the default
 * KEYWORD color. DRY fix: delegates to the configurable overload below so the
 * range arithmetic lives in exactly one place.
 */
private void annotateKeyword(@NotNull PsiElement element, @NotNull AnnotationHolder holder) {
    annotateKeyword(element, holder, BallerinaSyntaxHighlightingColors.KEYWORD);
}
// Highlights the element with the given attributes, excluding the final
// character of its text range — presumably a trailing delimiter in the token;
// confirm against the lexer definitions.
private void annotateKeyword(@NotNull PsiElement element, @NotNull AnnotationHolder holder,
@NotNull TextAttributesKey textAttributesKey) {
TextRange textRange = element.getTextRange();
// Drop the last character from the highlighted range.
TextRange newTextRange = new TextRange(textRange.getStartOffset(), textRange.getEndOffset() - 1);
Annotation annotation = holder.createInfoAnnotation(newTextRange, null);
annotation.setTextAttributes(textAttributesKey);
}
// Colors the last two characters of a template-expression start token with
// the template-language color, and any preceding characters of the token as a
// plain string.
// NOTE(review): the fixed two-character suffix width is assumed from the token
// shape — confirm against the lexer definition of the expression-start token.
private void annotateExpressionTemplateStart(@NotNull PsiElement element, @NotNull AnnotationHolder holder) {
TextRange textRange = element.getTextRange();
TextRange newTextRange = new TextRange(textRange.getEndOffset() - 2, textRange.getEndOffset());
Annotation annotation = holder.createInfoAnnotation(newTextRange, null);
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.TEMPLATE_LANGUAGE_COLOR);
// Only color the prefix when the token has characters before the 2-char suffix.
if (textRange.getEndOffset() - 2 > textRange.getStartOffset()) {
newTextRange = new TextRange(textRange.getStartOffset(), textRange.getEndOffset() - 2);
annotation = holder.createInfoAnnotation(newTextRange, null);
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.STRING);
}
}
// Colors a template-expression end token with the template-language color.
private void annotateStringLiteralTemplateEnd(@NotNull PsiElement element, @NotNull AnnotationHolder holder) {
    holder.createInfoAnnotation(element, null)
            .setTextAttributes(BallerinaSyntaxHighlightingColors.TEMPLATE_LANGUAGE_COLOR);
}
// Colors literal text inside string/XML templates with the STRING color.
private void annotateText(@NotNull PsiElement element, @NotNull AnnotationHolder holder) {
    holder.createInfoAnnotation(element, null)
            .setTextAttributes(BallerinaSyntaxHighlightingColors.STRING);
}
} | class BallerinaAnnotator implements Annotator {
private static final String VALID_ESCAPE_CHARACTERS = "\\\\[btnfr\"'\\\\]|\\\\u[0-f]{4}|\\\\[0-3][0-7]{2}" +
"|\\\\[0-7]{1,2}";
private static final Pattern VALID_ESCAPE_CHAR_PATTERN = Pattern.compile(VALID_ESCAPE_CHARACTERS);
private static final String INVALID_ESCAPE_CHARACTERS = "((\\\\\\\\)+|(\\\\([^btnfru\"'\\\\0-7]" +
"|(u[0-f]{0,3}[^0-f]))))|(\\\\(?!.))";
private static final Pattern INVALID_ESCAPE_CHAR_PATTERN = Pattern.compile(INVALID_ESCAPE_CHARACTERS);
private static final String PATH_PARAMETERS = "(?<!=)(\\{(\\w+?)})";
private static final Pattern PATH_PARAMETERS_PATTERN = Pattern.compile(PATH_PARAMETERS);
@Override
public void annotate(@NotNull PsiElement element, @NotNull AnnotationHolder holder) {
PsiElement parent = element.getParent();
if (element instanceof AnnotationReferenceNode) {
annotateNameReferenceNodes(element, holder);
} else if (element instanceof LeafPsiElement) {
annotateLeafPsiElementNodes(element, holder);
} else if (element instanceof ConstantDefinitionNode) {
annotateConstants(element, holder);
} else if (parent instanceof ConstantDefinitionNode) {
annotateConstants(parent, holder);
} else if (element instanceof VariableReferenceNode) {
annotateVariableReferenceNodes((VariableReferenceNode) element, holder);
} else if (element instanceof AnnotationDefinitionNode) {
annotateAnnotationDefinitionNodes((AnnotationDefinitionNode) element, holder);
} else if (element instanceof ImportDeclarationNode) {
annotateImportDeclarations(element, holder);
} else if (element instanceof PackageNameNode) {
annotatePackageNameNodes(element, holder);
} else if (element instanceof GlobalVariableDefinitionNode) {
annotateGlobalVariable(element, holder);
} else if (parent instanceof GlobalVariableDefinitionNode) {
annotateGlobalVariable(parent, holder);
} else if (parent instanceof DocumentationAttachmentNode || parent instanceof DeprecatedTextNode) {
annotateDocumentation(parent, holder);
} else if (element instanceof SingleBackTickDocInlineCodeNode || element instanceof DoubleBackTickInlineCodeNode
|| element instanceof TripleBackTickInlineCodeNode
|| element instanceof SingleBackTickDeprecatedInlineCodeNode
|| element instanceof DoubleBackTickDeprecatedInlineCodeNode
|| element instanceof TripleBackTickDeprecatedInlineCodeNode) {
annotateInlineCode(element, holder);
}
}
private void annotateNameReferenceNodes(@NotNull PsiElement element, @NotNull AnnotationHolder holder) {
Annotation annotation = holder.createInfoAnnotation(element, null);
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.ANNOTATION);
}
/**
* Checks whether the query,path parameters can be highlighted in the given element.
*
* @param element element which needs to be checked
* @return {@code true} if parameters can be highlighted, {@code false} otherwise.
*/
private boolean canHighlightParameters(@NotNull PsiElement element) {
AnnotationAttributeNode annotationAttributeNode = PsiTreeUtil.getParentOfType(element,
AnnotationAttributeNode.class);
if (annotationAttributeNode == null) {
return false;
}
AnnotationAttachmentNode annotationAttachmentNode = PsiTreeUtil.getParentOfType(annotationAttributeNode,
AnnotationAttachmentNode.class);
if (annotationAttachmentNode == null) {
return false;
}
AnnotationReferenceNode annotationReferenceNode = PsiTreeUtil.getChildOfType(annotationAttachmentNode,
AnnotationReferenceNode.class);
if (annotationReferenceNode == null) {
return false;
}
IdentifierPSINode annotationName = PsiTreeUtil.getChildOfType(annotationReferenceNode, IdentifierPSINode.class);
return annotationName != null && "resourceConfig".equals(annotationName.getText());
}
private boolean isMatchingParamAvailable(@NotNull AnnotationAttributeNode annotationAttributeNode,
@NotNull String value) {
ResourceDefinitionNode resourceDefinitionNode = PsiTreeUtil.getParentOfType(annotationAttributeNode,
ResourceDefinitionNode.class);
if (resourceDefinitionNode == null) {
return false;
}
ParameterListNode parameterListNode = PsiTreeUtil.getChildOfType(resourceDefinitionNode,
ParameterListNode.class);
if (parameterListNode == null) {
return false;
}
ParameterNode[] parameterNodes = PsiTreeUtil.getChildrenOfType(parameterListNode, ParameterNode.class);
if (parameterNodes == null) {
return false;
}
for (ParameterNode parameterNode : parameterNodes) {
AnnotationAttachmentNode annotationAttachmentNode = PsiTreeUtil.getChildOfType(parameterNode,
AnnotationAttachmentNode.class);
if (annotationAttachmentNode == null) {
continue;
}
AnnotationReferenceNode annotationReferenceNode = PsiTreeUtil.getChildOfType(annotationAttachmentNode,
AnnotationReferenceNode.class);
if (annotationReferenceNode == null) {
continue;
}
PsiElement paramType = annotationReferenceNode.getNameIdentifier();
if (paramType == null) {
continue;
}
if (!"PathParam".equals(paramType.getText())) {
continue;
}
Collection<AnnotationAttributeValueNode> annotationAttributeValueNodes =
PsiTreeUtil.findChildrenOfType(annotationAttachmentNode, AnnotationAttributeValueNode.class);
for (AnnotationAttributeValueNode annotationAttributeValueNode : annotationAttributeValueNodes) {
SimpleLiteralNode simpleLiteralNode = PsiTreeUtil.getChildOfType(annotationAttributeValueNode,
SimpleLiteralNode.class);
if (simpleLiteralNode == null || simpleLiteralNode.getFirstChild() == null) {
continue;
}
PsiElement firstChild = simpleLiteralNode.getFirstChild();
if (!(firstChild instanceof LeafPsiElement)) {
continue;
}
if (((LeafPsiElement) firstChild).getElementType() != BallerinaTypes.QUOTED_STRING) {
continue;
}
String text = firstChild.getText();
text = text.substring(1, text.length() - 1);
if (value.equals(text)) {
return true;
}
}
}
for (ParameterNode parameterNode : parameterNodes) {
AnnotationAttachmentNode annotationAttachmentNode = PsiTreeUtil.getChildOfType(parameterNode,
AnnotationAttachmentNode.class);
if (annotationAttachmentNode != null) {
continue;
}
PsiElement nameIdentifier = parameterNode.getNameIdentifier();
if (nameIdentifier == null) {
continue;
}
if (value.equals(nameIdentifier.getText())) {
return true;
}
}
return false;
}
private void annotateConstants(@NotNull PsiElement element, @NotNull AnnotationHolder holder) {
ValueTypeNameNode valueTypeNameNode = PsiTreeUtil.findChildOfType(element, ValueTypeNameNode.class);
if (valueTypeNameNode == null || valueTypeNameNode.getText().isEmpty()) {
return;
}
PsiElement nameIdentifier = ((ConstantDefinitionNode) element).getNameIdentifier();
if (nameIdentifier == null) {
return;
}
Annotation annotation = holder.createInfoAnnotation(nameIdentifier, null);
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.CONSTANT);
}
private void annotateGlobalVariable(@NotNull PsiElement element, @NotNull AnnotationHolder holder) {
ValueTypeNameNode valueTypeNameNode = PsiTreeUtil.findChildOfType(element, ValueTypeNameNode.class);
if (valueTypeNameNode == null || valueTypeNameNode.getText().isEmpty()) {
return;
}
PsiElement nameIdentifier = ((GlobalVariableDefinitionNode) element).getNameIdentifier();
if (nameIdentifier == null) {
return;
}
Annotation annotation = holder.createInfoAnnotation(nameIdentifier, null);
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.GLOBAL_VARIABLE);
}
private void annotateDocumentation(@NotNull PsiElement element, @NotNull AnnotationHolder holder) {
Annotation annotation = holder.createInfoAnnotation(element, null);
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.DOCUMENTATION);
}
private void annotateInlineCode(@NotNull PsiElement element, @NotNull AnnotationHolder holder) {
Annotation annotation;
if (element instanceof SingleBackTickDocInlineCodeNode
|| element instanceof SingleBackTickDeprecatedInlineCodeNode) {
TextRange currentTextRange = element.getTextRange();
TextRange newTextRange = new TextRange(currentTextRange.getStartOffset() + 1,
currentTextRange.getEndOffset() - 1);
annotation = holder.createInfoAnnotation(newTextRange, null);
} else {
annotation = holder.createInfoAnnotation(element, null);
}
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.DOCUMENTATION_INLINE_CODE);
}
private void annotateVariableReferenceNodes(@NotNull VariableReferenceNode element,
@NotNull AnnotationHolder holder) {
PsiElement nameIdentifier = element.getNameIdentifier();
if (nameIdentifier == null) {
annotateArrayLengthField(element, holder);
return;
}
PsiReference[] references = nameIdentifier.getReferences();
for (PsiReference reference : references) {
PsiElement resolvedElement = reference.resolve();
if (resolvedElement == null) {
return;
}
PsiElement parent = resolvedElement.getParent();
if (parent instanceof ConstantDefinitionNode) {
Annotation annotation = holder.createInfoAnnotation(nameIdentifier, null);
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.CONSTANT);
}
if (parent instanceof GlobalVariableDefinitionNode) {
Annotation annotation = holder.createInfoAnnotation(nameIdentifier, null);
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.GLOBAL_VARIABLE);
}
}
}
private void annotateArrayLengthField(@NotNull VariableReferenceNode element, @NotNull AnnotationHolder holder) {
PsiElement lastChild = element.getLastChild();
if (lastChild == null) {
return;
}
String text = lastChild.getText();
if (!".length".equals(text)) {
return;
}
PsiElement firstChild = element.getFirstChild();
if (firstChild == null) {
return;
}
PsiFile containingFile = element.getContainingFile();
if (containingFile == null) {
return;
}
PsiReference reference = containingFile.findReferenceAt(firstChild.getTextOffset());
if (reference == null) {
return;
}
PsiElement resolvedElement = reference.resolve();
if (resolvedElement == null) {
return;
}
PsiElement parent = resolvedElement.getParent();
if (!(parent instanceof VariableDefinitionNode || parent instanceof ParameterNode
|| parent instanceof GlobalVariableDefinitionNode)) {
return;
}
boolean isArrayDefinition = BallerinaPsiImplUtil.isArrayDefinition(parent);
if (isArrayDefinition) {
Annotation annotation = holder.createInfoAnnotation(lastChild, null);
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.STATIC_FIELD);
}
}
private void annotateAnnotationDefinitionNodes(@NotNull AnnotationDefinitionNode element,
@NotNull AnnotationHolder holder) {
PsiElement nameIdentifier = element.getNameIdentifier();
if (nameIdentifier == null) {
return;
}
Annotation annotation = holder.createInfoAnnotation(nameIdentifier, null);
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.ANNOTATION);
}
private void annotateImportDeclarations(@NotNull PsiElement element, @NotNull AnnotationHolder holder) {
AliasNode aliasNode = PsiTreeUtil.findChildOfType(element, AliasNode.class);
if (aliasNode != null) {
Annotation annotation = holder.createInfoAnnotation(aliasNode.getTextRange(), null);
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.PACKAGE);
} else {
Collection<PackageNameNode> packageNameNodes = PsiTreeUtil.findChildrenOfType(element,
PackageNameNode.class);
if (!packageNameNodes.isEmpty()) {
PackageNameNode lastPackageName =
(PackageNameNode) packageNameNodes.toArray()[packageNameNodes.size() - 1];
Annotation annotation = holder.createInfoAnnotation(lastPackageName.getTextRange(), null);
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.PACKAGE);
}
}
}
private void annotatePackageNameNodes(@NotNull PsiElement element, @NotNull AnnotationHolder holder) {
ImportDeclarationNode importDeclarationNode = PsiTreeUtil.getParentOfType(element, ImportDeclarationNode.class);
if (importDeclarationNode != null) {
return;
}
PackageDeclarationNode packageDeclarationNode = PsiTreeUtil.getParentOfType(element,
PackageDeclarationNode.class);
if (packageDeclarationNode != null) {
return;
}
AnnotationAttachmentNode annotationAttachmentNode = PsiTreeUtil.getParentOfType(element,
AnnotationAttachmentNode.class);
if (annotationAttachmentNode != null) {
return;
}
XmlAttribNode xmlAttribNode = PsiTreeUtil.getParentOfType(element, XmlAttribNode.class);
if (xmlAttribNode != null) {
return;
}
Annotation annotation = holder.createInfoAnnotation(element.getTextRange(), null);
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.PACKAGE);
}
private void annotateKeyword(@NotNull PsiElement element, @NotNull AnnotationHolder holder) {
TextRange textRange = element.getTextRange();
TextRange newTextRange = new TextRange(textRange.getStartOffset(), textRange.getEndOffset() - 1);
Annotation annotation = holder.createInfoAnnotation(newTextRange, null);
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.KEYWORD);
}
private void annotateKeyword(@NotNull PsiElement element, @NotNull AnnotationHolder holder,
@NotNull TextAttributesKey textAttributesKey) {
TextRange textRange = element.getTextRange();
TextRange newTextRange = new TextRange(textRange.getStartOffset(), textRange.getEndOffset() - 1);
Annotation annotation = holder.createInfoAnnotation(newTextRange, null);
annotation.setTextAttributes(textAttributesKey);
}
private void annotateExpressionTemplateStart(@NotNull PsiElement element, @NotNull AnnotationHolder holder) {
TextRange textRange = element.getTextRange();
TextRange newTextRange = new TextRange(textRange.getEndOffset() - 2, textRange.getEndOffset());
Annotation annotation = holder.createInfoAnnotation(newTextRange, null);
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.TEMPLATE_LANGUAGE_COLOR);
if (textRange.getEndOffset() - 2 > textRange.getStartOffset()) {
newTextRange = new TextRange(textRange.getStartOffset(), textRange.getEndOffset() - 2);
annotation = holder.createInfoAnnotation(newTextRange, null);
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.STRING);
}
}
private void annotateStringLiteralTemplateEnd(@NotNull PsiElement element, @NotNull AnnotationHolder holder) {
Annotation annotation = holder.createInfoAnnotation(element, null);
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.TEMPLATE_LANGUAGE_COLOR);
}
private void annotateText(@NotNull PsiElement element, @NotNull AnnotationHolder holder) {
Annotation annotation = holder.createInfoAnnotation(element, null);
annotation.setTextAttributes(BallerinaSyntaxHighlightingColors.STRING);
}
} |
Why the model name has Schema, can we call it PackageSearchResult etc | private static void searchInCentral(String query) {
try {
CentralAPIClient client = new CentralAPIClient();
PackageSearchJsonSchema packageSearchJsonSchema = client.searchPackage(query);
if (packageSearchJsonSchema.getCount() > 0) {
printPackages(packageSearchJsonSchema.getPackages(), RepoUtils.getTerminalWidth());
} else {
outStream.println("no modules found");
}
} catch (CommandException e) {
String errorMessage = e.getMessage();
if (null != errorMessage && !"".equals(errorMessage.trim())) {
if (errorMessage.contains("\n\tat")) {
errorMessage = errorMessage.substring(0, errorMessage.indexOf("\n\tat"));
}
outStream.println(errorMessage);
}
}
} | PackageSearchJsonSchema packageSearchJsonSchema = client.searchPackage(query); | private static void searchInCentral(String query) {
try {
CentralAPIClient client = new CentralAPIClient();
PackageSearchResult packageSearchResult = client.searchPackage(query);
if (packageSearchResult.getCount() > 0) {
printPackages(packageSearchResult.getPackages(), RepoUtils.getTerminalWidth());
} else {
outStream.println("no modules found");
}
} catch (CentralClientException e) {
String errorMessage = e.getMessage();
if (null != errorMessage && !"".equals(errorMessage.trim())) {
if (errorMessage.contains("\n\tat")) {
errorMessage = errorMessage.substring(0, errorMessage.indexOf("\n\tat"));
}
outStream.println(errorMessage);
}
}
} | class SearchCommand implements BLauncherCmd {
private static PrintStream outStream = System.err;
@CommandLine.Parameters
private List<String> argList;
@CommandLine.Option(names = {"--help", "-h"}, hidden = true)
private boolean helpFlag;
@CommandLine.Option(names = "--debug", hidden = true)
private String debugPort;
@Override
public void execute() {
if (helpFlag) {
String commandUsageInfo = BLauncherCmd.getCommandUsageInfo(SEARCH_COMMAND);
outStream.println(commandUsageInfo);
return;
}
if (null != debugPort) {
System.setProperty(SYSTEM_PROP_BAL_DEBUG, debugPort);
}
if (argList == null || argList.isEmpty()) {
throw createUsageExceptionWithHelp("no keyword given");
}
if (argList.size() > 1) {
throw createUsageExceptionWithHelp("too many arguments");
}
String searchArgs = argList.get(0);
searchInCentral(searchArgs);
Runtime.getRuntime().exit(0);
}
@Override
public String getName() {
return SEARCH_COMMAND;
}
@Override
public void printLongDesc(StringBuilder out) {
out.append("searches for packages within Ballerina Central \n");
}
@Override
public void printUsage(StringBuilder out) {
out.append(" ballerina search [<org>|<package>|<text>] \n");
}
@Override
public void setParentCmdParser(CommandLine parentCmdParser) {
throw new UnsupportedOperationException();
}
/**
* Search for packages in central.
*
* @param query search keyword.
*/
} | class SearchCommand implements BLauncherCmd {
private static PrintStream outStream = System.err;
@CommandLine.Parameters
private List<String> argList;
@CommandLine.Option(names = {"--help", "-h"}, hidden = true)
private boolean helpFlag;
@CommandLine.Option(names = "--debug", hidden = true)
private String debugPort;
@Override
public void execute() {
if (helpFlag) {
String commandUsageInfo = BLauncherCmd.getCommandUsageInfo(SEARCH_COMMAND);
outStream.println(commandUsageInfo);
return;
}
if (null != debugPort) {
System.setProperty(SYSTEM_PROP_BAL_DEBUG, debugPort);
}
if (argList == null || argList.isEmpty()) {
throw createUsageExceptionWithHelp("no keyword given");
}
if (argList.size() > 1) {
throw createUsageExceptionWithHelp("too many arguments");
}
String searchArgs = argList.get(0);
searchInCentral(searchArgs);
Runtime.getRuntime().exit(0);
}
@Override
public String getName() {
return SEARCH_COMMAND;
}
@Override
public void printLongDesc(StringBuilder out) {
out.append("searches for packages within Ballerina Central \n");
}
@Override
public void printUsage(StringBuilder out) {
out.append(" ballerina search [<org>|<package>|<text>] \n");
}
@Override
public void setParentCmdParser(CommandLine parentCmdParser) {
}
/**
* Search for packages in central.
*
* @param query search keyword.
*/
} |
It's removed. `tests/jballerina-unit-test/src/test/resources/test-src/query/group_by_clause_negative.bal` L74:82 | public void testNegativeCases() {
int i = 0;
BAssertUtil.validateError(negativeResult, i++, "incompatible types: expected '(any|error)[]', found 'int'",
23, 37);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element list " +
"constructor or function invocation", 29, 24);
BAssertUtil.validateError(negativeResult, i++, "incompatible types: expected 'int', found 'seq int'", 32, 28);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element list " +
"constructor or function invocation", 32, 28);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element list " +
"constructor or function invocation", 35, 25);
BAssertUtil.validateError(negativeResult, i++, "incompatible types: expected 'seq int', found 'seq int'",
36, 20);
BAssertUtil.validateError(negativeResult, i++, "operator '+' not defined for 'seq int' and 'int'", 39, 20);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element list " +
"constructor or function invocation", 39, 20);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element list " +
"constructor or function invocation", 42, 25);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element list " +
"constructor or function invocation", 42, 33);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element list " +
"constructor or function invocation", 45, 26);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element " +
"list constructor or function invocation", 52, 39);
BAssertUtil.validateError(negativeResult, i++, "incompatible types: expected '[int,int...]', " +
"found '[int...]'", 60, 39);
BAssertUtil.validateError(negativeResult, i++, "incompatible types: expected '[int[]]', found '[int...]'",
65, 33);
BAssertUtil.validateError(negativeResult, i++, "incompatible types: expected '[int,int]', found '[int...]'",
70, 36);
BAssertUtil.validateError(negativeResult, i++, "invalid operation: type " +
"'seq record {| string name; int price1; |}' does not support field access", 87, 29);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element " +
"list constructor or function invocation", 87, 29);
BAssertUtil.validateError(negativeResult, i++, "incompatible types: expected 'int[]', found 'seq int?'",
108, 32);
BAssertUtil.validateError(negativeResult, i++, "operator '+' not defined for 'seq int' and 'int'", 116, 40);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element" +
" list constructor or function invocation", 116, 40);
BAssertUtil.validateError(negativeResult, i++, "operator '+' not defined for 'seq int' and 'int'", 120, 33);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element" +
" list constructor or function invocation", 120, 33);
BAssertUtil.validateError(negativeResult, i++, "operator '+' not defined for 'seq int' and 'seq int'",
133, 36);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element " +
"list constructor or function invocation", 133, 36);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element " +
"list constructor or function invocation", 133, 45);
BAssertUtil.validateError(negativeResult, i++, "operator '+' not defined for 'seq int' and 'seq int'",
136, 28);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element " +
"list constructor or function invocation", 136, 28);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element " +
"list constructor or function invocation", 136, 37);
BAssertUtil.validateError(negativeResult, i++, "operator '+' not defined for 'seq int' and 'seq int'",
139, 28);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element " +
"list constructor or function invocation", 139, 28);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element " +
"list constructor or function invocation", 139, 37);
BAssertUtil.validateError(negativeResult, i++, "operator '+' not defined for 'seq int' and 'seq int'",
142, 28);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element " +
"list constructor or function invocation", 142, 28);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element " +
"list constructor or function invocation", 142, 37);
BAssertUtil.validateError(negativeResult, i++, "operator '+' not defined for 'seq int' and 'seq int'",
145, 26);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element " +
"list constructor or function invocation", 145, 26);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element " +
"list constructor or function invocation", 145, 35);
BAssertUtil.validateError(negativeResult, i++, "arguments not allowed after seq argument", 148, 36);
BAssertUtil.validateError(negativeResult, i++, "arguments not allowed after rest argument", 151, 39);
BAssertUtil.validateError(negativeResult, i++, "arguments not allowed after seq argument", 154, 37);
BAssertUtil.validateError(negativeResult, i++, "arguments not allowed after seq argument", 154, 40);
BAssertUtil.validateError(negativeResult, i++, "incompatible types: expected '(any|error)[]', found 'int'",
157, 37);
BAssertUtil.validateError(negativeResult, i++, "incompatible types: expected " +
"'([int,int...]|record {| int n; |})', found '[int...]'", 167, 43);
BAssertUtil.validateError(negativeResult, i++, "invalid grouping key type 'error', expected a subtype of " +
"'anydata'", 175, 26);
BAssertUtil.validateError(negativeResult, i++, "invalid grouping key type 'error', expected a subtype of " +
"'anydata'", 178, 26);
BAssertUtil.validateError(negativeResult, i++, "incompatible types: expected 'string', found 'seq string'",
200, 24);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element " +
"list constructor or function invocation", 200, 24);
Assert.assertEquals(negativeResult.getErrorCount(), i);
} | BAssertUtil.validateError(negativeResult, i++, "invalid operation: type " + | public void testNegativeCases() {
int i = 0;
BAssertUtil.validateError(negativeResult, i++, "incompatible types: expected '(any|error)[]', found 'int'",
23, 37);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element list " +
"constructor or function invocation", 29, 24);
BAssertUtil.validateError(negativeResult, i++, "incompatible types: expected 'int', found 'seq int'", 32, 28);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element list " +
"constructor or function invocation", 32, 28);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element list " +
"constructor or function invocation", 35, 25);
BAssertUtil.validateError(negativeResult, i++, "incompatible types: expected 'seq int', found 'seq int'",
36, 20);
BAssertUtil.validateError(negativeResult, i++, "operator '+' not defined for 'seq int' and 'int'", 39, 20);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element list " +
"constructor or function invocation", 39, 20);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element list " +
"constructor or function invocation", 42, 25);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element list " +
"constructor or function invocation", 42, 33);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element list " +
"constructor or function invocation", 45, 26);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element " +
"list constructor or function invocation", 52, 39);
BAssertUtil.validateError(negativeResult, i++, "incompatible types: expected '[int,int...]', " +
"found '[int...]'", 60, 39);
BAssertUtil.validateError(negativeResult, i++, "incompatible types: expected '[int[]]', found '[int...]'",
65, 33);
BAssertUtil.validateError(negativeResult, i++, "incompatible types: expected '[int,int]', found '[int...]'",
70, 36);
BAssertUtil.validateError(negativeResult, i++, "invalid operation: type " +
"'seq record {| string name; int price1; |}' does not support field access", 78, 29);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element " +
"list constructor or function invocation", 78, 29);
BAssertUtil.validateError(negativeResult, i++, "incompatible types: expected 'int[]', found 'seq int?'",
99, 32);
BAssertUtil.validateError(negativeResult, i++, "operator '+' not defined for 'seq int' and 'int'", 107, 40);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element" +
" list constructor or function invocation", 107, 40);
BAssertUtil.validateError(negativeResult, i++, "operator '+' not defined for 'seq int' and 'int'", 111, 33);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element" +
" list constructor or function invocation", 111, 33);
BAssertUtil.validateError(negativeResult, i++, "operator '+' not defined for 'seq int' and 'seq int'",
124, 36);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element " +
"list constructor or function invocation", 124, 36);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element " +
"list constructor or function invocation", 124, 45);
BAssertUtil.validateError(negativeResult, i++, "operator '+' not defined for 'seq int' and 'seq int'",
127, 28);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element " +
"list constructor or function invocation", 127, 28);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element " +
"list constructor or function invocation", 127, 37);
BAssertUtil.validateError(negativeResult, i++, "operator '+' not defined for 'seq int' and 'seq int'",
130, 28);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element " +
"list constructor or function invocation", 130, 28);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element " +
"list constructor or function invocation", 130, 37);
BAssertUtil.validateError(negativeResult, i++, "operator '+' not defined for 'seq int' and 'seq int'",
133, 28);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element " +
"list constructor or function invocation", 133, 28);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element " +
"list constructor or function invocation", 133, 37);
BAssertUtil.validateError(negativeResult, i++, "operator '+' not defined for 'seq int' and 'seq int'",
136, 26);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element " +
"list constructor or function invocation", 136, 26);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element " +
"list constructor or function invocation", 136, 35);
BAssertUtil.validateError(negativeResult, i++, "arguments not allowed after seq argument", 139, 36);
BAssertUtil.validateError(negativeResult, i++, "arguments not allowed after rest argument", 142, 39);
BAssertUtil.validateError(negativeResult, i++, "arguments not allowed after seq argument", 145, 37);
BAssertUtil.validateError(negativeResult, i++, "arguments not allowed after seq argument", 145, 40);
BAssertUtil.validateError(negativeResult, i++, "incompatible types: expected '(any|error)[]', found 'int'",
148, 37);
BAssertUtil.validateError(negativeResult, i++, "incompatible types: expected " +
"'([int,int...]|record {| int n; |})', found '[int...]'", 158, 43);
BAssertUtil.validateError(negativeResult, i++, "invalid grouping key type 'error', expected a subtype of " +
"'anydata'", 166, 26);
BAssertUtil.validateError(negativeResult, i++, "invalid grouping key type 'error', expected a subtype of " +
"'anydata'", 169, 26);
BAssertUtil.validateError(negativeResult, i++, "incompatible types: expected 'string', found 'seq string'",
191, 24);
BAssertUtil.validateError(negativeResult, i++, "sequence variable can be used in a single element " +
"list constructor or function invocation", 191, 24);
Assert.assertEquals(negativeResult.getErrorCount(), i);
} | class GroupByClauseTest {
private CompileResult resultWithListCtr;
private CompileResult resultWithInvocation;
private CompileResult negativeResult;
private CompileResult negativeSemanticResult;
@BeforeClass
public void setup() {
resultWithListCtr = BCompileUtil.compile("test-src/query/group_by_clause_with_list_ctr.bal");
resultWithInvocation = BCompileUtil.compile("test-src/query/group_by_clause_with_invocation.bal");
negativeResult = BCompileUtil.compile("test-src/query/group_by_clause_negative.bal");
negativeSemanticResult = BCompileUtil.compile("test-src/query/group_by_clause_negative_semantic.bal");
}
@Test(dataProvider = "dataToTestGroupByClauseWithListCtr")
public void testGroupByClauseWithListCtr(String functionName) {
BRunUtil.invoke(resultWithListCtr, functionName);
}
@DataProvider
public Object[] dataToTestGroupByClauseWithListCtr() {
return new Object[] {
"testGroupByExpressionAndSelectWithGroupingKeys1",
"testGroupByExpressionAndSelectWithGroupingKeys2",
"testGroupByExpressionAndSelectWithGroupingKeys3",
"testGroupByExpressionAndSelectWithGroupingKeys4",
"testGroupByExpressionAndSelectWithGroupingKeys5",
"testGroupByExpressionAndSelectWithGroupingKeys6",
"testGroupByExpressionAndSelectWithGroupingKeys7",
"testGroupByExpressionAndSelectWithGroupingKeys8",
"testGroupByExpressionAndSelectWithGroupingKeys9",
"testGroupByExpressionAndSelectWithGroupingKeys12",
"testGroupByExpressionAndSelectWithGroupingKeysAndWhereClause1",
"testGroupByExpressionAndSelectWithGroupingKeysAndWhereClause2",
"testGroupByExpressionAndSelectWithGroupingKeysAndWhereClause3",
"testGroupByExpressionAndSelectWithGroupingKeysAndWhereClause4",
"testGroupByExpressionAndSelectWithGroupingKeysAndWhereClause5",
"testGroupByExpressionAndSelectWithGroupingKeysAndWhereClause6",
"testGroupByExpressionAndSelectWithGroupingKeysAndWhereClause7",
"testGroupByExpressionAndSelectWithGroupingKeysAndWhereClause8",
"testGroupByExpressionAndSelectWithGroupingKeysAndWhereClause9",
"testGroupByExpressionAndSelectWithGroupingKeysFromClause1",
"testGroupByExpressionAndSelectWithGroupingKeysFromClause2",
"testGroupByExpressionAndSelectWithGroupingKeysWithJoinClause1",
"testGroupByExpressionAndSelectWithGroupingKeysWithJoinClause2",
"testGroupByExpressionAndSelectWithGroupingKeysWithJoinClause3",
"testGroupByExpressionAndSelectWithGroupingKeysWithJoinClause4",
"testGroupByExpressionAndSelectWithGroupingKeysWithJoinClause5",
"testGroupByExpressionAndSelectWithGroupingKeysWithOrderbyClause1",
"testGroupByExpressionAndSelectWithGroupingKeysWithOrderbyClause2",
"testGroupByExpressionAndSelectWithGroupingKeysWithOrderbyClause3",
"testGroupByExpressionAndSelectWithGroupingKeysWithLimitClause",
"testGroupByExpressionAndSelectWithGroupingKeysWithTableResult",
"testGroupByExpressionAndSelectWithGroupingKeysWithMapResult",
"testGroupByExpressionAndSelectWithGroupingKeysWithFromClause",
"testGroupByVarDefsAndSelectWithGroupingKeys1",
"testGroupByVarDefsAndSelectWithGroupingKeys2",
"testGroupByVarDefsAndSelectWithGroupingKeys3",
"testGroupByVarDefsAndSelectWithGroupingKeys4",
"testGroupByVarDefsAndSelectWithGroupingKeys5",
"testGroupByVarDefsAndSelectWithGroupingKeys6",
"testGroupByVarDefsAndSelectWithGroupingKeys7",
"testGroupByVarDefsAndSelectWithGroupingKeys8",
"testGroupByVarDefsAndSelectWithGroupingKeys9",
"testGroupByVarDefsAndSelectWithGroupingKeys10",
"testGroupByVarDefsAndSelectWithGroupingKeysAndWhereClause1",
"testGroupByVarDefsAndSelectWithGroupingKeysAndWhereClause2",
"testGroupByVarDefsAndSelectWithGroupingKeysAndWhereClause3",
"testGroupByVarDefsAndSelectWithGroupingKeysAndWhereClause4",
"testGroupByVarDefsAndSelectWithGroupingKeysAndWhereClause5",
"testGroupByVarDefsAndSelectWithGroupingKeysAndWhereClause6",
"testGroupByVarDefsAndSelectWithGroupingKeysAndWhereClause7",
"testGroupByVarDefsAndSelectWithGroupingKeysWithJoinClause3",
"testGroupByVarDefsAndSelectWithGroupingKeysWithJoinClause4",
"testGroupByVarDefsAndSelectWithGroupingKeysWithJoinClause5",
"testGroupByVarDefsAndSelectWithGroupingKeysWithOrderbyClause1",
"testGroupByVarDefsAndSelectWithGroupingKeysWithOrderbyClause2",
"testGroupByVarDefsAndSelectWithGroupingKeysWithOrderbyClause3",
"testGroupByVarDefsAndSelectWithGroupingKeysWithLimitClause",
"testGroupByVarDefsAndSelectWithGroupingKeysWithTableResult",
"testGroupByVarDefsAndSelectWithGroupingKeysWithMapResult",
"testGroupByVarDefsAndSelectWithGroupingKeysWithFromClause",
"testGroupByExpressionAndSelectWithNonGroupingKeys2",
"testGroupByExpressionAndSelectWithNonGroupingKeys3",
"testGroupByExpressionAndSelectWithNonGroupingKeys4",
"testGroupByExpressionAndSelectWithNonGroupingKeys5",
"testGroupByExpressionAndSelectWithNonGroupingKeys6",
"testGroupByExpressionAndSelectWithNonGroupingKeys7",
"testGroupByVarDefsAndSelectWithNonGroupingKeys1",
"testGroupByVarDefsAndSelectWithNonGroupingKeys2",
"testGroupByVarDefsAndSelectWithNonGroupingKeys3",
"testGroupByExpressionWithStreamOutput",
"testGroupByExpressionWithStringOutput1",
"testGroupByExpressionWithTableOutput",
"testGroupByExpressionWithMapOutput",
"testGroupByWithDoClause",
"testGroupByExpressionAndSelectWithNonGroupingKeys8",
"testGroupByExpressionAndSelectWithNonGroupingKeys9",
"testGroupByExpressionAndSelectWithNonGroupingKeys10",
"testGroupByExpressionAndSelectWithNonGroupingKeys11",
"testGroupByExpressionAndSelectWithNonGroupingKeys12",
"testGroupByExpressionAndSelectWithNonGroupingKeys13",
"testGroupByExpressionAndSelectWithNonGroupingKeys14",
"testGroupByExpressionAndSelectWithNonGroupingKeys15",
"testGroupByExpressionAndSelectWithNonGroupingKeys16",
"testGroupByExpressionAndSelectWithNonGroupingKeys17",
"testGroupByVarDefsAndSelectWithNonGroupingKeys4",
"testGroupByVarDefsAndSelectWithNonGroupingKeys5",
"testGroupByVarDefsAndSelectWithNonGroupingKeys6",
"testGroupByVarDefsAndSelectWithNonGroupingKeys7",
"testGroupByVarDefsAndSelectWithNonGroupingKeys8",
"testGroupByVarDefsAndSelectWithNonGroupingKeysWhereClause1",
"testGroupByVarDefsAndSelectWithNonGroupingKeysWhereClause2",
"testGroupByVarDefsAndSelectWithNonGroupingKeysWhereClause3",
"testGroupByExpressionAndSelectWithNonGroupingKeys18",
"testGroupByExpressionAndSelectWithNonGroupingKeys19",
"testGroupByVarDefsAndSelectWithGroupingKeys11",
"testGroupByVarDefsAndSelectWithNonGroupingKeys9",
"testMultipleGroupBy",
"testOptionalFieldsInInput",
"testMultipleGroupByInSameQuery",
"testMultipleFromClauses",
"testOptionalFieldInput",
"testEnumInInput",
"testEmptyGroups",
"testErrorSeq",
"testGroupByExpressionAndSelectWithNonGroupingKeys1",
"testGroupByExpressionAndSelectWithGroupingKeys10",
"testGroupByExpressionAndSelectWithGroupingKeys11",
"testGroupByExpressionAndSelectWithGroupingKeys12",
"testGroupByExpressionAndSelectWithGroupingKeys13",
"testGroupbyVarDefsAndSelectWithGroupingKeysFromClause1",
"testGroupByVarDefsAndSelectWithGroupingKeysWithJoinClause1",
"testGroupByVarDefsAndSelectWithGroupingKeysWithJoinClause2",
"testGroupByVarAndSelectWithNonGroupingKeysWithJoinClause1"
};
}
@Test(dataProvider = "dataToTestGroupByClauseWithInvocation")
public void testGroupByClauseWithInvocation(String functionName) {
BRunUtil.invoke(resultWithInvocation, functionName);
}
@DataProvider
public Object[] dataToTestGroupByClauseWithInvocation() {
return new Object[]{
"testGroupByExpressionAndSelectWithNonGroupingKeys1",
"testGroupByExpressionAndSelectWithNonGroupingKeys2",
"testGroupByExpressionAndSelectWithNonGroupingKeys3",
"testGroupByExpressionAndSelectWithNonGroupingKeys4",
"testGroupByExpressionAndSelectWithNonGroupingKeys5",
"testGroupByExpressionAndSelectWithGroupingKeys1",
"testGroupByExpressionAndSelectWithGroupingKeys2",
"testGroupByExpressionWithOrderBy",
"testGroupByExpressionWithStreamOutput",
"testGroupByExpressionWithTableOutput",
"testGroupByExpressionWithMapOutput",
"testGroupByWithDoClause",
"testGroupByVarDefsAndSelectWithNonGroupingKeys1",
"testGroupByVarDefsAndSelectWithNonGroupingKeys2",
"testGroupByExpressionAndSelectWithNonGroupingKeys6",
"testMultipleGroupBy",
"testMultipleGroupByInSameQuery",
"testOptionalFieldInput",
"testGroupByExpressionAndSelectWithNonGroupingKeys6",
"testGroupByExpressionAndSelectWithNonGroupingKeys7",
"testEmptyGroups",
"testGroupByExpressionAndSelectWithNonGroupingKeys8",
"testEnumInInput",
"testGroupByExpressionAndSelectWithNonGroupingKeys9",
"testGroupByExpressionAndSelectWithNonGroupingKeys10",
"testGroupByExpressionAndSelectWithNonGroupingKeys12",
"testGroupByExpressionAndSelectWithNonGroupingKeys11"
};
}
@Test
@Test
public void testNegativeSemanticCases() {
int i = 0;
BAssertUtil.validateError(negativeSemanticResult, i++, "invalid usage of the 'check' expression operator: " +
"no matching error return type(s) in the enclosing invokable", 24, 37);
Assert.assertEquals(negativeSemanticResult.getErrorCount(), i);
}
} | class GroupByClauseTest {
private CompileResult resultWithListCtr;
private CompileResult resultWithInvocation;
private CompileResult negativeResult;
private CompileResult negativeSemanticResult;
@BeforeClass
public void setup() {
resultWithListCtr = BCompileUtil.compile("test-src/query/group_by_clause_with_list_ctr.bal");
resultWithInvocation = BCompileUtil.compile("test-src/query/group_by_clause_with_invocation.bal");
negativeResult = BCompileUtil.compile("test-src/query/group_by_clause_negative.bal");
negativeSemanticResult = BCompileUtil.compile("test-src/query/group_by_clause_negative_semantic.bal");
}
@Test(dataProvider = "dataToTestGroupByClauseWithListCtr")
public void testGroupByClauseWithListCtr(String functionName) {
BRunUtil.invoke(resultWithListCtr, functionName);
}
@DataProvider
public Object[] dataToTestGroupByClauseWithListCtr() {
return new Object[] {
"testGroupByExpressionAndSelectWithGroupingKeys1",
"testGroupByExpressionAndSelectWithGroupingKeys2",
"testGroupByExpressionAndSelectWithGroupingKeys3",
"testGroupByExpressionAndSelectWithGroupingKeys4",
"testGroupByExpressionAndSelectWithGroupingKeys5",
"testGroupByExpressionAndSelectWithGroupingKeys6",
"testGroupByExpressionAndSelectWithGroupingKeys7",
"testGroupByExpressionAndSelectWithGroupingKeys8",
"testGroupByExpressionAndSelectWithGroupingKeys9",
"testGroupByExpressionAndSelectWithGroupingKeys12",
"testGroupByExpressionAndSelectWithGroupingKeysAndWhereClause1",
"testGroupByExpressionAndSelectWithGroupingKeysAndWhereClause2",
"testGroupByExpressionAndSelectWithGroupingKeysAndWhereClause3",
"testGroupByExpressionAndSelectWithGroupingKeysAndWhereClause4",
"testGroupByExpressionAndSelectWithGroupingKeysAndWhereClause5",
"testGroupByExpressionAndSelectWithGroupingKeysAndWhereClause6",
"testGroupByExpressionAndSelectWithGroupingKeysAndWhereClause7",
"testGroupByExpressionAndSelectWithGroupingKeysAndWhereClause8",
"testGroupByExpressionAndSelectWithGroupingKeysAndWhereClause9",
"testGroupByExpressionAndSelectWithGroupingKeysFromClause1",
"testGroupByExpressionAndSelectWithGroupingKeysFromClause2",
"testGroupByExpressionAndSelectWithGroupingKeysWithJoinClause1",
"testGroupByExpressionAndSelectWithGroupingKeysWithJoinClause2",
"testGroupByExpressionAndSelectWithGroupingKeysWithJoinClause3",
"testGroupByExpressionAndSelectWithGroupingKeysWithJoinClause4",
"testGroupByExpressionAndSelectWithGroupingKeysWithJoinClause5",
"testGroupByExpressionAndSelectWithGroupingKeysWithOrderbyClause1",
"testGroupByExpressionAndSelectWithGroupingKeysWithOrderbyClause2",
"testGroupByExpressionAndSelectWithGroupingKeysWithOrderbyClause3",
"testGroupByExpressionAndSelectWithGroupingKeysWithLimitClause",
"testGroupByExpressionAndSelectWithGroupingKeysWithTableResult",
"testGroupByExpressionAndSelectWithGroupingKeysWithMapResult",
"testGroupByExpressionAndSelectWithGroupingKeysWithFromClause",
"testGroupByVarDefsAndSelectWithGroupingKeys1",
"testGroupByVarDefsAndSelectWithGroupingKeys2",
"testGroupByVarDefsAndSelectWithGroupingKeys3",
"testGroupByVarDefsAndSelectWithGroupingKeys4",
"testGroupByVarDefsAndSelectWithGroupingKeys5",
"testGroupByVarDefsAndSelectWithGroupingKeys6",
"testGroupByVarDefsAndSelectWithGroupingKeys7",
"testGroupByVarDefsAndSelectWithGroupingKeys8",
"testGroupByVarDefsAndSelectWithGroupingKeys9",
"testGroupByVarDefsAndSelectWithGroupingKeys10",
"testGroupByVarDefsAndSelectWithGroupingKeysAndWhereClause1",
"testGroupByVarDefsAndSelectWithGroupingKeysAndWhereClause2",
"testGroupByVarDefsAndSelectWithGroupingKeysAndWhereClause3",
"testGroupByVarDefsAndSelectWithGroupingKeysAndWhereClause4",
"testGroupByVarDefsAndSelectWithGroupingKeysAndWhereClause5",
"testGroupByVarDefsAndSelectWithGroupingKeysAndWhereClause6",
"testGroupByVarDefsAndSelectWithGroupingKeysAndWhereClause7",
"testGroupByVarDefsAndSelectWithGroupingKeysWithJoinClause3",
"testGroupByVarDefsAndSelectWithGroupingKeysWithJoinClause4",
"testGroupByVarDefsAndSelectWithGroupingKeysWithJoinClause5",
"testGroupByVarDefsAndSelectWithGroupingKeysWithOrderbyClause1",
"testGroupByVarDefsAndSelectWithGroupingKeysWithOrderbyClause2",
"testGroupByVarDefsAndSelectWithGroupingKeysWithOrderbyClause3",
"testGroupByVarDefsAndSelectWithGroupingKeysWithLimitClause",
"testGroupByVarDefsAndSelectWithGroupingKeysWithTableResult",
"testGroupByVarDefsAndSelectWithGroupingKeysWithMapResult",
"testGroupByVarDefsAndSelectWithGroupingKeysWithFromClause",
"testGroupByExpressionAndSelectWithNonGroupingKeys2",
"testGroupByExpressionAndSelectWithNonGroupingKeys3",
"testGroupByExpressionAndSelectWithNonGroupingKeys4",
"testGroupByExpressionAndSelectWithNonGroupingKeys5",
"testGroupByExpressionAndSelectWithNonGroupingKeys6",
"testGroupByExpressionAndSelectWithNonGroupingKeys7",
"testGroupByVarDefsAndSelectWithNonGroupingKeys1",
"testGroupByVarDefsAndSelectWithNonGroupingKeys2",
"testGroupByVarDefsAndSelectWithNonGroupingKeys3",
"testGroupByExpressionWithStreamOutput",
"testGroupByExpressionWithStringOutput1",
"testGroupByExpressionWithTableOutput",
"testGroupByExpressionWithMapOutput",
"testGroupByWithDoClause",
"testGroupByExpressionAndSelectWithNonGroupingKeys8",
"testGroupByExpressionAndSelectWithNonGroupingKeys9",
"testGroupByExpressionAndSelectWithNonGroupingKeys10",
"testGroupByExpressionAndSelectWithNonGroupingKeys11",
"testGroupByExpressionAndSelectWithNonGroupingKeys12",
"testGroupByExpressionAndSelectWithNonGroupingKeys13",
"testGroupByExpressionAndSelectWithNonGroupingKeys14",
"testGroupByExpressionAndSelectWithNonGroupingKeys15",
"testGroupByExpressionAndSelectWithNonGroupingKeys16",
"testGroupByExpressionAndSelectWithNonGroupingKeys17",
"testGroupByVarDefsAndSelectWithNonGroupingKeys4",
"testGroupByVarDefsAndSelectWithNonGroupingKeys5",
"testGroupByVarDefsAndSelectWithNonGroupingKeys6",
"testGroupByVarDefsAndSelectWithNonGroupingKeys7",
"testGroupByVarDefsAndSelectWithNonGroupingKeys8",
"testGroupByVarDefsAndSelectWithNonGroupingKeysWhereClause1",
"testGroupByVarDefsAndSelectWithNonGroupingKeysWhereClause2",
"testGroupByVarDefsAndSelectWithNonGroupingKeysWhereClause3",
"testGroupByExpressionAndSelectWithNonGroupingKeys18",
"testGroupByExpressionAndSelectWithNonGroupingKeys19",
"testGroupByVarDefsAndSelectWithGroupingKeys11",
"testGroupByVarDefsAndSelectWithNonGroupingKeys9",
"testMultipleGroupBy",
"testOptionalFieldsInInput",
"testMultipleGroupByInSameQuery",
"testMultipleFromClauses",
"testOptionalFieldInput",
"testEnumInInput",
"testEmptyGroups",
"testErrorSeq",
"testGroupByExpressionAndSelectWithNonGroupingKeys1",
"testGroupByExpressionAndSelectWithGroupingKeys10",
"testGroupByExpressionAndSelectWithGroupingKeys11",
"testGroupByExpressionAndSelectWithGroupingKeys12",
"testGroupByExpressionAndSelectWithGroupingKeys13",
"testGroupbyVarDefsAndSelectWithGroupingKeysFromClause1",
"testGroupByVarDefsAndSelectWithGroupingKeysWithJoinClause1",
"testGroupByVarDefsAndSelectWithGroupingKeysWithJoinClause2",
"testGroupByVarAndSelectWithNonGroupingKeysWithJoinClause1"
};
}
@Test(dataProvider = "dataToTestGroupByClauseWithInvocation")
public void testGroupByClauseWithInvocation(String functionName) {
BRunUtil.invoke(resultWithInvocation, functionName);
}
@DataProvider
public Object[] dataToTestGroupByClauseWithInvocation() {
return new Object[]{
"testGroupByExpressionAndSelectWithNonGroupingKeys1",
"testGroupByExpressionAndSelectWithNonGroupingKeys2",
"testGroupByExpressionAndSelectWithNonGroupingKeys3",
"testGroupByExpressionAndSelectWithNonGroupingKeys4",
"testGroupByExpressionAndSelectWithNonGroupingKeys5",
"testGroupByExpressionAndSelectWithGroupingKeys1",
"testGroupByExpressionAndSelectWithGroupingKeys2",
"testGroupByExpressionWithOrderBy",
"testGroupByExpressionWithStreamOutput",
"testGroupByExpressionWithTableOutput",
"testGroupByExpressionWithMapOutput",
"testGroupByWithDoClause",
"testGroupByVarDefsAndSelectWithNonGroupingKeys1",
"testGroupByVarDefsAndSelectWithNonGroupingKeys2",
"testGroupByExpressionAndSelectWithNonGroupingKeys6",
"testMultipleGroupBy",
"testMultipleGroupByInSameQuery",
"testOptionalFieldInput",
"testGroupByExpressionAndSelectWithNonGroupingKeys6",
"testGroupByExpressionAndSelectWithNonGroupingKeys7",
"testEmptyGroups",
"testGroupByExpressionAndSelectWithNonGroupingKeys8",
"testEnumInInput",
"testGroupByExpressionAndSelectWithNonGroupingKeys9",
"testGroupByExpressionAndSelectWithNonGroupingKeys10",
"testGroupByExpressionAndSelectWithNonGroupingKeys12",
"testGroupByExpressionAndSelectWithNonGroupingKeys11"
};
}
@Test
@Test
public void testNegativeSemanticCases() {
int i = 0;
BAssertUtil.validateError(negativeSemanticResult, i++, "invalid usage of the 'check' expression operator: " +
"no matching error return type(s) in the enclosing invokable", 24, 37);
Assert.assertEquals(negativeSemanticResult.getErrorCount(), i);
}
} |
It's fine to me, we only read serial from PG and wont write a serial column. | public static TestTable getSerialTable() {
return new TestTable(
TableSchema.builder()
.field("f0", DataTypes.SMALLINT())
.field("f1", DataTypes.INT())
.field("f2", DataTypes.SMALLINT())
.field("f3", DataTypes.INT())
.field("f4", DataTypes.BIGINT())
.field("f5", DataTypes.BIGINT())
.build(),
"f0 smallserial, " +
"f1 serial, " +
"f2 serial2, " +
"f3 serial4, " +
"f4 serial8, " +
"f5 bigserial",
"0," +
"1," +
"2," +
"3," +
"4," +
"5"
);
} | "5" | public static TestTable getSerialTable() {
return new TestTable(
TableSchema.builder()
.field("f0", DataTypes.SMALLINT().notNull())
.field("f1", DataTypes.INT().notNull())
.field("f2", DataTypes.SMALLINT().notNull())
.field("f3", DataTypes.INT().notNull())
.field("f4", DataTypes.BIGINT().notNull())
.field("f5", DataTypes.BIGINT().notNull())
.build(),
"f0 smallserial, " +
"f1 serial, " +
"f2 serial2, " +
"f3 serial4, " +
"f4 serial8, " +
"f5 bigserial",
"32767," +
"2147483647," +
"32767," +
"2147483647," +
"9223372036854775807," +
"9223372036854775807"
);
} | class TestTable {
TableSchema schema;
String pgSchemaSql;
String values;
public TestTable(TableSchema schema, String pgSchemaSql, String values) {
this.schema = schema;
this.pgSchemaSql = pgSchemaSql;
this.values = values;
}
} | class TestTable {
TableSchema schema;
String pgSchemaSql;
String values;
public TestTable(TableSchema schema, String pgSchemaSql, String values) {
this.schema = schema;
this.pgSchemaSql = pgSchemaSql;
this.values = values;
}
} |
The returned `NodeMetrics` object is shared and thus may be read outside the lock whilst concurrently being updated by another thread. Is this a problem? Should we deep-copy the metrics to make sure they're internally consistent? This is not something introduced by this PR, but just a general observation. | public Node getRecipient(List<Mirror.Entry> choices) {
if (choices.isEmpty()) return null;
double weightSum = 0.0;
Node selectedNode = null;
synchronized (this) {
for (Mirror.Entry entry : choices) {
NodeMetrics nodeMetrics = getNodeMetrics(entry);
weightSum += nodeMetrics.weight;
if (weightSum > position) {
selectedNode = new Node(entry, nodeMetrics);
break;
}
}
if (selectedNode == null) {
position -= weightSum;
selectedNode = new Node(choices.get(0), getNodeMetrics(choices.get(0)));
}
position += 1.0;
selectedNode.metrics.sent++;
}
return selectedNode;
} | selectedNode = new Node(entry, nodeMetrics); | public Node getRecipient(List<Mirror.Entry> choices) {
if (choices.isEmpty()) return null;
double weightSum = 0.0;
Node selectedNode = null;
synchronized (this) {
for (Mirror.Entry entry : choices) {
NodeMetrics nodeMetrics = getNodeMetrics(entry);
weightSum += nodeMetrics.weight;
if (weightSum > position) {
selectedNode = new Node(entry, nodeMetrics);
break;
}
}
if (selectedNode == null) {
position -= weightSum;
selectedNode = new Node(choices.get(0), getNodeMetrics(choices.get(0)));
}
position += 1.0;
selectedNode.metrics.sent++;
}
return selectedNode;
} | class Node {
Node(Mirror.Entry e, NodeMetrics m) { entry = e; metrics = m; }
Mirror.Entry entry;
NodeMetrics metrics;
} | class Node {
Node(Mirror.Entry e, NodeMetrics m) { entry = e; metrics = m; }
Mirror.Entry entry;
NodeMetrics metrics;
} |
Consider adding some additional validation of tenant id | private static void verifyValues(JsonNode root) {
var cursor = new JsonAccessor(root);
cursor.get("rules").forEachArrayElement(rule -> rule.get("conditions").forEachArrayElement(condition -> {
var dimension = condition.get("dimension");
if (dimension.isEqualTo(DimensionHelper.toWire(FetchVector.Dimension.APPLICATION_ID))) {
condition.get("values").forEachArrayElement(conditionValue -> {
String applicationIdString = conditionValue.asString()
.orElseThrow(() -> new IllegalArgumentException("Non-string application ID: " + conditionValue));
ApplicationId.fromSerializedForm(applicationIdString);
});
} else if (dimension.isEqualTo(DimensionHelper.toWire(FetchVector.Dimension.NODE_TYPE))) {
condition.get("values").forEachArrayElement(conditionValue -> {
String nodeTypeString = conditionValue.asString()
.orElseThrow(() -> new IllegalArgumentException("Non-string node type: " + conditionValue));
NodeType.valueOf(nodeTypeString);
});
} else if (dimension.isEqualTo(DimensionHelper.toWire(FetchVector.Dimension.CONSOLE_USER_EMAIL))) {
condition.get("values").forEachArrayElement(conditionValue -> conditionValue.asString()
.orElseThrow(() -> new IllegalArgumentException("Non-string email address: " + conditionValue)));
} else if (dimension.isEqualTo(DimensionHelper.toWire(FetchVector.Dimension.TENANT_ID))) {
condition.get("values").forEachArrayElement(conditionValue -> conditionValue.asString()
.orElseThrow(() -> new IllegalArgumentException("Non-string tenant ID: " + conditionValue)));
}
}));
} | condition.get("values").forEachArrayElement(conditionValue -> conditionValue.asString() | private static void verifyValues(JsonNode root) {
var cursor = new JsonAccessor(root);
cursor.get("rules").forEachArrayElement(rule -> rule.get("conditions").forEachArrayElement(condition -> {
var dimension = condition.get("dimension");
if (dimension.isEqualTo(DimensionHelper.toWire(FetchVector.Dimension.APPLICATION_ID))) {
condition.get("values").forEachArrayElement(conditionValue -> {
String applicationIdString = conditionValue.asString()
.orElseThrow(() -> new IllegalArgumentException("Non-string application ID: " + conditionValue));
ApplicationId.fromSerializedForm(applicationIdString);
});
} else if (dimension.isEqualTo(DimensionHelper.toWire(FetchVector.Dimension.NODE_TYPE))) {
condition.get("values").forEachArrayElement(conditionValue -> {
String nodeTypeString = conditionValue.asString()
.orElseThrow(() -> new IllegalArgumentException("Non-string node type: " + conditionValue));
NodeType.valueOf(nodeTypeString);
});
} else if (dimension.isEqualTo(DimensionHelper.toWire(FetchVector.Dimension.CONSOLE_USER_EMAIL))) {
condition.get("values").forEachArrayElement(conditionValue -> conditionValue.asString()
.orElseThrow(() -> new IllegalArgumentException("Non-string email address: " + conditionValue)));
} else if (dimension.isEqualTo(DimensionHelper.toWire(FetchVector.Dimension.TENANT_ID))) {
condition.get("values").forEachArrayElement(conditionValue -> conditionValue.asString()
.orElseThrow(() -> new IllegalArgumentException("Non-string tenant ID: " + conditionValue)));
}
}));
} | class SystemFlagsDataArchive {
private static final ObjectMapper mapper = new ObjectMapper();
private final Map<FlagId, Map<String, FlagData>> files;
private SystemFlagsDataArchive(Map<FlagId, Map<String, FlagData>> files) {
this.files = files;
}
public static SystemFlagsDataArchive fromZip(InputStream rawIn) {
Builder builder = new Builder();
try (ZipInputStream zipIn = new ZipInputStream(new BufferedInputStream(rawIn))) {
ZipEntry entry;
while ((entry = zipIn.getNextEntry()) != null) {
String name = entry.getName();
if (!entry.isDirectory() && name.startsWith("flags/")) {
Path filePath = Paths.get(name);
String rawData = new String(zipIn.readAllBytes(), StandardCharsets.UTF_8);
addFile(builder, rawData, filePath);
}
}
return builder.build();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
public static SystemFlagsDataArchive fromDirectory(Path directory) {
Path root = directory.toAbsolutePath();
Path flagsDirectory = directory.resolve("flags");
if (!Files.isDirectory(flagsDirectory)) {
throw new IllegalArgumentException("Sub-directory 'flags' does not exist: " + flagsDirectory);
}
try (Stream<Path> directoryStream = Files.walk(root)) {
Builder builder = new Builder();
directoryStream.forEach(absolutePath -> {
Path relativePath = root.relativize(absolutePath);
if (!Files.isDirectory(absolutePath) &&
relativePath.startsWith("flags")) {
String rawData = uncheck(() -> Files.readString(absolutePath, StandardCharsets.UTF_8));
addFile(builder, rawData, relativePath);
}
});
return builder.build();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
public void toZip(OutputStream out) {
ZipOutputStream zipOut = new ZipOutputStream(out);
files.forEach((flagId, fileMap) -> {
fileMap.forEach((filename, flagData) -> {
uncheck(() -> {
zipOut.putNextEntry(new ZipEntry(toFilePath(flagId, filename)));
zipOut.write(flagData.serializeToUtf8Json());
zipOut.closeEntry();
});
});
});
uncheck(zipOut::flush);
}
public Set<FlagData> flagData(FlagsTarget target) {
List<String> filenames = target.flagDataFilesPrioritized();
Set<FlagData> targetData = new HashSet<>();
files.forEach((flagId, fileMap) -> {
for (String filename : filenames) {
FlagData data = fileMap.get(filename);
if (data != null) {
if (!data.isEmpty()) {
targetData.add(data);
}
return;
}
}
});
return targetData;
}
public void validateAllFilesAreForTargets(SystemName currentSystem, Set<FlagsTarget> targets) throws IllegalArgumentException {
Set<String> validFiles = targets.stream()
.flatMap(target -> target.flagDataFilesPrioritized().stream())
.collect(Collectors.toSet());
Set<SystemName> otherSystems = Arrays.stream(SystemName.values())
.filter(systemName -> systemName != currentSystem)
.collect(Collectors.toSet());
files.forEach((flagId, fileMap) -> {
for (String filename : fileMap.keySet()) {
boolean isFileForOtherSystem = otherSystems.stream()
.anyMatch(system -> filename.startsWith(system.value() + "."));
boolean isFileForCurrentSystem = validFiles.contains(filename);
if (!isFileForOtherSystem && !isFileForCurrentSystem) {
throw new IllegalArgumentException("Unknown flag file: " + toFilePath(flagId, filename));
}
}
});
}
private static void addFile(Builder builder, String rawData, Path filePath) {
String filename = filePath.getFileName().toString();
if (filename.startsWith(".")) {
return;
}
if (!filename.endsWith(".json")) {
throw new IllegalArgumentException(String.format("Only JSON files are allowed in 'flags/' directory (found '%s')", filePath.toString()));
}
FlagId directoryDeducedFlagId = new FlagId(filePath.getName(filePath.getNameCount()-2).toString());
FlagData flagData;
if (rawData.isBlank()) {
flagData = new FlagData(directoryDeducedFlagId);
} else {
String normalizedRawData = normalizeJson(rawData);
flagData = FlagData.deserialize(normalizedRawData);
if (!directoryDeducedFlagId.equals(flagData.id())) {
throw new IllegalArgumentException(
String.format("Flag data file with flag id '%s' in directory for '%s'",
flagData.id(), directoryDeducedFlagId.toString()));
}
String serializedData = flagData.serializeToJson();
if (!JSON.equals(serializedData, normalizedRawData)) {
throw new IllegalArgumentException(filePath + " contains unknown non-comment fields: " +
"after removing any comment fields the JSON is:\n " +
normalizedRawData +
"\nbut deserializing this ended up with a JSON that are missing some of the fields:\n " +
serializedData +
"\nSee https:
}
}
if (builder.hasFile(filename, flagData)) {
throw new IllegalArgumentException(
String.format("Flag data file in '%s' contains redundant flag data for id '%s' already set in another directory!",
filePath, flagData.id()));
}
builder.addFile(filename, flagData);
}
static String normalizeJson(String json) {
JsonNode root = uncheck(() -> mapper.readTree(json));
removeCommentsRecursively(root);
verifyValues(root);
return root.toString();
}
private static void removeCommentsRecursively(JsonNode node) {
if (node instanceof ObjectNode) {
ObjectNode objectNode = (ObjectNode) node;
objectNode.remove("comment");
}
node.forEach(SystemFlagsDataArchive::removeCommentsRecursively);
}
private static String toFilePath(FlagId flagId, String filename) {
return "flags/" + flagId.toString() + "/" + filename;
}
public static class Builder {
private final Map<FlagId, Map<String, FlagData>> files = new TreeMap<>();
public Builder() {}
public Builder addFile(String filename, FlagData data) {
files.computeIfAbsent(data.id(), k -> new TreeMap<>()).put(filename, data);
return this;
}
public boolean hasFile(String filename, FlagData data) {
return files.containsKey(data.id()) && files.get(data.id()).containsKey(filename);
}
public SystemFlagsDataArchive build() {
Map<FlagId, Map<String, FlagData>> copy = new TreeMap<>();
files.forEach((flagId, map) -> copy.put(flagId, new TreeMap<>(map)));
return new SystemFlagsDataArchive(copy);
}
}
private static class JsonAccessor {
private final JsonNode jsonNode;
public JsonAccessor(JsonNode jsonNode) {
this.jsonNode = jsonNode;
}
public JsonAccessor get(String fieldName) {
if (jsonNode == null) {
return this;
} else {
return new JsonAccessor(jsonNode.get(fieldName));
}
}
public Optional<String> asString() {
return jsonNode != null && jsonNode.isTextual() ? Optional.of(jsonNode.textValue()) : Optional.empty();
}
public void forEachArrayElement(Consumer<JsonAccessor> consumer) {
if (jsonNode != null && jsonNode.isArray()) {
jsonNode.forEach(jsonNodeElement -> consumer.accept(new JsonAccessor(jsonNodeElement)));
}
}
/** Returns true if this (JsonNode) is a string and equal to value. */
public boolean isEqualTo(String value) {
return jsonNode != null && jsonNode.isTextual() && Objects.equals(jsonNode.textValue(), value);
}
@Override
public String toString() {
return jsonNode == null ? "undefined" : jsonNode.toString();
}
}
} | class SystemFlagsDataArchive {
private static final ObjectMapper mapper = new ObjectMapper();
private final Map<FlagId, Map<String, FlagData>> files;
private SystemFlagsDataArchive(Map<FlagId, Map<String, FlagData>> files) {
this.files = files;
}
public static SystemFlagsDataArchive fromZip(InputStream rawIn) {
Builder builder = new Builder();
try (ZipInputStream zipIn = new ZipInputStream(new BufferedInputStream(rawIn))) {
ZipEntry entry;
while ((entry = zipIn.getNextEntry()) != null) {
String name = entry.getName();
if (!entry.isDirectory() && name.startsWith("flags/")) {
Path filePath = Paths.get(name);
String rawData = new String(zipIn.readAllBytes(), StandardCharsets.UTF_8);
addFile(builder, rawData, filePath);
}
}
return builder.build();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
public static SystemFlagsDataArchive fromDirectory(Path directory) {
Path root = directory.toAbsolutePath();
Path flagsDirectory = directory.resolve("flags");
if (!Files.isDirectory(flagsDirectory)) {
throw new IllegalArgumentException("Sub-directory 'flags' does not exist: " + flagsDirectory);
}
try (Stream<Path> directoryStream = Files.walk(root)) {
Builder builder = new Builder();
directoryStream.forEach(absolutePath -> {
Path relativePath = root.relativize(absolutePath);
if (!Files.isDirectory(absolutePath) &&
relativePath.startsWith("flags")) {
String rawData = uncheck(() -> Files.readString(absolutePath, StandardCharsets.UTF_8));
addFile(builder, rawData, relativePath);
}
});
return builder.build();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
public void toZip(OutputStream out) {
ZipOutputStream zipOut = new ZipOutputStream(out);
files.forEach((flagId, fileMap) -> {
fileMap.forEach((filename, flagData) -> {
uncheck(() -> {
zipOut.putNextEntry(new ZipEntry(toFilePath(flagId, filename)));
zipOut.write(flagData.serializeToUtf8Json());
zipOut.closeEntry();
});
});
});
uncheck(zipOut::flush);
}
public Set<FlagData> flagData(FlagsTarget target) {
List<String> filenames = target.flagDataFilesPrioritized();
Set<FlagData> targetData = new HashSet<>();
files.forEach((flagId, fileMap) -> {
for (String filename : filenames) {
FlagData data = fileMap.get(filename);
if (data != null) {
if (!data.isEmpty()) {
targetData.add(data);
}
return;
}
}
});
return targetData;
}
public void validateAllFilesAreForTargets(SystemName currentSystem, Set<FlagsTarget> targets) throws IllegalArgumentException {
Set<String> validFiles = targets.stream()
.flatMap(target -> target.flagDataFilesPrioritized().stream())
.collect(Collectors.toSet());
Set<SystemName> otherSystems = Arrays.stream(SystemName.values())
.filter(systemName -> systemName != currentSystem)
.collect(Collectors.toSet());
files.forEach((flagId, fileMap) -> {
for (String filename : fileMap.keySet()) {
boolean isFileForOtherSystem = otherSystems.stream()
.anyMatch(system -> filename.startsWith(system.value() + "."));
boolean isFileForCurrentSystem = validFiles.contains(filename);
if (!isFileForOtherSystem && !isFileForCurrentSystem) {
throw new IllegalArgumentException("Unknown flag file: " + toFilePath(flagId, filename));
}
}
});
}
private static void addFile(Builder builder, String rawData, Path filePath) {
String filename = filePath.getFileName().toString();
if (filename.startsWith(".")) {
return;
}
if (!filename.endsWith(".json")) {
throw new IllegalArgumentException(String.format("Only JSON files are allowed in 'flags/' directory (found '%s')", filePath.toString()));
}
FlagId directoryDeducedFlagId = new FlagId(filePath.getName(filePath.getNameCount()-2).toString());
FlagData flagData;
if (rawData.isBlank()) {
flagData = new FlagData(directoryDeducedFlagId);
} else {
String normalizedRawData = normalizeJson(rawData);
flagData = FlagData.deserialize(normalizedRawData);
if (!directoryDeducedFlagId.equals(flagData.id())) {
throw new IllegalArgumentException(
String.format("Flag data file with flag id '%s' in directory for '%s'",
flagData.id(), directoryDeducedFlagId.toString()));
}
String serializedData = flagData.serializeToJson();
if (!JSON.equals(serializedData, normalizedRawData)) {
throw new IllegalArgumentException(filePath + " contains unknown non-comment fields: " +
"after removing any comment fields the JSON is:\n " +
normalizedRawData +
"\nbut deserializing this ended up with a JSON that are missing some of the fields:\n " +
serializedData +
"\nSee https:
}
}
if (builder.hasFile(filename, flagData)) {
throw new IllegalArgumentException(
String.format("Flag data file in '%s' contains redundant flag data for id '%s' already set in another directory!",
filePath, flagData.id()));
}
builder.addFile(filename, flagData);
}
static String normalizeJson(String json) {
JsonNode root = uncheck(() -> mapper.readTree(json));
removeCommentsRecursively(root);
verifyValues(root);
return root.toString();
}
private static void removeCommentsRecursively(JsonNode node) {
if (node instanceof ObjectNode) {
ObjectNode objectNode = (ObjectNode) node;
objectNode.remove("comment");
}
node.forEach(SystemFlagsDataArchive::removeCommentsRecursively);
}
private static String toFilePath(FlagId flagId, String filename) {
return "flags/" + flagId.toString() + "/" + filename;
}
public static class Builder {
private final Map<FlagId, Map<String, FlagData>> files = new TreeMap<>();
public Builder() {}
public Builder addFile(String filename, FlagData data) {
files.computeIfAbsent(data.id(), k -> new TreeMap<>()).put(filename, data);
return this;
}
public boolean hasFile(String filename, FlagData data) {
return files.containsKey(data.id()) && files.get(data.id()).containsKey(filename);
}
public SystemFlagsDataArchive build() {
Map<FlagId, Map<String, FlagData>> copy = new TreeMap<>();
files.forEach((flagId, map) -> copy.put(flagId, new TreeMap<>(map)));
return new SystemFlagsDataArchive(copy);
}
}
private static class JsonAccessor {
private final JsonNode jsonNode;
public JsonAccessor(JsonNode jsonNode) {
this.jsonNode = jsonNode;
}
public JsonAccessor get(String fieldName) {
if (jsonNode == null) {
return this;
} else {
return new JsonAccessor(jsonNode.get(fieldName));
}
}
public Optional<String> asString() {
return jsonNode != null && jsonNode.isTextual() ? Optional.of(jsonNode.textValue()) : Optional.empty();
}
public void forEachArrayElement(Consumer<JsonAccessor> consumer) {
if (jsonNode != null && jsonNode.isArray()) {
jsonNode.forEach(jsonNodeElement -> consumer.accept(new JsonAccessor(jsonNodeElement)));
}
}
/** Returns true if this (JsonNode) is a string and equal to value. */
public boolean isEqualTo(String value) {
return jsonNode != null && jsonNode.isTextual() && Objects.equals(jsonNode.textValue(), value);
}
@Override
public String toString() {
return jsonNode == null ? "undefined" : jsonNode.toString();
}
}
} |
Do you think we should add a new method in `ExceptionUtils` to represent this meaning? | private void ensureRunning() throws Exception {
if (wasClosed || !thread.isAlive()) {
cleanupRequests();
IllegalStateException exception = new IllegalStateException("not running");
if (thrown != null) {
exception.addSuppressed(thrown);
}
throw exception;
}
} | IllegalStateException exception = new IllegalStateException("not running"); | private void ensureRunning() throws Exception {
if (wasClosed || !thread.isAlive()) {
cleanupRequests();
IllegalStateException exception = new IllegalStateException("not running");
if (thrown != null) {
exception.addSuppressed(thrown);
}
throw exception;
}
} | class ChannelStateWriteRequestExecutorImpl implements ChannelStateWriteRequestExecutor {
private static final Logger LOG =
LoggerFactory.getLogger(ChannelStateWriteRequestExecutorImpl.class);
private final ChannelStateWriteRequestDispatcher dispatcher;
private final BlockingDeque<ChannelStateWriteRequest> deque;
private final Thread thread;
private volatile Exception thrown = null;
private volatile boolean wasClosed = false;
private final String taskName;
ChannelStateWriteRequestExecutorImpl(
String taskName, ChannelStateWriteRequestDispatcher dispatcher) {
this(taskName, dispatcher, new LinkedBlockingDeque<>());
}
ChannelStateWriteRequestExecutorImpl(
String taskName,
ChannelStateWriteRequestDispatcher dispatcher,
BlockingDeque<ChannelStateWriteRequest> deque) {
this.taskName = taskName;
this.dispatcher = dispatcher;
this.deque = deque;
this.thread = new Thread(this::run, "Channel state writer " + taskName);
this.thread.setDaemon(true);
}
@VisibleForTesting
void run() {
try {
loop();
} catch (Exception ex) {
thrown = ex;
} finally {
try {
closeAll(
this::cleanupRequests,
() ->
dispatcher.fail(
thrown == null ? new CancellationException() : thrown));
} catch (Exception e) {
thrown = ExceptionUtils.firstOrSuppressed(e, thrown);
}
}
LOG.debug("{} loop terminated", taskName);
}
private void loop() throws Exception {
while (!wasClosed) {
try {
dispatcher.dispatch(deque.take());
} catch (InterruptedException e) {
if (!wasClosed) {
LOG.debug(
taskName
+ " interrupted while waiting for a request (continue waiting)",
e);
} else {
Thread.currentThread().interrupt();
}
}
}
}
private void cleanupRequests() throws Exception {
Throwable cause = thrown == null ? new CancellationException() : thrown;
List<ChannelStateWriteRequest> drained = new ArrayList<>();
deque.drainTo(drained);
LOG.info("{} discarding {} drained requests", taskName, drained.size());
closeAll(
drained.stream()
.<AutoCloseable>map(request -> () -> request.cancel(cause))
.collect(Collectors.toList()));
}
@Override
public void start() throws IllegalStateException {
this.thread.start();
}
@Override
public void submit(ChannelStateWriteRequest request) throws Exception {
submitInternal(request, () -> deque.add(request));
}
@Override
public void submitPriority(ChannelStateWriteRequest request) throws Exception {
submitInternal(request, () -> deque.addFirst(request));
}
private void submitInternal(ChannelStateWriteRequest request, RunnableWithException action)
throws Exception {
try {
action.run();
} catch (Exception ex) {
request.cancel(ex);
throw ex;
}
ensureRunning();
}
@Override
public void close() throws IOException {
wasClosed = true;
while (thread.isAlive()) {
thread.interrupt();
try {
thread.join();
} catch (InterruptedException e) {
if (!thread.isAlive()) {
Thread.currentThread().interrupt();
}
LOG.debug(taskName + " interrupted while waiting for the writer thread to die", e);
}
}
if (thrown != null) {
throw new IOException(thrown);
}
}
@VisibleForTesting
Thread getThread() {
return thread;
}
} | class ChannelStateWriteRequestExecutorImpl implements ChannelStateWriteRequestExecutor {
private static final Logger LOG =
LoggerFactory.getLogger(ChannelStateWriteRequestExecutorImpl.class);
private final ChannelStateWriteRequestDispatcher dispatcher;
private final BlockingDeque<ChannelStateWriteRequest> deque;
private final Thread thread;
private volatile Exception thrown = null;
private volatile boolean wasClosed = false;
private final String taskName;
ChannelStateWriteRequestExecutorImpl(
String taskName, ChannelStateWriteRequestDispatcher dispatcher) {
this(taskName, dispatcher, new LinkedBlockingDeque<>());
}
ChannelStateWriteRequestExecutorImpl(
String taskName,
ChannelStateWriteRequestDispatcher dispatcher,
BlockingDeque<ChannelStateWriteRequest> deque) {
this.taskName = taskName;
this.dispatcher = dispatcher;
this.deque = deque;
this.thread = new Thread(this::run, "Channel state writer " + taskName);
this.thread.setDaemon(true);
}
@VisibleForTesting
void run() {
try {
loop();
} catch (Exception ex) {
thrown = ex;
} finally {
try {
closeAll(
this::cleanupRequests,
() ->
dispatcher.fail(
thrown == null ? new CancellationException() : thrown));
} catch (Exception e) {
thrown = ExceptionUtils.firstOrSuppressed(e, thrown);
}
}
LOG.debug("{} loop terminated", taskName);
}
private void loop() throws Exception {
while (!wasClosed) {
try {
dispatcher.dispatch(deque.take());
} catch (InterruptedException e) {
if (!wasClosed) {
LOG.debug(
taskName
+ " interrupted while waiting for a request (continue waiting)",
e);
} else {
Thread.currentThread().interrupt();
}
}
}
}
private void cleanupRequests() throws Exception {
Throwable cause = thrown == null ? new CancellationException() : thrown;
List<ChannelStateWriteRequest> drained = new ArrayList<>();
deque.drainTo(drained);
LOG.info("{} discarding {} drained requests", taskName, drained.size());
closeAll(
drained.stream()
.<AutoCloseable>map(request -> () -> request.cancel(cause))
.collect(Collectors.toList()));
}
@Override
public void start() throws IllegalStateException {
this.thread.start();
}
@Override
public void submit(ChannelStateWriteRequest request) throws Exception {
submitInternal(request, () -> deque.add(request));
}
@Override
public void submitPriority(ChannelStateWriteRequest request) throws Exception {
submitInternal(request, () -> deque.addFirst(request));
}
private void submitInternal(ChannelStateWriteRequest request, RunnableWithException action)
throws Exception {
try {
action.run();
} catch (Exception ex) {
request.cancel(ex);
throw ex;
}
ensureRunning();
}
@Override
public void close() throws IOException {
wasClosed = true;
while (thread.isAlive()) {
thread.interrupt();
try {
thread.join();
} catch (InterruptedException e) {
if (!thread.isAlive()) {
Thread.currentThread().interrupt();
}
LOG.debug(taskName + " interrupted while waiting for the writer thread to die", e);
}
}
if (thrown != null) {
throw new IOException(thrown);
}
}
@VisibleForTesting
Thread getThread() {
return thread;
}
} |
See my earlier comment, I think it should check if more than one mechanism is reg-ed - if it is only one then no need to check the mech in the context | public Uni<Boolean> sendChallenge(RoutingContext routingContext) {
routingContext.request().resume();
Uni<Boolean> result = null;
if (usePathSpecificMechanism(routingContext)) {
HttpAuthenticationMechanism matchingMech = routingContext.get(HttpAuthenticationMechanism.class.getName());
if (matchingMech != null) {
result = matchingMech.sendChallenge(routingContext);
}
}
if (result == null) {
result = mechanisms[0].sendChallenge(routingContext);
for (int i = 1; i < mechanisms.length; ++i) {
HttpAuthenticationMechanism mech = mechanisms[i];
result = result.onItem().transformToUni(new Function<Boolean, Uni<? extends Boolean>>() {
@Override
public Uni<? extends Boolean> apply(Boolean authDone) {
if (authDone) {
return Uni.createFrom().item(authDone);
}
return mech.sendChallenge(routingContext);
}
});
}
}
return result.onItem().transformToUni(new Function<Boolean, Uni<? extends Boolean>>() {
@Override
public Uni<? extends Boolean> apply(Boolean authDone) {
if (!authDone) {
routingContext.response().setStatusCode(401);
routingContext.response().end();
}
return Uni.createFrom().item(authDone);
}
});
} | if (usePathSpecificMechanism(routingContext)) { | public Uni<Boolean> sendChallenge(RoutingContext routingContext) {
routingContext.request().resume();
Uni<Boolean> result = null;
if (mechanisms.length > 1) {
HttpAuthenticationMechanism matchingMech = routingContext.get(HttpAuthenticationMechanism.class.getName());
if (matchingMech != null) {
result = matchingMech.sendChallenge(routingContext);
}
}
if (result == null) {
result = mechanisms[0].sendChallenge(routingContext);
for (int i = 1; i < mechanisms.length; ++i) {
HttpAuthenticationMechanism mech = mechanisms[i];
result = result.onItem().transformToUni(new Function<Boolean, Uni<? extends Boolean>>() {
@Override
public Uni<? extends Boolean> apply(Boolean authDone) {
if (authDone) {
return Uni.createFrom().item(authDone);
}
return mech.sendChallenge(routingContext);
}
});
}
}
return result.onItem().transformToUni(new Function<Boolean, Uni<? extends Boolean>>() {
@Override
public Uni<? extends Boolean> apply(Boolean authDone) {
if (!authDone) {
routingContext.response().setStatusCode(401);
routingContext.response().end();
}
return Uni.createFrom().item(authDone);
}
});
} | class HttpAuthenticator {
private final IdentityProviderManager identityProviderManager;
private final Instance<PathMatchingHttpSecurityPolicy> pathMatchingPolicy;
private final HttpAuthenticationMechanism[] mechanisms;
public HttpAuthenticator(IdentityProviderManager identityProviderManager,
Instance<PathMatchingHttpSecurityPolicy> pathMatchingPolicy,
Instance<HttpAuthenticationMechanism> httpAuthenticationMechanism,
Instance<IdentityProvider<?>> providers) {
this.identityProviderManager = identityProviderManager;
this.pathMatchingPolicy = pathMatchingPolicy;
List<HttpAuthenticationMechanism> mechanisms = new ArrayList<>();
for (HttpAuthenticationMechanism mechanism : httpAuthenticationMechanism) {
boolean found = false;
for (Class<? extends AuthenticationRequest> mechType : mechanism.getCredentialTypes()) {
for (IdentityProvider<?> i : providers) {
if (i.getRequestType().equals(mechType)) {
found = true;
break;
}
}
if (found == true) {
break;
}
}
if (found || mechanism.getCredentialTypes().isEmpty()) {
mechanisms.add(mechanism);
}
}
if (mechanisms.isEmpty()) {
this.mechanisms = new HttpAuthenticationMechanism[] { new NoAuthenticationMechanism() };
} else {
mechanisms.sort(new Comparator<HttpAuthenticationMechanism>() {
@Override
public int compare(HttpAuthenticationMechanism mech1, HttpAuthenticationMechanism mech2) {
return Integer.compare(mech2.getPriority(), mech1.getPriority());
}
});
this.mechanisms = mechanisms.toArray(new HttpAuthenticationMechanism[mechanisms.size()]);
}
}
IdentityProviderManager getIdentityProviderManager() {
return identityProviderManager;
}
/**
* Attempts authentication with the contents of the request. If this is possible the Uni
* will resolve to a valid SecurityIdentity when it is subscribed to. Note that Uni is lazy,
* so this may not happen until the Uni is subscribed to.
* <p>
* If invalid credentials are present then the completion stage will resolve to a
* {@link io.quarkus.security.AuthenticationFailedException}
* <p>
* If no credentials are present it will resolve to null.
*/
public Uni<SecurityIdentity> attemptAuthentication(RoutingContext routingContext) {
String pathSpecificMechanism = pathMatchingPolicy.isResolvable()
? pathMatchingPolicy.get().getAuthMechanismName(routingContext)
: null;
Uni<HttpAuthenticationMechanism> matchingMechUni = findBestCandidateMechanism(routingContext, pathSpecificMechanism);
if (matchingMechUni == null) {
return createSecurityIdentity(routingContext);
}
return matchingMechUni.onItem()
.transformToUni(new Function<HttpAuthenticationMechanism, Uni<? extends SecurityIdentity>>() {
@Override
public Uni<SecurityIdentity> apply(HttpAuthenticationMechanism mech) {
if (mech != null) {
return mech.authenticate(routingContext, identityProviderManager);
} else if (pathSpecificMechanism != null) {
return Uni.createFrom().optional(Optional.empty());
}
return createSecurityIdentity(routingContext);
}
});
}
private Uni<SecurityIdentity> createSecurityIdentity(RoutingContext routingContext) {
Uni<SecurityIdentity> result = mechanisms[0].authenticate(routingContext, identityProviderManager);
for (int i = 1; i < mechanisms.length; ++i) {
HttpAuthenticationMechanism mech = mechanisms[i];
result = result.onItem().transformToUni(new Function<SecurityIdentity, Uni<? extends SecurityIdentity>>() {
@Override
public Uni<SecurityIdentity> apply(SecurityIdentity data) {
if (data != null) {
return Uni.createFrom().item(data);
}
return mech.authenticate(routingContext, identityProviderManager);
}
});
}
return result;
}
/**
* @return
*/
private boolean usePathSpecificMechanism(RoutingContext routingContext) {
return pathMatchingPolicy.isResolvable() && pathMatchingPolicy.get().getAuthMechanismName(routingContext) != null;
}
public Uni<ChallengeData> getChallenge(RoutingContext routingContext) {
if (usePathSpecificMechanism(routingContext)) {
HttpAuthenticationMechanism matchingMech = routingContext.get(HttpAuthenticationMechanism.class.getName());
if (matchingMech != null) {
return matchingMech.getChallenge(routingContext);
}
}
Uni<ChallengeData> result = mechanisms[0].getChallenge(routingContext);
for (int i = 1; i < mechanisms.length; ++i) {
HttpAuthenticationMechanism mech = mechanisms[i];
result = result.onItem().transformToUni(new Function<ChallengeData, Uni<? extends ChallengeData>>() {
@Override
public Uni<? extends ChallengeData> apply(ChallengeData data) {
if (data != null) {
return Uni.createFrom().item(data);
}
return mech.getChallenge(routingContext);
}
});
}
return result;
}
private Uni<HttpAuthenticationMechanism> findBestCandidateMechanism(RoutingContext routingContext,
String pathSpecificMechanism) {
Uni<HttpAuthenticationMechanism> result = null;
if (pathSpecificMechanism != null) {
result = getPathSpecificMechanism(0, routingContext, pathSpecificMechanism);
for (int i = 1; i < mechanisms.length; ++i) {
int mechIndex = i;
result = result.onItem().transformToUni(
new Function<HttpAuthenticationMechanism, Uni<? extends HttpAuthenticationMechanism>>() {
@Override
public Uni<? extends HttpAuthenticationMechanism> apply(HttpAuthenticationMechanism mech) {
if (mech != null) {
return Uni.createFrom().item(mech);
}
return getPathSpecificMechanism(mechIndex, routingContext, pathSpecificMechanism);
}
});
}
}
return result;
}
private Uni<HttpAuthenticationMechanism> getPathSpecificMechanism(int index, RoutingContext routingContext,
String pathSpecificMechanism) {
return getCredentialTransport(mechanisms[index], routingContext).onItem()
.transform(new Function<HttpCredentialTransport, HttpAuthenticationMechanism>() {
@Override
public HttpAuthenticationMechanism apply(HttpCredentialTransport t) {
if (t != null && t.getAuthenticationScheme().equalsIgnoreCase(pathSpecificMechanism)) {
routingContext.put(HttpAuthenticationMechanism.class.getName(), mechanisms[index]);
return mechanisms[index];
}
return null;
}
});
}
private static Uni<HttpCredentialTransport> getCredentialTransport(HttpAuthenticationMechanism mechanism,
RoutingContext routingContext) {
try {
return mechanism.getCredentialTransport(routingContext);
} catch (UnsupportedOperationException ex) {
return Uni.createFrom().item(mechanism.getCredentialTransport());
}
}
static class NoAuthenticationMechanism implements HttpAuthenticationMechanism {
@Override
public Uni<SecurityIdentity> authenticate(RoutingContext context,
IdentityProviderManager identityProviderManager) {
return Uni.createFrom().optional(Optional.empty());
}
@Override
public Uni<ChallengeData> getChallenge(RoutingContext context) {
ChallengeData challengeData = new ChallengeData(HttpResponseStatus.FORBIDDEN.code(), null, null);
return Uni.createFrom().item(challengeData);
}
@Override
public Set<Class<? extends AuthenticationRequest>> getCredentialTypes() {
return Collections.singleton(AnonymousAuthenticationRequest.class);
}
@Override
public HttpCredentialTransport getCredentialTransport() {
return null;
}
}
static class NoopCloseTask implements Runnable {
static final NoopCloseTask INSTANCE = new NoopCloseTask();
@Override
public void run() {
}
}
} | class HttpAuthenticator {
private final IdentityProviderManager identityProviderManager;
private final Instance<PathMatchingHttpSecurityPolicy> pathMatchingPolicy;
private final HttpAuthenticationMechanism[] mechanisms;
public HttpAuthenticator(IdentityProviderManager identityProviderManager,
Instance<PathMatchingHttpSecurityPolicy> pathMatchingPolicy,
Instance<HttpAuthenticationMechanism> httpAuthenticationMechanism,
Instance<IdentityProvider<?>> providers) {
this.identityProviderManager = identityProviderManager;
this.pathMatchingPolicy = pathMatchingPolicy;
List<HttpAuthenticationMechanism> mechanisms = new ArrayList<>();
for (HttpAuthenticationMechanism mechanism : httpAuthenticationMechanism) {
boolean found = false;
for (Class<? extends AuthenticationRequest> mechType : mechanism.getCredentialTypes()) {
for (IdentityProvider<?> i : providers) {
if (i.getRequestType().equals(mechType)) {
found = true;
break;
}
}
if (found == true) {
break;
}
}
if (found || mechanism.getCredentialTypes().isEmpty()) {
mechanisms.add(mechanism);
}
}
if (mechanisms.isEmpty()) {
this.mechanisms = new HttpAuthenticationMechanism[] { new NoAuthenticationMechanism() };
} else {
mechanisms.sort(new Comparator<HttpAuthenticationMechanism>() {
@Override
public int compare(HttpAuthenticationMechanism mech1, HttpAuthenticationMechanism mech2) {
return Integer.compare(mech2.getPriority(), mech1.getPriority());
}
});
this.mechanisms = mechanisms.toArray(new HttpAuthenticationMechanism[mechanisms.size()]);
}
}
IdentityProviderManager getIdentityProviderManager() {
return identityProviderManager;
}
/**
* Attempts authentication with the contents of the request. If this is possible the Uni
* will resolve to a valid SecurityIdentity when it is subscribed to. Note that Uni is lazy,
* so this may not happen until the Uni is subscribed to.
* <p>
* If invalid credentials are present then the completion stage will resolve to a
* {@link io.quarkus.security.AuthenticationFailedException}
* <p>
* If no credentials are present it will resolve to null.
*/
public Uni<SecurityIdentity> attemptAuthentication(RoutingContext routingContext) {
String pathSpecificMechanism = pathMatchingPolicy.isResolvable()
? pathMatchingPolicy.get().getAuthMechanismName(routingContext)
: null;
Uni<HttpAuthenticationMechanism> matchingMechUni = findBestCandidateMechanism(routingContext, pathSpecificMechanism);
if (matchingMechUni == null) {
return createSecurityIdentity(routingContext);
}
return matchingMechUni.onItem()
.transformToUni(new Function<HttpAuthenticationMechanism, Uni<? extends SecurityIdentity>>() {
@Override
public Uni<SecurityIdentity> apply(HttpAuthenticationMechanism mech) {
if (mech != null) {
return mech.authenticate(routingContext, identityProviderManager);
} else if (pathSpecificMechanism != null) {
return Uni.createFrom().optional(Optional.empty());
}
return createSecurityIdentity(routingContext);
}
});
}
private Uni<SecurityIdentity> createSecurityIdentity(RoutingContext routingContext) {
Uni<SecurityIdentity> result = mechanisms[0].authenticate(routingContext, identityProviderManager);
for (int i = 1; i < mechanisms.length; ++i) {
HttpAuthenticationMechanism mech = mechanisms[i];
result = result.onItem().transformToUni(new Function<SecurityIdentity, Uni<? extends SecurityIdentity>>() {
@Override
public Uni<SecurityIdentity> apply(SecurityIdentity data) {
if (data != null) {
return Uni.createFrom().item(data);
}
return mech.authenticate(routingContext, identityProviderManager);
}
});
}
return result;
}
/**
* @return
*/
public Uni<ChallengeData> getChallenge(RoutingContext routingContext) {
if (mechanisms.length > 1) {
HttpAuthenticationMechanism matchingMech = routingContext.get(HttpAuthenticationMechanism.class.getName());
if (matchingMech != null) {
return matchingMech.getChallenge(routingContext);
}
}
Uni<ChallengeData> result = mechanisms[0].getChallenge(routingContext);
for (int i = 1; i < mechanisms.length; ++i) {
HttpAuthenticationMechanism mech = mechanisms[i];
result = result.onItem().transformToUni(new Function<ChallengeData, Uni<? extends ChallengeData>>() {
@Override
public Uni<? extends ChallengeData> apply(ChallengeData data) {
if (data != null) {
return Uni.createFrom().item(data);
}
return mech.getChallenge(routingContext);
}
});
}
return result;
}
private Uni<HttpAuthenticationMechanism> findBestCandidateMechanism(RoutingContext routingContext,
String pathSpecificMechanism) {
Uni<HttpAuthenticationMechanism> result = null;
if (pathSpecificMechanism != null) {
result = getPathSpecificMechanism(0, routingContext, pathSpecificMechanism);
for (int i = 1; i < mechanisms.length; ++i) {
int mechIndex = i;
result = result.onItem().transformToUni(
new Function<HttpAuthenticationMechanism, Uni<? extends HttpAuthenticationMechanism>>() {
@Override
public Uni<? extends HttpAuthenticationMechanism> apply(HttpAuthenticationMechanism mech) {
if (mech != null) {
return Uni.createFrom().item(mech);
}
return getPathSpecificMechanism(mechIndex, routingContext, pathSpecificMechanism);
}
});
}
}
return result;
}
private Uni<HttpAuthenticationMechanism> getPathSpecificMechanism(int index, RoutingContext routingContext,
String pathSpecificMechanism) {
return getCredentialTransport(mechanisms[index], routingContext).onItem()
.transform(new Function<HttpCredentialTransport, HttpAuthenticationMechanism>() {
@Override
public HttpAuthenticationMechanism apply(HttpCredentialTransport t) {
if (t != null && t.getAuthenticationScheme().equalsIgnoreCase(pathSpecificMechanism)) {
routingContext.put(HttpAuthenticationMechanism.class.getName(), mechanisms[index]);
return mechanisms[index];
}
return null;
}
});
}
private static Uni<HttpCredentialTransport> getCredentialTransport(HttpAuthenticationMechanism mechanism,
RoutingContext routingContext) {
try {
return mechanism.getCredentialTransport(routingContext);
} catch (UnsupportedOperationException ex) {
return Uni.createFrom().item(mechanism.getCredentialTransport());
}
}
static class NoAuthenticationMechanism implements HttpAuthenticationMechanism {
@Override
public Uni<SecurityIdentity> authenticate(RoutingContext context,
IdentityProviderManager identityProviderManager) {
return Uni.createFrom().optional(Optional.empty());
}
@Override
public Uni<ChallengeData> getChallenge(RoutingContext context) {
ChallengeData challengeData = new ChallengeData(HttpResponseStatus.FORBIDDEN.code(), null, null);
return Uni.createFrom().item(challengeData);
}
@Override
public Set<Class<? extends AuthenticationRequest>> getCredentialTypes() {
return Collections.singleton(AnonymousAuthenticationRequest.class);
}
@Override
public HttpCredentialTransport getCredentialTransport() {
return null;
}
}
static class NoopCloseTask implements Runnable {
static final NoopCloseTask INSTANCE = new NoopCloseTask();
@Override
public void run() {
}
}
} |
Even though the operator precedence is well defined here, I think it could be good to have some parentheses to be entirely explicit about the intention | public double getRetryDelay(int retry) {
long retryMultiplier = 0l;
if (retry > 1) {
retryMultiplier = Math.min(10000, 1L << (retry-1));
}
return Math.min(10.0, retryMultiplier*baseDelayUS.get()/US);
} | return Math.min(10.0, retryMultiplier*baseDelayUS.get()/US); | public double getRetryDelay(int retry) {
long retryMultiplier = 0l;
if (retry > 1) {
retryMultiplier = 1L << Math.min(20, retry-1);
}
return Math.min(10.0, (retryMultiplier*baseDelayUS.get())/US);
} | class RetryTransientErrorsPolicy implements RetryPolicy {
private static final double US = 1000000;
private final AtomicBoolean enabled = new AtomicBoolean(true);
private volatile AtomicLong baseDelayUS = new AtomicLong(1000);
/**
* Sets whether or not this policy should allow retries or not.
*
* @param enabled True to allow retries.
* @return This, to allow chaining.
*/
public RetryTransientErrorsPolicy setEnabled(boolean enabled) {
this.enabled.set(enabled);
return this;
}
/**
* Sets the base delay in seconds to wait between retries. This amount is multiplied by the retry number.
*
* @param baseDelay The time in seconds.
* @return This, to allow chaining.
*/
public RetryTransientErrorsPolicy setBaseDelay(double baseDelay) {
this.baseDelayUS.set((long)(baseDelay*US));
return this;
}
@Override
public boolean canRetry(int errorCode) {
return enabled.get() && errorCode < ErrorCode.FATAL_ERROR;
}
@Override
} | class RetryTransientErrorsPolicy implements RetryPolicy {
private static final double US = 1000000;
private final AtomicBoolean enabled = new AtomicBoolean(true);
private volatile AtomicLong baseDelayUS = new AtomicLong(1000);
/**
* Sets whether or not this policy should allow retries or not.
*
* @param enabled True to allow retries.
* @return This, to allow chaining.
*/
public RetryTransientErrorsPolicy setEnabled(boolean enabled) {
this.enabled.set(enabled);
return this;
}
/**
* Sets the base delay in seconds to wait between retries. This amount is multiplied by the retry number.
*
* @param baseDelay The time in seconds.
* @return This, to allow chaining.
*/
public RetryTransientErrorsPolicy setBaseDelay(double baseDelay) {
this.baseDelayUS.set((long)(baseDelay*US));
return this;
}
@Override
public boolean canRetry(int errorCode) {
return enabled.get() && errorCode < ErrorCode.FATAL_ERROR;
}
@Override
} |
```suggestion candidateBeList.add(backendList.get(beIndex++ % size)); ``` obtain size = backendList.size() in other place | private List<TScanRangeLocations> getShardLocations() throws UserException {
if (esTablePartitions == null) {
if (table.getLastMetaDataSyncException() != null) {
throw new UserException("fetch es table [" + table.getName() + "] metadata failure: " + table.getLastMetaDataSyncException().getLocalizedMessage());
}
throw new UserException("EsTable metadata has not been synced, Try it later");
}
Collection<Long> partitionIds = partitionPrune(esTablePartitions.getPartitionInfo());
List<EsShardPartitions> selectedIndex = Lists.newArrayList();
ArrayList<String> unPartitionedIndices = Lists.newArrayList();
ArrayList<String> partitionedIndices = Lists.newArrayList();
for (EsShardPartitions esShardPartitions : esTablePartitions.getUnPartitionedIndexStates().values()) {
selectedIndex.add(esShardPartitions);
unPartitionedIndices.add(esShardPartitions.getIndexName());
}
if (partitionIds != null) {
for (Long partitionId : partitionIds) {
EsShardPartitions indexState = esTablePartitions.getEsShardPartitions(partitionId);
selectedIndex.add(indexState);
partitionedIndices.add(indexState.getIndexName());
}
}
if (LOG.isDebugEnabled()) {
LOG.debug("partition prune finished, unpartitioned index [{}], "
+ "partitioned index [{}]",
String.join(",", unPartitionedIndices),
String.join(",", partitionedIndices));
}
int beIndex = random.nextInt(backendList.size());
List<TScanRangeLocations> result = Lists.newArrayList();
for (EsShardPartitions indexState : selectedIndex) {
for (List<EsShardRouting> shardRouting : indexState.getShardRoutings().values()) {
Set<Backend> colocatedBes = Sets.newHashSet();
int numBe = Math.min(3, backendMap.size());
List<TNetworkAddress> shardAllocations = new ArrayList<>();
for (EsShardRouting item : shardRouting) {
shardAllocations.add(EsTable.TRANSPORT_HTTP.equals(table.getTransport()) ? item.getHttpAddress() : item.getAddress());
}
Collections.shuffle(shardAllocations, random);
for (TNetworkAddress address : shardAllocations) {
colocatedBes.addAll(backendMap.get(address.getHostname()));
}
boolean usingRandomBackend = colocatedBes.size() == 0;
List<Backend> candidateBeList = Lists.newArrayList();
if (usingRandomBackend) {
for (int i = 0; i < numBe; ++i) {
candidateBeList.add(backendList.get(beIndex++ % backendList.size()));
}
} else {
candidateBeList.addAll(colocatedBes);
Collections.shuffle(candidateBeList);
}
TScanRangeLocations locations = new TScanRangeLocations();
for (int i = 0; i < numBe && i < candidateBeList.size(); ++i) {
TScanRangeLocation location = new TScanRangeLocation();
Backend be = candidateBeList.get(i);
location.setBackend_id(be.getId());
location.setServer(new TNetworkAddress(be.getHost(), be.getBePort()));
locations.addToLocations(location);
}
TEsScanRange esScanRange = new TEsScanRange();
esScanRange.setEs_hosts(shardAllocations);
esScanRange.setIndex(shardRouting.get(0).getIndexName());
esScanRange.setType(table.getMappingType());
esScanRange.setShard_id(shardRouting.get(0).getShardId());
TScanRange scanRange = new TScanRange();
scanRange.setEs_scan_range(esScanRange);
locations.setScan_range(scanRange);
result.add(locations);
}
}
if (LOG.isDebugEnabled()) {
StringBuilder scratchBuilder = new StringBuilder();
for (TScanRangeLocations scanRangeLocations : result) {
scratchBuilder.append(scanRangeLocations.toString());
scratchBuilder.append(" ");
}
LOG.debug("ES table {} scan ranges {}", table.getName(), scratchBuilder.toString());
}
return result;
} | candidateBeList.add(backendList.get(beIndex++ % backendList.size())); | private List<TScanRangeLocations> getShardLocations() throws UserException {
if (esTablePartitions == null) {
if (table.getLastMetaDataSyncException() != null) {
throw new UserException("fetch es table [" + table.getName() + "] metadata failure: " + table.getLastMetaDataSyncException().getLocalizedMessage());
}
throw new UserException("EsTable metadata has not been synced, Try it later");
}
Collection<Long> partitionIds = partitionPrune(esTablePartitions.getPartitionInfo());
List<EsShardPartitions> selectedIndex = Lists.newArrayList();
ArrayList<String> unPartitionedIndices = Lists.newArrayList();
ArrayList<String> partitionedIndices = Lists.newArrayList();
for (EsShardPartitions esShardPartitions : esTablePartitions.getUnPartitionedIndexStates().values()) {
selectedIndex.add(esShardPartitions);
unPartitionedIndices.add(esShardPartitions.getIndexName());
}
if (partitionIds != null) {
for (Long partitionId : partitionIds) {
EsShardPartitions indexState = esTablePartitions.getEsShardPartitions(partitionId);
selectedIndex.add(indexState);
partitionedIndices.add(indexState.getIndexName());
}
}
if (LOG.isDebugEnabled()) {
LOG.debug("partition prune finished, unpartitioned index [{}], "
+ "partitioned index [{}]",
String.join(",", unPartitionedIndices),
String.join(",", partitionedIndices));
}
int size = backendList.size();
int beIndex = random.nextInt(size);
List<TScanRangeLocations> result = Lists.newArrayList();
for (EsShardPartitions indexState : selectedIndex) {
for (List<EsShardRouting> shardRouting : indexState.getShardRoutings().values()) {
Set<Backend> colocatedBes = Sets.newHashSet();
int numBe = Math.min(3, size);
List<TNetworkAddress> shardAllocations = new ArrayList<>();
for (EsShardRouting item : shardRouting) {
shardAllocations.add(EsTable.TRANSPORT_HTTP.equals(table.getTransport()) ? item.getHttpAddress() : item.getAddress());
}
Collections.shuffle(shardAllocations, random);
for (TNetworkAddress address : shardAllocations) {
colocatedBes.addAll(backendMap.get(address.getHostname()));
}
boolean usingRandomBackend = colocatedBes.size() == 0;
List<Backend> candidateBeList = Lists.newArrayList();
if (usingRandomBackend) {
for (int i = 0; i < numBe; ++i) {
candidateBeList.add(backendList.get(beIndex++ % size));
}
} else {
candidateBeList.addAll(colocatedBes);
Collections.shuffle(candidateBeList);
}
TScanRangeLocations locations = new TScanRangeLocations();
for (int i = 0; i < numBe && i < candidateBeList.size(); ++i) {
TScanRangeLocation location = new TScanRangeLocation();
Backend be = candidateBeList.get(i);
location.setBackend_id(be.getId());
location.setServer(new TNetworkAddress(be.getHost(), be.getBePort()));
locations.addToLocations(location);
}
TEsScanRange esScanRange = new TEsScanRange();
esScanRange.setEs_hosts(shardAllocations);
esScanRange.setIndex(shardRouting.get(0).getIndexName());
esScanRange.setType(table.getMappingType());
esScanRange.setShard_id(shardRouting.get(0).getShardId());
TScanRange scanRange = new TScanRange();
scanRange.setEs_scan_range(esScanRange);
locations.setScan_range(scanRange);
result.add(locations);
}
}
if (LOG.isDebugEnabled()) {
StringBuilder scratchBuilder = new StringBuilder();
for (TScanRangeLocations scanRangeLocations : result) {
scratchBuilder.append(scanRangeLocations.toString());
scratchBuilder.append(" ");
}
LOG.debug("ES table {} scan ranges {}", table.getName(), scratchBuilder.toString());
}
return result;
} | class EsScanNode extends ScanNode {
private static final Logger LOG = LogManager.getLogger(EsScanNode.class);
private final Random random = new Random(System.currentTimeMillis());
private Multimap<String, Backend> backendMap;
private List<Backend> backendList;
private EsTablePartitions esTablePartitions;
private List<TScanRangeLocations> shardScanRanges = Lists.newArrayList();
private EsTable table;
boolean isFinalized = false;
public EsScanNode(PlanNodeId id, TupleDescriptor desc, String planNodeName) {
super(id, desc, planNodeName);
table = (EsTable) (desc.getTable());
esTablePartitions = table.getEsTablePartitions();
}
@Override
public void init(Analyzer analyzer) throws UserException {
super.init(analyzer);
assignBackends();
}
@Override
public int getNumInstances() {
return shardScanRanges.size();
}
@Override
public List<TScanRangeLocations> getScanRangeLocations(long maxScanRangeLength) {
return shardScanRanges;
}
@Override
public void finalize(Analyzer analyzer) throws UserException {
if (isFinalized) {
return;
}
try {
shardScanRanges = getShardLocations();
} catch (AnalysisException e) {
throw new UserException(e.getMessage());
}
isFinalized = true;
}
/**
* return whether can use the doc_values scan
* 0 and 1 are returned to facilitate Doris BE processing
*
* @param desc the fields needs to read from ES
* @param docValueContext the mapping for docvalues fields from origin field to doc_value fields
* @return
*/
private int useDocValueScan(TupleDescriptor desc, Map<String, String> docValueContext) {
ArrayList<SlotDescriptor> slotDescriptors = desc.getSlots();
List<String> selectedFields = new ArrayList<>(slotDescriptors.size());
for (SlotDescriptor slotDescriptor : slotDescriptors) {
selectedFields.add(slotDescriptor.getColumn().getName());
}
if (selectedFields.size() > table.maxDocValueFields()) {
return 0;
}
Set<String> docValueFields = docValueContext.keySet();
boolean useDocValue = true;
for (String selectedField : selectedFields) {
if (!docValueFields.contains(selectedField)) {
useDocValue = false;
break;
}
}
return useDocValue ? 1 : 0;
}
@Override
protected void toThrift(TPlanNode msg) {
if (EsTable.TRANSPORT_HTTP.equals(table.getTransport())) {
msg.node_type = TPlanNodeType.ES_HTTP_SCAN_NODE;
} else {
msg.node_type = TPlanNodeType.ES_SCAN_NODE;
}
Map<String, String> properties = Maps.newHashMap();
properties.put(EsTable.USER, table.getUserName());
properties.put(EsTable.PASSWORD, table.getPasswd());
TEsScanNode esScanNode = new TEsScanNode(desc.getId().asInt());
esScanNode.setProperties(properties);
if (table.isDocValueScanEnable()) {
esScanNode.setDocvalue_context(table.docValueContext());
properties.put(EsTable.DOC_VALUES_MODE, String.valueOf(useDocValueScan(desc, table.docValueContext())));
}
if (table.isKeywordSniffEnable() && table.fieldsContext().size() > 0) {
esScanNode.setFields_context(table.fieldsContext());
}
msg.es_scan_node = esScanNode;
}
private void assignBackends() throws UserException {
backendMap = HashMultimap.create();
backendList = Lists.newArrayList();
for (Backend be : Catalog.getCurrentSystemInfo().getIdToBackend().values()) {
if (be.isAlive()) {
backendMap.put(be.getHost(), be);
backendList.add(be);
}
}
if (backendMap.isEmpty()) {
throw new UserException("No Alive backends");
}
}
/**
* if the index name is an alias or index pattern, then the es table is related
* with one or more indices some indices could be pruned by using partition info
* in index settings currently only support range partition setting
*
* @param partitionInfo
* @return
* @throws AnalysisException
*/
private Collection<Long> partitionPrune(PartitionInfo partitionInfo) throws AnalysisException {
if (partitionInfo == null) {
return null;
}
PartitionPruner partitionPruner = null;
switch (partitionInfo.getType()) {
case RANGE: {
RangePartitionInfo rangePartitionInfo = (RangePartitionInfo) partitionInfo;
Map<Long, Range<PartitionKey>> keyRangeById = rangePartitionInfo.getIdToRange(false);
partitionPruner = new RangePartitionPruner(keyRangeById, rangePartitionInfo.getPartitionColumns(),
columnFilters);
return partitionPruner.prune();
}
case UNPARTITIONED: {
return null;
}
default: {
return null;
}
}
}
@Override
protected String getNodeExplainString(String prefix, TExplainLevel detailLevel) {
StringBuilder output = new StringBuilder();
output.append(prefix).append("TABLE: ").append(table.getName()).append("\n");
if (null != sortColumn) {
output.append(prefix).append("SORT COLUMN: ").append(sortColumn).append("\n");
}
if (!conjuncts.isEmpty()) {
output.append(prefix).append("PREDICATES: ").append(
getExplainString(conjuncts)).append("\n");
output.append(prefix).append("LOCAL_PREDICATES: ").append(" ").append("\n");
output.append(prefix).append("REMOTE_PREDICATES: ").append(" ").append("\n");
output.append(prefix).append("ES_QUERY_DSL: ").append(" ").append("\n");
} else {
output.append(prefix).append("ES_QUERY_DSL: ").append("{\"match_all\": {}}").append("\n");
}
String indexName = table.getIndexName();
String typeName = table.getMappingType();
output.append(prefix)
.append(String.format("ES index/type: %s/%s", indexName, typeName))
.append("\n");
return output.toString();
}
} | class EsScanNode extends ScanNode {
private static final Logger LOG = LogManager.getLogger(EsScanNode.class);
private final Random random = new Random(System.currentTimeMillis());
private Multimap<String, Backend> backendMap;
private List<Backend> backendList;
private EsTablePartitions esTablePartitions;
private List<TScanRangeLocations> shardScanRanges = Lists.newArrayList();
private EsTable table;
boolean isFinalized = false;
public EsScanNode(PlanNodeId id, TupleDescriptor desc, String planNodeName) {
super(id, desc, planNodeName);
table = (EsTable) (desc.getTable());
esTablePartitions = table.getEsTablePartitions();
}
@Override
public void init(Analyzer analyzer) throws UserException {
super.init(analyzer);
assignBackends();
}
@Override
public int getNumInstances() {
return shardScanRanges.size();
}
@Override
public List<TScanRangeLocations> getScanRangeLocations(long maxScanRangeLength) {
return shardScanRanges;
}
@Override
public void finalize(Analyzer analyzer) throws UserException {
if (isFinalized) {
return;
}
try {
shardScanRanges = getShardLocations();
} catch (AnalysisException e) {
throw new UserException(e.getMessage());
}
isFinalized = true;
}
/**
* return whether can use the doc_values scan
* 0 and 1 are returned to facilitate Doris BE processing
*
* @param desc the fields needs to read from ES
* @param docValueContext the mapping for docvalues fields from origin field to doc_value fields
* @return
*/
private int useDocValueScan(TupleDescriptor desc, Map<String, String> docValueContext) {
ArrayList<SlotDescriptor> slotDescriptors = desc.getSlots();
List<String> selectedFields = new ArrayList<>(slotDescriptors.size());
for (SlotDescriptor slotDescriptor : slotDescriptors) {
selectedFields.add(slotDescriptor.getColumn().getName());
}
if (selectedFields.size() > table.maxDocValueFields()) {
return 0;
}
Set<String> docValueFields = docValueContext.keySet();
boolean useDocValue = true;
for (String selectedField : selectedFields) {
if (!docValueFields.contains(selectedField)) {
useDocValue = false;
break;
}
}
return useDocValue ? 1 : 0;
}
@Override
protected void toThrift(TPlanNode msg) {
if (EsTable.TRANSPORT_HTTP.equals(table.getTransport())) {
msg.node_type = TPlanNodeType.ES_HTTP_SCAN_NODE;
} else {
msg.node_type = TPlanNodeType.ES_SCAN_NODE;
}
Map<String, String> properties = Maps.newHashMap();
properties.put(EsTable.USER, table.getUserName());
properties.put(EsTable.PASSWORD, table.getPasswd());
TEsScanNode esScanNode = new TEsScanNode(desc.getId().asInt());
esScanNode.setProperties(properties);
if (table.isDocValueScanEnable()) {
esScanNode.setDocvalue_context(table.docValueContext());
properties.put(EsTable.DOC_VALUES_MODE, String.valueOf(useDocValueScan(desc, table.docValueContext())));
}
if (table.isKeywordSniffEnable() && table.fieldsContext().size() > 0) {
esScanNode.setFields_context(table.fieldsContext());
}
msg.es_scan_node = esScanNode;
}
private void assignBackends() throws UserException {
backendMap = HashMultimap.create();
backendList = Lists.newArrayList();
for (Backend be : Catalog.getCurrentSystemInfo().getIdToBackend().values()) {
if (be.isAlive()) {
backendMap.put(be.getHost(), be);
backendList.add(be);
}
}
if (backendMap.isEmpty()) {
throw new UserException("No Alive backends");
}
}
/**
* if the index name is an alias or index pattern, then the es table is related
* with one or more indices some indices could be pruned by using partition info
* in index settings currently only support range partition setting
*
* @param partitionInfo
* @return
* @throws AnalysisException
*/
private Collection<Long> partitionPrune(PartitionInfo partitionInfo) throws AnalysisException {
if (partitionInfo == null) {
return null;
}
PartitionPruner partitionPruner = null;
switch (partitionInfo.getType()) {
case RANGE: {
RangePartitionInfo rangePartitionInfo = (RangePartitionInfo) partitionInfo;
Map<Long, Range<PartitionKey>> keyRangeById = rangePartitionInfo.getIdToRange(false);
partitionPruner = new RangePartitionPruner(keyRangeById, rangePartitionInfo.getPartitionColumns(),
columnFilters);
return partitionPruner.prune();
}
case UNPARTITIONED: {
return null;
}
default: {
return null;
}
}
}
@Override
protected String getNodeExplainString(String prefix, TExplainLevel detailLevel) {
StringBuilder output = new StringBuilder();
output.append(prefix).append("TABLE: ").append(table.getName()).append("\n");
if (null != sortColumn) {
output.append(prefix).append("SORT COLUMN: ").append(sortColumn).append("\n");
}
if (!conjuncts.isEmpty()) {
output.append(prefix).append("PREDICATES: ").append(
getExplainString(conjuncts)).append("\n");
output.append(prefix).append("LOCAL_PREDICATES: ").append(" ").append("\n");
output.append(prefix).append("REMOTE_PREDICATES: ").append(" ").append("\n");
output.append(prefix).append("ES_QUERY_DSL: ").append(" ").append("\n");
} else {
output.append(prefix).append("ES_QUERY_DSL: ").append("{\"match_all\": {}}").append("\n");
}
String indexName = table.getIndexName();
String typeName = table.getMappingType();
output.append(prefix)
.append(String.format("ES index/type: %s/%s", indexName, typeName))
.append("\n");
return output.toString();
}
} |
According to the spec yes, but in quarkus, if `quarkus.arc.transform-unproxyable-classes=true` (default value) the `final` modifier is simply removed and the method is intercepted. It should be tested in the `io.quarkus.security.test.cdi.SecurityAnnotationOnFinalMethodTest`. | void forEachMethod(ClassInfo clazz, Consumer<MethodInfo> action) {
for (MethodInfo method : clazz.methods()) {
if (method.name().startsWith("<")) {
continue;
}
if (Modifier.isPrivate(method.flags())) {
continue;
}
if (Modifier.isFinal(method.flags())) {
continue;
}
if (method.isSynthetic()) {
continue;
}
action.accept(method);
}
DotName parentClassName = clazz.superName();
if (parentClassName == null || parentClassName.equals(DotNames.OBJECT)) {
return;
}
ClassInfo parentClass = index.getClassByName(parentClassName);
if (parentClass == null) {
return;
}
forEachMethod(parentClass, action);
} | void forEachMethod(ClassInfo clazz, Consumer<MethodInfo> action) {
for (MethodInfo method : clazz.methods()) {
if (method.name().startsWith("<")) {
continue;
}
if (method.isSynthetic()) {
continue;
}
action.accept(method);
}
DotName parentClassName = clazz.superName();
if (parentClassName == null || parentClassName.equals(DotNames.OBJECT)) {
return;
}
ClassInfo parentClass = index.getClassByName(parentClassName);
if (parentClass == null) {
return;
}
forEachMethod(parentClass, action);
} | class FaultToleranceScanner {
private final IndexView index;
private final AnnotationStore annotationStore;
private final AnnotationProxyBuildItem proxy;
private final ClassOutput output;
FaultToleranceScanner(IndexView index, AnnotationStore annotationStore, AnnotationProxyBuildItem proxy,
ClassOutput output) {
this.index = index;
this.annotationStore = annotationStore;
this.proxy = proxy;
this.output = output;
}
boolean hasFTAnnotations(ClassInfo clazz) {
if (annotationStore.hasAnyAnnotation(clazz, DotNames.FT_ANNOTATIONS)) {
return true;
}
for (MethodInfo method : clazz.methods()) {
if (annotationStore.hasAnyAnnotation(method, DotNames.FT_ANNOTATIONS)) {
return true;
}
}
DotName parentClassName = clazz.superName();
if (parentClassName == null || parentClassName.equals(DotNames.OBJECT)) {
return false;
}
ClassInfo parentClass = index.getClassByName(parentClassName);
if (parentClass == null) {
return false;
}
return hasFTAnnotations(parentClass);
}
FaultToleranceMethod createFaultToleranceMethod(ClassInfo beanClass, MethodInfo method) {
Set<Class<? extends Annotation>> annotationsPresentDirectly = new HashSet<>();
FaultToleranceMethod result = new FaultToleranceMethod();
result.beanClass = JandexReflection.load(beanClass.name());
result.method = createMethodDescriptor(method);
result.asynchronous = getAnnotation(Asynchronous.class, method, beanClass, annotationsPresentDirectly);
result.bulkhead = getAnnotation(Bulkhead.class, method, beanClass, annotationsPresentDirectly);
result.circuitBreaker = getAnnotation(CircuitBreaker.class, method, beanClass, annotationsPresentDirectly);
result.fallback = getAnnotation(Fallback.class, method, beanClass, annotationsPresentDirectly);
result.retry = getAnnotation(Retry.class, method, beanClass, annotationsPresentDirectly);
result.timeout = getAnnotation(Timeout.class, method, beanClass, annotationsPresentDirectly);
result.circuitBreakerName = getAnnotation(CircuitBreakerName.class, method, beanClass, annotationsPresentDirectly);
result.customBackoff = getAnnotation(CustomBackoff.class, method, beanClass, annotationsPresentDirectly);
result.exponentialBackoff = getAnnotation(ExponentialBackoff.class, method, beanClass, annotationsPresentDirectly);
result.fibonacciBackoff = getAnnotation(FibonacciBackoff.class, method, beanClass, annotationsPresentDirectly);
result.blocking = getAnnotation(Blocking.class, method, beanClass, annotationsPresentDirectly);
result.nonBlocking = getAnnotation(NonBlocking.class, method, beanClass, annotationsPresentDirectly);
result.annotationsPresentDirectly = annotationsPresentDirectly;
return result;
}
private MethodDescriptor createMethodDescriptor(MethodInfo method) {
MethodDescriptor result = new MethodDescriptor();
result.declaringClass = JandexReflection.load(method.declaringClass().name());
result.name = method.name();
result.parameterTypes = method.parameters()
.stream()
.map(JandexReflection::loadRawType)
.toArray(Class[]::new);
result.returnType = JandexReflection.loadRawType(method.returnType());
return result;
}
private <A extends Annotation> A getAnnotation(Class<A> annotationType, MethodInfo method,
ClassInfo beanClass, Set<Class<? extends Annotation>> directlyPresent) {
DotName annotationName = DotName.createSimple(annotationType.getName());
if (annotationStore.hasAnnotation(method, annotationName)) {
directlyPresent.add(annotationType);
AnnotationInstance annotation = annotationStore.getAnnotation(method, annotationName);
return createAnnotation(annotationType, annotation);
}
return getAnnotationFromClass(annotationType, beanClass);
}
private <A extends Annotation> A getAnnotationFromClass(Class<A> annotationType, ClassInfo clazz) {
DotName annotationName = DotName.createSimple(annotationType.getName());
if (annotationStore.hasAnnotation(clazz, annotationName)) {
AnnotationInstance annotation = annotationStore.getAnnotation(clazz, annotationName);
return createAnnotation(annotationType, annotation);
}
DotName parentClassName = clazz.superName();
if (parentClassName == null || parentClassName.equals(DotNames.OBJECT)) {
return null;
}
ClassInfo parentClass = index.getClassByName(parentClassName);
if (parentClass == null) {
return null;
}
return getAnnotationFromClass(annotationType, parentClass);
}
private <A extends Annotation> A createAnnotation(Class<A> annotationType, AnnotationInstance instance) {
return proxy.builder(instance, annotationType).build(output);
}
} | class FaultToleranceScanner {
private final IndexView index;
private final AnnotationStore annotationStore;
private final AnnotationProxyBuildItem proxy;
private final ClassOutput output;
FaultToleranceScanner(IndexView index, AnnotationStore annotationStore, AnnotationProxyBuildItem proxy,
ClassOutput output) {
this.index = index;
this.annotationStore = annotationStore;
this.proxy = proxy;
this.output = output;
}
boolean hasFTAnnotations(ClassInfo clazz) {
if (annotationStore.hasAnyAnnotation(clazz, DotNames.FT_ANNOTATIONS)) {
return true;
}
for (MethodInfo method : clazz.methods()) {
if (annotationStore.hasAnyAnnotation(method, DotNames.FT_ANNOTATIONS)) {
return true;
}
}
DotName parentClassName = clazz.superName();
if (parentClassName == null || parentClassName.equals(DotNames.OBJECT)) {
return false;
}
ClassInfo parentClass = index.getClassByName(parentClassName);
if (parentClass == null) {
return false;
}
return hasFTAnnotations(parentClass);
}
FaultToleranceMethod createFaultToleranceMethod(ClassInfo beanClass, MethodInfo method) {
Set<Class<? extends Annotation>> annotationsPresentDirectly = new HashSet<>();
FaultToleranceMethod result = new FaultToleranceMethod();
result.beanClass = load(beanClass.name());
result.method = createMethodDescriptor(method);
result.asynchronous = getAnnotation(Asynchronous.class, method, beanClass, annotationsPresentDirectly);
result.bulkhead = getAnnotation(Bulkhead.class, method, beanClass, annotationsPresentDirectly);
result.circuitBreaker = getAnnotation(CircuitBreaker.class, method, beanClass, annotationsPresentDirectly);
result.fallback = getAnnotation(Fallback.class, method, beanClass, annotationsPresentDirectly);
result.retry = getAnnotation(Retry.class, method, beanClass, annotationsPresentDirectly);
result.timeout = getAnnotation(Timeout.class, method, beanClass, annotationsPresentDirectly);
result.circuitBreakerName = getAnnotation(CircuitBreakerName.class, method, beanClass, annotationsPresentDirectly);
result.customBackoff = getAnnotation(CustomBackoff.class, method, beanClass, annotationsPresentDirectly);
result.exponentialBackoff = getAnnotation(ExponentialBackoff.class, method, beanClass, annotationsPresentDirectly);
result.fibonacciBackoff = getAnnotation(FibonacciBackoff.class, method, beanClass, annotationsPresentDirectly);
result.blocking = getAnnotation(Blocking.class, method, beanClass, annotationsPresentDirectly);
result.nonBlocking = getAnnotation(NonBlocking.class, method, beanClass, annotationsPresentDirectly);
result.annotationsPresentDirectly = annotationsPresentDirectly;
return result;
}
private MethodDescriptor createMethodDescriptor(MethodInfo method) {
MethodDescriptor result = new MethodDescriptor();
result.declaringClass = load(method.declaringClass().name());
result.name = method.name();
result.parameterTypes = method.parameters()
.stream()
.map(JandexUtil::loadRawType)
.toArray(Class[]::new);
result.returnType = JandexUtil.loadRawType(method.returnType());
return result;
}
private <A extends Annotation> A getAnnotation(Class<A> annotationType, MethodInfo method,
ClassInfo beanClass, Set<Class<? extends Annotation>> directlyPresent) {
DotName annotationName = DotName.createSimple(annotationType.getName());
if (annotationStore.hasAnnotation(method, annotationName)) {
directlyPresent.add(annotationType);
AnnotationInstance annotation = annotationStore.getAnnotation(method, annotationName);
return createAnnotation(annotationType, annotation);
}
return getAnnotationFromClass(annotationType, beanClass);
}
private <A extends Annotation> A getAnnotationFromClass(Class<A> annotationType, ClassInfo clazz) {
DotName annotationName = DotName.createSimple(annotationType.getName());
if (annotationStore.hasAnnotation(clazz, annotationName)) {
AnnotationInstance annotation = annotationStore.getAnnotation(clazz, annotationName);
return createAnnotation(annotationType, annotation);
}
DotName parentClassName = clazz.superName();
if (parentClassName == null || parentClassName.equals(DotNames.OBJECT)) {
return null;
}
ClassInfo parentClass = index.getClassByName(parentClassName);
if (parentClass == null) {
return null;
}
return getAnnotationFromClass(annotationType, parentClass);
}
private <A extends Annotation> A createAnnotation(Class<A> annotationType, AnnotationInstance instance) {
return proxy.builder(instance, annotationType).build(output);
}
private static Class<?> load(DotName name) {
try {
return Thread.currentThread().getContextClassLoader().loadClass(name.toString());
} catch (ClassNotFoundException e) {
throw new RuntimeException(e);
}
}
} | |
Yes, it's the same format as in `dependency:tree` | public void visit(DependencyNode node) {
final Dependency dep = node.getDependency();
if(dep == null) {
return;
}
if(depth != null) {
buf.setLength(0);
if (!depth.isEmpty()) {
for (int i = 0; i < depth.size() - 1; ++i) {
if (depth.get(i)) {
buf.append("| ");
} else {
buf.append(" ");
}
}
if (depth.get(depth.size() - 1)) {
buf.append("|- ");
} else {
buf.append("\\- ");
}
}
buf.append(dep.getArtifact()).append(':').append(dep.getScope());
buildTreeConsumer.accept(buf.toString());
}
visitEnter(node);
final List<DependencyNode> children = node.getChildren();
if(!children.isEmpty()) {
final int childrenTotal = children.size();
if(childrenTotal == 1) {
if(depth != null) {
depth.add(false);
}
visit(children.get(0));
} else {
if(depth != null) {
depth.add(true);
}
int i = 0;
while(true) {
visit(children.get(i++));
if(i < childrenTotal - 1) {
continue;
} else if(i == childrenTotal) {
break;
} else if(depth != null) {
depth.set(depth.size() - 1, false);
}
}
}
if(depth != null) {
depth.remove(depth.size() - 1);
}
}
visitLeave(node);
} | visitEnter(node); | public void visit(DependencyNode node) {
final Dependency dep = node.getDependency();
if(dep == null) {
return;
}
if(depth != null) {
buf.setLength(0);
if (!depth.isEmpty()) {
for (int i = 0; i < depth.size() - 1; ++i) {
if (depth.get(i)) {
buf.append('\u2502').append(" ");
} else {
buf.append(" ");
}
}
if (depth.get(depth.size() - 1)) {
buf.append('\u251c').append('\u2500').append(' ');
} else {
buf.append('\u2514').append('\u2500').append(' ');
}
}
buf.append(dep.getArtifact()).append(':').append(dep.getScope());
buildTreeConsumer.accept(buf.toString());
}
visitEnter(node);
final List<DependencyNode> children = node.getChildren();
if(!children.isEmpty()) {
final int childrenTotal = children.size();
if(childrenTotal == 1) {
if(depth != null) {
depth.add(false);
}
visit(children.get(0));
} else {
if(depth != null) {
depth.add(true);
}
int i = 0;
while(true) {
visit(children.get(i++));
if(i < childrenTotal - 1) {
continue;
} else if(i == childrenTotal) {
break;
} else if(depth != null) {
depth.set(depth.size() - 1, false);
}
}
}
if(depth != null) {
depth.remove(depth.size() - 1);
}
}
visitLeave(node);
} | class BuildDependencyGraphVisitor {
private final Set<AppArtifactKey> appDeps;
private final StringBuilder buf;
private final Consumer<String> buildTreeConsumer;
private final List<Boolean> depth;
private DependencyNode deploymentNode;
private DependencyNode runtimeNode;
private Artifact runtimeArtifact;
private final List<DependencyNode> deploymentDepNodes = new ArrayList<>();
private final List<ArtifactRequest> requests = new ArrayList<>();
public BuildDependencyGraphVisitor(Set<AppArtifactKey> appDeps, Consumer<String> buildTreeConsumer) {
this.appDeps = appDeps;
this.buildTreeConsumer = buildTreeConsumer;
if(buildTreeConsumer == null) {
buf = null;
depth = null;
} else {
buf = new StringBuilder();
depth = new ArrayList<>();
}
}
public List<DependencyNode> getDeploymentNodes() {
return deploymentDepNodes;
}
public List<ArtifactRequest> getArtifactRequests() {
return requests;
}
private void visitEnter(DependencyNode node) {
final Dependency dep = node.getDependency();
if (deploymentNode == null) {
runtimeArtifact = DeploymentInjectingDependencyVisitor.getInjectedDependency(node);
if (runtimeArtifact != null) {
deploymentNode = node;
}
} else if (runtimeArtifact != null && runtimeNode == null && runtimeArtifact.equals(dep.getArtifact())) {
runtimeNode = node;
}
}
private void visitLeave(DependencyNode node) {
final Dependency dep = node.getDependency();
final Artifact artifact = dep.getArtifact();
if (artifact.getFile() == null) {
requests.add(new ArtifactRequest(node));
}
if (deploymentNode != null) {
if (runtimeNode == null && !appDeps.contains(new AppArtifactKey(artifact.getGroupId(),
artifact.getArtifactId(), artifact.getClassifier(), artifact.getExtension()))) {
deploymentDepNodes.add(node);
} else if (runtimeNode == node) {
runtimeNode = null;
runtimeArtifact = null;
}
if (deploymentNode == node) {
deploymentNode = null;
}
}
}
} | class BuildDependencyGraphVisitor {
private final Set<AppArtifactKey> appDeps;
private final StringBuilder buf;
private final Consumer<String> buildTreeConsumer;
private final List<Boolean> depth;
private DependencyNode deploymentNode;
private DependencyNode runtimeNode;
private Artifact runtimeArtifact;
private final List<DependencyNode> deploymentDepNodes = new ArrayList<>();
private final List<ArtifactRequest> requests = new ArrayList<>();
public BuildDependencyGraphVisitor(Set<AppArtifactKey> appDeps, Consumer<String> buildTreeConsumer) {
this.appDeps = appDeps;
this.buildTreeConsumer = buildTreeConsumer;
if(buildTreeConsumer == null) {
buf = null;
depth = null;
} else {
buf = new StringBuilder();
depth = new ArrayList<>();
}
}
public List<DependencyNode> getDeploymentNodes() {
return deploymentDepNodes;
}
public List<ArtifactRequest> getArtifactRequests() {
return requests;
}
private void visitEnter(DependencyNode node) {
final Dependency dep = node.getDependency();
if (deploymentNode == null) {
runtimeArtifact = DeploymentInjectingDependencyVisitor.getInjectedDependency(node);
if (runtimeArtifact != null) {
deploymentNode = node;
}
} else if (runtimeArtifact != null && runtimeNode == null && runtimeArtifact.equals(dep.getArtifact())) {
runtimeNode = node;
}
}
private void visitLeave(DependencyNode node) {
final Dependency dep = node.getDependency();
final Artifact artifact = dep.getArtifact();
if (artifact.getFile() == null) {
requests.add(new ArtifactRequest(node));
}
if (deploymentNode != null) {
if (runtimeNode == null && !appDeps.contains(new AppArtifactKey(artifact.getGroupId(),
artifact.getArtifactId(), artifact.getClassifier(), artifact.getExtension()))) {
deploymentDepNodes.add(node);
} else if (runtimeNode == node) {
runtimeNode = null;
runtimeArtifact = null;
}
if (deploymentNode == node) {
deploymentNode = null;
}
}
}
} |
i do not see a big difference between these tests and tests for int types... `ARRAY_REMOVE` invokes same code for both.... Probably just one (int or varchar is enough) | Stream<TestSetSpec> getTestSetSpecs() {
return Stream.of(
TestSetSpec.forFunction(BuiltInFunctionDefinitions.ARRAY_CONTAINS)
.onFieldsWithData(
new Integer[] {1, 2, 3},
null,
new String[] {"Hello", "World"},
new Row[] {
Row.of(true, LocalDate.of(2022, 4, 20)),
Row.of(true, LocalDate.of(1990, 10, 14)),
null
},
new Integer[] {1, null, 3},
new Integer[] {1, 2, 3})
.andDataTypes(
DataTypes.ARRAY(DataTypes.INT()),
DataTypes.ARRAY(DataTypes.INT()),
DataTypes.ARRAY(DataTypes.STRING()).notNull(),
DataTypes.ARRAY(
DataTypes.ROW(DataTypes.BOOLEAN(), DataTypes.DATE())),
DataTypes.ARRAY(DataTypes.INT()),
DataTypes.ARRAY(DataTypes.INT().notNull()).notNull())
.testResult(
$("f0").arrayContains(2),
"ARRAY_CONTAINS(f0, 2)",
true,
DataTypes.BOOLEAN().nullable())
.testResult(
$("f0").arrayContains(42),
"ARRAY_CONTAINS(f0, 42)",
false,
DataTypes.BOOLEAN().nullable())
.testResult(
$("f1").arrayContains(12),
"ARRAY_CONTAINS(f1, 12)",
null,
DataTypes.BOOLEAN().nullable())
.testResult(
$("f1").arrayContains(null),
"ARRAY_CONTAINS(f1, NULL)",
null,
DataTypes.BOOLEAN().nullable())
.testResult(
$("f2").arrayContains("Hello"),
"ARRAY_CONTAINS(f2, 'Hello')",
true,
DataTypes.BOOLEAN().notNull())
.testResult(
$("f3").arrayContains(row(true, LocalDate.of(1990, 10, 14))),
"ARRAY_CONTAINS(f3, (TRUE, DATE '1990-10-14'))",
true,
DataTypes.BOOLEAN())
.testResult(
$("f3").arrayContains(row(false, LocalDate.of(1990, 10, 14))),
"ARRAY_CONTAINS(f3, (FALSE, DATE '1990-10-14'))",
false,
DataTypes.BOOLEAN())
.testResult(
$("f3").arrayContains(null),
"ARRAY_CONTAINS(f3, null)",
true,
DataTypes.BOOLEAN())
.testResult(
$("f4").arrayContains(null),
"ARRAY_CONTAINS(f4, NULL)",
true,
DataTypes.BOOLEAN().nullable())
.testResult(
$("f5").arrayContains(lit(null, DataTypes.INT())),
"ARRAY_CONTAINS(f5, CAST(NULL AS INT))",
false,
DataTypes.BOOLEAN().notNull())
.testResult(
$("f5").arrayContains(lit(4, DataTypes.INT().notNull())),
"ARRAY_CONTAINS(f5, 4)",
false,
DataTypes.BOOLEAN().notNull())
.testResult(
$("f5").arrayContains(lit(3, DataTypes.INT().notNull())),
"ARRAY_CONTAINS(f5, 3)",
true,
DataTypes.BOOLEAN().notNull())
.testSqlValidationError(
"ARRAY_CONTAINS(f0, TRUE)",
"Invalid input arguments. Expected signatures are:\n"
+ "ARRAY_CONTAINS(haystack <ARRAY>, needle <ARRAY ELEMENT>)")
.testTableApiValidationError(
$("f0").arrayContains(true),
"Invalid input arguments. Expected signatures are:\n"
+ "ARRAY_CONTAINS(haystack <ARRAY>, needle <ARRAY ELEMENT>)"),
TestSetSpec.forFunction(BuiltInFunctionDefinitions.ARRAY_DISTINCT)
.onFieldsWithData(
new Integer[] {1, 2, 3},
new Integer[] {null, 1, 2, 3, 4, 5, 4, 3, 2, 1, null},
null,
new String[] {"Hello", "Hello", "Hello"},
new Row[] {
Row.of(true, LocalDate.of(2022, 4, 20)),
Row.of(true, LocalDate.of(1990, 10, 14)),
Row.of(true, LocalDate.of(1990, 10, 14)),
Row.of(true, LocalDate.of(1990, 10, 14)),
null
})
.andDataTypes(
DataTypes.ARRAY(DataTypes.INT()),
DataTypes.ARRAY(DataTypes.INT()),
DataTypes.ARRAY(DataTypes.INT()),
DataTypes.ARRAY(DataTypes.STRING()).notNull(),
DataTypes.ARRAY(
DataTypes.ROW(DataTypes.BOOLEAN(), DataTypes.DATE())))
.testResult(
$("f0").arrayDistinct(),
"ARRAY_DISTINCT(f0)",
new Integer[] {1, 2, 3},
DataTypes.ARRAY(DataTypes.INT()).nullable())
.testResult(
$("f1").arrayDistinct(),
"ARRAY_DISTINCT(f1)",
new Integer[] {null, 1, 2, 3, 4, 5},
DataTypes.ARRAY(DataTypes.INT()).nullable())
.testResult(
$("f2").arrayDistinct(),
"ARRAY_DISTINCT(f2)",
null,
DataTypes.ARRAY(DataTypes.INT()).nullable())
.testResult(
$("f3").arrayDistinct(),
"ARRAY_DISTINCT(f3)",
new String[] {"Hello"},
DataTypes.ARRAY(DataTypes.STRING()).notNull())
.testResult(
$("f4").arrayDistinct(),
"ARRAY_DISTINCT(f4)",
new Row[] {
Row.of(true, LocalDate.of(2022, 4, 20)),
Row.of(true, LocalDate.of(1990, 10, 14)),
null
},
DataTypes.ARRAY(
DataTypes.ROW(DataTypes.BOOLEAN(), DataTypes.DATE()))),
TestSetSpec.forFunction(BuiltInFunctionDefinitions.ARRAY_REMOVE)
.onFieldsWithData(
new Integer[] {1, 2, 2},
null,
new String[] {"Hello", "World"},
new Row[] {
Row.of(true, LocalDate.of(2022, 4, 20)),
Row.of(true, LocalDate.of(1990, 10, 14)),
null
},
new Integer[] {null, null, 1},
new Integer[][] {
new Integer[] {1, null, 3}, new Integer[] {0}, new Integer[] {1}
},
new Map[] {
CollectionUtil.map(entry(1, "a"), entry(2, "b")),
CollectionUtil.map(entry(3, "c"), entry(4, "d")),
null
})
.andDataTypes(
DataTypes.ARRAY(DataTypes.INT()),
DataTypes.ARRAY(DataTypes.INT()),
DataTypes.ARRAY(DataTypes.STRING()).notNull(),
DataTypes.ARRAY(
DataTypes.ROW(DataTypes.BOOLEAN(), DataTypes.DATE())),
DataTypes.ARRAY(DataTypes.INT()),
DataTypes.ARRAY(DataTypes.ARRAY(DataTypes.INT())),
DataTypes.ARRAY(DataTypes.MAP(DataTypes.INT(), DataTypes.STRING())))
.testResult(
$("f0").arrayRemove(2),
"ARRAY_REMOVE(f0, 2)",
new Integer[] {1},
DataTypes.ARRAY(DataTypes.INT()).nullable())
.testResult(
$("f0").arrayRemove(42),
"ARRAY_REMOVE(f0, 42)",
new Integer[] {1, 2, 2},
DataTypes.ARRAY(DataTypes.INT()).nullable())
.testResult(
$("f0").arrayRemove(
lit(null, DataTypes.SMALLINT())
.cast(DataTypes.INT())),
"ARRAY_REMOVE(f0, cast(NULL AS INT))",
new Integer[] {1, 2, 2},
DataTypes.ARRAY(DataTypes.INT()).nullable())
.testResult(
$("f1").arrayRemove(12),
"ARRAY_REMOVE(f1, 12)",
null,
DataTypes.ARRAY(DataTypes.INT()).nullable())
.testResult(
$("f1").arrayRemove(null),
"ARRAY_REMOVE(f1, NULL)",
null,
DataTypes.ARRAY(DataTypes.INT()).nullable())
.testResult(
$("f2").arrayRemove("Hello"),
"ARRAY_REMOVE(f2, 'Hello')",
new String[] {"World"},
DataTypes.ARRAY(DataTypes.STRING()).notNull())
.testResult(
$("f2").arrayRemove(
lit(null, DataTypes.STRING())
.cast(DataTypes.STRING())),
"ARRAY_REMOVE(f2, cast(NULL AS VARCHAR))",
new String[] {"Hello", "World"},
DataTypes.ARRAY(DataTypes.STRING()).notNull())
.testResult(
$("f3").arrayRemove(row(true, LocalDate.of(1990, 10, 14))),
"ARRAY_REMOVE(f3, (TRUE, DATE '1990-10-14'))",
new Row[] {Row.of(true, LocalDate.of(2022, 4, 20)), null},
DataTypes.ARRAY(
DataTypes.ROW(
DataTypes.BOOLEAN(), DataTypes.DATE()))
.nullable())
.testResult(
$("f3").arrayRemove(null),
"ARRAY_REMOVE(f3, null)",
new Row[] {
Row.of(true, LocalDate.of(2022, 4, 20)),
Row.of(true, LocalDate.of(1990, 10, 14)),
},
DataTypes.ARRAY(
DataTypes.ROW(
DataTypes.BOOLEAN(), DataTypes.DATE()))
.nullable())
.testResult(
$("f4").arrayRemove(null),
"ARRAY_REMOVE(f4, NULL)",
new Integer[] {1},
DataTypes.ARRAY(DataTypes.INT()).nullable())
.testResult(
$("f5").arrayRemove(new Integer[] {0}),
"ARRAY_REMOVE(f5, array[0])",
new Integer[][] {new Integer[] {1, null, 3}, new Integer[] {1}},
DataTypes.ARRAY(DataTypes.ARRAY(DataTypes.INT()).nullable()))
.testResult(
$("f6").arrayRemove(
CollectionUtil.map(entry(3, "c"), entry(4, "d"))),
"ARRAY_REMOVE(f6, MAP[3, 'c', 4, 'd'])",
new Map[] {CollectionUtil.map(entry(1, "a"), entry(2, "b")), null},
DataTypes.ARRAY(DataTypes.MAP(DataTypes.INT(), DataTypes.STRING()))
.nullable())
.testSqlValidationError(
"ARRAY_REMOVE(f0, TRUE)",
"Invalid input arguments. Expected signatures are:\n"
+ "ARRAY_REMOVE(haystack <ARRAY>, needle <ARRAY ELEMENT>)")
.testTableApiValidationError(
$("f0").arrayRemove(true),
"Invalid input arguments. Expected signatures are:\n"
+ "ARRAY_REMOVE(haystack <ARRAY>, needle <ARRAY ELEMENT>)"));
} | "ARRAY_REMOVE(f2, cast(NULL AS VARCHAR))", | Stream<TestSetSpec> getTestSetSpecs() {
return Stream.of(
TestSetSpec.forFunction(BuiltInFunctionDefinitions.ARRAY_CONTAINS)
.onFieldsWithData(
new Integer[] {1, 2, 3},
null,
new String[] {"Hello", "World"},
new Row[] {
Row.of(true, LocalDate.of(2022, 4, 20)),
Row.of(true, LocalDate.of(1990, 10, 14)),
null
},
new Integer[] {1, null, 3},
new Integer[] {1, 2, 3})
.andDataTypes(
DataTypes.ARRAY(DataTypes.INT()),
DataTypes.ARRAY(DataTypes.INT()),
DataTypes.ARRAY(DataTypes.STRING()).notNull(),
DataTypes.ARRAY(
DataTypes.ROW(DataTypes.BOOLEAN(), DataTypes.DATE())),
DataTypes.ARRAY(DataTypes.INT()),
DataTypes.ARRAY(DataTypes.INT().notNull()).notNull())
.testResult(
$("f0").arrayContains(2),
"ARRAY_CONTAINS(f0, 2)",
true,
DataTypes.BOOLEAN().nullable())
.testResult(
$("f0").arrayContains(42),
"ARRAY_CONTAINS(f0, 42)",
false,
DataTypes.BOOLEAN().nullable())
.testResult(
$("f1").arrayContains(12),
"ARRAY_CONTAINS(f1, 12)",
null,
DataTypes.BOOLEAN().nullable())
.testResult(
$("f1").arrayContains(null),
"ARRAY_CONTAINS(f1, NULL)",
null,
DataTypes.BOOLEAN().nullable())
.testResult(
$("f2").arrayContains("Hello"),
"ARRAY_CONTAINS(f2, 'Hello')",
true,
DataTypes.BOOLEAN().notNull())
.testResult(
$("f3").arrayContains(row(true, LocalDate.of(1990, 10, 14))),
"ARRAY_CONTAINS(f3, (TRUE, DATE '1990-10-14'))",
true,
DataTypes.BOOLEAN())
.testResult(
$("f3").arrayContains(row(false, LocalDate.of(1990, 10, 14))),
"ARRAY_CONTAINS(f3, (FALSE, DATE '1990-10-14'))",
false,
DataTypes.BOOLEAN())
.testResult(
$("f3").arrayContains(null),
"ARRAY_CONTAINS(f3, null)",
true,
DataTypes.BOOLEAN())
.testResult(
$("f4").arrayContains(null),
"ARRAY_CONTAINS(f4, NULL)",
true,
DataTypes.BOOLEAN().nullable())
.testResult(
$("f5").arrayContains(lit(null, DataTypes.INT())),
"ARRAY_CONTAINS(f5, CAST(NULL AS INT))",
false,
DataTypes.BOOLEAN().notNull())
.testResult(
$("f5").arrayContains(lit(4, DataTypes.INT().notNull())),
"ARRAY_CONTAINS(f5, 4)",
false,
DataTypes.BOOLEAN().notNull())
.testResult(
$("f5").arrayContains(lit(3, DataTypes.INT().notNull())),
"ARRAY_CONTAINS(f5, 3)",
true,
DataTypes.BOOLEAN().notNull())
.testSqlValidationError(
"ARRAY_CONTAINS(f0, TRUE)",
"Invalid input arguments. Expected signatures are:\n"
+ "ARRAY_CONTAINS(haystack <ARRAY>, needle <ARRAY ELEMENT>)")
.testTableApiValidationError(
$("f0").arrayContains(true),
"Invalid input arguments. Expected signatures are:\n"
+ "ARRAY_CONTAINS(haystack <ARRAY>, needle <ARRAY ELEMENT>)"),
TestSetSpec.forFunction(BuiltInFunctionDefinitions.ARRAY_DISTINCT)
.onFieldsWithData(
new Integer[] {1, 2, 3},
new Integer[] {null, 1, 2, 3, 4, 5, 4, 3, 2, 1, null},
null,
new String[] {"Hello", "Hello", "Hello"},
new Row[] {
Row.of(true, LocalDate.of(2022, 4, 20)),
Row.of(true, LocalDate.of(1990, 10, 14)),
Row.of(true, LocalDate.of(1990, 10, 14)),
Row.of(true, LocalDate.of(1990, 10, 14)),
null
})
.andDataTypes(
DataTypes.ARRAY(DataTypes.INT()),
DataTypes.ARRAY(DataTypes.INT()),
DataTypes.ARRAY(DataTypes.INT()),
DataTypes.ARRAY(DataTypes.STRING()).notNull(),
DataTypes.ARRAY(
DataTypes.ROW(DataTypes.BOOLEAN(), DataTypes.DATE())))
.testResult(
$("f0").arrayDistinct(),
"ARRAY_DISTINCT(f0)",
new Integer[] {1, 2, 3},
DataTypes.ARRAY(DataTypes.INT()).nullable())
.testResult(
$("f1").arrayDistinct(),
"ARRAY_DISTINCT(f1)",
new Integer[] {null, 1, 2, 3, 4, 5},
DataTypes.ARRAY(DataTypes.INT()).nullable())
.testResult(
$("f2").arrayDistinct(),
"ARRAY_DISTINCT(f2)",
null,
DataTypes.ARRAY(DataTypes.INT()).nullable())
.testResult(
$("f3").arrayDistinct(),
"ARRAY_DISTINCT(f3)",
new String[] {"Hello"},
DataTypes.ARRAY(DataTypes.STRING()).notNull())
.testResult(
$("f4").arrayDistinct(),
"ARRAY_DISTINCT(f4)",
new Row[] {
Row.of(true, LocalDate.of(2022, 4, 20)),
Row.of(true, LocalDate.of(1990, 10, 14)),
null
},
DataTypes.ARRAY(
DataTypes.ROW(DataTypes.BOOLEAN(), DataTypes.DATE()))),
TestSetSpec.forFunction(BuiltInFunctionDefinitions.ARRAY_REMOVE)
.onFieldsWithData(
new Integer[] {1, 2, 2},
null,
new Row[] {
Row.of(true, LocalDate.of(2022, 4, 20)),
Row.of(true, LocalDate.of(1990, 10, 14)),
null
},
new Integer[] {null, null, 1},
new Integer[][] {
new Integer[] {1, null, 3}, new Integer[] {0}, new Integer[] {1}
},
new Map[] {
CollectionUtil.map(entry(1, "a"), entry(2, "b")),
CollectionUtil.map(entry(3, "c"), entry(4, "d")),
null
})
.andDataTypes(
DataTypes.ARRAY(DataTypes.INT()),
DataTypes.ARRAY(DataTypes.INT()),
DataTypes.ARRAY(
DataTypes.ROW(DataTypes.BOOLEAN(), DataTypes.DATE())),
DataTypes.ARRAY(DataTypes.INT()),
DataTypes.ARRAY(DataTypes.ARRAY(DataTypes.INT())),
DataTypes.ARRAY(DataTypes.MAP(DataTypes.INT(), DataTypes.STRING())))
.testResult(
$("f0").arrayRemove(2),
"ARRAY_REMOVE(f0, 2)",
new Integer[] {1},
DataTypes.ARRAY(DataTypes.INT()).nullable())
.testResult(
$("f0").arrayRemove(42),
"ARRAY_REMOVE(f0, 42)",
new Integer[] {1, 2, 2},
DataTypes.ARRAY(DataTypes.INT()).nullable())
.testResult(
$("f0").arrayRemove(
lit(null, DataTypes.SMALLINT())
.cast(DataTypes.INT())),
"ARRAY_REMOVE(f0, CAST(NULL AS INT))",
new Integer[] {1, 2, 2},
DataTypes.ARRAY(DataTypes.INT()).nullable())
.testResult(
$("f1").arrayRemove(12),
"ARRAY_REMOVE(f1, 12)",
null,
DataTypes.ARRAY(DataTypes.INT()).nullable())
.testResult(
$("f1").arrayRemove(null),
"ARRAY_REMOVE(f1, NULL)",
null,
DataTypes.ARRAY(DataTypes.INT()).nullable())
.testResult(
$("f2").arrayRemove(row(true, LocalDate.of(1990, 10, 14))),
"ARRAY_REMOVE(f2, (TRUE, DATE '1990-10-14'))",
new Row[] {Row.of(true, LocalDate.of(2022, 4, 20)), null},
DataTypes.ARRAY(
DataTypes.ROW(
DataTypes.BOOLEAN(), DataTypes.DATE()))
.nullable())
.testResult(
$("f2").arrayRemove(null),
"ARRAY_REMOVE(f2, NULL)",
new Row[] {
Row.of(true, LocalDate.of(2022, 4, 20)),
Row.of(true, LocalDate.of(1990, 10, 14)),
},
DataTypes.ARRAY(
DataTypes.ROW(
DataTypes.BOOLEAN(), DataTypes.DATE()))
.nullable())
.testResult(
$("f3").arrayRemove(null),
"ARRAY_REMOVE(f3, NULL)",
new Integer[] {1},
DataTypes.ARRAY(DataTypes.INT()).nullable())
.testResult(
$("f4").arrayRemove(new Integer[] {0}),
"ARRAY_REMOVE(f4, ARRAY[0])",
new Integer[][] {new Integer[] {1, null, 3}, new Integer[] {1}},
DataTypes.ARRAY(DataTypes.ARRAY(DataTypes.INT()).nullable()))
.testResult(
$("f5").arrayRemove(
CollectionUtil.map(entry(3, "c"), entry(4, "d"))),
"ARRAY_REMOVE(f5, MAP[3, 'c', 4, 'd'])",
new Map[] {CollectionUtil.map(entry(1, "a"), entry(2, "b")), null},
DataTypes.ARRAY(DataTypes.MAP(DataTypes.INT(), DataTypes.STRING()))
.nullable())
.testSqlValidationError(
"ARRAY_REMOVE(f0, TRUE)",
"Invalid input arguments. Expected signatures are:\n"
+ "ARRAY_REMOVE(haystack <ARRAY>, needle <ARRAY ELEMENT>)")
.testTableApiValidationError(
$("f0").arrayRemove(true),
"Invalid input arguments. Expected signatures are:\n"
+ "ARRAY_REMOVE(haystack <ARRAY>, needle <ARRAY ELEMENT>)"));
} | class CollectionFunctionsITCase extends BuiltInFunctionTestBase {
@Override
} | class CollectionFunctionsITCase extends BuiltInFunctionTestBase {
@Override
} |
I don't think we should. It's more like a fire-and-forget style of communication... | public CompletionStage<Void> invoke(ScheduledExecution execution) throws Exception {
if (running.compareAndSet(false, true)) {
return delegate.invoke(execution).whenComplete((r, t) -> running.set(false));
}
LOG.debugf("Skipped scheduled invoker execution: %s", delegate.getClass().getName());
SkippedExecution payload = new SkippedExecution(execution,
"The scheduled method should not be executed concurrently");
event.fire(payload);
event.fireAsync(payload);
return CompletableFuture.completedStage(null);
} | event.fireAsync(payload); | public CompletionStage<Void> invoke(ScheduledExecution execution) throws Exception {
if (running.compareAndSet(false, true)) {
return delegate.invoke(execution).whenComplete((r, t) -> running.set(false));
}
LOG.debugf("Skipped scheduled invoker execution: %s", delegate.getClass().getName());
SkippedExecution payload = new SkippedExecution(execution,
"The scheduled method should not be executed concurrently");
event.fire(payload);
event.fireAsync(payload);
return CompletableFuture.completedStage(null);
} | class SkipConcurrentExecutionInvoker extends DelegateInvoker {
private static final Logger LOG = Logger.getLogger(SkipConcurrentExecutionInvoker.class);
private final AtomicBoolean running;
private final Event<SkippedExecution> event;
public SkipConcurrentExecutionInvoker(ScheduledInvoker delegate, Event<SkippedExecution> event) {
super(delegate);
this.running = new AtomicBoolean(false);
this.event = event;
}
@Override
} | class SkipConcurrentExecutionInvoker extends DelegateInvoker {
private static final Logger LOG = Logger.getLogger(SkipConcurrentExecutionInvoker.class);
private final AtomicBoolean running;
private final Event<SkippedExecution> event;
public SkipConcurrentExecutionInvoker(ScheduledInvoker delegate, Event<SkippedExecution> event) {
super(delegate);
this.running = new AtomicBoolean(false);
this.event = event;
}
@Override
} |
this may throw java.lang.ClassCastException. It is better to compare their class before convert. | public boolean equals(Object o) {
if (!super.equals(o)) {
return false;
}
VarcharType that = (VarcharType) o;
return len == that.len;
} | VarcharType that = (VarcharType) o; | public boolean equals(Object o) {
if (!super.equals(o)) {
return false;
}
VarcharType that = (VarcharType) o;
return len == that.len;
} | class VarcharType extends DataType {
private final int len;
public VarcharType(int len) {
this.len = len;
}
public static VarcharType createVarcharType(int len) {
return new VarcharType(len);
}
@Override
public Type toCatalogDataType() {
return ScalarType.createVarcharType(len);
}
@Override
@Override
public int hashCode() {
return Objects.hash(super.hashCode(), len);
}
} | class VarcharType extends DataType {
private final int len;
public VarcharType(int len) {
this.len = len;
}
public static VarcharType createVarcharType(int len) {
return new VarcharType(len);
}
@Override
public Type toCatalogDataType() {
return ScalarType.createVarcharType(len);
}
@Override
@Override
public int hashCode() {
return Objects.hash(super.hashCode(), len);
}
} |
I think we can add the constant for `CompressedSourceTest.java` at least. | public void testEmptyLzoProgress() throws IOException {
File tmpFile = tmpFolder.newFile("empty.lzo_deflate");
String filename = tmpFile.toPath().toString();
writeFile(tmpFile, new byte[0], CompressionMode.LZO);
PipelineOptions options = PipelineOptionsFactory.create();
CompressedSource<Byte> source =
CompressedSource.from(new ByteSource(filename, 1)).withDecompression(CompressionMode.LZO);
try (BoundedReader<Byte> readerOrig = source.createReader(options)) {
assertThat(readerOrig, instanceOf(CompressedReader.class));
CompressedReader<Byte> reader = (CompressedReader<Byte>) readerOrig;
assertEquals(0.0, reader.getFractionConsumed(), 1e-6);
assertEquals(0, reader.getSplitPointsConsumed());
assertEquals(1, reader.getSplitPointsRemaining());
assertFalse(reader.start());
assertEquals(1.0, reader.getFractionConsumed(), 1e-6);
assertEquals(0, reader.getSplitPointsConsumed());
assertEquals(0, reader.getSplitPointsRemaining());
}
} | assertEquals(0.0, reader.getFractionConsumed(), 1e-6); | public void testEmptyLzoProgress() throws IOException {
File tmpFile = tmpFolder.newFile("empty.lzo_deflate");
String filename = tmpFile.toPath().toString();
writeFile(tmpFile, new byte[0], CompressionMode.LZO);
PipelineOptions options = PipelineOptionsFactory.create();
CompressedSource<Byte> source =
CompressedSource.from(new ByteSource(filename, 1)).withDecompression(CompressionMode.LZO);
try (BoundedReader<Byte> readerOrig = source.createReader(options)) {
assertThat(readerOrig, instanceOf(CompressedReader.class));
CompressedReader<Byte> reader = (CompressedReader<Byte>) readerOrig;
assertEquals(0.0, reader.getFractionConsumed(), delta);
assertEquals(0, reader.getSplitPointsConsumed());
assertEquals(1, reader.getSplitPointsRemaining());
assertFalse(reader.start());
assertEquals(1.0, reader.getFractionConsumed(), delta);
assertEquals(0, reader.getSplitPointsConsumed());
assertEquals(0, reader.getSplitPointsRemaining());
}
} | class ExtractIndexFromTimestamp extends DoFn<Byte, KV<Long, Byte>> {
@ProcessElement
public void processElement(ProcessContext context) {
context.output(KV.of(context.timestamp().getMillis(), context.element()));
}
} | class ExtractIndexFromTimestamp extends DoFn<Byte, KV<Long, Byte>> {
@ProcessElement
public void processElement(ProcessContext context) {
context.output(KV.of(context.timestamp().getMillis(), context.element()));
}
} |
How about ``` Preconditions.checkState( serializedJobInformation instanceof NonOffloaded, "Trying to work with offloaded serialized job information."); NonOffloaded<JobInformation> jobInformation = (NonOffloaded<JobInformation>) serializedJobInformation; return jobInformation.serializedValue.deserializeValue(getClass().getClassLoader()); ``` ? | public TaskInformation getTaskInformation() throws IOException, ClassNotFoundException {
if (taskInformation != null) {
return taskInformation;
}
if (serializedTaskInformation instanceof NonOffloaded) {
NonOffloaded<TaskInformation> taskInformation =
(NonOffloaded<TaskInformation>) serializedTaskInformation;
return taskInformation.serializedValue.deserializeValue(getClass().getClassLoader());
}
throw new IllegalStateException(
"Trying to work with offloaded serialized task information.");
} | "Trying to work with offloaded serialized task information."); | public TaskInformation getTaskInformation() throws IOException, ClassNotFoundException {
if (taskInformation != null) {
return taskInformation;
}
if (serializedTaskInformation instanceof NonOffloaded) {
NonOffloaded<TaskInformation> taskInformation =
(NonOffloaded<TaskInformation>) serializedTaskInformation;
return taskInformation.serializedValue.deserializeValue(getClass().getClassLoader());
}
throw new IllegalStateException(
"Trying to work with offloaded serialized task information.");
} | class Offloaded<T> extends MaybeOffloaded<T> {
private static final long serialVersionUID = 4544135485379071679L;
/** The key of the offloaded value BLOB. */
public PermanentBlobKey serializedValueKey;
@SuppressWarnings("unused")
public Offloaded() {}
public Offloaded(PermanentBlobKey serializedValueKey) {
this.serializedValueKey = Preconditions.checkNotNull(serializedValueKey);
}
} | class Offloaded<T> extends MaybeOffloaded<T> {
private static final long serialVersionUID = 4544135485379071679L;
/** The key of the offloaded value BLOB. */
public PermanentBlobKey serializedValueKey;
@SuppressWarnings("unused")
public Offloaded() {}
public Offloaded(PermanentBlobKey serializedValueKey) {
this.serializedValueKey = Preconditions.checkNotNull(serializedValueKey);
}
} |
Thanks! Let's try this then ... | public void close() {
if (closed.get()) return;
closed.set(true);
synchronized (nodeTable.writeLock) {
synchronized (clusterTable.writeLock) {
for (SqlCompiler sqlCompiler : sqlCompilerPool)
sqlCompiler.close();
engine.close();
}
}
} | closed.set(true); | public void close() {
if (closed.getAndSet(true)) return;
synchronized (nodeTable.writeLock) {
synchronized (clusterTable.writeLock) {
for (SqlCompiler sqlCompiler : sqlCompilerPool)
sqlCompiler.close();
engine.close();
}
}
} | class QuestMetricsDb extends AbstractComponent implements MetricsDb {
private static final Logger log = Logger.getLogger(QuestMetricsDb.class.getName());
private final Table nodeTable;
private final Table clusterTable;
private final Clock clock;
private final String dataDir;
private final CairoEngine engine;
private final ConcurrentResourcePool<SqlCompiler> sqlCompilerPool;
private final AtomicBoolean closed = new AtomicBoolean(false);
@Inject
public QuestMetricsDb() {
this(Defaults.getDefaults().underVespaHome("var/db/vespa/autoscaling"), Clock.systemUTC());
}
public QuestMetricsDb(String dataDir, Clock clock) {
this.clock = clock;
if (dataDir.startsWith(Defaults.getDefaults().vespaHome())
&& ! new File(Defaults.getDefaults().vespaHome()).exists())
dataDir = "data";
String logConfig = dataDir + "/quest-log.conf";
IOUtils.createDirectory(logConfig);
IOUtils.writeFile(new File(logConfig), new byte[0]);
System.setProperty("out", logConfig);
this.dataDir = dataDir;
engine = new CairoEngine(new DefaultCairoConfiguration(dataDir));
sqlCompilerPool = new ConcurrentResourcePool<>(() -> new SqlCompiler(engine()));
nodeTable = new Table(dataDir, "metrics", clock);
clusterTable = new Table(dataDir, "clusterMetrics", clock);
ensureTablesExist();
}
private CairoEngine engine() {
if (closed.get())
throw new IllegalStateException("Attempted to access QuestDb after calling close");
return engine;
}
@Override
public Clock clock() { return clock; }
@Override
public void addNodeMetrics(Collection<Pair<String, NodeMetricSnapshot>> snapshots) {
try {
addNodeMetricsBody(snapshots);
}
catch (CairoException e) {
if (e.getMessage().contains("Cannot read offset")) {
nodeTable.repair(e);
addNodeMetricsBody(snapshots);
}
}
}
private void addNodeMetricsBody(Collection<Pair<String, NodeMetricSnapshot>> snapshots) {
synchronized (nodeTable.writeLock) {
try (TableWriter writer = nodeTable.getWriter()) {
for (var snapshot : snapshots) {
Optional<Long> atMillis = nodeTable.adjustOrDiscard(snapshot.getSecond().at());
if (atMillis.isEmpty()) continue;
TableWriter.Row row = writer.newRow(atMillis.get() * 1000);
row.putStr(0, snapshot.getFirst());
row.putFloat(2, (float) snapshot.getSecond().load().cpu());
row.putFloat(3, (float) snapshot.getSecond().load().memory());
row.putFloat(4, (float) snapshot.getSecond().load().disk());
row.putLong(5, snapshot.getSecond().generation());
row.putBool(6, snapshot.getSecond().inService());
row.putBool(7, snapshot.getSecond().stable());
row.putFloat(8, (float) snapshot.getSecond().queryRate());
row.append();
}
writer.commit();
}
}
}
@Override
public void addClusterMetrics(ApplicationId application, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) {
try {
addClusterMetricsBody(application, snapshots);
}
catch (CairoException e) {
if (e.getMessage().contains("Cannot read offset")) {
clusterTable.repair(e);
addClusterMetricsBody(application, snapshots);
}
}
}
private void addClusterMetricsBody(ApplicationId applicationId, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) {
synchronized (clusterTable.writeLock) {
try (TableWriter writer = clusterTable.getWriter()) {
for (var snapshot : snapshots.entrySet()) {
Optional<Long> atMillis = clusterTable.adjustOrDiscard(snapshot.getValue().at());
if (atMillis.isEmpty()) continue;
TableWriter.Row row = writer.newRow(atMillis.get() * 1000);
row.putStr(0, applicationId.serializedForm());
row.putStr(1, snapshot.getKey().value());
row.putFloat(3, (float) snapshot.getValue().queryRate());
row.putFloat(4, (float) snapshot.getValue().writeRate());
row.append();
}
writer.commit();
}
}
}
@Override
public List<NodeTimeseries> getNodeTimeseries(Duration period, Set<String> hostnames) {
try {
var snapshots = getNodeSnapshots(clock.instant().minus(period), hostnames, newContext());
return snapshots.entrySet().stream()
.map(entry -> new NodeTimeseries(entry.getKey(), entry.getValue()))
.collect(Collectors.toList());
}
catch (SqlException e) {
throw new IllegalStateException("Could not read node timeseries data in Quest stored in " + dataDir, e);
}
}
@Override
public ClusterTimeseries getClusterTimeseries(ApplicationId applicationId, ClusterSpec.Id clusterId) {
try {
return getClusterSnapshots(applicationId, clusterId);
}
catch (SqlException e) {
throw new IllegalStateException("Could not read cluster timeseries data in Quest stored in " + dataDir, e);
}
}
@Override
public void gc() {
nodeTable.gc();
clusterTable.gc();
}
@Override
public void deconstruct() { close(); }
@Override
private void ensureTablesExist() {
if (nodeTable.exists())
ensureNodeTableIsUpdated();
else
createNodeTable();
if (clusterTable.exists())
ensureClusterTableIsUpdated();
else
createClusterTable();
}
private void ensureNodeTableIsUpdated() {
try {
} catch (Exception e) {
nodeTable.repair(e);
}
}
private void ensureClusterTableIsUpdated() {
try {
if (0 == engine().getStatus(newContext().getCairoSecurityContext(), new Path(), clusterTable.name)) {
}
} catch (Exception e) {
clusterTable.repair(e);
}
}
private void createNodeTable() {
try {
issue("create table " + nodeTable.name +
" (hostname string, at timestamp, cpu_util float, mem_total_util float, disk_util float," +
" application_generation long, inService boolean, stable boolean, queries_rate float)" +
" timestamp(at)" +
"PARTITION BY DAY;",
newContext());
}
catch (SqlException e) {
throw new IllegalStateException("Could not create Quest db table '" + nodeTable.name + "'", e);
}
}
private void createClusterTable() {
try {
issue("create table " + clusterTable.name +
" (application string, cluster string, at timestamp, queries_rate float, write_rate float)" +
" timestamp(at)" +
"PARTITION BY DAY;",
newContext());
}
catch (SqlException e) {
throw new IllegalStateException("Could not create Quest db table '" + clusterTable.name + "'", e);
}
}
private ListMap<String, NodeMetricSnapshot> getNodeSnapshots(Instant startTime,
Set<String> hostnames,
SqlExecutionContext context) throws SqlException {
DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC"));
String from = formatter.format(startTime).substring(0, 19) + ".000000Z";
String to = formatter.format(clock.instant()).substring(0, 19) + ".000000Z";
String sql = "select * from " + nodeTable.name + " where at between('" + from + "', '" + to + "');";
try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) {
ListMap<String, NodeMetricSnapshot> snapshots = new ListMap<>();
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
while (cursor.hasNext()) {
String hostname = record.getStr(0).toString();
if (hostnames.isEmpty() || hostnames.contains(hostname)) {
snapshots.put(hostname,
new NodeMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(1) / 1000),
new Load(record.getFloat(2),
record.getFloat(3),
record.getFloat(4)),
record.getLong(5),
record.getBool(6),
record.getBool(7),
record.getFloat(8)));
}
}
}
return snapshots;
}
}
private ClusterTimeseries getClusterSnapshots(ApplicationId application, ClusterSpec.Id cluster) throws SqlException {
String sql = "select * from " + clusterTable.name;
var context = newContext();
try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) {
List<ClusterMetricSnapshot> snapshots = new ArrayList<>();
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
while (cursor.hasNext()) {
String applicationIdString = record.getStr(0).toString();
if ( ! application.serializedForm().equals(applicationIdString)) continue;
String clusterId = record.getStr(1).toString();
if (cluster.value().equals(clusterId)) {
snapshots.add(new ClusterMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(2) / 1000),
record.getFloat(3),
record.getFloat(4)));
}
}
}
return new ClusterTimeseries(cluster, snapshots);
}
}
/** Issues an SQL statement against the QuestDb engine */
private CompiledQuery issue(String sql, SqlExecutionContext context) throws SqlException {
SqlCompiler sqlCompiler = sqlCompilerPool.alloc();
try {
return sqlCompiler.compile(sql, context);
} finally {
sqlCompilerPool.free(sqlCompiler);
}
}
private SqlExecutionContext newContext() {
return new SqlExecutionContextImpl(engine(), 1);
}
/** A questDb table */
private class Table {
private final Object writeLock = new Object();
private final String name;
private final Clock clock;
private final File dir;
private long highestTimestampAdded = 0;
Table(String dataDir, String name, Clock clock) {
this.name = name;
this.clock = clock;
this.dir = new File(dataDir, name);
IOUtils.createDirectory(dir.getPath());
new File(dir + "/_txn_scoreboard").delete();
}
boolean exists() {
return 0 == engine().getStatus(newContext().getCairoSecurityContext(), new Path(), name);
}
TableWriter getWriter() {
return engine().getWriter(newContext().getCairoSecurityContext(), name);
}
void gc() {
synchronized (writeLock) {
Instant oldestToKeep = clock.instant().minus(Duration.ofDays(4));
SqlExecutionContext context = newContext();
int partitions = 0;
try {
List<String> removeList = new ArrayList<>();
for (String dirEntry : dir.list()) {
File partitionDir = new File(dir, dirEntry);
if (!partitionDir.isDirectory()) continue;
partitions++;
DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC"));
Instant partitionDay = Instant.from(formatter.parse(dirEntry.substring(0, 10) + "T00:00:00"));
if (partitionDay.isBefore(oldestToKeep))
removeList.add(dirEntry);
}
if (removeList.size() < partitions && !removeList.isEmpty()) {
issue("alter table " + name + " drop partition list " +
removeList.stream().map(dir -> "'" + dir + "'").collect(Collectors.joining(",")),
context);
}
} catch (SqlException e) {
log.log(Level.WARNING, "Failed to gc old metrics data in " + dir + " table " + name, e);
}
}
}
/**
* Repairs this db on corruption.
*
* @param e the exception indicating corruption
*/
private void repair(Exception e) {
log.log(Level.WARNING, "QuestDb seems corrupted, wiping data and starting over", e);
IOUtils.recursiveDeleteDir(dir);
IOUtils.createDirectory(dir.getPath());
ensureTablesExist();
}
void ensureColumnExists(String column, String columnType) throws SqlException {
if (columnNames().contains(column)) return;
issue("alter table " + name + " add column " + column + " " + columnType, newContext());
}
private Optional<Long> adjustOrDiscard(Instant at) {
long timestamp = at.toEpochMilli();
if (timestamp >= highestTimestampAdded) {
highestTimestampAdded = timestamp;
return Optional.of(timestamp);
}
if (timestamp >= highestTimestampAdded - 60 * 1000) return Optional.of(highestTimestampAdded);
return Optional.empty();
}
private List<String> columnNames() throws SqlException {
var context = newContext();
List<String> columns = new ArrayList<>();
try (RecordCursorFactory factory = issue("show columns from " + name, context).getRecordCursorFactory()) {
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
while (cursor.hasNext()) {
columns.add(record.getStr(0).toString());
}
}
}
return columns;
}
}
} | class QuestMetricsDb extends AbstractComponent implements MetricsDb {
private static final Logger log = Logger.getLogger(QuestMetricsDb.class.getName());
private final Table nodeTable;
private final Table clusterTable;
private final Clock clock;
private final String dataDir;
private final CairoEngine engine;
private final ConcurrentResourcePool<SqlCompiler> sqlCompilerPool;
private final AtomicBoolean closed = new AtomicBoolean(false);
@Inject
public QuestMetricsDb() {
this(Defaults.getDefaults().underVespaHome("var/db/vespa/autoscaling"), Clock.systemUTC());
}
public QuestMetricsDb(String dataDir, Clock clock) {
this.clock = clock;
if (dataDir.startsWith(Defaults.getDefaults().vespaHome())
&& ! new File(Defaults.getDefaults().vespaHome()).exists())
dataDir = "data";
String logConfig = dataDir + "/quest-log.conf";
IOUtils.createDirectory(logConfig);
IOUtils.writeFile(new File(logConfig), new byte[0]);
System.setProperty("out", logConfig);
this.dataDir = dataDir;
engine = new CairoEngine(new DefaultCairoConfiguration(dataDir));
sqlCompilerPool = new ConcurrentResourcePool<>(() -> new SqlCompiler(engine()));
nodeTable = new Table(dataDir, "metrics", clock);
clusterTable = new Table(dataDir, "clusterMetrics", clock);
ensureTablesExist();
}
private CairoEngine engine() {
if (closed.get())
throw new IllegalStateException("Attempted to access QuestDb after calling close");
return engine;
}
@Override
public Clock clock() { return clock; }
@Override
public void addNodeMetrics(Collection<Pair<String, NodeMetricSnapshot>> snapshots) {
try {
addNodeMetricsBody(snapshots);
}
catch (CairoException e) {
if (e.getMessage().contains("Cannot read offset")) {
nodeTable.repair(e);
addNodeMetricsBody(snapshots);
}
}
}
private void addNodeMetricsBody(Collection<Pair<String, NodeMetricSnapshot>> snapshots) {
synchronized (nodeTable.writeLock) {
try (TableWriter writer = nodeTable.getWriter()) {
for (var snapshot : snapshots) {
Optional<Long> atMillis = nodeTable.adjustOrDiscard(snapshot.getSecond().at());
if (atMillis.isEmpty()) continue;
TableWriter.Row row = writer.newRow(atMillis.get() * 1000);
row.putStr(0, snapshot.getFirst());
row.putFloat(2, (float) snapshot.getSecond().load().cpu());
row.putFloat(3, (float) snapshot.getSecond().load().memory());
row.putFloat(4, (float) snapshot.getSecond().load().disk());
row.putLong(5, snapshot.getSecond().generation());
row.putBool(6, snapshot.getSecond().inService());
row.putBool(7, snapshot.getSecond().stable());
row.putFloat(8, (float) snapshot.getSecond().queryRate());
row.append();
}
writer.commit();
}
}
}
@Override
public void addClusterMetrics(ApplicationId application, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) {
try {
addClusterMetricsBody(application, snapshots);
}
catch (CairoException e) {
if (e.getMessage().contains("Cannot read offset")) {
clusterTable.repair(e);
addClusterMetricsBody(application, snapshots);
}
}
}
private void addClusterMetricsBody(ApplicationId applicationId, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) {
synchronized (clusterTable.writeLock) {
try (TableWriter writer = clusterTable.getWriter()) {
for (var snapshot : snapshots.entrySet()) {
Optional<Long> atMillis = clusterTable.adjustOrDiscard(snapshot.getValue().at());
if (atMillis.isEmpty()) continue;
TableWriter.Row row = writer.newRow(atMillis.get() * 1000);
row.putStr(0, applicationId.serializedForm());
row.putStr(1, snapshot.getKey().value());
row.putFloat(3, (float) snapshot.getValue().queryRate());
row.putFloat(4, (float) snapshot.getValue().writeRate());
row.append();
}
writer.commit();
}
}
}
@Override
public List<NodeTimeseries> getNodeTimeseries(Duration period, Set<String> hostnames) {
try {
var snapshots = getNodeSnapshots(clock.instant().minus(period), hostnames, newContext());
return snapshots.entrySet().stream()
.map(entry -> new NodeTimeseries(entry.getKey(), entry.getValue()))
.collect(Collectors.toList());
}
catch (SqlException e) {
throw new IllegalStateException("Could not read node timeseries data in Quest stored in " + dataDir, e);
}
}
/** Returns all recorded metric snapshots for the given cluster of the given application. */
@Override
public ClusterTimeseries getClusterTimeseries(ApplicationId applicationId, ClusterSpec.Id clusterId) {
    try {
        return getClusterSnapshots(applicationId, clusterId);
    }
    catch (SqlException e) {
        // Wrap the checked SqlException: callers treat read failures as fatal state errors.
        throw new IllegalStateException("Could not read cluster timeseries data in Quest stored in " + dataDir, e);
    }
}
/** Drops old partitions from both tables (see Table#gc for the retention policy). */
@Override
public void gc() {
    nodeTable.gc();
    clusterTable.gc();
}
/** Component teardown hook: releases the underlying database resources. */
@Override
public void deconstruct() { close(); }
/**
 * Creates the node and cluster tables if they are missing, or migrates existing tables
 * to the current schema.
 *
 * <p>Note: the stray {@code @Override} previously on this method was removed — a private
 * method cannot override anything, and the annotation is a compile error here.
 */
private void ensureTablesExist() {
    if (nodeTable.exists())
        ensureNodeTableIsUpdated();
    else
        createNodeTable();
    if (clusterTable.exists())
        ensureClusterTableIsUpdated();
    else
        createClusterTable();
}
/**
 * Migrates an existing node table to the current schema.
 * NOTE(review): the try body is empty — presumably schema-migration calls (e.g.
 * ensureColumnExists) were removed or are yet to be added; verify against history.
 */
private void ensureNodeTableIsUpdated() {
    try {
    } catch (Exception e) {
        nodeTable.repair(e);
    }
}
/**
 * Migrates an existing cluster table to the current schema.
 * NOTE(review): the if body is empty — migration steps appear stripped; confirm intent.
 */
private void ensureClusterTableIsUpdated() {
    try {
        if (0 == engine().getStatus(newContext().getCairoSecurityContext(), new Path(), clusterTable.name)) {
        }
    } catch (Exception e) {
        clusterTable.repair(e);
    }
}
/** Creates the node metrics table, partitioned by day on the designated 'at' timestamp. */
private void createNodeTable() {
    String createSql = "create table " + nodeTable.name +
                       " (hostname string, at timestamp, cpu_util float, mem_total_util float, disk_util float," +
                       " application_generation long, inService boolean, stable boolean, queries_rate float)" +
                       " timestamp(at)" +
                       "PARTITION BY DAY;";
    try {
        issue(createSql, newContext());
    }
    catch (SqlException e) {
        throw new IllegalStateException("Could not create Quest db table '" + nodeTable.name + "'", e);
    }
}
/** Creates the cluster metrics table, partitioned by day on the designated 'at' timestamp. */
private void createClusterTable() {
    String createSql = "create table " + clusterTable.name +
                       " (application string, cluster string, at timestamp, queries_rate float, write_rate float)" +
                       " timestamp(at)" +
                       "PARTITION BY DAY;";
    try {
        issue(createSql, newContext());
    }
    catch (SqlException e) {
        throw new IllegalStateException("Could not create Quest db table '" + clusterTable.name + "'", e);
    }
}
/**
 * Reads node snapshots between startTime and now, keyed by hostname.
 * An empty hostnames set means "all hosts". Column order must match createNodeTable:
 * 0=hostname, 1=at, 2=cpu, 3=mem, 4=disk, 5=generation, 6=inService, 7=stable, 8=queryRate.
 */
private ListMap<String, NodeMetricSnapshot> getNodeSnapshots(Instant startTime,
                                                             Set<String> hostnames,
                                                             SqlExecutionContext context) throws SqlException {
    DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC"));
    // Truncate to second precision and append a literal zero fraction, the format QuestDb accepts here
    String from = formatter.format(startTime).substring(0, 19) + ".000000Z";
    String to = formatter.format(clock.instant()).substring(0, 19) + ".000000Z";
    String sql = "select * from " + nodeTable.name + " where at between('" + from + "', '" + to + "');";
    try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) {
        ListMap<String, NodeMetricSnapshot> snapshots = new ListMap<>();
        try (RecordCursor cursor = factory.getCursor(context)) {
            Record record = cursor.getRecord();
            while (cursor.hasNext()) {
                String hostname = record.getStr(0).toString();
                // Host filtering happens client-side; empty filter set selects everything
                if (hostnames.isEmpty() || hostnames.contains(hostname)) {
                    snapshots.put(hostname,
                                  // timestamp / 1000: presumably micros -> millis — TODO confirm table resolution
                                  new NodeMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(1) / 1000),
                                                         new Load(record.getFloat(2),
                                                                  record.getFloat(3),
                                                                  record.getFloat(4)),
                                                         record.getLong(5),
                                                         record.getBool(6),
                                                         record.getBool(7),
                                                         record.getFloat(8)));
                }
            }
        }
        return snapshots;
    }
}
/**
 * Reads all snapshots for one cluster of one application.
 * The whole table is scanned and filtered client-side on application and cluster id.
 */
private ClusterTimeseries getClusterSnapshots(ApplicationId application, ClusterSpec.Id cluster) throws SqlException {
    String sql = "select * from " + clusterTable.name;
    var context = newContext();
    List<ClusterMetricSnapshot> snapshots = new ArrayList<>();
    try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) {
        try (RecordCursor cursor = factory.getCursor(context)) {
            Record record = cursor.getRecord();
            while (cursor.hasNext()) {
                // Skip rows belonging to other applications or clusters
                if ( ! application.serializedForm().equals(record.getStr(0).toString())) continue;
                if ( ! cluster.value().equals(record.getStr(1).toString())) continue;
                snapshots.add(new ClusterMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(2) / 1000),
                                                        record.getFloat(3),
                                                        record.getFloat(4)));
            }
        }
    }
    return new ClusterTimeseries(cluster, snapshots);
}
/** Issues an SQL statement against the QuestDb engine */
private CompiledQuery issue(String sql, SqlExecutionContext context) throws SqlException {
    // Compilers are pooled; always return the instance, even when compilation throws.
    SqlCompiler sqlCompiler = sqlCompilerPool.alloc();
    try {
        return sqlCompiler.compile(sql, context);
    } finally {
        sqlCompilerPool.free(sqlCompiler);
    }
}
/** Creates a fresh single-worker execution context for one statement or cursor. */
private SqlExecutionContext newContext() {
    return new SqlExecutionContextImpl(engine(), 1);
}
/** A questDb table */
private class Table {

    // Guards all writes and gc against concurrent mutation of this table
    private final Object writeLock = new Object();
    private final String name;
    private final Clock clock;
    // On-disk directory of this table; partition subdirectories live directly below it
    private final File dir;
    // Newest timestamp (ms) written so far; used to keep appends monotonically ordered
    private long highestTimestampAdded = 0;

    Table(String dataDir, String name, Clock clock) {
        this.name = name;
        this.clock = clock;
        this.dir = new File(dataDir, name);
        IOUtils.createDirectory(dir.getPath());
        // NOTE(review): deleting _txn_scoreboard looks like a workaround for stale lock
        // files left by unclean shutdown — confirm against QuestDb behavior.
        new File(dir + "/_txn_scoreboard").delete();
    }

    /** Returns true if this table exists in the engine (status 0). */
    boolean exists() {
        return 0 == engine().getStatus(newContext().getCairoSecurityContext(), new Path(), name);
    }

    TableWriter getWriter() {
        return engine().getWriter(newContext().getCairoSecurityContext(), name);
    }

    /**
     * Drops day-partitions older than 4 days, by parsing partition directory names
     * (which start with an ISO date). All partitions are never dropped at once:
     * at least one partition is always kept.
     */
    void gc() {
        synchronized (writeLock) {
            Instant oldestToKeep = clock.instant().minus(Duration.ofDays(4));
            SqlExecutionContext context = newContext();
            int partitions = 0;
            try {
                List<String> removeList = new ArrayList<>();
                for (String dirEntry : dir.list()) {
                    File partitionDir = new File(dir, dirEntry);
                    if (!partitionDir.isDirectory()) continue; // skip metadata files
                    partitions++;
                    DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC"));
                    // Directory names begin with the partition date, yyyy-MM-dd
                    Instant partitionDay = Instant.from(formatter.parse(dirEntry.substring(0, 10) + "T00:00:00"));
                    if (partitionDay.isBefore(oldestToKeep))
                        removeList.add(dirEntry);
                }
                // Drop only if something remains afterwards and there is something to drop
                if (removeList.size() < partitions && !removeList.isEmpty()) {
                    issue("alter table " + name + " drop partition list " +
                          removeList.stream().map(dir -> "'" + dir + "'").collect(Collectors.joining(",")),
                          context);
                }
            } catch (SqlException e) {
                log.log(Level.WARNING, "Failed to gc old metrics data in " + dir + " table " + name, e);
            }
        }
    }

    /**
     * Repairs this db on corruption.
     *
     * @param e the exception indicating corruption
     */
    private void repair(Exception e) {
        log.log(Level.WARNING, "QuestDb seems corrupted, wiping data and starting over", e);
        IOUtils.recursiveDeleteDir(dir);
        IOUtils.createDirectory(dir.getPath());
        ensureTablesExist();
    }

    /** Adds the given column if it is not already present (idempotent schema migration). */
    void ensureColumnExists(String column, String columnType) throws SqlException {
        if (columnNames().contains(column)) return;
        issue("alter table " + name + " add column " + column + " " + columnType, newContext());
    }

    /**
     * Maps a snapshot time to a timestamp safe to append:
     * newer than anything written -> use as-is; up to 60s behind -> clamp to the newest
     * written timestamp; older than that -> discard (empty).
     */
    private Optional<Long> adjustOrDiscard(Instant at) {
        long timestamp = at.toEpochMilli();
        if (timestamp >= highestTimestampAdded) {
            highestTimestampAdded = timestamp;
            return Optional.of(timestamp);
        }
        if (timestamp >= highestTimestampAdded - 60 * 1000) return Optional.of(highestTimestampAdded);
        return Optional.empty();
    }

    /** Returns the names of this table's columns, via 'show columns'. */
    private List<String> columnNames() throws SqlException {
        var context = newContext();
        List<String> columns = new ArrayList<>();
        try (RecordCursorFactory factory = issue("show columns from " + name, context).getRecordCursorFactory()) {
            try (RecordCursor cursor = factory.getCursor(context)) {
                Record record = cursor.getRecord();
                while (cursor.hasNext()) {
                    columns.add(record.getStr(0).toString());
                }
            }
        }
        return columns;
    }

}
} |
Hi @pnowojski , thanks for your analysis! > I would move: SinkWriterOperatorFactory.class.getName().equals(streamOperatorFactoryClassName) > check, into the boolean StreamConfig#isSinkWriterOperatorFactory(Class<...> ...) method. > It doesn't fit there very well, BUT at least it would justify why we have the checkState in the > StreamConfig#setStreamOperatorFactory. Solution 1 (`SinkWriterOperatorFactory.class.getName().equals(streamOperatorFactoryClassName)`) is still fragile, right? If `SinkWriterOperatorFactory` is made non-final and a subclass is implemented in the future, that subclass still will not be supported — and it will fail silently. > toBeSerializedConfigObjects.put(SERIALIZED_UDF, factory); config.setString(IS_INSTANCE_OF_SinkWriterOperatorFactory, factory instanceof SinkWriterOperatorFactory); > would be better/cleaner. Either one is fine for me. Solution 2 is fully compatible with the case of adding a subclass. However, as I said before, `getStreamOperatorFactory` is called in `toString` to print the class name, and I'd like to use `SERIALIZED_UDF_CLASS_NAME` instead of `getStreamOperatorFactory`. If we only keep `IS_INSTANCE_OF_SinkWriterOperatorFactory`, we must still call `getStreamOperatorFactory` in the `toString` method. Or should we add both `IS_INSTANCE_OF_SinkWriterOperatorFactory` and `SERIALIZED_UDF_CLASS_NAME`? Actually, I had a solution 3 before creating this PR: store the `SERIALIZED_UDF_CLASS` instead of the `SERIALIZED_UDF_CLASS_NAME`.
``` # setStreamOperatorFactory method toBeSerializedConfigObjects.put(SERIALIZED_UDF, factory); toBeSerializedConfigObjects.put(SERIALIZED_UDF_CLASS, factory.getClass()); ``` ``` public <T extends StreamOperatorFactory<?>> Class<T> getStreamOperatorFactoryClass(ClassLoader cl) { try { return InstantiationUtil.readObjectFromConfig(this.config, SERIALIZED_UDF_CLASS, cl); } catch (Exception e) { throw new StreamTaskException("Could not instantiate chained outputs.", e); } } ``` And check `isAssignableFrom`: ``` SinkWriterOperatorFactory.class.isAssignableFrom(getStreamOperatorFactoryClass(SinkWriterOperatorFactory.class.getClassLoader())); ``` The solution3 is fine, however, I'm worried that when there are multiple classloaders, the judgment may be wrong. That's why this PR store the ClassName instead of Class. WDYT? | public void setStreamOperatorFactory(StreamOperatorFactory<?> factory) {
if (factory != null) {
toBeSerializedConfigObjects.put(SERIALIZED_UDF, factory);
config.setString(SERIALIZED_UDF_CLASS_NAME, factory.getClass().getName());
}
} | config.setString(SERIALIZED_UDF_CLASS_NAME, factory.getClass().getName()); | public void setStreamOperatorFactory(StreamOperatorFactory<?> factory) {
if (factory != null) {
toBeSerializedConfigObjects.put(SERIALIZED_UDF, factory);
config.setString(SERIALIZED_UDF_CLASS_NAME, factory.getClass().getName());
}
} | class StreamConfig implements Serializable {
private static final long serialVersionUID = 1L;
public static final String SERIALIZED_UDF = "serializedUDF";
/**
* Introduce serializedUdfClassName to avoid unnecessarily heavy {@link
*
*/
public static final String SERIALIZED_UDF_CLASS_NAME = "serializedUdfClassName";
private static final String NUMBER_OF_OUTPUTS = "numberOfOutputs";
private static final String NUMBER_OF_NETWORK_INPUTS = "numberOfNetworkInputs";
private static final String CHAINED_OUTPUTS = "chainedOutputs";
private static final String CHAINED_TASK_CONFIG = "chainedTaskConfig_";
private static final String IS_CHAINED_VERTEX = "isChainedSubtask";
private static final String CHAIN_INDEX = "chainIndex";
private static final String VERTEX_NAME = "vertexID";
private static final String ITERATION_ID = "iterationId";
private static final String INPUTS = "inputs";
private static final String TYPE_SERIALIZER_OUT_1 = "typeSerializer_out";
private static final String TYPE_SERIALIZER_SIDEOUT_PREFIX = "typeSerializer_sideout_";
private static final String ITERATON_WAIT = "iterationWait";
private static final String OP_NONCHAINED_OUTPUTS = "opNonChainedOutputs";
private static final String VERTEX_NONCHAINED_OUTPUTS = "vertexNonChainedOutputs";
private static final String IN_STREAM_EDGES = "inStreamEdges";
private static final String OPERATOR_NAME = "operatorName";
private static final String OPERATOR_ID = "operatorID";
private static final String CHAIN_END = "chainEnd";
private static final String GRAPH_CONTAINING_LOOPS = "graphContainingLoops";
private static final String CHECKPOINTING_ENABLED = "checkpointing";
private static final String CHECKPOINT_MODE = "checkpointMode";
private static final String SAVEPOINT_DIR = "savepointdir";
private static final String CHECKPOINT_STORAGE = "checkpointstorage";
private static final String STATE_BACKEND = "statebackend";
private static final String ENABLE_CHANGE_LOG_STATE_BACKEND = "enablechangelog";
private static final String TIMER_SERVICE_PROVIDER = "timerservice";
private static final String STATE_PARTITIONER = "statePartitioner";
private static final String STATE_KEY_SERIALIZER = "statekeyser";
private static final String TIME_CHARACTERISTIC = "timechar";
private static final String MANAGED_MEMORY_FRACTION_PREFIX = "managedMemFraction.";
private static final ConfigOption<Boolean> STATE_BACKEND_USE_MANAGED_MEMORY =
ConfigOptions.key("statebackend.useManagedMemory")
.booleanType()
.noDefaultValue()
.withDescription(
"If state backend is specified, whether it uses managed memory.");
private static final CheckpointingMode DEFAULT_CHECKPOINTING_MODE =
CheckpointingMode.EXACTLY_ONCE;
private static final double DEFAULT_MANAGED_MEMORY_FRACTION = 0.0;
private final Configuration config;
private final transient Map<String, Object> toBeSerializedConfigObjects = new HashMap<>();
private final transient Map<Integer, CompletableFuture<StreamConfig>> chainedTaskFutures =
new HashMap<>();
private final transient CompletableFuture<StreamConfig> serializationFuture =
new CompletableFuture<>();
/** Wraps the given per-task configuration; object values are serialized into it lazily. */
public StreamConfig(Configuration config) {
    this.config = config;
}

/** Returns the backing configuration (may not yet contain unserialized object entries). */
public Configuration getConfiguration() {
    return config;
}

/** Completes when this config (and all chained configs it depends on) has been serialized. */
public CompletableFuture<StreamConfig> getSerializationFuture() {
    return serializationFuture;
}

/** Trigger the object config serialization and return the completable future. */
public CompletableFuture<StreamConfig> triggerSerializationAndReturnFuture(
        Executor ioExecutor) {
    // Wait for all chained task configs to finish their own serialization first,
    // then serialize this config plus the map of chained configs on the I/O executor.
    FutureUtils.combineAll(chainedTaskFutures.values())
            .thenAcceptAsync(
                    chainedConfigs -> {
                        try {
                            serializeAllConfigs();
                            InstantiationUtil.writeObjectToConfig(
                                    chainedConfigs.stream()
                                            .collect(
                                                    Collectors.toMap(
                                                            StreamConfig::getVertexID,
                                                            Function.identity())),
                                    this.config,
                                    CHAINED_TASK_CONFIG);
                            serializationFuture.complete(this);
                        } catch (Throwable throwable) {
                            // Propagate any failure (including Errors) through the future
                            serializationFuture.completeExceptionally(throwable);
                        }
                    },
                    ioExecutor);
    return serializationFuture;
}
/**
 * Serialize all object configs synchronously. Only used for operators which need to reconstruct
 * the StreamConfig internally or test.
 */
public void serializeAllConfigs() {
    for (Map.Entry<String, Object> entry : toBeSerializedConfigObjects.entrySet()) {
        final String key = entry.getKey();
        try {
            InstantiationUtil.writeObjectToConfig(entry.getValue(), this.config, key);
        } catch (IOException e) {
            throw new StreamTaskException(
                    String.format("Could not serialize object for key %s.", key), e);
        }
    }
}
/** Test-only synchronous variant: writes the chained task config map directly into this config. */
@VisibleForTesting
public void setAndSerializeTransitiveChainedTaskConfigs(
        Map<Integer, StreamConfig> chainedTaskConfigs) {
    try {
        InstantiationUtil.writeObjectToConfig(
                chainedTaskConfigs, this.config, CHAINED_TASK_CONFIG);
    } catch (IOException e) {
        throw new StreamTaskException(
                "Could not serialize object for key chained task config.", e);
    }
}

/** Sets the id of the job vertex this config belongs to. */
public void setVertexID(Integer vertexID) {
    config.setInteger(VERTEX_NAME, vertexID);
}

/** Returns the job vertex id, or -1 if unset. */
public Integer getVertexID() {
    return config.getInteger(VERTEX_NAME, -1);
}
/**
 * Fraction of managed memory reserved for the given use case that this operator should use.
 *
 * @param managedMemoryUseCase the use case the fraction is reserved for
 * @param fraction value in [0.0, 1.0]
 * @throws IllegalArgumentException if fraction is outside [0.0, 1.0]
 */
public void setManagedMemoryFractionOperatorOfUseCase(
        ManagedMemoryUseCase managedMemoryUseCase, double fraction) {
    final ConfigOption<Double> configOption =
            getManagedMemoryFractionConfigOption(managedMemoryUseCase);
    // Use checkArgument's message template so the error string is only built on failure,
    // instead of eagerly formatting it on every call.
    checkArgument(
            fraction >= 0.0 && fraction <= 1.0,
            "%s should be in range [0.0, 1.0], but was: %s",
            configOption.key(),
            fraction);
    config.setDouble(configOption, fraction);
}
/**
 * Fraction of total managed memory in the slot that this operator should use for the given use
 * case.
 */
public double getManagedMemoryFractionOperatorUseCaseOfSlot(
        ManagedMemoryUseCase managedMemoryUseCase,
        Configuration taskManagerConfig,
        ClassLoader cl) {
    return ManagedMemoryUtils.convertToFractionOfSlot(
            managedMemoryUseCase,
            config.getDouble(getManagedMemoryFractionConfigOption(managedMemoryUseCase)),
            getAllManagedMemoryUseCases(),
            taskManagerConfig,
            config.getOptional(STATE_BACKEND_USE_MANAGED_MEMORY),
            cl);
}

/** Builds the per-use-case fraction config option ("managedMemFraction.<useCase>"). */
private static ConfigOption<Double> getManagedMemoryFractionConfigOption(
        ManagedMemoryUseCase managedMemoryUseCase) {
    return ConfigOptions.key(
                    MANAGED_MEMORY_FRACTION_PREFIX + checkNotNull(managedMemoryUseCase))
            .doubleType()
            .defaultValue(DEFAULT_MANAGED_MEMORY_FRACTION);
}

/** Collects every use case for which a fraction key has been written into this config. */
private Set<ManagedMemoryUseCase> getAllManagedMemoryUseCases() {
    return config.keySet().stream()
            .filter((key) -> key.startsWith(MANAGED_MEMORY_FRACTION_PREFIX))
            .map(
                    (key) ->
                            ManagedMemoryUseCase.valueOf(
                                    key.replaceFirst(MANAGED_MEMORY_FRACTION_PREFIX, "")))
            .collect(Collectors.toSet());
}
/** Stores the time characteristic as its enum ordinal. */
public void setTimeCharacteristic(TimeCharacteristic characteristic) {
    config.setInteger(TIME_CHARACTERISTIC, characteristic.ordinal());
}

/**
 * Returns the time characteristic.
 *
 * @throws CorruptConfigurationException if it was never set (stored ordinal is negative)
 */
public TimeCharacteristic getTimeCharacteristic() {
    int ordinal = config.getInteger(TIME_CHARACTERISTIC, -1);
    if (ordinal >= 0) {
        return TimeCharacteristic.values()[ordinal];
    } else {
        throw new CorruptConfigurationException("time characteristic is not set");
    }
}
/** Registers the serializer for this operator's main output. */
public void setTypeSerializerOut(TypeSerializer<?> serializer) {
    setTypeSerializer(TYPE_SERIALIZER_OUT_1, serializer);
}

/** Deserializes and returns the main-output serializer. */
public <T> TypeSerializer<T> getTypeSerializerOut(ClassLoader cl) {
    try {
        return InstantiationUtil.readObjectFromConfig(this.config, TYPE_SERIALIZER_OUT_1, cl);
    } catch (Exception e) {
        throw new StreamTaskException("Could not instantiate serializer.", e);
    }
}

/** Registers the serializer for the side output identified by the given tag. */
public void setTypeSerializerSideOut(OutputTag<?> outputTag, TypeSerializer<?> serializer) {
    setTypeSerializer(TYPE_SERIALIZER_SIDEOUT_PREFIX + outputTag.getId(), serializer);
}

// Serializers are stored as objects and written out later by serializeAllConfigs()
private void setTypeSerializer(String key, TypeSerializer<?> typeWrapper) {
    toBeSerializedConfigObjects.put(key, typeWrapper);
}

/** Deserializes and returns the serializer of the given side output. */
public <T> TypeSerializer<T> getTypeSerializerSideOut(OutputTag<?> outputTag, ClassLoader cl) {
    Preconditions.checkNotNull(outputTag, "Side output id must not be null.");
    try {
        return InstantiationUtil.readObjectFromConfig(
                this.config, TYPE_SERIALIZER_SIDEOUT_PREFIX + outputTag.getId(), cl);
    } catch (Exception e) {
        throw new StreamTaskException("Could not instantiate serializer.", e);
    }
}
/** Convenience setup: one pass-through network input per given serializer, gate index = position. */
public void setupNetworkInputs(TypeSerializer<?>... serializers) {
    InputConfig[] inputs = new InputConfig[serializers.length];
    for (int i = 0; i < serializers.length; i++) {
        inputs[i] = new NetworkInputConfig(serializers[i], i, InputRequirement.PASS_THROUGH);
    }
    setInputs(inputs);
}

/** Registers the full input descriptions of this operator (network and/or source inputs). */
public void setInputs(InputConfig... inputs) {
    toBeSerializedConfigObjects.put(INPUTS, inputs);
}

/** Returns the input descriptions; never null — an empty array if none were set. */
public InputConfig[] getInputs(ClassLoader cl) {
    try {
        InputConfig[] inputs = InstantiationUtil.readObjectFromConfig(this.config, INPUTS, cl);
        if (inputs == null) {
            return new InputConfig[0];
        }
        return inputs;
    } catch (Exception e) {
        throw new StreamTaskException("Could not deserialize inputs", e);
    }
}
/** @deprecated use {@code getTypeSerializerIn(0, cl)} instead. */
@Deprecated
public <T> TypeSerializer<T> getTypeSerializerIn1(ClassLoader cl) {
    return getTypeSerializerIn(0, cl);
}

/** @deprecated use {@code getTypeSerializerIn(1, cl)} instead. */
@Deprecated
public <T> TypeSerializer<T> getTypeSerializerIn2(ClassLoader cl) {
    return getTypeSerializerIn(1, cl);
}

/**
 * Returns the serializer of the index-th input; the input must exist and be a network input
 * (checkState fails otherwise).
 */
public <T> TypeSerializer<T> getTypeSerializerIn(int index, ClassLoader cl) {
    InputConfig[] inputs = getInputs(cl);
    checkState(index < inputs.length);
    checkState(
            inputs[index] instanceof NetworkInputConfig,
            "Input [%s] was assumed to be network input",
            index);
    return (TypeSerializer<T>) ((NetworkInputConfig) inputs[index]).typeSerializer;
}
/** Test convenience: wraps the operator in a SimpleOperatorFactory and stores it. */
@VisibleForTesting
public void setStreamOperator(StreamOperator<?> operator) {
    setStreamOperatorFactory(SimpleOperatorFactory.of(operator));
}

/** Test convenience: unwraps the operator from the stored SimpleOperatorFactory. */
@VisibleForTesting
public <T extends StreamOperator<?>> T getStreamOperator(ClassLoader cl) {
    SimpleOperatorFactory<?> factory = getStreamOperatorFactory(cl);
    return (T) factory.getOperator();
}

/**
 * Deserializes the stored operator factory with the given (user-code) class loader.
 * On ClassNotFoundException the error message includes class loader diagnostics to
 * distinguish "class missing" from "class present but deserialization failed".
 */
public <T extends StreamOperatorFactory<?>> T getStreamOperatorFactory(ClassLoader cl) {
    try {
        return InstantiationUtil.readObjectFromConfig(this.config, SERIALIZED_UDF, cl);
    } catch (ClassNotFoundException e) {
        String classLoaderInfo = ClassLoaderUtil.getUserCodeClassLoaderInfo(cl);
        boolean loadableDoubleCheck = ClassLoaderUtil.validateClassLoadable(e, cl);
        String exceptionMessage =
                "Cannot load user class: "
                        + e.getMessage()
                        + "\nClassLoader info: "
                        + classLoaderInfo
                        + (loadableDoubleCheck
                                ? "\nClass was actually found in classloader - deserialization issue."
                                : "\nClass not resolvable through given classloader.");
        throw new StreamTaskException(exceptionMessage, e);
    } catch (Exception e) {
        throw new StreamTaskException("Cannot instantiate user function.", e);
    }
}

/** Returns the factory class name without deserializing the factory itself (may be null). */
public String getStreamOperatorFactoryClassName() {
    return config.getString(SERIALIZED_UDF_CLASS_NAME, null);
}
/** Sets the id of the iteration this task belongs to. */
public void setIterationId(String iterationId) {
    config.setString(ITERATION_ID, iterationId);
}

/** Returns the iteration id, or "" if not part of an iteration. */
public String getIterationId() {
    return config.getString(ITERATION_ID, "");
}

/** Sets the iteration head/tail wait time in milliseconds. */
public void setIterationWaitTime(long time) {
    config.setLong(ITERATON_WAIT, time);
}

/** Returns the iteration wait time in milliseconds, 0 if unset. */
public long getIterationWaitTime() {
    return config.getLong(ITERATON_WAIT, 0);
}

/** Sets the number of network (non-chained) inputs. */
public void setNumberOfNetworkInputs(int numberOfInputs) {
    config.setInteger(NUMBER_OF_NETWORK_INPUTS, numberOfInputs);
}

/** Returns the number of network (non-chained) inputs, 0 if unset. */
public int getNumberOfNetworkInputs() {
    return config.getInteger(NUMBER_OF_NETWORK_INPUTS, 0);
}

/** Sets the number of outputs. */
public void setNumberOfOutputs(int numberOfOutputs) {
    config.setInteger(NUMBER_OF_OUTPUTS, numberOfOutputs);
}

/** Returns the number of outputs, 0 if unset. */
public int getNumberOfOutputs() {
    return config.getInteger(NUMBER_OF_OUTPUTS, 0);
}
/** Sets the operator level non-chained outputs. */
public void setOperatorNonChainedOutputs(List<NonChainedOutput> nonChainedOutputs) {
    toBeSerializedConfigObjects.put(OP_NONCHAINED_OUTPUTS, nonChainedOutputs);
}

/** Returns the operator level non-chained outputs; never null — empty list if unset. */
public List<NonChainedOutput> getOperatorNonChainedOutputs(ClassLoader cl) {
    try {
        List<NonChainedOutput> nonChainedOutputs =
                InstantiationUtil.readObjectFromConfig(this.config, OP_NONCHAINED_OUTPUTS, cl);
        return nonChainedOutputs == null ? new ArrayList<>() : nonChainedOutputs;
    } catch (Exception e) {
        throw new StreamTaskException("Could not instantiate non chained outputs.", e);
    }
}

/** Sets the edges leading to operators chained to this one. */
public void setChainedOutputs(List<StreamEdge> chainedOutputs) {
    toBeSerializedConfigObjects.put(CHAINED_OUTPUTS, chainedOutputs);
}

/** Returns the chained output edges; never null — empty list if unset. */
public List<StreamEdge> getChainedOutputs(ClassLoader cl) {
    try {
        List<StreamEdge> chainedOutputs =
                InstantiationUtil.readObjectFromConfig(this.config, CHAINED_OUTPUTS, cl);
        return chainedOutputs == null ? new ArrayList<StreamEdge>() : chainedOutputs;
    } catch (Exception e) {
        throw new StreamTaskException("Could not instantiate chained outputs.", e);
    }
}

/** Sets the physical (network) input edges of this task. */
public void setInPhysicalEdges(List<StreamEdge> inEdges) {
    toBeSerializedConfigObjects.put(IN_STREAM_EDGES, inEdges);
}

/** Returns the physical input edges; never null — empty list if unset. */
public List<StreamEdge> getInPhysicalEdges(ClassLoader cl) {
    try {
        List<StreamEdge> inEdges =
                InstantiationUtil.readObjectFromConfig(this.config, IN_STREAM_EDGES, cl);
        return inEdges == null ? new ArrayList<StreamEdge>() : inEdges;
    } catch (Exception e) {
        throw new StreamTaskException("Could not instantiate inputs.", e);
    }
}
/** Enables or disables checkpointing for this task. */
public void setCheckpointingEnabled(boolean enabled) {
    config.setBoolean(CHECKPOINTING_ENABLED, enabled);
}

/** Returns whether checkpointing is enabled (default false). */
public boolean isCheckpointingEnabled() {
    return config.getBoolean(CHECKPOINTING_ENABLED, false);
}

/** Stores the checkpointing mode as its enum ordinal. */
public void setCheckpointMode(CheckpointingMode mode) {
    config.setInteger(CHECKPOINT_MODE, mode.ordinal());
}

/** Returns the checkpointing mode; EXACTLY_ONCE if it was never set. */
public CheckpointingMode getCheckpointMode() {
    int ordinal = config.getInteger(CHECKPOINT_MODE, -1);
    if (ordinal >= 0) {
        return CheckpointingMode.values()[ordinal];
    } else {
        return DEFAULT_CHECKPOINTING_MODE;
    }
}

/** Enables or disables unaligned checkpoints. */
public void setUnalignedCheckpointsEnabled(boolean enabled) {
    config.setBoolean(ExecutionCheckpointingOptions.ENABLE_UNALIGNED, enabled);
}

/** Returns whether unaligned checkpoints are enabled (default false). */
public boolean isUnalignedCheckpointsEnabled() {
    return config.getBoolean(ExecutionCheckpointingOptions.ENABLE_UNALIGNED, false);
}

/** Returns true iff the checkpoint mode is EXACTLY_ONCE. */
public boolean isExactlyOnceCheckpointMode() {
    return getCheckpointMode() == CheckpointingMode.EXACTLY_ONCE;
}

/** Returns the timeout after which an aligned checkpoint switches to unaligned. */
public Duration getAlignedCheckpointTimeout() {
    return config.get(ExecutionCheckpointingOptions.ALIGNED_CHECKPOINT_TIMEOUT);
}

/** Sets the aligned-checkpoint timeout. */
public void setAlignedCheckpointTimeout(Duration alignedCheckpointTimeout) {
    config.set(
            ExecutionCheckpointingOptions.ALIGNED_CHECKPOINT_TIMEOUT, alignedCheckpointTimeout);
}

/** Sets the maximum number of checkpoints that may be in flight concurrently. */
public void setMaxConcurrentCheckpoints(int maxConcurrentCheckpoints) {
    config.setInteger(
            ExecutionCheckpointingOptions.MAX_CONCURRENT_CHECKPOINTS, maxConcurrentCheckpoints);
}

/** Returns the maximum number of concurrent checkpoints (falls back to the option default). */
public int getMaxConcurrentCheckpoints() {
    return config.getInteger(
            ExecutionCheckpointingOptions.MAX_CONCURRENT_CHECKPOINTS,
            ExecutionCheckpointingOptions.MAX_CONCURRENT_CHECKPOINTS.defaultValue());
}

/** Returns the max number of subtasks sharing one channel state file (unaligned checkpoints). */
public int getMaxSubtasksPerChannelStateFile() {
    return config.get(
            ExecutionCheckpointingOptions.UNALIGNED_MAX_SUBTASKS_PER_CHANNEL_STATE_FILE);
}

/** Sets the max number of subtasks sharing one channel state file. */
public void setMaxSubtasksPerChannelStateFile(int maxSubtasksPerChannelStateFile) {
    config.set(
            ExecutionCheckpointingOptions.UNALIGNED_MAX_SUBTASKS_PER_CHANNEL_STATE_FILE,
            maxSubtasksPerChannelStateFile);
}
/**
 * Sets the job vertex level non-chained outputs. The given output list must have the same order
 * with {@link JobVertex
 */
public void setVertexNonChainedOutputs(List<NonChainedOutput> nonChainedOutputs) {
    toBeSerializedConfigObjects.put(VERTEX_NONCHAINED_OUTPUTS, nonChainedOutputs);
}

/** Returns the vertex level non-chained outputs; never null — empty list if unset. */
public List<NonChainedOutput> getVertexNonChainedOutputs(ClassLoader cl) {
    try {
        List<NonChainedOutput> nonChainedOutputs =
                InstantiationUtil.readObjectFromConfig(
                        this.config, VERTEX_NONCHAINED_OUTPUTS, cl);
        return nonChainedOutputs == null ? new ArrayList<>() : nonChainedOutputs;
    } catch (Exception e) {
        throw new StreamTaskException("Could not instantiate outputs in order.", e);
    }
}

/** Registers the chained configs by their serialization futures; resolved on serialization. */
public void setTransitiveChainedTaskConfigs(Map<Integer, StreamConfig> chainedTaskConfigs) {
    if (chainedTaskConfigs != null) {
        chainedTaskConfigs.forEach(
                (id, config) -> chainedTaskFutures.put(id, config.getSerializationFuture()));
    }
}

/** Returns the configs of all transitively chained tasks; never null — empty map if unset. */
public Map<Integer, StreamConfig> getTransitiveChainedTaskConfigs(ClassLoader cl) {
    try {
        Map<Integer, StreamConfig> confs =
                InstantiationUtil.readObjectFromConfig(this.config, CHAINED_TASK_CONFIG, cl);
        return confs == null ? new HashMap<Integer, StreamConfig>() : confs;
    } catch (Exception e) {
        throw new StreamTaskException("Could not instantiate configuration.", e);
    }
}

/** Like getTransitiveChainedTaskConfigs, but also includes this config under its vertex id. */
public Map<Integer, StreamConfig> getTransitiveChainedTaskConfigsWithSelf(ClassLoader cl) {
    Map<Integer, StreamConfig> chainedTaskConfigs = getTransitiveChainedTaskConfigs(cl);
    chainedTaskConfigs.put(getVertexID(), this);
    return chainedTaskConfigs;
}
/** Stores the operator id as raw bytes. */
public void setOperatorID(OperatorID operatorID) {
    this.config.setBytes(OPERATOR_ID, operatorID.getBytes());
}

/** Returns the operator id; fails with NPE if it was never set. */
public OperatorID getOperatorID() {
    byte[] operatorIDBytes = config.getBytes(OPERATOR_ID, null);
    return new OperatorID(Preconditions.checkNotNull(operatorIDBytes));
}

/** Sets the display name of the operator. */
public void setOperatorName(String name) {
    this.config.setString(OPERATOR_NAME, name);
}

/** Returns the operator display name, or null if unset. */
public String getOperatorName() {
    return this.config.getString(OPERATOR_NAME, null);
}

/** Sets this operator's position within its chain. */
public void setChainIndex(int index) {
    this.config.setInteger(CHAIN_INDEX, index);
}

/** Returns this operator's position within its chain, 0 if unset. */
public int getChainIndex() {
    return this.config.getInteger(CHAIN_INDEX, 0);
}
/** Registers the state backend and records whether it uses managed memory. */
public void setStateBackend(StateBackend backend) {
    if (backend != null) {
        toBeSerializedConfigObjects.put(STATE_BACKEND, backend);
        setStateBackendUsesManagedMemory(backend.useManagedMemory());
    }
}

/** Records whether the changelog state backend is enabled (tri-state). */
public void setChangelogStateBackendEnabled(TernaryBoolean enabled) {
    toBeSerializedConfigObjects.put(ENABLE_CHANGE_LOG_STATE_BACKEND, enabled);
}

@VisibleForTesting
public void setStateBackendUsesManagedMemory(boolean usesManagedMemory) {
    this.config.setBoolean(STATE_BACKEND_USE_MANAGED_MEMORY, usesManagedMemory);
}

/** Deserializes the configured state backend, or null if none was set. */
public StateBackend getStateBackend(ClassLoader cl) {
    try {
        return InstantiationUtil.readObjectFromConfig(this.config, STATE_BACKEND, cl);
    } catch (Exception e) {
        throw new StreamTaskException("Could not instantiate statehandle provider.", e);
    }
}

/** Returns the tri-state changelog backend flag. */
public TernaryBoolean isChangelogStateBackendEnabled(ClassLoader cl) {
    try {
        return InstantiationUtil.readObjectFromConfig(
                this.config, ENABLE_CHANGE_LOG_STATE_BACKEND, cl);
    } catch (Exception e) {
        throw new StreamTaskException(
                "Could not instantiate change log state backend enable flag.", e);
    }
}

/** Registers the savepoint directory (ignored if null). */
public void setSavepointDir(Path directory) {
    if (directory != null) {
        toBeSerializedConfigObjects.put(SAVEPOINT_DIR, directory);
    }
}

/** Deserializes the savepoint directory, or null if none was set. */
public Path getSavepointDir(ClassLoader cl) {
    try {
        return InstantiationUtil.readObjectFromConfig(this.config, SAVEPOINT_DIR, cl);
    } catch (Exception e) {
        throw new StreamTaskException("Could not instantiate savepoint directory.", e);
    }
}

/** Registers the checkpoint storage (ignored if null). */
public void setCheckpointStorage(CheckpointStorage storage) {
    if (storage != null) {
        toBeSerializedConfigObjects.put(CHECKPOINT_STORAGE, storage);
    }
}

/** Deserializes the checkpoint storage, or null if none was set. */
public CheckpointStorage getCheckpointStorage(ClassLoader cl) {
    try {
        return InstantiationUtil.readObjectFromConfig(this.config, CHECKPOINT_STORAGE, cl);
    } catch (Exception e) {
        throw new StreamTaskException("Could not instantiate checkpoint storage.", e);
    }
}
/** Registers the timer service provider (ignored if null). */
public void setTimerServiceProvider(InternalTimeServiceManager.Provider timerServiceProvider) {
    if (timerServiceProvider != null) {
        toBeSerializedConfigObjects.put(TIMER_SERVICE_PROVIDER, timerServiceProvider);
    }
}

/** Deserializes the timer service provider, or null if none was set. */
public InternalTimeServiceManager.Provider getTimerServiceProvider(ClassLoader cl) {
    try {
        return InstantiationUtil.readObjectFromConfig(this.config, TIMER_SERVICE_PROVIDER, cl);
    } catch (Exception e) {
        throw new StreamTaskException("Could not instantiate timer service provider.", e);
    }
}

/** Registers the key selector used to partition state for the given input. */
public void setStatePartitioner(int input, KeySelector<?, ?> partitioner) {
    toBeSerializedConfigObjects.put(STATE_PARTITIONER + input, partitioner);
}

/** Deserializes the key selector of the given input, or null if none was set. */
public <IN, K extends Serializable> KeySelector<IN, K> getStatePartitioner(
        int input, ClassLoader cl) {
    try {
        return InstantiationUtil.readObjectFromConfig(
                this.config, STATE_PARTITIONER + input, cl);
    } catch (Exception e) {
        throw new StreamTaskException("Could not instantiate state partitioner.", e);
    }
}

/** Registers the serializer for state keys. */
public void setStateKeySerializer(TypeSerializer<?> serializer) {
    toBeSerializedConfigObjects.put(STATE_KEY_SERIALIZER, serializer);
}

/** Deserializes the state key serializer, or null if none was set. */
public <K> TypeSerializer<K> getStateKeySerializer(ClassLoader cl) {
    try {
        return InstantiationUtil.readObjectFromConfig(this.config, STATE_KEY_SERIALIZER, cl);
    } catch (Exception e) {
        throw new StreamTaskException(
                "Could not instantiate state key serializer from task config.", e);
    }
}

/** Marks this config as the head of an operator chain. */
public void setChainStart() {
    config.setBoolean(IS_CHAINED_VERTEX, true);
}

/** Returns whether this config is the head of an operator chain. */
public boolean isChainStart() {
    return config.getBoolean(IS_CHAINED_VERTEX, false);
}

/** Marks this config as the tail of an operator chain. */
public void setChainEnd() {
    config.setBoolean(CHAIN_END, true);
}

/** Returns whether this config is the tail of an operator chain. */
public boolean isChainEnd() {
    return config.getBoolean(CHAIN_END, false);
}
/**
 * Human-readable dump of this config for logging/debugging.
 * Deserializes the non-chained and chained outputs once each instead of twice —
 * the getters deserialize from the config on every call, so repeated calls were
 * redundant work (and a potential source of inconsistency between the two reads).
 */
@Override
public String toString() {
    ClassLoader cl = getClass().getClassLoader();
    StringBuilder builder = new StringBuilder();
    builder.append("\n=======================");
    builder.append("Stream Config");
    builder.append("=======================");
    builder.append("\nNumber of non-chained inputs: ").append(getNumberOfNetworkInputs());
    builder.append("\nNumber of non-chained outputs: ").append(getNumberOfOutputs());
    List<NonChainedOutput> nonChainedOutputs = getOperatorNonChainedOutputs(cl);
    builder.append("\nOutput names: ").append(nonChainedOutputs);
    builder.append("\nPartitioning:");
    for (NonChainedOutput output : nonChainedOutputs) {
        String outputName = output.getDataSetId().toString();
        builder.append("\n\t").append(outputName).append(": ").append(output.getPartitioner());
    }
    List<StreamEdge> chainedOutputs = getChainedOutputs(cl);
    builder.append("\nChained subtasks: ").append(chainedOutputs);
    try {
        builder.append("\nOperator: ")
                .append(getStreamOperatorFactory(cl).getClass().getSimpleName());
    } catch (Exception e) {
        // Factory may not be deserializable with this class loader; keep toString() total.
        builder.append("\nOperator: Missing");
    }
    builder.append("\nState Monitoring: ").append(isCheckpointingEnabled());
    if (isChainStart() && !chainedOutputs.isEmpty()) {
        builder.append(
                "\n\n\n---------------------\nChained task configs\n---------------------\n");
        builder.append(getTransitiveChainedTaskConfigs(cl));
    }
    return builder.toString();
}
/** Records whether the job graph contains iterations (loops). */
public void setGraphContainingLoops(boolean graphContainingLoops) {
    config.setBoolean(GRAPH_CONTAINING_LOOPS, graphContainingLoops);
}

/** Returns whether the job graph contains iterations, false if unset. */
public boolean isGraphContainingLoops() {
    return config.getBoolean(GRAPH_CONTAINING_LOOPS, false);
}

/**
 * Requirements of the different inputs of an operator. Each input can have a different
 * requirement. For all {@link
 * records of a given key are passed to the operator consecutively before moving on to the next
 * group.
 */
public enum InputRequirement {
    /**
     * Records from all sorted inputs are grouped (sorted) by key and are then fed to the
     * operator one group at a time. This "zig-zags" between different inputs if records for the
     * same key arrive on multiple inputs to ensure that the operator sees all records with a
     * key as one consecutive group.
     */
    SORTED,
    /**
     * Records from {@link
     * records from {@link
     * within the different {@link
     */
    PASS_THROUGH;
}

/** Interface representing chained inputs. */
public interface InputConfig extends Serializable {}
/** A representation of a Network {@link InputConfig}. */
public static class NetworkInputConfig implements InputConfig {
private final TypeSerializer<?> typeSerializer;
private final InputRequirement inputRequirement;
private int inputGateIndex;
public NetworkInputConfig(TypeSerializer<?> typeSerializer, int inputGateIndex) {
this(typeSerializer, inputGateIndex, InputRequirement.PASS_THROUGH);
}
public NetworkInputConfig(
TypeSerializer<?> typeSerializer,
int inputGateIndex,
InputRequirement inputRequirement) {
this.typeSerializer = typeSerializer;
this.inputGateIndex = inputGateIndex;
this.inputRequirement = inputRequirement;
}
public TypeSerializer<?> getTypeSerializer() {
return typeSerializer;
}
public int getInputGateIndex() {
return inputGateIndex;
}
public InputRequirement getInputRequirement() {
return inputRequirement;
}
}
/** A serialized representation of an input. */
public static class SourceInputConfig implements InputConfig {
private final StreamEdge inputEdge;
public SourceInputConfig(StreamEdge inputEdge) {
this.inputEdge = inputEdge;
}
public StreamEdge getInputEdge() {
return inputEdge;
}
@Override
public String toString() {
return inputEdge.toString();
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof SourceInputConfig)) {
return false;
}
SourceInputConfig other = (SourceInputConfig) obj;
return Objects.equals(other.inputEdge, inputEdge);
}
@Override
public int hashCode() {
return inputEdge.hashCode();
}
}
/**
 * Returns {@code true} iff the given input is a network input whose records must arrive
 * sorted (grouped) by key.
 */
public static boolean requiresSorting(StreamConfig.InputConfig inputConfig) {
    if (!(inputConfig instanceof StreamConfig.NetworkInputConfig)) {
        return false;
    }
    StreamConfig.NetworkInputConfig networkInput = (StreamConfig.NetworkInputConfig) inputConfig;
    return networkInput.getInputRequirement() == StreamConfig.InputRequirement.SORTED;
}
} | class StreamConfig implements Serializable {
private static final long serialVersionUID = 1L;
public static final String SERIALIZED_UDF = "serializedUDF";
/**
* Introduce serializedUdfClassName to avoid unnecessarily heavy {@link
*
*/
public static final String SERIALIZED_UDF_CLASS_NAME = "serializedUdfClassName";
private static final String NUMBER_OF_OUTPUTS = "numberOfOutputs";
private static final String NUMBER_OF_NETWORK_INPUTS = "numberOfNetworkInputs";
private static final String CHAINED_OUTPUTS = "chainedOutputs";
private static final String CHAINED_TASK_CONFIG = "chainedTaskConfig_";
private static final String IS_CHAINED_VERTEX = "isChainedSubtask";
private static final String CHAIN_INDEX = "chainIndex";
private static final String VERTEX_NAME = "vertexID";
private static final String ITERATION_ID = "iterationId";
private static final String INPUTS = "inputs";
private static final String TYPE_SERIALIZER_OUT_1 = "typeSerializer_out";
private static final String TYPE_SERIALIZER_SIDEOUT_PREFIX = "typeSerializer_sideout_";
private static final String ITERATON_WAIT = "iterationWait";
private static final String OP_NONCHAINED_OUTPUTS = "opNonChainedOutputs";
private static final String VERTEX_NONCHAINED_OUTPUTS = "vertexNonChainedOutputs";
private static final String IN_STREAM_EDGES = "inStreamEdges";
private static final String OPERATOR_NAME = "operatorName";
private static final String OPERATOR_ID = "operatorID";
private static final String CHAIN_END = "chainEnd";
private static final String GRAPH_CONTAINING_LOOPS = "graphContainingLoops";
private static final String CHECKPOINTING_ENABLED = "checkpointing";
private static final String CHECKPOINT_MODE = "checkpointMode";
private static final String SAVEPOINT_DIR = "savepointdir";
private static final String CHECKPOINT_STORAGE = "checkpointstorage";
private static final String STATE_BACKEND = "statebackend";
private static final String ENABLE_CHANGE_LOG_STATE_BACKEND = "enablechangelog";
private static final String TIMER_SERVICE_PROVIDER = "timerservice";
private static final String STATE_PARTITIONER = "statePartitioner";
private static final String STATE_KEY_SERIALIZER = "statekeyser";
private static final String TIME_CHARACTERISTIC = "timechar";
private static final String MANAGED_MEMORY_FRACTION_PREFIX = "managedMemFraction.";
private static final ConfigOption<Boolean> STATE_BACKEND_USE_MANAGED_MEMORY =
ConfigOptions.key("statebackend.useManagedMemory")
.booleanType()
.noDefaultValue()
.withDescription(
"If state backend is specified, whether it uses managed memory.");
private static final CheckpointingMode DEFAULT_CHECKPOINTING_MODE =
CheckpointingMode.EXACTLY_ONCE;
private static final double DEFAULT_MANAGED_MEMORY_FRACTION = 0.0;
private final Configuration config;
private final transient Map<String, Object> toBeSerializedConfigObjects = new HashMap<>();
private final transient Map<Integer, CompletableFuture<StreamConfig>> chainedTaskFutures =
new HashMap<>();
private final transient CompletableFuture<StreamConfig> serializationFuture =
new CompletableFuture<>();
public StreamConfig(Configuration config) {
this.config = config;
}
public Configuration getConfiguration() {
return config;
}
public CompletableFuture<StreamConfig> getSerializationFuture() {
return serializationFuture;
}
/** Trigger the object config serialization and return the completable future. */
public CompletableFuture<StreamConfig> triggerSerializationAndReturnFuture(
Executor ioExecutor) {
// Wait for every chained task's own serialization to finish first, then serialize
// this config (including the map of chained configs) on the I/O executor.
FutureUtils.combineAll(chainedTaskFutures.values())
.thenAcceptAsync(
chainedConfigs -> {
try {
// Serialize this config's own pending objects before writing
// the chained-config map, so the written config is complete.
serializeAllConfigs();
InstantiationUtil.writeObjectToConfig(
chainedConfigs.stream()
.collect(
Collectors.toMap(
StreamConfig::getVertexID,
Function.identity())),
this.config,
CHAINED_TASK_CONFIG);
serializationFuture.complete(this);
} catch (Throwable throwable) {
// Propagate any serialization failure to waiters on the future.
serializationFuture.completeExceptionally(throwable);
}
},
ioExecutor);
// NOTE(review): if any chained future completes exceptionally, the callback above
// is skipped and serializationFuture is never completed — confirm that upstream
// guarantees all chained futures complete normally, or waiters may hang.
return serializationFuture;
}
/**
* Serialize all object configs synchronously. Only used for operators which need to reconstruct
* the StreamConfig internally or test.
*/
public void serializeAllConfigs() {
toBeSerializedConfigObjects.forEach(
(key, object) -> {
try {
InstantiationUtil.writeObjectToConfig(object, this.config, key);
} catch (IOException e) {
throw new StreamTaskException(
String.format("Could not serialize object for key %s.", key), e);
}
});
}
@VisibleForTesting
public void setAndSerializeTransitiveChainedTaskConfigs(
Map<Integer, StreamConfig> chainedTaskConfigs) {
try {
InstantiationUtil.writeObjectToConfig(
chainedTaskConfigs, this.config, CHAINED_TASK_CONFIG);
} catch (IOException e) {
throw new StreamTaskException(
"Could not serialize object for key chained task config.", e);
}
}
public void setVertexID(Integer vertexID) {
config.setInteger(VERTEX_NAME, vertexID);
}
public Integer getVertexID() {
return config.getInteger(VERTEX_NAME, -1);
}
/**
 * Fraction of managed memory reserved for the given use case that this operator should use.
 *
 * @param managedMemoryUseCase the use case the fraction applies to
 * @param fraction fraction in [0.0, 1.0]
 */
public void setManagedMemoryFractionOperatorOfUseCase(
        ManagedMemoryUseCase managedMemoryUseCase, double fraction) {
    final ConfigOption<Double> configOption =
            getManagedMemoryFractionConfigOption(managedMemoryUseCase);
    // Use the message-template overload so the error string is only formatted when the
    // check actually fails, instead of eagerly calling String.format on every call.
    checkArgument(
            fraction >= 0.0 && fraction <= 1.0,
            "%s should be in range [0.0, 1.0], but was: %s",
            configOption.key(),
            fraction);
    config.setDouble(configOption, fraction);
}
/**
* Fraction of total managed memory in the slot that this operator should use for the given use
* case.
*/
public double getManagedMemoryFractionOperatorUseCaseOfSlot(
ManagedMemoryUseCase managedMemoryUseCase,
Configuration taskManagerConfig,
ClassLoader cl) {
return ManagedMemoryUtils.convertToFractionOfSlot(
managedMemoryUseCase,
config.getDouble(getManagedMemoryFractionConfigOption(managedMemoryUseCase)),
getAllManagedMemoryUseCases(),
taskManagerConfig,
config.getOptional(STATE_BACKEND_USE_MANAGED_MEMORY),
cl);
}
private static ConfigOption<Double> getManagedMemoryFractionConfigOption(
ManagedMemoryUseCase managedMemoryUseCase) {
return ConfigOptions.key(
MANAGED_MEMORY_FRACTION_PREFIX + checkNotNull(managedMemoryUseCase))
.doubleType()
.defaultValue(DEFAULT_MANAGED_MEMORY_FRACTION);
}
private Set<ManagedMemoryUseCase> getAllManagedMemoryUseCases() {
return config.keySet().stream()
.filter((key) -> key.startsWith(MANAGED_MEMORY_FRACTION_PREFIX))
.map(
(key) ->
ManagedMemoryUseCase.valueOf(
key.replaceFirst(MANAGED_MEMORY_FRACTION_PREFIX, "")))
.collect(Collectors.toSet());
}
public void setTimeCharacteristic(TimeCharacteristic characteristic) {
config.setInteger(TIME_CHARACTERISTIC, characteristic.ordinal());
}
/**
 * Reads the configured time characteristic.
 *
 * @throws CorruptConfigurationException if the time characteristic was never set
 */
public TimeCharacteristic getTimeCharacteristic() {
    final int stored = config.getInteger(TIME_CHARACTERISTIC, -1);
    // A negative value is the sentinel for "never configured".
    if (stored < 0) {
        throw new CorruptConfigurationException("time characteristic is not set");
    }
    return TimeCharacteristic.values()[stored];
}
public void setTypeSerializerOut(TypeSerializer<?> serializer) {
setTypeSerializer(TYPE_SERIALIZER_OUT_1, serializer);
}
public <T> TypeSerializer<T> getTypeSerializerOut(ClassLoader cl) {
try {
return InstantiationUtil.readObjectFromConfig(this.config, TYPE_SERIALIZER_OUT_1, cl);
} catch (Exception e) {
throw new StreamTaskException("Could not instantiate serializer.", e);
}
}
public void setTypeSerializerSideOut(OutputTag<?> outputTag, TypeSerializer<?> serializer) {
setTypeSerializer(TYPE_SERIALIZER_SIDEOUT_PREFIX + outputTag.getId(), serializer);
}
private void setTypeSerializer(String key, TypeSerializer<?> typeWrapper) {
toBeSerializedConfigObjects.put(key, typeWrapper);
}
public <T> TypeSerializer<T> getTypeSerializerSideOut(OutputTag<?> outputTag, ClassLoader cl) {
Preconditions.checkNotNull(outputTag, "Side output id must not be null.");
try {
return InstantiationUtil.readObjectFromConfig(
this.config, TYPE_SERIALIZER_SIDEOUT_PREFIX + outputTag.getId(), cl);
} catch (Exception e) {
throw new StreamTaskException("Could not instantiate serializer.", e);
}
}
public void setupNetworkInputs(TypeSerializer<?>... serializers) {
InputConfig[] inputs = new InputConfig[serializers.length];
for (int i = 0; i < serializers.length; i++) {
inputs[i] = new NetworkInputConfig(serializers[i], i, InputRequirement.PASS_THROUGH);
}
setInputs(inputs);
}
public void setInputs(InputConfig... inputs) {
toBeSerializedConfigObjects.put(INPUTS, inputs);
}
/**
 * Deserializes the input configurations; returns an empty array when none were set.
 *
 * @throws StreamTaskException if deserialization fails
 */
public InputConfig[] getInputs(ClassLoader cl) {
    try {
        final InputConfig[] deserialized =
                InstantiationUtil.readObjectFromConfig(this.config, INPUTS, cl);
        return deserialized != null ? deserialized : new InputConfig[0];
    } catch (Exception e) {
        throw new StreamTaskException("Could not deserialize inputs", e);
    }
}
@Deprecated
public <T> TypeSerializer<T> getTypeSerializerIn1(ClassLoader cl) {
return getTypeSerializerIn(0, cl);
}
@Deprecated
public <T> TypeSerializer<T> getTypeSerializerIn2(ClassLoader cl) {
return getTypeSerializerIn(1, cl);
}
public <T> TypeSerializer<T> getTypeSerializerIn(int index, ClassLoader cl) {
InputConfig[] inputs = getInputs(cl);
checkState(index < inputs.length);
checkState(
inputs[index] instanceof NetworkInputConfig,
"Input [%s] was assumed to be network input",
index);
return (TypeSerializer<T>) ((NetworkInputConfig) inputs[index]).typeSerializer;
}
@VisibleForTesting
public void setStreamOperator(StreamOperator<?> operator) {
setStreamOperatorFactory(SimpleOperatorFactory.of(operator));
}
@VisibleForTesting
public <T extends StreamOperator<?>> T getStreamOperator(ClassLoader cl) {
SimpleOperatorFactory<?> factory = getStreamOperatorFactory(cl);
return (T) factory.getOperator();
}
public <T extends StreamOperatorFactory<?>> T getStreamOperatorFactory(ClassLoader cl) {
try {
return InstantiationUtil.readObjectFromConfig(this.config, SERIALIZED_UDF, cl);
} catch (ClassNotFoundException e) {
String classLoaderInfo = ClassLoaderUtil.getUserCodeClassLoaderInfo(cl);
boolean loadableDoubleCheck = ClassLoaderUtil.validateClassLoadable(e, cl);
String exceptionMessage =
"Cannot load user class: "
+ e.getMessage()
+ "\nClassLoader info: "
+ classLoaderInfo
+ (loadableDoubleCheck
? "\nClass was actually found in classloader - deserialization issue."
: "\nClass not resolvable through given classloader.");
throw new StreamTaskException(exceptionMessage, e);
} catch (Exception e) {
throw new StreamTaskException("Cannot instantiate user function.", e);
}
}
public String getStreamOperatorFactoryClassName() {
return config.getString(SERIALIZED_UDF_CLASS_NAME, null);
}
public void setIterationId(String iterationId) {
config.setString(ITERATION_ID, iterationId);
}
public String getIterationId() {
return config.getString(ITERATION_ID, "");
}
public void setIterationWaitTime(long time) {
config.setLong(ITERATON_WAIT, time);
}
public long getIterationWaitTime() {
return config.getLong(ITERATON_WAIT, 0);
}
public void setNumberOfNetworkInputs(int numberOfInputs) {
config.setInteger(NUMBER_OF_NETWORK_INPUTS, numberOfInputs);
}
public int getNumberOfNetworkInputs() {
return config.getInteger(NUMBER_OF_NETWORK_INPUTS, 0);
}
public void setNumberOfOutputs(int numberOfOutputs) {
config.setInteger(NUMBER_OF_OUTPUTS, numberOfOutputs);
}
public int getNumberOfOutputs() {
return config.getInteger(NUMBER_OF_OUTPUTS, 0);
}
/** Sets the operator level non-chained outputs. */
public void setOperatorNonChainedOutputs(List<NonChainedOutput> nonChainedOutputs) {
toBeSerializedConfigObjects.put(OP_NONCHAINED_OUTPUTS, nonChainedOutputs);
}
public List<NonChainedOutput> getOperatorNonChainedOutputs(ClassLoader cl) {
try {
List<NonChainedOutput> nonChainedOutputs =
InstantiationUtil.readObjectFromConfig(this.config, OP_NONCHAINED_OUTPUTS, cl);
return nonChainedOutputs == null ? new ArrayList<>() : nonChainedOutputs;
} catch (Exception e) {
throw new StreamTaskException("Could not instantiate non chained outputs.", e);
}
}
public void setChainedOutputs(List<StreamEdge> chainedOutputs) {
toBeSerializedConfigObjects.put(CHAINED_OUTPUTS, chainedOutputs);
}
public List<StreamEdge> getChainedOutputs(ClassLoader cl) {
try {
List<StreamEdge> chainedOutputs =
InstantiationUtil.readObjectFromConfig(this.config, CHAINED_OUTPUTS, cl);
return chainedOutputs == null ? new ArrayList<StreamEdge>() : chainedOutputs;
} catch (Exception e) {
throw new StreamTaskException("Could not instantiate chained outputs.", e);
}
}
public void setInPhysicalEdges(List<StreamEdge> inEdges) {
toBeSerializedConfigObjects.put(IN_STREAM_EDGES, inEdges);
}
public List<StreamEdge> getInPhysicalEdges(ClassLoader cl) {
try {
List<StreamEdge> inEdges =
InstantiationUtil.readObjectFromConfig(this.config, IN_STREAM_EDGES, cl);
return inEdges == null ? new ArrayList<StreamEdge>() : inEdges;
} catch (Exception e) {
throw new StreamTaskException("Could not instantiate inputs.", e);
}
}
public void setCheckpointingEnabled(boolean enabled) {
config.setBoolean(CHECKPOINTING_ENABLED, enabled);
}
public boolean isCheckpointingEnabled() {
return config.getBoolean(CHECKPOINTING_ENABLED, false);
}
public void setCheckpointMode(CheckpointingMode mode) {
config.setInteger(CHECKPOINT_MODE, mode.ordinal());
}
/** Returns the configured checkpointing mode, falling back to the default if unset. */
public CheckpointingMode getCheckpointMode() {
    final int stored = config.getInteger(CHECKPOINT_MODE, -1);
    // A negative value means the mode was never written; use the default.
    return stored < 0
            ? DEFAULT_CHECKPOINTING_MODE
            : CheckpointingMode.values()[stored];
}
public void setUnalignedCheckpointsEnabled(boolean enabled) {
config.setBoolean(ExecutionCheckpointingOptions.ENABLE_UNALIGNED, enabled);
}
public boolean isUnalignedCheckpointsEnabled() {
return config.getBoolean(ExecutionCheckpointingOptions.ENABLE_UNALIGNED, false);
}
public boolean isExactlyOnceCheckpointMode() {
return getCheckpointMode() == CheckpointingMode.EXACTLY_ONCE;
}
public Duration getAlignedCheckpointTimeout() {
return config.get(ExecutionCheckpointingOptions.ALIGNED_CHECKPOINT_TIMEOUT);
}
public void setAlignedCheckpointTimeout(Duration alignedCheckpointTimeout) {
config.set(
ExecutionCheckpointingOptions.ALIGNED_CHECKPOINT_TIMEOUT, alignedCheckpointTimeout);
}
public void setMaxConcurrentCheckpoints(int maxConcurrentCheckpoints) {
config.setInteger(
ExecutionCheckpointingOptions.MAX_CONCURRENT_CHECKPOINTS, maxConcurrentCheckpoints);
}
public int getMaxConcurrentCheckpoints() {
return config.getInteger(
ExecutionCheckpointingOptions.MAX_CONCURRENT_CHECKPOINTS,
ExecutionCheckpointingOptions.MAX_CONCURRENT_CHECKPOINTS.defaultValue());
}
public int getMaxSubtasksPerChannelStateFile() {
return config.get(
ExecutionCheckpointingOptions.UNALIGNED_MAX_SUBTASKS_PER_CHANNEL_STATE_FILE);
}
public void setMaxSubtasksPerChannelStateFile(int maxSubtasksPerChannelStateFile) {
config.set(
ExecutionCheckpointingOptions.UNALIGNED_MAX_SUBTASKS_PER_CHANNEL_STATE_FILE,
maxSubtasksPerChannelStateFile);
}
/**
* Sets the job vertex level non-chained outputs. The given output list must have the same order
* with {@link JobVertex
*/
public void setVertexNonChainedOutputs(List<NonChainedOutput> nonChainedOutputs) {
toBeSerializedConfigObjects.put(VERTEX_NONCHAINED_OUTPUTS, nonChainedOutputs);
}
public List<NonChainedOutput> getVertexNonChainedOutputs(ClassLoader cl) {
try {
List<NonChainedOutput> nonChainedOutputs =
InstantiationUtil.readObjectFromConfig(
this.config, VERTEX_NONCHAINED_OUTPUTS, cl);
return nonChainedOutputs == null ? new ArrayList<>() : nonChainedOutputs;
} catch (Exception e) {
throw new StreamTaskException("Could not instantiate outputs in order.", e);
}
}
public void setTransitiveChainedTaskConfigs(Map<Integer, StreamConfig> chainedTaskConfigs) {
if (chainedTaskConfigs != null) {
chainedTaskConfigs.forEach(
(id, config) -> chainedTaskFutures.put(id, config.getSerializationFuture()));
}
}
public Map<Integer, StreamConfig> getTransitiveChainedTaskConfigs(ClassLoader cl) {
try {
Map<Integer, StreamConfig> confs =
InstantiationUtil.readObjectFromConfig(this.config, CHAINED_TASK_CONFIG, cl);
return confs == null ? new HashMap<Integer, StreamConfig>() : confs;
} catch (Exception e) {
throw new StreamTaskException("Could not instantiate configuration.", e);
}
}
public Map<Integer, StreamConfig> getTransitiveChainedTaskConfigsWithSelf(ClassLoader cl) {
Map<Integer, StreamConfig> chainedTaskConfigs = getTransitiveChainedTaskConfigs(cl);
chainedTaskConfigs.put(getVertexID(), this);
return chainedTaskConfigs;
}
public void setOperatorID(OperatorID operatorID) {
this.config.setBytes(OPERATOR_ID, operatorID.getBytes());
}
public OperatorID getOperatorID() {
byte[] operatorIDBytes = config.getBytes(OPERATOR_ID, null);
return new OperatorID(Preconditions.checkNotNull(operatorIDBytes));
}
public void setOperatorName(String name) {
this.config.setString(OPERATOR_NAME, name);
}
public String getOperatorName() {
return this.config.getString(OPERATOR_NAME, null);
}
public void setChainIndex(int index) {
this.config.setInteger(CHAIN_INDEX, index);
}
public int getChainIndex() {
return this.config.getInteger(CHAIN_INDEX, 0);
}
public void setStateBackend(StateBackend backend) {
if (backend != null) {
toBeSerializedConfigObjects.put(STATE_BACKEND, backend);
setStateBackendUsesManagedMemory(backend.useManagedMemory());
}
}
public void setChangelogStateBackendEnabled(TernaryBoolean enabled) {
toBeSerializedConfigObjects.put(ENABLE_CHANGE_LOG_STATE_BACKEND, enabled);
}
@VisibleForTesting
public void setStateBackendUsesManagedMemory(boolean usesManagedMemory) {
this.config.setBoolean(STATE_BACKEND_USE_MANAGED_MEMORY, usesManagedMemory);
}
public StateBackend getStateBackend(ClassLoader cl) {
try {
return InstantiationUtil.readObjectFromConfig(this.config, STATE_BACKEND, cl);
} catch (Exception e) {
throw new StreamTaskException("Could not instantiate statehandle provider.", e);
}
}
public TernaryBoolean isChangelogStateBackendEnabled(ClassLoader cl) {
try {
return InstantiationUtil.readObjectFromConfig(
this.config, ENABLE_CHANGE_LOG_STATE_BACKEND, cl);
} catch (Exception e) {
throw new StreamTaskException(
"Could not instantiate change log state backend enable flag.", e);
}
}
public void setSavepointDir(Path directory) {
if (directory != null) {
toBeSerializedConfigObjects.put(SAVEPOINT_DIR, directory);
}
}
public Path getSavepointDir(ClassLoader cl) {
try {
return InstantiationUtil.readObjectFromConfig(this.config, SAVEPOINT_DIR, cl);
} catch (Exception e) {
throw new StreamTaskException("Could not instantiate savepoint directory.", e);
}
}
public void setCheckpointStorage(CheckpointStorage storage) {
if (storage != null) {
toBeSerializedConfigObjects.put(CHECKPOINT_STORAGE, storage);
}
}
public CheckpointStorage getCheckpointStorage(ClassLoader cl) {
try {
return InstantiationUtil.readObjectFromConfig(this.config, CHECKPOINT_STORAGE, cl);
} catch (Exception e) {
throw new StreamTaskException("Could not instantiate checkpoint storage.", e);
}
}
public void setTimerServiceProvider(InternalTimeServiceManager.Provider timerServiceProvider) {
if (timerServiceProvider != null) {
toBeSerializedConfigObjects.put(TIMER_SERVICE_PROVIDER, timerServiceProvider);
}
}
public InternalTimeServiceManager.Provider getTimerServiceProvider(ClassLoader cl) {
try {
return InstantiationUtil.readObjectFromConfig(this.config, TIMER_SERVICE_PROVIDER, cl);
} catch (Exception e) {
throw new StreamTaskException("Could not instantiate timer service provider.", e);
}
}
public void setStatePartitioner(int input, KeySelector<?, ?> partitioner) {
toBeSerializedConfigObjects.put(STATE_PARTITIONER + input, partitioner);
}
public <IN, K extends Serializable> KeySelector<IN, K> getStatePartitioner(
int input, ClassLoader cl) {
try {
return InstantiationUtil.readObjectFromConfig(
this.config, STATE_PARTITIONER + input, cl);
} catch (Exception e) {
throw new StreamTaskException("Could not instantiate state partitioner.", e);
}
}
public void setStateKeySerializer(TypeSerializer<?> serializer) {
toBeSerializedConfigObjects.put(STATE_KEY_SERIALIZER, serializer);
}
public <K> TypeSerializer<K> getStateKeySerializer(ClassLoader cl) {
try {
return InstantiationUtil.readObjectFromConfig(this.config, STATE_KEY_SERIALIZER, cl);
} catch (Exception e) {
throw new StreamTaskException(
"Could not instantiate state key serializer from task config.", e);
}
}
public void setChainStart() {
config.setBoolean(IS_CHAINED_VERTEX, true);
}
public boolean isChainStart() {
return config.getBoolean(IS_CHAINED_VERTEX, false);
}
public void setChainEnd() {
config.setBoolean(CHAIN_END, true);
}
public boolean isChainEnd() {
return config.getBoolean(CHAIN_END, false);
}
@Override
// Debug-oriented dump of this config; deserializes several stored objects, so it is
// comparatively expensive and intended for logging only.
public String toString() {
ClassLoader cl = getClass().getClassLoader();
StringBuilder builder = new StringBuilder();
builder.append("\n=======================");
builder.append("Stream Config");
builder.append("=======================");
builder.append("\nNumber of non-chained inputs: ").append(getNumberOfNetworkInputs());
builder.append("\nNumber of non-chained outputs: ").append(getNumberOfOutputs());
// NOTE(review): getOperatorNonChainedOutputs deserializes from the config and is
// called twice (here and in the loop below) — presumably acceptable for a debug
// string, but confirm if this is ever called on a hot path.
builder.append("\nOutput names: ").append(getOperatorNonChainedOutputs(cl));
builder.append("\nPartitioning:");
for (NonChainedOutput output : getOperatorNonChainedOutputs(cl)) {
String outputName = output.getDataSetId().toString();
builder.append("\n\t").append(outputName).append(": ").append(output.getPartitioner());
}
builder.append("\nChained subtasks: ").append(getChainedOutputs(cl));
try {
builder.append("\nOperator: ")
.append(getStreamOperatorFactory(cl).getClass().getSimpleName());
} catch (Exception e) {
// The operator factory may fail to deserialize (e.g. user class missing);
// degrade gracefully rather than breaking the whole dump.
builder.append("\nOperator: Missing");
}
builder.append("\nState Monitoring: ").append(isCheckpointingEnabled());
if (isChainStart() && getChainedOutputs(cl).size() > 0) {
builder.append(
"\n\n\n---------------------\nChained task configs\n---------------------\n");
builder.append(getTransitiveChainedTaskConfigs(cl));
}
return builder.toString();
}
public void setGraphContainingLoops(boolean graphContainingLoops) {
config.setBoolean(GRAPH_CONTAINING_LOOPS, graphContainingLoops);
}
public boolean isGraphContainingLoops() {
return config.getBoolean(GRAPH_CONTAINING_LOOPS, false);
}
/**
* Requirements of the different inputs of an operator. Each input can have a different
* requirement. For all {@link
* records of a given key are passed to the operator consecutively before moving on to the next
* group.
*/
public enum InputRequirement {
/**
* Records from all sorted inputs are grouped (sorted) by key and are then fed to the
* operator one group at a time. This "zig-zags" between different inputs if records for the
* same key arrive on multiple inputs to ensure that the operator sees all records with a
* key as one consecutive group.
*/
SORTED,
/**
* Records from {@link
* records from {@link
* within the different {@link
*/
PASS_THROUGH;
}
/** Interface representing chained inputs. */
public interface InputConfig extends Serializable {}
/** A representation of a Network {@link InputConfig}. */
public static class NetworkInputConfig implements InputConfig {

    private final TypeSerializer<?> typeSerializer;
    private final InputRequirement inputRequirement;
    // Made final: the index is assigned exactly once in the constructor and never
    // mutated, so the class is now fully immutable.
    private final int inputGateIndex;

    public NetworkInputConfig(TypeSerializer<?> typeSerializer, int inputGateIndex) {
        this(typeSerializer, inputGateIndex, InputRequirement.PASS_THROUGH);
    }

    public NetworkInputConfig(
            TypeSerializer<?> typeSerializer,
            int inputGateIndex,
            InputRequirement inputRequirement) {
        this.typeSerializer = typeSerializer;
        this.inputGateIndex = inputGateIndex;
        this.inputRequirement = inputRequirement;
    }

    public TypeSerializer<?> getTypeSerializer() {
        return typeSerializer;
    }

    public int getInputGateIndex() {
        return inputGateIndex;
    }

    public InputRequirement getInputRequirement() {
        return inputRequirement;
    }
}
/** A serialized representation of an input. */
public static class SourceInputConfig implements InputConfig {

    private final StreamEdge inputEdge;

    public SourceInputConfig(StreamEdge inputEdge) {
        this.inputEdge = inputEdge;
    }

    public StreamEdge getInputEdge() {
        return inputEdge;
    }

    @Override
    public String toString() {
        return inputEdge.toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == this) {
            return true;
        }
        if (!(obj instanceof SourceInputConfig)) {
            return false;
        }
        SourceInputConfig other = (SourceInputConfig) obj;
        return Objects.equals(other.inputEdge, inputEdge);
    }

    @Override
    public int hashCode() {
        // Objects.hashCode tolerates a null edge, keeping hashCode() consistent with
        // the null-safe Objects.equals used in equals(); the previous
        // inputEdge.hashCode() threw NPE for a null edge that equals() handled.
        return Objects.hashCode(inputEdge);
    }
}
public static boolean requiresSorting(StreamConfig.InputConfig inputConfig) {
return inputConfig instanceof StreamConfig.NetworkInputConfig
&& ((StreamConfig.NetworkInputConfig) inputConfig).getInputRequirement()
== StreamConfig.InputRequirement.SORTED;
}
} |
what is the difference between DUPLICATE_CREATE_TASK and TASK_EXISTS? | public long createTask(Task task) {
if (!tryLock()) {
return TASK_CREATE_TIMEOUT;
}
try {
if (nameToTaskMap.containsKey(task.getName())) {
return TASK_EXISTS;
}
nameToTaskMap.put(task.getName(), task);
if (manualTaskMap.containsKey(task.getId())) {
return DUPLICATE_CREATE_TASK;
}
manualTaskMap.put(task.getId(), task);
return task.getId();
} finally {
unlock();
}
} | return TASK_EXISTS; | public long createTask(Task task) {
if (!tryLock()) {
return GET_TASK_LOCK_FAILED;
}
try {
if (nameToTaskMap.containsKey(task.getName())) {
return TASK_EXISTS;
}
nameToTaskMap.put(task.getName(), task);
if (manualTaskMap.containsKey(task.getId())) {
return DUPLICATE_CREATE_TASK;
}
manualTaskMap.put(task.getId(), task);
return task.getId();
} finally {
unlock();
}
} | class TaskManager {
private static final Logger LOG = LogManager.getLogger(TaskManager.class);
public static final long TASK_EXISTS = -1L;
public static final long DUPLICATE_CREATE_TASK = -2L;
public static final long TASK_CREATE_TIMEOUT = -3L;
private final Map<Long, Task> manualTaskMap;
private final Map<String, Task> nameToTaskMap;
private final TaskRunManager taskRunManager;
private final ScheduledExecutorService dispatchScheduler = Executors.newScheduledThreadPool(1);
private final QueryableReentrantLock lock;
// Initializes the task registries and starts a 1-second periodic dispatch loop that
// drives pending/running task runs.
public TaskManager() {
manualTaskMap = Maps.newConcurrentMap();
nameToTaskMap = Maps.newConcurrentMap();
taskRunManager = new TaskRunManager();
// Fair lock so dispatch and DDL operations are served in arrival order.
lock = new QueryableReentrantLock(true);
dispatchScheduler.scheduleAtFixedRate(() -> {
// Skip this tick entirely if the lock cannot be acquired within the timeout;
// the next tick will retry.
if (!tryLock()) {
return;
}
try {
taskRunManager.checkRunningTaskRun();
taskRunManager.scheduledPendingTaskRun();
} catch (Exception ex) {
// Keep the scheduler alive: an uncaught exception would cancel the
// periodic task permanently.
LOG.warn("failed to dispatch job.", ex);
} finally {
unlock();
}
}, 0, 1, TimeUnit.SECONDS);
}
/**
 * Builds and submits a run for the named task.
 *
 * @param taskName name of the task to execute
 * @return the id returned by the run manager, or {@code null} if no such task exists
 */
public String executeTask(String taskName) {
    final Task target = nameToTaskMap.get(taskName);
    return target == null
            ? null
            : taskRunManager.addTaskRun(TaskRunBuilder.newBuilder(target).build());
}
// Removes the named task from both registries; silently ignores unknown names.
// NOTE(review): this is a check-then-act on two concurrent maps without holding the
// manager lock — presumably callers serialize drops, or a concurrent create with the
// same name could interleave; confirm the intended locking discipline.
public void dropTask(String taskName) {
Task task = nameToTaskMap.get(taskName);
if (task == null) {
return;
}
nameToTaskMap.remove(taskName);
manualTaskMap.remove(task.getId());
}
/** Returns a snapshot list of all registered manual tasks. */
public List<Task> showTask() {
    return Lists.newArrayList(manualTaskMap.values());
}
/**
 * Tries to acquire the manager lock within one second, logging the current owner on
 * contention.
 *
 * @return true if the lock is held by the current thread after this call
 */
private boolean tryLock() {
    try {
        if (!lock.tryLock(1, TimeUnit.SECONDS)) {
            Thread owner = lock.getOwner();
            if (owner != null) {
                // Fixed copy-pasted wording: this is the TaskManager's task lock,
                // not a materialized-view lock.
                LOG.warn("task lock is held by: {}", Util.dumpThread(owner, 50));
            } else {
                LOG.warn("task lock owner is null");
            }
            return false;
        }
        return true;
    } catch (InterruptedException e) {
        LOG.warn("got exception while getting task lock", e);
        // Restore the interrupt status so callers further up the stack can still
        // observe the interruption; swallowing it would silently lose the signal.
        Thread.currentThread().interrupt();
    }
    return lock.isHeldByCurrentThread();
}
private void unlock() {
this.lock.unlock();
}
public void replayCreateTask(Task task) {
createTask(task);
}
public void replayDropTask(String taskName) {
dropTask(taskName);
}
public TaskRunManager getTaskRunManager() {
return taskRunManager;
}
/**
 * Creates a task from the SUBMIT TASK statement and immediately schedules a run for it.
 *
 * @return a single-row result set of (TaskName, "Submitted")
 * @throws DdlException if a task with the same name exists or creation fails
 */
public ShowResultSet handleSubmitTaskStmt(SubmitTaskStmt submitTaskStmt) throws DdlException {
    Task task = TaskBuilder.buildTask(submitTaskStmt, ConnectContext.get());
    String taskName = task.getName();
    // Use the primitive return value directly; boxing it into a Long only to compare
    // against long constants caused needless autoboxing/unboxing.
    long createResult = this.createTask(task);
    if (createResult < 0) {
        if (createResult == TASK_EXISTS) {
            throw new DdlException("Task " + taskName + " already exist.");
        }
        throw new DdlException("Failed to create Task: " + taskName + ", ErrorCode: " + createResult);
    }
    // Kick off the first run for its side effect; the returned query id was previously
    // stored in an unused local and is not surfaced in the result set.
    this.executeTask(taskName);
    ShowResultSetMetaData.Builder builder = ShowResultSetMetaData.builder();
    builder.addColumn(new Column("TaskName", ScalarType.createVarchar(40)));
    builder.addColumn(new Column("Status", ScalarType.createVarchar(10)));
    List<String> item = ImmutableList.of(taskName, "Submitted");
    List<List<String>> result = ImmutableList.of(item);
    return new ShowResultSet(builder.build(), result);
}
} | class TaskManager {
private static final Logger LOG = LogManager.getLogger(TaskManager.class);
public static final long TASK_EXISTS = -1L;
public static final long DUPLICATE_CREATE_TASK = -2L;
public static final long GET_TASK_LOCK_FAILED = -3L;
private final Map<Long, Task> manualTaskMap;
private final Map<String, Task> nameToTaskMap;
private final TaskRunManager taskRunManager;
private final ScheduledExecutorService dispatchScheduler = Executors.newScheduledThreadPool(1);
private final QueryableReentrantLock lock;
public TaskManager() {
manualTaskMap = Maps.newConcurrentMap();
nameToTaskMap = Maps.newConcurrentMap();
taskRunManager = new TaskRunManager();
lock = new QueryableReentrantLock(true);
dispatchScheduler.scheduleAtFixedRate(() -> {
if (!tryLock()) {
return;
}
try {
taskRunManager.checkRunningTaskRun();
taskRunManager.scheduledPendingTaskRun();
} catch (Exception ex) {
LOG.warn("failed to dispatch job.", ex);
} finally {
unlock();
}
}, 0, 1, TimeUnit.SECONDS);
}
public String executeTask(String taskName) {
Task task = nameToTaskMap.get(taskName);
if (task == null) {
return null;
}
return taskRunManager.addTaskRun(TaskRunBuilder.newBuilder(task).build());
}
public void dropTask(String taskName) {
Task task = nameToTaskMap.get(taskName);
if (task == null) {
return;
}
nameToTaskMap.remove(taskName);
manualTaskMap.remove(task.getId());
}
public List<Task> showTask() {
List<Task> taskList = Lists.newArrayList();
taskList.addAll(manualTaskMap.values());
return taskList;
}
private boolean tryLock() {
try {
if (!lock.tryLock(1, TimeUnit.SECONDS)) {
Thread owner = lock.getOwner();
if (owner != null) {
LOG.warn("task lock is held by: {}", Util.dumpThread(owner, 50));
} else {
LOG.warn("task lock owner is null");
}
return false;
}
return true;
} catch (InterruptedException e) {
LOG.warn("got exception while getting task lock", e);
}
return lock.isHeldByCurrentThread();
}
private void unlock() {
this.lock.unlock();
}
public void replayCreateTask(Task task) {
createTask(task);
}
public void replayDropTask(String taskName) {
dropTask(taskName);
}
public TaskRunManager getTaskRunManager() {
return taskRunManager;
}
public ShowResultSet handleSubmitTaskStmt(SubmitTaskStmt submitTaskStmt) throws DdlException {
Task task = TaskBuilder.buildTask(submitTaskStmt, ConnectContext.get());
Long createResult = this.createTask(task);
String taskName = task.getName();
if (createResult < 0) {
if (createResult == TASK_EXISTS) {
throw new DdlException("Task " + taskName + " already exist.");
}
throw new DdlException("Failed to create Task: " + taskName + ", ErrorCode: " + createResult);
}
String queryId = this.executeTask(taskName);
ShowResultSetMetaData.Builder builder = ShowResultSetMetaData.builder();
builder.addColumn(new Column("TaskName", ScalarType.createVarchar(40)));
builder.addColumn(new Column("Status", ScalarType.createVarchar(10)));
List<String> item = ImmutableList.of(taskName, "Submitted");
List<List<String>> result = ImmutableList.of(item);
return new ShowResultSet(builder.build(), result);
}
} |
I agree with you - your solution was more elegant. But we try to avoid lambdas for efficiency reasons. Of course we're not extreme about it: lambdas are a fine solution when they are a good solution, but I still tend to prefer avoiding them in such situations. | protected ConnectionProvider selectConnectionProvider(final String tenantIdentifier) {
LOG.debugv("selectConnectionProvider({0})", tenantIdentifier);
ConnectionProvider provider = providerMap.get(tenantIdentifier);
if (provider == null) {
final ConnectionProvider connectionProvider = resolveConnectionProvider(tenantIdentifier);
providerMap.put(tenantIdentifier, connectionProvider);
return connectionProvider;
}
return provider;
} | final ConnectionProvider connectionProvider = resolveConnectionProvider(tenantIdentifier); | protected ConnectionProvider selectConnectionProvider(final String tenantIdentifier) {
LOG.debugv("selectConnectionProvider({0})", tenantIdentifier);
ConnectionProvider provider = providerMap.get(tenantIdentifier);
if (provider == null) {
final ConnectionProvider connectionProvider = resolveConnectionProvider(tenantIdentifier);
providerMap.put(tenantIdentifier, connectionProvider);
return connectionProvider;
}
return provider;
} | class HibernateMultiTenantConnectionProvider extends AbstractMultiTenantConnectionProvider {
private static final Logger LOG = Logger.getLogger(HibernateMultiTenantConnectionProvider.class);
private final Map<String, ConnectionProvider> providerMap = new ConcurrentHashMap<>();
@Override
protected ConnectionProvider getAnyConnectionProvider() {
String tenantId = tenantResolver().getDefaultTenantId();
if (tenantId == null) {
throw new IllegalStateException("Method 'TenantResolver.getDefaultTenantId()' returned a null value. "
+ "This violates the contract of the interface!");
}
return selectConnectionProvider(tenantId);
}
@Override
private static ConnectionProvider resolveConnectionProvider(String tenantIdentifier) {
LOG.debugv("resolveConnectionProvider({0})", tenantIdentifier);
InstanceHandle<TenantConnectionResolver> instance = Arc.container().instance(TenantConnectionResolver.class);
if (!instance.isAvailable()) {
throw new IllegalStateException(
"No instance of " + TenantConnectionResolver.class.getSimpleName() + " was found. "
+ "You need to create an implementation for this interface to allow resolving the current tenant connection.");
}
TenantConnectionResolver resolver = instance.get();
ConnectionProvider cp = resolver.resolve(tenantIdentifier);
if (cp == null) {
throw new IllegalStateException("Method 'TenantConnectionResolver."
+ "resolve(String)' returned a null value. This violates the contract of the interface!");
}
return cp;
}
/**
* Retrieves the tenant resolver or fails if it is not available.
*
* @return Current tenant resolver.
*/
private static TenantResolver tenantResolver() {
InstanceHandle<TenantResolver> resolverInstance = Arc.container().instance(TenantResolver.class);
if (!resolverInstance.isAvailable()) {
throw new IllegalStateException("No instance of " + TenantResolver.class.getName() + " was found. "
+ "You need to create an implementation for this interface to allow resolving the current tenant identifier.");
}
return resolverInstance.get();
}
} | class HibernateMultiTenantConnectionProvider extends AbstractMultiTenantConnectionProvider {
private static final Logger LOG = Logger.getLogger(HibernateMultiTenantConnectionProvider.class);
private final Map<String, ConnectionProvider> providerMap = new ConcurrentHashMap<>();
@Override
protected ConnectionProvider getAnyConnectionProvider() {
String tenantId = tenantResolver().getDefaultTenantId();
if (tenantId == null) {
throw new IllegalStateException("Method 'TenantResolver.getDefaultTenantId()' returned a null value. "
+ "This violates the contract of the interface!");
}
return selectConnectionProvider(tenantId);
}
@Override
private static ConnectionProvider resolveConnectionProvider(String tenantIdentifier) {
LOG.debugv("resolveConnectionProvider({0})", tenantIdentifier);
InstanceHandle<TenantConnectionResolver> instance = Arc.container().instance(TenantConnectionResolver.class);
if (!instance.isAvailable()) {
throw new IllegalStateException(
"No instance of " + TenantConnectionResolver.class.getSimpleName() + " was found. "
+ "You need to create an implementation for this interface to allow resolving the current tenant connection.");
}
TenantConnectionResolver resolver = instance.get();
ConnectionProvider cp = resolver.resolve(tenantIdentifier);
if (cp == null) {
throw new IllegalStateException("Method 'TenantConnectionResolver."
+ "resolve(String)' returned a null value. This violates the contract of the interface!");
}
return cp;
}
/**
* Retrieves the tenant resolver or fails if it is not available.
*
* @return Current tenant resolver.
*/
private static TenantResolver tenantResolver() {
InstanceHandle<TenantResolver> resolverInstance = Arc.container().instance(TenantResolver.class);
if (!resolverInstance.isAvailable()) {
throw new IllegalStateException("No instance of " + TenantResolver.class.getName() + " was found. "
+ "You need to create an implementation for this interface to allow resolving the current tenant identifier.");
}
return resolverInstance.get();
}
} |
Hi, @terrymanu, I've addressed your concern. Please help to take another look. Thanks | void assertUnmarshalYamlAgentConfiguration() {
InputStream inputStream = getClass().getResourceAsStream("/conf/agent.yaml");
YamlAgentConfiguration actual = AgentYamlEngine.unmarshalYamlAgentConfiguration(inputStream);
assertNotNull(actual);
} | assertNotNull(actual); | void assertUnmarshalYamlAgentConfiguration() throws IOException {
try (InputStream inputStream = Files.newInputStream(new File(getResourceURL(), "/conf/agent.yaml").toPath())) {
YamlAgentConfiguration yamlAgentConfig = AgentYamlEngine.unmarshalYamlAgentConfiguration(inputStream);
Map<String, PluginConfiguration> actual = YamlPluginsConfigurationSwapper.swap(yamlAgentConfig);
assertThat(actual.size(), is(3));
assertLogFixturePluginConfiguration(actual.get("log_fixture"));
assertMetricsPluginConfiguration(actual.get("metrics_fixture"));
assertTracingPluginConfiguration(actual.get("tracing_fixture"));
}
} | class AgentYamlEngineTest {
@Test
@Test
void assertUnmarshalYamlAdvisorsConfiguration() {
InputStream inputStream = getClass().getResourceAsStream("/META-INF/conf/advisors.yaml");
YamlAdvisorsConfiguration actual = AgentYamlEngine.unmarshalYamlAdvisorsConfiguration(inputStream);
assertNotNull(actual);
}
} | class AgentYamlEngineTest {
@Test
@Test
void assertUnmarshalYamlAdvisorsConfiguration() {
InputStream inputStream = getClass().getResourceAsStream("/META-INF/conf/advisors.yaml");
YamlAdvisorsConfiguration actual = AgentYamlEngine.unmarshalYamlAdvisorsConfiguration(inputStream);
assertYamlAdvisorConfiguration(actual.getAdvisors().iterator().next());
}
private String getResourceURL() throws UnsupportedEncodingException {
return URLDecoder.decode(Objects.requireNonNull(Thread.currentThread().getContextClassLoader().getResource("")).getFile(), "UTF8");
}
private void assertLogFixturePluginConfiguration(final PluginConfiguration actual) {
assertNull(actual.getHost());
assertNull(actual.getPassword());
assertThat(actual.getPort(), is(8080));
assertThat(actual.getProps().size(), is(1));
assertThat(actual.getProps().get("key"), is("value"));
}
private void assertMetricsPluginConfiguration(final PluginConfiguration actual) {
assertThat(actual.getHost(), is("localhost"));
assertThat(actual.getPassword(), is("random"));
assertThat(actual.getPort(), is(8081));
assertThat(actual.getProps().size(), is(1));
assertThat(actual.getProps().get("key"), is("value"));
}
private void assertTracingPluginConfiguration(final PluginConfiguration actual) {
assertThat(actual.getHost(), is("localhost"));
assertThat(actual.getPassword(), is("random"));
assertThat(actual.getPort(), is(8082));
assertThat(actual.getProps().size(), is(1));
assertThat(actual.getProps().get("key"), is("value"));
}
private void assertYamlAdvisorConfiguration(final YamlAdvisorConfiguration actual) {
assertThat(actual.getTarget(), is(YamlTargetObjectFixture.class.getName()));
assertThat(actual.getAdvice(), is(YamlAdviceFixture.class.getName()));
assertThat(actual.getTarget(), is("org.apache.shardingsphere.agent.core.advisor.config.yaml.fixture.YamlTargetObjectFixture"));
assertThat(actual.getAdvice(), is("org.apache.shardingsphere.agent.core.advisor.config.yaml.fixture.YamlAdviceFixture"));
assertThat(actual.getPointcuts().size(), is(8));
List<YamlPointcutConfiguration> actualYamlPointcutConfigs = new ArrayList<>(actual.getPointcuts());
assertYamlPointcutConfiguration(actualYamlPointcutConfigs.get(0), null, "constructor", Collections.emptyList());
assertYamlPointcutConfiguration(actualYamlPointcutConfigs.get(1), null, "constructor", Collections.singletonList(createYamlPointcutParameterConfiguration(0, "java.lang.String")));
assertYamlPointcutConfiguration(actualYamlPointcutConfigs.get(2), "call", "method", Collections.emptyList());
assertYamlPointcutConfiguration(actualYamlPointcutConfigs.get(3), "call", "method", Collections.singletonList(createYamlPointcutParameterConfiguration(0, "java.lang.String")));
assertYamlPointcutConfiguration(actualYamlPointcutConfigs.get(4), "call", "method",
Arrays.asList(createYamlPointcutParameterConfiguration(0, "java.lang.String"), createYamlPointcutParameterConfiguration(1, "java.lang.String")));
assertYamlPointcutConfiguration(actualYamlPointcutConfigs.get(5), "staticCall", "method", Collections.emptyList());
assertYamlPointcutConfiguration(actualYamlPointcutConfigs.get(6), "staticCall", "method", Collections.singletonList(createYamlPointcutParameterConfiguration(0, "java.lang.String")));
assertYamlPointcutConfiguration(actualYamlPointcutConfigs.get(7), "staticCall", "method",
Arrays.asList(createYamlPointcutParameterConfiguration(0, "java.lang.String"), createYamlPointcutParameterConfiguration(1, "java.lang.String")));
}
private void assertYamlPointcutConfiguration(final YamlPointcutConfiguration actual,
final String expectedName, final String expectedType, final List<YamlPointcutParameterConfiguration> expectedParams) {
assertThat(actual.getName(), is(expectedName));
assertThat(actual.getType(), is(expectedType));
assertThat(actual.getParams().size(), is(expectedParams.size()));
int count = 0;
for (YamlPointcutParameterConfiguration each : actual.getParams()) {
assertYamlPointcutParameterConfiguration(each, expectedParams.get(count));
count++;
}
}
private void assertYamlPointcutParameterConfiguration(final YamlPointcutParameterConfiguration actual, final YamlPointcutParameterConfiguration expected) {
assertThat(actual.getIndex(), is(expected.getIndex()));
assertThat(actual.getType(), is(expected.getType()));
}
private YamlPointcutParameterConfiguration createYamlPointcutParameterConfiguration(final int index, final String type) {
YamlPointcutParameterConfiguration result = new YamlPointcutParameterConfiguration();
result.setIndex(index);
result.setType(type);
return result;
}
} |
or even pass in maxSize to the serialization logic and only serialize up to that point. | public void onReceive(Object message) {
try {
if (message instanceof AddMetric) {
AddMetric added = (AddMetric) message;
String metricName = added.metricName;
Metric metric = added.metric;
AbstractMetricGroup group = added.group;
QueryScopeInfo info = group.getQueryServiceMetricInfo(FILTER);
if (metric instanceof Counter) {
counters.put((Counter) metric, new Tuple2<>(info, FILTER.filterCharacters(metricName)));
} else if (metric instanceof Gauge) {
gauges.put((Gauge<?>) metric, new Tuple2<>(info, FILTER.filterCharacters(metricName)));
} else if (metric instanceof Histogram) {
histograms.put((Histogram) metric, new Tuple2<>(info, FILTER.filterCharacters(metricName)));
} else if (metric instanceof Meter) {
meters.put((Meter) metric, new Tuple2<>(info, FILTER.filterCharacters(metricName)));
}
} else if (message instanceof RemoveMetric) {
Metric metric = (((RemoveMetric) message).metric);
if (metric instanceof Counter) {
this.counters.remove(metric);
} else if (metric instanceof Gauge) {
this.gauges.remove(metric);
} else if (metric instanceof Histogram) {
this.histograms.remove(metric);
} else if (metric instanceof Meter) {
this.meters.remove(metric);
}
} else if (message instanceof CreateDump) {
MetricDumpSerialization.MetricSerializationResult dump = serializer.serialize(counters, gauges, histograms, meters);
int realMsgSize = dump.serializedMetrics.length;
if (realMsgSize > maximumFramesize) {
String overSizeErrorMsg = "The metric dump message size : " + realMsgSize
+ " exceeds the maximum akka framesize : " + maximumFramesize + ".";
LOG.error(overSizeErrorMsg);
getSender().tell(new Status.Failure(new IOException(overSizeErrorMsg)), getSelf());
} else {
getSender().tell(dump, getSelf());
}
} else {
LOG.warn("MetricQueryServiceActor received an invalid message. " + message.toString());
getSender().tell(new Status.Failure(new IOException("MetricQueryServiceActor received an invalid message. " + message.toString())), getSelf());
}
} catch (Exception e) {
LOG.warn("An exception occurred while processing a message.", e);
}
} | getSender().tell(new Status.Failure(new IOException(overSizeErrorMsg)), getSelf()); | public void onReceive(Object message) {
try {
if (message instanceof AddMetric) {
AddMetric added = (AddMetric) message;
String metricName = added.metricName;
Metric metric = added.metric;
AbstractMetricGroup group = added.group;
QueryScopeInfo info = group.getQueryServiceMetricInfo(FILTER);
if (metric instanceof Counter) {
counters.put((Counter) metric, new Tuple2<>(info, FILTER.filterCharacters(metricName)));
} else if (metric instanceof Gauge) {
gauges.put((Gauge<?>) metric, new Tuple2<>(info, FILTER.filterCharacters(metricName)));
} else if (metric instanceof Histogram) {
histograms.put((Histogram) metric, new Tuple2<>(info, FILTER.filterCharacters(metricName)));
} else if (metric instanceof Meter) {
meters.put((Meter) metric, new Tuple2<>(info, FILTER.filterCharacters(metricName)));
}
} else if (message instanceof RemoveMetric) {
Metric metric = (((RemoveMetric) message).metric);
if (metric instanceof Counter) {
this.counters.remove(metric);
} else if (metric instanceof Gauge) {
this.gauges.remove(metric);
} else if (metric instanceof Histogram) {
this.histograms.remove(metric);
} else if (metric instanceof Meter) {
this.meters.remove(metric);
}
} else if (message instanceof CreateDump) {
MetricDumpSerialization.MetricSerializationResult dump = serializer.serialize(counters, gauges, histograms, meters);
dump = enforceSizeLimit(dump);
getSender().tell(dump, getSelf());
} else {
LOG.warn("MetricQueryServiceActor received an invalid message. " + message.toString());
getSender().tell(new Status.Failure(new IOException("MetricQueryServiceActor received an invalid message. " + message.toString())), getSelf());
}
} catch (Exception e) {
LOG.warn("An exception occurred while processing a message.", e);
}
} | class MetricQueryService extends UntypedActor {
private static final Logger LOG = LoggerFactory.getLogger(MetricQueryService.class);
public static final String METRIC_QUERY_SERVICE_NAME = "MetricQueryService";
public static final String MAXIMUM_FRAME_SIZE_PATH = "akka.remote.netty.tcp.maximum-frame-size";
private static final CharacterFilter FILTER = new CharacterFilter() {
@Override
public String filterCharacters(String input) {
return replaceInvalidChars(input);
}
};
private final MetricDumpSerializer serializer = new MetricDumpSerializer();
private final Map<Gauge<?>, Tuple2<QueryScopeInfo, String>> gauges = new HashMap<>();
private final Map<Counter, Tuple2<QueryScopeInfo, String>> counters = new HashMap<>();
private final Map<Histogram, Tuple2<QueryScopeInfo, String>> histograms = new HashMap<>();
private final Map<Meter, Tuple2<QueryScopeInfo, String>> meters = new HashMap<>();
private long maximumFramesize;
@Override
public void preStart() throws Exception {
if (getContext().system().settings().config().hasPath(MAXIMUM_FRAME_SIZE_PATH)) {
maximumFramesize = getContext().system().settings().config().getBytes(MAXIMUM_FRAME_SIZE_PATH);
} else {
maximumFramesize = Long.MAX_VALUE;
}
}
@Override
public void postStop() {
serializer.close();
}
@Override
/**
* Lightweight method to replace unsupported characters.
* If the string does not contain any unsupported characters, this method creates no
* new string (and in fact no new objects at all).
*
* <p>Replacements:
*
* <ul>
* <li>{@code space : . ,} are replaced by {@code _} (underscore)</li>
* </ul>
*/
static String replaceInvalidChars(String str) {
char[] chars = null;
final int strLen = str.length();
int pos = 0;
for (int i = 0; i < strLen; i++) {
final char c = str.charAt(i);
switch (c) {
case ' ':
case '.':
case ':':
case ',':
if (chars == null) {
chars = str.toCharArray();
}
chars[pos++] = '_';
break;
default:
if (chars != null) {
chars[pos] = c;
}
pos++;
}
}
return chars == null ? str : new String(chars, 0, pos);
}
/**
* Starts the MetricQueryService actor in the given actor system.
*
* @param actorSystem The actor system running the MetricQueryService
* @param resourceID resource ID to disambiguate the actor name
* @return actor reference to the MetricQueryService
*/
public static ActorRef startMetricQueryService(ActorSystem actorSystem, ResourceID resourceID) {
String actorName = resourceID == null
? METRIC_QUERY_SERVICE_NAME
: METRIC_QUERY_SERVICE_NAME + "_" + resourceID.getResourceIdString();
return actorSystem.actorOf(Props.create(MetricQueryService.class), actorName);
}
/**
* Utility method to notify a MetricQueryService of an added metric.
*
* @param service MetricQueryService to notify
* @param metric added metric
* @param metricName metric name
* @param group group the metric was added on
*/
public static void notifyOfAddedMetric(ActorRef service, Metric metric, String metricName, AbstractMetricGroup group) {
service.tell(new AddMetric(metricName, metric, group), null);
}
/**
* Utility method to notify a MetricQueryService of a removed metric.
*
* @param service MetricQueryService to notify
* @param metric removed metric
*/
public static void notifyOfRemovedMetric(ActorRef service, Metric metric) {
service.tell(new RemoveMetric(metric), null);
}
private static class AddMetric {
private final String metricName;
private final Metric metric;
private final AbstractMetricGroup group;
private AddMetric(String metricName, Metric metric, AbstractMetricGroup group) {
this.metricName = metricName;
this.metric = metric;
this.group = group;
}
}
private static class RemoveMetric {
private final Metric metric;
private RemoveMetric(Metric metric) {
this.metric = metric;
}
}
public static Object getCreateDump() {
return CreateDump.INSTANCE;
}
private static class CreateDump implements Serializable {
private static final CreateDump INSTANCE = new CreateDump();
}
} | class MetricQueryService extends UntypedActor {
private static final Logger LOG = LoggerFactory.getLogger(MetricQueryService.class);
public static final String METRIC_QUERY_SERVICE_NAME = "MetricQueryService";
private static final String SIZE_EXCEEDED_LOG_TEMPLATE = "{} will not be reported as the metric dump would exceed the maximum size of {} bytes.";
private static final CharacterFilter FILTER = new CharacterFilter() {
@Override
public String filterCharacters(String input) {
return replaceInvalidChars(input);
}
};
private final MetricDumpSerializer serializer = new MetricDumpSerializer();
private final Map<Gauge<?>, Tuple2<QueryScopeInfo, String>> gauges = new HashMap<>();
private final Map<Counter, Tuple2<QueryScopeInfo, String>> counters = new HashMap<>();
private final Map<Histogram, Tuple2<QueryScopeInfo, String>> histograms = new HashMap<>();
private final Map<Meter, Tuple2<QueryScopeInfo, String>> meters = new HashMap<>();
private final long messageSizeLimit;
public MetricQueryService(long messageSizeLimit) {
this.messageSizeLimit = messageSizeLimit;
}
@Override
public void postStop() {
serializer.close();
}
@Override
private MetricDumpSerialization.MetricSerializationResult enforceSizeLimit(
MetricDumpSerialization.MetricSerializationResult serializationResult) {
int currentLength = 0;
boolean hasExceededBefore = false;
byte[] serializedCounters = serializationResult.serializedCounters;
int numCounters = serializationResult.numCounters;
if (exceedsMessageSizeLimit(currentLength + serializationResult.serializedCounters.length)) {
logDumpSizeWouldExceedLimit("Counters", hasExceededBefore);
hasExceededBefore = true;
serializedCounters = new byte[0];
numCounters = 0;
} else {
currentLength += serializedCounters.length;
}
byte[] serializedMeters = serializationResult.serializedMeters;
int numMeters = serializationResult.numMeters;
if (exceedsMessageSizeLimit(currentLength + serializationResult.serializedMeters.length)) {
logDumpSizeWouldExceedLimit("Meters", hasExceededBefore);
hasExceededBefore = true;
serializedMeters = new byte[0];
numMeters = 0;
} else {
currentLength += serializedMeters.length;
}
byte[] serializedGauges = serializationResult.serializedGauges;
int numGauges = serializationResult.numGauges;
if (exceedsMessageSizeLimit(currentLength + serializationResult.serializedGauges.length)) {
logDumpSizeWouldExceedLimit("Gauges", hasExceededBefore);
hasExceededBefore = true;
serializedGauges = new byte[0];
numGauges = 0;
} else {
currentLength += serializedGauges.length;
}
byte[] serializedHistograms = serializationResult.serializedHistograms;
int numHistograms = serializationResult.numHistograms;
if (exceedsMessageSizeLimit(currentLength + serializationResult.serializedHistograms.length)) {
logDumpSizeWouldExceedLimit("Histograms", hasExceededBefore);
hasExceededBefore = true;
serializedHistograms = new byte[0];
numHistograms = 0;
}
return new MetricDumpSerialization.MetricSerializationResult(
serializedCounters,
serializedGauges,
serializedMeters,
serializedHistograms,
numCounters,
numGauges,
numMeters,
numHistograms);
}
private boolean exceedsMessageSizeLimit(final int currentSize) {
return currentSize > messageSizeLimit;
}
private void logDumpSizeWouldExceedLimit(final String metricType, boolean hasExceededBefore) {
if (LOG.isDebugEnabled()) {
LOG.debug(SIZE_EXCEEDED_LOG_TEMPLATE, metricType, messageSizeLimit);
} else {
if (!hasExceededBefore) {
LOG.info(SIZE_EXCEEDED_LOG_TEMPLATE, "Some metrics", messageSizeLimit);
}
}
}
/**
* Lightweight method to replace unsupported characters.
* If the string does not contain any unsupported characters, this method creates no
* new string (and in fact no new objects at all).
*
* <p>Replacements:
*
* <ul>
* <li>{@code space : . ,} are replaced by {@code _} (underscore)</li>
* </ul>
*/
static String replaceInvalidChars(String str) {
char[] chars = null;
final int strLen = str.length();
int pos = 0;
for (int i = 0; i < strLen; i++) {
final char c = str.charAt(i);
switch (c) {
case ' ':
case '.':
case ':':
case ',':
if (chars == null) {
chars = str.toCharArray();
}
chars[pos++] = '_';
break;
default:
if (chars != null) {
chars[pos] = c;
}
pos++;
}
}
return chars == null ? str : new String(chars, 0, pos);
}
/**
* Starts the MetricQueryService actor in the given actor system.
*
* @param actorSystem The actor system running the MetricQueryService
* @param resourceID resource ID to disambiguate the actor name
* @return actor reference to the MetricQueryService
*/
public static ActorRef startMetricQueryService(
ActorSystem actorSystem,
ResourceID resourceID,
long maximumFramesize) {
String actorName = resourceID == null
? METRIC_QUERY_SERVICE_NAME
: METRIC_QUERY_SERVICE_NAME + "_" + resourceID.getResourceIdString();
return actorSystem.actorOf(Props.create(MetricQueryService.class, maximumFramesize), actorName);
}
/**
* Utility method to notify a MetricQueryService of an added metric.
*
* @param service MetricQueryService to notify
* @param metric added metric
* @param metricName metric name
* @param group group the metric was added on
*/
public static void notifyOfAddedMetric(ActorRef service, Metric metric, String metricName, AbstractMetricGroup group) {
service.tell(new AddMetric(metricName, metric, group), null);
}
/**
* Utility method to notify a MetricQueryService of a removed metric.
*
* @param service MetricQueryService to notify
* @param metric removed metric
*/
public static void notifyOfRemovedMetric(ActorRef service, Metric metric) {
service.tell(new RemoveMetric(metric), null);
}
private static class AddMetric {
private final String metricName;
private final Metric metric;
private final AbstractMetricGroup group;
private AddMetric(String metricName, Metric metric, AbstractMetricGroup group) {
this.metricName = metricName;
this.metric = metric;
this.group = group;
}
}
private static class RemoveMetric {
private final Metric metric;
private RemoveMetric(Metric metric) {
this.metric = metric;
}
}
public static Object getCreateDump() {
return CreateDump.INSTANCE;
}
private static class CreateDump implements Serializable {
private static final CreateDump INSTANCE = new CreateDump();
}
} |
This can be moved up to initialization and synchronized won't be needed then. | public void testWatermarkEmission() throws Exception {
final int numElements = 500;
PipelineOptions options = PipelineOptionsFactory.create();
TestCountingSource source = new TestCountingSource(numElements);
UnboundedSourceWrapper<KV<Integer, Integer>, TestCountingSource.CounterMark> flinkWrapper =
new UnboundedSourceWrapper<>("stepName", options, source, numSplits);
assertEquals(numSplits, flinkWrapper.getSplitSources().size());
final StreamSource<
WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>,
UnboundedSourceWrapper<KV<Integer, Integer>, TestCountingSource.CounterMark>>
sourceOperator = new StreamSource<>(flinkWrapper);
final AbstractStreamOperatorTestHarness<
WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>
testHarness =
new AbstractStreamOperatorTestHarness<>(
sourceOperator,
numTasks /* max parallelism */,
numTasks /* parallelism */,
0 /* subtask index */);
testHarness.getExecutionConfig().setLatencyTrackingInterval(0);
testHarness.getExecutionConfig().setAutoWatermarkInterval(1);
testHarness.setProcessingTime(Long.MIN_VALUE);
testHarness.setTimeCharacteristic(TimeCharacteristic.EventTime);
final ConcurrentLinkedQueue<Object> caughtExceptions = new ConcurrentLinkedQueue<>();
final CountDownLatch seenWatermarks = new CountDownLatch(2);
final int minElementsPerReader = numElements / numSplits;
final CountDownLatch minElementsCountdown = new CountDownLatch(minElementsPerReader);
source.haltEmission();
testHarness.open();
Thread sourceThread =
new Thread(
() -> {
try {
sourceOperator.run(
testHarness.getCheckpointLock(),
new TestStreamStatusMaintainer(),
new Output<
StreamRecord<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>>() {
@Override
public void emitWatermark(Watermark watermark) {
seenWatermarks.countDown();
}
@Override
public <X> void collect(
OutputTag<X> outputTag, StreamRecord<X> streamRecord) {}
@Override
public void emitLatencyMarker(LatencyMarker latencyMarker) {}
@Override
public void collect(
StreamRecord<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>
windowedValueStreamRecord) {
minElementsCountdown.countDown();
}
@Override
public void close() {}
});
} catch (Exception e) {
LOG.info("Caught exception:", e);
caughtExceptions.add(e);
}
});
sourceThread.start();
while (flinkWrapper
.getLocalReaders()
.stream()
.anyMatch(reader -> reader.getWatermark().getMillis() == 0)) {
Thread.sleep(50);
}
synchronized (testHarness.getCheckpointLock()) {
testHarness.setProcessingTime(0);
}
source.continueEmission();
minElementsCountdown.await();
synchronized (testHarness.getCheckpointLock()) {
testHarness.setProcessingTime(Long.MAX_VALUE);
}
seenWatermarks.await();
if (!caughtExceptions.isEmpty()) {
fail("Caught exception(s): " + Joiner.on(",").join(caughtExceptions));
}
sourceOperator.cancel();
sourceThread.join();
} | synchronized (testHarness.getCheckpointLock()) { | public void testWatermarkEmission() throws Exception {
final int numElements = 500;
PipelineOptions options = PipelineOptionsFactory.create();
TestCountingSource source = new TestCountingSource(numElements);
UnboundedSourceWrapper<KV<Integer, Integer>, TestCountingSource.CounterMark> flinkWrapper =
new UnboundedSourceWrapper<>("stepName", options, source, numSplits);
assertEquals(numSplits, flinkWrapper.getSplitSources().size());
final StreamSource<
WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>,
UnboundedSourceWrapper<KV<Integer, Integer>, TestCountingSource.CounterMark>>
sourceOperator = new StreamSource<>(flinkWrapper);
final AbstractStreamOperatorTestHarness<
WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>
testHarness =
new AbstractStreamOperatorTestHarness<>(
sourceOperator,
numTasks /* max parallelism */,
numTasks /* parallelism */,
0 /* subtask index */);
testHarness.getExecutionConfig().setLatencyTrackingInterval(0);
testHarness.getExecutionConfig().setAutoWatermarkInterval(1);
testHarness.setProcessingTime(Long.MIN_VALUE);
testHarness.setTimeCharacteristic(TimeCharacteristic.EventTime);
final ConcurrentLinkedQueue<Object> caughtExceptions = new ConcurrentLinkedQueue<>();
final CountDownLatch seenWatermarks = new CountDownLatch(2);
final int minElementsPerReader = numElements / numSplits;
final CountDownLatch minElementsCountdown = new CountDownLatch(minElementsPerReader);
source.haltEmission();
testHarness.open();
Thread sourceThread =
new Thread(
() -> {
try {
sourceOperator.run(
testHarness.getCheckpointLock(),
new TestStreamStatusMaintainer(),
new Output<
StreamRecord<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>>() {
@Override
public void emitWatermark(Watermark watermark) {
seenWatermarks.countDown();
}
@Override
public <X> void collect(
OutputTag<X> outputTag, StreamRecord<X> streamRecord) {}
@Override
public void emitLatencyMarker(LatencyMarker latencyMarker) {}
@Override
public void collect(
StreamRecord<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>
windowedValueStreamRecord) {
minElementsCountdown.countDown();
}
@Override
public void close() {}
});
} catch (Exception e) {
LOG.info("Caught exception:", e);
caughtExceptions.add(e);
}
});
sourceThread.start();
while (flinkWrapper
.getLocalReaders()
.stream()
.anyMatch(reader -> reader.getWatermark().getMillis() == 0)) {
Thread.sleep(50);
}
synchronized (testHarness.getCheckpointLock()) {
testHarness.setProcessingTime(0);
}
source.continueEmission();
minElementsCountdown.await();
synchronized (testHarness.getCheckpointLock()) {
testHarness.setProcessingTime(Long.MAX_VALUE);
}
seenWatermarks.await();
if (!caughtExceptions.isEmpty()) {
fail("Caught exception(s): " + Joiner.on(",").join(caughtExceptions));
}
sourceOperator.cancel();
sourceThread.join();
} | class ParameterizedUnboundedSourceWrapperTest {
private final int numTasks;
private final int numSplits;
public ParameterizedUnboundedSourceWrapperTest(int numTasks, int numSplits) {
this.numTasks = numTasks;
this.numSplits = numSplits;
}
@Parameterized.Parameters(name = "numTasks = {0}; numSplits={1}")
public static Collection<Object[]> data() {
/*
* Parameters for initializing the tests:
* {numTasks, numSplits}
* The test currently assumes powers of two for some assertions.
*/
return Arrays.asList(
new Object[][] {
{1, 1}, {1, 2}, {1, 4},
{2, 1}, {2, 2}, {2, 4},
{4, 1}, {4, 2}, {4, 4}
});
}
/**
* Creates a {@link UnboundedSourceWrapper} that has one or multiple readers per source. If
* numSplits > numTasks the source has one source will manage multiple readers.
*/
@Test(timeout = 30_000)
public void testValueEmission() throws Exception {
final int numElementsPerShard = 20;
FlinkPipelineOptions options = PipelineOptionsFactory.as(FlinkPipelineOptions.class);
options.setShutdownSourcesOnFinalWatermark(true);
final long[] numElementsReceived = {0L};
final int[] numWatermarksReceived = {0};
TestCountingSource source =
new TestCountingSource(numElementsPerShard).withFixedNumSplits(numSplits);
for (int subtaskIndex = 0; subtaskIndex < numTasks; subtaskIndex++) {
UnboundedSourceWrapper<KV<Integer, Integer>, TestCountingSource.CounterMark> flinkWrapper =
new UnboundedSourceWrapper<>("stepName", options, source, numTasks);
assertEquals(numSplits, flinkWrapper.getSplitSources().size());
StreamSource<
WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>,
UnboundedSourceWrapper<KV<Integer, Integer>, TestCountingSource.CounterMark>>
sourceOperator = new StreamSource<>(flinkWrapper);
AbstractStreamOperatorTestHarness<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>
testHarness =
new AbstractStreamOperatorTestHarness<>(
sourceOperator,
numTasks /* max parallelism */,
numTasks /* parallelism */,
subtaskIndex /* subtask index */);
testHarness.setProcessingTime(System.currentTimeMillis());
Thread processingTimeUpdateThread =
new Thread() {
@Override
public void run() {
while (true) {
try {
testHarness.setProcessingTime(System.currentTimeMillis());
Thread.sleep(100);
} catch (InterruptedException e) {
break;
} catch (Exception e) {
LOG.error("Unexpected error advancing processing time", e);
break;
}
}
}
};
processingTimeUpdateThread.start();
testHarness.setTimeCharacteristic(TimeCharacteristic.EventTime);
try {
testHarness.open();
sourceOperator.run(
testHarness.getCheckpointLock(),
new TestStreamStatusMaintainer(),
new Output<StreamRecord<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>>() {
private boolean hasSeenMaxWatermark = false;
@Override
public void emitWatermark(Watermark watermark) {
if (!hasSeenMaxWatermark
&& watermark.getTimestamp()
>= BoundedWindow.TIMESTAMP_MAX_VALUE.getMillis()) {
numWatermarksReceived[0]++;
hasSeenMaxWatermark = true;
}
}
@Override
public <X> void collect(OutputTag<X> outputTag, StreamRecord<X> streamRecord) {
collect((StreamRecord) streamRecord);
}
@Override
public void emitLatencyMarker(LatencyMarker latencyMarker) {}
@Override
public void collect(
StreamRecord<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>
windowedValueStreamRecord) {
numElementsReceived[0]++;
}
@Override
public void close() {}
});
} finally {
processingTimeUpdateThread.interrupt();
processingTimeUpdateThread.join();
}
}
assertEquals(numElementsPerShard * numSplits, numElementsReceived[0]);
assertEquals(numTasks, numWatermarksReceived[0]);
}
/**
* Creates a {@link UnboundedSourceWrapper} that has one or multiple readers per source. If
* numSplits > numTasks the source will manage multiple readers.
*
* <p>This test verifies that watermarks are correctly forwarded.
*/
@Test(timeout = 30_000)
/**
* Verify that snapshot/restore work as expected. We bring up a source and cancel after seeing a
* certain number of elements. Then we snapshot that source, bring up a completely new source
* that we restore from the snapshot and verify that we see all expected elements in the end.
*/
@Test
public void testRestore() throws Exception {
final int numElements = 20;
final Object checkpointLock = new Object();
PipelineOptions options = PipelineOptionsFactory.create();
TestCountingSource source = new TestCountingSource(numElements);
UnboundedSourceWrapper<KV<Integer, Integer>, TestCountingSource.CounterMark> flinkWrapper =
new UnboundedSourceWrapper<>("stepName", options, source, numSplits);
assertEquals(numSplits, flinkWrapper.getSplitSources().size());
StreamSource<
WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>,
UnboundedSourceWrapper<KV<Integer, Integer>, TestCountingSource.CounterMark>>
sourceOperator = new StreamSource<>(flinkWrapper);
AbstractStreamOperatorTestHarness<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>
testHarness =
new AbstractStreamOperatorTestHarness<>(
sourceOperator,
numTasks /* max parallelism */,
numTasks /* parallelism */,
0 /* subtask index */);
testHarness.setTimeCharacteristic(TimeCharacteristic.EventTime);
final Set<KV<Integer, Integer>> emittedElements = new HashSet<>();
boolean readFirstBatchOfElements = false;
try {
testHarness.open();
sourceOperator.run(
checkpointLock,
new TestStreamStatusMaintainer(),
new Output<StreamRecord<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>>() {
private int count = 0;
@Override
public void emitWatermark(Watermark watermark) {}
@Override
public <X> void collect(OutputTag<X> outputTag, StreamRecord<X> streamRecord) {
collect((StreamRecord) streamRecord);
}
@Override
public void emitLatencyMarker(LatencyMarker latencyMarker) {}
@Override
public void collect(
StreamRecord<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>
windowedValueStreamRecord) {
emittedElements.add(windowedValueStreamRecord.getValue().getValue().getValue());
count++;
if (count >= numElements / 2) {
throw new SuccessException();
}
}
@Override
public void close() {}
});
} catch (SuccessException e) {
readFirstBatchOfElements = true;
}
assertTrue("Did not successfully read first batch of elements.", readFirstBatchOfElements);
OperatorSubtaskState snapshot = testHarness.snapshot(0, 0);
final ArrayList<Integer> finalizeList = new ArrayList<>();
TestCountingSource.setFinalizeTracker(finalizeList);
testHarness.notifyOfCompletedCheckpoint(0);
assertEquals(flinkWrapper.getLocalSplitSources().size(), finalizeList.size());
TestCountingSource restoredSource = new TestCountingSource(numElements);
UnboundedSourceWrapper<KV<Integer, Integer>, TestCountingSource.CounterMark>
restoredFlinkWrapper =
new UnboundedSourceWrapper<>("stepName", options, restoredSource, numSplits);
assertEquals(numSplits, restoredFlinkWrapper.getSplitSources().size());
StreamSource<
WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>,
UnboundedSourceWrapper<KV<Integer, Integer>, TestCountingSource.CounterMark>>
restoredSourceOperator = new StreamSource<>(restoredFlinkWrapper);
AbstractStreamOperatorTestHarness<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>
restoredTestHarness =
new AbstractStreamOperatorTestHarness<>(
restoredSourceOperator,
numTasks /* max parallelism */,
1 /* parallelism */,
0 /* subtask index */);
restoredTestHarness.setTimeCharacteristic(TimeCharacteristic.EventTime);
restoredTestHarness.initializeState(snapshot);
boolean readSecondBatchOfElements = false;
try {
restoredTestHarness.open();
restoredSourceOperator.run(
checkpointLock,
new TestStreamStatusMaintainer(),
new Output<StreamRecord<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>>() {
private int count = 0;
@Override
public void emitWatermark(Watermark watermark) {}
@Override
public <X> void collect(OutputTag<X> outputTag, StreamRecord<X> streamRecord) {
collect((StreamRecord) streamRecord);
}
@Override
public void emitLatencyMarker(LatencyMarker latencyMarker) {}
@Override
public void collect(
StreamRecord<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>
windowedValueStreamRecord) {
emittedElements.add(windowedValueStreamRecord.getValue().getValue().getValue());
count++;
if (count >= numElements / 2) {
throw new SuccessException();
}
}
@Override
public void close() {}
});
} catch (SuccessException e) {
readSecondBatchOfElements = true;
}
assertEquals(
Math.max(1, numSplits / numTasks), restoredFlinkWrapper.getLocalSplitSources().size());
assertTrue("Did not successfully read second batch of elements.", readSecondBatchOfElements);
assertTrue(emittedElements.size() == numElements);
}
@Test
public void testNullCheckpoint() throws Exception {
final int numElements = 20;
PipelineOptions options = PipelineOptionsFactory.create();
TestCountingSource source =
new TestCountingSource(numElements) {
@Override
public Coder<CounterMark> getCheckpointMarkCoder() {
return null;
}
};
UnboundedSourceWrapper<KV<Integer, Integer>, TestCountingSource.CounterMark> flinkWrapper =
new UnboundedSourceWrapper<>("stepName", options, source, numSplits);
StreamSource<
WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>,
UnboundedSourceWrapper<KV<Integer, Integer>, TestCountingSource.CounterMark>>
sourceOperator = new StreamSource<>(flinkWrapper);
AbstractStreamOperatorTestHarness<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>
testHarness =
new AbstractStreamOperatorTestHarness<>(
sourceOperator,
numTasks /* max parallelism */,
numTasks /* parallelism */,
0 /* subtask index */);
testHarness.setTimeCharacteristic(TimeCharacteristic.EventTime);
testHarness.open();
OperatorSubtaskState snapshot = testHarness.snapshot(0, 0);
UnboundedSourceWrapper<KV<Integer, Integer>, TestCountingSource.CounterMark>
restoredFlinkWrapper =
new UnboundedSourceWrapper<>(
"stepName", options, new TestCountingSource(numElements), numSplits);
StreamSource<
WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>,
UnboundedSourceWrapper<KV<Integer, Integer>, TestCountingSource.CounterMark>>
restoredSourceOperator = new StreamSource<>(restoredFlinkWrapper);
AbstractStreamOperatorTestHarness<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>
restoredTestHarness =
new AbstractStreamOperatorTestHarness<>(
restoredSourceOperator,
numTasks /* max parallelism */,
1 /* parallelism */,
0 /* subtask index */);
restoredTestHarness.setup();
restoredTestHarness.initializeState(snapshot);
restoredTestHarness.open();
assertEquals(0, restoredFlinkWrapper.getLocalSplitSources().size());
}
/** A special {@link RuntimeException} that we throw to signal that the test was successful. */
private static class SuccessException extends RuntimeException {}
} | class ParameterizedUnboundedSourceWrapperTest {
private final int numTasks;
private final int numSplits;
public ParameterizedUnboundedSourceWrapperTest(int numTasks, int numSplits) {
this.numTasks = numTasks;
this.numSplits = numSplits;
}
@Parameterized.Parameters(name = "numTasks = {0}; numSplits={1}")
public static Collection<Object[]> data() {
/*
* Parameters for initializing the tests:
* {numTasks, numSplits}
* The test currently assumes powers of two for some assertions.
*/
return Arrays.asList(
new Object[][] {
{1, 1}, {1, 2}, {1, 4},
{2, 1}, {2, 2}, {2, 4},
{4, 1}, {4, 2}, {4, 4}
});
}
/**
* Creates a {@link UnboundedSourceWrapper} that has one or multiple readers per source. If
* numSplits > numTasks the source has one source will manage multiple readers.
*/
@Test(timeout = 30_000)
public void testValueEmission() throws Exception {
final int numElementsPerShard = 20;
FlinkPipelineOptions options = PipelineOptionsFactory.as(FlinkPipelineOptions.class);
options.setShutdownSourcesOnFinalWatermark(true);
final long[] numElementsReceived = {0L};
final int[] numWatermarksReceived = {0};
TestCountingSource source =
new TestCountingSource(numElementsPerShard).withFixedNumSplits(numSplits);
for (int subtaskIndex = 0; subtaskIndex < numTasks; subtaskIndex++) {
UnboundedSourceWrapper<KV<Integer, Integer>, TestCountingSource.CounterMark> flinkWrapper =
new UnboundedSourceWrapper<>("stepName", options, source, numTasks);
assertEquals(numSplits, flinkWrapper.getSplitSources().size());
StreamSource<
WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>,
UnboundedSourceWrapper<KV<Integer, Integer>, TestCountingSource.CounterMark>>
sourceOperator = new StreamSource<>(flinkWrapper);
AbstractStreamOperatorTestHarness<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>
testHarness =
new AbstractStreamOperatorTestHarness<>(
sourceOperator,
numTasks /* max parallelism */,
numTasks /* parallelism */,
subtaskIndex /* subtask index */);
testHarness.setProcessingTime(System.currentTimeMillis());
Thread processingTimeUpdateThread =
new Thread() {
@Override
public void run() {
while (true) {
try {
testHarness.setProcessingTime(System.currentTimeMillis());
Thread.sleep(100);
} catch (InterruptedException e) {
break;
} catch (Exception e) {
LOG.error("Unexpected error advancing processing time", e);
break;
}
}
}
};
processingTimeUpdateThread.start();
testHarness.setTimeCharacteristic(TimeCharacteristic.EventTime);
try {
testHarness.open();
sourceOperator.run(
testHarness.getCheckpointLock(),
new TestStreamStatusMaintainer(),
new Output<StreamRecord<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>>() {
private boolean hasSeenMaxWatermark = false;
@Override
public void emitWatermark(Watermark watermark) {
if (!hasSeenMaxWatermark
&& watermark.getTimestamp()
>= BoundedWindow.TIMESTAMP_MAX_VALUE.getMillis()) {
numWatermarksReceived[0]++;
hasSeenMaxWatermark = true;
}
}
@Override
public <X> void collect(OutputTag<X> outputTag, StreamRecord<X> streamRecord) {
collect((StreamRecord) streamRecord);
}
@Override
public void emitLatencyMarker(LatencyMarker latencyMarker) {}
@Override
public void collect(
StreamRecord<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>
windowedValueStreamRecord) {
numElementsReceived[0]++;
}
@Override
public void close() {}
});
} finally {
processingTimeUpdateThread.interrupt();
processingTimeUpdateThread.join();
}
}
assertEquals(numElementsPerShard * numSplits, numElementsReceived[0]);
assertEquals(numTasks, numWatermarksReceived[0]);
}
/**
* Creates a {@link UnboundedSourceWrapper} that has one or multiple readers per source. If
* numSplits > numTasks the source will manage multiple readers.
*
* <p>This test verifies that watermarks are correctly forwarded.
*/
@Test(timeout = 30_000)
/**
* Verify that snapshot/restore work as expected. We bring up a source and cancel after seeing a
* certain number of elements. Then we snapshot that source, bring up a completely new source
* that we restore from the snapshot and verify that we see all expected elements in the end.
*/
@Test
public void testRestore() throws Exception {
final int numElements = 20;
final Object checkpointLock = new Object();
PipelineOptions options = PipelineOptionsFactory.create();
TestCountingSource source = new TestCountingSource(numElements);
UnboundedSourceWrapper<KV<Integer, Integer>, TestCountingSource.CounterMark> flinkWrapper =
new UnboundedSourceWrapper<>("stepName", options, source, numSplits);
assertEquals(numSplits, flinkWrapper.getSplitSources().size());
StreamSource<
WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>,
UnboundedSourceWrapper<KV<Integer, Integer>, TestCountingSource.CounterMark>>
sourceOperator = new StreamSource<>(flinkWrapper);
AbstractStreamOperatorTestHarness<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>
testHarness =
new AbstractStreamOperatorTestHarness<>(
sourceOperator,
numTasks /* max parallelism */,
numTasks /* parallelism */,
0 /* subtask index */);
testHarness.setTimeCharacteristic(TimeCharacteristic.EventTime);
final Set<KV<Integer, Integer>> emittedElements = new HashSet<>();
boolean readFirstBatchOfElements = false;
try {
testHarness.open();
sourceOperator.run(
checkpointLock,
new TestStreamStatusMaintainer(),
new Output<StreamRecord<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>>() {
private int count = 0;
@Override
public void emitWatermark(Watermark watermark) {}
@Override
public <X> void collect(OutputTag<X> outputTag, StreamRecord<X> streamRecord) {
collect((StreamRecord) streamRecord);
}
@Override
public void emitLatencyMarker(LatencyMarker latencyMarker) {}
@Override
public void collect(
StreamRecord<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>
windowedValueStreamRecord) {
emittedElements.add(windowedValueStreamRecord.getValue().getValue().getValue());
count++;
if (count >= numElements / 2) {
throw new SuccessException();
}
}
@Override
public void close() {}
});
} catch (SuccessException e) {
readFirstBatchOfElements = true;
}
assertTrue("Did not successfully read first batch of elements.", readFirstBatchOfElements);
OperatorSubtaskState snapshot = testHarness.snapshot(0, 0);
final ArrayList<Integer> finalizeList = new ArrayList<>();
TestCountingSource.setFinalizeTracker(finalizeList);
testHarness.notifyOfCompletedCheckpoint(0);
assertEquals(flinkWrapper.getLocalSplitSources().size(), finalizeList.size());
TestCountingSource restoredSource = new TestCountingSource(numElements);
UnboundedSourceWrapper<KV<Integer, Integer>, TestCountingSource.CounterMark>
restoredFlinkWrapper =
new UnboundedSourceWrapper<>("stepName", options, restoredSource, numSplits);
assertEquals(numSplits, restoredFlinkWrapper.getSplitSources().size());
StreamSource<
WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>,
UnboundedSourceWrapper<KV<Integer, Integer>, TestCountingSource.CounterMark>>
restoredSourceOperator = new StreamSource<>(restoredFlinkWrapper);
AbstractStreamOperatorTestHarness<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>
restoredTestHarness =
new AbstractStreamOperatorTestHarness<>(
restoredSourceOperator,
numTasks /* max parallelism */,
1 /* parallelism */,
0 /* subtask index */);
restoredTestHarness.setTimeCharacteristic(TimeCharacteristic.EventTime);
restoredTestHarness.initializeState(snapshot);
boolean readSecondBatchOfElements = false;
try {
restoredTestHarness.open();
restoredSourceOperator.run(
checkpointLock,
new TestStreamStatusMaintainer(),
new Output<StreamRecord<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>>() {
private int count = 0;
@Override
public void emitWatermark(Watermark watermark) {}
@Override
public <X> void collect(OutputTag<X> outputTag, StreamRecord<X> streamRecord) {
collect((StreamRecord) streamRecord);
}
@Override
public void emitLatencyMarker(LatencyMarker latencyMarker) {}
@Override
public void collect(
StreamRecord<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>
windowedValueStreamRecord) {
emittedElements.add(windowedValueStreamRecord.getValue().getValue().getValue());
count++;
if (count >= numElements / 2) {
throw new SuccessException();
}
}
@Override
public void close() {}
});
} catch (SuccessException e) {
readSecondBatchOfElements = true;
}
assertEquals(
Math.max(1, numSplits / numTasks), restoredFlinkWrapper.getLocalSplitSources().size());
assertTrue("Did not successfully read second batch of elements.", readSecondBatchOfElements);
assertTrue(emittedElements.size() == numElements);
}
@Test
public void testNullCheckpoint() throws Exception {
final int numElements = 20;
PipelineOptions options = PipelineOptionsFactory.create();
TestCountingSource source =
new TestCountingSource(numElements) {
@Override
public Coder<CounterMark> getCheckpointMarkCoder() {
return null;
}
};
UnboundedSourceWrapper<KV<Integer, Integer>, TestCountingSource.CounterMark> flinkWrapper =
new UnboundedSourceWrapper<>("stepName", options, source, numSplits);
StreamSource<
WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>,
UnboundedSourceWrapper<KV<Integer, Integer>, TestCountingSource.CounterMark>>
sourceOperator = new StreamSource<>(flinkWrapper);
AbstractStreamOperatorTestHarness<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>
testHarness =
new AbstractStreamOperatorTestHarness<>(
sourceOperator,
numTasks /* max parallelism */,
numTasks /* parallelism */,
0 /* subtask index */);
testHarness.setTimeCharacteristic(TimeCharacteristic.EventTime);
testHarness.open();
OperatorSubtaskState snapshot = testHarness.snapshot(0, 0);
UnboundedSourceWrapper<KV<Integer, Integer>, TestCountingSource.CounterMark>
restoredFlinkWrapper =
new UnboundedSourceWrapper<>(
"stepName", options, new TestCountingSource(numElements), numSplits);
StreamSource<
WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>,
UnboundedSourceWrapper<KV<Integer, Integer>, TestCountingSource.CounterMark>>
restoredSourceOperator = new StreamSource<>(restoredFlinkWrapper);
AbstractStreamOperatorTestHarness<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>
restoredTestHarness =
new AbstractStreamOperatorTestHarness<>(
restoredSourceOperator,
numTasks /* max parallelism */,
1 /* parallelism */,
0 /* subtask index */);
restoredTestHarness.setup();
restoredTestHarness.initializeState(snapshot);
restoredTestHarness.open();
assertEquals(0, restoredFlinkWrapper.getLocalSplitSources().size());
}
/** A special {@link RuntimeException} that we throw to signal that the test was successful. */
private static class SuccessException extends RuntimeException {}
} |
Yes, the `length` check is moved to the constructor of `SqlShowDatabases`, and I added a test case to catch the exception in `catalog_database.q ` | public Operation convertSqlNode(SqlShowDatabases sqlShowDatabases, ConvertContext context) {
if (sqlShowDatabases.getPreposition() == null) {
return new ShowDatabasesOperation(
sqlShowDatabases.getLikeType(),
sqlShowDatabases.getLikeSqlPattern(),
sqlShowDatabases.isNotLike());
} else {
CatalogManager catalogManager = context.getCatalogManager();
String[] fullCatalogName = sqlShowDatabases.getCatalog();
if (fullCatalogName.length > 1) {
throw new ValidationException(
String.format(
"Show databases from/in identifier [ %s ] format error, catalog cannot contain dot character.",
String.join(".", fullCatalogName)));
}
String catalogName =
fullCatalogName.length == 0
? catalogManager.getCurrentCatalog()
: fullCatalogName[0];
return new ShowDatabasesOperation(
sqlShowDatabases.getPreposition(),
catalogName,
sqlShowDatabases.getLikeType(),
sqlShowDatabases.getLikeSqlPattern(),
sqlShowDatabases.isNotLike());
}
} | if (fullCatalogName.length > 1) { | public Operation convertSqlNode(SqlShowDatabases sqlShowDatabases, ConvertContext context) {
if (sqlShowDatabases.getPreposition() == null) {
return new ShowDatabasesOperation(
sqlShowDatabases.getLikeType(),
sqlShowDatabases.getLikeSqlPattern(),
sqlShowDatabases.isNotLike());
} else {
return new ShowDatabasesOperation(
sqlShowDatabases.getCatalog()[0],
sqlShowDatabases.getLikeType(),
sqlShowDatabases.getLikeSqlPattern(),
sqlShowDatabases.isNotLike());
}
} | class SqlShowDatabasesConverter implements SqlNodeConverter<SqlShowDatabases> {
@Override
} | class SqlShowDatabasesConverter implements SqlNodeConverter<SqlShowDatabases> {
@Override
} |
nit: 10000000 name this something so its clear | public void testActiveThreadMetric() throws Exception {
int maxThreads = 5;
int threadExpirationSec = 60;
CountDownLatch processStart1 = new CountDownLatch(2);
CountDownLatch processStart2 = new CountDownLatch(3);
CountDownLatch processStart3 = new CountDownLatch(4);
AtomicBoolean stop = new AtomicBoolean(false);
BoundedQueueExecutor executor =
new BoundedQueueExecutor(
maxThreads,
threadExpirationSec,
TimeUnit.SECONDS,
maxThreads,
10000000,
new ThreadFactoryBuilder()
.setNameFormat("DataflowWorkUnits-%d")
.setDaemon(true)
.build());
ComputationState computationState =
new ComputationState(
"computation",
defaultMapTask(Arrays.asList(makeSourceInstruction(StringUtf8Coder.of()))),
executor,
ImmutableMap.of(),
null);
ShardedKey key1Shard1 = ShardedKey.create(ByteString.copyFromUtf8("key1"), 1);
Consumer<Work> sleepProcessWorkFn =
unused -> {
processStart1.countDown();
processStart2.countDown();
processStart3.countDown();
int count = 0;
while (!stop.get()) {
count += 1;
}
};
Work m2 = createMockWork(2, sleepProcessWorkFn);
Work m3 = createMockWork(3, sleepProcessWorkFn);
Work m4 = createMockWork(4, sleepProcessWorkFn);
assertEquals(0, executor.activeCount());
assertTrue(computationState.activateWork(key1Shard1, m2));
executor.execute(m2, m2.getWorkItem().getSerializedSize());
processStart1.await();
assertEquals(2, executor.activeCount());
assertTrue(computationState.activateWork(key1Shard1, m3));
assertTrue(computationState.activateWork(key1Shard1, m4));
executor.execute(m3, m3.getWorkItem().getSerializedSize());
processStart2.await();
assertEquals(3, executor.activeCount());
executor.execute(m4, m4.getWorkItem().getSerializedSize());
processStart3.await();
assertEquals(4, executor.activeCount());
stop.set(true);
executor.shutdown();
} | 10000000, | public void testActiveThreadMetric() throws Exception {
int maxThreads = 5;
int threadExpirationSec = 60;
CountDownLatch processStart1 = new CountDownLatch(2);
CountDownLatch processStart2 = new CountDownLatch(3);
CountDownLatch processStart3 = new CountDownLatch(4);
AtomicBoolean stop = new AtomicBoolean(false);
BoundedQueueExecutor executor =
new BoundedQueueExecutor(
maxThreads,
threadExpirationSec,
TimeUnit.SECONDS,
maxThreads,
MAXIMUM_BYTES_OUTSTANDING,
new ThreadFactoryBuilder()
.setNameFormat("DataflowWorkUnits-%d")
.setDaemon(true)
.build());
ComputationState computationState =
new ComputationState(
"computation",
defaultMapTask(Arrays.asList(makeSourceInstruction(StringUtf8Coder.of()))),
executor,
ImmutableMap.of(),
null);
ShardedKey key1Shard1 = ShardedKey.create(ByteString.copyFromUtf8("key1"), 1);
Consumer<Work> sleepProcessWorkFn =
unused -> {
processStart1.countDown();
processStart2.countDown();
processStart3.countDown();
int count = 0;
while (!stop.get()) {
count += 1;
}
};
Work m2 = createMockWork(2, sleepProcessWorkFn);
Work m3 = createMockWork(3, sleepProcessWorkFn);
Work m4 = createMockWork(4, sleepProcessWorkFn);
assertEquals(0, executor.activeCount());
assertTrue(computationState.activateWork(key1Shard1, m2));
executor.execute(m2, m2.getWorkItem().getSerializedSize());
processStart1.await();
assertEquals(2, executor.activeCount());
assertTrue(computationState.activateWork(key1Shard1, m3));
assertTrue(computationState.activateWork(key1Shard1, m4));
executor.execute(m3, m3.getWorkItem().getSerializedSize());
processStart2.await();
assertEquals(3, executor.activeCount());
executor.execute(m4, m4.getWorkItem().getSerializedSize());
processStart3.await();
assertEquals(4, executor.activeCount());
stop.set(true);
executor.shutdown();
} | class StreamingDataflowWorkerTest {
private static final Logger LOG = LoggerFactory.getLogger(StreamingDataflowWorkerTest.class);
private static final IntervalWindow DEFAULT_WINDOW =
new IntervalWindow(new Instant(1234), Duration.millis(1000));
private static final IntervalWindow WINDOW_AT_ZERO =
new IntervalWindow(new Instant(0), new Instant(1000));
private static final IntervalWindow WINDOW_AT_ONE_SECOND =
new IntervalWindow(new Instant(1000), new Instant(2000));
private static final Coder<IntervalWindow> DEFAULT_WINDOW_CODER = IntervalWindow.getCoder();
private static final Coder<Collection<IntervalWindow>> DEFAULT_WINDOW_COLLECTION_CODER =
CollectionCoder.of(DEFAULT_WINDOW_CODER);
private static final String DEFAULT_COMPUTATION_ID = "computation";
private static final String DEFAULT_MAP_STAGE_NAME = "computation";
private static final String DEFAULT_MAP_SYSTEM_NAME = "computation";
private static final String DEFAULT_OUTPUT_ORIGINAL_NAME = "originalName";
private static final String DEFAULT_OUTPUT_SYSTEM_NAME = "systemName";
private static final String DEFAULT_PARDO_SYSTEM_NAME = "parDo";
private static final String DEFAULT_PARDO_ORIGINAL_NAME = "parDoOriginalName";
private static final String DEFAULT_PARDO_USER_NAME = "parDoUserName";
private static final String DEFAULT_PARDO_STATE_FAMILY = "parDoStateFamily";
private static final String DEFAULT_SOURCE_SYSTEM_NAME = "source";
private static final String DEFAULT_SOURCE_ORIGINAL_NAME = "sourceOriginalName";
private static final String DEFAULT_SINK_SYSTEM_NAME = "sink";
private static final String DEFAULT_SINK_ORIGINAL_NAME = "sinkOriginalName";
private static final String DEFAULT_SOURCE_COMPUTATION_ID = "upstream";
private static final String DEFAULT_KEY_STRING = "key";
private static final long DEFAULT_SHARDING_KEY = 12345;
private static final ByteString DEFAULT_KEY_BYTES = ByteString.copyFromUtf8(DEFAULT_KEY_STRING);
private static final String DEFAULT_DATA_STRING = "data";
private static final String DEFAULT_DESTINATION_STREAM_ID = "out";
private static final Function<GetDataRequest, GetDataResponse> EMPTY_DATA_RESPONDER =
(GetDataRequest request) -> {
GetDataResponse.Builder builder = GetDataResponse.newBuilder();
for (ComputationGetDataRequest compRequest : request.getRequestsList()) {
ComputationGetDataResponse.Builder compBuilder =
builder.addDataBuilder().setComputationId(compRequest.getComputationId());
for (KeyedGetDataRequest keyRequest : compRequest.getRequestsList()) {
KeyedGetDataResponse.Builder keyBuilder =
compBuilder
.addDataBuilder()
.setKey(keyRequest.getKey())
.setShardingKey(keyRequest.getShardingKey());
keyBuilder.addAllValues(keyRequest.getValuesToFetchList());
keyBuilder.addAllBags(keyRequest.getBagsToFetchList());
keyBuilder.addAllWatermarkHolds(keyRequest.getWatermarkHoldsToFetchList());
}
}
return builder.build();
};
private final boolean streamingEngine;
private final Supplier<Long> idGenerator =
new Supplier<Long>() {
private final AtomicLong idGenerator = new AtomicLong(1L);
@Override
public Long get() {
return idGenerator.getAndIncrement();
}
};
@Rule public BlockingFn blockingFn = new BlockingFn();
@Rule public TestRule restoreMDC = new RestoreDataflowLoggingMDC();
@Rule public ErrorCollector errorCollector = new ErrorCollector();
WorkUnitClient mockWorkUnitClient = mock(WorkUnitClient.class);
HotKeyLogger hotKeyLogger = mock(HotKeyLogger.class);
public StreamingDataflowWorkerTest(Boolean streamingEngine) {
this.streamingEngine = streamingEngine;
}
@Parameterized.Parameters(name = "{index}: [streamingEngine={0}]")
public static Iterable<Object[]> data() {
return Arrays.asList(new Object[][] {{false}, {true}});
}
private static CounterUpdate getCounter(Iterable<CounterUpdate> counters, String name) {
for (CounterUpdate counter : counters) {
if (counter.getNameAndKind().getName().equals(name)) {
return counter;
}
}
return null;
}
static Work createMockWork(long workToken) {
return Work.create(
Windmill.WorkItem.newBuilder().setKey(ByteString.EMPTY).setWorkToken(workToken).build(),
Instant::now,
Collections.emptyList(),
work -> {});
}
static Work createMockWork(long workToken, Consumer<Work> processWorkFn) {
return Work.create(
Windmill.WorkItem.newBuilder().setKey(ByteString.EMPTY).setWorkToken(workToken).build(),
Instant::now,
Collections.emptyList(),
processWorkFn);
}
private byte[] intervalWindowBytes(IntervalWindow window) throws Exception {
return CoderUtils.encodeToByteArray(
DEFAULT_WINDOW_COLLECTION_CODER, Collections.singletonList(window));
}
private String keyStringForIndex(int index) {
return DEFAULT_KEY_STRING + index;
}
private String dataStringForIndex(long index) {
return DEFAULT_DATA_STRING + index;
}
/**
 * Builds a Read instruction backed by {@link WindowingWindmillReader} whose codec is a
 * windowed-value wrapper around a TimerOrElement coder over {@code coder}, windowed by
 * {@link IntervalWindow}s.
 */
private ParallelInstruction makeWindowingSourceInstruction(Coder<?> coder) {
  // The timer/element union coder is referenced by its Dataflow-service class name; it has
  // no direct SDK-side CloudObject translation, so it is assembled by hand here.
  CloudObject timerCloudObject =
      CloudObject.forClassName(
          "com.google.cloud.dataflow.sdk.util.TimerOrElement$TimerOrElementCoder");
  List<CloudObject> component =
      Collections.singletonList(CloudObjects.asCloudObject(coder, /* sdkComponents= */ null));
  Structs.addList(timerCloudObject, PropertyNames.COMPONENT_ENCODINGS, component);
  CloudObject encodedCoder = CloudObject.forClassName("kind:windowed_value");
  // NOTE(review): IS_WRAPPER appears to select the wrapper form of the windowed-value
  // coder — confirm against the service-side coder translation.
  Structs.addBoolean(encodedCoder, PropertyNames.IS_WRAPPER, true);
  Structs.addList(
      encodedCoder,
      PropertyNames.COMPONENT_ENCODINGS,
      ImmutableList.of(
          timerCloudObject,
          CloudObjects.asCloudObject(IntervalWindowCoder.of(), /* sdkComponents= */ null)));
  return new ParallelInstruction()
      .setSystemName(DEFAULT_SOURCE_SYSTEM_NAME)
      .setOriginalName(DEFAULT_SOURCE_ORIGINAL_NAME)
      .setRead(
          new ReadInstruction()
              .setSource(
                  new Source()
                      .setSpec(CloudObject.forClass(WindowingWindmillReader.class))
                      .setCodec(encodedCoder)))
      .setOutputs(
          Collections.singletonList(
              new InstructionOutput()
                  // Unique output name from the per-test id generator.
                  .setName(Long.toString(idGenerator.get()))
                  .setCodec(encodedCoder)
                  .setOriginalName(DEFAULT_OUTPUT_ORIGINAL_NAME)
                  .setSystemName(DEFAULT_OUTPUT_SYSTEM_NAME)));
}
/**
 * Builds a Read instruction backed by {@link UngroupedWindmillReader} that decodes windowed
 * values of {@code coder} with {@link IntervalWindow} windows; the single output uses the same
 * codec.
 */
private ParallelInstruction makeSourceInstruction(Coder<?> coder) {
  return new ParallelInstruction()
      .setSystemName(DEFAULT_SOURCE_SYSTEM_NAME)
      .setOriginalName(DEFAULT_SOURCE_ORIGINAL_NAME)
      .setRead(
          new ReadInstruction()
              .setSource(
                  new Source()
                      .setSpec(CloudObject.forClass(UngroupedWindmillReader.class))
                      .setCodec(
                          CloudObjects.asCloudObject(
                              WindowedValue.getFullCoder(coder, IntervalWindow.getCoder()),
                              /* sdkComponents= */ null))))
      .setOutputs(
          Collections.singletonList(
              new InstructionOutput()
                  // Unique output name from the per-test id generator.
                  .setName(Long.toString(idGenerator.get()))
                  .setOriginalName(DEFAULT_OUTPUT_ORIGINAL_NAME)
                  .setSystemName(DEFAULT_OUTPUT_SYSTEM_NAME)
                  .setCodec(
                      CloudObjects.asCloudObject(
                          WindowedValue.getFullCoder(coder, IntervalWindow.getCoder()),
                          /* sdkComponents= */ null))));
}
/**
 * Builds a ParDo instruction wrapping {@code doFn}, consuming output 0 of the instruction at
 * {@code producerIndex} and producing a single output encoded with {@code outputCoder}
 * windowed by {@code windowingStrategy}'s window coder.
 */
private ParallelInstruction makeDoFnInstruction(
    DoFn<?, ?> doFn,
    int producerIndex,
    Coder<?> outputCoder,
    WindowingStrategy<?, ?> windowingStrategy) {
  CloudObject spec = CloudObject.forClassName("DoFn");
  // The DoFn itself travels as a serialized DoFnInfo blob, as the service would send it.
  addString(
      spec,
      PropertyNames.SERIALIZED_FN,
      StringUtils.byteArrayToJsonString(
          SerializableUtils.serializeToByteArray(
              DoFnInfo.forFn(
                  doFn,
                  windowingStrategy /* windowing strategy */,
                  null /* side input views */,
                  null /* input coder */,
                  new TupleTag<>(PropertyNames.OUTPUT) /* main output id */,
                  DoFnSchemaInformation.create(),
                  Collections.emptyMap()))));
  return new ParallelInstruction()
      .setSystemName(DEFAULT_PARDO_SYSTEM_NAME)
      .setName(DEFAULT_PARDO_USER_NAME)
      .setOriginalName(DEFAULT_PARDO_ORIGINAL_NAME)
      .setParDo(
          new ParDoInstruction()
              .setInput(
                  new InstructionInput()
                      .setProducerInstructionIndex(producerIndex)
                      .setOutputNum(0))
              .setNumOutputs(1)
              .setUserFn(spec)
              .setMultiOutputInfos(
                  Collections.singletonList(new MultiOutputInfo().setTag(PropertyNames.OUTPUT))))
      .setOutputs(
          Collections.singletonList(
              new InstructionOutput()
                  .setName(PropertyNames.OUTPUT)
                  .setOriginalName(DEFAULT_OUTPUT_ORIGINAL_NAME)
                  .setSystemName(DEFAULT_OUTPUT_SYSTEM_NAME)
                  .setCodec(
                      CloudObjects.asCloudObject(
                          WindowedValue.getFullCoder(
                              outputCoder, windowingStrategy.getWindowFn().windowCoder()),
                          /* sdkComponents= */ null))));
}
/** Convenience overload that applies a default fixed-window strategy of 10 milliseconds. */
private ParallelInstruction makeDoFnInstruction(
    DoFn<?, ?> doFn, int producerIndex, Coder<?> outputCoder) {
  return makeDoFnInstruction(
      doFn,
      producerIndex,
      outputCoder,
      WindowingStrategy.of(FixedWindows.of(Duration.millis(10))));
}
/**
 * Builds a Write instruction targeting a {@link WindmillSink} for {@code streamId}, consuming
 * output 0 of the instruction at {@code producerIndex}, with values encoded by {@code coder}
 * in windows encoded by {@code windowCoder}.
 */
private ParallelInstruction makeSinkInstruction(
    String streamId,
    Coder<?> coder,
    int producerIndex,
    Coder<? extends BoundedWindow> windowCoder) {
  CloudObject spec = CloudObject.forClass(WindmillSink.class);
  addString(spec, "stream_id", streamId);
  return new ParallelInstruction()
      .setSystemName(DEFAULT_SINK_SYSTEM_NAME)
      .setOriginalName(DEFAULT_SINK_ORIGINAL_NAME)
      .setWrite(
          new WriteInstruction()
              .setInput(
                  new InstructionInput()
                      .setProducerInstructionIndex(producerIndex)
                      .setOutputNum(0))
              .setSink(
                  new Sink()
                      .setSpec(spec)
                      .setCodec(
                          CloudObjects.asCloudObject(
                              WindowedValue.getFullCoder(coder, windowCoder),
                              /* sdkComponents= */ null))));
}
/** Overload writing to the default destination stream. */
private ParallelInstruction makeSinkInstruction(
    Coder<?> coder, int producerIndex, Coder<? extends BoundedWindow> windowCoder) {
  return makeSinkInstruction(DEFAULT_DESTINATION_STREAM_ID, coder, producerIndex, windowCoder);
}
/** Overload writing to the default stream with {@link IntervalWindow} window encoding. */
private ParallelInstruction makeSinkInstruction(Coder<?> coder, int producerIndex) {
  return makeSinkInstruction(coder, producerIndex, IntervalWindow.getCoder());
}
/**
 * Builds a {@link MapTask} wrapping {@code instructions} with the default stage/system names
 * and the shared JSON factory (required for serialization of the task).
 */
private MapTask defaultMapTask(List<ParallelInstruction> instructions) {
  MapTask task = new MapTask();
  task.setStageName(DEFAULT_MAP_STAGE_NAME);
  task.setSystemName(DEFAULT_MAP_SYSTEM_NAME);
  task.setInstructions(instructions);
  task.setFactory(Transport.getJsonFactory());
  return task;
}
/**
 * Parses a text-format {@link Windmill.GetWorkResponse}. If {@code metadata} (serialized
 * window bytes) is non-null, it is attached with a NO_FIRING pane tag to every message in the
 * first bundle of the first work item.
 */
private Windmill.GetWorkResponse buildInput(String input, byte[] metadata) throws Exception {
  Windmill.GetWorkResponse.Builder builder = Windmill.GetWorkResponse.newBuilder();
  TextFormat.merge(input, builder);
  if (metadata != null) {
    // Only the first bundle of the first work item of the first computation is decorated;
    // callers relying on metadata must structure their text proto accordingly.
    Windmill.InputMessageBundle.Builder messageBundleBuilder =
        builder.getWorkBuilder(0).getWorkBuilder(0).getMessageBundlesBuilder(0);
    for (Windmill.Message.Builder messageBuilder :
        messageBundleBuilder.getMessagesBuilderList()) {
      messageBuilder.setMetadata(addPaneTag(PaneInfo.NO_FIRING, metadata));
    }
  }
  return builder.build();
}
/**
 * Builds a {@link Windmill.GetWorkResponse} with a single work item for the default key,
 * containing one message per entry of {@code inputs} (each windowed into a 10ms interval
 * starting at its timestamp) plus the given {@code timers}.
 *
 * <p>Watermarks are supplied in milliseconds and scaled to microseconds (x1000) here.
 */
private Windmill.GetWorkResponse buildSessionInput(
    int workToken,
    long inputWatermark,
    long outputWatermark,
    List<Long> inputs,
    List<Timer> timers)
    throws Exception {
  Windmill.WorkItem.Builder builder = Windmill.WorkItem.newBuilder();
  builder.setKey(DEFAULT_KEY_BYTES);
  builder.setShardingKey(DEFAULT_SHARDING_KEY);
  builder.setCacheToken(1);
  builder.setWorkToken(workToken);
  builder.setOutputDataWatermark(outputWatermark * 1000);
  if (!inputs.isEmpty()) {
    InputMessageBundle.Builder messageBuilder =
        Windmill.InputMessageBundle.newBuilder()
            .setSourceComputationId(DEFAULT_SOURCE_COMPUTATION_ID);
    for (Long input : inputs) {
      // Each input doubles as timestamp and payload index; the window metadata matches the
      // 10ms fixed windowing the tests use.
      messageBuilder.addMessages(
          Windmill.Message.newBuilder()
              .setTimestamp(input)
              .setData(ByteString.copyFromUtf8(dataStringForIndex(input)))
              .setMetadata(
                  addPaneTag(
                      PaneInfo.NO_FIRING,
                      intervalWindowBytes(
                          new IntervalWindow(
                              new Instant(input),
                              new Instant(input).plus(Duration.millis(10)))))));
    }
    builder.addMessageBundles(messageBuilder);
  }
  if (!timers.isEmpty()) {
    builder.setTimers(Windmill.TimerBundle.newBuilder().addAllTimers(timers));
  }
  return Windmill.GetWorkResponse.newBuilder()
      .addWork(
          Windmill.ComputationWorkItems.newBuilder()
              .setComputationId(DEFAULT_COMPUTATION_ID)
              .setInputDataWatermark(inputWatermark * 1000)
              .addWork(builder))
      .build();
}
/** Overload using the canonical key for {@code index} and the default sharding key. */
private Windmill.GetWorkResponse makeInput(int index, long timestamp) throws Exception {
  return makeInput(index, timestamp, keyStringForIndex(index), DEFAULT_SHARDING_KEY);
}
/**
 * Builds a single-message {@link Windmill.GetWorkResponse} where {@code index} serves as both
 * the work token and the data suffix ("data<index>"). The work item carries hot-key info aged
 * one second, cache token 3, and {@code DEFAULT_WINDOW} as serialized window metadata.
 */
private Windmill.GetWorkResponse makeInput(
    int index, long timestamp, String key, long shardingKey) throws Exception {
  return buildInput(
      "work {"
          + " computation_id: \""
          + DEFAULT_COMPUTATION_ID
          + "\""
          + " input_data_watermark: 0"
          + " work {"
          + " key: \""
          + key
          + "\""
          + " sharding_key: "
          + shardingKey
          + " work_token: "
          + index
          + " cache_token: 3"
          + " hot_key_info {"
          + " hot_key_age_usec: 1000000"
          + " }"
          + " message_bundles {"
          + " source_computation_id: \""
          + DEFAULT_SOURCE_COMPUTATION_ID
          + "\""
          + " messages {"
          + " timestamp: "
          + timestamp
          + " data: \"data"
          + index
          + "\""
          + " }"
          + " }"
          + " }"
          + "}",
      CoderUtils.encodeToByteArray(
          CollectionCoder.of(IntervalWindow.getCoder()),
          Collections.singletonList(DEFAULT_WINDOW)));
}
/**
 * Returns a {@link org.apache.beam.runners.dataflow.windmill.Windmill.WorkItemCommitRequest}
 * builder parsed from the provided text format proto.
 *
 * @throws Exception if {@code output} is not valid text-format for the message
 */
private WorkItemCommitRequest.Builder parseCommitRequest(String output) throws Exception {
  WorkItemCommitRequest.Builder builder = Windmill.WorkItemCommitRequest.newBuilder();
  TextFormat.merge(output, builder);
  return builder;
}
/**
 * Sets the metadata of all the contained messages in this WorkItemCommitRequest.
 *
 * <p>Only messages in the first bundle of the first output-message set are touched, mirroring
 * the shape produced by {@code makeExpectedOutput}. A null {@code windowBytes} is a no-op.
 */
private WorkItemCommitRequest.Builder setMessagesMetadata(
    PaneInfo pane, byte[] windowBytes, WorkItemCommitRequest.Builder builder) throws Exception {
  if (windowBytes != null) {
    KeyedMessageBundle.Builder bundles = builder.getOutputMessagesBuilder(0).getBundlesBuilder(0);
    for (int i = 0; i < bundles.getMessagesCount(); i++) {
      bundles.getMessagesBuilder(i).setMetadata(addPaneTag(pane, windowBytes));
    }
  }
  return builder;
}
/** Resets every value-update timestamp to zero so commits compare deterministically. */
private WorkItemCommitRequest.Builder setValuesTimestamps(WorkItemCommitRequest.Builder builder) {
  int updateCount = builder.getValueUpdatesCount();
  for (int index = 0; index < updateCount; index++) {
    builder.getValueUpdatesBuilder(index).getValueBuilder().setTimestamp(0);
  }
  return builder;
}
/** Overload using the canonical key for {@code index} as both input and output key. */
private WorkItemCommitRequest.Builder makeExpectedOutput(int index, long timestamp)
    throws Exception {
  return makeExpectedOutput(
      index, timestamp, keyStringForIndex(index), DEFAULT_SHARDING_KEY, keyStringForIndex(index));
}
/**
 * Builds the commit request the worker is expected to produce for {@code makeInput(index,
 * timestamp, key, shardingKey)}: the standard header fields plus one output message to
 * {@code outKey}, with NO_FIRING pane metadata for {@code DEFAULT_WINDOW}.
 */
private WorkItemCommitRequest.Builder makeExpectedOutput(
    int index, long timestamp, String key, long shardingKey, String outKey) throws Exception {
  StringBuilder expectedCommitRequestBuilder =
      initializeExpectedCommitRequest(key, shardingKey, index);
  appendCommitOutputMessages(expectedCommitRequestBuilder, index, timestamp, outKey);
  return setMessagesMetadata(
      PaneInfo.NO_FIRING,
      intervalWindowBytes(DEFAULT_WINDOW),
      parseCommitRequest(expectedCommitRequestBuilder.toString()));
}
/**
 * Strips fields that vary run-to-run (per-work-item latency attributions) so commit requests
 * can be compared with equals().
 */
private WorkItemCommitRequest removeDynamicFields(WorkItemCommitRequest request) {
  return request.toBuilder().clearPerWorkItemLatencyAttributions().build();
}
/**
 * Builds the truncated commit request the worker sends when a commit exceeds the maximum
 * allowed bytes: header fields (without source_bytes_processed) plus the truncation markers
 * carrying {@code estimatedSize}.
 */
private WorkItemCommitRequest.Builder makeExpectedTruncationRequestOutput(
    int index, String key, long shardingKey, long estimatedSize) throws Exception {
  StringBuilder expectedCommitRequestBuilder =
      initializeExpectedCommitRequest(key, shardingKey, index, false);
  appendCommitTruncationFields(expectedCommitRequestBuilder, estimatedSize);
  return parseCommitRequest(expectedCommitRequestBuilder.toString());
}
/**
 * Starts the text-format proto for an expected {@link WorkItemCommitRequest}, emitting the
 * key/sharding_key/work_token/cache_token header fields.
 *
 * @param hasSourceBytesProcessed whether to emit the {@code source_bytes_processed: 0} field;
 *     truncation commits omit it
 * @return the builder, for further appending by the caller
 */
private StringBuilder initializeExpectedCommitRequest(
    String key, long shardingKey, int index, Boolean hasSourceBytesProcessed) {
  StringBuilder requestBuilder = new StringBuilder();
  requestBuilder
      .append("key: \"")
      .append(key)
      .append("\" ")
      .append("sharding_key: ")
      .append(shardingKey)
      .append(" ")
      .append("work_token: ")
      .append(index)
      .append(" ")
      .append("cache_token: 3 ");
  // Braced per the always-brace style rule; the original unbraced single-statement `if` is
  // error-prone under future edits.
  if (hasSourceBytesProcessed) {
    requestBuilder.append("source_bytes_processed: 0 ");
  }
  return requestBuilder;
}
/** Overload that includes the {@code source_bytes_processed} field (the common case). */
private StringBuilder initializeExpectedCommitRequest(String key, long shardingKey, int index) {
  return initializeExpectedCommitRequest(key, shardingKey, index, true);
}
/**
 * Appends the {@code output_messages} section of an expected commit: one bundle for
 * {@code outKey} containing a single message with the given timestamp and the canonical data
 * string for {@code index}, plus empty metadata and message-id placeholders.
 */
private StringBuilder appendCommitOutputMessages(
    StringBuilder requestBuilder, int index, long timestamp, String outKey) {
  requestBuilder
      .append("output_messages {")
      .append(" destination_stream_id: \"")
      .append(DEFAULT_DESTINATION_STREAM_ID)
      .append("\"")
      .append(" bundles {")
      .append(" key: \"")
      .append(outKey)
      .append("\"")
      .append(" messages {")
      .append(" timestamp: ")
      .append(timestamp)
      .append(" data: \"")
      .append(dataStringForIndex(index))
      .append("\"")
      .append(" metadata: \"\"")
      .append(" }")
      .append(" messages_ids: \"\"")
      .append(" }")
      .append("}");
  return requestBuilder;
}
/**
 * Appends the truncation markers a size-limited commit carries: the exceeded flag and the
 * estimated commit size in bytes.
 */
private StringBuilder appendCommitTruncationFields(
    StringBuilder requestBuilder, long estimatedSize) {
  return requestBuilder
      .append("exceeds_max_work_item_commit_bytes: true ")
      .append("estimated_work_item_commit_bytes: ")
      .append(estimatedSize);
}
/**
 * Builds a {@link StreamingComputationConfig} for the default computation/stage/system names
 * wrapping the given instructions.
 */
private StreamingComputationConfig makeDefaultStreamingComputationConfig(
    List<ParallelInstruction> instructions) {
  StreamingComputationConfig config = new StreamingComputationConfig();
  config.setComputationId(DEFAULT_COMPUTATION_ID);
  config.setSystemName(DEFAULT_MAP_SYSTEM_NAME);
  config.setStageName(DEFAULT_MAP_STAGE_NAME);
  config.setInstructions(instructions);
  return config;
}
/**
 * Prepends the encoded pane info to {@code windowBytes}, matching the metadata layout Windmill
 * messages use (pane first, then serialized windows).
 */
private ByteString addPaneTag(PaneInfo pane, byte[] windowBytes) throws IOException {
  ByteStringOutputStream output = new ByteStringOutputStream();
  // OUTER context: the pane encoding is not length-prefixed.
  PaneInfo.PaneInfoCoder.INSTANCE.encode(pane, output, Context.OUTER);
  output.write(windowBytes);
  return output.toByteString();
}
/**
 * Builds worker options wired to the fake Windmill server, appending the streaming-engine
 * experiment when this parameterized run targets Streaming Engine. Active-work refresh is
 * disabled so tests control all server interactions explicitly.
 */
private StreamingDataflowWorkerOptions createTestingPipelineOptions(
    FakeWindmillServer server, String... args) {
  List<String> argsList = Lists.newArrayList(args);
  if (streamingEngine) {
    argsList.add("--experiments=enable_streaming_engine");
  }
  StreamingDataflowWorkerOptions options =
      PipelineOptionsFactory.fromArgs(argsList.toArray(new String[0]))
          .as(StreamingDataflowWorkerOptions.class);
  options.setAppName("StreamingWorkerHarnessTest");
  options.setJobId("test_job_id");
  options.setStreaming(true);
  options.setWindmillServerStub(server);
  options.setActiveWorkRefreshPeriodMillis(0);
  return options;
}
/**
 * Constructs a {@link StreamingDataflowWorker} over a single map task built from
 * {@code instructions}, with an injectable clock and executor factory, and registers the
 * default ParDo state-family mapping.
 */
private StreamingDataflowWorker makeWorker(
    List<ParallelInstruction> instructions,
    StreamingDataflowWorkerOptions options,
    boolean publishCounters,
    Supplier<Instant> clock,
    Function<String, ScheduledExecutorService> executorSupplier)
    throws Exception {
  StreamingDataflowWorker worker =
      new StreamingDataflowWorker(
          Collections.singletonList(defaultMapTask(instructions)),
          IntrinsicMapTaskExecutorFactory.defaultFactory(),
          mockWorkUnitClient,
          options,
          publishCounters,
          hotKeyLogger,
          clock,
          executorSupplier);
  // State reads/writes from the default ParDo must resolve to a known state family.
  worker.addStateNameMappings(
      ImmutableMap.of(DEFAULT_PARDO_USER_NAME, DEFAULT_PARDO_STATE_FAMILY));
  return worker;
}
/** Overload using the system clock and a fresh single-thread scheduled executor per name. */
private StreamingDataflowWorker makeWorker(
    List<ParallelInstruction> instructions,
    StreamingDataflowWorkerOptions options,
    boolean publishCounters)
    throws Exception {
  return makeWorker(
      instructions,
      options,
      publishCounters,
      Instant::now,
      (threadName) -> Executors.newSingleThreadScheduledExecutor());
}
/**
 * End-to-end smoke test: a source-to-sink pipeline processes 2000 work items, and each commit
 * matches the expected pass-through output. Also verifies hot-key detection is logged (inputs
 * carry hot_key_info).
 */
@Test
public void testBasicHarness() throws Exception {
  List<ParallelInstruction> instructions =
      Arrays.asList(
          makeSourceInstruction(StringUtf8Coder.of()),
          makeSinkInstruction(StringUtf8Coder.of(), 0));

  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server);
  StreamingDataflowWorker worker = makeWorker(instructions, options, true /* publishCounters */);
  worker.start();

  final int numIters = 2000;
  for (int i = 0; i < numIters; ++i) {
    server.whenGetWorkCalled().thenReturn(makeInput(i, TimeUnit.MILLISECONDS.toMicros(i)));
  }

  Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(numIters);
  worker.stop();

  for (int i = 0; i < numIters; ++i) {
    assertTrue(result.containsKey((long) i));
    // Dynamic (latency) fields are stripped before comparing.
    assertEquals(
        makeExpectedOutput(i, TimeUnit.MILLISECONDS.toMicros(i)).build(),
        removeDynamicFields(result.get((long) i)));
  }

  verify(hotKeyLogger, atLeastOnce()).logHotKeyDetection(nullable(String.class), any());
}
/**
 * Same pass-through pipeline as {@code testBasicHarness}, but the worker starts before the
 * server is ready and obtains its streaming config from the global config work item, verifying
 * config-driven startup.
 */
@Test
public void testBasic() throws Exception {
  List<ParallelInstruction> instructions =
      Arrays.asList(
          makeSourceInstruction(StringUtf8Coder.of()),
          makeSinkInstruction(StringUtf8Coder.of(), 0));

  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  // Simulate the windmill service not yet being reachable at startup.
  server.setIsReady(false);

  StreamingConfigTask streamingConfig = new StreamingConfigTask();
  streamingConfig.setStreamingComputationConfigs(
      ImmutableList.of(makeDefaultStreamingComputationConfig(instructions)));
  streamingConfig.setWindmillServiceEndpoint("foo");
  WorkItem workItem = new WorkItem();
  workItem.setStreamingConfigTask(streamingConfig);
  when(mockWorkUnitClient.getGlobalStreamingConfigWorkItem()).thenReturn(Optional.of(workItem));

  StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server);
  StreamingDataflowWorker worker = makeWorker(instructions, options, true /* publishCounters */);
  worker.start();

  final int numIters = 2000;
  for (int i = 0; i < numIters; ++i) {
    server.whenGetWorkCalled().thenReturn(makeInput(i, TimeUnit.MILLISECONDS.toMicros(i)));
  }

  Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(numIters);
  worker.stop();

  for (int i = 0; i < numIters; ++i) {
    assertTrue(result.containsKey((long) i));
    assertEquals(
        makeExpectedOutput(i, TimeUnit.MILLISECONDS.toMicros(i)).build(),
        removeDynamicFields(result.get((long) i)));
  }

  verify(hotKeyLogger, atLeastOnce()).logHotKeyDetection(nullable(String.class), any());
}
/**
 * With {@code --hotKeyLoggingEnabled=true}, hot-key detection is logged WITH the literal key
 * value (the three-argument logHotKeyDetection overload).
 */
@Test
public void testHotKeyLogging() throws Exception {
  // This is to test that the worker can correctly log the key from a hot key.
  List<ParallelInstruction> instructions =
      Arrays.asList(
          makeSourceInstruction(KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of())),
          makeSinkInstruction(KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of()), 0));

  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  server.setIsReady(false);

  StreamingConfigTask streamingConfig = new StreamingConfigTask();
  streamingConfig.setStreamingComputationConfigs(
      ImmutableList.of(makeDefaultStreamingComputationConfig(instructions)));
  streamingConfig.setWindmillServiceEndpoint("foo");
  WorkItem workItem = new WorkItem();
  workItem.setStreamingConfigTask(streamingConfig);
  when(mockWorkUnitClient.getGlobalStreamingConfigWorkItem()).thenReturn(Optional.of(workItem));

  StreamingDataflowWorkerOptions options =
      createTestingPipelineOptions(server, "--hotKeyLoggingEnabled=true");
  StreamingDataflowWorker worker = makeWorker(instructions, options, true /* publishCounters */);
  worker.start();

  final int numIters = 2000;
  for (int i = 0; i < numIters; ++i) {
    // All inputs share the key "key" so hot-key detection fires.
    server
        .whenGetWorkCalled()
        .thenReturn(makeInput(i, TimeUnit.MILLISECONDS.toMicros(i), "key", DEFAULT_SHARDING_KEY));
  }

  server.waitForAndGetCommits(numIters);
  worker.stop();

  verify(hotKeyLogger, atLeastOnce())
      .logHotKeyDetection(nullable(String.class), any(), eq("key"));
}
/**
 * Without hot-key logging enabled, detection is still reported but via the two-argument
 * overload, i.e. the key value itself is NOT logged.
 */
@Test
public void testHotKeyLoggingNotEnabled() throws Exception {
  List<ParallelInstruction> instructions =
      Arrays.asList(
          makeSourceInstruction(KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of())),
          makeSinkInstruction(KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of()), 0));

  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  server.setIsReady(false);

  StreamingConfigTask streamingConfig = new StreamingConfigTask();
  streamingConfig.setStreamingComputationConfigs(
      ImmutableList.of(makeDefaultStreamingComputationConfig(instructions)));
  streamingConfig.setWindmillServiceEndpoint("foo");
  WorkItem workItem = new WorkItem();
  workItem.setStreamingConfigTask(streamingConfig);
  when(mockWorkUnitClient.getGlobalStreamingConfigWorkItem()).thenReturn(Optional.of(workItem));

  StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server);
  StreamingDataflowWorker worker = makeWorker(instructions, options, true /* publishCounters */);
  worker.start();

  final int numIters = 2000;
  for (int i = 0; i < numIters; ++i) {
    server
        .whenGetWorkCalled()
        .thenReturn(makeInput(i, TimeUnit.MILLISECONDS.toMicros(i), "key", DEFAULT_SHARDING_KEY));
  }

  server.waitForAndGetCommits(numIters);
  worker.stop();

  verify(hotKeyLogger, atLeastOnce()).logHotKeyDetection(nullable(String.class), any());
}
/**
 * Verifies that while work for a (key, sharding key) pair is in flight or queued, a retry of
 * the same work token is ignored rather than processed twice, while work for the same key with
 * a DIFFERENT sharding key proceeds independently. Uses {@link BlockingFn} to hold the first
 * batch in-flight while retries arrive.
 */
@Test
public void testIgnoreRetriedKeys() throws Exception {
  final int numIters = 4;
  List<ParallelInstruction> instructions =
      Arrays.asList(
          makeSourceInstruction(StringUtf8Coder.of()),
          makeDoFnInstruction(blockingFn, 0, StringUtf8Coder.of()),
          makeSinkInstruction(StringUtf8Coder.of(), 0));

  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server);
  StreamingDataflowWorker worker = makeWorker(instructions, options, true /* publishCounters */);
  worker.start();

  // First batch: token i on the default sharding key, plus token i+1000 on sharding key +1.
  // All of these block inside BlockingFn.
  for (int i = 0; i < numIters; ++i) {
    server
        .whenGetWorkCalled()
        .thenReturn(
            makeInput(
                i, TimeUnit.MILLISECONDS.toMicros(i), keyStringForIndex(i), DEFAULT_SHARDING_KEY))
        .thenReturn(
            makeInput(
                i + 1000,
                TimeUnit.MILLISECONDS.toMicros(i),
                keyStringForIndex(i),
                DEFAULT_SHARDING_KEY + 1));
  }
  // Wait for all 2 * numIters work items to be actively blocked in the DoFn.
  BlockingFn.counter.acquire(numIters * 2);

  // Exact retries of the in-flight tokens: these must be ignored (no duplicate processing).
  for (int i = 0; i < numIters; ++i) {
    server
        .whenGetWorkCalled()
        .thenReturn(makeInput(i, TimeUnit.MILLISECONDS.toMicros(i)))
        .thenReturn(
            makeInput(
                i + 1000,
                TimeUnit.MILLISECONDS.toMicros(i),
                keyStringForIndex(i),
                DEFAULT_SHARDING_KEY + 1));
  }
  server.waitForEmptyWorkQueue();

  // New tokens (i + numIters) on keys that are still busy: these queue behind the blocked work.
  for (int i = 0; i < numIters; ++i) {
    server
        .whenGetWorkCalled()
        .thenReturn(
            makeInput(
                i + numIters,
                TimeUnit.MILLISECONDS.toMicros(i),
                keyStringForIndex(i),
                DEFAULT_SHARDING_KEY));
  }
  server.waitForEmptyWorkQueue();

  // Release the blocked DoFns; the original work plus the queued follow-ups should commit,
  // but NOT the ignored retries — hence numIters * 3 commits.
  BlockingFn.blocker.countDown();
  Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(numIters * 3);

  for (int i = 0; i < numIters; ++i) {
    assertTrue(result.containsKey((long) i));
    assertEquals(
        makeExpectedOutput(i, TimeUnit.MILLISECONDS.toMicros(i)).build(),
        removeDynamicFields(result.get((long) i)));
    assertTrue(result.containsKey((long) i + 1000));
    assertEquals(
        makeExpectedOutput(
                i + 1000,
                TimeUnit.MILLISECONDS.toMicros(i),
                keyStringForIndex(i),
                DEFAULT_SHARDING_KEY + 1,
                keyStringForIndex(i))
            .build(),
        removeDynamicFields(result.get((long) i + 1000)));
    assertTrue(result.containsKey((long) i + numIters));
    assertEquals(
        makeExpectedOutput(
                i + numIters,
                TimeUnit.MILLISECONDS.toMicros(i),
                keyStringForIndex(i),
                DEFAULT_SHARDING_KEY,
                keyStringForIndex(i))
            .build(),
        removeDynamicFields(result.get((long) i + numIters)));
  }

  // With no work in flight, fresh tokens on the same keys are processed normally.
  for (int i = 0; i < numIters; ++i) {
    server
        .whenGetWorkCalled()
        .thenReturn(
            makeInput(
                i + numIters * 2,
                TimeUnit.MILLISECONDS.toMicros(i),
                keyStringForIndex(i),
                DEFAULT_SHARDING_KEY));
  }
  result = server.waitForAndGetCommits(numIters);
  worker.stop();
  for (int i = 0; i < numIters; ++i) {
    assertTrue(result.containsKey((long) i + numIters * 2));
    assertEquals(
        makeExpectedOutput(
                i + numIters * 2,
                TimeUnit.MILLISECONDS.toMicros(i),
                keyStringForIndex(i),
                DEFAULT_SHARDING_KEY,
                keyStringForIndex(i))
            .build(),
        removeDynamicFields(result.get((long) i + numIters * 2)));
  }
}
/**
 * Verifies the harness processes at most {@code numberOfWorkerHarnessThreads} work items
 * concurrently: with twice that many items queued and a blocking DoFn, exactly the configured
 * number of permits are released, and no extra item enters processing within the grace period.
 */
@Test(timeout = 10000)
public void testNumberOfWorkerHarnessThreadsIsHonored() throws Exception {
  int expectedNumberOfThreads = 5;
  List<ParallelInstruction> instructions =
      Arrays.asList(
          makeSourceInstruction(StringUtf8Coder.of()),
          makeDoFnInstruction(blockingFn, 0, StringUtf8Coder.of()),
          makeSinkInstruction(StringUtf8Coder.of(), 0));

  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server);
  options.setNumberOfWorkerHarnessThreads(expectedNumberOfThreads);
  StreamingDataflowWorker worker = makeWorker(instructions, options, true /* publishCounters */);
  worker.start();

  // Queue more work than there are threads; only expectedNumberOfThreads can block at once.
  for (int i = 0; i < expectedNumberOfThreads * 2; ++i) {
    server.whenGetWorkCalled().thenReturn(makeInput(i, TimeUnit.MILLISECONDS.toMicros(i)));
  }

  BlockingFn.counter.acquire(expectedNumberOfThreads);

  // A further acquire succeeding would mean a sixth item started processing — a violation.
  if (BlockingFn.counter.tryAcquire(500, TimeUnit.MILLISECONDS)) {
    fail(
        "Expected number of threads "
            + expectedNumberOfThreads
            + " does not match actual "
            + "number of work items processed concurrently "
            + BlockingFn.callCounter.get()
            + ".");
  }

  BlockingFn.blocker.countDown();
}
/**
 * A work item whose processing throws KeyTokenInvalidException is dropped without a commit;
 * the next token for the same key processes normally. Appliance-only (returns early on
 * Streaming Engine).
 */
@Test
public void testKeyTokenInvalidException() throws Exception {
  if (streamingEngine) {
    // TODO: This test needs to be adapted to work with streamingEngine=true.
    return;
  }
  KvCoder<String, String> kvCoder = KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of());

  List<ParallelInstruction> instructions =
      Arrays.asList(
          makeSourceInstruction(kvCoder),
          makeDoFnInstruction(new KeyTokenInvalidFn(), 0, kvCoder),
          makeSinkInstruction(kvCoder, 1));

  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  server
      .whenGetWorkCalled()
      .thenReturn(makeInput(0, 0, DEFAULT_KEY_STRING, DEFAULT_SHARDING_KEY));
  StreamingDataflowWorker worker =
      makeWorker(instructions, createTestingPipelineOptions(server), true /* publishCounters */);
  worker.start();
  server.waitForEmptyWorkQueue();

  server
      .whenGetWorkCalled()
      .thenReturn(makeInput(1, 0, DEFAULT_KEY_STRING, DEFAULT_SHARDING_KEY));

  Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1);

  // Only token 1 commits; token 0 (which threw) produced no commit.
  assertEquals(
      makeExpectedOutput(1, 0, DEFAULT_KEY_STRING, DEFAULT_SHARDING_KEY, DEFAULT_KEY_STRING)
          .build(),
      removeDynamicFields(result.get(1L)));
  assertEquals(1, result.size());
}
/**
 * A commit exceeding maxWorkItemCommitBytes is replaced by a truncated request carrying the
 * estimated size, a normal-sized commit on another key is unaffected, and exactly one
 * KeyCommitTooLargeException is reported through work item status updates.
 */
@Test
public void testKeyCommitTooLargeException() throws Exception {
  KvCoder<String, String> kvCoder = KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of());

  List<ParallelInstruction> instructions =
      Arrays.asList(
          makeSourceInstruction(kvCoder),
          makeDoFnInstruction(new LargeCommitFn(), 0, kvCoder),
          makeSinkInstruction(kvCoder, 1));

  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  server.setExpectedExceptionCount(1);

  StreamingDataflowWorker worker =
      makeWorker(instructions, createTestingPipelineOptions(server), true /* publishCounters */);
  worker.setMaxWorkItemCommitBytes(1000);
  worker.start();

  server
      .whenGetWorkCalled()
      .thenReturn(makeInput(1, 0, "large_key", DEFAULT_SHARDING_KEY))
      .thenReturn(makeInput(2, 0, "key", DEFAULT_SHARDING_KEY));
  server.waitForEmptyWorkQueue();

  Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1);

  assertEquals(2, result.size());
  assertEquals(
      makeExpectedOutput(2, 0, "key", DEFAULT_SHARDING_KEY, "key").build(),
      removeDynamicFields(result.get(2L)));

  assertTrue(result.containsKey(1L));
  WorkItemCommitRequest largeCommit = result.get(1L);
  assertEquals("large_key", largeCommit.getKey().toStringUtf8());
  // The oversized commit is replaced with the truncation-marker request.
  assertEquals(
      makeExpectedTruncationRequestOutput(
              1, "large_key", DEFAULT_SHARDING_KEY, largeCommit.getEstimatedWorkItemCommitBytes())
          .build(),
      largeCommit);

  // Estimated size of the original commit must exceed the configured 1000-byte limit.
  assertTrue(largeCommit.getEstimatedWorkItemCommitBytes() > 1000);

  // Pump periodic updates so the exception is flushed to the work unit client.
  int maxTries = 10;
  while (--maxTries > 0) {
    worker.reportPeriodicWorkerUpdates();
    Uninterruptibles.sleepUninterruptibly(1000, TimeUnit.MILLISECONDS);
  }

  // Exactly one status update should carry the KeyCommitTooLargeException error.
  ArgumentCaptor<WorkItemStatus> workItemStatusCaptor =
      ArgumentCaptor.forClass(WorkItemStatus.class);
  verify(mockWorkUnitClient, atLeast(2)).reportWorkItemStatus(workItemStatusCaptor.capture());
  List<WorkItemStatus> capturedStatuses = workItemStatusCaptor.getAllValues();
  boolean foundErrors = false;
  for (WorkItemStatus status : capturedStatuses) {
    if (!status.getErrors().isEmpty()) {
      assertFalse(foundErrors);
      foundErrors = true;
      String errorMessage = status.getErrors().get(0).getMessage();
      assertThat(errorMessage, Matchers.containsString("KeyCommitTooLargeException"));
    }
  }
  assertTrue(foundErrors);
}
/**
 * A DoFn that changes the output key ({@link ChangeKeysFn}) still commits under the INPUT key
 * and sharding key, with the transformed key appearing only inside the output bundle.
 */
@Test
public void testKeyChange() throws Exception {
  KvCoder<String, String> kvCoder = KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of());

  List<ParallelInstruction> instructions =
      Arrays.asList(
          makeSourceInstruction(kvCoder),
          makeDoFnInstruction(new ChangeKeysFn(), 0, kvCoder),
          makeSinkInstruction(kvCoder, 1));

  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  // Same key on two different sharding keys to confirm each commits independently.
  for (int i = 0; i < 2; i++) {
    server
        .whenGetWorkCalled()
        .thenReturn(
            makeInput(
                i, TimeUnit.MILLISECONDS.toMicros(i), keyStringForIndex(i), DEFAULT_SHARDING_KEY))
        .thenReturn(
            makeInput(
                i + 1000,
                TimeUnit.MILLISECONDS.toMicros(i),
                keyStringForIndex(i),
                DEFAULT_SHARDING_KEY + i));
  }

  StreamingDataflowWorker worker =
      makeWorker(instructions, createTestingPipelineOptions(server), true /* publishCounters */);
  worker.start();

  Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(4);

  for (int i = 0; i < 2; i++) {
    assertTrue(result.containsKey((long) i));
    // ChangeKeysFn appends "_data<index>" to the key in the emitted element.
    assertEquals(
        makeExpectedOutput(
                i,
                TimeUnit.MILLISECONDS.toMicros(i),
                keyStringForIndex(i),
                DEFAULT_SHARDING_KEY,
                keyStringForIndex(i) + "_data" + i)
            .build(),
        removeDynamicFields(result.get((long) i)));
    assertTrue(result.containsKey((long) i + 1000));
    assertEquals(
        makeExpectedOutput(
                i + 1000,
                TimeUnit.MILLISECONDS.toMicros(i),
                keyStringForIndex(i),
                DEFAULT_SHARDING_KEY + i,
                keyStringForIndex(i) + "_data" + (i + 1000))
            .build(),
        removeDynamicFields(result.get((long) i + 1000)));
  }
}
/**
 * A DoFn that always throws causes the work item to be retried (never committed), the full
 * chained stack trace to be reported exactly once via work item status, and processing
 * failures to be reported to Windmill via ReportStats. Appliance-only (returns early on
 * Streaming Engine).
 */
@Test(timeout = 30000)
public void testExceptions() throws Exception {
  if (streamingEngine) {
    // TODO: This test needs to be adapted to work with streamingEngine=true.
    return;
  }
  List<ParallelInstruction> instructions =
      Arrays.asList(
          makeSourceInstruction(StringUtf8Coder.of()),
          makeDoFnInstruction(new TestExceptionFn(), 0, StringUtf8Coder.of()),
          makeSinkInstruction(StringUtf8Coder.of(), 1));

  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  server.setExpectedExceptionCount(1);
  String keyString = keyStringForIndex(0);
  server
      .whenGetWorkCalled()
      .thenReturn(
          buildInput(
              "work {"
                  + " computation_id: \""
                  + DEFAULT_COMPUTATION_ID
                  + "\""
                  + " input_data_watermark: 0"
                  + " work {"
                  + " key: \""
                  + keyString
                  + "\""
                  + " sharding_key: 1"
                  + " work_token: 0"
                  + " cache_token: 1"
                  + " message_bundles {"
                  + " source_computation_id: \""
                  + DEFAULT_SOURCE_COMPUTATION_ID
                  + "\""
                  + " messages {"
                  + " timestamp: 0"
                  + " data: \"0\""
                  + " }"
                  + " }"
                  + " }"
                  + "}",
              CoderUtils.encodeToByteArray(
                  CollectionCoder.of(IntervalWindow.getCoder()),
                  Collections.singletonList(DEFAULT_WINDOW))));

  StreamingDataflowWorker worker =
      makeWorker(instructions, createTestingPipelineOptions(server), true /* publishCounters */);
  worker.start();
  server.waitForEmptyWorkQueue();

  // Wait for the failing work to drain out of the executor (it is retried, not committed).
  int maxTries = 10;
  while (maxTries-- > 0 && !worker.workExecutorIsEmpty()) {
    Uninterruptibles.sleepUninterruptibly(1000, TimeUnit.MILLISECONDS);
  }
  assertTrue(worker.workExecutorIsEmpty());

  // Pump periodic updates so the error reaches the work unit client.
  maxTries = 10;
  while (maxTries-- > 0) {
    worker.reportPeriodicWorkerUpdates();
    Uninterruptibles.sleepUninterruptibly(1000, TimeUnit.MILLISECONDS);
  }

  ArgumentCaptor<WorkItemStatus> workItemStatusCaptor =
      ArgumentCaptor.forClass(WorkItemStatus.class);
  verify(mockWorkUnitClient, atLeast(1)).reportWorkItemStatus(workItemStatusCaptor.capture());
  List<WorkItemStatus> capturedStatuses = workItemStatusCaptor.getAllValues();
  boolean foundErrors = false;
  int lastUpdateWithoutErrors = 0;
  int lastUpdateWithErrors = 0;
  for (WorkItemStatus status : capturedStatuses) {
    if (status.getErrors().isEmpty()) {
      lastUpdateWithoutErrors++;
      continue;
    }
    lastUpdateWithErrors++;
    // The error must be reported exactly once, with both chained exception messages and the
    // processElement frame in the stack trace.
    assertFalse(foundErrors);
    foundErrors = true;
    String stacktrace = status.getErrors().get(0).getMessage();
    assertThat(stacktrace, Matchers.containsString("Exception!"));
    assertThat(stacktrace, Matchers.containsString("Another exception!"));
    assertThat(stacktrace, Matchers.containsString("processElement"));
  }
  assertTrue(foundErrors);
  // Error-free updates continue after the single error report.
  assertTrue(lastUpdateWithoutErrors > lastUpdateWithErrors);

  // The failure is also reported to Windmill so it can re-offer the work.
  assertThat(server.getStatsReceived().size(), Matchers.greaterThanOrEqualTo(1));
  Windmill.ReportStatsRequest stats = server.getStatsReceived().get(0);
  assertEquals(DEFAULT_COMPUTATION_ID, stats.getComputationId());
  assertEquals(keyString, stats.getKey().toStringUtf8());
  assertEquals(0, stats.getWorkToken());
  assertEquals(1, stats.getShardingKey());
}
/**
 * An AssignWindowsDoFn with one-second fixed windows routes elements at timestamps 0 and 1s
 * into their respective windows; commits carry the matching serialized window metadata.
 */
@Test
public void testAssignWindows() throws Exception {
  Duration gapDuration = Duration.standardSeconds(1);
  CloudObject spec = CloudObject.forClassName("AssignWindowsDoFn");
  SdkComponents sdkComponents = SdkComponents.create();
  sdkComponents.registerEnvironment(Environments.JAVA_SDK_HARNESS_ENVIRONMENT);
  // The windowing strategy travels as a serialized runner-API proto.
  addString(
      spec,
      PropertyNames.SERIALIZED_FN,
      StringUtils.byteArrayToJsonString(
          WindowingStrategyTranslation.toMessageProto(
                  WindowingStrategy.of(FixedWindows.of(gapDuration)), sdkComponents)
              .toByteArray()));

  ParallelInstruction addWindowsInstruction =
      new ParallelInstruction()
          .setSystemName("AssignWindows")
          .setName("AssignWindows")
          .setOriginalName("AssignWindowsOriginal")
          .setParDo(
              new ParDoInstruction()
                  .setInput(new InstructionInput().setProducerInstructionIndex(0).setOutputNum(0))
                  .setNumOutputs(1)
                  .setUserFn(spec))
          .setOutputs(
              Collections.singletonList(
                  new InstructionOutput()
                      .setOriginalName(DEFAULT_OUTPUT_ORIGINAL_NAME)
                      .setSystemName(DEFAULT_OUTPUT_SYSTEM_NAME)
                      .setName("output")
                      .setCodec(
                          CloudObjects.asCloudObject(
                              WindowedValue.getFullCoder(
                                  StringUtf8Coder.of(), IntervalWindow.getCoder()),
                              /* sdkComponents= */ null))));

  List<ParallelInstruction> instructions =
      Arrays.asList(
          makeSourceInstruction(StringUtf8Coder.of()),
          addWindowsInstruction,
          makeSinkInstruction(StringUtf8Coder.of(), 1));

  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  // Timestamps in microseconds: 0 falls in the first window, 1000000 (= 1s) in the second.
  int timestamp1 = 0;
  int timestamp2 = 1000000;

  server
      .whenGetWorkCalled()
      .thenReturn(makeInput(timestamp1, timestamp1))
      .thenReturn(makeInput(timestamp2, timestamp2));

  StreamingDataflowWorker worker =
      makeWorker(instructions, createTestingPipelineOptions(server), false /* publishCounters */);
  worker.start();

  Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(2);

  assertThat(
      removeDynamicFields(result.get((long) timestamp1)),
      equalTo(
          setMessagesMetadata(
                  PaneInfo.NO_FIRING,
                  intervalWindowBytes(WINDOW_AT_ZERO),
                  makeExpectedOutput(timestamp1, timestamp1))
              .build()));

  assertThat(
      removeDynamicFields(result.get((long) timestamp2)),
      equalTo(
          setMessagesMetadata(
                  PaneInfo.NO_FIRING,
                  intervalWindowBytes(WINDOW_AT_ONE_SECOND),
                  makeExpectedOutput(timestamp2, timestamp2))
              .build()));
}
/** Asserts that {@code commit} contains exactly the given timers, in any order. */
private void verifyTimers(WorkItemCommitRequest commit, Timer... timers) {
  List<Timer> actualTimers = commit.getOutputTimersList();
  assertThat(actualTimers, Matchers.containsInAnyOrder(timers));
}
/** Asserts that {@code commit} contains exactly the given watermark holds, in any order. */
private void verifyHolds(WorkItemCommitRequest commit, WatermarkHold... watermarkHolds) {
  List<WatermarkHold> actualHolds = commit.getWatermarkHoldsList();
  assertThat(actualHolds, Matchers.containsInAnyOrder(watermarkHolds));
}
/** Builds a non-deletion watermark timer; see the three-argument overload. */
private Timer buildWatermarkTimer(String tagPrefix, long timestampMillis) {
  return buildWatermarkTimer(tagPrefix, timestampMillis, /* delete= */ false);
}
/**
 * Builds a WATERMARK timer whose tag is {@code tagPrefix + ":" + timestampMillis} in the
 * "MergeWindows" state family. When {@code delete} is true the timer carries no timestamps,
 * which Windmill interprets as a timer deletion.
 */
private Timer buildWatermarkTimer(String tagPrefix, long timestampMillis, boolean delete) {
  Timer.Builder timer =
      Timer.newBuilder()
          .setTag(ByteString.copyFromUtf8(tagPrefix + ":" + timestampMillis))
          .setType(Type.WATERMARK)
          .setStateFamily("MergeWindows");
  if (delete) {
    return timer.build();
  }
  // Windmill timestamps are in microseconds; the inputs here are milliseconds.
  timer.setTimestamp(timestampMillis * 1000);
  timer.setMetadataTimestamp(timestampMillis * 1000);
  return timer.build();
}
/**
 * Builds a watermark hold for the "MergeWindows" state family. A negative {@code timestamp}
 * produces a hold with no timestamps (a clear); {@code reset} marks the hold as replacing any
 * existing hold for the tag.
 */
private WatermarkHold buildHold(String tag, long timestamp, boolean reset) {
  WatermarkHold.Builder hold =
      WatermarkHold.newBuilder()
          .setTag(ByteString.copyFromUtf8(tag))
          .setStateFamily("MergeWindows");
  if (reset) {
    hold.setReset(true);
  }
  if (timestamp >= 0) {
    // Windmill timestamps are in microseconds; the input here is milliseconds.
    hold.addTimestamps(timestamp * 1000);
  }
  return hold.build();
}
// End-to-end test of the MergeWindowsDoFn (GroupAlsoByWindow) path: the first
// work item delivers a single element, which must be buffered in a tag bag with
// a watermark hold and an end-of-window timer; the second work item fires that
// timer, which must emit the grouped output and clear all per-window state.
// Also verifies the Windmill state-bytes-read/written and shuffle-bytes-read
// counters at each step.
@Test
public void testMergeWindows() throws Exception {
Coder<KV<String, String>> kvCoder = KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of());
Coder<WindowedValue<KV<String, String>>> windowedKvCoder =
FullWindowedValueCoder.of(kvCoder, IntervalWindow.getCoder());
KvCoder<String, List<String>> groupedCoder =
KvCoder.of(StringUtf8Coder.of(), ListCoder.of(StringUtf8Coder.of()));
Coder<WindowedValue<KV<String, List<String>>>> windowedGroupedCoder =
FullWindowedValueCoder.of(groupedCoder, IntervalWindow.getCoder());
// Serialized MergeWindowsDoFn spec: 1-second fixed windows, EARLIEST timestamp
// combiner.
CloudObject spec = CloudObject.forClassName("MergeWindowsDoFn");
SdkComponents sdkComponents = SdkComponents.create();
sdkComponents.registerEnvironment(Environments.JAVA_SDK_HARNESS_ENVIRONMENT);
addString(
spec,
PropertyNames.SERIALIZED_FN,
StringUtils.byteArrayToJsonString(
WindowingStrategyTranslation.toMessageProto(
WindowingStrategy.of(FixedWindows.of(Duration.standardSeconds(1)))
.withTimestampCombiner(TimestampCombiner.EARLIEST),
sdkComponents)
.toByteArray()));
addObject(
spec,
WorkerPropertyNames.INPUT_CODER,
CloudObjects.asCloudObject(windowedKvCoder, /* sdkComponents= */ null));
ParallelInstruction mergeWindowsInstruction =
new ParallelInstruction()
.setSystemName("MergeWindows-System")
.setName("MergeWindowsStep")
.setOriginalName("MergeWindowsOriginal")
.setParDo(
new ParDoInstruction()
.setInput(new InstructionInput().setProducerInstructionIndex(0).setOutputNum(0))
.setNumOutputs(1)
.setUserFn(spec))
.setOutputs(
Collections.singletonList(
new InstructionOutput()
.setOriginalName(DEFAULT_OUTPUT_ORIGINAL_NAME)
.setSystemName(DEFAULT_OUTPUT_SYSTEM_NAME)
.setName("output")
.setCodec(
CloudObjects.asCloudObject(
windowedGroupedCoder, /* sdkComponents= */ null))));
List<ParallelInstruction> instructions =
Arrays.asList(
makeWindowingSourceInstruction(kvCoder),
mergeWindowsInstruction,
makeSinkInstruction(groupedCoder, 1));
FakeWindmillServer server = new FakeWindmillServer(errorCollector);
StreamingDataflowWorker worker =
makeWorker(instructions, createTestingPipelineOptions(server), false /* publishCounters */);
// Map the user step name to the Windmill state family used by assertions below.
Map<String, String> nameMap = new HashMap<>();
nameMap.put("MergeWindowsStep", "MergeWindows");
worker.addStateNameMappings(nameMap);
worker.start();
// Phase 1: deliver one message at timestamp 0 in WINDOW_AT_ZERO.
server
.whenGetWorkCalled()
.thenReturn(
buildInput(
"work {"
+ " computation_id: \""
+ DEFAULT_COMPUTATION_ID
+ "\""
+ " input_data_watermark: 0"
+ " work {"
+ " key: \""
+ DEFAULT_KEY_STRING
+ "\""
+ " sharding_key: "
+ DEFAULT_SHARDING_KEY
+ " cache_token: 1"
+ " work_token: 1"
+ " message_bundles {"
+ " source_computation_id: \""
+ DEFAULT_SOURCE_COMPUTATION_ID
+ "\""
+ " messages {"
+ " timestamp: 0"
+ " data: \""
+ dataStringForIndex(0)
+ "\""
+ " }"
+ " }"
+ " }"
+ "}",
intervalWindowBytes(WINDOW_AT_ZERO)));
Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1);
Iterable<CounterUpdate> counters = worker.buildCounters();
// State tags for the single window; "window" is the encoded interval window.
String window = "/gAAAAAAAA-joBw/";
String timerTagPrefix = "/s" + window + "+0";
ByteString bufferTag = ByteString.copyFromUtf8(window + "+ubuf");
ByteString paneInfoTag = ByteString.copyFromUtf8(window + "+upane");
String watermarkDataHoldTag = window + "+uhold";
String watermarkExtraHoldTag = window + "+uextra";
String stateFamily = "MergeWindows";
ByteString bufferData = ByteString.copyFromUtf8("data0");
// Encoded grouped output value ("data0" as a singleton iterable).
ByteString outputData =
ByteString.copyFrom(
new byte[] {
(byte) 0xff,
(byte) 0xff,
(byte) 0xff,
(byte) 0xff,
0x01,
0x05,
0x64,
0x61,
0x74,
0x61,
0x30,
0x00
});
// Timer fires at the window end (999ms, in microseconds below).
long timerTimestamp = 999000L;
WorkItemCommitRequest actualOutput = result.get(1L);
// Phase-1 commit: element buffered, end-of-window timer set, hold placed.
verifyTimers(actualOutput, buildWatermarkTimer(timerTagPrefix, 999));
assertThat(
actualOutput.getBagUpdatesList(),
Matchers.contains(
Matchers.equalTo(
Windmill.TagBag.newBuilder()
.setTag(bufferTag)
.setStateFamily(stateFamily)
.addValues(bufferData)
.build())));
verifyHolds(actualOutput, buildHold(watermarkDataHoldTag, 0, false));
// No state read yet; bytes written equals the commit's own serialized size
// (minus the dynamic fields cleared below).
assertEquals(0L, splitIntToLong(getCounter(counters, "WindmillStateBytesRead").getInteger()));
assertEquals(
Windmill.WorkItemCommitRequest.newBuilder(actualOutput)
.clearCounterUpdates()
.clearOutputMessages()
.clearPerWorkItemLatencyAttributions()
.build()
.getSerializedSize(),
splitIntToLong(getCounter(counters, "WindmillStateBytesWritten").getInteger()));
// Shuffle bytes: varint timestamp + data + pane/window metadata + 5 bytes of
// framing overhead.
assertEquals(
VarInt.getLength(0L)
+ dataStringForIndex(0).length()
+ addPaneTag(PaneInfo.NO_FIRING, intervalWindowBytes(WINDOW_AT_ZERO)).size()
+ 5L
,
splitIntToLong(getCounter(counters, "WindmillShuffleBytesRead").getInteger()));
// Phase 2: advance the watermark past the window end and fire the timer.
Windmill.GetWorkResponse.Builder getWorkResponse = Windmill.GetWorkResponse.newBuilder();
getWorkResponse
.addWorkBuilder()
.setComputationId(DEFAULT_COMPUTATION_ID)
.setInputDataWatermark(timerTimestamp + 1000)
.addWorkBuilder()
.setKey(ByteString.copyFromUtf8(DEFAULT_KEY_STRING))
.setShardingKey(DEFAULT_SHARDING_KEY)
.setWorkToken(2)
.setCacheToken(1)
.getTimersBuilder()
.addTimers(buildWatermarkTimer(timerTagPrefix, timerTimestamp));
server.whenGetWorkCalled().thenReturn(getWorkResponse.build());
long expectedBytesRead = 0L;
// Serve the state the timer firing will read back: buffered data, both holds,
// and the (empty) pane info value.
Windmill.GetDataResponse.Builder dataResponse = Windmill.GetDataResponse.newBuilder();
Windmill.KeyedGetDataResponse.Builder dataBuilder =
dataResponse
.addDataBuilder()
.setComputationId(DEFAULT_COMPUTATION_ID)
.addDataBuilder()
.setKey(ByteString.copyFromUtf8(DEFAULT_KEY_STRING))
.setShardingKey(DEFAULT_SHARDING_KEY);
dataBuilder
.addBagsBuilder()
.setTag(bufferTag)
.setStateFamily(stateFamily)
.addValues(bufferData);
dataBuilder
.addWatermarkHoldsBuilder()
.setTag(ByteString.copyFromUtf8(watermarkDataHoldTag))
.setStateFamily(stateFamily)
.addTimestamps(0);
dataBuilder
.addWatermarkHoldsBuilder()
.setTag(ByteString.copyFromUtf8(watermarkExtraHoldTag))
.setStateFamily(stateFamily)
.addTimestamps(0);
dataBuilder
.addValuesBuilder()
.setTag(paneInfoTag)
.setStateFamily(stateFamily)
.getValueBuilder()
.setTimestamp(0)
.setData(ByteString.EMPTY);
server.whenGetDataCalled().thenReturn(dataResponse.build());
expectedBytesRead += dataBuilder.build().getSerializedSize();
result = server.waitForAndGetCommits(1);
counters = worker.buildCounters();
actualOutput = result.get(2L);
// Phase-2 commit: the grouped element is emitted with an ON_TIME pane in
// WINDOW_AT_ZERO.
assertEquals(1, actualOutput.getOutputMessagesCount());
assertEquals(
DEFAULT_DESTINATION_STREAM_ID, actualOutput.getOutputMessages(0).getDestinationStreamId());
assertEquals(
DEFAULT_KEY_STRING,
actualOutput.getOutputMessages(0).getBundles(0).getKey().toStringUtf8());
assertEquals(0, actualOutput.getOutputMessages(0).getBundles(0).getMessages(0).getTimestamp());
assertEquals(
outputData, actualOutput.getOutputMessages(0).getBundles(0).getMessages(0).getData());
ByteString metadata =
actualOutput.getOutputMessages(0).getBundles(0).getMessages(0).getMetadata();
InputStream inStream = metadata.newInput();
assertEquals(
PaneInfo.createPane(true, true, Timing.ON_TIME), PaneInfoCoder.INSTANCE.decode(inStream));
assertEquals(
Collections.singletonList(WINDOW_AT_ZERO),
DEFAULT_WINDOW_COLLECTION_CODER.decode(inStream, Coder.Context.OUTER));
// Per-window state must be cleared: pane value tombstoned, buffer bag
// delete-all'd, and both holds reset.
assertThat(
"" + actualOutput.getValueUpdatesList(),
actualOutput.getValueUpdatesList(),
Matchers.contains(
Matchers.equalTo(
Windmill.TagValue.newBuilder()
.setTag(paneInfoTag)
.setStateFamily(stateFamily)
.setValue(
Windmill.Value.newBuilder()
.setTimestamp(Long.MAX_VALUE)
.setData(ByteString.EMPTY))
.build())));
assertThat(
"" + actualOutput.getBagUpdatesList(),
actualOutput.getBagUpdatesList(),
Matchers.contains(
Matchers.equalTo(
Windmill.TagBag.newBuilder()
.setTag(bufferTag)
.setStateFamily(stateFamily)
.setDeleteAll(true)
.build())));
verifyHolds(
actualOutput,
buildHold(watermarkDataHoldTag, -1, true),
buildHold(watermarkExtraHoldTag, -1, true));
// Counter checks: reads match the served state size; no shuffle bytes this
// phase.
assertEquals(
expectedBytesRead,
splitIntToLong(getCounter(counters, "WindmillStateBytesRead").getInteger()));
assertEquals(
Windmill.WorkItemCommitRequest.newBuilder(removeDynamicFields(actualOutput))
.clearCounterUpdates()
.clearOutputMessages()
.build()
.getSerializedSize(),
splitIntToLong(getCounter(counters, "WindmillStateBytesWritten").getInteger()));
assertEquals(0L, splitIntToLong(getCounter(counters, "WindmillShuffleBytesRead").getInteger()));
}
// Same scenario as testMergeWindows, but with a PassthroughDoFn appended and
// is_new_key set on the first work item so the worker's state cache is
// exercised: phase 2's GetData response deliberately omits the buffer bag and
// data hold (they must be served from cache), and the final assertions check
// the cache hit/miss counts.
@Test
public void testMergeWindowsCaching() throws Exception {
Coder<KV<String, String>> kvCoder = KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of());
Coder<WindowedValue<KV<String, String>>> windowedKvCoder =
FullWindowedValueCoder.of(kvCoder, IntervalWindow.getCoder());
KvCoder<String, List<String>> groupedCoder =
KvCoder.of(StringUtf8Coder.of(), ListCoder.of(StringUtf8Coder.of()));
Coder<WindowedValue<KV<String, List<String>>>> windowedGroupedCoder =
FullWindowedValueCoder.of(groupedCoder, IntervalWindow.getCoder());
CloudObject spec = CloudObject.forClassName("MergeWindowsDoFn");
SdkComponents sdkComponents = SdkComponents.create();
sdkComponents.registerEnvironment(Environments.JAVA_SDK_HARNESS_ENVIRONMENT);
addString(
spec,
PropertyNames.SERIALIZED_FN,
StringUtils.byteArrayToJsonString(
WindowingStrategyTranslation.toMessageProto(
WindowingStrategy.of(FixedWindows.of(Duration.standardSeconds(1)))
.withTimestampCombiner(TimestampCombiner.EARLIEST),
sdkComponents)
.toByteArray()));
addObject(
spec,
WorkerPropertyNames.INPUT_CODER,
CloudObjects.asCloudObject(windowedKvCoder, /* sdkComponents= */ null));
ParallelInstruction mergeWindowsInstruction =
new ParallelInstruction()
.setSystemName("MergeWindows-System")
.setName("MergeWindowsStep")
.setOriginalName("MergeWindowsOriginal")
.setParDo(
new ParDoInstruction()
.setInput(new InstructionInput().setProducerInstructionIndex(0).setOutputNum(0))
.setNumOutputs(1)
.setUserFn(spec))
.setOutputs(
Collections.singletonList(
new InstructionOutput()
.setOriginalName(DEFAULT_OUTPUT_ORIGINAL_NAME)
.setSystemName(DEFAULT_OUTPUT_SYSTEM_NAME)
.setName("output")
.setCodec(
CloudObjects.asCloudObject(
windowedGroupedCoder, /* sdkComponents= */ null))));
// Note the extra PassthroughDoFn stage compared to testMergeWindows.
List<ParallelInstruction> instructions =
Arrays.asList(
makeWindowingSourceInstruction(kvCoder),
mergeWindowsInstruction,
makeDoFnInstruction(new PassthroughDoFn(), 1, groupedCoder),
makeSinkInstruction(groupedCoder, 2));
FakeWindmillServer server = new FakeWindmillServer(errorCollector);
StreamingDataflowWorker worker =
makeWorker(instructions, createTestingPipelineOptions(server), false /* publishCounters */);
Map<String, String> nameMap = new HashMap<>();
nameMap.put("MergeWindowsStep", "MergeWindows");
worker.addStateNameMappings(nameMap);
worker.start();
// Phase 1: one message at timestamp 0; is_new_key lets the worker trust its
// (empty) cache for this key.
server
.whenGetWorkCalled()
.thenReturn(
buildInput(
"work {"
+ " computation_id: \""
+ DEFAULT_COMPUTATION_ID
+ "\""
+ " input_data_watermark: 0"
+ " work {"
+ " key: \""
+ DEFAULT_KEY_STRING
+ "\""
+ " sharding_key: "
+ DEFAULT_SHARDING_KEY
+ " cache_token: 1"
+ " work_token: 1"
+ " is_new_key: 1"
+ " message_bundles {"
+ " source_computation_id: \""
+ DEFAULT_SOURCE_COMPUTATION_ID
+ "\""
+ " messages {"
+ " timestamp: 0"
+ " data: \""
+ dataStringForIndex(0)
+ "\""
+ " }"
+ " }"
+ " }"
+ "}",
intervalWindowBytes(WINDOW_AT_ZERO)));
Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1);
Iterable<CounterUpdate> counters = worker.buildCounters();
// State tags for the single window; "window" is the encoded interval window.
String window = "/gAAAAAAAA-joBw/";
String timerTagPrefix = "/s" + window + "+0";
ByteString bufferTag = ByteString.copyFromUtf8(window + "+ubuf");
ByteString paneInfoTag = ByteString.copyFromUtf8(window + "+upane");
String watermarkDataHoldTag = window + "+uhold";
String watermarkExtraHoldTag = window + "+uextra";
String stateFamily = "MergeWindows";
ByteString bufferData = ByteString.copyFromUtf8("data0");
// Encoded grouped output value ("data0" as a singleton iterable).
ByteString outputData =
ByteString.copyFrom(
new byte[] {
(byte) 0xff,
(byte) 0xff,
(byte) 0xff,
(byte) 0xff,
0x01,
0x05,
0x64,
0x61,
0x74,
0x61,
0x30,
0x00
});
long timerTimestamp = 999000L;
WorkItemCommitRequest actualOutput = result.get(1L);
// Phase-1 commit: element buffered, end-of-window timer set, hold placed.
verifyTimers(actualOutput, buildWatermarkTimer(timerTagPrefix, 999));
assertThat(
actualOutput.getBagUpdatesList(),
Matchers.contains(
Matchers.equalTo(
Windmill.TagBag.newBuilder()
.setTag(bufferTag)
.setStateFamily(stateFamily)
.addValues(bufferData)
.build())));
verifyHolds(actualOutput, buildHold(watermarkDataHoldTag, 0, false));
assertEquals(0L, splitIntToLong(getCounter(counters, "WindmillStateBytesRead").getInteger()));
assertEquals(
Windmill.WorkItemCommitRequest.newBuilder(removeDynamicFields(actualOutput))
.clearCounterUpdates()
.clearOutputMessages()
.build()
.getSerializedSize(),
splitIntToLong(getCounter(counters, "WindmillStateBytesWritten").getInteger()));
assertEquals(
VarInt.getLength(0L)
+ dataStringForIndex(0).length()
+ addPaneTag(PaneInfo.NO_FIRING, intervalWindowBytes(WINDOW_AT_ZERO)).size()
+ 5L
,
splitIntToLong(getCounter(counters, "WindmillShuffleBytesRead").getInteger()));
// Phase 2: same cache_token, watermark past window end, fire the timer.
Windmill.GetWorkResponse.Builder getWorkResponse = Windmill.GetWorkResponse.newBuilder();
getWorkResponse
.addWorkBuilder()
.setComputationId(DEFAULT_COMPUTATION_ID)
.setInputDataWatermark(timerTimestamp + 1000)
.addWorkBuilder()
.setKey(ByteString.copyFromUtf8(DEFAULT_KEY_STRING))
.setShardingKey(DEFAULT_SHARDING_KEY)
.setWorkToken(2)
.setCacheToken(1)
.getTimersBuilder()
.addTimers(buildWatermarkTimer(timerTagPrefix, timerTimestamp));
server.whenGetWorkCalled().thenReturn(getWorkResponse.build());
long expectedBytesRead = 0L;
// GetData serves only the extra hold and pane info — the buffer bag and data
// hold must come from the worker's state cache.
Windmill.GetDataResponse.Builder dataResponse = Windmill.GetDataResponse.newBuilder();
Windmill.KeyedGetDataResponse.Builder dataBuilder =
dataResponse
.addDataBuilder()
.setComputationId(DEFAULT_COMPUTATION_ID)
.addDataBuilder()
.setKey(ByteString.copyFromUtf8(DEFAULT_KEY_STRING))
.setShardingKey(DEFAULT_SHARDING_KEY);
dataBuilder
.addWatermarkHoldsBuilder()
.setTag(ByteString.copyFromUtf8(watermarkExtraHoldTag))
.setStateFamily(stateFamily)
.addTimestamps(0);
dataBuilder
.addValuesBuilder()
.setTag(paneInfoTag)
.setStateFamily(stateFamily)
.getValueBuilder()
.setTimestamp(0)
.setData(ByteString.EMPTY);
server.whenGetDataCalled().thenReturn(dataResponse.build());
expectedBytesRead += dataBuilder.build().getSerializedSize();
result = server.waitForAndGetCommits(1);
counters = worker.buildCounters();
actualOutput = result.get(2L);
// Phase-2 commit: grouped element emitted with an ON_TIME pane.
assertEquals(1, actualOutput.getOutputMessagesCount());
assertEquals(
DEFAULT_DESTINATION_STREAM_ID, actualOutput.getOutputMessages(0).getDestinationStreamId());
assertEquals(
DEFAULT_KEY_STRING,
actualOutput.getOutputMessages(0).getBundles(0).getKey().toStringUtf8());
assertEquals(0, actualOutput.getOutputMessages(0).getBundles(0).getMessages(0).getTimestamp());
assertEquals(
outputData, actualOutput.getOutputMessages(0).getBundles(0).getMessages(0).getData());
ByteString metadata =
actualOutput.getOutputMessages(0).getBundles(0).getMessages(0).getMetadata();
InputStream inStream = metadata.newInput();
assertEquals(
PaneInfo.createPane(true, true, Timing.ON_TIME), PaneInfoCoder.INSTANCE.decode(inStream));
assertEquals(
Collections.singletonList(WINDOW_AT_ZERO),
DEFAULT_WINDOW_COLLECTION_CODER.decode(inStream, Coder.Context.OUTER));
// Per-window state must be cleared after firing.
assertThat(
"" + actualOutput.getValueUpdatesList(),
actualOutput.getValueUpdatesList(),
Matchers.contains(
Matchers.equalTo(
Windmill.TagValue.newBuilder()
.setTag(paneInfoTag)
.setStateFamily(stateFamily)
.setValue(
Windmill.Value.newBuilder()
.setTimestamp(Long.MAX_VALUE)
.setData(ByteString.EMPTY))
.build())));
assertThat(
"" + actualOutput.getBagUpdatesList(),
actualOutput.getBagUpdatesList(),
Matchers.contains(
Matchers.equalTo(
Windmill.TagBag.newBuilder()
.setTag(bufferTag)
.setStateFamily(stateFamily)
.setDeleteAll(true)
.build())));
verifyHolds(
actualOutput,
buildHold(watermarkDataHoldTag, -1, true),
buildHold(watermarkExtraHoldTag, -1, true));
assertEquals(
expectedBytesRead,
splitIntToLong(getCounter(counters, "WindmillStateBytesRead").getInteger()));
assertEquals(
Windmill.WorkItemCommitRequest.newBuilder(removeDynamicFields(actualOutput))
.clearCounterUpdates()
.clearOutputMessages()
.build()
.getSerializedSize(),
splitIntToLong(getCounter(counters, "WindmillStateBytesWritten").getInteger()));
assertEquals(0L, splitIntToLong(getCounter(counters, "WindmillShuffleBytesRead").getInteger()));
// Cache effectiveness: exactly 1 hit and 4 misses across the two work items.
CacheStats stats = worker.stateCache.getCacheStats();
LOG.info("cache stats {}", stats);
assertEquals(1, stats.hitCount());
assertEquals(4, stats.missCount());
}
// Drives a MergeWindowsDoFn pipeline configured with session windows (10ms gap,
// discarding panes, late firings on every element, 60min allowed lateness)
// through the given sequence of Actions. For each Action it feeds the action's
// GetWorkResponse to the fake server, waits for the resulting commit (work
// tokens are assumed to be 1-based and sequential), and verifies the commit's
// timers and watermark holds against the action's expectations.
private void runMergeSessionsActions(List<Action> actions) throws Exception {
Coder<KV<String, String>> kvCoder = KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of());
Coder<WindowedValue<KV<String, String>>> windowedKvCoder =
FullWindowedValueCoder.of(kvCoder, IntervalWindow.getCoder());
KvCoder<String, List<String>> groupedCoder =
KvCoder.of(StringUtf8Coder.of(), ListCoder.of(StringUtf8Coder.of()));
Coder<WindowedValue<KV<String, List<String>>>> windowedGroupedCoder =
FullWindowedValueCoder.of(groupedCoder, IntervalWindow.getCoder());
CloudObject spec = CloudObject.forClassName("MergeWindowsDoFn");
SdkComponents sdkComponents = SdkComponents.create();
sdkComponents.registerEnvironment(Environments.JAVA_SDK_HARNESS_ENVIRONMENT);
addString(
spec,
PropertyNames.SERIALIZED_FN,
StringUtils.byteArrayToJsonString(
WindowingStrategyTranslation.toMessageProto(
WindowingStrategy.of(Sessions.withGapDuration(Duration.millis(10)))
.withMode(AccumulationMode.DISCARDING_FIRED_PANES)
.withTrigger(
Repeatedly.forever(
AfterWatermark.pastEndOfWindow()
.withLateFirings(AfterPane.elementCountAtLeast(1))))
.withAllowedLateness(Duration.standardMinutes(60)),
sdkComponents)
.toByteArray()));
addObject(
spec,
WorkerPropertyNames.INPUT_CODER,
CloudObjects.asCloudObject(windowedKvCoder, /* sdkComponents= */ null));
ParallelInstruction mergeWindowsInstruction =
new ParallelInstruction()
.setSystemName("MergeWindows-System")
.setName("MergeWindowsStep")
.setOriginalName("MergeWindowsOriginal")
.setParDo(
new ParDoInstruction()
.setInput(new InstructionInput().setProducerInstructionIndex(0).setOutputNum(0))
.setNumOutputs(1)
.setUserFn(spec))
.setOutputs(
Collections.singletonList(
new InstructionOutput()
.setOriginalName(DEFAULT_OUTPUT_ORIGINAL_NAME)
.setSystemName(DEFAULT_OUTPUT_SYSTEM_NAME)
.setName("output")
.setCodec(
CloudObjects.asCloudObject(
windowedGroupedCoder, /* sdkComponents= */ null))));
List<ParallelInstruction> instructions =
Arrays.asList(
makeWindowingSourceInstruction(kvCoder),
mergeWindowsInstruction,
makeSinkInstruction(groupedCoder, 1));
FakeWindmillServer server = new FakeWindmillServer(errorCollector);
StreamingDataflowWorker worker =
makeWorker(instructions, createTestingPipelineOptions(server), false /* publishCounters */);
Map<String, String> nameMap = new HashMap<>();
nameMap.put("MergeWindowsStep", "MergeWindows");
worker.addStateNameMappings(nameMap);
worker.start();
// All state reads return empty: each action starts from persisted-empty state.
server.whenGetDataCalled().answerByDefault(EMPTY_DATA_RESPONDER);
for (int i = 0; i < actions.size(); ++i) {
Action action = actions.get(i);
server.whenGetWorkCalled().thenReturn(action.response);
Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1);
// Action i is expected to commit with work token i + 1.
WorkItemCommitRequest actualOutput = result.get(i + 1L);
assertThat(actualOutput, Matchers.not(Matchers.nullValue()));
verifyTimers(actualOutput, action.expectedTimers);
verifyHolds(actualOutput, action.expectedHolds);
}
}
// Exercises session-window merging via runMergeSessionsActions. The opaque
// "/gAAAAAAAA.../" strings are base64-like encodings of interval windows used
// as state-tag prefixes. First scenario: a single late element (watermark 40)
// fires immediately and leaves only the GC timer. Second scenario: six steps
// covering an on-time element, its end-of-window timer firing, a late element,
// an on-time element, a merge of three sessions into one (note the timer/hold
// deletions for the windows consumed by the merge), and the merged session's
// end-of-window firing.
@Test
public void testMergeSessionWindows() throws Exception {
runMergeSessionsActions(
Collections.singletonList(
new Action(
buildSessionInput(
1, 40, 0, Collections.singletonList(1L), Collections.EMPTY_LIST))
.withHolds(
buildHold("/gAAAAAAAAAsK/+uhold", -1, true),
buildHold("/gAAAAAAAAAsK/+uextra", -1, true))
.withTimers(buildWatermarkTimer("/s/gAAAAAAAAAsK/+0", 3600010))));
runMergeSessionsActions(
Arrays.asList(
new Action(
buildSessionInput(
1, 0, 0, Collections.singletonList(1L), Collections.EMPTY_LIST))
.withHolds(buildHold("/gAAAAAAAAAsK/+uhold", 10, false))
.withTimers(
buildWatermarkTimer("/s/gAAAAAAAAAsK/+0", 10),
buildWatermarkTimer("/s/gAAAAAAAAAsK/+0", 3600010)),
new Action(
buildSessionInput(
2,
30,
0,
Collections.EMPTY_LIST,
Collections.singletonList(buildWatermarkTimer("/s/gAAAAAAAAAsK/+0", 10))))
.withTimers(buildWatermarkTimer("/s/gAAAAAAAAAsK/+0", 3600010))
.withHolds(
buildHold("/gAAAAAAAAAsK/+uhold", -1, true),
buildHold("/gAAAAAAAAAsK/+uextra", -1, true)),
new Action(
buildSessionInput(
3, 30, 0, Collections.singletonList(8L), Collections.EMPTY_LIST))
.withTimers(
buildWatermarkTimer("/s/gAAAAAAAABIR/+0", 3600017),
buildWatermarkTimer("/s/gAAAAAAAAAsK/+0", 10, true),
buildWatermarkTimer("/s/gAAAAAAAAAsK/+0", 3600010, true))
.withHolds(
buildHold("/gAAAAAAAAAsK/+uhold", -1, true),
buildHold("/gAAAAAAAAAsK/+uextra", -1, true)),
new Action(
buildSessionInput(
4, 30, 0, Collections.singletonList(31L), Collections.EMPTY_LIST))
.withTimers(
buildWatermarkTimer("/s/gAAAAAAAACkK/+0", 3600040),
buildWatermarkTimer("/s/gAAAAAAAACkK/+0", 40))
.withHolds(buildHold("/gAAAAAAAACkK/+uhold", 40, false)),
new Action(buildSessionInput(5, 30, 0, Arrays.asList(17L, 23L), Collections.EMPTY_LIST))
.withTimers(
buildWatermarkTimer("/s/gAAAAAAAACkK/+0", 3600040, true),
buildWatermarkTimer("/s/gAAAAAAAACkK/+0", 40, true),
buildWatermarkTimer("/s/gAAAAAAAABIR/+0", 3600017, true),
buildWatermarkTimer("/s/gAAAAAAAABIR/+0", 17, true),
buildWatermarkTimer("/s/gAAAAAAAACko/+0", 40),
buildWatermarkTimer("/s/gAAAAAAAACko/+0", 3600040))
.withHolds(
buildHold("/gAAAAAAAACkK/+uhold", -1, true),
buildHold("/gAAAAAAAACkK/+uextra", -1, true),
buildHold("/gAAAAAAAAAsK/+uhold", 40, true),
buildHold("/gAAAAAAAAAsK/+uextra", 3600040, true)),
new Action(
buildSessionInput(
6,
50,
0,
Collections.EMPTY_LIST,
Collections.singletonList(buildWatermarkTimer("/s/gAAAAAAAACko/+0", 40))))
.withTimers(buildWatermarkTimer("/s/gAAAAAAAACko/+0", 3600040))
.withHolds(
buildHold("/gAAAAAAAAAsK/+uhold", -1, true),
buildHold("/gAAAAAAAAAsK/+uextra", -1, true))));
}
/** Builds the default unbounded-source pipeline: one message per shard, printed by PrintFn. */
private List<ParallelInstruction> makeUnboundedSourcePipeline() throws Exception {
  final int messagesPerShard = 1;
  return makeUnboundedSourcePipeline(messagesPerShard, new PrintFn());
}
// Builds a three-stage pipeline reading from a TestCountingSource:
// Read (custom unbounded source) -> user DoFn -> sink, all in the global
// window. The element coder wraps KV<Integer, Integer> in ValueWithRecordId so
// the source's record-id-based deduplication metadata is preserved.
private List<ParallelInstruction> makeUnboundedSourcePipeline(
int numMessagesPerShard,
DoFn<ValueWithRecordId<KV<Integer, Integer>>, String> doFn)
throws Exception {
DataflowPipelineOptions options =
PipelineOptionsFactory.create().as(DataflowPipelineOptions.class);
options.setNumWorkers(1);
CloudObject codec =
CloudObjects.asCloudObject(
WindowedValue.getFullCoder(
ValueWithRecordId.ValueWithRecordIdCoder.of(
KvCoder.of(VarIntCoder.of(), VarIntCoder.of())),
GlobalWindow.Coder.INSTANCE),
/* sdkComponents= */ null);
return Arrays.asList(
new ParallelInstruction()
.setSystemName("Read")
.setOriginalName("OriginalReadName")
.setRead(
new ReadInstruction()
.setSource(
CustomSources.serializeToCloudSource(
new TestCountingSource(numMessagesPerShard), options)
.setCodec(codec)))
.setOutputs(
Collections.singletonList(
new InstructionOutput()
.setName("read_output")
.setOriginalName(DEFAULT_OUTPUT_ORIGINAL_NAME)
.setSystemName(DEFAULT_OUTPUT_SYSTEM_NAME)
.setCodec(codec))),
makeDoFnInstruction(doFn, 0, StringUtf8Coder.of(), WindowingStrategy.globalDefault()),
makeSinkInstruction(StringUtf8Coder.of(), 1, GlobalWindow.Coder.INSTANCE));
}
// Tests the unbounded custom-source lifecycle across three work items:
// (1) a fresh read emits an element plus a source-state update with a finalize
// id; (2) re-reading the same key with that state finalizes the previous
// checkpoint (tracked via finalizeTracker) and emits nothing new; (3) a
// different key/sharding-key resumes from persisted state independently.
@Test
public void testUnboundedSources() throws Exception {
// Collect checkpoint finalization calls made by TestCountingSource.
List<Integer> finalizeTracker = Lists.newArrayList();
TestCountingSource.setFinalizeTracker(finalizeTracker);
FakeWindmillServer server = new FakeWindmillServer(errorCollector);
StreamingDataflowWorker worker =
makeWorker(
makeUnboundedSourcePipeline(),
createTestingPipelineOptions(server),
false /* publishCounters */);
worker.start();
// Work item 1: initial read for key 1, no prior source state.
server
.whenGetWorkCalled()
.thenReturn(
buildInput(
"work {"
+ " computation_id: \"computation\""
+ " input_data_watermark: 0"
+ " work {"
+ " key: \"0000000000000001\""
+ " sharding_key: 1"
+ " work_token: 1"
+ " cache_token: 1"
+ " }"
+ "}",
null));
Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1);
Iterable<CounterUpdate> counters = worker.buildCounters();
Windmill.WorkItemCommitRequest commit = result.get(1L);
// The finalize id is generated by the worker; read it back to build the
// expected commit and to feed it into the next work item.
UnsignedLong finalizeId =
UnsignedLong.fromLongBits(commit.getSourceStateUpdates().getFinalizeIds(0));
assertThat(
removeDynamicFields(commit),
equalTo(
setMessagesMetadata(
PaneInfo.NO_FIRING,
CoderUtils.encodeToByteArray(
CollectionCoder.of(GlobalWindow.Coder.INSTANCE),
Collections.singletonList(GlobalWindow.INSTANCE)),
parseCommitRequest(
"key: \"0000000000000001\" "
+ "sharding_key: 1 "
+ "work_token: 1 "
+ "cache_token: 1 "
+ "source_backlog_bytes: 7 "
+ "source_bytes_processed: 18 "
+ "output_messages {"
+ " destination_stream_id: \"out\""
+ " bundles {"
+ " key: \"0000000000000001\""
+ " messages {"
+ " timestamp: 0"
+ " data: \"0:0\""
+ " }"
+ " messages_ids: \"\""
+ " }"
+ "} "
+ "source_state_updates {"
+ " state: \"\000\""
+ " finalize_ids: "
+ finalizeId
+ "} "
+ "source_watermark: 1000"))
.build()));
// Work item 2: same key, carrying the previous state and finalize id — the
// worker must finalize the prior checkpoint and produce no output messages.
server
.whenGetWorkCalled()
.thenReturn(
buildInput(
"work {"
+ " computation_id: \"computation\""
+ " input_data_watermark: 0"
+ " work {"
+ " key: \"0000000000000001\""
+ " sharding_key: 1"
+ " work_token: 2"
+ " cache_token: 1"
+ " source_state {"
+ " state: \"\001\""
+ " finalize_ids: "
+ finalizeId
+ " } "
+ " }"
+ "}",
null));
result = server.waitForAndGetCommits(1);
counters = worker.buildCounters();
commit = result.get(2L);
finalizeId = UnsignedLong.fromLongBits(commit.getSourceStateUpdates().getFinalizeIds(0));
assertThat(
removeDynamicFields(commit),
equalTo(
parseCommitRequest(
"key: \"0000000000000001\" "
+ "sharding_key: 1 "
+ "work_token: 2 "
+ "cache_token: 1 "
+ "source_backlog_bytes: 7 "
+ "source_bytes_processed: 0 "
+ "source_state_updates {"
+ " state: \"\000\""
+ " finalize_ids: "
+ finalizeId
+ "} "
+ "source_watermark: 1000")
.build()));
// Checkpoint 0 must have been finalized by work item 2.
assertThat(finalizeTracker, contains(0));
assertNull(getCounter(counters, "dataflow_input_size-computation"));
// Work item 3: a different key/sharding key resuming from persisted state
// (no finalize ids), exercising the restore path.
server
.whenGetWorkCalled()
.thenReturn(
buildInput(
"work {"
+ " computation_id: \"computation\""
+ " input_data_watermark: 0"
+ " work {"
+ " key: \"0000000000000002\""
+ " sharding_key: 2"
+ " work_token: 3"
+ " cache_token: 2"
+ " source_state {"
+ " state: \"\000\""
+ " } "
+ " }"
+ "}",
null));
result = server.waitForAndGetCommits(1);
counters = worker.buildCounters();
commit = result.get(3L);
finalizeId = UnsignedLong.fromLongBits(commit.getSourceStateUpdates().getFinalizeIds(0));
assertThat(
removeDynamicFields(commit),
equalTo(
parseCommitRequest(
"key: \"0000000000000002\" "
+ "sharding_key: 2 "
+ "work_token: 3 "
+ "cache_token: 2 "
+ "source_backlog_bytes: 7 "
+ "source_bytes_processed: 0 "
+ "source_state_updates {"
+ " state: \"\000\""
+ " finalize_ids: "
+ finalizeId
+ "} "
+ "source_watermark: 1000")
.build()));
assertNull(getCounter(counters, "dataflow_input_size-computation"));
}
// Tests the drain path for unbounded sources: after a normal read/commit, a
// follow-up work item with source_state.only_finalize=true must finalize the
// outstanding checkpoint and commit only the only_finalize acknowledgement —
// no reads, no output messages, no new source state.
@Test
public void testUnboundedSourcesDrain() throws Exception {
// Collect checkpoint finalization calls made by TestCountingSource.
List<Integer> finalizeTracker = Lists.newArrayList();
TestCountingSource.setFinalizeTracker(finalizeTracker);
FakeWindmillServer server = new FakeWindmillServer(errorCollector);
StreamingDataflowWorker worker =
makeWorker(
makeUnboundedSourcePipeline(),
createTestingPipelineOptions(server),
true /* publishCounters */);
worker.start();
// Work item 1: normal read producing one element and a checkpoint.
server
.whenGetWorkCalled()
.thenReturn(
buildInput(
"work {"
+ " computation_id: \"computation\""
+ " input_data_watermark: 0"
+ " work {"
+ " key: \"0000000000000001\""
+ " sharding_key: 1"
+ " work_token: 2"
+ " cache_token: 3"
+ " }"
+ "}",
null));
Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1);
Windmill.WorkItemCommitRequest commit = result.get(2L);
// Worker-generated finalize id, fed back in the drain request below.
UnsignedLong finalizeId =
UnsignedLong.fromLongBits(commit.getSourceStateUpdates().getFinalizeIds(0));
assertThat(
removeDynamicFields(commit),
equalTo(
setMessagesMetadata(
PaneInfo.NO_FIRING,
CoderUtils.encodeToByteArray(
CollectionCoder.of(GlobalWindow.Coder.INSTANCE),
Collections.singletonList(GlobalWindow.INSTANCE)),
parseCommitRequest(
"key: \"0000000000000001\" "
+ "sharding_key: 1 "
+ "work_token: 2 "
+ "cache_token: 3 "
+ "source_backlog_bytes: 7 "
+ "source_bytes_processed: 18 "
+ "output_messages {"
+ " destination_stream_id: \"out\""
+ " bundles {"
+ " key: \"0000000000000001\""
+ " messages {"
+ " timestamp: 0"
+ " data: \"0:0\""
+ " }"
+ " messages_ids: \"\""
+ " }"
+ "} "
+ "source_state_updates {"
+ " state: \"\000\""
+ " finalize_ids: "
+ finalizeId
+ "} "
+ "source_watermark: 1000"))
.build()));
// Work item 2: drain request — only_finalize with the outstanding id.
server
.whenGetWorkCalled()
.thenReturn(
buildInput(
"work {"
+ " computation_id: \"computation\""
+ " input_data_watermark: 0"
+ " work {"
+ " key: \"0000000000000001\""
+ " sharding_key: 1"
+ " work_token: 3"
+ " cache_token: 3"
+ " source_state {"
+ " only_finalize: true"
+ " finalize_ids: "
+ finalizeId
+ " }"
+ " }"
+ "}",
null));
result = server.waitForAndGetCommits(1);
commit = result.get(3L);
// Drain commit carries only the only_finalize marker.
assertThat(
commit,
equalTo(
parseCommitRequest(
"key: \"0000000000000001\" "
+ "sharding_key: 1 "
+ "work_token: 3 "
+ "cache_token: 3 "
+ "source_state_updates {"
+ " only_finalize: true"
+ "} ")
.build()));
// Checkpoint 0 must have been finalized by the drain.
assertThat(finalizeTracker, contains(0));
}
@Test
public void testUnboundedSourceWorkRetry() throws Exception {
  // Verifies retry behavior for an unbounded custom source:
  //  1) first delivery of a work token produces a commit with a finalize id,
  //  2) re-delivery of the SAME token replays identical output (only the finalize id differs),
  //  3) a subsequent token carrying the checkpointed source_state resumes and finalizes.
  List<Integer> finalizeTracker = Lists.newArrayList();
  TestCountingSource.setFinalizeTracker(finalizeTracker);
  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server);
  // Disable the worker cache so reader state is not retained across work items.
  options.setWorkerCacheMb(0);
  StreamingDataflowWorker worker =
      makeWorker(makeUnboundedSourcePipeline(), options, false /* publishCounters */);
  worker.start();
  // First round: deliver work token 1 with no source state.
  Windmill.GetWorkResponse work =
      buildInput(
          "work {"
              + " computation_id: \"computation\""
              + " input_data_watermark: 0"
              + " work {"
              + " key: \"0000000000000001\""
              + " sharding_key: 1"
              + " work_token: 1"
              + " cache_token: 1"
              + " }"
              + "}",
          null);
  server.whenGetWorkCalled().thenReturn(work);
  Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1);
  Iterable<CounterUpdate> counters = worker.buildCounters();
  Windmill.WorkItemCommitRequest commit = result.get(1L);
  // The finalize id is generated by the worker, so read it back before building the
  // expected commit.
  UnsignedLong finalizeId =
      UnsignedLong.fromLongBits(commit.getSourceStateUpdates().getFinalizeIds(0));
  Windmill.WorkItemCommitRequest expectedCommit =
      setMessagesMetadata(
          PaneInfo.NO_FIRING,
          CoderUtils.encodeToByteArray(
              CollectionCoder.of(GlobalWindow.Coder.INSTANCE),
              Collections.singletonList(GlobalWindow.INSTANCE)),
          parseCommitRequest(
              "key: \"0000000000000001\" "
                  + "sharding_key: 1 "
                  + "work_token: 1 "
                  + "cache_token: 1 "
                  + "source_backlog_bytes: 7 "
                  + "source_bytes_processed: 18 "
                  + "output_messages {"
                  + " destination_stream_id: \"out\""
                  + " bundles {"
                  + " key: \"0000000000000001\""
                  + " messages {"
                  + " timestamp: 0"
                  + " data: \"0:0\""
                  + " }"
                  + " messages_ids: \"\""
                  + " }"
                  + "} "
                  + "source_state_updates {"
                  + " state: \"\000\""
                  + " finalize_ids: "
                  + finalizeId
                  + "} "
                  + "source_watermark: 1000"))
          .build();
  assertThat(removeDynamicFields(commit), equalTo(expectedCommit));
  // Second round: re-deliver the exact same work token. The output must be identical
  // except for the freshly generated finalize id, which we splice into the expectation.
  server.clearCommitsReceived();
  server.whenGetWorkCalled().thenReturn(work);
  result = server.waitForAndGetCommits(1);
  commit = result.get(1L);
  finalizeId = UnsignedLong.fromLongBits(commit.getSourceStateUpdates().getFinalizeIds(0));
  Windmill.WorkItemCommitRequest.Builder commitBuilder = expectedCommit.toBuilder();
  commitBuilder
      .getSourceStateUpdatesBuilder()
      .setFinalizeIds(0, commit.getSourceStateUpdates().getFinalizeIds(0));
  expectedCommit = commitBuilder.build();
  assertThat(removeDynamicFields(commit), equalTo(expectedCommit));
  // Third round: token 2 carries the checkpointed source_state (\001) and the previous
  // finalize id, so the source resumes; no new messages are emitted
  // (source_bytes_processed: 0).
  server
      .whenGetWorkCalled()
      .thenReturn(
          buildInput(
              "work {"
                  + " computation_id: \"computation\""
                  + " input_data_watermark: 0"
                  + " work {"
                  + " key: \"0000000000000001\""
                  + " sharding_key: 1"
                  + " work_token: 2"
                  + " cache_token: 1"
                  + " source_state {"
                  + " state: \"\001\""
                  + " finalize_ids: "
                  + finalizeId
                  + " } "
                  + " }"
                  + "}",
              null));
  result = server.waitForAndGetCommits(1);
  commit = result.get(2L);
  finalizeId = UnsignedLong.fromLongBits(commit.getSourceStateUpdates().getFinalizeIds(0));
  assertThat(
      removeDynamicFields(commit),
      equalTo(
          parseCommitRequest(
              "key: \"0000000000000001\" "
                  + "sharding_key: 1 "
                  + "work_token: 2 "
                  + "cache_token: 1 "
                  + "source_backlog_bytes: 7 "
                  + "source_bytes_processed: 0 "
                  + "source_state_updates {"
                  + " state: \"\000\""
                  + " finalize_ids: "
                  + finalizeId
                  + "} "
                  + "source_watermark: 1000")
              .build()));
  // The checkpoint from round one should have been finalized exactly once.
  assertThat(finalizeTracker, contains(0));
}
@Test
public void testActiveWork() throws Exception {
  // Verifies ComputationState's per-key work scheduling: at most one work item per key is
  // active at a time, additional items queue behind it, completing the active item schedules
  // the next queued one via forceExecute, and re-activating an in-flight token is rejected.
  BoundedQueueExecutor mockExecutor = Mockito.mock(BoundedQueueExecutor.class);
  ComputationState computationState =
      new ComputationState(
          "computation",
          defaultMapTask(Collections.singletonList(makeSourceInstruction(StringUtf8Coder.of()))),
          mockExecutor,
          ImmutableMap.of(),
          null);
  ShardedKey key1 = ShardedKey.create(ByteString.copyFromUtf8("key1"), 1);
  ShardedKey key2 = ShardedKey.create(ByteString.copyFromUtf8("key2"), 2);
  // First activation for a key executes immediately.
  Work m1 = createMockWork(1);
  assertTrue(computationState.activateWork(key1, m1));
  Mockito.verify(mockExecutor).execute(m1, m1.getWorkItem().getSerializedSize());
  computationState.completeWorkAndScheduleNextWorkForKey(key1, 1);
  Mockito.verifyNoMoreInteractions(mockExecutor);
  // m2 executes; m3 queues behind it for the same key (no executor interaction yet).
  Work m2 = createMockWork(2);
  assertTrue(computationState.activateWork(key1, m2));
  Mockito.verify(mockExecutor).execute(m2, m2.getWorkItem().getSerializedSize());
  Work m3 = createMockWork(3);
  assertTrue(computationState.activateWork(key1, m3));
  Mockito.verifyNoMoreInteractions(mockExecutor);
  // A different key is independent and executes immediately.
  Work m4 = createMockWork(4);
  assertTrue(computationState.activateWork(key2, m4));
  Mockito.verify(mockExecutor).execute(m4, m4.getWorkItem().getSerializedSize());
  computationState.completeWorkAndScheduleNextWorkForKey(key2, 4);
  Mockito.verifyNoMoreInteractions(mockExecutor);
  // Completing m2 dispatches the queued m3, bypassing the bounded queue (forceExecute).
  computationState.completeWorkAndScheduleNextWorkForKey(key1, 2);
  Mockito.verify(mockExecutor).forceExecute(m3, m3.getWorkItem().getSerializedSize());
  computationState.completeWorkAndScheduleNextWorkForKey(key1, 3);
  Mockito.verifyNoMoreInteractions(mockExecutor);
  // Re-activating the same (in-flight) work token is rejected.
  Work m5 = createMockWork(5);
  computationState.activateWork(key1, m5);
  Mockito.verify(mockExecutor).execute(m5, m5.getWorkItem().getSerializedSize());
  assertFalse(computationState.activateWork(key1, m5));
  Mockito.verifyNoMoreInteractions(mockExecutor);
  computationState.completeWorkAndScheduleNextWorkForKey(key1, 5);
  Mockito.verifyNoMoreInteractions(mockExecutor);
}
@Test
public void testActiveWorkForShardedKeys() throws Exception {
  // Verifies that the same user key under different sharding keys is tracked as two
  // independent work queues, and that duplicate-token detection is per sharded key.
  BoundedQueueExecutor mockExecutor = Mockito.mock(BoundedQueueExecutor.class);
  ComputationState computationState =
      new ComputationState(
          "computation",
          defaultMapTask(Collections.singletonList(makeSourceInstruction(StringUtf8Coder.of()))),
          mockExecutor,
          ImmutableMap.of(),
          null);
  ShardedKey key1Shard1 = ShardedKey.create(ByteString.copyFromUtf8("key1"), 1);
  ShardedKey key1Shard2 = ShardedKey.create(ByteString.copyFromUtf8("key1"), 2);
  Work m1 = createMockWork(1);
  assertTrue(computationState.activateWork(key1Shard1, m1));
  Mockito.verify(mockExecutor).execute(m1, m1.getWorkItem().getSerializedSize());
  computationState.completeWorkAndScheduleNextWorkForKey(key1Shard1, 1);
  Mockito.verifyNoMoreInteractions(mockExecutor);
  // m2 runs; m3 queues behind it on shard 1.
  Work m2 = createMockWork(2);
  assertTrue(computationState.activateWork(key1Shard1, m2));
  Mockito.verify(mockExecutor).execute(m2, m2.getWorkItem().getSerializedSize());
  Work m3 = createMockWork(3);
  assertTrue(computationState.activateWork(key1Shard1, m3));
  Mockito.verifyNoMoreInteractions(mockExecutor);
  // A duplicate work token on the SAME sharded key is rejected...
  Work m4 = createMockWork(3);
  assertFalse(computationState.activateWork(key1Shard1, m4));
  Mockito.verifyNoMoreInteractions(mockExecutor);
  // ...but the same token under a DIFFERENT sharding key is treated as new work.
  assertTrue(computationState.activateWork(key1Shard2, m4));
  Mockito.verify(mockExecutor).execute(m4, m4.getWorkItem().getSerializedSize());
  assertFalse(computationState.activateWork(key1Shard2, m4));
  computationState.completeWorkAndScheduleNextWorkForKey(key1Shard2, 3);
  Mockito.verifyNoMoreInteractions(mockExecutor);
}
@Test
@Ignore
// NOTE(review): @Ignore'd — this test asserts on wall-clock-derived allThreadsActiveTime()
// immediately after submitting two 1s-sleeping work items, which is timing-sensitive and
// flaky. Consider a fake clock or latches before re-enabling.
public void testMaxThreadMetric() throws Exception {
  int maxThreads = 2;
  int threadExpiration = 60;
  // Both pool threads will be occupied by the two sleeping work items below.
  BoundedQueueExecutor executor =
      new BoundedQueueExecutor(
          maxThreads,
          threadExpiration,
          TimeUnit.SECONDS,
          maxThreads,
          10000000,
          new ThreadFactoryBuilder()
              .setNameFormat("DataflowWorkUnits-%d")
              .setDaemon(true)
              .build());
  ComputationState computationState =
      new ComputationState(
          "computation",
          defaultMapTask(Collections.singletonList(makeSourceInstruction(StringUtf8Coder.of()))),
          executor,
          ImmutableMap.of(),
          null);
  ShardedKey key1Shard1 = ShardedKey.create(ByteString.copyFromUtf8("key1"), 1);
  // Each work item sleeps ~1s so that all threads stay active long enough to measure.
  Consumer<Work> sleepProcessWorkFn =
      unused -> {
        try {
          Thread.sleep(1000);
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
        }
      };
  Work m2 = createMockWork(2, sleepProcessWorkFn);
  Work m3 = createMockWork(3, sleepProcessWorkFn);
  assertTrue(computationState.activateWork(key1Shard1, m2));
  assertTrue(computationState.activateWork(key1Shard1, m3));
  executor.execute(m2, m2.getWorkItem().getSerializedSize());
  executor.execute(m3, m3.getWorkItem().getSerializedSize());
  // Expect ~1000ms of all-threads-active time; 990 allows a little scheduling slack.
  long i = 990L;
  assertTrue(executor.allThreadsActiveTime() >= i);
  executor.shutdown();
}
// NOTE(review): this field appears unused within this chunk — the thread-metric tests below
// each declare a local AtomicBoolean named "stop" that shadows it. Confirm nothing else in
// the file reads it before removing.
volatile boolean stop = false;
// Verifies BoundedQueueExecutor.activeCount() tracks the number of in-flight work items.
// Each work item spins until `stop` is flipped so the threads stay measurably active; the
// CountDownLatches guarantee the expected number of executions have started before each
// assertion. Note: ComputationState.activateWork schedules the FIRST item for a key itself,
// which is why m2 is counted twice (latch of 2, activeCount of 2).
@Test
public void testActiveThreadMetric() throws Exception {
  // Fix: this method previously carried a duplicate @Test annotation, which is a compile
  // error because JUnit's @Test is not a repeatable annotation.
  int maxThreads = 5;
  int threadExpirationSec = 60;
  CountDownLatch processStart1 = new CountDownLatch(2);
  CountDownLatch processStart2 = new CountDownLatch(3);
  CountDownLatch processStart3 = new CountDownLatch(4);
  AtomicBoolean stop = new AtomicBoolean(false);
  BoundedQueueExecutor executor =
      new BoundedQueueExecutor(
          maxThreads,
          threadExpirationSec,
          TimeUnit.SECONDS,
          maxThreads,
          10000000,
          new ThreadFactoryBuilder()
              .setNameFormat("DataflowWorkUnits-%d")
              .setDaemon(true)
              .build());
  ComputationState computationState =
      new ComputationState(
          "computation",
          defaultMapTask(Collections.singletonList(makeSourceInstruction(StringUtf8Coder.of()))),
          executor,
          ImmutableMap.of(),
          null);
  ShardedKey key1Shard1 = ShardedKey.create(ByteString.copyFromUtf8("key1"), 1);
  // Busy-waits (rather than sleeping) so the executor threads remain active until released.
  Consumer<Work> sleepProcessWorkFn =
      unused -> {
        processStart1.countDown();
        processStart2.countDown();
        processStart3.countDown();
        int count = 0;
        while (!stop.get()) {
          count += 1;
        }
      };
  Work m2 = createMockWork(2, sleepProcessWorkFn);
  Work m3 = createMockWork(3, sleepProcessWorkFn);
  Work m4 = createMockWork(4, sleepProcessWorkFn);
  assertEquals(0, executor.activeCount());
  assertTrue(computationState.activateWork(key1Shard1, m2));
  executor.execute(m2, m2.getWorkItem().getSerializedSize());
  processStart1.await();
  assertEquals(2, executor.activeCount());
  assertTrue(computationState.activateWork(key1Shard1, m3));
  assertTrue(computationState.activateWork(key1Shard1, m4));
  executor.execute(m3, m3.getWorkItem().getSerializedSize());
  processStart2.await();
  assertEquals(3, executor.activeCount());
  executor.execute(m4, m4.getWorkItem().getSerializedSize());
  processStart3.await();
  assertEquals(4, executor.activeCount());
  stop.set(true);
  executor.shutdown();
}
@Test
public void testOutstandingBytesMetric() throws Exception {
  // Verifies BoundedQueueExecutor.bytesOutstanding() tracks the serialized bytes of all
  // in-flight work items. `bytes` mirrors the expected running total.
  int maxThreads = 5;
  int threadExpirationSec = 60;
  CountDownLatch processStart1 = new CountDownLatch(2);
  CountDownLatch processStart2 = new CountDownLatch(3);
  CountDownLatch processStart3 = new CountDownLatch(4);
  AtomicBoolean stop = new AtomicBoolean(false);
  BoundedQueueExecutor executor =
      new BoundedQueueExecutor(
          maxThreads,
          threadExpirationSec,
          TimeUnit.SECONDS,
          maxThreads,
          10000000,
          new ThreadFactoryBuilder()
              .setNameFormat("DataflowWorkUnits-%d")
              .setDaemon(true)
              .build());
  ComputationState computationState =
      new ComputationState(
          "computation",
          defaultMapTask(Arrays.asList(makeSourceInstruction(StringUtf8Coder.of()))),
          executor,
          ImmutableMap.of(),
          null);
  ShardedKey key1Shard1 = ShardedKey.create(ByteString.copyFromUtf8("key1"), 1);
  // Busy-wait keeps every submitted item "outstanding" until stop is flipped.
  Consumer<Work> sleepProcessWorkFn =
      unused -> {
        processStart1.countDown();
        processStart2.countDown();
        processStart3.countDown();
        int count = 0;
        while (!stop.get()) {
          count += 1;
        }
      };
  Work m2 = createMockWork(2, sleepProcessWorkFn);
  Work m3 = createMockWork(3, sleepProcessWorkFn);
  Work m4 = createMockWork(4, sleepProcessWorkFn);
  assertEquals(0, executor.bytesOutstanding());
  // m2's bytes are counted twice: activateWork schedules the first item for a key itself,
  // and the test then submits m2 directly as well (hence the latch count of 2).
  long bytes = m2.getWorkItem().getSerializedSize();
  assertTrue(computationState.activateWork(key1Shard1, m2));
  bytes += m2.getWorkItem().getSerializedSize();
  executor.execute(m2, m2.getWorkItem().getSerializedSize());
  processStart1.await();
  assertEquals(bytes, executor.bytesOutstanding());
  assertTrue(computationState.activateWork(key1Shard1, m3));
  assertTrue(computationState.activateWork(key1Shard1, m4));
  bytes += m3.getWorkItem().getSerializedSize();
  executor.execute(m3, m3.getWorkItem().getSerializedSize());
  processStart2.await();
  assertEquals(bytes, executor.bytesOutstanding());
  bytes += m4.getWorkItem().getSerializedSize();
  executor.execute(m4, m4.getWorkItem().getSerializedSize());
  processStart3.await();
  assertEquals(bytes, executor.bytesOutstanding());
  stop.set(true);
  executor.shutdown();
}
@Test
public void testOutstandingBundlesMetric() throws Exception {
  // Verifies BoundedQueueExecutor.elementsOutstanding() counts in-flight work items.
  // As in testOutstandingBytesMetric, m2 is counted twice because activateWork schedules
  // the first item for a key itself and the test also submits it directly.
  int maxThreads = 5;
  int threadExpirationSec = 60;
  CountDownLatch processStart1 = new CountDownLatch(2);
  CountDownLatch processStart2 = new CountDownLatch(3);
  CountDownLatch processStart3 = new CountDownLatch(4);
  AtomicBoolean stop = new AtomicBoolean(false);
  BoundedQueueExecutor executor =
      new BoundedQueueExecutor(
          maxThreads,
          threadExpirationSec,
          TimeUnit.SECONDS,
          maxThreads,
          10000000,
          new ThreadFactoryBuilder()
              .setNameFormat("DataflowWorkUnits-%d")
              .setDaemon(true)
              .build());
  ComputationState computationState =
      new ComputationState(
          "computation",
          defaultMapTask(Arrays.asList(makeSourceInstruction(StringUtf8Coder.of()))),
          executor,
          ImmutableMap.of(),
          null);
  ShardedKey key1Shard1 = ShardedKey.create(ByteString.copyFromUtf8("key1"), 1);
  // Busy-wait keeps submitted items outstanding until stop is flipped.
  Consumer<Work> sleepProcessWorkFn =
      unused -> {
        processStart1.countDown();
        processStart2.countDown();
        processStart3.countDown();
        int count = 0;
        while (!stop.get()) {
          count += 1;
        }
      };
  Work m2 = createMockWork(2, sleepProcessWorkFn);
  Work m3 = createMockWork(3, sleepProcessWorkFn);
  Work m4 = createMockWork(4, sleepProcessWorkFn);
  assertEquals(0, executor.elementsOutstanding());
  assertTrue(computationState.activateWork(key1Shard1, m2));
  executor.execute(m2, m2.getWorkItem().getSerializedSize());
  processStart1.await();
  assertEquals(2, executor.elementsOutstanding());
  assertTrue(computationState.activateWork(key1Shard1, m3));
  assertTrue(computationState.activateWork(key1Shard1, m4));
  executor.execute(m3, m3.getWorkItem().getSerializedSize());
  processStart2.await();
  assertEquals(3, executor.elementsOutstanding());
  executor.execute(m4, m4.getWorkItem().getSerializedSize());
  processStart3.await();
  assertEquals(4, executor.elementsOutstanding());
  stop.set(true);
  executor.shutdown();
}
@Test
public void testExceptionInvalidatesCache() throws Exception {
  // Verifies that a user-code exception invalidates the worker's state cache: after the
  // retried (and subsequent) work items, state is re-fetched from Windmill rather than
  // served from the (now-stale) cache. The TestCountingSource throws on its first
  // snapshot, and the server is configured to expect two exceptions.
  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  server.setExpectedExceptionCount(2);
  DataflowPipelineOptions options = createTestingPipelineOptions(server);
  options.setNumWorkers(1);
  DataflowPipelineDebugOptions debugOptions = options.as(DataflowPipelineDebugOptions.class);
  // One element per reader iteration keeps each work item's output to a single message.
  debugOptions.setUnboundedReaderMaxElements(1);
  CloudObject codec =
      CloudObjects.asCloudObject(
          WindowedValue.getFullCoder(
              ValueWithRecordId.ValueWithRecordIdCoder.of(
                  KvCoder.of(VarIntCoder.of(), VarIntCoder.of())),
              GlobalWindow.Coder.INSTANCE),
          /* sdkComponents= */ null);
  TestCountingSource counter = new TestCountingSource(3).withThrowOnFirstSnapshot(true);
  List<ParallelInstruction> instructions =
      Arrays.asList(
          new ParallelInstruction()
              .setOriginalName("OriginalReadName")
              .setSystemName("Read")
              .setName(DEFAULT_PARDO_USER_NAME)
              .setRead(
                  new ReadInstruction()
                      .setSource(
                          CustomSources.serializeToCloudSource(counter, options).setCodec(codec)))
              .setOutputs(
                  Collections.singletonList(
                      new InstructionOutput()
                          .setName("read_output")
                          .setOriginalName(DEFAULT_OUTPUT_ORIGINAL_NAME)
                          .setSystemName(DEFAULT_OUTPUT_SYSTEM_NAME)
                          .setCodec(codec))),
          makeDoFnInstruction(
              new TestExceptionInvalidatesCacheFn(),
              0,
              StringUtf8Coder.of(),
              WindowingStrategy.globalDefault()),
          makeSinkInstruction(StringUtf8Coder.of(), 1, GlobalWindow.Coder.INSTANCE));
  StreamingDataflowWorker worker =
      makeWorker(
          instructions,
          options.as(StreamingDataflowWorkerOptions.class),
          true /* publishCounters */);
  worker.setRetryLocallyDelayMs(100);
  worker.start();
  // Pre-load three GetData responses: empty state for the first two attempts, and the
  // value 42 (written by attempt 0) for the third.
  for (int i = 0; i < 3; i++) {
    ByteString state;
    if (i == 0 || i == 1) {
      state = ByteString.EMPTY;
    } else {
      state = ByteString.copyFrom(new byte[] {42});
    }
    Windmill.GetDataResponse.Builder dataResponse = Windmill.GetDataResponse.newBuilder();
    dataResponse
        .addDataBuilder()
        .setComputationId(DEFAULT_COMPUTATION_ID)
        .addDataBuilder()
        .setKey(ByteString.copyFromUtf8("0000000000000001"))
        .setShardingKey(1)
        .addValuesBuilder()
        // NOTE(review): the string literal below is truncated in this copy of the file
        // (unterminated quote) — restore the original state-tag constant from upstream.
        .setTag(ByteString.copyFromUtf8("
        .setStateFamily(DEFAULT_PARDO_STATE_FAMILY)
        .getValueBuilder()
        .setTimestamp(0)
        .setData(state);
    server.whenGetDataCalled().thenReturn(dataResponse.build());
  }
  // Drive three work items (tokens 0..2); items after the first carry the previous
  // checkpoint in source_state, and each commit is checked field by field.
  for (int i = 0; i < 3; i++) {
    StringBuilder sb = new StringBuilder();
    sb.append("work {\n");
    sb.append("  computation_id: \"computation\"\n");
    sb.append("  input_data_watermark: 0\n");
    sb.append("  work {\n");
    sb.append("    key: \"0000000000000001\"\n");
    sb.append("    sharding_key: 1\n");
    sb.append("    work_token: ");
    sb.append(i);
    sb.append("    cache_token: 1");
    sb.append("\n");
    if (i > 0) {
      int previousCheckpoint = i - 1;
      sb.append("    source_state {\n");
      sb.append("      state: \"");
      sb.append((char) previousCheckpoint);
      sb.append("\"\n");
      sb.append("    }\n");
    }
    sb.append("  }\n");
    sb.append("}\n");
    server.whenGetWorkCalled().thenReturn(buildInput(sb.toString(), null));
    Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1);
    Windmill.WorkItemCommitRequest commit = result.get((long) i);
    UnsignedLong finalizeId =
        UnsignedLong.fromLongBits(commit.getSourceStateUpdates().getFinalizeIds(0));
    sb = new StringBuilder();
    sb.append("key: \"0000000000000001\"\n");
    sb.append("sharding_key: 1\n");
    sb.append("work_token: ");
    sb.append(i);
    sb.append("\n");
    sb.append("cache_token: 1\n");
    sb.append("output_messages {\n");
    sb.append("  destination_stream_id: \"out\"\n");
    sb.append("  bundles {\n");
    sb.append("    key: \"0000000000000001\"\n");
    int messageNum = i;
    sb.append("    messages {\n");
    sb.append("      timestamp: ");
    sb.append(messageNum * 1000);
    sb.append("\n");
    sb.append("      data: \"0:");
    sb.append(messageNum);
    sb.append("\"\n");
    sb.append("    }\n");
    sb.append("    messages_ids: \"\"\n");
    sb.append("  }\n");
    sb.append("}\n");
    if (i == 0) {
      // Only the first (retried) item writes the value 42 into per-key state.
      sb.append("value_updates {\n");
      // NOTE(review): the tag string below is truncated in this copy of the file
      // (unterminated quote) — restore the original tag constant from upstream.
      sb.append("  tag: \"
      sb.append("  value {\n");
      sb.append("    timestamp: 0\n");
      sb.append("    data: \"");
      sb.append((char) 42);
      sb.append("\"\n");
      sb.append("  }\n");
      sb.append("  state_family: \"parDoStateFamily\"\n");
      sb.append("}\n");
    }
    int sourceState = i;
    sb.append("source_state_updates {\n");
    sb.append("  state: \"");
    sb.append((char) sourceState);
    sb.append("\"\n");
    sb.append("  finalize_ids: ");
    sb.append(finalizeId);
    sb.append("}\n");
    sb.append("source_watermark: ");
    sb.append((sourceState + 1) * 1000);
    sb.append("\n");
    sb.append("source_backlog_bytes: 7\n");
    assertThat(
        setValuesTimestamps(
            removeDynamicFields(commit)
                .toBuilder()
                .clearOutputTimers()
                .clearSourceBytesProcessed())
            .build(),
        equalTo(
            setMessagesMetadata(
                PaneInfo.NO_FIRING,
                CoderUtils.encodeToByteArray(
                    CollectionCoder.of(GlobalWindow.Coder.INSTANCE),
                    ImmutableList.of(GlobalWindow.INSTANCE)),
                parseCommitRequest(sb.toString()))
                .build()));
  }
}
@Test
public void testHugeCommits() throws Exception {
  // Exercises the oversized-commit path: FanoutFn inflates each element's output, and the
  // worker must handle the resulting huge commit and still stop cleanly.
  FakeWindmillServer windmill = new FakeWindmillServer(errorCollector);
  List<ParallelInstruction> stages =
      Arrays.asList(
          makeSourceInstruction(StringUtf8Coder.of()),
          makeDoFnInstruction(new FanoutFn(), 0, StringUtf8Coder.of()),
          makeSinkInstruction(StringUtf8Coder.of(), 0));
  StreamingDataflowWorker worker =
      makeWorker(stages, createTestingPipelineOptions(windmill), true /* publishCounters */);
  worker.start();
  windmill.whenGetWorkCalled().thenReturn(makeInput(0, TimeUnit.MILLISECONDS.toMicros(0)));
  windmill.waitForAndGetCommits(0);
  worker.stop();
}
@Test
public void testActiveWorkRefresh() throws Exception {
  // With a short active-work-refresh period and a SlowDoFn holding work in flight, the
  // worker should issue at least one GetData (refresh) request before the commit lands.
  FakeWindmillServer windmill = new FakeWindmillServer(errorCollector);
  StreamingDataflowWorkerOptions options = createTestingPipelineOptions(windmill);
  options.setActiveWorkRefreshPeriodMillis(100);
  List<ParallelInstruction> stages =
      Arrays.asList(
          makeSourceInstruction(StringUtf8Coder.of()),
          makeDoFnInstruction(new SlowDoFn(), 0, StringUtf8Coder.of()),
          makeSinkInstruction(StringUtf8Coder.of(), 0));
  StreamingDataflowWorker worker = makeWorker(stages, options, true /* publishCounters */);
  worker.start();
  windmill.whenGetWorkCalled().thenReturn(makeInput(0, TimeUnit.MILLISECONDS.toMicros(0)));
  windmill.waitForAndGetCommits(1);
  worker.stop();
  assertThat(windmill.numGetDataRequests(), greaterThan(0));
}
@Test
public void testLatencyAttributionProtobufsPopulated() {
  // Drives a Work item through its lifecycle on a fake clock and checks the per-state
  // latency attributions it reports.
  FakeClock clock = new FakeClock();
  Work work = Work.create(null, clock, Collections.emptyList(), unused -> {});
  clock.sleep(Duration.millis(10));
  work.setState(Work.State.PROCESSING);
  clock.sleep(Duration.millis(20));
  work.setState(Work.State.READING);
  clock.sleep(Duration.millis(30));
  work.setState(Work.State.PROCESSING);
  clock.sleep(Duration.millis(40));
  work.setState(Work.State.COMMIT_QUEUED);
  clock.sleep(Duration.millis(50));
  work.setState(Work.State.COMMITTING);
  clock.sleep(Duration.millis(60));
  // Expected attributions, in reported order. ACTIVE covers both PROCESSING spans
  // (20 + 40 = 60); COMMITTING covers COMMIT_QUEUED plus COMMITTING (50 + 60 = 110).
  State[] expectedStates = {State.QUEUED, State.ACTIVE, State.READING, State.COMMITTING};
  long[] expectedMillis = {10, 60, 30, 110};
  Iterator<LatencyAttribution> it = work.getLatencyAttributions().iterator();
  for (int i = 0; i < expectedStates.length; i++) {
    assertTrue(it.hasNext());
    LatencyAttribution lat = it.next();
    assertSame(expectedStates[i], lat.getState());
    assertEquals(expectedMillis[i], lat.getTotalDurationMillis());
  }
  assertFalse(it.hasNext());
}
@Test
public void testLatencyAttributionToQueuedState() throws Exception {
  // With a single harness thread, the second-delivered item waits behind the first (which
  // spends 1000ms in FakeSlowDoFn) and must therefore report 1000ms of QUEUED latency;
  // the first item reports none.
  final int workToken = 3232; // A distinct token to make log searching easier.
  FakeClock clock = new FakeClock();
  List<ParallelInstruction> instructions =
      Arrays.asList(
          makeSourceInstruction(StringUtf8Coder.of()),
          makeDoFnInstruction(
              new FakeSlowDoFn(clock, Duration.millis(1000)), 0, StringUtf8Coder.of()),
          makeSinkInstruction(StringUtf8Coder.of(), 0));
  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server);
  options.setActiveWorkRefreshPeriodMillis(100);
  options.setNumberOfWorkerHarnessThreads(1);
  StreamingDataflowWorker worker =
      makeWorker(
          instructions,
          options,
          false /* publishCounters */,
          clock,
          clock::newFakeScheduledExecutor);
  worker.start();
  ActiveWorkRefreshSink awrSink = new ActiveWorkRefreshSink(EMPTY_DATA_RESPONDER);
  server.whenGetDataCalled().answerByDefault(awrSink::getData).delayEachResponseBy(Duration.ZERO);
  server
      .whenGetWorkCalled()
      .thenReturn(makeInput(workToken + 1, 0 /* timestamp */))
      .thenReturn(makeInput(workToken, 1 /* timestamp */));
  server.waitForAndGetCommits(2);
  worker.stop();
  // Fix: assertEquals takes (expected, actual); the arguments were previously reversed,
  // which produces misleading failure messages.
  assertEquals(
      Duration.millis(1000), awrSink.getLatencyAttributionDuration(workToken, State.QUEUED));
  assertEquals(
      Duration.ZERO, awrSink.getLatencyAttributionDuration(workToken + 1, State.QUEUED));
}
@Test
public void testLatencyAttributionToActiveState() throws Exception {
  // FakeSlowDoFn holds the item in processing for 1000ms, which must be attributed to
  // the ACTIVE state.
  final int workToken = 4242; // A distinct token to make log searching easier.
  FakeClock clock = new FakeClock();
  List<ParallelInstruction> instructions =
      Arrays.asList(
          makeSourceInstruction(StringUtf8Coder.of()),
          makeDoFnInstruction(
              new FakeSlowDoFn(clock, Duration.millis(1000)), 0, StringUtf8Coder.of()),
          makeSinkInstruction(StringUtf8Coder.of(), 0));
  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server);
  options.setActiveWorkRefreshPeriodMillis(100);
  StreamingDataflowWorker worker =
      makeWorker(
          instructions,
          options,
          false /* publishCounters */,
          clock,
          clock::newFakeScheduledExecutor);
  worker.start();
  ActiveWorkRefreshSink awrSink = new ActiveWorkRefreshSink(EMPTY_DATA_RESPONDER);
  server.whenGetDataCalled().answerByDefault(awrSink::getData).delayEachResponseBy(Duration.ZERO);
  server.whenGetWorkCalled().thenReturn(makeInput(workToken, 0 /* timestamp */));
  server.waitForAndGetCommits(1);
  worker.stop();
  // Fix: assertEquals takes (expected, actual); the arguments were previously reversed.
  assertEquals(
      Duration.millis(1000), awrSink.getLatencyAttributionDuration(workToken, State.ACTIVE));
}
@Test
public void testLatencyAttributionToReadingState() throws Exception {
  // ReadingDoFn triggers a state read; the GetData responder advances the fake clock by
  // 1000ms while answering, so that time must be attributed to the READING state.
  final int workToken = 5454; // A distinct token to make log searching easier.
  FakeClock clock = new FakeClock();
  List<ParallelInstruction> instructions =
      Arrays.asList(
          makeSourceInstruction(StringUtf8Coder.of()),
          makeDoFnInstruction(new ReadingDoFn(), 0, StringUtf8Coder.of()),
          makeSinkInstruction(StringUtf8Coder.of(), 0));
  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server);
  options.setActiveWorkRefreshPeriodMillis(100);
  StreamingDataflowWorker worker =
      makeWorker(
          instructions,
          options,
          false /* publishCounters */,
          clock,
          clock::newFakeScheduledExecutor);
  worker.start();
  ActiveWorkRefreshSink awrSink =
      new ActiveWorkRefreshSink(
          (request) -> {
            clock.sleep(Duration.millis(1000));
            return EMPTY_DATA_RESPONDER.apply(request);
          });
  server.whenGetDataCalled().answerByDefault(awrSink::getData).delayEachResponseBy(Duration.ZERO);
  server.whenGetWorkCalled().thenReturn(makeInput(workToken, 0 /* timestamp */));
  server.waitForAndGetCommits(1);
  worker.stop();
  // Fix: assertEquals takes (expected, actual); the arguments were previously reversed.
  assertEquals(
      Duration.millis(1000), awrSink.getLatencyAttributionDuration(workToken, State.READING));
}
@Test
public void testLatencyAttributionToCommittingState() throws Exception {
  // The CommitWork responder advances the fake clock by 1000ms, so that time must be
  // attributed to the COMMITTING state.
  final int workToken = 6464; // A distinct token to make log searching easier.
  FakeClock clock = new FakeClock();
  List<ParallelInstruction> instructions =
      Arrays.asList(
          makeSourceInstruction(StringUtf8Coder.of()),
          makeSinkInstruction(StringUtf8Coder.of(), 0));
  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  server
      .whenCommitWorkCalled()
      .answerByDefault(
          (request) -> {
            clock.sleep(Duration.millis(1000));
            return Windmill.CommitWorkResponse.getDefaultInstance();
          });
  StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server);
  options.setActiveWorkRefreshPeriodMillis(100);
  StreamingDataflowWorker worker =
      makeWorker(
          instructions,
          options,
          false /* publishCounters */,
          clock,
          clock::newFakeScheduledExecutor);
  worker.start();
  ActiveWorkRefreshSink awrSink = new ActiveWorkRefreshSink(EMPTY_DATA_RESPONDER);
  server.whenGetDataCalled().answerByDefault(awrSink::getData).delayEachResponseBy(Duration.ZERO);
  server.whenGetWorkCalled().thenReturn(makeInput(workToken, TimeUnit.MILLISECONDS.toMicros(0)));
  server.waitForAndGetCommits(1);
  worker.stop();
  // Fix: assertEquals takes (expected, actual); the arguments were previously reversed.
  assertEquals(
      Duration.millis(1000), awrSink.getLatencyAttributionDuration(workToken, State.COMMITTING));
}
@Test
public void testLatencyAttributionPopulatedInCommitRequest() throws Exception {
  // Verifies that per-work-item latency attributions are carried on the commit request
  // itself: the ACTIVE time from FakeSlowDoFn, and (streaming engine only) the
  // GET_WORK_IN_TRANSIT_TO_USER_WORKER time implied by the input timestamp.
  final int workToken = 7272; // A distinct token to make log searching easier.
  long dofnWaitTimeMs = 1000;
  FakeClock clock = new FakeClock();
  List<ParallelInstruction> instructions =
      Arrays.asList(
          makeSourceInstruction(StringUtf8Coder.of()),
          makeDoFnInstruction(
              new FakeSlowDoFn(clock, Duration.millis(dofnWaitTimeMs)), 0, StringUtf8Coder.of()),
          makeSinkInstruction(StringUtf8Coder.of(), 0));
  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server);
  options.setActiveWorkRefreshPeriodMillis(100);
  options.setNumberOfWorkerHarnessThreads(1);
  StreamingDataflowWorker worker =
      makeWorker(
          instructions,
          options,
          false /* publishCounters */,
          clock,
          clock::newFakeScheduledExecutor);
  worker.start();
  ActiveWorkRefreshSink awrSink = new ActiveWorkRefreshSink(EMPTY_DATA_RESPONDER);
  server.whenGetDataCalled().answerByDefault(awrSink::getData).delayEachResponseBy(Duration.ZERO);
  server.whenGetWorkCalled().thenReturn(makeInput(workToken, 1 /* timestamp */));
  Map<Long, WorkItemCommitRequest> workItemCommitRequest = server.waitForAndGetCommits(1);
  worker.stop();
  // Fix: assertEquals takes (expected, actual); the arguments were previously reversed,
  // which produces misleading failure messages.
  assertEquals(
      LatencyAttribution.newBuilder()
          .setState(State.ACTIVE)
          .setTotalDurationMillis(dofnWaitTimeMs)
          .build(),
      workItemCommitRequest.get((long) workToken).getPerWorkItemLatencyAttributions(0));
  if (streamingEngine) {
    assertEquals(
        LatencyAttribution.newBuilder()
            .setState(State.GET_WORK_IN_TRANSIT_TO_USER_WORKER)
            .setTotalDurationMillis(1000)
            .build(),
        workItemCommitRequest.get((long) workToken).getPerWorkItemLatencyAttributions(1));
  }
}
@Test
public void testLimitOnOutputBundleSize() throws Exception {
  // A single custom-source shard with many inflated messages must be split into commits
  // whose serialized size stays within ~10% of MAX_SINK_BYTES.
  List<Integer> finalizeTracker = Lists.newArrayList();
  TestCountingSource.setFinalizeTracker(finalizeTracker);
  final int numMessagesInCustomSourceShard = 100000; // 100K input messages.
  final int inflatedSizePerMessage = 10000; // x10k => 1GB total output if unbounded.
  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  StreamingDataflowWorker worker =
      makeWorker(
          makeUnboundedSourcePipeline(
              numMessagesInCustomSourceShard, new InflateDoFn(inflatedSizePerMessage)),
          createTestingPipelineOptions(server),
          false /* publishCounters */);
  worker.start();
  // First work item.
  server
      .whenGetWorkCalled()
      .thenReturn(
          buildInput(
              "work {"
                  + " computation_id: \"computation\""
                  + " input_data_watermark: 0"
                  + " work {"
                  + " key: \"0000000000000001\""
                  + " sharding_key: 1"
                  + " work_token: 1"
                  + " cache_token: 1"
                  + " }"
                  + "}",
              null));
  // Commit size must land within +/-10% of the sink byte limit.
  Matcher<Integer> isWithinBundleSizeLimits =
      both(greaterThan(StreamingDataflowWorker.MAX_SINK_BYTES * 9 / 10))
          .and(lessThan(StreamingDataflowWorker.MAX_SINK_BYTES * 11 / 10));
  Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1);
  Windmill.WorkItemCommitRequest commit = result.get(1L);
  assertThat(commit.getSerializedSize(), isWithinBundleSizeLimits);
  // Second work item continues the same source; its commit must also be size-limited.
  server
      .whenGetWorkCalled()
      .thenReturn(
          buildInput(
              "work {"
                  + " computation_id: \"computation\""
                  + " input_data_watermark: 0"
                  + " work {"
                  + " key: \"0000000000000001\""
                  + " sharding_key: 1"
                  + " work_token: 2"
                  + " cache_token: 1"
                  + " }"
                  + "}",
              null));
  result = server.waitForAndGetCommits(1);
  commit = result.get(2L);
  assertThat(commit.getSerializedSize(), isWithinBundleSizeLimits);
}
@Test
public void testLimitOnOutputBundleSizeWithMultipleSinks() throws Exception {
  // Same as testLimitOnOutputBundleSize, but the inflated output fans out to two
  // additional sinks; the TOTAL commit size must still respect the sink byte limit.
  List<Integer> finalizeTracker = Lists.newArrayList();
  TestCountingSource.setFinalizeTracker(finalizeTracker);
  final int numMessagesInCustomSourceShard = 100000; // 100K input messages.
  final int inflatedSizePerMessage = 10000; // x10k => 1GB total output if unbounded.
  List<ParallelInstruction> instructions = new ArrayList<>();
  instructions.addAll(
      makeUnboundedSourcePipeline(
          numMessagesInCustomSourceShard, new InflateDoFn(inflatedSizePerMessage)));
  // Add two more sinks reading the same inflated output.
  instructions.add(
      makeSinkInstruction(
          DEFAULT_DESTINATION_STREAM_ID + "-1",
          StringUtf8Coder.of(),
          1,
          GlobalWindow.Coder.INSTANCE));
  instructions.add(
      makeSinkInstruction(
          DEFAULT_DESTINATION_STREAM_ID + "-2",
          StringUtf8Coder.of(),
          1,
          GlobalWindow.Coder.INSTANCE));
  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  StreamingDataflowWorker worker =
      makeWorker(instructions, createTestingPipelineOptions(server), true /* publishCounters */);
  worker.start();
  // First work item.
  server
      .whenGetWorkCalled()
      .thenReturn(
          buildInput(
              "work {"
                  + " computation_id: \"computation\""
                  + " input_data_watermark: 0"
                  + " work {"
                  + " key: \"0000000000000001\""
                  + " sharding_key: 1"
                  + " work_token: 1"
                  + " cache_token: 1"
                  + " }"
                  + "}",
              null));
  // Commit size must land within +/-10% of the sink byte limit.
  Matcher<Integer> isWithinBundleSizeLimits =
      both(greaterThan(StreamingDataflowWorker.MAX_SINK_BYTES * 9 / 10))
          .and(lessThan(StreamingDataflowWorker.MAX_SINK_BYTES * 11 / 10));
  Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1);
  Windmill.WorkItemCommitRequest commit = result.get(1L);
  assertThat(commit.getSerializedSize(), isWithinBundleSizeLimits);
  // Second work item; its commit must also be size-limited.
  server
      .whenGetWorkCalled()
      .thenReturn(
          buildInput(
              "work {"
                  + " computation_id: \"computation\""
                  + " input_data_watermark: 0"
                  + " work {"
                  + " key: \"0000000000000001\""
                  + " sharding_key: 1"
                  + " work_token: 2"
                  + " cache_token: 1"
                  + " }"
                  + "}",
              null));
  result = server.waitForAndGetCommits(1);
  commit = result.get(2L);
  assertThat(commit.getSerializedSize(), isWithinBundleSizeLimits);
}
@Test
public void testStuckCommit() throws Exception {
  // Streaming-engine-only: commits dropped by the server must be detected as "stuck" after
  // stuckCommitDurationMillis and retried/failed without wedging subsequent work.
  // NOTE(review): prefer Assume.assumeTrue(streamingEngine) over a silent early return so
  // the appliance variant reports as skipped rather than passed — confirm org.junit.Assume
  // is imported before changing.
  if (!streamingEngine) {
    // Stuck commits have only been implemented for streaming engine.
    return;
  }
  List<ParallelInstruction> instructions =
      Arrays.asList(
          makeSourceInstruction(StringUtf8Coder.of()),
          makeSinkInstruction(StringUtf8Coder.of(), 0));
  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server);
  options.setStuckCommitDurationMillis(2000);
  StreamingDataflowWorker worker = makeWorker(instructions, options, true /* publishCounters */);
  worker.start();
  // Drop the first two commits so they become stuck.
  server.setDropStreamingCommits(true);
  server
      .whenGetWorkCalled()
      .thenReturn(makeInput(10, TimeUnit.MILLISECONDS.toMicros(2), DEFAULT_KEY_STRING, 1))
      .thenReturn(makeInput(15, TimeUnit.MILLISECONDS.toMicros(3), DEFAULT_KEY_STRING, 5));
  ConcurrentHashMap<Long, Consumer<CommitStatus>> droppedCommits =
      server.waitForDroppedCommits(2);
  server.setDropStreamingCommits(false);
  // A third work item must still commit normally despite the two stuck commits.
  server
      .whenGetWorkCalled()
      .thenReturn(makeInput(1, TimeUnit.MILLISECONDS.toMicros(1), DEFAULT_KEY_STRING, 1));
  Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1);
  // Acknowledge one of the dropped commits late; this must not corrupt state.
  droppedCommits.values().iterator().next().accept(CommitStatus.OK);
  worker.stop();
  assertTrue(result.containsKey(1L));
  assertEquals(
      makeExpectedOutput(
          1, TimeUnit.MILLISECONDS.toMicros(1), DEFAULT_KEY_STRING, 1, DEFAULT_KEY_STRING)
          .build(),
      removeDynamicFields(result.get(1L)));
}
/**
 * DoFn that blocks every element until the test releases {@link #blocker}.
 * Also a JUnit {@link TestRule} so its shared static state is reset per test.
 */
static class BlockingFn extends DoFn<String, String> implements TestRule {
  // Latch the test counts down to let blocked elements proceed.
  public static CountDownLatch blocker = new CountDownLatch(1);
  // One permit released per element, so tests can await N elements in flight.
  public static Semaphore counter = new Semaphore(0);
  // Total number of processElement invocations, including retries.
  public static AtomicInteger callCounter = new AtomicInteger(0);

  @ProcessElement
  public void processElement(ProcessContext c) throws InterruptedException {
    callCounter.incrementAndGet();
    counter.release();
    // Block until the test explicitly opens the latch.
    blocker.await();
    c.output(c.element());
  }

  // Resets the static state before each test so permits/latch releases from an
  // earlier test cannot leak into a later one.
  @Override
  public Statement apply(final Statement base, final Description description) {
    return new Statement() {
      @Override
      public void evaluate() throws Throwable {
        blocker = new CountDownLatch(1);
        counter = new Semaphore(0);
        callCounter = new AtomicInteger();
        base.evaluate();
      }
    };
  }
}
/** Throws {@code KeyTokenInvalidException} exactly once; later elements pass through. */
static class KeyTokenInvalidFn extends DoFn<KV<String, String>, KV<String, String>> {
  // Shared across instances so the exception fires only on the first element ever seen.
  static boolean thrown = false;

  @ProcessElement
  public void processElement(ProcessContext c) {
    if (thrown) {
      c.output(c.element());
      return;
    }
    thrown = true;
    throw new KeyTokenInvalidException("key");
  }
}
/** Inflates the value for "large_key" elements; passes everything else through unchanged. */
static class LargeCommitFn extends DoFn<KV<String, String>, KV<String, String>> {
  @ProcessElement
  public void processElement(ProcessContext c) {
    KV<String, String> elem = c.element();
    if (!elem.getKey().equals("large_key")) {
      c.output(elem);
      return;
    }
    // Emit a value big enough to push the resulting commit over size limits.
    StringBuilder payload = new StringBuilder();
    for (int n = 100; n > 0; n--) {
      payload.append("large_commit");
    }
    c.output(KV.of(elem.getKey(), payload.toString()));
  }
}
/** Re-keys each element as {@code "<oldKey>_<value>"}, leaving the value unchanged. */
static class ChangeKeysFn extends DoFn<KV<String, String>, KV<String, String>> {
  @ProcessElement
  public void processElement(ProcessContext c) {
    String value = c.element().getValue();
    String rewrittenKey = c.element().getKey() + "_" + value;
    c.output(KV.of(rewrittenKey, value));
  }
}
/**
 * Throws a chained exception ("Another exception!" caused by "Exception!") on the
 * first element only. Note it never calls {@code c.output} — all elements are
 * dropped, including retries after the failure.
 */
static class TestExceptionFn extends DoFn<String, String> {
  // True until the first element is processed; only the first call throws.
  boolean firstTime = true;

  @ProcessElement
  public void processElement(ProcessContext c) throws Exception {
    if (firstTime) {
      firstTime = false;
      try {
        throw new Exception("Exception!");
      } catch (Exception e) {
        // Rethrow wrapped so tests can verify cause chains are reported.
        throw new Exception("Another exception!", e);
      }
    }
  }
}
/** Identity transform: forwards every grouped element untouched. */
static class PassthroughDoFn
    extends DoFn<KV<String, Iterable<String>>, KV<String, Iterable<String>>> {
  @ProcessElement
  public void processElement(ProcessContext c) {
    KV<String, Iterable<String>> input = c.element();
    c.output(input);
  }
}
/**
 * Pairs a GetWork response to feed the fake server with the timers and watermark
 * holds the test presumably expects in the resulting commit (usage is outside
 * this chunk — confirm against callers).
 */
static class Action {
  // Response handed to the fake Windmill server.
  GetWorkResponse response;
  // Timers expected in the commit produced for this response.
  Timer[] expectedTimers = new Timer[] {};
  // Watermark holds expected in the commit produced for this response.
  WatermarkHold[] expectedHolds = new WatermarkHold[] {};

  public Action(GetWorkResponse response) {
    this.response = response;
  }

  // Fluent setter; returns this for chaining.
  Action withHolds(WatermarkHold... holds) {
    this.expectedHolds = holds;
    return this;
  }

  // Fluent setter; returns this for chaining.
  Action withTimers(Timer... timers) {
    this.expectedTimers = timers;
    return this;
  }
}
/** Formats each (key, value) pair as the string {@code "key:value"}. */
static class PrintFn extends DoFn<ValueWithRecordId<KV<Integer, Integer>>, String> {
  @ProcessElement
  public void processElement(ProcessContext c) {
    KV<Integer, Integer> pair = c.element().getValue();
    String formatted = pair.getKey() + ":" + pair.getValue();
    c.output(formatted);
  }
}
/** Factory for minimal {@code Work} items used in tests. */
private static class MockWork {
  // Builds a Work item with the given token, an empty key, the wall clock as
  // its time source, no latency attributions, and a no-op processing callback.
  Work create(long workToken) {
    return Work.create(
        Windmill.WorkItem.newBuilder().setKey(ByteString.EMPTY).setWorkToken(workToken).build(),
        Instant::now,
        Collections.emptyList(),
        work -> {});
  }
}
/**
 * Stateful DoFn asserting cached-state visibility across a failure. Expects the
 * input values 0, 1, 2 in order: value 0 writes 42 into state, value 1 reads it
 * back, and value 2 throws once so its retry can verify the state cache was
 * invalidated and correctly repopulated from committed state.
 */
static class TestExceptionInvalidatesCacheFn
    extends DoFn<ValueWithRecordId<KV<Integer, Integer>>, String> {
  // Ensures the exception on value 2 is thrown exactly once across retries.
  static boolean thrown = false;

  @StateId("int")
  private final StateSpec<ValueState<Integer>> counter = StateSpecs.value(VarIntCoder.of());

  @ProcessElement
  public void processElement(ProcessContext c, @StateId("int") ValueState<Integer> state)
      throws Exception {
    KV<Integer, Integer> elem = c.element().getValue();
    if (elem.getValue() == 0) {
      LOG.error("**** COUNTER 0 ****");
      // First element: state starts empty, then holds 42 after the write.
      assertNull(state.read());
      state.write(42);
      assertEquals((Integer) 42, state.read());
    } else if (elem.getValue() == 1) {
      LOG.error("**** COUNTER 1 ****");
      // Second element must observe the value written for element 0.
      assertEquals((Integer) 42, state.read());
    } else if (elem.getValue() == 2) {
      if (!thrown) {
        LOG.error("**** COUNTER 2 (will throw) ****");
        thrown = true;
        throw new Exception("Exception!");
      }
      // The retry after the failure must still see the committed state.
      LOG.error("**** COUNTER 2 (retry) ****");
      assertEquals((Integer) 42, state.read());
    } else {
      throw new RuntimeException("only expecting values [0,2]");
    }
    c.output(elem.getKey() + ":" + elem.getValue());
  }
}
/** Emits a 1,000,000-char string of spaces 3000 times per input element. */
private static class FanoutFn extends DoFn<String, String> {
  @ProcessElement
  public void processElement(ProcessContext c) {
    // Build the large payload once, then fan it out.
    StringBuilder padding = new StringBuilder(1000000);
    int remaining = 1000000;
    while (remaining-- > 0) {
      padding.append(' ');
    }
    String largeString = padding.toString();
    for (int copies = 3000; copies > 0; copies--) {
      c.output(largeString);
    }
  }
}
/** Passes each element through after a one-second wall-clock delay. */
private static class SlowDoFn extends DoFn<String, String> {
  @ProcessElement
  public void processElement(ProcessContext c) throws Exception {
    // Real sleep (not fake-clock): deliberately slows pipeline throughput.
    Thread.sleep(1000);
    c.output(c.element());
  }
}
/**
 * Manually advanced clock for deterministic tests. Runnables registered through
 * {@link FakeScheduledExecutor#scheduleWithFixedDelay} are queued by due time
 * and executed, in order, whenever {@link #sleep} advances the fake "now" past
 * them. All mutation is synchronized on this instance.
 */
static class FakeClock implements Supplier<Instant> {
  // Pending jobs ordered by due instant (earliest first).
  private final PriorityQueue<Job> jobs = new PriorityQueue<>();
  // The fake current time; only moves forward, via sleep().
  private Instant now = Instant.now();

  // The name argument is accepted for API compatibility but unused.
  public ScheduledExecutorService newFakeScheduledExecutor(String unused) {
    return new FakeScheduledExecutor();
  }

  /** Returns the fake current time. */
  @Override
  public synchronized Instant get() {
    return now;
  }

  /** Discards all pending scheduled jobs without running them. */
  public synchronized void clear() {
    jobs.clear();
  }

  /** Advances fake time by {@code duration}, running every job that falls due. */
  public synchronized void sleep(Duration duration) {
    if (duration.isShorterThan(Duration.ZERO)) {
      throw new UnsupportedOperationException("Cannot sleep backwards in time");
    }
    Instant endOfSleep = now.plus(duration);
    while (true) {
      Job job = jobs.peek();
      if (job == null || job.when.isAfter(endOfSleep)) {
        break;
      }
      jobs.remove();
      // Jump the clock to the job's due time before running it, so anything the
      // job itself schedules is measured from that instant.
      now = job.when;
      job.work.run();
    }
    now = endOfSleep;
  }

  // Enqueues work to run once the fake clock reaches now + fromNow.
  private synchronized void schedule(Duration fromNow, Runnable work) {
    jobs.add(new Job(now.plus(fromNow), work));
  }

  /** A runnable paired with the fake instant at which it becomes due. */
  private static class Job implements Comparable<Job> {
    final Instant when;
    final Runnable work;

    Job(Instant when, Runnable work) {
      this.when = when;
      this.work = work;
    }

    @Override
    public int compareTo(Job job) {
      return when.compareTo(job.when);
    }
  }

  /**
   * Minimal ScheduledExecutorService backed by the enclosing FakeClock. Only
   * scheduleWithFixedDelay (plus no-op shutdown/awaitTermination) is supported;
   * every other operation throws UnsupportedOperationException.
   */
  private class FakeScheduledExecutor implements ScheduledExecutorService {
    @Override
    public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException {
      return true;
    }

    @Override
    public void execute(Runnable command) {
      throw new UnsupportedOperationException("Not implemented yet");
    }

    @Override
    public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks)
        throws InterruptedException {
      throw new UnsupportedOperationException("Not implemented yet");
    }

    @Override
    public <T> List<Future<T>> invokeAll(
        Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit)
        throws InterruptedException {
      throw new UnsupportedOperationException("Not implemented yet");
    }

    @Override
    public <T> T invokeAny(Collection<? extends Callable<T>> tasks)
        throws ExecutionException, InterruptedException {
      throw new UnsupportedOperationException("Not implemented yet");
    }

    @Override
    public <T> T invokeAny(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit)
        throws ExecutionException, InterruptedException, TimeoutException {
      throw new UnsupportedOperationException("Not implemented yet");
    }

    @Override
    public boolean isShutdown() {
      throw new UnsupportedOperationException("Not implemented yet");
    }

    @Override
    public boolean isTerminated() {
      throw new UnsupportedOperationException("Not implemented yet");
    }

    @Override
    public void shutdown() {}

    @Override
    public List<Runnable> shutdownNow() {
      throw new UnsupportedOperationException("Not implemented yet");
    }

    @Override
    public <T> Future<T> submit(Callable<T> task) {
      throw new UnsupportedOperationException("Not implemented yet");
    }

    @Override
    public Future<?> submit(Runnable task) {
      throw new UnsupportedOperationException("Not implemented yet");
    }

    @Override
    public <T> Future<T> submit(Runnable task, T result) {
      throw new UnsupportedOperationException("Not implemented yet");
    }

    @Override
    public <V> ScheduledFuture<V> schedule(Callable<V> callable, long delay, TimeUnit unit) {
      throw new UnsupportedOperationException("Not implemented yet");
    }

    @Override
    public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) {
      throw new UnsupportedOperationException("Not implemented yet");
    }

    @Override
    public ScheduledFuture<?> scheduleAtFixedRate(
        Runnable command, long initialDelay, long period, TimeUnit unit) {
      throw new UnsupportedOperationException("Not implemented yet");
    }

    @Override
    public ScheduledFuture<?> scheduleWithFixedDelay(
        Runnable command, long initialDelay, long delay, TimeUnit unit) {
      if (delay <= 0) {
        throw new UnsupportedOperationException(
            "Please supply a delay > 0 to scheduleWithFixedDelay");
      }
      // The wrapper reschedules itself after each run, emulating fixed-delay
      // semantics on the fake clock.
      FakeClock.this.schedule(
          Duration.millis(unit.toMillis(initialDelay)),
          new Runnable() {
            @Override
            public void run() {
              command.run();
              FakeClock.this.schedule(Duration.millis(unit.toMillis(delay)), this);
            }
          });
      // Run anything already due at the current fake time.
      FakeClock.this.sleep(Duration.ZERO);
      // NOTE(review): callers get no handle to cancel the task; returning null
      // is tolerable here but would NPE if a caller ever used the future.
      return null;
    }
  }
}
/** Delays each element on the fake clock (no real blocking) before passing it through. */
private static class FakeSlowDoFn extends DoFn<String, String> {
  // NOTE(review): static, so the most recently constructed instance's clock
  // wins across all instances — assumes tests use a single FakeClock at a time.
  private static FakeClock clock;
  private final Duration sleep;

  FakeSlowDoFn(FakeClock clock, Duration sleep) {
    FakeSlowDoFn.clock = clock;
    this.sleep = sleep;
  }

  @ProcessElement
  public void processElement(ProcessContext c) throws Exception {
    // Advances fake time (running any due scheduled jobs) instead of sleeping.
    clock.sleep(sleep);
    c.output(c.element());
  }
}
/**
 * Fake GetData handler that records the latency attributions carried on
 * active-work-refresh requests, and delegates every other GetData request to
 * the supplied responder.
 */
static class ActiveWorkRefreshSink {
  private final Function<GetDataRequest, GetDataResponse> responder;
  // Per work token: the longest total duration seen for each latency state.
  private final Map<Long, EnumMap<LatencyAttribution.State, Duration>> totalDurations =
      new HashMap<>();

  ActiveWorkRefreshSink(Function<GetDataRequest, GetDataResponse> responder) {
    this.responder = responder;
  }

  /** Returns the recorded duration for the token/state, or ZERO if none was seen. */
  Duration getLatencyAttributionDuration(long workToken, LatencyAttribution.State state) {
    EnumMap<LatencyAttribution.State, Duration> durations = totalDurations.get(workToken);
    return durations == null ? Duration.ZERO : durations.getOrDefault(state, Duration.ZERO);
  }

  // Heuristic: a refresh targets the default computation/sharding key with a
  // nonzero work token and fetches no data (no values, bags, prefixes, holds).
  boolean isActiveWorkRefresh(GetDataRequest request) {
    for (ComputationGetDataRequest computationRequest : request.getRequestsList()) {
      if (!computationRequest.getComputationId().equals(DEFAULT_COMPUTATION_ID)) {
        return false;
      }
      for (KeyedGetDataRequest keyedRequest : computationRequest.getRequestsList()) {
        if (keyedRequest.getWorkToken() == 0
            || keyedRequest.getShardingKey() != DEFAULT_SHARDING_KEY
            || keyedRequest.getValuesToFetchCount() != 0
            || keyedRequest.getBagsToFetchCount() != 0
            || keyedRequest.getTagValuePrefixesToFetchCount() != 0
            || keyedRequest.getWatermarkHoldsToFetchCount() != 0) {
          return false;
        }
      }
    }
    return true;
  }

  GetDataResponse getData(GetDataRequest request) {
    if (!isActiveWorkRefresh(request)) {
      // Not a refresh: let the test-provided responder answer it.
      return responder.apply(request);
    }
    for (ComputationGetDataRequest computationRequest : request.getRequestsList()) {
      for (KeyedGetDataRequest keyedRequest : computationRequest.getRequestsList()) {
        for (LatencyAttribution la : keyedRequest.getLatencyAttributionList()) {
          EnumMap<LatencyAttribution.State, Duration> durations =
              totalDurations.computeIfAbsent(
                  keyedRequest.getWorkToken(),
                  (Long workToken) ->
                      new EnumMap<LatencyAttribution.State, Duration>(
                          LatencyAttribution.State.class));
          Duration cur = Duration.millis(la.getTotalDurationMillis());
          // Keep the maximum duration reported so far for this state.
          durations.compute(la.getState(), (s, d) -> d == null || d.isShorterThan(cur) ? cur : d);
        }
      }
    }
    return EMPTY_DATA_RESPONDER.apply(request);
  }
}
/** Passes elements through after reading (and discarding) a state cell, forcing a state fetch. */
static class ReadingDoFn extends DoFn<String, String> {
  @StateId("int")
  private final StateSpec<ValueState<Integer>> counter = StateSpecs.value(VarIntCoder.of());

  @ProcessElement
  public void processElement(ProcessContext c, @StateId("int") ValueState<Integer> state) {
    // Value is intentionally ignored; the read itself is the point.
    state.read();
    c.output(c.element());
  }
}
/** For each input element, emits a large string. */
private static class InflateDoFn extends DoFn<ValueWithRecordId<KV<Integer, Integer>>, String> {

  final int inflatedSize;

  /** For each input elements, outputs a string of this length */
  InflateDoFn(int inflatedSize) {
    this.inflatedSize = inflatedSize;
  }

  @ProcessElement
  public void processElement(ProcessContext c) {
    // Emit one all-spaces string of the configured length, ignoring the
    // element's actual payload.
    StringBuilder padding = new StringBuilder(inflatedSize);
    for (int i = 0; i < inflatedSize; i++) {
      padding.append(' ');
    }
    c.output(padding.toString());
  }
}
} | class StreamingDataflowWorkerTest {
private static final Logger LOG = LoggerFactory.getLogger(StreamingDataflowWorkerTest.class);
private static final IntervalWindow DEFAULT_WINDOW =
new IntervalWindow(new Instant(1234), Duration.millis(1000));
private static final IntervalWindow WINDOW_AT_ZERO =
new IntervalWindow(new Instant(0), new Instant(1000));
private static final IntervalWindow WINDOW_AT_ONE_SECOND =
new IntervalWindow(new Instant(1000), new Instant(2000));
private static final Coder<IntervalWindow> DEFAULT_WINDOW_CODER = IntervalWindow.getCoder();
private static final Coder<Collection<IntervalWindow>> DEFAULT_WINDOW_COLLECTION_CODER =
CollectionCoder.of(DEFAULT_WINDOW_CODER);
private static final String DEFAULT_COMPUTATION_ID = "computation";
private static final String DEFAULT_MAP_STAGE_NAME = "computation";
private static final String DEFAULT_MAP_SYSTEM_NAME = "computation";
private static final String DEFAULT_OUTPUT_ORIGINAL_NAME = "originalName";
private static final String DEFAULT_OUTPUT_SYSTEM_NAME = "systemName";
private static final String DEFAULT_PARDO_SYSTEM_NAME = "parDo";
private static final String DEFAULT_PARDO_ORIGINAL_NAME = "parDoOriginalName";
private static final String DEFAULT_PARDO_USER_NAME = "parDoUserName";
private static final String DEFAULT_PARDO_STATE_FAMILY = "parDoStateFamily";
private static final String DEFAULT_SOURCE_SYSTEM_NAME = "source";
private static final String DEFAULT_SOURCE_ORIGINAL_NAME = "sourceOriginalName";
private static final String DEFAULT_SINK_SYSTEM_NAME = "sink";
private static final String DEFAULT_SINK_ORIGINAL_NAME = "sinkOriginalName";
private static final String DEFAULT_SOURCE_COMPUTATION_ID = "upstream";
private static final String DEFAULT_KEY_STRING = "key";
private static final long DEFAULT_SHARDING_KEY = 12345;
private static final ByteString DEFAULT_KEY_BYTES = ByteString.copyFromUtf8(DEFAULT_KEY_STRING);
private static final String DEFAULT_DATA_STRING = "data";
private static final String DEFAULT_DESTINATION_STREAM_ID = "out";
private static final long MAXIMUM_BYTES_OUTSTANDING = 10000000;
private static final Function<GetDataRequest, GetDataResponse> EMPTY_DATA_RESPONDER =
(GetDataRequest request) -> {
GetDataResponse.Builder builder = GetDataResponse.newBuilder();
for (ComputationGetDataRequest compRequest : request.getRequestsList()) {
ComputationGetDataResponse.Builder compBuilder =
builder.addDataBuilder().setComputationId(compRequest.getComputationId());
for (KeyedGetDataRequest keyRequest : compRequest.getRequestsList()) {
KeyedGetDataResponse.Builder keyBuilder =
compBuilder
.addDataBuilder()
.setKey(keyRequest.getKey())
.setShardingKey(keyRequest.getShardingKey());
keyBuilder.addAllValues(keyRequest.getValuesToFetchList());
keyBuilder.addAllBags(keyRequest.getBagsToFetchList());
keyBuilder.addAllWatermarkHolds(keyRequest.getWatermarkHoldsToFetchList());
}
}
return builder.build();
};
private final boolean streamingEngine;
private final Supplier<Long> idGenerator =
new Supplier<Long>() {
private final AtomicLong idGenerator = new AtomicLong(1L);
@Override
public Long get() {
return idGenerator.getAndIncrement();
}
};
@Rule public transient Timeout globalTimeout = Timeout.seconds(600);
@Rule public BlockingFn blockingFn = new BlockingFn();
@Rule public TestRule restoreMDC = new RestoreDataflowLoggingMDC();
@Rule public ErrorCollector errorCollector = new ErrorCollector();
WorkUnitClient mockWorkUnitClient = mock(WorkUnitClient.class);
HotKeyLogger hotKeyLogger = mock(HotKeyLogger.class);
public StreamingDataflowWorkerTest(Boolean streamingEngine) {
this.streamingEngine = streamingEngine;
}
@Parameterized.Parameters(name = "{index}: [streamingEngine={0}]")
public static Iterable<Object[]> data() {
return Arrays.asList(new Object[][] {{false}, {true}});
}
// Returns the first counter whose name matches, or null if none does.
private static CounterUpdate getCounter(Iterable<CounterUpdate> counters, String name) {
  CounterUpdate found = null;
  for (CounterUpdate candidate : counters) {
    if (candidate.getNameAndKind().getName().equals(name)) {
      found = candidate;
      break;
    }
  }
  return found;
}
static Work createMockWork(long workToken) {
return createMockWork(workToken, work -> {});
}
static Work createMockWork(long workToken, Consumer<Work> processWorkFn) {
return Work.create(
Windmill.WorkItem.newBuilder().setKey(ByteString.EMPTY).setWorkToken(workToken).build(),
Instant::now,
Collections.emptyList(),
processWorkFn);
}
private byte[] intervalWindowBytes(IntervalWindow window) throws Exception {
return CoderUtils.encodeToByteArray(
DEFAULT_WINDOW_COLLECTION_CODER, Collections.singletonList(window));
}
private String keyStringForIndex(int index) {
return DEFAULT_KEY_STRING + index;
}
private String dataStringForIndex(long index) {
return DEFAULT_DATA_STRING + index;
}
private ParallelInstruction makeWindowingSourceInstruction(Coder<?> coder) {
CloudObject timerCloudObject =
CloudObject.forClassName(
"com.google.cloud.dataflow.sdk.util.TimerOrElement$TimerOrElementCoder");
List<CloudObject> component =
Collections.singletonList(CloudObjects.asCloudObject(coder, /* sdkComponents= */ null));
Structs.addList(timerCloudObject, PropertyNames.COMPONENT_ENCODINGS, component);
CloudObject encodedCoder = CloudObject.forClassName("kind:windowed_value");
Structs.addBoolean(encodedCoder, PropertyNames.IS_WRAPPER, true);
Structs.addList(
encodedCoder,
PropertyNames.COMPONENT_ENCODINGS,
ImmutableList.of(
timerCloudObject,
CloudObjects.asCloudObject(IntervalWindowCoder.of(), /* sdkComponents= */ null)));
return new ParallelInstruction()
.setSystemName(DEFAULT_SOURCE_SYSTEM_NAME)
.setOriginalName(DEFAULT_SOURCE_ORIGINAL_NAME)
.setRead(
new ReadInstruction()
.setSource(
new Source()
.setSpec(CloudObject.forClass(WindowingWindmillReader.class))
.setCodec(encodedCoder)))
.setOutputs(
Collections.singletonList(
new InstructionOutput()
.setName(Long.toString(idGenerator.get()))
.setCodec(encodedCoder)
.setOriginalName(DEFAULT_OUTPUT_ORIGINAL_NAME)
.setSystemName(DEFAULT_OUTPUT_SYSTEM_NAME)));
}
private ParallelInstruction makeSourceInstruction(Coder<?> coder) {
return new ParallelInstruction()
.setSystemName(DEFAULT_SOURCE_SYSTEM_NAME)
.setOriginalName(DEFAULT_SOURCE_ORIGINAL_NAME)
.setRead(
new ReadInstruction()
.setSource(
new Source()
.setSpec(CloudObject.forClass(UngroupedWindmillReader.class))
.setCodec(
CloudObjects.asCloudObject(
WindowedValue.getFullCoder(coder, IntervalWindow.getCoder()),
/* sdkComponents= */ null))))
.setOutputs(
Collections.singletonList(
new InstructionOutput()
.setName(Long.toString(idGenerator.get()))
.setOriginalName(DEFAULT_OUTPUT_ORIGINAL_NAME)
.setSystemName(DEFAULT_OUTPUT_SYSTEM_NAME)
.setCodec(
CloudObjects.asCloudObject(
WindowedValue.getFullCoder(coder, IntervalWindow.getCoder()),
/* sdkComponents= */ null))));
}
private ParallelInstruction makeDoFnInstruction(
DoFn<?, ?> doFn,
int producerIndex,
Coder<?> outputCoder,
WindowingStrategy<?, ?> windowingStrategy) {
CloudObject spec = CloudObject.forClassName("DoFn");
addString(
spec,
PropertyNames.SERIALIZED_FN,
StringUtils.byteArrayToJsonString(
SerializableUtils.serializeToByteArray(
DoFnInfo.forFn(
doFn,
windowingStrategy /* windowing strategy */,
null /* side input views */,
null /* input coder */,
new TupleTag<>(PropertyNames.OUTPUT) /* main output id */,
DoFnSchemaInformation.create(),
Collections.emptyMap()))));
return new ParallelInstruction()
.setSystemName(DEFAULT_PARDO_SYSTEM_NAME)
.setName(DEFAULT_PARDO_USER_NAME)
.setOriginalName(DEFAULT_PARDO_ORIGINAL_NAME)
.setParDo(
new ParDoInstruction()
.setInput(
new InstructionInput()
.setProducerInstructionIndex(producerIndex)
.setOutputNum(0))
.setNumOutputs(1)
.setUserFn(spec)
.setMultiOutputInfos(
Collections.singletonList(new MultiOutputInfo().setTag(PropertyNames.OUTPUT))))
.setOutputs(
Collections.singletonList(
new InstructionOutput()
.setName(PropertyNames.OUTPUT)
.setOriginalName(DEFAULT_OUTPUT_ORIGINAL_NAME)
.setSystemName(DEFAULT_OUTPUT_SYSTEM_NAME)
.setCodec(
CloudObjects.asCloudObject(
WindowedValue.getFullCoder(
outputCoder, windowingStrategy.getWindowFn().windowCoder()),
/* sdkComponents= */ null))));
}
private ParallelInstruction makeDoFnInstruction(
DoFn<?, ?> doFn, int producerIndex, Coder<?> outputCoder) {
WindowingStrategy<?, ?> windowingStrategy =
WindowingStrategy.of(FixedWindows.of(Duration.millis(10)));
return makeDoFnInstruction(doFn, producerIndex, outputCoder, windowingStrategy);
}
private ParallelInstruction makeSinkInstruction(
String streamId,
Coder<?> coder,
int producerIndex,
Coder<? extends BoundedWindow> windowCoder) {
CloudObject spec = CloudObject.forClass(WindmillSink.class);
addString(spec, "stream_id", streamId);
return new ParallelInstruction()
.setSystemName(DEFAULT_SINK_SYSTEM_NAME)
.setOriginalName(DEFAULT_SINK_ORIGINAL_NAME)
.setWrite(
new WriteInstruction()
.setInput(
new InstructionInput()
.setProducerInstructionIndex(producerIndex)
.setOutputNum(0))
.setSink(
new Sink()
.setSpec(spec)
.setCodec(
CloudObjects.asCloudObject(
WindowedValue.getFullCoder(coder, windowCoder),
/* sdkComponents= */ null))));
}
private ParallelInstruction makeSinkInstruction(
Coder<?> coder, int producerIndex, Coder<? extends BoundedWindow> windowCoder) {
return makeSinkInstruction(DEFAULT_DESTINATION_STREAM_ID, coder, producerIndex, windowCoder);
}
private ParallelInstruction makeSinkInstruction(Coder<?> coder, int producerIndex) {
return makeSinkInstruction(coder, producerIndex, IntervalWindow.getCoder());
}
/**
* Returns a {@link MapTask} with the provided {@code instructions} and default values everywhere
* else.
*/
private MapTask defaultMapTask(List<ParallelInstruction> instructions) {
MapTask mapTask =
new MapTask()
.setStageName(DEFAULT_MAP_STAGE_NAME)
.setSystemName(DEFAULT_MAP_SYSTEM_NAME)
.setInstructions(instructions);
mapTask.setFactory(Transport.getJsonFactory());
return mapTask;
}
// Parses a text-format GetWorkResponse. If windowing metadata is supplied, it
// is attached (tagged with a NO_FIRING pane) to every message of the first
// bundle of the first work item.
private Windmill.GetWorkResponse buildInput(String input, byte[] metadata) throws Exception {
  Windmill.GetWorkResponse.Builder builder = Windmill.GetWorkResponse.newBuilder();
  TextFormat.merge(input, builder);
  if (metadata != null) {
    Windmill.InputMessageBundle.Builder messageBundleBuilder =
        builder.getWorkBuilder(0).getWorkBuilder(0).getMessageBundlesBuilder(0);
    for (Windmill.Message.Builder messageBuilder :
        messageBundleBuilder.getMessagesBuilderList()) {
      messageBuilder.setMetadata(addPaneTag(PaneInfo.NO_FIRING, metadata));
    }
  }
  return builder.build();
}
private Windmill.GetWorkResponse buildSessionInput(
int workToken,
long inputWatermark,
long outputWatermark,
List<Long> inputs,
List<Timer> timers)
throws Exception {
Windmill.WorkItem.Builder builder = Windmill.WorkItem.newBuilder();
builder.setKey(DEFAULT_KEY_BYTES);
builder.setShardingKey(DEFAULT_SHARDING_KEY);
builder.setCacheToken(1);
builder.setWorkToken(workToken);
builder.setOutputDataWatermark(outputWatermark * 1000);
if (!inputs.isEmpty()) {
InputMessageBundle.Builder messageBuilder =
Windmill.InputMessageBundle.newBuilder()
.setSourceComputationId(DEFAULT_SOURCE_COMPUTATION_ID);
for (Long input : inputs) {
messageBuilder.addMessages(
Windmill.Message.newBuilder()
.setTimestamp(input)
.setData(ByteString.copyFromUtf8(dataStringForIndex(input)))
.setMetadata(
addPaneTag(
PaneInfo.NO_FIRING,
intervalWindowBytes(
new IntervalWindow(
new Instant(input),
new Instant(input).plus(Duration.millis(10)))))));
}
builder.addMessageBundles(messageBuilder);
}
if (!timers.isEmpty()) {
builder.setTimers(Windmill.TimerBundle.newBuilder().addAllTimers(timers));
}
return Windmill.GetWorkResponse.newBuilder()
.addWork(
Windmill.ComputationWorkItems.newBuilder()
.setComputationId(DEFAULT_COMPUTATION_ID)
.setInputDataWatermark(inputWatermark * 1000)
.addWork(builder))
.build();
}
private Windmill.GetWorkResponse makeInput(int index, long timestamp) throws Exception {
return makeInput(index, timestamp, keyStringForIndex(index), DEFAULT_SHARDING_KEY);
}
private Windmill.GetWorkResponse makeInput(
int index, long timestamp, String key, long shardingKey) throws Exception {
return buildInput(
"work {"
+ " computation_id: \""
+ DEFAULT_COMPUTATION_ID
+ "\""
+ " input_data_watermark: 0"
+ " work {"
+ " key: \""
+ key
+ "\""
+ " sharding_key: "
+ shardingKey
+ " work_token: "
+ index
+ " cache_token: 3"
+ " hot_key_info {"
+ " hot_key_age_usec: 1000000"
+ " }"
+ " message_bundles {"
+ " source_computation_id: \""
+ DEFAULT_SOURCE_COMPUTATION_ID
+ "\""
+ " messages {"
+ " timestamp: "
+ timestamp
+ " data: \"data"
+ index
+ "\""
+ " }"
+ " }"
+ " }"
+ "}",
CoderUtils.encodeToByteArray(
CollectionCoder.of(IntervalWindow.getCoder()),
Collections.singletonList(DEFAULT_WINDOW)));
}
/**
* Returns a {@link org.apache.beam.runners.dataflow.windmill.Windmill.WorkItemCommitRequest}
* builder parsed from the provided text format proto.
*/
private WorkItemCommitRequest.Builder parseCommitRequest(String output) throws Exception {
WorkItemCommitRequest.Builder builder = Windmill.WorkItemCommitRequest.newBuilder();
TextFormat.merge(output, builder);
return builder;
}
/** Sets the metadata of all the contained messages in this WorkItemCommitRequest. */
private WorkItemCommitRequest.Builder setMessagesMetadata(
PaneInfo pane, byte[] windowBytes, WorkItemCommitRequest.Builder builder) throws Exception {
if (windowBytes != null) {
KeyedMessageBundle.Builder bundles = builder.getOutputMessagesBuilder(0).getBundlesBuilder(0);
for (int i = 0; i < bundles.getMessagesCount(); i++) {
bundles.getMessagesBuilder(i).setMetadata(addPaneTag(pane, windowBytes));
}
}
return builder;
}
/** Reset value update timestamps to zero. */
private WorkItemCommitRequest.Builder setValuesTimestamps(WorkItemCommitRequest.Builder builder) {
for (int i = 0; i < builder.getValueUpdatesCount(); i++) {
builder.getValueUpdatesBuilder(i).getValueBuilder().setTimestamp(0);
}
return builder;
}
private WorkItemCommitRequest.Builder makeExpectedOutput(int index, long timestamp)
throws Exception {
return makeExpectedOutput(
index, timestamp, keyStringForIndex(index), DEFAULT_SHARDING_KEY, keyStringForIndex(index));
}
private WorkItemCommitRequest.Builder makeExpectedOutput(
int index, long timestamp, String key, long shardingKey, String outKey) throws Exception {
StringBuilder expectedCommitRequestBuilder =
initializeExpectedCommitRequest(key, shardingKey, index);
appendCommitOutputMessages(expectedCommitRequestBuilder, index, timestamp, outKey);
return setMessagesMetadata(
PaneInfo.NO_FIRING,
intervalWindowBytes(DEFAULT_WINDOW),
parseCommitRequest(expectedCommitRequestBuilder.toString()));
}
// Strips fields that vary per run (per-work-item latency attributions) so
// commits can be compared against golden expected values.
private WorkItemCommitRequest removeDynamicFields(WorkItemCommitRequest request) {
  return request.toBuilder().clearPerWorkItemLatencyAttributions().build();
}
private WorkItemCommitRequest.Builder makeExpectedTruncationRequestOutput(
int index, String key, long shardingKey, long estimatedSize) throws Exception {
StringBuilder expectedCommitRequestBuilder =
initializeExpectedCommitRequest(key, shardingKey, index, false);
appendCommitTruncationFields(expectedCommitRequestBuilder, estimatedSize);
return parseCommitRequest(expectedCommitRequestBuilder.toString());
}
/**
 * Builds the common header of an expected WorkItemCommitRequest in proto text
 * format: key, sharding key, work token (the test's index), the fixed cache
 * token 3 used by makeInput, and optionally a zero source_bytes_processed
 * field.
 */
private StringBuilder initializeExpectedCommitRequest(
    String key, long shardingKey, int index, Boolean hasSourceBytesProcessed) {
  StringBuilder requestBuilder = new StringBuilder();
  requestBuilder.append("key: \"");
  requestBuilder.append(key);
  requestBuilder.append("\" ");
  requestBuilder.append("sharding_key: ");
  requestBuilder.append(shardingKey);
  requestBuilder.append(" ");
  requestBuilder.append("work_token: ");
  requestBuilder.append(index);
  requestBuilder.append(" ");
  requestBuilder.append("cache_token: 3 ");
  // Braced per style; the original single-line unbraced `if` was a lint trap.
  if (hasSourceBytesProcessed) {
    requestBuilder.append("source_bytes_processed: 0 ");
  }
  return requestBuilder;
}
private StringBuilder initializeExpectedCommitRequest(String key, long shardingKey, int index) {
return initializeExpectedCommitRequest(key, shardingKey, index, true);
}
// Appends one output_messages block (destination stream id, output key, and a
// timestamped payload for `index`) to the expected commit request, returning
// the same builder for chaining.
private StringBuilder appendCommitOutputMessages(
    StringBuilder requestBuilder, int index, long timestamp, String outKey) {
  return requestBuilder
      .append("output_messages {")
      .append(" destination_stream_id: \"")
      .append(DEFAULT_DESTINATION_STREAM_ID)
      .append("\"")
      .append(" bundles {")
      .append(" key: \"")
      .append(outKey)
      .append("\"")
      .append(" messages {")
      .append(" timestamp: ")
      .append(timestamp)
      .append(" data: \"")
      .append(dataStringForIndex(index))
      .append("\"")
      .append(" metadata: \"\"")
      .append(" }")
      .append(" messages_ids: \"\"")
      .append(" }")
      .append("}");
}
// Marks the expected commit as exceeding the max commit size and records its
// estimated size; returns the same builder for chaining.
private StringBuilder appendCommitTruncationFields(
    StringBuilder requestBuilder, long estimatedSize) {
  return requestBuilder
      .append("exceeds_max_work_item_commit_bytes: true ")
      .append("estimated_work_item_commit_bytes: ")
      .append(estimatedSize);
}
private StreamingComputationConfig makeDefaultStreamingComputationConfig(
List<ParallelInstruction> instructions) {
StreamingComputationConfig config = new StreamingComputationConfig();
config.setComputationId(DEFAULT_COMPUTATION_ID);
config.setSystemName(DEFAULT_MAP_SYSTEM_NAME);
config.setStageName(DEFAULT_MAP_STAGE_NAME);
config.setInstructions(instructions);
return config;
}
// Encodes the pane info (OUTER context) followed by the raw window bytes into a
// single ByteString, the metadata layout used for Windmill messages in tests.
private ByteString addPaneTag(PaneInfo pane, byte[] windowBytes) throws IOException {
  ByteStringOutputStream output = new ByteStringOutputStream();
  PaneInfo.PaneInfoCoder.INSTANCE.encode(pane, output, Context.OUTER);
  output.write(windowBytes);
  return output.toByteString();
}
// Builds streaming worker options wired to the fake server. When the test is
// parameterized for Streaming Engine, the enabling experiment flag is added to
// the provided args. Active-work refresh is disabled (period 0) so tests fully
// control GetData traffic.
private StreamingDataflowWorkerOptions createTestingPipelineOptions(
    FakeWindmillServer server, String... args) {
  List<String> argsList = Lists.newArrayList(args);
  if (streamingEngine) {
    argsList.add("--experiments=enable_streaming_engine");
  }
  StreamingDataflowWorkerOptions options =
      PipelineOptionsFactory.fromArgs(argsList.toArray(new String[0]))
          .as(StreamingDataflowWorkerOptions.class);
  options.setAppName("StreamingWorkerHarnessTest");
  options.setJobId("test_job_id");
  options.setStreaming(true);
  options.setWindmillServerStub(server);
  options.setActiveWorkRefreshPeriodMillis(0);
  return options;
}
private StreamingDataflowWorker makeWorker(
List<ParallelInstruction> instructions,
StreamingDataflowWorkerOptions options,
boolean publishCounters,
Supplier<Instant> clock,
Function<String, ScheduledExecutorService> executorSupplier)
throws Exception {
StreamingDataflowWorker worker =
new StreamingDataflowWorker(
Collections.singletonList(defaultMapTask(instructions)),
IntrinsicMapTaskExecutorFactory.defaultFactory(),
mockWorkUnitClient,
options,
publishCounters,
hotKeyLogger,
clock,
executorSupplier);
worker.addStateNameMappings(
ImmutableMap.of(DEFAULT_PARDO_USER_NAME, DEFAULT_PARDO_STATE_FAMILY));
return worker;
}
private StreamingDataflowWorker makeWorker(
List<ParallelInstruction> instructions,
StreamingDataflowWorkerOptions options,
boolean publishCounters)
throws Exception {
return makeWorker(
instructions,
options,
publishCounters,
Instant::now,
(threadName) -> Executors.newSingleThreadScheduledExecutor());
}
@Test
public void testBasicHarness() throws Exception {
List<ParallelInstruction> instructions =
Arrays.asList(
makeSourceInstruction(StringUtf8Coder.of()),
makeSinkInstruction(StringUtf8Coder.of(), 0));
FakeWindmillServer server = new FakeWindmillServer(errorCollector);
StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server);
StreamingDataflowWorker worker = makeWorker(instructions, options, true /* publishCounters */);
worker.start();
final int numIters = 2000;
for (int i = 0; i < numIters; ++i) {
server.whenGetWorkCalled().thenReturn(makeInput(i, TimeUnit.MILLISECONDS.toMicros(i)));
}
Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(numIters);
worker.stop();
for (int i = 0; i < numIters; ++i) {
assertTrue(result.containsKey((long) i));
assertEquals(
makeExpectedOutput(i, TimeUnit.MILLISECONDS.toMicros(i)).build(),
removeDynamicFields(result.get((long) i)));
}
verify(hotKeyLogger, atLeastOnce()).logHotKeyDetection(nullable(String.class), any());
}
@Test
public void testBasic() throws Exception {
  List<ParallelInstruction> instructions =
      Arrays.asList(
          makeSourceInstruction(StringUtf8Coder.of()),
          makeSinkInstruction(StringUtf8Coder.of(), 0));
  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  // Server starts not-ready so the worker must fetch the streaming config first.
  server.setIsReady(false);
  StreamingConfigTask streamingConfig = new StreamingConfigTask();
  streamingConfig.setStreamingComputationConfigs(
      ImmutableList.of(makeDefaultStreamingComputationConfig(instructions)));
  streamingConfig.setWindmillServiceEndpoint("foo");
  WorkItem workItem = new WorkItem();
  workItem.setStreamingConfigTask(streamingConfig);
  // The worker obtains its computation config via the global config work item.
  when(mockWorkUnitClient.getGlobalStreamingConfigWorkItem()).thenReturn(Optional.of(workItem));
  StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server);
  StreamingDataflowWorker worker = makeWorker(instructions, options, true /* publishCounters */);
  worker.start();
  final int numIters = 2000;
  for (int i = 0; i < numIters; ++i) {
    server.whenGetWorkCalled().thenReturn(makeInput(i, TimeUnit.MILLISECONDS.toMicros(i)));
  }
  Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(numIters);
  worker.stop();
  for (int i = 0; i < numIters; ++i) {
    assertTrue(result.containsKey((long) i));
    assertEquals(
        makeExpectedOutput(i, TimeUnit.MILLISECONDS.toMicros(i)).build(),
        removeDynamicFields(result.get((long) i)));
  }
  verify(hotKeyLogger, atLeastOnce()).logHotKeyDetection(nullable(String.class), any());
}
@Test
public void testHotKeyLogging() throws Exception {
  // Requires a KvCoder so the hot-key logger can extract and log the user key.
  List<ParallelInstruction> instructions =
      Arrays.asList(
          makeSourceInstruction(KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of())),
          makeSinkInstruction(KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of()), 0));
  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  server.setIsReady(false);
  StreamingConfigTask streamingConfig = new StreamingConfigTask();
  streamingConfig.setStreamingComputationConfigs(
      ImmutableList.of(makeDefaultStreamingComputationConfig(instructions)));
  streamingConfig.setWindmillServiceEndpoint("foo");
  WorkItem workItem = new WorkItem();
  workItem.setStreamingConfigTask(streamingConfig);
  when(mockWorkUnitClient.getGlobalStreamingConfigWorkItem()).thenReturn(Optional.of(workItem));
  // Key logging is opt-in; enable it via the pipeline option under test.
  StreamingDataflowWorkerOptions options =
      createTestingPipelineOptions(server, "--hotKeyLoggingEnabled=true");
  StreamingDataflowWorker worker = makeWorker(instructions, options, true /* publishCounters */);
  worker.start();
  final int numIters = 2000;
  // All work items use the same "key"/sharding key to trigger hot-key detection.
  for (int i = 0; i < numIters; ++i) {
    server
        .whenGetWorkCalled()
        .thenReturn(makeInput(i, TimeUnit.MILLISECONDS.toMicros(i), "key", DEFAULT_SHARDING_KEY));
  }
  server.waitForAndGetCommits(numIters);
  worker.stop();
  // With logging enabled, the detection call includes the literal user key.
  verify(hotKeyLogger, atLeastOnce())
      .logHotKeyDetection(nullable(String.class), any(), eq("key"));
}
@Test
public void testHotKeyLoggingNotEnabled() throws Exception {
  List<ParallelInstruction> instructions =
      Arrays.asList(
          makeSourceInstruction(KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of())),
          makeSinkInstruction(KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of()), 0));
  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  server.setIsReady(false);
  StreamingConfigTask streamingConfig = new StreamingConfigTask();
  streamingConfig.setStreamingComputationConfigs(
      ImmutableList.of(makeDefaultStreamingComputationConfig(instructions)));
  streamingConfig.setWindmillServiceEndpoint("foo");
  WorkItem workItem = new WorkItem();
  workItem.setStreamingConfigTask(streamingConfig);
  when(mockWorkUnitClient.getGlobalStreamingConfigWorkItem()).thenReturn(Optional.of(workItem));
  // Default options: hotKeyLoggingEnabled is off.
  StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server);
  StreamingDataflowWorker worker = makeWorker(instructions, options, true /* publishCounters */);
  worker.start();
  final int numIters = 2000;
  for (int i = 0; i < numIters; ++i) {
    server
        .whenGetWorkCalled()
        .thenReturn(makeInput(i, TimeUnit.MILLISECONDS.toMicros(i), "key", DEFAULT_SHARDING_KEY));
  }
  server.waitForAndGetCommits(numIters);
  worker.stop();
  // Detection still fires, but via the two-argument overload that omits the user key.
  verify(hotKeyLogger, atLeastOnce()).logHotKeyDetection(nullable(String.class), any());
}
@Test
public void testIgnoreRetriedKeys() throws Exception {
  final int numIters = 4;
  // BlockingFn holds work items in-flight so retries for the same key arrive while the
  // original is still active; those retries must be ignored, not double-processed.
  List<ParallelInstruction> instructions =
      Arrays.asList(
          makeSourceInstruction(StringUtf8Coder.of()),
          makeDoFnInstruction(blockingFn, 0, StringUtf8Coder.of()),
          makeSinkInstruction(StringUtf8Coder.of(), 0));
  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server);
  StreamingDataflowWorker worker = makeWorker(instructions, options, true /* publishCounters */);
  worker.start();
  // First wave: one item per key, plus an item on a different sharding key (+1) which may
  // be processed concurrently since sharding keys differ.
  for (int i = 0; i < numIters; ++i) {
    server
        .whenGetWorkCalled()
        .thenReturn(
            makeInput(
                i, TimeUnit.MILLISECONDS.toMicros(i), keyStringForIndex(i), DEFAULT_SHARDING_KEY))
        .thenReturn(
            makeInput(
                i + 1000,
                TimeUnit.MILLISECONDS.toMicros(i),
                keyStringForIndex(i),
                DEFAULT_SHARDING_KEY + 1));
  }
  // Wait until all 2*numIters items are blocked inside BlockingFn.
  BlockingFn.counter.acquire(numIters * 2);
  // Re-deliver the same work tokens while they are still in flight; these are retries and
  // must be dropped.
  for (int i = 0; i < numIters; ++i) {
    server
        .whenGetWorkCalled()
        .thenReturn(makeInput(i, TimeUnit.MILLISECONDS.toMicros(i)))
        .thenReturn(
            makeInput(
                i + 1000,
                TimeUnit.MILLISECONDS.toMicros(i),
                keyStringForIndex(i),
                DEFAULT_SHARDING_KEY + 1));
  }
  server.waitForEmptyWorkQueue();
  // New work tokens (i + numIters) for the same keys: queued behind the blocked items.
  for (int i = 0; i < numIters; ++i) {
    server
        .whenGetWorkCalled()
        .thenReturn(
            makeInput(
                i + numIters,
                TimeUnit.MILLISECONDS.toMicros(i),
                keyStringForIndex(i),
                DEFAULT_SHARDING_KEY));
  }
  server.waitForEmptyWorkQueue();
  // Unblock processing; expect commits only for the 3*numIters distinct work tokens.
  BlockingFn.blocker.countDown();
  Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(numIters * 3);
  for (int i = 0; i < numIters; ++i) {
    assertTrue(result.containsKey((long) i));
    assertEquals(
        makeExpectedOutput(i, TimeUnit.MILLISECONDS.toMicros(i)).build(),
        removeDynamicFields(result.get((long) i)));
    assertTrue(result.containsKey((long) i + 1000));
    assertEquals(
        makeExpectedOutput(
                i + 1000,
                TimeUnit.MILLISECONDS.toMicros(i),
                keyStringForIndex(i),
                DEFAULT_SHARDING_KEY + 1,
                keyStringForIndex(i))
            .build(),
        removeDynamicFields(result.get((long) i + 1000)));
    assertTrue(result.containsKey((long) i + numIters));
    assertEquals(
        makeExpectedOutput(
                i + numIters,
                TimeUnit.MILLISECONDS.toMicros(i),
                keyStringForIndex(i),
                DEFAULT_SHARDING_KEY,
                keyStringForIndex(i))
            .build(),
        removeDynamicFields(result.get((long) i + numIters)));
  }
  // After the originals complete, fresh work tokens for the same keys process normally.
  for (int i = 0; i < numIters; ++i) {
    server
        .whenGetWorkCalled()
        .thenReturn(
            makeInput(
                i + numIters * 2,
                TimeUnit.MILLISECONDS.toMicros(i),
                keyStringForIndex(i),
                DEFAULT_SHARDING_KEY));
  }
  result = server.waitForAndGetCommits(numIters);
  worker.stop();
  for (int i = 0; i < numIters; ++i) {
    assertTrue(result.containsKey((long) i + numIters * 2));
    assertEquals(
        makeExpectedOutput(
                i + numIters * 2,
                TimeUnit.MILLISECONDS.toMicros(i),
                keyStringForIndex(i),
                DEFAULT_SHARDING_KEY,
                keyStringForIndex(i))
            .build(),
        removeDynamicFields(result.get((long) i + numIters * 2)));
  }
}
@Test(timeout = 10000)
public void testNumberOfWorkerHarnessThreadsIsHonored() throws Exception {
  int expectedNumberOfThreads = 5;
  List<ParallelInstruction> instructions =
      Arrays.asList(
          makeSourceInstruction(StringUtf8Coder.of()),
          makeDoFnInstruction(blockingFn, 0, StringUtf8Coder.of()),
          makeSinkInstruction(StringUtf8Coder.of(), 0));
  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server);
  options.setNumberOfWorkerHarnessThreads(expectedNumberOfThreads);
  StreamingDataflowWorker worker = makeWorker(instructions, options, true /* publishCounters */);
  worker.start();
  // Offer twice as many work items as threads; only `expectedNumberOfThreads` should run.
  for (int i = 0; i < expectedNumberOfThreads * 2; ++i) {
    server.whenGetWorkCalled().thenReturn(makeInput(i, TimeUnit.MILLISECONDS.toMicros(i)));
  }
  // Wait for exactly `expectedNumberOfThreads` items to block inside BlockingFn ...
  BlockingFn.counter.acquire(expectedNumberOfThreads);
  // ... then fail if any extra item starts within the grace window, which would mean the
  // harness ran more threads than configured.
  if (BlockingFn.counter.tryAcquire(500, TimeUnit.MILLISECONDS)) {
    fail(
        "Expected number of threads "
            + expectedNumberOfThreads
            + " does not match actual "
            + "number of work items processed concurrently "
            + BlockingFn.callCounter.get()
            + ".");
  }
  BlockingFn.blocker.countDown();
}
@Test
public void testKeyTokenInvalidException() throws Exception {
  // Appliance-only behavior; skip under Streaming Engine.
  if (streamingEngine) {
    return;
  }
  KvCoder<String, String> kvCoder = KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of());
  List<ParallelInstruction> instructions =
      Arrays.asList(
          makeSourceInstruction(kvCoder),
          makeDoFnInstruction(new KeyTokenInvalidFn(), 0, kvCoder),
          makeSinkInstruction(kvCoder, 1));
  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  server
      .whenGetWorkCalled()
      .thenReturn(makeInput(0, 0, DEFAULT_KEY_STRING, DEFAULT_SHARDING_KEY));
  StreamingDataflowWorker worker =
      makeWorker(instructions, createTestingPipelineOptions(server), true /* publishCounters */);
  worker.start();
  server.waitForEmptyWorkQueue();
  server
      .whenGetWorkCalled()
      .thenReturn(makeInput(1, 0, DEFAULT_KEY_STRING, DEFAULT_SHARDING_KEY));
  Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1);
  // Work token 0 hit KeyTokenInvalid and is silently dropped; only token 1 commits.
  assertEquals(
      makeExpectedOutput(1, 0, DEFAULT_KEY_STRING, DEFAULT_SHARDING_KEY, DEFAULT_KEY_STRING)
          .build(),
      removeDynamicFields(result.get(1L)));
  assertEquals(1, result.size());
}
@Test
public void testKeyCommitTooLargeException() throws Exception {
  KvCoder<String, String> kvCoder = KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of());
  List<ParallelInstruction> instructions =
      Arrays.asList(
          makeSourceInstruction(kvCoder),
          makeDoFnInstruction(new LargeCommitFn(), 0, kvCoder),
          makeSinkInstruction(kvCoder, 1));
  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  server.setExpectedExceptionCount(1);
  StreamingDataflowWorker worker =
      makeWorker(instructions, createTestingPipelineOptions(server), true /* publishCounters */);
  // Force the oversized-commit path with a tiny limit.
  worker.setMaxWorkItemCommitBytes(1000);
  worker.start();
  server
      .whenGetWorkCalled()
      .thenReturn(makeInput(1, 0, "large_key", DEFAULT_SHARDING_KEY))
      .thenReturn(makeInput(2, 0, "key", DEFAULT_SHARDING_KEY));
  server.waitForEmptyWorkQueue();
  Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1);
  assertEquals(2, result.size());
  // The small commit goes through unchanged.
  assertEquals(
      makeExpectedOutput(2, 0, "key", DEFAULT_SHARDING_KEY, "key").build(),
      removeDynamicFields(result.get(2L)));
  assertTrue(result.containsKey(1L));
  // The oversized commit is replaced by a truncation request carrying the estimated size.
  WorkItemCommitRequest largeCommit = result.get(1L);
  assertEquals("large_key", largeCommit.getKey().toStringUtf8());
  assertEquals(
      makeExpectedTruncationRequestOutput(
              1, "large_key", DEFAULT_SHARDING_KEY, largeCommit.getEstimatedWorkItemCommitBytes())
          .build(),
      largeCommit);
  assertTrue(largeCommit.getEstimatedWorkItemCommitBytes() > 1000);
  // Poll periodic worker updates until the KeyCommitTooLargeException error is reported.
  int maxTries = 10;
  while (--maxTries > 0) {
    worker.reportPeriodicWorkerUpdates();
    Uninterruptibles.sleepUninterruptibly(1000, TimeUnit.MILLISECONDS);
  }
  ArgumentCaptor<WorkItemStatus> workItemStatusCaptor =
      ArgumentCaptor.forClass(WorkItemStatus.class);
  verify(mockWorkUnitClient, atLeast(2)).reportWorkItemStatus(workItemStatusCaptor.capture());
  List<WorkItemStatus> capturedStatuses = workItemStatusCaptor.getAllValues();
  boolean foundErrors = false;
  for (WorkItemStatus status : capturedStatuses) {
    if (!status.getErrors().isEmpty()) {
      // The exception must be reported exactly once across all status updates.
      assertFalse(foundErrors);
      foundErrors = true;
      String errorMessage = status.getErrors().get(0).getMessage();
      assertThat(errorMessage, Matchers.containsString("KeyCommitTooLargeException"));
    }
  }
  assertTrue(foundErrors);
}
@Test
public void testKeyChange() throws Exception {
  // ChangeKeysFn rewrites each element's key; commits must still be attributed to the
  // original input key/sharding key.
  KvCoder<String, String> kvCoder = KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of());
  List<ParallelInstruction> instructions =
      Arrays.asList(
          makeSourceInstruction(kvCoder),
          makeDoFnInstruction(new ChangeKeysFn(), 0, kvCoder),
          makeSinkInstruction(kvCoder, 1));
  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  for (int i = 0; i < 2; i++) {
    server
        .whenGetWorkCalled()
        .thenReturn(
            makeInput(
                i, TimeUnit.MILLISECONDS.toMicros(i), keyStringForIndex(i), DEFAULT_SHARDING_KEY))
        .thenReturn(
            makeInput(
                i + 1000,
                TimeUnit.MILLISECONDS.toMicros(i),
                keyStringForIndex(i),
                DEFAULT_SHARDING_KEY + i));
  }
  StreamingDataflowWorker worker =
      makeWorker(instructions, createTestingPipelineOptions(server), true /* publishCounters */);
  worker.start();
  Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(4);
  for (int i = 0; i < 2; i++) {
    assertTrue(result.containsKey((long) i));
    // Output data carries the rewritten key suffix ("_data" + work token).
    assertEquals(
        makeExpectedOutput(
                i,
                TimeUnit.MILLISECONDS.toMicros(i),
                keyStringForIndex(i),
                DEFAULT_SHARDING_KEY,
                keyStringForIndex(i) + "_data" + i)
            .build(),
        removeDynamicFields(result.get((long) i)));
    assertTrue(result.containsKey((long) i + 1000));
    assertEquals(
        makeExpectedOutput(
                i + 1000,
                TimeUnit.MILLISECONDS.toMicros(i),
                keyStringForIndex(i),
                DEFAULT_SHARDING_KEY + i,
                keyStringForIndex(i) + "_data" + (i + 1000))
            .build(),
        removeDynamicFields(result.get((long) i + 1000)));
  }
}
@Test(timeout = 30000)
public void testExceptions() throws Exception {
  // Appliance-only behavior; skip under Streaming Engine.
  if (streamingEngine) {
    return;
  }
  List<ParallelInstruction> instructions =
      Arrays.asList(
          makeSourceInstruction(StringUtf8Coder.of()),
          makeDoFnInstruction(new TestExceptionFn(), 0, StringUtf8Coder.of()),
          makeSinkInstruction(StringUtf8Coder.of(), 1));
  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  server.setExpectedExceptionCount(1);
  String keyString = keyStringForIndex(0);
  // Hand-built textproto work item so key, tokens, and message payload are fully explicit.
  server
      .whenGetWorkCalled()
      .thenReturn(
          buildInput(
              "work {"
                  + "  computation_id: \""
                  + DEFAULT_COMPUTATION_ID
                  + "\""
                  + "  input_data_watermark: 0"
                  + "  work {"
                  + "    key: \""
                  + keyString
                  + "\""
                  + "    sharding_key: 1"
                  + "    work_token: 0"
                  + "    cache_token: 1"
                  + "    message_bundles {"
                  + "      source_computation_id: \""
                  + DEFAULT_SOURCE_COMPUTATION_ID
                  + "\""
                  + "      messages {"
                  + "        timestamp: 0"
                  + "        data: \"0\""
                  + "      }"
                  + "    }"
                  + "  }"
                  + "}",
              CoderUtils.encodeToByteArray(
                  CollectionCoder.of(IntervalWindow.getCoder()),
                  Collections.singletonList(DEFAULT_WINDOW))));
  StreamingDataflowWorker worker =
      makeWorker(instructions, createTestingPipelineOptions(server), true /* publishCounters */);
  worker.start();
  server.waitForEmptyWorkQueue();
  // The failing work item must eventually drain from the executor rather than retry forever.
  int maxTries = 10;
  while (maxTries-- > 0 && !worker.workExecutorIsEmpty()) {
    Uninterruptibles.sleepUninterruptibly(1000, TimeUnit.MILLISECONDS);
  }
  assertTrue(worker.workExecutorIsEmpty());
  // Drive periodic updates so the exception gets reported to the work-unit client.
  maxTries = 10;
  while (maxTries-- > 0) {
    worker.reportPeriodicWorkerUpdates();
    Uninterruptibles.sleepUninterruptibly(1000, TimeUnit.MILLISECONDS);
  }
  ArgumentCaptor<WorkItemStatus> workItemStatusCaptor =
      ArgumentCaptor.forClass(WorkItemStatus.class);
  verify(mockWorkUnitClient, atLeast(1)).reportWorkItemStatus(workItemStatusCaptor.capture());
  List<WorkItemStatus> capturedStatuses = workItemStatusCaptor.getAllValues();
  boolean foundErrors = false;
  int lastUpdateWithoutErrors = 0;
  int lastUpdateWithErrors = 0;
  for (WorkItemStatus status : capturedStatuses) {
    if (status.getErrors().isEmpty()) {
      lastUpdateWithoutErrors++;
      continue;
    }
    lastUpdateWithErrors++;
    // Errors are reported exactly once; the stack trace includes both chained exceptions.
    assertFalse(foundErrors);
    foundErrors = true;
    String stacktrace = status.getErrors().get(0).getMessage();
    assertThat(stacktrace, Matchers.containsString("Exception!"));
    assertThat(stacktrace, Matchers.containsString("Another exception!"));
    assertThat(stacktrace, Matchers.containsString("processElement"));
  }
  assertTrue(foundErrors);
  assertTrue(lastUpdateWithoutErrors > lastUpdateWithErrors);
  // The failure is also reported to Windmill via ReportStats with the item's identifiers.
  assertThat(server.getStatsReceived().size(), Matchers.greaterThanOrEqualTo(1));
  Windmill.ReportStatsRequest stats = server.getStatsReceived().get(0);
  assertEquals(DEFAULT_COMPUTATION_ID, stats.getComputationId());
  assertEquals(keyString, stats.getKey().toStringUtf8());
  assertEquals(0, stats.getWorkToken());
  assertEquals(1, stats.getShardingKey());
}
@Test
public void testAssignWindows() throws Exception {
  Duration gapDuration = Duration.standardSeconds(1);
  CloudObject spec = CloudObject.forClassName("AssignWindowsDoFn");
  SdkComponents sdkComponents = SdkComponents.create();
  sdkComponents.registerEnvironment(Environments.JAVA_SDK_HARNESS_ENVIRONMENT);
  // Serialize a fixed-windows (1s) strategy into the ParDo spec the worker will decode.
  addString(
      spec,
      PropertyNames.SERIALIZED_FN,
      StringUtils.byteArrayToJsonString(
          WindowingStrategyTranslation.toMessageProto(
                  WindowingStrategy.of(FixedWindows.of(gapDuration)), sdkComponents)
              .toByteArray()));
  ParallelInstruction addWindowsInstruction =
      new ParallelInstruction()
          .setSystemName("AssignWindows")
          .setName("AssignWindows")
          .setOriginalName("AssignWindowsOriginal")
          .setParDo(
              new ParDoInstruction()
                  .setInput(new InstructionInput().setProducerInstructionIndex(0).setOutputNum(0))
                  .setNumOutputs(1)
                  .setUserFn(spec))
          .setOutputs(
              Collections.singletonList(
                  new InstructionOutput()
                      .setOriginalName(DEFAULT_OUTPUT_ORIGINAL_NAME)
                      .setSystemName(DEFAULT_OUTPUT_SYSTEM_NAME)
                      .setName("output")
                      .setCodec(
                          CloudObjects.asCloudObject(
                              WindowedValue.getFullCoder(
                                  StringUtf8Coder.of(), IntervalWindow.getCoder()),
                              /* sdkComponents= */ null))));
  List<ParallelInstruction> instructions =
      Arrays.asList(
          makeSourceInstruction(StringUtf8Coder.of()),
          addWindowsInstruction,
          makeSinkInstruction(StringUtf8Coder.of(), 1));
  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  // Two timestamps one second apart, so they land in distinct fixed windows.
  int timestamp1 = 0;
  int timestamp2 = 1000000;
  server
      .whenGetWorkCalled()
      .thenReturn(makeInput(timestamp1, timestamp1))
      .thenReturn(makeInput(timestamp2, timestamp2));
  StreamingDataflowWorker worker =
      makeWorker(instructions, createTestingPipelineOptions(server), false /* publishCounters */);
  worker.start();
  Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(2);
  // Each output message's metadata must carry the window it was assigned to.
  assertThat(
      removeDynamicFields(result.get((long) timestamp1)),
      equalTo(
          setMessagesMetadata(
                  PaneInfo.NO_FIRING,
                  intervalWindowBytes(WINDOW_AT_ZERO),
                  makeExpectedOutput(timestamp1, timestamp1))
              .build()));
  assertThat(
      removeDynamicFields(result.get((long) timestamp2)),
      equalTo(
          setMessagesMetadata(
                  PaneInfo.NO_FIRING,
                  intervalWindowBytes(WINDOW_AT_ONE_SECOND),
                  makeExpectedOutput(timestamp2, timestamp2))
              .build()));
}
/** Asserts the commit's output timers match {@code timers} exactly, in any order. */
private void verifyTimers(WorkItemCommitRequest commit, Timer... timers) {
  assertThat(commit.getOutputTimersList(), Matchers.containsInAnyOrder(timers));
}
/** Asserts the commit's watermark holds match {@code watermarkHolds} exactly, in any order. */
private void verifyHolds(WorkItemCommitRequest commit, WatermarkHold... watermarkHolds) {
  assertThat(commit.getWatermarkHoldsList(), Matchers.containsInAnyOrder(watermarkHolds));
}
/** Builds a non-delete watermark timer; see the three-argument overload. */
private Timer buildWatermarkTimer(String tagPrefix, long timestampMillis) {
  return buildWatermarkTimer(tagPrefix, timestampMillis, false);
}
/**
 * Builds a WATERMARK timer in the "MergeWindows" state family tagged
 * {@code "<tagPrefix>:<timestampMillis>"}. When {@code delete} is true the timestamp fields are
 * left unset (a deletion timer); otherwise both timestamps are set in microseconds.
 */
private Timer buildWatermarkTimer(String tagPrefix, long timestampMillis, boolean delete) {
  String tag = tagPrefix + ":" + timestampMillis;
  Timer.Builder timer =
      Timer.newBuilder()
          .setTag(ByteString.copyFromUtf8(tag))
          .setType(Type.WATERMARK)
          .setStateFamily("MergeWindows");
  if (!delete) {
    long timestampMicros = timestampMillis * 1000;
    timer.setTimestamp(timestampMicros);
    timer.setMetadataTimestamp(timestampMicros);
  }
  return timer.build();
}
/**
 * Builds a watermark hold in the "MergeWindows" state family for the given tag. A negative
 * {@code timestamp} means "no hold value" and adds no timestamps; otherwise the timestamp is
 * recorded in microseconds. {@code reset} marks the hold as cleared before any adds.
 */
private WatermarkHold buildHold(String tag, long timestamp, boolean reset) {
  WatermarkHold.Builder hold =
      WatermarkHold.newBuilder()
          .setTag(ByteString.copyFromUtf8(tag))
          .setStateFamily("MergeWindows");
  if (reset) {
    hold.setReset(true);
  }
  if (timestamp >= 0) {
    hold.addTimestamps(timestamp * 1000);
  }
  return hold.build();
}
@Test
public void testMergeWindows() throws Exception {
  Coder<KV<String, String>> kvCoder = KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of());
  Coder<WindowedValue<KV<String, String>>> windowedKvCoder =
      FullWindowedValueCoder.of(kvCoder, IntervalWindow.getCoder());
  KvCoder<String, List<String>> groupedCoder =
      KvCoder.of(StringUtf8Coder.of(), ListCoder.of(StringUtf8Coder.of()));
  Coder<WindowedValue<KV<String, List<String>>>> windowedGroupedCoder =
      FullWindowedValueCoder.of(groupedCoder, IntervalWindow.getCoder());
  // Configure a MergeWindowsDoFn with 1-second fixed windows and EARLIEST timestamp combining.
  CloudObject spec = CloudObject.forClassName("MergeWindowsDoFn");
  SdkComponents sdkComponents = SdkComponents.create();
  sdkComponents.registerEnvironment(Environments.JAVA_SDK_HARNESS_ENVIRONMENT);
  addString(
      spec,
      PropertyNames.SERIALIZED_FN,
      StringUtils.byteArrayToJsonString(
          WindowingStrategyTranslation.toMessageProto(
                  WindowingStrategy.of(FixedWindows.of(Duration.standardSeconds(1)))
                      .withTimestampCombiner(TimestampCombiner.EARLIEST),
                  sdkComponents)
              .toByteArray()));
  addObject(
      spec,
      WorkerPropertyNames.INPUT_CODER,
      CloudObjects.asCloudObject(windowedKvCoder, /* sdkComponents= */ null));
  ParallelInstruction mergeWindowsInstruction =
      new ParallelInstruction()
          .setSystemName("MergeWindows-System")
          .setName("MergeWindowsStep")
          .setOriginalName("MergeWindowsOriginal")
          .setParDo(
              new ParDoInstruction()
                  .setInput(new InstructionInput().setProducerInstructionIndex(0).setOutputNum(0))
                  .setNumOutputs(1)
                  .setUserFn(spec))
          .setOutputs(
              Collections.singletonList(
                  new InstructionOutput()
                      .setOriginalName(DEFAULT_OUTPUT_ORIGINAL_NAME)
                      .setSystemName(DEFAULT_OUTPUT_SYSTEM_NAME)
                      .setName("output")
                      .setCodec(
                          CloudObjects.asCloudObject(
                              windowedGroupedCoder, /* sdkComponents= */ null))));
  List<ParallelInstruction> instructions =
      Arrays.asList(
          makeWindowingSourceInstruction(kvCoder),
          mergeWindowsInstruction,
          makeSinkInstruction(groupedCoder, 1));
  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  StreamingDataflowWorker worker =
      makeWorker(instructions, createTestingPipelineOptions(server), false /* publishCounters */);
  Map<String, String> nameMap = new HashMap<>();
  nameMap.put("MergeWindowsStep", "MergeWindows");
  worker.addStateNameMappings(nameMap);
  worker.start();
  // Phase 1: deliver one element at timestamp 0; expect it buffered and a timer set for
  // the end of its window.
  server
      .whenGetWorkCalled()
      .thenReturn(
          buildInput(
              "work {"
                  + "  computation_id: \""
                  + DEFAULT_COMPUTATION_ID
                  + "\""
                  + "  input_data_watermark: 0"
                  + "  work {"
                  + "    key: \""
                  + DEFAULT_KEY_STRING
                  + "\""
                  + "    sharding_key: "
                  + DEFAULT_SHARDING_KEY
                  + "    cache_token: 1"
                  + "    work_token: 1"
                  + "    message_bundles {"
                  + "      source_computation_id: \""
                  + DEFAULT_SOURCE_COMPUTATION_ID
                  + "\""
                  + "      messages {"
                  + "        timestamp: 0"
                  + "        data: \""
                  + dataStringForIndex(0)
                  + "\""
                  + "      }"
                  + "    }"
                  + "  }"
                  + "}",
              intervalWindowBytes(WINDOW_AT_ZERO)));
  Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1);
  Iterable<CounterUpdate> counters = worker.buildCounters();
  // State tags for the [0s, 1s) window; encoding matches the worker's tag scheme.
  String window = "/gAAAAAAAA-joBw/";
  String timerTagPrefix = "/s" + window + "+0";
  ByteString bufferTag = ByteString.copyFromUtf8(window + "+ubuf");
  ByteString paneInfoTag = ByteString.copyFromUtf8(window + "+upane");
  String watermarkDataHoldTag = window + "+uhold";
  String watermarkExtraHoldTag = window + "+uextra";
  String stateFamily = "MergeWindows";
  ByteString bufferData = ByteString.copyFromUtf8("data0");
  // Expected grouped-output payload bytes for ["data0"].
  ByteString outputData =
      ByteString.copyFrom(
          new byte[] {
            (byte) 0xff,
            (byte) 0xff,
            (byte) 0xff,
            (byte) 0xff,
            0x01,
            0x05,
            0x64,
            0x61,
            0x74,
            0x61,
            0x30,
            0x00
          });
  // Timer fires at window end - 1ms, i.e. 999ms, stored in micros.
  long timerTimestamp = 999000L;
  WorkItemCommitRequest actualOutput = result.get(1L);
  verifyTimers(actualOutput, buildWatermarkTimer(timerTagPrefix, 999));
  assertThat(
      actualOutput.getBagUpdatesList(),
      Matchers.contains(
          Matchers.equalTo(
              Windmill.TagBag.newBuilder()
                  .setTag(bufferTag)
                  .setStateFamily(stateFamily)
                  .addValues(bufferData)
                  .build())));
  verifyHolds(actualOutput, buildHold(watermarkDataHoldTag, 0, false));
  // Nothing was fetched from Windmill state yet.
  assertEquals(0L, splitIntToLong(getCounter(counters, "WindmillStateBytesRead").getInteger()));
  assertEquals(
      Windmill.WorkItemCommitRequest.newBuilder(actualOutput)
          .clearCounterUpdates()
          .clearOutputMessages()
          .clearPerWorkItemLatencyAttributions()
          .build()
          .getSerializedSize(),
      splitIntToLong(getCounter(counters, "WindmillStateBytesWritten").getInteger()));
  assertEquals(
      VarInt.getLength(0L)
          + dataStringForIndex(0).length()
          + addPaneTag(PaneInfo.NO_FIRING, intervalWindowBytes(WINDOW_AT_ZERO)).size()
          + 5L
      ,
      splitIntToLong(getCounter(counters, "WindmillShuffleBytesRead").getInteger()));
  // Phase 2: advance the watermark past the timer and deliver the timer firing.
  Windmill.GetWorkResponse.Builder getWorkResponse = Windmill.GetWorkResponse.newBuilder();
  getWorkResponse
      .addWorkBuilder()
      .setComputationId(DEFAULT_COMPUTATION_ID)
      .setInputDataWatermark(timerTimestamp + 1000)
      .addWorkBuilder()
      .setKey(ByteString.copyFromUtf8(DEFAULT_KEY_STRING))
      .setShardingKey(DEFAULT_SHARDING_KEY)
      .setWorkToken(2)
      .setCacheToken(1)
      .getTimersBuilder()
      .addTimers(buildWatermarkTimer(timerTagPrefix, timerTimestamp));
  server.whenGetWorkCalled().thenReturn(getWorkResponse.build());
  long expectedBytesRead = 0L;
  // State the worker will read back: the buffered bag, both holds, and the pane-info value.
  Windmill.GetDataResponse.Builder dataResponse = Windmill.GetDataResponse.newBuilder();
  Windmill.KeyedGetDataResponse.Builder dataBuilder =
      dataResponse
          .addDataBuilder()
          .setComputationId(DEFAULT_COMPUTATION_ID)
          .addDataBuilder()
          .setKey(ByteString.copyFromUtf8(DEFAULT_KEY_STRING))
          .setShardingKey(DEFAULT_SHARDING_KEY);
  dataBuilder
      .addBagsBuilder()
      .setTag(bufferTag)
      .setStateFamily(stateFamily)
      .addValues(bufferData);
  dataBuilder
      .addWatermarkHoldsBuilder()
      .setTag(ByteString.copyFromUtf8(watermarkDataHoldTag))
      .setStateFamily(stateFamily)
      .addTimestamps(0);
  dataBuilder
      .addWatermarkHoldsBuilder()
      .setTag(ByteString.copyFromUtf8(watermarkExtraHoldTag))
      .setStateFamily(stateFamily)
      .addTimestamps(0);
  dataBuilder
      .addValuesBuilder()
      .setTag(paneInfoTag)
      .setStateFamily(stateFamily)
      .getValueBuilder()
      .setTimestamp(0)
      .setData(ByteString.EMPTY);
  server.whenGetDataCalled().thenReturn(dataResponse.build());
  expectedBytesRead += dataBuilder.build().getSerializedSize();
  result = server.waitForAndGetCommits(1);
  counters = worker.buildCounters();
  actualOutput = result.get(2L);
  // The timer firing emits the grouped window contents exactly once.
  assertEquals(1, actualOutput.getOutputMessagesCount());
  assertEquals(
      DEFAULT_DESTINATION_STREAM_ID, actualOutput.getOutputMessages(0).getDestinationStreamId());
  assertEquals(
      DEFAULT_KEY_STRING,
      actualOutput.getOutputMessages(0).getBundles(0).getKey().toStringUtf8());
  assertEquals(0, actualOutput.getOutputMessages(0).getBundles(0).getMessages(0).getTimestamp());
  assertEquals(
      outputData, actualOutput.getOutputMessages(0).getBundles(0).getMessages(0).getData());
  // Metadata decodes to an on-time first-and-last pane in the [0s, 1s) window.
  ByteString metadata =
      actualOutput.getOutputMessages(0).getBundles(0).getMessages(0).getMetadata();
  InputStream inStream = metadata.newInput();
  assertEquals(
      PaneInfo.createPane(true, true, Timing.ON_TIME), PaneInfoCoder.INSTANCE.decode(inStream));
  assertEquals(
      Collections.singletonList(WINDOW_AT_ZERO),
      DEFAULT_WINDOW_COLLECTION_CODER.decode(inStream, Coder.Context.OUTER));
  // After firing: pane info is tombstoned, the buffer bag is deleted, and both holds reset.
  assertThat(
      "" + actualOutput.getValueUpdatesList(),
      actualOutput.getValueUpdatesList(),
      Matchers.contains(
          Matchers.equalTo(
              Windmill.TagValue.newBuilder()
                  .setTag(paneInfoTag)
                  .setStateFamily(stateFamily)
                  .setValue(
                      Windmill.Value.newBuilder()
                          .setTimestamp(Long.MAX_VALUE)
                          .setData(ByteString.EMPTY))
                  .build())));
  assertThat(
      "" + actualOutput.getBagUpdatesList(),
      actualOutput.getBagUpdatesList(),
      Matchers.contains(
          Matchers.equalTo(
              Windmill.TagBag.newBuilder()
                  .setTag(bufferTag)
                  .setStateFamily(stateFamily)
                  .setDeleteAll(true)
                  .build())));
  verifyHolds(
      actualOutput,
      buildHold(watermarkDataHoldTag, -1, true),
      buildHold(watermarkExtraHoldTag, -1, true));
  assertEquals(
      expectedBytesRead,
      splitIntToLong(getCounter(counters, "WindmillStateBytesRead").getInteger()));
  assertEquals(
      Windmill.WorkItemCommitRequest.newBuilder(removeDynamicFields(actualOutput))
          .clearCounterUpdates()
          .clearOutputMessages()
          .build()
          .getSerializedSize(),
      splitIntToLong(getCounter(counters, "WindmillStateBytesWritten").getInteger()));
  assertEquals(0L, splitIntToLong(getCounter(counters, "WindmillShuffleBytesRead").getInteger()));
}
@Test
public void testMergeWindowsCaching() throws Exception {
  // Same scenario as testMergeWindows, but with a trailing ParDo and with the second
  // GetData response omitting the bag and data hold — those must be served from the
  // worker's state cache (verified via cache hit/miss stats at the end).
  Coder<KV<String, String>> kvCoder = KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of());
  Coder<WindowedValue<KV<String, String>>> windowedKvCoder =
      FullWindowedValueCoder.of(kvCoder, IntervalWindow.getCoder());
  KvCoder<String, List<String>> groupedCoder =
      KvCoder.of(StringUtf8Coder.of(), ListCoder.of(StringUtf8Coder.of()));
  Coder<WindowedValue<KV<String, List<String>>>> windowedGroupedCoder =
      FullWindowedValueCoder.of(groupedCoder, IntervalWindow.getCoder());
  CloudObject spec = CloudObject.forClassName("MergeWindowsDoFn");
  SdkComponents sdkComponents = SdkComponents.create();
  sdkComponents.registerEnvironment(Environments.JAVA_SDK_HARNESS_ENVIRONMENT);
  addString(
      spec,
      PropertyNames.SERIALIZED_FN,
      StringUtils.byteArrayToJsonString(
          WindowingStrategyTranslation.toMessageProto(
                  WindowingStrategy.of(FixedWindows.of(Duration.standardSeconds(1)))
                      .withTimestampCombiner(TimestampCombiner.EARLIEST),
                  sdkComponents)
              .toByteArray()));
  addObject(
      spec,
      WorkerPropertyNames.INPUT_CODER,
      CloudObjects.asCloudObject(windowedKvCoder, /* sdkComponents= */ null));
  ParallelInstruction mergeWindowsInstruction =
      new ParallelInstruction()
          .setSystemName("MergeWindows-System")
          .setName("MergeWindowsStep")
          .setOriginalName("MergeWindowsOriginal")
          .setParDo(
              new ParDoInstruction()
                  .setInput(new InstructionInput().setProducerInstructionIndex(0).setOutputNum(0))
                  .setNumOutputs(1)
                  .setUserFn(spec))
          .setOutputs(
              Collections.singletonList(
                  new InstructionOutput()
                      .setOriginalName(DEFAULT_OUTPUT_ORIGINAL_NAME)
                      .setSystemName(DEFAULT_OUTPUT_SYSTEM_NAME)
                      .setName("output")
                      .setCodec(
                          CloudObjects.asCloudObject(
                              windowedGroupedCoder, /* sdkComponents= */ null))));
  List<ParallelInstruction> instructions =
      Arrays.asList(
          makeWindowingSourceInstruction(kvCoder),
          mergeWindowsInstruction,
          makeDoFnInstruction(new PassthroughDoFn(), 1, groupedCoder),
          makeSinkInstruction(groupedCoder, 2));
  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  StreamingDataflowWorker worker =
      makeWorker(instructions, createTestingPipelineOptions(server), false /* publishCounters */);
  Map<String, String> nameMap = new HashMap<>();
  nameMap.put("MergeWindowsStep", "MergeWindows");
  worker.addStateNameMappings(nameMap);
  worker.start();
  // Phase 1: element at timestamp 0. is_new_key lets the worker cache state eagerly.
  server
      .whenGetWorkCalled()
      .thenReturn(
          buildInput(
              "work {"
                  + "  computation_id: \""
                  + DEFAULT_COMPUTATION_ID
                  + "\""
                  + "  input_data_watermark: 0"
                  + "  work {"
                  + "    key: \""
                  + DEFAULT_KEY_STRING
                  + "\""
                  + "    sharding_key: "
                  + DEFAULT_SHARDING_KEY
                  + "    cache_token: 1"
                  + "    work_token: 1"
                  + "    is_new_key: 1"
                  + "    message_bundles {"
                  + "      source_computation_id: \""
                  + DEFAULT_SOURCE_COMPUTATION_ID
                  + "\""
                  + "      messages {"
                  + "        timestamp: 0"
                  + "        data: \""
                  + dataStringForIndex(0)
                  + "\""
                  + "      }"
                  + "    }"
                  + "  }"
                  + "}",
              intervalWindowBytes(WINDOW_AT_ZERO)));
  Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1);
  Iterable<CounterUpdate> counters = worker.buildCounters();
  // State tags for the [0s, 1s) window; encoding matches the worker's tag scheme.
  String window = "/gAAAAAAAA-joBw/";
  String timerTagPrefix = "/s" + window + "+0";
  ByteString bufferTag = ByteString.copyFromUtf8(window + "+ubuf");
  ByteString paneInfoTag = ByteString.copyFromUtf8(window + "+upane");
  String watermarkDataHoldTag = window + "+uhold";
  String watermarkExtraHoldTag = window + "+uextra";
  String stateFamily = "MergeWindows";
  ByteString bufferData = ByteString.copyFromUtf8("data0");
  // Expected grouped-output payload bytes for ["data0"].
  ByteString outputData =
      ByteString.copyFrom(
          new byte[] {
            (byte) 0xff,
            (byte) 0xff,
            (byte) 0xff,
            (byte) 0xff,
            0x01,
            0x05,
            0x64,
            0x61,
            0x74,
            0x61,
            0x30,
            0x00
          });
  long timerTimestamp = 999000L;
  WorkItemCommitRequest actualOutput = result.get(1L);
  verifyTimers(actualOutput, buildWatermarkTimer(timerTagPrefix, 999));
  assertThat(
      actualOutput.getBagUpdatesList(),
      Matchers.contains(
          Matchers.equalTo(
              Windmill.TagBag.newBuilder()
                  .setTag(bufferTag)
                  .setStateFamily(stateFamily)
                  .addValues(bufferData)
                  .build())));
  verifyHolds(actualOutput, buildHold(watermarkDataHoldTag, 0, false));
  assertEquals(0L, splitIntToLong(getCounter(counters, "WindmillStateBytesRead").getInteger()));
  assertEquals(
      Windmill.WorkItemCommitRequest.newBuilder(removeDynamicFields(actualOutput))
          .clearCounterUpdates()
          .clearOutputMessages()
          .build()
          .getSerializedSize(),
      splitIntToLong(getCounter(counters, "WindmillStateBytesWritten").getInteger()));
  assertEquals(
      VarInt.getLength(0L)
          + dataStringForIndex(0).length()
          + addPaneTag(PaneInfo.NO_FIRING, intervalWindowBytes(WINDOW_AT_ZERO)).size()
          + 5L
      ,
      splitIntToLong(getCounter(counters, "WindmillShuffleBytesRead").getInteger()));
  // Phase 2: fire the window-end timer with the same cache token so cached state is valid.
  Windmill.GetWorkResponse.Builder getWorkResponse = Windmill.GetWorkResponse.newBuilder();
  getWorkResponse
      .addWorkBuilder()
      .setComputationId(DEFAULT_COMPUTATION_ID)
      .setInputDataWatermark(timerTimestamp + 1000)
      .addWorkBuilder()
      .setKey(ByteString.copyFromUtf8(DEFAULT_KEY_STRING))
      .setShardingKey(DEFAULT_SHARDING_KEY)
      .setWorkToken(2)
      .setCacheToken(1)
      .getTimersBuilder()
      .addTimers(buildWatermarkTimer(timerTagPrefix, timerTimestamp));
  server.whenGetWorkCalled().thenReturn(getWorkResponse.build());
  long expectedBytesRead = 0L;
  // GetData deliberately omits the buffer bag and data hold: they must come from the cache.
  Windmill.GetDataResponse.Builder dataResponse = Windmill.GetDataResponse.newBuilder();
  Windmill.KeyedGetDataResponse.Builder dataBuilder =
      dataResponse
          .addDataBuilder()
          .setComputationId(DEFAULT_COMPUTATION_ID)
          .addDataBuilder()
          .setKey(ByteString.copyFromUtf8(DEFAULT_KEY_STRING))
          .setShardingKey(DEFAULT_SHARDING_KEY);
  dataBuilder
      .addWatermarkHoldsBuilder()
      .setTag(ByteString.copyFromUtf8(watermarkExtraHoldTag))
      .setStateFamily(stateFamily)
      .addTimestamps(0);
  dataBuilder
      .addValuesBuilder()
      .setTag(paneInfoTag)
      .setStateFamily(stateFamily)
      .getValueBuilder()
      .setTimestamp(0)
      .setData(ByteString.EMPTY);
  server.whenGetDataCalled().thenReturn(dataResponse.build());
  expectedBytesRead += dataBuilder.build().getSerializedSize();
  result = server.waitForAndGetCommits(1);
  counters = worker.buildCounters();
  actualOutput = result.get(2L);
  assertEquals(1, actualOutput.getOutputMessagesCount());
  assertEquals(
      DEFAULT_DESTINATION_STREAM_ID, actualOutput.getOutputMessages(0).getDestinationStreamId());
  assertEquals(
      DEFAULT_KEY_STRING,
      actualOutput.getOutputMessages(0).getBundles(0).getKey().toStringUtf8());
  assertEquals(0, actualOutput.getOutputMessages(0).getBundles(0).getMessages(0).getTimestamp());
  assertEquals(
      outputData, actualOutput.getOutputMessages(0).getBundles(0).getMessages(0).getData());
  ByteString metadata =
      actualOutput.getOutputMessages(0).getBundles(0).getMessages(0).getMetadata();
  InputStream inStream = metadata.newInput();
  assertEquals(
      PaneInfo.createPane(true, true, Timing.ON_TIME), PaneInfoCoder.INSTANCE.decode(inStream));
  assertEquals(
      Collections.singletonList(WINDOW_AT_ZERO),
      DEFAULT_WINDOW_COLLECTION_CODER.decode(inStream, Coder.Context.OUTER));
  // After firing: pane info tombstoned, buffer bag deleted, both holds reset.
  assertThat(
      "" + actualOutput.getValueUpdatesList(),
      actualOutput.getValueUpdatesList(),
      Matchers.contains(
          Matchers.equalTo(
              Windmill.TagValue.newBuilder()
                  .setTag(paneInfoTag)
                  .setStateFamily(stateFamily)
                  .setValue(
                      Windmill.Value.newBuilder()
                          .setTimestamp(Long.MAX_VALUE)
                          .setData(ByteString.EMPTY))
                  .build())));
  assertThat(
      "" + actualOutput.getBagUpdatesList(),
      actualOutput.getBagUpdatesList(),
      Matchers.contains(
          Matchers.equalTo(
              Windmill.TagBag.newBuilder()
                  .setTag(bufferTag)
                  .setStateFamily(stateFamily)
                  .setDeleteAll(true)
                  .build())));
  verifyHolds(
      actualOutput,
      buildHold(watermarkDataHoldTag, -1, true),
      buildHold(watermarkExtraHoldTag, -1, true));
  assertEquals(
      expectedBytesRead,
      splitIntToLong(getCounter(counters, "WindmillStateBytesRead").getInteger()));
  assertEquals(
      Windmill.WorkItemCommitRequest.newBuilder(removeDynamicFields(actualOutput))
          .clearCounterUpdates()
          .clearOutputMessages()
          .build()
          .getSerializedSize(),
      splitIntToLong(getCounter(counters, "WindmillStateBytesWritten").getInteger()));
  assertEquals(0L, splitIntToLong(getCounter(counters, "WindmillShuffleBytesRead").getInteger()));
  // Cached bag + hold produce the single hit; the exact hit/miss split pins cache behavior.
  CacheStats stats = worker.stateCache.getCacheStats();
  LOG.info("cache stats {}", stats);
  assertEquals(1, stats.hitCount());
  assertEquals(4, stats.missCount());
}
/**
 * Drives a MergeWindows (session-window merging) pipeline through the fake Windmill server.
 *
 * <p>For each {@link Action} in order: queue its GetWork response, wait for exactly one commit,
 * and verify that commit's timers and watermark holds against the action's expectations. The
 * windowing strategy is sessions with a 10ms gap, discarding fired panes, a repeated
 * end-of-window trigger with per-element late firings, and 60 minutes of allowed lateness.
 */
private void runMergeSessionsActions(List<Action> actions) throws Exception {
  Coder<KV<String, String>> kvCoder = KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of());
  Coder<WindowedValue<KV<String, String>>> windowedKvCoder =
      FullWindowedValueCoder.of(kvCoder, IntervalWindow.getCoder());
  KvCoder<String, List<String>> groupedCoder =
      KvCoder.of(StringUtf8Coder.of(), ListCoder.of(StringUtf8Coder.of()));
  Coder<WindowedValue<KV<String, List<String>>>> windowedGroupedCoder =
      FullWindowedValueCoder.of(groupedCoder, IntervalWindow.getCoder());
  // Build the CloudObject spec for the MergeWindowsDoFn carrying the serialized windowing
  // strategy and the input coder.
  CloudObject spec = CloudObject.forClassName("MergeWindowsDoFn");
  SdkComponents sdkComponents = SdkComponents.create();
  sdkComponents.registerEnvironment(Environments.JAVA_SDK_HARNESS_ENVIRONMENT);
  addString(
      spec,
      PropertyNames.SERIALIZED_FN,
      StringUtils.byteArrayToJsonString(
          WindowingStrategyTranslation.toMessageProto(
                  WindowingStrategy.of(Sessions.withGapDuration(Duration.millis(10)))
                      .withMode(AccumulationMode.DISCARDING_FIRED_PANES)
                      .withTrigger(
                          Repeatedly.forever(
                              AfterWatermark.pastEndOfWindow()
                                  .withLateFirings(AfterPane.elementCountAtLeast(1))))
                      .withAllowedLateness(Duration.standardMinutes(60)),
                  sdkComponents)
              .toByteArray()));
  addObject(
      spec,
      WorkerPropertyNames.INPUT_CODER,
      CloudObjects.asCloudObject(windowedKvCoder, /* sdkComponents= */ null));
  ParallelInstruction mergeWindowsInstruction =
      new ParallelInstruction()
          .setSystemName("MergeWindows-System")
          .setName("MergeWindowsStep")
          .setOriginalName("MergeWindowsOriginal")
          .setParDo(
              new ParDoInstruction()
                  .setInput(new InstructionInput().setProducerInstructionIndex(0).setOutputNum(0))
                  .setNumOutputs(1)
                  .setUserFn(spec))
          .setOutputs(
              Collections.singletonList(
                  new InstructionOutput()
                      .setOriginalName(DEFAULT_OUTPUT_ORIGINAL_NAME)
                      .setSystemName(DEFAULT_OUTPUT_SYSTEM_NAME)
                      .setName("output")
                      .setCodec(
                          CloudObjects.asCloudObject(
                              windowedGroupedCoder, /* sdkComponents= */ null))));
  List<ParallelInstruction> instructions =
      Arrays.asList(
          makeWindowingSourceInstruction(kvCoder),
          mergeWindowsInstruction,
          makeSinkInstruction(groupedCoder, 1));
  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  StreamingDataflowWorker worker =
      makeWorker(instructions, createTestingPipelineOptions(server), false /* publishCounters */);
  // Map the step name to the Windmill state family used by the merging code.
  Map<String, String> nameMap = new HashMap<>();
  nameMap.put("MergeWindowsStep", "MergeWindows");
  worker.addStateNameMappings(nameMap);
  worker.start();
  server.whenGetDataCalled().answerByDefault(EMPTY_DATA_RESPONDER);
  // Feed each action's work item and check the commit it produces (work tokens are 1-based).
  for (int i = 0; i < actions.size(); ++i) {
    Action action = actions.get(i);
    server.whenGetWorkCalled().thenReturn(action.response);
    Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1);
    WorkItemCommitRequest actualOutput = result.get(i + 1L);
    assertThat(actualOutput, Matchers.not(Matchers.nullValue()));
    verifyTimers(actualOutput, action.expectedTimers);
    verifyHolds(actualOutput, action.expectedHolds);
  }
}
/**
 * End-to-end session-window merging scenarios run through {@link #runMergeSessionsActions}.
 * Each {@link Action} supplies one GetWork response and the timers/holds the resulting commit
 * must contain (holds/timers with {@code true} mark deletions of earlier state).
 */
@Test
public void testMergeSessionWindows() throws Exception {
  // Scenario 1: a single element processed as one work item.
  runMergeSessionsActions(
      Collections.singletonList(
          new Action(
                  buildSessionInput(
                      1, 40, 0, Collections.singletonList(1L), Collections.EMPTY_LIST))
              .withHolds(
                  buildHold("/gAAAAAAAAAsK/+uhold", -1, true),
                  buildHold("/gAAAAAAAAAsK/+uextra", -1, true))
              .withTimers(buildWatermarkTimer("/s/gAAAAAAAAAsK/+0", 3600010))));
  // Scenario 2: a sequence of elements and timer firings across six work items that grow
  // and merge sessions, cleaning up the state of the sessions they replace.
  runMergeSessionsActions(
      Arrays.asList(
          new Action(
                  buildSessionInput(
                      1, 0, 0, Collections.singletonList(1L), Collections.EMPTY_LIST))
              .withHolds(buildHold("/gAAAAAAAAAsK/+uhold", 10, false))
              .withTimers(
                  buildWatermarkTimer("/s/gAAAAAAAAAsK/+0", 10),
                  buildWatermarkTimer("/s/gAAAAAAAAAsK/+0", 3600010)),
          new Action(
                  buildSessionInput(
                      2,
                      30,
                      0,
                      Collections.EMPTY_LIST,
                      Collections.singletonList(buildWatermarkTimer("/s/gAAAAAAAAAsK/+0", 10))))
              .withTimers(buildWatermarkTimer("/s/gAAAAAAAAAsK/+0", 3600010))
              .withHolds(
                  buildHold("/gAAAAAAAAAsK/+uhold", -1, true),
                  buildHold("/gAAAAAAAAAsK/+uextra", -1, true)),
          new Action(
                  buildSessionInput(
                      3, 30, 0, Collections.singletonList(8L), Collections.EMPTY_LIST))
              .withTimers(
                  buildWatermarkTimer("/s/gAAAAAAAABIR/+0", 3600017),
                  buildWatermarkTimer("/s/gAAAAAAAAAsK/+0", 10, true),
                  buildWatermarkTimer("/s/gAAAAAAAAAsK/+0", 3600010, true))
              .withHolds(
                  buildHold("/gAAAAAAAAAsK/+uhold", -1, true),
                  buildHold("/gAAAAAAAAAsK/+uextra", -1, true)),
          new Action(
                  buildSessionInput(
                      4, 30, 0, Collections.singletonList(31L), Collections.EMPTY_LIST))
              .withTimers(
                  buildWatermarkTimer("/s/gAAAAAAAACkK/+0", 3600040),
                  buildWatermarkTimer("/s/gAAAAAAAACkK/+0", 40))
              .withHolds(buildHold("/gAAAAAAAACkK/+uhold", 40, false)),
          // Two elements bridging existing sessions: the old sessions' timers and holds are
          // deleted and replaced by the merged session's.
          new Action(buildSessionInput(5, 30, 0, Arrays.asList(17L, 23L), Collections.EMPTY_LIST))
              .withTimers(
                  buildWatermarkTimer("/s/gAAAAAAAACkK/+0", 3600040, true),
                  buildWatermarkTimer("/s/gAAAAAAAACkK/+0", 40, true),
                  buildWatermarkTimer("/s/gAAAAAAAABIR/+0", 3600017, true),
                  buildWatermarkTimer("/s/gAAAAAAAABIR/+0", 17, true),
                  buildWatermarkTimer("/s/gAAAAAAAACko/+0", 40),
                  buildWatermarkTimer("/s/gAAAAAAAACko/+0", 3600040))
              .withHolds(
                  buildHold("/gAAAAAAAACkK/+uhold", -1, true),
                  buildHold("/gAAAAAAAACkK/+uextra", -1, true),
                  buildHold("/gAAAAAAAAAsK/+uhold", 40, true),
                  buildHold("/gAAAAAAAAAsK/+uextra", 3600040, true)),
          new Action(
                  buildSessionInput(
                      6,
                      50,
                      0,
                      Collections.EMPTY_LIST,
                      Collections.singletonList(buildWatermarkTimer("/s/gAAAAAAAACko/+0", 40))))
              .withTimers(buildWatermarkTimer("/s/gAAAAAAAACko/+0", 3600040))
              .withHolds(
                  buildHold("/gAAAAAAAAAsK/+uhold", -1, true),
                  buildHold("/gAAAAAAAAAsK/+uextra", -1, true))));
}
/** Convenience overload: one message per shard, printed via {@link PrintFn}. */
private List<ParallelInstruction> makeUnboundedSourcePipeline() throws Exception {
  return makeUnboundedSourcePipeline(1, new PrintFn());
}
/**
 * Builds a three-step pipeline in the global window: a custom unbounded
 * {@link TestCountingSource} read, the given {@code doFn}, and a sink.
 *
 * @param numMessagesPerShard passed to {@link TestCountingSource}'s constructor
 *     (presumably the number of messages each shard emits — confirm against that class)
 * @param doFn the user transform applied to the source output
 */
private List<ParallelInstruction> makeUnboundedSourcePipeline(
    int numMessagesPerShard,
    DoFn<ValueWithRecordId<KV<Integer, Integer>>, String> doFn)
    throws Exception {
  DataflowPipelineOptions options =
      PipelineOptionsFactory.create().as(DataflowPipelineOptions.class);
  options.setNumWorkers(1);
  // Coder for the source output: windowed KV<int, int> tagged with a record id.
  CloudObject codec =
      CloudObjects.asCloudObject(
          WindowedValue.getFullCoder(
              ValueWithRecordId.ValueWithRecordIdCoder.of(
                  KvCoder.of(VarIntCoder.of(), VarIntCoder.of())),
              GlobalWindow.Coder.INSTANCE),
          /* sdkComponents= */ null);
  return Arrays.asList(
      new ParallelInstruction()
          .setSystemName("Read")
          .setOriginalName("OriginalReadName")
          .setRead(
              new ReadInstruction()
                  .setSource(
                      CustomSources.serializeToCloudSource(
                              new TestCountingSource(numMessagesPerShard), options)
                          .setCodec(codec)))
          .setOutputs(
              Collections.singletonList(
                  new InstructionOutput()
                      .setName("read_output")
                      .setOriginalName(DEFAULT_OUTPUT_ORIGINAL_NAME)
                      .setSystemName(DEFAULT_OUTPUT_SYSTEM_NAME)
                      .setCodec(codec))),
      makeDoFnInstruction(doFn, 0, StringUtf8Coder.of(), WindowingStrategy.globalDefault()),
      makeSinkInstruction(StringUtf8Coder.of(), 1, GlobalWindow.Coder.INSTANCE));
}
/**
 * Exercises checkpointing and finalization of an unbounded custom source across three work
 * items: an initial read, a continuation carrying checkpoint state plus a finalize id, and a
 * fresh key whose checkpoint state has no finalize ids.
 */
@Test
public void testUnboundedSources() throws Exception {
  // Records which checkpoint finalize callbacks the source actually runs.
  List<Integer> finalizeTracker = Lists.newArrayList();
  TestCountingSource.setFinalizeTracker(finalizeTracker);
  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  StreamingDataflowWorker worker =
      makeWorker(
          makeUnboundedSourcePipeline(),
          createTestingPipelineOptions(server),
          false /* publishCounters */);
  worker.start();
  // Work item 1: initial read for key 1; expect one output message and a new checkpoint.
  server
      .whenGetWorkCalled()
      .thenReturn(
          buildInput(
              "work {"
                  + " computation_id: \"computation\""
                  + " input_data_watermark: 0"
                  + " work {"
                  + " key: \"0000000000000001\""
                  + " sharding_key: 1"
                  + " work_token: 1"
                  + " cache_token: 1"
                  + " }"
                  + "}",
              null));
  Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1);
  Iterable<CounterUpdate> counters = worker.buildCounters();
  Windmill.WorkItemCommitRequest commit = result.get(1L);
  UnsignedLong finalizeId =
      UnsignedLong.fromLongBits(commit.getSourceStateUpdates().getFinalizeIds(0));
  assertThat(
      removeDynamicFields(commit),
      equalTo(
          setMessagesMetadata(
                  PaneInfo.NO_FIRING,
                  CoderUtils.encodeToByteArray(
                      CollectionCoder.of(GlobalWindow.Coder.INSTANCE),
                      Collections.singletonList(GlobalWindow.INSTANCE)),
                  parseCommitRequest(
                      "key: \"0000000000000001\" "
                          + "sharding_key: 1 "
                          + "work_token: 1 "
                          + "cache_token: 1 "
                          + "source_backlog_bytes: 7 "
                          + "source_bytes_processed: 18 "
                          + "output_messages {"
                          + " destination_stream_id: \"out\""
                          + " bundles {"
                          + " key: \"0000000000000001\""
                          + " messages {"
                          + " timestamp: 0"
                          + " data: \"0:0\""
                          + " }"
                          + " messages_ids: \"\""
                          + " }"
                          + "} "
                          + "source_state_updates {"
                          + " state: \"\000\""
                          + " finalize_ids: "
                          + finalizeId
                          + "} "
                          + "source_watermark: 1000"))
              .build()));
  // Work item 2: continuation carrying checkpoint state \001 and the finalize id from above;
  // no new elements are emitted, and the prior checkpoint should be finalized.
  server
      .whenGetWorkCalled()
      .thenReturn(
          buildInput(
              "work {"
                  + " computation_id: \"computation\""
                  + " input_data_watermark: 0"
                  + " work {"
                  + " key: \"0000000000000001\""
                  + " sharding_key: 1"
                  + " work_token: 2"
                  + " cache_token: 1"
                  + " source_state {"
                  + " state: \"\001\""
                  + " finalize_ids: "
                  + finalizeId
                  + " } "
                  + " }"
                  + "}",
              null));
  result = server.waitForAndGetCommits(1);
  counters = worker.buildCounters();
  commit = result.get(2L);
  finalizeId = UnsignedLong.fromLongBits(commit.getSourceStateUpdates().getFinalizeIds(0));
  assertThat(
      removeDynamicFields(commit),
      equalTo(
          parseCommitRequest(
                  "key: \"0000000000000001\" "
                      + "sharding_key: 1 "
                      + "work_token: 2 "
                      + "cache_token: 1 "
                      + "source_backlog_bytes: 7 "
                      + "source_bytes_processed: 0 "
                      + "source_state_updates {"
                      + " state: \"\000\""
                      + " finalize_ids: "
                      + finalizeId
                      + "} "
                      + "source_watermark: 1000")
              .build()));
  // Finalize callback 0 ran exactly once.
  assertThat(finalizeTracker, contains(0));
  assertNull(getCounter(counters, "dataflow_input_size-computation"));
  // Work item 3: a different key/sharding key with checkpoint state but no finalize ids.
  server
      .whenGetWorkCalled()
      .thenReturn(
          buildInput(
              "work {"
                  + " computation_id: \"computation\""
                  + " input_data_watermark: 0"
                  + " work {"
                  + " key: \"0000000000000002\""
                  + " sharding_key: 2"
                  + " work_token: 3"
                  + " cache_token: 2"
                  + " source_state {"
                  + " state: \"\000\""
                  + " } "
                  + " }"
                  + "}",
              null));
  result = server.waitForAndGetCommits(1);
  counters = worker.buildCounters();
  commit = result.get(3L);
  finalizeId = UnsignedLong.fromLongBits(commit.getSourceStateUpdates().getFinalizeIds(0));
  assertThat(
      removeDynamicFields(commit),
      equalTo(
          parseCommitRequest(
                  "key: \"0000000000000002\" "
                      + "sharding_key: 2 "
                      + "work_token: 3 "
                      + "cache_token: 2 "
                      + "source_backlog_bytes: 7 "
                      + "source_bytes_processed: 0 "
                      + "source_state_updates {"
                      + " state: \"\000\""
                      + " finalize_ids: "
                      + finalizeId
                      + "} "
                      + "source_watermark: 1000")
              .build()));
  assertNull(getCounter(counters, "dataflow_input_size-computation"));
}
/**
 * Drain path for an unbounded source: after a normal read/checkpoint, a work item whose
 * source_state carries {@code only_finalize: true} must run the finalize callback and commit
 * nothing but the only_finalize acknowledgement.
 */
@Test
public void testUnboundedSourcesDrain() throws Exception {
  // Records which checkpoint finalize callbacks the source actually runs.
  List<Integer> finalizeTracker = Lists.newArrayList();
  TestCountingSource.setFinalizeTracker(finalizeTracker);
  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  StreamingDataflowWorker worker =
      makeWorker(
          makeUnboundedSourcePipeline(),
          createTestingPipelineOptions(server),
          true /* publishCounters */);
  worker.start();
  // Normal read producing one element and a checkpoint with a finalize id.
  server
      .whenGetWorkCalled()
      .thenReturn(
          buildInput(
              "work {"
                  + " computation_id: \"computation\""
                  + " input_data_watermark: 0"
                  + " work {"
                  + " key: \"0000000000000001\""
                  + " sharding_key: 1"
                  + " work_token: 2"
                  + " cache_token: 3"
                  + " }"
                  + "}",
              null));
  Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1);
  Windmill.WorkItemCommitRequest commit = result.get(2L);
  UnsignedLong finalizeId =
      UnsignedLong.fromLongBits(commit.getSourceStateUpdates().getFinalizeIds(0));
  assertThat(
      removeDynamicFields(commit),
      equalTo(
          setMessagesMetadata(
                  PaneInfo.NO_FIRING,
                  CoderUtils.encodeToByteArray(
                      CollectionCoder.of(GlobalWindow.Coder.INSTANCE),
                      Collections.singletonList(GlobalWindow.INSTANCE)),
                  parseCommitRequest(
                      "key: \"0000000000000001\" "
                          + "sharding_key: 1 "
                          + "work_token: 2 "
                          + "cache_token: 3 "
                          + "source_backlog_bytes: 7 "
                          + "source_bytes_processed: 18 "
                          + "output_messages {"
                          + " destination_stream_id: \"out\""
                          + " bundles {"
                          + " key: \"0000000000000001\""
                          + " messages {"
                          + " timestamp: 0"
                          + " data: \"0:0\""
                          + " }"
                          + " messages_ids: \"\""
                          + " }"
                          + "} "
                          + "source_state_updates {"
                          + " state: \"\000\""
                          + " finalize_ids: "
                          + finalizeId
                          + "} "
                          + "source_watermark: 1000"))
              .build()));
  // Drain: only_finalize with the finalize id; expect a commit that echoes only_finalize
  // and nothing else.
  server
      .whenGetWorkCalled()
      .thenReturn(
          buildInput(
              "work {"
                  + " computation_id: \"computation\""
                  + " input_data_watermark: 0"
                  + " work {"
                  + " key: \"0000000000000001\""
                  + " sharding_key: 1"
                  + " work_token: 3"
                  + " cache_token: 3"
                  + " source_state {"
                  + " only_finalize: true"
                  + " finalize_ids: "
                  + finalizeId
                  + " }"
                  + " }"
                  + "}",
              null));
  result = server.waitForAndGetCommits(1);
  commit = result.get(3L);
  assertThat(
      commit,
      equalTo(
          parseCommitRequest(
                  "key: \"0000000000000001\" "
                      + "sharding_key: 1 "
                      + "work_token: 3 "
                      + "cache_token: 3 "
                      + "source_state_updates {"
                      + " only_finalize: true"
                      + "} ")
              .build()));
  // Finalize callback 0 ran.
  assertThat(finalizeTracker, contains(0));
}
/**
 * Retrying the same work token with the worker cache disabled must reproduce an equivalent
 * commit, differing only in the freshly minted finalize id; a subsequent continuation then
 * finalizes the original checkpoint.
 */
@Test
public void testUnboundedSourceWorkRetry() throws Exception {
  // Records which checkpoint finalize callbacks the source actually runs.
  List<Integer> finalizeTracker = Lists.newArrayList();
  TestCountingSource.setFinalizeTracker(finalizeTracker);
  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server);
  // 0 MB worker cache — presumably so the retried work cannot be served from cached
  // reader/state; confirm against StreamingDataflowWorker's cache handling.
  options.setWorkerCacheMb(0);
  StreamingDataflowWorker worker =
      makeWorker(makeUnboundedSourcePipeline(), options, false /* publishCounters */);
  worker.start();
  Windmill.GetWorkResponse work =
      buildInput(
          "work {"
              + " computation_id: \"computation\""
              + " input_data_watermark: 0"
              + " work {"
              + " key: \"0000000000000001\""
              + " sharding_key: 1"
              + " work_token: 1"
              + " cache_token: 1"
              + " }"
              + "}",
          null);
  // First attempt.
  server.whenGetWorkCalled().thenReturn(work);
  Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1);
  Iterable<CounterUpdate> counters = worker.buildCounters();
  Windmill.WorkItemCommitRequest commit = result.get(1L);
  UnsignedLong finalizeId =
      UnsignedLong.fromLongBits(commit.getSourceStateUpdates().getFinalizeIds(0));
  Windmill.WorkItemCommitRequest expectedCommit =
      setMessagesMetadata(
              PaneInfo.NO_FIRING,
              CoderUtils.encodeToByteArray(
                  CollectionCoder.of(GlobalWindow.Coder.INSTANCE),
                  Collections.singletonList(GlobalWindow.INSTANCE)),
              parseCommitRequest(
                  "key: \"0000000000000001\" "
                      + "sharding_key: 1 "
                      + "work_token: 1 "
                      + "cache_token: 1 "
                      + "source_backlog_bytes: 7 "
                      + "source_bytes_processed: 18 "
                      + "output_messages {"
                      + " destination_stream_id: \"out\""
                      + " bundles {"
                      + " key: \"0000000000000001\""
                      + " messages {"
                      + " timestamp: 0"
                      + " data: \"0:0\""
                      + " }"
                      + " messages_ids: \"\""
                      + " }"
                      + "} "
                      + "source_state_updates {"
                      + " state: \"\000\""
                      + " finalize_ids: "
                      + finalizeId
                      + "} "
                      + "source_watermark: 1000"))
          .build();
  assertThat(removeDynamicFields(commit), equalTo(expectedCommit));
  // Retry: resend the identical work item; the commit must match the first one except for
  // the new finalize id, which we patch into the expected proto before comparing.
  server.clearCommitsReceived();
  server.whenGetWorkCalled().thenReturn(work);
  result = server.waitForAndGetCommits(1);
  commit = result.get(1L);
  finalizeId = UnsignedLong.fromLongBits(commit.getSourceStateUpdates().getFinalizeIds(0));
  Windmill.WorkItemCommitRequest.Builder commitBuilder = expectedCommit.toBuilder();
  commitBuilder
      .getSourceStateUpdatesBuilder()
      .setFinalizeIds(0, commit.getSourceStateUpdates().getFinalizeIds(0));
  expectedCommit = commitBuilder.build();
  assertThat(removeDynamicFields(commit), equalTo(expectedCommit));
  // Continuation: carries checkpoint state \001 and the finalize id, which should trigger
  // finalization of the earlier checkpoint.
  server
      .whenGetWorkCalled()
      .thenReturn(
          buildInput(
              "work {"
                  + " computation_id: \"computation\""
                  + " input_data_watermark: 0"
                  + " work {"
                  + " key: \"0000000000000001\""
                  + " sharding_key: 1"
                  + " work_token: 2"
                  + " cache_token: 1"
                  + " source_state {"
                  + " state: \"\001\""
                  + " finalize_ids: "
                  + finalizeId
                  + " } "
                  + " }"
                  + "}",
              null));
  result = server.waitForAndGetCommits(1);
  commit = result.get(2L);
  finalizeId = UnsignedLong.fromLongBits(commit.getSourceStateUpdates().getFinalizeIds(0));
  assertThat(
      removeDynamicFields(commit),
      equalTo(
          parseCommitRequest(
                  "key: \"0000000000000001\" "
                      + "sharding_key: 1 "
                      + "work_token: 2 "
                      + "cache_token: 1 "
                      + "source_backlog_bytes: 7 "
                      + "source_bytes_processed: 0 "
                      + "source_state_updates {"
                      + " state: \"\000\""
                      + " finalize_ids: "
                      + finalizeId
                      + "} "
                      + "source_watermark: 1000")
              .build()));
  assertThat(finalizeTracker, contains(0));
}
/**
 * Verifies ComputationState's active-work bookkeeping: the first work item for a key is
 * dispatched to the executor immediately, later items for the same key queue until their
 * predecessor completes (then run via forceExecute), distinct keys dispatch independently,
 * and re-activating an already-active work token is rejected.
 */
@Test
public void testActiveWork() throws Exception {
  BoundedQueueExecutor mockExecutor = Mockito.mock(BoundedQueueExecutor.class);
  ComputationState computationState =
      new ComputationState(
          "computation",
          defaultMapTask(Collections.singletonList(makeSourceInstruction(StringUtf8Coder.of()))),
          mockExecutor,
          ImmutableMap.of(),
          null);
  ShardedKey key1 = ShardedKey.create(ByteString.copyFromUtf8("key1"), 1);
  ShardedKey key2 = ShardedKey.create(ByteString.copyFromUtf8("key2"), 2);
  // First work item for key1 executes immediately upon activation.
  Work m1 = createMockWork(1);
  assertTrue(computationState.activateWork(key1, m1));
  Mockito.verify(mockExecutor).execute(m1, m1.getWorkItem().getSerializedSize());
  computationState.completeWorkAndScheduleNextWorkForKey(key1, 1);
  Mockito.verifyNoMoreInteractions(mockExecutor);
  Work m2 = createMockWork(2);
  assertTrue(computationState.activateWork(key1, m2));
  Mockito.verify(mockExecutor).execute(m2, m2.getWorkItem().getSerializedSize());
  // m3 queues behind m2 for the same key: no executor interaction yet.
  Work m3 = createMockWork(3);
  assertTrue(computationState.activateWork(key1, m3));
  Mockito.verifyNoMoreInteractions(mockExecutor);
  // A different key is dispatched independently of key1's queue.
  Work m4 = createMockWork(4);
  assertTrue(computationState.activateWork(key2, m4));
  Mockito.verify(mockExecutor).execute(m4, m4.getWorkItem().getSerializedSize());
  computationState.completeWorkAndScheduleNextWorkForKey(key2, 4);
  Mockito.verifyNoMoreInteractions(mockExecutor);
  // Completing m2 force-executes the queued m3.
  computationState.completeWorkAndScheduleNextWorkForKey(key1, 2);
  Mockito.verify(mockExecutor).forceExecute(m3, m3.getWorkItem().getSerializedSize());
  computationState.completeWorkAndScheduleNextWorkForKey(key1, 3);
  Mockito.verifyNoMoreInteractions(mockExecutor);
  // Re-activating an already-active work token is rejected and triggers no execution.
  Work m5 = createMockWork(5);
  computationState.activateWork(key1, m5);
  Mockito.verify(mockExecutor).execute(m5, m5.getWorkItem().getSerializedSize());
  assertFalse(computationState.activateWork(key1, m5));
  Mockito.verifyNoMoreInteractions(mockExecutor);
  computationState.completeWorkAndScheduleNextWorkForKey(key1, 5);
  Mockito.verifyNoMoreInteractions(mockExecutor);
}
/**
 * The same user key with different sharding keys must be tracked as distinct active-work
 * queues: a work token that is a duplicate within one sharded key is rejected there but
 * accepted (and executed) under the other sharding key.
 */
@Test
public void testActiveWorkForShardedKeys() throws Exception {
  BoundedQueueExecutor mockExecutor = Mockito.mock(BoundedQueueExecutor.class);
  ComputationState computationState =
      new ComputationState(
          "computation",
          defaultMapTask(Collections.singletonList(makeSourceInstruction(StringUtf8Coder.of()))),
          mockExecutor,
          ImmutableMap.of(),
          null);
  ShardedKey key1Shard1 = ShardedKey.create(ByteString.copyFromUtf8("key1"), 1);
  ShardedKey key1Shard2 = ShardedKey.create(ByteString.copyFromUtf8("key1"), 2);
  Work m1 = createMockWork(1);
  assertTrue(computationState.activateWork(key1Shard1, m1));
  Mockito.verify(mockExecutor).execute(m1, m1.getWorkItem().getSerializedSize());
  computationState.completeWorkAndScheduleNextWorkForKey(key1Shard1, 1);
  Mockito.verifyNoMoreInteractions(mockExecutor);
  Work m2 = createMockWork(2);
  assertTrue(computationState.activateWork(key1Shard1, m2));
  Mockito.verify(mockExecutor).execute(m2, m2.getWorkItem().getSerializedSize());
  // m3 queues behind m2 on shard 1.
  Work m3 = createMockWork(3);
  assertTrue(computationState.activateWork(key1Shard1, m3));
  Mockito.verifyNoMoreInteractions(mockExecutor);
  // m4 reuses work token 3: rejected on shard 1 (duplicate), accepted on shard 2.
  Work m4 = createMockWork(3);
  assertFalse(computationState.activateWork(key1Shard1, m4));
  Mockito.verifyNoMoreInteractions(mockExecutor);
  assertTrue(computationState.activateWork(key1Shard2, m4));
  Mockito.verify(mockExecutor).execute(m4, m4.getWorkItem().getSerializedSize());
  // Duplicate activation on shard 2 while active is rejected.
  assertFalse(computationState.activateWork(key1Shard2, m4));
  computationState.completeWorkAndScheduleNextWorkForKey(key1Shard2, 3);
  Mockito.verifyNoMoreInteractions(mockExecutor);
}
/**
 * Asserts that {@link BoundedQueueExecutor#allThreadsActiveTime()} accumulates while both
 * executor threads are busy running ~1s work items.
 *
 * <p>NOTE(review): currently {@code @Ignore}d; the reason is not recorded here — the
 * assertion is timing-sensitive (sleep-based), which is presumably why. Confirm before
 * re-enabling.
 */
@Test
@Ignore
public void testMaxThreadMetric() throws Exception {
  int maxThreads = 2;
  int threadExpiration = 60;
  // Executor sized so the two work items occupy all available threads simultaneously.
  BoundedQueueExecutor executor =
      new BoundedQueueExecutor(
          maxThreads,
          threadExpiration,
          TimeUnit.SECONDS,
          maxThreads,
          MAXIMUM_BYTES_OUTSTANDING,
          new ThreadFactoryBuilder()
              .setNameFormat("DataflowWorkUnits-%d")
              .setDaemon(true)
              .build());
  ComputationState computationState =
      new ComputationState(
          "computation",
          defaultMapTask(Collections.singletonList(makeSourceInstruction(StringUtf8Coder.of()))),
          executor,
          ImmutableMap.of(),
          null);
  ShardedKey key1Shard1 = ShardedKey.create(ByteString.copyFromUtf8("key1"), 1);
  // Each work item sleeps ~1s so both threads stay active concurrently.
  Consumer<Work> sleepProcessWorkFn =
      unused -> {
        try {
          Thread.sleep(1000);
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
        }
      };
  Work m2 = createMockWork(2, sleepProcessWorkFn);
  Work m3 = createMockWork(3, sleepProcessWorkFn);
  assertTrue(computationState.activateWork(key1Shard1, m2));
  assertTrue(computationState.activateWork(key1Shard1, m3));
  executor.execute(m2, m2.getWorkItem().getSerializedSize());
  executor.execute(m3, m3.getWorkItem().getSerializedSize());
  // Expect at least ~990ms of all-threads-active time from the 1000ms sleeps above.
  long i = 990L;
  assertTrue(executor.allThreadsActiveTime() >= i);
  executor.shutdown();
}
/**
 * Verifies that {@link BoundedQueueExecutor#bytesOutstanding()} tracks the serialized size of
 * every work item currently queued or executing.
 *
 * <p>Each work item spins until {@code stop} is set so it stays outstanding, and the
 * CountDownLatches confirm the expected number of executions have started before each
 * assertion. Note {@code m2} is counted twice: activating the first work item for a key
 * schedules it on the executor (see {@link #testActiveWork}), and this test also submits it
 * explicitly; {@code m3}/{@code m4} only queue on activation, so they count once each.
 *
 * <p>Fix: removed a duplicated {@code @Test} annotation — JUnit's {@code @Test} is not
 * {@code @Repeatable}, so repeating it is a compile error.
 */
@Test
public void testOutstandingBytesMetric() throws Exception {
  int maxThreads = 5;
  int threadExpirationSec = 60;
  CountDownLatch processStart1 = new CountDownLatch(2);
  CountDownLatch processStart2 = new CountDownLatch(3);
  CountDownLatch processStart3 = new CountDownLatch(4);
  AtomicBoolean stop = new AtomicBoolean(false);
  BoundedQueueExecutor executor =
      new BoundedQueueExecutor(
          maxThreads,
          threadExpirationSec,
          TimeUnit.SECONDS,
          maxThreads,
          MAXIMUM_BYTES_OUTSTANDING,
          new ThreadFactoryBuilder()
              .setNameFormat("DataflowWorkUnits-%d")
              .setDaemon(true)
              .build());
  ComputationState computationState =
      new ComputationState(
          "computation",
          defaultMapTask(Arrays.asList(makeSourceInstruction(StringUtf8Coder.of()))),
          executor,
          ImmutableMap.of(),
          null);
  ShardedKey key1Shard1 = ShardedKey.create(ByteString.copyFromUtf8("key1"), 1);
  // Busy-wait until the test flips `stop`, keeping every started work item outstanding.
  Consumer<Work> sleepProcessWorkFn =
      unused -> {
        processStart1.countDown();
        processStart2.countDown();
        processStart3.countDown();
        int count = 0;
        while (!stop.get()) {
          count += 1;
        }
      };
  Work m2 = createMockWork(2, sleepProcessWorkFn);
  Work m3 = createMockWork(3, sleepProcessWorkFn);
  Work m4 = createMockWork(4, sleepProcessWorkFn);
  assertEquals(0, executor.bytesOutstanding());
  long bytes = m2.getWorkItem().getSerializedSize();
  // First work for the key: activateWork itself schedules m2 on the executor...
  assertTrue(computationState.activateWork(key1Shard1, m2));
  // ...and the explicit execute below adds m2's bytes a second time.
  bytes += m2.getWorkItem().getSerializedSize();
  executor.execute(m2, m2.getWorkItem().getSerializedSize());
  processStart1.await();
  assertEquals(bytes, executor.bytesOutstanding());
  // m3/m4 queue behind m2's active key, so activation alone adds no executor bytes.
  assertTrue(computationState.activateWork(key1Shard1, m3));
  assertTrue(computationState.activateWork(key1Shard1, m4));
  bytes += m3.getWorkItem().getSerializedSize();
  executor.execute(m3, m3.getWorkItem().getSerializedSize());
  processStart2.await();
  assertEquals(bytes, executor.bytesOutstanding());
  bytes += m4.getWorkItem().getSerializedSize();
  executor.execute(m4, m4.getWorkItem().getSerializedSize());
  processStart3.await();
  assertEquals(bytes, executor.bytesOutstanding());
  // Release the spinning work items so shutdown can complete.
  stop.set(true);
  executor.shutdown();
}
/**
 * Verifies that {@link BoundedQueueExecutor#elementsOutstanding()} counts every work item
 * currently queued or executing.
 *
 * <p>Work items spin until {@code stop} is set so they stay outstanding; the latches gate
 * each assertion on the expected number of started executions. The counts 2/3/4 reflect that
 * activating the first work item for a key schedules it on the executor in addition to the
 * explicit execute call (see {@link #testActiveWork}).
 */
@Test
public void testOutstandingBundlesMetric() throws Exception {
  int maxThreads = 5;
  int threadExpirationSec = 60;
  CountDownLatch processStart1 = new CountDownLatch(2);
  CountDownLatch processStart2 = new CountDownLatch(3);
  CountDownLatch processStart3 = new CountDownLatch(4);
  AtomicBoolean stop = new AtomicBoolean(false);
  BoundedQueueExecutor executor =
      new BoundedQueueExecutor(
          maxThreads,
          threadExpirationSec,
          TimeUnit.SECONDS,
          maxThreads,
          MAXIMUM_BYTES_OUTSTANDING,
          new ThreadFactoryBuilder()
              .setNameFormat("DataflowWorkUnits-%d")
              .setDaemon(true)
              .build());
  ComputationState computationState =
      new ComputationState(
          "computation",
          defaultMapTask(Arrays.asList(makeSourceInstruction(StringUtf8Coder.of()))),
          executor,
          ImmutableMap.of(),
          null);
  ShardedKey key1Shard1 = ShardedKey.create(ByteString.copyFromUtf8("key1"), 1);
  // Busy-wait until the test flips `stop`, keeping every started work item outstanding.
  Consumer<Work> sleepProcessWorkFn =
      unused -> {
        processStart1.countDown();
        processStart2.countDown();
        processStart3.countDown();
        int count = 0;
        while (!stop.get()) {
          count += 1;
        }
      };
  Work m2 = createMockWork(2, sleepProcessWorkFn);
  Work m3 = createMockWork(3, sleepProcessWorkFn);
  Work m4 = createMockWork(4, sleepProcessWorkFn);
  assertEquals(0, executor.elementsOutstanding());
  // activateWork schedules m2 once; the explicit execute adds it again -> 2 outstanding.
  assertTrue(computationState.activateWork(key1Shard1, m2));
  executor.execute(m2, m2.getWorkItem().getSerializedSize());
  processStart1.await();
  assertEquals(2, executor.elementsOutstanding());
  // m3/m4 queue behind the active key, so only the explicit executes add elements.
  assertTrue(computationState.activateWork(key1Shard1, m3));
  assertTrue(computationState.activateWork(key1Shard1, m4));
  executor.execute(m3, m3.getWorkItem().getSerializedSize());
  processStart2.await();
  assertEquals(3, executor.elementsOutstanding());
  executor.execute(m4, m4.getWorkItem().getSerializedSize());
  processStart3.await();
  assertEquals(4, executor.elementsOutstanding());
  // Release the spinning work items so shutdown can complete.
  stop.set(true);
  executor.shutdown();
}
/**
 * After a user-code exception, cached reader/state for the key must be invalidated so the
 * local retry re-reads checkpointed state rather than trusting stale cached values. The
 * source throws on its first snapshot, two exceptions are expected by the server, and three
 * work items (tokens 0..2) are processed end to end.
 *
 * <p>NOTE(review): two string literals in this copy of the file appear truncated (the value
 * tag in the GetData response and the "tag:" line of the expected commit) — restore them from
 * version control before relying on this test.
 */
@Test
public void testExceptionInvalidatesCache() throws Exception {
  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  server.setExpectedExceptionCount(2);
  DataflowPipelineOptions options = createTestingPipelineOptions(server);
  options.setNumWorkers(1);
  DataflowPipelineDebugOptions debugOptions = options.as(DataflowPipelineDebugOptions.class);
  // Limit each reader bundle to a single element so every element checkpoints separately.
  debugOptions.setUnboundedReaderMaxElements(1);
  CloudObject codec =
      CloudObjects.asCloudObject(
          WindowedValue.getFullCoder(
              ValueWithRecordId.ValueWithRecordIdCoder.of(
                  KvCoder.of(VarIntCoder.of(), VarIntCoder.of())),
              GlobalWindow.Coder.INSTANCE),
          /* sdkComponents= */ null);
  // The source throws when taking its first snapshot, triggering the local retry path.
  TestCountingSource counter = new TestCountingSource(3).withThrowOnFirstSnapshot(true);
  List<ParallelInstruction> instructions =
      Arrays.asList(
          new ParallelInstruction()
              .setOriginalName("OriginalReadName")
              .setSystemName("Read")
              .setName(DEFAULT_PARDO_USER_NAME)
              .setRead(
                  new ReadInstruction()
                      .setSource(
                          CustomSources.serializeToCloudSource(counter, options).setCodec(codec)))
              .setOutputs(
                  Collections.singletonList(
                      new InstructionOutput()
                          .setName("read_output")
                          .setOriginalName(DEFAULT_OUTPUT_ORIGINAL_NAME)
                          .setSystemName(DEFAULT_OUTPUT_SYSTEM_NAME)
                          .setCodec(codec))),
          makeDoFnInstruction(
              new TestExceptionInvalidatesCacheFn(),
              0,
              StringUtf8Coder.of(),
              WindowingStrategy.globalDefault()),
          makeSinkInstruction(StringUtf8Coder.of(), 1, GlobalWindow.Coder.INSTANCE));
  StreamingDataflowWorker worker =
      makeWorker(
          instructions,
          options.as(StreamingDataflowWorkerOptions.class),
          true /* publishCounters */);
  worker.setRetryLocallyDelayMs(100);
  worker.start();
  // Three GetData responses, one per work item; the stored state value becomes 42 only for
  // the last one.
  for (int i = 0; i < 3; i++) {
    ByteString state;
    if (i == 0 || i == 1) {
      state = ByteString.EMPTY;
    } else {
      state = ByteString.copyFrom(new byte[] {42});
    }
    Windmill.GetDataResponse.Builder dataResponse = Windmill.GetDataResponse.newBuilder();
    dataResponse
        .addDataBuilder()
        .setComputationId(DEFAULT_COMPUTATION_ID)
        .addDataBuilder()
        .setKey(ByteString.copyFromUtf8("0000000000000001"))
        .setShardingKey(1)
        .addValuesBuilder()
        // NOTE(review): tag literal truncated in this copy of the file.
        .setTag(ByteString.copyFromUtf8("
        .setStateFamily(DEFAULT_PARDO_STATE_FAMILY)
        .getValueBuilder()
        .setTimestamp(0)
        .setData(state);
    server.whenGetDataCalled().thenReturn(dataResponse.build());
  }
  // Process work items 0..2; items after the first carry the previous checkpoint state.
  for (int i = 0; i < 3; i++) {
    StringBuilder sb = new StringBuilder();
    sb.append("work {\n");
    sb.append(" computation_id: \"computation\"\n");
    sb.append(" input_data_watermark: 0\n");
    sb.append(" work {\n");
    sb.append(" key: \"0000000000000001\"\n");
    sb.append(" sharding_key: 1\n");
    sb.append(" work_token: ");
    sb.append(i);
    sb.append(" cache_token: 1");
    sb.append("\n");
    if (i > 0) {
      int previousCheckpoint = i - 1;
      sb.append(" source_state {\n");
      sb.append(" state: \"");
      sb.append((char) previousCheckpoint);
      sb.append("\"\n");
      sb.append(" }\n");
    }
    sb.append(" }\n");
    sb.append("}\n");
    server.whenGetWorkCalled().thenReturn(buildInput(sb.toString(), null));
    Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1);
    Windmill.WorkItemCommitRequest commit = result.get((long) i);
    UnsignedLong finalizeId =
        UnsignedLong.fromLongBits(commit.getSourceStateUpdates().getFinalizeIds(0));
    // Build the expected commit text for this work item.
    sb = new StringBuilder();
    sb.append("key: \"0000000000000001\"\n");
    sb.append("sharding_key: 1\n");
    sb.append("work_token: ");
    sb.append(i);
    sb.append("\n");
    sb.append("cache_token: 1\n");
    sb.append("output_messages {\n");
    sb.append(" destination_stream_id: \"out\"\n");
    sb.append(" bundles {\n");
    sb.append(" key: \"0000000000000001\"\n");
    int messageNum = i;
    sb.append(" messages {\n");
    sb.append(" timestamp: ");
    sb.append(messageNum * 1000);
    sb.append("\n");
    sb.append(" data: \"0:");
    sb.append(messageNum);
    sb.append("\"\n");
    sb.append(" }\n");
    sb.append(" messages_ids: \"\"\n");
    sb.append(" }\n");
    sb.append("}\n");
    if (i == 0) {
      // Only the first work item's commit includes the value update (data byte 42).
      sb.append("value_updates {\n");
      // NOTE(review): tag literal truncated in this copy of the file.
      sb.append(" tag: \"
      sb.append(" value {\n");
      sb.append(" timestamp: 0\n");
      sb.append(" data: \"");
      sb.append((char) 42);
      sb.append("\"\n");
      sb.append(" }\n");
      sb.append(" state_family: \"parDoStateFamily\"\n");
      sb.append("}\n");
    }
    int sourceState = i;
    sb.append("source_state_updates {\n");
    sb.append(" state: \"");
    sb.append((char) sourceState);
    sb.append("\"\n");
    sb.append(" finalize_ids: ");
    sb.append(finalizeId);
    sb.append("}\n");
    sb.append("source_watermark: ");
    sb.append((sourceState + 1) * 1000);
    sb.append("\n");
    sb.append("source_backlog_bytes: 7\n");
    assertThat(
        setValuesTimestamps(
                removeDynamicFields(commit)
                    .toBuilder()
                    .clearOutputTimers()
                    .clearSourceBytesProcessed())
            .build(),
        equalTo(
            setMessagesMetadata(
                    PaneInfo.NO_FIRING,
                    CoderUtils.encodeToByteArray(
                        CollectionCoder.of(GlobalWindow.Coder.INSTANCE),
                        ImmutableList.of(GlobalWindow.INSTANCE)),
                    parseCommitRequest(sb.toString()))
                .build()));
  }
}
/**
 * Runs a pipeline whose DoFn ({@code FanoutFn}, defined elsewhere in this file) produces a
 * very large commit, and checks that the worker processes it without error.
 */
@Test
public void testHugeCommits() throws Exception {
  FakeWindmillServer windmill = new FakeWindmillServer(errorCollector);
  StreamingDataflowWorkerOptions workerOptions = createTestingPipelineOptions(windmill);
  List<ParallelInstruction> pipeline =
      Arrays.asList(
          makeSourceInstruction(StringUtf8Coder.of()),
          makeDoFnInstruction(new FanoutFn(), 0, StringUtf8Coder.of()),
          makeSinkInstruction(StringUtf8Coder.of(), 0));
  StreamingDataflowWorker streamingWorker =
      makeWorker(pipeline, workerOptions, true /* publishCounters */);
  streamingWorker.start();
  windmill.whenGetWorkCalled().thenReturn(makeInput(0, TimeUnit.MILLISECONDS.toMicros(0)));
  windmill.waitForAndGetCommits(0);
  streamingWorker.stop();
}
/**
 * While a slow DoFn holds work active, the active-work refresh (period 100ms here) should
 * issue at least one GetData request to the server.
 */
@Test
public void testActiveWorkRefresh() throws Exception {
  FakeWindmillServer windmill = new FakeWindmillServer(errorCollector);
  StreamingDataflowWorkerOptions workerOptions = createTestingPipelineOptions(windmill);
  workerOptions.setActiveWorkRefreshPeriodMillis(100);
  List<ParallelInstruction> pipeline =
      Arrays.asList(
          makeSourceInstruction(StringUtf8Coder.of()),
          makeDoFnInstruction(new SlowDoFn(), 0, StringUtf8Coder.of()),
          makeSinkInstruction(StringUtf8Coder.of(), 0));
  StreamingDataflowWorker streamingWorker =
      makeWorker(pipeline, workerOptions, true /* publishCounters */);
  streamingWorker.start();
  windmill.whenGetWorkCalled().thenReturn(makeInput(0, TimeUnit.MILLISECONDS.toMicros(0)));
  windmill.waitForAndGetCommits(1);
  streamingWorker.stop();
  // SlowDoFn keeps the work active long enough for at least one refresh GetData call.
  assertThat(windmill.numGetDataRequests(), greaterThan(0));
}
/**
 * Checks that {@code Work} converts its recorded state transitions into LatencyAttribution
 * protos. Per the assertions below: PROCESSING time is reported as ACTIVE, COMMIT_QUEUED
 * folds into COMMITTING, and time spent across multiple visits to the same reported state
 * accumulates.
 */
@Test
public void testLatencyAttributionProtobufsPopulated() {
  FakeClock clock = new FakeClock();
  Work work = Work.create(null, clock, Collections.emptyList(), unused -> {});
  // Walk the work item through its states, spending a known fake-clock duration in each.
  clock.sleep(Duration.millis(10)); // 10ms in the initial (QUEUED) state.
  work.setState(Work.State.PROCESSING);
  clock.sleep(Duration.millis(20)); // 20ms PROCESSING.
  work.setState(Work.State.READING);
  clock.sleep(Duration.millis(30)); // 30ms READING.
  work.setState(Work.State.PROCESSING);
  clock.sleep(Duration.millis(40)); // 40ms more PROCESSING.
  work.setState(Work.State.COMMIT_QUEUED);
  clock.sleep(Duration.millis(50)); // 50ms COMMIT_QUEUED.
  work.setState(Work.State.COMMITTING);
  clock.sleep(Duration.millis(60)); // 60ms COMMITTING.
  Iterator<LatencyAttribution> it = work.getLatencyAttributions().iterator();
  assertTrue(it.hasNext());
  LatencyAttribution lat = it.next();
  assertSame(State.QUEUED, lat.getState());
  assertEquals(10, lat.getTotalDurationMillis());
  assertTrue(it.hasNext());
  lat = it.next();
  assertSame(State.ACTIVE, lat.getState());
  assertEquals(60, lat.getTotalDurationMillis()); // 20 + 40 of PROCESSING.
  assertTrue(it.hasNext());
  lat = it.next();
  assertSame(State.READING, lat.getState());
  assertEquals(30, lat.getTotalDurationMillis());
  assertTrue(it.hasNext());
  lat = it.next();
  assertSame(State.COMMITTING, lat.getState());
  assertEquals(110, lat.getTotalDurationMillis()); // 50 COMMIT_QUEUED + 60 COMMITTING.
  assertFalse(it.hasNext());
}
/**
 * With a single harness thread, the second work item must wait while the first runs a
 * 1000ms (fake-clock) DoFn; that wait should be reported as QUEUED latency for the second
 * item and zero for the first.
 */
@Test
public void testLatencyAttributionToQueuedState() throws Exception {
  final int workToken = 3232; // A distinctive token for the work item whose latency we check.
  FakeClock clock = new FakeClock();
  List<ParallelInstruction> instructions =
      Arrays.asList(
          makeSourceInstruction(StringUtf8Coder.of()),
          makeDoFnInstruction(
              new FakeSlowDoFn(clock, Duration.millis(1000)), 0, StringUtf8Coder.of()),
          makeSinkInstruction(StringUtf8Coder.of(), 0));
  FakeWindmillServer server = new FakeWindmillServer(errorCollector);
  StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server);
  options.setActiveWorkRefreshPeriodMillis(100);
  // One harness thread forces the second work item to queue behind the first.
  options.setNumberOfWorkerHarnessThreads(1);
  StreamingDataflowWorker worker =
      makeWorker(
          instructions,
          options,
          false /* publishCounters */,
          clock,
          clock::newFakeScheduledExecutor);
  worker.start();
  // Capture latency attributions reported via active-work-refresh GetData calls.
  ActiveWorkRefreshSink awrSink = new ActiveWorkRefreshSink(EMPTY_DATA_RESPONDER);
  server.whenGetDataCalled().answerByDefault(awrSink::getData).delayEachResponseBy(Duration.ZERO);
  // workToken + 1 arrives first and runs immediately; workToken queues behind it.
  server
      .whenGetWorkCalled()
      .thenReturn(makeInput(workToken + 1, 0 /* timestamp */))
      .thenReturn(makeInput(workToken, 1 /* timestamp */));
  server.waitForAndGetCommits(2);
  worker.stop();
  assertEquals(
      awrSink.getLatencyAttributionDuration(workToken, State.QUEUED), Duration.millis(1000));
  assertEquals(awrSink.getLatencyAttributionDuration(workToken + 1, State.QUEUED), Duration.ZERO);
}
  @Test
  public void testLatencyAttributionToActiveState() throws Exception {
    final int workToken = 4242;
    FakeClock clock = new FakeClock();
    // The DoFn advances the fake clock by 1s, so the item spends 1s in the ACTIVE state.
    List<ParallelInstruction> instructions =
        Arrays.asList(
            makeSourceInstruction(StringUtf8Coder.of()),
            makeDoFnInstruction(
                new FakeSlowDoFn(clock, Duration.millis(1000)), 0, StringUtf8Coder.of()),
            makeSinkInstruction(StringUtf8Coder.of(), 0));
    FakeWindmillServer server = new FakeWindmillServer(errorCollector);
    StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server);
    options.setActiveWorkRefreshPeriodMillis(100);
    StreamingDataflowWorker worker =
        makeWorker(
            instructions,
            options,
            false /* publishCounters */,
            clock,
            clock::newFakeScheduledExecutor);
    worker.start();
    ActiveWorkRefreshSink awrSink = new ActiveWorkRefreshSink(EMPTY_DATA_RESPONDER);
    server.whenGetDataCalled().answerByDefault(awrSink::getData).delayEachResponseBy(Duration.ZERO);
    server.whenGetWorkCalled().thenReturn(makeInput(workToken, 0 /* timestamp */));
    server.waitForAndGetCommits(1);
    worker.stop();
    assertEquals(
        awrSink.getLatencyAttributionDuration(workToken, State.ACTIVE), Duration.millis(1000));
  }
  @Test
  public void testLatencyAttributionToReadingState() throws Exception {
    final int workToken = 5454;
    FakeClock clock = new FakeClock();
    // ReadingDoFn performs a state read, which triggers a (non-heartbeat) GetData call.
    List<ParallelInstruction> instructions =
        Arrays.asList(
            makeSourceInstruction(StringUtf8Coder.of()),
            makeDoFnInstruction(new ReadingDoFn(), 0, StringUtf8Coder.of()),
            makeSinkInstruction(StringUtf8Coder.of(), 0));
    FakeWindmillServer server = new FakeWindmillServer(errorCollector);
    StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server);
    options.setActiveWorkRefreshPeriodMillis(100);
    StreamingDataflowWorker worker =
        makeWorker(
            instructions,
            options,
            false /* publishCounters */,
            clock,
            clock::newFakeScheduledExecutor);
    worker.start();
    // Delay every non-heartbeat GetData (i.e. the state fetch) by 1s of fake time.
    ActiveWorkRefreshSink awrSink =
        new ActiveWorkRefreshSink(
            (request) -> {
              clock.sleep(Duration.millis(1000));
              return EMPTY_DATA_RESPONDER.apply(request);
            });
    server.whenGetDataCalled().answerByDefault(awrSink::getData).delayEachResponseBy(Duration.ZERO);
    server.whenGetWorkCalled().thenReturn(makeInput(workToken, 0 /* timestamp */));
    server.waitForAndGetCommits(1);
    worker.stop();
    assertEquals(
        awrSink.getLatencyAttributionDuration(workToken, State.READING), Duration.millis(1000));
  }
  @Test
  public void testLatencyAttributionToCommittingState() throws Exception {
    final int workToken = 6464;
    FakeClock clock = new FakeClock();
    List<ParallelInstruction> instructions =
        Arrays.asList(
            makeSourceInstruction(StringUtf8Coder.of()),
            makeSinkInstruction(StringUtf8Coder.of(), 0));
    FakeWindmillServer server = new FakeWindmillServer(errorCollector);
    // Stall every CommitWork call by 1s of fake time so the item sits in COMMITTING.
    server
        .whenCommitWorkCalled()
        .answerByDefault(
            (request) -> {
              clock.sleep(Duration.millis(1000));
              return Windmill.CommitWorkResponse.getDefaultInstance();
            });
    StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server);
    options.setActiveWorkRefreshPeriodMillis(100);
    StreamingDataflowWorker worker =
        makeWorker(
            instructions,
            options,
            false /* publishCounters */,
            clock,
            clock::newFakeScheduledExecutor);
    worker.start();
    ActiveWorkRefreshSink awrSink = new ActiveWorkRefreshSink(EMPTY_DATA_RESPONDER);
    server.whenGetDataCalled().answerByDefault(awrSink::getData).delayEachResponseBy(Duration.ZERO);
    server.whenGetWorkCalled().thenReturn(makeInput(workToken, TimeUnit.MILLISECONDS.toMicros(0)));
    server.waitForAndGetCommits(1);
    worker.stop();
    assertEquals(
        awrSink.getLatencyAttributionDuration(workToken, State.COMMITTING), Duration.millis(1000));
  }
  @Test
  public void testLatencyAttributionPopulatedInCommitRequest() throws Exception {
    // Verifies that per-work-item latency attributions are carried on the WorkItemCommitRequest
    // itself, not only on heartbeat GetData requests.
    final int workToken = 7272;
    long dofnWaitTimeMs = 1000;
    FakeClock clock = new FakeClock();
    List<ParallelInstruction> instructions =
        Arrays.asList(
            makeSourceInstruction(StringUtf8Coder.of()),
            makeDoFnInstruction(
                new FakeSlowDoFn(clock, Duration.millis(dofnWaitTimeMs)), 0, StringUtf8Coder.of()),
            makeSinkInstruction(StringUtf8Coder.of(), 0));
    FakeWindmillServer server = new FakeWindmillServer(errorCollector);
    StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server);
    options.setActiveWorkRefreshPeriodMillis(100);
    options.setNumberOfWorkerHarnessThreads(1);
    StreamingDataflowWorker worker =
        makeWorker(
            instructions,
            options,
            false /* publishCounters */,
            clock,
            clock::newFakeScheduledExecutor);
    worker.start();
    ActiveWorkRefreshSink awrSink = new ActiveWorkRefreshSink(EMPTY_DATA_RESPONDER);
    server.whenGetDataCalled().answerByDefault(awrSink::getData).delayEachResponseBy(Duration.ZERO);
    server.whenGetWorkCalled().thenReturn(makeInput(workToken, 1 /* timestamp */));
    Map<Long, WorkItemCommitRequest> workItemCommitRequest = server.waitForAndGetCommits(1);
    worker.stop();
    // The DoFn's 1s fake-clock sleep should be attributed as ACTIVE time on the commit.
    assertEquals(
        workItemCommitRequest.get((long) workToken).getPerWorkItemLatencyAttributions(0),
        LatencyAttribution.newBuilder()
            .setState(State.ACTIVE)
            .setTotalDurationMillis(dofnWaitTimeMs)
            .build());
    // Streaming engine additionally reports get-work transit latency.
    if (streamingEngine) {
      assertEquals(
          workItemCommitRequest.get((long) workToken).getPerWorkItemLatencyAttributions(1),
          LatencyAttribution.newBuilder()
              .setState(State.GET_WORK_IN_TRANSIT_TO_USER_WORKER)
              .setTotalDurationMillis(1000)
              .build());
    }
  }
  @Test
  public void testLimitOnOutputBundleSize() throws Exception {
    // Verifies that an output bundle is cut close to MAX_SINK_BYTES (within +/-10%) even when a
    // single source could produce far more data, and that the next work item resumes cleanly.
    List<Integer> finalizeTracker = Lists.newArrayList();
    TestCountingSource.setFinalizeTracker(finalizeTracker);
    final int numMessagesInCustomSourceShard = 100000;
    final int inflatedSizePerMessage = 10000;
    FakeWindmillServer server = new FakeWindmillServer(errorCollector);
    StreamingDataflowWorker worker =
        makeWorker(
            makeUnboundedSourcePipeline(
                numMessagesInCustomSourceShard, new InflateDoFn(inflatedSizePerMessage)),
            createTestingPipelineOptions(server),
            false /* publishCounters */);
    worker.start();
    server
        .whenGetWorkCalled()
        .thenReturn(
            buildInput(
                "work {"
                    + "  computation_id: \"computation\""
                    + "  input_data_watermark: 0"
                    + "  work {"
                    + "    key: \"0000000000000001\""
                    + "    sharding_key: 1"
                    + "    work_token: 1"
                    + "    cache_token: 1"
                    + "  }"
                    + "}",
                null));
    // Accept a commit whose serialized size is within 10% of the sink byte cap.
    Matcher<Integer> isWithinBundleSizeLimits =
        both(greaterThan(StreamingDataflowWorker.MAX_SINK_BYTES * 9 / 10))
            .and(lessThan(StreamingDataflowWorker.MAX_SINK_BYTES * 11 / 10));
    Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1);
    Windmill.WorkItemCommitRequest commit = result.get(1L);
    assertThat(commit.getSerializedSize(), isWithinBundleSizeLimits);
    // A second work item (same key, new token) must also respect the cap.
    server
        .whenGetWorkCalled()
        .thenReturn(
            buildInput(
                "work {"
                    + "  computation_id: \"computation\""
                    + "  input_data_watermark: 0"
                    + "  work {"
                    + "    key: \"0000000000000001\""
                    + "    sharding_key: 1"
                    + "    work_token: 2"
                    + "    cache_token: 1"
                    + "  }"
                    + "}",
                null));
    result = server.waitForAndGetCommits(1);
    commit = result.get(2L);
    assertThat(commit.getSerializedSize(), isWithinBundleSizeLimits);
  }
  @Test
  public void testLimitOnOutputBundleSizeWithMultipleSinks() throws Exception {
    // Same as testLimitOnOutputBundleSize, but the pipeline fans out to two additional sinks;
    // the combined commit must still land within +/-10% of MAX_SINK_BYTES.
    List<Integer> finalizeTracker = Lists.newArrayList();
    TestCountingSource.setFinalizeTracker(finalizeTracker);
    final int numMessagesInCustomSourceShard = 100000;
    final int inflatedSizePerMessage = 10000;
    List<ParallelInstruction> instructions = new ArrayList<>();
    instructions.addAll(
        makeUnboundedSourcePipeline(
            numMessagesInCustomSourceShard, new InflateDoFn(inflatedSizePerMessage)));
    // Two extra sinks reading from the same upstream output (index 1).
    instructions.add(
        makeSinkInstruction(
            DEFAULT_DESTINATION_STREAM_ID + "-1",
            StringUtf8Coder.of(),
            1,
            GlobalWindow.Coder.INSTANCE));
    instructions.add(
        makeSinkInstruction(
            DEFAULT_DESTINATION_STREAM_ID + "-2",
            StringUtf8Coder.of(),
            1,
            GlobalWindow.Coder.INSTANCE));
    FakeWindmillServer server = new FakeWindmillServer(errorCollector);
    StreamingDataflowWorker worker =
        makeWorker(instructions, createTestingPipelineOptions(server), true /* publishCounters */);
    worker.start();
    server
        .whenGetWorkCalled()
        .thenReturn(
            buildInput(
                "work {"
                    + "  computation_id: \"computation\""
                    + "  input_data_watermark: 0"
                    + "  work {"
                    + "    key: \"0000000000000001\""
                    + "    sharding_key: 1"
                    + "    work_token: 1"
                    + "    cache_token: 1"
                    + "  }"
                    + "}",
                null));
    Matcher<Integer> isWithinBundleSizeLimits =
        both(greaterThan(StreamingDataflowWorker.MAX_SINK_BYTES * 9 / 10))
            .and(lessThan(StreamingDataflowWorker.MAX_SINK_BYTES * 11 / 10));
    Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1);
    Windmill.WorkItemCommitRequest commit = result.get(1L);
    assertThat(commit.getSerializedSize(), isWithinBundleSizeLimits);
    // Second work item on the same key must also respect the cap.
    server
        .whenGetWorkCalled()
        .thenReturn(
            buildInput(
                "work {"
                    + "  computation_id: \"computation\""
                    + "  input_data_watermark: 0"
                    + "  work {"
                    + "    key: \"0000000000000001\""
                    + "    sharding_key: 1"
                    + "    work_token: 2"
                    + "    cache_token: 1"
                    + "  }"
                    + "}",
                null));
    result = server.waitForAndGetCommits(1);
    commit = result.get(2L);
    assertThat(commit.getSerializedSize(), isWithinBundleSizeLimits);
  }
  @Test
  public void testStuckCommit() throws Exception {
    // Streaming-engine-only test: commits that the server silently drops must not wedge the key.
    // After stuckCommitDurationMillis the worker should give up on them and process new work.
    if (!streamingEngine) {
      return;
    }
    List<ParallelInstruction> instructions =
        Arrays.asList(
            makeSourceInstruction(StringUtf8Coder.of()),
            makeSinkInstruction(StringUtf8Coder.of(), 0));
    FakeWindmillServer server = new FakeWindmillServer(errorCollector);
    StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server);
    options.setStuckCommitDurationMillis(2000);
    StreamingDataflowWorker worker = makeWorker(instructions, options, true /* publishCounters */);
    worker.start();
    // Drop the first two streaming commits on the floor.
    server.setDropStreamingCommits(true);
    server
        .whenGetWorkCalled()
        .thenReturn(makeInput(10, TimeUnit.MILLISECONDS.toMicros(2), DEFAULT_KEY_STRING, 1))
        .thenReturn(makeInput(15, TimeUnit.MILLISECONDS.toMicros(3), DEFAULT_KEY_STRING, 5));
    ConcurrentHashMap<Long, Consumer<CommitStatus>> droppedCommits =
        server.waitForDroppedCommits(2);
    server.setDropStreamingCommits(false);
    // New work for the same key should still complete despite the earlier stuck commits.
    server
        .whenGetWorkCalled()
        .thenReturn(makeInput(1, TimeUnit.MILLISECONDS.toMicros(1), DEFAULT_KEY_STRING, 1));
    Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1);
    // Completing just one dropped commit must not be required for, nor block, the new commit.
    droppedCommits.values().iterator().next().accept(CommitStatus.OK);
    worker.stop();
    assertTrue(result.containsKey(1L));
    assertEquals(
        makeExpectedOutput(
                1, TimeUnit.MILLISECONDS.toMicros(1), DEFAULT_KEY_STRING, 1, DEFAULT_KEY_STRING)
            .build(),
        removeDynamicFields(result.get(1L)));
  }
  // DoFn that blocks every element until the test releases the latch. Also a TestRule so the
  // static coordination primitives are reset before each test. NOTE: the release-then-await order
  // in processElement is the synchronization contract tests rely on.
  static class BlockingFn extends DoFn<String, String> implements TestRule {
    // Counted down by the test to unblock all in-flight elements.
    public static CountDownLatch blocker = new CountDownLatch(1);
    // Released once per element entering processElement; tests acquire it to await arrival.
    public static Semaphore counter = new Semaphore(0);
    // Total number of processElement invocations (including retries).
    public static AtomicInteger callCounter = new AtomicInteger(0);
    @ProcessElement
    public void processElement(ProcessContext c) throws InterruptedException {
      callCounter.incrementAndGet();
      counter.release();
      blocker.await();
      c.output(c.element());
    }
    @Override
    public Statement apply(final Statement base, final Description description) {
      return new Statement() {
        @Override
        public void evaluate() throws Throwable {
          // Fresh primitives per test so a previous test's state cannot leak.
          blocker = new CountDownLatch(1);
          counter = new Semaphore(0);
          callCounter = new AtomicInteger();
          base.evaluate();
        }
      };
    }
  }
static class KeyTokenInvalidFn extends DoFn<KV<String, String>, KV<String, String>> {
static boolean thrown = false;
@ProcessElement
public void processElement(ProcessContext c) {
if (!thrown) {
thrown = true;
throw new KeyTokenInvalidException("key");
} else {
c.output(c.element());
}
}
}
static class LargeCommitFn extends DoFn<KV<String, String>, KV<String, String>> {
@ProcessElement
public void processElement(ProcessContext c) {
if (c.element().getKey().equals("large_key")) {
StringBuilder s = new StringBuilder();
for (int i = 0; i < 100; ++i) {
s.append("large_commit");
}
c.output(KV.of(c.element().getKey(), s.toString()));
} else {
c.output(c.element());
}
}
}
static class ChangeKeysFn extends DoFn<KV<String, String>, KV<String, String>> {
@ProcessElement
public void processElement(ProcessContext c) {
KV<String, String> elem = c.element();
c.output(KV.of(elem.getKey() + "_" + elem.getValue(), elem.getValue()));
}
}
  // DoFn that fails exactly once with a chained exception ("Another exception!" caused by
  // "Exception!") so tests can verify cause-chain reporting; retries succeed and emit nothing.
  static class TestExceptionFn extends DoFn<String, String> {
    boolean firstTime = true;
    @ProcessElement
    public void processElement(ProcessContext c) throws Exception {
      if (firstTime) {
        firstTime = false;
        try {
          // Deliberate throw-and-wrap: the outer exception must carry this one as its cause.
          throw new Exception("Exception!");
        } catch (Exception e) {
          throw new Exception("Another exception!", e);
        }
      }
    }
  }
  // Identity DoFn: forwards each grouped element unchanged.
  static class PassthroughDoFn
      extends DoFn<KV<String, Iterable<String>>, KV<String, Iterable<String>>> {
    @ProcessElement
    public void processElement(ProcessContext c) {
      c.output(c.element());
    }
  }
static class Action {
GetWorkResponse response;
Timer[] expectedTimers = new Timer[] {};
WatermarkHold[] expectedHolds = new WatermarkHold[] {};
public Action(GetWorkResponse response) {
this.response = response;
}
Action withHolds(WatermarkHold... holds) {
this.expectedHolds = holds;
return this;
}
Action withTimers(Timer... timers) {
this.expectedTimers = timers;
return this;
}
}
static class PrintFn extends DoFn<ValueWithRecordId<KV<Integer, Integer>>, String> {
@ProcessElement
public void processElement(ProcessContext c) {
KV<Integer, Integer> elem = c.element().getValue();
c.output(elem.getKey() + ":" + elem.getValue());
}
}
private static class MockWork {
Work create(long workToken) {
return Work.create(
Windmill.WorkItem.newBuilder().setKey(ByteString.EMPTY).setWorkToken(workToken).build(),
Instant::now,
Collections.emptyList(),
work -> {});
}
}
  // Stateful DoFn driven by element values 0..2 to verify that a thrown exception invalidates the
  // state cache: value 0 writes state, value 1 reads it back, value 2 throws once and on retry
  // must still observe the committed state (42), proving the cache was refetched, not reused.
  static class TestExceptionInvalidatesCacheFn
      extends DoFn<ValueWithRecordId<KV<Integer, Integer>>, String> {
    // Ensures the exception is thrown only on the first attempt at value 2.
    static boolean thrown = false;
    @StateId("int")
    private final StateSpec<ValueState<Integer>> counter = StateSpecs.value(VarIntCoder.of());
    @ProcessElement
    public void processElement(ProcessContext c, @StateId("int") ValueState<Integer> state)
        throws Exception {
      KV<Integer, Integer> elem = c.element().getValue();
      if (elem.getValue() == 0) {
        LOG.error("**** COUNTER 0 ****");
        assertNull(state.read());
        state.write(42);
        assertEquals((Integer) 42, state.read());
      } else if (elem.getValue() == 1) {
        LOG.error("**** COUNTER 1 ****");
        assertEquals((Integer) 42, state.read());
      } else if (elem.getValue() == 2) {
        if (!thrown) {
          LOG.error("**** COUNTER 2 (will throw) ****");
          thrown = true;
          throw new Exception("Exception!");
        }
        // Retry after the failure: the committed value must be re-read from the service.
        LOG.error("**** COUNTER 2 (retry) ****");
        assertEquals((Integer) 42, state.read());
      } else {
        throw new RuntimeException("only expecting values [0,2]");
      }
      c.output(elem.getKey() + ":" + elem.getValue());
    }
  }
private static class FanoutFn extends DoFn<String, String> {
@ProcessElement
public void processElement(ProcessContext c) {
StringBuilder builder = new StringBuilder(1000000);
for (int i = 0; i < 1000000; i++) {
builder.append(' ');
}
String largeString = builder.toString();
for (int i = 0; i < 3000; i++) {
c.output(largeString);
}
}
}
private static class SlowDoFn extends DoFn<String, String> {
@ProcessElement
public void processElement(ProcessContext c) throws Exception {
Thread.sleep(1000);
c.output(c.element());
}
}
  // Deterministic virtual clock for tests. Time only advances via sleep(), which also runs any
  // scheduled jobs whose due time falls within the sleep window (in due-time order). Also vends a
  // ScheduledExecutorService whose scheduleWithFixedDelay runs on this virtual timeline.
  static class FakeClock implements Supplier<Instant> {
    // Pending jobs ordered by their due instant.
    private final PriorityQueue<Job> jobs = new PriorityQueue<>();
    private Instant now = Instant.now();
    public ScheduledExecutorService newFakeScheduledExecutor(String unused) {
      return new FakeScheduledExecutor();
    }
    @Override
    public synchronized Instant get() {
      return now;
    }
    public synchronized void clear() {
      jobs.clear();
    }
    // Advances virtual time by `duration`, executing every job due on the way. Jobs may schedule
    // further jobs; those are run too if they fall before the end of the sleep.
    public synchronized void sleep(Duration duration) {
      if (duration.isShorterThan(Duration.ZERO)) {
        throw new UnsupportedOperationException("Cannot sleep backwards in time");
      }
      Instant endOfSleep = now.plus(duration);
      while (true) {
        Job job = jobs.peek();
        if (job == null || job.when.isAfter(endOfSleep)) {
          break;
        }
        jobs.remove();
        // Jump the clock to the job's due time before running it.
        now = job.when;
        job.work.run();
      }
      now = endOfSleep;
    }
    private synchronized void schedule(Duration fromNow, Runnable work) {
      jobs.add(new Job(now.plus(fromNow), work));
    }
    // A scheduled runnable with its due instant; ordered by due time.
    private static class Job implements Comparable<Job> {
      final Instant when;
      final Runnable work;
      Job(Instant when, Runnable work) {
        this.when = when;
        this.work = work;
      }
      @Override
      public int compareTo(Job job) {
        return when.compareTo(job.when);
      }
    }
    // Minimal executor backed by the virtual clock: only scheduleWithFixedDelay (and lifecycle
    // no-ops) are supported; everything else throws.
    private class FakeScheduledExecutor implements ScheduledExecutorService {
      @Override
      public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException {
        return true;
      }
      @Override
      public void execute(Runnable command) {
        throw new UnsupportedOperationException("Not implemented yet");
      }
      @Override
      public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks)
          throws InterruptedException {
        throw new UnsupportedOperationException("Not implemented yet");
      }
      @Override
      public <T> List<Future<T>> invokeAll(
          Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit)
          throws InterruptedException {
        throw new UnsupportedOperationException("Not implemented yet");
      }
      @Override
      public <T> T invokeAny(Collection<? extends Callable<T>> tasks)
          throws ExecutionException, InterruptedException {
        throw new UnsupportedOperationException("Not implemented yet");
      }
      @Override
      public <T> T invokeAny(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit)
          throws ExecutionException, InterruptedException, TimeoutException {
        throw new UnsupportedOperationException("Not implemented yet");
      }
      @Override
      public boolean isShutdown() {
        throw new UnsupportedOperationException("Not implemented yet");
      }
      @Override
      public boolean isTerminated() {
        throw new UnsupportedOperationException("Not implemented yet");
      }
      @Override
      public void shutdown() {}
      @Override
      public List<Runnable> shutdownNow() {
        throw new UnsupportedOperationException("Not implemented yet");
      }
      @Override
      public <T> Future<T> submit(Callable<T> task) {
        throw new UnsupportedOperationException("Not implemented yet");
      }
      @Override
      public Future<?> submit(Runnable task) {
        throw new UnsupportedOperationException("Not implemented yet");
      }
      @Override
      public <T> Future<T> submit(Runnable task, T result) {
        throw new UnsupportedOperationException("Not implemented yet");
      }
      @Override
      public <V> ScheduledFuture<V> schedule(Callable<V> callable, long delay, TimeUnit unit) {
        throw new UnsupportedOperationException("Not implemented yet");
      }
      @Override
      public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) {
        throw new UnsupportedOperationException("Not implemented yet");
      }
      @Override
      public ScheduledFuture<?> scheduleAtFixedRate(
          Runnable command, long initialDelay, long period, TimeUnit unit) {
        throw new UnsupportedOperationException("Not implemented yet");
      }
      @Override
      public ScheduledFuture<?> scheduleWithFixedDelay(
          Runnable command, long initialDelay, long delay, TimeUnit unit) {
        if (delay <= 0) {
          throw new UnsupportedOperationException(
              "Please supply a delay > 0 to scheduleWithFixedDelay");
        }
        // Self-rescheduling job: run the command, then schedule the next run `delay` later.
        FakeClock.this.schedule(
            Duration.millis(unit.toMillis(initialDelay)),
            new Runnable() {
              @Override
              public void run() {
                command.run();
                FakeClock.this.schedule(Duration.millis(unit.toMillis(delay)), this);
              }
            });
        // Zero-length sleep runs any job already due (e.g. initialDelay of 0).
        FakeClock.this.sleep(Duration.ZERO);
        // NOTE(review): returns null — callers must not use the returned future.
        return null;
      }
    }
  }
  // DoFn that "sleeps" by advancing the shared FakeClock, making processing delay deterministic.
  private static class FakeSlowDoFn extends DoFn<String, String> {
    // Static, assigned in the constructor — presumably so the clock is reachable after the DoFn
    // is serialized by the runner; assumes one FakeClock per test. TODO(review): confirm.
    private static FakeClock clock;
    private final Duration sleep;
    FakeSlowDoFn(FakeClock clock, Duration sleep) {
      FakeSlowDoFn.clock = clock;
      this.sleep = sleep;
    }
    @ProcessElement
    public void processElement(ProcessContext c) throws Exception {
      clock.sleep(sleep);
      c.output(c.element());
    }
  }
  // Intercepts GetData calls: active-work-refresh (heartbeat) requests have their per-state
  // latency attributions recorded (keeping the max duration seen per state per work token) and get
  // an empty response; all other requests are delegated to the wrapped responder.
  static class ActiveWorkRefreshSink {
    private final Function<GetDataRequest, GetDataResponse> responder;
    // workToken -> (latency state -> max duration observed across heartbeats).
    private final Map<Long, EnumMap<LatencyAttribution.State, Duration>> totalDurations =
        new HashMap<>();
    ActiveWorkRefreshSink(Function<GetDataRequest, GetDataResponse> responder) {
      this.responder = responder;
    }
    Duration getLatencyAttributionDuration(long workToken, LatencyAttribution.State state) {
      EnumMap<LatencyAttribution.State, Duration> durations = totalDurations.get(workToken);
      return durations == null ? Duration.ZERO : durations.getOrDefault(state, Duration.ZERO);
    }
    // Heuristic: a heartbeat targets the default computation/sharding key and fetches no state.
    boolean isActiveWorkRefresh(GetDataRequest request) {
      for (ComputationGetDataRequest computationRequest : request.getRequestsList()) {
        if (!computationRequest.getComputationId().equals(DEFAULT_COMPUTATION_ID)) {
          return false;
        }
        for (KeyedGetDataRequest keyedRequest : computationRequest.getRequestsList()) {
          if (keyedRequest.getWorkToken() == 0
              || keyedRequest.getShardingKey() != DEFAULT_SHARDING_KEY
              || keyedRequest.getValuesToFetchCount() != 0
              || keyedRequest.getBagsToFetchCount() != 0
              || keyedRequest.getTagValuePrefixesToFetchCount() != 0
              || keyedRequest.getWatermarkHoldsToFetchCount() != 0) {
            return false;
          }
        }
      }
      return true;
    }
    GetDataResponse getData(GetDataRequest request) {
      if (!isActiveWorkRefresh(request)) {
        return responder.apply(request);
      }
      for (ComputationGetDataRequest computationRequest : request.getRequestsList()) {
        for (KeyedGetDataRequest keyedRequest : computationRequest.getRequestsList()) {
          for (LatencyAttribution la : keyedRequest.getLatencyAttributionList()) {
            EnumMap<LatencyAttribution.State, Duration> durations =
                totalDurations.computeIfAbsent(
                    keyedRequest.getWorkToken(),
                    (Long workToken) ->
                        new EnumMap<LatencyAttribution.State, Duration>(
                            LatencyAttribution.State.class));
            Duration cur = Duration.millis(la.getTotalDurationMillis());
            // Durations are cumulative, so keep the largest value reported per state.
            durations.compute(la.getState(), (s, d) -> d == null || d.isShorterThan(cur) ? cur : d);
          }
        }
      }
      return EMPTY_DATA_RESPONDER.apply(request);
    }
  }
  // DoFn that performs a state read (a Windmill data fetch) before emitting, so time spent in the
  // READING state can be exercised and attributed by latency tests.
  static class ReadingDoFn extends DoFn<String, String> {
    @StateId("int")
    private final StateSpec<ValueState<Integer>> counter = StateSpecs.value(VarIntCoder.of());
    @ProcessElement
    public void processElement(ProcessContext c, @StateId("int") ValueState<Integer> state) {
      state.read();
      c.output(c.element());
    }
  }
/** For each input element, emits a large string. */
private static class InflateDoFn extends DoFn<ValueWithRecordId<KV<Integer, Integer>>, String> {
final int inflatedSize;
/** For each input elements, outputs a string of this length */
InflateDoFn(int inflatedSize) {
this.inflatedSize = inflatedSize;
}
@ProcessElement
public void processElement(ProcessContext c) {
char[] chars = new char[inflatedSize];
Arrays.fill(chars, ' ');
c.output(new String(chars));
}
}
} |
Nit: let's pad this string in the front and back to better check "e.getMessage().contains". | public void testInsertFailsGracefully() throws Exception {
TableReference ref =
new TableReference().setProjectId("project").setDatasetId("dataset").setTableId("table");
List<FailsafeValueInSingleWindow<TableRow, TableRow>> rows =
ImmutableList.of(wrapValue(new TableRow()), wrapValue(new TableRow()));
ErrorProto errorProto = new ErrorProto().setReason("schemaMismatch");
final TableDataInsertAllResponse row1Failed =
new TableDataInsertAllResponse()
.setInsertErrors(
ImmutableList.of(
new InsertErrors().setIndex(1L).setErrors(ImmutableList.of(errorProto))));
final TableDataInsertAllResponse row0Failed =
new TableDataInsertAllResponse()
.setInsertErrors(
ImmutableList.of(
new InsertErrors().setIndex(0L).setErrors(ImmutableList.of(errorProto))));
MockSetupFunction row0FailureResponseFunction =
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenAnswer(invocation -> toStream(row0Failed));
};
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(row1Failed));
},
row0FailureResponseFunction,
row0FailureResponseFunction,
row0FailureResponseFunction);
DatasetServiceImpl dataService =
new DatasetServiceImpl(bigquery, null, PipelineOptionsFactory.create());
try {
dataService.insertAll(
ref,
rows,
null,
BackOffAdapter.toGcpBackOff(TEST_BACKOFF.backoff()),
TEST_BACKOFF,
new MockSleeper(),
InsertRetryPolicy.alwaysRetry(),
null,
null,
false,
false,
false,
null);
fail();
} catch (IOException e) {
assertThat(e, instanceOf(IOException.class));
assertThat(e.getMessage(), containsString("Insert failed:"));
assertThat(e.getMessage(), containsString("[{\"errors\":[{\"reason\":\"schemaMismatch\"}]"));
}
verifyAllResponsesAreRead();
expectedLogs.verifyInfo("Retrying 1 failed inserts to BigQuery");
verifyWriteMetricWasSet("project", "dataset", "table", "schemamismatch", 4);
}
  /**
   * Tests that {@link DatasetServiceImpl#insertAll} does not retry non-quota-exceeded failed
   * attempts.
   */
@Test
public void testFailInsertOtherRetry() throws Exception {
TableReference ref =
new TableReference().setProjectId("project").setDatasetId("dataset").setTableId("table");
List<FailsafeValueInSingleWindow<TableRow, TableRow>> rows = new ArrayList<>();
rows.add(wrapValue(new TableRow()));
final TableDataInsertAllResponse allRowsSucceeded =
new TableDataInsertAllResponse().setInsertErrors(ImmutableList.of());
setupMockResponses(
response -> {
when(response.getStatusCode()).thenReturn(403);
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getContent())
.thenReturn(toStream(errorWithReasonAndStatus("actually forbidden", 403)));
},
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(new TableDataInsertAllResponse()));
});
DatasetServiceImpl dataService =
new DatasetServiceImpl(bigquery, null, PipelineOptionsFactory.create());
thrown.expect(RuntimeException.class);
thrown.expectMessage("actually forbidden");
try {
dataService.insertAll(
ref,
rows,
null,
BackOffAdapter.toGcpBackOff(TEST_BACKOFF.backoff()),
TEST_BACKOFF,
new MockSleeper(),
InsertRetryPolicy.alwaysRetry(),
null,
null,
false,
false,
false,
null);
} finally {
verify(responses[0], atLeastOnce()).getStatusCode();
verify(responses[0]).getContent();
verify(responses[0]).getContentType();
verify(responses[1], never()).getStatusCode();
verify(responses[1], never()).getContent();
verify(responses[1], never()).getContentType();
}
verifyWriteMetricWasSet("project", "dataset", "table", "actually forbidden", 1);
}
  /** Tests that {@link DatasetServiceImpl#insertAll} logs a detailed error for insert timeouts. */
  @Test
  public void testInsertTimeoutLog() throws Exception {
    TableReference ref =
        new TableReference().setProjectId("project").setDatasetId("dataset").setTableId("table");
    List<FailsafeValueInSingleWindow<TableRow, TableRow>> rows = new ArrayList<>();
    rows.add(wrapValue(new TableRow()));
    // A 400 "No rows present in the request." is BigQuery's symptom of a request timeout.
    setupMockResponses(
        response -> {
          when(response.getStatusCode()).thenReturn(400);
          when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
          when(response.getContent())
              .thenReturn(
                  toStream(errorWithReasonAndStatus("No rows present in the request.", 400)));
        });
    DatasetServiceImpl dataService =
        new DatasetServiceImpl(bigquery, null, PipelineOptionsFactory.create());
    RuntimeException e =
        assertThrows(
            RuntimeException.class,
            () ->
                dataService.insertAll(
                    ref,
                    rows,
                    null,
                    BackOffAdapter.toGcpBackOff(TEST_BACKOFF.backoff()),
                    TEST_BACKOFF,
                    new MockSleeper(),
                    InsertRetryPolicy.alwaysRetry(),
                    null,
                    null,
                    false,
                    false,
                    false,
                    null));
    assertThat(e.getCause().getMessage(), containsString("No rows present in the request."));
    verifyAllResponsesAreRead();
    // The service should log an explanation of the likely cause alongside the failure.
    expectedLogs.verifyError("No rows present in the request error likely caused by");
    verifyWriteMetricWasSet("project", "dataset", "table", "no rows present in the request.", 1);
  }
  /**
   * Tests that {@link DatasetServiceImpl#insertAll} retries rows according to the supplied retry
   * policy and returns the list of rows not retried.
   */
@Test
public void testInsertRetryPolicy() throws InterruptedException, IOException {
TableReference ref =
new TableReference().setProjectId("project").setDatasetId("dataset").setTableId("table");
List<FailsafeValueInSingleWindow<TableRow, TableRow>> rows =
ImmutableList.of(wrapValue(new TableRow()), wrapValue(new TableRow()));
final TableDataInsertAllResponse firstFailure =
new TableDataInsertAllResponse()
.setInsertErrors(
ImmutableList.of(
new InsertErrors()
.setIndex(0L)
.setErrors(ImmutableList.of(new ErrorProto().setReason("timeout"))),
new InsertErrors()
.setIndex(1L)
.setErrors(ImmutableList.of(new ErrorProto().setReason("invalid")))));
final TableDataInsertAllResponse secondFialure =
new TableDataInsertAllResponse()
.setInsertErrors(
ImmutableList.of(
new InsertErrors()
.setIndex(0L)
.setErrors(ImmutableList.of(new ErrorProto().setReason("timeout")))));
final TableDataInsertAllResponse allRowsSucceeded = new TableDataInsertAllResponse();
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(firstFailure));
},
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(secondFialure));
},
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(allRowsSucceeded));
});
DatasetServiceImpl dataService =
new DatasetServiceImpl(bigquery, null, PipelineOptionsFactory.create());
List<ValueInSingleWindow<TableRow>> failedInserts = Lists.newArrayList();
dataService.insertAll(
ref,
rows,
null,
BackOffAdapter.toGcpBackOff(TEST_BACKOFF.backoff()),
TEST_BACKOFF,
new MockSleeper(),
InsertRetryPolicy.retryTransientErrors(),
failedInserts,
ErrorContainer.TABLE_ROW_ERROR_CONTAINER,
false,
false,
false,
null);
assertEquals(1, failedInserts.size());
expectedLogs.verifyInfo("Retrying 1 failed inserts to BigQuery");
verifyWriteMetricWasSet("project", "dataset", "table", "timeout", 2);
}
  /**
   * Tests that {@link DatasetServiceImpl#insertAll} respects the skipInvalidRows,
   * ignoreUnknownValues and ignoreInsertIds parameters.
   */
  @Test
  public void testSkipInvalidRowsIgnoreUnknownIgnoreInsertIdsValuesStreaming()
      throws InterruptedException, IOException {
    TableReference ref =
        new TableReference().setProjectId("project").setDatasetId("dataset").setTableId("table");
    List<FailsafeValueInSingleWindow<TableRow, TableRow>> rows =
        ImmutableList.of(wrapValue(new TableRow()), wrapValue(new TableRow()));
    final TableDataInsertAllResponse allRowsSucceeded = new TableDataInsertAllResponse();
    MockSetupFunction allRowsSucceededResponseFunction =
        response -> {
          when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
          when(response.getStatusCode()).thenReturn(200);
          when(response.getContent()).thenReturn(toStream(allRowsSucceeded));
        };
    setupMockResponses(allRowsSucceededResponseFunction, allRowsSucceededResponseFunction);
    DatasetServiceImpl dataService =
        new DatasetServiceImpl(bigquery, null, PipelineOptionsFactory.create());
    // First call: all three flags false — the request must not set them.
    dataService.insertAll(
        ref,
        rows,
        null,
        BackOffAdapter.toGcpBackOff(TEST_BACKOFF.backoff()),
        TEST_BACKOFF,
        new MockSleeper(),
        InsertRetryPolicy.neverRetry(),
        Lists.newArrayList(),
        ErrorContainer.TABLE_ROW_ERROR_CONTAINER,
        false,
        false,
        false,
        null);
    TableDataInsertAllRequest parsedRequest =
        fromString(request.getContentAsString(), TableDataInsertAllRequest.class);
    assertFalse(parsedRequest.getSkipInvalidRows());
    assertFalse(parsedRequest.getIgnoreUnknownValues());
    // Second call: all three flags true — the request must set them and omit insert ids.
    dataService.insertAll(
        ref,
        rows,
        null,
        BackOffAdapter.toGcpBackOff(TEST_BACKOFF.backoff()),
        TEST_BACKOFF,
        new MockSleeper(),
        InsertRetryPolicy.neverRetry(),
        Lists.newArrayList(),
        ErrorContainer.TABLE_ROW_ERROR_CONTAINER,
        true,
        true,
        true,
        null);
    parsedRequest = fromString(request.getContentAsString(), TableDataInsertAllRequest.class);
    assertTrue(parsedRequest.getSkipInvalidRows());
    assertTrue(parsedRequest.getIgnoreUnknownValues());
    // ignoreInsertIds=true strips the per-row insert ids from the outgoing request.
    assertNull(parsedRequest.getRows().get(0).getInsertId());
    assertNull(parsedRequest.getRows().get(1).getInsertId());
    verifyWriteMetricWasSet("project", "dataset", "table", "ok", 2);
  }
/** Deserializes a JSON string response into the requested {@link GenericJson} subclass. */
private static <T extends GenericJson> T fromString(String content, Class<T> clazz)
    throws IOException {
  JacksonFactory jsonFactory = JacksonFactory.getDefaultInstance();
  return jsonFactory.fromString(content, clazz);
}
/** Serializes a {@link GenericJson} object and exposes the resulting bytes as a stream. */
private static InputStream toStream(GenericJson content) throws IOException {
  byte[] serialized = JacksonFactory.getDefaultInstance().toByteArray(content);
  return new ByteArrayInputStream(serialized);
}
/** Builds the error JSON payload shape that Google APIs return for a failed request. */
private static GoogleJsonErrorContainer errorWithReasonAndStatus(String reason, int status) {
  ErrorInfo errorInfo = new ErrorInfo();
  errorInfo.setReason(reason);
  errorInfo.setDomain("global");
  GoogleJsonError jsonError = new GoogleJsonError();
  jsonError.setCode(status);
  jsonError.setMessage(reason);
  jsonError.setErrors(ImmutableList.of(errorInfo));
  GoogleJsonErrorContainer container = new GoogleJsonErrorContainer();
  container.setError(jsonError);
  return container;
}
@Test
// getErrorInfo() should surface the first ErrorInfo of a GoogleJsonResponseException
// and return null for any other kind of IOException.
public void testGetErrorInfo() throws IOException {
DatasetServiceImpl dataService =
new DatasetServiceImpl(bigquery, null, PipelineOptionsFactory.create());
ErrorInfo info = new ErrorInfo();
List<ErrorInfo> infoList = new ArrayList<>();
infoList.add(info);
info.setReason("QuotaExceeded");
GoogleJsonError error = new GoogleJsonError();
error.setErrors(infoList);
HttpResponseException.Builder builder = mock(HttpResponseException.Builder.class);
IOException validException = new GoogleJsonResponseException(builder, error);
IOException invalidException = new IOException();
assertEquals(info.getReason(), DatasetServiceImpl.getErrorInfo(validException).getReason());
assertNull(DatasetServiceImpl.getErrorInfo(invalidException));
}
@Test
// A 200 response whose body contains the table should be returned as-is by tryCreateTable().
public void testCreateTableSucceeds() throws IOException {
TableReference ref =
new TableReference().setProjectId("project").setDatasetId("dataset").setTableId("table");
Table testTable = new Table().setTableReference(ref);
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(testTable));
});
BigQueryServicesImpl.DatasetServiceImpl services =
new BigQueryServicesImpl.DatasetServiceImpl(
bigquery, null, PipelineOptionsFactory.create());
Table ret =
services.tryCreateTable(
testTable, new RetryBoundedBackOff(BackOff.ZERO_BACKOFF, 0), Sleeper.DEFAULT);
assertEquals(testTable, ret);
verifyAllResponsesAreRead();
}
/** Tests that {@link BigQueryServicesImpl} does not retry non-rate-limited attempts. */
@Test
public void testCreateTableDoesNotRetry() throws IOException {
TableReference ref =
new TableReference().setProjectId("project").setDatasetId("dataset").setTableId("table");
Table testTable = new Table().setTableReference(ref);
// First response is a non-retryable 403; the queued success response must never be read.
setupMockResponses(
response -> {
when(response.getStatusCode()).thenReturn(403);
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getContent())
.thenReturn(toStream(errorWithReasonAndStatus("actually forbidden", 403)));
},
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(testTable));
});
thrown.expect(GoogleJsonResponseException.class);
thrown.expectMessage("actually forbidden");
BigQueryServicesImpl.DatasetServiceImpl services =
new BigQueryServicesImpl.DatasetServiceImpl(
bigquery, null, PipelineOptionsFactory.create());
try {
services.tryCreateTable(
testTable, new RetryBoundedBackOff(BackOff.ZERO_BACKOFF, 3), Sleeper.DEFAULT);
fail();
} catch (IOException e) {
// Only the failing response may have been consumed: proves no retry occurred.
verify(responses[0], atLeastOnce()).getStatusCode();
verify(responses[0]).getContent();
verify(responses[0]).getContentType();
verify(responses[1], never()).getStatusCode();
verify(responses[1], never()).getContent();
verify(responses[1], never()).getContentType();
throw e;
}
}
/** Tests that table creation succeeds when the table already exists. */
@Test
public void testCreateTableSucceedsAlreadyExists() throws IOException {
TableReference ref =
new TableReference().setProjectId("project").setDatasetId("dataset").setTableId("table");
TableSchema schema =
new TableSchema()
.setFields(
ImmutableList.of(
new TableFieldSchema().setName("column1").setType("String"),
new TableFieldSchema().setName("column2").setType("Integer")));
Table testTable = new Table().setTableReference(ref).setSchema(schema);
// 409 Conflict signals the table already exists; tryCreateTable() then returns null.
setupMockResponses(
response -> {
when(response.getStatusCode()).thenReturn(409);
});
BigQueryServicesImpl.DatasetServiceImpl services =
new BigQueryServicesImpl.DatasetServiceImpl(
bigquery, null, PipelineOptionsFactory.create());
Table ret =
services.tryCreateTable(
testTable, new RetryBoundedBackOff(BackOff.ZERO_BACKOFF, 0), Sleeper.DEFAULT);
assertNull(ret);
verifyAllResponsesAreRead();
}
/** Tests that {@link BigQueryServicesImpl} retries quota rate limited attempts. */
@Test
public void testCreateTableRetry() throws IOException {
TableReference ref =
new TableReference().setProjectId("project").setDatasetId("dataset").setTableId("table");
Table testTable = new Table().setTableReference(ref);
// First response: retryable rateLimitExceeded; second response: success.
setupMockResponses(
response -> {
when(response.getStatusCode()).thenReturn(403);
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getContent())
.thenReturn(toStream(errorWithReasonAndStatus("rateLimitExceeded", 403)));
},
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(testTable));
});
BigQueryServicesImpl.DatasetServiceImpl services =
new BigQueryServicesImpl.DatasetServiceImpl(
bigquery, null, PipelineOptionsFactory.create());
Table ret =
services.tryCreateTable(
testTable, new RetryBoundedBackOff(BackOff.ZERO_BACKOFF, 3), Sleeper.DEFAULT);
assertEquals(testTable, ret);
verifyAllResponsesAreRead();
assertNotNull(ret.getTableReference());
expectedLogs.verifyInfo(
"Quota limit reached when creating table project:dataset.table, "
+ "retrying up to 5 minutes");
}
/** Tests that {@link DatasetServiceImpl#insertAll} reports failed rows as {@link TableRow}s. */
@Test
// Both rows fail with non-retryable reasons under neverRetry(); the raw TableRows
// must be handed back through the failedInserts list.
public void testSimpleErrorRetrieval() throws InterruptedException, IOException {
TableReference ref =
new TableReference().setProjectId("project").setDatasetId("dataset").setTableId("table");
List<FailsafeValueInSingleWindow<TableRow, TableRow>> rows =
ImmutableList.of(
wrapValue(new TableRow().set("a", 1)), wrapValue(new TableRow().set("b", 2)));
final List<ValueInSingleWindow<TableRow>> expected =
ImmutableList.of(
wrapErrorValue(new TableRow().set("a", 1)), wrapErrorValue(new TableRow().set("b", 2)));
final TableDataInsertAllResponse failures =
new TableDataInsertAllResponse()
.setInsertErrors(
ImmutableList.of(
new InsertErrors()
.setIndex(0L)
.setErrors(ImmutableList.of(new ErrorProto().setReason("timeout"))),
new InsertErrors()
.setIndex(1L)
.setErrors(ImmutableList.of(new ErrorProto().setReason("invalid")))));
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(failures));
});
DatasetServiceImpl dataService =
new DatasetServiceImpl(bigquery, null, PipelineOptionsFactory.create());
List<ValueInSingleWindow<TableRow>> failedInserts = Lists.newArrayList();
dataService.insertAll(
ref,
rows,
null,
BackOffAdapter.toGcpBackOff(TEST_BACKOFF.backoff()),
TEST_BACKOFF,
new MockSleeper(),
InsertRetryPolicy.neverRetry(),
failedInserts,
ErrorContainer.TABLE_ROW_ERROR_CONTAINER,
false,
false,
false,
null);
assertThat(failedInserts, is(expected));
}
/** Tests that {@link DatasetServiceImpl#insertAll} reports failed rows as {@link BigQueryInsertError}s. */
@Test
// Both rows fail with non-retryable reasons under neverRetry(); each failed row must be
// wrapped in a BigQueryInsertError carrying its insert errors and the target table.
public void testExtendedErrorRetrieval() throws InterruptedException, IOException {
TableReference ref =
new TableReference().setProjectId("project").setDatasetId("dataset").setTableId("table");
List<FailsafeValueInSingleWindow<TableRow, TableRow>> rows =
ImmutableList.of(
wrapValue(new TableRow().set("a", 1)), wrapValue(new TableRow().set("b", 2)));
final TableDataInsertAllResponse failures =
new TableDataInsertAllResponse()
.setInsertErrors(
ImmutableList.of(
new InsertErrors()
.setIndex(0L)
.setErrors(ImmutableList.of(new ErrorProto().setReason("timeout"))),
new InsertErrors()
.setIndex(1L)
.setErrors(ImmutableList.of(new ErrorProto().setReason("invalid")))));
final List<ValueInSingleWindow<BigQueryInsertError>> expected =
ImmutableList.of(
wrapErrorValue(
new BigQueryInsertError(
rows.get(0).getValue(), failures.getInsertErrors().get(0), ref)),
wrapErrorValue(
new BigQueryInsertError(
rows.get(1).getValue(), failures.getInsertErrors().get(1), ref)));
// Fix: the original stubbed getContentType() twice; one stubbing is sufficient.
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(failures));
});
DatasetServiceImpl dataService =
new DatasetServiceImpl(bigquery, null, PipelineOptionsFactory.create());
List<ValueInSingleWindow<BigQueryInsertError>> failedInserts = Lists.newArrayList();
dataService.insertAll(
ref,
rows,
null,
BackOffAdapter.toGcpBackOff(TEST_BACKOFF.backoff()),
TEST_BACKOFF,
new MockSleeper(),
InsertRetryPolicy.neverRetry(),
failedInserts,
ErrorContainer.BIG_QUERY_INSERT_ERROR_ERROR_CONTAINER,
false,
false,
false,
null);
assertThat(failedInserts, is(expected));
}
@Test
// A successful createReadSession should record an "ok" read-request metric for the table.
public void testCreateReadSessionSetsRequestCountMetric()
throws InterruptedException, IOException {
BigQueryServicesImpl.StorageClientImpl client =
mock(BigQueryServicesImpl.StorageClientImpl.class);
CreateReadSessionRequest.Builder builder = CreateReadSessionRequest.newBuilder();
builder.getReadSessionBuilder().setTable("myproject:mydataset.mytable");
CreateReadSessionRequest request = builder.build();
when(client.callCreateReadSession(request))
.thenReturn(ReadSession.newBuilder().build());
// Only createReadSession() runs for real; the underlying RPC stays mocked.
when(client.createReadSession(any())).thenCallRealMethod();
client.createReadSession(request);
verifyReadMetricWasSet("myproject", "mydataset", "mytable", "ok", 1);
}
@Test
// A failed createReadSession should record the metric under the lower-cased status code.
public void testCreateReadSessionSetsRequestCountMetricOnError()
throws InterruptedException, IOException {
BigQueryServicesImpl.StorageClientImpl client =
mock(BigQueryServicesImpl.StorageClientImpl.class);
CreateReadSessionRequest.Builder builder = CreateReadSessionRequest.newBuilder();
builder.getReadSessionBuilder().setTable("myproject:mydataset.mytable");
CreateReadSessionRequest request = builder.build();
StatusCode statusCode =
new StatusCode() {
@Override
public Code getCode() {
return Code.NOT_FOUND;
}
@Override
public Object getTransportCode() {
return null;
}
};
when(client.callCreateReadSession(request))
.thenThrow(new ApiException("Not Found", null, statusCode, false));
when(client.createReadSession(any())).thenCallRealMethod();
thrown.expect(ApiException.class);
thrown.expectMessage("Not Found");
client.createReadSession(request);
// Reached only if the expected exception machinery verifies the metric post-throw.
verifyReadMetricWasSet("myproject", "mydataset", "mytable", "not_found", 1);
}
@Test
// A successful readRows should record an "ok" read-request metric for the table.
public void testReadRowsSetsRequestCountMetric() throws InterruptedException, IOException {
BigQueryServices.StorageClient client = mock(BigQueryServicesImpl.StorageClientImpl.class);
ReadRowsRequest request = null;
// Minimal stream stub: the metric logic does not consume the iterator.
BigQueryServices.BigQueryServerStream<ReadRowsResponse> response =
new BigQueryServices.BigQueryServerStream<ReadRowsResponse>() {
@Override
public Iterator<ReadRowsResponse> iterator() {
return null;
}
@Override
public void cancel() {}
};
when(client.readRows(request)).thenReturn(response);
when(client.readRows(any(), any())).thenCallRealMethod();
client.readRows(request, "myproject:mydataset.mytable");
verifyReadMetricWasSet("myproject", "mydataset", "mytable", "ok", 1);
}
@Test
// A failed readRows should record the metric under the lower-cased status code.
public void testReadRowsSetsRequestCountMetricOnError() throws InterruptedException, IOException {
BigQueryServices.StorageClient client = mock(BigQueryServicesImpl.StorageClientImpl.class);
ReadRowsRequest request = null;
StatusCode statusCode =
new StatusCode() {
@Override
public Code getCode() {
return Code.INTERNAL;
}
@Override
public Object getTransportCode() {
return null;
}
};
when(client.readRows(request))
.thenThrow(new ApiException("Internal", null, statusCode, false));
when(client.readRows(any(), any())).thenCallRealMethod();
thrown.expect(ApiException.class);
thrown.expectMessage("Internal");
client.readRows(request, "myproject:mydataset.mytable");
verifyReadMetricWasSet("myproject", "mydataset", "mytable", "internal", 1);
}
@Test
// A successful splitReadStream should record an "ok" read-request metric for the table.
public void testSplitReadStreamSetsRequestCountMetric() throws InterruptedException, IOException {
BigQueryServices.StorageClient client = mock(BigQueryServicesImpl.StorageClientImpl.class);
SplitReadStreamRequest request = null;
when(client.splitReadStream(request))
.thenReturn(SplitReadStreamResponse.newBuilder().build());
when(client.splitReadStream(any(), any())).thenCallRealMethod();
client.splitReadStream(request, "myproject:mydataset.mytable");
verifyReadMetricWasSet("myproject", "mydataset", "mytable", "ok", 1);
}
@Test
// A failed splitReadStream should record the metric under the lower-cased status code.
public void testSplitReadStreamSetsRequestCountMetricOnError()
throws InterruptedException, IOException {
BigQueryServices.StorageClient client = mock(BigQueryServicesImpl.StorageClientImpl.class);
SplitReadStreamRequest request = null;
StatusCode statusCode =
new StatusCode() {
@Override
public Code getCode() {
return Code.RESOURCE_EXHAUSTED;
}
@Override
public Object getTransportCode() {
return null;
}
};
when(client.splitReadStream(request))
.thenThrow(
new ApiException(
"Resource Exhausted", null, statusCode, false));
when(client.splitReadStream(any(), any())).thenCallRealMethod();
thrown.expect(ApiException.class);
thrown.expectMessage("Resource Exhausted");
client.splitReadStream(request, "myproject:mydataset.mytable");
verifyReadMetricWasSet("myproject", "mydataset", "mytable", "resource_exhausted", 1);
}
@Test
// RetryAttemptCounter should add RetryInfo-suggested delays to the throttling-msecs
// counter, but only for RESOURCE_EXHAUSTED statuses that carry retry metadata.
public void testRetryAttemptCounter() {
BigQueryServicesImpl.StorageClientImpl.RetryAttemptCounter counter =
new BigQueryServicesImpl.StorageClientImpl.RetryAttemptCounter();
// Suggested backoff of 123.456 seconds == 123456 ms.
RetryInfo retryInfo =
RetryInfo.newBuilder()
.setRetryDelay(
com.google.protobuf.Duration.newBuilder()
.setSeconds(123)
.setNanos(456000000)
.build())
.build();
Metadata metadata = new Metadata();
metadata.put(
Metadata.Key.of(
"google.rpc.retryinfo-bin",
new Metadata.BinaryMarshaller<RetryInfo>() {
@Override
public byte[] toBytes(RetryInfo value) {
return value.toByteArray();
}
@Override
public RetryInfo parseBytes(byte[] serialized) {
try {
Parser<RetryInfo> parser = RetryInfo.newBuilder().build().getParserForType();
return parser.parseFrom(serialized);
} catch (Exception e) {
return null;
}
}
}),
retryInfo);
MetricName metricName =
MetricName.named(
"org.apache.beam.sdk.io.gcp.bigquery.BigQueryServicesImpl$StorageClientImpl",
"throttling-msecs");
MetricsContainerImpl container =
(MetricsContainerImpl) MetricsEnvironment.getCurrentContainer();
// Null status/trailers: nothing is recorded.
counter.onRetryAttempt(null, null);
assertEquals(0, (long) container.getCounter(metricName).getCumulative());
// RESOURCE_EXHAUSTED without RetryInfo metadata: nothing is recorded.
counter.onRetryAttempt(
Status.RESOURCE_EXHAUSTED.withDescription("You have consumed some quota"), new Metadata());
assertEquals(0, (long) container.getCounter(metricName).getCumulative());
// RESOURCE_EXHAUSTED with RetryInfo: 123456 ms accumulated.
counter.onRetryAttempt(Status.RESOURCE_EXHAUSTED.withDescription("Stop for a while"), metadata);
assertEquals(123456, (long) container.getCounter(metricName).getCumulative());
// Other status codes are ignored even when RetryInfo is attached.
counter.onRetryAttempt(Status.UNAVAILABLE.withDescription("Server is gone"), metadata);
assertEquals(123456, (long) container.getCounter(metricName).getCumulative());
}
}

@Test
public void testInsertFailsGracefully() throws Exception {
// NOTE(review): this is the body of testInsertFailsGracefully — its signature sits on the
// garbled preceding line. A persistent schemaMismatch failure on one row must eventually
// surface as an IOException describing the failed inserts.
TableReference ref =
new TableReference().setProjectId("project").setDatasetId("dataset").setTableId("table");
List<FailsafeValueInSingleWindow<TableRow, TableRow>> rows =
ImmutableList.of(wrapValue(new TableRow()), wrapValue(new TableRow()));
ErrorProto errorProto = new ErrorProto().setReason("schemaMismatch");
final TableDataInsertAllResponse row1Failed =
new TableDataInsertAllResponse()
.setInsertErrors(
ImmutableList.of(
new InsertErrors().setIndex(1L).setErrors(ImmutableList.of(errorProto))));
final TableDataInsertAllResponse row0Failed =
new TableDataInsertAllResponse()
.setInsertErrors(
ImmutableList.of(
new InsertErrors().setIndex(0L).setErrors(ImmutableList.of(errorProto))));
// Row 0 keeps failing on every retry; thenAnswer re-creates the stream per invocation.
MockSetupFunction row0FailureResponseFunction =
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenAnswer(invocation -> toStream(row0Failed));
};
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(row1Failed));
},
row0FailureResponseFunction,
row0FailureResponseFunction,
row0FailureResponseFunction);
DatasetServiceImpl dataService =
new DatasetServiceImpl(bigquery, null, PipelineOptionsFactory.create());
try {
dataService.insertAll(
ref,
rows,
null,
BackOffAdapter.toGcpBackOff(TEST_BACKOFF.backoff()),
TEST_BACKOFF,
new MockSleeper(),
InsertRetryPolicy.alwaysRetry(),
null,
null,
false,
false,
false,
null);
fail();
} catch (IOException e) {
assertThat(e, instanceOf(IOException.class));
assertThat(e.getMessage(), containsString("Insert failed:"));
assertThat(e.getMessage(), containsString("[{\"errors\":[{\"reason\":\"schemaMismatch\"}]"));
}
verifyAllResponsesAreRead();
expectedLogs.verifyInfo("Retrying 1 failed inserts to BigQuery");
verifyWriteMetricWasSet("project", "dataset", "table", "schemamismatch", 4);
}
/**
 * Tests that {@link DatasetServiceImpl#insertAll} does not retry
 * non-quota-exceeded attempts.
 */
@Test
// insertAll() must fail fast — no retry — on a 403 that is not a quota/rate-limit error.
// Fix: removed the unused local `allRowsSucceeded` the original declared but never used.
public void testFailInsertOtherRetry() throws Exception {
TableReference ref =
new TableReference().setProjectId("project").setDatasetId("dataset").setTableId("table");
List<FailsafeValueInSingleWindow<TableRow, TableRow>> rows = new ArrayList<>();
rows.add(wrapValue(new TableRow()));
setupMockResponses(
response -> {
when(response.getStatusCode()).thenReturn(403);
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getContent())
.thenReturn(toStream(errorWithReasonAndStatus("actually forbidden", 403)));
},
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(new TableDataInsertAllResponse()));
});
DatasetServiceImpl dataService =
new DatasetServiceImpl(bigquery, null, PipelineOptionsFactory.create());
thrown.expect(RuntimeException.class);
thrown.expectMessage("actually forbidden");
try {
dataService.insertAll(
ref,
rows,
null,
BackOffAdapter.toGcpBackOff(TEST_BACKOFF.backoff()),
TEST_BACKOFF,
new MockSleeper(),
InsertRetryPolicy.alwaysRetry(),
null,
null,
false,
false,
false,
null);
} finally {
// Only the failing response may be consumed; the untouched success response
// proves no retry happened.
verify(responses[0], atLeastOnce()).getStatusCode();
verify(responses[0]).getContent();
verify(responses[0]).getContentType();
verify(responses[1], never()).getStatusCode();
verify(responses[1], never()).getContent();
verify(responses[1], never()).getContentType();
}
verifyWriteMetricWasSet("project", "dataset", "table", "actually forbidden", 1);
}
/** Tests that {@link DatasetServiceImpl#insertAll} logs a timeout hint on "no rows present" errors. */
@Test
// A 400 "No rows present in the request." error should fail the insert and log a hint
// that the likely cause is a request timeout.
public void testInsertTimeoutLog() throws Exception {
TableReference ref =
new TableReference().setProjectId("project").setDatasetId("dataset").setTableId("table");
List<FailsafeValueInSingleWindow<TableRow, TableRow>> rows = new ArrayList<>();
rows.add(wrapValue(new TableRow()));
setupMockResponses(
response -> {
when(response.getStatusCode()).thenReturn(400);
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getContent())
.thenReturn(
toStream(errorWithReasonAndStatus(" No rows present in the request. ", 400)));
});
DatasetServiceImpl dataService =
new DatasetServiceImpl(bigquery, null, PipelineOptionsFactory.create());
RuntimeException e =
assertThrows(
RuntimeException.class,
() ->
dataService.insertAll(
ref,
rows,
null,
BackOffAdapter.toGcpBackOff(TEST_BACKOFF.backoff()),
TEST_BACKOFF,
new MockSleeper(),
InsertRetryPolicy.alwaysRetry(),
null,
null,
false,
false,
false,
null));
assertThat(e.getCause().getMessage(), containsString("No rows present in the request."));
verifyAllResponsesAreRead();
expectedLogs.verifyError("No rows present in the request error likely caused by");
// The error reason is recorded in the metric lower-cased, surrounding spaces preserved.
verifyWriteMetricWasSet("project", "dataset", "table", " no rows present in the request. ", 1);
}
/**
 * Tests that {@link DatasetServiceImpl#insertAll} retries rows according to the retry policy
 * and returns the list of rows not retried.
 */
@Test
// Under retryTransientErrors(): the "timeout" row is retried until it succeeds, while the
// "invalid" row is handed back via failedInserts without retry.
// Fix: renamed the misspelled local `secondFialure` to `secondFailure`.
public void testInsertRetryPolicy() throws InterruptedException, IOException {
TableReference ref =
new TableReference().setProjectId("project").setDatasetId("dataset").setTableId("table");
List<FailsafeValueInSingleWindow<TableRow, TableRow>> rows =
ImmutableList.of(wrapValue(new TableRow()), wrapValue(new TableRow()));
// First attempt: row 0 fails transiently ("timeout"), row 1 fails permanently ("invalid").
final TableDataInsertAllResponse firstFailure =
new TableDataInsertAllResponse()
.setInsertErrors(
ImmutableList.of(
new InsertErrors()
.setIndex(0L)
.setErrors(ImmutableList.of(new ErrorProto().setReason("timeout"))),
new InsertErrors()
.setIndex(1L)
.setErrors(ImmutableList.of(new ErrorProto().setReason("invalid")))));
// Second attempt: the retried row fails transiently again.
final TableDataInsertAllResponse secondFailure =
new TableDataInsertAllResponse()
.setInsertErrors(
ImmutableList.of(
new InsertErrors()
.setIndex(0L)
.setErrors(ImmutableList.of(new ErrorProto().setReason("timeout")))));
// Third attempt succeeds.
final TableDataInsertAllResponse allRowsSucceeded = new TableDataInsertAllResponse();
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(firstFailure));
},
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(secondFailure));
},
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(allRowsSucceeded));
});
DatasetServiceImpl dataService =
new DatasetServiceImpl(bigquery, null, PipelineOptionsFactory.create());
List<ValueInSingleWindow<TableRow>> failedInserts = Lists.newArrayList();
dataService.insertAll(
ref,
rows,
null,
BackOffAdapter.toGcpBackOff(TEST_BACKOFF.backoff()),
TEST_BACKOFF,
new MockSleeper(),
InsertRetryPolicy.retryTransientErrors(),
failedInserts,
ErrorContainer.TABLE_ROW_ERROR_CONTAINER,
false,
false,
false,
null);
// Only the permanently-failed row ends up in failedInserts.
assertEquals(1, failedInserts.size());
expectedLogs.verifyInfo("Retrying 1 failed inserts to BigQuery");
verifyWriteMetricWasSet("project", "dataset", "table", "timeout", 2);
}
/**
 * Tests that {@link DatasetServiceImpl#insertAll} passes through the skipInvalidRows,
 * ignoreUnknownValues and ignoreInsertIds parameters.
 */
@Test
// Verifies that insertAll() forwards the skipInvalidRows, ignoreUnknownValues and
// ignoreInsertIds flags by deserializing the HTTP request body actually sent to BigQuery.
public void testSkipInvalidRowsIgnoreUnknownIgnoreInsertIdsValuesStreaming()
throws InterruptedException, IOException {
TableReference ref =
new TableReference().setProjectId("project").setDatasetId("dataset").setTableId("table");
List<FailsafeValueInSingleWindow<TableRow, TableRow>> rows =
ImmutableList.of(wrapValue(new TableRow()), wrapValue(new TableRow()));
// An empty response means every row was inserted; queued once per insertAll call below.
final TableDataInsertAllResponse allRowsSucceeded = new TableDataInsertAllResponse();
MockSetupFunction allRowsSucceededResponseFunction =
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(allRowsSucceeded));
};
setupMockResponses(allRowsSucceededResponseFunction, allRowsSucceededResponseFunction);
DatasetServiceImpl dataService =
new DatasetServiceImpl(bigquery, null, PipelineOptionsFactory.create());
// First call: all three flags off.
dataService.insertAll(
ref,
rows,
null,
BackOffAdapter.toGcpBackOff(TEST_BACKOFF.backoff()),
TEST_BACKOFF,
new MockSleeper(),
InsertRetryPolicy.neverRetry(),
Lists.newArrayList(),
ErrorContainer.TABLE_ROW_ERROR_CONTAINER,
false,
false,
false,
null);
TableDataInsertAllRequest parsedRequest =
fromString(request.getContentAsString(), TableDataInsertAllRequest.class);
assertFalse(parsedRequest.getSkipInvalidRows());
assertFalse(parsedRequest.getIgnoreUnknownValues());
// Second call: all three flags on; insert ids must then be absent from the rows.
dataService.insertAll(
ref,
rows,
null,
BackOffAdapter.toGcpBackOff(TEST_BACKOFF.backoff()),
TEST_BACKOFF,
new MockSleeper(),
InsertRetryPolicy.neverRetry(),
Lists.newArrayList(),
ErrorContainer.TABLE_ROW_ERROR_CONTAINER,
true,
true,
true,
null);
parsedRequest = fromString(request.getContentAsString(), TableDataInsertAllRequest.class);
assertTrue(parsedRequest.getSkipInvalidRows());
assertTrue(parsedRequest.getIgnoreUnknownValues());
assertNull(parsedRequest.getRows().get(0).getInsertId());
assertNull(parsedRequest.getRows().get(1).getInsertId());
verifyWriteMetricWasSet("project", "dataset", "table", "ok", 2);
}
/** Deserializes a JSON string response into the requested {@link GenericJson} subclass. */
private static <T extends GenericJson> T fromString(String content, Class<T> clazz)
    throws IOException {
  JacksonFactory jsonFactory = JacksonFactory.getDefaultInstance();
  return jsonFactory.fromString(content, clazz);
}
/** Serializes a {@link GenericJson} object and exposes the resulting bytes as a stream. */
private static InputStream toStream(GenericJson content) throws IOException {
  byte[] serialized = JacksonFactory.getDefaultInstance().toByteArray(content);
  return new ByteArrayInputStream(serialized);
}
/** Builds the error JSON payload shape that Google APIs return for a failed request. */
private static GoogleJsonErrorContainer errorWithReasonAndStatus(String reason, int status) {
  ErrorInfo errorInfo = new ErrorInfo();
  errorInfo.setReason(reason);
  errorInfo.setDomain("global");
  GoogleJsonError jsonError = new GoogleJsonError();
  jsonError.setCode(status);
  jsonError.setMessage(reason);
  jsonError.setErrors(ImmutableList.of(errorInfo));
  GoogleJsonErrorContainer container = new GoogleJsonErrorContainer();
  container.setError(jsonError);
  return container;
}
@Test
// getErrorInfo() should surface the first ErrorInfo of a GoogleJsonResponseException
// and return null for any other kind of IOException.
public void testGetErrorInfo() throws IOException {
DatasetServiceImpl dataService =
new DatasetServiceImpl(bigquery, null, PipelineOptionsFactory.create());
ErrorInfo info = new ErrorInfo();
List<ErrorInfo> infoList = new ArrayList<>();
infoList.add(info);
info.setReason("QuotaExceeded");
GoogleJsonError error = new GoogleJsonError();
error.setErrors(infoList);
HttpResponseException.Builder builder = mock(HttpResponseException.Builder.class);
IOException validException = new GoogleJsonResponseException(builder, error);
IOException invalidException = new IOException();
assertEquals(info.getReason(), DatasetServiceImpl.getErrorInfo(validException).getReason());
assertNull(DatasetServiceImpl.getErrorInfo(invalidException));
}
@Test
// A 200 response whose body contains the table should be returned as-is by tryCreateTable().
public void testCreateTableSucceeds() throws IOException {
TableReference ref =
new TableReference().setProjectId("project").setDatasetId("dataset").setTableId("table");
Table testTable = new Table().setTableReference(ref);
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(testTable));
});
BigQueryServicesImpl.DatasetServiceImpl services =
new BigQueryServicesImpl.DatasetServiceImpl(
bigquery, null, PipelineOptionsFactory.create());
Table ret =
services.tryCreateTable(
testTable, new RetryBoundedBackOff(BackOff.ZERO_BACKOFF, 0), Sleeper.DEFAULT);
assertEquals(testTable, ret);
verifyAllResponsesAreRead();
}
/** Tests that {@link BigQueryServicesImpl} does not retry non-rate-limited attempts. */
@Test
public void testCreateTableDoesNotRetry() throws IOException {
TableReference ref =
new TableReference().setProjectId("project").setDatasetId("dataset").setTableId("table");
Table testTable = new Table().setTableReference(ref);
// First response is a non-retryable 403; the queued success response must never be read.
setupMockResponses(
response -> {
when(response.getStatusCode()).thenReturn(403);
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getContent())
.thenReturn(toStream(errorWithReasonAndStatus("actually forbidden", 403)));
},
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(testTable));
});
thrown.expect(GoogleJsonResponseException.class);
thrown.expectMessage("actually forbidden");
BigQueryServicesImpl.DatasetServiceImpl services =
new BigQueryServicesImpl.DatasetServiceImpl(
bigquery, null, PipelineOptionsFactory.create());
try {
services.tryCreateTable(
testTable, new RetryBoundedBackOff(BackOff.ZERO_BACKOFF, 3), Sleeper.DEFAULT);
fail();
} catch (IOException e) {
// Only the failing response may have been consumed: proves no retry occurred.
verify(responses[0], atLeastOnce()).getStatusCode();
verify(responses[0]).getContent();
verify(responses[0]).getContentType();
verify(responses[1], never()).getStatusCode();
verify(responses[1], never()).getContent();
verify(responses[1], never()).getContentType();
throw e;
}
}
/** Tests that table creation succeeds when the table already exists. */
@Test
public void testCreateTableSucceedsAlreadyExists() throws IOException {
TableReference ref =
new TableReference().setProjectId("project").setDatasetId("dataset").setTableId("table");
TableSchema schema =
new TableSchema()
.setFields(
ImmutableList.of(
new TableFieldSchema().setName("column1").setType("String"),
new TableFieldSchema().setName("column2").setType("Integer")));
Table testTable = new Table().setTableReference(ref).setSchema(schema);
// 409 Conflict signals the table already exists; tryCreateTable() then returns null.
setupMockResponses(
response -> {
when(response.getStatusCode()).thenReturn(409);
});
BigQueryServicesImpl.DatasetServiceImpl services =
new BigQueryServicesImpl.DatasetServiceImpl(
bigquery, null, PipelineOptionsFactory.create());
Table ret =
services.tryCreateTable(
testTable, new RetryBoundedBackOff(BackOff.ZERO_BACKOFF, 0), Sleeper.DEFAULT);
assertNull(ret);
verifyAllResponsesAreRead();
}
/** Tests that {@link BigQueryServicesImpl} retries quota rate limited attempts. */
@Test
public void testCreateTableRetry() throws IOException {
TableReference ref =
new TableReference().setProjectId("project").setDatasetId("dataset").setTableId("table");
Table testTable = new Table().setTableReference(ref);
// First response: retryable rateLimitExceeded; second response: success.
setupMockResponses(
response -> {
when(response.getStatusCode()).thenReturn(403);
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getContent())
.thenReturn(toStream(errorWithReasonAndStatus("rateLimitExceeded", 403)));
},
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(testTable));
});
BigQueryServicesImpl.DatasetServiceImpl services =
new BigQueryServicesImpl.DatasetServiceImpl(
bigquery, null, PipelineOptionsFactory.create());
Table ret =
services.tryCreateTable(
testTable, new RetryBoundedBackOff(BackOff.ZERO_BACKOFF, 3), Sleeper.DEFAULT);
assertEquals(testTable, ret);
verifyAllResponsesAreRead();
assertNotNull(ret.getTableReference());
expectedLogs.verifyInfo(
"Quota limit reached when creating table project:dataset.table, "
+ "retrying up to 5 minutes");
}
/** Tests that {@link DatasetServiceImpl#insertAll} reports failed rows as {@link TableRow}s. */
@Test
// Both rows fail with non-retryable reasons under neverRetry(); the raw TableRows
// must be handed back through the failedInserts list.
public void testSimpleErrorRetrieval() throws InterruptedException, IOException {
TableReference ref =
new TableReference().setProjectId("project").setDatasetId("dataset").setTableId("table");
List<FailsafeValueInSingleWindow<TableRow, TableRow>> rows =
ImmutableList.of(
wrapValue(new TableRow().set("a", 1)), wrapValue(new TableRow().set("b", 2)));
final List<ValueInSingleWindow<TableRow>> expected =
ImmutableList.of(
wrapErrorValue(new TableRow().set("a", 1)), wrapErrorValue(new TableRow().set("b", 2)));
final TableDataInsertAllResponse failures =
new TableDataInsertAllResponse()
.setInsertErrors(
ImmutableList.of(
new InsertErrors()
.setIndex(0L)
.setErrors(ImmutableList.of(new ErrorProto().setReason("timeout"))),
new InsertErrors()
.setIndex(1L)
.setErrors(ImmutableList.of(new ErrorProto().setReason("invalid")))));
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(failures));
});
DatasetServiceImpl dataService =
new DatasetServiceImpl(bigquery, null, PipelineOptionsFactory.create());
List<ValueInSingleWindow<TableRow>> failedInserts = Lists.newArrayList();
dataService.insertAll(
ref,
rows,
null,
BackOffAdapter.toGcpBackOff(TEST_BACKOFF.backoff()),
TEST_BACKOFF,
new MockSleeper(),
InsertRetryPolicy.neverRetry(),
failedInserts,
ErrorContainer.TABLE_ROW_ERROR_CONTAINER,
false,
false,
false,
null);
assertThat(failedInserts, is(expected));
}
/** Tests that {@link DatasetServiceImpl#insertAll} reports failed rows as {@link BigQueryInsertError}s. */
@Test
public void testExtendedErrorRetrieval() throws InterruptedException, IOException {
TableReference ref =
new TableReference().setProjectId("project").setDatasetId("dataset").setTableId("table");
List<FailsafeValueInSingleWindow<TableRow, TableRow>> rows =
ImmutableList.of(
wrapValue(new TableRow().set("a", 1)), wrapValue(new TableRow().set("b", 2)));
final TableDataInsertAllResponse failures =
new TableDataInsertAllResponse()
.setInsertErrors(
ImmutableList.of(
new InsertErrors()
.setIndex(0L)
.setErrors(ImmutableList.of(new ErrorProto().setReason("timeout"))),
new InsertErrors()
.setIndex(1L)
.setErrors(ImmutableList.of(new ErrorProto().setReason("invalid")))));
final List<ValueInSingleWindow<BigQueryInsertError>> expected =
ImmutableList.of(
wrapErrorValue(
new BigQueryInsertError(
rows.get(0).getValue(), failures.getInsertErrors().get(0), ref)),
wrapErrorValue(
new BigQueryInsertError(
rows.get(1).getValue(), failures.getInsertErrors().get(1), ref)));
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getContent()).thenReturn(toStream(failures));
});
DatasetServiceImpl dataService =
new DatasetServiceImpl(bigquery, null, PipelineOptionsFactory.create());
List<ValueInSingleWindow<BigQueryInsertError>> failedInserts = Lists.newArrayList();
dataService.insertAll(
ref,
rows,
null,
BackOffAdapter.toGcpBackOff(TEST_BACKOFF.backoff()),
TEST_BACKOFF,
new MockSleeper(),
InsertRetryPolicy.neverRetry(),
failedInserts,
ErrorContainer.BIG_QUERY_INSERT_ERROR_ERROR_CONTAINER,
false,
false,
false,
null);
assertThat(failedInserts, is(expected));
}
@Test
public void testCreateReadSessionSetsRequestCountMetric()
throws InterruptedException, IOException {
BigQueryServicesImpl.StorageClientImpl client =
mock(BigQueryServicesImpl.StorageClientImpl.class);
CreateReadSessionRequest.Builder builder = CreateReadSessionRequest.newBuilder();
builder.getReadSessionBuilder().setTable("myproject:mydataset.mytable");
CreateReadSessionRequest request = builder.build();
when(client.callCreateReadSession(request))
.thenReturn(ReadSession.newBuilder().build());
when(client.createReadSession(any())).thenCallRealMethod();
client.createReadSession(request);
verifyReadMetricWasSet("myproject", "mydataset", "mytable", "ok", 1);
}
@Test
public void testCreateReadSessionSetsRequestCountMetricOnError()
throws InterruptedException, IOException {
BigQueryServicesImpl.StorageClientImpl client =
mock(BigQueryServicesImpl.StorageClientImpl.class);
CreateReadSessionRequest.Builder builder = CreateReadSessionRequest.newBuilder();
builder.getReadSessionBuilder().setTable("myproject:mydataset.mytable");
CreateReadSessionRequest request = builder.build();
StatusCode statusCode =
new StatusCode() {
@Override
public Code getCode() {
return Code.NOT_FOUND;
}
@Override
public Object getTransportCode() {
return null;
}
};
when(client.callCreateReadSession(request))
.thenThrow(new ApiException("Not Found", null, statusCode, false));
when(client.createReadSession(any())).thenCallRealMethod();
thrown.expect(ApiException.class);
thrown.expectMessage("Not Found");
client.createReadSession(request);
verifyReadMetricWasSet("myproject", "mydataset", "mytable", "not_found", 1);
}
@Test
public void testReadRowsSetsRequestCountMetric() throws InterruptedException, IOException {
BigQueryServices.StorageClient client = mock(BigQueryServicesImpl.StorageClientImpl.class);
ReadRowsRequest request = null;
BigQueryServices.BigQueryServerStream<ReadRowsResponse> response =
new BigQueryServices.BigQueryServerStream<ReadRowsResponse>() {
@Override
public Iterator<ReadRowsResponse> iterator() {
return null;
}
@Override
public void cancel() {}
};
when(client.readRows(request)).thenReturn(response);
when(client.readRows(any(), any())).thenCallRealMethod();
client.readRows(request, "myproject:mydataset.mytable");
verifyReadMetricWasSet("myproject", "mydataset", "mytable", "ok", 1);
}
@Test
public void testReadRowsSetsRequestCountMetricOnError() throws InterruptedException, IOException {
BigQueryServices.StorageClient client = mock(BigQueryServicesImpl.StorageClientImpl.class);
ReadRowsRequest request = null;
StatusCode statusCode =
new StatusCode() {
@Override
public Code getCode() {
return Code.INTERNAL;
}
@Override
public Object getTransportCode() {
return null;
}
};
when(client.readRows(request))
.thenThrow(new ApiException("Internal", null, statusCode, false));
when(client.readRows(any(), any())).thenCallRealMethod();
thrown.expect(ApiException.class);
thrown.expectMessage("Internal");
client.readRows(request, "myproject:mydataset.mytable");
verifyReadMetricWasSet("myproject", "mydataset", "mytable", "internal", 1);
}
@Test
public void testSplitReadStreamSetsRequestCountMetric() throws InterruptedException, IOException {
BigQueryServices.StorageClient client = mock(BigQueryServicesImpl.StorageClientImpl.class);
SplitReadStreamRequest request = null;
when(client.splitReadStream(request))
.thenReturn(SplitReadStreamResponse.newBuilder().build());
when(client.splitReadStream(any(), any())).thenCallRealMethod();
client.splitReadStream(request, "myproject:mydataset.mytable");
verifyReadMetricWasSet("myproject", "mydataset", "mytable", "ok", 1);
}
@Test
public void testSplitReadStreamSetsRequestCountMetricOnError()
throws InterruptedException, IOException {
BigQueryServices.StorageClient client = mock(BigQueryServicesImpl.StorageClientImpl.class);
SplitReadStreamRequest request = null;
StatusCode statusCode =
new StatusCode() {
@Override
public Code getCode() {
return Code.RESOURCE_EXHAUSTED;
}
@Override
public Object getTransportCode() {
return null;
}
};
when(client.splitReadStream(request))
.thenThrow(
new ApiException(
"Resource Exhausted", null, statusCode, false));
when(client.splitReadStream(any(), any())).thenCallRealMethod();
thrown.expect(ApiException.class);
thrown.expectMessage("Resource Exhausted");
client.splitReadStream(request, "myproject:mydataset.mytable");
verifyReadMetricWasSet("myproject", "mydataset", "mytable", "resource_exhausted", 1);
}
  @Test
  public void testRetryAttemptCounter() {
    // Verifies that RetryAttemptCounter accumulates throttled milliseconds only when the retry
    // status is RESOURCE_EXHAUSTED *and* the trailers carry server-provided RetryInfo.
    BigQueryServicesImpl.StorageClientImpl.RetryAttemptCounter counter =
        new BigQueryServicesImpl.StorageClientImpl.RetryAttemptCounter();
    // RetryInfo advertising a delay of 123 s + 456,000,000 ns = 123456 ms.
    RetryInfo retryInfo =
        RetryInfo.newBuilder()
            .setRetryDelay(
                com.google.protobuf.Duration.newBuilder()
                    .setSeconds(123)
                    .setNanos(456000000)
                    .build())
            .build();
    // gRPC metadata with the RetryInfo attached under the "-bin" binary key, serialized with a
    // hand-rolled marshaller (proto bytes in, parsed RetryInfo out; null on parse failure).
    Metadata metadata = new Metadata();
    metadata.put(
        Metadata.Key.of(
            "google.rpc.retryinfo-bin",
            new Metadata.BinaryMarshaller<RetryInfo>() {
              @Override
              public byte[] toBytes(RetryInfo value) {
                return value.toByteArray();
              }
              @Override
              public RetryInfo parseBytes(byte[] serialized) {
                try {
                  Parser<RetryInfo> parser = RetryInfo.newBuilder().build().getParserForType();
                  return parser.parseFrom(serialized);
                } catch (Exception e) {
                  return null;
                }
              }
            }),
        retryInfo);
    // The counter publishes under this fixed namespace/name pair.
    MetricName metricName =
        MetricName.named(
            "org.apache.beam.sdk.io.gcp.bigquery.BigQueryServicesImpl$StorageClientImpl",
            "throttling-msecs");
    MetricsContainerImpl container =
        (MetricsContainerImpl) MetricsEnvironment.getCurrentContainer();
    // A null status contributes nothing.
    counter.onRetryAttempt(null, null);
    assertEquals(0, (long) container.getCounter(metricName).getCumulative());
    // RESOURCE_EXHAUSTED without RetryInfo in the metadata also contributes nothing.
    counter.onRetryAttempt(
        Status.RESOURCE_EXHAUSTED.withDescription("You have consumed some quota"), new Metadata());
    assertEquals(0, (long) container.getCounter(metricName).getCumulative());
    // RESOURCE_EXHAUSTED with RetryInfo adds the advertised delay in milliseconds.
    counter.onRetryAttempt(Status.RESOURCE_EXHAUSTED.withDescription("Stop for a while"), metadata);
    assertEquals(123456, (long) container.getCounter(metricName).getCumulative());
    // UNAVAILABLE is not counted even with RetryInfo present; the total stays at 123456.
    counter.onRetryAttempt(Status.UNAVAILABLE.withDescription("Server is gone"), metadata);
    assertEquals(123456, (long) container.getCounter(metricName).getCumulative());
  }
} | class BigQueryServicesImplTest {
@Rule public ExpectedException thrown = ExpectedException.none();
@Rule public ExpectedLogs expectedLogs = ExpectedLogs.none(BigQueryServicesImpl.class);
private LowLevelHttpResponse[] responses;
private MockLowLevelHttpRequest request;
private Bigquery bigquery;
@Before
public void setUp() {
MockitoAnnotations.initMocks(this);
request =
new MockLowLevelHttpRequest() {
int index = 0;
@Override
public LowLevelHttpResponse execute() throws IOException {
Verify.verify(
index < responses.length,
"The number of HttpRequest invocation exceeded the number of prepared mock requests. Index: %d",
index);
return responses[index++];
}
};
MockHttpTransport transport =
new MockHttpTransport.Builder().setLowLevelHttpRequest(request).build();
bigquery =
new Bigquery.Builder(
transport, Transport.getJsonFactory(), new RetryHttpRequestInitializer())
.build();
MetricsContainerImpl container = new MetricsContainerImpl(null);
MetricsEnvironment.setProcessWideContainer(container);
MetricsEnvironment.setCurrentContainer(container);
}
@FunctionalInterface
private interface MockSetupFunction {
void apply(LowLevelHttpResponse t) throws IOException;
}
/**
* Prepares the mock objects using {@code mockPreparations}, and assigns them to {@link
*
*/
private void setupMockResponses(MockSetupFunction... mockPreparations) throws IOException {
responses = new LowLevelHttpResponse[mockPreparations.length];
for (int i = 0; i < mockPreparations.length; ++i) {
MockSetupFunction setupFunction = mockPreparations[i];
LowLevelHttpResponse response = mock(LowLevelHttpResponse.class);
setupFunction.apply(response);
responses[i] = response;
}
}
/**
* Verifies the test interacted the mock objects in {@link
*
* <p>The implementation of google-api-client or google-http-client may influence the number of
* interaction in future
*/
private void verifyAllResponsesAreRead() throws IOException {
Verify.verify(responses != null, "The test setup is incorrect. Responses are not setup");
for (LowLevelHttpResponse response : responses) {
verify(response, atLeastOnce()).getStatusCode();
verify(response, times(1)).getContent();
verify(response, times(1)).getContentType();
}
}
private void verifyRequestMetricWasSet(
String method, String projectId, String dataset, String table, String status, long count) {
HashMap<String, String> labels = new HashMap<String, String>();
labels.put(MonitoringInfoConstants.Labels.PTRANSFORM, "");
labels.put(MonitoringInfoConstants.Labels.SERVICE, "BigQuery");
labels.put(MonitoringInfoConstants.Labels.METHOD, method);
labels.put(
MonitoringInfoConstants.Labels.RESOURCE,
GcpResourceIdentifiers.bigQueryTable(projectId, dataset, table));
labels.put(MonitoringInfoConstants.Labels.BIGQUERY_PROJECT_ID, projectId);
labels.put(MonitoringInfoConstants.Labels.BIGQUERY_DATASET, dataset);
labels.put(MonitoringInfoConstants.Labels.BIGQUERY_TABLE, table);
labels.put(MonitoringInfoConstants.Labels.STATUS, status);
MonitoringInfoMetricName name =
MonitoringInfoMetricName.named(MonitoringInfoConstants.Urns.API_REQUEST_COUNT, labels);
MetricsContainerImpl container =
(MetricsContainerImpl) MetricsEnvironment.getProcessWideContainer();
assertEquals(count, (long) container.getCounter(name).getCumulative());
}
private void verifyWriteMetricWasSet(
String projectId, String dataset, String table, String status, long count) {
verifyRequestMetricWasSet("BigQueryBatchWrite", projectId, dataset, table, status, count);
}
private void verifyReadMetricWasSet(
String projectId, String dataset, String table, String status, long count) {
verifyRequestMetricWasSet("BigQueryBatchRead", projectId, dataset, table, status, count);
}
/** Tests that {@link BigQueryServicesImpl.JobServiceImpl
@Test
public void testStartLoadJobSucceeds() throws IOException, InterruptedException {
Job testJob = new Job();
JobReference jobRef = new JobReference();
jobRef.setJobId("jobId");
jobRef.setProjectId("projectId");
testJob.setJobReference(jobRef);
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(testJob));
});
Sleeper sleeper = new FastNanoClockAndSleeper();
JobServiceImpl.startJob(
testJob,
new ApiErrorExtractor(),
bigquery,
sleeper,
BackOffAdapter.toGcpBackOff(FluentBackoff.DEFAULT.backoff()));
verifyAllResponsesAreRead();
expectedLogs.verifyInfo(String.format("Started BigQuery job: %s", jobRef));
}
/**
* Tests that {@link BigQueryServicesImpl.JobServiceImpl
* exist job.
*/
@Test
public void testStartLoadJobSucceedsAlreadyExists() throws IOException, InterruptedException {
Job testJob = new Job();
JobReference jobRef = new JobReference();
jobRef.setJobId("jobId");
jobRef.setProjectId("projectId");
testJob.setJobReference(jobRef);
setupMockResponses(
response -> {
when(response.getStatusCode()).thenReturn(409);
});
Sleeper sleeper = new FastNanoClockAndSleeper();
JobServiceImpl.startJob(
testJob,
new ApiErrorExtractor(),
bigquery,
sleeper,
BackOffAdapter.toGcpBackOff(FluentBackoff.DEFAULT.backoff()));
verifyAllResponsesAreRead();
expectedLogs.verifyNotLogged("Started BigQuery job");
}
/** Tests that {@link BigQueryServicesImpl.JobServiceImpl
@Test
public void testStartLoadJobRetry() throws IOException, InterruptedException {
Job testJob = new Job();
JobReference jobRef = new JobReference();
jobRef.setJobId("jobId");
jobRef.setProjectId("projectId");
testJob.setJobReference(jobRef);
setupMockResponses(
response -> {
when(response.getStatusCode()).thenReturn(403);
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getContent())
.thenReturn(toStream(errorWithReasonAndStatus("rateLimitExceeded", 403)));
},
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(testJob));
});
Sleeper sleeper = new FastNanoClockAndSleeper();
JobServiceImpl.startJob(
testJob,
new ApiErrorExtractor(),
bigquery,
sleeper,
BackOffAdapter.toGcpBackOff(FluentBackoff.DEFAULT.backoff()));
verifyAllResponsesAreRead();
}
/** Tests that {@link BigQueryServicesImpl.JobServiceImpl
@Test
public void testPollJobSucceeds() throws IOException, InterruptedException {
Job testJob = new Job();
testJob.setStatus(new JobStatus().setState("DONE"));
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(testJob));
});
BigQueryServicesImpl.JobServiceImpl jobService =
new BigQueryServicesImpl.JobServiceImpl(bigquery);
JobReference jobRef = new JobReference().setProjectId("projectId").setJobId("jobId");
Job job = jobService.pollJob(jobRef, Sleeper.DEFAULT, BackOff.ZERO_BACKOFF);
assertEquals(testJob, job);
verifyAllResponsesAreRead();
}
/** Tests that {@link BigQueryServicesImpl.JobServiceImpl
@Test
public void testPollJobFailed() throws IOException, InterruptedException {
Job testJob = new Job();
testJob.setStatus(new JobStatus().setState("DONE").setErrorResult(new ErrorProto()));
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(testJob));
});
BigQueryServicesImpl.JobServiceImpl jobService =
new BigQueryServicesImpl.JobServiceImpl(bigquery);
JobReference jobRef = new JobReference().setProjectId("projectId").setJobId("jobId");
Job job = jobService.pollJob(jobRef, Sleeper.DEFAULT, BackOff.ZERO_BACKOFF);
assertEquals(testJob, job);
verifyAllResponsesAreRead();
}
/** Tests that {@link BigQueryServicesImpl.JobServiceImpl
@Test
public void testPollJobUnknown() throws IOException, InterruptedException {
Job testJob = new Job();
testJob.setStatus(new JobStatus());
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(testJob));
});
BigQueryServicesImpl.JobServiceImpl jobService =
new BigQueryServicesImpl.JobServiceImpl(bigquery);
JobReference jobRef = new JobReference().setProjectId("projectId").setJobId("jobId");
Job job = jobService.pollJob(jobRef, Sleeper.DEFAULT, BackOff.STOP_BACKOFF);
assertEquals(null, job);
verifyAllResponsesAreRead();
}
@Test
public void testGetJobSucceeds() throws Exception {
Job testJob = new Job();
testJob.setStatus(new JobStatus());
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(testJob));
});
BigQueryServicesImpl.JobServiceImpl jobService =
new BigQueryServicesImpl.JobServiceImpl(bigquery);
JobReference jobRef = new JobReference().setProjectId("projectId").setJobId("jobId");
Job job = jobService.getJob(jobRef, Sleeper.DEFAULT, BackOff.ZERO_BACKOFF);
assertEquals(testJob, job);
verifyAllResponsesAreRead();
}
@Test
public void testGetJobNotFound() throws Exception {
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(404);
});
BigQueryServicesImpl.JobServiceImpl jobService =
new BigQueryServicesImpl.JobServiceImpl(bigquery);
JobReference jobRef = new JobReference().setProjectId("projectId").setJobId("jobId");
Job job = jobService.getJob(jobRef, Sleeper.DEFAULT, BackOff.ZERO_BACKOFF);
assertEquals(null, job);
verifyAllResponsesAreRead();
}
@Test
public void testGetJobThrows() throws Exception {
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(401);
});
BigQueryServicesImpl.JobServiceImpl jobService =
new BigQueryServicesImpl.JobServiceImpl(bigquery);
JobReference jobRef = new JobReference().setProjectId("projectId").setJobId("jobId");
thrown.expect(IOException.class);
thrown.expectMessage(String.format("Unable to find BigQuery job: %s", jobRef));
jobService.getJob(jobRef, Sleeper.DEFAULT, BackOff.STOP_BACKOFF);
}
@Test
public void testGetTableSucceeds() throws Exception {
TableReference tableRef =
new TableReference()
.setProjectId("projectId")
.setDatasetId("datasetId")
.setTableId("tableId");
Table testTable = new Table();
testTable.setTableReference(tableRef);
setupMockResponses(
response -> {
when(response.getStatusCode()).thenReturn(403);
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getContent())
.thenReturn(toStream(errorWithReasonAndStatus("rateLimitExceeded", 403)));
},
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(testTable));
});
BigQueryServicesImpl.DatasetServiceImpl datasetService =
new BigQueryServicesImpl.DatasetServiceImpl(
bigquery, null, PipelineOptionsFactory.create());
Table table = datasetService.getTable(tableRef, null, BackOff.ZERO_BACKOFF, Sleeper.DEFAULT);
assertEquals(testTable, table);
verifyAllResponsesAreRead();
}
@Test
public void testGetTableNotFound() throws IOException, InterruptedException {
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(404);
});
BigQueryServicesImpl.DatasetServiceImpl datasetService =
new BigQueryServicesImpl.DatasetServiceImpl(
bigquery, null, PipelineOptionsFactory.create());
TableReference tableRef =
new TableReference()
.setProjectId("projectId")
.setDatasetId("datasetId")
.setTableId("tableId");
Table table = datasetService.getTable(tableRef, null, BackOff.ZERO_BACKOFF, Sleeper.DEFAULT);
assertNull(table);
verifyAllResponsesAreRead();
}
@Test
public void testGetTableThrows() throws Exception {
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(401);
});
TableReference tableRef =
new TableReference()
.setProjectId("projectId")
.setDatasetId("datasetId")
.setTableId("tableId");
thrown.expect(IOException.class);
thrown.expectMessage(String.format("Unable to get table: %s", tableRef.getTableId()));
BigQueryServicesImpl.DatasetServiceImpl datasetService =
new BigQueryServicesImpl.DatasetServiceImpl(
bigquery, null, PipelineOptionsFactory.create());
datasetService.getTable(tableRef, null, BackOff.STOP_BACKOFF, Sleeper.DEFAULT);
}
@Test
public void testIsTableEmptySucceeds() throws Exception {
TableReference tableRef =
new TableReference()
.setProjectId("projectId")
.setDatasetId("datasetId")
.setTableId("tableId");
TableDataList testDataList = new TableDataList().setRows(ImmutableList.of(new TableRow()));
setupMockResponses(
response -> {
when(response.getStatusCode()).thenReturn(403);
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getContent())
.thenReturn(toStream(errorWithReasonAndStatus("rateLimitExceeded", 403)));
},
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(testDataList));
});
BigQueryServicesImpl.DatasetServiceImpl datasetService =
new BigQueryServicesImpl.DatasetServiceImpl(
bigquery, null, PipelineOptionsFactory.create());
assertFalse(datasetService.isTableEmpty(tableRef, BackOff.ZERO_BACKOFF, Sleeper.DEFAULT));
verifyAllResponsesAreRead();
}
@Test
public void testIsTableEmptyNoRetryForNotFound() throws IOException, InterruptedException {
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(404);
});
BigQueryServicesImpl.DatasetServiceImpl datasetService =
new BigQueryServicesImpl.DatasetServiceImpl(
bigquery, null, PipelineOptionsFactory.create());
TableReference tableRef =
new TableReference()
.setProjectId("projectId")
.setDatasetId("datasetId")
.setTableId("tableId");
thrown.expect(IOException.class);
thrown.expectMessage(String.format("Unable to list table data: %s", tableRef.getTableId()));
try {
datasetService.isTableEmpty(tableRef, BackOff.ZERO_BACKOFF, Sleeper.DEFAULT);
} finally {
verifyAllResponsesAreRead();
}
}
@Test
public void testIsTableEmptyThrows() throws Exception {
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(401);
});
TableReference tableRef =
new TableReference()
.setProjectId("projectId")
.setDatasetId("datasetId")
.setTableId("tableId");
BigQueryServicesImpl.DatasetServiceImpl datasetService =
new BigQueryServicesImpl.DatasetServiceImpl(
bigquery, null, PipelineOptionsFactory.create());
thrown.expect(IOException.class);
thrown.expectMessage(String.format("Unable to list table data: %s", tableRef.getTableId()));
datasetService.isTableEmpty(tableRef, BackOff.STOP_BACKOFF, Sleeper.DEFAULT);
}
@Test
public void testExecuteWithRetries() throws IOException, InterruptedException {
Table testTable = new Table();
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(testTable));
});
Table table =
BigQueryServicesImpl.executeWithRetries(
bigquery.tables().get("projectId", "datasetId", "tableId"),
"Failed to get table.",
Sleeper.DEFAULT,
BackOff.STOP_BACKOFF,
BigQueryServicesImpl.ALWAYS_RETRY);
assertEquals(testTable, table);
verifyAllResponsesAreRead();
}
private <T> FailsafeValueInSingleWindow<T, T> wrapValue(T value) {
return FailsafeValueInSingleWindow.of(
value,
GlobalWindow.TIMESTAMP_MAX_VALUE,
GlobalWindow.INSTANCE,
PaneInfo.ON_TIME_AND_ONLY_FIRING,
value);
}
private <T> ValueInSingleWindow<T> wrapErrorValue(T value) {
return ValueInSingleWindow.of(
value,
GlobalWindow.TIMESTAMP_MAX_VALUE,
GlobalWindow.INSTANCE,
PaneInfo.ON_TIME_AND_ONLY_FIRING);
}
/** Tests that {@link DatasetServiceImpl
@Test
public void testInsertRateLimitRetry() throws Exception {
TableReference ref =
new TableReference().setProjectId("project").setDatasetId("dataset").setTableId("table");
List<FailsafeValueInSingleWindow<TableRow, TableRow>> rows = new ArrayList<>();
rows.add(wrapValue(new TableRow()));
setupMockResponses(
response -> {
when(response.getStatusCode()).thenReturn(403);
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getContent())
.thenReturn(toStream(errorWithReasonAndStatus("rateLimitExceeded", 403)));
},
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(new TableDataInsertAllResponse()));
});
DatasetServiceImpl dataService =
new DatasetServiceImpl(bigquery, null, PipelineOptionsFactory.create());
dataService.insertAll(
ref,
rows,
null,
BackOffAdapter.toGcpBackOff(TEST_BACKOFF.backoff()),
TEST_BACKOFF,
new MockSleeper(),
InsertRetryPolicy.alwaysRetry(),
null,
null,
false,
false,
false,
null);
verifyAllResponsesAreRead();
expectedLogs.verifyInfo("BigQuery insertAll error, retrying:");
verifyWriteMetricWasSet("project", "dataset", "table", "ratelimitexceeded", 1);
}
/** Tests that {@link DatasetServiceImpl
@Test
public void testInsertQuotaExceededRetry() throws Exception {
TableReference ref =
new TableReference().setProjectId("project").setDatasetId("dataset").setTableId("table");
List<FailsafeValueInSingleWindow<TableRow, TableRow>> rows = new ArrayList<>();
rows.add(wrapValue(new TableRow()));
setupMockResponses(
response -> {
when(response.getStatusCode()).thenReturn(403);
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getContent())
.thenReturn(toStream(errorWithReasonAndStatus("quotaExceeded", 403)));
},
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(new TableDataInsertAllResponse()));
});
DatasetServiceImpl dataService =
new DatasetServiceImpl(bigquery, null, PipelineOptionsFactory.create());
dataService.insertAll(
ref,
rows,
null,
BackOffAdapter.toGcpBackOff(TEST_BACKOFF.backoff()),
TEST_BACKOFF,
new MockSleeper(),
InsertRetryPolicy.alwaysRetry(),
null,
null,
false,
false,
false,
null);
verifyAllResponsesAreRead();
expectedLogs.verifyInfo("BigQuery insertAll error, retrying:");
verifyWriteMetricWasSet("project", "dataset", "table", "quotaexceeded", 1);
}
/** Tests that {@link DatasetServiceImpl
@Test
public void testInsertStoppedRetry() throws Exception {
TableReference ref =
new TableReference().setProjectId("project").setDatasetId("dataset").setTableId("table");
List<FailsafeValueInSingleWindow<TableRow, TableRow>> rows = new ArrayList<>();
rows.add(wrapValue(new TableRow()));
MockSetupFunction quotaExceededResponse =
response -> {
when(response.getStatusCode()).thenReturn(403);
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getContent())
.thenReturn(toStream(errorWithReasonAndStatus("quotaExceeded", 403)));
};
setupMockResponses(
quotaExceededResponse,
quotaExceededResponse,
quotaExceededResponse,
quotaExceededResponse,
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(new TableDataInsertAllResponse()));
});
thrown.expect(RuntimeException.class);
thrown.expectMessage("quotaExceeded");
DatasetServiceImpl dataService =
new DatasetServiceImpl(bigquery, null, PipelineOptionsFactory.create());
dataService.insertAll(
ref,
rows,
null,
BackOffAdapter.toGcpBackOff(TEST_BACKOFF.backoff()),
TEST_BACKOFF,
new MockSleeper(),
InsertRetryPolicy.alwaysRetry(),
null,
null,
false,
false,
false,
null);
verifyAllResponsesAreRead();
verifyWriteMetricWasSet("project", "dataset", "table", "quotaexceeded", 1);
}
private static final FluentBackoff TEST_BACKOFF =
FluentBackoff.DEFAULT
.withInitialBackoff(Duration.millis(1))
.withExponent(1)
.withMaxRetries(3);
/** Tests that {@link DatasetServiceImpl
@Test
public void testInsertRetrySelectRows() throws Exception {
TableReference ref =
new TableReference().setProjectId("project").setDatasetId("dataset").setTableId("table");
List<FailsafeValueInSingleWindow<TableRow, TableRow>> rows =
ImmutableList.of(
wrapValue(new TableRow().set("row", "a")), wrapValue(new TableRow().set("row", "b")));
List<String> insertIds = ImmutableList.of("a", "b");
final TableDataInsertAllResponse bFailed =
new TableDataInsertAllResponse()
.setInsertErrors(
ImmutableList.of(
new InsertErrors().setIndex(1L).setErrors(ImmutableList.of(new ErrorProto()))));
final TableDataInsertAllResponse allRowsSucceeded = new TableDataInsertAllResponse();
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(bFailed));
},
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(allRowsSucceeded));
});
DatasetServiceImpl dataService =
new DatasetServiceImpl(bigquery, null, PipelineOptionsFactory.create());
dataService.insertAll(
ref,
rows,
insertIds,
BackOffAdapter.toGcpBackOff(TEST_BACKOFF.backoff()),
TEST_BACKOFF,
new MockSleeper(),
InsertRetryPolicy.alwaysRetry(),
null,
null,
false,
false,
false,
null);
verifyAllResponsesAreRead();
verifyWriteMetricWasSet("project", "dataset", "table", "unknown", 1);
verifyWriteMetricWasSet("project", "dataset", "table", "ok", 1);
}
/** Tests that {@link DatasetServiceImpl
@Test
public void testInsertWithinRowCountLimits() throws Exception {
TableReference ref =
new TableReference().setProjectId("project").setDatasetId("dataset").setTableId("table");
List<FailsafeValueInSingleWindow<TableRow, TableRow>> rows =
ImmutableList.of(
wrapValue(new TableRow().set("row", "a")),
wrapValue(new TableRow().set("row", "b")),
wrapValue(new TableRow().set("row", "c")));
List<String> insertIds = ImmutableList.of("a", "b", "c");
final TableDataInsertAllResponse allRowsSucceeded = new TableDataInsertAllResponse();
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(allRowsSucceeded));
},
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(allRowsSucceeded));
},
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(allRowsSucceeded));
});
DatasetServiceImpl dataService =
new DatasetServiceImpl(
bigquery,
null,
PipelineOptionsFactory.fromArgs("--maxStreamingRowsToBatch=1").create());
dataService.insertAll(
ref,
rows,
insertIds,
BackOffAdapter.toGcpBackOff(TEST_BACKOFF.backoff()),
TEST_BACKOFF,
new MockSleeper(),
InsertRetryPolicy.alwaysRetry(),
null,
null,
false,
false,
false,
null);
verifyAllResponsesAreRead();
verifyWriteMetricWasSet("project", "dataset", "table", "ok", 3);
}
/**
 * Tests that {@link DatasetServiceImpl#insertAll} honors the maximum request byte size:
 * with {@code --maxStreamingBatchSize=15} the three rows are expected to be split into two
 * requests (two mock responses are prepared and the "ok" metric is asserted at 2).
 */
@Test
public void testInsertWithinRequestByteSizeLimits() throws Exception {
  TableReference ref =
      new TableReference().setProjectId("project").setDatasetId("dataset").setTableId("table");
  List<FailsafeValueInSingleWindow<TableRow, TableRow>> rows =
      ImmutableList.of(
          wrapValue(new TableRow().set("row", "a")),
          wrapValue(new TableRow().set("row", "b")),
          wrapValue(new TableRow().set("row", "cdefghijklmnopqrstuvwxyz")));
  List<String> insertIds = ImmutableList.of("a", "b", "c");
  final TableDataInsertAllResponse allRowsSucceeded = new TableDataInsertAllResponse();
  // One successful HTTP response per expected batch.
  setupMockResponses(
      response -> {
        when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
        when(response.getStatusCode()).thenReturn(200);
        when(response.getContent()).thenReturn(toStream(allRowsSucceeded));
      },
      response -> {
        when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
        when(response.getStatusCode()).thenReturn(200);
        when(response.getContent()).thenReturn(toStream(allRowsSucceeded));
      });
  DatasetServiceImpl dataService =
      new DatasetServiceImpl(
          bigquery, null, PipelineOptionsFactory.fromArgs("--maxStreamingBatchSize=15").create());
  dataService.insertAll(
      ref,
      rows,
      insertIds,
      BackOffAdapter.toGcpBackOff(TEST_BACKOFF.backoff()),
      TEST_BACKOFF,
      new MockSleeper(),
      InsertRetryPolicy.alwaysRetry(),
      new ArrayList<>(),
      ErrorContainer.TABLE_ROW_ERROR_CONTAINER,
      false,
      false,
      false,
      null);
  verifyAllResponsesAreRead();
  // Two requests total, both successful.
  verifyWriteMetricWasSet("project", "dataset", "table", "ok", 2);
}
/** Tests that {@link DatasetServiceImpl
@Test | class BigQueryServicesImplTest {
@Rule public ExpectedException thrown = ExpectedException.none();
@Rule public ExpectedLogs expectedLogs = ExpectedLogs.none(BigQueryServicesImpl.class);
private LowLevelHttpResponse[] responses;
private MockLowLevelHttpRequest request;
private Bigquery bigquery;
@Before
public void setUp() {
  MockitoAnnotations.initMocks(this);
  // HTTP request stub that replays the responses prepared by setupMockResponses(), one per
  // execute() call, and fails fast if more requests are issued than responses were prepared.
  request =
      new MockLowLevelHttpRequest() {
        int index = 0;

        @Override
        public LowLevelHttpResponse execute() throws IOException {
          Verify.verify(
              index < responses.length,
              "The number of HttpRequest invocation exceeded the number of prepared mock requests. Index: %d",
              index);
          return responses[index++];
        }
      };
  MockHttpTransport transport =
      new MockHttpTransport.Builder().setLowLevelHttpRequest(request).build();
  // Bigquery client wired to the mock transport so tests never touch the network.
  bigquery =
      new Bigquery.Builder(
              transport, Transport.getJsonFactory(), new RetryHttpRequestInitializer())
          .build();
  // Fresh metrics containers per test so metric assertions only see this test's counters.
  MetricsContainerImpl container = new MetricsContainerImpl(null);
  MetricsEnvironment.setProcessWideContainer(container);
  MetricsEnvironment.setCurrentContainer(container);
}
/** Configures one mocked {@link LowLevelHttpResponse} before the test hands it to the client. */
@FunctionalInterface
private interface MockSetupFunction {
  // May throw IOException because setup stubs IO-declaring methods such as getContent().
  void apply(LowLevelHttpResponse t) throws IOException;
}
/**
 * Creates one mocked {@link LowLevelHttpResponse} per setup function, applies each function to
 * its response, and stores the results in {@link #responses} in call order.
 */
private void setupMockResponses(MockSetupFunction... mockPreparations) throws IOException {
  responses = new LowLevelHttpResponse[mockPreparations.length];
  int slot = 0;
  for (MockSetupFunction prepare : mockPreparations) {
    LowLevelHttpResponse mockResponse = mock(LowLevelHttpResponse.class);
    prepare.apply(mockResponse);
    responses[slot++] = mockResponse;
  }
}
/**
 * Verifies that the test consumed every mock response prepared via {@link #setupMockResponses}:
 * each response's status code was read at least once and its content and content type exactly
 * once.
 *
 * <p>The implementation of google-api-client or google-http-client may influence the number of
 * interactions in future versions, hence the deliberately loose count on status-code reads.
 */
private void verifyAllResponsesAreRead() throws IOException {
  Verify.verify(responses != null, "The test setup is incorrect. Responses are not setup");
  for (LowLevelHttpResponse response : responses) {
    verify(response, atLeastOnce()).getStatusCode();
    verify(response, times(1)).getContent();
    verify(response, times(1)).getContentType();
  }
}
/**
 * Asserts that the process-wide metrics container recorded exactly {@code count} BigQuery API
 * requests carrying the given method/project/dataset/table/status labels.
 *
 * <p>The label set must mirror the one attached when {@code API_REQUEST_COUNT} is reported; any
 * mismatch makes the counter lookup resolve to a different (zero) counter.
 */
private void verifyRequestMetricWasSet(
    String method, String projectId, String dataset, String table, String status, long count) {
  // Diamond operator: the type arguments are inferred from the declaration.
  HashMap<String, String> labels = new HashMap<>();
  labels.put(MonitoringInfoConstants.Labels.PTRANSFORM, "");
  labels.put(MonitoringInfoConstants.Labels.SERVICE, "BigQuery");
  labels.put(MonitoringInfoConstants.Labels.METHOD, method);
  labels.put(
      MonitoringInfoConstants.Labels.RESOURCE,
      GcpResourceIdentifiers.bigQueryTable(projectId, dataset, table));
  labels.put(MonitoringInfoConstants.Labels.BIGQUERY_PROJECT_ID, projectId);
  labels.put(MonitoringInfoConstants.Labels.BIGQUERY_DATASET, dataset);
  labels.put(MonitoringInfoConstants.Labels.BIGQUERY_TABLE, table);
  labels.put(MonitoringInfoConstants.Labels.STATUS, status);
  MonitoringInfoMetricName name =
      MonitoringInfoMetricName.named(MonitoringInfoConstants.Urns.API_REQUEST_COUNT, labels);
  MetricsContainerImpl container =
      (MetricsContainerImpl) MetricsEnvironment.getProcessWideContainer();
  assertEquals(count, (long) container.getCounter(name).getCumulative());
}
/** Asserts the batch-write (insertAll) request counter for the given table and status. */
private void verifyWriteMetricWasSet(
    String projectId, String dataset, String table, String status, long count) {
  verifyRequestMetricWasSet("BigQueryBatchWrite", projectId, dataset, table, status, count);
}
/** Asserts the batch-read request counter for the given table and status. */
private void verifyReadMetricWasSet(
    String projectId, String dataset, String table, String status, long count) {
  verifyRequestMetricWasSet("BigQueryBatchRead", projectId, dataset, table, status, count);
}
/** Tests that {@link BigQueryServicesImpl.JobServiceImpl#startJob} succeeds on the first try. */
@Test
public void testStartLoadJobSucceeds() throws IOException, InterruptedException {
  Job testJob = new Job();
  JobReference jobRef = new JobReference();
  jobRef.setJobId("jobId");
  jobRef.setProjectId("projectId");
  testJob.setJobReference(jobRef);
  // Single 200 response: the job is created on the first request.
  setupMockResponses(
      response -> {
        when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
        when(response.getStatusCode()).thenReturn(200);
        when(response.getContent()).thenReturn(toStream(testJob));
      });
  Sleeper sleeper = new FastNanoClockAndSleeper();
  JobServiceImpl.startJob(
      testJob,
      new ApiErrorExtractor(),
      bigquery,
      sleeper,
      BackOffAdapter.toGcpBackOff(FluentBackoff.DEFAULT.backoff()));
  verifyAllResponsesAreRead();
  // A successful fresh start logs the job reference.
  expectedLogs.verifyInfo(String.format("Started BigQuery job: %s", jobRef));
}
/**
 * Tests that {@link BigQueryServicesImpl.JobServiceImpl#startJob} succeeds quietly when the job
 * already exists: an HTTP 409 is treated as success, with no retry and no "Started BigQuery
 * job" log line.
 */
@Test
public void testStartLoadJobSucceedsAlreadyExists() throws IOException, InterruptedException {
  Job testJob = new Job();
  JobReference jobRef = new JobReference();
  jobRef.setJobId("jobId");
  jobRef.setProjectId("projectId");
  testJob.setJobReference(jobRef);
  // 409 Conflict: the job was already created by a previous attempt.
  setupMockResponses(
      response -> {
        when(response.getStatusCode()).thenReturn(409);
      });
  Sleeper sleeper = new FastNanoClockAndSleeper();
  JobServiceImpl.startJob(
      testJob,
      new ApiErrorExtractor(),
      bigquery,
      sleeper,
      BackOffAdapter.toGcpBackOff(FluentBackoff.DEFAULT.backoff()));
  verifyAllResponsesAreRead();
  expectedLogs.verifyNotLogged("Started BigQuery job");
}
/** Tests that {@link BigQueryServicesImpl.JobServiceImpl
@Test
public void testStartLoadJobRetry() throws IOException, InterruptedException {
Job testJob = new Job();
JobReference jobRef = new JobReference();
jobRef.setJobId("jobId");
jobRef.setProjectId("projectId");
testJob.setJobReference(jobRef);
setupMockResponses(
response -> {
when(response.getStatusCode()).thenReturn(403);
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getContent())
.thenReturn(toStream(errorWithReasonAndStatus("rateLimitExceeded", 403)));
},
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(testJob));
});
Sleeper sleeper = new FastNanoClockAndSleeper();
JobServiceImpl.startJob(
testJob,
new ApiErrorExtractor(),
bigquery,
sleeper,
BackOffAdapter.toGcpBackOff(FluentBackoff.DEFAULT.backoff()));
verifyAllResponsesAreRead();
}
/** Tests that {@link BigQueryServicesImpl.JobServiceImpl
@Test
public void testPollJobSucceeds() throws IOException, InterruptedException {
Job testJob = new Job();
testJob.setStatus(new JobStatus().setState("DONE"));
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(testJob));
});
BigQueryServicesImpl.JobServiceImpl jobService =
new BigQueryServicesImpl.JobServiceImpl(bigquery);
JobReference jobRef = new JobReference().setProjectId("projectId").setJobId("jobId");
Job job = jobService.pollJob(jobRef, Sleeper.DEFAULT, BackOff.ZERO_BACKOFF);
assertEquals(testJob, job);
verifyAllResponsesAreRead();
}
/** Tests that {@link BigQueryServicesImpl.JobServiceImpl
@Test
public void testPollJobFailed() throws IOException, InterruptedException {
Job testJob = new Job();
testJob.setStatus(new JobStatus().setState("DONE").setErrorResult(new ErrorProto()));
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(testJob));
});
BigQueryServicesImpl.JobServiceImpl jobService =
new BigQueryServicesImpl.JobServiceImpl(bigquery);
JobReference jobRef = new JobReference().setProjectId("projectId").setJobId("jobId");
Job job = jobService.pollJob(jobRef, Sleeper.DEFAULT, BackOff.ZERO_BACKOFF);
assertEquals(testJob, job);
verifyAllResponsesAreRead();
}
/** Tests that {@link BigQueryServicesImpl.JobServiceImpl
@Test
public void testPollJobUnknown() throws IOException, InterruptedException {
Job testJob = new Job();
testJob.setStatus(new JobStatus());
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(testJob));
});
BigQueryServicesImpl.JobServiceImpl jobService =
new BigQueryServicesImpl.JobServiceImpl(bigquery);
JobReference jobRef = new JobReference().setProjectId("projectId").setJobId("jobId");
Job job = jobService.pollJob(jobRef, Sleeper.DEFAULT, BackOff.STOP_BACKOFF);
assertEquals(null, job);
verifyAllResponsesAreRead();
}
@Test
public void testGetJobSucceeds() throws Exception {
Job testJob = new Job();
testJob.setStatus(new JobStatus());
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(testJob));
});
BigQueryServicesImpl.JobServiceImpl jobService =
new BigQueryServicesImpl.JobServiceImpl(bigquery);
JobReference jobRef = new JobReference().setProjectId("projectId").setJobId("jobId");
Job job = jobService.getJob(jobRef, Sleeper.DEFAULT, BackOff.ZERO_BACKOFF);
assertEquals(testJob, job);
verifyAllResponsesAreRead();
}
@Test
public void testGetJobNotFound() throws Exception {
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(404);
});
BigQueryServicesImpl.JobServiceImpl jobService =
new BigQueryServicesImpl.JobServiceImpl(bigquery);
JobReference jobRef = new JobReference().setProjectId("projectId").setJobId("jobId");
Job job = jobService.getJob(jobRef, Sleeper.DEFAULT, BackOff.ZERO_BACKOFF);
assertEquals(null, job);
verifyAllResponsesAreRead();
}
@Test
public void testGetJobThrows() throws Exception {
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(401);
});
BigQueryServicesImpl.JobServiceImpl jobService =
new BigQueryServicesImpl.JobServiceImpl(bigquery);
JobReference jobRef = new JobReference().setProjectId("projectId").setJobId("jobId");
thrown.expect(IOException.class);
thrown.expectMessage(String.format("Unable to find BigQuery job: %s", jobRef));
jobService.getJob(jobRef, Sleeper.DEFAULT, BackOff.STOP_BACKOFF);
}
@Test
public void testGetTableSucceeds() throws Exception {
TableReference tableRef =
new TableReference()
.setProjectId("projectId")
.setDatasetId("datasetId")
.setTableId("tableId");
Table testTable = new Table();
testTable.setTableReference(tableRef);
setupMockResponses(
response -> {
when(response.getStatusCode()).thenReturn(403);
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getContent())
.thenReturn(toStream(errorWithReasonAndStatus("rateLimitExceeded", 403)));
},
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(testTable));
});
BigQueryServicesImpl.DatasetServiceImpl datasetService =
new BigQueryServicesImpl.DatasetServiceImpl(
bigquery, null, PipelineOptionsFactory.create());
Table table = datasetService.getTable(tableRef, null, BackOff.ZERO_BACKOFF, Sleeper.DEFAULT);
assertEquals(testTable, table);
verifyAllResponsesAreRead();
}
@Test
public void testGetTableNotFound() throws IOException, InterruptedException {
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(404);
});
BigQueryServicesImpl.DatasetServiceImpl datasetService =
new BigQueryServicesImpl.DatasetServiceImpl(
bigquery, null, PipelineOptionsFactory.create());
TableReference tableRef =
new TableReference()
.setProjectId("projectId")
.setDatasetId("datasetId")
.setTableId("tableId");
Table table = datasetService.getTable(tableRef, null, BackOff.ZERO_BACKOFF, Sleeper.DEFAULT);
assertNull(table);
verifyAllResponsesAreRead();
}
@Test
public void testGetTableThrows() throws Exception {
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(401);
});
TableReference tableRef =
new TableReference()
.setProjectId("projectId")
.setDatasetId("datasetId")
.setTableId("tableId");
thrown.expect(IOException.class);
thrown.expectMessage(String.format("Unable to get table: %s", tableRef.getTableId()));
BigQueryServicesImpl.DatasetServiceImpl datasetService =
new BigQueryServicesImpl.DatasetServiceImpl(
bigquery, null, PipelineOptionsFactory.create());
datasetService.getTable(tableRef, null, BackOff.STOP_BACKOFF, Sleeper.DEFAULT);
}
@Test
public void testIsTableEmptySucceeds() throws Exception {
TableReference tableRef =
new TableReference()
.setProjectId("projectId")
.setDatasetId("datasetId")
.setTableId("tableId");
TableDataList testDataList = new TableDataList().setRows(ImmutableList.of(new TableRow()));
setupMockResponses(
response -> {
when(response.getStatusCode()).thenReturn(403);
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getContent())
.thenReturn(toStream(errorWithReasonAndStatus("rateLimitExceeded", 403)));
},
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(testDataList));
});
BigQueryServicesImpl.DatasetServiceImpl datasetService =
new BigQueryServicesImpl.DatasetServiceImpl(
bigquery, null, PipelineOptionsFactory.create());
assertFalse(datasetService.isTableEmpty(tableRef, BackOff.ZERO_BACKOFF, Sleeper.DEFAULT));
verifyAllResponsesAreRead();
}
@Test
public void testIsTableEmptyNoRetryForNotFound() throws IOException, InterruptedException {
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(404);
});
BigQueryServicesImpl.DatasetServiceImpl datasetService =
new BigQueryServicesImpl.DatasetServiceImpl(
bigquery, null, PipelineOptionsFactory.create());
TableReference tableRef =
new TableReference()
.setProjectId("projectId")
.setDatasetId("datasetId")
.setTableId("tableId");
thrown.expect(IOException.class);
thrown.expectMessage(String.format("Unable to list table data: %s", tableRef.getTableId()));
try {
datasetService.isTableEmpty(tableRef, BackOff.ZERO_BACKOFF, Sleeper.DEFAULT);
} finally {
verifyAllResponsesAreRead();
}
}
@Test
public void testIsTableEmptyThrows() throws Exception {
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(401);
});
TableReference tableRef =
new TableReference()
.setProjectId("projectId")
.setDatasetId("datasetId")
.setTableId("tableId");
BigQueryServicesImpl.DatasetServiceImpl datasetService =
new BigQueryServicesImpl.DatasetServiceImpl(
bigquery, null, PipelineOptionsFactory.create());
thrown.expect(IOException.class);
thrown.expectMessage(String.format("Unable to list table data: %s", tableRef.getTableId()));
datasetService.isTableEmpty(tableRef, BackOff.STOP_BACKOFF, Sleeper.DEFAULT);
}
@Test
public void testExecuteWithRetries() throws IOException, InterruptedException {
  Table testTable = new Table();
  // Single successful response; with STOP_BACKOFF no retries are possible, so the call must
  // succeed on its first attempt.
  setupMockResponses(
      response -> {
        when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
        when(response.getStatusCode()).thenReturn(200);
        when(response.getContent()).thenReturn(toStream(testTable));
      });
  Table table =
      BigQueryServicesImpl.executeWithRetries(
          bigquery.tables().get("projectId", "datasetId", "tableId"),
          "Failed to get table.",
          Sleeper.DEFAULT,
          BackOff.STOP_BACKOFF,
          BigQueryServicesImpl.ALWAYS_RETRY);
  assertEquals(testTable, table);
  verifyAllResponsesAreRead();
}
/**
 * Wraps {@code value} in a single global-window, on-time pane. The value also serves as its own
 * failsafe payload.
 */
private <T> FailsafeValueInSingleWindow<T, T> wrapValue(T value) {
  // Same record is reported back on failure.
  final T failsafePayload = value;
  return FailsafeValueInSingleWindow.of(
      value,
      GlobalWindow.TIMESTAMP_MAX_VALUE,
      GlobalWindow.INSTANCE,
      PaneInfo.ON_TIME_AND_ONLY_FIRING,
      failsafePayload);
}
/** Wraps an error {@code value} in a single global-window, on-time pane. */
private <T> ValueInSingleWindow<T> wrapErrorValue(T value) {
  return ValueInSingleWindow.of(
      value, GlobalWindow.TIMESTAMP_MAX_VALUE, GlobalWindow.INSTANCE, PaneInfo.ON_TIME_AND_ONLY_FIRING);
}
/** Tests that {@link DatasetServiceImpl
@Test
public void testInsertRateLimitRetry() throws Exception {
TableReference ref =
new TableReference().setProjectId("project").setDatasetId("dataset").setTableId("table");
List<FailsafeValueInSingleWindow<TableRow, TableRow>> rows = new ArrayList<>();
rows.add(wrapValue(new TableRow()));
setupMockResponses(
response -> {
when(response.getStatusCode()).thenReturn(403);
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getContent())
.thenReturn(toStream(errorWithReasonAndStatus("rateLimitExceeded", 403)));
},
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(new TableDataInsertAllResponse()));
});
DatasetServiceImpl dataService =
new DatasetServiceImpl(bigquery, null, PipelineOptionsFactory.create());
dataService.insertAll(
ref,
rows,
null,
BackOffAdapter.toGcpBackOff(TEST_BACKOFF.backoff()),
TEST_BACKOFF,
new MockSleeper(),
InsertRetryPolicy.alwaysRetry(),
null,
null,
false,
false,
false,
null);
verifyAllResponsesAreRead();
expectedLogs.verifyInfo("BigQuery insertAll error, retrying:");
verifyWriteMetricWasSet("project", "dataset", "table", "ratelimitexceeded", 1);
}
/** Tests that {@link DatasetServiceImpl
@Test
public void testInsertQuotaExceededRetry() throws Exception {
TableReference ref =
new TableReference().setProjectId("project").setDatasetId("dataset").setTableId("table");
List<FailsafeValueInSingleWindow<TableRow, TableRow>> rows = new ArrayList<>();
rows.add(wrapValue(new TableRow()));
setupMockResponses(
response -> {
when(response.getStatusCode()).thenReturn(403);
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getContent())
.thenReturn(toStream(errorWithReasonAndStatus("quotaExceeded", 403)));
},
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(new TableDataInsertAllResponse()));
});
DatasetServiceImpl dataService =
new DatasetServiceImpl(bigquery, null, PipelineOptionsFactory.create());
dataService.insertAll(
ref,
rows,
null,
BackOffAdapter.toGcpBackOff(TEST_BACKOFF.backoff()),
TEST_BACKOFF,
new MockSleeper(),
InsertRetryPolicy.alwaysRetry(),
null,
null,
false,
false,
false,
null);
verifyAllResponsesAreRead();
expectedLogs.verifyInfo("BigQuery insertAll error, retrying:");
verifyWriteMetricWasSet("project", "dataset", "table", "quotaexceeded", 1);
}
/**
 * Tests that {@link DatasetServiceImpl#insertAll} gives up once the backoff is exhausted and
 * rethrows the persistent quotaExceeded error.
 */
@Test
public void testInsertStoppedRetry() throws Exception {
  TableReference ref =
      new TableReference().setProjectId("project").setDatasetId("dataset").setTableId("table");
  List<FailsafeValueInSingleWindow<TableRow, TableRow>> rows = new ArrayList<>();
  rows.add(wrapValue(new TableRow()));
  // Every attempt is answered with HTTP 403 / quotaExceeded.
  MockSetupFunction quotaExceededResponse =
      response -> {
        when(response.getStatusCode()).thenReturn(403);
        when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
        when(response.getContent())
            .thenReturn(toStream(errorWithReasonAndStatus("quotaExceeded", 403)));
      };
  // Four failures match the four quotaExceeded stubs; the trailing 200 response is prepared but
  // is not expected to be reached once retries stop.
  setupMockResponses(
      quotaExceededResponse,
      quotaExceededResponse,
      quotaExceededResponse,
      quotaExceededResponse,
      response -> {
        when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
        when(response.getStatusCode()).thenReturn(200);
        when(response.getContent()).thenReturn(toStream(new TableDataInsertAllResponse()));
      });
  thrown.expect(RuntimeException.class);
  thrown.expectMessage("quotaExceeded");
  DatasetServiceImpl dataService =
      new DatasetServiceImpl(bigquery, null, PipelineOptionsFactory.create());
  dataService.insertAll(
      ref,
      rows,
      null,
      BackOffAdapter.toGcpBackOff(TEST_BACKOFF.backoff()),
      TEST_BACKOFF,
      new MockSleeper(),
      InsertRetryPolicy.alwaysRetry(),
      null,
      null,
      false,
      false,
      false,
      null);
  // NOTE(review): the two verifications below appear unreachable — the ExpectedException rule
  // completes the test as soon as insertAll throws. If they are meant to run, move them into a
  // try/finally around insertAll, as testIsTableEmptyNoRetryForNotFound does — TODO confirm.
  verifyAllResponsesAreRead();
  verifyWriteMetricWasSet("project", "dataset", "table", "quotaexceeded", 1);
}
// Fast, bounded backoff for insert tests: 1 ms initial delay, constant delay (exponent 1), and
// at most 3 retries, so retry loops finish quickly and the attempt count is deterministic.
private static final FluentBackoff TEST_BACKOFF =
    FluentBackoff.DEFAULT
        .withInitialBackoff(Duration.millis(1))
        .withExponent(1)
        .withMaxRetries(3);
/** Tests that {@link DatasetServiceImpl
@Test
public void testInsertRetrySelectRows() throws Exception {
TableReference ref =
new TableReference().setProjectId("project").setDatasetId("dataset").setTableId("table");
List<FailsafeValueInSingleWindow<TableRow, TableRow>> rows =
ImmutableList.of(
wrapValue(new TableRow().set("row", "a")), wrapValue(new TableRow().set("row", "b")));
List<String> insertIds = ImmutableList.of("a", "b");
final TableDataInsertAllResponse bFailed =
new TableDataInsertAllResponse()
.setInsertErrors(
ImmutableList.of(
new InsertErrors().setIndex(1L).setErrors(ImmutableList.of(new ErrorProto()))));
final TableDataInsertAllResponse allRowsSucceeded = new TableDataInsertAllResponse();
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(bFailed));
},
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(allRowsSucceeded));
});
DatasetServiceImpl dataService =
new DatasetServiceImpl(bigquery, null, PipelineOptionsFactory.create());
dataService.insertAll(
ref,
rows,
insertIds,
BackOffAdapter.toGcpBackOff(TEST_BACKOFF.backoff()),
TEST_BACKOFF,
new MockSleeper(),
InsertRetryPolicy.alwaysRetry(),
null,
null,
false,
false,
false,
null);
verifyAllResponsesAreRead();
verifyWriteMetricWasSet("project", "dataset", "table", "unknown", 1);
verifyWriteMetricWasSet("project", "dataset", "table", "ok", 1);
}
/** Tests that {@link DatasetServiceImpl
@Test
public void testInsertWithinRowCountLimits() throws Exception {
TableReference ref =
new TableReference().setProjectId("project").setDatasetId("dataset").setTableId("table");
List<FailsafeValueInSingleWindow<TableRow, TableRow>> rows =
ImmutableList.of(
wrapValue(new TableRow().set("row", "a")),
wrapValue(new TableRow().set("row", "b")),
wrapValue(new TableRow().set("row", "c")));
List<String> insertIds = ImmutableList.of("a", "b", "c");
final TableDataInsertAllResponse allRowsSucceeded = new TableDataInsertAllResponse();
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(allRowsSucceeded));
},
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(allRowsSucceeded));
},
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(allRowsSucceeded));
});
DatasetServiceImpl dataService =
new DatasetServiceImpl(
bigquery,
null,
PipelineOptionsFactory.fromArgs("--maxStreamingRowsToBatch=1").create());
dataService.insertAll(
ref,
rows,
insertIds,
BackOffAdapter.toGcpBackOff(TEST_BACKOFF.backoff()),
TEST_BACKOFF,
new MockSleeper(),
InsertRetryPolicy.alwaysRetry(),
null,
null,
false,
false,
false,
null);
verifyAllResponsesAreRead();
verifyWriteMetricWasSet("project", "dataset", "table", "ok", 3);
}
/** Tests that {@link DatasetServiceImpl
@Test
public void testInsertWithinRequestByteSizeLimits() throws Exception {
TableReference ref =
new TableReference().setProjectId("project").setDatasetId("dataset").setTableId("table");
List<FailsafeValueInSingleWindow<TableRow, TableRow>> rows =
ImmutableList.of(
wrapValue(new TableRow().set("row", "a")),
wrapValue(new TableRow().set("row", "b")),
wrapValue(new TableRow().set("row", "cdefghijklmnopqrstuvwxyz")));
List<String> insertIds = ImmutableList.of("a", "b", "c");
final TableDataInsertAllResponse allRowsSucceeded = new TableDataInsertAllResponse();
setupMockResponses(
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(allRowsSucceeded));
},
response -> {
when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
when(response.getStatusCode()).thenReturn(200);
when(response.getContent()).thenReturn(toStream(allRowsSucceeded));
});
DatasetServiceImpl dataService =
new DatasetServiceImpl(
bigquery, null, PipelineOptionsFactory.fromArgs("--maxStreamingBatchSize=15").create());
dataService.insertAll(
ref,
rows,
insertIds,
BackOffAdapter.toGcpBackOff(TEST_BACKOFF.backoff()),
TEST_BACKOFF,
new MockSleeper(),
InsertRetryPolicy.alwaysRetry(),
new ArrayList<>(),
ErrorContainer.TABLE_ROW_ERROR_CONTAINER,
false,
false,
false,
null);
verifyAllResponsesAreRead();
verifyWriteMetricWasSet("project", "dataset", "table", "ok", 2);
}
/** Tests that {@link DatasetServiceImpl
@Test |
We don't have this API anymore. What if we extract the defaultable parameters and check? | public void testFunctionType() {
Symbol symbol = getSymbol(43, 12);
FunctionTypeDescriptor type = ((FunctionSymbol) symbol).typeDescriptor();
assertEquals(type.kind(), TypeDescKind.FUNCTION);
List<Parameter> parameters = type.parameters();
assertEquals(parameters.size(), 2);
validateParam(parameters.get(0), "x", REQUIRED, INT);
Parameter restParam = type.restParam().get();
validateParam(restParam, "rest", REST, ARRAY);
BallerinaTypeDescriptor returnType = type.returnTypeDescriptor().get();
assertEquals(returnType.kind(), INT);
} | BallerinaTypeDescriptor returnType = type.returnTypeDescriptor().get(); | public void testFunctionType() {
Symbol symbol = getSymbol(43, 12);
FunctionTypeDescriptor type = ((FunctionSymbol) symbol).typeDescriptor();
assertEquals(type.kind(), TypeDescKind.FUNCTION);
List<Parameter> parameters = type.parameters();
assertEquals(parameters.size(), 2);
validateParam(parameters.get(0), "x", REQUIRED, INT);
Parameter restParam = type.restParam().get();
validateParam(restParam, "rest", REST, ARRAY);
BallerinaTypeDescriptor returnType = type.returnTypeDescriptor().get();
assertEquals(returnType.kind(), INT);
} | class TypedescriptorTest {
SemanticModel model;
@BeforeClass
public void setup() {
CompilerContext context = new CompilerContext();
CompileResult result = compile("test-src/typedesc_test.bal", context);
BLangPackage pkg = (BLangPackage) result.getAST();
model = new BallerinaSemanticModel(pkg, context);
}
@Test
public void testAnnotationType() {
Symbol symbol = getSymbol(22, 37);
TypeReferenceTypeDescriptor type =
(TypeReferenceTypeDescriptor) ((AnnotationSymbol) symbol).typeDescriptor().get();
assertEquals(type.typeDescriptor().kind(), TypeDescKind.RECORD);
}
@Test
public void testConstantType() {
Symbol symbol = getSymbol(16, 7);
BallerinaTypeDescriptor type = ((ConstantSymbol) symbol).typeDescriptor();
assertEquals(type.kind(), FLOAT);
}
@Test
@Test
public void testFutureType() {
Symbol symbol = getSymbol(45, 16);
FutureTypeDescriptor type = (FutureTypeDescriptor) ((VariableSymbol) symbol).typeDescriptor();
assertEquals(type.kind(), FUTURE);
assertEquals(type.typeParameter().get().kind(), INT);
}
@Test
public void testArrayType() {
Symbol symbol = getSymbol(47, 18);
ArrayTypeDescriptor type = (ArrayTypeDescriptor) ((VariableSymbol) symbol).typeDescriptor();
assertEquals(type.kind(), ARRAY);
assertEquals(((TypeReferenceTypeDescriptor) type.memberTypeDescriptor()).typeDescriptor().kind(), OBJECT);
}
@Test
public void testMapType() {
Symbol symbol = getSymbol(49, 16);
MapTypeDescriptor type = (MapTypeDescriptor) ((VariableSymbol) symbol).typeDescriptor();
assertEquals(type.kind(), MAP);
assertEquals(type.typeParameter().get().kind(), STRING);
}
@Test
public void testNilType() {
Symbol symbol = getSymbol(38, 9);
FunctionTypeDescriptor type = (FunctionTypeDescriptor) ((FunctionSymbol) symbol).typeDescriptor();
assertEquals(type.returnTypeDescriptor().get().kind(), NIL);
}
@Test
public void testObjectType() {
Symbol symbol = getSymbol(28, 6);
TypeReferenceTypeDescriptor typeRef =
(TypeReferenceTypeDescriptor) ((TypeSymbol) symbol).typeDescriptor();
ObjectTypeDescriptor type = (ObjectTypeDescriptor) typeRef.typeDescriptor();
assertEquals(type.kind(), OBJECT);
List<FieldDescriptor> fields = type.fieldDescriptors();
FieldDescriptor field = fields.get(0);
assertEquals(fields.size(), 1);
assertEquals(field.name(), "name");
assertEquals(field.typeDescriptor().kind(), STRING);
List<MethodSymbol> methods = type.methods();
MethodSymbol method = methods.get(0);
assertEquals(fields.size(), 1);
assertEquals(method.name(), "getName");
assertEquals(type.initMethod().get().name(), "init");
}
@Test
public void testRecordType() {
Symbol symbol = getSymbol(18, 5);
TypeReferenceTypeDescriptor typeRef =
(TypeReferenceTypeDescriptor) ((TypeSymbol) symbol).typeDescriptor();
RecordTypeDescriptor type = (RecordTypeDescriptor) typeRef.typeDescriptor();
assertEquals(type.kind(), RECORD);
assertFalse(type.inclusive());
assertFalse(type.restTypeDescriptor().isPresent());
List<FieldDescriptor> fields = type.fieldDescriptors();
FieldDescriptor field = fields.get(0);
assertEquals(fields.size(), 1);
assertEquals(field.name(), "path");
assertEquals(field.typeDescriptor().kind(), STRING);
}
@Test
public void testTupleType() {
Symbol symbol = getSymbol(51, 28);
TupleTypeDescriptor type = (TupleTypeDescriptor) ((VariableSymbol) symbol).typeDescriptor();
assertEquals(type.kind(), TUPLE);
List<BallerinaTypeDescriptor> members = type.memberTypeDescriptors();
assertEquals(members.size(), 2);
assertEquals(members.get(0).kind(), INT);
assertEquals(members.get(1).kind(), STRING);
assertTrue(type.restTypeDescriptor().isPresent());
assertEquals(type.restTypeDescriptor().get().kind(), FLOAT);
}
@Test(dataProvider = "TypedescDataProvider")
public void testTypedescType(int line, int col, TypeDescKind kind) {
Symbol symbol = getSymbol(line, col);
TypeDescTypeDescriptor type = (TypeDescTypeDescriptor) ((VariableSymbol) symbol).typeDescriptor();
assertEquals(type.kind(), TYPEDESC);
assertTrue(type.typeParameter().isPresent());
assertEquals(type.typeParameter().get().kind(), kind);
}
@DataProvider(name = "TypedescDataProvider")
public Object[][] getTypedescPositions() {
return new Object[][]{
{53, 22, ANYDATA},
{54, 13, UNION}
};
}
@Test
public void testUnionType() {
Symbol symbol = getSymbol(56, 21);
UnionTypeDescriptor type = (UnionTypeDescriptor) ((VariableSymbol) symbol).typeDescriptor();
assertEquals(type.kind(), UNION);
List<BallerinaTypeDescriptor> members = type.memberTypeDescriptors();
assertEquals(members.get(0).kind(), INT);
assertEquals(members.get(1).kind(), STRING);
assertEquals(members.get(2).kind(), FLOAT);
}
@Test(enabled = false)
public void testNamedUnion() {
Symbol symbol = getSymbol(58, 11);
TypeReferenceTypeDescriptor typeRef =
(TypeReferenceTypeDescriptor) ((VariableSymbol) symbol).typeDescriptor();
assertEquals(typeRef.kind(), TYPE_REFERENCE);
UnionTypeDescriptor type = (UnionTypeDescriptor) typeRef.typeDescriptor();
List<BallerinaTypeDescriptor> members = type.memberTypeDescriptors();
assertEquals(members.get(0).kind(), INT);
assertEquals(members.get(1).kind(), FLOAT);
assertEquals(members.get(2).kind(), DECIMAL);
}
@Test(dataProvider = "FiniteTypeDataProvider")
// A finite type is modelled here as a union of SINGLETON members; each
// member's signature must match the corresponding expected literal string
// supplied by the data provider.
public void testFiniteType(int line, int column, List<String> expSignatures) {
Symbol symbol = getSymbol(line, column);
UnionTypeDescriptor union = (UnionTypeDescriptor) ((VariableSymbol) symbol).typeDescriptor();
assertEquals(union.kind(), UNION);
List<BallerinaTypeDescriptor> members = union.memberTypeDescriptors();
// members and expSignatures are expected to be index-aligned; a shorter
// expSignatures list would fail with IndexOutOfBoundsException.
for (int i = 0; i < members.size(); i++) {
BallerinaTypeDescriptor member = members.get(i);
assertEquals(member.kind(), SINGLETON);
assertEquals(member.signature(), expSignatures.get(i));
}
}
@DataProvider(name = "FiniteTypeDataProvider")
public Object[][] getFiniteTypePos() {
return new Object[][]{
{60, 10, List.of("0", "1", "2", "3")},
{62, 11, List.of("default", "csv", "tdf")}
};
}
// Looks up the symbol at the given (line, column) position in
// typedesc_test.bal. Calls Optional.get() without a presence check, so a
// position with no symbol fails the test with NoSuchElementException.
private Symbol getSymbol(int line, int column) {
return model.symbol("typedesc_test.bal", from(line, column)).get();
}
// Asserts a parameter's name, its parameter kind, and the kind of its type
// descriptor in a single call, to keep the individual tests compact.
private void validateParam(Parameter param, String name, ParameterKind kind, TypeDescKind typeKind) {
assertEquals(param.name().get(), name);
assertEquals(param.kind(), kind);
assertEquals(param.typeDescriptor().kind(), typeKind);
}
} | class TypedescriptorTest {
SemanticModel model;
@BeforeClass
public void setup() {
CompilerContext context = new CompilerContext();
CompileResult result = compile("test-src/typedesc_test.bal", context);
BLangPackage pkg = (BLangPackage) result.getAST();
model = new BallerinaSemanticModel(pkg, context);
}
@Test
public void testAnnotationType() {
Symbol symbol = getSymbol(22, 37);
TypeReferenceTypeDescriptor type =
(TypeReferenceTypeDescriptor) ((AnnotationSymbol) symbol).typeDescriptor().get();
assertEquals(type.typeDescriptor().kind(), TypeDescKind.RECORD);
}
@Test
public void testConstantType() {
Symbol symbol = getSymbol(16, 7);
BallerinaTypeDescriptor type = ((ConstantSymbol) symbol).typeDescriptor();
assertEquals(type.kind(), FLOAT);
}
@Test
// Verifies that the variable at (45, 16) resolves to a FUTURE type
// descriptor with an int type parameter (future<int>).
// Fix: the annotation was duplicated (@Test twice). @Test is not a
// repeatable annotation, so the doubled annotation does not compile.
public void testFutureType() {
    Symbol symbol = getSymbol(45, 16);
    FutureTypeDescriptor type = (FutureTypeDescriptor) ((VariableSymbol) symbol).typeDescriptor();
    assertEquals(type.kind(), FUTURE);
    assertEquals(type.typeParameter().get().kind(), INT);
}
@Test
public void testArrayType() {
Symbol symbol = getSymbol(47, 18);
ArrayTypeDescriptor type = (ArrayTypeDescriptor) ((VariableSymbol) symbol).typeDescriptor();
assertEquals(type.kind(), ARRAY);
assertEquals(((TypeReferenceTypeDescriptor) type.memberTypeDescriptor()).typeDescriptor().kind(), OBJECT);
}
@Test
public void testMapType() {
Symbol symbol = getSymbol(49, 16);
MapTypeDescriptor type = (MapTypeDescriptor) ((VariableSymbol) symbol).typeDescriptor();
assertEquals(type.kind(), MAP);
assertEquals(type.typeParameter().get().kind(), STRING);
}
@Test
public void testNilType() {
Symbol symbol = getSymbol(38, 9);
FunctionTypeDescriptor type = (FunctionTypeDescriptor) ((FunctionSymbol) symbol).typeDescriptor();
assertEquals(type.returnTypeDescriptor().get().kind(), NIL);
}
@Test
// Verifies the OBJECT type at (28, 6): one field ("name": string), one
// method ("getName"), and an "init" method.
public void testObjectType() {
    Symbol symbol = getSymbol(28, 6);
    TypeReferenceTypeDescriptor typeRef =
            (TypeReferenceTypeDescriptor) ((TypeSymbol) symbol).typeDescriptor();
    ObjectTypeDescriptor type = (ObjectTypeDescriptor) typeRef.typeDescriptor();
    assertEquals(type.kind(), OBJECT);
    // Assert the size before indexing so a wrong count fails with an
    // assertion error rather than an IndexOutOfBoundsException.
    List<FieldDescriptor> fields = type.fieldDescriptors();
    assertEquals(fields.size(), 1);
    FieldDescriptor field = fields.get(0);
    assertEquals(field.name(), "name");
    assertEquals(field.typeDescriptor().kind(), STRING);
    List<MethodSymbol> methods = type.methods();
    // Bug fix: the original re-asserted fields.size() here (copy-paste
    // error); this check is about the object's method list.
    assertEquals(methods.size(), 1);
    MethodSymbol method = methods.get(0);
    assertEquals(method.name(), "getName");
    assertEquals(type.initMethod().get().name(), "init");
}
@Test
public void testRecordType() {
Symbol symbol = getSymbol(18, 5);
TypeReferenceTypeDescriptor typeRef =
(TypeReferenceTypeDescriptor) ((TypeSymbol) symbol).typeDescriptor();
RecordTypeDescriptor type = (RecordTypeDescriptor) typeRef.typeDescriptor();
assertEquals(type.kind(), RECORD);
assertFalse(type.inclusive());
assertFalse(type.restTypeDescriptor().isPresent());
List<FieldDescriptor> fields = type.fieldDescriptors();
FieldDescriptor field = fields.get(0);
assertEquals(fields.size(), 1);
assertEquals(field.name(), "path");
assertEquals(field.typeDescriptor().kind(), STRING);
}
@Test
public void testTupleType() {
Symbol symbol = getSymbol(51, 28);
TupleTypeDescriptor type = (TupleTypeDescriptor) ((VariableSymbol) symbol).typeDescriptor();
assertEquals(type.kind(), TUPLE);
List<BallerinaTypeDescriptor> members = type.memberTypeDescriptors();
assertEquals(members.size(), 2);
assertEquals(members.get(0).kind(), INT);
assertEquals(members.get(1).kind(), STRING);
assertTrue(type.restTypeDescriptor().isPresent());
assertEquals(type.restTypeDescriptor().get().kind(), FLOAT);
}
@Test(dataProvider = "TypedescDataProvider")
public void testTypedescType(int line, int col, TypeDescKind kind) {
Symbol symbol = getSymbol(line, col);
TypeDescTypeDescriptor type = (TypeDescTypeDescriptor) ((VariableSymbol) symbol).typeDescriptor();
assertEquals(type.kind(), TYPEDESC);
assertTrue(type.typeParameter().isPresent());
assertEquals(type.typeParameter().get().kind(), kind);
}
@DataProvider(name = "TypedescDataProvider")
public Object[][] getTypedescPositions() {
return new Object[][]{
{53, 22, ANYDATA},
{54, 13, UNION}
};
}
@Test
public void testUnionType() {
Symbol symbol = getSymbol(56, 21);
UnionTypeDescriptor type = (UnionTypeDescriptor) ((VariableSymbol) symbol).typeDescriptor();
assertEquals(type.kind(), UNION);
List<BallerinaTypeDescriptor> members = type.memberTypeDescriptors();
assertEquals(members.get(0).kind(), INT);
assertEquals(members.get(1).kind(), STRING);
assertEquals(members.get(2).kind(), FLOAT);
}
@Test(enabled = false)
public void testNamedUnion() {
Symbol symbol = getSymbol(58, 11);
TypeReferenceTypeDescriptor typeRef =
(TypeReferenceTypeDescriptor) ((VariableSymbol) symbol).typeDescriptor();
assertEquals(typeRef.kind(), TYPE_REFERENCE);
UnionTypeDescriptor type = (UnionTypeDescriptor) typeRef.typeDescriptor();
List<BallerinaTypeDescriptor> members = type.memberTypeDescriptors();
assertEquals(members.get(0).kind(), INT);
assertEquals(members.get(1).kind(), FLOAT);
assertEquals(members.get(2).kind(), DECIMAL);
}
@Test(dataProvider = "FiniteTypeDataProvider")
public void testFiniteType(int line, int column, List<String> expSignatures) {
Symbol symbol = getSymbol(line, column);
UnionTypeDescriptor union = (UnionTypeDescriptor) ((VariableSymbol) symbol).typeDescriptor();
assertEquals(union.kind(), UNION);
List<BallerinaTypeDescriptor> members = union.memberTypeDescriptors();
for (int i = 0; i < members.size(); i++) {
BallerinaTypeDescriptor member = members.get(i);
assertEquals(member.kind(), SINGLETON);
assertEquals(member.signature(), expSignatures.get(i));
}
}
@DataProvider(name = "FiniteTypeDataProvider")
public Object[][] getFiniteTypePos() {
return new Object[][]{
{60, 10, List.of("0", "1", "2", "3")},
{62, 11, List.of("default", "csv", "tdf")}
};
}
private Symbol getSymbol(int line, int column) {
return model.symbol("typedesc_test.bal", from(line, column)).get();
}
private void validateParam(Parameter param, String name, ParameterKind kind, TypeDescKind typeKind) {
assertEquals(param.name().get(), name);
assertEquals(param.kind(), kind);
assertEquals(param.typeDescriptor().kind(), typeKind);
}
} |
Hi @tsreaper , Thank you for your advice. In `flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/functions/SqlFunctionUtils.java` there is an implementation of truncating method designed for `DecimalData`: ``` public static DecimalData struncate(DecimalData b0, int b1) { if (b1 >= b0.scale()) { return b0; } BigDecimal b2 = b0.toBigDecimal() .movePointRight(b1) .setScale(0, RoundingMode.DOWN) .movePointLeft(b1); int p = b0.precision(); int s = b0.scale(); if (b1 < 0) { return DecimalData.fromBigDecimal(b2, Math.min(38, 1 + p - s), 0); } else { return DecimalData.fromBigDecimal(b2, 1 + p - s + b1, b1); } } ``` It uses the same logic as the round method. After I thought over this issue, I suggest that we should add 1 on precision (same logic as round method). If we did not do that, for example, given a number f1 0.333 with the type of DECIMAL(3, 3), if we call truncate(f1, 0), the precision of the result would be 0, which would trigger an exception of 'Decimal precision must be between 1 and 38 (both inclusive)'. Those info above is my point of view. If you have any suggestion, please comment below. Thanks a lot. | public static List<TestSpec> testData() {
return Arrays.asList(
TestSpec.forFunction(BuiltInFunctionDefinitions.PLUS)
.onFieldsWithData(new BigDecimal("1514356320000"))
.andDataTypes(DataTypes.DECIMAL(19, 0).notNull())
.testResult(
$("f0").plus(6),
"f0 + 6",
new BigDecimal("1514356320006"),
DataTypes.DECIMAL(20, 0).notNull())
.testResult(
$("f0").plus($("f0")),
"f0 + f0",
new BigDecimal("3028712640000"),
DataTypes.DECIMAL(20, 0).notNull()),
TestSpec.forFunction(BuiltInFunctionDefinitions.MINUS)
.onFieldsWithData(new BigDecimal("1514356320000"))
.andDataTypes(DataTypes.DECIMAL(19, 0))
.testResult(
$("f0").minus(6),
"f0 - 6",
new BigDecimal("1514356319994"),
DataTypes.DECIMAL(20, 0))
.testResult(
$("f0").minus($("f0")),
"f0 - f0",
new BigDecimal("0"),
DataTypes.DECIMAL(20, 0)),
TestSpec.forFunction(BuiltInFunctionDefinitions.DIVIDE)
.onFieldsWithData(new BigDecimal("1514356320000"))
.andDataTypes(DataTypes.DECIMAL(19, 0).notNull())
.testResult(
$("f0").dividedBy(6),
"f0 / 6",
new BigDecimal("252392720000.00000000000"),
DataTypes.DECIMAL(30, 11).notNull())
.testResult(
$("f0").dividedBy($("f0")),
"f0 / f0",
new BigDecimal("1.0000000000000000000"),
DataTypes.DECIMAL(38, 19).notNull()),
TestSpec.forFunction(BuiltInFunctionDefinitions.TIMES)
.onFieldsWithData(new BigDecimal("1514356320000"))
.andDataTypes(DataTypes.DECIMAL(19, 0))
.testResult(
$("f0").times(6),
"f0 * 6",
new BigDecimal("9086137920000"),
DataTypes.DECIMAL(30, 0))
.testResult(
$("f0").times($("f0")),
"f0 * f0",
new BigDecimal("2293275063923942400000000"),
DataTypes.DECIMAL(38, 0)),
TestSpec.forFunction(BuiltInFunctionDefinitions.MOD)
.onFieldsWithData(new BigDecimal("1514356320000"), 44L, 3)
.andDataTypes(DataTypes.DECIMAL(19, 0), DataTypes.BIGINT(), DataTypes.INT())
.testResult(
$("f0").mod($("f0")),
"MOD(f0, f0)",
new BigDecimal(0),
DataTypes.DECIMAL(19, 0))
.testResult($("f0").mod(6), "MOD(f0, 6)", 0, DataTypes.INT())
.testResult($("f1").mod($("f2")), "MOD(f1, f2)", 2, DataTypes.INT()),
TestSpec.forFunction(BuiltInFunctionDefinitions.ROUND)
.onFieldsWithData(new BigDecimal("12345.12345"))
.testResult(
$("f0").round(2),
"ROUND(f0, 2)",
new BigDecimal("12345.12"),
DataTypes.DECIMAL(8, 2).notNull()),
TestSpec.forFunction(BuiltInFunctionDefinitions.TRUNCATE)
.onFieldsWithData(new BigDecimal("123.456"))
.testResult(
$("f0").truncate(2),
"TRUNCATE(f0, 2)",
new BigDecimal("123.45"),
DataTypes.DECIMAL(6, 2).notNull()));
} | DataTypes.DECIMAL(6, 2).notNull())); | public static List<TestSpec> testData() {
return Arrays.asList(
TestSpec.forFunction(BuiltInFunctionDefinitions.PLUS)
.onFieldsWithData(new BigDecimal("1514356320000"))
.andDataTypes(DataTypes.DECIMAL(19, 0).notNull())
.testResult(
$("f0").plus(6),
"f0 + 6",
new BigDecimal("1514356320006"),
DataTypes.DECIMAL(20, 0).notNull())
.testResult(
$("f0").plus($("f0")),
"f0 + f0",
new BigDecimal("3028712640000"),
DataTypes.DECIMAL(20, 0).notNull()),
TestSpec.forFunction(BuiltInFunctionDefinitions.MINUS)
.onFieldsWithData(new BigDecimal("1514356320000"))
.andDataTypes(DataTypes.DECIMAL(19, 0))
.testResult(
$("f0").minus(6),
"f0 - 6",
new BigDecimal("1514356319994"),
DataTypes.DECIMAL(20, 0))
.testResult(
$("f0").minus($("f0")),
"f0 - f0",
new BigDecimal("0"),
DataTypes.DECIMAL(20, 0)),
TestSpec.forFunction(BuiltInFunctionDefinitions.DIVIDE)
.onFieldsWithData(new BigDecimal("1514356320000"))
.andDataTypes(DataTypes.DECIMAL(19, 0).notNull())
.testResult(
$("f0").dividedBy(6),
"f0 / 6",
new BigDecimal("252392720000.00000000000"),
DataTypes.DECIMAL(30, 11).notNull())
.testResult(
$("f0").dividedBy($("f0")),
"f0 / f0",
new BigDecimal("1.0000000000000000000"),
DataTypes.DECIMAL(38, 19).notNull()),
TestSpec.forFunction(BuiltInFunctionDefinitions.TIMES)
.onFieldsWithData(new BigDecimal("1514356320000"))
.andDataTypes(DataTypes.DECIMAL(19, 0))
.testResult(
$("f0").times(6),
"f0 * 6",
new BigDecimal("9086137920000"),
DataTypes.DECIMAL(30, 0))
.testResult(
$("f0").times($("f0")),
"f0 * f0",
new BigDecimal("2293275063923942400000000"),
DataTypes.DECIMAL(38, 0)),
TestSpec.forFunction(BuiltInFunctionDefinitions.MOD)
.onFieldsWithData(new BigDecimal("1514356320000"), 44L, 3)
.andDataTypes(DataTypes.DECIMAL(19, 0), DataTypes.BIGINT(), DataTypes.INT())
.testResult(
$("f0").mod($("f0")),
"MOD(f0, f0)",
new BigDecimal(0),
DataTypes.DECIMAL(19, 0))
.testResult($("f0").mod(6), "MOD(f0, 6)", 0, DataTypes.INT())
.testResult($("f1").mod($("f2")), "MOD(f1, f2)", 2, DataTypes.INT()),
TestSpec.forFunction(BuiltInFunctionDefinitions.ROUND)
.onFieldsWithData(new BigDecimal("12345.12345"))
.testResult(
$("f0").round(2),
"ROUND(f0, 2)",
new BigDecimal("12345.12"),
DataTypes.DECIMAL(8, 2).notNull()),
TestSpec.forFunction(BuiltInFunctionDefinitions.TRUNCATE)
.onFieldsWithData(new BigDecimal("123.456"))
.testResult(
$("f0").truncate(2),
"TRUNCATE(f0, 2)",
new BigDecimal("123.45"),
DataTypes.DECIMAL(6, 2).notNull()));
} | class MathFunctionsITCase extends BuiltInFunctionTestBase {
@Parameters(name = "{index}: {0}")
} | class MathFunctionsITCase extends BuiltInFunctionTestBase {
@Parameters(name = "{index}: {0}")
} |
You're right! Let me fix that | public void start(Future<Void> startFuture) throws Exception {
final AtomicInteger remainingCount = new AtomicInteger(httpsOptions != null ? 2 : 1);
final HttpServerStartHandler httpServerStartHandler = new HttpServerStartHandler(startFuture, httpOptions,
remainingCount);
httpServer = vertx.createHttpServer(httpOptions);
httpServer.requestHandler(router);
httpServer.listen(port, host, httpServerStartHandler);
if (httpsOptions != null) {
httpsServer = vertx.createHttpServer(httpsOptions);
httpsServer.requestHandler(router);
httpsServer.listen(httpsPort, host, httpServerStartHandler);
}
} | remainingCount); | public void start(Future<Void> startFuture) {
final AtomicInteger remainingCount = new AtomicInteger(httpsOptions != null ? 2 : 1);
httpServer = vertx.createHttpServer(httpOptions);
httpServer.requestHandler(router);
httpServer.listen(port, host, event -> {
if (event.cause() != null) {
startFuture.fail(event.cause());
} else {
httpOptions.setPort(event.result().actualPort());
if (remainingCount.decrementAndGet() == 0) {
startFuture.complete(null);
}
}
});
if (httpsOptions != null) {
httpsServer = vertx.createHttpServer(httpsOptions);
httpsServer.requestHandler(router);
httpsServer.listen(httpsPort, host, event -> {
if (event.cause() != null) {
startFuture.fail(event.cause());
} else {
httpsOptions.setPort(event.result().actualPort());
if (remainingCount.decrementAndGet() == 0) {
startFuture.complete();
}
}
});
}
} | class WebDeploymentVerticle extends AbstractVerticle {
private final int port;
private final int httpsPort;
private final String host;
private HttpServer httpServer;
private HttpServer httpsServer;
private final HttpServerOptions httpOptions;
private final HttpServerOptions httpsOptions;
private final Router router;
public WebDeploymentVerticle(int port, int httpsPort, String host, HttpServerOptions httpOptions,
HttpServerOptions httpsOptions, Router router) {
this.port = port;
this.httpsPort = httpsPort;
this.host = host;
this.httpOptions = httpOptions;
this.httpsOptions = httpsOptions;
this.router = router;
}
@Override
// Closes the HTTP server first, then the HTTPS server (if one was
// started), and completes the stop future only once both are down.
// Fix: the annotation was duplicated (@Override twice). @Override is not
// repeatable, so the doubled annotation does not compile.
// NOTE(review): close failures are ignored and stopFuture always completes
// successfully — confirm this best-effort shutdown is intended.
public void stop(Future<Void> stopFuture) throws Exception {
    httpServer.close(event -> {
        if (httpsServer != null) {
            // Lambdas instead of anonymous Handler classes, consistent
            // with the listen() callbacks elsewhere in this verticle.
            httpsServer.close(httpsEvent -> stopFuture.complete());
        } else {
            stopFuture.complete();
        }
    });
}
} | class WebDeploymentVerticle extends AbstractVerticle {
private final int port;
private final int httpsPort;
private final String host;
private HttpServer httpServer;
private HttpServer httpsServer;
private final HttpServerOptions httpOptions;
private final HttpServerOptions httpsOptions;
private final Router router;
public WebDeploymentVerticle(int port, int httpsPort, String host, HttpServerOptions httpOptions,
HttpServerOptions httpsOptions, Router router) {
this.port = port;
this.httpsPort = httpsPort;
this.host = host;
this.httpOptions = httpOptions;
this.httpsOptions = httpsOptions;
this.router = router;
}
@Override
// Closes the HTTP server first, then the HTTPS server (if one was
// started), and completes the stop future only once both are down.
// Fix: the annotation was duplicated (@Override twice). @Override is not
// repeatable, so the doubled annotation does not compile.
// NOTE(review): close failures are ignored and stopFuture always completes
// successfully — confirm this best-effort shutdown is intended.
public void stop(Future<Void> stopFuture) {
    httpServer.close(event -> {
        if (httpsServer != null) {
            // Lambdas instead of anonymous Handler classes, consistent
            // with the listen() callbacks elsewhere in this verticle.
            httpsServer.close(httpsEvent -> stopFuture.complete());
        } else {
            stopFuture.complete();
        }
    });
}
} |
Please use assertThat and instanceOf to assert. | public void assertRightMysqlSchemaDataSources() throws Exception {
JDBCRawBackendDataSourceFactory jdbcRawBackendDataSourceFactory = Mockito.mock(JDBCRawBackendDataSourceFactory.class);
Mockito.when(jdbcRawBackendDataSourceFactory.build(Mockito.anyString(), Mockito.any())).thenReturn(new HikariDataSource());
ProxyDataSourceContext proxyDataSourceContext = build(jdbcRawBackendDataSourceFactory);
assertTrue(proxyDataSourceContext.getDatabaseType() instanceof MySQLDatabaseType);
assertTrue(proxyDataSourceContext.getDataSourcesMap().size() == 1);
Mockito.reset(jdbcRawBackendDataSourceFactory);
} | assertTrue(proxyDataSourceContext.getDatabaseType() instanceof MySQLDatabaseType); | public void assertRightMysqlSchemaDataSources() throws Exception {
JDBCRawBackendDataSourceFactory jdbcRawBackendDataSourceFactory = mock(JDBCRawBackendDataSourceFactory.class);
when(jdbcRawBackendDataSourceFactory.build(anyString(), any())).thenReturn(new HikariDataSource());
ProxyDataSourceContext proxyDataSourceContext = build(jdbcRawBackendDataSourceFactory);
assertThat(proxyDataSourceContext.getDatabaseType(), instanceOf(MySQLDatabaseType.class));
assertTrue(proxyDataSourceContext.getDataSourcesMap().size() == 1);
reset(jdbcRawBackendDataSourceFactory);
} | class ProxyDataSourceContextTest {
@Test
// An empty schema map is expected to yield the default database type
// (MySQL) and an empty data-source map.
public void assertEmptySchemaDataSources() {
    Map<String, Map<String, DataSourceParameter>> schemaDataSources = new HashMap<>();
    ProxyDataSourceContext proxyDataSourceContext = new ProxyDataSourceContext(schemaDataSources);
    // Consistency fix: use assertThat + instanceOf (as the other tests in
    // this class do) instead of assertTrue(x instanceof Y), for a more
    // descriptive failure message.
    assertThat(proxyDataSourceContext.getDatabaseType(), instanceOf(MySQLDatabaseType.class));
    assertTrue(proxyDataSourceContext.getDataSourcesMap().isEmpty());
}
@Test(expected = ShardingSphereException.class)
public void assertWrongSchemaDataSources() {
DataSourceParameter dataSourceParameter = new DataSourceParameter();
dataSourceParameter.setUrl("jdbc11:mysql11:xxx");
Map<String, DataSourceParameter> dataSourceParameterMap = new LinkedHashMap<>();
dataSourceParameterMap.put("order1", dataSourceParameter);
Map<String, Map<String, DataSourceParameter>> schemaDataSources = new HashMap<>();
schemaDataSources.put("order", dataSourceParameterMap);
new ProxyDataSourceContext(schemaDataSources);
}
@Test(expected = ShardingSphereException.class)
public void assertThrowByBuild() throws Exception {
JDBCRawBackendDataSourceFactory jdbcRawBackendDataSourceFactory = Mockito.mock(JDBCRawBackendDataSourceFactory.class);
Mockito.when(jdbcRawBackendDataSourceFactory.build(Mockito.anyString(), Mockito.any())).thenThrow(new ShardingSphereException(""));
build(jdbcRawBackendDataSourceFactory);
Mockito.reset(jdbcRawBackendDataSourceFactory);
}
// Replaces the JDBCRawBackendDataSourceFactory singleton (a static final
// INSTANCE field) with the given mock via reflection, then builds a
// ProxyDataSourceContext from a single-schema MySQL data-source map.
// Fix: removed a stray @Test annotation. JUnit 4 requires test methods to
// be public, void, and parameterless; annotating this private helper makes
// the whole test class fail validation.
private ProxyDataSourceContext build(final JDBCRawBackendDataSourceFactory jdbcRawBackendDataSourceFactory) throws Exception {
    JDBCRawBackendDataSourceFactory jdbcBackendDataSourceFactory = (JDBCRawBackendDataSourceFactory) JDBCRawBackendDataSourceFactory.getInstance();
    Class<?> jdbcBackendDataSourceFactoryClass = jdbcBackendDataSourceFactory.getClass();
    Field field = jdbcBackendDataSourceFactoryClass.getDeclaredField("INSTANCE");
    // Clear the final modifier so INSTANCE can be reassigned below.
    Field modifiers = field.getClass().getDeclaredField("modifiers");
    modifiers.setAccessible(true);
    modifiers.setInt(field, field.getModifiers() & ~Modifier.FINAL);
    field.setAccessible(true);
    field.set(field, jdbcRawBackendDataSourceFactory);
    DataSourceParameter dataSourceParameter = new DataSourceParameter();
    dataSourceParameter.setUrl("jdbc:mysql:xxx");
    Map<String, DataSourceParameter> dataSourceParameterMap = new LinkedHashMap<>();
    dataSourceParameterMap.put("order1", dataSourceParameter);
    Map<String, Map<String, DataSourceParameter>> schemaDataSources = new HashMap<>();
    schemaDataSources.put("order", dataSourceParameterMap);
    return new ProxyDataSourceContext(schemaDataSources);
}
} | class ProxyDataSourceContextTest {
@Test
public void assertEmptySchemaDataSources() {
Map<String, Map<String, DataSourceParameter>> schemaDataSources = new HashMap<>();
ProxyDataSourceContext proxyDataSourceContext = new ProxyDataSourceContext(schemaDataSources);
assertThat(proxyDataSourceContext.getDatabaseType(), instanceOf(MySQLDatabaseType.class));
assertTrue(proxyDataSourceContext.getDataSourcesMap().isEmpty());
}
@Test(expected = ShardingSphereException.class)
public void assertWrongSchemaDataSources() {
DataSourceParameter dataSourceParameter = new DataSourceParameter();
dataSourceParameter.setUrl("jdbc11:mysql11:xxx");
Map<String, DataSourceParameter> dataSourceParameterMap = new LinkedHashMap<>();
dataSourceParameterMap.put("order1", dataSourceParameter);
Map<String, Map<String, DataSourceParameter>> schemaDataSources = new HashMap<>();
schemaDataSources.put("order", dataSourceParameterMap);
new ProxyDataSourceContext(schemaDataSources);
}
@Test(expected = ShardingSphereException.class)
public void assertThrowByBuild() throws Exception {
JDBCRawBackendDataSourceFactory jdbcRawBackendDataSourceFactory = mock(JDBCRawBackendDataSourceFactory.class);
when(jdbcRawBackendDataSourceFactory.build(anyString(), any())).thenThrow(new ShardingSphereException(""));
build(jdbcRawBackendDataSourceFactory);
reset(jdbcRawBackendDataSourceFactory);
}
// Replaces the JDBCRawBackendDataSourceFactory singleton (a static final
// INSTANCE field) with the given mock via reflection, then builds a
// ProxyDataSourceContext from a single-schema MySQL data-source map.
// Fix: removed a stray @Test annotation. JUnit 4 requires test methods to
// be public, void, and parameterless; annotating this private helper makes
// the whole test class fail validation.
private ProxyDataSourceContext build(final JDBCRawBackendDataSourceFactory jdbcRawBackendDataSourceFactory) throws Exception {
    JDBCRawBackendDataSourceFactory jdbcBackendDataSourceFactory = (JDBCRawBackendDataSourceFactory) JDBCRawBackendDataSourceFactory.getInstance();
    Class<?> jdbcBackendDataSourceFactoryClass = jdbcBackendDataSourceFactory.getClass();
    Field field = jdbcBackendDataSourceFactoryClass.getDeclaredField("INSTANCE");
    // Clear the final modifier so INSTANCE can be reassigned below.
    Field modifiers = field.getClass().getDeclaredField("modifiers");
    modifiers.setAccessible(true);
    modifiers.setInt(field, field.getModifiers() & ~Modifier.FINAL);
    field.setAccessible(true);
    field.set(field, jdbcRawBackendDataSourceFactory);
    DataSourceParameter dataSourceParameter = new DataSourceParameter();
    dataSourceParameter.setUrl("jdbc:mysql:xxx");
    Map<String, DataSourceParameter> dataSourceParameterMap = new LinkedHashMap<>();
    dataSourceParameterMap.put("order1", dataSourceParameter);
    Map<String, Map<String, DataSourceParameter>> schemaDataSources = new HashMap<>();
    schemaDataSources.put("order", dataSourceParameterMap);
    return new ProxyDataSourceContext(schemaDataSources);
}
} |
This is going to count as a crash in our SLO monitoring. Can you actually get here, or is this rejected by the parser? If you can actually get here you might throw an `UnsupportedOperationException`; if you can't actually get here, consider `IllegalArgumentException` (which will still count as a crash but makes it clear it isn't a problem with this code). | void validateJavaUdf(ResolvedNodes.ResolvedCreateFunctionStmt createFunctionStmt) {
for (FunctionArgumentType argumentType :
createFunctionStmt.getSignature().getFunctionArgumentList()) {
Type type = argumentType.getType();
if (type == null) {
throw new UnsupportedOperationException("UDF templated argument types are not supported.");
}
validateJavaUdfZetaSqlType(type);
}
if (createFunctionStmt.getReturnType() == null) {
throw new NullPointerException("UDF return type must not be null.");
}
validateJavaUdfZetaSqlType(createFunctionStmt.getReturnType());
} | throw new NullPointerException("UDF return type must not be null."); | void validateJavaUdf(ResolvedNodes.ResolvedCreateFunctionStmt createFunctionStmt) {
for (FunctionArgumentType argumentType :
createFunctionStmt.getSignature().getFunctionArgumentList()) {
Type type = argumentType.getType();
if (type == null) {
throw new UnsupportedOperationException("UDF templated argument types are not supported.");
}
validateJavaUdfZetaSqlType(type);
}
if (createFunctionStmt.getReturnType() == null) {
throw new IllegalArgumentException("UDF return type must not be null.");
}
validateJavaUdfZetaSqlType(createFunctionStmt.getReturnType());
} | class BeamZetaSqlCatalog {
public static final String PRE_DEFINED_WINDOW_FUNCTIONS = "pre_defined_window_functions";
public static final String USER_DEFINED_SQL_FUNCTIONS = "user_defined_functions";
public static final String USER_DEFINED_JAVA_SCALAR_FUNCTIONS =
"user_defined_java_scalar_functions";
/**
* Same as {@link Function}.ZETASQL_FUNCTION_GROUP_NAME. Identifies built-in ZetaSQL functions.
*/
public static final String ZETASQL_FUNCTION_GROUP_NAME = "ZetaSQL";
private static final ImmutableList<String> PRE_DEFINED_WINDOW_FUNCTION_DECLARATIONS =
ImmutableList.of(
"CREATE FUNCTION TUMBLE(ts TIMESTAMP, window_size STRING) AS (1);",
"CREATE FUNCTION TUMBLE_START(window_size STRING) RETURNS TIMESTAMP AS (null);",
"CREATE FUNCTION TUMBLE_END(window_size STRING) RETURNS TIMESTAMP AS (null);",
"CREATE FUNCTION HOP(ts TIMESTAMP, emit_frequency STRING, window_size STRING) AS (1);",
"CREATE FUNCTION HOP_START(emit_frequency STRING, window_size STRING) "
+ "RETURNS TIMESTAMP AS (null);",
"CREATE FUNCTION HOP_END(emit_frequency STRING, window_size STRING) "
+ "RETURNS TIMESTAMP AS (null);",
"CREATE FUNCTION SESSION(ts TIMESTAMP, session_gap STRING) AS (1);",
"CREATE FUNCTION SESSION_START(session_gap STRING) RETURNS TIMESTAMP AS (null);",
"CREATE FUNCTION SESSION_END(session_gap STRING) RETURNS TIMESTAMP AS (null);");
/** The top-level Calcite schema, which may contain sub-schemas. */
private final SchemaPlus calciteSchema;
/**
* The top-level ZetaSQL catalog, which may contain nested catalogs for qualified table and
* function references.
*/
private final SimpleCatalog zetaSqlCatalog;
private final JavaTypeFactory typeFactory;
private final JavaUdfLoader javaUdfLoader = new JavaUdfLoader();
private final Map<List<String>, ResolvedNodes.ResolvedCreateFunctionStmt> sqlScalarUdfs =
new HashMap<>();
/** User-defined table valued functions. */
private final Map<List<String>, ResolvedNode> sqlUdtvfs = new HashMap<>();
private final Map<List<String>, UserFunctionDefinitions.JavaScalarFunction> javaScalarUdfs =
new HashMap<>();
private BeamZetaSqlCatalog(
SchemaPlus calciteSchema, SimpleCatalog zetaSqlCatalog, JavaTypeFactory typeFactory) {
this.calciteSchema = calciteSchema;
this.zetaSqlCatalog = zetaSqlCatalog;
this.typeFactory = typeFactory;
}
/** Return catalog pre-populated with builtin functions. */
static BeamZetaSqlCatalog create(
SchemaPlus calciteSchema, JavaTypeFactory typeFactory, AnalyzerOptions options) {
BeamZetaSqlCatalog catalog =
new BeamZetaSqlCatalog(
calciteSchema, new SimpleCatalog(calciteSchema.getName()), typeFactory);
catalog.addFunctionsToCatalog(options);
return catalog;
}
SimpleCatalog getZetaSqlCatalog() {
return zetaSqlCatalog;
}
void addTables(List<List<String>> tables, QueryTrait queryTrait) {
tables.forEach(table -> addTableToLeafCatalog(table, queryTrait));
}
// Registers a user-defined function in this catalog. The function is
// recorded in the matching in-memory map (SQL scalar UDFs, or validated
// and loaded Java scalar UDFs) and then added to the ZetaSQL catalog so
// the analyzer can resolve references to it.
// Throws IllegalArgumentException for unrecognized function groups, and
// whatever validateJavaUdf/loadScalarFunction throw for invalid Java UDFs.
void addFunction(ResolvedNodes.ResolvedCreateFunctionStmt createFunctionStmt) {
String functionGroup = getFunctionGroup(createFunctionStmt);
switch (functionGroup) {
case USER_DEFINED_SQL_FUNCTIONS:
sqlScalarUdfs.put(createFunctionStmt.getNamePath(), createFunctionStmt);
break;
case USER_DEFINED_JAVA_SCALAR_FUNCTIONS:
// Validate argument/return types before loading any code from the jar.
validateJavaUdf(createFunctionStmt);
String jarPath = getJarPath(createFunctionStmt);
ScalarFn scalarFn =
javaUdfLoader.loadScalarFunction(createFunctionStmt.getNamePath(), jarPath);
Method method = ScalarFnReflector.getApplyMethod(scalarFn);
javaScalarUdfs.put(
createFunctionStmt.getNamePath(),
UserFunctionDefinitions.JavaScalarFunction.create(method, jarPath));
break;
default:
throw new IllegalArgumentException(
String.format("Encountered unrecognized function group %s.", functionGroup));
}
// Register with ZetaSQL regardless of group, using AGGREGATE or SCALAR
// mode based on the statement.
zetaSqlCatalog.addFunction(
new Function(
createFunctionStmt.getNamePath(),
functionGroup,
createFunctionStmt.getIsAggregate()
? ZetaSQLFunctions.FunctionEnums.Mode.AGGREGATE
: ZetaSQLFunctions.FunctionEnums.Mode.SCALAR,
ImmutableList.of(createFunctionStmt.getSignature())));
}
/**
* Throws {@link UnsupportedOperationException} if ZetaSQL type is not supported in Java UDF.
* Supported types are a subset of the types supported by {@link BeamJavaUdfCalcRule}.
*
* <p>Allowed: INT64, DOUBLE, BOOL, STRING, BYTES. All other known kinds are
* rejected, and unknown kinds are rejected with a distinct message.
*/
void validateJavaUdfZetaSqlType(Type type) {
switch (type.getKind()) {
case TYPE_INT64:
case TYPE_DOUBLE:
case TYPE_BOOL:
case TYPE_STRING:
case TYPE_BYTES:
// Supported — fall through to return normally.
break;
case TYPE_NUMERIC:
case TYPE_DATE:
case TYPE_TIME:
case TYPE_DATETIME:
case TYPE_TIMESTAMP:
case TYPE_ARRAY:
case TYPE_STRUCT:
// Known but intentionally disallowed in Java UDF signatures.
throw new UnsupportedOperationException(
"ZetaSQL type not allowed in Java UDF: " + type.getKind().name());
default:
throw new UnsupportedOperationException("Unknown ZetaSQL type: " + type.getKind().name());
}
}
void addTableValuedFunction(
ResolvedNodes.ResolvedCreateTableFunctionStmt createTableFunctionStmt) {
zetaSqlCatalog.addTableValuedFunction(
new TableValuedFunction.FixedOutputSchemaTVF(
createTableFunctionStmt.getNamePath(),
createTableFunctionStmt.getSignature(),
TVFRelation.createColumnBased(
createTableFunctionStmt.getQuery().getColumnList().stream()
.map(c -> TVFRelation.Column.create(c.getName(), c.getType()))
.collect(Collectors.toList()))));
sqlUdtvfs.put(createTableFunctionStmt.getNamePath(), createTableFunctionStmt.getQuery());
}
UserFunctionDefinitions getUserFunctionDefinitions() {
return UserFunctionDefinitions.newBuilder()
.setSqlScalarFunctions(ImmutableMap.copyOf(sqlScalarUdfs))
.setSqlTableValuedFunctions(ImmutableMap.copyOf(sqlUdtvfs))
.setJavaScalarFunctions(ImmutableMap.copyOf(javaScalarUdfs))
.build();
}
// Populates the ZetaSQL catalog with everything available before user
// statements are analyzed: the allow-listed ZetaSQL builtins, the
// pre-defined windowing scalar functions, the windowing TVFs, and any
// UDFs already declared on the Calcite schema.
private void addFunctionsToCatalog(AnalyzerOptions options) {
ZetaSQLBuiltinFunctionOptions zetasqlBuiltinFunctionOptions =
new ZetaSQLBuiltinFunctionOptions(options.getLanguageOptions());
// Only expose the explicitly allow-listed builtin signatures.
SupportedZetaSqlBuiltinFunctions.ALLOWLIST.forEach(
zetasqlBuiltinFunctionOptions::includeFunctionSignatureId);
zetaSqlCatalog.addZetaSQLFunctions(zetasqlBuiltinFunctionOptions);
addWindowScalarFunctions(options);
addWindowTvfs();
addUdfsFromSchema();
}
private void addWindowScalarFunctions(AnalyzerOptions options) {
PRE_DEFINED_WINDOW_FUNCTION_DECLARATIONS.stream()
.map(
func ->
(ResolvedNodes.ResolvedCreateFunctionStmt)
Analyzer.analyzeStatement(func, options, zetaSqlCatalog))
.map(
resolvedFunc ->
new Function(
String.join(".", resolvedFunc.getNamePath()),
PRE_DEFINED_WINDOW_FUNCTIONS,
ZetaSQLFunctions.FunctionEnums.Mode.SCALAR,
ImmutableList.of(resolvedFunc.getSignature())))
.forEach(zetaSqlCatalog::addFunction);
}
@SuppressWarnings({
"nullness"
})
private void addWindowTvfs() {
FunctionArgumentType retType =
new FunctionArgumentType(ZetaSQLFunctions.SignatureArgumentKind.ARG_TYPE_RELATION);
FunctionArgumentType inputTableType =
new FunctionArgumentType(ZetaSQLFunctions.SignatureArgumentKind.ARG_TYPE_RELATION);
FunctionArgumentType descriptorType =
new FunctionArgumentType(
ZetaSQLFunctions.SignatureArgumentKind.ARG_TYPE_DESCRIPTOR,
FunctionArgumentType.FunctionArgumentTypeOptions.builder()
.setDescriptorResolutionTableOffset(0)
.build(),
1);
FunctionArgumentType stringType =
new FunctionArgumentType(TypeFactory.createSimpleType(ZetaSQLType.TypeKind.TYPE_STRING));
zetaSqlCatalog.addTableValuedFunction(
new TableValuedFunction.ForwardInputSchemaToOutputSchemaWithAppendedColumnTVF(
ImmutableList.of(TVFStreamingUtils.FIXED_WINDOW_TVF),
new FunctionSignature(
retType, ImmutableList.of(inputTableType, descriptorType, stringType), -1),
ImmutableList.of(
TVFRelation.Column.create(
TVFStreamingUtils.WINDOW_START,
TypeFactory.createSimpleType(ZetaSQLType.TypeKind.TYPE_TIMESTAMP)),
TVFRelation.Column.create(
TVFStreamingUtils.WINDOW_END,
TypeFactory.createSimpleType(ZetaSQLType.TypeKind.TYPE_TIMESTAMP))),
null,
null));
zetaSqlCatalog.addTableValuedFunction(
new TableValuedFunction.ForwardInputSchemaToOutputSchemaWithAppendedColumnTVF(
ImmutableList.of(TVFStreamingUtils.SLIDING_WINDOW_TVF),
new FunctionSignature(
retType,
ImmutableList.of(inputTableType, descriptorType, stringType, stringType),
-1),
ImmutableList.of(
TVFRelation.Column.create(
TVFStreamingUtils.WINDOW_START,
TypeFactory.createSimpleType(ZetaSQLType.TypeKind.TYPE_TIMESTAMP)),
TVFRelation.Column.create(
TVFStreamingUtils.WINDOW_END,
TypeFactory.createSimpleType(ZetaSQLType.TypeKind.TYPE_TIMESTAMP))),
null,
null));
zetaSqlCatalog.addTableValuedFunction(
new TableValuedFunction.ForwardInputSchemaToOutputSchemaWithAppendedColumnTVF(
ImmutableList.of(TVFStreamingUtils.SESSION_WINDOW_TVF),
new FunctionSignature(
retType,
ImmutableList.of(inputTableType, descriptorType, descriptorType, stringType),
-1),
ImmutableList.of(
TVFRelation.Column.create(
TVFStreamingUtils.WINDOW_START,
TypeFactory.createSimpleType(ZetaSQLType.TypeKind.TYPE_TIMESTAMP)),
TVFRelation.Column.create(
TVFStreamingUtils.WINDOW_END,
TypeFactory.createSimpleType(ZetaSQLType.TypeKind.TYPE_TIMESTAMP))),
null,
null));
}
private void addUdfsFromSchema() {
for (String functionName : calciteSchema.getFunctionNames()) {
Collection<org.apache.beam.vendor.calcite.v1_20_0.org.apache.calcite.schema.Function>
functions = calciteSchema.getFunctions(functionName);
if (functions.size() != 1) {
throw new IllegalArgumentException(
String.format(
"Expected exactly 1 definition for function '%s', but found %d."
+ " Beam ZetaSQL supports only a single function definition per function name (BEAM-12073).",
functionName, functions.size()));
}
for (org.apache.beam.vendor.calcite.v1_20_0.org.apache.calcite.schema.Function function :
functions) {
if (function instanceof ScalarFunctionImpl) {
ScalarFunctionImpl scalarFunction = (ScalarFunctionImpl) function;
validateScalarFunctionImpl(scalarFunction);
List<String> path = Arrays.asList(functionName.split("\\."));
Method method = scalarFunction.method;
javaScalarUdfs.put(path, UserFunctionDefinitions.JavaScalarFunction.create(method, ""));
FunctionArgumentType resultType =
new FunctionArgumentType(
ZetaSqlCalciteTranslationUtils.toZetaSqlType(
scalarFunction.getReturnType(typeFactory)));
List<FunctionArgumentType> argumentTypes =
scalarFunction.getParameters().stream()
.map(
(arg) ->
new FunctionArgumentType(
ZetaSqlCalciteTranslationUtils.toZetaSqlType(
arg.getType(typeFactory))))
.collect(Collectors.toList());
FunctionSignature functionSignature =
new FunctionSignature(resultType, argumentTypes, 0L);
zetaSqlCatalog.addFunction(
new Function(
path,
USER_DEFINED_JAVA_SCALAR_FUNCTIONS,
ZetaSQLFunctions.FunctionEnums.Mode.SCALAR,
ImmutableList.of(functionSignature)));
} else {
throw new IllegalArgumentException(
String.format(
"Function %s has unrecognized implementation type %s.",
functionName, function.getClass().getName()));
}
}
}
}
private void validateScalarFunctionImpl(ScalarFunctionImpl scalarFunction) {
for (FunctionParameter parameter : scalarFunction.getParameters()) {
validateJavaUdfCalciteType(parameter.getType(typeFactory));
}
validateJavaUdfCalciteType(scalarFunction.getReturnType(typeFactory));
}
/**
* Throws {@link UnsupportedOperationException} if Calcite type is not supported in Java UDF.
* Supported types are a subset of the corresponding Calcite types supported by {@link
* BeamJavaUdfCalcRule}.
*/
private void validateJavaUdfCalciteType(RelDataType type) {
switch (type.getSqlTypeName()) {
case BIGINT:
case DOUBLE:
case BOOLEAN:
case VARCHAR:
case VARBINARY:
break;
case DECIMAL:
case DATE:
case TIME:
case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
case TIMESTAMP:
case ARRAY:
case ROW:
throw new UnsupportedOperationException(
"Calcite type not allowed in Java UDF: " + type.getSqlTypeName().getName());
default:
throw new UnsupportedOperationException(
"Unknown Calcite type: " + type.getSqlTypeName().getName());
}
}
private String getFunctionGroup(ResolvedNodes.ResolvedCreateFunctionStmt createFunctionStmt) {
switch (createFunctionStmt.getLanguage().toUpperCase()) {
case "JAVA":
if (createFunctionStmt.getIsAggregate()) {
throw new UnsupportedOperationException(
"Java SQL aggregate functions are not supported (BEAM-10925).");
}
return USER_DEFINED_JAVA_SCALAR_FUNCTIONS;
case "SQL":
if (createFunctionStmt.getIsAggregate()) {
throw new UnsupportedOperationException(
"Native SQL aggregate functions are not supported (BEAM-9954).");
}
return USER_DEFINED_SQL_FUNCTIONS;
case "PY":
case "PYTHON":
case "JS":
case "JAVASCRIPT":
throw new UnsupportedOperationException(
String.format(
"Function %s uses unsupported language %s.",
String.join(".", createFunctionStmt.getNamePath()),
createFunctionStmt.getLanguage()));
default:
throw new IllegalArgumentException(
String.format(
"Function %s uses unrecognized language %s.",
String.join(".", createFunctionStmt.getNamePath()),
createFunctionStmt.getLanguage()));
}
}
/**
* Assume last element in tablePath is a table name, and everything before is catalogs. So the
* logic is to create nested catalogs until the last level, then add a table at the last level.
*
* <p>Table schema is extracted from Calcite schema based on the table name resolution strategy,
* e.g. either by drilling down the schema.getSubschema() path or joining the table name with dots
* to construct a single compound identifier (e.g. Data Catalog use case).
*/
private void addTableToLeafCatalog(List<String> tablePath, QueryTrait queryTrait) {
SimpleCatalog leafCatalog = createNestedCatalogs(zetaSqlCatalog, tablePath);
org.apache.beam.vendor.calcite.v1_20_0.org.apache.calcite.schema.Table calciteTable =
TableResolution.resolveCalciteTable(calciteSchema, tablePath);
if (calciteTable == null) {
throw new ZetaSqlException(
"Wasn't able to resolve the path "
+ tablePath
+ " in schema: "
+ calciteSchema.getName());
}
RelDataType rowType = calciteTable.getRowType(typeFactory);
TableResolution.SimpleTableWithPath tableWithPath =
TableResolution.SimpleTableWithPath.of(tablePath);
queryTrait.addResolvedTable(tableWithPath);
addFieldsToTable(tableWithPath, rowType);
leafCatalog.addSimpleTable(tableWithPath.getTable());
}
private static void addFieldsToTable(
TableResolution.SimpleTableWithPath tableWithPath, RelDataType rowType) {
for (RelDataTypeField field : rowType.getFieldList()) {
tableWithPath
.getTable()
.addSimpleColumn(
field.getName(), ZetaSqlCalciteTranslationUtils.toZetaSqlType(field.getType()));
}
}
/** For table path like a.b.c we assume c is the table and a.b are the nested catalogs/schemas. */
private static SimpleCatalog createNestedCatalogs(SimpleCatalog catalog, List<String> tablePath) {
SimpleCatalog currentCatalog = catalog;
for (int i = 0; i < tablePath.size() - 1; i++) {
String nextCatalogName = tablePath.get(i);
Optional<SimpleCatalog> existing = tryGetExisting(currentCatalog, nextCatalogName);
currentCatalog =
existing.isPresent() ? existing.get() : addNewCatalog(currentCatalog, nextCatalogName);
}
return currentCatalog;
}
private static Optional<SimpleCatalog> tryGetExisting(
SimpleCatalog currentCatalog, String nextCatalogName) {
return currentCatalog.getCatalogList().stream()
.filter(c -> nextCatalogName.equals(c.getFullName()))
.findFirst();
}
private static SimpleCatalog addNewCatalog(SimpleCatalog currentCatalog, String nextCatalogName) {
SimpleCatalog nextCatalog = new SimpleCatalog(nextCatalogName);
currentCatalog.addSimpleCatalog(nextCatalog);
return nextCatalog;
}
private static String getJarPath(ResolvedNodes.ResolvedCreateFunctionStmt createFunctionStmt) {
String jarPath = getOptionStringValue(createFunctionStmt, "path");
if (jarPath.isEmpty()) {
throw new IllegalArgumentException(
String.format(
"No jar was provided to define function %s. Add 'OPTIONS (path=<jar location>)' to the CREATE FUNCTION statement.",
String.join(".", createFunctionStmt.getNamePath())));
}
return jarPath;
}
private static String getOptionStringValue(
ResolvedNodes.ResolvedCreateFunctionStmt createFunctionStmt, String optionName) {
for (ResolvedNodes.ResolvedOption option : createFunctionStmt.getOptionList()) {
if (optionName.equals(option.getName())) {
if (option.getValue() == null) {
throw new IllegalArgumentException(
String.format(
"Option '%s' has null value (expected %s).",
optionName, ZetaSQLType.TypeKind.TYPE_STRING));
}
if (option.getValue().getType().getKind() != ZetaSQLType.TypeKind.TYPE_STRING) {
throw new IllegalArgumentException(
String.format(
"Option '%s' has type %s (expected %s).",
optionName,
option.getValue().getType().getKind(),
ZetaSQLType.TypeKind.TYPE_STRING));
}
return ((ResolvedNodes.ResolvedLiteral) option.getValue()).getValue().getStringValue();
}
}
return "";
}
} | class BeamZetaSqlCatalog {
public static final String PRE_DEFINED_WINDOW_FUNCTIONS = "pre_defined_window_functions";
public static final String USER_DEFINED_SQL_FUNCTIONS = "user_defined_functions";
public static final String USER_DEFINED_JAVA_SCALAR_FUNCTIONS =
"user_defined_java_scalar_functions";
/**
* Same as {@link Function}.ZETASQL_FUNCTION_GROUP_NAME. Identifies built-in ZetaSQL functions.
*/
public static final String ZETASQL_FUNCTION_GROUP_NAME = "ZetaSQL";
private static final ImmutableList<String> PRE_DEFINED_WINDOW_FUNCTION_DECLARATIONS =
ImmutableList.of(
"CREATE FUNCTION TUMBLE(ts TIMESTAMP, window_size STRING) AS (1);",
"CREATE FUNCTION TUMBLE_START(window_size STRING) RETURNS TIMESTAMP AS (null);",
"CREATE FUNCTION TUMBLE_END(window_size STRING) RETURNS TIMESTAMP AS (null);",
"CREATE FUNCTION HOP(ts TIMESTAMP, emit_frequency STRING, window_size STRING) AS (1);",
"CREATE FUNCTION HOP_START(emit_frequency STRING, window_size STRING) "
+ "RETURNS TIMESTAMP AS (null);",
"CREATE FUNCTION HOP_END(emit_frequency STRING, window_size STRING) "
+ "RETURNS TIMESTAMP AS (null);",
"CREATE FUNCTION SESSION(ts TIMESTAMP, session_gap STRING) AS (1);",
"CREATE FUNCTION SESSION_START(session_gap STRING) RETURNS TIMESTAMP AS (null);",
"CREATE FUNCTION SESSION_END(session_gap STRING) RETURNS TIMESTAMP AS (null);");
/** The top-level Calcite schema, which may contain sub-schemas. */
private final SchemaPlus calciteSchema;
/**
* The top-level ZetaSQL catalog, which may contain nested catalogs for qualified table and
* function references.
*/
private final SimpleCatalog zetaSqlCatalog;
private final JavaTypeFactory typeFactory;
private final JavaUdfLoader javaUdfLoader = new JavaUdfLoader();
private final Map<List<String>, ResolvedNodes.ResolvedCreateFunctionStmt> sqlScalarUdfs =
new HashMap<>();
/** User-defined table valued functions. */
private final Map<List<String>, ResolvedNode> sqlUdtvfs = new HashMap<>();
private final Map<List<String>, UserFunctionDefinitions.JavaScalarFunction> javaScalarUdfs =
new HashMap<>();
private BeamZetaSqlCatalog(
SchemaPlus calciteSchema, SimpleCatalog zetaSqlCatalog, JavaTypeFactory typeFactory) {
this.calciteSchema = calciteSchema;
this.zetaSqlCatalog = zetaSqlCatalog;
this.typeFactory = typeFactory;
}
/** Return catalog pre-populated with builtin functions. */
static BeamZetaSqlCatalog create(
SchemaPlus calciteSchema, JavaTypeFactory typeFactory, AnalyzerOptions options) {
BeamZetaSqlCatalog catalog =
new BeamZetaSqlCatalog(
calciteSchema, new SimpleCatalog(calciteSchema.getName()), typeFactory);
catalog.addFunctionsToCatalog(options);
return catalog;
}
SimpleCatalog getZetaSqlCatalog() {
return zetaSqlCatalog;
}
void addTables(List<List<String>> tables, QueryTrait queryTrait) {
tables.forEach(table -> addTableToLeafCatalog(table, queryTrait));
}
void addFunction(ResolvedNodes.ResolvedCreateFunctionStmt createFunctionStmt) {
String functionGroup = getFunctionGroup(createFunctionStmt);
switch (functionGroup) {
case USER_DEFINED_SQL_FUNCTIONS:
sqlScalarUdfs.put(createFunctionStmt.getNamePath(), createFunctionStmt);
break;
case USER_DEFINED_JAVA_SCALAR_FUNCTIONS:
validateJavaUdf(createFunctionStmt);
String jarPath = getJarPath(createFunctionStmt);
ScalarFn scalarFn =
javaUdfLoader.loadScalarFunction(createFunctionStmt.getNamePath(), jarPath);
Method method = ScalarFnReflector.getApplyMethod(scalarFn);
javaScalarUdfs.put(
createFunctionStmt.getNamePath(),
UserFunctionDefinitions.JavaScalarFunction.create(method, jarPath));
break;
default:
throw new IllegalArgumentException(
String.format("Encountered unrecognized function group %s.", functionGroup));
}
zetaSqlCatalog.addFunction(
new Function(
createFunctionStmt.getNamePath(),
functionGroup,
createFunctionStmt.getIsAggregate()
? ZetaSQLFunctions.FunctionEnums.Mode.AGGREGATE
: ZetaSQLFunctions.FunctionEnums.Mode.SCALAR,
ImmutableList.of(createFunctionStmt.getSignature())));
}
/**
* Throws {@link UnsupportedOperationException} if ZetaSQL type is not supported in Java UDF.
* Supported types are a subset of the types supported by {@link BeamJavaUdfCalcRule}.
*/
void validateJavaUdfZetaSqlType(Type type) {
switch (type.getKind()) {
case TYPE_INT64:
case TYPE_DOUBLE:
case TYPE_BOOL:
case TYPE_STRING:
case TYPE_BYTES:
break;
case TYPE_NUMERIC:
case TYPE_DATE:
case TYPE_TIME:
case TYPE_DATETIME:
case TYPE_TIMESTAMP:
case TYPE_ARRAY:
case TYPE_STRUCT:
default:
throw new UnsupportedOperationException(
"ZetaSQL type not allowed in Java UDF: " + type.getKind().name());
}
}
void addTableValuedFunction(
ResolvedNodes.ResolvedCreateTableFunctionStmt createTableFunctionStmt) {
zetaSqlCatalog.addTableValuedFunction(
new TableValuedFunction.FixedOutputSchemaTVF(
createTableFunctionStmt.getNamePath(),
createTableFunctionStmt.getSignature(),
TVFRelation.createColumnBased(
createTableFunctionStmt.getQuery().getColumnList().stream()
.map(c -> TVFRelation.Column.create(c.getName(), c.getType()))
.collect(Collectors.toList()))));
sqlUdtvfs.put(createTableFunctionStmt.getNamePath(), createTableFunctionStmt.getQuery());
}
UserFunctionDefinitions getUserFunctionDefinitions() {
return UserFunctionDefinitions.newBuilder()
.setSqlScalarFunctions(ImmutableMap.copyOf(sqlScalarUdfs))
.setSqlTableValuedFunctions(ImmutableMap.copyOf(sqlUdtvfs))
.setJavaScalarFunctions(ImmutableMap.copyOf(javaScalarUdfs))
.build();
}
private void addFunctionsToCatalog(AnalyzerOptions options) {
ZetaSQLBuiltinFunctionOptions zetasqlBuiltinFunctionOptions =
new ZetaSQLBuiltinFunctionOptions(options.getLanguageOptions());
SupportedZetaSqlBuiltinFunctions.ALLOWLIST.forEach(
zetasqlBuiltinFunctionOptions::includeFunctionSignatureId);
zetaSqlCatalog.addZetaSQLFunctions(zetasqlBuiltinFunctionOptions);
addWindowScalarFunctions(options);
addWindowTvfs();
addUdfsFromSchema();
}
private void addWindowScalarFunctions(AnalyzerOptions options) {
PRE_DEFINED_WINDOW_FUNCTION_DECLARATIONS.stream()
.map(
func ->
(ResolvedNodes.ResolvedCreateFunctionStmt)
Analyzer.analyzeStatement(func, options, zetaSqlCatalog))
.map(
resolvedFunc ->
new Function(
String.join(".", resolvedFunc.getNamePath()),
PRE_DEFINED_WINDOW_FUNCTIONS,
ZetaSQLFunctions.FunctionEnums.Mode.SCALAR,
ImmutableList.of(resolvedFunc.getSignature())))
.forEach(zetaSqlCatalog::addFunction);
}
@SuppressWarnings({
"nullness"
})
private void addWindowTvfs() {
FunctionArgumentType retType =
new FunctionArgumentType(ZetaSQLFunctions.SignatureArgumentKind.ARG_TYPE_RELATION);
FunctionArgumentType inputTableType =
new FunctionArgumentType(ZetaSQLFunctions.SignatureArgumentKind.ARG_TYPE_RELATION);
FunctionArgumentType descriptorType =
new FunctionArgumentType(
ZetaSQLFunctions.SignatureArgumentKind.ARG_TYPE_DESCRIPTOR,
FunctionArgumentType.FunctionArgumentTypeOptions.builder()
.setDescriptorResolutionTableOffset(0)
.build(),
1);
FunctionArgumentType stringType =
new FunctionArgumentType(TypeFactory.createSimpleType(ZetaSQLType.TypeKind.TYPE_STRING));
zetaSqlCatalog.addTableValuedFunction(
new TableValuedFunction.ForwardInputSchemaToOutputSchemaWithAppendedColumnTVF(
ImmutableList.of(TVFStreamingUtils.FIXED_WINDOW_TVF),
new FunctionSignature(
retType, ImmutableList.of(inputTableType, descriptorType, stringType), -1),
ImmutableList.of(
TVFRelation.Column.create(
TVFStreamingUtils.WINDOW_START,
TypeFactory.createSimpleType(ZetaSQLType.TypeKind.TYPE_TIMESTAMP)),
TVFRelation.Column.create(
TVFStreamingUtils.WINDOW_END,
TypeFactory.createSimpleType(ZetaSQLType.TypeKind.TYPE_TIMESTAMP))),
null,
null));
zetaSqlCatalog.addTableValuedFunction(
new TableValuedFunction.ForwardInputSchemaToOutputSchemaWithAppendedColumnTVF(
ImmutableList.of(TVFStreamingUtils.SLIDING_WINDOW_TVF),
new FunctionSignature(
retType,
ImmutableList.of(inputTableType, descriptorType, stringType, stringType),
-1),
ImmutableList.of(
TVFRelation.Column.create(
TVFStreamingUtils.WINDOW_START,
TypeFactory.createSimpleType(ZetaSQLType.TypeKind.TYPE_TIMESTAMP)),
TVFRelation.Column.create(
TVFStreamingUtils.WINDOW_END,
TypeFactory.createSimpleType(ZetaSQLType.TypeKind.TYPE_TIMESTAMP))),
null,
null));
zetaSqlCatalog.addTableValuedFunction(
new TableValuedFunction.ForwardInputSchemaToOutputSchemaWithAppendedColumnTVF(
ImmutableList.of(TVFStreamingUtils.SESSION_WINDOW_TVF),
new FunctionSignature(
retType,
ImmutableList.of(inputTableType, descriptorType, descriptorType, stringType),
-1),
ImmutableList.of(
TVFRelation.Column.create(
TVFStreamingUtils.WINDOW_START,
TypeFactory.createSimpleType(ZetaSQLType.TypeKind.TYPE_TIMESTAMP)),
TVFRelation.Column.create(
TVFStreamingUtils.WINDOW_END,
TypeFactory.createSimpleType(ZetaSQLType.TypeKind.TYPE_TIMESTAMP))),
null,
null));
}
private void addUdfsFromSchema() {
for (String functionName : calciteSchema.getFunctionNames()) {
Collection<org.apache.beam.vendor.calcite.v1_20_0.org.apache.calcite.schema.Function>
functions = calciteSchema.getFunctions(functionName);
if (functions.size() != 1) {
throw new IllegalArgumentException(
String.format(
"Expected exactly 1 definition for function '%s', but found %d."
+ " Beam ZetaSQL supports only a single function definition per function name (BEAM-12073).",
functionName, functions.size()));
}
for (org.apache.beam.vendor.calcite.v1_20_0.org.apache.calcite.schema.Function function :
functions) {
if (function instanceof ScalarFunctionImpl) {
ScalarFunctionImpl scalarFunction = (ScalarFunctionImpl) function;
validateScalarFunctionImpl(scalarFunction);
List<String> path = Arrays.asList(functionName.split("\\."));
Method method = scalarFunction.method;
javaScalarUdfs.put(path, UserFunctionDefinitions.JavaScalarFunction.create(method, ""));
FunctionArgumentType resultType =
new FunctionArgumentType(
ZetaSqlCalciteTranslationUtils.toZetaSqlType(
scalarFunction.getReturnType(typeFactory)));
List<FunctionArgumentType> argumentTypes =
scalarFunction.getParameters().stream()
.map(
(arg) ->
new FunctionArgumentType(
ZetaSqlCalciteTranslationUtils.toZetaSqlType(
arg.getType(typeFactory))))
.collect(Collectors.toList());
FunctionSignature functionSignature =
new FunctionSignature(resultType, argumentTypes, 0L);
zetaSqlCatalog.addFunction(
new Function(
path,
USER_DEFINED_JAVA_SCALAR_FUNCTIONS,
ZetaSQLFunctions.FunctionEnums.Mode.SCALAR,
ImmutableList.of(functionSignature)));
} else {
throw new IllegalArgumentException(
String.format(
"Function %s has unrecognized implementation type %s.",
functionName, function.getClass().getName()));
}
}
}
}
private void validateScalarFunctionImpl(ScalarFunctionImpl scalarFunction) {
for (FunctionParameter parameter : scalarFunction.getParameters()) {
validateJavaUdfCalciteType(parameter.getType(typeFactory));
}
validateJavaUdfCalciteType(scalarFunction.getReturnType(typeFactory));
}
/**
* Throws {@link UnsupportedOperationException} if Calcite type is not supported in Java UDF.
* Supported types are a subset of the corresponding Calcite types supported by {@link
* BeamJavaUdfCalcRule}.
*/
private void validateJavaUdfCalciteType(RelDataType type) {
switch (type.getSqlTypeName()) {
case BIGINT:
case DOUBLE:
case BOOLEAN:
case VARCHAR:
case VARBINARY:
break;
case DECIMAL:
case DATE:
case TIME:
case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
case TIMESTAMP:
case ARRAY:
case ROW:
default:
throw new UnsupportedOperationException(
"Calcite type not allowed in ZetaSQL Java UDF: " + type.getSqlTypeName().getName());
}
}
private String getFunctionGroup(ResolvedNodes.ResolvedCreateFunctionStmt createFunctionStmt) {
switch (createFunctionStmt.getLanguage().toUpperCase()) {
case "JAVA":
if (createFunctionStmt.getIsAggregate()) {
throw new UnsupportedOperationException(
"Java SQL aggregate functions are not supported (BEAM-10925).");
}
return USER_DEFINED_JAVA_SCALAR_FUNCTIONS;
case "SQL":
if (createFunctionStmt.getIsAggregate()) {
throw new UnsupportedOperationException(
"Native SQL aggregate functions are not supported (BEAM-9954).");
}
return USER_DEFINED_SQL_FUNCTIONS;
case "PY":
case "PYTHON":
case "JS":
case "JAVASCRIPT":
throw new UnsupportedOperationException(
String.format(
"Function %s uses unsupported language %s.",
String.join(".", createFunctionStmt.getNamePath()),
createFunctionStmt.getLanguage()));
default:
throw new IllegalArgumentException(
String.format(
"Function %s uses unrecognized language %s.",
String.join(".", createFunctionStmt.getNamePath()),
createFunctionStmt.getLanguage()));
}
}
/**
* Assume last element in tablePath is a table name, and everything before is catalogs. So the
* logic is to create nested catalogs until the last level, then add a table at the last level.
*
* <p>Table schema is extracted from Calcite schema based on the table name resolution strategy,
* e.g. either by drilling down the schema.getSubschema() path or joining the table name with dots
* to construct a single compound identifier (e.g. Data Catalog use case).
*/
private void addTableToLeafCatalog(List<String> tablePath, QueryTrait queryTrait) {
SimpleCatalog leafCatalog = createNestedCatalogs(zetaSqlCatalog, tablePath);
org.apache.beam.vendor.calcite.v1_20_0.org.apache.calcite.schema.Table calciteTable =
TableResolution.resolveCalciteTable(calciteSchema, tablePath);
if (calciteTable == null) {
throw new ZetaSqlException(
"Wasn't able to resolve the path "
+ tablePath
+ " in schema: "
+ calciteSchema.getName());
}
RelDataType rowType = calciteTable.getRowType(typeFactory);
TableResolution.SimpleTableWithPath tableWithPath =
TableResolution.SimpleTableWithPath.of(tablePath);
queryTrait.addResolvedTable(tableWithPath);
addFieldsToTable(tableWithPath, rowType);
leafCatalog.addSimpleTable(tableWithPath.getTable());
}
private static void addFieldsToTable(
TableResolution.SimpleTableWithPath tableWithPath, RelDataType rowType) {
for (RelDataTypeField field : rowType.getFieldList()) {
tableWithPath
.getTable()
.addSimpleColumn(
field.getName(), ZetaSqlCalciteTranslationUtils.toZetaSqlType(field.getType()));
}
}
/** For table path like a.b.c we assume c is the table and a.b are the nested catalogs/schemas. */
private static SimpleCatalog createNestedCatalogs(SimpleCatalog catalog, List<String> tablePath) {
SimpleCatalog currentCatalog = catalog;
for (int i = 0; i < tablePath.size() - 1; i++) {
String nextCatalogName = tablePath.get(i);
Optional<SimpleCatalog> existing = tryGetExisting(currentCatalog, nextCatalogName);
currentCatalog =
existing.isPresent() ? existing.get() : addNewCatalog(currentCatalog, nextCatalogName);
}
return currentCatalog;
}
private static Optional<SimpleCatalog> tryGetExisting(
SimpleCatalog currentCatalog, String nextCatalogName) {
return currentCatalog.getCatalogList().stream()
.filter(c -> nextCatalogName.equals(c.getFullName()))
.findFirst();
}
private static SimpleCatalog addNewCatalog(SimpleCatalog currentCatalog, String nextCatalogName) {
SimpleCatalog nextCatalog = new SimpleCatalog(nextCatalogName);
currentCatalog.addSimpleCatalog(nextCatalog);
return nextCatalog;
}
private static String getJarPath(ResolvedNodes.ResolvedCreateFunctionStmt createFunctionStmt) {
String jarPath = getOptionStringValue(createFunctionStmt, "path");
if (jarPath.isEmpty()) {
throw new IllegalArgumentException(
String.format(
"No jar was provided to define function %s. Add 'OPTIONS (path=<jar location>)' to the CREATE FUNCTION statement.",
String.join(".", createFunctionStmt.getNamePath())));
}
return jarPath;
}
private static String getOptionStringValue(
ResolvedNodes.ResolvedCreateFunctionStmt createFunctionStmt, String optionName) {
for (ResolvedNodes.ResolvedOption option : createFunctionStmt.getOptionList()) {
if (optionName.equals(option.getName())) {
if (option.getValue() == null) {
throw new IllegalArgumentException(
String.format(
"Option '%s' has null value (expected %s).",
optionName, ZetaSQLType.TypeKind.TYPE_STRING));
}
if (option.getValue().getType().getKind() != ZetaSQLType.TypeKind.TYPE_STRING) {
throw new IllegalArgumentException(
String.format(
"Option '%s' has type %s (expected %s).",
optionName,
option.getValue().getType().getKind(),
ZetaSQLType.TypeKind.TYPE_STRING));
}
return ((ResolvedNodes.ResolvedLiteral) option.getValue()).getValue().getStringValue();
}
}
return "";
}
} |
db or full db only need one? | public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
TableName tableName = (TableName) o;
return Objects.equals(catalog, tableName.catalog)
&& Objects.equals(tbl, tableName.tbl)
&& Objects.equals(db, tableName.db)
&& Objects.equals(fullDb, tableName.fullDb);
} | && Objects.equals(db, tableName.db) | public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
TableName tableName = (TableName) o;
return Objects.equals(catalog, tableName.catalog)
&& Objects.equals(tbl, tableName.tbl)
&& Objects.equals(db, tableName.db);
} | class TableName implements Writable, GsonPreProcessable, GsonPostProcessable {
public static final String LAMBDA_FUNC_TABLE = "__LAMBDA_TABLE";
private String catalog;
@SerializedName(value = "tbl")
private String tbl;
private String db;
@SerializedName(value = "fullDb")
private String fullDb;
public TableName() {
}
public TableName(String db, String tbl) {
this(null, db, tbl);
}
public TableName(String catalog, String db, String tbl) {
this.catalog = catalog;
this.db = db;
this.tbl = tbl;
}
public void analyze(Analyzer analyzer) throws AnalysisException {
if (Strings.isNullOrEmpty(catalog)) {
catalog = analyzer.getDefaultCatalog();
}
if (Strings.isNullOrEmpty(db)) {
db = analyzer.getDefaultDb();
if (Strings.isNullOrEmpty(db)) {
ErrorReport.reportAnalysisException(ErrorCode.ERR_NO_DB_ERROR);
}
}
if (Strings.isNullOrEmpty(tbl)) {
throw new AnalysisException("Table name is null");
}
}
public void normalization(ConnectContext connectContext) {
try {
if (Strings.isNullOrEmpty(catalog)) {
if (Strings.isNullOrEmpty(connectContext.getCurrentCatalog())) {
ErrorReport.reportAnalysisException(ErrorCode.ERR_BAD_CATALOG_ERROR, catalog);
}
catalog = connectContext.getCurrentCatalog();
}
if (Strings.isNullOrEmpty(db)) {
db = connectContext.getDatabase();
if (Strings.isNullOrEmpty(db)) {
ErrorReport.reportAnalysisException(ErrorCode.ERR_NO_DB_ERROR);
}
}
if (Strings.isNullOrEmpty(tbl)) {
throw new SemanticException("Table name is null");
}
} catch (AnalysisException e) {
throw new SemanticException(e.getMessage());
}
}
public String getDb() {
return db;
}
public void setDb(String db) {
this.db = db;
}
public String getTbl() {
return tbl;
}
public String getCatalog() {
return catalog;
}
public void setCatalog(String catalog) {
this.catalog = catalog;
}
public void setTbl(String tbl) {
this.tbl = tbl;
}
public boolean isEmpty() {
return tbl.isEmpty();
}
public String getCatalogAndDb() {
return Joiner.on(".").skipNulls().join(catalog, db);
}
/**
* Returns true if this name has a non-empty database field and a non-empty
* table name.
*/
public boolean isFullyQualified() {
return db != null && !db.isEmpty() && !tbl.isEmpty();
}
public String getNoClusterString() {
if (db == null) {
return tbl;
} else {
return db + "." + tbl;
}
}
@Override
public String toString() {
if (db == null) {
return tbl;
} else {
return db + "." + tbl;
}
}
public String toSql() {
StringBuilder stringBuilder = new StringBuilder();
if (catalog != null && !CatalogMgr.isInternalCatalog(catalog)) {
stringBuilder.append("`").append(catalog).append("`.");
}
if (db != null) {
stringBuilder.append("`").append(db).append("`.");
}
stringBuilder.append("`").append(tbl).append("`");
return stringBuilder.toString();
}
@Override
public void write(DataOutput out) throws IOException {
Text.writeString(out, ClusterNamespace.getFullName(db));
Text.writeString(out, tbl);
}
public void readFields(DataInput in) throws IOException {
db = ClusterNamespace.getNameFromFullName(Text.readString(in));
tbl = Text.readString(in);
}
@Override
public void gsonPostProcess() throws IOException {
db = ClusterNamespace.getNameFromFullName(fullDb);
}
@Override
public void gsonPreProcess() throws IOException {
fullDb = ClusterNamespace.getFullName(db);
}
@Override
@Override
public int hashCode() {
return Objects.hash(catalog, tbl, db, fullDb);
}
} | class TableName implements Writable, GsonPreProcessable, GsonPostProcessable {
public static final String LAMBDA_FUNC_TABLE = "__LAMBDA_TABLE";
private String catalog;
@SerializedName(value = "tbl")
private String tbl;
private String db;
@SerializedName(value = "fullDb")
private String fullDb;
public TableName() {
}
public TableName(String db, String tbl) {
this(null, db, tbl);
}
public TableName(String catalog, String db, String tbl) {
this.catalog = catalog;
this.db = db;
this.tbl = tbl;
}
public void analyze(Analyzer analyzer) throws AnalysisException {
if (Strings.isNullOrEmpty(catalog)) {
catalog = analyzer.getDefaultCatalog();
}
if (Strings.isNullOrEmpty(db)) {
db = analyzer.getDefaultDb();
if (Strings.isNullOrEmpty(db)) {
ErrorReport.reportAnalysisException(ErrorCode.ERR_NO_DB_ERROR);
}
}
if (Strings.isNullOrEmpty(tbl)) {
throw new AnalysisException("Table name is null");
}
}
public void normalization(ConnectContext connectContext) {
try {
if (Strings.isNullOrEmpty(catalog)) {
if (Strings.isNullOrEmpty(connectContext.getCurrentCatalog())) {
ErrorReport.reportAnalysisException(ErrorCode.ERR_BAD_CATALOG_ERROR, catalog);
}
catalog = connectContext.getCurrentCatalog();
}
if (Strings.isNullOrEmpty(db)) {
db = connectContext.getDatabase();
if (Strings.isNullOrEmpty(db)) {
ErrorReport.reportAnalysisException(ErrorCode.ERR_NO_DB_ERROR);
}
}
if (Strings.isNullOrEmpty(tbl)) {
throw new SemanticException("Table name is null");
}
} catch (AnalysisException e) {
throw new SemanticException(e.getMessage());
}
}
public String getDb() {
return db;
}
public void setDb(String db) {
this.db = db;
}
public String getTbl() {
return tbl;
}
public String getCatalog() {
return catalog;
}
public void setCatalog(String catalog) {
this.catalog = catalog;
}
public void setTbl(String tbl) {
this.tbl = tbl;
}
public boolean isEmpty() {
return tbl.isEmpty();
}
public String getCatalogAndDb() {
return Joiner.on(".").skipNulls().join(catalog, db);
}
/**
* Returns true if this name has a non-empty database field and a non-empty
* table name.
*/
public boolean isFullyQualified() {
    // Both parts must be present: a non-null, non-empty db and a non-empty table.
    boolean hasDb = db != null && !db.isEmpty();
    return hasDb && !tbl.isEmpty();
}
// "db.table" when a database is set, otherwise just the bare table name.
public String getNoClusterString() {
    return db == null ? tbl : db + "." + tbl;
}
@Override
public String toString() {
    // Same rendering as getNoClusterString(): qualify with the database only when one is set.
    return db == null ? tbl : db + "." + tbl;
}
// Renders the name as back-quoted SQL: `catalog`.`db`.`tbl`. The catalog is printed only
// for external (non-internal) catalogs; the database only when it is set.
public String toSql() {
    StringBuilder sql = new StringBuilder();
    boolean printCatalog = catalog != null && !CatalogMgr.isInternalCatalog(catalog);
    if (printCatalog) {
        sql.append('`').append(catalog).append("`.");
    }
    if (db != null) {
        sql.append('`').append(db).append("`.");
    }
    return sql.append('`').append(tbl).append('`').toString();
}
// Binary (Writable) serialization: writes the cluster-qualified db name, then the table.
// Note the catalog field is NOT serialized — presumably for wire-format compatibility; confirm.
@Override
public void write(DataOutput out) throws IOException {
Text.writeString(out, ClusterNamespace.getFullName(db));
Text.writeString(out, tbl);
}
// Inverse of write(): strips the cluster prefix from the stored db name.
public void readFields(DataInput in) throws IOException {
db = ClusterNamespace.getNameFromFullName(Text.readString(in));
tbl = Text.readString(in);
}
// Gson hooks: only fullDb is @SerializedName'd, so db is reconstructed from it after
// deserialization and materialized into it before serialization.
@Override
public void gsonPostProcess() throws IOException {
db = ClusterNamespace.getNameFromFullName(fullDb);
}
@Override
public void gsonPreProcess() throws IOException {
fullDb = ClusterNamespace.getFullName(db);
}
/**
 * Hash code over the full identity triple (catalog, table, database); presumably mirrors
 * the {@code equals} implementation defined elsewhere in this class.
 *
 * <p>NOTE(review): the duplicated {@code @Override} annotation was removed — Java forbids
 * repeating a non-repeatable annotation, so the original line pair did not compile.
 */
@Override
public int hashCode() {
    return Objects.hash(catalog, tbl, db);
}
} |
`getCheckedRecordsCount` might be greater than `getRecordsCount`, so the computed percentage must be capped so that it cannot be greater than 100 | public ConsistencyCheckJobProgressInfo getJobProgressInfo(final String parentJobId) {
Optional<String> checkLatestJobId = PipelineAPIFactory.getGovernanceRepositoryAPI().getCheckLatestJobId(parentJobId);
ShardingSpherePreconditions.checkState(checkLatestJobId.isPresent(), () -> new PipelineJobNotFoundException(parentJobId));
String checkJobId = checkLatestJobId.get();
ConsistencyCheckJobProgress jobItemProgress = getJobItemProgress(checkJobId, 0);
ConsistencyCheckJobProgressInfo result = new ConsistencyCheckJobProgressInfo();
if (null == jobItemProgress) {
return result;
}
int inventoryFinishedPercentage;
LocalDateTime checkBeginTime = new Timestamp(jobItemProgress.getCheckBeginTimeMillis()).toLocalDateTime();
if (null != jobItemProgress.getRecordsCount() && Objects.equals(jobItemProgress.getCheckedRecordsCount(), jobItemProgress.getRecordsCount())) {
inventoryFinishedPercentage = 100;
LocalDateTime checkEndTime = new Timestamp(jobItemProgress.getCheckEndTimeMillis()).toLocalDateTime();
Duration duration = Duration.between(checkBeginTime, checkEndTime);
result.setCheckDuration(duration.toMillis() / 1000);
result.setCheckEndTime(DATE_TIME_FORMATTER.format(checkEndTime));
result.setRemainingTime(0L);
} else {
if (null == jobItemProgress.getRecordsCount()) {
inventoryFinishedPercentage = 0;
} else {
inventoryFinishedPercentage = BigDecimal.valueOf(Math.floorDiv(jobItemProgress.getCheckedRecordsCount() * 100, jobItemProgress.getRecordsCount())).intValue();
Duration duration = Duration.between(checkBeginTime, LocalDateTime.now());
long remainMills = jobItemProgress.getRecordsCount() * 100 / jobItemProgress.getCheckedRecordsCount() * duration.toMillis();
result.setRemainingTime(remainMills / 1000);
}
}
result.setInventoryFinishedPercentage(inventoryFinishedPercentage);
result.setTableName(Optional.ofNullable(jobItemProgress.getTableName()).orElse(""));
result.setCheckBeginTime(DATE_TIME_FORMATTER.format(checkBeginTime));
result.setErrorMessage(getJobItemErrorMessage(checkJobId, 0));
Map<String, DataConsistencyCheckResult> checkJobResult = PipelineAPIFactory.getGovernanceRepositoryAPI().getCheckJobResult(parentJobId, checkJobId);
Optional<DataConsistencyCheckResult> dataConsistencyCheckResult = Optional.ofNullable(checkJobResult.get(jobItemProgress.getTableName()));
dataConsistencyCheckResult.ifPresent(optional -> result.setCheckResult(optional.getContentCheckResult().isMatched()));
return result;
} | inventoryFinishedPercentage = BigDecimal.valueOf(Math.floorDiv(jobItemProgress.getCheckedRecordsCount() * 100, jobItemProgress.getRecordsCount())).intValue(); | public ConsistencyCheckJobProgressInfo getJobProgressInfo(final String parentJobId) {
Optional<String> checkLatestJobId = PipelineAPIFactory.getGovernanceRepositoryAPI().getCheckLatestJobId(parentJobId);
ShardingSpherePreconditions.checkState(checkLatestJobId.isPresent(), () -> new PipelineJobNotFoundException(parentJobId));
String checkJobId = checkLatestJobId.get();
ConsistencyCheckJobProgress jobItemProgress = getJobItemProgress(checkJobId, 0);
ConsistencyCheckJobProgressInfo result = new ConsistencyCheckJobProgressInfo();
if (null == jobItemProgress) {
return result;
}
int finishedPercentage;
LocalDateTime checkBeginTime = new Timestamp(jobItemProgress.getCheckBeginTimeMillis()).toLocalDateTime();
if (null != jobItemProgress.getRecordsCount() && Objects.equals(jobItemProgress.getCheckedRecordsCount(), jobItemProgress.getRecordsCount())) {
finishedPercentage = 100;
LocalDateTime checkEndTime = new Timestamp(jobItemProgress.getCheckEndTimeMillis()).toLocalDateTime();
Duration duration = Duration.between(checkBeginTime, checkEndTime);
result.setDurationSeconds(duration.toMillis() / 1000);
result.setCheckEndTime(DATE_TIME_FORMATTER.format(checkEndTime));
result.setRemainingSeconds(0L);
} else {
if (null == jobItemProgress.getRecordsCount()) {
finishedPercentage = 0;
} else {
finishedPercentage = Math.min(100, BigDecimal.valueOf(Math.floorDiv(jobItemProgress.getCheckedRecordsCount() * 100, jobItemProgress.getRecordsCount())).intValue());
Duration duration = Duration.between(checkBeginTime, LocalDateTime.now());
long remainMills = jobItemProgress.getRecordsCount() * 100 / jobItemProgress.getCheckedRecordsCount() * duration.toMillis();
result.setRemainingSeconds(remainMills / 1000);
}
}
result.setFinishedPercentage(finishedPercentage);
String tableName = null == jobItemProgress.getTableNames() ? null : jobItemProgress.getTableNames().split(",")[0];
result.setTableName(Optional.ofNullable(tableName).orElse(""));
result.setCheckBeginTime(DATE_TIME_FORMATTER.format(checkBeginTime));
result.setErrorMessage(getJobItemErrorMessage(checkJobId, 0));
Map<String, DataConsistencyCheckResult> checkJobResult = PipelineAPIFactory.getGovernanceRepositoryAPI().getCheckJobResult(parentJobId, checkJobId);
Optional<DataConsistencyCheckResult> dataConsistencyCheckResult = Optional.ofNullable(checkJobResult.get(tableName));
dataConsistencyCheckResult.ifPresent(optional -> result.setResult(optional.getContentCheckResult().isMatched()));
return result;
} | class ConsistencyCheckJobAPIImpl extends AbstractPipelineJobAPIImpl implements ConsistencyCheckJobAPI {
private final YamlConsistencyCheckJobProgressSwapper swapper = new YamlConsistencyCheckJobProgressSwapper();
@Override
protected String marshalJobIdLeftPart(final PipelineJobId pipelineJobId) {
    // Left part is "<parentJobId><sequence>": plain concatenation of the two id components.
    ConsistencyCheckJobId checkJobId = (ConsistencyCheckJobId) pipelineJobId;
    return checkJobId.getParentJobId() + checkJobId.getSequence();
}
// Creates (or re-creates) a consistency check job for the given parent migration job and
// starts it. Statement order matters: the latest-check pointer and stale results/registrations
// are cleaned up before the new job configuration is persisted and started.
@Override
public String createJobAndStart(final CreateConsistencyCheckJobParameter parameter) {
GovernanceRepositoryAPI repositoryAPI = PipelineAPIFactory.getGovernanceRepositoryAPI();
String parentJobId = parameter.getJobId();
Optional<String> checkLatestJobId = repositoryAPI.getCheckLatestJobId(parentJobId);
if (checkLatestJobId.isPresent()) {
// Only one unfinished check round per parent job is allowed at a time.
PipelineJobItemProgress progress = getJobItemProgress(checkLatestJobId.get(), 0);
if (null == progress || JobStatus.FINISHED != progress.getStatus()) {
log.info("check job already exists and status is not FINISHED, progress={}", progress);
throw new UncompletedConsistencyCheckJobExistsException(checkLatestJobId.get());
}
}
// Each new round gets the next sequence number; the first round starts at MIN_SEQUENCE.
int sequence = checkLatestJobId.map(optional -> ConsistencyCheckJobId.parseSequence(optional) + 1).orElse(ConsistencyCheckJobId.MIN_SEQUENCE);
String result = marshalJobId(new ConsistencyCheckJobId(parentJobId, sequence));
repositoryAPI.persistCheckLatestJobId(parentJobId, result);
// Clear any stale result/registration under the new id before starting.
repositoryAPI.deleteCheckJobResult(parentJobId, result);
dropJob(result);
YamlConsistencyCheckJobConfiguration yamlConfig = new YamlConsistencyCheckJobConfiguration();
yamlConfig.setJobId(result);
yamlConfig.setParentJobId(parentJobId);
yamlConfig.setAlgorithmTypeName(parameter.getAlgorithmTypeName());
yamlConfig.setAlgorithmProps(parameter.getAlgorithmProps());
start(new YamlConsistencyCheckJobConfigurationSwapper().swapToObject(yamlConfig));
return result;
}
@Override
public Map<String, DataConsistencyCheckResult> getLatestDataConsistencyCheckResult(final String jobId) {
    Optional<String> latestCheckJobId = PipelineAPIFactory.getGovernanceRepositoryAPI().getCheckLatestJobId(jobId);
    if (latestCheckJobId.isPresent()) {
        return PipelineAPIFactory.getGovernanceRepositoryAPI().getCheckJobResult(jobId, latestCheckJobId.get());
    }
    // No check round has ever been recorded for this job.
    return Collections.emptyMap();
}
// Snapshots the check item context into a ConsistencyCheckJobProgress and persists it as YAML
// in the governance repository, keyed by (jobId, shardingItem).
@Override
public void persistJobItemProgress(final PipelineJobItemContext jobItemContext) {
ConsistencyCheckJobItemContext checkJobItemContext = (ConsistencyCheckJobItemContext) jobItemContext;
ConsistencyCheckJobProgress jobProgress = new ConsistencyCheckJobProgress();
jobProgress.setStatus(jobItemContext.getStatus());
jobProgress.setCheckedRecordsCount(checkJobItemContext.getCheckedRecordsCount().get());
jobProgress.setRecordsCount(checkJobItemContext.getRecordsCount());
jobProgress.setCheckBeginTimeMillis(checkJobItemContext.getCheckBeginTimeMillis());
jobProgress.setCheckEndTimeMillis(checkJobItemContext.getCheckEndTimeMillis());
jobProgress.setTableName(checkJobItemContext.getTableName());
YamlConsistencyCheckJobProgress yamlJobProgress = swapper.swapToYamlConfiguration(jobProgress);
PipelineAPIFactory.getGovernanceRepositoryAPI().persistJobItemProgress(jobItemContext.getJobId(), jobItemContext.getShardingItem(), YamlEngine.marshal(yamlJobProgress));
}
@Override
public ConsistencyCheckJobProgress getJobItemProgress(final String jobId, final int shardingItem) {
    // Raw YAML persisted in the governance repository; blank/absent means "never persisted".
    String yamlText = PipelineAPIFactory.getGovernanceRepositoryAPI().getJobItemProgress(jobId, shardingItem);
    return StringUtils.isBlank(yamlText)
            ? null
            : swapper.swapToObject(YamlEngine.unmarshal(yamlText, YamlConsistencyCheckJobProgress.class, true));
}
// Read-modify-write of the persisted progress: only the status field is changed.
// A missing progress record is tolerated (logged and ignored) rather than treated as an error.
@Override
public void updateJobItemStatus(final String jobId, final int shardingItem, final JobStatus status) {
ConsistencyCheckJobProgress jobProgress = getJobItemProgress(jobId, shardingItem);
if (null == jobProgress) {
log.warn("updateJobItemStatus, jobProgress is null, jobId={}, shardingItem={}", jobId, shardingItem);
return;
}
jobProgress.setStatus(status);
PipelineAPIFactory.getGovernanceRepositoryAPI().persistJobItemProgress(jobId, shardingItem, YamlEngine.marshal(swapper.swapToYamlConfiguration(jobProgress)));
}
// Restarts a disabled check job, but refuses to restart one that already FINISHED
// (a finished round must be re-created via createJobAndStart, not resumed).
@Override
public void startDisabledJob(final String jobId) {
log.info("Start disable check job {}", jobId);
PipelineJobItemProgress jobProgress = getJobItemProgress(jobId, 0);
ShardingSpherePreconditions.checkState(null == jobProgress || JobStatus.FINISHED != jobProgress.getStatus(), () -> new PipelineJobHasAlreadyFinishedException(jobId));
super.startDisabledJob(jobId);
}
@Override
public void startByParentJobId(final String parentJobId) {
    log.info("Start check job by parent job id: {}", parentJobId);
    // Resolve the latest check round spawned for this parent; it must exist to be (re)started.
    Optional<String> latestCheckJobId = PipelineAPIFactory.getGovernanceRepositoryAPI().getCheckLatestJobId(parentJobId);
    ShardingSpherePreconditions.checkState(latestCheckJobId.isPresent(), () -> new PipelineJobNotFoundException(parentJobId));
    startDisabledJob(latestCheckJobId.get());
}
@Override
public void stopByParentJobId(final String parentJobId) {
    log.info("Stop check job by parent job id: {}", parentJobId);
    // Resolve the latest check round spawned for this parent; it must exist to be stopped.
    Optional<String> latestCheckJobId = PipelineAPIFactory.getGovernanceRepositoryAPI().getCheckLatestJobId(parentJobId);
    ShardingSpherePreconditions.checkState(latestCheckJobId.isPresent(), () -> new PipelineJobNotFoundException(parentJobId));
    stop(latestCheckJobId.get());
}
/**
 * Loads the consistency check job configuration for the given job id.
 *
 * <p>NOTE(review): removed the duplicated {@code @Override} annotation — a non-repeatable
 * annotation may appear at most once, so the original line pair did not compile.
 *
 * @param jobId consistency check job id
 * @return job configuration
 */
@Override
public ConsistencyCheckJobConfiguration getJobConfiguration(final String jobId) {
    return getJobConfiguration(getElasticJobConfigPOJO(jobId));
}
// Deserializes the elastic-job parameter payload (YAML) into the typed job configuration.
@Override
protected ConsistencyCheckJobConfiguration getJobConfiguration(final JobConfigurationPOJO jobConfigPOJO) {
return new YamlConsistencyCheckJobConfigurationSwapper().swapToObject(jobConfigPOJO.getJobParameter());
}
// Inverse of getJobConfiguration(POJO): typed configuration back to its YAML form.
@Override
protected YamlPipelineJobConfiguration swapToYamlJobConfiguration(final PipelineJobConfiguration jobConfig) {
return new YamlConsistencyCheckJobConfigurationSwapper().swapToYamlConfiguration((ConsistencyCheckJobConfiguration) jobConfig);
}
// The following hooks are unsupported here — presumably they only apply to sharded data
// migration jobs, not to consistency check jobs; confirm against the base class contract.
@Override
public void extendYamlJobConfiguration(final YamlPipelineJobConfiguration yamlJobConfig) {
throw new UnsupportedOperationException();
}
@Override
public PipelineTaskConfiguration buildTaskConfiguration(final PipelineJobConfiguration pipelineJobConfig, final int jobShardingItem, final PipelineProcessConfiguration pipelineProcessConfig) {
throw new UnsupportedOperationException();
}
@Override
public PipelineProcessContext buildPipelineProcessContext(final PipelineJobConfiguration pipelineJobConfig) {
throw new UnsupportedOperationException();
}
@Override
protected PipelineJobInfo getJobInfo(final String jobId) {
throw new UnsupportedOperationException();
}
// Concrete job class instantiated by the elastic-job scheduler.
@Override
protected String getJobClassName() {
return ConsistencyCheckJob.class.getName();
}
@Override
public JobType getJobType() {
return JobType.CONSISTENCY_CHECK;
}
} | class ConsistencyCheckJobAPIImpl extends AbstractPipelineJobAPIImpl implements ConsistencyCheckJobAPI {
private final YamlConsistencyCheckJobProgressSwapper swapper = new YamlConsistencyCheckJobProgressSwapper();
@Override
protected String marshalJobIdLeftPart(final PipelineJobId pipelineJobId) {
ConsistencyCheckJobId jobId = (ConsistencyCheckJobId) pipelineJobId;
return jobId.getParentJobId() + jobId.getSequence();
}
@Override
public String createJobAndStart(final CreateConsistencyCheckJobParameter parameter) {
GovernanceRepositoryAPI repositoryAPI = PipelineAPIFactory.getGovernanceRepositoryAPI();
String parentJobId = parameter.getJobId();
Optional<String> checkLatestJobId = repositoryAPI.getCheckLatestJobId(parentJobId);
if (checkLatestJobId.isPresent()) {
PipelineJobItemProgress progress = getJobItemProgress(checkLatestJobId.get(), 0);
if (null == progress || JobStatus.FINISHED != progress.getStatus()) {
log.info("check job already exists and status is not FINISHED, progress={}", progress);
throw new UncompletedConsistencyCheckJobExistsException(checkLatestJobId.get());
}
}
int sequence = checkLatestJobId.map(optional -> ConsistencyCheckJobId.parseSequence(optional) + 1).orElse(ConsistencyCheckJobId.MIN_SEQUENCE);
String result = marshalJobId(new ConsistencyCheckJobId(parentJobId, sequence));
repositoryAPI.persistCheckLatestJobId(parentJobId, result);
repositoryAPI.deleteCheckJobResult(parentJobId, result);
dropJob(result);
YamlConsistencyCheckJobConfiguration yamlConfig = new YamlConsistencyCheckJobConfiguration();
yamlConfig.setJobId(result);
yamlConfig.setParentJobId(parentJobId);
yamlConfig.setAlgorithmTypeName(parameter.getAlgorithmTypeName());
yamlConfig.setAlgorithmProps(parameter.getAlgorithmProps());
start(new YamlConsistencyCheckJobConfigurationSwapper().swapToObject(yamlConfig));
return result;
}
@Override
public Map<String, DataConsistencyCheckResult> getLatestDataConsistencyCheckResult(final String jobId) {
Optional<String> checkLatestJobId = PipelineAPIFactory.getGovernanceRepositoryAPI().getCheckLatestJobId(jobId);
if (!checkLatestJobId.isPresent()) {
return Collections.emptyMap();
}
return PipelineAPIFactory.getGovernanceRepositoryAPI().getCheckJobResult(jobId, checkLatestJobId.get());
}
@Override
public void persistJobItemProgress(final PipelineJobItemContext jobItemContext) {
ConsistencyCheckJobItemContext checkJobItemContext = (ConsistencyCheckJobItemContext) jobItemContext;
ConsistencyCheckJobProgress jobProgress = new ConsistencyCheckJobProgress();
jobProgress.setStatus(jobItemContext.getStatus());
jobProgress.setCheckedRecordsCount(checkJobItemContext.getCheckedRecordsCount().get());
jobProgress.setRecordsCount(checkJobItemContext.getRecordsCount());
jobProgress.setCheckBeginTimeMillis(checkJobItemContext.getCheckBeginTimeMillis());
jobProgress.setCheckEndTimeMillis(checkJobItemContext.getCheckEndTimeMillis());
jobProgress.setTableNames(null == checkJobItemContext.getTableNames() ? null : String.join(",", checkJobItemContext.getTableNames()));
YamlConsistencyCheckJobProgress yamlJobProgress = swapper.swapToYamlConfiguration(jobProgress);
PipelineAPIFactory.getGovernanceRepositoryAPI().persistJobItemProgress(jobItemContext.getJobId(), jobItemContext.getShardingItem(), YamlEngine.marshal(yamlJobProgress));
}
@Override
public ConsistencyCheckJobProgress getJobItemProgress(final String jobId, final int shardingItem) {
String progress = PipelineAPIFactory.getGovernanceRepositoryAPI().getJobItemProgress(jobId, shardingItem);
if (StringUtils.isBlank(progress)) {
return null;
}
return swapper.swapToObject(YamlEngine.unmarshal(progress, YamlConsistencyCheckJobProgress.class, true));
}
@Override
public void updateJobItemStatus(final String jobId, final int shardingItem, final JobStatus status) {
ConsistencyCheckJobProgress jobProgress = getJobItemProgress(jobId, shardingItem);
if (null == jobProgress) {
log.warn("updateJobItemStatus, jobProgress is null, jobId={}, shardingItem={}", jobId, shardingItem);
return;
}
jobProgress.setStatus(status);
PipelineAPIFactory.getGovernanceRepositoryAPI().persistJobItemProgress(jobId, shardingItem, YamlEngine.marshal(swapper.swapToYamlConfiguration(jobProgress)));
}
@Override
public void startDisabledJob(final String jobId) {
log.info("Start disable check job {}", jobId);
PipelineJobItemProgress jobProgress = getJobItemProgress(jobId, 0);
ShardingSpherePreconditions.checkState(null == jobProgress || JobStatus.FINISHED != jobProgress.getStatus(), () -> new PipelineJobHasAlreadyFinishedException(jobId));
super.startDisabledJob(jobId);
}
@Override
public void startByParentJobId(final String parentJobId) {
log.info("Start check job by parent job id: {}", parentJobId);
Optional<String> checkLatestJobId = PipelineAPIFactory.getGovernanceRepositoryAPI().getCheckLatestJobId(parentJobId);
ShardingSpherePreconditions.checkState(checkLatestJobId.isPresent(), () -> new PipelineJobNotFoundException(parentJobId));
startDisabledJob(checkLatestJobId.get());
}
@Override
public void stopByParentJobId(final String parentJobId) {
log.info("Stop check job by parent job id: {}", parentJobId);
Optional<String> checkLatestJobId = PipelineAPIFactory.getGovernanceRepositoryAPI().getCheckLatestJobId(parentJobId);
ShardingSpherePreconditions.checkState(checkLatestJobId.isPresent(), () -> new PipelineJobNotFoundException(parentJobId));
stop(checkLatestJobId.get());
}
/**
 * Loads the consistency check job configuration for the given job id.
 *
 * <p>NOTE(review): removed the duplicated {@code @Override} annotation — a non-repeatable
 * annotation may appear at most once, so the original line pair did not compile.
 *
 * @param jobId consistency check job id
 * @return job configuration
 */
@Override
public ConsistencyCheckJobConfiguration getJobConfiguration(final String jobId) {
    return getJobConfiguration(getElasticJobConfigPOJO(jobId));
}
@Override
protected ConsistencyCheckJobConfiguration getJobConfiguration(final JobConfigurationPOJO jobConfigPOJO) {
return new YamlConsistencyCheckJobConfigurationSwapper().swapToObject(jobConfigPOJO.getJobParameter());
}
@Override
protected YamlPipelineJobConfiguration swapToYamlJobConfiguration(final PipelineJobConfiguration jobConfig) {
return new YamlConsistencyCheckJobConfigurationSwapper().swapToYamlConfiguration((ConsistencyCheckJobConfiguration) jobConfig);
}
@Override
public void extendYamlJobConfiguration(final YamlPipelineJobConfiguration yamlJobConfig) {
throw new UnsupportedOperationException();
}
@Override
public PipelineTaskConfiguration buildTaskConfiguration(final PipelineJobConfiguration pipelineJobConfig, final int jobShardingItem, final PipelineProcessConfiguration pipelineProcessConfig) {
throw new UnsupportedOperationException();
}
@Override
public PipelineProcessContext buildPipelineProcessContext(final PipelineJobConfiguration pipelineJobConfig) {
throw new UnsupportedOperationException();
}
@Override
protected PipelineJobInfo getJobInfo(final String jobId) {
throw new UnsupportedOperationException();
}
@Override
protected String getJobClassName() {
return ConsistencyCheckJob.class.getName();
}
@Override
public JobType getJobType() {
return JobType.CONSISTENCY_CHECK;
}
} |
I was confused because I don't see where the test attempts to catch the failure via a try/catch block or an ExpectedException test rule. | public IntervalWindow assignWindow(Instant timestamp) {
return new IntervalWindow(
BoundedWindow.TIMESTAMP_MIN_VALUE, GlobalWindow.INSTANCE.maxTimestamp());
} | BoundedWindow.TIMESTAMP_MIN_VALUE, GlobalWindow.INSTANCE.maxTimestamp()); | public IntervalWindow assignWindow(Instant timestamp) {
return new IntervalWindow(
BoundedWindow.TIMESTAMP_MIN_VALUE, GlobalWindow.INSTANCE.maxTimestamp());
} | class TestWindowFn extends PartitioningWindowFn<Object, IntervalWindow> {
/**
 * Two test window functions are compatible exactly when they are equal.
 *
 * <p>NOTE(review): removed the duplicated {@code @Override} annotation — repeating a
 * non-repeatable annotation is a compile error.
 */
@Override
public boolean isCompatible(WindowFn<?, ?> other) {
    return equals(other);
}
// Windows produced by this fn are IntervalWindows, so the standard interval coder applies.
@Override
public Coder<IntervalWindow> windowCoder() {
return IntervalWindowCoder.of();
}
} | class TestWindowFn extends PartitioningWindowFn<Object, IntervalWindow> {
/**
 * Two test window functions are compatible exactly when they are equal.
 *
 * <p>NOTE(review): removed the duplicated {@code @Override} annotation — repeating a
 * non-repeatable annotation is a compile error.
 */
@Override
public boolean isCompatible(WindowFn<?, ?> other) {
    return equals(other);
}
@Override
public Coder<IntervalWindow> windowCoder() {
return IntervalWindowCoder.of();
}
} |
Good point, I can try. Without this I got an exception telling me I had to chunk it, but yeah, you're probably right that the header should be enough, provided AsyncFile gives me the file size. | public void writeResponse(AsyncFile file, Type genericType, ServerRequestContext context) throws WebApplicationException {
ResteasyReactiveRequestContext ctx = ((ResteasyReactiveRequestContext) context);
ctx.suspend();
ServerHttpResponse response = context.serverResponse();
response.setChunked(true);
file.handler(buffer -> {
try {
response.write(buffer.getBytes());
} catch (Exception x) {
ctx.resume(x);
return;
}
if (response.isWriteQueueFull()) {
file.pause();
response.addDrainHandler(new Runnable() {
@Override
public void run() {
file.resume();
}
});
}
});
file.endHandler(new Handler<Void>() {
@Override
public void handle(Void event) {
response.end();
ctx.resume();
}
});
} | response.setChunked(true); | public void writeResponse(AsyncFile file, Type genericType, ServerRequestContext context) throws WebApplicationException {
ResteasyReactiveRequestContext ctx = ((ResteasyReactiveRequestContext) context);
ctx.suspend();
ServerHttpResponse response = context.serverResponse();
if (file.getReadLength() != Long.MAX_VALUE) {
response.setResponseHeader(HttpHeaders.CONTENT_LENGTH, String.valueOf(file.getReadLength()));
} else {
response.setChunked(true);
}
file.handler(buffer -> {
try {
response.write(buffer.getBytes());
} catch (Exception x) {
ctx.resume(x);
return;
}
if (response.isWriteQueueFull()) {
file.pause();
response.addDrainHandler(new Runnable() {
@Override
public void run() {
file.resume();
}
});
}
});
file.endHandler(new Handler<Void>() {
@Override
public void handle(Void event) {
file.close();
response.end();
ctx.resume();
}
});
} | class ServerVertxAsyncFileMessageBodyWriter extends VertxAsyncFileMessageBodyWriter
implements ServerMessageBodyWriter<AsyncFile> {
@Override
public boolean isWriteable(Class<?> type, Type genericType, ResteasyReactiveResourceInfo target, MediaType mediaType) {
return AsyncFile.class.isAssignableFrom(type);
}
@Override
} | class ServerVertxAsyncFileMessageBodyWriter extends VertxAsyncFileMessageBodyWriter
implements ServerMessageBodyWriter<AsyncFile> {
@Override
public boolean isWriteable(Class<?> type, Type genericType, ResteasyReactiveResourceInfo target, MediaType mediaType) {
return AsyncFile.class.isAssignableFrom(type);
}
@Override
} |
It's true, though! 😬 But I'll remove it. | OperationHandlerImpl createHandler() throws Exception {
VisitorSession visitorSession = mock(VisitorSession.class);
when(documentAccess.createVisitorSession(any(VisitorParameters.class))).thenAnswer(p -> {
VisitorParameters params = (VisitorParameters)p.getArguments()[0];
assignedParameters.set(params);
VisitorStatistics statistics = new VisitorStatistics();
statistics.setBucketsVisited(bucketsVisited);
params.getControlHandler().onVisitorStatistics(statistics);
ProgressToken progress = new ProgressToken();
params.getControlHandler().onProgress(progress);
params.getControlHandler().onDone(completionCode, "bork bork");
return visitorSession;
});
OperationHandlerImpl.ClusterEnumerator clusterEnumerator = () -> Arrays.asList(new ClusterDef("foo", "configId"));
return new OperationHandlerImpl(documentAccess, clusterEnumerator);
} | OperationHandlerImpl createHandler() throws Exception {
VisitorSession visitorSession = mock(VisitorSession.class);
when(documentAccess.createVisitorSession(any(VisitorParameters.class))).thenAnswer(p -> {
VisitorParameters params = (VisitorParameters)p.getArguments()[0];
assignedParameters.set(params);
VisitorStatistics statistics = new VisitorStatistics();
statistics.setBucketsVisited(bucketsVisited);
params.getControlHandler().onVisitorStatistics(statistics);
ProgressToken progress = new ProgressToken();
params.getControlHandler().onProgress(progress);
params.getControlHandler().onDone(completionCode, "bork bork");
return visitorSession;
});
OperationHandlerImpl.ClusterEnumerator clusterEnumerator = () -> Arrays.asList(new ClusterDef("foo", "configId"));
return new OperationHandlerImpl(documentAccess, clusterEnumerator);
} | class OperationHandlerImplFixture {
DocumentAccess documentAccess = mock(DocumentAccess.class);
AtomicReference<VisitorParameters> assignedParameters = new AtomicReference<>();
VisitorControlHandler.CompletionCode completionCode = VisitorControlHandler.CompletionCode.SUCCESS;
int bucketsVisited = 0;
} | class OperationHandlerImplFixture {
DocumentAccess documentAccess = mock(DocumentAccess.class);
AtomicReference<VisitorParameters> assignedParameters = new AtomicReference<>();
VisitorControlHandler.CompletionCode completionCode = VisitorControlHandler.CompletionCode.SUCCESS;
int bucketsVisited = 0;
} | |
Created issue #31678 to track the above. | public static Optional<Symbol> getDocumentableSymbol(NonTerminalNode node, SemanticModel semanticModel) {
switch (node.kind()) {
case FUNCTION_DEFINITION:
case OBJECT_METHOD_DEFINITION:
case RESOURCE_ACCESSOR_DEFINITION:
case METHOD_DECLARATION:
case SERVICE_DECLARATION:
case TYPE_DEFINITION:
case CLASS_DEFINITION:
return semanticModel.symbol(node);
default:
break;
}
return Optional.empty();
} | public static Optional<Symbol> getDocumentableSymbol(NonTerminalNode node, SemanticModel semanticModel) {
switch (node.kind()) {
case FUNCTION_DEFINITION:
case OBJECT_METHOD_DEFINITION:
case RESOURCE_ACCESSOR_DEFINITION:
case METHOD_DECLARATION:
case SERVICE_DECLARATION:
case TYPE_DEFINITION:
case CLASS_DEFINITION:
return semanticModel.symbol(node);
default:
break;
}
return Optional.empty();
} | class DocumentationGenerator {
// Static utility holder; private constructor prevents instantiation.
private DocumentationGenerator() {
}
/**
* Checks whether the node has documentation.
*
* @param node documentatable {@link NonTerminalNode}
* @return returns True if has documentation False otherwise
*/
public static boolean hasDocs(NonTerminalNode node) {
    // A node is documented when one of its children is a METADATA node that
    // actually carries a documentation string.
    for (Node child : node.children()) {
        boolean isDocumentedMetadata = child.kind() == SyntaxKind.METADATA
                && ((MetadataNode) child).documentationString().isPresent();
        if (isDocumentedMetadata) {
            return true;
        }
    }
    return false;
}
/**
* Returns range of current documentation.
*
* @param node documentatable {@link NonTerminalNode}
* @return returns {@link Range}
*/
public static Optional<Range> getDocsRange(NonTerminalNode node) {
    // Scan the children for a METADATA node carrying a documentation string and
    // report that documentation's line range.
    for (Node child : node.children()) {
        if (child.kind() != SyntaxKind.METADATA) {
            continue;
        }
        MetadataNode metadata = (MetadataNode) child;
        if (metadata.documentationString().isPresent()) {
            return Optional.of(CommonUtil.toRange(metadata.documentationString().get().lineRange()));
        }
    }
    return Optional.empty();
}
/**
* Generate documentation for non-terminal node.
*
* @param node non-terminal node
* @param syntaxTree syntaxTree {@link SyntaxTree}
* @return optional documentation
*/
public static Optional<DocAttachmentInfo> getDocumentationEditForNode(NonTerminalNode node,
                                                                      SyntaxTree syntaxTree) {
    switch (node.kind()) {
        // All function-like definitions share the same documentation template.
        case FUNCTION_DEFINITION:
        case RESOURCE_ACCESSOR_DEFINITION:
        case OBJECT_METHOD_DEFINITION:
            return Optional.of(generateFunctionDocumentation((FunctionDefinitionNode) node, syntaxTree));
        case METHOD_DECLARATION:
            return Optional.of(generateMethodDocumentation((MethodDeclarationNode) node, syntaxTree));
        case SERVICE_DECLARATION:
            return Optional.of(generateServiceDocumentation((ServiceDeclarationNode) node, syntaxTree));
        case TYPE_DEFINITION:
            return Optional.of(generateRecordOrObjectDocumentation((TypeDefinitionNode) node, syntaxTree));
        case CLASS_DEFINITION:
            return Optional.of(generateClassDocumentation((ClassDefinitionNode) node, syntaxTree));
        default:
            // Node kind is not documentable.
            return Optional.empty();
    }
}
/**
* Generate documentation for service node.
*
* @param serviceDeclrNode service declaration node
* @param syntaxTree syntaxTree {@link SyntaxTree}
* @return generated doc attachment
*/
private static DocAttachmentInfo generateServiceDocumentation(ServiceDeclarationNode serviceDeclrNode,
SyntaxTree syntaxTree) {
MetadataNode metadata = serviceDeclrNode.metadata().orElse(null);
// Insert the doc above any existing annotations so it stays attached to the declaration.
Position docStart = CommonUtil.toRange(serviceDeclrNode.lineRange()).getStart();
if (metadata != null && !metadata.annotations().isEmpty()) {
docStart = CommonUtil.toRange(metadata.annotations().get(0).lineRange()).getStart();
}
// Services get only a description placeholder — no parameters/returns.
String desc = String.format("Description%n");
return new DocAttachmentInfo(desc, docStart, getPadding(serviceDeclrNode, syntaxTree));
}
/**
* Generate documentation for function definition node.
*
* @param bLangFunction function definition node
* @param syntaxTree syntaxTree {@link SyntaxTree}
* @return generated doc attachment
*/
// Delegates to the shared function/method documentation builder using the
// definition's signature, resource path, metadata, and full line range.
private static DocAttachmentInfo generateFunctionDocumentation(FunctionDefinitionNode bLangFunction,
SyntaxTree syntaxTree) {
return getFunctionNodeDocumentation(bLangFunction.functionSignature(),
bLangFunction.relativeResourcePath(),
bLangFunction.metadata().orElse(null),
CommonUtil.toRange(bLangFunction.lineRange()),
syntaxTree);
}
/**
* Generate documentation for method declaration node.
*
* @param methodDeclrNode method declaration node
* @param syntaxTree syntaxTree {@link SyntaxTree}
* @return generated doc attachment
*/
// Same shared builder as generateFunctionDocumentation, but sourced from a
// method *declaration* (no body) rather than a definition.
private static DocAttachmentInfo generateMethodDocumentation(MethodDeclarationNode methodDeclrNode,
SyntaxTree syntaxTree) {
return getFunctionNodeDocumentation(methodDeclrNode.methodSignature(),
methodDeclrNode.relativeResourcePath(),
methodDeclrNode.metadata().orElse(null),
CommonUtil.toRange(methodDeclrNode.lineRange()),
syntaxTree);
}
/**
* Generate documentation for record or object type definition node.
*
* @param typeDefNode type definition node
* @param syntaxTree syntaxTree {@link SyntaxTree}
* @return generated doc attachment
*/
// Builds a documentation skeleton for a type definition. Record types document every field
// (plain and defaulted); object types document only public fields.
private static DocAttachmentInfo generateRecordOrObjectDocumentation(TypeDefinitionNode typeDefNode,
SyntaxTree syntaxTree) {
MetadataNode metadata = typeDefNode.metadata().orElse(null);
// Insert the doc above any existing annotations so it stays attached to the definition.
Position docStart = CommonUtil.toRange(typeDefNode.lineRange()).getStart();
if (metadata != null && !metadata.annotations().isEmpty()) {
docStart = CommonUtil.toRange(metadata.annotations().get(0).lineRange()).getStart();
}
io.ballerina.compiler.syntax.tree.Node typeDesc = typeDefNode.typeDescriptor();
String desc = String.format("Description%n");
// Insertion order of the map drives the order of the generated doc lines.
LinkedHashMap<String, String> parameters = new LinkedHashMap<>();
switch (typeDesc.kind()) {
case RECORD_TYPE_DESC:
RecordTypeDescriptorNode recordTypeDescNode = (RecordTypeDescriptorNode) typeDesc;
recordTypeDescNode.fields().forEach(field -> {
Optional<Token> paramName = Optional.empty();
if (field.kind() == SyntaxKind.RECORD_FIELD) {
paramName = Optional.of(((RecordFieldNode) field).fieldName());
} else if (field.kind() == SyntaxKind.RECORD_FIELD_WITH_DEFAULT_VALUE) {
paramName = Optional.of(((RecordFieldWithDefaultValueNode) field).fieldName());
}
// Other member kinds (e.g. rest fields, includes) are skipped.
paramName.ifPresent(param -> parameters.put(param.text(), "Field Description"));
});
break;
case OBJECT_TYPE_DESC:
ObjectTypeDescriptorNode objectTypeDescNode = (ObjectTypeDescriptorNode) typeDesc;
objectTypeDescNode.members().forEach(field -> {
if (field.kind() == SyntaxKind.OBJECT_FIELD &&
((ObjectFieldNode) field).visibilityQualifier().isPresent()) {
ObjectFieldNode fieldNode = (ObjectFieldNode) field;
// Only public fields are part of the documented surface.
if (fieldNode.visibilityQualifier().get().kind() == SyntaxKind.PUBLIC_KEYWORD) {
parameters.put(fieldNode.fieldName().text(), "Field Description");
}
}
});
break;
default:
// Other type descriptors get a description only, with no field entries.
break;
}
return new DocAttachmentInfo(desc, parameters, null, null, docStart, getPadding(typeDefNode, syntaxTree));
}
/**
 * Generates the documentation attachment for a class definition node.
 * Only {@code public} class fields receive an entry in the generated parameter list.
 *
 * @param classDefNode class definition node
 * @param syntaxTree   {@link SyntaxTree}
 * @return generated doc attachment
 */
private static DocAttachmentInfo generateClassDocumentation(ClassDefinitionNode classDefNode,
                                                            SyntaxTree syntaxTree) {
    // Insert above the first annotation when metadata exists, else above the class itself.
    Position docStart = CommonUtil.toRange(classDefNode.lineRange()).getStart();
    MetadataNode metadata = classDefNode.metadata().orElse(null);
    if (metadata != null && !metadata.annotations().isEmpty()) {
        docStart = CommonUtil.toRange(metadata.annotations().get(0).lineRange()).getStart();
    }
    LinkedHashMap<String, String> memberDocs = new LinkedHashMap<>();
    for (Node member : classDefNode.members()) {
        if (member.kind() != SyntaxKind.OBJECT_FIELD) {
            continue;
        }
        ObjectFieldNode objectField = (ObjectFieldNode) member;
        boolean isPublic = objectField.visibilityQualifier()
                .map(qualifier -> qualifier.kind() == SyntaxKind.PUBLIC_KEYWORD)
                .orElse(false);
        if (isPublic) {
            // NOTE(review): record/object type descriptors use "Field Description" for
            // the same kind of member; confirm whether class fields should match that wording.
            memberDocs.put(objectField.fieldName().text(), "Parameter Description");
        }
    }
    return new DocAttachmentInfo(String.format("Description%n"), memberDocs, null, null, docStart,
            getPadding(classDefNode, syntaxTree));
}
/**
 * Builds the documentation attachment for a function-like construct
 * (function definition or method declaration).
 *
 * @param signatureNode the function signature supplying parameters and the return type
 * @param resourceNodes relative resource path nodes (empty for non-resource functions)
 * @param metadata      metadata of the construct, may be {@code null}
 * @param functionRange source range of the construct
 * @param syntaxTree    {@link SyntaxTree}
 * @return generated doc attachment
 */
private static DocAttachmentInfo getFunctionNodeDocumentation(FunctionSignatureNode signatureNode,
                                                              NodeList<Node> resourceNodes,
                                                              MetadataNode metadata, Range functionRange,
                                                              SyntaxTree syntaxTree) {
    Position docStart = functionRange.getStart();
    boolean deprecated = false;
    if (metadata != null && !metadata.annotations().isEmpty()) {
        // A @deprecated annotation triggers an extra "Deprecated" section; the doc is
        // inserted above the first annotation so it stays attached to the construct.
        for (AnnotationNode annotation : metadata.annotations()) {
            io.ballerina.compiler.syntax.tree.Node reference = annotation.annotReference();
            if (reference.kind() == SyntaxKind.SIMPLE_NAME_REFERENCE
                    && "deprecated".equals(((SimpleNameReferenceNode) reference).name().text())) {
                deprecated = true;
            }
        }
        docStart = CommonUtil.toRange(metadata.annotations().get(0).lineRange()).getStart();
    }
    LinkedHashMap<String, String> parameterDocs = new LinkedHashMap<>();
    // Resource path parameters (path segment and rest params) are documented first.
    for (Node pathNode : resourceNodes) {
        if (!(pathNode instanceof ResourcePathParameterNode)) {
            continue;
        }
        if (pathNode.kind() == SyntaxKind.RESOURCE_PATH_SEGMENT_PARAM
                || pathNode.kind() == SyntaxKind.RESOURCE_PATH_REST_PARAM) {
            Token pathParamName = ((ResourcePathParameterNode) pathNode).paramName();
            if (pathParamName != null) {
                parameterDocs.put(pathParamName.text(), "Parameter Description");
            }
        }
    }
    // Signature parameters follow, in declaration order.
    for (Node param : signatureNode.parameters()) {
        Optional<Token> paramName;
        switch (param.kind()) {
            case REQUIRED_PARAM:
                paramName = ((RequiredParameterNode) param).paramName();
                break;
            case DEFAULTABLE_PARAM:
                paramName = ((DefaultableParameterNode) param).paramName();
                break;
            case REST_PARAM:
                paramName = ((RestParameterNode) param).paramName();
                break;
            default:
                paramName = Optional.empty();
                break;
        }
        paramName.ifPresent(token -> parameterDocs.put(token.text(), "Parameter Description"));
    }
    String returnDoc = signatureNode.returnTypeDesc().isPresent() ? "Return Value Description" : null;
    String deprecatedDoc = deprecated ? "Deprecated Description" : null;
    return new DocAttachmentInfo(String.format("Description%n"), parameterDocs, returnDoc, deprecatedDoc,
            docStart, getPadding(signatureNode.parent(), syntaxTree));
}
/**
 * Computes the leading padding to prepend to generated documentation lines so
 * they line up with the node they document.
 *
 * @param bLangFunction the node the documentation is generated for
 * @param syntaxTree    {@link SyntaxTree} containing the node
 * @return the node's own leading whitespace when it starts its line; a fixed indent otherwise
 */
private static String getPadding(NonTerminalNode bLangFunction, SyntaxTree syntaxTree) {
    // Start position (line + column) of the node's first token.
    LinePosition position = bLangFunction.location().lineRange().startLine();
    TextDocument textDocument = syntaxTree.textDocument();
    // Absolute text offsets of the node's first character and of that line's start.
    int prevCharIndex = textDocument.textPositionFrom(LinePosition.from(position.line(), position.offset()));
    int lineStartIndex = textDocument.textPositionFrom(LinePosition.from(position.line(), 0));
    String sourceCode = syntaxTree.toSourceCode();
    // Everything between the line start and the node: its leading indentation.
    String padding = sourceCode.substring(lineStartIndex, prevCharIndex);
    // Reuse pure-whitespace indentation as-is; if other tokens precede the node on
    // the same line, fall back to a fixed indent string instead.
    return padding.isBlank() ? padding : " ";
}
} | class DocumentationGenerator {
private DocumentationGenerator() {
}
/**
* Checks whether the node has documentation.
*
* @param node documentatable {@link NonTerminalNode}
* @return returns True if has documentation False otherwise
*/
public static boolean hasDocs(NonTerminalNode node) {
for (Node next : node.children()) {
if (next.kind() == SyntaxKind.METADATA && ((MetadataNode) next).documentationString().isPresent()) {
return true;
}
}
return false;
}
/**
* Returns range of current documentation.
*
* @param node documentatable {@link NonTerminalNode}
* @return returns {@link Range}
*/
public static Optional<Range> getDocsRange(NonTerminalNode node) {
for (Node next : node.children()) {
if (next.kind() == SyntaxKind.METADATA && ((MetadataNode) next).documentationString().isPresent()) {
return Optional.of(CommonUtil.toRange(((MetadataNode) next).documentationString().get().lineRange()));
}
}
return Optional.empty();
}
/**
* Generate documentation for non-terminal node.
*
* @param node non-terminal node
* @param syntaxTree syntaxTree {@link SyntaxTree}
* @return optional documentation
*/
public static Optional<DocAttachmentInfo> getDocumentationEditForNode(NonTerminalNode node,
SyntaxTree syntaxTree) {
switch (node.kind()) {
case FUNCTION_DEFINITION:
case RESOURCE_ACCESSOR_DEFINITION:
case OBJECT_METHOD_DEFINITION: {
return Optional.of(generateFunctionDocumentation((FunctionDefinitionNode) node, syntaxTree));
}
case METHOD_DECLARATION: {
return Optional.of(generateMethodDocumentation((MethodDeclarationNode) node, syntaxTree));
}
case SERVICE_DECLARATION: {
return Optional.of(generateServiceDocumentation((ServiceDeclarationNode) node, syntaxTree));
}
case TYPE_DEFINITION: {
return Optional.of(generateRecordOrObjectDocumentation((TypeDefinitionNode) node, syntaxTree));
}
case CLASS_DEFINITION: {
return Optional.of(generateClassDocumentation((ClassDefinitionNode) node, syntaxTree));
}
default:
break;
}
return Optional.empty();
}
/**
* Generate documentation for service node.
*
* @param serviceDeclrNode service declaration node
* @param syntaxTree syntaxTree {@link SyntaxTree}
* @return generated doc attachment
*/
private static DocAttachmentInfo generateServiceDocumentation(ServiceDeclarationNode serviceDeclrNode,
SyntaxTree syntaxTree) {
MetadataNode metadata = serviceDeclrNode.metadata().orElse(null);
Position docStart = CommonUtil.toRange(serviceDeclrNode.lineRange()).getStart();
if (metadata != null && !metadata.annotations().isEmpty()) {
docStart = CommonUtil.toRange(metadata.annotations().get(0).lineRange()).getStart();
}
String desc = String.format("Description%n");
return new DocAttachmentInfo(desc, docStart, getPadding(serviceDeclrNode, syntaxTree));
}
/**
* Generate documentation for function definition node.
*
* @param bLangFunction function definition node
* @param syntaxTree syntaxTree {@link SyntaxTree}
* @return generated doc attachment
*/
private static DocAttachmentInfo generateFunctionDocumentation(FunctionDefinitionNode bLangFunction,
SyntaxTree syntaxTree) {
return getFunctionNodeDocumentation(bLangFunction.functionSignature(),
bLangFunction.relativeResourcePath(),
bLangFunction.metadata().orElse(null),
CommonUtil.toRange(bLangFunction.lineRange()),
syntaxTree);
}
/**
* Generate documentation for method declaration node.
*
* @param methodDeclrNode method declaration node
* @param syntaxTree syntaxTree {@link SyntaxTree}
* @return
*/
private static DocAttachmentInfo generateMethodDocumentation(MethodDeclarationNode methodDeclrNode,
SyntaxTree syntaxTree) {
return getFunctionNodeDocumentation(methodDeclrNode.methodSignature(),
methodDeclrNode.relativeResourcePath(),
methodDeclrNode.metadata().orElse(null),
CommonUtil.toRange(methodDeclrNode.lineRange()),
syntaxTree);
}
/**
* Generate documentation for record or object type definition node.
*
* @param typeDefNode type definition node
* @param syntaxTree syntaxTree {@link SyntaxTree}
* @return generated doc attachment
*/
private static DocAttachmentInfo generateRecordOrObjectDocumentation(TypeDefinitionNode typeDefNode,
SyntaxTree syntaxTree) {
MetadataNode metadata = typeDefNode.metadata().orElse(null);
Position docStart = CommonUtil.toRange(typeDefNode.lineRange()).getStart();
if (metadata != null && !metadata.annotations().isEmpty()) {
docStart = CommonUtil.toRange(metadata.annotations().get(0).lineRange()).getStart();
}
io.ballerina.compiler.syntax.tree.Node typeDesc = typeDefNode.typeDescriptor();
String desc = String.format("Description%n");
LinkedHashMap<String, String> parameters = new LinkedHashMap<>();
switch (typeDesc.kind()) {
case RECORD_TYPE_DESC:
RecordTypeDescriptorNode recordTypeDescNode = (RecordTypeDescriptorNode) typeDesc;
recordTypeDescNode.fields().forEach(field -> {
Optional<Token> paramName = Optional.empty();
if (field.kind() == SyntaxKind.RECORD_FIELD) {
paramName = Optional.of(((RecordFieldNode) field).fieldName());
} else if (field.kind() == SyntaxKind.RECORD_FIELD_WITH_DEFAULT_VALUE) {
paramName = Optional.of(((RecordFieldWithDefaultValueNode) field).fieldName());
}
paramName.ifPresent(param -> parameters.put(param.text(), "Field Description"));
});
break;
case OBJECT_TYPE_DESC:
ObjectTypeDescriptorNode objectTypeDescNode = (ObjectTypeDescriptorNode) typeDesc;
objectTypeDescNode.members().forEach(field -> {
if (field.kind() == SyntaxKind.OBJECT_FIELD &&
((ObjectFieldNode) field).visibilityQualifier().isPresent()) {
ObjectFieldNode fieldNode = (ObjectFieldNode) field;
if (fieldNode.visibilityQualifier().get().kind() == SyntaxKind.PUBLIC_KEYWORD) {
parameters.put(fieldNode.fieldName().text(), "Field Description");
}
}
});
break;
default:
break;
}
return new DocAttachmentInfo(desc, parameters, null, null, docStart, getPadding(typeDefNode, syntaxTree));
}
/**
* Generate documentation for class definition node.
*
* @param classDefNode class definition node
* @param syntaxTree {@link SyntaxTree}
* @return generated doc attachment
*/
private static DocAttachmentInfo generateClassDocumentation(ClassDefinitionNode classDefNode,
SyntaxTree syntaxTree) {
MetadataNode metadata = classDefNode.metadata().orElse(null);
Position docStart = CommonUtil.toRange(classDefNode.lineRange()).getStart();
if (metadata != null && !metadata.annotations().isEmpty()) {
docStart = CommonUtil.toRange(metadata.annotations().get(0).lineRange()).getStart();
}
String desc = String.format("Description%n");
LinkedHashMap<String, String> parameters = new LinkedHashMap<>();
classDefNode.members().forEach(field -> {
if (field.kind() == SyntaxKind.OBJECT_FIELD &&
((ObjectFieldNode) field).visibilityQualifier().isPresent()) {
ObjectFieldNode fieldNode = (ObjectFieldNode) field;
if (fieldNode.visibilityQualifier().get().kind() == SyntaxKind.PUBLIC_KEYWORD) {
parameters.put(fieldNode.fieldName().text(), "Parameter Description");
}
}
});
return new DocAttachmentInfo(desc, parameters, null, null, docStart, getPadding(classDefNode, syntaxTree));
}
private static DocAttachmentInfo getFunctionNodeDocumentation(FunctionSignatureNode signatureNode,
NodeList<Node> resourceNodes,
MetadataNode metadata, Range functionRange,
SyntaxTree syntaxTree) {
Position docStart = functionRange.getStart();
boolean hasDeprecated = false;
if (metadata != null && !metadata.annotations().isEmpty()) {
for (AnnotationNode annotationNode : metadata.annotations()) {
io.ballerina.compiler.syntax.tree.Node annotReference = annotationNode.annotReference();
if (annotReference.kind() == SyntaxKind.SIMPLE_NAME_REFERENCE &&
"deprecated".equals(((SimpleNameReferenceNode) annotReference).name().text())) {
hasDeprecated = true;
}
}
docStart = CommonUtil.toRange(metadata.annotations().get(0).lineRange()).getStart();
}
String desc = String.format("Description%n");
LinkedHashMap<String, String> parameters = new LinkedHashMap<>();
if (!resourceNodes.isEmpty()) {
resourceNodes.forEach(param-> {
if (param instanceof ResourcePathParameterNode) {
Optional<Token> paramName = Optional.empty();
if (param.kind() == SyntaxKind.RESOURCE_PATH_SEGMENT_PARAM
|| param.kind() == SyntaxKind.RESOURCE_PATH_REST_PARAM) {
paramName = Optional.ofNullable(((ResourcePathParameterNode) param).paramName());
}
paramName.ifPresent(token -> parameters.put(token.text(), "Parameter Description"));
}
});
}
signatureNode.parameters().forEach(param -> {
Optional<Token> paramName = Optional.empty();
if (param.kind() == SyntaxKind.REQUIRED_PARAM) {
paramName = ((RequiredParameterNode) param).paramName();
} else if (param.kind() == SyntaxKind.DEFAULTABLE_PARAM) {
paramName = ((DefaultableParameterNode) param).paramName();
} else if (param.kind() == SyntaxKind.REST_PARAM) {
paramName = ((RestParameterNode) param).paramName();
}
paramName.ifPresent(token -> parameters.put(token.text(), "Parameter Description"));
});
String returnDesc = signatureNode.returnTypeDesc().isPresent() ? "Return Value Description" : null;
String deprecatedDesc = null;
if (hasDeprecated) {
deprecatedDesc = "Deprecated Description";
}
return new DocAttachmentInfo(desc, parameters, returnDesc, deprecatedDesc, docStart,
getPadding(signatureNode.parent(), syntaxTree));
}
private static String getPadding(NonTerminalNode bLangFunction, SyntaxTree syntaxTree) {
LinePosition position = bLangFunction.location().lineRange().startLine();
TextDocument textDocument = syntaxTree.textDocument();
int prevCharIndex = textDocument.textPositionFrom(LinePosition.from(position.line(), position.offset()));
int lineStartIndex = textDocument.textPositionFrom(LinePosition.from(position.line(), 0));
String sourceCode = syntaxTree.toSourceCode();
String padding = sourceCode.substring(lineStartIndex, prevCharIndex);
return padding.isBlank() ? padding : " ";
}
} | |
The string literals "http" and "Listener" should be moved to named constants. | void invokeFilters(BLangResource resourceNode, SymbolEnv env) {
if (resourceNode.requiredParams.size() == 2 && "http".equals(resourceNode.requiredParams.get(
0).type.tsymbol.pkgID.name.value) && "Listener".equals(resourceNode.requiredParams.get(
0).type.tsymbol.name.value)) {
addFilterStatements(resourceNode, env);
}
} | if (resourceNode.requiredParams.size() == 2 && "http".equals(resourceNode.requiredParams.get( | void invokeFilters(BLangResource resourceNode, SymbolEnv env) {
BLangVariable endpoint;
if (resourceNode.requiredParams.size() == 2) {
endpoint = resourceNode.requiredParams.get(0);
if (ORG_NAME.equals(endpoint.type.tsymbol.pkgID.orgName.value) && PACKAGE_NAME.equals(
endpoint.type.tsymbol.pkgID.name.value) && ENDPOINT_TYPE_NAME.equals(
endpoint.type.tsymbol.name.value)) {
addFilterStatements(resourceNode, env);
}
}
} | class HttpFiltersDesugar {
// Compiler singletons resolved from the CompilerContext in the constructor.
private final SymbolTable symTable;
private final SymbolResolver symResolver;
private final Names names;
// Field/variable names referenced on the generated http AST nodes.
private static final String HTTP_CONNECTION_VAR = "conn";
private static final String HTTP_ENDPOINT_CONFIG = "config";
private static final String HTTP_FILTERS_VAR = "filters";
private static final String HTTP_FILTER_VAR = "filter";
private static final String HTTP_FILTERCONTEXT_VAR = "filterContext";
// Positions of the endpoint and request parameters in an http resource signature.
private static final int ENDPOINT_PARAM_NUM = 0;
private static final int REQUEST_PARAM_NUM = 1;
// NOTE(review): the following are positional indices into the http stdlib's
// Listener/Connection/ServiceEndpointConfiguration field lists — presumably they
// mirror the declaration order in the http package; verify if that order changes.
private static final int CONNECTOR_FIELD_INDEX = 3;
private static final int FILTER_CONTEXT_FIELD_INDEX = 1;
private static final int ENDPOINT_CONFIG_INDEX = 4;
private static final int FILTERS_CONFIG_INDEX = 6;
// Key under which the per-context singleton instance is cached.
private static final CompilerContext.Key<HttpFiltersDesugar> HTTP_FILTERS_DESUGAR_KEY =
        new CompilerContext.Key<>();
public static HttpFiltersDesugar getInstance(CompilerContext context) {
    // The constructor registers the new instance in the context, so a cache miss
    // here is populated as a side effect of `new`.
    HttpFiltersDesugar instance = context.get(HTTP_FILTERS_DESUGAR_KEY);
    return instance != null ? instance : new HttpFiltersDesugar(context);
}
private HttpFiltersDesugar(CompilerContext context) {
    // Register this instance in the context before resolving collaborators,
    // following the singleton-per-context pattern used by the other compiler components.
    context.put(HTTP_FILTERS_DESUGAR_KEY, this);
    this.symTable = SymbolTable.getInstance(context);
    this.symResolver = SymbolResolver.getInstance(context);
    this.names = Names.getInstance(context);
}
/**
 * Injects the http filter invocation statements into the given resource body.
 *
 * @param resourceNode the resource to apply the filters on
 * @param env          the symbol environment
 */
private void addFilterStatements(BLangResource resourceNode, SymbolEnv env) {
    // Create the FilterContext variable first, then wire up the assignment and
    // the foreach that runs each configured filter against it.
    addAssignmentAndForEach(resourceNode, addFilterContextCreation(resourceNode, env));
}
/**
 * Adds code {@code http:FilterContext _$$_filterContext = new (serviceTypeDef, "serviceName", "resourceName");}
 * as the first statement of the resource body.
 *
 * @param resourceNode the resource being desugared
 * @param env          the symbol environment used to resolve {@code http:FilterContext}
 * @return the generated FilterContext variable, referenced by the statements added later
 */
private BLangVariable addFilterContextCreation(BLangResource resourceNode, SymbolEnv env) {
    // Resolve the http:FilterContext object type through the symbol resolver.
    BLangIdentifier pkgAlias = ASTBuilderUtil.createIdentifier(resourceNode.pos, "http");
    BLangUserDefinedType filterContextUserDefinedType = new BLangUserDefinedType(
            pkgAlias, ASTBuilderUtil.createIdentifier(resourceNode.pos, "FilterContext"));
    BType filterContextType = symResolver.resolveTypeNode(filterContextUserDefinedType, env);
    String filterContextVarName = GEN_VAR_PREFIX.value + HTTP_FILTERCONTEXT_VAR;
    // First constructor argument: a reference to the enclosing service's type definition.
    BLangSimpleVarRef serviceRef = new BLangSimpleVarRef();
    serviceRef.variableName = ((BLangService) resourceNode.parent).name;
    serviceRef.type = symTable.typeDesc;
    serviceRef.pos = resourceNode.pos;
    serviceRef.symbol = ((BLangService) resourceNode.parent).symbol;
    // Second and third constructor arguments: service and resource name string literals.
    BLangLiteral serviceName = new BLangLiteral();
    serviceName.value = ((BLangService) resourceNode.parent).name.value;
    serviceName.type = symTable.stringType;
    serviceName.pos = resourceNode.pos;
    BLangLiteral resourceName = new BLangLiteral();
    resourceName.value = resourceNode.name.value;
    resourceName.type = symTable.stringType;
    resourceName.pos = resourceNode.pos;
    // Invocation of FilterContext's initializer with the three arguments above.
    BLangInvocation filterInvocation = (BLangInvocation) TreeBuilder.createInvocationNode();
    filterInvocation.symbol = ((BObjectTypeSymbol) filterContextType.tsymbol).initializerFunc.symbol;
    filterInvocation.pos = resourceNode.pos;
    filterInvocation.requiredArgs.add(serviceRef);
    filterInvocation.requiredArgs.add(serviceName);
    filterInvocation.requiredArgs.add(resourceName);
    filterInvocation.argExprs.add(serviceRef);
    filterInvocation.argExprs.add(serviceName);
    filterInvocation.argExprs.add(resourceName);
    filterInvocation.type = symTable.nilType;
    // The `new FilterContext(...)` expression wrapping the initializer invocation.
    BLangTypeInit filterInitNode = (BLangTypeInit) TreeBuilder.createObjectInitNode();
    filterInitNode.pos = resourceNode.pos;
    filterInitNode.type = filterContextType;
    filterInitNode.objectInitInvocation = filterInvocation;
    filterInitNode.userDefinedType = filterContextUserDefinedType;
    filterInitNode.argsExpr.add(serviceRef);
    filterInitNode.argsExpr.add(serviceName);
    filterInitNode.argsExpr.add(resourceName);
    // Variable definition holding the new FilterContext; inserted at position 0 of the
    // resource body so the statements added afterwards (positions 1 and 2) can use it.
    BLangVariable filterContextVar = ASTBuilderUtil.createVariable(
            resourceNode.pos, filterContextVarName, filterContextType, filterInitNode,
            new BVarSymbol(0, names.fromString(filterContextVarName), resourceNode.symbol.pkgID, filterContextType,
                    resourceNode.symbol));
    filterContextVar.typeNode = filterContextUserDefinedType;
    resourceNode.body.stmts.add(0, ASTBuilderUtil.createVariableDef(resourceNode.pos, filterContextVar));
    return filterContextVar;
}
/**
 * Adds code
 * <blockquote><pre>
 * caller.conn.filterContext = _$$_filterContext;
 * foreach _$$_filter in caller.config.filters {
 *     if (!_$$_filter.filterRequest()) {
 *         Done;
 *     }
 * }
 * </pre></blockquote>
 * at positions 1 and 2 of the resource body (position 0 holds the FilterContext
 * variable definition inserted by {@code addFilterContextCreation}).
 *
 * @param resourceNode     the resource being desugared
 * @param filterContextVar the previously created FilterContext variable
 */
private void addAssignmentAndForEach(BLangResource resourceNode, BLangVariable filterContextVar) {
    // Reference to the endpoint ("caller") parameter of the resource.
    BLangSimpleVarRef callerRef = new BLangSimpleVarRef();
    BLangVariable endpointVar = resourceNode.requiredParams.get(ENDPOINT_PARAM_NUM);
    callerRef.variableName = endpointVar.name;
    callerRef.type = endpointVar.type;
    callerRef.pos = resourceNode.pos;
    callerRef.symbol = endpointVar.symbol;
    // Build `caller.conn` field access.
    BField connVal = ((BLangService) resourceNode.parent).endpointType.fields.get(CONNECTOR_FIELD_INDEX);
    BLangFieldBasedAccess connField = ASTBuilderUtil.createFieldAccessExpr(callerRef, ASTBuilderUtil
            .createIdentifier(resourceNode.pos, HTTP_CONNECTION_VAR));
    connField.type = connVal.type;
    connField.symbol = connVal.symbol;
    connField.pos = resourceNode.pos;
    // Build `caller.conn.filterContext` field access.
    BField filterContextVal = ((BObjectType) connVal.type).fields.get(FILTER_CONTEXT_FIELD_INDEX);
    BLangFieldBasedAccess filterContextField = ASTBuilderUtil.createFieldAccessExpr(connField, ASTBuilderUtil
            .createIdentifier(resourceNode.pos, HTTP_FILTERCONTEXT_VAR));
    filterContextField.type = filterContextVal.type;
    filterContextField.symbol = filterContextVal.symbol;
    filterContextField.pos = resourceNode.pos;
    // Reference to the _$$_filterContext variable created earlier.
    BLangSimpleVarRef filterContextRef = new BLangSimpleVarRef();
    filterContextRef.variableName = filterContextVar.name;
    filterContextRef.type = filterContextVar.type;
    filterContextRef.pos = resourceNode.pos;
    filterContextRef.symbol = filterContextVar.symbol;
    // Statement 1: `caller.conn.filterContext = _$$_filterContext;`
    BLangAssignment filterContextAssignment = ASTBuilderUtil.createAssignmentStmt(resourceNode.pos,
            filterContextField,
            filterContextRef, false);
    resourceNode.body.stmts.add(1, filterContextAssignment);
    // Build `caller.config.filters` and determine the element (Filter) type.
    BField configVal = ((BLangService) resourceNode.parent).endpointType.fields.get(ENDPOINT_CONFIG_INDEX);
    BField filtersVal = ((BRecordType) configVal.type).fields.get(FILTERS_CONFIG_INDEX);
    BType filtersType = filtersVal.type;
    BType filterType = ((BArrayType) filtersType).eType;
    BLangFieldBasedAccess configField = ASTBuilderUtil.createFieldAccessExpr(callerRef, ASTBuilderUtil
            .createIdentifier(resourceNode.pos, HTTP_ENDPOINT_CONFIG));
    configField.type = configVal.type;
    configField.symbol = configVal.symbol;
    configField.pos = resourceNode.pos;
    BLangFieldBasedAccess filtersField = ASTBuilderUtil.createFieldAccessExpr(configField, ASTBuilderUtil
            .createIdentifier(resourceNode.pos, HTTP_FILTERS_VAR));
    filtersField.type = filtersType;
    filtersField.symbol = filtersVal.symbol;
    filtersField.pos = resourceNode.pos;
    // Loop variable `_$$_filter` of the foreach.
    // NOTE(review): elsewhere GEN_VAR_PREFIX.value is concatenated; here the Name
    // object itself is concatenated, relying on its toString() — confirm both yield
    // the same prefix. Likewise `new Name(...)` vs names.fromString(...) above.
    BLangSimpleVarRef filterRef = new BLangSimpleVarRef();
    String filterVarName = GEN_VAR_PREFIX + HTTP_FILTER_VAR;
    filterRef.variableName = ASTBuilderUtil.createIdentifier(resourceNode.pos, filterVarName);
    filterRef.type = filterType;
    filterRef.pos = resourceNode.pos;
    filterRef.symbol = new BVarSymbol(0, new Name(filterVarName), resourceNode.symbol.pkgID, filterType,
            resourceNode.symbol);
    // Loop body inner statement: `Done;` (abort the resource when a filter rejects).
    BLangDone doneNode = (BLangDone) TreeBuilder.createDoneNode();
    doneNode.pos = resourceNode.pos;
    BLangBlockStmt doneStatement = ASTBuilderUtil.createBlockStmt(resourceNode.pos,
            createSingletonArrayList(doneNode));
    // Reference to the request parameter of the resource.
    BLangSimpleVarRef requestRef = new BLangSimpleVarRef();
    BLangVariable requestVar = resourceNode.requiredParams.get(REQUEST_PARAM_NUM);
    requestRef.variableName = requestVar.name;
    requestRef.type = requestVar.type;
    requestRef.pos = requestVar.pos;
    requestRef.symbol = requestVar.symbol;
    // `_$$_filter.filterRequest(caller, request, _$$_filterContext)` invocation.
    // NOTE(review): attachedFuncs.get(1) assumes filterRequest is the second attached
    // function of the Filter object — verify against the http Filter definition.
    BLangInvocation filterRequestInvocation = (BLangInvocation) TreeBuilder.createInvocationNode();
    filterRequestInvocation.symbol = ((BObjectTypeSymbol) filterType.tsymbol).attachedFuncs.get(1).symbol;
    filterRequestInvocation.pos = resourceNode.pos;
    filterRequestInvocation.requiredArgs.add(callerRef);
    filterRequestInvocation.requiredArgs.add(requestRef);
    filterRequestInvocation.requiredArgs.add(filterContextRef);
    filterRequestInvocation.type = symTable.booleanType;
    filterRequestInvocation.expr = filterRef;
    // Negate the invocation result: `!_$$_filter.filterRequest(...)`.
    BLangUnaryExpr unaryExpr = ASTBuilderUtil.createUnaryExpr(
            resourceNode.pos, filterRequestInvocation, symTable.booleanType, OperatorKind.NOT,
            new BOperatorSymbol(names.fromString(OperatorKind.NOT.value()), symTable.rootPkgSymbol.pkgID,
                    new BInvokableType(createSingletonArrayList(symTable.booleanType),
                            symTable.booleanType, null), symTable.rootPkgSymbol,
                    InstructionCodes.BNOT));
    // Wrap the condition in braces: `(!_$$_filter.filterRequest(...))`.
    BLangBracedOrTupleExpr ifBraceExpr = (BLangBracedOrTupleExpr) TreeBuilder.createBracedOrTupleExpression();
    ifBraceExpr.expressions.add(unaryExpr);
    ifBraceExpr.isBracedExpr = true;
    ifBraceExpr.pos = resourceNode.pos;
    ifBraceExpr.type = symTable.booleanType;
    // `if (...) { Done; }` statement forming the foreach body.
    BLangIf ifNode = (BLangIf) TreeBuilder.createIfElseStatementNode();
    ifNode.pos = resourceNode.pos;
    ifNode.body = doneStatement;
    ifNode.expr = ifBraceExpr;
    BLangBlockStmt ifStatement = ASTBuilderUtil.createBlockStmt(resourceNode.pos,
            createSingletonArrayList(ifNode));
    // Statement 2: `foreach _$$_filter in caller.config.filters { ... }`.
    BLangForeach foreach = (BLangForeach) TreeBuilder.createForeachNode();
    foreach.pos = resourceNode.pos;
    foreach.body = ifStatement;
    foreach.collection = filtersField;
    foreach.varRefs.add(filterRef);
    foreach.varTypes = createSingletonArrayList(filterType);
    resourceNode.body.stmts.add(2, foreach);
}
/**
 * Wraps a single value in a new mutable list.
 * Callers mutate some of the returned lists, so an immutable singleton list is not usable here.
 *
 * @param val the sole element of the new list
 * @return a fresh mutable list containing only {@code val}
 */
private <E> List<E> createSingletonArrayList(E val) {
    List<E> singleton = new ArrayList<>();
    singleton.add(val);
    return singleton;
}
} | class HttpFiltersDesugar {
private final SymbolTable symTable;
private final SymbolResolver symResolver;
private final Names names;
private static final String HTTP_CONNECTION_VAR = "conn";
private static final String HTTP_ENDPOINT_CONFIG = "config";
private static final String HTTP_FILTERS_VAR = "filters";
private static final String HTTP_FILTER_VAR = "filter";
private static final String HTTP_FILTERCONTEXT_VAR = "filterContext";
private static final String ORG_NAME = "ballerina";
private static final String PACKAGE_NAME = "http";
private static final String ENDPOINT_TYPE_NAME = "Listener";
private static final int ENDPOINT_PARAM_NUM = 0;
private static final int REQUEST_PARAM_NUM = 1;
private static final int CONNECTOR_FIELD_INDEX = 3;
private static final int FILTER_CONTEXT_FIELD_INDEX = 1;
private static final int ENDPOINT_CONFIG_INDEX = 4;
private static final int FILTERS_CONFIG_INDEX = 6;
private static final CompilerContext.Key<HttpFiltersDesugar> HTTP_FILTERS_DESUGAR_KEY =
new CompilerContext.Key<>();
public static HttpFiltersDesugar getInstance(CompilerContext context) {
HttpFiltersDesugar desugar = context.get(HTTP_FILTERS_DESUGAR_KEY);
if (desugar == null) {
desugar = new HttpFiltersDesugar(context);
}
return desugar;
}
private HttpFiltersDesugar(CompilerContext context) {
context.put(HTTP_FILTERS_DESUGAR_KEY, this);
this.symTable = SymbolTable.getInstance(context);
this.symResolver = SymbolResolver.getInstance(context);
this.names = Names.getInstance(context);
}
/**
* Check if the resource is an http resource and apply filter.
*
* @param resourceNode The resource to apply filter on if it is http
* @param env the symbol environment
*/
private void addFilterStatements(BLangResource resourceNode, SymbolEnv env) {
BLangVariable filterContextVar = addFilterContextCreation(resourceNode, env);
addAssignmentAndForEach(resourceNode, filterContextVar);
}
/**
* Adds code {@code http:FilterContext _$$_filterContext = new (serviceTypeDef, "serviceName", "resourceName");}.
*
* @param resourceNode The resource to add the FilterContext creation.
* @param env the symbol environment.
*/
private BLangVariable addFilterContextCreation(BLangResource resourceNode, SymbolEnv env) {
BLangIdentifier pkgAlias = ASTBuilderUtil.createIdentifier(resourceNode.pos, "http");
BLangUserDefinedType filterContextUserDefinedType = new BLangUserDefinedType(
pkgAlias, ASTBuilderUtil.createIdentifier(resourceNode.pos, "FilterContext"));
BType filterContextType = symResolver.resolveTypeNode(filterContextUserDefinedType, env);
String filterContextVarName = GEN_VAR_PREFIX.value + HTTP_FILTERCONTEXT_VAR;
BLangSimpleVarRef serviceRef = new BLangSimpleVarRef();
serviceRef.variableName = ((BLangService) resourceNode.parent).name;
serviceRef.type = symTable.typeDesc;
serviceRef.pos = resourceNode.pos;
serviceRef.symbol = ((BLangService) resourceNode.parent).symbol;
BLangLiteral serviceName = new BLangLiteral();
serviceName.value = ((BLangService) resourceNode.parent).name.value;
serviceName.type = symTable.stringType;
serviceName.pos = resourceNode.pos;
BLangLiteral resourceName = new BLangLiteral();
resourceName.value = resourceNode.name.value;
resourceName.type = symTable.stringType;
resourceName.pos = resourceNode.pos;
BLangInvocation filterInvocation = (BLangInvocation) TreeBuilder.createInvocationNode();
filterInvocation.symbol = ((BObjectTypeSymbol) filterContextType.tsymbol).initializerFunc.symbol;
filterInvocation.pos = resourceNode.pos;
filterInvocation.requiredArgs.add(serviceRef);
filterInvocation.requiredArgs.add(serviceName);
filterInvocation.requiredArgs.add(resourceName);
filterInvocation.argExprs.add(serviceRef);
filterInvocation.argExprs.add(serviceName);
filterInvocation.argExprs.add(resourceName);
filterInvocation.type = symTable.nilType;
BLangTypeInit filterInitNode = (BLangTypeInit) TreeBuilder.createObjectInitNode();
filterInitNode.pos = resourceNode.pos;
filterInitNode.type = filterContextType;
filterInitNode.objectInitInvocation = filterInvocation;
filterInitNode.userDefinedType = filterContextUserDefinedType;
filterInitNode.argsExpr.add(serviceRef);
filterInitNode.argsExpr.add(serviceName);
filterInitNode.argsExpr.add(resourceName);
BLangVariable filterContextVar = ASTBuilderUtil.createVariable(
resourceNode.pos, filterContextVarName, filterContextType, filterInitNode,
new BVarSymbol(0, names.fromString(filterContextVarName), resourceNode.symbol.pkgID, filterContextType,
resourceNode.symbol));
filterContextVar.typeNode = filterContextUserDefinedType;
resourceNode.body.stmts.add(0, ASTBuilderUtil.createVariableDef(resourceNode.pos, filterContextVar));
return filterContextVar;
}
/**
* Adds code
* <blockquote><pre>
* caller.conn.filterContext = _$$_filterContext;
* foreach _$$_filter in caller.config.filters {
* if(!_$$_filter.filterRequest()){
* Done;
* }
* }
* </pre></blockquote>
*/
private void addAssignmentAndForEach(BLangResource resourceNode, BLangVariable filterContextVar) {
BLangSimpleVarRef callerRef = new BLangSimpleVarRef();
BLangVariable endpointVar = resourceNode.requiredParams.get(ENDPOINT_PARAM_NUM);
callerRef.variableName = endpointVar.name;
callerRef.type = endpointVar.type;
callerRef.pos = resourceNode.pos;
callerRef.symbol = endpointVar.symbol;
BField connVal = ((BLangService) resourceNode.parent).endpointType.fields.get(CONNECTOR_FIELD_INDEX);
BLangFieldBasedAccess connField = ASTBuilderUtil.createFieldAccessExpr(callerRef, ASTBuilderUtil
.createIdentifier(resourceNode.pos, HTTP_CONNECTION_VAR));
connField.type = connVal.type;
connField.symbol = connVal.symbol;
connField.pos = resourceNode.pos;
BField filterContextVal = ((BObjectType) connVal.type).fields.get(FILTER_CONTEXT_FIELD_INDEX);
BLangFieldBasedAccess filterContextField = ASTBuilderUtil.createFieldAccessExpr(connField, ASTBuilderUtil
.createIdentifier(resourceNode.pos, HTTP_FILTERCONTEXT_VAR));
filterContextField.type = filterContextVal.type;
filterContextField.symbol = filterContextVal.symbol;
filterContextField.pos = resourceNode.pos;
BLangSimpleVarRef filterContextRef = new BLangSimpleVarRef();
filterContextRef.variableName = filterContextVar.name;
filterContextRef.type = filterContextVar.type;
filterContextRef.pos = resourceNode.pos;
filterContextRef.symbol = filterContextVar.symbol;
BLangAssignment filterContextAssignment = ASTBuilderUtil.createAssignmentStmt(resourceNode.pos,
filterContextField,
filterContextRef, false);
resourceNode.body.stmts.add(1, filterContextAssignment);
BField configVal = ((BLangService) resourceNode.parent).endpointType.fields.get(ENDPOINT_CONFIG_INDEX);
BField filtersVal = ((BRecordType) configVal.type).fields.get(FILTERS_CONFIG_INDEX);
BType filtersType = filtersVal.type;
BType filterType = ((BArrayType) filtersType).eType;
BLangFieldBasedAccess configField = ASTBuilderUtil.createFieldAccessExpr(callerRef, ASTBuilderUtil
.createIdentifier(resourceNode.pos, HTTP_ENDPOINT_CONFIG));
configField.type = configVal.type;
configField.symbol = configVal.symbol;
configField.pos = resourceNode.pos;
BLangFieldBasedAccess filtersField = ASTBuilderUtil.createFieldAccessExpr(configField, ASTBuilderUtil
.createIdentifier(resourceNode.pos, HTTP_FILTERS_VAR));
filtersField.type = filtersType;
filtersField.symbol = filtersVal.symbol;
filtersField.pos = resourceNode.pos;
BLangSimpleVarRef filterRef = new BLangSimpleVarRef();
String filterVarName = GEN_VAR_PREFIX + HTTP_FILTER_VAR;
filterRef.variableName = ASTBuilderUtil.createIdentifier(resourceNode.pos, filterVarName);
filterRef.type = filterType;
filterRef.pos = resourceNode.pos;
filterRef.symbol = new BVarSymbol(0, new Name(filterVarName), resourceNode.symbol.pkgID, filterType,
resourceNode.symbol);
BLangDone doneNode = (BLangDone) TreeBuilder.createDoneNode();
doneNode.pos = resourceNode.pos;
BLangBlockStmt doneStatement = ASTBuilderUtil.createBlockStmt(resourceNode.pos,
createSingletonArrayList(doneNode));
BLangSimpleVarRef requestRef = new BLangSimpleVarRef();
BLangVariable requestVar = resourceNode.requiredParams.get(REQUEST_PARAM_NUM);
requestRef.variableName = requestVar.name;
requestRef.type = requestVar.type;
requestRef.pos = requestVar.pos;
requestRef.symbol = requestVar.symbol;
BLangInvocation filterRequestInvocation = (BLangInvocation) TreeBuilder.createInvocationNode();
filterRequestInvocation.symbol = ((BObjectTypeSymbol) filterType.tsymbol).attachedFuncs.get(1).symbol;
filterRequestInvocation.pos = resourceNode.pos;
filterRequestInvocation.requiredArgs.add(callerRef);
filterRequestInvocation.requiredArgs.add(requestRef);
filterRequestInvocation.requiredArgs.add(filterContextRef);
filterRequestInvocation.type = symTable.booleanType;
filterRequestInvocation.expr = filterRef;
BLangUnaryExpr unaryExpr = ASTBuilderUtil.createUnaryExpr(
resourceNode.pos, filterRequestInvocation, symTable.booleanType, OperatorKind.NOT,
new BOperatorSymbol(names.fromString(OperatorKind.NOT.value()), symTable.rootPkgSymbol.pkgID,
new BInvokableType(createSingletonArrayList(symTable.booleanType),
symTable.booleanType, null), symTable.rootPkgSymbol,
InstructionCodes.BNOT));
BLangBracedOrTupleExpr ifBraceExpr = (BLangBracedOrTupleExpr) TreeBuilder.createBracedOrTupleExpression();
ifBraceExpr.expressions.add(unaryExpr);
ifBraceExpr.isBracedExpr = true;
ifBraceExpr.pos = resourceNode.pos;
ifBraceExpr.type = symTable.booleanType;
BLangIf ifNode = (BLangIf) TreeBuilder.createIfElseStatementNode();
ifNode.pos = resourceNode.pos;
ifNode.body = doneStatement;
ifNode.expr = ifBraceExpr;
BLangBlockStmt ifStatement = ASTBuilderUtil.createBlockStmt(resourceNode.pos,
createSingletonArrayList(ifNode));
BLangForeach foreach = (BLangForeach) TreeBuilder.createForeachNode();
foreach.pos = resourceNode.pos;
foreach.body = ifStatement;
foreach.collection = filtersField;
foreach.varRefs.add(filterRef);
foreach.varTypes = createSingletonArrayList(filterType);
resourceNode.body.stmts.add(2, foreach);
}
private <E> List<E> createSingletonArrayList(E val) {
List<E> list = new ArrayList<>();
list.add(val);
return list;
}
} |
In the current state of the PR, `-r` is no longer valid. | public void testClaimRestoreModeParsing() throws Exception {
String[] parameters = {
"-s", "expectedSavepointPath", "-n", "-r", "claim", getTestJarPath()
};
CommandLine commandLine =
CliFrontendParser.parse(CliFrontendParser.RUN_OPTIONS, parameters, true);
ProgramOptions programOptions = ProgramOptions.create(commandLine);
ExecutionConfigAccessor executionOptions =
ExecutionConfigAccessor.fromProgramOptions(programOptions, Collections.emptyList());
SavepointRestoreSettings savepointSettings = executionOptions.getSavepointRestoreSettings();
assertTrue(savepointSettings.restoreSavepoint());
assertEquals(RestoreMode.CLAIM, savepointSettings.getRestoreMode());
assertEquals("expectedSavepointPath", savepointSettings.getRestorePath());
assertTrue(savepointSettings.allowNonRestoredState());
} | "-s", "expectedSavepointPath", "-n", "-r", "claim", getTestJarPath() | public void testClaimRestoreModeParsing() throws Exception {
String[] parameters = {
"-s", "expectedSavepointPath", "-n", "-restoreMode", "claim", getTestJarPath()
};
CommandLine commandLine =
CliFrontendParser.parse(CliFrontendParser.RUN_OPTIONS, parameters, true);
ProgramOptions programOptions = ProgramOptions.create(commandLine);
ExecutionConfigAccessor executionOptions =
ExecutionConfigAccessor.fromProgramOptions(programOptions, Collections.emptyList());
SavepointRestoreSettings savepointSettings = executionOptions.getSavepointRestoreSettings();
assertTrue(savepointSettings.restoreSavepoint());
assertEquals(RestoreMode.CLAIM, savepointSettings.getRestoreMode());
assertEquals("expectedSavepointPath", savepointSettings.getRestorePath());
assertTrue(savepointSettings.allowNonRestoredState());
} | class CliFrontendRunTest extends CliFrontendTestBase {
@BeforeClass
public static void init() {
CliFrontendTestUtils.pipeSystemOutToNull();
}
@AfterClass
public static void shutdown() {
CliFrontendTestUtils.restoreSystemOut();
}
@Test
public void testRun() throws Exception {
final Configuration configuration = getConfiguration();
{
String[] parameters = {"-v", getTestJarPath()};
verifyCliFrontend(configuration, getCli(), parameters, 4, false);
}
{
String[] parameters = {"-v", "-d", getTestJarPath()};
verifyCliFrontend(configuration, getCli(), parameters, 4, true);
}
{
String[] parameters = {"-v", "-p", "42", getTestJarPath()};
verifyCliFrontend(configuration, getCli(), parameters, 42, false);
}
{
String[] parameters = {"-p", "2", "-d", getTestJarPath()};
verifyCliFrontend(configuration, getCli(), parameters, 2, true);
}
{
String[] parameters = {"-s", "expectedSavepointPath", getTestJarPath()};
CommandLine commandLine =
CliFrontendParser.parse(CliFrontendParser.RUN_OPTIONS, parameters, true);
ProgramOptions programOptions = ProgramOptions.create(commandLine);
ExecutionConfigAccessor executionOptions =
ExecutionConfigAccessor.fromProgramOptions(
programOptions, Collections.emptyList());
SavepointRestoreSettings savepointSettings =
executionOptions.getSavepointRestoreSettings();
assertTrue(savepointSettings.restoreSavepoint());
assertEquals("expectedSavepointPath", savepointSettings.getRestorePath());
assertFalse(savepointSettings.allowNonRestoredState());
}
{
String[] parameters = {"-s", "expectedSavepointPath", "-n", getTestJarPath()};
CommandLine commandLine =
CliFrontendParser.parse(CliFrontendParser.RUN_OPTIONS, parameters, true);
ProgramOptions programOptions = ProgramOptions.create(commandLine);
ExecutionConfigAccessor executionOptions =
ExecutionConfigAccessor.fromProgramOptions(
programOptions, Collections.emptyList());
SavepointRestoreSettings savepointSettings =
executionOptions.getSavepointRestoreSettings();
assertTrue(savepointSettings.restoreSavepoint());
assertEquals("expectedSavepointPath", savepointSettings.getRestorePath());
assertTrue(savepointSettings.allowNonRestoredState());
}
{
String[] parameters = {
getTestJarPath(), "-arg1", "value1", "justavalue", "--arg2", "value2"
};
CommandLine commandLine =
CliFrontendParser.parse(CliFrontendParser.RUN_OPTIONS, parameters, true);
ProgramOptions programOptions = ProgramOptions.create(commandLine);
assertEquals("-arg1", programOptions.getProgramArgs()[0]);
assertEquals("value1", programOptions.getProgramArgs()[1]);
assertEquals("justavalue", programOptions.getProgramArgs()[2]);
assertEquals("--arg2", programOptions.getProgramArgs()[3]);
assertEquals("value2", programOptions.getProgramArgs()[4]);
}
}
@Test
@Test
public void testNoClaimRestoreModeParsing() throws Exception {
String[] parameters = {
"-s", "expectedSavepointPath", "-n", "-restoreMode", "no_claim", getTestJarPath()
};
CommandLine commandLine =
CliFrontendParser.parse(CliFrontendParser.RUN_OPTIONS, parameters, true);
ProgramOptions programOptions = ProgramOptions.create(commandLine);
ExecutionConfigAccessor executionOptions =
ExecutionConfigAccessor.fromProgramOptions(programOptions, Collections.emptyList());
SavepointRestoreSettings savepointSettings = executionOptions.getSavepointRestoreSettings();
assertTrue(savepointSettings.restoreSavepoint());
assertEquals(RestoreMode.NO_CLAIM, savepointSettings.getRestoreMode());
assertEquals("expectedSavepointPath", savepointSettings.getRestorePath());
assertTrue(savepointSettings.allowNonRestoredState());
}
@Test(expected = CliArgsException.class)
public void testUnrecognizedOption() throws Exception {
String[] parameters = {"-v", "-l", "-a", "some", "program", "arguments"};
Configuration configuration = getConfiguration();
CliFrontend testFrontend =
new CliFrontend(configuration, Collections.singletonList(getCli()));
testFrontend.run(parameters);
}
@Test(expected = CliArgsException.class)
public void testInvalidParallelismOption() throws Exception {
String[] parameters = {"-v", "-p", "text", getTestJarPath()};
Configuration configuration = getConfiguration();
CliFrontend testFrontend =
new CliFrontend(configuration, Collections.singletonList(getCli()));
testFrontend.run(parameters);
}
@Test(expected = CliArgsException.class)
public void testParallelismWithOverflow() throws Exception {
String[] parameters = {"-v", "-p", "475871387138", getTestJarPath()};
Configuration configuration = new Configuration();
CliFrontend testFrontend =
new CliFrontend(configuration, Collections.singletonList(getCli()));
testFrontend.run(parameters);
}
public static void verifyCliFrontend(
Configuration configuration,
AbstractCustomCommandLine cli,
String[] parameters,
int expectedParallelism,
boolean isDetached)
throws Exception {
RunTestingCliFrontend testFrontend =
new RunTestingCliFrontend(
configuration,
new DefaultClusterClientServiceLoader(),
cli,
expectedParallelism,
isDetached);
testFrontend.run(parameters);
}
public static void verifyCliFrontend(
Configuration configuration,
ClusterClientServiceLoader clusterClientServiceLoader,
AbstractCustomCommandLine cli,
String[] parameters,
int expectedParallelism,
boolean isDetached)
throws Exception {
RunTestingCliFrontend testFrontend =
new RunTestingCliFrontend(
configuration,
clusterClientServiceLoader,
cli,
expectedParallelism,
isDetached);
testFrontend.run(parameters);
}
private static final class RunTestingCliFrontend extends CliFrontend {
private final int expectedParallelism;
private final boolean isDetached;
private RunTestingCliFrontend(
Configuration configuration,
ClusterClientServiceLoader clusterClientServiceLoader,
AbstractCustomCommandLine cli,
int expectedParallelism,
boolean isDetached) {
super(configuration, clusterClientServiceLoader, Collections.singletonList(cli));
this.expectedParallelism = expectedParallelism;
this.isDetached = isDetached;
}
@Override
protected void executeProgram(Configuration configuration, PackagedProgram program) {
final ExecutionConfigAccessor executionConfigAccessor =
ExecutionConfigAccessor.fromConfiguration(configuration);
assertEquals(isDetached, executionConfigAccessor.getDetachedMode());
assertEquals(expectedParallelism, executionConfigAccessor.getParallelism());
}
}
} | class CliFrontendRunTest extends CliFrontendTestBase {
@BeforeClass
public static void init() {
CliFrontendTestUtils.pipeSystemOutToNull();
}
@AfterClass
public static void shutdown() {
CliFrontendTestUtils.restoreSystemOut();
}
@Test
public void testRun() throws Exception {
final Configuration configuration = getConfiguration();
{
String[] parameters = {"-v", getTestJarPath()};
verifyCliFrontend(configuration, getCli(), parameters, 4, false);
}
{
String[] parameters = {"-v", "-d", getTestJarPath()};
verifyCliFrontend(configuration, getCli(), parameters, 4, true);
}
{
String[] parameters = {"-v", "-p", "42", getTestJarPath()};
verifyCliFrontend(configuration, getCli(), parameters, 42, false);
}
{
String[] parameters = {"-p", "2", "-d", getTestJarPath()};
verifyCliFrontend(configuration, getCli(), parameters, 2, true);
}
{
String[] parameters = {"-s", "expectedSavepointPath", getTestJarPath()};
CommandLine commandLine =
CliFrontendParser.parse(CliFrontendParser.RUN_OPTIONS, parameters, true);
ProgramOptions programOptions = ProgramOptions.create(commandLine);
ExecutionConfigAccessor executionOptions =
ExecutionConfigAccessor.fromProgramOptions(
programOptions, Collections.emptyList());
SavepointRestoreSettings savepointSettings =
executionOptions.getSavepointRestoreSettings();
assertTrue(savepointSettings.restoreSavepoint());
assertEquals("expectedSavepointPath", savepointSettings.getRestorePath());
assertFalse(savepointSettings.allowNonRestoredState());
}
{
String[] parameters = {"-s", "expectedSavepointPath", "-n", getTestJarPath()};
CommandLine commandLine =
CliFrontendParser.parse(CliFrontendParser.RUN_OPTIONS, parameters, true);
ProgramOptions programOptions = ProgramOptions.create(commandLine);
ExecutionConfigAccessor executionOptions =
ExecutionConfigAccessor.fromProgramOptions(
programOptions, Collections.emptyList());
SavepointRestoreSettings savepointSettings =
executionOptions.getSavepointRestoreSettings();
assertTrue(savepointSettings.restoreSavepoint());
assertEquals("expectedSavepointPath", savepointSettings.getRestorePath());
assertTrue(savepointSettings.allowNonRestoredState());
}
{
String[] parameters = {
getTestJarPath(), "-arg1", "value1", "justavalue", "--arg2", "value2"
};
CommandLine commandLine =
CliFrontendParser.parse(CliFrontendParser.RUN_OPTIONS, parameters, true);
ProgramOptions programOptions = ProgramOptions.create(commandLine);
assertEquals("-arg1", programOptions.getProgramArgs()[0]);
assertEquals("value1", programOptions.getProgramArgs()[1]);
assertEquals("justavalue", programOptions.getProgramArgs()[2]);
assertEquals("--arg2", programOptions.getProgramArgs()[3]);
assertEquals("value2", programOptions.getProgramArgs()[4]);
}
}
@Test
@Test
public void testLegacyRestoreModeParsing() throws Exception {
String[] parameters = {
"-s", "expectedSavepointPath", "-n", "-restoreMode", "legacy", getTestJarPath()
};
CommandLine commandLine =
CliFrontendParser.parse(CliFrontendParser.RUN_OPTIONS, parameters, true);
ProgramOptions programOptions = ProgramOptions.create(commandLine);
ExecutionConfigAccessor executionOptions =
ExecutionConfigAccessor.fromProgramOptions(programOptions, Collections.emptyList());
SavepointRestoreSettings savepointSettings = executionOptions.getSavepointRestoreSettings();
assertTrue(savepointSettings.restoreSavepoint());
assertEquals(RestoreMode.LEGACY, savepointSettings.getRestoreMode());
assertEquals("expectedSavepointPath", savepointSettings.getRestorePath());
assertTrue(savepointSettings.allowNonRestoredState());
}
@Test(expected = CliArgsException.class)
public void testUnrecognizedOption() throws Exception {
String[] parameters = {"-v", "-l", "-a", "some", "program", "arguments"};
Configuration configuration = getConfiguration();
CliFrontend testFrontend =
new CliFrontend(configuration, Collections.singletonList(getCli()));
testFrontend.run(parameters);
}
@Test(expected = CliArgsException.class)
public void testInvalidParallelismOption() throws Exception {
String[] parameters = {"-v", "-p", "text", getTestJarPath()};
Configuration configuration = getConfiguration();
CliFrontend testFrontend =
new CliFrontend(configuration, Collections.singletonList(getCli()));
testFrontend.run(parameters);
}
@Test(expected = CliArgsException.class)
public void testParallelismWithOverflow() throws Exception {
String[] parameters = {"-v", "-p", "475871387138", getTestJarPath()};
Configuration configuration = new Configuration();
CliFrontend testFrontend =
new CliFrontend(configuration, Collections.singletonList(getCli()));
testFrontend.run(parameters);
}
public static void verifyCliFrontend(
Configuration configuration,
AbstractCustomCommandLine cli,
String[] parameters,
int expectedParallelism,
boolean isDetached)
throws Exception {
RunTestingCliFrontend testFrontend =
new RunTestingCliFrontend(
configuration,
new DefaultClusterClientServiceLoader(),
cli,
expectedParallelism,
isDetached);
testFrontend.run(parameters);
}
public static void verifyCliFrontend(
Configuration configuration,
ClusterClientServiceLoader clusterClientServiceLoader,
AbstractCustomCommandLine cli,
String[] parameters,
int expectedParallelism,
boolean isDetached)
throws Exception {
RunTestingCliFrontend testFrontend =
new RunTestingCliFrontend(
configuration,
clusterClientServiceLoader,
cli,
expectedParallelism,
isDetached);
testFrontend.run(parameters);
}
private static final class RunTestingCliFrontend extends CliFrontend {
private final int expectedParallelism;
private final boolean isDetached;
private RunTestingCliFrontend(
Configuration configuration,
ClusterClientServiceLoader clusterClientServiceLoader,
AbstractCustomCommandLine cli,
int expectedParallelism,
boolean isDetached) {
super(configuration, clusterClientServiceLoader, Collections.singletonList(cli));
this.expectedParallelism = expectedParallelism;
this.isDetached = isDetached;
}
@Override
protected void executeProgram(Configuration configuration, PackagedProgram program) {
final ExecutionConfigAccessor executionConfigAccessor =
ExecutionConfigAccessor.fromConfiguration(configuration);
assertEquals(isDetached, executionConfigAccessor.getDetachedMode());
assertEquals(expectedParallelism, executionConfigAccessor.getParallelism());
}
}
} |
Why do we need to call clear here? | public void init() throws IOException {
resultPath = tempFolder().newFolder().toURI().toString();
clear();
env().setParallelism(3);
env().enableCheckpointing(100);
rows = new ArrayList<>();
for (int i = 0; i < 100; i++) {
rows.add(Row.of(i, String.valueOf(i % 10), String.valueOf(i)));
}
DataStream<Row> stream = new DataStream<>(env().getJavaEnv().addSource(
new ParallelFiniteTestSource<>(rows),
new RowTypeInfo(
new TypeInformation[] {Types.INT, Types.STRING, Types.STRING},
new String[] {"a", "b", "c"})));
tEnv().createTemporaryView("my_table", stream);
} | clear(); | public void init() throws IOException {
resultPath = tempFolder().newFolder().toURI().toString();
env().setParallelism(3);
env().enableCheckpointing(100);
List<Row> rows = new ArrayList<>();
for (int i = 0; i < 100; i++) {
rows.add(Row.of(i, String.valueOf(i % 10), String.valueOf(i)));
}
this.expectedRows = new ArrayList<>();
this.expectedRows.addAll(rows);
this.expectedRows.addAll(rows);
this.expectedRows.sort(Comparator.comparingInt(o -> (Integer) o.getField(0)));
DataStream<Row> stream = new DataStream<>(env().getJavaEnv().addSource(
new ParallelFiniteTestSource<>(rows),
new RowTypeInfo(
new TypeInformation[] {Types.INT, Types.STRING, Types.STRING},
new String[] {"a", "b", "c"})));
tEnv().createTemporaryView("my_table", stream);
} | class FileCompactionITCaseBase extends StreamingTestBase {
@Rule
public Timeout timeoutPerTest = Timeout.seconds(60);
private String resultPath;
private List<Row> rows;
@Before
@After
public void clear() throws IOException {
FileUtils.deleteDirectory(new File(URI.create(resultPath)));
}
protected abstract String format();
@Test
public void testNonPartition() throws Exception {
tEnv().executeSql("CREATE TABLE sink_table (a int, b string, c string) with (" + options() + ")");
tEnv().executeSql("insert into sink_table select * from my_table").await();
List<Row> results = toListAndClose(tEnv().executeSql("select * from sink_table").collect());
results.sort(Comparator.comparingInt(o -> (Integer) o.getField(0)));
assertEquals(rows, results);
File[] files = new File(URI.create(resultPath)).listFiles(
(dir, name) -> name.startsWith("compacted-part-"));
assertEquals(Arrays.toString(files), 1, files.length);
String fileName = files[0].getName();
assertTrue(fileName, fileName.startsWith("compacted-part-"));
}
@Test
public void testPartition() throws Exception {
tEnv().executeSql("CREATE TABLE sink_table (a int, b string, c string) partitioned by (b) with (" + options() + ")");
tEnv().executeSql("insert into sink_table select * from my_table").await();
List<Row> results = toListAndClose(tEnv().executeSql("select * from sink_table").collect());
results.sort(Comparator.comparingInt(o -> (Integer) o.getField(0)));
assertEquals(rows, results);
File path = new File(URI.create(resultPath));
assertEquals(10, path.listFiles().length);
for (int i = 0; i < 10; i++) {
File partition = new File(path, "b=" + i);
File[] files = partition.listFiles();
assertEquals(Arrays.toString(files), 2, files.length);
assertEquals(1, partition.list((dir, name) -> name.equals("_SUCCESS")).length);
assertEquals(1, partition.list((dir, name) -> name.startsWith("compacted-part-")).length);
}
}
private String options() {
return "'connector'='filesystem'," +
"'sink.partition-commit.policy.kind'='success-file'," +
"'auto-compaction'='true'," +
"'compaction.file-size' = '128MB'," +
"'sink.rolling-policy.file-size' = '1b'," +
kv("format", format()) + "," +
kv("path", resultPath);
}
private String kv(String key, String value) {
return String.format("'%s'='%s'", key, value);
}
private List<Row> toListAndClose(CloseableIterator<Row> iterator) throws Exception {
List<Row> rows = CollectionUtil.iteratorToList(iterator);
iterator.close();
return rows;
}
} | class FileCompactionITCaseBase extends StreamingTestBase {
@Rule
public Timeout timeoutPerTest = Timeout.seconds(60);
private String resultPath;
private List<Row> expectedRows;
@Before
protected abstract String format();
@Test
public void testNonPartition() throws Exception {
tEnv().executeSql("CREATE TABLE sink_table (a int, b string, c string) with (" + options() + ")");
tEnv().executeSql("insert into sink_table select * from my_table").await();
assertIterator(tEnv().executeSql("select * from sink_table").collect());
assertFiles(new File(URI.create(resultPath)).listFiles(), false);
}
@Test
public void testPartition() throws Exception {
tEnv().executeSql("CREATE TABLE sink_table (a int, b string, c string) partitioned by (b) with (" + options() + ")");
tEnv().executeSql("insert into sink_table select * from my_table").await();
assertIterator(tEnv().executeSql("select * from sink_table").collect());
File path = new File(URI.create(resultPath));
assertEquals(10, path.listFiles().length);
for (int i = 0; i < 10; i++) {
File partition = new File(path, "b=" + i);
assertFiles(partition.listFiles(), true);
}
}
private String options() {
return "'connector'='filesystem'," +
"'sink.partition-commit.policy.kind'='success-file'," +
"'auto-compaction'='true'," +
"'compaction.file-size' = '128MB'," +
"'sink.rolling-policy.file-size' = '1b'," +
kv("format", format()) + "," +
kv("path", resultPath);
}
private String kv(String key, String value) {
return String.format("'%s'='%s'", key, value);
}
private void assertIterator(CloseableIterator<Row> iterator) throws Exception {
List<Row> result = CollectionUtil.iteratorToList(iterator);
iterator.close();
result.sort(Comparator.comparingInt(o -> (Integer) o.getField(0)));
assertEquals(expectedRows, result);
}
private void assertFiles(File[] files, boolean containSuccess) {
File successFile = null;
for (File file : files) {
if (containSuccess && file.getName().equals("_SUCCESS")) {
successFile = file;
} else {
assertTrue(file.getName(), file.getName().startsWith(COMPACTED_PREFIX));
}
}
if (containSuccess) {
Assert.assertNotNull("Should contains success file", successFile);
}
}
} |
how about : ``` assertThat(configuration.keySet()).containsExactly(expectedKey); ``` | void testDelegationConfigurationWithPrefix() {
String prefix = "pref-";
String expectedKey = "key";
/*
* Key matches the prefix
*/
Configuration backingConf = new Configuration();
backingConf.setValueInternal(prefix + expectedKey, "value", false);
DelegatingConfiguration configuration = new DelegatingConfiguration(backingConf, prefix);
Set<String> keySet = configuration.keySet();
assertThat(keySet).hasSize(1);
assertThat(expectedKey).isEqualTo(keySet.iterator().next());
/*
* Key does not match the prefix
*/
backingConf = new Configuration();
backingConf.setValueInternal("test-key", "value", false);
configuration = new DelegatingConfiguration(backingConf, prefix);
keySet = configuration.keySet();
assertThat(keySet).isEmpty();
} | assertThat(expectedKey).isEqualTo(keySet.iterator().next()); | void testDelegationConfigurationWithPrefix() {
String prefix = "pref-";
String expectedKey = "key";
/*
* Key matches the prefix
*/
Configuration backingConf = new Configuration();
backingConf.setValueInternal(prefix + expectedKey, "value", false);
DelegatingConfiguration configuration = new DelegatingConfiguration(backingConf, prefix);
Set<String> keySet = configuration.keySet();
assertThat(keySet).hasSize(1).containsExactly(expectedKey);
/*
* Key does not match the prefix
*/
backingConf = new Configuration();
backingConf.setValueInternal("test-key", "value", false);
configuration = new DelegatingConfiguration(backingConf, prefix);
assertThat(configuration.keySet()).isEmpty();
} | class and call it
lookForWrapper:
for (Method wrapperMethod : delegateMethods) {
if (configurationMethod.getName().equals(wrapperMethod.getName())) {
Class<?>[] wrapperMethodParams = wrapperMethod.getParameterTypes();
Class<?>[] configMethodParams = configurationMethod.getParameterTypes();
if (wrapperMethodParams.length != configMethodParams.length) {
continue;
}
for (int i = 0; i < wrapperMethodParams.length; i++) {
if (wrapperMethodParams[i] != configMethodParams[i]) {
continue lookForWrapper;
}
}
hasMethod = true;
break;
}
} | class and call it
lookForWrapper:
for (Method wrapperMethod : delegateMethods) {
if (configurationMethod.getName().equals(wrapperMethod.getName())) {
Class<?>[] wrapperMethodParams = wrapperMethod.getParameterTypes();
Class<?>[] configMethodParams = configurationMethod.getParameterTypes();
if (wrapperMethodParams.length != configMethodParams.length) {
continue;
}
for (int i = 0; i < wrapperMethodParams.length; i++) {
if (wrapperMethodParams[i] != configMethodParams[i]) {
continue lookForWrapper;
}
}
hasMethod = true;
break;
}
} |
OK. I didn't notice the `TableProperty`, Thanks for your reminder. | public void readFields(DataInput in) throws IOException {
super.readFields(in);
this.state = OlapTableState.valueOf(Text.readString(in));
int counter = in.readInt();
for (int i = 0; i < counter; i++) {
String indexName = Text.readString(in);
long indexId = in.readLong();
this.indexNameToId.put(indexName, indexId);
int colCount = in.readInt();
List<Column> schema = new LinkedList<Column>();
for (int j = 0; j < colCount; j++) {
Column column = Column.read(in);
schema.add(column);
}
this.indexIdToSchema.put(indexId, schema);
TStorageType type = TStorageType.valueOf(Text.readString(in));
this.indexIdToStorageType.put(indexId, type);
this.indexIdToSchemaVersion.put(indexId, in.readInt());
this.indexIdToSchemaHash.put(indexId, in.readInt());
this.indexIdToShortKeyColumnCount.put(indexId, in.readShort());
}
if (Catalog.getCurrentCatalogJournalVersion() >= FeMetaVersion.VERSION_30) {
keysType = KeysType.valueOf(Text.readString(in));
} else {
keysType = KeysType.AGG_KEYS;
}
PartitionType partType = PartitionType.valueOf(Text.readString(in));
if (partType == PartitionType.UNPARTITIONED) {
partitionInfo = SinglePartitionInfo.read(in);
} else if (partType == PartitionType.RANGE) {
partitionInfo = RangePartitionInfo.read(in);
} else {
throw new IOException("invalid partition type: " + partType);
}
DistributionInfoType distriType = DistributionInfoType.valueOf(Text.readString(in));
if (distriType == DistributionInfoType.HASH) {
defaultDistributionInfo = HashDistributionInfo.read(in);
} else if (distriType == DistributionInfoType.RANDOM) {
defaultDistributionInfo = RandomDistributionInfo.read(in);
} else {
throw new IOException("invalid distribution type: " + distriType);
}
int partitionCount = in.readInt();
for (int i = 0; i < partitionCount; ++i) {
Partition partition = Partition.read(in);
idToPartition.put(partition.getId(), partition);
nameToPartition.put(partition.getName(), partition);
}
if (Catalog.getCurrentCatalogJournalVersion() >= FeMetaVersion.VERSION_9) {
if (in.readBoolean()) {
int bfColumnCount = in.readInt();
bfColumns = Sets.newHashSet();
for (int i = 0; i < bfColumnCount; i++) {
bfColumns.add(Text.readString(in));
}
bfFpp = in.readDouble();
}
}
if (Catalog.getCurrentCatalogJournalVersion() >= FeMetaVersion.VERSION_46) {
if (in.readBoolean()) {
colocateGroup = Text.readString(in);
}
}
if (Catalog.getCurrentCatalogJournalVersion() >= FeMetaVersion.VERSION_57) {
baseIndexId = in.readLong();
} else {
baseIndexId = id;
}
if (Catalog.getCurrentCatalogJournalVersion() >= FeMetaVersion.VERSION_70) {
if (in.readBoolean()) {
this.indexes = TableIndexes.read(in);
}
}
if (Catalog.getCurrentCatalogJournalVersion() >= FeMetaVersion.VERSION_71) {
if (in.readBoolean()) {
tableProperty = TableProperty.read(in);
}
}
if (Catalog.getCurrentCatalogJournalVersion() >= FeMetaVersion.VERSION_72) {
isInMemory = in.readBoolean();
}
} | isInMemory = in.readBoolean(); | public void readFields(DataInput in) throws IOException {
super.readFields(in);
this.state = OlapTableState.valueOf(Text.readString(in));
int counter = in.readInt();
for (int i = 0; i < counter; i++) {
String indexName = Text.readString(in);
long indexId = in.readLong();
this.indexNameToId.put(indexName, indexId);
int colCount = in.readInt();
List<Column> schema = new LinkedList<Column>();
for (int j = 0; j < colCount; j++) {
Column column = Column.read(in);
schema.add(column);
}
this.indexIdToSchema.put(indexId, schema);
TStorageType type = TStorageType.valueOf(Text.readString(in));
this.indexIdToStorageType.put(indexId, type);
this.indexIdToSchemaVersion.put(indexId, in.readInt());
this.indexIdToSchemaHash.put(indexId, in.readInt());
this.indexIdToShortKeyColumnCount.put(indexId, in.readShort());
}
if (Catalog.getCurrentCatalogJournalVersion() >= FeMetaVersion.VERSION_30) {
keysType = KeysType.valueOf(Text.readString(in));
} else {
keysType = KeysType.AGG_KEYS;
}
PartitionType partType = PartitionType.valueOf(Text.readString(in));
if (partType == PartitionType.UNPARTITIONED) {
partitionInfo = SinglePartitionInfo.read(in);
} else if (partType == PartitionType.RANGE) {
partitionInfo = RangePartitionInfo.read(in);
} else {
throw new IOException("invalid partition type: " + partType);
}
DistributionInfoType distriType = DistributionInfoType.valueOf(Text.readString(in));
if (distriType == DistributionInfoType.HASH) {
defaultDistributionInfo = HashDistributionInfo.read(in);
} else if (distriType == DistributionInfoType.RANDOM) {
defaultDistributionInfo = RandomDistributionInfo.read(in);
} else {
throw new IOException("invalid distribution type: " + distriType);
}
int partitionCount = in.readInt();
for (int i = 0; i < partitionCount; ++i) {
Partition partition = Partition.read(in);
idToPartition.put(partition.getId(), partition);
nameToPartition.put(partition.getName(), partition);
}
if (Catalog.getCurrentCatalogJournalVersion() >= FeMetaVersion.VERSION_9) {
if (in.readBoolean()) {
int bfColumnCount = in.readInt();
bfColumns = Sets.newHashSet();
for (int i = 0; i < bfColumnCount; i++) {
bfColumns.add(Text.readString(in));
}
bfFpp = in.readDouble();
}
}
if (Catalog.getCurrentCatalogJournalVersion() >= FeMetaVersion.VERSION_46) {
if (in.readBoolean()) {
colocateGroup = Text.readString(in);
}
}
if (Catalog.getCurrentCatalogJournalVersion() >= FeMetaVersion.VERSION_57) {
baseIndexId = in.readLong();
} else {
baseIndexId = id;
}
if (Catalog.getCurrentCatalogJournalVersion() >= FeMetaVersion.VERSION_70) {
if (in.readBoolean()) {
this.indexes = TableIndexes.read(in);
}
}
if (Catalog.getCurrentCatalogJournalVersion() >= FeMetaVersion.VERSION_71) {
if (in.readBoolean()) {
tableProperty = TableProperty.read(in);
}
}
} | class OlapTable extends Table {
private static final Logger LOG = LogManager.getLogger(OlapTable.class);
public enum OlapTableState {
NORMAL,
ROLLUP,
SCHEMA_CHANGE,
@Deprecated
BACKUP,
RESTORE,
RESTORE_WITH_LOAD
}
private OlapTableState state;
private Map<Long, List<Column>> indexIdToSchema;
private Map<Long, Integer> indexIdToSchemaVersion;
private Map<Long, Integer> indexIdToSchemaHash;
private Map<Long, Short> indexIdToShortKeyColumnCount;
private Map<Long, TStorageType> indexIdToStorageType;
private Map<String, Long> indexNameToId;
private KeysType keysType;
private PartitionInfo partitionInfo;
private DistributionInfo defaultDistributionInfo;
private Map<Long, Partition> idToPartition;
private Map<String, Partition> nameToPartition;
private Set<String> bfColumns;
private double bfFpp;
private String colocateGroup;
private TableIndexes indexes;
private long baseIndexId = -1;
private TableProperty tableProperty;
private boolean isInMemory = false;
public OlapTable() {
super(TableType.OLAP);
this.indexIdToSchema = new HashMap<Long, List<Column>>();
this.indexIdToSchemaHash = new HashMap<Long, Integer>();
this.indexIdToSchemaVersion = new HashMap<Long, Integer>();
this.indexIdToShortKeyColumnCount = new HashMap<Long, Short>();
this.indexIdToStorageType = new HashMap<Long, TStorageType>();
this.indexNameToId = new HashMap<String, Long>();
this.idToPartition = new HashMap<Long, Partition>();
this.nameToPartition = Maps.newTreeMap(String.CASE_INSENSITIVE_ORDER);
this.bfColumns = null;
this.bfFpp = 0;
this.colocateGroup = null;
this.indexes = null;
this.tableProperty = null;
}
public OlapTable(long id, String tableName, List<Column> baseSchema, KeysType keysType,
PartitionInfo partitionInfo, DistributionInfo defaultDistributionInfo) {
this(id, tableName, baseSchema, keysType, partitionInfo, defaultDistributionInfo, null);
}
public OlapTable(long id, String tableName, List<Column> baseSchema, KeysType keysType,
PartitionInfo partitionInfo, DistributionInfo defaultDistributionInfo, TableIndexes indexes) {
super(id, tableName, TableType.OLAP, baseSchema);
this.state = OlapTableState.NORMAL;
this.indexIdToSchema = new HashMap<Long, List<Column>>();
this.indexIdToSchemaHash = new HashMap<Long, Integer>();
this.indexIdToSchemaVersion = new HashMap<Long, Integer>();
this.indexIdToShortKeyColumnCount = new HashMap<Long, Short>();
this.indexIdToStorageType = new HashMap<Long, TStorageType>();
this.indexNameToId = new HashMap<String, Long>();
this.idToPartition = new HashMap<Long, Partition>();
this.nameToPartition = Maps.newTreeMap(String.CASE_INSENSITIVE_ORDER);
this.keysType = keysType;
this.partitionInfo = partitionInfo;
this.defaultDistributionInfo = defaultDistributionInfo;
this.bfColumns = null;
this.bfFpp = 0;
this.colocateGroup = null;
this.indexes = indexes;
this.tableProperty = null;
}
// Attaches the table-level property bag (replication num, dynamic partition, ...).
public void setTableProperty(TableProperty tableProperty) {
this.tableProperty = tableProperty;
}
// May be null for tables created without explicit properties.
public TableProperty getTableProperty() {
return this.tableProperty;
}
// True when a dynamic-partition policy has been configured on this table.
public boolean dynamicPartitionExists() {
return tableProperty != null
&& tableProperty.getDynamicPartitionProperty() != null
&& tableProperty.getDynamicPartitionProperty().isExist();
}
public void setBaseIndexId(long baseIndexId) {
this.baseIndexId = baseIndexId;
}
// Id of the base materialized index; -1 until assigned.
public long getBaseIndexId() {
return baseIndexId;
}
public void setState(OlapTableState state) {
this.state = state;
}
public OlapTableState getState() {
return state;
}
// Returns the index definitions; empty list (never null) when none exist.
public List<Index> getIndexes() {
if (indexes == null) {
return Lists.newArrayList();
}
return indexes.getIndexes();
}
// Raw index-metadata holder; may be null.
public TableIndexes getTableIndexes() {
return indexes;
}
// Builds a name -> Index lookup map; empty when the table has no index metadata.
public Map<String, Index> getIndexesMap() {
    Map<String, Index> byName = new HashMap<>();
    if (indexes != null) {
        List<Index> idxList = indexes.getIndexes();
        if (idxList != null) {
            for (Index idx : idxList) {
                byName.put(idx.getIndexName(), idx);
            }
        }
    }
    return byName;
}
// Renames the table: the base index entry is re-keyed under the new name, and for
// an unpartitioned table the single partition (which shares the table name) is
// renamed as well.
public void setName(String newName) {
long baseIndexId = indexNameToId.remove(this.name);
indexNameToId.put(newName, baseIndexId);
this.name = newName;
if (this.partitionInfo.getType() == PartitionType.UNPARTITIONED) {
for (Partition partition : getPartitions()) {
partition.setName(newName);
nameToPartition.clear();
nameToPartition.put(newName, partition);
break;
}
}
}
// True when a materialized index with the given name exists on this table.
public boolean hasMaterializedIndex(String indexName) {
return indexNameToId.containsKey(indexName);
}
/*
* Set index schema info for specified index.
*/
// Registers schema metadata for an index. A null indexName means the index id must
// already be registered (the schema-change path updates metadata in place).
public void setIndexSchemaInfo(Long indexId, String indexName, List<Column> schema, int schemaVersion,
int schemaHash, short shortKeyColumnCount) {
if (indexName == null) {
Preconditions.checkState(indexNameToId.containsValue(indexId));
} else {
indexNameToId.put(indexName, indexId);
}
indexIdToSchema.put(indexId, schema);
indexIdToSchemaVersion.put(indexId, schemaVersion);
indexIdToSchemaHash.put(indexId, schemaHash);
indexIdToShortKeyColumnCount.put(indexId, shortKeyColumnCount);
}
// Only columnar storage is accepted.
public void setIndexStorageType(Long indexId, TStorageType newStorageType) {
Preconditions.checkState(newStorageType == TStorageType.COLUMN);
indexIdToStorageType.put(indexId, newStorageType);
}
// Rebuilds fullSchema / nameToColumn from every index schema. The first occurrence
// of each column name wins, following the iteration order of indexIdToSchema.
public void rebuildFullSchema() {
    fullSchema.clear();
    nameToColumn.clear();
    for (List<Column> indexSchema : indexIdToSchema.values()) {
        for (Column col : indexSchema) {
            String colName = col.getName();
            if (nameToColumn.containsKey(colName)) {
                continue;
            }
            fullSchema.add(col);
            nameToColumn.put(colName, col);
        }
    }
    LOG.debug("after rebuild full schema. table {}, schema: {}", id, fullSchema);
}
// Removes all schema metadata for the named index; returns false when the index
// is unknown to this table.
public boolean deleteIndexInfo(String indexName) {
    Long removedId = indexNameToId.remove(indexName);
    if (removedId == null) {
        return false;
    }
    indexIdToSchema.remove(removedId);
    indexIdToSchemaVersion.remove(removedId);
    indexIdToSchemaHash.remove(removedId);
    indexIdToShortKeyColumnCount.remove(removedId);
    indexIdToStorageType.remove(removedId);
    return true;
}
public Map<String, Long> getIndexNameToId() {
return indexNameToId;
}
public Long getIndexIdByName(String indexName) {
return indexNameToId.get(indexName);
}
// Reverse lookup: index id -> name; null when unknown. (entry.getValue() is
// auto-unboxed here, so == compares primitive longs, not references.)
public String getIndexNameById(long indexId) {
for (Map.Entry<String, Long> entry : indexNameToId.entrySet()) {
if (entry.getValue() == indexId) {
return entry.getKey();
}
}
return null;
}
// Re-keys an index under its new name (schema-change rename path).
public void renameIndexForSchemaChange(String name, String newName) {
long idxId = indexNameToId.remove(name);
indexNameToId.put(newName, idxId);
}
// Strips the internal shadow-column name prefix from every column of the index.
public void renameColumnNamePrefix(long idxId) {
List<Column> columns = indexIdToSchema.get(idxId);
for (Column column : columns) {
column.setName(Column.removeNamePrefix(column.getName()));
}
}
/**
 * Reassigns every id (table, index, partition, tablet, replica) with fresh ids from
 * the catalog so a restored table does not clash with existing metadata, and
 * re-chooses backends for all tablets. Returns a non-OK status when not enough
 * distinct hosts are available for the requested replication number.
 */
public Status resetIdsForRestore(Catalog catalog, Database db, int restoreReplicationNum) {
id = catalog.getNextId();
// Snapshot old index id -> name before re-keying the metadata maps.
Map<Long, String> origIdxIdToName = Maps.newHashMap();
for (Map.Entry<String, Long> entry : indexNameToId.entrySet()) {
origIdxIdToName.put(entry.getValue(), entry.getKey());
}
for (Map.Entry<Long, String> entry : origIdxIdToName.entrySet()) {
long newIdxId = catalog.getNextId();
if (entry.getValue().equals(name)) {
// The index sharing the table name is the base index.
baseIndexId = newIdxId;
}
indexIdToSchema.put(newIdxId, indexIdToSchema.remove(entry.getKey()));
indexIdToSchemaHash.put(newIdxId, indexIdToSchemaHash.remove(entry.getKey()));
indexIdToSchemaVersion.put(newIdxId, indexIdToSchemaVersion.remove(entry.getKey()));
indexIdToShortKeyColumnCount.put(newIdxId, indexIdToShortKeyColumnCount.remove(entry.getKey()));
indexIdToStorageType.put(newIdxId, indexIdToStorageType.remove(entry.getKey()));
indexNameToId.put(entry.getValue(), newIdxId);
}
// Re-key partitions; replication num is overridden with restoreReplicationNum.
Map<String, Long> origPartNameToId = Maps.newHashMap();
for (Partition partition : idToPartition.values()) {
origPartNameToId.put(partition.getName(), partition.getId());
}
if (partitionInfo.getType() == PartitionType.RANGE) {
RangePartitionInfo rangePartitionInfo = (RangePartitionInfo) partitionInfo;
for (Map.Entry<String, Long> entry : origPartNameToId.entrySet()) {
long newPartId = catalog.getNextId();
rangePartitionInfo.idToDataProperty.put(newPartId,
rangePartitionInfo.idToDataProperty.remove(entry.getValue()));
rangePartitionInfo.idToReplicationNum.remove(entry.getValue());
rangePartitionInfo.idToReplicationNum.put(newPartId,
(short) restoreReplicationNum);
rangePartitionInfo.getIdToRange().put(newPartId,
rangePartitionInfo.getIdToRange().remove(entry.getValue()));
idToPartition.put(newPartId, idToPartition.remove(entry.getValue()));
}
} else {
// NOTE(review): newPartId is generated once OUTSIDE the loop; safe only while
// non-range tables have a single partition — confirm that invariant holds.
long newPartId = catalog.getNextId();
for (Map.Entry<String, Long> entry : origPartNameToId.entrySet()) {
partitionInfo.idToDataProperty.put(newPartId, partitionInfo.idToDataProperty.remove(entry.getValue()));
partitionInfo.idToReplicationNum.remove(entry.getValue());
partitionInfo.idToReplicationNum.put(newPartId, (short) restoreReplicationNum);
idToPartition.put(newPartId, idToPartition.remove(entry.getValue()));
}
}
// Rebuild each partition's indexes, tablets and replicas with fresh ids.
for (Map.Entry<Long, Partition> entry : idToPartition.entrySet()) {
Partition partition = entry.getValue();
for (Map.Entry<Long, String> entry2 : origIdxIdToName.entrySet()) {
MaterializedIndex idx = partition.getIndex(entry2.getKey());
long newIdxId = indexNameToId.get(entry2.getValue());
int schemaHash = indexIdToSchemaHash.get(newIdxId);
idx.setIdForRestore(newIdxId);
if (newIdxId != baseIndexId) {
// Rollup indexes are re-registered under their new id.
partition.deleteRollupIndex(entry2.getKey());
partition.createRollupIndex(idx);
}
int tabletNum = idx.getTablets().size();
idx.clearTabletsForRestore();
for (int i = 0; i < tabletNum; i++) {
long newTabletId = catalog.getNextId();
Tablet newTablet = new Tablet(newTabletId);
idx.addTablet(newTablet, null /* tablet meta */, true /* is restore */);
List<Long> beIds = Catalog.getCurrentSystemInfo().seqChooseBackendIds(partitionInfo.getReplicationNum(entry.getKey()),
true, true,
db.getClusterName());
if (beIds == null) {
return new Status(ErrCode.COMMON_ERROR, "failed to find "
+ partitionInfo.getReplicationNum(entry.getKey())
+ " different hosts to create table: " + name);
}
for (Long beId : beIds) {
long newReplicaId = catalog.getNextId();
Replica replica = new Replica(newReplicaId, beId, ReplicaState.NORMAL,
partition.getVisibleVersion(), partition.getVisibleVersionHash(), schemaHash);
newTablet.addReplica(replica, true /* is restore */);
}
}
}
// entry.getKey() is already the NEW partition id at this point.
partition.setIdForRestore(entry.getKey());
}
return Status.OK;
}
public Map<Long, List<Column>> getIndexIdToSchema() {
return indexIdToSchema;
}
// Shallow defensive copy (keys/values shared with the live map).
public Map<Long, List<Column>> getCopiedIndexIdToSchema() {
return new HashMap<>(indexIdToSchema);
}
public List<Column> getSchemaByIndexId(Long indexId) {
return indexIdToSchema.get(indexId);
}
// Key columns only, in schema order.
public List<Column> getKeyColumnsByIndexId(Long indexId) {
ArrayList<Column> keyColumns = Lists.newArrayList();
List<Column> allColumns = this.getSchemaByIndexId(indexId);
for (Column column : allColumns) {
if (column.isKey()) {
keyColumns.add(column);
}
}
return keyColumns;
}
// -1 when the index id is unknown.
public int getSchemaVersionByIndexId(Long indexId) {
if (indexIdToSchemaVersion.containsKey(indexId)) {
return indexIdToSchemaVersion.get(indexId);
}
return -1;
}
public Map<Long, Integer> getIndexIdToSchemaHash() {
return indexIdToSchemaHash;
}
public Map<Long, Integer> getCopiedIndexIdToSchemaHash() {
return new HashMap<>(indexIdToSchemaHash);
}
// -1 when the index id is unknown.
public int getSchemaHashByIndexId(Long indexId) {
if (indexIdToSchemaHash.containsKey(indexId)) {
return indexIdToSchemaHash.get(indexId);
}
return -1;
}
public Map<Long, Short> getIndexIdToShortKeyColumnCount() {
return indexIdToShortKeyColumnCount;
}
public Map<Long, Short> getCopiedIndexIdToShortKeyColumnCount() {
return new HashMap<>(indexIdToShortKeyColumnCount);
}
// -1 when the index id is unknown.
public short getShortKeyColumnCountByIndexId(Long indexId) {
if (indexIdToShortKeyColumnCount.containsKey(indexId)) {
return indexIdToShortKeyColumnCount.get(indexId);
}
return (short) -1;
}
public Map<Long, TStorageType> getIndexIdToStorageType() {
return indexIdToStorageType;
}
public Map<Long, TStorageType> getCopiedIndexIdToStorageType() {
return new HashMap<>(indexIdToStorageType);
}
public void setStorageTypeToIndex(Long indexId, TStorageType storageType) {
indexIdToStorageType.put(indexId, storageType);
}
public TStorageType getStorageTypeByIndexId(Long indexId) {
return indexIdToStorageType.get(indexId);
}
// Simple accessors for the table's key model, partition scheme and default distribution.
public KeysType getKeysType() {
return keysType;
}
public PartitionInfo getPartitionInfo() {
return partitionInfo;
}
public DistributionInfo getDefaultDistributionInfo() {
return defaultDistributionInfo;
}
/**
 * Renames a partition. For an unpartitioned table the single partition always
 * tracks the table name, so it is renamed unconditionally (partitionName is
 * ignored); otherwise the entry is re-keyed in the case-insensitive name map.
 */
public void renamePartition(String partitionName, String newPartitionName) {
    if (partitionInfo.getType() == PartitionType.UNPARTITIONED) {
        // An unpartitioned table has exactly one partition.
        for (Partition partition : idToPartition.values()) {
            partition.setName(newPartitionName);
            nameToPartition.clear();
            nameToPartition.put(newPartitionName, partition);
            // Fixed typo in log message: "patition" -> "partition".
            LOG.info("rename partition {} in table {}", newPartitionName, name);
            break;
        }
    } else {
        Partition partition = nameToPartition.remove(partitionName);
        partition.setName(newPartitionName);
        nameToPartition.put(newPartitionName, partition);
    }
}
// Registers a partition under both its id and its (case-insensitive) name.
public void addPartition(Partition partition) {
idToPartition.put(partition.getId(), partition);
nameToPartition.put(partition.getName(), partition);
}
// Drops a partition; it is moved to the catalog recycle bin (isRestore == false).
public Partition dropPartition(long dbId, String partitionName) {
return dropPartition(dbId, partitionName, false);
}
// Drops a named partition. Only RANGE-partitioned tables support dropping; unless
// restoring, the partition (with its range/property/replication info) is recycled
// so it can be recovered later. Returns the dropped partition, or null if absent.
public Partition dropPartition(long dbId, String partitionName, boolean isRestore) {
Partition partition = nameToPartition.get(partitionName);
if (partition != null) {
idToPartition.remove(partition.getId());
nameToPartition.remove(partitionName);
Preconditions.checkState(partitionInfo.getType() == PartitionType.RANGE);
RangePartitionInfo rangePartitionInfo = (RangePartitionInfo) partitionInfo;
if (!isRestore) {
Catalog.getCurrentRecycleBin().recyclePartition(dbId, id, partition,
rangePartitionInfo.getRange(partition.getId()),
rangePartitionInfo.getDataProperty(partition.getId()),
rangePartitionInfo.getReplicationNum(partition.getId()));
}
rangePartitionInfo.dropPartition(partition.getId());
}
return partition;
}
// Backup/restore path: drop without recycling (dbId is irrelevant, pass -1).
public Partition dropPartitionForBackup(String partitionName) {
return dropPartition(-1, partitionName, true);
}
public Collection<Partition> getPartitions() {
return idToPartition.values();
}
public Partition getPartition(long partitionId) {
return idToPartition.get(partitionId);
}
// Name lookup is case-insensitive (nameToPartition uses CASE_INSENSITIVE_ORDER).
public Partition getPartition(String partitionName) {
return nameToPartition.get(partitionName);
}
// Mutable snapshot of all partition names.
public Set<String> getPartitionNames() {
return Sets.newHashSet(nameToPartition.keySet());
}
// Defensive copy of the bloom-filter column set; null when none configured.
public Set<String> getCopiedBfColumns() {
if (bfColumns == null) {
return null;
}
return Sets.newHashSet(bfColumns);
}
// Copied index definitions; empty list (never null) when none exist.
public List<Index> getCopiedIndexes() {
if (indexes == null) {
return Lists.newArrayList();
}
return indexes.getCopiedIndexes();
}
// Bloom-filter false-positive probability; 0 when unset.
public double getBfFpp() {
return bfFpp;
}
// Sets bloom-filter columns and their false-positive probability together.
public void setBloomFilterInfo(Set<String> bfColumns, double bfFpp) {
this.bfColumns = bfColumns;
this.bfFpp = bfFpp;
}
// Replaces the index definitions, lazily creating the holder on first use.
public void setIndexes(List<Index> indexes) {
if (this.indexes == null) {
this.indexes = new TableIndexes(null);
}
this.indexes.setIndexes(indexes);
}
public String getColocateGroup() {
return colocateGroup;
}
public void setColocateGroup(String colocateGroup) {
this.colocateGroup = colocateGroup;
}
public boolean isInMemory() {
return this.isInMemory;
}
public void setIsInMemory(boolean isInMemory) {
this.isInMemory = isInMemory;
}
// Load hook: olap tables never require loading into a new rollup via this path.
public boolean shouldLoadToNewRollup() {
return false;
}
// Thrift descriptor sent to backends when this table appears in a query plan.
public TTableDescriptor toThrift() {
TOlapTable tOlapTable = new TOlapTable(getName());
TTableDescriptor tTableDescriptor = new TTableDescriptor(id, TTableType.OLAP_TABLE,
fullSchema.size(), 0, getName(), "");
tTableDescriptor.setOlapTable(tOlapTable);
return tTableDescriptor;
}
// Sum of the reported row counts of every partition's base index.
public long getRowCount() {
long rowCount = 0;
for (Map.Entry<Long, Partition> entry : idToPartition.entrySet()) {
rowCount += entry.getValue().getBaseIndex().getRowCount();
}
return rowCount;
}
/**
 * Rebuilds an ALTER TABLE ... ADD ROLLUP statement covering the given index ids,
 * preserving each rollup's column list, storage type, short key count and schema
 * version as statement properties.
 */
public AlterTableStmt toAddRollupStmt(String dbName, Collection<Long> indexIds) {
List<AlterClause> alterClauses = Lists.newArrayList();
for (Map.Entry<String, Long> entry : indexNameToId.entrySet()) {
String indexName = entry.getKey();
long indexId = entry.getValue();
if (!indexIds.contains(indexId)) {
continue;
}
List<String> columnNames = Lists.newArrayList();
for (Column column : indexIdToSchema.get(indexId)) {
columnNames.add(column.getName());
}
Map<String, String> properties = Maps.newHashMap();
properties.put(PropertyAnalyzer.PROPERTIES_STORAGE_TYPE, indexIdToStorageType.get(indexId).name());
properties.put(PropertyAnalyzer.PROPERTIES_SHORT_KEY, indexIdToShortKeyColumnCount.get(indexId).toString());
properties.put(PropertyAnalyzer.PROPERTIES_SCHEMA_VERSION, indexIdToSchemaVersion.get(indexId).toString());
AddRollupClause addRollupClause = new AddRollupClause(indexName, columnNames, null, null, properties);
alterClauses.add(addRollupClause);
}
return new AlterTableStmt(new TableName(dbName, name), alterClauses);
}
// Deprecated path: olap tables can no longer be round-tripped to a CREATE TABLE stmt.
@Override
public CreateTableStmt toCreateTableStmt(String dbName) {
throw new RuntimeException("Don't support anymore");
}
/**
 * Computes an Adler32-based signature over the table name/type, index metadata,
 * bloom-filter config, partition type/columns, and the distribution of the given
 * partitions. Used to check schema equivalence (e.g. backup/restore).
 * Returns -1 (and logs) on an encoding failure. Always non-negative otherwise.
 */
public int getSignature(int signatureVersion, List<String> partNames) {
Adler32 adler32 = new Adler32();
adler32.update(signatureVersion);
final String charsetName = "UTF-8";
try {
adler32.update(name.getBytes(charsetName));
LOG.debug("signature. table name: {}", name);
adler32.update(type.name().getBytes(charsetName));
LOG.debug("signature. table type: {}", type.name());
// Sort index names so the signature is independent of map iteration order.
Set<String> indexNames = Sets.newTreeSet();
indexNames.addAll(indexNameToId.keySet());
for (String indexName : indexNames) {
long indexId = indexNameToId.get(indexName);
adler32.update(indexName.getBytes(charsetName));
LOG.debug("signature. index name: {}", indexName);
adler32.update(indexIdToSchemaHash.get(indexId));
LOG.debug("signature. index schema hash: {}", indexIdToSchemaHash.get(indexId));
adler32.update(indexIdToShortKeyColumnCount.get(indexId));
LOG.debug("signature. index short key: {}", indexIdToShortKeyColumnCount.get(indexId));
adler32.update(indexIdToStorageType.get(indexId).name().getBytes(charsetName));
LOG.debug("signature. index storage type: {}", indexIdToStorageType.get(indexId));
}
if (bfColumns != null && !bfColumns.isEmpty()) {
for (String bfCol : bfColumns) {
// NOTE(review): getBytes() here uses the platform default charset, unlike the
// explicit UTF-8 used everywhere else — confirm whether that is intentional
// before changing it, since it would alter existing signatures.
adler32.update(bfCol.getBytes());
LOG.debug("signature. bf col: {}", bfCol);
}
adler32.update(String.valueOf(bfFpp).getBytes());
LOG.debug("signature. bf fpp: {}", bfFpp);
}
adler32.update(partitionInfo.getType().name().getBytes(charsetName));
LOG.debug("signature. partition type: {}", partitionInfo.getType().name());
if (partitionInfo.getType() == PartitionType.RANGE) {
RangePartitionInfo rangePartitionInfo = (RangePartitionInfo) partitionInfo;
List<Column> partitionColumns = rangePartitionInfo.getPartitionColumns();
adler32.update(Util.schemaHash(0, partitionColumns, null, 0));
LOG.debug("signature. partition col hash: {}", Util.schemaHash(0, partitionColumns, null, 0));
}
// Sort the requested partition names for order-independence as well.
Collections.sort(partNames, String.CASE_INSENSITIVE_ORDER);
for (String partName : partNames) {
Partition partition = getPartition(partName);
Preconditions.checkNotNull(partition, partName);
adler32.update(partName.getBytes(charsetName));
LOG.debug("signature. partition name: {}", partName);
DistributionInfo distributionInfo = partition.getDistributionInfo();
adler32.update(distributionInfo.getType().name().getBytes(charsetName));
if (distributionInfo.getType() == DistributionInfoType.HASH) {
HashDistributionInfo hashDistributionInfo = (HashDistributionInfo) distributionInfo;
adler32.update(Util.schemaHash(0, hashDistributionInfo.getDistributionColumns(), null, 0));
LOG.debug("signature. distribution col hash: {}",
Util.schemaHash(0, hashDistributionInfo.getDistributionColumns(), null, 0));
adler32.update(hashDistributionInfo.getBucketNum());
LOG.debug("signature. bucket num: {}", hashDistributionInfo.getBucketNum());
}
}
} catch (UnsupportedEncodingException e) {
LOG.error("encoding error", e);
return -1;
}
LOG.debug("signature: {}", Math.abs((int) adler32.getValue()));
return Math.abs((int) adler32.getValue());
}
// Collects the partition names present in BOTH tables into intersectPartNames.
// Fails fast when the two tables use different partition types.
public Status getIntersectPartNamesWith(OlapTable anotherTbl, List<String> intersectPartNames) {
    if (this.getPartitionInfo().getType() != anotherTbl.getPartitionInfo().getType()) {
        return new Status(ErrCode.COMMON_ERROR, "Table's partition type is different");
    }
    Set<String> otherNames = anotherTbl.getPartitionNames();
    for (String partName : this.getPartitionNames()) {
        if (otherNames.contains(partName)) {
            intersectPartNames.add(partName);
        }
    }
    return Status.OK;
}
// A table counts as "partitioned" once it spans more than one tablet bucket overall,
// summed across all partitions. Short-circuits as soon as the total exceeds one.
@Override
public boolean isPartitioned() {
    int bucketTotal = 0;
    for (Partition partition : getPartitions()) {
        bucketTotal += partition.getDistributionInfo().getBucketNum();
        if (bucketTotal > 1) {
            return true;
        }
    }
    return false;
}
// Serializes this table for metadata persistence. Field ORDER here must match the
// corresponding readFields/deserialization path exactly; nullable fields are
// written as a boolean presence flag followed by the payload.
@Override
public void write(DataOutput out) throws IOException {
super.write(out);
Text.writeString(out, state.name());
// Per-index metadata, keyed by name.
int counter = indexNameToId.size();
out.writeInt(counter);
for (Map.Entry<String, Long> entry : indexNameToId.entrySet()) {
String indexName = entry.getKey();
long indexId = entry.getValue();
Text.writeString(out, indexName);
out.writeLong(indexId);
out.writeInt(indexIdToSchema.get(indexId).size());
for (Column column : indexIdToSchema.get(indexId)) {
column.write(out);
}
Text.writeString(out, indexIdToStorageType.get(indexId).name());
out.writeInt(indexIdToSchemaVersion.get(indexId));
out.writeInt(indexIdToSchemaHash.get(indexId));
out.writeShort(indexIdToShortKeyColumnCount.get(indexId));
}
Text.writeString(out, keysType.name());
Text.writeString(out, partitionInfo.getType().name());
partitionInfo.write(out);
Text.writeString(out, defaultDistributionInfo.getType().name());
defaultDistributionInfo.write(out);
int partitionCount = idToPartition.size();
out.writeInt(partitionCount);
for (Partition partition : idToPartition.values()) {
partition.write(out);
}
// Optional bloom-filter config.
if (bfColumns == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
out.writeInt(bfColumns.size());
for (String bfColumn : bfColumns) {
Text.writeString(out, bfColumn);
}
out.writeDouble(bfFpp);
}
// Optional colocate group name.
if (colocateGroup == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
Text.writeString(out, colocateGroup);
}
out.writeLong(baseIndexId);
// Optional index definitions.
if (indexes != null) {
out.writeBoolean(true);
indexes.write(out);
} else {
out.writeBoolean(false);
}
// Optional table properties.
if (tableProperty == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
tableProperty.write(out);
}
out.writeBoolean(isInMemory);
}
// NOTE(review): this overload does NOT override Object.equals(Object) and treats
// ANY two OlapTable instances as equal regardless of content — presumably only
// used as a coarse type check by callers; confirm before relying on it for value
// equality (and note there is no matching hashCode).
public boolean equals(Table table) {
if (this == table) {
return true;
}
return table instanceof OlapTable;
}
/**
 * Deep-copies this table for backup. Optionally resets every state (table,
 * partitions, indexes, replicas) back to NORMAL and the storage medium to HDD,
 * and keeps only the reserved partitions (null/empty keeps all).
 * Returns null when the deep copy fails.
 */
public OlapTable selectiveCopy(Collection<String> reservedPartNames, boolean resetState, IndexExtState extState) {
OlapTable copied = new OlapTable();
if (!DeepCopy.copy(this, copied, OlapTable.class)) {
LOG.warn("failed to copy olap table: " + getName());
return null;
}
if (resetState) {
copied.setState(OlapTableState.NORMAL);
for (Partition partition : copied.getPartitions()) {
partition.setState(PartitionState.NORMAL);
copied.getPartitionInfo().setDataProperty(partition.getId(), new DataProperty(TStorageMedium.HDD));
for (MaterializedIndex idx : partition.getMaterializedIndices(extState)) {
idx.setState(IndexState.NORMAL);
for (Tablet tablet : idx.getTablets()) {
for (Replica replica : tablet.getReplicas()) {
replica.setState(ReplicaState.NORMAL);
}
}
}
}
}
if (reservedPartNames == null || reservedPartNames.isEmpty()) {
return copied;
}
// Snapshot names first: dropping mutates the copied table's partition maps.
Set<String> partNames = Sets.newTreeSet(String.CASE_INSENSITIVE_ORDER);
partNames.addAll(copied.getPartitionNames());
for (String partName : partNames) {
if (!reservedPartNames.contains(partName)) {
copied.dropPartitionForBackup(partName);
}
}
return copied;
}
/*
* this method is currently used for truncating table(partitions).
* the new partition has new id, so we need to change all 'id-related' members
*
* return the old partition.
*/
public Partition replacePartition(Partition newPartition) {
// The new partition must carry the same name as the one it replaces.
Partition oldPartition = nameToPartition.remove(newPartition.getName());
idToPartition.remove(oldPartition.getId());
idToPartition.put(newPartition.getId(), newPartition);
nameToPartition.put(newPartition.getName(), newPartition);
// Carry the old partition's data property and replication num over to the new id.
DataProperty dataProperty = partitionInfo.getDataProperty(oldPartition.getId());
short replicationNum = partitionInfo.getReplicationNum(oldPartition.getId());
if (partitionInfo.getType() == PartitionType.RANGE) {
RangePartitionInfo rangePartitionInfo = (RangePartitionInfo) partitionInfo;
// The range itself is unchanged; it is just re-registered under the new id.
Range<PartitionKey> range = rangePartitionInfo.getRange(oldPartition.getId());
rangePartitionInfo.dropPartition(oldPartition.getId());
rangePartitionInfo.addPartition(newPartition.getId(), range, dataProperty, replicationNum);
} else {
partitionInfo.dropPartition(oldPartition.getId());
partitionInfo.addPartition(newPartition.getId(), dataProperty, replicationNum);
}
return oldPartition;
}
// Total data size across all partitions, in bytes.
public long getDataSize() {
    return getPartitions().stream().mapToLong(Partition::getDataSize).sum();
}
/**
 * A table is "stable" when none of its tablets is currently being scheduled for
 * repair/balance and every tablet reports HEALTHY against the partition's visible
 * version and replication number.
 */
public boolean isStable(SystemInfoService infoService, TabletScheduler tabletScheduler, String clusterName) {
int availableBackendsNum = infoService.getClusterBackendIds(clusterName, true).size();
for (Partition partition : idToPartition.values()) {
long visibleVersion = partition.getVisibleVersion();
long visibleVersionHash = partition.getVisibleVersionHash();
short replicationNum = partitionInfo.getReplicationNum(partition.getId());
for (MaterializedIndex mIndex : partition.getMaterializedIndices(IndexExtState.ALL)) {
for (Tablet tablet : mIndex.getTablets()) {
// Being scheduled at all counts as unstable.
if (tabletScheduler.containsTablet(tablet.getId())) {
return false;
}
Pair<TabletStatus, TabletSchedCtx.Priority> statusPair = tablet.getHealthStatusWithPriority(
infoService, clusterName, visibleVersion, visibleVersionHash, replicationNum,
availableBackendsNum);
if (statusPair.first != TabletStatus.HEALTHY) {
LOG.info("table {} is not stable because tablet {} status is {}. replicas: {}",
id, tablet.getId(), statusPair.first, tablet.getReplicas());
return false;
}
}
}
}
return true;
}
/**
 * Returns the backend-id sequence per bucket, taken from the FIRST partition's
 * base index only (hence "arbitrary"). Throws when a tablet has fewer normal
 * replicas than the partition's replication number.
 */
public List<List<Long>> getArbitraryTabletBucketsSeq() throws DdlException {
List<List<Long>> backendsPerBucketSeq = Lists.newArrayList();
for (Partition partition : idToPartition.values()) {
short replicationNum = partitionInfo.getReplicationNum(partition.getId());
MaterializedIndex baseIdx = partition.getBaseIndex();
for (Long tabletId : baseIdx.getTabletIdsInOrder()) {
Tablet tablet = baseIdx.getTablet(tabletId);
List<Long> replicaBackendIds = tablet.getNormalReplicaBackendIds();
if (replicaBackendIds.size() < replicationNum) {
throw new DdlException("Normal replica number of tablet " + tabletId + " is: "
+ replicaBackendIds.size() + ", which is less than expected: " + replicationNum);
}
backendsPerBucketSeq.add(replicaBackendIds.subList(0, replicationNum));
}
// Only the first partition is inspected.
break;
}
return backendsPerBucketSeq;
}
/**
* Get the proximate row count of this table, if you need accurate row count should select count(*) from table.
* @return proximate row count
*/
public long proximateRowCount() {
long totalCount = 0;
for (Partition partition : getPartitions()) {
long version = partition.getVisibleVersion();
long versionHash = partition.getVisibleVersionHash();
for (MaterializedIndex index : partition.getMaterializedIndices(IndexExtState.VISIBLE)) {
for (Tablet tablet : index.getTablets()) {
long tabletRowCount = 0L;
// Use the largest row count among replicas that caught up to the visible version.
for (Replica replica : tablet.getReplicas()) {
if (replica.checkVersionCatchUp(version, versionHash, false)
&& replica.getRowCount() > tabletRowCount) {
tabletRowCount = replica.getRowCount();
}
}
totalCount += tabletRowCount;
}
}
}
return totalCount;
}
// The base schema is the schema of the base materialized index.
@Override
public List<Column> getBaseSchema() {
return indexIdToSchema.get(baseIndexId);
}
// Number of key columns in the base schema.
public int getKeysNum() {
    return (int) getBaseSchema().stream().filter(Column::isKey).count();
}
// Migration helper: converts legacy RANDOM distribution (table default and every
// partition) to HASH distribution derived from the base schema. Returns true when
// anything was changed.
public boolean convertRandomDistributionToHashDistribution() {
boolean hasChanged = false;
List<Column> baseSchema = indexIdToSchema.get(baseIndexId);
if (defaultDistributionInfo.getType() == DistributionInfoType.RANDOM) {
defaultDistributionInfo = ((RandomDistributionInfo) defaultDistributionInfo).toHashDistributionInfo(baseSchema);
hasChanged = true;
}
for (Partition partition : idToPartition.values()) {
if (partition.convertRandomDistributionToHashDistribution(baseSchema)) {
hasChanged = true;
}
}
return hasChanged;
}
// Stores the default replication number in table properties, creating them on demand.
public void setReplicationNum(Short replicationNum) {
if (tableProperty == null) {
tableProperty = new TableProperty(new HashMap<>());
}
tableProperty.modifyTableProperties(PropertyAnalyzer.PROPERTIES_REPLICATION_NUM, replicationNum.toString());
tableProperty.buildReplicationNum();
}
// Default replication number from table properties, or null when unset.
public Short getReplicationNum() {
if (tableProperty != null) {
return tableProperty.getReplicationNum();
}
return null;
}
} | class OlapTable extends Table {
private static final Logger LOG = LogManager.getLogger(OlapTable.class);
// Lifecycle state; alter/backup/restore jobs move the table through these states.
public enum OlapTableState {
NORMAL,
ROLLUP,
SCHEMA_CHANGE,
@Deprecated
BACKUP,
RESTORE,
RESTORE_WITH_LOAD
}
private OlapTableState state;
// Per-materialized-index metadata, keyed by index id.
private Map<Long, List<Column>> indexIdToSchema;
private Map<Long, Integer> indexIdToSchemaVersion;
private Map<Long, Integer> indexIdToSchemaHash;
private Map<Long, Short> indexIdToShortKeyColumnCount;
private Map<Long, TStorageType> indexIdToStorageType;
private Map<String, Long> indexNameToId;
private KeysType keysType;
private PartitionInfo partitionInfo;
private DistributionInfo defaultDistributionInfo;
// Partition lookups by id and by case-insensitive name.
private Map<Long, Partition> idToPartition;
private Map<String, Partition> nameToPartition;
// Bloom-filter columns and false-positive probability; null/0 when unset.
private Set<String> bfColumns;
private double bfFpp;
private String colocateGroup;
private TableIndexes indexes;
// Id of the base materialized index; -1 until assigned.
private long baseIndexId = -1;
private TableProperty tableProperty;
// No-arg constructor used by deserialization; metadata is filled in afterwards.
public OlapTable() {
super(TableType.OLAP);
this.indexIdToSchema = new HashMap<Long, List<Column>>();
this.indexIdToSchemaHash = new HashMap<Long, Integer>();
this.indexIdToSchemaVersion = new HashMap<Long, Integer>();
this.indexIdToShortKeyColumnCount = new HashMap<Long, Short>();
this.indexIdToStorageType = new HashMap<Long, TStorageType>();
this.indexNameToId = new HashMap<String, Long>();
this.idToPartition = new HashMap<Long, Partition>();
this.nameToPartition = Maps.newTreeMap(String.CASE_INSENSITIVE_ORDER);
this.bfColumns = null;
this.bfFpp = 0;
this.colocateGroup = null;
this.indexes = null;
this.tableProperty = null;
}
/**
 * Convenience constructor for tables without index definitions; delegates to the
 * full constructor with {@code indexes == null}.
 */
public OlapTable(long id, String tableName, List<Column> baseSchema, KeysType keysType,
PartitionInfo partitionInfo, DistributionInfo defaultDistributionInfo) {
this(id, tableName, baseSchema, keysType, partitionInfo, defaultDistributionInfo, null);
}
/**
 * Full constructor. All per-index metadata maps start empty; partition name
 * lookup is case-insensitive. State starts at NORMAL.
 */
public OlapTable(long id, String tableName, List<Column> baseSchema, KeysType keysType,
PartitionInfo partitionInfo, DistributionInfo defaultDistributionInfo, TableIndexes indexes) {
super(id, tableName, TableType.OLAP, baseSchema);
this.state = OlapTableState.NORMAL;
this.indexIdToSchema = new HashMap<Long, List<Column>>();
this.indexIdToSchemaHash = new HashMap<Long, Integer>();
this.indexIdToSchemaVersion = new HashMap<Long, Integer>();
this.indexIdToShortKeyColumnCount = new HashMap<Long, Short>();
this.indexIdToStorageType = new HashMap<Long, TStorageType>();
this.indexNameToId = new HashMap<String, Long>();
this.idToPartition = new HashMap<Long, Partition>();
this.nameToPartition = Maps.newTreeMap(String.CASE_INSENSITIVE_ORDER);
this.keysType = keysType;
this.partitionInfo = partitionInfo;
this.defaultDistributionInfo = defaultDistributionInfo;
this.bfColumns = null;
this.bfFpp = 0;
this.colocateGroup = null;
this.indexes = indexes;
this.tableProperty = null;
}
// Attaches the table-level property bag (replication num, dynamic partition, ...).
public void setTableProperty(TableProperty tableProperty) {
this.tableProperty = tableProperty;
}
// May be null for tables created without explicit properties.
public TableProperty getTableProperty() {
return this.tableProperty;
}
// True when a dynamic-partition policy has been configured on this table.
public boolean dynamicPartitionExists() {
return tableProperty != null
&& tableProperty.getDynamicPartitionProperty() != null
&& tableProperty.getDynamicPartitionProperty().isExist();
}
public void setBaseIndexId(long baseIndexId) {
this.baseIndexId = baseIndexId;
}
// Id of the base materialized index; -1 until assigned.
public long getBaseIndexId() {
return baseIndexId;
}
public void setState(OlapTableState state) {
this.state = state;
}
public OlapTableState getState() {
return state;
}
// Returns the index definitions; empty list (never null) when none exist.
public List<Index> getIndexes() {
if (indexes == null) {
return Lists.newArrayList();
}
return indexes.getIndexes();
}
// Raw index-metadata holder; may be null.
public TableIndexes getTableIndexes() {
return indexes;
}
// Builds a name -> Index lookup map; empty when the table has no index metadata.
public Map<String, Index> getIndexesMap() {
Map<String, Index> indexMap = new HashMap<>();
if (indexes != null) {
Optional.ofNullable(indexes.getIndexes()).orElse(Collections.emptyList()).stream().forEach(
i -> indexMap.put(i.getIndexName(), i));
}
return indexMap;
}
// Renames the table: the base index entry is re-keyed under the new name, and for
// an unpartitioned table the single partition (which shares the table name) is
// renamed as well.
public void setName(String newName) {
long baseIndexId = indexNameToId.remove(this.name);
indexNameToId.put(newName, baseIndexId);
this.name = newName;
if (this.partitionInfo.getType() == PartitionType.UNPARTITIONED) {
for (Partition partition : getPartitions()) {
partition.setName(newName);
nameToPartition.clear();
nameToPartition.put(newName, partition);
break;
}
}
}
// True when a materialized index with the given name exists on this table.
public boolean hasMaterializedIndex(String indexName) {
return indexNameToId.containsKey(indexName);
}
/*
* Set index schema info for specified index.
*/
// Registers schema metadata for an index. A null indexName means the index id must
// already be registered (the schema-change path updates metadata in place).
public void setIndexSchemaInfo(Long indexId, String indexName, List<Column> schema, int schemaVersion,
int schemaHash, short shortKeyColumnCount) {
if (indexName == null) {
Preconditions.checkState(indexNameToId.containsValue(indexId));
} else {
indexNameToId.put(indexName, indexId);
}
indexIdToSchema.put(indexId, schema);
indexIdToSchemaVersion.put(indexId, schemaVersion);
indexIdToSchemaHash.put(indexId, schemaHash);
indexIdToShortKeyColumnCount.put(indexId, shortKeyColumnCount);
}
// Only columnar storage is accepted.
public void setIndexStorageType(Long indexId, TStorageType newStorageType) {
Preconditions.checkState(newStorageType == TStorageType.COLUMN);
indexIdToStorageType.put(indexId, newStorageType);
}
// Rebuilds fullSchema / nameToColumn from every index schema. The first occurrence
// of each column name wins, following the iteration order of indexIdToSchema.
public void rebuildFullSchema() {
    fullSchema.clear();
    nameToColumn.clear();
    for (List<Column> indexSchema : indexIdToSchema.values()) {
        for (Column col : indexSchema) {
            String colName = col.getName();
            if (nameToColumn.containsKey(colName)) {
                continue;
            }
            fullSchema.add(col);
            nameToColumn.put(colName, col);
        }
    }
    LOG.debug("after rebuild full schema. table {}, schema: {}", id, fullSchema);
}
// Removes all schema metadata for the named index; returns false when the index
// is unknown to this table.
public boolean deleteIndexInfo(String indexName) {
    Long removedId = indexNameToId.remove(indexName);
    if (removedId == null) {
        return false;
    }
    indexIdToSchema.remove(removedId);
    indexIdToSchemaVersion.remove(removedId);
    indexIdToSchemaHash.remove(removedId);
    indexIdToShortKeyColumnCount.remove(removedId);
    indexIdToStorageType.remove(removedId);
    return true;
}
public Map<String, Long> getIndexNameToId() {
return indexNameToId;
}
public Long getIndexIdByName(String indexName) {
return indexNameToId.get(indexName);
}
// Reverse lookup: index id -> name; null when unknown. (entry.getValue() is
// auto-unboxed here, so == compares primitive longs, not references.)
public String getIndexNameById(long indexId) {
for (Map.Entry<String, Long> entry : indexNameToId.entrySet()) {
if (entry.getValue() == indexId) {
return entry.getKey();
}
}
return null;
}
// Re-keys an index under its new name (schema-change rename path).
public void renameIndexForSchemaChange(String name, String newName) {
long idxId = indexNameToId.remove(name);
indexNameToId.put(newName, idxId);
}
// Strips the internal shadow-column name prefix from every column of the index.
public void renameColumnNamePrefix(long idxId) {
List<Column> columns = indexIdToSchema.get(idxId);
for (Column column : columns) {
column.setName(Column.removeNamePrefix(column.getName()));
}
}
/**
 * Re-assigns every id in this table (table, index, partition, tablet, replica)
 * with freshly-allocated catalog ids so that a restored table cannot collide
 * with existing metadata. Tablets are rebuilt and replicas placed on newly
 * chosen backends.
 *
 * @param catalog               source of new ids and the system info service
 * @param db                    database the table is restored into (for cluster name)
 * @param restoreReplicationNum replication num forced onto every partition
 * @return Status.OK on success, or COMMON_ERROR when no suitable backends exist
 */
public Status resetIdsForRestore(Catalog catalog, Database db, int restoreReplicationNum) {
    // table id
    id = catalog.getNextId();

    // snapshot the original index-id -> name mapping before mutating indexNameToId
    Map<Long, String> origIdxIdToName = Maps.newHashMap();
    for (Map.Entry<String, Long> entry : indexNameToId.entrySet()) {
        origIdxIdToName.put(entry.getValue(), entry.getKey());
    }

    // reset index ids and re-key all per-index maps
    for (Map.Entry<Long, String> entry : origIdxIdToName.entrySet()) {
        long newIdxId = catalog.getNextId();
        if (entry.getValue().equals(name)) {
            // the base index shares its name with the table
            baseIndexId = newIdxId;
        }
        indexIdToSchema.put(newIdxId, indexIdToSchema.remove(entry.getKey()));
        indexIdToSchemaHash.put(newIdxId, indexIdToSchemaHash.remove(entry.getKey()));
        indexIdToSchemaVersion.put(newIdxId, indexIdToSchemaVersion.remove(entry.getKey()));
        indexIdToShortKeyColumnCount.put(newIdxId, indexIdToShortKeyColumnCount.remove(entry.getKey()));
        indexIdToStorageType.put(newIdxId, indexIdToStorageType.remove(entry.getKey()));
        indexNameToId.put(entry.getValue(), newIdxId);
    }

    // reset partition ids
    Map<String, Long> origPartNameToId = Maps.newHashMap();
    for (Partition partition : idToPartition.values()) {
        origPartNameToId.put(partition.getName(), partition.getId());
    }
    if (partitionInfo.getType() == PartitionType.RANGE) {
        RangePartitionInfo rangePartitionInfo = (RangePartitionInfo) partitionInfo;
        for (Map.Entry<String, Long> entry : origPartNameToId.entrySet()) {
            long newPartId = catalog.getNextId();
            rangePartitionInfo.idToDataProperty.put(newPartId,
                    rangePartitionInfo.idToDataProperty.remove(entry.getValue()));
            rangePartitionInfo.idToReplicationNum.remove(entry.getValue());
            // replication num is overridden by the restore request
            rangePartitionInfo.idToReplicationNum.put(newPartId,
                    (short) restoreReplicationNum);
            rangePartitionInfo.getIdToRange().put(newPartId,
                    rangePartitionInfo.getIdToRange().remove(entry.getValue()));
            idToPartition.put(newPartId, idToPartition.remove(entry.getValue()));
        }
    } else {
        for (Map.Entry<String, Long> entry : origPartNameToId.entrySet()) {
            // BUGFIX: allocate a fresh id per partition. Previously the id was
            // allocated once outside this loop, which would collapse all
            // partitions onto a single id if there were ever more than one.
            long newPartId = catalog.getNextId();
            partitionInfo.idToDataProperty.put(newPartId, partitionInfo.idToDataProperty.remove(entry.getValue()));
            partitionInfo.idToReplicationNum.remove(entry.getValue());
            partitionInfo.idToReplicationNum.put(newPartId, (short) restoreReplicationNum);
            idToPartition.put(newPartId, idToPartition.remove(entry.getValue()));
        }
    }

    // for each partition: re-key its indexes, rebuild tablets and replicas
    for (Map.Entry<Long, Partition> entry : idToPartition.entrySet()) {
        Partition partition = entry.getValue();
        for (Map.Entry<Long, String> entry2 : origIdxIdToName.entrySet()) {
            MaterializedIndex idx = partition.getIndex(entry2.getKey());
            long newIdxId = indexNameToId.get(entry2.getValue());
            int schemaHash = indexIdToSchemaHash.get(newIdxId);
            idx.setIdForRestore(newIdxId);
            if (newIdxId != baseIndexId) {
                // rollup indexes are keyed by id inside the partition; re-register
                partition.deleteRollupIndex(entry2.getKey());
                partition.createRollupIndex(idx);
            }
            // rebuild tablets with new ids and freshly chosen backends
            int tabletNum = idx.getTablets().size();
            idx.clearTabletsForRestore();
            for (int i = 0; i < tabletNum; i++) {
                long newTabletId = catalog.getNextId();
                Tablet newTablet = new Tablet(newTabletId);
                idx.addTablet(newTablet, null /* tablet meta */, true /* is restore */);
                List<Long> beIds = Catalog.getCurrentSystemInfo().seqChooseBackendIds(partitionInfo.getReplicationNum(entry.getKey()),
                        true, true,
                        db.getClusterName());
                if (beIds == null) {
                    return new Status(ErrCode.COMMON_ERROR, "failed to find "
                            + partitionInfo.getReplicationNum(entry.getKey())
                            + " different hosts to create table: " + name);
                }
                for (Long beId : beIds) {
                    long newReplicaId = catalog.getNextId();
                    Replica replica = new Replica(newReplicaId, beId, ReplicaState.NORMAL,
                            partition.getVisibleVersion(), partition.getVisibleVersionHash(), schemaHash);
                    newTablet.addReplica(replica, true /* is restore */);
                }
            }
        }
        // finally move the partition's own id to the new key
        partition.setIdForRestore(entry.getKey());
    }
    return Status.OK;
}
// Live view of index-id -> full column list; callers must not mutate.
public Map<Long, List<Column>> getIndexIdToSchema() {
    return indexIdToSchema;
}

// Shallow copy: new map, but the column lists themselves are shared.
public Map<Long, List<Column>> getCopiedIndexIdToSchema() {
    return new HashMap<>(indexIdToSchema);
}

// Full schema of the given index, or null when the index is unknown.
public List<Column> getSchemaByIndexId(Long indexId) {
    return indexIdToSchema.get(indexId);
}

// Key columns of the given index, in schema order.
public List<Column> getKeyColumnsByIndexId(Long indexId) {
    ArrayList<Column> keyColumns = Lists.newArrayList();
    List<Column> allColumns = this.getSchemaByIndexId(indexId);
    for (Column column : allColumns) {
        if (column.isKey()) {
            keyColumns.add(column);
        }
    }
    return keyColumns;
}

// Schema version of the index, or -1 when the index is unknown.
public int getSchemaVersionByIndexId(Long indexId) {
    if (indexIdToSchemaVersion.containsKey(indexId)) {
        return indexIdToSchemaVersion.get(indexId);
    }
    return -1;
}

public Map<Long, Integer> getIndexIdToSchemaHash() {
    return indexIdToSchemaHash;
}

public Map<Long, Integer> getCopiedIndexIdToSchemaHash() {
    return new HashMap<>(indexIdToSchemaHash);
}

// Schema hash of the index, or -1 when the index is unknown.
public int getSchemaHashByIndexId(Long indexId) {
    if (indexIdToSchemaHash.containsKey(indexId)) {
        return indexIdToSchemaHash.get(indexId);
    }
    return -1;
}

public Map<Long, Short> getIndexIdToShortKeyColumnCount() {
    return indexIdToShortKeyColumnCount;
}

public Map<Long, Short> getCopiedIndexIdToShortKeyColumnCount() {
    return new HashMap<>(indexIdToShortKeyColumnCount);
}

// Short-key column count of the index, or -1 when the index is unknown.
public short getShortKeyColumnCountByIndexId(Long indexId) {
    if (indexIdToShortKeyColumnCount.containsKey(indexId)) {
        return indexIdToShortKeyColumnCount.get(indexId);
    }
    return (short) -1;
}

public Map<Long, TStorageType> getIndexIdToStorageType() {
    return indexIdToStorageType;
}

public Map<Long, TStorageType> getCopiedIndexIdToStorageType() {
    return new HashMap<>(indexIdToStorageType);
}

public void setStorageTypeToIndex(Long indexId, TStorageType storageType) {
    indexIdToStorageType.put(indexId, storageType);
}

// Storage type of the index, or null when the index is unknown.
public TStorageType getStorageTypeByIndexId(Long indexId) {
    return indexIdToStorageType.get(indexId);
}

public KeysType getKeysType() {
    return keysType;
}

public PartitionInfo getPartitionInfo() {
    return partitionInfo;
}

public DistributionInfo getDefaultDistributionInfo() {
    return defaultDistributionInfo;
}
/**
 * Renames a partition. For an unpartitioned table the single implicit
 * partition is renamed and partitionName is ignored; otherwise the entry is
 * re-keyed under the new name in nameToPartition.
 */
public void renamePartition(String partitionName, String newPartitionName) {
    if (partitionInfo.getType() == PartitionType.UNPARTITIONED) {
        // unpartitioned table has exactly one partition; loop+break picks it
        for (Partition partition : idToPartition.values()) {
            partition.setName(newPartitionName);
            nameToPartition.clear();
            nameToPartition.put(newPartitionName, partition);
            // fixed log message typo: "patition" -> "partition"
            LOG.info("rename partition {} in table {}", newPartitionName, name);
            break;
        }
    } else {
        Partition partition = nameToPartition.remove(partitionName);
        partition.setName(newPartitionName);
        nameToPartition.put(newPartitionName, partition);
    }
}
// Registers a partition in both lookup maps (by id and by name).
public void addPartition(Partition partition) {
    idToPartition.put(partition.getId(), partition);
    nameToPartition.put(partition.getName(), partition);
}
// Drops a partition and moves it to the recycle bin (isRestore = false).
public Partition dropPartition(long dbId, String partitionName) {
    return dropPartition(dbId, partitionName, false);
}

/**
 * Drops a partition from this table. Only RANGE-partitioned tables are
 * supported here (enforced by Preconditions). When isRestore is false the
 * partition (with its range/data property/replication num/in-memory flag)
 * is preserved in the recycle bin; otherwise it is discarded outright.
 *
 * @return the dropped partition, or null when the name does not exist
 */
public Partition dropPartition(long dbId, String partitionName, boolean isRestore) {
    Partition partition = nameToPartition.get(partitionName);
    if (partition != null) {
        idToPartition.remove(partition.getId());
        nameToPartition.remove(partitionName);
        Preconditions.checkState(partitionInfo.getType() == PartitionType.RANGE);
        RangePartitionInfo rangePartitionInfo = (RangePartitionInfo) partitionInfo;
        if (!isRestore) {
            // preserve metadata so the partition can be recovered later
            Catalog.getCurrentRecycleBin().recyclePartition(dbId, id, partition,
                    rangePartitionInfo.getRange(partition.getId()),
                    rangePartitionInfo.getDataProperty(partition.getId()),
                    rangePartitionInfo.getReplicationNum(partition.getId()),
                    rangePartitionInfo.getIsInMemory(partition.getId()));
        }
        rangePartitionInfo.dropPartition(partition.getId());
    }
    return partition;
}

// Drop used by backup/restore: bypasses the recycle bin (dbId is unused, -1).
public Partition dropPartitionForBackup(String partitionName) {
    return dropPartition(-1, partitionName, true);
}
// Live view of all partitions; callers must not mutate the collection.
public Collection<Partition> getPartitions() {
    return idToPartition.values();
}

// Returns the partition by id, or null when unknown.
public Partition getPartition(long partitionId) {
    return idToPartition.get(partitionId);
}

// Returns the partition by name, or null when unknown.
public Partition getPartition(String partitionName) {
    return nameToPartition.get(partitionName);
}

// Snapshot copy of all partition names (safe to mutate).
public Set<String> getPartitionNames() {
    return Sets.newHashSet(nameToPartition.keySet());
}
/**
 * Snapshot copy of the bloom-filter column names.
 *
 * @return a new mutable set, or null when no bloom-filter columns are set
 */
public Set<String> getCopiedBfColumns() {
    return bfColumns == null ? null : Sets.newHashSet(bfColumns);
}
/**
 * Copy of the table's secondary indexes; an empty list when none are defined
 * (never null).
 */
public List<Index> getCopiedIndexes() {
    return indexes == null ? Lists.newArrayList() : indexes.getCopiedIndexes();
}
// Bloom-filter false-positive probability.
public double getBfFpp() {
    return bfFpp;
}

// Replaces the bloom-filter configuration (column set and fpp) wholesale.
public void setBloomFilterInfo(Set<String> bfColumns, double bfFpp) {
    this.bfColumns = bfColumns;
    this.bfFpp = bfFpp;
}

// Replaces the table's secondary indexes, lazily creating the container.
public void setIndexes(List<Index> indexes) {
    if (this.indexes == null) {
        this.indexes = new TableIndexes(null);
    }
    this.indexes.setIndexes(indexes);
}

// Colocation group name, or null when the table is not colocated.
public String getColocateGroup() {
    return colocateGroup;
}

public void setColocateGroup(String colocateGroup) {
    this.colocateGroup = colocateGroup;
}
// Whether loads should also write to in-building rollups; always false here.
public boolean shouldLoadToNewRollup() {
    return false;
}

// Builds the thrift descriptor sent to backends for query planning.
public TTableDescriptor toThrift() {
    TOlapTable tOlapTable = new TOlapTable(getName());
    TTableDescriptor tTableDescriptor = new TTableDescriptor(id, TTableType.OLAP_TABLE,
            fullSchema.size(), 0, getName(), "");
    tTableDescriptor.setOlapTable(tOlapTable);
    return tTableDescriptor;
}
/**
 * Total row count of the table: sum of the base index row count over all
 * partitions (rollup indexes are excluded to avoid double counting).
 */
public long getRowCount() {
    long total = 0;
    for (Partition partition : idToPartition.values()) {
        total += partition.getBaseIndex().getRowCount();
    }
    return total;
}
/**
 * Rebuilds an ALTER TABLE ... ADD ROLLUP statement for the given index ids,
 * carrying storage type / short key / schema version as properties so the
 * rollups can be re-created identically.
 */
public AlterTableStmt toAddRollupStmt(String dbName, Collection<Long> indexIds) {
    List<AlterClause> alterClauses = Lists.newArrayList();
    for (Map.Entry<String, Long> entry : indexNameToId.entrySet()) {
        String indexName = entry.getKey();
        long indexId = entry.getValue();
        if (!indexIds.contains(indexId)) {
            continue;
        }
        List<String> columnNames = Lists.newArrayList();
        for (Column column : indexIdToSchema.get(indexId)) {
            columnNames.add(column.getName());
        }
        // preserve per-index physical properties in the generated clause
        Map<String, String> properties = Maps.newHashMap();
        properties.put(PropertyAnalyzer.PROPERTIES_STORAGE_TYPE, indexIdToStorageType.get(indexId).name());
        properties.put(PropertyAnalyzer.PROPERTIES_SHORT_KEY, indexIdToShortKeyColumnCount.get(indexId).toString());
        properties.put(PropertyAnalyzer.PROPERTIES_SCHEMA_VERSION, indexIdToSchemaVersion.get(indexId).toString());
        AddRollupClause addRollupClause = new AddRollupClause(indexName, columnNames, null, null, properties);
        alterClauses.add(addRollupClause);
    }
    return new AlterTableStmt(new TableName(dbName, name), alterClauses);
}
// Deliberately unsupported for OLAP tables; SHOW CREATE TABLE takes another path.
@Override
public CreateTableStmt toCreateTableStmt(String dbName) {
    throw new RuntimeException("Don't support anymore");
}
/**
 * Computes an Adler32-based signature over the table's logical structure
 * (name, type, index metadata, bloom-filter config, partition/distribution
 * info) restricted to the given partitions. Used to decide whether two
 * tables are compatible (e.g. for backup/restore). Returns -1 on encoding
 * failure. The exact byte sequence fed to Adler32 is part of the persisted
 * contract — do not reorder or reformat any update() call.
 */
public int getSignature(int signatureVersion, List<String> partNames) {
    Adler32 adler32 = new Adler32();
    adler32.update(signatureVersion);
    final String charsetName = "UTF-8";
    try {
        adler32.update(name.getBytes(charsetName));
        LOG.debug("signature. table name: {}", name);
        adler32.update(type.name().getBytes(charsetName));
        LOG.debug("signature. table type: {}", type.name());
        // indexes, in sorted-name order for determinism
        Set<String> indexNames = Sets.newTreeSet();
        indexNames.addAll(indexNameToId.keySet());
        for (String indexName : indexNames) {
            long indexId = indexNameToId.get(indexName);
            adler32.update(indexName.getBytes(charsetName));
            LOG.debug("signature. index name: {}", indexName);
            adler32.update(indexIdToSchemaHash.get(indexId));
            LOG.debug("signature. index schema hash: {}", indexIdToSchemaHash.get(indexId));
            adler32.update(indexIdToShortKeyColumnCount.get(indexId));
            LOG.debug("signature. index short key: {}", indexIdToShortKeyColumnCount.get(indexId));
            adler32.update(indexIdToStorageType.get(indexId).name().getBytes(charsetName));
            LOG.debug("signature. index storage type: {}", indexIdToStorageType.get(indexId));
        }
        // bloom filter info
        if (bfColumns != null && !bfColumns.isEmpty()) {
            for (String bfCol : bfColumns) {
                // NOTE(review): getBytes() here uses the platform default
                // charset, unlike the explicit UTF-8 used elsewhere — on JVMs
                // with a non-UTF-8 default, non-ASCII column names would yield
                // different signatures. Changing it would invalidate existing
                // signatures; confirm before fixing.
                adler32.update(bfCol.getBytes());
                LOG.debug("signature. bf col: {}", bfCol);
            }
            adler32.update(String.valueOf(bfFpp).getBytes());
            LOG.debug("signature. bf fpp: {}", bfFpp);
        }
        // partition info
        adler32.update(partitionInfo.getType().name().getBytes(charsetName));
        LOG.debug("signature. partition type: {}", partitionInfo.getType().name());
        if (partitionInfo.getType() == PartitionType.RANGE) {
            RangePartitionInfo rangePartitionInfo = (RangePartitionInfo) partitionInfo;
            List<Column> partitionColumns = rangePartitionInfo.getPartitionColumns();
            adler32.update(Util.schemaHash(0, partitionColumns, null, 0));
            LOG.debug("signature. partition col hash: {}", Util.schemaHash(0, partitionColumns, null, 0));
        }
        // selected partitions, case-insensitively sorted for determinism
        Collections.sort(partNames, String.CASE_INSENSITIVE_ORDER);
        for (String partName : partNames) {
            Partition partition = getPartition(partName);
            Preconditions.checkNotNull(partition, partName);
            adler32.update(partName.getBytes(charsetName));
            LOG.debug("signature. partition name: {}", partName);
            DistributionInfo distributionInfo = partition.getDistributionInfo();
            adler32.update(distributionInfo.getType().name().getBytes(charsetName));
            if (distributionInfo.getType() == DistributionInfoType.HASH) {
                HashDistributionInfo hashDistributionInfo = (HashDistributionInfo) distributionInfo;
                adler32.update(Util.schemaHash(0, hashDistributionInfo.getDistributionColumns(), null, 0));
                LOG.debug("signature. distribution col hash: {}",
                        Util.schemaHash(0, hashDistributionInfo.getDistributionColumns(), null, 0));
                adler32.update(hashDistributionInfo.getBucketNum());
                LOG.debug("signature. bucket num: {}", hashDistributionInfo.getBucketNum());
            }
        }
    } catch (UnsupportedEncodingException e) {
        LOG.error("encoding error", e);
        return -1;
    }
    LOG.debug("signature: {}", Math.abs((int) adler32.getValue()));
    return Math.abs((int) adler32.getValue());
}
/**
 * Collects partition names that exist in both this table and anotherTbl into
 * intersectPartNames. Fails when the two tables use different partition types.
 */
public Status getIntersectPartNamesWith(OlapTable anotherTbl, List<String> intersectPartNames) {
    if (this.getPartitionInfo().getType() != anotherTbl.getPartitionInfo().getType()) {
        return new Status(ErrCode.COMMON_ERROR, "Table's partition type is different");
    }
    // getPartitionNames() returns a copy, so retainAll is safe
    Set<String> commonNames = this.getPartitionNames();
    commonNames.retainAll(anotherTbl.getPartitionNames());
    intersectPartNames.addAll(commonNames);
    return Status.OK;
}
/**
 * A table counts as "partitioned" when its data spans more than one bucket
 * (tablet) overall, regardless of the declared partition type.
 */
@Override
public boolean isPartitioned() {
    int bucketTotal = 0;
    for (Partition partition : getPartitions()) {
        bucketTotal += partition.getDistributionInfo().getBucketNum();
        // short-circuit as soon as more than one bucket is known
        if (bucketTotal > 1) {
            return true;
        }
    }
    return false;
}
/**
 * Serializes this table to the metadata image/edit log. The write order here
 * is part of the persistence format and must stay in sync with the matching
 * readFields path — do not reorder any write.
 */
@Override
public void write(DataOutput out) throws IOException {
    super.write(out);
    // state
    Text.writeString(out, state.name());
    // indices' meta: name, id, schema, storage type, versions
    int counter = indexNameToId.size();
    out.writeInt(counter);
    for (Map.Entry<String, Long> entry : indexNameToId.entrySet()) {
        String indexName = entry.getKey();
        long indexId = entry.getValue();
        Text.writeString(out, indexName);
        out.writeLong(indexId);
        out.writeInt(indexIdToSchema.get(indexId).size());
        for (Column column : indexIdToSchema.get(indexId)) {
            column.write(out);
        }
        Text.writeString(out, indexIdToStorageType.get(indexId).name());
        out.writeInt(indexIdToSchemaVersion.get(indexId));
        out.writeInt(indexIdToSchemaHash.get(indexId));
        out.writeShort(indexIdToShortKeyColumnCount.get(indexId));
    }
    Text.writeString(out, keysType.name());
    // partition and distribution info, each prefixed by its type name
    Text.writeString(out, partitionInfo.getType().name());
    partitionInfo.write(out);
    Text.writeString(out, defaultDistributionInfo.getType().name());
    defaultDistributionInfo.write(out);
    // partitions
    int partitionCount = idToPartition.size();
    out.writeInt(partitionCount);
    for (Partition partition : idToPartition.values()) {
        partition.write(out);
    }
    // optional bloom filter config: presence flag, then columns and fpp
    if (bfColumns == null) {
        out.writeBoolean(false);
    } else {
        out.writeBoolean(true);
        out.writeInt(bfColumns.size());
        for (String bfColumn : bfColumns) {
            Text.writeString(out, bfColumn);
        }
        out.writeDouble(bfFpp);
    }
    // optional colocate group
    if (colocateGroup == null) {
        out.writeBoolean(false);
    } else {
        out.writeBoolean(true);
        Text.writeString(out, colocateGroup);
    }
    out.writeLong(baseIndexId);
    // optional secondary indexes
    if (indexes != null) {
        out.writeBoolean(true);
        indexes.write(out);
    } else {
        out.writeBoolean(false);
    }
    // optional table property
    if (tableProperty == null) {
        out.writeBoolean(false);
    } else {
        out.writeBoolean(true);
        tableProperty.write(out);
    }
}
// NOTE(review): this is an overload of equals(Table), NOT an override of
// Object.equals(Object), so it does not affect hash-based collections.
// As written, any two OlapTable instances compare equal — presumably callers
// only care about the runtime type; confirm before using this for identity.
public boolean equals(Table table) {
    if (this == table) {
        return true;
    }
    return table instanceof OlapTable;
}
/**
 * Returns a deep copy of this table, optionally resetting all states
 * (table/partition/index/replica) to NORMAL, keeping only the partitions
 * listed in reservedPartNames (case-insensitive; null/empty keeps all).
 * Dropped partitions bypass the recycle bin. Returns null if the deep copy
 * itself fails.
 */
public OlapTable selectiveCopy(Collection<String> reservedPartNames, boolean resetState, IndexExtState extState) {
    OlapTable copied = new OlapTable();
    if (!DeepCopy.copy(this, copied, OlapTable.class)) {
        LOG.warn("failed to copy olap table: " + getName());
        return null;
    }
    if (resetState) {
        copied.setState(OlapTableState.NORMAL);
        for (Partition partition : copied.getPartitions()) {
            partition.setState(PartitionState.NORMAL);
            // storage medium is reset to HDD on the copy
            copied.getPartitionInfo().setDataProperty(partition.getId(), new DataProperty(TStorageMedium.HDD));
            for (MaterializedIndex idx : partition.getMaterializedIndices(extState)) {
                idx.setState(IndexState.NORMAL);
                for (Tablet tablet : idx.getTablets()) {
                    for (Replica replica : tablet.getReplicas()) {
                        replica.setState(ReplicaState.NORMAL);
                    }
                }
            }
        }
    }
    if (reservedPartNames == null || reservedPartNames.isEmpty()) {
        // no filtering requested: keep every partition
        return copied;
    }
    // drop partitions not in the reserved set (case-insensitive compare)
    Set<String> partNames = Sets.newTreeSet(String.CASE_INSENSITIVE_ORDER);
    partNames.addAll(copied.getPartitionNames());
    for (String partName : partNames) {
        if (!reservedPartNames.contains(partName)) {
            copied.dropPartitionForBackup(partName);
        }
    }
    return copied;
}
/*
* this method is currently used for truncating table(partitions).
* the new partition has new id, so we need to change all 'id-related' members
*
* return the old partition.
*/
/**
 * Swaps in newPartition for the existing partition of the same name,
 * carrying over the old partition's data property, replication num,
 * in-memory flag and (for RANGE tables) its range.
 *
 * Precondition: a partition named newPartition.getName() must already exist;
 * otherwise oldPartition is null and this throws NPE.
 *
 * @return the replaced (old) partition
 */
public Partition replacePartition(Partition newPartition) {
    Partition oldPartition = nameToPartition.remove(newPartition.getName());
    idToPartition.remove(oldPartition.getId());
    idToPartition.put(newPartition.getId(), newPartition);
    nameToPartition.put(newPartition.getName(), newPartition);
    // carry the old partition's properties over to the new id
    DataProperty dataProperty = partitionInfo.getDataProperty(oldPartition.getId());
    short replicationNum = partitionInfo.getReplicationNum(oldPartition.getId());
    boolean isInMemory = partitionInfo.getIsInMemory(oldPartition.getId());
    if (partitionInfo.getType() == PartitionType.RANGE) {
        RangePartitionInfo rangePartitionInfo = (RangePartitionInfo) partitionInfo;
        Range<PartitionKey> range = rangePartitionInfo.getRange(oldPartition.getId());
        rangePartitionInfo.dropPartition(oldPartition.getId());
        rangePartitionInfo.addPartition(newPartition.getId(), range, dataProperty,
                replicationNum, isInMemory);
    } else {
        partitionInfo.dropPartition(oldPartition.getId());
        partitionInfo.addPartition(newPartition.getId(), dataProperty, replicationNum, isInMemory);
    }
    return oldPartition;
}
/**
 * Total data size of the table: sum of the size reported by each partition.
 */
public long getDataSize() {
    return getPartitions().stream().mapToLong(Partition::getDataSize).sum();
}
/**
 * A table is "stable" when none of its tablets is being scheduled and every
 * tablet reports HEALTHY for its partition's visible version. Used to gate
 * operations that require a quiescent table.
 */
public boolean isStable(SystemInfoService infoService, TabletScheduler tabletScheduler, String clusterName) {
    int availableBackendsNum = infoService.getClusterBackendIds(clusterName, true).size();
    for (Partition partition : idToPartition.values()) {
        long visibleVersion = partition.getVisibleVersion();
        long visibleVersionHash = partition.getVisibleVersionHash();
        short replicationNum = partitionInfo.getReplicationNum(partition.getId());
        for (MaterializedIndex mIndex : partition.getMaterializedIndices(IndexExtState.ALL)) {
            for (Tablet tablet : mIndex.getTablets()) {
                // a tablet already queued for repair/balance means "not stable"
                if (tabletScheduler.containsTablet(tablet.getId())) {
                    return false;
                }
                Pair<TabletStatus, TabletSchedCtx.Priority> statusPair = tablet.getHealthStatusWithPriority(
                        infoService, clusterName, visibleVersion, visibleVersionHash, replicationNum,
                        availableBackendsNum);
                if (statusPair.first != TabletStatus.HEALTHY) {
                    LOG.info("table {} is not stable because tablet {} status is {}. replicas: {}",
                            id, tablet.getId(), statusPair.first, tablet.getReplicas());
                    return false;
                }
            }
        }
    }
    return true;
}
/**
 * Builds a backend-ids-per-bucket sequence from the base index of an
 * arbitrary (first-iterated) partition — note the unconditional break after
 * the first partition. Used for colocation bucket seq initialization.
 *
 * @throws DdlException when a tablet has fewer normal replicas than required
 */
public List<List<Long>> getArbitraryTabletBucketsSeq() throws DdlException {
    List<List<Long>> backendsPerBucketSeq = Lists.newArrayList();
    for (Partition partition : idToPartition.values()) {
        short replicationNum = partitionInfo.getReplicationNum(partition.getId());
        MaterializedIndex baseIdx = partition.getBaseIndex();
        for (Long tabletId : baseIdx.getTabletIdsInOrder()) {
            Tablet tablet = baseIdx.getTablet(tabletId);
            List<Long> replicaBackendIds = tablet.getNormalReplicaBackendIds();
            if (replicaBackendIds.size() < replicationNum) {
                throw new DdlException("Normal replica number of tablet " + tabletId + " is: "
                        + replicaBackendIds.size() + ", which is less than expected: " + replicationNum);
            }
            // take exactly replicationNum backends per bucket
            backendsPerBucketSeq.add(replicaBackendIds.subList(0, replicationNum));
        }
        // only one partition is needed for the bucket sequence
        break;
    }
    return backendsPerBucketSeq;
}
/**
* Get the proximate row count of this table, if you need accurate row count should select count(*) from table.
* @return proximate row count
*/
/**
 * Approximate row count: for each tablet, takes the maximum row count among
 * replicas that have caught up to the partition's visible version, and sums
 * over all visible indices. For an exact count use select count(*).
 */
public long proximateRowCount() {
    long totalCount = 0;
    for (Partition partition : getPartitions()) {
        long version = partition.getVisibleVersion();
        long versionHash = partition.getVisibleVersionHash();
        for (MaterializedIndex index : partition.getMaterializedIndices(IndexExtState.VISIBLE)) {
            for (Tablet tablet : index.getTablets()) {
                long tabletRowCount = 0L;
                for (Replica replica : tablet.getReplicas()) {
                    // only trust replicas at (or past) the visible version
                    if (replica.checkVersionCatchUp(version, versionHash, false)
                            && replica.getRowCount() > tabletRowCount) {
                        tabletRowCount = replica.getRowCount();
                    }
                }
                totalCount += tabletRowCount;
            }
        }
    }
    return totalCount;
}
// Schema of the base index (the table's own columns, excluding rollups).
@Override
public List<Column> getBaseSchema() {
    return indexIdToSchema.get(baseIndexId);
}
/**
 * Number of key columns in the base schema.
 */
public int getKeysNum() {
    return (int) getBaseSchema().stream().filter(Column::isKey).count();
}
/**
 * Legacy-upgrade helper: converts RANDOM distribution (table default and each
 * partition) into HASH distribution over the base schema.
 *
 * @return true when anything was converted (so the change can be persisted)
 */
public boolean convertRandomDistributionToHashDistribution() {
    boolean hasChanged = false;
    List<Column> baseSchema = indexIdToSchema.get(baseIndexId);
    if (defaultDistributionInfo.getType() == DistributionInfoType.RANDOM) {
        defaultDistributionInfo = ((RandomDistributionInfo) defaultDistributionInfo).toHashDistributionInfo(baseSchema);
        hasChanged = true;
    }
    for (Partition partition : idToPartition.values()) {
        if (partition.convertRandomDistributionToHashDistribution(baseSchema)) {
            hasChanged = true;
        }
    }
    return hasChanged;
}
// Stores the default replication num in tableProperty (created lazily).
public void setReplicationNum(Short replicationNum) {
    if (tableProperty == null) {
        tableProperty = new TableProperty(new HashMap<>());
    }
    tableProperty.modifyTableProperties(PropertyAnalyzer.PROPERTIES_REPLICATION_NUM, replicationNum.toString());
    tableProperty.buildReplicationNum();
}

// Default replication num, or null when no table property is set.
public Short getReplicationNum() {
    if (tableProperty != null) {
        return tableProperty.getReplicationNum();
    }
    return null;
}

// Whether the table is flagged in-memory; false when no property is set.
public Boolean isInMemory() {
    if (tableProperty != null) {
        return tableProperty.IsInMemory();
    }
    return false;
}

// Stores the in-memory flag in tableProperty (created lazily).
public void setIsInMemory(boolean isInMemory) {
    if (tableProperty == null) {
        tableProperty = new TableProperty(new HashMap<>());
    }
    tableProperty.modifyTableProperties(PropertyAnalyzer.PROPERTIES_INMEMORY, Boolean.valueOf(isInMemory).toString());
    tableProperty.buildInMemory();
}
} |
`checkRange` and `checkDate` return true is error 😂 https://github.com/apache/doris/blob/881670566c0aa577dd83af69c7b3d9f7a3986ab2/fe/fe-core/src/main/java/org/apache/doris/analysis/DateLiteral.java#L1298 | private void init(String s, Type type) throws AnalysisException {
try {
Preconditions.checkArgument(type.isDateType());
TemporalAccessor dateTime = null;
boolean parsed = false;
if (!s.contains("-")) {
for (DateTimeFormatter formatter : formatterList) {
try {
dateTime = formatter.parse(s);
parsed = true;
break;
} catch (DateTimeParseException ex) {
}
}
if (!parsed) {
throw new AnalysisException("Invalid date value: " + s);
}
} else {
String[] datePart = s.contains(" ") ? s.split(" ")[0].split("-") : s.split("-");
DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder();
if (datePart.length != 3) {
throw new AnalysisException("Invalid date value: " + s);
}
for (int i = 0; i < datePart.length; i++) {
switch (i) {
case 0:
if (datePart[i].length() == 2) {
builder.appendValueReduced(ChronoField.YEAR, 2, 2, 1970);
} else {
builder.appendPattern(String.join("", Collections.nCopies(datePart[i].length(), "u")));
}
break;
case 1:
builder.appendPattern(String.join("", Collections.nCopies(datePart[i].length(), "M")));
break;
case 2:
builder.appendPattern(String.join("", Collections.nCopies(datePart[i].length(), "d")));
break;
default:
throw new AnalysisException("Two many parts in date format " + s);
}
if (i < datePart.length - 1) {
builder.appendLiteral("-");
}
}
if (s.contains(" ")) {
builder.appendLiteral(" ");
}
String[] timePart = s.contains(" ") ? s.split(" ")[1].split(":") : new String[]{};
if (timePart.length > 0 && (type.equals(Type.DATE) || type.equals(Type.DATEV2))) {
throw new AnalysisException("Invalid date value: " + s);
}
if (timePart.length == 0 && (type.equals(Type.DATETIME) || type.equals(Type.DATETIMEV2))) {
throw new AnalysisException("Invalid datetime value: " + s);
}
for (int i = 0; i < timePart.length; i++) {
switch (i) {
case 0:
builder.appendPattern(String.join("", Collections.nCopies(timePart[i].length(), "H")));
break;
case 1:
builder.appendPattern(String.join("", Collections.nCopies(timePart[i].length(), "m")));
break;
case 2:
builder.appendPattern(String.join("", Collections.nCopies(timePart[i].contains(".")
? timePart[i].split("\\.")[0].length() : timePart[i].length(), "s")));
if (timePart[i].contains(".")) {
builder.appendFraction(ChronoField.MICRO_OF_SECOND, 0, 6, true);
}
break;
default:
throw new AnalysisException("Two many parts in time format " + s);
}
if (i < timePart.length - 1) {
builder.appendLiteral(":");
}
}
DateTimeFormatter formatter = builder.toFormatter().withResolverStyle(ResolverStyle.STRICT);
dateTime = formatter.parse(s);
parsed = true;
}
Preconditions.checkArgument(parsed);
year = getOrDefault(dateTime, ChronoField.YEAR, 0);
month = getOrDefault(dateTime, ChronoField.MONTH_OF_YEAR, 0);
day = getOrDefault(dateTime, ChronoField.DAY_OF_MONTH, 0);
hour = getOrDefault(dateTime, ChronoField.HOUR_OF_DAY, 0);
minute = getOrDefault(dateTime, ChronoField.MINUTE_OF_HOUR, 0);
second = getOrDefault(dateTime, ChronoField.SECOND_OF_MINUTE, 0);
microsecond = getOrDefault(dateTime, ChronoField.MICRO_OF_SECOND, 0);
if (type.isDatetimeV2()) {
this.roundFloor(((ScalarType) type).getScalarScale());
}
this.type = type;
if (checkRange() || checkDate()) {
throw new AnalysisException("Datetime value is out of range");
}
} catch (Exception ex) {
throw new AnalysisException("date literal [" + s + "] is invalid: " + ex.getMessage());
}
} | if (checkRange() || checkDate()) { | private void init(String s, Type type) throws AnalysisException {
try {
Preconditions.checkArgument(type.isDateType());
TemporalAccessor dateTime = null;
boolean parsed = false;
if (!s.contains("-")) {
for (DateTimeFormatter formatter : formatterList) {
try {
dateTime = formatter.parse(s);
parsed = true;
break;
} catch (DateTimeParseException ex) {
}
}
if (!parsed) {
throw new AnalysisException("Invalid date value: " + s);
}
} else {
String[] datePart = s.contains(" ") ? s.split(" ")[0].split("-") : s.split("-");
DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder();
if (datePart.length != 3) {
throw new AnalysisException("Invalid date value: " + s);
}
for (int i = 0; i < datePart.length; i++) {
switch (i) {
case 0:
if (datePart[i].length() == 2) {
builder.appendValueReduced(ChronoField.YEAR, 2, 2, 1970);
} else {
builder.appendPattern(String.join("", Collections.nCopies(datePart[i].length(), "u")));
}
break;
case 1:
builder.appendPattern(String.join("", Collections.nCopies(datePart[i].length(), "M")));
break;
case 2:
builder.appendPattern(String.join("", Collections.nCopies(datePart[i].length(), "d")));
break;
default:
throw new AnalysisException("Two many parts in date format " + s);
}
if (i < datePart.length - 1) {
builder.appendLiteral("-");
}
}
if (s.contains(" ")) {
builder.appendLiteral(" ");
}
String[] timePart = s.contains(" ") ? s.split(" ")[1].split(":") : new String[]{};
if (timePart.length > 0 && (type.equals(Type.DATE) || type.equals(Type.DATEV2))) {
throw new AnalysisException("Invalid date value: " + s);
}
if (timePart.length == 0 && (type.equals(Type.DATETIME) || type.equals(Type.DATETIMEV2))) {
throw new AnalysisException("Invalid datetime value: " + s);
}
for (int i = 0; i < timePart.length; i++) {
switch (i) {
case 0:
builder.appendPattern(String.join("", Collections.nCopies(timePart[i].length(), "H")));
break;
case 1:
builder.appendPattern(String.join("", Collections.nCopies(timePart[i].length(), "m")));
break;
case 2:
builder.appendPattern(String.join("", Collections.nCopies(timePart[i].contains(".")
? timePart[i].split("\\.")[0].length() : timePart[i].length(), "s")));
if (timePart[i].contains(".")) {
builder.appendFraction(ChronoField.MICRO_OF_SECOND, 0, 6, true);
}
break;
default:
throw new AnalysisException("Two many parts in time format " + s);
}
if (i < timePart.length - 1) {
builder.appendLiteral(":");
}
}
DateTimeFormatter formatter = builder.toFormatter().withResolverStyle(ResolverStyle.STRICT);
dateTime = formatter.parse(s);
parsed = true;
}
Preconditions.checkArgument(parsed);
year = getOrDefault(dateTime, ChronoField.YEAR, 0);
month = getOrDefault(dateTime, ChronoField.MONTH_OF_YEAR, 0);
day = getOrDefault(dateTime, ChronoField.DAY_OF_MONTH, 0);
hour = getOrDefault(dateTime, ChronoField.HOUR_OF_DAY, 0);
minute = getOrDefault(dateTime, ChronoField.MINUTE_OF_HOUR, 0);
second = getOrDefault(dateTime, ChronoField.SECOND_OF_MINUTE, 0);
microsecond = getOrDefault(dateTime, ChronoField.MICRO_OF_SECOND, 0);
if (type.isDatetimeV2()) {
this.roundFloor(((ScalarType) type).getScalarScale());
}
this.type = type;
if (checkRange() || checkDate()) {
throw new AnalysisException("Datetime value is out of range");
}
} catch (Exception ex) {
throw new AnalysisException("date literal [" + s + "] is invalid: " + ex.getMessage());
}
} | class DateLiteral extends LiteralExpr {
private static final Logger LOG = LogManager.getLogger(DateLiteral.class);
private static final DateLiteral MIN_DATE = new DateLiteral(0000, 1, 1);
private static final DateLiteral MAX_DATE = new DateLiteral(9999, 12, 31);
private static final DateLiteral MIN_DATETIME = new DateLiteral(0000, 1, 1, 0, 0, 0);
private static final DateLiteral MAX_DATETIME = new DateLiteral(9999, 12, 31, 23, 59, 59);
private static final DateLiteral MIN_DATETIMEV2
= new DateLiteral(0000, 1, 1, 0, 0, 0, 0);
private static final DateLiteral MAX_DATETIMEV2
= new DateLiteral(9999, 12, 31, 23, 59, 59, 999999L);
private static final int DATEKEY_LENGTH = 8;
private static final int DATETIMEKEY_LENGTH = 14;
private static final int MAX_MICROSECOND = 999999;
private static DateTimeFormatter DATE_TIME_FORMATTER = null;
private static DateTimeFormatter DATE_TIME_FORMATTER_TO_MICRO_SECOND = null;
private static DateTimeFormatter DATE_FORMATTER = null;
private static List<DateTimeFormatter> formatterList = null;
/*
* The datekey type is widely used in data warehouses
* For example, 20121229 means '2012-12-29'
* and data in the form of 'yyyymmdd' is generally called the datekey type.
*/
private static DateTimeFormatter DATEKEY_FORMATTER = null;
private static DateTimeFormatter DATETIMEKEY_FORMATTER = null;
private static Map<String, Integer> MONTH_NAME_DICT = Maps.newHashMap();
private static Map<String, Integer> MONTH_ABBR_NAME_DICT = Maps.newHashMap();
private static Map<String, Integer> WEEK_DAY_NAME_DICT = Maps.newHashMap();
private static final int[] DAYS_IN_MONTH = new int[] {0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
private static final int ALLOW_SPACE_MASK = 4 | 64;
private static final int MAX_DATE_PARTS = 8;
private static final int YY_PART_YEAR = 70;
static {
try {
DATE_TIME_FORMATTER = formatBuilder("%Y-%m-%d %H:%i:%s").toFormatter();
DATE_FORMATTER = formatBuilder("%Y-%m-%d").toFormatter();
DATEKEY_FORMATTER = formatBuilder("%Y%m%d").toFormatter();
DATETIMEKEY_FORMATTER = formatBuilder("%Y%m%d%H%i%s").toFormatter();
DATE_TIME_FORMATTER_TO_MICRO_SECOND = new DateTimeFormatterBuilder()
.appendPattern("uuuu-MM-dd HH:mm:ss")
.appendFraction(ChronoField.MICRO_OF_SECOND, 0, 6, true)
.toFormatter();
formatterList = Lists.newArrayList(
formatBuilder("%Y%m%d").appendLiteral('T').appendPattern("HHmmss")
.appendFraction(ChronoField.MICRO_OF_SECOND, 0, 6, true).toFormatter(),
formatBuilder("%Y%m%d").appendLiteral('T').appendPattern("HHmmss")
.appendFraction(ChronoField.MICRO_OF_SECOND, 0, 6, false).toFormatter(),
formatBuilder("%Y%m%d%H%i%s")
.appendFraction(ChronoField.MICRO_OF_SECOND, 0, 6, true).toFormatter(),
formatBuilder("%Y%m%d%H%i%s")
.appendFraction(ChronoField.MICRO_OF_SECOND, 0, 6, false).toFormatter(),
DATETIMEKEY_FORMATTER, DATEKEY_FORMATTER);
} catch (AnalysisException e) {
LOG.error("invalid date format", e);
System.exit(-1);
}
MONTH_NAME_DICT.put("january", 1);
MONTH_NAME_DICT.put("february", 2);
MONTH_NAME_DICT.put("march", 3);
MONTH_NAME_DICT.put("april", 4);
MONTH_NAME_DICT.put("may", 5);
MONTH_NAME_DICT.put("june", 6);
MONTH_NAME_DICT.put("july", 7);
MONTH_NAME_DICT.put("august", 8);
MONTH_NAME_DICT.put("september", 9);
MONTH_NAME_DICT.put("october", 10);
MONTH_NAME_DICT.put("november", 11);
MONTH_NAME_DICT.put("december", 12);
MONTH_ABBR_NAME_DICT.put("jan", 1);
MONTH_ABBR_NAME_DICT.put("feb", 2);
MONTH_ABBR_NAME_DICT.put("mar", 3);
MONTH_ABBR_NAME_DICT.put("apr", 4);
MONTH_ABBR_NAME_DICT.put("may", 5);
MONTH_ABBR_NAME_DICT.put("jun", 6);
MONTH_ABBR_NAME_DICT.put("jul", 7);
MONTH_ABBR_NAME_DICT.put("aug", 8);
MONTH_ABBR_NAME_DICT.put("sep", 9);
MONTH_ABBR_NAME_DICT.put("oct", 10);
MONTH_ABBR_NAME_DICT.put("nov", 11);
MONTH_ABBR_NAME_DICT.put("dec", 12);
WEEK_DAY_NAME_DICT.put("monday", 0);
WEEK_DAY_NAME_DICT.put("tuesday", 1);
WEEK_DAY_NAME_DICT.put("wednesday", 2);
WEEK_DAY_NAME_DICT.put("thursday", 3);
WEEK_DAY_NAME_DICT.put("friday", 4);
WEEK_DAY_NAME_DICT.put("saturday", 5);
WEEK_DAY_NAME_DICT.put("sunday", 6);
MONTH_ABBR_NAME_DICT.put("mon", 0);
MONTH_ABBR_NAME_DICT.put("tue", 1);
MONTH_ABBR_NAME_DICT.put("wed", 2);
MONTH_ABBR_NAME_DICT.put("thu", 3);
MONTH_ABBR_NAME_DICT.put("fri", 4);
MONTH_ABBR_NAME_DICT.put("sat", 5);
MONTH_ABBR_NAME_DICT.put("sun", 6);
}
private static final Pattern HAS_TIME_PART = Pattern.compile("^.*[HhIiklrSsTp]+.*$");
    /**
     * Tag written before the packed value in {@link #write(DataOutput)} so that
     * {@link #readFields(DataInput)} knows which concrete date type to restore.
     * The numeric values are part of the on-disk format — never renumber them.
     */
    private enum DateLiteralType {
        DATETIME(0),
        DATE(1),
        DATETIMEV2(2),
        DATEV2(3);

        private final int value;

        DateLiteralType(int value) {
            this.value = value;
        }

        public int value() {
            return value;
        }
    }
    /** No-arg constructor used by {@link #read(DataInput)} during deserialization. */
    public DateLiteral() {
        super();
    }

    /**
     * Creates the minimum or maximum representable literal of the given type.
     *
     * @param isMax true for the upper bound, false for the lower bound
     */
    public DateLiteral(Type type, boolean isMax) throws AnalysisException {
        super();
        this.type = type;
        if (type.equals(Type.DATE) || type.equals(Type.DATEV2)) {
            if (isMax) {
                copy(MAX_DATE);
            } else {
                copy(MIN_DATE);
            }
        } else if (type.equals(Type.DATETIME)) {
            if (isMax) {
                copy(MAX_DATETIME);
            } else {
                copy(MIN_DATETIME);
            }
        } else {
            // Any remaining date type is treated as DATETIMEV2.
            if (isMax) {
                copy(MAX_DATETIMEV2);
            } else {
                copy(MIN_DATETIMEV2);
            }
        }
        analysisDone();
    }

    /**
     * Parses a textual date literal (e.g. "2012-12-29", "20121229", ISO forms) into
     * the requested type. Delegates to init(), which throws on malformed input.
     */
    public DateLiteral(String s, Type type) throws AnalysisException {
        super();
        init(s, type);
        analysisDone();
    }

    /**
     * Builds a literal from an epoch-millisecond timestamp interpreted in {@code timeZone}.
     * Date-only types zero out the time-of-day fields; DATETIME drops microseconds.
     *
     * @throws AnalysisException if {@code type} is not a date type
     */
    public DateLiteral(long unixTimestamp, TimeZone timeZone, Type type) throws AnalysisException {
        Timestamp timestamp = new Timestamp(unixTimestamp);
        ZonedDateTime zonedDateTime = ZonedDateTime.ofInstant(timestamp.toInstant(), ZoneId.of(timeZone.getID()));
        year = zonedDateTime.getYear();
        month = zonedDateTime.getMonthValue();
        day = zonedDateTime.getDayOfMonth();
        hour = zonedDateTime.getHour();
        minute = zonedDateTime.getMinute();
        second = zonedDateTime.getSecond();
        microsecond = zonedDateTime.get(ChronoField.MICRO_OF_SECOND);
        if (type.equals(Type.DATE)) {
            hour = 0;
            minute = 0;
            second = 0;
            microsecond = 0;
            this.type = Type.DATE;
        } else if (type.equals(Type.DATETIME)) {
            this.type = Type.DATETIME;
            microsecond = 0;
        } else if (type.equals(Type.DATEV2)) {
            hour = 0;
            minute = 0;
            second = 0;
            microsecond = 0;
            this.type = Type.DATEV2;
        } else if (type.equals(Type.DATETIMEV2)) {
            this.type = Type.DATETIMEV2;
        } else {
            throw new AnalysisException("Error date literal type : " + type);
        }
    }

    /** Date-only literal; concrete type (DATE vs DATEV2) follows the session default. */
    public DateLiteral(long year, long month, long day) {
        this.hour = 0;
        this.minute = 0;
        this.second = 0;
        this.year = year;
        this.month = month;
        this.day = day;
        this.type = ScalarType.getDefaultDateType(Type.DATE);
    }

    /** Date-only literal with an explicit DATE/DATEV2 type. */
    public DateLiteral(long year, long month, long day, Type type) {
        this.year = year;
        this.month = month;
        this.day = day;
        Preconditions.checkArgument(type.getPrimitiveType().equals(Type.DATE.getPrimitiveType())
                || type.getPrimitiveType().equals(Type.DATEV2.getPrimitiveType()));
        this.type = type;
    }

    /** Datetime literal (no microseconds); concrete type follows the session default. */
    public DateLiteral(long year, long month, long day, long hour, long minute, long second) {
        this.hour = hour;
        this.minute = minute;
        this.second = second;
        this.year = year;
        this.month = month;
        this.day = day;
        this.type = ScalarType.getDefaultDateType(Type.DATETIME);
    }

    /** Datetime literal with microsecond precision; always typed DATETIMEV2. */
    public DateLiteral(long year, long month, long day, long hour, long minute, long second, long microsecond) {
        this.hour = hour;
        this.minute = minute;
        this.second = second;
        this.year = year;
        this.month = month;
        this.day = day;
        this.microsecond = microsecond;
        this.type = Type.DATETIMEV2;
    }

    /** Datetime literal with an explicit DATETIME/DATETIMEV2 type (microseconds left at 0). */
    public DateLiteral(long year, long month, long day, long hour, long minute, long second, Type type) {
        this.hour = hour;
        this.minute = minute;
        this.second = second;
        this.year = year;
        this.month = month;
        this.day = day;
        Preconditions.checkArgument(type.getPrimitiveType().equals(Type.DATETIME.getPrimitiveType())
                || type.getPrimitiveType().equals(Type.DATETIMEV2.getPrimitiveType()));
        this.type = type;
    }

    /**
     * Builds a literal from a java.time value. Time-of-day fields are only read for
     * datetime types; for date types they stay at their default 0.
     */
    public DateLiteral(LocalDateTime dateTime, Type type) {
        this.year = dateTime.getYear();
        this.month = dateTime.getMonthValue();
        this.day = dateTime.getDayOfMonth();
        this.type = type;
        if (type.equals(Type.DATETIME) || type.equals(Type.DATETIMEV2)) {
            this.hour = dateTime.getHour();
            this.minute = dateTime.getMinute();
            this.second = dateTime.getSecond();
            this.microsecond = dateTime.get(ChronoField.MICRO_OF_SECOND);
        }
    }

    /** Copy constructor; used by {@link #clone()}. */
    public DateLiteral(DateLiteral other) {
        super(other);
        hour = other.hour;
        minute = other.minute;
        second = other.second;
        year = other.year;
        month = other.month;
        day = other.day;
        microsecond = other.microsecond;
        type = other.type;
    }

    /** Returns the smallest representable literal of the given date type. */
    public static DateLiteral createMinValue(Type type) throws AnalysisException {
        return new DateLiteral(type, false);
    }
private void copy(DateLiteral other) {
hour = other.hour;
minute = other.minute;
second = other.second;
year = other.year;
month = other.month;
day = other.day;
microsecond = other.microsecond;
type = other.type;
}
    /** Deep copy via the copy constructor. */
    @Override
    public Expr clone() {
        return new DateLiteral(this);
    }
@Override
public boolean isMinValue() {
switch (type.getPrimitiveType()) {
case DATE:
case DATEV2:
return this.getStringValue().compareTo(MIN_DATE.getStringValue()) == 0;
case DATETIME:
return this.getStringValue().compareTo(MIN_DATETIME.getStringValue()) == 0;
case DATETIMEV2:
return this.getStringValue().compareTo(MIN_DATETIMEV2.getStringValue()) == 0;
default:
return false;
}
}
    /**
     * Returns the storage-layer encoding of this literal as a boxed long.
     *
     * DATE:       year*512 + month*32 + day (i.e. year<<9 | month<<5 | day).
     * DATETIME:   decimal packing yyyyMMddHHmmss.
     * DATEV2:     bit packing year<<9 | month<<5 | day.
     * DATETIMEV2: bit packing with microseconds in the low 24 bits.
     * NOTE(review): for DATETIMEV2, year << 50 exceeds Long.MAX_VALUE for years near
     * 9999 and wraps negative in Java; presumably the backend treats this as an
     * unsigned 64-bit value — confirm before relying on ordering of these longs.
     */
    @Override
    public Object getRealValue() {
        if (type.equals(Type.DATE)) {
            return year * 16 * 32L + month * 32 + day;
        } else if (type.equals(Type.DATETIME)) {
            return (year * 10000 + month * 100 + day) * 1000000L + hour * 10000 + minute * 100 + second;
        } else if (type.equals(Type.DATEV2)) {
            return (year << 9) | (month << 5) | day;
        } else if (type.equals(Type.DATETIMEV2)) {
            return (year << 50) | (month << 46) | (day << 41) | (hour << 36)
                    | (minute << 30) | (second << 24) | microsecond;
        } else {
            Preconditions.checkState(false, "invalid date type: " + type);
            return -1L;
        }
    }
@Override
public ByteBuffer getHashValue(PrimitiveType type) {
String value = convertToString(type);
ByteBuffer buffer;
try {
buffer = ByteBuffer.wrap(value.getBytes("UTF-8"));
} catch (Exception e) {
throw new RuntimeException(e);
}
return buffer;
}
@Override
public int compareLiteral(LiteralExpr expr) {
if (expr instanceof NullLiteral) {
return 1;
}
if (expr == MaxLiteral.MAX_VALUE) {
return -1;
}
return Long.signum(getLongValue() - expr.getLongValue());
}
    /** SQL text form: the canonical string value wrapped in single quotes. */
    @Override
    public String toSqlImpl() {
        return "'" + getStringValue() + "'";
    }
@Override
public String getStringValue() {
if (type.isDate() || type.isDateV2()) {
return String.format("%04d-%02d-%02d", year, month, day);
} else if (type.isDatetimeV2()) {
String tmp = String.format("%04d-%02d-%02d %02d:%02d:%02d",
year, month, day, hour, minute, second);
if (microsecond == 0) {
return tmp;
}
return tmp + String.format(".%06d", microsecond);
} else {
return String.format("%04d-%02d-%02d %02d:%02d:%02d", year, month, day, hour, minute, second);
}
}
public void roundCeiling(int newScale) {
Preconditions.checkArgument(type.isDatetimeV2());
long remain = Double.valueOf(microsecond % (Math.pow(10, 6 - newScale))).longValue();
if (remain != 0) {
microsecond = Double.valueOf((microsecond + (Math.pow(10, 6 - newScale)))
/ (Math.pow(10, 6 - newScale)) * (Math.pow(10, 6 - newScale))).longValue();
}
type = ScalarType.createDatetimeV2Type(newScale);
}
public void roundFloor(int newScale) {
microsecond = Double.valueOf(microsecond / (Math.pow(10, 6 - newScale))
* (Math.pow(10, 6 - newScale))).longValue();
type = ScalarType.createDatetimeV2Type(newScale);
}
private String convertToString(PrimitiveType type) {
if (type == PrimitiveType.DATE || type == PrimitiveType.DATEV2) {
return String.format("%04d-%02d-%02d", year, month, day);
} else if (type == PrimitiveType.DATETIMEV2) {
String tmp = String.format("%04d-%02d-%02d %02d:%02d:%02d",
year, month, day, hour, minute, second);
if (microsecond == 0) {
return tmp;
}
return tmp + String.format(".%06d", microsecond);
} else {
return String.format("%04d-%02d-%02d %02d:%02d:%02d", year, month, day, hour, minute, second);
}
}
    /** Decimal packing yyyyMMddHHmmss regardless of concrete type (microseconds dropped). */
    @Override
    public long getLongValue() {
        return (year * 10000 + month * 100 + day) * 1000000L + hour * 10000 + minute * 100 + second;
    }

    /** Same packed value as {@link #getLongValue()}, widened to double. */
    @Override
    public double getDoubleValue() {
        return getLongValue();
    }
    /** Serializes this literal for the backend as a DATE_LITERAL thrift node carrying the string form. */
    @Override
    protected void toThrift(TExprNode msg) {
        msg.node_type = TExprNodeType.DATE_LITERAL;
        msg.date_literal = new TDateLiteral(getStringValue());
    }
@Override
protected Expr uncheckedCastTo(Type targetType) throws AnalysisException {
if (targetType.isDateType()) {
if (type.equals(targetType)) {
return this;
}
if (targetType.equals(Type.DATE) || targetType.equals(Type.DATEV2)) {
return new DateLiteral(this.year, this.month, this.day, targetType);
} else if (targetType.equals(Type.DATETIME)) {
return new DateLiteral(this.year, this.month, this.day, this.hour, this.minute, this.second,
targetType);
} else if (targetType.isDatetimeV2()) {
return new DateLiteral(this.year, this.month, this.day, this.hour, this.minute, this.microsecond,
targetType);
} else {
throw new AnalysisException("Error date literal type : " + type);
}
} else if (targetType.isStringType()) {
return new StringLiteral(getStringValue());
} else if (Type.isImplicitlyCastable(this.type, targetType, true)) {
return new CastExpr(targetType, this);
}
Preconditions.checkState(false);
return this;
}
public void castToDate() {
if (Config.enable_date_conversion) {
this.type = Type.DATEV2;
} else {
this.type = Type.DATE;
}
hour = 0;
minute = 0;
second = 0;
}
private long makePackedDatetime() {
long ymd = ((year * 13 + month) << 5) | day;
long hms = (hour << 12) | (minute << 6) | second;
long packedDatetime = ((ymd << 17) | hms) << 24 + microsecond;
return packedDatetime;
}
    /**
     * V2 bit packing: year<<50 | month<<46 | day<<41 | hour<<36 | minute<<30
     * | second<<24 | microsecond (fits in 24 bits since max is 999999).
     * NOTE(review): year << 50 wraps negative for years near 9999; presumably the
     * value is treated as unsigned 64-bit on the storage side — confirm upstream.
     */
    private long makePackedDatetimeV2() {
        return (year << 50) | (month << 46) | (day << 41) | (hour << 36)
                | (minute << 30) | (second << 24) | microsecond;
    }
    /**
     * Serializes this literal: a {@link DateLiteralType} tag, then the packed value
     * (v1 packing for DATE/DATETIME, v2 packing otherwise); DATETIMEV2 additionally
     * writes its scale. Must stay symmetric with {@link #readFields(DataInput)}.
     *
     * @throws IOException if the type is not a supported date type
     */
    @Override
    public void write(DataOutput out) throws IOException {
        super.write(out);
        if (this.type.equals(Type.DATETIME)) {
            out.writeShort(DateLiteralType.DATETIME.value());
            out.writeLong(makePackedDatetime());
        } else if (this.type.equals(Type.DATE)) {
            out.writeShort(DateLiteralType.DATE.value());
            out.writeLong(makePackedDatetime());
        } else if (this.type.getPrimitiveType() == PrimitiveType.DATETIMEV2) {
            out.writeShort(DateLiteralType.DATETIMEV2.value());
            out.writeLong(makePackedDatetimeV2());
            // The scale is needed to rebuild the exact DATETIMEV2(<scale>) type.
            out.writeInt(((ScalarType) this.type).getScalarScale());
        } else if (this.type.equals(Type.DATEV2)) {
            out.writeShort(DateLiteralType.DATEV2.value());
            out.writeLong(makePackedDatetimeV2());
        } else {
            throw new IOException("Error date literal type : " + type);
        }
    }
    /**
     * Decodes a v1 packed datetime (see {@link #makePackedDatetime()}): microseconds
     * in the low 24 bits, then second/minute/hour, then day and (year*13+month).
     * Sets the type to DATETIME; {@link #readFields(DataInput)} overwrites it with
     * the tag that was actually serialized.
     */
    private void fromPackedDatetime(long packedTime) {
        microsecond = (packedTime % (1L << 24));
        long ymdhms = (packedTime >> 24);
        long ymd = ymdhms >> 17;
        day = ymd % (1 << 5);
        long ym = ymd >> 5;
        month = ym % 13;
        year = ym / 13;
        // Clamp into the supported 0..9999 year range.
        year %= 10000;
        long hms = ymdhms % (1 << 17);
        second = hms % (1 << 6);
        minute = (hms >> 6) % (1 << 6);
        hour = (hms >> 12);
        this.type = Type.DATETIME;
    }
    /**
     * Deserializes the tag + packed long written by {@link #write(DataOutput)} and
     * restores the concrete date type (reading the extra scale int for DATETIMEV2).
     *
     * @throws IOException on an unknown type tag
     */
    public void readFields(DataInput in) throws IOException {
        super.readFields(in);
        short dateLiteralType = in.readShort();
        // NOTE(review): v2-tagged values are also decoded with the v1 unpacking here;
        // presumably acceptable for the metadata versions that reach this path — verify.
        fromPackedDatetime(in.readLong());
        if (dateLiteralType == DateLiteralType.DATETIME.value()) {
            this.type = Type.DATETIME;
        } else if (dateLiteralType == DateLiteralType.DATE.value()) {
            this.type = Type.DATE;
        } else if (dateLiteralType == DateLiteralType.DATETIMEV2.value()) {
            this.type = ScalarType.createDatetimeV2Type(in.readInt());
        } else if (dateLiteralType == DateLiteralType.DATEV2.value()) {
            this.type = Type.DATEV2;
        } else {
            throw new IOException("Error date literal type : " + type);
        }
    }
    /** Static deserialization factory: reads one DateLiteral from the stream. */
    public static DateLiteral read(DataInput in) throws IOException {
        DateLiteral literal = new DateLiteral();
        literal.readFields(in);
        return literal;
    }
public long unixTimestamp(TimeZone timeZone) {
ZonedDateTime zonedDateTime = ZonedDateTime.of((int) year, (int) month, (int) day, (int) hour,
(int) minute, (int) second, (int) microsecond, ZoneId.of(timeZone.getID()));
Timestamp timestamp = Timestamp.from(zonedDateTime.toInstant());
return timestamp.getTime();
}
    /** True if the MySQL-style format string contains any time specifier (hour/minute/second/AM-PM). */
    public static boolean hasTimePart(String format) {
        return HAS_TIME_PART.matcher(format).matches();
    }
public String dateFormat(String pattern) throws AnalysisException {
TemporalAccessor accessor;
if (type.equals(Type.DATE) || type.equals(Type.DATEV2)) {
accessor = DATE_FORMATTER.parse(getStringValue());
} else if (type.isDatetimeV2()) {
accessor = DATE_TIME_FORMATTER_TO_MICRO_SECOND.parse(getStringValue());
} else {
accessor = DATE_TIME_FORMATTER.parse(getStringValue());
}
DateTimeFormatter toFormatter = formatBuilder(pattern).toFormatter();
return toFormatter.format(accessor);
}
    /**
     * Translates a MySQL-style date format string (%Y, %m, %d, ...) into a
     * {@link DateTimeFormatterBuilder}. Characters outside of %-escapes are emitted
     * as literals; unsupported specifiers (%f, %w, %U, %u, %V, %X, %D) throw.
     *
     * @throws AnalysisException on an unsupported format specifier
     */
    private static DateTimeFormatterBuilder formatBuilder(String pattern) throws AnalysisException {
        DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder();
        boolean escaped = false;
        for (int i = 0; i < pattern.length(); i++) {
            char character = pattern.charAt(i);
            if (escaped) {
                switch (character) {
                    case 'a': // %a: abbreviated weekday name
                        builder.appendText(ChronoField.DAY_OF_WEEK, TextStyle.SHORT);
                        break;
                    case 'b': // %b: abbreviated month name
                        builder.appendText(ChronoField.MONTH_OF_YEAR, TextStyle.SHORT);
                        break;
                    case 'c': // %c: month, numeric, no padding
                        builder.appendValue(ChronoField.MONTH_OF_YEAR);
                        break;
                    case 'd': // %d: day of month, zero-padded
                        builder.appendValue(ChronoField.DAY_OF_MONTH, 2);
                        break;
                    case 'e': // %e: day of month, no padding
                        builder.appendValue(ChronoField.DAY_OF_MONTH);
                        break;
                    case 'H': // %H: hour (0-23), zero-padded
                        builder.appendValue(ChronoField.HOUR_OF_DAY, 2);
                        break;
                    case 'h': // %h/%I: hour (1-12), zero-padded
                    case 'I':
                        builder.appendValue(ChronoField.HOUR_OF_AMPM, 2);
                        break;
                    case 'i': // %i: minutes, zero-padded
                        builder.appendValue(ChronoField.MINUTE_OF_HOUR, 2);
                        break;
                    case 'j': // %j: day of year
                        builder.appendValue(ChronoField.DAY_OF_YEAR, 3);
                        break;
                    case 'k': // %k: hour (0-23), no padding
                        builder.appendValue(ChronoField.HOUR_OF_DAY);
                        break;
                    case 'l': // %l: hour (1-12), no padding
                        builder.appendValue(ChronoField.HOUR_OF_AMPM);
                        break;
                    case 'M': // %M: full month name
                        builder.appendText(ChronoField.MONTH_OF_YEAR, TextStyle.FULL);
                        break;
                    case 'm': // %m: month, numeric, zero-padded
                        builder.appendValue(ChronoField.MONTH_OF_YEAR, 2);
                        break;
                    case 'p': // %p: AM/PM
                        builder.appendText(ChronoField.AMPM_OF_DAY);
                        break;
                    case 'r': // %r: 12-hour time with AM/PM
                        // NOTE(review): the trailing .toFormatter() result is discarded;
                        // harmless since the builder is already mutated, but likely a slip.
                        builder.appendValue(ChronoField.HOUR_OF_AMPM, 2)
                                .appendPattern(":mm:ss ")
                                .appendText(ChronoField.AMPM_OF_DAY, TextStyle.FULL)
                                .toFormatter();
                        break;
                    case 'S': // %S/%s: seconds, zero-padded
                    case 's':
                        builder.appendValue(ChronoField.SECOND_OF_MINUTE, 2);
                        break;
                    case 'T': // %T: 24-hour time
                        builder.appendPattern("HH:mm:ss");
                        break;
                    case 'v': // %v: week of year
                        builder.appendValue(ChronoField.ALIGNED_WEEK_OF_YEAR, 2);
                        break;
                    case 'x': // %x/%Y: 4-digit year
                    case 'Y':
                        builder.appendValue(ChronoField.YEAR, 4);
                        break;
                    case 'W': // %W: full weekday name
                        builder.appendText(ChronoField.DAY_OF_WEEK, TextStyle.FULL);
                        break;
                    case 'y': // %y: 2-digit year, pivoting at 1970
                        builder.appendValueReduced(ChronoField.YEAR, 2, 2, 1970);
                        break;
                    case 'f':
                    case 'w':
                    case 'U':
                    case 'u':
                    case 'V':
                    case 'X':
                    case 'D':
                        throw new AnalysisException(String.format("%%%s not supported in date format string",
                                character));
                    case '%': // %%: literal percent sign
                        builder.appendLiteral('%');
                        break;
                    default: // unknown escape: emit the character literally
                        builder.appendLiteral(character);
                        break;
                }
                escaped = false;
            } else if (character == '%') {
                escaped = true;
            } else {
                builder.appendLiteral(character);
            }
        }
        return builder;
    }
private int getOrDefault(final TemporalAccessor accessor, final ChronoField field,
final int defaultValue) {
return accessor.isSupported(field) ? accessor.get(field) : defaultValue;
}
    /**
     * Converts this literal to a {@link LocalDateTime} by re-parsing the canonical
     * string form; time-of-day fields default to 0 for date-only values.
     */
    public LocalDateTime getTimeFormatter() {
        TemporalAccessor accessor;
        if (type.equals(Type.DATE) || type.equals(Type.DATEV2)) {
            accessor = DATE_FORMATTER.parse(getStringValue());
        } else if (type.isDatetimeV2()) {
            accessor = DATE_TIME_FORMATTER_TO_MICRO_SECOND.parse(getStringValue());
        } else {
            accessor = DATE_TIME_FORMATTER.parse(getStringValue());
        }
        final int year = accessor.get(ChronoField.YEAR);
        final int month = accessor.get(ChronoField.MONTH_OF_YEAR);
        final int dayOfMonth = accessor.get(ChronoField.DAY_OF_MONTH);
        final int hour = getOrDefault(accessor, ChronoField.HOUR_OF_DAY, 0);
        final int minute = getOrDefault(accessor, ChronoField.MINUTE_OF_HOUR, 0);
        final int second = getOrDefault(accessor, ChronoField.SECOND_OF_MINUTE, 0);
        // NOTE(review): the micro-of-second value is passed where LocalDateTime.of
        // expects nano-of-second — presumably off by a factor of 1000; verify callers.
        final int microSeconds = getOrDefault(accessor, ChronoField.MICRO_OF_SECOND, 0);
        return LocalDateTime.of(year, month, dayOfMonth, hour, minute, second, microSeconds);
    }
    /** Returns a new literal of the same type shifted by {@code year} years. */
    public DateLiteral plusYears(int year) throws AnalysisException {
        return new DateLiteral(getTimeFormatter().plusYears(year), type);
    }

    /** Returns a new literal of the same type shifted by {@code month} months. */
    public DateLiteral plusMonths(int month) throws AnalysisException {
        return new DateLiteral(getTimeFormatter().plusMonths(month), type);
    }

    /** Returns a new literal of the same type shifted by {@code day} days. */
    public DateLiteral plusDays(int day) throws AnalysisException {
        return new DateLiteral(getTimeFormatter().plusDays(day), type);
    }

    /** Returns a new literal of the same type shifted by {@code hour} hours. */
    public DateLiteral plusHours(int hour) throws AnalysisException {
        return new DateLiteral(getTimeFormatter().plusHours(hour), type);
    }

    /** Returns a new literal of the same type shifted by {@code minute} minutes. */
    public DateLiteral plusMinutes(int minute) throws AnalysisException {
        return new DateLiteral(getTimeFormatter().plusMinutes(minute), type);
    }

    /** Returns a new literal of the same type shifted by {@code second} seconds. */
    public DateLiteral plusSeconds(int second) throws AnalysisException {
        return new DateLiteral(getTimeFormatter().plusSeconds(second), type);
    }
    public long getYear() {
        return year;
    }

    public long getMonth() {
        return month;
    }

    public long getDay() {
        return day;
    }

    public long getHour() {
        return hour;
    }

    public long getMinute() {
        return minute;
    }

    public long getSecond() {
        return second;
    }

    public long getMicrosecond() {
        return microsecond;
    }

    // Date/time components. Stored as longs to keep the bit-packing arithmetic
    // (makePackedDatetime / makePackedDatetimeV2) free of casts.
    private long year;
    private long month;
    private long day;
    private long hour;
    private long minute;
    private long second;
    private long microsecond;
    /**
     * Hash derived from the epoch value in the JVM's default time zone.
     * NOTE(review): the hash therefore varies with the process's default zone;
     * presumably only intra-process consistency is required — confirm callers.
     */
    @Override
    public int hashCode() {
        return 31 * super.hashCode() + Objects.hashCode(unixTimestamp(TimeZone.getDefault()));
    }
    /**
     * Parses {@code value} against a MySQL-style {@code format} string (the STR_TO_DATE
     * semantics), filling this literal's year/month/.../microsecond fields and deriving
     * the result type (DATE, DATETIME, or DATETIMEV2 when %f was consumed).
     *
     * @param format    MySQL format string, e.g. "%Y-%m-%d %H:%i:%s"
     * @param value     the text to parse
     * @param hasSubVal true when called recursively for a composite specifier
     *                  (%r, %T); in that case the method returns the position in
     *                  {@code value} where parsing stopped instead of 0
     * @return 0 on a complete parse, or the resume position when hasSubVal is true
     * @throws InvalidFormatException if the text does not match the format or the
     *                                resulting date is out of range
     */
    public int fromDateFormatStr(String format, String value, boolean hasSubVal) throws InvalidFormatException {
        // fp/vp: cursors into format and value; fend/vend: their lengths.
        int fp = 0;
        int fend = format.length();
        int vp = 0;
        int vend = value.length();
        boolean datePartUsed = false;
        boolean timePartUsed = false;
        boolean microSecondPartUsed = false;
        // dayPart becomes 12 when a PM marker (%p) is seen in 12-hour mode.
        int dayPart = 0;
        long weekday = -1;
        long yearday = -1;
        long weekNum = -1;
        boolean strictWeekNumber = false;
        boolean sundayFirst = false;
        boolean strictWeekNumberYearType = false;
        long strictWeekNumberYear = -1;
        boolean usaTime = false;
        char f;
        while (fp < fend && vp < vend) {
            // Skip leading spaces in the value before each specifier.
            while (vp < vend && Character.isSpaceChar(value.charAt(vp))) {
                vp++;
            }
            if (vp >= vend) {
                break;
            }
            f = format.charAt(fp);
            if (f == '%' && fp + 1 < fend) {
                int tmp = 0;
                long intValue = 0;
                fp++;
                f = format.charAt(fp);
                fp++;
                switch (f) {
                    case 'y':
                        // Two-digit year: >= 70 maps to 19xx, otherwise 20xx.
                        tmp = vp + Math.min(2, vend - vp);
                        intValue = strToLong(value.substring(vp, tmp));
                        intValue += intValue >= 70 ? 1900 : 2000;
                        this.year = intValue;
                        vp = tmp;
                        datePartUsed = true;
                        break;
                    case 'Y':
                        // Four-digit year; a short (<=2 digit) run still gets the pivot applied.
                        tmp = vp + Math.min(4, vend - vp);
                        intValue = strToLong(value.substring(vp, tmp));
                        if (tmp - vp <= 2) {
                            intValue += intValue >= 70 ? 1900 : 2000;
                        }
                        this.year = intValue;
                        vp = tmp;
                        datePartUsed = true;
                        break;
                    case 'm':
                    case 'c':
                        // Numeric month.
                        tmp = vp + Math.min(2, vend - vp);
                        intValue = strToLong(value.substring(vp, tmp));
                        this.month = intValue;
                        vp = tmp;
                        datePartUsed = true;
                        break;
                    case 'M': {
                        // Full month name.
                        int nextPos = findWord(value, vp);
                        intValue = checkWord(MONTH_NAME_DICT, value.substring(vp, nextPos));
                        this.month = intValue;
                        vp = nextPos;
                        break;
                    }
                    case 'b': {
                        // Abbreviated month name.
                        int nextPos = findWord(value, vp);
                        intValue = checkWord(MONTH_ABBR_NAME_DICT, value.substring(vp, nextPos));
                        this.month = intValue;
                        vp = nextPos;
                        break;
                    }
                    case 'd':
                    case 'e':
                        // Day of month.
                        tmp = vp + Math.min(2, vend - vp);
                        intValue = strToLong(value.substring(vp, tmp));
                        this.day = intValue;
                        vp = tmp;
                        datePartUsed = true;
                        break;
                    case 'D':
                        // Day with English suffix ("1st", "22nd"): the suffix is skipped.
                        tmp = vp + Math.min(2, vend - vp);
                        intValue = strToLong(value.substring(vp, tmp));
                        this.day = intValue;
                        vp = tmp + Math.min(2, vend - tmp);
                        datePartUsed = true;
                        break;
                    case 'h':
                    case 'I':
                    case 'l':
                        // 12-hour forms fall through into the shared hour parsing below.
                        usaTime = true;
                    case 'k':
                    case 'H':
                        tmp = findNumber(value, vp, 2);
                        intValue = strToLong(value.substring(vp, tmp));
                        this.hour = intValue;
                        vp = tmp;
                        timePartUsed = true;
                        break;
                    case 'i':
                        // Minutes.
                        tmp = vp + Math.min(2, vend - vp);
                        intValue = strToLong(value.substring(vp, tmp));
                        this.minute = intValue;
                        vp = tmp;
                        timePartUsed = true;
                        break;
                    case 's':
                    case 'S':
                        // Seconds.
                        tmp = vp + Math.min(2, vend - vp);
                        intValue = strToLong(value.substring(vp, tmp));
                        this.second = intValue;
                        vp = tmp;
                        timePartUsed = true;
                        break;
                    case 'f':
                        // Fractional seconds, right-padded to microseconds.
                        tmp = vp + Math.min(6, vend - vp);
                        intValue = strToLong(value.substring(vp, tmp));
                        this.microsecond = (long) (intValue * Math.pow(10, 6 - Math.min(6, vend - vp)));
                        timePartUsed = true;
                        microSecondPartUsed = true;
                        vp = tmp;
                        break;
                    case 'p':
                        // AM/PM marker; only valid after a 12-hour specifier.
                        if ((vend - vp) < 2 || Character.toUpperCase(value.charAt(vp + 1)) != 'M' || !usaTime) {
                            throw new InvalidFormatException("Invalid %p format");
                        }
                        if (Character.toUpperCase(value.charAt(vp)) == 'P') {
                            dayPart = 12;
                        }
                        timePartUsed = true;
                        vp += 2;
                        break;
                    case 'W': {
                        // Weekday name (combined with %v/%x etc. below).
                        int nextPos = findWord(value, vp);
                        intValue = checkWord(WEEK_DAY_NAME_DICT, value.substring(vp, nextPos));
                        intValue++;
                        weekday = intValue;
                        datePartUsed = true;
                        break;
                    }
                    case 'a': {
                        int nextPos = findWord(value, vp);
                        intValue = checkWord(WEEK_DAY_NAME_DICT, value.substring(vp, nextPos));
                        intValue++;
                        weekday = intValue;
                        datePartUsed = true;
                        break;
                    }
                    case 'w':
                        // Numeric day of week (0 = Sunday, remapped to 7).
                        tmp = vp + Math.min(1, vend - vp);
                        intValue = strToLong(value.substring(vp, tmp));
                        if (intValue >= 7) {
                            throw new InvalidFormatException("invalid day of week: " + intValue);
                        }
                        if (intValue == 0) {
                            intValue = 7;
                        }
                        weekday = intValue;
                        vp = tmp;
                        datePartUsed = true;
                        break;
                    case 'j':
                        // Day of year; resolved via getDateFromDaynr() after the loop.
                        tmp = vp + Math.min(3, vend - vp);
                        intValue = strToLong(value.substring(vp, tmp));
                        yearday = intValue;
                        vp = tmp;
                        datePartUsed = true;
                        break;
                    case 'u':
                    case 'v':
                    case 'U':
                    case 'V':
                        // Week number; %U/%V are Sunday-first, %v/%V are "strict" ISO-like.
                        sundayFirst = (format.charAt(fp - 1) == 'U' || format.charAt(fp - 1) == 'V');
                        strictWeekNumber = (format.charAt(fp - 1) == 'V' || format.charAt(fp - 1) == 'v');
                        tmp = vp + Math.min(2, vend - vp);
                        intValue = Long.valueOf(value.substring(vp, tmp));
                        weekNum = intValue;
                        if (weekNum > 53 || (strictWeekNumber && weekNum == 0)) {
                            throw new InvalidFormatException("invalid num of week: " + weekNum);
                        }
                        vp = tmp;
                        datePartUsed = true;
                        break;
                    case 'x':
                    case 'X':
                        // Year to pair with the strict week number above.
                        strictWeekNumberYearType = (format.charAt(fp - 1) == 'X');
                        tmp = vp + Math.min(4, vend - vp);
                        intValue = Long.valueOf(value.substring(vp, tmp));
                        strictWeekNumberYear = intValue;
                        vp = tmp;
                        datePartUsed = true;
                        break;
                    case 'r':
                        // 12-hour time block, parsed recursively.
                        tmp = fromDateFormatStr("%I:%i:%S %p", value.substring(vp, vend), true);
                        vp = tmp;
                        timePartUsed = true;
                        break;
                    case 'T':
                        // 24-hour time block, parsed recursively.
                        tmp = fromDateFormatStr("%H:%i:%S", value.substring(vp, vend), true);
                        vp = tmp;
                        timePartUsed = true;
                        break;
                    case '.':
                        // Skip punctuation in the value.
                        while (vp < vend && Character.toString(value.charAt(vp)).matches("\\p{Punct}")) {
                            vp++;
                        }
                        break;
                    case '@':
                        // Skip letters in the value.
                        while (vp < vend && Character.isLetter(value.charAt(vp))) {
                            vp++;
                        }
                        break;
                    // NOTE(review): the case label below is garbled in this copy of the
                    // source (the specifier character is missing — upstream this skips
                    // digits, i.e. likely "case '#':"). Verify against the original file.
                    case '
                        while (vp < vend && Character.isDigit(value.charAt(vp))) {
                            vp++;
                        }
                        break;
                    case '%':
                        // Literal percent must match a percent in the value.
                        if ('%' != value.charAt(vp)) {
                            throw new InvalidFormatException("invalid char after %: " + value.charAt(vp));
                        }
                        vp++;
                        break;
                    default:
                        throw new InvalidFormatException("Invalid format pattern: " + f);
                }
            } else if (format.charAt(fp) != ' ') {
                // Non-specifier characters must match literally.
                if (format.charAt(fp) != value.charAt(vp)) {
                    throw new InvalidFormatException("Invalid char: " + value.charAt(vp) + ", expected: "
                            + format.charAt(fp));
                }
                fp++;
                vp++;
            } else {
                fp++;
            }
        }
        // Scan any unmatched tail of the format: trailing time specifiers still force
        // a datetime result type even though no value text remained for them.
        while (fp < fend) {
            f = format.charAt(fp);
            if (f == '%' && fp + 1 < fend) {
                fp++;
                f = format.charAt(fp);
                fp++;
                switch (f) {
                    case 'H':
                    case 'h':
                    case 'I':
                    case 'i':
                    case 'k':
                    case 'l':
                    case 'r':
                    case 's':
                    case 'S':
                    case 'p':
                    case 'T':
                        timePartUsed = true;
                        break;
                    default:
                        break;
                }
            } else {
                fp++;
            }
        }
        // Apply 12-hour -> 24-hour conversion (dayPart is 12 for PM).
        if (usaTime) {
            if (this.hour > 12 || this.hour < 1) {
                throw new InvalidFormatException("Invalid hour: " + hour);
            }
            this.hour = (this.hour % 12) + dayPart;
        }
        if (hasSubVal) {
            return vp;
        }
        // Resolve day-of-year into month/day.
        if (yearday > 0) {
            long days = calcDaynr(this.year, 1, 1) + yearday - 1;
            getDateFromDaynr(days);
        }
        // Resolve (week number, weekday) into month/day.
        if (weekNum >= 0 && weekday > 0) {
            if ((strictWeekNumber && (strictWeekNumberYear < 0
                    || strictWeekNumberYearType != sundayFirst))
                    || (!strictWeekNumber && strictWeekNumberYear >= 0)) {
                throw new InvalidFormatException("invalid week number");
            }
            long days = calcDaynr(strictWeekNumber ? strictWeekNumberYear : this.year, 1, 1);
            long weekdayB = calcWeekday(days, sundayFirst);
            if (sundayFirst) {
                days += ((weekdayB == 0) ? 0 : 7) - weekdayB + (weekNum - 1) * 7 + weekday % 7;
            } else {
                days += ((weekdayB <= 3) ? 0 : 7) - weekdayB + (weekNum - 1) * 7 + weekday - 1;
            }
            getDateFromDaynr(days);
        }
        // Pick the result type from which field groups were actually consumed.
        if (datePartUsed) {
            if (microSecondPartUsed) {
                this.type = Type.DATETIMEV2;
            } else if (timePartUsed) {
                this.type = ScalarType.getDefaultDateType(Type.DATETIME);
            } else {
                this.type = ScalarType.getDefaultDateType(Type.DATE);
            }
        }
        if (checkRange() || checkDate()) {
            throw new InvalidFormatException("Invalid format");
        }
        return 0;
    }
    /**
     * Variant of {@link #fromDateFormatStr(String, String, boolean)} that, for any
     * non-v1 target type, upgrades the parsed result to the v2 equivalent.
     */
    public int fromDateFormatStr(String format, String value, boolean hasSubVal, Type type)
            throws InvalidFormatException {
        switch (type.getPrimitiveType()) {
            case DATETIME:
            case DATE:
                return fromDateFormatStr(format, value, hasSubVal);
            default:
                int val = fromDateFormatStr(format, value, hasSubVal);
                convertTypeToV2();
                return val;
        }
    }
private void convertTypeToV2() {
switch (type.getPrimitiveType()) {
case DATETIME:
this.type = Type.DATETIMEV2;
break;
case DATE:
this.type = Type.DATEV2;
break;
default:
}
}
    /**
     * True when any component exceeds its maximum. Per-field comparison against
     * MAX_DATETIME is sound only because every one of its fields (9999, 12, 31,
     * 23, 59, 59) is itself the per-field maximum.
     */
    private boolean checkRange() {
        return year > MAX_DATETIME.year || month > MAX_DATETIME.month || day > MAX_DATETIME.day
                || hour > MAX_DATETIME.hour || minute > MAX_DATETIME.minute || second > MAX_DATETIME.second
                || microsecond > MAX_MICROSECOND;
    }
private boolean checkDate() {
if (month != 0 && day > DAYS_IN_MONTH[((int) month)]) {
if (month == 2 && day == 29 && Year.isLeap(year)) {
return false;
}
return true;
}
return false;
}
private long strToLong(String l) throws InvalidFormatException {
try {
long y = Long.valueOf(l);
if (y < 0) {
throw new InvalidFormatException("Invalid format: negative number.");
}
return y;
} catch (NumberFormatException e) {
throw new InvalidFormatException(e.getMessage());
}
}
    /**
     * Day number since year 0 for the given calendar date (MySQL's calc_daynr),
     * including Gregorian leap-year corrections. Returns 0 for the 0000-00 sentinel.
     */
    private long calcDaynr(long year, long month, long day) {
        long delsum = 0;
        long y = year;
        if (year == 0 && month == 0) {
            return 0;
        }
        /* Cast to int to be able to handle month == 0 */
        delsum = 365 * y + 31 * (month - 1) + day;
        if (month <= 2) {
            // Jan/Feb: leap-day correction uses the previous year.
            y--;
        } else {
            // Subtract the over-count from assuming 31-day months.
            delsum -= (month * 4 + 23) / 10;
        }
        // Add leap days: every 4th year, minus centuries, plus every 400th.
        return delsum + y / 4 - y / 100 + y / 400;
    }
    /**
     * Weekday index (0-based) of the {@link #calcDaynr}-style day number;
     * the week starts on Sunday instead of Monday when the flag is set.
     */
    private long calcWeekday(long dayNr, boolean isSundayFirstDay) {
        return (dayNr + 5L + (isSundayFirstDay ? 1L : 0L)) % 7;
    }
    /**
     * Inverse of {@link #calcDaynr}: converts a day number back into this literal's
     * year/month/day fields.
     *
     * @throws InvalidFormatException if daynr is outside (0, 3652424] (year 9999 bound)
     */
    private void getDateFromDaynr(long daynr) throws InvalidFormatException {
        if (daynr <= 0 || daynr > 3652424) {
            throw new InvalidFormatException("Invalid days to year: " + daynr);
        }
        // Start from an overestimate of the year and walk back.
        this.year = daynr / 365;
        long daysBeforeYear = 0;
        while (daynr < (daysBeforeYear = calcDaynr(this.year, 1, 1))) {
            this.year--;
        }
        long daysOfYear = daynr - daysBeforeYear + 1;
        int leapDay = 0;
        if (Year.isLeap(this.year)) {
            if (daysOfYear > 31 + 28) {
                // Compensate for Feb 29 so the fixed DAYS_IN_MONTH table can be used.
                daysOfYear--;
                if (daysOfYear == 31 + 28) {
                    leapDay = 1;
                }
            }
        }
        this.month = 1;
        while (daysOfYear > DAYS_IN_MONTH[(int) this.month]) {
            daysOfYear -= DAYS_IN_MONTH[(int) this.month];
            this.month++;
        }
        this.day = daysOfYear + leapDay;
    }
private int findWord(String value, int start) {
int p = start;
while (p < value.length() && Character.isLetter(value.charAt(p))) {
p++;
}
return p;
}
private int findNumber(String value, int start, int maxLen) {
int p = start;
int left = maxLen;
while (p < value.length() && Character.isDigit(value.charAt(p)) && left > 0) {
p++;
left--;
}
return p;
}
private int checkWord(Map<String, Integer> dict, String value) throws InvalidFormatException {
Integer i = dict.get(value.toLowerCase());
if (i != null) {
return i;
}
throw new InvalidFormatException("'" + value + "' is invalid");
}
public void fromDateStr(String dateStr) throws AnalysisException {
dateStr = dateStr.trim();
if (dateStr.isEmpty()) {
throw new AnalysisException("parse datetime value failed: " + dateStr);
}
int[] dateVal = new int[MAX_DATE_PARTS];
int[] dateLen = new int[MAX_DATE_PARTS];
int pre = 0;
int pos = 0;
while (pos < dateStr.length() && (Character.isDigit(dateStr.charAt(pos)) || dateStr.charAt(pos) == 'T')) {
pos++;
}
int yearLen = 4;
int digits = pos - pre;
boolean isIntervalFormat = false;
if (pos == dateStr.length() || dateStr.charAt(pos) == '.') {
if (digits == 4 || digits == 8 || digits >= 14) {
yearLen = 4;
} else {
yearLen = 2;
}
isIntervalFormat = true;
}
int fieldIdx = 0;
int fieldLen = yearLen;
while (pre < dateStr.length() && Character.isDigit(dateStr.charAt(pre)) && fieldIdx < MAX_DATE_PARTS - 1) {
int start = pre;
int tempVal = 0;
boolean scanToDelim = (!isIntervalFormat) && (fieldIdx != 6);
while (pre < dateStr.length() && Character.isDigit(dateStr.charAt(pre))
&& (scanToDelim || fieldLen-- != 0)) {
tempVal = tempVal * 10 + (dateStr.charAt(pre++) - '0');
}
dateVal[fieldIdx] = tempVal;
dateLen[fieldIdx] = pre - start;
fieldLen = 2;
if (pre == dateStr.length()) {
fieldIdx++;
break;
}
if (fieldIdx == 2 && dateStr.charAt(pre) == 'T') {
pre++;
fieldIdx++;
continue;
}
if (fieldIdx == 5) {
if (dateStr.charAt(pre) == '.') {
pre++;
fieldLen = 6;
} else if (Character.isDigit(dateStr.charAt(pre))) {
fieldIdx++;
break;
}
fieldIdx++;
continue;
}
while (pre < dateStr.length() && (Character.toString(dateStr.charAt(pre)).matches("\\p{Punct}"))
|| Character.isSpaceChar(dateStr.charAt(pre))) {
if (Character.isSpaceChar(dateStr.charAt(pre))) {
if (((1 << fieldIdx) & ALLOW_SPACE_MASK) == 0) {
throw new AnalysisException("parse datetime value failed: " + dateStr);
}
}
pre++;
}
fieldIdx++;
}
int numField = fieldIdx;
if (!isIntervalFormat) {
yearLen = dateLen[0];
}
for (; fieldIdx < MAX_DATE_PARTS; ++fieldIdx) {
dateLen[fieldIdx] = 0;
dateVal[fieldIdx] = 0;
}
if (yearLen == 2) {
if (dateVal[0] < YY_PART_YEAR) {
dateVal[0] += 2000;
} else {
dateVal[0] += 1900;
}
}
if (numField < 3) {
throw new AnalysisException("parse datetime value failed: " + dateStr);
}
year = dateVal[0];
month = dateVal[1];
day = dateVal[2];
hour = dateVal[3];
minute = dateVal[4];
second = dateVal[5];
microsecond = dateVal[6];
if (numField == 3) {
type = ScalarType.getDefaultDateType(Type.DATE);
} else {
type = ScalarType.getDefaultDateType(Type.DATETIME);
}
if (checkRange() || checkDate()) {
throw new AnalysisException("Datetime value is out of range: " + dateStr);
}
}
} | class DateLiteral extends LiteralExpr {
private static final Logger LOG = LogManager.getLogger(DateLiteral.class);
// Sentinel min/max literals used for range checks and for MIN/MAX placeholder values.
private static final DateLiteral MIN_DATE = new DateLiteral(0000, 1, 1);
private static final DateLiteral MAX_DATE = new DateLiteral(9999, 12, 31);
private static final DateLiteral MIN_DATETIME = new DateLiteral(0000, 1, 1, 0, 0, 0);
private static final DateLiteral MAX_DATETIME = new DateLiteral(9999, 12, 31, 23, 59, 59);
private static final DateLiteral MIN_DATETIMEV2
        = new DateLiteral(0000, 1, 1, 0, 0, 0, 0);
private static final DateLiteral MAX_DATETIMEV2
        = new DateLiteral(9999, 12, 31, 23, 59, 59, 999999L);
// Lengths of the compact "datekey" forms: yyyymmdd and yyyymmddHHmmss.
private static final int DATEKEY_LENGTH = 8;
private static final int DATETIMEKEY_LENGTH = 14;
private static final int MAX_MICROSECOND = 999999;
// Formatters are initialized once in the static block below (null until then).
private static DateTimeFormatter DATE_TIME_FORMATTER = null;
private static DateTimeFormatter DATE_TIME_FORMATTER_TO_MICRO_SECOND = null;
private static DateTimeFormatter DATE_FORMATTER = null;
private static List<DateTimeFormatter> formatterList = null;
/*
 * The datekey type is widely used in data warehouses
 * For example, 20121229 means '2012-12-29'
 * and data in the form of 'yyyymmdd' is generally called the datekey type.
 */
private static DateTimeFormatter DATEKEY_FORMATTER = null;
private static DateTimeFormatter DATETIMEKEY_FORMATTER = null;
// Lookup tables for textual month/weekday names used by %M/%b/%W/%a parsing.
private static Map<String, Integer> MONTH_NAME_DICT = Maps.newHashMap();
private static Map<String, Integer> MONTH_ABBR_NAME_DICT = Maps.newHashMap();
private static Map<String, Integer> WEEK_DAY_NAME_DICT = Maps.newHashMap();
// Index 0 is a pad so DAYS_IN_MONTH[month] works with 1-based months (Feb handled separately for leap years).
private static final int[] DAYS_IN_MONTH = new int[] {0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
// Bit mask of field indexes (bit 2 = day, bit 6 = microsecond) after which a space separator is tolerated.
private static final int ALLOW_SPACE_MASK = 4 | 64;
private static final int MAX_DATE_PARTS = 8;
// Two-digit years < 70 are interpreted as 20xx, >= 70 as 19xx.
private static final int YY_PART_YEAR = 70;
// One-time initialization of the shared formatters and the name-lookup dictionaries.
// Formatter construction can only fail on a programming error in the pattern
// strings, in which case the process aborts (these formatters are required by
// every date parse).
static {
    try {
        DATE_TIME_FORMATTER = formatBuilder("%Y-%m-%d %H:%i:%s").toFormatter();
        DATE_FORMATTER = formatBuilder("%Y-%m-%d").toFormatter();
        DATEKEY_FORMATTER = formatBuilder("%Y%m%d").toFormatter();
        DATETIMEKEY_FORMATTER = formatBuilder("%Y%m%d%H%i%s").toFormatter();
        DATE_TIME_FORMATTER_TO_MICRO_SECOND = new DateTimeFormatterBuilder()
                .appendPattern("uuuu-MM-dd HH:mm:ss")
                .appendFraction(ChronoField.MICRO_OF_SECOND, 0, 6, true)
                .toFormatter();
        // Accepted compact datetime shapes, most specific first.
        formatterList = Lists.newArrayList(
                formatBuilder("%Y%m%d").appendLiteral('T').appendPattern("HHmmss")
                        .appendFraction(ChronoField.MICRO_OF_SECOND, 0, 6, true).toFormatter(),
                formatBuilder("%Y%m%d").appendLiteral('T').appendPattern("HHmmss")
                        .appendFraction(ChronoField.MICRO_OF_SECOND, 0, 6, false).toFormatter(),
                formatBuilder("%Y%m%d%H%i%s")
                        .appendFraction(ChronoField.MICRO_OF_SECOND, 0, 6, true).toFormatter(),
                formatBuilder("%Y%m%d%H%i%s")
                        .appendFraction(ChronoField.MICRO_OF_SECOND, 0, 6, false).toFormatter(),
                DATETIMEKEY_FORMATTER, DATEKEY_FORMATTER);
    } catch (AnalysisException e) {
        LOG.error("invalid date format", e);
        System.exit(-1);
    }
    MONTH_NAME_DICT.put("january", 1);
    MONTH_NAME_DICT.put("february", 2);
    MONTH_NAME_DICT.put("march", 3);
    MONTH_NAME_DICT.put("april", 4);
    MONTH_NAME_DICT.put("may", 5);
    MONTH_NAME_DICT.put("june", 6);
    MONTH_NAME_DICT.put("july", 7);
    MONTH_NAME_DICT.put("august", 8);
    MONTH_NAME_DICT.put("september", 9);
    MONTH_NAME_DICT.put("october", 10);
    MONTH_NAME_DICT.put("november", 11);
    MONTH_NAME_DICT.put("december", 12);
    MONTH_ABBR_NAME_DICT.put("jan", 1);
    MONTH_ABBR_NAME_DICT.put("feb", 2);
    MONTH_ABBR_NAME_DICT.put("mar", 3);
    MONTH_ABBR_NAME_DICT.put("apr", 4);
    MONTH_ABBR_NAME_DICT.put("may", 5);
    MONTH_ABBR_NAME_DICT.put("jun", 6);
    MONTH_ABBR_NAME_DICT.put("jul", 7);
    MONTH_ABBR_NAME_DICT.put("aug", 8);
    MONTH_ABBR_NAME_DICT.put("sep", 9);
    MONTH_ABBR_NAME_DICT.put("oct", 10);
    MONTH_ABBR_NAME_DICT.put("nov", 11);
    MONTH_ABBR_NAME_DICT.put("dec", 12);
    WEEK_DAY_NAME_DICT.put("monday", 0);
    WEEK_DAY_NAME_DICT.put("tuesday", 1);
    WEEK_DAY_NAME_DICT.put("wednesday", 2);
    WEEK_DAY_NAME_DICT.put("thursday", 3);
    WEEK_DAY_NAME_DICT.put("friday", 4);
    WEEK_DAY_NAME_DICT.put("saturday", 5);
    WEEK_DAY_NAME_DICT.put("sunday", 6);
    // BUG FIX: abbreviated weekday names were previously inserted into
    // MONTH_ABBR_NAME_DICT, so %a lookups against WEEK_DAY_NAME_DICT always
    // failed, and %b would misinterpret "mon".."sun" as (invalid) month 0..6.
    WEEK_DAY_NAME_DICT.put("mon", 0);
    WEEK_DAY_NAME_DICT.put("tue", 1);
    WEEK_DAY_NAME_DICT.put("wed", 2);
    WEEK_DAY_NAME_DICT.put("thu", 3);
    WEEK_DAY_NAME_DICT.put("fri", 4);
    WEEK_DAY_NAME_DICT.put("sat", 5);
    WEEK_DAY_NAME_DICT.put("sun", 6);
}
// Matches any format string containing at least one time-related specifier
// (hour, minute, second, or AM/PM); see hasTimePart().
private static final Pattern HAS_TIME_PART = Pattern.compile("^.*[HhIiklrSsTp]+.*$");
// Discriminator tag written by write() and read back by readFields() so the
// concrete date type survives serialization.
private enum DateLiteralType {
    DATETIME(0),
    DATE(1),
    DATETIMEV2(2),
    DATEV2(3);
    private final int value;
    DateLiteralType(int value) {
        this.value = value;
    }
    public int value() {
        return value;
    }
}
// Default constructor, used by read()/readFields() deserialization.
public DateLiteral() {
    super();
}
// Builds the minimum or maximum literal of the given date type.
public DateLiteral(Type type, boolean isMax) throws AnalysisException {
    super();
    this.type = type;
    if (type.equals(Type.DATE) || type.equals(Type.DATEV2)) {
        if (isMax) {
            copy(MAX_DATE);
        } else {
            copy(MIN_DATE);
        }
    } else if (type.equals(Type.DATETIME)) {
        if (isMax) {
            copy(MAX_DATETIME);
        } else {
            copy(MIN_DATETIME);
        }
    } else {
        if (isMax) {
            copy(MAX_DATETIMEV2);
        } else {
            copy(MIN_DATETIMEV2);
        }
    }
    analysisDone();
}
// Parses a date/datetime string into a literal of the given type.
public DateLiteral(String s, Type type) throws AnalysisException {
    super();
    init(s, type);
    analysisDone();
}
// Builds a literal from a unix timestamp (milliseconds) interpreted in the
// given time zone; time fields are zeroed for date-only types.
public DateLiteral(long unixTimestamp, TimeZone timeZone, Type type) throws AnalysisException {
    Timestamp timestamp = new Timestamp(unixTimestamp);
    ZonedDateTime zonedDateTime = ZonedDateTime.ofInstant(timestamp.toInstant(), ZoneId.of(timeZone.getID()));
    year = zonedDateTime.getYear();
    month = zonedDateTime.getMonthValue();
    day = zonedDateTime.getDayOfMonth();
    hour = zonedDateTime.getHour();
    minute = zonedDateTime.getMinute();
    second = zonedDateTime.getSecond();
    microsecond = zonedDateTime.get(ChronoField.MICRO_OF_SECOND);
    if (type.equals(Type.DATE)) {
        hour = 0;
        minute = 0;
        second = 0;
        microsecond = 0;
        this.type = Type.DATE;
    } else if (type.equals(Type.DATETIME)) {
        this.type = Type.DATETIME;
        // DATETIME (v1) has second precision only.
        microsecond = 0;
    } else if (type.equals(Type.DATEV2)) {
        hour = 0;
        minute = 0;
        second = 0;
        microsecond = 0;
        this.type = Type.DATEV2;
    } else if (type.equals(Type.DATETIMEV2)) {
        this.type = Type.DATETIMEV2;
    } else {
        throw new AnalysisException("Error date literal type : " + type);
    }
}
// Date-only literal; concrete type (DATE vs DATEV2) follows the session default.
public DateLiteral(long year, long month, long day) {
    this.hour = 0;
    this.minute = 0;
    this.second = 0;
    this.year = year;
    this.month = month;
    this.day = day;
    this.type = ScalarType.getDefaultDateType(Type.DATE);
}
// Date-only literal with an explicit DATE/DATEV2 type.
public DateLiteral(long year, long month, long day, Type type) {
    this.year = year;
    this.month = month;
    this.day = day;
    Preconditions.checkArgument(type.getPrimitiveType().equals(Type.DATE.getPrimitiveType())
            || type.getPrimitiveType().equals(Type.DATEV2.getPrimitiveType()));
    this.type = type;
}
// Datetime literal (second precision); type follows the session default.
public DateLiteral(long year, long month, long day, long hour, long minute, long second) {
    this.hour = hour;
    this.minute = minute;
    this.second = second;
    this.year = year;
    this.month = month;
    this.day = day;
    this.type = ScalarType.getDefaultDateType(Type.DATETIME);
}
// DATETIMEV2 literal with microsecond precision.
public DateLiteral(long year, long month, long day, long hour, long minute, long second, long microsecond) {
    this.hour = hour;
    this.minute = minute;
    this.second = second;
    this.year = year;
    this.month = month;
    this.day = day;
    this.microsecond = microsecond;
    this.type = Type.DATETIMEV2;
}
// Datetime literal with an explicit DATETIME/DATETIMEV2 type (microsecond stays 0).
public DateLiteral(long year, long month, long day, long hour, long minute, long second, Type type) {
    this.hour = hour;
    this.minute = minute;
    this.second = second;
    this.year = year;
    this.month = month;
    this.day = day;
    Preconditions.checkArgument(type.getPrimitiveType().equals(Type.DATETIME.getPrimitiveType())
            || type.getPrimitiveType().equals(Type.DATETIMEV2.getPrimitiveType()));
    this.type = type;
}
// Builds a literal from a java.time LocalDateTime; time fields are copied only
// for datetime types.
public DateLiteral(LocalDateTime dateTime, Type type) {
    this.year = dateTime.getYear();
    this.month = dateTime.getMonthValue();
    this.day = dateTime.getDayOfMonth();
    this.type = type;
    if (type.equals(Type.DATETIME) || type.equals(Type.DATETIMEV2)) {
        this.hour = dateTime.getHour();
        this.minute = dateTime.getMinute();
        this.second = dateTime.getSecond();
        this.microsecond = dateTime.get(ChronoField.MICRO_OF_SECOND);
    }
}
// Copy constructor used by clone().
public DateLiteral(DateLiteral other) {
    super(other);
    hour = other.hour;
    minute = other.minute;
    second = other.second;
    year = other.year;
    month = other.month;
    day = other.day;
    microsecond = other.microsecond;
    type = other.type;
}
// Factory for the minimum literal of a date type.
public static DateLiteral createMinValue(Type type) throws AnalysisException {
    return new DateLiteral(type, false);
}
// Copies all date/time fields and the type from another literal.
private void copy(DateLiteral other) {
    hour = other.hour;
    minute = other.minute;
    second = other.second;
    year = other.year;
    month = other.month;
    day = other.day;
    microsecond = other.microsecond;
    type = other.type;
}
@Override
public Expr clone() {
    return new DateLiteral(this);
}
// Reports whether this literal equals the minimum representable value of its
// type; unknown types answer false.
@Override
public boolean isMinValue() {
    PrimitiveType primitiveType = type.getPrimitiveType();
    if (primitiveType == PrimitiveType.DATE || primitiveType == PrimitiveType.DATEV2) {
        return this.getStringValue().compareTo(MIN_DATE.getStringValue()) == 0;
    }
    if (primitiveType == PrimitiveType.DATETIME) {
        return this.getStringValue().compareTo(MIN_DATETIME.getStringValue()) == 0;
    }
    if (primitiveType == PrimitiveType.DATETIMEV2) {
        return this.getStringValue().compareTo(MIN_DATETIMEV2.getStringValue()) == 0;
    }
    return false;
}
// Returns the type-specific packed numeric encoding of this literal.
@Override
public Object getRealValue() {
    if (type.equals(Type.DATE)) {
        // Equivalent to (year << 9) | (month << 5) | day for valid month/day ranges.
        return year * 16 * 32L + month * 32 + day;
    } else if (type.equals(Type.DATETIME)) {
        // Decimal packing: yyyyMMddHHmmss as a single long.
        return (year * 10000 + month * 100 + day) * 1000000L + hour * 10000 + minute * 100 + second;
    } else if (type.equals(Type.DATEV2)) {
        return (year << 9) | (month << 5) | day;
    } else if (type.equals(Type.DATETIMEV2)) {
        // Bit layout: year:14 | month:4 | day:5 | hour:5 | minute:6 | second:6 | microsecond:24.
        return (year << 50) | (month << 46) | (day << 41) | (hour << 36)
                | (minute << 30) | (second << 24) | microsecond;
    } else {
        Preconditions.checkState(false, "invalid date type: " + type);
        return -1L;
    }
}
// Returns the bytes of the string form of this literal (rendered for the given
// primitive type) for hash/bucketing purposes.
@Override
public ByteBuffer getHashValue(PrimitiveType type) {
    String value = convertToString(type);
    // StandardCharsets.UTF_8 cannot throw UnsupportedEncodingException, unlike
    // the String-named getBytes("UTF-8") overload this replaces, so the
    // exception-wrapping try/catch is no longer needed. Bytes are identical.
    return ByteBuffer.wrap(value.getBytes(java.nio.charset.StandardCharsets.UTF_8));
}
// Orders literals by their packed long value; NULL sorts first, MAX_VALUE last.
@Override
public int compareLiteral(LiteralExpr expr) {
    if (expr instanceof NullLiteral) {
        return 1;
    }
    if (expr == MaxLiteral.MAX_VALUE) {
        return -1;
    }
    // Long.compare is overflow-safe, unlike signum(a - b) on raw subtraction,
    // and yields the same {-1, 0, 1} sign for all in-range date values.
    return Long.compare(getLongValue(), expr.getLongValue());
}
// SQL rendering: the string value wrapped in single quotes.
@Override
public String toSqlImpl() {
    return "'" + getStringValue() + "'";
}
// Canonical string form: "yyyy-MM-dd" for date types, "yyyy-MM-dd HH:mm:ss"
// for datetimes, with a ".ffffff" suffix only when a DATETIMEV2 literal has a
// non-zero microsecond part.
@Override
public String getStringValue() {
    if (type.isDatetimeV2()) {
        String base = String.format("%04d-%02d-%02d %02d:%02d:%02d",
                year, month, day, hour, minute, second);
        return microsecond == 0 ? base : base + String.format(".%06d", microsecond);
    }
    if (type.isDate() || type.isDateV2()) {
        return String.format("%04d-%02d-%02d", year, month, day);
    }
    return String.format("%04d-%02d-%02d %02d:%02d:%02d", year, month, day, hour, minute, second);
}
// Rounds the microsecond part UP to the given DATETIMEV2 scale (0..6).
public void roundCeiling(int newScale) {
    Preconditions.checkArgument(type.isDatetimeV2());
    long scaleFactor = (long) Math.pow(10, 6 - newScale);
    long remainder = microsecond % scaleFactor;
    if (remainder != 0) {
        // BUG FIX: the previous double-arithmetic version computed
        // (micro + scaleFactor) without re-truncating to the scale, e.g.
        // 123456 at scale 3 became 124456 instead of 124000.
        microsecond = microsecond - remainder + scaleFactor;
    }
    // NOTE(review): like the original, a carry past 999999 microseconds into
    // the seconds field is not handled here — confirm callers never need it.
    type = ScalarType.createDatetimeV2Type(newScale);
}
// Truncates the microsecond part DOWN to the given DATETIMEV2 scale (0..6).
public void roundFloor(int newScale) {
    // BUG FIX: the previous version divided and multiplied in double
    // arithmetic, which preserves the fractional part and made the "floor" a
    // no-op (123456 / 1000.0 * 1000.0 == 123456). Integer division truncates.
    long scaleFactor = (long) Math.pow(10, 6 - newScale);
    microsecond = microsecond / scaleFactor * scaleFactor;
    type = ScalarType.createDatetimeV2Type(newScale);
}
// Renders this literal for an arbitrary target primitive type (used by
// getHashValue); mirrors getStringValue but keyed on the argument rather than
// this literal's own type.
private String convertToString(PrimitiveType type) {
    if (type == PrimitiveType.DATE || type == PrimitiveType.DATEV2) {
        return String.format("%04d-%02d-%02d", year, month, day);
    } else if (type == PrimitiveType.DATETIMEV2) {
        String tmp = String.format("%04d-%02d-%02d %02d:%02d:%02d",
                year, month, day, hour, minute, second);
        if (microsecond == 0) {
            return tmp;
        }
        return tmp + String.format(".%06d", microsecond);
    } else {
        return String.format("%04d-%02d-%02d %02d:%02d:%02d", year, month, day, hour, minute, second);
    }
}
// Decimal packing yyyyMMddHHmmss; microseconds are not represented.
@Override
public long getLongValue() {
    return (year * 10000 + month * 100 + day) * 1000000L + hour * 10000 + minute * 100 + second;
}
@Override
public double getDoubleValue() {
    return getLongValue();
}
// Serializes this literal into a thrift expression node for the backend.
@Override
protected void toThrift(TExprNode msg) {
    msg.node_type = TExprNodeType.DATE_LITERAL;
    msg.date_literal = new TDateLiteral(getStringValue());
}
// Casts this literal to another date, string, or implicitly-castable type.
// Date-to-date casts rebuild the literal with the target type, truncating
// fields the target cannot represent.
@Override
protected Expr uncheckedCastTo(Type targetType) throws AnalysisException {
    if (targetType.isDateType()) {
        if (type.equals(targetType)) {
            return this;
        }
        if (targetType.equals(Type.DATE) || targetType.equals(Type.DATEV2)) {
            return new DateLiteral(this.year, this.month, this.day, targetType);
        } else if (targetType.equals(Type.DATETIME)) {
            return new DateLiteral(this.year, this.month, this.day, this.hour, this.minute, this.second,
                    targetType);
        } else if (targetType.isDatetimeV2()) {
            // BUG FIX: the previous code passed (hour, minute, microsecond) to
            // the (y, m, d, H, i, s, Type) constructor, so `second` was
            // overwritten by the microsecond value and the real seconds and
            // microseconds were lost. Build with the correct seconds, then
            // carry the microsecond part over explicitly.
            DateLiteral literal = new DateLiteral(this.year, this.month, this.day, this.hour, this.minute,
                    this.second, targetType);
            literal.microsecond = this.microsecond;
            return literal;
        } else {
            // NOTE(review): message reports the source type, as before —
            // arguably it should name targetType; kept for compatibility.
            throw new AnalysisException("Error date literal type : " + type);
        }
    } else if (targetType.isStringType()) {
        return new StringLiteral(getStringValue());
    } else if (Type.isImplicitlyCastable(this.type, targetType, true)) {
        return new CastExpr(targetType, this);
    }
    Preconditions.checkState(false);
    return this;
}
// In-place demotion to a date-only type (DATEV2 when the v2 conversion config
// is on, legacy DATE otherwise); zeroes the time-of-day fields.
// NOTE(review): microsecond is not reset here, unlike the DATE branches of the
// unix-timestamp constructor — confirm whether that is intentional.
public void castToDate() {
    if (Config.enable_date_conversion) {
        this.type = Type.DATEV2;
    } else {
        this.type = Type.DATE;
    }
    hour = 0;
    minute = 0;
    second = 0;
}
// Packs the v1 on-disk datetime layout: ((year*13 + month) << 5 | day) << 17
// | (hour << 12 | minute << 6 | second), all shifted left 24 bits with the
// microseconds in the low 24 bits. Inverse of fromPackedDatetime().
private long makePackedDatetime() {
    long ymd = ((year * 13 + month) << 5) | day;
    long hms = (hour << 12) | (minute << 6) | second;
    // BUG FIX: "<< 24 + microsecond" parsed as "<< (24 + microsecond)" because
    // '+' binds tighter than '<<' in Java; the microseconds were never added
    // and a non-zero value corrupted the shift amount. Parenthesize the shift.
    long packedDatetime = (((ymd << 17) | hms) << 24) + microsecond;
    return packedDatetime;
}
// Packs the v2 layout: year:14 | month:4 | day:5 | hour:5 | minute:6 |
// second:6 | microsecond:24 (same encoding returned by getRealValue for
// DATETIMEV2).
private long makePackedDatetimeV2() {
    return (year << 50) | (month << 46) | (day << 41) | (hour << 36)
            | (minute << 30) | (second << 24) | microsecond;
}
// Serializes a type tag (DateLiteralType) followed by the packed long; the
// DATETIMEV2 case additionally writes its scale.
@Override
public void write(DataOutput out) throws IOException {
    super.write(out);
    if (this.type.equals(Type.DATETIME)) {
        out.writeShort(DateLiteralType.DATETIME.value());
        out.writeLong(makePackedDatetime());
    } else if (this.type.equals(Type.DATE)) {
        out.writeShort(DateLiteralType.DATE.value());
        out.writeLong(makePackedDatetime());
    } else if (this.type.getPrimitiveType() == PrimitiveType.DATETIMEV2) {
        out.writeShort(DateLiteralType.DATETIMEV2.value());
        out.writeLong(makePackedDatetimeV2());
        out.writeInt(((ScalarType) this.type).getScalarScale());
    } else if (this.type.equals(Type.DATEV2)) {
        out.writeShort(DateLiteralType.DATEV2.value());
        out.writeLong(makePackedDatetimeV2());
    } else {
        throw new IOException("Error date literal type : " + type);
    }
}
// Unpacks the v1 layout written by makePackedDatetime; the caller
// (readFields) overwrites the type afterwards based on the serialized tag.
// NOTE(review): DATETIMEV2/DATEV2 payloads written by makePackedDatetimeV2
// also pass through this v1 decoder — confirm this round-trips correctly.
private void fromPackedDatetime(long packedTime) {
    microsecond = (packedTime % (1L << 24));
    long ymdhms = (packedTime >> 24);
    long ymd = ymdhms >> 17;
    day = ymd % (1 << 5);
    long ym = ymd >> 5;
    month = ym % 13;
    year = ym / 13;
    year %= 10000;
    long hms = ymdhms % (1 << 17);
    second = hms % (1 << 6);
    minute = (hms >> 6) % (1 << 6);
    hour = (hms >> 12);
    this.type = Type.DATETIME;
}
// Deserializes the tag + packed long written by write(), restoring the type.
public void readFields(DataInput in) throws IOException {
    super.readFields(in);
    short dateLiteralType = in.readShort();
    fromPackedDatetime(in.readLong());
    if (dateLiteralType == DateLiteralType.DATETIME.value()) {
        this.type = Type.DATETIME;
    } else if (dateLiteralType == DateLiteralType.DATE.value()) {
        this.type = Type.DATE;
    } else if (dateLiteralType == DateLiteralType.DATETIMEV2.value()) {
        // DATETIMEV2 carries its scale as a trailing int.
        this.type = ScalarType.createDatetimeV2Type(in.readInt());
    } else if (dateLiteralType == DateLiteralType.DATEV2.value()) {
        this.type = Type.DATEV2;
    } else {
        throw new IOException("Error date literal type : " + type);
    }
}
// Deserialization factory.
public static DateLiteral read(DataInput in) throws IOException {
    DateLiteral literal = new DateLiteral();
    literal.readFields(in);
    return literal;
}
// Milliseconds since the epoch of this literal interpreted in the given zone.
// NOTE(review): `microsecond` is passed as the nano-of-second argument of
// ZonedDateTime.of (not multiplied by 1000); sub-millisecond precision is
// dropped by getTime() anyway, but confirm the intent.
public long unixTimestamp(TimeZone timeZone) {
    ZonedDateTime zonedDateTime = ZonedDateTime.of((int) year, (int) month, (int) day, (int) hour,
            (int) minute, (int) second, (int) microsecond, ZoneId.of(timeZone.getID()));
    Timestamp timestamp = Timestamp.from(zonedDateTime.toInstant());
    return timestamp.getTime();
}
// True when a MySQL-style format string contains any time specifier.
public static boolean hasTimePart(String format) {
    return HAS_TIME_PART.matcher(format).matches();
}
// Formats this literal with a MySQL-style pattern (see formatBuilder for the
// supported specifiers) by re-parsing its canonical string form first.
public String dateFormat(String pattern) throws AnalysisException {
    TemporalAccessor accessor;
    if (type.equals(Type.DATE) || type.equals(Type.DATEV2)) {
        accessor = DATE_FORMATTER.parse(getStringValue());
    } else if (type.isDatetimeV2()) {
        accessor = DATE_TIME_FORMATTER_TO_MICRO_SECOND.parse(getStringValue());
    } else {
        accessor = DATE_TIME_FORMATTER.parse(getStringValue());
    }
    DateTimeFormatter toFormatter = formatBuilder(pattern).toFormatter();
    return toFormatter.format(accessor);
}
// Translates a MySQL DATE_FORMAT-style pattern ("%Y-%m-%d" ...) into a
// java.time DateTimeFormatterBuilder. Unsupported specifiers (%f, %w, %U, %u,
// %V, %X, %D) raise AnalysisException; non-specifier characters are emitted
// as literals.
private static DateTimeFormatterBuilder formatBuilder(String pattern) throws AnalysisException {
    DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder();
    boolean escaped = false;
    for (int i = 0; i < pattern.length(); i++) {
        char character = pattern.charAt(i);
        if (escaped) {
            switch (character) {
                case 'a':
                    builder.appendText(ChronoField.DAY_OF_WEEK, TextStyle.SHORT);
                    break;
                case 'b':
                    builder.appendText(ChronoField.MONTH_OF_YEAR, TextStyle.SHORT);
                    break;
                case 'c':
                    builder.appendValue(ChronoField.MONTH_OF_YEAR);
                    break;
                case 'd':
                    builder.appendValue(ChronoField.DAY_OF_MONTH, 2);
                    break;
                case 'e':
                    builder.appendValue(ChronoField.DAY_OF_MONTH);
                    break;
                case 'H':
                    builder.appendValue(ChronoField.HOUR_OF_DAY, 2);
                    break;
                case 'h':
                case 'I':
                    builder.appendValue(ChronoField.HOUR_OF_AMPM, 2);
                    break;
                case 'i':
                    builder.appendValue(ChronoField.MINUTE_OF_HOUR, 2);
                    break;
                case 'j':
                    builder.appendValue(ChronoField.DAY_OF_YEAR, 3);
                    break;
                case 'k':
                    builder.appendValue(ChronoField.HOUR_OF_DAY);
                    break;
                case 'l':
                    builder.appendValue(ChronoField.HOUR_OF_AMPM);
                    break;
                case 'M':
                    builder.appendText(ChronoField.MONTH_OF_YEAR, TextStyle.FULL);
                    break;
                case 'm':
                    builder.appendValue(ChronoField.MONTH_OF_YEAR, 2);
                    break;
                case 'p':
                    builder.appendText(ChronoField.AMPM_OF_DAY);
                    break;
                case 'r':
                    // 12-hour clock "hh:mm:ss AM/PM". The trailing
                    // toFormatter() result is discarded; the builder itself
                    // has already been mutated, so this is harmless.
                    builder.appendValue(ChronoField.HOUR_OF_AMPM, 2)
                            .appendPattern(":mm:ss ")
                            .appendText(ChronoField.AMPM_OF_DAY, TextStyle.FULL)
                            .toFormatter();
                    break;
                case 'S':
                case 's':
                    builder.appendValue(ChronoField.SECOND_OF_MINUTE, 2);
                    break;
                case 'T':
                    builder.appendPattern("HH:mm:ss");
                    break;
                case 'v':
                    builder.appendValue(ChronoField.ALIGNED_WEEK_OF_YEAR, 2);
                    break;
                case 'x':
                case 'Y':
                    builder.appendValue(ChronoField.YEAR, 4);
                    break;
                case 'W':
                    builder.appendText(ChronoField.DAY_OF_WEEK, TextStyle.FULL);
                    break;
                case 'y':
                    // Two-digit year, pivoting at 1970.
                    builder.appendValueReduced(ChronoField.YEAR, 2, 2, 1970);
                    break;
                case 'f':
                case 'w':
                case 'U':
                case 'u':
                case 'V':
                case 'X':
                case 'D':
                    throw new AnalysisException(String.format("%%%s not supported in date format string",
                            character));
                case '%':
                    builder.appendLiteral('%');
                    break;
                default:
                    builder.appendLiteral(character);
                    break;
            }
            escaped = false;
        } else if (character == '%') {
            escaped = true;
        } else {
            builder.appendLiteral(character);
        }
    }
    return builder;
}
// Reads a field from the accessor, falling back to a default when the parsed
// pattern did not populate it (e.g. time fields for a date-only literal).
private int getOrDefault(final TemporalAccessor accessor, final ChronoField field,
        final int defaultValue) {
    return accessor.isSupported(field) ? accessor.get(field) : defaultValue;
}
// Rebuilds this literal as a LocalDateTime by round-tripping through its
// canonical string form; missing time fields default to 0.
public LocalDateTime getTimeFormatter() {
    TemporalAccessor accessor;
    if (type.equals(Type.DATE) || type.equals(Type.DATEV2)) {
        accessor = DATE_FORMATTER.parse(getStringValue());
    } else if (type.isDatetimeV2()) {
        accessor = DATE_TIME_FORMATTER_TO_MICRO_SECOND.parse(getStringValue());
    } else {
        accessor = DATE_TIME_FORMATTER.parse(getStringValue());
    }
    final int year = accessor.get(ChronoField.YEAR);
    final int month = accessor.get(ChronoField.MONTH_OF_YEAR);
    final int dayOfMonth = accessor.get(ChronoField.DAY_OF_MONTH);
    final int hour = getOrDefault(accessor, ChronoField.HOUR_OF_DAY, 0);
    final int minute = getOrDefault(accessor, ChronoField.MINUTE_OF_HOUR, 0);
    final int second = getOrDefault(accessor, ChronoField.SECOND_OF_MINUTE, 0);
    // NOTE(review): MICRO_OF_SECOND is passed as LocalDateTime's nano-of-second
    // argument without scaling by 1000 — confirm downstream use.
    final int microSeconds = getOrDefault(accessor, ChronoField.MICRO_OF_SECOND, 0);
    return LocalDateTime.of(year, month, dayOfMonth, hour, minute, second, microSeconds);
}
// Calendar arithmetic helpers; each returns a NEW literal of the same type,
// delegating overflow/leap handling to java.time.
public DateLiteral plusYears(int year) throws AnalysisException {
    return new DateLiteral(getTimeFormatter().plusYears(year), type);
}
public DateLiteral plusMonths(int month) throws AnalysisException {
    return new DateLiteral(getTimeFormatter().plusMonths(month), type);
}
public DateLiteral plusDays(int day) throws AnalysisException {
    return new DateLiteral(getTimeFormatter().plusDays(day), type);
}
public DateLiteral plusHours(int hour) throws AnalysisException {
    return new DateLiteral(getTimeFormatter().plusHours(hour), type);
}
public DateLiteral plusMinutes(int minute) throws AnalysisException {
    return new DateLiteral(getTimeFormatter().plusMinutes(minute), type);
}
public DateLiteral plusSeconds(int second) throws AnalysisException {
    return new DateLiteral(getTimeFormatter().plusSeconds(second), type);
}
// Plain field accessors.
public long getYear() {
    return year;
}
public long getMonth() {
    return month;
}
public long getDay() {
    return day;
}
public long getHour() {
    return hour;
}
public long getMinute() {
    return minute;
}
public long getSecond() {
    return second;
}
public long getMicrosecond() {
    return microsecond;
}
// Broken-down date/time state; longs for easy packing arithmetic.
private long year;
private long month;
private long day;
private long hour;
private long minute;
private long second;
private long microsecond;
// Hash combines the parent hash with the epoch-millis value.
// NOTE(review): depends on TimeZone.getDefault(), so hashes differ across
// JVMs with different default zones — confirm equals() uses the same basis.
@Override
public int hashCode() {
    // Long.hashCode(v) equals Objects.hashCode(Long.valueOf(v)) but avoids
    // autoboxing the long.
    return 31 * super.hashCode() + Long.hashCode(unixTimestamp(TimeZone.getDefault()));
}
public int fromDateFormatStr(String format, String value, boolean hasSubVal) throws InvalidFormatException {
int fp = 0;
int fend = format.length();
int vp = 0;
int vend = value.length();
boolean datePartUsed = false;
boolean timePartUsed = false;
boolean microSecondPartUsed = false;
int dayPart = 0;
long weekday = -1;
long yearday = -1;
long weekNum = -1;
boolean strictWeekNumber = false;
boolean sundayFirst = false;
boolean strictWeekNumberYearType = false;
long strictWeekNumberYear = -1;
boolean usaTime = false;
char f;
while (fp < fend && vp < vend) {
while (vp < vend && Character.isSpaceChar(value.charAt(vp))) {
vp++;
}
if (vp >= vend) {
break;
}
f = format.charAt(fp);
if (f == '%' && fp + 1 < fend) {
int tmp = 0;
long intValue = 0;
fp++;
f = format.charAt(fp);
fp++;
switch (f) {
case 'y':
tmp = vp + Math.min(2, vend - vp);
intValue = strToLong(value.substring(vp, tmp));
intValue += intValue >= 70 ? 1900 : 2000;
this.year = intValue;
vp = tmp;
datePartUsed = true;
break;
case 'Y':
tmp = vp + Math.min(4, vend - vp);
intValue = strToLong(value.substring(vp, tmp));
if (tmp - vp <= 2) {
intValue += intValue >= 70 ? 1900 : 2000;
}
this.year = intValue;
vp = tmp;
datePartUsed = true;
break;
case 'm':
case 'c':
tmp = vp + Math.min(2, vend - vp);
intValue = strToLong(value.substring(vp, tmp));
this.month = intValue;
vp = tmp;
datePartUsed = true;
break;
case 'M': {
int nextPos = findWord(value, vp);
intValue = checkWord(MONTH_NAME_DICT, value.substring(vp, nextPos));
this.month = intValue;
vp = nextPos;
break;
}
case 'b': {
int nextPos = findWord(value, vp);
intValue = checkWord(MONTH_ABBR_NAME_DICT, value.substring(vp, nextPos));
this.month = intValue;
vp = nextPos;
break;
}
case 'd':
case 'e':
tmp = vp + Math.min(2, vend - vp);
intValue = strToLong(value.substring(vp, tmp));
this.day = intValue;
vp = tmp;
datePartUsed = true;
break;
case 'D':
tmp = vp + Math.min(2, vend - vp);
intValue = strToLong(value.substring(vp, tmp));
this.day = intValue;
vp = tmp + Math.min(2, vend - tmp);
datePartUsed = true;
break;
case 'h':
case 'I':
case 'l':
usaTime = true;
case 'k':
case 'H':
tmp = findNumber(value, vp, 2);
intValue = strToLong(value.substring(vp, tmp));
this.hour = intValue;
vp = tmp;
timePartUsed = true;
break;
case 'i':
tmp = vp + Math.min(2, vend - vp);
intValue = strToLong(value.substring(vp, tmp));
this.minute = intValue;
vp = tmp;
timePartUsed = true;
break;
case 's':
case 'S':
tmp = vp + Math.min(2, vend - vp);
intValue = strToLong(value.substring(vp, tmp));
this.second = intValue;
vp = tmp;
timePartUsed = true;
break;
case 'f':
tmp = vp + Math.min(6, vend - vp);
intValue = strToLong(value.substring(vp, tmp));
this.microsecond = (long) (intValue * Math.pow(10, 6 - Math.min(6, vend - vp)));
timePartUsed = true;
microSecondPartUsed = true;
vp = tmp;
break;
case 'p':
if ((vend - vp) < 2 || Character.toUpperCase(value.charAt(vp + 1)) != 'M' || !usaTime) {
throw new InvalidFormatException("Invalid %p format");
}
if (Character.toUpperCase(value.charAt(vp)) == 'P') {
dayPart = 12;
}
timePartUsed = true;
vp += 2;
break;
case 'W': {
int nextPos = findWord(value, vp);
intValue = checkWord(WEEK_DAY_NAME_DICT, value.substring(vp, nextPos));
intValue++;
weekday = intValue;
datePartUsed = true;
break;
}
case 'a': {
int nextPos = findWord(value, vp);
intValue = checkWord(WEEK_DAY_NAME_DICT, value.substring(vp, nextPos));
intValue++;
weekday = intValue;
datePartUsed = true;
break;
}
case 'w':
tmp = vp + Math.min(1, vend - vp);
intValue = strToLong(value.substring(vp, tmp));
if (intValue >= 7) {
throw new InvalidFormatException("invalid day of week: " + intValue);
}
if (intValue == 0) {
intValue = 7;
}
weekday = intValue;
vp = tmp;
datePartUsed = true;
break;
case 'j':
tmp = vp + Math.min(3, vend - vp);
intValue = strToLong(value.substring(vp, tmp));
yearday = intValue;
vp = tmp;
datePartUsed = true;
break;
case 'u':
case 'v':
case 'U':
case 'V':
sundayFirst = (format.charAt(fp - 1) == 'U' || format.charAt(fp - 1) == 'V');
strictWeekNumber = (format.charAt(fp - 1) == 'V' || format.charAt(fp - 1) == 'v');
tmp = vp + Math.min(2, vend - vp);
intValue = Long.valueOf(value.substring(vp, tmp));
weekNum = intValue;
if (weekNum > 53 || (strictWeekNumber && weekNum == 0)) {
throw new InvalidFormatException("invalid num of week: " + weekNum);
}
vp = tmp;
datePartUsed = true;
break;
case 'x':
case 'X':
strictWeekNumberYearType = (format.charAt(fp - 1) == 'X');
tmp = vp + Math.min(4, vend - vp);
intValue = Long.valueOf(value.substring(vp, tmp));
strictWeekNumberYear = intValue;
vp = tmp;
datePartUsed = true;
break;
case 'r':
tmp = fromDateFormatStr("%I:%i:%S %p", value.substring(vp, vend), true);
vp = tmp;
timePartUsed = true;
break;
case 'T':
tmp = fromDateFormatStr("%H:%i:%S", value.substring(vp, vend), true);
vp = tmp;
timePartUsed = true;
break;
case '.':
while (vp < vend && Character.toString(value.charAt(vp)).matches("\\p{Punct}")) {
vp++;
}
break;
case '@':
while (vp < vend && Character.isLetter(value.charAt(vp))) {
vp++;
}
break;
case '#':
while (vp < vend && Character.isDigit(value.charAt(vp))) {
vp++;
}
break;
case '%':
if ('%' != value.charAt(vp)) {
throw new InvalidFormatException("invalid char after %: " + value.charAt(vp));
}
vp++;
break;
default:
throw new InvalidFormatException("Invalid format pattern: " + f);
}
} else if (format.charAt(fp) != ' ') {
if (format.charAt(fp) != value.charAt(vp)) {
throw new InvalidFormatException("Invalid char: " + value.charAt(vp) + ", expected: "
+ format.charAt(fp));
}
fp++;
vp++;
} else {
fp++;
}
}
while (fp < fend) {
f = format.charAt(fp);
if (f == '%' && fp + 1 < fend) {
fp++;
f = format.charAt(fp);
fp++;
switch (f) {
case 'H':
case 'h':
case 'I':
case 'i':
case 'k':
case 'l':
case 'r':
case 's':
case 'S':
case 'p':
case 'T':
timePartUsed = true;
break;
default:
break;
}
} else {
fp++;
}
}
if (usaTime) {
if (this.hour > 12 || this.hour < 1) {
throw new InvalidFormatException("Invalid hour: " + hour);
}
this.hour = (this.hour % 12) + dayPart;
}
if (hasSubVal) {
return vp;
}
if (yearday > 0) {
long days = calcDaynr(this.year, 1, 1) + yearday - 1;
getDateFromDaynr(days);
}
if (weekNum >= 0 && weekday > 0) {
if ((strictWeekNumber && (strictWeekNumberYear < 0
|| strictWeekNumberYearType != sundayFirst))
|| (!strictWeekNumber && strictWeekNumberYear >= 0)) {
throw new InvalidFormatException("invalid week number");
}
long days = calcDaynr(strictWeekNumber ? strictWeekNumberYear : this.year, 1, 1);
long weekdayB = calcWeekday(days, sundayFirst);
if (sundayFirst) {
days += ((weekdayB == 0) ? 0 : 7) - weekdayB + (weekNum - 1) * 7 + weekday % 7;
} else {
days += ((weekdayB <= 3) ? 0 : 7) - weekdayB + (weekNum - 1) * 7 + weekday - 1;
}
getDateFromDaynr(days);
}
if (datePartUsed) {
if (microSecondPartUsed) {
this.type = Type.DATETIMEV2;
} else if (timePartUsed) {
this.type = ScalarType.getDefaultDateType(Type.DATETIME);
} else {
this.type = ScalarType.getDefaultDateType(Type.DATE);
}
}
if (checkRange() || checkDate()) {
throw new InvalidFormatException("Invalid format");
}
return 0;
}
// Typed variant of fromDateFormatStr: parses with the legacy logic, then
// upgrades the resulting type to the v2 family when the caller asked for one.
public int fromDateFormatStr(String format, String value, boolean hasSubVal, Type type)
        throws InvalidFormatException {
    switch (type.getPrimitiveType()) {
        case DATETIME:
        case DATE:
            return fromDateFormatStr(format, value, hasSubVal);
        default:
            int val = fromDateFormatStr(format, value, hasSubVal);
            convertTypeToV2();
            return val;
    }
}
// Maps DATETIME -> DATETIMEV2 and DATE -> DATEV2; other types are untouched.
private void convertTypeToV2() {
    switch (type.getPrimitiveType()) {
        case DATETIME:
            this.type = Type.DATETIMEV2;
            break;
        case DATE:
            this.type = Type.DATEV2;
            break;
        default:
    }
}
// True when any field exceeds the maximum representable datetime (9999-12-31
// 23:59:59.999999). Note: this checks upper bounds only.
private boolean checkRange() {
    return year > MAX_DATETIME.year || month > MAX_DATETIME.month || day > MAX_DATETIME.day
            || hour > MAX_DATETIME.hour || minute > MAX_DATETIME.minute || second > MAX_DATETIME.second
            || microsecond > MAX_MICROSECOND;
}
// True when the day does not exist in the month; Feb 29 is allowed in leap
// years. month == 0 is treated as "unset" and passes.
private boolean checkDate() {
    if (month != 0 && day > DAYS_IN_MONTH[((int) month)]) {
        if (month == 2 && day == 29 && Year.isLeap(year)) {
            return false;
        }
        return true;
    }
    return false;
}
// Parses a non-negative long from a date-field substring, converting any
// parse failure (or a negative value) into InvalidFormatException.
private long strToLong(String l) throws InvalidFormatException {
    try {
        // parseLong avoids the Long boxing of Long.valueOf; behavior is
        // otherwise identical, including the NumberFormatException on bad input.
        long y = Long.parseLong(l);
        if (y < 0) {
            throw new InvalidFormatException("Invalid format: negative number.");
        }
        return y;
    } catch (NumberFormatException e) {
        throw new InvalidFormatException(e.getMessage());
    }
}
// Number of days from year 0 to the given date (MySQL's calc_daynr
// algorithm), with Gregorian leap-year corrections.
private long calcDaynr(long year, long month, long day) {
    long delsum = 0;
    long y = year;
    if (year == 0 && month == 0) {
        return 0;
    }
    /* Cast to int to be able to handle month == 0 */
    // 31 * (month - 1) over-counts short months; the correction below
    // subtracts (month * 4 + 23) / 10 days for months after February.
    delsum = 365 * y + 31 * (month - 1) + day;
    if (month <= 2) {
        y--;
    } else {
        delsum -= (month * 4 + 23) / 10;
    }
    return delsum + y / 4 - y / 100 + y / 400;
}
// Day-of-week index for a day number; 0 = Monday (or Sunday when
// isSundayFirstDay is set).
private long calcWeekday(long dayNr, boolean isSundayFirstDay) {
    return (dayNr + 5L + (isSundayFirstDay ? 1L : 0L)) % 7;
}
// Inverse of calcDaynr: decomposes a day number back into year/month/day,
// rejecting values outside [1, 3652424] (~year 9999).
private void getDateFromDaynr(long daynr) throws InvalidFormatException {
    if (daynr <= 0 || daynr > 3652424) {
        throw new InvalidFormatException("Invalid days to year: " + daynr);
    }
    // Initial estimate, then step back until the year's first day <= daynr.
    this.year = daynr / 365;
    long daysBeforeYear = 0;
    while (daynr < (daysBeforeYear = calcDaynr(this.year, 1, 1))) {
        this.year--;
    }
    long daysOfYear = daynr - daysBeforeYear + 1;
    int leapDay = 0;
    if (Year.isLeap(this.year)) {
        if (daysOfYear > 31 + 28) {
            daysOfYear--;
            if (daysOfYear == 31 + 28) {
                leapDay = 1;
            }
        }
    }
    this.month = 1;
    while (daysOfYear > DAYS_IN_MONTH[(int) this.month]) {
        daysOfYear -= DAYS_IN_MONTH[(int) this.month];
        this.month++;
    }
    this.day = daysOfYear + leapDay;
}
// Returns the index one past the run of letters starting at `start`.
private int findWord(String value, int start) {
    int p = start;
    while (p < value.length() && Character.isLetter(value.charAt(p))) {
        p++;
    }
    return p;
}
// Returns the index one past a run of at most maxLen digits starting at `start`.
private int findNumber(String value, int start, int maxLen) {
    int p = start;
    int left = maxLen;
    while (p < value.length() && Character.isDigit(value.charAt(p)) && left > 0) {
        p++;
        left--;
    }
    return p;
}
// Case-insensitive lookup of a month/weekday name; throws when unknown.
private int checkWord(Map<String, Integer> dict, String value) throws InvalidFormatException {
    Integer i = dict.get(value.toLowerCase());
    if (i != null) {
        return i;
    }
    throw new InvalidFormatException("'" + value + "' is invalid");
}
// Best-effort parser for free-form date/datetime strings, modeled on MySQL's
// str_to_datetime. Digits are split into up to 7 fields (year, month, day,
// hour, minute, second, microsecond) either by punctuation/space delimiters
// or, for delimiter-free "interval" strings like 20121229[.], by fixed field
// widths. Sets this literal's fields and type (DATE when only 3 fields were
// found, DATETIME otherwise); throws AnalysisException on malformed or
// out-of-range input.
public void fromDateStr(String dateStr) throws AnalysisException {
    dateStr = dateStr.trim();
    if (dateStr.isEmpty()) {
        throw new AnalysisException("parse datetime value failed: " + dateStr);
    }
    int[] dateVal = new int[MAX_DATE_PARTS];
    int[] dateLen = new int[MAX_DATE_PARTS];
    int pre = 0;
    int pos = 0;
    // Probe the leading digit/'T' run to decide between delimiter-free
    // ("interval") parsing and delimiter-driven parsing.
    while (pos < dateStr.length() && (Character.isDigit(dateStr.charAt(pos)) || dateStr.charAt(pos) == 'T')) {
        pos++;
    }
    int yearLen = 4;
    int digits = pos - pre;
    boolean isIntervalFormat = false;
    if (pos == dateStr.length() || dateStr.charAt(pos) == '.') {
        // Fixed-width form: 4/8/14+ digits imply a 4-digit year, else 2-digit.
        if (digits == 4 || digits == 8 || digits >= 14) {
            yearLen = 4;
        } else {
            yearLen = 2;
        }
        isIntervalFormat = true;
    }
    int fieldIdx = 0;
    int fieldLen = yearLen;
    while (pre < dateStr.length() && Character.isDigit(dateStr.charAt(pre)) && fieldIdx < MAX_DATE_PARTS - 1) {
        int start = pre;
        int tempVal = 0;
        // In delimited mode every field except microseconds scans to the
        // next delimiter; in interval mode fields have fixed widths.
        boolean scanToDelim = (!isIntervalFormat) && (fieldIdx != 6);
        while (pre < dateStr.length() && Character.isDigit(dateStr.charAt(pre))
                && (scanToDelim || fieldLen-- != 0)) {
            tempVal = tempVal * 10 + (dateStr.charAt(pre++) - '0');
        }
        dateVal[fieldIdx] = tempVal;
        dateLen[fieldIdx] = pre - start;
        fieldLen = 2;
        if (pre == dateStr.length()) {
            fieldIdx++;
            break;
        }
        // ISO 'T' separates the date part (fieldIdx 2 = day) from the time part.
        if (fieldIdx == 2 && dateStr.charAt(pre) == 'T') {
            pre++;
            fieldIdx++;
            continue;
        }
        // After seconds: '.' introduces up-to-6-digit microseconds.
        if (fieldIdx == 5) {
            if (dateStr.charAt(pre) == '.') {
                pre++;
                fieldLen = 6;
            } else if (Character.isDigit(dateStr.charAt(pre))) {
                fieldIdx++;
                break;
            }
            fieldIdx++;
            continue;
        }
        // Skip the delimiter run; spaces are only legal after the day or
        // microsecond fields (ALLOW_SPACE_MASK).
        // BUG FIX: the original condition was "pre < len && isPunct || isSpace"
        // — '&&' binds tighter than '||', so once pre reached the string end
        // the isSpace operand still evaluated charAt(pre) and threw
        // StringIndexOutOfBoundsException (e.g. trailing punctuation) instead
        // of a clean parse error. Parenthesize so the bounds check guards both.
        while (pre < dateStr.length()
                && (Character.toString(dateStr.charAt(pre)).matches("\\p{Punct}")
                        || Character.isSpaceChar(dateStr.charAt(pre)))) {
            if (Character.isSpaceChar(dateStr.charAt(pre))) {
                if (((1 << fieldIdx) & ALLOW_SPACE_MASK) == 0) {
                    throw new AnalysisException("parse datetime value failed: " + dateStr);
                }
            }
            pre++;
        }
        fieldIdx++;
    }
    int numField = fieldIdx;
    if (!isIntervalFormat) {
        yearLen = dateLen[0];
    }
    // Zero out unparsed trailing fields.
    for (; fieldIdx < MAX_DATE_PARTS; ++fieldIdx) {
        dateLen[fieldIdx] = 0;
        dateVal[fieldIdx] = 0;
    }
    // Two-digit years pivot at YY_PART_YEAR (70): 69 -> 2069, 70 -> 1970.
    if (yearLen == 2) {
        if (dateVal[0] < YY_PART_YEAR) {
            dateVal[0] += 2000;
        } else {
            dateVal[0] += 1900;
        }
    }
    if (numField < 3) {
        throw new AnalysisException("parse datetime value failed: " + dateStr);
    }
    year = dateVal[0];
    month = dateVal[1];
    day = dateVal[2];
    hour = dateVal[3];
    minute = dateVal[4];
    second = dateVal[5];
    // NOTE(review): the fractional value is not scaled by its digit count
    // (".1" yields 1 microsecond, not 100000) — confirm against the backend.
    microsecond = dateVal[6];
    if (numField == 3) {
        type = ScalarType.getDefaultDateType(Type.DATE);
    } else {
        type = ScalarType.getDefaultDateType(Type.DATETIME);
    }
    if (checkRange() || checkDate()) {
        throw new AnalysisException("Datetime value is out of range: " + dateStr);
    }
}
} |
It's better to use AdminClient rather than KafkaConsumer if we only want to get topic metadata. | private void tryDelete(AdminClient adminClient, String topic) throws Exception {
try {
adminClient
.deleteTopics(Collections.singleton(topic))
.all()
.get(DELETE_TIMEOUT_SECONDS, TimeUnit.SECONDS);
try (KafkaConsumer<Void, Void> consumer = createTempConsumer()) {
CommonTestUtils.waitUtil(
() -> {
List<PartitionInfo> partitionInfos = consumer.partitionsFor(topic);
return partitionInfos == null || partitionInfos.isEmpty();
},
Duration.ofSeconds(DELETE_TIMEOUT_SECONDS),
String.format("Topic \"%s\" was not deleted within timeout", topic));
}
} catch (TimeoutException e) {
LOG.info(
"Did not receive delete topic response within {} seconds. Checking if it succeeded",
DELETE_TIMEOUT_SECONDS);
if (adminClient
.listTopics()
.names()
.get(DELETE_TIMEOUT_SECONDS, TimeUnit.SECONDS)
.contains(topic)) {
throw new Exception("Topic still exists after timeout", e);
}
}
} | try (KafkaConsumer<Void, Void> consumer = createTempConsumer()) { | private void tryDelete(AdminClient adminClient, String topic) throws Exception {
try {
adminClient
.deleteTopics(Collections.singleton(topic))
.all()
.get(REQUEST_TIMEOUT_SECONDS, TimeUnit.SECONDS);
CommonTestUtils.waitUtil(
() -> {
try {
return adminClient.listTopics().listings()
.get(REQUEST_TIMEOUT_SECONDS, TimeUnit.SECONDS).stream()
.map(TopicListing::name)
.noneMatch((name) -> name.equals(topic));
} catch (Exception e) {
LOG.warn("Exception caught when listing Kafka topics", e);
return false;
}
},
Duration.ofSeconds(REQUEST_TIMEOUT_SECONDS),
String.format("Topic \"%s\" was not deleted within timeout", topic));
} catch (TimeoutException e) {
LOG.info(
"Did not receive delete topic response within {} seconds. Checking if it succeeded",
REQUEST_TIMEOUT_SECONDS);
if (adminClient
.listTopics()
.names()
.get(REQUEST_TIMEOUT_SECONDS, TimeUnit.SECONDS)
.contains(topic)) {
throw new Exception("Topic still exists after timeout", e);
}
}
} | class KafkaTestEnvironmentImpl extends KafkaTestEnvironment {
protected static final Logger LOG = LoggerFactory.getLogger(KafkaTestEnvironmentImpl.class);
private static final String ZOOKEEPER_HOSTNAME = "zookeeper";
private static final int ZOOKEEPER_PORT = 2181;
private final Map<Integer, KafkaContainer> brokers = new HashMap<>();
private final Set<Integer> pausedBroker = new HashSet<>();
private @Nullable GenericContainer<?> zookeeper;
private String brokerConnectionString = "";
private Properties standardProps;
private FlinkKafkaProducer.Semantic producerSemantic = FlinkKafkaProducer.Semantic.EXACTLY_ONCE;
private int zkTimeout = 30000;
private Config config;
private static final int DELETE_TIMEOUT_SECONDS = 30;
// Configures the delivery guarantee used by producers created through this environment
// (the field defaults to EXACTLY_ONCE).
public void setProducerSemantic(FlinkKafkaProducer.Semantic producerSemantic) {
this.producerSemantic = producerSemantic;
}
@Override
// Starts the Kafka (and, for multi-broker setups, Zookeeper) containers and builds the
// shared connection/consumer properties the tests use.
public void prepare(Config config) throws Exception {
// Secure mode runs a single broker and stretches the ZK timeouts.
if (config.isSecureMode()) {
config.setKafkaServersNumber(1);
zkTimeout = zkTimeout * 15;
}
this.config = config;
brokers.clear();
LOG.info("Starting KafkaServer");
startKafkaContainerCluster(config.getKafkaServersNumber());
LOG.info("KafkaServer started.");
standardProps = new Properties();
standardProps.setProperty("bootstrap.servers", brokerConnectionString);
standardProps.setProperty("group.id", "flink-tests");
standardProps.setProperty("enable.auto.commit", "false");
standardProps.setProperty("zookeeper.session.timeout.ms", String.valueOf(zkTimeout));
standardProps.setProperty("zookeeper.connection.timeout.ms", String.valueOf(zkTimeout));
standardProps.setProperty("auto.offset.reset", "earliest");
// Tiny fetch size — presumably deliberate so tests exercise multi-fetch paths;
// confirm before changing.
standardProps.setProperty(
"max.partition.fetch.bytes",
"256");
}
@Override
// Deletes the topic via an AdminClient; fails the surrounding test if deletion fails.
public void deleteTestTopic(String topic) {
LOG.info("Deleting topic {}", topic);
Properties props = getSecureProperties();
props.putAll(getStandardProperties());
// Random client.id lets maybePrintDanglingThreadStacktrace() identify threads leaked
// by this particular client.
String clientId = Long.toString(new Random().nextLong());
props.put("client.id", clientId);
AdminClient adminClient = AdminClient.create(props);
try {
tryDelete(adminClient, topic);
} catch (Exception e) {
e.printStackTrace();
fail(String.format("Delete test topic : %s failed, %s", topic, e.getMessage()));
} finally {
// Bounded close so a wedged admin client cannot hang test teardown.
adminClient.close(Duration.ofMillis(5000L));
maybePrintDanglingThreadStacktrace(clientId);
}
}
/**
 * Creates {@code topic} with the given partition count and replication factor and blocks
 * until the topic's metadata is visible, so tests can use the topic immediately.
 *
 * <p>Readiness is checked through the already-open {@link AdminClient} (describeTopics)
 * rather than a throwaway {@code KafkaConsumer} — the admin client is the lighter-weight
 * way to read topic metadata.
 */
@Override
public void createTestTopic(
        String topic, int numberOfPartitions, int replicationFactor, Properties properties) {
    LOG.info("Creating topic {}", topic);
    try (AdminClient adminClient = AdminClient.create(getStandardProperties())) {
        NewTopic topicObj = new NewTopic(topic, numberOfPartitions, (short) replicationFactor);
        adminClient.createTopics(Collections.singleton(topicObj)).all().get();
        CommonTestUtils.waitUtil(
                () -> {
                    Map<String, TopicDescription> descriptions;
                    try {
                        descriptions =
                                adminClient
                                        .describeTopics(Collections.singleton(topic))
                                        .all()
                                        .get(DELETE_TIMEOUT_SECONDS, TimeUnit.SECONDS);
                    } catch (Exception e) {
                        // Metadata may not have propagated yet; keep retrying until
                        // waitUtil's own timeout fires.
                        LOG.warn("Exception caught when describing Kafka topics", e);
                        return false;
                    }
                    if (descriptions == null || !descriptions.containsKey(topic)) {
                        return false;
                    }
                    return descriptions.get(topic).partitions().size() == numberOfPartitions;
                },
                Duration.ofSeconds(30),
                String.format("New topic \"%s\" is not ready within timeout", topicObj));
    } catch (Exception e) {
        e.printStackTrace();
        fail("Create test topic : " + topic + " failed, " + e.getMessage());
    }
}
@Override
// Returns the live properties object built in prepare() (not a defensive copy).
public Properties getStandardProperties() {
return standardProps;
}
@Override
// SASL/Kerberos client settings; empty unless the environment runs in secure mode.
public Properties getSecureProperties() {
    Properties secureProps = new Properties();
    if (!config.isSecureMode()) {
        return secureProps;
    }
    secureProps.put("security.inter.broker.protocol", "SASL_PLAINTEXT");
    secureProps.put("security.protocol", "SASL_PLAINTEXT");
    secureProps.put("sasl.kerberos.service.name", "kafka");
    secureProps.setProperty("zookeeper.session.timeout.ms", String.valueOf(zkTimeout));
    secureProps.setProperty("zookeeper.connection.timeout.ms", String.valueOf(zkTimeout));
    secureProps.setProperty("metadata.fetch.timeout.ms", "120000");
    return secureProps;
}
@Override
// Comma-separated bootstrap servers of all brokers, set by startKafkaContainerCluster().
public String getBrokerConnectionString() {
return brokerConnectionString;
}
@Override
// Reports the Kafka docker image coordinate, standing in for a broker version string.
public String getVersion() {
return DockerImageVersions.KAFKA;
}
@Override
// Wraps the given topics/schema/properties in a legacy FlinkKafkaConsumer.
public <T> FlinkKafkaConsumerBase<T> getConsumer(
List<String> topics, KafkaDeserializationSchema<T> readSchema, Properties props) {
return new FlinkKafkaConsumer<T>(topics, readSchema, props);
}
@Override
// Pre-configures a new-style KafkaSource builder with topics, schema and properties.
public <T> KafkaSourceBuilder<T> getSourceBuilder(
List<String> topics, KafkaDeserializationSchema<T> schema, Properties props) {
return KafkaSource.<T>builder()
.setTopics(topics)
.setDeserializer(KafkaRecordDeserializationSchema.of(schema))
.setProperties(props);
}
@Override
@SuppressWarnings("unchecked")
// Drains every record currently in the topic and returns them as an unmodifiable list.
public <K, V> Collection<ConsumerRecord<K, V>> getAllRecordsFromTopic(
Properties properties, String topic) {
return UnmodifiableList.decorate(KafkaUtil.drainAllRecordsFromTopic(topic, properties));
}
@Override
// Builds a raw StreamSink around a FlinkKafkaProducer honoring the configured
// producerSemantic.
public <T> StreamSink<T> getProducerSink(
String topic,
SerializationSchema<T> serSchema,
Properties props,
FlinkKafkaPartitioner<T> partitioner) {
return new StreamSink<>(
new FlinkKafkaProducer<>(
topic,
serSchema,
props,
partitioner,
producerSemantic,
FlinkKafkaProducer.DEFAULT_KAFKA_PRODUCERS_POOL_SIZE));
}
@Override
// Attaches a producer sink (keyed serialization schema variant) to the stream,
// honoring the configured producerSemantic.
public <T> DataStreamSink<T> produceIntoKafka(
DataStream<T> stream,
String topic,
KeyedSerializationSchema<T> serSchema,
Properties props,
FlinkKafkaPartitioner<T> partitioner) {
return stream.addSink(
new FlinkKafkaProducer<T>(
topic,
serSchema,
props,
Optional.ofNullable(partitioner),
producerSemantic,
FlinkKafkaProducer.DEFAULT_KAFKA_PRODUCERS_POOL_SIZE));
}
@Override
// Attaches a producer sink (plain SerializationSchema variant) to the stream,
// honoring the configured producerSemantic.
public <T> DataStreamSink<T> produceIntoKafka(
DataStream<T> stream,
String topic,
SerializationSchema<T> serSchema,
Properties props,
FlinkKafkaPartitioner<T> partitioner) {
return stream.addSink(
new FlinkKafkaProducer<T>(
topic,
serSchema,
props,
partitioner,
producerSemantic,
FlinkKafkaProducer.DEFAULT_KAFKA_PRODUCERS_POOL_SIZE));
}
@Override
// Attaches a producer sink (KafkaSerializationSchema variant) to the stream.
public <T> DataStreamSink<T> produceIntoKafka(
DataStream<T> stream,
String topic,
KafkaSerializationSchema<T> serSchema,
Properties props) {
return stream.addSink(new FlinkKafkaProducer<T>(topic, serSchema, props, producerSemantic));
}
@Override
// Offset handler backed by a dedicated KafkaConsumer (see KafkaOffsetHandlerImpl below).
public KafkaOffsetHandler createOffsetHandler() {
return new KafkaOffsetHandlerImpl();
}
@Override
// "Restart" is implemented by unpausing the broker's container (see pause/unpause).
public void restartBroker(int leaderId) throws Exception {
unpause(leaderId);
}
@Override
// "Stop" pauses the container so the broker can later be resumed via restartBroker().
public void stopBroker(int brokerId) throws Exception {
pause(brokerId);
}
@Override
// Returns the id of the broker currently leading partition 0 of the given topic.
public int getLeaderToShutDown(String topic) throws Exception {
try (final AdminClient client = AdminClient.create(getStandardProperties())) {
TopicDescription result =
client.describeTopics(Collections.singleton(topic)).all().get().get(topic);
return result.partitions().get(0).leader().id();
}
}
@Override
// This environment supports SASL/Kerberos test runs (see getSecureProperties()).
public boolean isSecureRunSupported() {
return true;
}
@Override
// Stops all broker containers and, if one was started, the Zookeeper container.
public void shutdown() throws Exception {
brokers.values().forEach(GenericContainer::stop);
brokers.clear();
if (zookeeper != null) {
zookeeper.stop();
}
}
// Reads and writes committed consumer-group offsets through a dedicated KafkaConsumer
// configured with no-op (byte[]) deserializers.
private class KafkaOffsetHandlerImpl implements KafkaOffsetHandler {
private final KafkaConsumer<byte[], byte[]> offsetClient;
public KafkaOffsetHandlerImpl() {
Properties props = new Properties();
props.putAll(standardProps);
props.setProperty(
"key.deserializer",
"org.apache.kafka.common.serialization.ByteArrayDeserializer");
props.setProperty(
"value.deserializer",
"org.apache.kafka.common.serialization.ByteArrayDeserializer");
offsetClient = new KafkaConsumer<>(props);
}
@Override
// Returns the committed offset for the partition, or null if none is committed.
public Long getCommittedOffset(String topicName, int partition) {
OffsetAndMetadata committed =
offsetClient.committed(new TopicPartition(topicName, partition));
return (committed != null) ? committed.offset() : null;
}
@Override
// Synchronously commits the given offset for the partition.
public void setCommittedOffset(String topicName, int partition, long offset) {
Map<TopicPartition, OffsetAndMetadata> partitionAndOffset = new HashMap<>();
partitionAndOffset.put(
new TopicPartition(topicName, partition), new OffsetAndMetadata(offset));
offsetClient.commitSync(partitionAndOffset);
}
@Override
public void close() {
offsetClient.close();
}
}
// Starts numBrokers Kafka containers (plus a dedicated Zookeeper when clustering) and
// records the combined bootstrap-server string in brokerConnectionString.
private void startKafkaContainerCluster(int numBrokers) {
Network network = Network.newNetwork();
// A single broker uses the image's embedded Zookeeper (see createKafkaContainer);
// only multi-broker clusters get a separate Zookeeper container.
if (numBrokers > 1) {
zookeeper = createZookeeperContainer(network);
zookeeper.start();
LOG.info("Zookeeper container started");
}
for (int brokerID = 0; brokerID < numBrokers; brokerID++) {
KafkaContainer broker = createKafkaContainer(network, brokerID, zookeeper);
brokers.put(brokerID, broker);
}
// Containers are started in parallel to cut setup time.
new ArrayList<>(brokers.values()).parallelStream().forEach(GenericContainer::start);
LOG.info("{} brokers started", numBrokers);
brokerConnectionString =
brokers.values().stream()
.map(KafkaContainer::getBootstrapServers)
// NOTE(review): the next line appears truncated in this copy (split(":" ...));
// verify the mapping against the upstream source before relying on it.
.map(server -> server.split(":
.collect(Collectors.joining(","));
}
// Builds (but does not start) the dedicated Zookeeper container for multi-broker setups.
private GenericContainer<?> createZookeeperContainer(Network network) {
return new GenericContainer<>(DockerImageName.parse(DockerImageVersions.ZOOKEEPER))
.withNetwork(network)
.withNetworkAliases(ZOOKEEPER_HOSTNAME)
.withEnv("ZOOKEEPER_CLIENT_PORT", String.valueOf(ZOOKEEPER_PORT));
}
// Builds (but does not start) one Kafka broker container wired to the given network.
// Uses the provided external Zookeeper when non-null, otherwise the image's embedded one.
private KafkaContainer createKafkaContainer(
Network network, int brokerID, @Nullable GenericContainer<?> zookeeper) {
String brokerName = String.format("Kafka-%d", brokerID);
KafkaContainer broker =
KafkaUtil.createKafkaContainer(DockerImageVersions.KAFKA, LOG, brokerName)
.withNetwork(network)
.withNetworkAliases(brokerName)
.withEnv("KAFKA_BROKER_ID", String.valueOf(brokerID))
// Generous 50 MiB message/fetch limits and unlimited retention so tests never lose data.
.withEnv("KAFKA_MESSAGE_MAX_BYTES", String.valueOf(50 * 1024 * 1024))
.withEnv("KAFKA_REPLICA_FETCH_MAX_BYTES", String.valueOf(50 * 1024 * 1024))
// Two-hour cap so long-running exactly-once tests keep their transactions alive.
.withEnv(
"KAFKA_TRANSACTION_MAX_TIMEOUT_MS",
Integer.toString(1000 * 60 * 60 * 2))
.withEnv("KAFKA_LOG_RETENTION_MS", "-1")
.withEnv("KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS", String.valueOf(zkTimeout))
.withEnv(
"KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS", String.valueOf(zkTimeout));
if (zookeeper != null) {
broker.dependsOn(zookeeper)
.withExternalZookeeper(
String.format("%s:%d", ZOOKEEPER_HOSTNAME, ZOOKEEPER_PORT));
} else {
broker.withEmbeddedZookeeper();
}
return broker;
}
// Freezes the broker's container via the Docker API; a no-op (with a warning) when the
// broker is already paused. The id is remembered so unpause() can resume it later.
private void pause(int brokerId) {
    if (pausedBroker.contains(brokerId)) {
        LOG.warn("Broker {} is already paused. Skipping pause operation", brokerId);
        return;
    }
    String containerId = brokers.get(brokerId).getContainerId();
    DockerClientFactory.instance().client().pauseContainerCmd(containerId).exec();
    pausedBroker.add(brokerId);
    LOG.info("Broker {} is paused", brokerId);
}
// Resumes a previously paused broker container and blocks until the broker shows up
// again in the cluster metadata (AdminClient#describeCluster).
private void unpause(int brokerId) throws Exception {
if (!pausedBroker.contains(brokerId)) {
LOG.warn("Broker {} is already running. Skipping unpause operation", brokerId);
return;
}
DockerClientFactory.instance()
.client()
.unpauseContainerCmd(brokers.get(brokerId).getContainerId())
.exec();
try (AdminClient adminClient = AdminClient.create(getStandardProperties())) {
CommonTestUtils.waitUtil(
() -> {
try {
return adminClient.describeCluster().nodes().get().stream()
.anyMatch((node) -> node.id() == brokerId);
} catch (Exception e) {
// Cluster metadata may be briefly unavailable while the broker rejoins; retry.
return false;
}
},
Duration.ofSeconds(30),
String.format(
"The paused broker %d is not recovered within timeout", brokerId));
}
pausedBroker.remove(brokerId);
LOG.info("Broker {} is resumed", brokerId);
}
// Builds a short-lived metadata-only consumer: Void deserializers (it never reads
// records) and auto topic creation disabled so probing a topic cannot create it.
private KafkaConsumer<Void, Void> createTempConsumer() {
    final String voidDeserializer = VoidDeserializer.class.getCanonicalName();
    Properties consumerProps = new Properties();
    consumerProps.putAll(getStandardProperties());
    consumerProps.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, voidDeserializer);
    consumerProps.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, voidDeserializer);
    consumerProps.setProperty(ConsumerConfig.ALLOW_AUTO_CREATE_TOPICS_CONFIG, "false");
    return new KafkaConsumer<>(consumerProps);
}
} | class KafkaTestEnvironmentImpl extends KafkaTestEnvironment {
protected static final Logger LOG = LoggerFactory.getLogger(KafkaTestEnvironmentImpl.class);
private static final String ZOOKEEPER_HOSTNAME = "zookeeper";
private static final int ZOOKEEPER_PORT = 2181;
private final Map<Integer, KafkaContainer> brokers = new HashMap<>();
private final Set<Integer> pausedBroker = new HashSet<>();
private @Nullable GenericContainer<?> zookeeper;
private String brokerConnectionString = "";
private Properties standardProps;
private FlinkKafkaProducer.Semantic producerSemantic = FlinkKafkaProducer.Semantic.EXACTLY_ONCE;
private int zkTimeout = 30000;
private Config config;
private static final int REQUEST_TIMEOUT_SECONDS = 30;
public void setProducerSemantic(FlinkKafkaProducer.Semantic producerSemantic) {
this.producerSemantic = producerSemantic;
}
@Override
public void prepare(Config config) throws Exception {
if (config.isSecureMode()) {
config.setKafkaServersNumber(1);
zkTimeout = zkTimeout * 15;
}
this.config = config;
brokers.clear();
LOG.info("Starting KafkaServer");
startKafkaContainerCluster(config.getKafkaServersNumber());
LOG.info("KafkaServer started.");
standardProps = new Properties();
standardProps.setProperty("bootstrap.servers", brokerConnectionString);
standardProps.setProperty("group.id", "flink-tests");
standardProps.setProperty("enable.auto.commit", "false");
standardProps.setProperty("zookeeper.session.timeout.ms", String.valueOf(zkTimeout));
standardProps.setProperty("zookeeper.connection.timeout.ms", String.valueOf(zkTimeout));
standardProps.setProperty("auto.offset.reset", "earliest");
standardProps.setProperty(
"max.partition.fetch.bytes",
"256");
}
@Override
public void deleteTestTopic(String topic) {
LOG.info("Deleting topic {}", topic);
Properties props = getSecureProperties();
props.putAll(getStandardProperties());
String clientId = Long.toString(new Random().nextLong());
props.put("client.id", clientId);
AdminClient adminClient = AdminClient.create(props);
try {
tryDelete(adminClient, topic);
} catch (Exception e) {
e.printStackTrace();
fail(String.format("Delete test topic : %s failed, %s", topic, e.getMessage()));
} finally {
adminClient.close(Duration.ofMillis(5000L));
maybePrintDanglingThreadStacktrace(clientId);
}
}
@Override
// Creates the topic, then polls the AdminClient's topic description until the expected
// partition count is visible (or waitUtil times out).
public void createTestTopic(
String topic, int numberOfPartitions, int replicationFactor, Properties properties) {
LOG.info("Creating topic {}", topic);
try (AdminClient adminClient = AdminClient.create(getStandardProperties())) {
NewTopic topicObj = new NewTopic(topic, numberOfPartitions, (short) replicationFactor);
adminClient.createTopics(Collections.singleton(topicObj)).all().get();
CommonTestUtils.waitUtil(
() -> {
Map<String, TopicDescription> topicDescriptions;
try {
topicDescriptions =
adminClient
.describeTopics(Collections.singleton(topic))
.all()
.get(REQUEST_TIMEOUT_SECONDS, TimeUnit.SECONDS);
} catch (Exception e) {
// Metadata may not have propagated yet; keep retrying.
LOG.warn("Exception caught when describing Kafka topics", e);
return false;
}
if (topicDescriptions == null || !topicDescriptions.containsKey(topic)) {
return false;
}
TopicDescription topicDescription = topicDescriptions.get(topic);
return topicDescription.partitions().size() == numberOfPartitions;
},
Duration.ofSeconds(30),
String.format("New topic \"%s\" is not ready within timeout", topicObj));
} catch (Exception e) {
e.printStackTrace();
fail("Create test topic : " + topic + " failed, " + e.getMessage());
}
}
@Override
public Properties getStandardProperties() {
return standardProps;
}
@Override
public Properties getSecureProperties() {
Properties prop = new Properties();
if (config.isSecureMode()) {
prop.put("security.inter.broker.protocol", "SASL_PLAINTEXT");
prop.put("security.protocol", "SASL_PLAINTEXT");
prop.put("sasl.kerberos.service.name", "kafka");
prop.setProperty("zookeeper.session.timeout.ms", String.valueOf(zkTimeout));
prop.setProperty("zookeeper.connection.timeout.ms", String.valueOf(zkTimeout));
prop.setProperty("metadata.fetch.timeout.ms", "120000");
}
return prop;
}
@Override
public String getBrokerConnectionString() {
return brokerConnectionString;
}
@Override
public String getVersion() {
return DockerImageVersions.KAFKA;
}
@Override
public <T> FlinkKafkaConsumerBase<T> getConsumer(
List<String> topics, KafkaDeserializationSchema<T> readSchema, Properties props) {
return new FlinkKafkaConsumer<T>(topics, readSchema, props);
}
@Override
public <T> KafkaSourceBuilder<T> getSourceBuilder(
List<String> topics, KafkaDeserializationSchema<T> schema, Properties props) {
return KafkaSource.<T>builder()
.setTopics(topics)
.setDeserializer(KafkaRecordDeserializationSchema.of(schema))
.setProperties(props);
}
@Override
@SuppressWarnings("unchecked")
public <K, V> Collection<ConsumerRecord<K, V>> getAllRecordsFromTopic(
Properties properties, String topic) {
return UnmodifiableList.decorate(KafkaUtil.drainAllRecordsFromTopic(topic, properties));
}
@Override
public <T> StreamSink<T> getProducerSink(
String topic,
SerializationSchema<T> serSchema,
Properties props,
FlinkKafkaPartitioner<T> partitioner) {
return new StreamSink<>(
new FlinkKafkaProducer<>(
topic,
serSchema,
props,
partitioner,
producerSemantic,
FlinkKafkaProducer.DEFAULT_KAFKA_PRODUCERS_POOL_SIZE));
}
@Override
public <T> DataStreamSink<T> produceIntoKafka(
DataStream<T> stream,
String topic,
KeyedSerializationSchema<T> serSchema,
Properties props,
FlinkKafkaPartitioner<T> partitioner) {
return stream.addSink(
new FlinkKafkaProducer<T>(
topic,
serSchema,
props,
Optional.ofNullable(partitioner),
producerSemantic,
FlinkKafkaProducer.DEFAULT_KAFKA_PRODUCERS_POOL_SIZE));
}
@Override
public <T> DataStreamSink<T> produceIntoKafka(
DataStream<T> stream,
String topic,
SerializationSchema<T> serSchema,
Properties props,
FlinkKafkaPartitioner<T> partitioner) {
return stream.addSink(
new FlinkKafkaProducer<T>(
topic,
serSchema,
props,
partitioner,
producerSemantic,
FlinkKafkaProducer.DEFAULT_KAFKA_PRODUCERS_POOL_SIZE));
}
@Override
public <T> DataStreamSink<T> produceIntoKafka(
DataStream<T> stream,
String topic,
KafkaSerializationSchema<T> serSchema,
Properties props) {
return stream.addSink(new FlinkKafkaProducer<T>(topic, serSchema, props, producerSemantic));
}
@Override
public KafkaOffsetHandler createOffsetHandler() {
return new KafkaOffsetHandlerImpl();
}
@Override
public void restartBroker(int leaderId) throws Exception {
unpause(leaderId);
}
@Override
public void stopBroker(int brokerId) throws Exception {
pause(brokerId);
}
@Override
public int getLeaderToShutDown(String topic) throws Exception {
try (final AdminClient client = AdminClient.create(getStandardProperties())) {
TopicDescription result =
client.describeTopics(Collections.singleton(topic)).all().get().get(topic);
return result.partitions().get(0).leader().id();
}
}
@Override
public boolean isSecureRunSupported() {
return true;
}
@Override
public void shutdown() throws Exception {
brokers.values().forEach(GenericContainer::stop);
brokers.clear();
if (zookeeper != null) {
zookeeper.stop();
}
}
private class KafkaOffsetHandlerImpl implements KafkaOffsetHandler {
private final KafkaConsumer<byte[], byte[]> offsetClient;
public KafkaOffsetHandlerImpl() {
Properties props = new Properties();
props.putAll(standardProps);
props.setProperty(
"key.deserializer",
"org.apache.kafka.common.serialization.ByteArrayDeserializer");
props.setProperty(
"value.deserializer",
"org.apache.kafka.common.serialization.ByteArrayDeserializer");
offsetClient = new KafkaConsumer<>(props);
}
@Override
public Long getCommittedOffset(String topicName, int partition) {
OffsetAndMetadata committed =
offsetClient.committed(new TopicPartition(topicName, partition));
return (committed != null) ? committed.offset() : null;
}
@Override
public void setCommittedOffset(String topicName, int partition, long offset) {
Map<TopicPartition, OffsetAndMetadata> partitionAndOffset = new HashMap<>();
partitionAndOffset.put(
new TopicPartition(topicName, partition), new OffsetAndMetadata(offset));
offsetClient.commitSync(partitionAndOffset);
}
@Override
public void close() {
offsetClient.close();
}
}
private void startKafkaContainerCluster(int numBrokers) {
Network network = Network.newNetwork();
if (numBrokers > 1) {
zookeeper = createZookeeperContainer(network);
zookeeper.start();
LOG.info("Zookeeper container started");
}
for (int brokerID = 0; brokerID < numBrokers; brokerID++) {
KafkaContainer broker = createKafkaContainer(network, brokerID, zookeeper);
brokers.put(brokerID, broker);
}
new ArrayList<>(brokers.values()).parallelStream().forEach(GenericContainer::start);
LOG.info("{} brokers started", numBrokers);
brokerConnectionString =
brokers.values().stream()
.map(KafkaContainer::getBootstrapServers)
.map(server -> server.split(":
.collect(Collectors.joining(","));
}
private GenericContainer<?> createZookeeperContainer(Network network) {
return new GenericContainer<>(DockerImageName.parse(DockerImageVersions.ZOOKEEPER))
.withNetwork(network)
.withNetworkAliases(ZOOKEEPER_HOSTNAME)
.withEnv("ZOOKEEPER_CLIENT_PORT", String.valueOf(ZOOKEEPER_PORT));
}
private KafkaContainer createKafkaContainer(
Network network, int brokerID, @Nullable GenericContainer<?> zookeeper) {
String brokerName = String.format("Kafka-%d", brokerID);
KafkaContainer broker =
KafkaUtil.createKafkaContainer(DockerImageVersions.KAFKA, LOG, brokerName)
.withNetwork(network)
.withNetworkAliases(brokerName)
.withEnv("KAFKA_BROKER_ID", String.valueOf(brokerID))
.withEnv("KAFKA_MESSAGE_MAX_BYTES", String.valueOf(50 * 1024 * 1024))
.withEnv("KAFKA_REPLICA_FETCH_MAX_BYTES", String.valueOf(50 * 1024 * 1024))
.withEnv(
"KAFKA_TRANSACTION_MAX_TIMEOUT_MS",
Integer.toString(1000 * 60 * 60 * 2))
.withEnv("KAFKA_LOG_RETENTION_MS", "-1")
.withEnv("KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS", String.valueOf(zkTimeout))
.withEnv(
"KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS", String.valueOf(zkTimeout));
if (zookeeper != null) {
broker.dependsOn(zookeeper)
.withExternalZookeeper(
String.format("%s:%d", ZOOKEEPER_HOSTNAME, ZOOKEEPER_PORT));
} else {
broker.withEmbeddedZookeeper();
}
return broker;
}
private void pause(int brokerId) {
if (pausedBroker.contains(brokerId)) {
LOG.warn("Broker {} is already paused. Skipping pause operation", brokerId);
return;
}
DockerClientFactory.instance()
.client()
.pauseContainerCmd(brokers.get(brokerId).getContainerId())
.exec();
pausedBroker.add(brokerId);
LOG.info("Broker {} is paused", brokerId);
}
private void unpause(int brokerId) throws Exception {
if (!pausedBroker.contains(brokerId)) {
LOG.warn("Broker {} is already running. Skipping unpause operation", brokerId);
return;
}
DockerClientFactory.instance()
.client()
.unpauseContainerCmd(brokers.get(brokerId).getContainerId())
.exec();
try (AdminClient adminClient = AdminClient.create(getStandardProperties())) {
CommonTestUtils.waitUtil(
() -> {
try {
return adminClient.describeCluster().nodes().get().stream()
.anyMatch((node) -> node.id() == brokerId);
} catch (Exception e) {
return false;
}
},
Duration.ofSeconds(30),
String.format(
"The paused broker %d is not recovered within timeout", brokerId));
}
pausedBroker.remove(brokerId);
LOG.info("Broker {} is resumed", brokerId);
}
private KafkaConsumer<Void, Void> createTempConsumer() {
Properties consumerProps = new Properties();
consumerProps.putAll(getStandardProperties());
consumerProps.setProperty(
ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
VoidDeserializer.class.getCanonicalName());
consumerProps.setProperty(
ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
VoidDeserializer.class.getCanonicalName());
consumerProps.setProperty(ConsumerConfig.ALLOW_AUTO_CREATE_TOPICS_CONFIG, "false");
return new KafkaConsumer<>(consumerProps);
}
} |
Hm... Here we aren't storing the references, just using them to check and then obtain the bean. So we don't need to close them anywhere, no? | private Object getBeanInstance(Object testInstance, Field field) {
Class<?> fieldClass = field.getType();
InstanceHandle<?> instance = Arc.container().instance(fieldClass, getQualifiers(field));
if (!instance.isAvailable()) {
throw new IllegalStateException("Invalid use of @MockBean - could not determine bean of type: "
+ fieldClass + ". Offending field is " + field.getName() + " of test class "
+ testInstance.getClass());
}
return instance.get();
} | InstanceHandle<?> instance = Arc.container().instance(fieldClass, getQualifiers(field)); | private Object getBeanInstance(Object testInstance, Field field) {
Class<?> fieldClass = field.getType();
InstanceHandle<?> instance = Arc.container().instance(fieldClass, getQualifiers(field));
if (!instance.isAvailable()) {
throw new IllegalStateException("Invalid use of @InjectMock - could not determine bean of type: "
+ fieldClass + ". Offending field is " + field.getName() + " of test class "
+ testInstance.getClass());
}
return instance.get();
} | class CreateMockitoMocksCallback implements QuarkusTestBeforeAllCallback {
@Override
// Walks the test class hierarchy and, for every field annotated with @MockBean, swaps
// the resolved CDI bean for a Mockito mock and registers the pair with MockitoMocksTracker.
public void beforeAll(Object testInstance) {
Class<?> current = testInstance.getClass();
// getDeclaredFields() only covers one class, so walk up to the root of the hierarchy.
while (current.getSuperclass() != null) {
for (Field field : current.getDeclaredFields()) {
MockBean mockBeanAnnotation = field.getAnnotation(MockBean.class);
if (mockBeanAnnotation != null) {
Object beanInstance = getBeanInstance(testInstance, field);
Object mock = createMockAndSetTestField(testInstance, field, beanInstance);
MockitoMocksTracker.track(testInstance, mock, beanInstance);
}
}
current = current.getSuperclass();
}
}
// Creates a Mockito mock of the bean's runtime class and injects it into the test field.
// NOTE(review): beanInstance.getClass() may be a container proxy subclass rather than
// the declared bean type — confirm this is intended.
private Object createMockAndSetTestField(Object testInstance, Field field, Object beanInstance) {
Object mock = Mockito.mock(beanInstance.getClass());
field.setAccessible(true);
try {
field.set(testInstance, mock);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
return mock;
}
// Collects the annotations on the field that are themselves meta-annotated with
// @Qualifier, preserving their declaration order.
private Annotation[] getQualifiers(Field fieldToMock) {
    List<Annotation> result = new ArrayList<>();
    for (Annotation candidate : fieldToMock.getDeclaredAnnotations()) {
        if (hasQualifierMetaAnnotation(candidate)) {
            result.add(candidate);
        }
    }
    return result.toArray(new Annotation[0]);
}

// True if the annotation's type carries the @Qualifier meta-annotation.
private boolean hasQualifierMetaAnnotation(Annotation annotation) {
    for (Annotation meta : annotation.annotationType().getAnnotations()) {
        if (meta.annotationType().equals(Qualifier.class)) {
            return true;
        }
    }
    return false;
}
} | class CreateMockitoMocksCallback implements QuarkusTestBeforeAllCallback {
@Override
public void beforeAll(Object testInstance) {
Class<?> current = testInstance.getClass();
while (current.getSuperclass() != null) {
for (Field field : current.getDeclaredFields()) {
InjectMock injectMockAnnotation = field.getAnnotation(InjectMock.class);
if (injectMockAnnotation != null) {
Object beanInstance = getBeanInstance(testInstance, field);
Object mock = createMockAndSetTestField(testInstance, field, beanInstance);
MockitoMocksTracker.track(testInstance, mock, beanInstance);
}
}
current = current.getSuperclass();
}
}
private Object createMockAndSetTestField(Object testInstance, Field field, Object beanInstance) {
Object mock = Mockito.mock(beanInstance.getClass());
field.setAccessible(true);
try {
field.set(testInstance, mock);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
return mock;
}
private Annotation[] getQualifiers(Field fieldToMock) {
List<Annotation> qualifiers = new ArrayList<>();
Annotation[] fieldAnnotations = fieldToMock.getDeclaredAnnotations();
for (Annotation fieldAnnotation : fieldAnnotations) {
for (Annotation annotationOfFieldAnnotation : fieldAnnotation.annotationType().getAnnotations()) {
if (annotationOfFieldAnnotation.annotationType().equals(Qualifier.class)) {
qualifiers.add(fieldAnnotation);
break;
}
}
}
return qualifiers.toArray(new Annotation[0]);
}
} |
I guess we could make this method empty and remove `canceled` variable for this concrete unit test. | public void cancel() {
canceled = true;
} | canceled = true; | public void cancel() {
isRunning = false;
} | class MockSource implements SourceFunction<Tuple2<Long, Integer>>, ListCheckpointed<Serializable> {
private static final long serialVersionUID = 1;
private int maxElements;
private int checkpointDelay;
private int readDelay;
private volatile int count;
private volatile long lastCheckpointId = -1;
private Semaphore semaphore;
private volatile boolean isRunning = true;
// maxElements: total records to emit before finishing.
// checkpointDelay: iterations of artificial work done inside snapshotState.
// readDelay: sleep in ms between emitted records.
// The semaphore (1 permit) is used by snapshotState to detect concurrent snapshots.
public MockSource(int maxElements, int checkpointDelay, int readDelay) {
this.maxElements = maxElements;
this.checkpointDelay = checkpointDelay;
this.readDelay = readDelay;
this.count = 0;
semaphore = new Semaphore(1);
}
@Override
// Emits (lastCheckpointId, count) pairs with a readDelay pause between them. Emission and
// the counter increment happen under the checkpoint lock so they are atomic with respect
// to snapshots.
public void run(SourceContext<Tuple2<Long, Integer>> ctx) {
final Object lockObject = ctx.getCheckpointLock();
while (isRunning && count < maxElements) {
try {
Thread.sleep(readDelay);
}
catch (InterruptedException e) {
// Preserve the interrupt flag; the loop condition decides termination.
Thread.currentThread().interrupt();
}
synchronized (lockObject) {
ctx.collect(new Tuple2<Long, Integer>(lastCheckpointId, count));
count++;
}
}
}
@Override
@Override
public List<Serializable> snapshotState(long checkpointId, long timestamp) throws Exception {
if (!semaphore.tryAcquire()) {
Assert.fail("Concurrent invocation of snapshotState.");
}
int startCount = count;
lastCheckpointId = checkpointId;
long sum = 0;
for (int i = 0; i < checkpointDelay; i++) {
sum += new Random().nextLong();
}
if (startCount != count) {
semaphore.release();
Assert.fail("Count is different at start end end of snapshot.");
}
semaphore.release();
return Collections.<Serializable>singletonList(sum);
}
@Override
public void restoreState(List<Serializable> state) throws Exception {
}
} | class MockSource implements SourceFunction<Tuple2<Long, Integer>>, ListCheckpointed<Serializable> {
private static final long serialVersionUID = 1;
private int maxElements;
private int checkpointDelay;
private int readDelay;
private volatile int count;
private volatile long lastCheckpointId = -1;
private Semaphore semaphore;
private volatile boolean isRunning = true;
public MockSource(int maxElements, int checkpointDelay, int readDelay) {
this.maxElements = maxElements;
this.checkpointDelay = checkpointDelay;
this.readDelay = readDelay;
this.count = 0;
semaphore = new Semaphore(1);
}
@Override
public void run(SourceContext<Tuple2<Long, Integer>> ctx) {
while (isRunning && count < maxElements) {
try {
Thread.sleep(readDelay);
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
synchronized (ctx.getCheckpointLock()) {
ctx.collect(new Tuple2<>(lastCheckpointId, count));
count++;
}
}
}
@Override
@Override
public List<Serializable> snapshotState(long checkpointId, long timestamp) throws Exception {
if (!semaphore.tryAcquire()) {
Assert.fail("Concurrent invocation of snapshotState.");
}
int startCount = count;
lastCheckpointId = checkpointId;
long sum = 0;
for (int i = 0; i < checkpointDelay; i++) {
sum += new Random().nextLong();
}
if (startCount != count) {
semaphore.release();
Assert.fail("Count is different at start end end of snapshot.");
}
semaphore.release();
return Collections.singletonList(sum);
}
@Override
public void restoreState(List<Serializable> state) throws Exception {
}
} |
This should be in a `Teardown` method - we can fail at various points above and then the table won't be deleted. | public void testWriteWithBackoff() throws Exception {
String tableName = DatabaseTestHelper.getTestTableName("UT_WRITE_BACKOFF");
DatabaseTestHelper.createTable(dataSource, tableName);
Connection connection = dataSource.getConnection();
Statement lockStatement = connection.createStatement();
lockStatement.execute("ALTER TABLE " + tableName + " LOCKSIZE TABLE");
lockStatement.execute("LOCK TABLE " + tableName + " IN EXCLUSIVE MODE");
connection.setAutoCommit(false);
PreparedStatement insertStatement =
connection.prepareStatement("insert into " + tableName + " values(?, ?)");
insertStatement.setInt(1, 1);
insertStatement.setString(2, "TEST");
insertStatement.execute();
pipeline
.apply(Create.of(Collections.singletonList(KV.of(1, "TEST"))))
.apply(
JdbcIO.<KV<Integer, String>>write()
.withDataSourceConfiguration(
JdbcIO.DataSourceConfiguration.create(
"org.apache.derby.jdbc.ClientDriver",
"jdbc:derby:
.withStatement(String.format("insert into %s values(?, ?)", tableName))
.withPreparedStatementSetter(
(element, statement) -> {
statement.setInt(1, element.getKey());
statement.setString(2, element.getValue());
}));
Thread commitThread = new Thread(() -> {
try {
Thread.sleep(10000);
connection.commit();
} catch (Exception e) {
}
});
commitThread.start();
pipeline.run();
commitThread.join();
try (Connection readConnection = dataSource.getConnection()) {
try (Statement statement = readConnection.createStatement()) {
try (ResultSet resultSet = statement.executeQuery("select count(*) from "
+ tableName)) {
resultSet.next();
int count = resultSet.getInt(1);
Assert.assertEquals(2, count);
}
}
} finally {
DatabaseTestHelper.deleteTable(dataSource, tableName);
}
} | DatabaseTestHelper.deleteTable(dataSource, tableName); | public void testWriteWithBackoff() throws Exception {
String tableName = DatabaseTestHelper.getTestTableName("UT_WRITE_BACKOFF");
DatabaseTestHelper.createTable(dataSource, tableName);
Connection connection = dataSource.getConnection();
Statement lockStatement = connection.createStatement();
lockStatement.execute("ALTER TABLE " + tableName + " LOCKSIZE TABLE");
lockStatement.execute("LOCK TABLE " + tableName + " IN EXCLUSIVE MODE");
connection.setAutoCommit(false);
PreparedStatement insertStatement =
connection.prepareStatement("insert into " + tableName + " values(?, ?)");
insertStatement.setInt(1, 1);
insertStatement.setString(2, "TEST");
insertStatement.execute();
pipeline
.apply(Create.of(Collections.singletonList(KV.of(1, "TEST"))))
.apply(
JdbcIO.<KV<Integer, String>>write()
.withDataSourceConfiguration(
JdbcIO.DataSourceConfiguration.create(
"org.apache.derby.jdbc.ClientDriver",
"jdbc:derby:
.withStatement(String.format("insert into %s values(?, ?)", tableName))
.withRetryStrategy((JdbcIO.RetryStrategy) e -> {
return e.getSQLState().equals("XJ208");
})
.withPreparedStatementSetter(
(element, statement) -> {
statement.setInt(1, element.getKey());
statement.setString(2, element.getValue());
}));
Thread commitThread = new Thread(() -> {
try {
Thread.sleep(10000);
connection.commit();
} catch (Exception e) {
}
});
commitThread.start();
pipeline.run();
commitThread.join();
expectedLogs.verifyWarn("Deadlock detected, retrying");
try (Connection readConnection = dataSource.getConnection()) {
try (Statement statement = readConnection.createStatement()) {
try (ResultSet resultSet = statement.executeQuery("select count(*) from "
+ tableName)) {
resultSet.next();
int count = resultSet.getInt(1);
Assert.assertEquals(2, count);
}
}
}
} | class JdbcIOTest implements Serializable {
private static final Logger LOG = LoggerFactory.getLogger(JdbcIOTest.class);
public static final int EXPECTED_ROW_COUNT = 1000;
private static NetworkServerControl derbyServer;
private static ClientDataSource dataSource;
private static int port;
private static String readTableName;
@Rule
public final transient TestPipeline pipeline = TestPipeline.create();
@BeforeClass
public static void startDatabase() throws Exception {
ServerSocket socket = new ServerSocket(0);
port = socket.getLocalPort();
socket.close();
LOG.info("Starting Derby database on {}", port);
System.setProperty("derby.locks.waitTimeout", "2");
System.setProperty("derby.stream.error.file", "target/derby.log");
derbyServer = new NetworkServerControl(InetAddress.getByName("localhost"), port);
StringWriter out = new StringWriter();
derbyServer.start(new PrintWriter(out));
boolean started = false;
int count = 0;
while (!started && count < 30) {
if (out.toString().contains("started")) {
started = true;
} else {
count++;
Thread.sleep(500);
try {
derbyServer.ping();
started = true;
} catch (Throwable t) {
}
}
}
dataSource = new ClientDataSource();
dataSource.setCreateDatabase("create");
dataSource.setDatabaseName("target/beam");
dataSource.setServerName("localhost");
dataSource.setPortNumber(port);
readTableName = DatabaseTestHelper.getTestTableName("UT_READ");
DatabaseTestHelper.createTable(dataSource, readTableName);
addInitialData(dataSource, readTableName);
}
@AfterClass
public static void shutDownDatabase() throws Exception {
try {
DatabaseTestHelper.deleteTable(dataSource, readTableName);
} finally {
if (derbyServer != null) {
derbyServer.shutdown();
}
}
}
@Test
public void testDataSourceConfigurationDataSource() throws Exception {
JdbcIO.DataSourceConfiguration config = JdbcIO.DataSourceConfiguration.create(dataSource);
try (Connection conn = config.buildDatasource().getConnection()) {
assertTrue(conn.isValid(0));
}
}
@Test
public void testDataSourceConfigurationDriverAndUrl() throws Exception {
JdbcIO.DataSourceConfiguration config = JdbcIO.DataSourceConfiguration.create(
"org.apache.derby.jdbc.ClientDriver",
"jdbc:derby:
try (Connection conn = config.buildDatasource().getConnection()) {
assertTrue(conn.isValid(0));
}
}
@Test
public void testDataSourceConfigurationUsernameAndPassword() throws Exception {
JdbcIO.DataSourceConfiguration config = JdbcIO.DataSourceConfiguration.create(
"org.apache.derby.jdbc.ClientDriver",
"jdbc:derby:
.withUsername("sa")
.withPassword("sa");
try (Connection conn = config.buildDatasource().getConnection()) {
assertTrue(conn.isValid(0));
}
}
@Test
public void testDataSourceConfigurationNullPassword() throws Exception {
JdbcIO.DataSourceConfiguration config = JdbcIO.DataSourceConfiguration.create(
"org.apache.derby.jdbc.ClientDriver",
"jdbc:derby:
.withUsername("sa")
.withPassword(null);
try (Connection conn = config.buildDatasource().getConnection()) {
assertTrue(conn.isValid(0));
}
}
@Test
public void testDataSourceConfigurationNullUsernameAndPassword() throws Exception {
JdbcIO.DataSourceConfiguration config = JdbcIO.DataSourceConfiguration.create(
"org.apache.derby.jdbc.ClientDriver",
"jdbc:derby:
.withUsername(null)
.withPassword(null);
try (Connection conn = config.buildDatasource().getConnection()) {
assertTrue(conn.isValid(0));
}
}
/**
* Create test data that is consistent with that generated by TestRow.
*/
private static void addInitialData(DataSource dataSource, String tableName)
throws SQLException {
try (Connection connection = dataSource.getConnection()) {
connection.setAutoCommit(false);
try (PreparedStatement preparedStatement =
connection.prepareStatement(
String.format("insert into %s values (?,?)", tableName))) {
for (int i = 0; i < EXPECTED_ROW_COUNT; i++) {
preparedStatement.clearParameters();
preparedStatement.setInt(1, i);
preparedStatement.setString(2, TestRow.getNameForSeed(i));
preparedStatement.executeUpdate();
}
}
connection.commit();
}
}
@Test
public void testRead() throws Exception {
PCollection<TestRow> rows = pipeline.apply(
JdbcIO.<TestRow>read()
.withDataSourceConfiguration(JdbcIO.DataSourceConfiguration.create(dataSource))
.withQuery("select name,id from " + readTableName)
.withRowMapper(new JdbcTestHelper.CreateTestRowOfNameAndId())
.withCoder(SerializableCoder.of(TestRow.class)));
PAssert.thatSingleton(rows.apply("Count All", Count.globally()))
.isEqualTo((long) EXPECTED_ROW_COUNT);
Iterable<TestRow> expectedValues = TestRow.getExpectedValues(0, EXPECTED_ROW_COUNT);
PAssert.that(rows).containsInAnyOrder(expectedValues);
pipeline.run();
}
@Test
public void testReadWithSingleStringParameter() throws Exception {
PCollection<TestRow> rows =
pipeline.apply(
JdbcIO.<TestRow>read()
.withDataSourceConfiguration(JdbcIO.DataSourceConfiguration.create(dataSource))
.withQuery(String.format("select name,id from %s where name = ?", readTableName))
.withStatementPreparator(
(preparedStatement) -> preparedStatement.setString(1, getNameForSeed(1)))
.withRowMapper(new JdbcTestHelper.CreateTestRowOfNameAndId())
.withCoder(SerializableCoder.of(TestRow.class)));
PAssert.thatSingleton(rows.apply("Count All", Count.globally())).isEqualTo(1L);
Iterable<TestRow> expectedValues = Collections.singletonList(TestRow.fromSeed(1));
PAssert.that(rows).containsInAnyOrder(expectedValues);
pipeline.run();
}
@Test
public void testWrite() throws Exception {
final long rowsToAdd = 1000L;
String tableName = DatabaseTestHelper.getTestTableName("UT_WRITE");
DatabaseTestHelper.createTable(dataSource, tableName);
try {
ArrayList<KV<Integer, String>> data = new ArrayList<>();
for (int i = 0; i < rowsToAdd; i++) {
KV<Integer, String> kv = KV.of(i, "Test");
data.add(kv);
}
pipeline
.apply(Create.of(data))
.apply(
JdbcIO.<KV<Integer, String>>write()
.withDataSourceConfiguration(
JdbcIO.DataSourceConfiguration.create(
"org.apache.derby.jdbc.ClientDriver",
"jdbc:derby:
.withStatement(String.format("insert into %s values(?, ?)", tableName))
.withBatchSize(10L)
.withPreparedStatementSetter(
(element, statement) -> {
statement.setInt(1, element.getKey());
statement.setString(2, element.getValue());
}));
pipeline.run();
try (Connection connection = dataSource.getConnection()) {
try (Statement statement = connection.createStatement()) {
try (ResultSet resultSet = statement.executeQuery("select count(*) from "
+ tableName)) {
resultSet.next();
int count = resultSet.getInt(1);
Assert.assertEquals(EXPECTED_ROW_COUNT, count);
}
}
}
} finally {
DatabaseTestHelper.deleteTable(dataSource, tableName);
}
}
@Test
@Test
public void testWriteWithEmptyPCollection() throws Exception {
pipeline
.apply(Create.empty(KvCoder.of(VarIntCoder.of(), StringUtf8Coder.of())))
.apply(
JdbcIO.<KV<Integer, String>>write()
.withDataSourceConfiguration(
JdbcIO.DataSourceConfiguration.create(
"org.apache.derby.jdbc.ClientDriver",
"jdbc:derby:
.withStatement("insert into BEAM values(?, ?)")
.withPreparedStatementSetter(
(element, statement) -> {
statement.setInt(1, element.getKey());
statement.setString(2, element.getValue());
}));
pipeline.run();
}
} | class JdbcIOTest implements Serializable {
private static final Logger LOG = LoggerFactory.getLogger(JdbcIOTest.class);
public static final int EXPECTED_ROW_COUNT = 1000;
public static final String BACKOFF_TABLE = "UT_WRITE_BACKOFF";
private static NetworkServerControl derbyServer;
private static ClientDataSource dataSource;
private static int port;
private static String readTableName;
@Rule
public final transient TestPipeline pipeline = TestPipeline.create();
@Rule
public final transient ExpectedLogs expectedLogs = ExpectedLogs.none(JdbcIO.class);
@BeforeClass
public static void startDatabase() throws Exception {
ServerSocket socket = new ServerSocket(0);
port = socket.getLocalPort();
socket.close();
LOG.info("Starting Derby database on {}", port);
System.setProperty("derby.locks.waitTimeout", "2");
System.setProperty("derby.stream.error.file", "target/derby.log");
derbyServer = new NetworkServerControl(InetAddress.getByName("localhost"), port);
StringWriter out = new StringWriter();
derbyServer.start(new PrintWriter(out));
boolean started = false;
int count = 0;
while (!started && count < 30) {
if (out.toString().contains("started")) {
started = true;
} else {
count++;
Thread.sleep(500);
try {
derbyServer.ping();
started = true;
} catch (Throwable t) {
}
}
}
dataSource = new ClientDataSource();
dataSource.setCreateDatabase("create");
dataSource.setDatabaseName("target/beam");
dataSource.setServerName("localhost");
dataSource.setPortNumber(port);
readTableName = DatabaseTestHelper.getTestTableName("UT_READ");
DatabaseTestHelper.createTable(dataSource, readTableName);
addInitialData(dataSource, readTableName);
}
@AfterClass
public static void shutDownDatabase() throws Exception {
try {
DatabaseTestHelper.deleteTable(dataSource, readTableName);
} finally {
if (derbyServer != null) {
derbyServer.shutdown();
}
}
}
@Test
public void testDataSourceConfigurationDataSource() throws Exception {
JdbcIO.DataSourceConfiguration config = JdbcIO.DataSourceConfiguration.create(dataSource);
try (Connection conn = config.buildDatasource().getConnection()) {
assertTrue(conn.isValid(0));
}
}
@Test
public void testDataSourceConfigurationDriverAndUrl() throws Exception {
JdbcIO.DataSourceConfiguration config = JdbcIO.DataSourceConfiguration.create(
"org.apache.derby.jdbc.ClientDriver",
"jdbc:derby:
try (Connection conn = config.buildDatasource().getConnection()) {
assertTrue(conn.isValid(0));
}
}
@Test
public void testDataSourceConfigurationUsernameAndPassword() throws Exception {
JdbcIO.DataSourceConfiguration config = JdbcIO.DataSourceConfiguration.create(
"org.apache.derby.jdbc.ClientDriver",
"jdbc:derby:
.withUsername("sa")
.withPassword("sa");
try (Connection conn = config.buildDatasource().getConnection()) {
assertTrue(conn.isValid(0));
}
}
@Test
public void testDataSourceConfigurationNullPassword() throws Exception {
JdbcIO.DataSourceConfiguration config = JdbcIO.DataSourceConfiguration.create(
"org.apache.derby.jdbc.ClientDriver",
"jdbc:derby:
.withUsername("sa")
.withPassword(null);
try (Connection conn = config.buildDatasource().getConnection()) {
assertTrue(conn.isValid(0));
}
}
@Test
public void testDataSourceConfigurationNullUsernameAndPassword() throws Exception {
JdbcIO.DataSourceConfiguration config = JdbcIO.DataSourceConfiguration.create(
"org.apache.derby.jdbc.ClientDriver",
"jdbc:derby:
.withUsername(null)
.withPassword(null);
try (Connection conn = config.buildDatasource().getConnection()) {
assertTrue(conn.isValid(0));
}
}
/**
* Create test data that is consistent with that generated by TestRow.
*/
private static void addInitialData(DataSource dataSource, String tableName)
throws SQLException {
try (Connection connection = dataSource.getConnection()) {
connection.setAutoCommit(false);
try (PreparedStatement preparedStatement =
connection.prepareStatement(
String.format("insert into %s values (?,?)", tableName))) {
for (int i = 0; i < EXPECTED_ROW_COUNT; i++) {
preparedStatement.clearParameters();
preparedStatement.setInt(1, i);
preparedStatement.setString(2, TestRow.getNameForSeed(i));
preparedStatement.executeUpdate();
}
}
connection.commit();
}
}
@Test
public void testRead() throws Exception {
PCollection<TestRow> rows = pipeline.apply(
JdbcIO.<TestRow>read()
.withDataSourceConfiguration(JdbcIO.DataSourceConfiguration.create(dataSource))
.withQuery("select name,id from " + readTableName)
.withRowMapper(new JdbcTestHelper.CreateTestRowOfNameAndId())
.withCoder(SerializableCoder.of(TestRow.class)));
PAssert.thatSingleton(rows.apply("Count All", Count.globally()))
.isEqualTo((long) EXPECTED_ROW_COUNT);
Iterable<TestRow> expectedValues = TestRow.getExpectedValues(0, EXPECTED_ROW_COUNT);
PAssert.that(rows).containsInAnyOrder(expectedValues);
pipeline.run();
}
@Test
public void testReadWithSingleStringParameter() throws Exception {
PCollection<TestRow> rows =
pipeline.apply(
JdbcIO.<TestRow>read()
.withDataSourceConfiguration(JdbcIO.DataSourceConfiguration.create(dataSource))
.withQuery(String.format("select name,id from %s where name = ?", readTableName))
.withStatementPreparator(
(preparedStatement) -> preparedStatement.setString(1, getNameForSeed(1)))
.withRowMapper(new JdbcTestHelper.CreateTestRowOfNameAndId())
.withCoder(SerializableCoder.of(TestRow.class)));
PAssert.thatSingleton(rows.apply("Count All", Count.globally())).isEqualTo(1L);
Iterable<TestRow> expectedValues = Collections.singletonList(TestRow.fromSeed(1));
PAssert.that(rows).containsInAnyOrder(expectedValues);
pipeline.run();
}
@Test
public void testWrite() throws Exception {
final long rowsToAdd = 1000L;
String tableName = DatabaseTestHelper.getTestTableName("UT_WRITE");
DatabaseTestHelper.createTable(dataSource, tableName);
try {
ArrayList<KV<Integer, String>> data = new ArrayList<>();
for (int i = 0; i < rowsToAdd; i++) {
KV<Integer, String> kv = KV.of(i, "Test");
data.add(kv);
}
pipeline
.apply(Create.of(data))
.apply(
JdbcIO.<KV<Integer, String>>write()
.withDataSourceConfiguration(
JdbcIO.DataSourceConfiguration.create(
"org.apache.derby.jdbc.ClientDriver",
"jdbc:derby:
.withStatement(String.format("insert into %s values(?, ?)", tableName))
.withBatchSize(10L)
.withPreparedStatementSetter(
(element, statement) -> {
statement.setInt(1, element.getKey());
statement.setString(2, element.getValue());
}));
pipeline.run();
try (Connection connection = dataSource.getConnection()) {
try (Statement statement = connection.createStatement()) {
try (ResultSet resultSet = statement.executeQuery("select count(*) from "
+ tableName)) {
resultSet.next();
int count = resultSet.getInt(1);
Assert.assertEquals(EXPECTED_ROW_COUNT, count);
}
}
}
} finally {
DatabaseTestHelper.deleteTable(dataSource, tableName);
}
}
@Test
@After
public void tearDown() {
try {
DatabaseTestHelper.deleteTable(dataSource, BACKOFF_TABLE);
} catch (Exception e) {
}
}
@Test
public void testWriteWithEmptyPCollection() throws Exception {
pipeline
.apply(Create.empty(KvCoder.of(VarIntCoder.of(), StringUtf8Coder.of())))
.apply(
JdbcIO.<KV<Integer, String>>write()
.withDataSourceConfiguration(
JdbcIO.DataSourceConfiguration.create(
"org.apache.derby.jdbc.ClientDriver",
"jdbc:derby:
.withStatement("insert into BEAM values(?, ?)")
.withPreparedStatementSetter(
(element, statement) -> {
statement.setInt(1, element.getKey());
statement.setString(2, element.getValue());
}));
pipeline.run();
}
} |
Shouldn't this be done irrespective of whether a user-specified module init exists? The following two scenarios would produce two different sets of errors atm? Case I ```ballerina int i; function __init() { } public function main() { int j = i; } ``` Case II ```ballerina int i; public function main() { int j = i; } ``` | public void visit(BLangPackage pkgNode) {
if (pkgNode.completedPhases.contains(CompilerPhase.DATAFLOW_ANALYZE)) {
return;
}
List<TopLevelNode> sortedListOfNodes = new ArrayList<>(pkgNode.globalVars);
pkgNode.topLevelNodes.forEach(topLevelNode -> {
if (!sortedListOfNodes.contains(topLevelNode)) {
sortedListOfNodes.add(topLevelNode);
}
});
sortedListOfNodes.forEach(topLevelNode -> {
if (isModuleInitFunction((BLangNode) topLevelNode)) {
analyzeModuleInitFunc((BLangFunction) topLevelNode);
checkForUninitializedGlobalVar(pkgNode.globalVars);
} else {
analyzeNode((BLangNode) topLevelNode, env);
}
});
pkgNode.getTestablePkgs().forEach(testablePackage -> visit((BLangPackage) testablePackage));
this.globalVariableRefAnalyzer.analyzeAndReOrder(pkgNode, this.globalNodeDependsOn);
this.globalVariableRefAnalyzer.populateFunctionDependencies(this.functionToDependency);
checkUnusedImports(pkgNode.imports);
pkgNode.completedPhases.add(CompilerPhase.DATAFLOW_ANALYZE);
} | checkForUninitializedGlobalVar(pkgNode.globalVars); | public void visit(BLangPackage pkgNode) {
if (pkgNode.completedPhases.contains(CompilerPhase.DATAFLOW_ANALYZE)) {
return;
}
List<TopLevelNode> sortedListOfNodes = new ArrayList<>(pkgNode.globalVars);
addModuleInitToSortedNodeList(pkgNode, sortedListOfNodes);
addNodesToSortedNodeList(pkgNode, sortedListOfNodes);
for (TopLevelNode topLevelNode : sortedListOfNodes) {
if (isModuleInitFunction((BLangNode) topLevelNode)) {
analyzeModuleInitFunc((BLangFunction) topLevelNode);
} else {
analyzeNode((BLangNode) topLevelNode, env);
}
}
checkForUninitializedGlobalVars(pkgNode.globalVars);
pkgNode.getTestablePkgs().forEach(testablePackage -> visit((BLangPackage) testablePackage));
this.globalVariableRefAnalyzer.analyzeAndReOrder(pkgNode, this.globalNodeDependsOn);
this.globalVariableRefAnalyzer.populateFunctionDependencies(this.functionToDependency);
checkUnusedImports(pkgNode.imports);
pkgNode.completedPhases.add(CompilerPhase.DATAFLOW_ANALYZE);
} | class DataflowAnalyzer extends BLangNodeVisitor {
private final SymbolResolver symResolver;
private final Names names;
private SymbolEnv env;
private SymbolTable symTable;
private BLangDiagnosticLogHelper dlog;
private Map<BSymbol, InitStatus> uninitializedVars;
private Map<BSymbol, Set<BSymbol>> globalNodeDependsOn;
private Map<BSymbol, Set<BSymbol>> functionToDependency;
private boolean flowTerminated = false;
private static final CompilerContext.Key<DataflowAnalyzer> DATAFLOW_ANALYZER_KEY = new CompilerContext.Key<>();
private Deque<BSymbol> currDependentSymbol;
private final GlobalVariableRefAnalyzer globalVariableRefAnalyzer;
private DataflowAnalyzer(CompilerContext context) {
context.put(DATAFLOW_ANALYZER_KEY, this);
this.symTable = SymbolTable.getInstance(context);
this.dlog = BLangDiagnosticLogHelper.getInstance(context);
this.symResolver = SymbolResolver.getInstance(context);
this.names = Names.getInstance(context);
this.currDependentSymbol = new ArrayDeque<>();
this.globalVariableRefAnalyzer = GlobalVariableRefAnalyzer.getInstance(context);
}
public static DataflowAnalyzer getInstance(CompilerContext context) {
DataflowAnalyzer dataflowAnalyzer = context.get(DATAFLOW_ANALYZER_KEY);
if (dataflowAnalyzer == null) {
dataflowAnalyzer = new DataflowAnalyzer(context);
}
return dataflowAnalyzer;
}
/**
* Perform data-flow analysis on a package.
*
* @param pkgNode Package to perform data-flow analysis.
* @return Data-flow analyzed package
*/
public BLangPackage analyze(BLangPackage pkgNode) {
this.uninitializedVars = new HashMap<>();
this.globalNodeDependsOn = new LinkedHashMap<>();
this.functionToDependency = new HashMap<>();
SymbolEnv pkgEnv = this.symTable.pkgEnvMap.get(pkgNode.symbol);
analyzeNode(pkgNode, pkgEnv);
return pkgNode;
}
@Override
private boolean isModuleInitFunction(BLangNode node) {
if (node.getKind() == NodeKind.FUNCTION &&
Names.USER_DEFINED_INIT_SUFFIX.value.equals(((BLangFunction) node).name.value)) {
return true;
}
return false;
}
private void analyzeModuleInitFunc(BLangFunction funcNode) {
this.currDependentSymbol.push(funcNode.symbol);
SymbolEnv funcEnv = SymbolEnv.createFunctionEnv(funcNode, funcNode.symbol.scope, env);
analyzeNode(funcNode.body, funcEnv);
this.currDependentSymbol.pop();
}
private void checkForUninitializedGlobalVar(List<BLangSimpleVariable> globalVars) {
for (BLangSimpleVariable globalVar : globalVars) {
if (this.uninitializedVars.containsKey(globalVar.symbol)) {
this.dlog.error(globalVar.pos, DiagnosticCode.UNINITIALIZED_MODULE_VARIABLE, globalVar.name);
}
}
}
@Override
public void visit(BLangFunction funcNode) {
this.currDependentSymbol.push(funcNode.symbol);
SymbolEnv funcEnv = SymbolEnv.createFunctionEnv(funcNode, funcNode.symbol.scope, env);
funcNode.annAttachments.forEach(bLangAnnotationAttachment -> analyzeNode(bLangAnnotationAttachment.expr, env));
funcNode.requiredParams.forEach(param -> analyzeNode(param, funcEnv));
analyzeNode(funcNode.restParam, funcEnv);
analyzeBranch(funcNode.body, funcEnv);
this.currDependentSymbol.pop();
}
@Override
public void visit(BLangBlockFunctionBody body) {
SymbolEnv bodyEnv = SymbolEnv.createFuncBodyEnv(body, env);
for (BLangStatement statement : body.stmts) {
analyzeNode(statement, bodyEnv);
}
}
@Override
public void visit(BLangExprFunctionBody body) {
SymbolEnv bodyEnv = SymbolEnv.createFuncBodyEnv(body, env);
analyzeNode(body.expr, bodyEnv);
}
@Override
public void visit(BLangExternalFunctionBody body) {
}
@Override
public void visit(BLangBlockStmt blockNode) {
SymbolEnv blockEnv = SymbolEnv.createBlockEnv(blockNode, env);
blockNode.stmts.forEach(statement -> analyzeNode(statement, blockEnv));
}
@Override
public void visit(BLangLetExpression letExpression) {
for (BLangLetVariable letVarDeclaration : letExpression.letVarDeclarations) {
analyzeNode((BLangNode) letVarDeclaration.definitionNode, letExpression.env);
}
analyzeNode(letExpression.expr, letExpression.env);
}
@Override
public void visit(BLangCompilationUnit compUnit) {
}
@Override
public void visit(BLangXMLNS xmlnsNode) {
}
@Override
public void visit(BLangService service) {
this.currDependentSymbol.push(service.serviceTypeDefinition.symbol);
for (BLangExpression attachedExpr : service.attachedExprs) {
analyzeNode(attachedExpr, env);
}
service.annAttachments.forEach(bLangAnnotationAttachment -> analyzeNode(bLangAnnotationAttachment.expr, env));
service.resourceFunctions.forEach(function -> analyzeNode(function, env));
this.currDependentSymbol.pop();
}
@Override
public void visit(BLangResource resource) {
}
@Override
public void visit(BLangTypeDefinition typeDefinition) {
analyzeNode(typeDefinition.typeNode, env);
}
@Override
public void visit(BLangSimpleVariableDef varDefNode) {
BLangVariable var = varDefNode.var;
if (var.expr == null) {
addUninitializedVar(var);
return;
}
analyzeNode(var, env);
}
@Override
public void visit(BLangSimpleVariable variable) {
analyzeNode(variable.typeNode, env);
if (variable.symbol == null) {
if (variable.expr != null) {
analyzeNode(variable.expr, env);
}
return;
}
this.currDependentSymbol.push(variable.symbol);
try {
if (variable.expr != null) {
analyzeNode(variable.expr, env);
this.uninitializedVars.remove(variable.symbol);
return;
}
BSymbol owner = variable.symbol.owner;
if (owner.tag != SymTag.PACKAGE && owner.tag != SymTag.OBJECT) {
return;
}
addUninitializedVar(variable);
} finally {
this.currDependentSymbol.pop();
}
}
@Override
public void visit(BLangWorker worker) {
SymbolEnv workerEnv = SymbolEnv.createWorkerEnv(worker, this.env);
analyzeBranch(worker.body, workerEnv);
}
@Override
public void visit(BLangEndpoint endpoint) {
analyzeNode(endpoint.configurationExpr, env);
}
@Override
public void visit(BLangAssignment assignment) {
analyzeNode(assignment.expr, env);
checkAssignment(assignment.varRef);
}
@Override
public void visit(BLangCompoundAssignment compoundAssignNode) {
analyzeNode(compoundAssignNode.expr, env);
analyzeNode(compoundAssignNode.varRef, env);
this.uninitializedVars.remove(compoundAssignNode.varRef.symbol);
}
@Override
public void visit(BLangBreak breakNode) {
terminateFlow();
}
@Override
public void visit(BLangReturn returnNode) {
analyzeNode(returnNode.expr, env);
terminateFlow();
}
@Override
public void visit(BLangThrow throwNode) {
analyzeNode(throwNode.expr, env);
terminateFlow();
}
@Override
public void visit(BLangXMLNSStatement xmlnsStmt) {
analyzeNode(xmlnsStmt.xmlnsDecl, env);
}
@Override
public void visit(BLangIf ifNode) {
analyzeNode(ifNode.expr, env);
BranchResult ifResult = analyzeBranch(ifNode.body, env);
BranchResult elseResult = analyzeBranch(ifNode.elseStmt, env);
if (ifResult.flowTerminated) {
this.uninitializedVars = elseResult.uninitializedVars;
return;
}
if (elseResult.flowTerminated) {
this.uninitializedVars = ifResult.uninitializedVars;
return;
}
this.uninitializedVars = mergeUninitializedVars(ifResult.uninitializedVars, elseResult.uninitializedVars);
}
@Override
public void visit(BLangMatch match) {
analyzeNode(match.expr, env);
Map<BSymbol, InitStatus> uninitVars = new HashMap<>();
BranchResult lastPatternResult = null;
for (BLangMatch.BLangMatchBindingPatternClause patternClause : match.patternClauses) {
if (patternClause.isLastPattern) {
lastPatternResult = analyzeBranch(patternClause, env);
} else {
BranchResult result = analyzeBranch(patternClause, env);
if (result.flowTerminated) {
continue;
}
uninitVars = mergeUninitializedVars(uninitVars, result.uninitializedVars);
}
}
if (lastPatternResult != null) {
uninitVars = mergeUninitializedVars(uninitVars, lastPatternResult.uninitializedVars);
this.uninitializedVars = uninitVars;
return;
}
uninitVars = mergeUninitializedVars(new HashMap<>(), this.uninitializedVars);
this.uninitializedVars = uninitVars;
}
// --- Statement / expression visitors: each analyzes child nodes for dataflow. ---

@Override
public void visit(BLangForeach foreach) {
    // Analyze the iterated collection first, then the loop body.
    analyzeNode(foreach.collection, env);
    analyzeNode(foreach.body, env);
}

@Override
public void visit(BLangQueryAction queryAction) {
    // Visit every from clause, then every where clause, then the do clause.
    for (FromClauseNode fromClauseNode : queryAction.fromClauseList) {
        analyzeNode((BLangFromClause) fromClauseNode, env);
    }
    for (WhereClauseNode whereClauseNode : queryAction.whereClauseList) {
        analyzeNode((BLangWhereClause) whereClauseNode, env);
    }
    analyzeNode(queryAction.doClause, env);
}

@Override
public void visit(BLangWhile whileNode) {
    // The body may run zero times, so variables initialized only inside it are
    // downgraded to PARTIAL_INIT afterwards.
    Map<BSymbol, InitStatus> prevUninitializedVars = this.uninitializedVars;
    analyzeNode(whileNode.expr, env);
    analyzeNode(whileNode.body, env);
    for (BSymbol symbol : prevUninitializedVars.keySet()) {
        if (!this.uninitializedVars.containsKey(symbol)) {
            this.uninitializedVars.put(symbol, InitStatus.PARTIAL_INIT);
        }
    }
}

@Override
public void visit(BLangLock lockNode) {
    analyzeNode(lockNode.body, this.env);
}

@Override
public void visit(BLangTransaction transactionNode) {
    // Analyze all transaction sub-bodies, then force-resolve the transaction
    // package prefix so its symbol is marked as used.
    analyzeNode(transactionNode.transactionBody, env);
    analyzeNode(transactionNode.onRetryBody, env);
    analyzeNode(transactionNode.committedBody, env);
    analyzeNode(transactionNode.abortedBody, env);
    Name transactionPkgName = names.fromString(Names.DOT.value + Names.TRANSACTION_PACKAGE.value);
    Name compUnitName = names.fromString(transactionNode.pos.getSource().getCompilationUnitName());
    this.symResolver.resolvePrefixSymbol(env, transactionPkgName, compUnitName);
}

@Override
public void visit(BLangTryCatchFinally tryNode) {
    // Intentionally empty: no dataflow handling for try/catch/finally here.
}

@Override
public void visit(BLangTupleDestructure stmt) {
    analyzeNode(stmt.expr, env);
    checkAssignment(stmt.varRef);
}

@Override
public void visit(BLangForkJoin forkJoin) {
    /* ignore */
}

@Override
public void visit(BLangWorkerSend workerSendNode) {
    analyzeNode(workerSendNode.expr, env);
}

@Override
public void visit(BLangWorkerSyncSendExpr syncSendExpr) {
    analyzeNode(syncSendExpr.expr, env);
}

@Override
public void visit(BLangWorkerReceive workerReceiveNode) {
}

@Override
public void visit(BLangLiteral literalExpr) {
}

@Override
public void visit(BLangListConstructorExpr listConstructorExpr) {
    listConstructorExpr.exprs.forEach(expr -> analyzeNode(expr, env));
}

@Override
public void visit(BLangRecordLiteral recordLiteral) {
    // Key-value fields: computed keys are expressions and must be analyzed too.
    for (RecordLiteralNode.RecordField field : recordLiteral.fields) {
        if (field.isKeyValueField()) {
            BLangRecordLiteral.BLangRecordKeyValueField keyValuePair =
                    (BLangRecordLiteral.BLangRecordKeyValueField) field;
            if (keyValuePair.key.computedKey) {
                analyzeNode(keyValuePair.key.expr, env);
            }
            analyzeNode(keyValuePair.valueExpr, env);
        } else if (field.getKind() == NodeKind.SIMPLE_VARIABLE_REF) {
            analyzeNode((BLangRecordLiteral.BLangRecordVarNameField) field, env);
        } else {
            analyzeNode(((BLangRecordLiteral.BLangRecordSpreadOperatorField) field).expr, env);
        }
    }
}

@Override
public void visit(BLangSimpleVarRef varRefExpr) {
    checkVarRef(varRefExpr.symbol, varRefExpr.pos);
}

@Override
public void visit(BLangFieldBasedAccess fieldAccessExpr) {
    // Reading `self.field` counts as a variable read; writes are handled in
    // checkAssignment (lhsVar is true for write positions).
    if (!fieldAccessExpr.lhsVar && isObjectMemberAccessWithSelf(fieldAccessExpr)) {
        checkVarRef(fieldAccessExpr.symbol, fieldAccessExpr.pos);
    }
    analyzeNode(fieldAccessExpr.expr, env);
}

@Override
public void visit(BLangIndexBasedAccess indexAccessExpr) {
    analyzeNode(indexAccessExpr.expr, env);
    analyzeNode(indexAccessExpr.indexExpr, env);
}

@Override
public void visit(BLangXMLElementAccess xmlElementAccess) {
    analyzeNode(xmlElementAccess.expr, env);
}
@Override
public void visit(BLangXMLNavigationAccess xmlNavigation) {
    analyzeNode(xmlNavigation.expr, env);
    // BUG FIX: the original guarded with `childIndex == null`, which made the
    // call a no-op (analyzeNode ignores null nodes) while skipping analysis of
    // a present child index. Analyze the index expression when it exists.
    if (xmlNavigation.childIndex != null) {
        analyzeNode(xmlNavigation.childIndex, env);
    }
}
@Override
public void visit(BLangInvocation invocationExpr) {
    analyzeNode(invocationExpr.expr, env);
    // Abort early if `self` is passed (as receiver or argument) while object
    // fields are still uninitialized — an error has already been logged.
    if (!isFieldsInitializedForSelfArgument(invocationExpr)) {
        return;
    }
    if (!isFieldsInitializedForSelfInvocation(invocationExpr.requiredArgs, invocationExpr.pos)) {
        return;
    }
    if (!isFieldsInitializedForSelfInvocation(invocationExpr.restArgs, invocationExpr.pos)) {
        return;
    }
    invocationExpr.requiredArgs.forEach(expr -> analyzeNode(expr, env));
    invocationExpr.restArgs.forEach(expr -> analyzeNode(expr, env));
    // Record call-graph dependency edges used for global-variable analysis.
    BSymbol owner = this.env.scope.owner;
    if (owner.kind == SymbolKind.FUNCTION) {
        BInvokableSymbol invokableOwnerSymbol = (BInvokableSymbol) owner;
        Name name = names.fromIdNode(invocationExpr.name);
        BSymbol dependsOnFunctionSym = symResolver.lookupSymbolInMainSpace(this.env, name);
        if (symTable.notFoundSymbol != dependsOnFunctionSym) {
            addDependency(invokableOwnerSymbol, dependsOnFunctionSym);
        }
    } else if (invocationExpr.symbol != null && invocationExpr.symbol.kind == SymbolKind.FUNCTION) {
        // A global variable initializer invoking a function depends on it.
        BInvokableSymbol invokableProviderSymbol = (BInvokableSymbol) invocationExpr.symbol;
        BSymbol curDependent = this.currDependentSymbol.peek();
        if (curDependent != null && isGlobalVarSymbol(curDependent)) {
            addDependency(curDependent, invokableProviderSymbol);
        }
    }
}

@Override
public void visit(BLangQueryExpr queryExpr) {
    // from clauses, then where clauses, then the select clause.
    for (FromClauseNode fromClauseNode : queryExpr.fromClauseList) {
        analyzeNode((BLangFromClause) fromClauseNode, env);
    }
    for (WhereClauseNode whereClauseNode : queryExpr.whereClauseList) {
        analyzeNode((BLangWhereClause) whereClauseNode, env);
    }
    analyzeNode(queryExpr.selectClause, env);
}

@Override
public void visit(BLangFromClause fromClause) {
    analyzeNode(fromClause.collection, env);
}

@Override
public void visit(BLangWhereClause whereClause) {
    analyzeNode(whereClause.expression, env);
}

@Override
public void visit(BLangSelectClause selectClause) {
    analyzeNode(selectClause.expression, env);
}

@Override
public void visit(BLangDoClause doClause) {
    analyzeNode(doClause.body, env);
}
// Reports an error if the invocation's receiver is `self` while object fields
// remain uninitialized. Returns false when an error was reported.
private boolean isFieldsInitializedForSelfArgument(BLangInvocation invocationExpr) {
    if (invocationExpr.expr == null || !isSelfKeyWordExpr(invocationExpr.expr)) {
        return true;
    }
    StringBuilder uninitializedFields =
            getUninitializedFieldsForSelfKeyword((BObjectType) ((BLangSimpleVarRef)
                    invocationExpr.expr).symbol.type);
    if (uninitializedFields.length() != 0) {
        this.dlog.error(invocationExpr.pos, DiagnosticCode.CONTAINS_UNINITIALIZED_FIELDS,
                uninitializedFields.toString());
        return false;
    }
    return true;
}

// Same check as above, but for `self` appearing in an argument list.
private boolean isFieldsInitializedForSelfInvocation(List<BLangExpression> argExpressions, DiagnosticPos pos) {
    for (BLangExpression expr : argExpressions) {
        if (isSelfKeyWordExpr(expr)) {
            StringBuilder uninitializedFields =
                    getUninitializedFieldsForSelfKeyword((BObjectType) ((BLangSimpleVarRef) expr).symbol.type);
            if (uninitializedFields.length() != 0) {
                this.dlog.error(pos, DiagnosticCode.CONTAINS_UNINITIALIZED_FIELDS,
                        uninitializedFields.toString());
                return false;
            }
        }
    }
    return true;
}

// True when the expression is a simple variable reference named `self`.
private boolean isSelfKeyWordExpr(BLangExpression expr) {
    return expr.getKind() == NodeKind.SIMPLE_VARIABLE_REF &&
            Names.SELF.value.equals(((BLangSimpleVarRef) expr).getVariableName().getValue());
}
/**
 * Builds a comma-separated list of this object's fields that are still
 * uninitialized, for use in diagnostics. Empty when every field is initialized.
 */
private StringBuilder getUninitializedFieldsForSelfKeyword(BObjectType objType) {
    StringBuilder uninitializedFields = new StringBuilder();
    for (BField field : objType.fields) {
        if (!this.uninitializedVars.containsKey(field.symbol)) {
            continue;
        }
        // Separator goes before every entry except the first.
        if (uninitializedFields.length() > 0) {
            uninitializedFields.append(", ");
        }
        uninitializedFields.append(field.symbol.getName().value);
    }
    return uninitializedFields;
}
// A symbol is a global variable when it is a variable/constant owned directly
// by a package.
private boolean isGlobalVarSymbol(BSymbol symbol) {
    if (symbol == null || symbol.owner == null || symbol.owner.tag != SymTag.PACKAGE) {
        return false;
    }
    return isVariableOrConstant(symbol);
}

// True when the symbol's tag marks it as a variable or a constant.
private boolean isVariableOrConstant(BSymbol symbol) {
    if (symbol == null) {
        return false;
    }
    boolean isVariable = (symbol.tag & SymTag.VARIABLE) == SymTag.VARIABLE;
    boolean isConstant = (symbol.tag & SymTag.CONSTANT) == SymTag.CONSTANT;
    return isVariable || isConstant;
}
/**
 * Register dependent symbol to the provider symbol.
 * Let global int a = b, a depend on b.
 * Let func foo() { returns b + 1; }, where b is a global var, then foo depends on b.
 * Edges are only recorded within a single package.
 *
 * @param dependent dependent.
 * @param provider object which provides a value.
 */
private void addDependency(BSymbol dependent, BSymbol provider) {
    // NOTE(review): pkgID is compared by reference here; this assumes package
    // IDs are interned per compilation — confirm.
    if (provider == null || dependent == null || dependent.pkgID != provider.pkgID) {
        return;
    }
    Set<BSymbol> providers = globalNodeDependsOn.computeIfAbsent(dependent, s -> new LinkedHashSet<>());
    providers.add(provider);
    addFunctionToGlobalVarDependency(dependent, provider);
}

// Additionally records function -> global-variable/constant dependencies,
// skipping providers that are variables but not globals (i.e. locals).
private void addFunctionToGlobalVarDependency(BSymbol dependent, BSymbol provider) {
    if (dependent.kind != SymbolKind.FUNCTION) {
        return;
    }
    if (isVariableOrConstant(provider) && !isGlobalVarSymbol(provider)) {
        return;
    }
    Set<BSymbol> providers = this.functionToDependency.computeIfAbsent(dependent, s -> new HashSet<>());
    providers.add(provider);
}
// --- Expression visitors (continued): mostly plain child traversal. ---

@Override
public void visit(BLangTypeInit typeInitExpr) {
    typeInitExpr.argsExpr.forEach(argExpr -> analyzeNode(argExpr, env));
    // `new T(...)` makes the current dependent symbol depend on type T.
    if (this.currDependentSymbol.peek() != null) {
        addDependency(this.currDependentSymbol.peek(), typeInitExpr.type.tsymbol);
    }
}

@Override
public void visit(BLangTernaryExpr ternaryExpr) {
    // NOTE(review): only the condition expression is analyzed; the then/else
    // expressions are not visited here — confirm whether that is intentional.
    analyzeNode(ternaryExpr.expr, env);
}

@Override
public void visit(BLangWaitExpr waitExpr) {
    analyzeNode(waitExpr.getExpression(), env);
}

@Override
public void visit(BLangWorkerFlushExpr workerFlushExpr) {
}

@Override
public void visit(BLangWaitForAllExpr waitForAllExpr) {
    // Analyze each pair's value expression, falling back to the key expression
    // when no explicit value is given.
    waitForAllExpr.keyValuePairs.forEach(keyValue -> {
        BLangExpression expr = keyValue.valueExpr != null ? keyValue.valueExpr : keyValue.keyExpr;
        analyzeNode(expr, env);
    });
}

@Override
public void visit(BLangBinaryExpr binaryExpr) {
    analyzeNode(binaryExpr.lhsExpr, env);
    analyzeNode(binaryExpr.rhsExpr, env);
}

@Override
public void visit(BLangElvisExpr elvisExpr) {
    analyzeNode(elvisExpr.lhsExpr, env);
    analyzeNode(elvisExpr.rhsExpr, env);
}

@Override
public void visit(BLangGroupExpr groupExpr) {
    analyzeNode(groupExpr.expression, env);
}

@Override
public void visit(BLangUnaryExpr unaryExpr) {
    analyzeNode(unaryExpr.expr, env);
}

@Override
public void visit(BLangTypeConversionExpr conversionExpr) {
    analyzeNode(conversionExpr.expr, env);
}

@Override
public void visit(BLangXMLAttribute xmlAttribute) {
    analyzeNode(xmlAttribute.value, env);
}

@Override
public void visit(BLangXMLElementLiteral xmlElementLiteral) {
    xmlElementLiteral.children.forEach(expr -> analyzeNode(expr, env));
    xmlElementLiteral.attributes.forEach(expr -> analyzeNode(expr, env));
    xmlElementLiteral.inlineNamespaces.forEach(expr -> analyzeNode(expr, env));
}

@Override
public void visit(BLangXMLTextLiteral xmlTextLiteral) {
    xmlTextLiteral.textFragments.forEach(expr -> analyzeNode(expr, env));
}

@Override
public void visit(BLangXMLCommentLiteral xmlCommentLiteral) {
    xmlCommentLiteral.textFragments.forEach(expr -> analyzeNode(expr, env));
}

@Override
public void visit(BLangXMLProcInsLiteral xmlProcInsLiteral) {
    xmlProcInsLiteral.dataFragments.forEach(expr -> analyzeNode(expr, env));
}

@Override
public void visit(BLangXMLQuotedString xmlQuotedString) {
    xmlQuotedString.textFragments.forEach(expr -> analyzeNode(expr, env));
}

@Override
public void visit(BLangStringTemplateLiteral stringTemplateLiteral) {
    stringTemplateLiteral.exprs.forEach(expr -> analyzeNode(expr, env));
}

@Override
public void visit(BLangLambdaFunction bLangLambdaFunction) {
    // Analyze the lambda body against a copy of the uninitialized set so the
    // lambda cannot change the enclosing scope's dataflow state.
    Map<BSymbol, InitStatus> prevUninitializedVars = this.uninitializedVars;
    BLangFunction funcNode = bLangLambdaFunction.function;
    SymbolEnv funcEnv = SymbolEnv.createFunctionEnv(funcNode, funcNode.symbol.scope, env);
    this.uninitializedVars = copyUninitializedVars();
    this.flowTerminated = false;
    analyzeNode(funcNode.body, funcEnv);
    this.uninitializedVars = prevUninitializedVars;
}

@Override
public void visit(BLangXMLAttributeAccess xmlAttributeAccessExpr) {
    analyzeNode(xmlAttributeAccessExpr.expr, env);
    analyzeNode(xmlAttributeAccessExpr.indexExpr, env);
}

@Override
public void visit(BLangIntRangeExpression intRangeExpression) {
    analyzeNode(intRangeExpression.startExpr, env);
    analyzeNode(intRangeExpression.endExpr, env);
}

@Override
public void visit(BLangRestArgsExpression bLangVarArgsExpression) {
    analyzeNode(bLangVarArgsExpression.expr, env);
}

@Override
public void visit(BLangNamedArgsExpression bLangNamedArgsExpression) {
    analyzeNode(bLangNamedArgsExpression.expr, env);
}

@Override
public void visit(BLangIsAssignableExpr assignableExpr) {
}

@Override
public void visit(BLangMatchExpression matchExpression) {
    analyzeNode(matchExpression.expr, env);
    matchExpression.patternClauses.forEach(pattern -> analyzeNode(pattern, env));
}

@Override
public void visit(BLangMatchExprPatternClause matchExprPatternClause) {
    analyzeNode(matchExprPatternClause.expr, env);
}

@Override
public void visit(BLangCheckedExpr checkedExpr) {
    analyzeNode(checkedExpr.expr, env);
}

@Override
public void visit(BLangCheckPanickedExpr checkPanicExpr) {
    analyzeNode(checkPanicExpr.expr, env);
}

@Override
public void visit(BLangXMLSequenceLiteral bLangXMLSequenceLiteral) {
    bLangXMLSequenceLiteral.xmlItems.forEach(xml -> analyzeNode(xml, env));
}

@Override
public void visit(BLangExpressionStmt exprStmtNode) {
    analyzeNode(exprStmtNode.expr, env);
}

// The following node kinds carry no dataflow information; bodies are empty.

@Override
public void visit(BLangAnnotation annotationNode) {
}

@Override
public void visit(BLangAnnotationAttachment annAttachmentNode) {
}

@Override
public void visit(BLangAbort abortNode) {
}

@Override
public void visit(BLangRetry retryNode) {
}

@Override
public void visit(BLangContinue continueNode) {
    // `continue` ends the current flow path.
    terminateFlow();
}

@Override
public void visit(BLangCatch catchNode) {
}

@Override
public void visit(BLangActionInvocation actionInvocationExpr) {
}

@Override
public void visit(BLangTypedescExpr accessExpr) {
}

@Override
public void visit(BLangXMLQName xmlQName) {
}
@Override
public void visit(BLangArrowFunction bLangArrowFunction) {
    // Every closure variable captured by an arrow function must already be
    // initialized at the point of capture.
    bLangArrowFunction.closureVarSymbols.forEach(closureVarSymbol -> {
        // containsKey instead of the original keySet().contains — same result,
        // clearer intent and a direct hash lookup.
        if (this.uninitializedVars.containsKey(closureVarSymbol.bSymbol)) {
            this.dlog.error(closureVarSymbol.diagnosticPos, DiagnosticCode.UNINITIALIZED_VARIABLE,
                    closureVarSymbol.bSymbol);
        }
    });
}
@Override
public void visit(BLangValueType valueType) {
}

@Override
public void visit(BLangConstant constant) {
    // The constant's symbol is the dependent while analyzing its value.
    boolean validVariable = constant.symbol != null;
    if (validVariable) {
        this.currDependentSymbol.push(constant.symbol);
    }
    try {
        analyzeNode(constant.expr, env);
    } finally {
        if (validVariable) {
            this.currDependentSymbol.pop();
        }
    }
}

@Override
public void visit(BLangArrayType arrayType) {
    analyzeNode(arrayType.getElementType(), env);
}

@Override
public void visit(BLangBuiltInRefTypeNode builtInRefType) {
}

@Override
public void visit(BLangConstrainedType constrainedType) {
    analyzeNode(constrainedType.constraint, env);
}

@Override
public void visit(BLangStreamType streamType) {
    analyzeNode(streamType.constraint, env);
    analyzeNode(streamType.error, env);
}

@Override
public void visit(BLangUserDefinedType userDefinedType) {
}

@Override
public void visit(BLangFunctionTypeNode functionTypeNode) {
    functionTypeNode.params.forEach(param -> analyzeNode(param.typeNode, env));
    analyzeNode(functionTypeNode.returnTypeNode, env);
}

@Override
public void visit(BLangUnionTypeNode unionTypeNode) {
    unionTypeNode.memberTypeNodes.forEach(typeNode -> analyzeNode(typeNode, env));
}

@Override
public void visit(BLangObjectTypeNode objectTypeNode) {
    // Analyze fields, run the init function's statements in the type scope so
    // field assignments there count as initialization, then require every
    // non-private field of a non-abstract object to be initialized.
    SymbolEnv objectEnv = SymbolEnv.createTypeEnv(objectTypeNode, objectTypeNode.symbol.scope, env);
    this.currDependentSymbol.push(objectTypeNode.symbol);
    objectTypeNode.fields.forEach(field -> analyzeNode(field, objectEnv));
    objectTypeNode.referencedFields.forEach(field -> analyzeNode(field, objectEnv));
    if (objectTypeNode.initFunction != null) {
        if (objectTypeNode.initFunction.body == null) {
            // The init body may live in an outer function definition with the
            // same symbol name; adopt it when found.
            Optional<BLangFunction> outerFuncDef =
                    objectEnv.enclPkg.functions.stream()
                            .filter(f -> f.symbol.name.equals((objectTypeNode.initFunction).symbol.name))
                            .findFirst();
            outerFuncDef.ifPresent(bLangFunction -> objectTypeNode.initFunction = bLangFunction);
        }
        if (objectTypeNode.initFunction.body != null) {
            if (objectTypeNode.initFunction.body.getKind() == NodeKind.BLOCK_FUNCTION_BODY) {
                for (BLangStatement statement :
                        ((BLangBlockFunctionBody) objectTypeNode.initFunction.body).stmts) {
                    analyzeNode(statement, objectEnv);
                }
            } else if (objectTypeNode.initFunction.body.getKind() == NodeKind.EXPR_FUNCTION_BODY) {
                analyzeNode(((BLangExprFunctionBody) objectTypeNode.initFunction.body).expr, objectEnv);
            }
        }
    }
    if (!Symbols.isFlagOn(objectTypeNode.symbol.flags, Flags.ABSTRACT)) {
        Stream.concat(objectTypeNode.fields.stream(), objectTypeNode.referencedFields.stream())
                .filter(field -> !Symbols.isPrivate(field.symbol))
                .forEach(field -> {
                    if (this.uninitializedVars.containsKey(field.symbol)) {
                        this.dlog.error(field.pos, DiagnosticCode.OBJECT_UNINITIALIZED_FIELD, field.name);
                    }
                });
    }
    objectTypeNode.functions.forEach(function -> analyzeNode(function, env));
    objectTypeNode.getTypeReferences().forEach(type -> analyzeNode((BLangType) type, env));
    this.currDependentSymbol.pop();
}

@Override
public void visit(BLangRecordTypeNode recordTypeNode) {
    recordTypeNode.getTypeReferences().forEach(type -> analyzeNode((BLangType) type, env));
    recordTypeNode.fields.forEach(field -> analyzeNode(field, env));
}

@Override
public void visit(BLangFiniteTypeNode finiteTypeNode) {
    finiteTypeNode.valueSpace.forEach(value -> analyzeNode(value, env));
}

@Override
public void visit(BLangTupleTypeNode tupleTypeNode) {
    tupleTypeNode.memberTypeNodes.forEach(type -> analyzeNode(type, env));
}

// Markdown documentation nodes carry no dataflow; bodies are empty.

@Override
public void visit(BLangMarkdownDocumentationLine bLangMarkdownDocumentationLine) {
}

@Override
public void visit(BLangMarkdownParameterDocumentation bLangDocumentationParameter) {
}

@Override
public void visit(BLangMarkdownReturnParameterDocumentation bLangMarkdownReturnParameterDocumentation) {
}

@Override
public void visit(BLangMarkdownDocumentation bLangMarkdownDocumentation) {
}

@Override
public void visit(BLangTestablePackage testablePkgNode) {
}

@Override
public void visit(BLangImportPackage importPkgNode) {
}

@Override
public void visit(BLangIdentifier identifierNode) {
}

@Override
public void visit(BLangPanic panicNode) {
    analyzeNode(panicNode.expr, env);
    // Panic ends the current flow path.
    terminateFlow();
}

@Override
public void visit(BLangTrapExpr trapExpr) {
    analyzeNode(trapExpr.expr, env);
}
// Consistency fix: added the @Override annotation that every sibling visit
// method carries; this method overrides the corresponding BLangNodeVisitor hook.
@Override
public void visit(BLangServiceConstructorExpr serviceConstructorExpr) {
    if (this.currDependentSymbol.peek() != null) {
        addDependency(this.currDependentSymbol.peek(), serviceConstructorExpr.type.tsymbol);
    }
    // The service type depends on the service declaration's symbol.
    addDependency(serviceConstructorExpr.type.tsymbol, serviceConstructorExpr.serviceNode.symbol);
    analyzeNode(serviceConstructorExpr.serviceNode, env);
}
@Override
public void visit(BLangTypeTestExpr typeTestExpr) {
    analyzeNode(typeTestExpr.expr, env);
    analyzeNode(typeTestExpr.typeNode, env);
}

@Override
public void visit(BLangAnnotAccessExpr annotAccessExpr) {
}

@Override
public void visit(BLangErrorType errorType) {
}

@Override
public void visit(BLangRecordDestructure recordDestructure) {
    analyzeNode(recordDestructure.expr, env);
    checkAssignment(recordDestructure.varRef);
}

@Override
public void visit(BLangErrorDestructure errorDestructure) {
    analyzeNode(errorDestructure.expr, env);
    checkAssignment(errorDestructure.varRef);
}

@Override
public void visit(BLangTupleVarRef tupleVarRefExpr) {
    tupleVarRefExpr.expressions.forEach(expr -> analyzeNode(expr, env));
}

@Override
public void visit(BLangRecordVarRef varRefExpr) {
    varRefExpr.recordRefFields.forEach(expr -> analyzeNode(expr.variableReference, env));
}

@Override
public void visit(BLangErrorVarRef varRefExpr) {
    analyzeNode(varRefExpr.reason, env);
    for (BLangNamedArgsExpression args : varRefExpr.detail) {
        analyzeNode(args.expr, env);
    }
    analyzeNode(varRefExpr.restVar, env);
}

@Override
public void visit(BLangTupleVariable bLangTupleVariable) {
    analyzeNode(bLangTupleVariable.typeNode, env);
}

@Override
public void visit(BLangTupleVariableDef bLangTupleVariableDef) {
    BLangVariable var = bLangTupleVariableDef.var;
    if (var.expr == null) {
        addUninitializedVar(var);
        // NOTE(review): this return is redundant — nothing follows it.
        return;
    }
}

@Override
public void visit(BLangRecordVariable bLangRecordVariable) {
    analyzeNode(bLangRecordVariable.typeNode, env);
}

@Override
public void visit(BLangRecordVariableDef bLangRecordVariableDef) {
    BLangVariable var = bLangRecordVariableDef.var;
    if (var.expr == null) {
        addUninitializedVar(var);
    }
}

@Override
public void visit(BLangErrorVariable bLangErrorVariable) {
    analyzeNode(bLangErrorVariable.typeNode, env);
}

@Override
public void visit(BLangErrorVariableDef bLangErrorVariableDef) {
    BLangVariable var = bLangErrorVariableDef.errorVariable;
    if (var.expr == null) {
        addUninitializedVar(var);
    }
}

@Override
public void visit(BLangMatchStaticBindingPatternClause bLangMatchStaticBindingPatternClause) {
    analyzeNode(bLangMatchStaticBindingPatternClause.body, env);
}

@Override
public void visit(BLangMatchStructuredBindingPatternClause bLangMatchStructuredBindingPatternClause) {
    analyzeNode(bLangMatchStructuredBindingPatternClause.body, env);
}

// Records a variable as uninitialized unless it is already tracked.
private void addUninitializedVar(BLangVariable variable) {
    if (!this.uninitializedVars.containsKey(variable.symbol)) {
        this.uninitializedVars.put(variable.symbol, InitStatus.UN_INIT);
    }
}
/**
 * Analyzes a branch (e.g. an if/else arm or a match-pattern body) in isolation
 * and returns the uninitialized-variable set that branch alone produces.
 * The analyzer's current state is saved up front and restored before returning,
 * so this method never mutates the caller-visible uninitialized-variable set.
 *
 * @param node branch node to be analyzed
 * @param env  symbol environment for the branch
 * @return the branch's uninitialized variables and flow-termination flag
 */
private BranchResult analyzeBranch(BLangNode node, SymbolEnv env) {
    Map<BSymbol, InitStatus> savedUninitVars = this.uninitializedVars;
    boolean savedFlowTerminated = this.flowTerminated;
    this.uninitializedVars = copyUninitializedVars();
    this.flowTerminated = false;
    analyzeNode(node, env);
    BranchResult branchResult = new BranchResult(this.uninitializedVars, this.flowTerminated);
    this.uninitializedVars = savedUninitVars;
    this.flowTerminated = savedFlowTerminated;
    return branchResult;
}
// Snapshot of the current uninitialized-variable map.
private Map<BSymbol, InitStatus> copyUninitializedVars() {
    return new HashMap<>(this.uninitializedVars);
}

// Visits `node` under `env`, restoring the previous environment afterwards.
// Null nodes are ignored.
private void analyzeNode(BLangNode node, SymbolEnv env) {
    SymbolEnv prevEnv = this.env;
    this.env = env;
    if (node != null) {
        node.accept(this);
    }
    this.env = prevEnv;
}
/**
 * Merges the uninitialized-variable maps of two branches.
 * A variable uninitialized in both branches keeps its status (PARTIAL_INIT wins
 * over UN_INIT); a variable uninitialized in only one branch becomes PARTIAL_INIT.
 *
 * @param firstUninitVars  uninitialized vars of the first branch.
 * @param secondUninitVars uninitialized vars of the second branch.
 * @return merged uninitialized-variable map.
 */
private Map<BSymbol, InitStatus> mergeUninitializedVars(Map<BSymbol, InitStatus> firstUninitVars,
                                                        Map<BSymbol, InitStatus> secondUninitVars) {
    // Use a HashSet so the membership test below is O(1); the original built an
    // ArrayList, making the collector's contains() an O(n) scan per entry.
    Set<BSymbol> intersection = new HashSet<>(firstUninitVars.keySet());
    intersection.retainAll(secondUninitVars.keySet());
    return Stream.concat(firstUninitVars.entrySet().stream(), secondUninitVars.entrySet().stream())
            .collect(Collectors.toMap(Map.Entry::getKey,
                    // Present in only one branch => partially initialized.
                    entry -> intersection.contains(entry.getKey()) ? entry.getValue() : InitStatus.PARTIAL_INIT,
                    (a, b) -> {
                        if (a == InitStatus.PARTIAL_INIT || b == InitStatus.PARTIAL_INIT) {
                            return InitStatus.PARTIAL_INIT;
                        }
                        return InitStatus.UN_INIT;
                    }));
}
// Reports an error when a referenced variable is (partially) uninitialized and
// records global-variable dependency relationships.
private void checkVarRef(BSymbol symbol, DiagnosticPos pos) {
    recordGlobalVariableReferenceRelationship(symbol);
    InitStatus initStatus = this.uninitializedVars.get(symbol);
    if (initStatus == null) {
        // Fully initialized; nothing to report.
        return;
    }
    if (initStatus == InitStatus.UN_INIT) {
        this.dlog.error(pos, DiagnosticCode.UNINITIALIZED_VARIABLE, symbol.name);
        return;
    }
    this.dlog.error(pos, DiagnosticCode.PARTIALLY_INITIALIZED_VARIABLE, symbol.name);
}

// Adds a dependency edge from the current dependent (package-level node,
// function, or object) to a referenced global variable.
private void recordGlobalVariableReferenceRelationship(BSymbol symbol) {
    BSymbol ownerSymbol = this.env.scope.owner;
    boolean isInPkgLevel = ownerSymbol.getKind() == SymbolKind.PACKAGE;
    if (isInPkgLevel && isGlobalVarSymbol(symbol)) {
        BSymbol dependent = this.currDependentSymbol.peek();
        addDependency(dependent, symbol);
    } else if (ownerSymbol.kind == SymbolKind.FUNCTION && isGlobalVarSymbol(symbol)) {
        BInvokableSymbol invokableOwnerSymbol = (BInvokableSymbol) ownerSymbol;
        addDependency(invokableOwnerSymbol, symbol);
    } else if (ownerSymbol.kind == SymbolKind.OBJECT && isGlobalVarSymbol(symbol)) {
        addDependency(ownerSymbol, symbol);
    }
}

// True for `self.<member>` access expressions.
private boolean isObjectMemberAccessWithSelf(BLangAccessExpression fieldAccessExpr) {
    if (fieldAccessExpr.expr.getKind() != NodeKind.SIMPLE_VARIABLE_REF) {
        return false;
    }
    return Names.SELF.value.equals(((BLangSimpleVarRef) fieldAccessExpr.expr).variableName.value);
}

// Marks the assigned variable reference — and every ref nested inside a
// destructuring pattern — as initialized.
private void checkAssignment(BLangExpression varRef) {
    switch (varRef.getKind()) {
        case RECORD_VARIABLE_REF:
            BLangRecordVarRef recordVarRef = (BLangRecordVarRef) varRef;
            recordVarRef.recordRefFields.forEach(field -> checkAssignment(field.variableReference));
            if (recordVarRef.restParam != null) {
                checkAssignment((BLangExpression) recordVarRef.restParam);
            }
            return;
        case TUPLE_VARIABLE_REF:
            BLangTupleVarRef tupleVarRef = (BLangTupleVarRef) varRef;
            tupleVarRef.expressions.forEach(this::checkAssignment);
            if (tupleVarRef.restParam != null) {
                checkAssignment((BLangExpression) tupleVarRef.restParam);
            }
            return;
        case ERROR_VARIABLE_REF:
            BLangErrorVarRef errorVarRef = (BLangErrorVarRef) varRef;
            if (errorVarRef.reason != null) {
                checkAssignment(errorVarRef.reason);
            }
            for (BLangNamedArgsExpression expression : errorVarRef.detail) {
                checkAssignment(expression);
                this.uninitializedVars.remove(((BLangVariableReference) expression.expr).symbol);
            }
            if (errorVarRef.restVar != null) {
                checkAssignment(errorVarRef.restVar);
            }
            return;
        case INDEX_BASED_ACCESS_EXPR:
        case FIELD_BASED_ACCESS_EXPR:
            // Writing `self.field` initializes the field; any other access only
            // needs its base expression analyzed.
            if (isObjectMemberAccessWithSelf((BLangAccessExpression) varRef)) {
                this.uninitializedVars.remove(((BLangVariableReference) varRef).symbol);
            } else {
                analyzeNode(((BLangAccessExpression) varRef).expr, env);
            }
            return;
        default:
            break;
    }
    if (varRef.getKind() != NodeKind.SIMPLE_VARIABLE_REF &&
            varRef.getKind() != NodeKind.XML_ATTRIBUTE_ACCESS_EXPR) {
        return;
    }
    if (varRef.getKind() == NodeKind.SIMPLE_VARIABLE_REF) {
        // Assigning a global variable from inside a function is a dependency.
        BSymbol owner = this.env.scope.owner;
        addFunctionToGlobalVarDependency(owner, ((BLangSimpleVarRef) varRef).symbol);
    }
    this.uninitializedVars.remove(((BLangVariableReference) varRef).symbol);
}

// Marks the current flow path as terminated (code after this is unreachable).
private void terminateFlow() {
    this.flowTerminated = true;
}

// Reports an error for every import that was never used, ignoring `_` aliases.
private void checkUnusedImports(List<BLangImportPackage> imports) {
    for (BLangImportPackage importStmt : imports) {
        if (importStmt.symbol == null || importStmt.symbol.isUsed ||
                Names.IGNORE.value.equals(importStmt.alias.value)) {
            continue;
        }
        dlog.error(importStmt.pos, DiagnosticCode.UNUSED_IMPORT_MODULE, importStmt.getQualifiedPackageName());
    }
}

// Initialization status of a variable on the current control-flow path.
private enum InitStatus {
    UN_INIT, PARTIAL_INIT
}

// Result of analyzing one branch: its uninitialized variables plus whether the
// branch's flow terminated (return/panic/break/...).
private class BranchResult {
    Map<BSymbol, InitStatus> uninitializedVars;
    boolean flowTerminated;

    BranchResult(Map<BSymbol, InitStatus> uninitializedVars, boolean flowTerminated) {
        this.uninitializedVars = uninitializedVars;
        this.flowTerminated = flowTerminated;
    }
}
} | class DataflowAnalyzer extends BLangNodeVisitor {
// Symbol resolution / naming services for the current compilation.
private final SymbolResolver symResolver;
private final Names names;
// Current symbol environment; swapped in and out by analyzeNode.
private SymbolEnv env;
private SymbolTable symTable;
private BLangDiagnosticLogHelper dlog;
// Variables declared but not (fully) initialized on the current flow path.
private Map<BSymbol, InitStatus> uninitializedVars;
// Global dependency graph: node -> symbols it depends on (insertion-ordered).
private Map<BSymbol, Set<BSymbol>> globalNodeDependsOn;
// Function -> global variables/constants it reads or writes.
private Map<BSymbol, Set<BSymbol>> functionToDependency;
// True once the current flow path has terminated (return/panic/break/...).
private boolean flowTerminated = false;
// CompilerContext key for the per-compilation singleton instance.
private static final CompilerContext.Key<DataflowAnalyzer> DATAFLOW_ANALYZER_KEY = new CompilerContext.Key<>();
// Stack of symbols currently being defined; the top is the active dependent.
private Deque<BSymbol> currDependentSymbol;
private final GlobalVariableRefAnalyzer globalVariableRefAnalyzer;
private DataflowAnalyzer(CompilerContext context) {
    // Register this instance so getInstance() returns a per-context singleton.
    context.put(DATAFLOW_ANALYZER_KEY, this);
    this.symTable = SymbolTable.getInstance(context);
    this.dlog = BLangDiagnosticLogHelper.getInstance(context);
    this.symResolver = SymbolResolver.getInstance(context);
    this.names = Names.getInstance(context);
    this.currDependentSymbol = new ArrayDeque<>();
    this.globalVariableRefAnalyzer = GlobalVariableRefAnalyzer.getInstance(context);
}

// Returns the per-compilation-context singleton, creating it on first use.
public static DataflowAnalyzer getInstance(CompilerContext context) {
    DataflowAnalyzer dataflowAnalyzer = context.get(DATAFLOW_ANALYZER_KEY);
    if (dataflowAnalyzer == null) {
        dataflowAnalyzer = new DataflowAnalyzer(context);
    }
    return dataflowAnalyzer;
}

/**
 * Perform data-flow analysis on a package.
 *
 * @param pkgNode Package to perform data-flow analysis.
 * @return Data-flow analyzed package
 */
public BLangPackage analyze(BLangPackage pkgNode) {
    // Reset per-package state before walking the package tree.
    this.uninitializedVars = new HashMap<>();
    this.globalNodeDependsOn = new LinkedHashMap<>();
    this.functionToDependency = new HashMap<>();
    SymbolEnv pkgEnv = this.symTable.pkgEnvMap.get(pkgNode.symbol);
    analyzeNode(pkgNode, pkgEnv);
    return pkgNode;
}
/**
 * Adds the module init function (if present) to the sorted top-level node list
 * so that it is analyzed first.
 * Fix: the original carried a stray {@code @Override} annotation here, which is
 * invalid on a private helper that overrides nothing (compile error); removed.
 */
private void addModuleInitToSortedNodeList(BLangPackage pkgNode, List<TopLevelNode> sortedListOfNodes) {
    for (TopLevelNode node : pkgNode.topLevelNodes) {
        if (isModuleInitFunction((BLangNode) node)) {
            sortedListOfNodes.add(node);
            break;
        }
    }
}
// Appends all top-level nodes not already present, preserving their order.
private void addNodesToSortedNodeList(BLangPackage pkgNode, List<TopLevelNode> sortedListOfNodes) {
    pkgNode.topLevelNodes.forEach(topLevelNode -> {
        if (!sortedListOfNodes.contains(topLevelNode)) {
            sortedListOfNodes.add(topLevelNode);
        }
    });
}

// True when the node is the user-defined module init function.
private boolean isModuleInitFunction(BLangNode node) {
    return node.getKind() == NodeKind.FUNCTION &&
            Names.USER_DEFINED_INIT_SUFFIX.value.equals(((BLangFunction) node).name.value);
}

// Analyzes the module init function with itself as the current dependent.
private void analyzeModuleInitFunc(BLangFunction funcNode) {
    this.currDependentSymbol.push(funcNode.symbol);
    SymbolEnv moduleInitFuncEnv = SymbolEnv.createModuleInitFunctionEnv(funcNode, funcNode.symbol.scope, env);
    for (BLangAnnotationAttachment bLangAnnotationAttachment : funcNode.annAttachments) {
        analyzeNode(bLangAnnotationAttachment.expr, env);
    }
    analyzeNode(funcNode.body, moduleInitFuncEnv);
    this.currDependentSymbol.pop();
}

// Reports an error for every module-level variable left uninitialized.
private void checkForUninitializedGlobalVars(List<BLangSimpleVariable> globalVars) {
    for (BLangSimpleVariable globalVar : globalVars) {
        if (this.uninitializedVars.containsKey(globalVar.symbol)) {
            this.dlog.error(globalVar.pos, DiagnosticCode.UNINITIALIZED_VARIABLE, globalVar.name);
        }
    }
}
@Override
public void visit(BLangFunction funcNode) {
    // Analyze parameters, then the body as an isolated branch so a function's
    // body cannot change the caller's uninitialized-variable state.
    this.currDependentSymbol.push(funcNode.symbol);
    SymbolEnv funcEnv = SymbolEnv.createFunctionEnv(funcNode, funcNode.symbol.scope, env);
    funcNode.annAttachments.forEach(bLangAnnotationAttachment -> analyzeNode(bLangAnnotationAttachment.expr, env));
    funcNode.requiredParams.forEach(param -> analyzeNode(param, funcEnv));
    analyzeNode(funcNode.restParam, funcEnv);
    analyzeBranch(funcNode.body, funcEnv);
    this.currDependentSymbol.pop();
}

@Override
public void visit(BLangBlockFunctionBody body) {
    SymbolEnv bodyEnv = SymbolEnv.createFuncBodyEnv(body, env);
    // Propagate the module-init flag into the body environment.
    bodyEnv.isModuleInit = env.isModuleInit;
    for (BLangStatement statement : body.stmts) {
        analyzeNode(statement, bodyEnv);
    }
}

@Override
public void visit(BLangExprFunctionBody body) {
    SymbolEnv bodyEnv = SymbolEnv.createFuncBodyEnv(body, env);
    analyzeNode(body.expr, bodyEnv);
}

@Override
public void visit(BLangExternalFunctionBody body) {
}

@Override
public void visit(BLangBlockStmt blockNode) {
    SymbolEnv blockEnv = SymbolEnv.createBlockEnv(blockNode, env);
    blockNode.stmts.forEach(statement -> analyzeNode(statement, blockEnv));
}

@Override
public void visit(BLangLetExpression letExpression) {
    // Let variables are analyzed in the let expression's own environment.
    for (BLangLetVariable letVarDeclaration : letExpression.letVarDeclarations) {
        analyzeNode((BLangNode) letVarDeclaration.definitionNode, letExpression.env);
    }
    analyzeNode(letExpression.expr, letExpression.env);
}

@Override
public void visit(BLangCompilationUnit compUnit) {
}

@Override
public void visit(BLangXMLNS xmlnsNode) {
}

@Override
public void visit(BLangService service) {
    // The service type definition is the dependent while analyzing its members.
    this.currDependentSymbol.push(service.serviceTypeDefinition.symbol);
    for (BLangExpression attachedExpr : service.attachedExprs) {
        analyzeNode(attachedExpr, env);
    }
    service.annAttachments.forEach(bLangAnnotationAttachment -> analyzeNode(bLangAnnotationAttachment.expr, env));
    service.resourceFunctions.forEach(function -> analyzeNode(function, env));
    this.currDependentSymbol.pop();
}
@Override
public void visit(BLangResource resource) {
}

@Override
public void visit(BLangTypeDefinition typeDefinition) {
    analyzeNode(typeDefinition.typeNode, env);
}

@Override
public void visit(BLangSimpleVariableDef varDefNode) {
    BLangVariable var = varDefNode.var;
    if (var.expr == null) {
        // Declared without an initializer: start tracking as uninitialized.
        addUninitializedVar(var);
        return;
    }
    analyzeNode(var, env);
}

@Override
public void visit(BLangSimpleVariable variable) {
    analyzeNode(variable.typeNode, env);
    if (variable.symbol == null) {
        if (variable.expr != null) {
            analyzeNode(variable.expr, env);
        }
        return;
    }
    // The variable is the current dependent while analyzing its initializer.
    this.currDependentSymbol.push(variable.symbol);
    try {
        if (variable.expr != null) {
            analyzeNode(variable.expr, env);
            this.uninitializedVars.remove(variable.symbol);
            return;
        }
        // Only module-level and object-field variables without initializers are
        // tracked here; locals are handled via BLangSimpleVariableDef.
        BSymbol owner = variable.symbol.owner;
        if (owner.tag != SymTag.PACKAGE && owner.tag != SymTag.OBJECT) {
            return;
        }
        addUninitializedVar(variable);
    } finally {
        this.currDependentSymbol.pop();
    }
}
@Override
public void visit(BLangWorker worker) {
    // Worker bodies are analyzed in isolation, like function bodies.
    SymbolEnv workerEnv = SymbolEnv.createWorkerEnv(worker, this.env);
    analyzeBranch(worker.body, workerEnv);
}

@Override
public void visit(BLangEndpoint endpoint) {
    analyzeNode(endpoint.configurationExpr, env);
}

@Override
public void visit(BLangAssignment assignment) {
    analyzeNode(assignment.expr, env);
    checkAssignment(assignment.varRef);
}

@Override
public void visit(BLangCompoundAssignment compoundAssignNode) {
    // `x += e` both reads and writes x: analyze the ref, then mark initialized.
    analyzeNode(compoundAssignNode.expr, env);
    analyzeNode(compoundAssignNode.varRef, env);
    this.uninitializedVars.remove(compoundAssignNode.varRef.symbol);
}

@Override
public void visit(BLangBreak breakNode) {
    terminateFlow();
}

@Override
public void visit(BLangReturn returnNode) {
    analyzeNode(returnNode.expr, env);
    terminateFlow();
}

@Override
public void visit(BLangThrow throwNode) {
    analyzeNode(throwNode.expr, env);
    terminateFlow();
}

@Override
public void visit(BLangXMLNSStatement xmlnsStmt) {
    analyzeNode(xmlnsStmt.xmlnsDecl, env);
}

@Override
public void visit(BLangIf ifNode) {
    // Analyze both branches in isolation; a branch whose flow terminated does
    // not contribute to the merged uninitialized set.
    analyzeNode(ifNode.expr, env);
    BranchResult ifResult = analyzeBranch(ifNode.body, env);
    BranchResult elseResult = analyzeBranch(ifNode.elseStmt, env);
    if (ifResult.flowTerminated) {
        this.uninitializedVars = elseResult.uninitializedVars;
        return;
    }
    if (elseResult.flowTerminated) {
        this.uninitializedVars = ifResult.uninitializedVars;
        return;
    }
    this.uninitializedVars = mergeUninitializedVars(ifResult.uninitializedVars, elseResult.uninitializedVars);
}
@Override
public void visit(BLangMatch match) {
    analyzeNode(match.expr, env);
    Map<BSymbol, InitStatus> uninitVars = new HashMap<>();
    BranchResult lastPatternResult = null;
    for (BLangMatch.BLangMatchBindingPatternClause patternClause : match.patternClauses) {
        if (patternClause.isLastPattern) {
            // The final (exhaustive) pattern is merged separately below.
            lastPatternResult = analyzeBranch(patternClause, env);
        } else {
            BranchResult result = analyzeBranch(patternClause, env);
            // A clause that terminates flow contributes nothing after the match.
            if (result.flowTerminated) {
                continue;
            }
            uninitVars = mergeUninitializedVars(uninitVars, result.uninitializedVars);
        }
    }
    if (lastPatternResult != null) {
        // An exhaustive last pattern guarantees some clause always runs.
        uninitVars = mergeUninitializedVars(uninitVars, lastPatternResult.uninitializedVars);
        this.uninitializedVars = uninitVars;
        return;
    }
    // Non-exhaustive match: merging an empty map with the current state
    // downgrades every still-uninitialized var to PARTIAL_INIT.
    // NOTE(review): the clause-merged `uninitVars` computed above is discarded
    // on this path — confirm that is intentional.
    uninitVars = mergeUninitializedVars(new HashMap<>(), this.uninitializedVars);
    this.uninitializedVars = uninitVars;
}
@Override
public void visit(BLangForeach foreach) {
analyzeNode(foreach.collection, env);
analyzeNode(foreach.body, env);
}
@Override
public void visit(BLangQueryAction queryAction) {
for (FromClauseNode fromClauseNode : queryAction.fromClauseList) {
analyzeNode((BLangFromClause) fromClauseNode, env);
}
for (WhereClauseNode whereClauseNode : queryAction.whereClauseList) {
analyzeNode((BLangWhereClause) whereClauseNode, env);
}
analyzeNode(queryAction.doClause, env);
}
@Override
public void visit(BLangWhile whileNode) {
    // Snapshot (copy!) the pre-loop state: the loop body may execute zero times.
    // The original aliased this.uninitializedVars here, which made the
    // downgrade loop below a no-op — a map always contains its own keys, so
    // `!containsKey` could never be true while iterating the same map.
    Map<BSymbol, InitStatus> prevUninitializedVars = copyUninitializedVars();
    analyzeNode(whileNode.expr, env);
    analyzeNode(whileNode.body, env);
    // Anything the body initialized is only conditionally initialized:
    // downgrade it to PARTIAL_INIT rather than treating it as fully set.
    for (BSymbol symbol : prevUninitializedVars.keySet()) {
        if (!this.uninitializedVars.containsKey(symbol)) {
            this.uninitializedVars.put(symbol, InitStatus.PARTIAL_INIT);
        }
    }
}
@Override
public void visit(BLangLock lockNode) {
analyzeNode(lockNode.body, this.env);
}
@Override
public void visit(BLangTransaction transactionNode) {
analyzeNode(transactionNode.transactionBody, env);
analyzeNode(transactionNode.onRetryBody, env);
analyzeNode(transactionNode.committedBody, env);
analyzeNode(transactionNode.abortedBody, env);
Name transactionPkgName = names.fromString(Names.DOT.value + Names.TRANSACTION_PACKAGE.value);
Name compUnitName = names.fromString(transactionNode.pos.getSource().getCompilationUnitName());
this.symResolver.resolvePrefixSymbol(env, transactionPkgName, compUnitName);
}
@Override
public void visit(BLangTryCatchFinally tryNode) {
}
@Override
public void visit(BLangTupleDestructure stmt) {
analyzeNode(stmt.expr, env);
checkAssignment(stmt.varRef);
}
@Override
public void visit(BLangForkJoin forkJoin) {
/* ignore */
}
@Override
public void visit(BLangWorkerSend workerSendNode) {
analyzeNode(workerSendNode.expr, env);
}
@Override
public void visit(BLangWorkerSyncSendExpr syncSendExpr) {
analyzeNode(syncSendExpr.expr, env);
}
@Override
public void visit(BLangWorkerReceive workerReceiveNode) {
}
@Override
public void visit(BLangLiteral literalExpr) {
}
@Override
public void visit(BLangListConstructorExpr listConstructorExpr) {
listConstructorExpr.exprs.forEach(expr -> analyzeNode(expr, env));
}
@Override
public void visit(BLangTableConstructorExpr tableConstructorExpr) {
tableConstructorExpr.recordLiteralList.forEach(expr -> analyzeNode(expr, env));
checkForDuplicateKeys(tableConstructorExpr);
}
// Reports rows of a table literal whose key columns collide.
// NOTE(review): equal hashes are treated as equal keys — a hash collision
// between distinct key values would be reported as a false duplicate.
private void checkForDuplicateKeys(BLangTableConstructorExpr tableConstructorExpr) {
    Set<Integer> keyHashSet = new HashSet<>();
    List<String> fieldNames = getFieldNames(tableConstructorExpr);
    if (!fieldNames.isEmpty()) {
        for (BLangRecordLiteral literal : tableConstructorExpr.recordLiteralList) {
            // Extract this row's key values in key-field order and hash them.
            List<BLangExpression> keyArray = createKeyArray(literal, fieldNames);
            int hashInt = generateHash(keyArray);
            if (!keyHashSet.add(hashInt)) {
                String fields = String.join(", ", fieldNames);
                String values = keyArray.stream().map(Object::toString).collect(Collectors.joining(", "));
                dlog.error(literal.pos, DiagnosticCode.DUPLICATE_KEY_IN_TABLE_LITERAL, fields, values);
            }
        }
    }
}
// Folds the per-expression hashes together with the conventional 31 multiplier.
private int generateHash(List<BLangExpression> keyArray) {
    int combined = 0;
    for (BLangExpression keyExpr : keyArray) {
        combined = combined * 31 + hash(keyExpr);
    }
    return combined;
}
/**
 * Computes a structural hash for a constant expression node, combining child
 * hashes with a 31-multiplier fold. Non-constant expression kinds are
 * reported as errors and contribute 0.
 *
 * @param node constant expression node to hash.
 * @return combined hash of the node's constant structure and values.
 */
public Integer hash(Node node) {
    int result = 0;
    if (node.getKind() == NodeKind.RECORD_LITERAL_EXPR) {
        BLangRecordLiteral recordLiteral = (BLangRecordLiteral) node;
        for (RecordLiteralNode.RecordField entry : recordLiteral.fields) {
            result = 31 * result + hash(entry);
        }
    } else if (node.getKind() == NodeKind.RECORD_LITERAL_KEY_VALUE) {
        BLangRecordLiteral.BLangRecordKeyValueField field = (BLangRecordLiteral.BLangRecordKeyValueField) node;
        result = 31 * result + hash(field.key.expr) + hash(field.valueExpr);
    } else if (node.getKind() == NodeKind.ARRAY_LITERAL_EXPR) {
        BLangListConstructorExpr.BLangArrayLiteral arrayLiteral =
                (BLangListConstructorExpr.BLangArrayLiteral) node;
        for (BLangExpression expr : arrayLiteral.exprs) {
            result = 31 * result + hash(expr);
        }
    // Fixed: the original used the non-short-circuit bitwise `|` between the
    // two kind checks; logical `||` is the intended operator here.
    } else if (node.getKind() == NodeKind.LITERAL || node.getKind() == NodeKind.NUMERIC_LITERAL) {
        BLangLiteral literal = (BLangLiteral) node;
        result = Objects.hash(literal.value);
    } else if (node.getKind() == NodeKind.XML_TEXT_LITERAL) {
        BLangXMLTextLiteral literal = (BLangXMLTextLiteral) node;
        result = 31 * result + hash(literal.concatExpr);
        for (BLangExpression expr : literal.textFragments) {
            result = result * 31 + hash(expr);
        }
    } else if (node.getKind() == NodeKind.XML_ATTRIBUTE) {
        BLangXMLAttribute attribute = (BLangXMLAttribute) node;
        result = 31 * result + hash(attribute.name) + hash(attribute.value);
    } else if (node.getKind() == NodeKind.XML_QNAME) {
        BLangXMLQName xmlqName = (BLangXMLQName) node;
        result = 31 * result + hash(xmlqName.localname) + hash(xmlqName.prefix);
    } else if (node.getKind() == NodeKind.XML_COMMENT_LITERAL) {
        BLangXMLCommentLiteral literal = (BLangXMLCommentLiteral) node;
        result = 31 * result + hash(literal.concatExpr);
        for (BLangExpression expr : literal.textFragments) {
            result = result * 31 + hash(expr);
        }
    } else if (node.getKind() == NodeKind.XML_ELEMENT_LITERAL) {
        BLangXMLElementLiteral literal = (BLangXMLElementLiteral) node;
        result = 31 * result + hash(literal.startTagName) + hash(literal.endTagName);
        for (BLangExpression expr : literal.attributes) {
            result = 31 * result + hash(expr);
        }
        for (BLangExpression expr : literal.children) {
            result = 31 * result + hash(expr);
        }
    } else if (node.getKind() == NodeKind.XML_QUOTED_STRING) {
        BLangXMLQuotedString literal = (BLangXMLQuotedString) node;
        result = 31 * result + hash(literal.concatExpr);
        for (BLangExpression expr : literal.textFragments) {
            result = result * 31 + hash(expr);
        }
    } else if (node.getKind() == NodeKind.XMLNS) {
        BLangXMLNS xmlns = (BLangXMLNS) node;
        result = result * 31 + hash(xmlns.prefix) + hash(xmlns.namespaceURI);
    } else if (node.getKind() == NodeKind.XML_PI_LITERAL) {
        BLangXMLProcInsLiteral literal = (BLangXMLProcInsLiteral) node;
        result = 31 * result + hash(literal.target) + hash(literal.dataConcatExpr);
        for (BLangExpression expr : literal.dataFragments) {
            result = result * 31 + hash(expr);
        }
    } else if (node.getKind() == NodeKind.IDENTIFIER) {
        BLangIdentifier identifier = (BLangIdentifier) node;
        result = identifier.value.hashCode();
    } else {
        // Anything else is not a constant expression and cannot be hashed.
        dlog.error(((BLangExpression) node).pos, DiagnosticCode.EXPRESSION_IS_NOT_A_CONSTANT_EXPRESSION);
    }
    return result;
}
// Indexes the record's key-value fields by their key text, then returns the
// values in key-field order (missing keys yield null entries).
private List<BLangExpression> createKeyArray(BLangRecordLiteral literal, List<String> fieldNames) {
    Map<String, BLangExpression> valueByKey = literal.fields.stream()
            .map(recordField -> (BLangRecordLiteral.BLangRecordKeyValueField) recordField)
            .collect(Collectors.toMap(field -> field.key.expr.toString(), field -> field.valueExpr));
    return fieldNames.stream().map(valueByKey::get).collect(Collectors.toList());
}
// Returns the table's key field names: prefer the names already resolved on
// the table type, else fall back to the explicit key specifier, else empty.
private List<String> getFieldNames(BLangTableConstructorExpr constructorExpr) {
    List<String> resolvedNames = ((BTableType) constructorExpr.type).fieldNameList;
    if (resolvedNames != null) {
        return resolvedNames;
    }
    BLangTableKeySpecifier keySpecifier = constructorExpr.tableKeySpecifier;
    if (keySpecifier == null || keySpecifier.fieldNameIdentifierList.isEmpty()) {
        return new ArrayList<>();
    }
    return keySpecifier.fieldNameIdentifierList.stream()
            .map(fieldName -> fieldName.value)
            .collect(Collectors.toList());
}
@Override
public void visit(BLangRecordLiteral recordLiteral) {
    // Visit each field variant: key-value pairs, var-name fields, spread fields.
    for (RecordLiteralNode.RecordField field : recordLiteral.fields) {
        if (field.isKeyValueField()) {
            BLangRecordLiteral.BLangRecordKeyValueField keyValuePair =
                    (BLangRecordLiteral.BLangRecordKeyValueField) field;
            // Computed keys (`[expr]: value`) contain an expression to analyze.
            if (keyValuePair.key.computedKey) {
                analyzeNode(keyValuePair.key.expr, env);
            }
            analyzeNode(keyValuePair.valueExpr, env);
        } else if (field.getKind() == NodeKind.SIMPLE_VARIABLE_REF) {
            analyzeNode((BLangRecordLiteral.BLangRecordVarNameField) field, env);
        } else {
            analyzeNode(((BLangRecordLiteral.BLangRecordSpreadOperatorField) field).expr, env);
        }
    }
}
@Override
public void visit(BLangSimpleVarRef varRefExpr) {
checkVarRef(varRefExpr.symbol, varRefExpr.pos);
}
@Override
public void visit(BLangFieldBasedAccess fieldAccessExpr) {
if (!fieldAccessExpr.lhsVar && isObjectMemberAccessWithSelf(fieldAccessExpr)) {
checkVarRef(fieldAccessExpr.symbol, fieldAccessExpr.pos);
}
analyzeNode(fieldAccessExpr.expr, env);
}
@Override
public void visit(BLangIndexBasedAccess indexAccessExpr) {
analyzeNode(indexAccessExpr.expr, env);
analyzeNode(indexAccessExpr.indexExpr, env);
}
@Override
public void visit(BLangTableMultiKeyExpr tableMultiKeyExpr) {
tableMultiKeyExpr.multiKeyIndexExprs.forEach(value -> analyzeNode(value, env));
}
@Override
public void visit(BLangXMLElementAccess xmlElementAccess) {
analyzeNode(xmlElementAccess.expr, env);
}
@Override
public void visit(BLangXMLNavigationAccess xmlNavigation) {
    analyzeNode(xmlNavigation.expr, env);
    // Fixed inverted null check: the original guarded the call with
    // `childIndex == null`, so an actual child-index expression was never
    // analyzed (analyzeNode itself ignores null nodes).
    if (xmlNavigation.childIndex != null) {
        analyzeNode(xmlNavigation.childIndex, env);
    }
}
@Override
public void visit(BLangInvocation invocationExpr) {
    analyzeNode(invocationExpr.expr, env);
    // During module init, calling out while globals are uninitialized is an error.
    if (!isGlobalVarsInitialized(invocationExpr.pos)) {
        return;
    }
    // Passing `self` (as receiver or as an argument) requires all of its
    // object fields to be initialized first.
    if (!isFieldsInitializedForSelfArgument(invocationExpr)) {
        return;
    }
    if (!isFieldsInitializedForSelfInvocation(invocationExpr.requiredArgs, invocationExpr.pos)) {
        return;
    }
    if (!isFieldsInitializedForSelfInvocation(invocationExpr.restArgs, invocationExpr.pos)) {
        return;
    }
    invocationExpr.requiredArgs.forEach(expr -> analyzeNode(expr, env));
    invocationExpr.restArgs.forEach(expr -> analyzeNode(expr, env));
    // Record call edges in the dependency graphs.
    BSymbol owner = this.env.scope.owner;
    if (owner.kind == SymbolKind.FUNCTION) {
        // Caller is a function: the caller depends on the resolved callee.
        BInvokableSymbol invokableOwnerSymbol = (BInvokableSymbol) owner;
        Name name = names.fromIdNode(invocationExpr.name);
        BSymbol dependsOnFunctionSym = symResolver.lookupSymbolInMainSpace(this.env, name);
        if (symTable.notFoundSymbol != dependsOnFunctionSym) {
            addDependency(invokableOwnerSymbol, dependsOnFunctionSym);
        }
    } else if (invocationExpr.symbol != null && invocationExpr.symbol.kind == SymbolKind.FUNCTION) {
        // Otherwise, a global var whose initializer calls a function depends
        // on that function.
        BInvokableSymbol invokableProviderSymbol = (BInvokableSymbol) invocationExpr.symbol;
        BSymbol curDependent = this.currDependentSymbol.peek();
        if (curDependent != null && isGlobalVarSymbol(curDependent)) {
            addDependency(curDependent, invokableProviderSymbol);
        }
    }
}
@Override
public void visit(BLangActionInvocation actionInvocation) {
this.visit((BLangInvocation) actionInvocation);
}
@Override
public void visit(BLangQueryExpr queryExpr) {
for (FromClauseNode fromClauseNode : queryExpr.fromClauseList) {
analyzeNode((BLangFromClause) fromClauseNode, env);
}
for (WhereClauseNode whereClauseNode : queryExpr.whereClauseList) {
analyzeNode((BLangWhereClause) whereClauseNode, env);
}
analyzeNode(queryExpr.selectClause, env);
}
@Override
public void visit(BLangFromClause fromClause) {
analyzeNode(fromClause.collection, env);
}
@Override
public void visit(BLangWhereClause whereClause) {
analyzeNode(whereClause.expression, env);
}
@Override
public void visit(BLangSelectClause selectClause) {
analyzeNode(selectClause.expression, env);
}
@Override
public void visit(BLangDoClause doClause) {
analyzeNode(doClause.body, env);
}
// Returns false (and reports an error) if the invocation's receiver is `self`
// and any of the object's fields are still uninitialized.
private boolean isFieldsInitializedForSelfArgument(BLangInvocation invocationExpr) {
    if (invocationExpr.expr == null || !isSelfKeyWordExpr(invocationExpr.expr)) {
        return true;
    }
    StringBuilder uninitializedFields =
            getUninitializedFieldsForSelfKeyword((BObjectType) ((BLangSimpleVarRef)
                    invocationExpr.expr).symbol.type);
    if (uninitializedFields.length() != 0) {
        this.dlog.error(invocationExpr.pos, DiagnosticCode.CONTAINS_UNINITIALIZED_FIELDS,
                uninitializedFields.toString());
        return false;
    }
    return true;
}
// Returns false (and reports an error) if `self` is passed as an argument
// while any of its object fields are still uninitialized.
private boolean isFieldsInitializedForSelfInvocation(List<BLangExpression> argExpressions, DiagnosticPos pos) {
    for (BLangExpression expr : argExpressions) {
        if (isSelfKeyWordExpr(expr)) {
            StringBuilder uninitializedFields =
                    getUninitializedFieldsForSelfKeyword((BObjectType) ((BLangSimpleVarRef) expr).symbol.type);
            if (uninitializedFields.length() != 0) {
                this.dlog.error(pos, DiagnosticCode.CONTAINS_UNINITIALIZED_FIELDS,
                        uninitializedFields.toString());
                return false;
            }
        }
    }
    return true;
}
// In the module-init context, verifies every tracked global is initialized;
// reports a single error listing the uninitialized names otherwise.
private boolean isGlobalVarsInitialized(DiagnosticPos pos) {
    if (!env.isModuleInit) {
        return true;
    }
    StringBuilder uninitializedFields = new StringBuilder();
    for (BSymbol symbol : this.uninitializedVars.keySet()) {
        if (uninitializedFields.length() > 0) {
            uninitializedFields.append(", ");
        }
        uninitializedFields.append(symbol.getName().value);
    }
    if (uninitializedFields.length() == 0) {
        return true;
    }
    this.dlog.error(pos, DiagnosticCode.CONTAINS_UNINITIALIZED_VARIABLES,
            uninitializedFields.toString());
    return false;
}
// True only for a simple variable reference whose name is the reserved "self".
private boolean isSelfKeyWordExpr(BLangExpression expr) {
    if (expr.getKind() != NodeKind.SIMPLE_VARIABLE_REF) {
        return false;
    }
    String variableName = ((BLangSimpleVarRef) expr).getVariableName().getValue();
    return Names.SELF.value.equals(variableName);
}
// Builds a ", "-separated list of the object's fields that are still
// uninitialized; empty builder means all fields are initialized.
private StringBuilder getUninitializedFieldsForSelfKeyword(BObjectType objType) {
    StringBuilder fieldNames = new StringBuilder();
    for (BField field : objType.fields) {
        if (!this.uninitializedVars.containsKey(field.symbol)) {
            continue;
        }
        if (fieldNames.length() > 0) {
            fieldNames.append(", ");
        }
        fieldNames.append(field.symbol.getName().value);
    }
    return fieldNames;
}
// A "global variable" here is a package-owned variable or constant symbol.
private boolean isGlobalVarSymbol(BSymbol symbol) {
    if (symbol == null || symbol.owner == null || symbol.owner.tag != SymTag.PACKAGE) {
        return false;
    }
    return isVariableOrConstant(symbol);
}
// Checks the symbol's tag bits for either the VARIABLE or CONSTANT category.
private boolean isVariableOrConstant(BSymbol symbol) {
    if (symbol == null) {
        return false;
    }
    boolean isVariable = (symbol.tag & SymTag.VARIABLE) == SymTag.VARIABLE;
    boolean isConstant = (symbol.tag & SymTag.CONSTANT) == SymTag.CONSTANT;
    return isVariable || isConstant;
}
/**
 * Register dependent symbol to the provider symbol.
 * Let global int a = b, a depend on b.
 * Let func foo() { returns b + 1; }, where b is a global var, then foo depends on b.
 *
 * @param dependent dependent.
 * @param provider object which provides a value.
 */
private void addDependency(BSymbol dependent, BSymbol provider) {
    // Cross-package references are ignored.
    // NOTE(review): pkgID is compared with != (reference identity), which
    // assumes PackageID instances are canonicalized — confirm, otherwise this
    // should use equals().
    if (provider == null || dependent == null || dependent.pkgID != provider.pkgID) {
        return;
    }
    // LinkedHashSet keeps provider iteration order deterministic.
    Set<BSymbol> providers = globalNodeDependsOn.computeIfAbsent(dependent, s -> new LinkedHashSet<>());
    providers.add(provider);
    addFunctionToGlobalVarDependency(dependent, provider);
}
// Records a function -> provider edge; only function dependents are tracked,
// and variable/constant providers must be globals.
private void addFunctionToGlobalVarDependency(BSymbol dependent, BSymbol provider) {
    if (dependent.kind != SymbolKind.FUNCTION) {
        return;
    }
    boolean nonGlobalVariableProvider = isVariableOrConstant(provider) && !isGlobalVarSymbol(provider);
    if (nonGlobalVariableProvider) {
        return;
    }
    this.functionToDependency.computeIfAbsent(dependent, s -> new HashSet<>()).add(provider);
}
@Override
public void visit(BLangTypeInit typeInitExpr) {
typeInitExpr.argsExpr.forEach(argExpr -> analyzeNode(argExpr, env));
if (this.currDependentSymbol.peek() != null) {
addDependency(this.currDependentSymbol.peek(), typeInitExpr.type.tsymbol);
}
}
@Override
public void visit(BLangTernaryExpr ternaryExpr) {
    // NOTE(review): only the condition expression is analyzed here; thenExpr
    // and elseExpr are not visited — confirm they are desugared or covered
    // elsewhere before relying on this.
    analyzeNode(ternaryExpr.expr, env);
}
@Override
public void visit(BLangWaitExpr waitExpr) {
analyzeNode(waitExpr.getExpression(), env);
}
@Override
public void visit(BLangWorkerFlushExpr workerFlushExpr) {
}
@Override
public void visit(BLangWaitForAllExpr waitForAllExpr) {
waitForAllExpr.keyValuePairs.forEach(keyValue -> {
BLangExpression expr = keyValue.valueExpr != null ? keyValue.valueExpr : keyValue.keyExpr;
analyzeNode(expr, env);
});
}
@Override
public void visit(BLangBinaryExpr binaryExpr) {
analyzeNode(binaryExpr.lhsExpr, env);
analyzeNode(binaryExpr.rhsExpr, env);
}
@Override
public void visit(BLangElvisExpr elvisExpr) {
analyzeNode(elvisExpr.lhsExpr, env);
analyzeNode(elvisExpr.rhsExpr, env);
}
@Override
public void visit(BLangGroupExpr groupExpr) {
analyzeNode(groupExpr.expression, env);
}
@Override
public void visit(BLangUnaryExpr unaryExpr) {
analyzeNode(unaryExpr.expr, env);
}
@Override
public void visit(BLangTypeConversionExpr conversionExpr) {
analyzeNode(conversionExpr.expr, env);
}
@Override
public void visit(BLangXMLAttribute xmlAttribute) {
analyzeNode(xmlAttribute.value, env);
}
@Override
public void visit(BLangXMLElementLiteral xmlElementLiteral) {
xmlElementLiteral.children.forEach(expr -> analyzeNode(expr, env));
xmlElementLiteral.attributes.forEach(expr -> analyzeNode(expr, env));
xmlElementLiteral.inlineNamespaces.forEach(expr -> analyzeNode(expr, env));
}
@Override
public void visit(BLangXMLTextLiteral xmlTextLiteral) {
xmlTextLiteral.textFragments.forEach(expr -> analyzeNode(expr, env));
}
@Override
public void visit(BLangXMLCommentLiteral xmlCommentLiteral) {
xmlCommentLiteral.textFragments.forEach(expr -> analyzeNode(expr, env));
}
@Override
public void visit(BLangXMLProcInsLiteral xmlProcInsLiteral) {
xmlProcInsLiteral.dataFragments.forEach(expr -> analyzeNode(expr, env));
}
@Override
public void visit(BLangXMLQuotedString xmlQuotedString) {
xmlQuotedString.textFragments.forEach(expr -> analyzeNode(expr, env));
}
@Override
public void visit(BLangStringTemplateLiteral stringTemplateLiteral) {
stringTemplateLiteral.exprs.forEach(expr -> analyzeNode(expr, env));
}
@Override
public void visit(BLangLambdaFunction bLangLambdaFunction) {
    // Analyze the lambda body against a copy of the current init state so the
    // enclosing scope's uninitialized-var tracking is left untouched.
    Map<BSymbol, InitStatus> prevUninitializedVars = this.uninitializedVars;
    BLangFunction funcNode = bLangLambdaFunction.function;
    SymbolEnv funcEnv = SymbolEnv.createFunctionEnv(funcNode, funcNode.symbol.scope, env);
    this.uninitializedVars = copyUninitializedVars();
    this.flowTerminated = false;
    analyzeNode(funcNode.body, funcEnv);
    // NOTE(review): flowTerminated is reset but not restored after analyzing
    // the body — confirm callers do not depend on the pre-lambda value.
    this.uninitializedVars = prevUninitializedVars;
}
@Override
public void visit(BLangXMLAttributeAccess xmlAttributeAccessExpr) {
analyzeNode(xmlAttributeAccessExpr.expr, env);
analyzeNode(xmlAttributeAccessExpr.indexExpr, env);
}
@Override
public void visit(BLangIntRangeExpression intRangeExpression) {
analyzeNode(intRangeExpression.startExpr, env);
analyzeNode(intRangeExpression.endExpr, env);
}
@Override
public void visit(BLangRestArgsExpression bLangVarArgsExpression) {
analyzeNode(bLangVarArgsExpression.expr, env);
}
@Override
public void visit(BLangNamedArgsExpression bLangNamedArgsExpression) {
analyzeNode(bLangNamedArgsExpression.expr, env);
}
@Override
public void visit(BLangIsAssignableExpr assignableExpr) {
}
@Override
public void visit(BLangMatchExpression matchExpression) {
analyzeNode(matchExpression.expr, env);
matchExpression.patternClauses.forEach(pattern -> analyzeNode(pattern, env));
}
@Override
public void visit(BLangMatchExprPatternClause matchExprPatternClause) {
analyzeNode(matchExprPatternClause.expr, env);
}
@Override
public void visit(BLangCheckedExpr checkedExpr) {
analyzeNode(checkedExpr.expr, env);
}
@Override
public void visit(BLangCheckPanickedExpr checkPanicExpr) {
analyzeNode(checkPanicExpr.expr, env);
}
@Override
public void visit(BLangXMLSequenceLiteral bLangXMLSequenceLiteral) {
bLangXMLSequenceLiteral.xmlItems.forEach(xml -> analyzeNode(xml, env));
}
@Override
public void visit(BLangExpressionStmt exprStmtNode) {
analyzeNode(exprStmtNode.expr, env);
}
@Override
public void visit(BLangAnnotation annotationNode) {
}
@Override
public void visit(BLangAnnotationAttachment annAttachmentNode) {
}
@Override
public void visit(BLangAbort abortNode) {
}
@Override
public void visit(BLangRetry retryNode) {
}
@Override
public void visit(BLangContinue continueNode) {
terminateFlow();
}
@Override
public void visit(BLangCatch catchNode) {
}
@Override
public void visit(BLangTypedescExpr accessExpr) {
}
@Override
public void visit(BLangXMLQName xmlQName) {
}
@Override
public void visit(BLangArrowFunction bLangArrowFunction) {
    // Report any closure variable that is captured before being initialized.
    bLangArrowFunction.closureVarSymbols.forEach(closureVarSymbol -> {
        // containsKey is the direct map lookup (was keySet().contains()).
        if (this.uninitializedVars.containsKey(closureVarSymbol.bSymbol)) {
            this.dlog.error(closureVarSymbol.diagnosticPos, DiagnosticCode.USAGE_OF_UNINITIALIZED_VARIABLE,
                    closureVarSymbol.bSymbol);
        }
    });
}
@Override
public void visit(BLangValueType valueType) {
}
@Override
public void visit(BLangConstant constant) {
boolean validVariable = constant.symbol != null;
if (validVariable) {
this.currDependentSymbol.push(constant.symbol);
}
try {
analyzeNode(constant.expr, env);
} finally {
if (validVariable) {
this.currDependentSymbol.pop();
}
}
}
@Override
public void visit(BLangArrayType arrayType) {
analyzeNode(arrayType.getElementType(), env);
}
@Override
public void visit(BLangBuiltInRefTypeNode builtInRefType) {
}
@Override
public void visit(BLangConstrainedType constrainedType) {
analyzeNode(constrainedType.constraint, env);
}
@Override
public void visit(BLangStreamType streamType) {
analyzeNode(streamType.constraint, env);
analyzeNode(streamType.error, env);
}
@Override
public void visit(BLangTableTypeNode tableType) {
analyzeNode(tableType.constraint, env);
if (tableType.tableKeyTypeConstraint != null) {
analyzeNode(tableType.tableKeyTypeConstraint.keyType, env);
}
}
@Override
public void visit(BLangUserDefinedType userDefinedType) {
}
@Override
public void visit(BLangFunctionTypeNode functionTypeNode) {
functionTypeNode.params.forEach(param -> analyzeNode(param.typeNode, env));
analyzeNode(functionTypeNode.returnTypeNode, env);
}
@Override
public void visit(BLangUnionTypeNode unionTypeNode) {
unionTypeNode.memberTypeNodes.forEach(typeNode -> analyzeNode(typeNode, env));
}
@Override
public void visit(BLangObjectTypeNode objectTypeNode) {
    SymbolEnv objectEnv = SymbolEnv.createTypeEnv(objectTypeNode, objectTypeNode.symbol.scope, env);
    // The object type symbol is the current dependent while its members run.
    this.currDependentSymbol.push(objectTypeNode.symbol);
    objectTypeNode.fields.forEach(field -> analyzeNode(field, objectEnv));
    objectTypeNode.referencedFields.forEach(field -> analyzeNode(field, objectEnv));
    if (objectTypeNode.initFunction != null) {
        if (objectTypeNode.initFunction.body == null) {
            // The init function's body may live in an outer-level function
            // definition with the same symbol name; splice it in if found.
            Optional<BLangFunction> outerFuncDef =
                    objectEnv.enclPkg.functions.stream()
                            .filter(f -> f.symbol.name.equals((objectTypeNode.initFunction).symbol.name))
                            .findFirst();
            outerFuncDef.ifPresent(bLangFunction -> objectTypeNode.initFunction = bLangFunction);
        }
        if (objectTypeNode.initFunction.body != null) {
            // Analyze the init body in the object's env so field
            // initializations are recorded against this object's fields.
            if (objectTypeNode.initFunction.body.getKind() == NodeKind.BLOCK_FUNCTION_BODY) {
                for (BLangStatement statement :
                        ((BLangBlockFunctionBody) objectTypeNode.initFunction.body).stmts) {
                    analyzeNode(statement, objectEnv);
                }
            } else if (objectTypeNode.initFunction.body.getKind() == NodeKind.EXPR_FUNCTION_BODY) {
                analyzeNode(((BLangExprFunctionBody) objectTypeNode.initFunction.body).expr, objectEnv);
            }
        }
    }
    if (!Symbols.isFlagOn(objectTypeNode.symbol.flags, Flags.ABSTRACT)) {
        // Non-abstract objects must leave no non-private field uninitialized.
        Stream.concat(objectTypeNode.fields.stream(), objectTypeNode.referencedFields.stream())
                .filter(field -> !Symbols.isPrivate(field.symbol))
                .forEach(field -> {
                    if (this.uninitializedVars.containsKey(field.symbol)) {
                        this.dlog.error(field.pos, DiagnosticCode.OBJECT_UNINITIALIZED_FIELD, field.name);
                    }
                });
    }
    objectTypeNode.functions.forEach(function -> analyzeNode(function, env));
    objectTypeNode.getTypeReferences().forEach(type -> analyzeNode((BLangType) type, env));
    this.currDependentSymbol.pop();
}
@Override
public void visit(BLangRecordTypeNode recordTypeNode) {
recordTypeNode.getTypeReferences().forEach(type -> analyzeNode((BLangType) type, env));
recordTypeNode.fields.forEach(field -> analyzeNode(field, env));
}
@Override
public void visit(BLangFiniteTypeNode finiteTypeNode) {
finiteTypeNode.valueSpace.forEach(value -> analyzeNode(value, env));
}
@Override
public void visit(BLangTupleTypeNode tupleTypeNode) {
tupleTypeNode.memberTypeNodes.forEach(type -> analyzeNode(type, env));
}
@Override
public void visit(BLangMarkdownDocumentationLine bLangMarkdownDocumentationLine) {
}
@Override
public void visit(BLangMarkdownParameterDocumentation bLangDocumentationParameter) {
}
@Override
public void visit(BLangMarkdownReturnParameterDocumentation bLangMarkdownReturnParameterDocumentation) {
}
@Override
public void visit(BLangMarkdownDocumentation bLangMarkdownDocumentation) {
}
@Override
public void visit(BLangTestablePackage testablePkgNode) {
}
@Override
public void visit(BLangImportPackage importPkgNode) {
}
@Override
public void visit(BLangIdentifier identifierNode) {
}
@Override
public void visit(BLangPanic panicNode) {
analyzeNode(panicNode.expr, env);
terminateFlow();
}
@Override
public void visit(BLangTrapExpr trapExpr) {
analyzeNode(trapExpr.expr, env);
}
public void visit(BLangServiceConstructorExpr serviceConstructorExpr) {
if (this.currDependentSymbol.peek() != null) {
addDependency(this.currDependentSymbol.peek(), serviceConstructorExpr.type.tsymbol);
}
addDependency(serviceConstructorExpr.type.tsymbol, serviceConstructorExpr.serviceNode.symbol);
analyzeNode(serviceConstructorExpr.serviceNode, env);
}
@Override
public void visit(BLangTypeTestExpr typeTestExpr) {
analyzeNode(typeTestExpr.expr, env);
analyzeNode(typeTestExpr.typeNode, env);
}
@Override
public void visit(BLangAnnotAccessExpr annotAccessExpr) {
}
@Override
public void visit(BLangErrorType errorType) {
}
@Override
public void visit(BLangRecordDestructure recordDestructure) {
analyzeNode(recordDestructure.expr, env);
checkAssignment(recordDestructure.varRef);
}
@Override
public void visit(BLangErrorDestructure errorDestructure) {
analyzeNode(errorDestructure.expr, env);
checkAssignment(errorDestructure.varRef);
}
@Override
public void visit(BLangTupleVarRef tupleVarRefExpr) {
tupleVarRefExpr.expressions.forEach(expr -> analyzeNode(expr, env));
}
@Override
public void visit(BLangRecordVarRef varRefExpr) {
varRefExpr.recordRefFields.forEach(expr -> analyzeNode(expr.variableReference, env));
}
@Override
public void visit(BLangErrorVarRef varRefExpr) {
analyzeNode(varRefExpr.reason, env);
for (BLangNamedArgsExpression args : varRefExpr.detail) {
analyzeNode(args.expr, env);
}
analyzeNode(varRefExpr.restVar, env);
}
@Override
public void visit(BLangTupleVariable bLangTupleVariable) {
analyzeNode(bLangTupleVariable.typeNode, env);
}
@Override
public void visit(BLangTupleVariableDef bLangTupleVariableDef) {
    // A tuple binding without an initializer leaves the variable uninitialized.
    // NOTE(review): unlike simple variable defs, an initializer expression is
    // not analyzed here — confirm that is intentional.
    if (bLangTupleVariableDef.var.expr == null) {
        addUninitializedVar(bLangTupleVariableDef.var);
    }
}
@Override
public void visit(BLangRecordVariable bLangRecordVariable) {
analyzeNode(bLangRecordVariable.typeNode, env);
}
@Override
public void visit(BLangRecordVariableDef bLangRecordVariableDef) {
BLangVariable var = bLangRecordVariableDef.var;
if (var.expr == null) {
addUninitializedVar(var);
}
}
@Override
public void visit(BLangErrorVariable bLangErrorVariable) {
analyzeNode(bLangErrorVariable.typeNode, env);
}
@Override
public void visit(BLangErrorVariableDef bLangErrorVariableDef) {
BLangVariable var = bLangErrorVariableDef.errorVariable;
if (var.expr == null) {
addUninitializedVar(var);
}
}
@Override
public void visit(BLangMatchStaticBindingPatternClause bLangMatchStaticBindingPatternClause) {
analyzeNode(bLangMatchStaticBindingPatternClause.body, env);
}
@Override
public void visit(BLangMatchStructuredBindingPatternClause bLangMatchStructuredBindingPatternClause) {
analyzeNode(bLangMatchStructuredBindingPatternClause.body, env);
}
// Marks the variable as uninitialized; putIfAbsent preserves an existing
// status (e.g. PARTIAL_INIT) instead of downgrading it to UN_INIT.
private void addUninitializedVar(BLangVariable variable) {
    this.uninitializedVars.putIfAbsent(variable.symbol, InitStatus.UN_INIT);
}
/**
 * Analyze a branch and returns the set of uninitialized variables for that branch.
 * This method will not update the current uninitialized variables set.
 *
 * @param node Branch node to be analyzed
 * @param env Symbol environment
 * @return Result of the branch.
 */
private BranchResult analyzeBranch(BLangNode node, SymbolEnv env) {
    // Save the current state, run the branch on a copy, then restore.
    Map<BSymbol, InitStatus> prevUninitializedVars = this.uninitializedVars;
    boolean prevFlowTerminated = this.flowTerminated;
    this.uninitializedVars = copyUninitializedVars();
    this.flowTerminated = false;
    analyzeNode(node, env);
    // Capture the branch-local state before restoring the caller's state.
    BranchResult brachResult = new BranchResult(this.uninitializedVars, this.flowTerminated);
    this.uninitializedVars = prevUninitializedVars;
    this.flowTerminated = prevFlowTerminated;
    return brachResult;
}
private Map<BSymbol, InitStatus> copyUninitializedVars() {
return new HashMap<>(this.uninitializedVars);
}
// Visits the node (if any) with `this.env` temporarily switched to the given
// environment, restoring the previous environment afterwards.
private void analyzeNode(BLangNode node, SymbolEnv env) {
    SymbolEnv previousEnv = this.env;
    this.env = env;
    if (node != null) {
        node.accept(this);
    }
    this.env = previousEnv;
}
// Merges two branches' uninitialized-var maps: a var uninitialized in only
// one branch becomes PARTIAL_INIT; a var uninitialized in both keeps UN_INIT
// unless either branch already marked it PARTIAL_INIT.
private Map<BSymbol, InitStatus> mergeUninitializedVars(Map<BSymbol, InitStatus> firstUninitVars,
        Map<BSymbol, InitStatus> secondUninitVars) {
    List<BSymbol> intersection = new ArrayList<>(firstUninitVars.keySet());
    intersection.retainAll(secondUninitVars.keySet());
    return Stream.concat(firstUninitVars.entrySet().stream(), secondUninitVars.entrySet().stream())
            .collect(Collectors.toMap(entry -> entry.getKey(),
                    // Present in only one branch -> PARTIAL_INIT.
                    entry -> intersection.contains(entry.getKey()) ? entry.getValue() : InitStatus.PARTIAL_INIT,
                    // Present in both branches: PARTIAL_INIT wins over UN_INIT.
                    (a, b) -> {
                        if (a == InitStatus.PARTIAL_INIT || b == InitStatus.PARTIAL_INIT) {
                            return InitStatus.PARTIAL_INIT;
                        }
                        return InitStatus.UN_INIT;
                    }));
}
// Records the global-reference relationship for the symbol and reports an
// error if the symbol is (partially) uninitialized at this reference.
private void checkVarRef(BSymbol symbol, DiagnosticPos pos) {
    recordGlobalVariableReferenceRelationship(symbol);
    InitStatus initStatus = this.uninitializedVars.get(symbol);
    if (initStatus == null) {
        return; // Fully initialized — nothing to report.
    }
    DiagnosticCode diagnosticCode = initStatus == InitStatus.UN_INIT
            ? DiagnosticCode.USAGE_OF_UNINITIALIZED_VARIABLE
            : DiagnosticCode.PARTIALLY_INITIALIZED_VARIABLE;
    this.dlog.error(pos, diagnosticCode, symbol.name);
}
// Records a dependency edge from the enclosing context (package-level
// initializer, function, or object) to a referenced global symbol.
private void recordGlobalVariableReferenceRelationship(BSymbol symbol) {
    BSymbol ownerSymbol = this.env.scope.owner;
    boolean isInPkgLevel = ownerSymbol.getKind() == SymbolKind.PACKAGE;
    if (isInPkgLevel && isGlobalVarSymbol(symbol)) {
        // Global initializer referencing another global.
        BSymbol dependent = this.currDependentSymbol.peek();
        addDependency(dependent, symbol);
    } else if (ownerSymbol.kind == SymbolKind.FUNCTION && isGlobalVarSymbol(symbol)) {
        // Function body referencing a global.
        BInvokableSymbol invokableOwnerSymbol = (BInvokableSymbol) ownerSymbol;
        addDependency(invokableOwnerSymbol, symbol);
    } else if (ownerSymbol.kind == SymbolKind.OBJECT && isGlobalVarSymbol(symbol)) {
        // Object member referencing a global.
        addDependency(ownerSymbol, symbol);
    }
}
// True only for `self.<member>` style accesses.
private boolean isObjectMemberAccessWithSelf(BLangAccessExpression fieldAccessExpr) {
    return fieldAccessExpr.expr.getKind() == NodeKind.SIMPLE_VARIABLE_REF
            && Names.SELF.value.equals(((BLangSimpleVarRef) fieldAccessExpr.expr).variableName.value);
}
/**
 * Marks the variable(s) targeted by {@code varRef} as initialized, recursing
 * into destructuring references (record/tuple/error) so every bound symbol is
 * removed from {@code uninitializedVars}.
 */
private void checkAssignment(BLangExpression varRef) {
    switch (varRef.getKind()) {
        case RECORD_VARIABLE_REF:
            BLangRecordVarRef recordVarRef = (BLangRecordVarRef) varRef;
            recordVarRef.recordRefFields.forEach(field -> checkAssignment(field.variableReference));
            if (recordVarRef.restParam != null) {
                checkAssignment((BLangExpression) recordVarRef.restParam);
            }
            return;
        case TUPLE_VARIABLE_REF:
            BLangTupleVarRef tupleVarRef = (BLangTupleVarRef) varRef;
            tupleVarRef.expressions.forEach(this::checkAssignment);
            if (tupleVarRef.restParam != null) {
                checkAssignment((BLangExpression) tupleVarRef.restParam);
            }
            return;
        case ERROR_VARIABLE_REF:
            BLangErrorVarRef errorVarRef = (BLangErrorVarRef) varRef;
            if (errorVarRef.reason != null) {
                checkAssignment(errorVarRef.reason);
            }
            for (BLangNamedArgsExpression expression : errorVarRef.detail) {
                checkAssignment(expression);
                // The expression bound to the named arg is itself initialized now.
                this.uninitializedVars.remove(((BLangVariableReference) expression.expr).symbol);
            }
            if (errorVarRef.restVar != null) {
                checkAssignment(errorVarRef.restVar);
            }
            return;
        case INDEX_BASED_ACCESS_EXPR:
        case FIELD_BASED_ACCESS_EXPR:
            // Writing to self.<field> initializes that member; for any other
            // access expression only the receiver needs analyzing for reads
            // of uninitialized variables.
            if (isObjectMemberAccessWithSelf((BLangAccessExpression) varRef)) {
                this.uninitializedVars.remove(((BLangVariableReference) varRef).symbol);
            } else {
                analyzeNode(((BLangAccessExpression) varRef).expr, env);
            }
            return;
        default:
            break;
    }
    // Only plain variable refs and XML attribute accesses remain interesting.
    if (varRef.getKind() != NodeKind.SIMPLE_VARIABLE_REF &&
            varRef.getKind() != NodeKind.XML_ATTRIBUTE_ACCESS_EXPR) {
        return;
    }
    if (varRef.getKind() == NodeKind.SIMPLE_VARIABLE_REF) {
        BSymbol owner = this.env.scope.owner;
        addFunctionToGlobalVarDependency(owner, ((BLangSimpleVarRef) varRef).symbol);
    }
    this.uninitializedVars.remove(((BLangVariableReference) varRef).symbol);
}
/** Marks the current dataflow path as terminated (no fall-through to subsequent statements). */
private void terminateFlow() {
    this.flowTerminated = true;
}
/**
 * Emits an UNUSED_IMPORT_MODULE error for every import that was resolved but
 * never used, unless it is explicitly aliased to "_" (ignored).
 */
private void checkUnusedImports(List<BLangImportPackage> imports) {
    for (BLangImportPackage importStmt : imports) {
        boolean ignorable = importStmt.symbol == null
                || importStmt.symbol.isUsed
                || Names.IGNORE.value.equals(importStmt.alias.value);
        if (!ignorable) {
            dlog.error(importStmt.pos, DiagnosticCode.UNUSED_IMPORT_MODULE,
                    importStmt.getQualifiedPackageName());
        }
    }
}
/**
 * Initialization state tracked per variable: fully uninitialized, or
 * initialized on only some of the analyzed branches.
 */
private enum InitStatus {
    UN_INIT, PARTIAL_INIT
}
private class BranchResult {
Map<BSymbol, InitStatus> uninitializedVars;
boolean flowTerminated;
BranchResult(Map<BSymbol, InitStatus> uninitializedVars, boolean flowTerminated) {
this.uninitializedVars = uninitializedVars;
this.flowTerminated = flowTerminated;
}
}
} |
Yes. Right now there is nothing to do. | public void applyToConfiguration(Configuration configuration) {
hdfsCloudCredential.applyToConfiguration(configuration);
addConfigResourcesToConfiguration(configResources, configuration);
} | hdfsCloudCredential.applyToConfiguration(configuration); | public void applyToConfiguration(Configuration configuration) {
hdfsCloudCredential.applyToConfiguration(configuration);
addConfigResourcesToConfiguration(configResources, configuration);
} | class HDFSCloudConfiguration implements CloudConfiguration {
private static final Logger LOG = LogManager.getLogger(HDFSCloudConfiguration.class);
private final HDFSCloudCredential hdfsCloudCredential;
private String configResources;
private String runtimeJars;
private static final String CONFIG_RESOURCES_SEPERATOR = ",";
/** Creates a configuration backed by the given (non-null) HDFS credential. */
public HDFSCloudConfiguration(HDFSCloudCredential hdfsCloudCredential) {
    Preconditions.checkNotNull(hdfsCloudCredential);
    this.hdfsCloudCredential = hdfsCloudCredential;
}

// Comma-separated Hadoop config file names, resolved under ${STARROCKS_HOME}/conf
// (see addConfigResourcesToConfiguration).
public void setConfigResources(String configResources) {
    this.configResources = configResources;
}

// Extra jar paths forwarded to backends via Thrift (see toThrift).
public void setRuntimeJars(String runtimeJars) {
    this.runtimeJars = runtimeJars;
}

public HDFSCloudCredential getHdfsCloudCredential() {
    return hdfsCloudCredential;
}
/**
 * Adds each entry of the comma-separated {@code configResources} list to
 * {@code conf} as a Hadoop resource, resolving names against
 * ${STARROCKS_HOME}/conf. A null or empty list is a no-op.
 */
public void addConfigResourcesToConfiguration(String configResources, Configuration conf) {
    if (Strings.isNullOrEmpty(configResources)) {
        return;
    }
    for (String p : configResources.split(CONFIG_RESOURCES_SEPERATOR)) {
        Path path = new Path(StarRocksFE.STARROCKS_HOME_DIR + "/conf/", p);
        // Fix: parameterized logging — the message is only rendered when DEBUG
        // is enabled, instead of paying for String.format on every call.
        LOG.debug("Add path '{}' to configuration", path);
        conf.addResource(path);
    }
}
/**
 * Serializes this configuration into {@code tCloudConfiguration}: the
 * credential's properties plus the config-resource and runtime-jar settings.
 */
@Override
public void toThrift(TCloudConfiguration tCloudConfiguration) {
    tCloudConfiguration.setCloud_type(TCloudType.HDFS);
    Map<String, String> properties = new HashMap<>();
    hdfsCloudCredential.toThrift(properties);
    // NOTE(review): configResources/runtimeJars may be null here — confirm the
    // Thrift layer tolerates null map values.
    properties.put(HDFS_CONFIG_RESOURCES, configResources);
    properties.put(HDFS_RUNTIME_JARS, runtimeJars);
    tCloudConfiguration.setCloud_properties_v2(properties);
}
/** Human-readable summary of this configuration, used for diagnostics. */
// Fix: the annotation was duplicated ("@Override @Override"), which does not
// compile — @Override is not a repeatable annotation.
@Override
public String getCredentialString() {
    return "HDFSCloudConfiguration{" +
            "configResources=" + configResources +
            ", runtimeJars=" + runtimeJars +
            ", credential=" + hdfsCloudCredential.getCredentialString() + "}";
}
@Override
public CloudType getCloudType() {
    return CloudType.HDFS;
}

// Delegates to the credential, which owns the file-store representation.
@Override
public FileStoreInfo toFileStoreInfo() {
    return hdfsCloudCredential.toFileStoreInfo();
}
} | class HDFSCloudConfiguration implements CloudConfiguration {
private static final Logger LOG = LogManager.getLogger(HDFSCloudConfiguration.class);
private final HDFSCloudCredential hdfsCloudCredential;
private String configResources;
private String runtimeJars;
private static final String CONFIG_RESOURCES_SEPERATOR = ",";
/** Creates a configuration backed by the given (non-null) HDFS credential. */
public HDFSCloudConfiguration(HDFSCloudCredential hdfsCloudCredential) {
    Preconditions.checkNotNull(hdfsCloudCredential);
    this.hdfsCloudCredential = hdfsCloudCredential;
}

// Comma-separated Hadoop config file names, resolved under ${STARROCKS_HOME}/conf
// (see addConfigResourcesToConfiguration).
public void setConfigResources(String configResources) {
    this.configResources = configResources;
}

// Extra jar paths forwarded to backends via Thrift (see toThrift).
public void setRuntimeJars(String runtimeJars) {
    this.runtimeJars = runtimeJars;
}

public HDFSCloudCredential getHdfsCloudCredential() {
    return hdfsCloudCredential;
}
/**
 * Adds each entry of the comma-separated {@code configResources} list to
 * {@code conf} as a Hadoop resource, resolving names against
 * ${STARROCKS_HOME}/conf. A null or empty list is a no-op.
 */
public void addConfigResourcesToConfiguration(String configResources, Configuration conf) {
    if (Strings.isNullOrEmpty(configResources)) {
        return;
    }
    for (String p : configResources.split(CONFIG_RESOURCES_SEPERATOR)) {
        Path path = new Path(StarRocksFE.STARROCKS_HOME_DIR + "/conf/", p);
        // Fix: parameterized logging — the message is only rendered when DEBUG
        // is enabled, instead of paying for String.format on every call.
        LOG.debug("Add path '{}' to configuration", path);
        conf.addResource(path);
    }
}
/**
 * Serializes this configuration into {@code tCloudConfiguration}: the
 * credential's properties plus the config-resource and runtime-jar settings.
 */
@Override
public void toThrift(TCloudConfiguration tCloudConfiguration) {
    tCloudConfiguration.setCloud_type(TCloudType.HDFS);
    Map<String, String> properties = new HashMap<>();
    hdfsCloudCredential.toThrift(properties);
    // NOTE(review): configResources/runtimeJars may be null here — confirm the
    // Thrift layer tolerates null map values.
    properties.put(HDFS_CONFIG_RESOURCES, configResources);
    properties.put(HDFS_RUNTIME_JARS, runtimeJars);
    tCloudConfiguration.setCloud_properties_v2(properties);
}
/** Human-readable summary of this configuration, used for diagnostics. */
// Fix: the annotation was duplicated ("@Override @Override"), which does not
// compile — @Override is not a repeatable annotation.
@Override
public String getCredentialString() {
    return "HDFSCloudConfiguration{" +
            "configResources=" + configResources +
            ", runtimeJars=" + runtimeJars +
            ", credential=" + hdfsCloudCredential.getCredentialString() + "}";
}
@Override
public CloudType getCloudType() {
    return CloudType.HDFS;
}

// Delegates to the credential, which owns the file-store representation.
@Override
public FileStoreInfo toFileStoreInfo() {
    return hdfsCloudCredential.toFileStoreInfo();
}
} |
I am wondering should we use block() here? | public void createContainerWithComputedProperties() {
CosmosContainerProperties containerProperties = getCollectionDefinition(containerName);
List<ComputedProperty> computedProperties = new ArrayList<>(
Arrays.asList(
new ComputedProperty("lowerName", "SELECT VALUE LOWER(c.name) FROM c")
)
);
containerProperties.setComputedProperties(computedProperties);
database.createContainer(containerProperties).subscribe();
} | database.createContainer(containerProperties).subscribe(); | public void createContainerWithComputedProperties() {
CosmosContainerProperties containerProperties = getCollectionDefinition(containerName);
List<ComputedProperty> computedProperties = new ArrayList<>(
Arrays.asList(
new ComputedProperty("lowerName", "SELECT VALUE LOWER(c.name) FROM c")
)
);
containerProperties.setComputedProperties(computedProperties);
database.createContainer(containerProperties).subscribe();
} | class ComputedPropertiesCodeSnippet {
// Client/database/container handles shared by the snippets below.
private CosmosAsyncClient client;
private CosmosAsyncDatabase database;
private CosmosAsyncContainer container;
private String containerName = "TestContainer";

/** Builds an async client against the test endpoint and binds the demo database. */
public ComputedPropertiesCodeSnippet() {
    this.client = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .contentResponseOnWriteEnabled(true)
            .consistencyLevel(ConsistencyLevel.SESSION)
            .buildAsyncClient();
    this.database = this.client.getDatabase("TestDB");
}

/** Replaces the container definition, setting a single computed property. */
public void replaceContainerWithComputedProperties() {
    CosmosContainerProperties containerProperties = getCollectionDefinition(containerName);
    List<ComputedProperty> computedProperties = new ArrayList<>(
            Arrays.asList(
                    new ComputedProperty("upperName", "SELECT VALUE UPPER(c.name) FROM c")
            )
    );
    containerProperties.setComputedProperties(computedProperties);
    container = database.getContainer(containerName);
    // Fire-and-forget: subscribe() starts the operation but does not wait for it.
    container.replace(containerProperties).subscribe();
}

/** Reads the current definition, appends a computed property, and replaces it. */
public void replaceContainerWithExistingComputedProperties() {
    container = database.getContainer(containerName);
    // block() is used for brevity in this snippet; avoid blocking on reactive hot paths.
    CosmosContainerProperties modifiedProperties = container.read().block().getProperties();
    Collection<ComputedProperty> modifiedComputedProperties = modifiedProperties.getComputedProperties();
    modifiedComputedProperties.add(new ComputedProperty("upperName", "SELECT VALUE UPPER(c.firstName) FROM c"));
    modifiedProperties.setComputedProperties(modifiedComputedProperties);
    container.replace(modifiedProperties).subscribe();
}
/** Builds container properties for {@code collectionId}, partitioned on "/mypk". */
static protected CosmosContainerProperties getCollectionDefinition(String collectionId) {
    ArrayList<String> paths = new ArrayList<>(Arrays.asList("/mypk"));
    PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition();
    partitionKeyDef.setPaths(paths);
    return new CosmosContainerProperties(collectionId, partitionKeyDef);
}
} | class ComputedPropertiesCodeSnippet {
// Client/database/container handles shared by the snippets below.
private CosmosAsyncClient client;
private CosmosAsyncDatabase database;
private CosmosAsyncContainer container;
private String containerName = "TestContainer";

/** Builds an async client against the test endpoint and binds the demo database. */
public ComputedPropertiesCodeSnippet() {
    this.client = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .contentResponseOnWriteEnabled(true)
            .consistencyLevel(ConsistencyLevel.SESSION)
            .buildAsyncClient();
    this.database = this.client.getDatabase("TestDB");
}

/** Replaces the container definition, setting a single computed property. */
public void replaceContainerWithComputedProperties() {
    CosmosContainerProperties containerProperties = getCollectionDefinition(containerName);
    List<ComputedProperty> computedProperties = new ArrayList<>(
            Arrays.asList(
                    new ComputedProperty("upperName", "SELECT VALUE UPPER(c.name) FROM c")
            )
    );
    containerProperties.setComputedProperties(computedProperties);
    container = database.getContainer(containerName);
    // Fire-and-forget: subscribe() starts the operation but does not wait for it.
    container.replace(containerProperties).subscribe();
}

/** Reads the current definition, appends a computed property, and replaces it. */
public void replaceContainerWithExistingComputedProperties() {
    container = database.getContainer(containerName);
    // block() is used for brevity in this snippet; avoid blocking on reactive hot paths.
    CosmosContainerProperties modifiedProperties = container.read().block().getProperties();
    Collection<ComputedProperty> modifiedComputedProperties = modifiedProperties.getComputedProperties();
    modifiedComputedProperties.add(new ComputedProperty("upperName", "SELECT VALUE UPPER(c.firstName) FROM c"));
    modifiedProperties.setComputedProperties(modifiedComputedProperties);
    container.replace(modifiedProperties).subscribe();
}
/** Builds container properties for {@code collectionId}, partitioned on "/mypk". */
static protected CosmosContainerProperties getCollectionDefinition(String collectionId) {
    ArrayList<String> paths = new ArrayList<>(Arrays.asList("/mypk"));
    PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition();
    partitionKeyDef.setPaths(paths);
    return new CosmosContainerProperties(collectionId, partitionKeyDef);
}
} |
Agree, the API is not ideal. | private List<Node> performOn(NodeFilter filter, BiFunction<Node, Mutex, Node> action) {
List<Node> unallocatedNodes = new ArrayList<>();
ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
for (Node node : db.readNodes()) {
if ( ! filter.matches(node)) continue;
if (node.allocation().isPresent())
allocatedNodes.put(node.allocation().get().owner(), node);
else
unallocatedNodes.add(node);
}
List<Node> resultingNodes = new ArrayList<>();
try (Mutex lock = lockUnallocated()) {
for (Node node : unallocatedNodes) {
Optional<Node> currentNode = db.readNode(node.hostname());
if (currentNode.isEmpty()) continue;
resultingNodes.add(action.apply(currentNode.get(), lock));
}
}
for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
try (Mutex lock = lock(applicationNodes.getKey())) {
for (Node node : applicationNodes.getValue()) {
Optional<Node> currentNode = db.readNode(node.hostname());
if (currentNode.isEmpty()) continue;
resultingNodes.add(action.apply(currentNode.get(), lock));
}
}
}
return resultingNodes;
} | Optional<Node> currentNode = db.readNode(node.hostname()); | private List<Node> performOn(NodeFilter filter, BiFunction<Node, Mutex, Node> action) {
List<Node> unallocatedNodes = new ArrayList<>();
ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
for (Node node : db.readNodes()) {
if ( ! filter.matches(node)) continue;
if (node.allocation().isPresent())
allocatedNodes.put(node.allocation().get().owner(), node);
else
unallocatedNodes.add(node);
}
List<Node> resultingNodes = new ArrayList<>();
try (Mutex lock = lockUnallocated()) {
for (Node node : unallocatedNodes) {
Optional<Node> currentNode = db.readNode(node.hostname());
if (currentNode.isEmpty()) continue;
resultingNodes.add(action.apply(currentNode.get(), lock));
}
}
for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
try (Mutex lock = lock(applicationNodes.getKey())) {
for (Node node : applicationNodes.getValue()) {
Optional<Node> currentNode = db.readNode(node.hostname());
if (currentNode.isEmpty()) continue;
resultingNodes.add(action.apply(currentNode.get(), lock));
}
}
}
return resultingNodes;
} | class NodeRepository extends AbstractComponent {
private static final Logger log = Logger.getLogger(NodeRepository.class.getName());
private final CuratorDatabaseClient db;
private final Clock clock;
private final Zone zone;
private final NodeFlavors flavors;
private final HostResourcesCalculator resourcesCalculator;
private final NameResolver nameResolver;
private final OsVersions osVersions;
private final InfrastructureVersions infrastructureVersions;
private final FirmwareChecks firmwareChecks;
private final DockerImages dockerImages;
private final JobControl jobControl;
private final Applications applications;
private final boolean canProvisionHosts;
private final int spareCount;
/**
 * Creates a node repository from a zookeeper provider.
 * This will use the system time to make time-sensitive decisions
 */
@Inject
public NodeRepository(NodeRepositoryConfig config,
                      NodeFlavors flavors,
                      ProvisionServiceProvider provisionServiceProvider,
                      Curator curator,
                      Zone zone,
                      FlagSource flagSource) {
    this(flavors,
         provisionServiceProvider.getHostResourcesCalculator(),
         curator,
         Clock.systemUTC(),
         zone,
         new DnsNameResolver(),
         DockerImage.fromString(config.dockerImage()),
         flagSource,
         config.useCuratorClientCache(),
         provisionServiceProvider.getHostProvisioner().isPresent(),
         // One spare only in production zones without a host provisioner
         // (i.e. statically provisioned zones).
         zone.environment().isProduction() && provisionServiceProvider.getHostProvisioner().isEmpty() ? 1 : 0,
         config.nodeCacheSize());
}
/**
 * Creates a node repository from a zookeeper provider and a clock instance
 * which will be used for time-sensitive decisions.
 */
public NodeRepository(NodeFlavors flavors,
                      HostResourcesCalculator resourcesCalculator,
                      Curator curator,
                      Clock clock,
                      Zone zone,
                      NameResolver nameResolver,
                      DockerImage dockerImage,
                      FlagSource flagSource,
                      boolean useCuratorClientCache,
                      boolean canProvisionHosts,
                      int spareCount,
                      long nodeCacheSize) {
    this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache, nodeCacheSize);
    this.zone = zone;
    this.clock = clock;
    this.flavors = flavors;
    this.resourcesCalculator = resourcesCalculator;
    this.nameResolver = nameResolver;
    this.osVersions = new OsVersions(this);
    this.infrastructureVersions = new InfrastructureVersions(db);
    this.firmwareChecks = new FirmwareChecks(db, clock);
    this.dockerImages = new DockerImages(db, dockerImage);
    this.jobControl = new JobControl(new JobControlFlags(db, flagSource));
    this.applications = new Applications(db);
    this.canProvisionHosts = canProvisionHosts;
    this.spareCount = spareCount;
    // Migrates all stored nodes to the current serialization format at startup.
    rewriteNodes();
}
/** Read and write all nodes to make sure they are stored in the latest version of the serialized format */
private void rewriteNodes() {
    Instant start = clock.instant();
    int nodesWritten = 0;
    for (State state : State.values()) {
        List<Node> nodes = db.readNodes(state);
        // Writing the nodes back re-serializes them in the current format.
        db.writeTo(state, nodes, Agent.system, Optional.empty());
        nodesWritten += nodes.size();
    }
    Instant end = clock.instant();
    log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end)));
}
/** Returns the curator database client used by this */
public CuratorDatabaseClient database() { return db; }

/** Returns the Docker image to use for given node */
public DockerImage dockerImage(Node node) { return dockerImages.dockerImageFor(node.type()); }

/** @return The name resolver used to resolve hostname and ip addresses */
public NameResolver nameResolver() { return nameResolver; }

/** Returns the OS versions to use for nodes in this */
public OsVersions osVersions() { return osVersions; }

/** Returns the infrastructure versions to use for nodes in this */
public InfrastructureVersions infrastructureVersions() { return infrastructureVersions; }

/** Returns the status of firmware checks for hosts managed by this. */
public FirmwareChecks firmwareChecks() { return firmwareChecks; }

/** Returns the docker images to use for nodes in this. */
public DockerImages dockerImages() { return dockerImages; }

/** Returns the status of maintenance jobs managed by this. */
public JobControl jobControl() { return jobControl; }

/** Returns this node repo's view of the applications deployed to it */
public Applications applications() { return applications; }

/** Returns the node flavors known to this repository */
public NodeFlavors flavors() {
    return flavors;
}

/** Returns the calculator used to translate between advertised and real host resources */
public HostResourcesCalculator resourcesCalculator() { return resourcesCalculator; }

/** The number of nodes we should ensure has free capacity for node failures whenever possible */
public int spareCount() { return spareCount; }
/**
 * Finds and returns the node with the hostname in any of the given states, or empty if not found
 *
 * @param hostname the full host name of the node
 * @param inState the states the node may be in. If no states are given, it will be returned from any state
 * @return the node, or empty if it was not found in any of the given states
 */
public Optional<Node> getNode(String hostname, State ... inState) {
    return db.readNode(hostname, inState);
}

/**
 * Returns all nodes in any of the given states.
 *
 * @param inState the states to return nodes from. If no states are given, all nodes are returned
 * @return the matching nodes; an empty (mutable) list if none match
 */
public List<Node> getNodes(State ... inState) {
    return new ArrayList<>(db.readNodes(inState));
}

/**
 * Finds and returns the nodes of the given type in any of the given states.
 *
 * @param type the node type to return
 * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
 * @return the matching nodes; an empty list if none match
 */
public List<Node> getNodes(NodeType type, State ... inState) {
    return db.readNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList());
}

/** Returns a filterable list of nodes in this repository in any of the given states */
public NodeList list(State ... inState) {
    return NodeList.copyOf(getNodes(inState));
}

/** Returns a filterable list of all nodes of an application */
public NodeList list(ApplicationId application) {
    return NodeList.copyOf(getNodes(application));
}

/** Returns a locked list of all nodes in this repository */
public LockedNodeList list(Mutex lock) {
    return new LockedNodeList(getNodes(), lock);
}

/** Returns a filterable list of all load balancers in this repository */
public LoadBalancerList loadBalancers() {
    return loadBalancers((ignored) -> true);
}

/** Returns a filterable list of load balancers belonging to given application */
public LoadBalancerList loadBalancers(ApplicationId application) {
    return loadBalancers((id) -> id.application().equals(application));
}

private LoadBalancerList loadBalancers(Predicate<LoadBalancerId> predicate) {
    return LoadBalancerList.copyOf(db.readLoadBalancers(predicate).values());
}

public List<Node> getNodes(ApplicationId id, State ... inState) { return db.readNodes(id, inState); }

public List<Node> getInactive() { return db.readNodes(State.inactive); }

public List<Node> getFailed() { return db.readNodes(State.failed); }
/**
 * Returns the ACL for the node (trusted nodes, networks and ports)
 */
private NodeAcl getNodeAcl(Node node, NodeList candidates) {
    Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname));
    Set<Integer> trustedPorts = new LinkedHashSet<>();
    Set<String> trustedNetworks = new LinkedHashSet<>();
    // Port 22 (SSH) is trusted for every node type.
    trustedPorts.add(22);
    // Trust the node's parent host, all nodes of the owning application, and
    // the networks of any load balancers serving that application.
    candidates.parentOf(node).ifPresent(trustedNodes::add);
    node.allocation().ifPresent(allocation -> {
        trustedNodes.addAll(candidates.owner(allocation.owner()).asList());
        loadBalancers(allocation.owner()).asList().stream()
                .map(LoadBalancer::instance)
                .map(LoadBalancerInstance::networks)
                .forEach(trustedNetworks::addAll);
    });
    switch (node.type()) {
        case tenant:
            // Tenant nodes trust config and proxy nodes, plus the hosts of the
            // nodes in their own application; a ready tenant node additionally
            // trusts all tenant nodes.
            trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
            trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList());
            node.allocation().ifPresent(allocation ->
                    trustedNodes.addAll(candidates.parentsOf(candidates.owner(allocation.owner())).asList()));
            if (node.state() == State.ready) {
                trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList());
            }
            break;
        case config:
            // Config servers trust every node and additionally open port 4443.
            trustedNodes.addAll(candidates.asList());
            trustedPorts.add(4443);
            break;
        case proxy:
            trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
            trustedPorts.add(443);
            trustedPorts.add(4080);
            trustedPorts.add(4443);
            break;
        case controller:
            trustedPorts.add(4443);
            trustedPorts.add(443);
            trustedPorts.add(80);
            break;
        default:
            illegal("Don't know how to create ACL for " + node + " of type " + node.type());
    }
    return new NodeAcl(node, trustedNodes, trustedNetworks, trustedPorts);
}
/**
 * Creates a list of node ACLs which identify which nodes the given node should trust
 *
 * @param node Node for which to generate ACLs
 * @param children Return ACLs for the children of the given node (e.g. containers on a Docker host)
 * @return an unmodifiable list of node ACLs
 */
public List<NodeAcl> getNodeAcls(Node node, boolean children) {
    NodeList candidates = list();
    if (children) {
        // Java 10+: toUnmodifiableList replaces the verbose
        // collectingAndThen(toList(), unmodifiableList) combination.
        return candidates.childrenOf(node).asList().stream()
                .map(childNode -> getNodeAcl(childNode, candidates))
                .collect(Collectors.toUnmodifiableList());
    }
    return List.of(getNodeAcl(node, candidates));
}
/** Creates a new node object, without adding it to the node repo. If no IP address is given, it will be resolved */
public Node createNode(String openStackId, String hostname, IP.Config ipConfig, Optional<String> parentHostname,
                       Flavor flavor, Optional<TenantName> reservedTo, NodeType type) {
    if (ipConfig.primary().isEmpty())
        // No addresses supplied: resolve the hostname through the name resolver.
        ipConfig = ipConfig.with(nameResolver.getAllByNameOrThrow(hostname));
    return Node.create(openStackId, ipConfig, hostname, parentHostname, Optional.empty(), flavor, reservedTo, type, Optional.empty());
}

/** Convenience overload: empty IP config (resolved from DNS) and no reservation. */
public Node createNode(String openStackId, String hostname, Optional<String> parentHostname, Flavor flavor, NodeType type) {
    return createNode(openStackId, hostname, IP.Config.EMPTY, parentHostname, flavor, Optional.empty(), type);
}
/**
 * Adds a list of newly created docker container nodes to the node repository
 * as <i>reserved</i> nodes. Every node must be a docker container, must be
 * allocated, and must not collide with an existing hostname.
 */
public List<Node> addDockerNodes(LockedNodeList nodes) {
    for (Node node : nodes) {
        if ( ! node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER))
            illegal("Cannot add " + node + ": This is not a docker node");
        // Fix: Optional.isEmpty() instead of the negated isPresent(), consistent
        // with the rest of this class.
        if (node.allocation().isEmpty())
            illegal("Cannot add " + node + ": Docker containers needs to be allocated");
        Optional<Node> existing = getNode(node.hostname());
        if (existing.isPresent())
            illegal("Cannot add " + node + ": A node with this name already exists (" +
                    existing.get() + ", " + existing.get().history() + "). Node to be added: " +
                    node + ", " + node.history());
    }
    return db.addNodesInState(nodes.asList(), State.reserved, Agent.system);
}
/**
 * Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes.
 * If any of the nodes already exists in the deprovisioned state, the new node will be merged
 * with the history of that node.
 */
public List<Node> addNodes(List<Node> nodes, Agent agent) {
    try (Mutex lock = lockUnallocated()) {
        List<Node> nodesToAdd = new ArrayList<>();
        List<Node> nodesToRemove = new ArrayList<>();
        for (int i = 0; i < nodes.size(); i++) {
            var node = nodes.get(i);
            // Reject duplicates within the argument list itself.
            for (int j = 0; j < i; j++) {
                if (node.equals(nodes.get(j)))
                    illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
            }
            Optional<Node> existing = getNode(node.hostname());
            if (existing.isPresent()) {
                if (existing.get().state() != State.deprovisioned)
                    illegal("Cannot add " + node + ": A node with this name already exists");
                // Re-adding a deprovisioned node: carry over history, reports,
                // fail count and firmware-verification timestamp from the old entry.
                node = node.with(existing.get().history());
                node = node.with(existing.get().reports());
                node = node.with(node.status().withFailCount(existing.get().status().failCount()));
                if (existing.get().status().firmwareVerifiedAt().isPresent())
                    node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
                nodesToRemove.add(existing.get());
            }
            nodesToAdd.add(node);
        }
        List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), State.provisioned, agent);
        db.removeNodes(nodesToRemove);
        return resultingNodes;
    }
}

/** Sets a list of nodes ready and returns the nodes in the ready state */
public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
    try (Mutex lock = lockUnallocated()) {
        List<Node> nodesWithResetFields = nodes.stream()
                .map(node -> {
                    if (node.state() != State.provisioned && node.state() != State.dirty)
                        illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
                    // Clear any retire request left over from a previous life.
                    return node.withWantToRetire(false, false, Agent.system, clock.instant());
                })
                .collect(Collectors.toList());
        return db.writeTo(State.ready, nodesWithResetFields, agent, Optional.of(reason));
    }
}
/** Sets the node with this hostname ready; a no-op if it already is ready. */
public Node setReady(String hostname, Agent agent, String reason) {
    Node node = getNode(hostname).orElseThrow(() ->
            new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));
    return node.state() == State.ready
            ? node
            : setReady(List.of(node), agent, reason).get(0);
}
/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
    return db.writeTo(State.reserved, nodes, Agent.application, Optional.empty());
}

/** Activate nodes. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
    return db.writeTo(State.active, nodes, Agent.application, Optional.empty(), transaction);
}

/**
 * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
 *
 * @param application the application the nodes belong to
 * @param nodes the nodes to make removable. These nodes MUST be in the active state.
 */
public void setRemovable(ApplicationId application, List<Node> nodes) {
    try (Mutex lock = lock(application)) {
        List<Node> removableNodes =
                nodes.stream().map(node -> node.with(node.allocation().get().removable(true)))
                     .collect(Collectors.toList());
        write(removableNodes, lock);
    }
}

/** Deactivate nodes owned by application guarded by given lock */
public void deactivate(NestedTransaction transaction, ProvisionLock lock) {
    deactivate(db.readNodes(lock.application(), State.reserved, State.active), transaction, lock);
    applications.remove(lock.application(), transaction, lock);
}

/**
 * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the
 * transaction commits.
 */
public List<Node> deactivate(List<Node> nodes, NestedTransaction transaction, @SuppressWarnings("unused") ProvisionLock lock) {
    return db.writeTo(State.inactive, nodes, Agent.application, Optional.empty(), transaction);
}

/** Move nodes to the dirty state */
public List<Node> setDirty(List<Node> nodes, Agent agent, String reason) {
    return performOn(NodeListFilter.from(nodes), (node, lock) -> setDirty(node, agent, reason));
}

/**
 * Set a node dirty, allowed if it is in the provisioned, inactive, failed or parked state.
 * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
 *
 * @throws IllegalArgumentException if the node has hardware failure
 */
public Node setDirty(Node node, Agent agent, String reason) {
    return db.writeTo(State.dirty, node, agent, Optional.of(reason));
}

/**
 * Dirties the node with this hostname and, if it is a host, all of its children.
 * All affected nodes must be in one of the states provisioned, failed or parked.
 */
public List<Node> dirtyRecursively(String hostname, Agent agent, String reason) {
    Node nodeToDirty = getNode(hostname).orElseThrow(() ->
            new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));
    List<Node> nodesToDirty =
            (nodeToDirty.type().isHost() ?
             Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
             Stream.of(nodeToDirty))
            .filter(node -> node.state() != State.dirty)
            .collect(Collectors.toList());
    // Validate all nodes before dirtying any of them.
    List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
            .filter(node -> node.state() != State.provisioned)
            .filter(node -> node.state() != State.failed)
            .filter(node -> node.state() != State.parked)
            .map(Node::hostname)
            .collect(Collectors.toList());
    if ( ! hostnamesNotAllowedToDirty.isEmpty())
        illegal("Could not deallocate " + nodeToDirty + ": " +
                hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked]");
    return nodesToDirty.stream().map(node -> setDirty(node, agent, reason)).collect(Collectors.toList());
}
/**
 * Fails this node (keeping its allocation) and returns it in its new state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node fail(String hostname, Agent agent, String reason) {
    return move(hostname, true, State.failed, agent, Optional.of(reason));
}

/**
 * Fails all the nodes that are children of hostname before finally failing the hostname itself.
 *
 * @return List of all the failed nodes in their new state
 */
public List<Node> failRecursively(String hostname, Agent agent, String reason) {
    return moveRecursively(hostname, State.failed, agent, Optional.of(reason));
}

/**
 * Parks this node and returns it in its new state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
    return move(hostname, keepAllocation, State.parked, agent, Optional.of(reason));
}

/**
 * Parks all the nodes that are children of hostname before finally parking the hostname itself.
 *
 * @return List of all the parked nodes in their new state
 */
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
    return moveRecursively(hostname, State.parked, agent, Optional.of(reason));
}

/**
 * Moves a previously failed or parked node back to the active state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node reactivate(String hostname, Agent agent, String reason) {
    return move(hostname, true, State.active, agent, Optional.of(reason));
}
/** Moves the children of this host first, then the host itself, to the given state. */
private List<Node> moveRecursively(String hostname, State toState, Agent agent, Optional<String> reason) {
    List<Node> moved = new ArrayList<>();
    for (Node child : list().childrenOf(hostname).asList()) {
        moved.add(move(child, toState, agent, reason));
    }
    moved.add(move(hostname, true, toState, agent, reason));
    return moved;
}
/**
 * Looks up the node with the given hostname and moves it to {@code toState},
 * first clearing its allocation unless {@code keepAllocation} is true.
 *
 * @throws NoSuchNodeException if no node with the given hostname exists
 */
private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason) {
    Node node = getNode(hostname).orElseThrow(() ->
            new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));
    if (!keepAllocation && node.allocation().isPresent()) {
        node = node.withoutAllocation();
    }
    return move(node, toState, agent, reason);
}
/**
 * Moves the given node to {@code toState} under the appropriate lock.
 * When activating, verifies that no other active node of the same application
 * already holds the same cluster and index.
 */
private Node move(Node node, State toState, Agent agent, Optional<String> reason) {
    if (toState == Node.State.active && node.allocation().isEmpty())
        illegal("Could not set " + node + " active. It has no allocation.");

    try (Mutex lock = lock(node)) {
        if (toState == State.active) {
            // Guard: two active nodes must never occupy the same cluster/index slot
            for (Node currentActive : getNodes(node.allocation().get().owner(), State.active)) {
                if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
                    && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                    illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
            }
        }
        return db.writeTo(toState, node, agent, reason);
    }
}
/*
 * This method is used by the REST API to handle readying nodes for new allocations. For tenant docker
 * containers this will remove the node from node repository, otherwise the node will be moved to state ready.
 */
public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
    Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
    if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
        if (node.state() != State.dirty)
            illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
        // Tenant docker containers are deleted rather than recycled to ready
        return removeRecursively(node, true).get(0);
    }

    if (node.state() == State.ready) return node; // already available: no-op

    // A node can only be readied if its parent host (or itself, if it has no parent) has no hard failures
    Node parentHost = node.parentHostname().flatMap(this::getNode).orElse(node);
    List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost);
    if ( ! failureReasons.isEmpty())
        illegal(node + " cannot be readied because it has hard failures: " + failureReasons);

    return setReady(Collections.singletonList(node), agent, reason).get(0);
}
/**
 * Removes all the nodes that are children of hostname before finally removing the hostname itself.
 *
 * @return a List of all the nodes that have been removed or (for hosts) deprovisioned
 * @throws NotFoundException if no node with the given hostname exists
 */
public List<Node> removeRecursively(String hostname) {
    Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
    return removeRecursively(node, false); // force = false: the usual state restrictions apply
}
/**
 * Removes the given node, and all its children if it is a host, under the unallocated-nodes lock.
 * Physical hosts (in zones without dynamic provisioning) are not deleted outright: they are kept
 * as a tombstone in state deprovisioned, with their IP config cleared.
 *
 * @param force when true, skip all removability checks
 * @return the removed nodes; a deprovisioned host is returned in its new (deprovisioned) state
 */
public List<Node> removeRecursively(Node node, boolean force) {
    try (Mutex lock = lockUnallocated()) {
        requireRemovable(node, false, force);

        if (node.type().isHost()) {
            // Children are removed first; each must itself be removable (as a child)
            List<Node> children = list().childrenOf(node).asList();
            children.forEach(child -> requireRemovable(child, true, force));
            db.removeNodes(children);
            List<Node> removed = new ArrayList<>(children);
            if (zone.getCloud().dynamicProvisioning() || node.type() != NodeType.host)
                db.removeNodes(List.of(node));
            else {
                // Keep a deprovisioned tombstone so the node's history survives re-provisioning.
                // Capture the moved node so callers see its new state (previously the move result
                // was discarded and the stale pre-move node was returned).
                node = move(node.with(IP.Config.EMPTY), State.deprovisioned, Agent.system, Optional.empty());
            }
            removed.add(node);
            return removed;
        }
        else {
            List<Node> removed = List.of(node);
            db.removeNodes(removed);
            return removed;
        }
    }
}
/** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */
public void forget(Node node) {
    // Only the deprovisioned tombstone may be deleted permanently
    if (node.state() != State.deprovisioned)
        throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
    db.removeNodes(List.of(node));
}
/**
 * Throws if the given node cannot be removed. Removal is allowed if:
 *  - Tenant node: node is unallocated
 *  - Host node: iff in state provisioned|failed|parked
 *  - Child node:
 *      If only removing the container node: node in state ready
 *      If also removing the parent node: child is in state provisioned|failed|parked|dirty|ready
 *
 * @param removingAsChild whether this node is removed as part of removing its parent host
 * @param force when true, all checks are skipped
 */
private void requireRemovable(Node node, boolean removingAsChild, boolean force) {
    if (force) return;

    if (node.type() == NodeType.tenant && node.allocation().isPresent())
        illegal(node + " is currently allocated and cannot be removed");

    if (!node.type().isHost() && !removingAsChild) {
        // Removing just this (non-host) node: it must be ready
        if (node.state() != State.ready)
            illegal(node + " can not be removed as it is not in the state " + State.ready);
    }
    else if (!node.type().isHost()) {
        // Removing this node as a child of a host being removed
        Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked, State.dirty, State.ready);
        if ( ! legalStates.contains(node.state()))
            illegal(node + " can not be removed as it is not in the states " + legalStates);
    }
    else {
        // Removing a host
        Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked);
        if (! legalStates.contains(node.state()))
            illegal(node + " can not be removed as it is not in the states " + legalStates);
    }
}
/**
 * Increases the restart generation of the active nodes matching the filter.
 *
 * @return the nodes in their new state.
 */
public List<Node> restart(NodeFilter filter) {
    return performOn(StateFilter.from(State.active, filter),
                     (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()),
                                           lock));
}

/**
 * Increases the reboot generation of the nodes matching the filter.
 *
 * @return the nodes in their new state.
 */
public List<Node> reboot(NodeFilter filter) {
    return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock));
}

/**
 * Set target OS version of all nodes matching given filter.
 *
 * @param version the wanted OS version; empty presumably clears the wanted version — confirm with OsVersions
 * @return the nodes in their new state.
 */
public List<Node> upgradeOs(NodeFilter filter, Optional<Version> version) {
    return performOn(filter, (node, lock) -> {
        var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version));
        return write(node.with(newStatus), lock);
    });
}

/**
 * Retire nodes matching given filter.
 *
 * @return the nodes in their new state.
 */
public List<Node> retire(NodeFilter filter, Agent agent, Instant instant) {
    return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock));
}
/**
 * Writes this node after it has changed some internal state but NOT changed its state field.
 * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
 *
 * @param lock Already acquired lock
 * @return the written node for convenience
 */
public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }

/**
 * Writes these nodes after they have changed some internal state but NOT changed their state field.
 * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
 *
 * @param lock already acquired lock; unused here, it serves as proof that the caller holds it
 * @return the written nodes for convenience
 */
public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
    return db.writeTo(nodes, Agent.system, Optional.empty());
}
/**
 * Returns whether the given host is eligible to receive new tenant node allocations:
 * it must be able to run tenant nodes, must not want to retire, must not itself be
 * retired, and must be in an allocatable state. When this repository can provision
 * hosts on demand, ready and provisioned hosts are also allocatable; otherwise only
 * active hosts are.
 *
 * NOTE(review): the javadoc previously attached here described performOn(filter, action)
 * and belonged to a different method; it has been replaced.
 */
public boolean canAllocateTenantNodeTo(Node host) {
    if ( ! host.type().canRun(NodeType.tenant)) return false;
    if (host.status().wantToRetire()) return false;
    if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false;

    if ( canProvisionHosts())
        return EnumSet.of(State.active, State.ready, State.provisioned).contains(host.state());
    else
        return host.state() == State.active;
}
/** Returns whether this repository can provision hosts on demand */
public boolean canProvisionHosts() { return canProvisionHosts; }

/** Returns the time keeper of this system */
public Clock clock() { return clock; }

/** Returns the zone of this system */
public Zone zone() { return zone; }

/** Create a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) {
    return db.lock(application);
}

/** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application, Duration timeout) {
    return db.lock(application, timeout);
}

/** Create a lock which provides exclusive rights to modifying unallocated nodes */
public Mutex lockUnallocated() { return db.lockInactive(); }

/** Acquires the owning application's lock if the node is allocated, otherwise the unallocated-nodes lock */
public Mutex lock(Node node) {
    return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
}

/** Throws an IllegalArgumentException with the given message */
private void illegal(String message) {
    throw new IllegalArgumentException(message);
}
} | class NodeRepository extends AbstractComponent {
private static final Logger log = Logger.getLogger(NodeRepository.class.getName());

// Backing store and environment
private final CuratorDatabaseClient db;
private final Clock clock;
private final Zone zone;
private final NodeFlavors flavors;
private final HostResourcesCalculator resourcesCalculator;
private final NameResolver nameResolver;

// Sub-repositories, persisted through db
private final OsVersions osVersions;
private final InfrastructureVersions infrastructureVersions;
private final FirmwareChecks firmwareChecks;
private final DockerImages dockerImages;
private final JobControl jobControl;
private final Applications applications;

// Whether hosts can be provisioned on demand in this zone
private final boolean canProvisionHosts;
// Number of hosts to keep free for replacing failed nodes (see spareCount())
private final int spareCount;
/**
 * Creates a node repository from a zookeeper provider.
 * This will use the system time to make time-sensitive decisions
 */
@Inject
public NodeRepository(NodeRepositoryConfig config,
                      NodeFlavors flavors,
                      ProvisionServiceProvider provisionServiceProvider,
                      Curator curator,
                      Zone zone,
                      FlagSource flagSource) {
    this(flavors,
         provisionServiceProvider.getHostResourcesCalculator(),
         curator,
         Clock.systemUTC(),
         zone,
         new DnsNameResolver(),
         DockerImage.fromString(config.dockerImage()),
         flagSource,
         config.useCuratorClientCache(),
         provisionServiceProvider.getHostProvisioner().isPresent(), // canProvisionHosts
         // spareCount: one spare in production zones without dynamic host provisioning, otherwise none
         zone.environment().isProduction() && provisionServiceProvider.getHostProvisioner().isEmpty() ? 1 : 0,
         config.nodeCacheSize());
}
/**
 * Creates a node repository from a zookeeper provider and a clock instance
 * which will be used for time-sensitive decisions.
 */
public NodeRepository(NodeFlavors flavors,
                      HostResourcesCalculator resourcesCalculator,
                      Curator curator,
                      Clock clock,
                      Zone zone,
                      NameResolver nameResolver,
                      DockerImage dockerImage,
                      FlagSource flagSource,
                      boolean useCuratorClientCache,
                      boolean canProvisionHosts,
                      int spareCount,
                      long nodeCacheSize) {
    this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache, nodeCacheSize);
    this.zone = zone;
    this.clock = clock;
    this.flavors = flavors;
    this.resourcesCalculator = resourcesCalculator;
    this.nameResolver = nameResolver;
    this.osVersions = new OsVersions(this);
    this.infrastructureVersions = new InfrastructureVersions(db);
    this.firmwareChecks = new FirmwareChecks(db, clock);
    this.dockerImages = new DockerImages(db, dockerImage);
    this.jobControl = new JobControl(new JobControlFlags(db, flagSource));
    this.applications = new Applications(db);
    this.canProvisionHosts = canProvisionHosts;
    this.spareCount = spareCount;
    rewriteNodes(); // migrate all stored nodes to the current serialization format
}
/** Read and write all nodes to make sure they are stored in the latest version of the serialized format */
private void rewriteNodes() {
    Instant start = clock.instant();
    int nodesWritten = 0;
    for (State state : State.values()) {
        List<Node> nodes = db.readNodes(state);
        // Writing the nodes back in their current state re-serializes them in the current format
        db.writeTo(state, nodes, Agent.system, Optional.empty());
        nodesWritten += nodes.size();
    }
    Instant end = clock.instant();
    log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end)));
}
/** Returns the curator database client used by this */
public CuratorDatabaseClient database() { return db; }

/** Returns the Docker image to use for given node */
public DockerImage dockerImage(Node node) { return dockerImages.dockerImageFor(node.type()); }

/** @return The name resolver used to resolve hostname and ip addresses */
public NameResolver nameResolver() { return nameResolver; }

/** Returns the OS versions to use for nodes in this */
public OsVersions osVersions() { return osVersions; }

/** Returns the infrastructure versions to use for nodes in this */
public InfrastructureVersions infrastructureVersions() { return infrastructureVersions; }

/** Returns the status of firmware checks for hosts managed by this. */
public FirmwareChecks firmwareChecks() { return firmwareChecks; }

/** Returns the docker images to use for nodes in this. */
public DockerImages dockerImages() { return dockerImages; }

/** Returns the status of maintenance jobs managed by this. */
public JobControl jobControl() { return jobControl; }

/** Returns this node repo's view of the applications deployed to it */
public Applications applications() { return applications; }

/** Returns the flavors known to this repository */
public NodeFlavors flavors() {
    return flavors;
}

/** Returns the host resources calculator used by this */
public HostResourcesCalculator resourcesCalculator() { return resourcesCalculator; }

/** The number of nodes we should ensure has free capacity for node failures whenever possible */
public int spareCount() { return spareCount; }
/**
 * Finds and returns the node with the hostname in any of the given states, or empty if not found
 *
 * @param hostname the full host name of the node
 * @param inState the states the node may be in. If no states are given, it will be returned from any state
 * @return the node, or empty if it was not found in any of the given states
 */
public Optional<Node> getNode(String hostname, State ... inState) {
    return db.readNode(hostname, inState);
}

/**
 * Returns all nodes in any of the given states.
 *
 * @param inState the states to return nodes from. If no states are given, all nodes are returned
 * @return a mutable copy of the matching nodes
 */
public List<Node> getNodes(State ... inState) {
    return new ArrayList<>(db.readNodes(inState));
}
/**
 * Finds and returns the nodes of the given type in any of the given states.
 *
 * @param type the node type to return
 * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
 * @return the matching nodes
 */
public List<Node> getNodes(NodeType type, State ... inState) {
    List<Node> matching = new ArrayList<>();
    for (Node node : db.readNodes(inState))
        if (node.type().equals(type))
            matching.add(node);
    return matching;
}
/** Returns a filterable list of nodes in this repository in any of the given states */
public NodeList list(State ... inState) {
    return NodeList.copyOf(getNodes(inState));
}

/** Returns a filterable list of all nodes of an application */
public NodeList list(ApplicationId application) {
    return NodeList.copyOf(getNodes(application));
}

/** Returns a locked list of all nodes in this repository */
public LockedNodeList list(Mutex lock) {
    return new LockedNodeList(getNodes(), lock);
}

/** Returns a filterable list of all load balancers in this repository */
public LoadBalancerList loadBalancers() {
    return loadBalancers((ignored) -> true);
}

/** Returns a filterable list of load balancers belonging to given application */
public LoadBalancerList loadBalancers(ApplicationId application) {
    return loadBalancers((id) -> id.application().equals(application));
}

/** Returns the load balancers whose id matches the given predicate */
private LoadBalancerList loadBalancers(Predicate<LoadBalancerId> predicate) {
    return LoadBalancerList.copyOf(db.readLoadBalancers(predicate).values());
}

/** Returns the nodes allocated to the given application, in any of the given states */
public List<Node> getNodes(ApplicationId id, State ... inState) { return db.readNodes(id, inState); }

/** Returns all nodes in the inactive state */
public List<Node> getInactive() { return db.readNodes(State.inactive); }

/** Returns all nodes in the failed state */
public List<Node> getFailed() { return db.readNodes(State.failed); }
/**
 * Returns the ACL for the node (trusted nodes, networks and ports)
 */
private NodeAcl getNodeAcl(Node node, NodeList candidates) {
    Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname));
    Set<Integer> trustedPorts = new LinkedHashSet<>();
    Set<String> trustedNetworks = new LinkedHashSet<>();

    // All nodes trust port 22 (ssh) and their parent host, if any
    trustedPorts.add(22);
    candidates.parentOf(node).ifPresent(trustedNodes::add);

    // Allocated nodes trust all nodes of the same application and the networks of its load balancers
    node.allocation().ifPresent(allocation -> {
        trustedNodes.addAll(candidates.owner(allocation.owner()).asList());
        loadBalancers(allocation.owner()).asList().stream()
                                         .map(LoadBalancer::instance)
                                         .map(LoadBalancerInstance::networks)
                                         .forEach(trustedNetworks::addAll);
    });

    switch (node.type()) {
        case tenant:
            // Tenant nodes trust config and proxy nodes, and the hosts of their application's nodes
            trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
            trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList());
            node.allocation().ifPresent(allocation ->
                trustedNodes.addAll(candidates.parentsOf(candidates.owner(allocation.owner())).asList()));
            if (node.state() == State.ready) {
                // Ready tenant nodes additionally trust all other tenant nodes
                trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList());
            }
            break;

        case config:
            // Config servers trust all nodes, plus port 4443
            trustedNodes.addAll(candidates.asList());
            trustedPorts.add(4443);
            break;

        case proxy:
            // Proxy nodes trust config nodes, plus their serving/health ports
            trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
            trustedPorts.add(443);
            trustedPorts.add(4080);
            trustedPorts.add(4443);
            break;

        case controller:
            // Controllers trust no extra nodes, only these ports
            trustedPorts.add(4443);
            trustedPorts.add(443);
            trustedPorts.add(80);
            break;

        default:
            illegal("Don't know how to create ACL for " + node + " of type " + node.type());
    }
    return new NodeAcl(node, trustedNodes, trustedNetworks, trustedPorts);
}
/**
 * Creates a list of node ACLs which identify which nodes the given node should trust
 *
 * @param node Node for which to generate ACLs
 * @param children Return ACLs for the children of the given node (e.g. containers on a Docker host)
 * @return List of node ACLs
 */
public List<NodeAcl> getNodeAcls(Node node, boolean children) {
    NodeList candidates = list();
    if ( ! children) return Collections.singletonList(getNodeAcl(node, candidates));

    List<NodeAcl> childAcls = new ArrayList<>();
    for (Node child : candidates.childrenOf(node).asList())
        childAcls.add(getNodeAcl(child, candidates));
    return Collections.unmodifiableList(childAcls);
}
/** Creates a new node object, without adding it to the node repo. If no IP address is given, it will be resolved */
public Node createNode(String openStackId, String hostname, IP.Config ipConfig, Optional<String> parentHostname,
                       Flavor flavor, Optional<TenantName> reservedTo, NodeType type) {
    if (ipConfig.primary().isEmpty()) // no IP addresses supplied: resolve them from the hostname
        ipConfig = ipConfig.with(nameResolver.getAllByNameOrThrow(hostname));
    return Node.create(openStackId, ipConfig, hostname, parentHostname, Optional.empty(), flavor, reservedTo, type, Optional.empty());
}

/** Creates a new node object with empty IP config and no reservation, without adding it to the node repo */
public Node createNode(String openStackId, String hostname, Optional<String> parentHostname, Flavor flavor, NodeType type) {
    return createNode(openStackId, hostname, IP.Config.EMPTY, parentHostname, flavor, Optional.empty(), type);
}
/** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */
public List<Node> addDockerNodes(LockedNodeList nodes) {
    // Validate all nodes before writing any of them
    for (Node node : nodes) {
        if ( ! node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER))
            illegal("Cannot add " + node + ": This is not a docker node");
        if ( ! node.allocation().isPresent())
            illegal("Cannot add " + node + ": Docker containers needs to be allocated");
        Optional<Node> existing = getNode(node.hostname());
        if (existing.isPresent())
            illegal("Cannot add " + node + ": A node with this name already exists (" +
                    existing.get() + ", " + existing.get().history() + "). Node to be added: " +
                    node + ", " + node.history());
    }
    return db.addNodesInState(nodes.asList(), State.reserved, Agent.system);
}
/**
 * Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes.
 * If any of the nodes already exists in the deprovisioned state, the new node will be merged
 * with the history of that node.
 */
public List<Node> addNodes(List<Node> nodes, Agent agent) {
    try (Mutex lock = lockUnallocated()) {
        List<Node> nodesToAdd = new ArrayList<>();
        List<Node> nodesToRemove = new ArrayList<>();
        for (int i = 0; i < nodes.size(); i++) {
            var node = nodes.get(i);

            // Reject duplicates within the argument list itself
            for (int j = 0; j < i; j++) {
                if (node.equals(nodes.get(j)))
                    illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
            }

            Optional<Node> existing = getNode(node.hostname());
            if (existing.isPresent()) {
                if (existing.get().state() != State.deprovisioned)
                    illegal("Cannot add " + node + ": A node with this name already exists");
                // Merge history, reports, fail count and firmware check time from the deprovisioned tombstone
                node = node.with(existing.get().history());
                node = node.with(existing.get().reports());
                node = node.with(node.status().withFailCount(existing.get().status().failCount()));
                if (existing.get().status().firmwareVerifiedAt().isPresent())
                    node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
                nodesToRemove.add(existing.get()); // the tombstone is replaced by the merged node
            }
            nodesToAdd.add(node);
        }
        List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), State.provisioned, agent);
        db.removeNodes(nodesToRemove);
        return resultingNodes;
    }
}
/** Sets a list of nodes ready and returns the nodes in the ready state */
public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
    try (Mutex lock = lockUnallocated()) {
        List<Node> nodesWithResetFields = nodes.stream()
                .map(node -> {
                    if (node.state() != State.provisioned && node.state() != State.dirty)
                        illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
                    // Clear any want-to-retire flag before readying
                    return node.withWantToRetire(false, false, Agent.system, clock.instant());
                })
                .collect(Collectors.toList());
        return db.writeTo(State.ready, nodesWithResetFields, agent, Optional.of(reason));
    }
}

/** Sets the node with the given hostname ready, if it is not already, and returns it in its new state */
public Node setReady(String hostname, Agent agent, String reason) {
    Node nodeToReady = getNode(hostname).orElseThrow(() ->
            new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));

    if (nodeToReady.state() == State.ready) return nodeToReady; // no-op
    return setReady(Collections.singletonList(nodeToReady), agent, reason).get(0);
}
/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
    return db.writeTo(State.reserved, nodes, Agent.application, Optional.empty());
}

/** Activate nodes. This method does <b>not</b> lock the node repository. The write is part of the given transaction */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
    return db.writeTo(State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
 * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
 *
 * @param application the application the nodes belong to
 * @param nodes the nodes to make removable. These nodes MUST be in the active state.
 */
public void setRemovable(ApplicationId application, List<Node> nodes) {
    try (Mutex lock = lock(application)) {
        // All given nodes are assumed to be allocated (they must be active)
        List<Node> removableNodes =
                nodes.stream().map(node -> node.with(node.allocation().get().removable(true)))
                              .collect(Collectors.toList());
        write(removableNodes, lock);
    }
}
/** Deactivate nodes owned by application guarded by given lock */
public void deactivate(NestedTransaction transaction, ProvisionLock lock) {
    deactivate(db.readNodes(lock.application(), State.reserved, State.active), transaction, lock);
    applications.remove(lock.application(), transaction, lock);
}

/**
 * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the
 * transaction commits.
 *
 * @param lock unused here; it serves as proof that the caller holds the application's provision lock
 */
public List<Node> deactivate(List<Node> nodes, NestedTransaction transaction, @SuppressWarnings("unused") ProvisionLock lock) {
    return db.writeTo(State.inactive, nodes, Agent.application, Optional.empty(), transaction);
}
/** Move nodes to the dirty state */
public List<Node> setDirty(List<Node> nodes, Agent agent, String reason) {
    return performOn(NodeListFilter.from(nodes), (node, lock) -> setDirty(node, agent, reason));
}

/**
 * Set a node dirty, allowed if it is in the provisioned, inactive, failed or parked state.
 * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
 *
 * @return the node in the dirty state
 * @throws IllegalArgumentException if the node has hardware failure
 */
public Node setDirty(Node node, Agent agent, String reason) {
    return db.writeTo(State.dirty, node, agent, Optional.of(reason));
}
/**
 * Deallocates the node with the given hostname — and, for hosts, all its children —
 * by moving every node not already dirty to the dirty state. All affected nodes must
 * be in state provisioned, failed or parked.
 */
public List<Node> dirtyRecursively(String hostname, Agent agent, String reason) {
    Node nodeToDirty = getNode(hostname).orElseThrow(() ->
            new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));

    // Children first (only hosts have children), then the node itself; skip nodes already dirty
    List<Node> nodesToDirty = new ArrayList<>();
    if (nodeToDirty.type().isHost()) {
        for (Node child : list().childrenOf(hostname).asList())
            if (child.state() != State.dirty)
                nodesToDirty.add(child);
    }
    if (nodeToDirty.state() != State.dirty)
        nodesToDirty.add(nodeToDirty);

    // Reject the whole operation if any affected node is in a disallowed state
    List<String> hostnamesNotAllowedToDirty = new ArrayList<>();
    for (Node node : nodesToDirty)
        if (node.state() != State.provisioned && node.state() != State.failed && node.state() != State.parked)
            hostnamesNotAllowedToDirty.add(node.hostname());
    if ( ! hostnamesNotAllowedToDirty.isEmpty())
        illegal("Could not deallocate " + nodeToDirty + ": " +
                hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked]");

    List<Node> dirtied = new ArrayList<>();
    for (Node node : nodesToDirty)
        dirtied.add(setDirty(node, agent, reason));
    return dirtied;
}
/**
 * Fails this node and returns it in its new state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node fail(String hostname, Agent agent, String reason) {
    return move(hostname, true, State.failed, agent, Optional.of(reason)); // true: keep any allocation
}

/**
 * Fails all the nodes that are children of hostname before finally failing the hostname itself.
 *
 * @return List of all the failed nodes in their new state
 */
public List<Node> failRecursively(String hostname, Agent agent, String reason) {
    return moveRecursively(hostname, State.failed, agent, Optional.of(reason));
}

/**
 * Parks this node and returns it in its new state.
 *
 * @param keepAllocation whether the node should keep its current allocation when parked
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
    return move(hostname, keepAllocation, State.parked, agent, Optional.of(reason));
}

/**
 * Parks all the nodes that are children of hostname before finally parking the hostname itself.
 *
 * @return List of all the parked nodes in their new state
 */
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
    return moveRecursively(hostname, State.parked, agent, Optional.of(reason));
}

/**
 * Moves a previously failed or parked node back to the active state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node reactivate(String hostname, Agent agent, String reason) {
    return move(hostname, true, State.active, agent, Optional.of(reason));
}
/** Moves every child of {@code hostname} to {@code toState}, then the node itself. */
private List<Node> moveRecursively(String hostname, State toState, Agent agent, Optional<String> reason) {
    List<Node> movedNodes = new ArrayList<>();
    // Children must be moved before their parent host
    for (Node child : list().childrenOf(hostname).asList())
        movedNodes.add(move(child, toState, agent, reason));
    movedNodes.add(move(hostname, true, toState, agent, reason));
    return movedNodes;
}
/**
 * Looks up the node with the given hostname and moves it to {@code toState},
 * first clearing its allocation unless {@code keepAllocation} is true.
 *
 * @throws NoSuchNodeException if no node with the given hostname exists
 */
private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason) {
    Node node = getNode(hostname).orElseThrow(() ->
            new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));
    if (!keepAllocation && node.allocation().isPresent()) {
        node = node.withoutAllocation();
    }
    return move(node, toState, agent, reason);
}
/**
 * Moves the given node to {@code toState} under the appropriate lock.
 * When activating, verifies that no other active node of the same application
 * already holds the same cluster and index.
 */
private Node move(Node node, State toState, Agent agent, Optional<String> reason) {
    if (toState == Node.State.active && node.allocation().isEmpty())
        illegal("Could not set " + node + " active. It has no allocation.");

    try (Mutex lock = lock(node)) {
        if (toState == State.active) {
            // Guard: two active nodes must never occupy the same cluster/index slot
            for (Node currentActive : getNodes(node.allocation().get().owner(), State.active)) {
                if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
                    && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                    illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
            }
        }
        return db.writeTo(toState, node, agent, reason);
    }
}
/*
 * This method is used by the REST API to handle readying nodes for new allocations. For tenant docker
 * containers this will remove the node from node repository, otherwise the node will be moved to state ready.
 */
public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
    Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
    if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
        if (node.state() != State.dirty)
            illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
        // Tenant docker containers are deleted rather than recycled to ready
        return removeRecursively(node, true).get(0);
    }

    if (node.state() == State.ready) return node; // already available: no-op

    // A node can only be readied if its parent host (or itself, if it has no parent) has no hard failures
    Node parentHost = node.parentHostname().flatMap(this::getNode).orElse(node);
    List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost);
    if ( ! failureReasons.isEmpty())
        illegal(node + " cannot be readied because it has hard failures: " + failureReasons);

    return setReady(Collections.singletonList(node), agent, reason).get(0);
}
/**
 * Removes all the nodes that are children of hostname before finally removing the hostname itself.
 *
 * @return a List of all the nodes that have been removed or (for hosts) deprovisioned
 * @throws NotFoundException if no node with the given hostname exists
 */
public List<Node> removeRecursively(String hostname) {
    Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
    return removeRecursively(node, false); // force = false: the usual state restrictions apply
}
/**
 * Removes the given node, and all its children if it is a host, under the unallocated-nodes lock.
 * Physical hosts (in zones without dynamic provisioning) are not deleted outright: they are kept
 * as a tombstone in state deprovisioned, with their IP config cleared.
 *
 * @param force when true, skip all removability checks
 * @return the removed nodes; a deprovisioned host is returned in its new (deprovisioned) state
 */
public List<Node> removeRecursively(Node node, boolean force) {
    try (Mutex lock = lockUnallocated()) {
        requireRemovable(node, false, force);

        if (node.type().isHost()) {
            // Children are removed first; each must itself be removable (as a child)
            List<Node> children = list().childrenOf(node).asList();
            children.forEach(child -> requireRemovable(child, true, force));
            db.removeNodes(children);
            List<Node> removed = new ArrayList<>(children);
            if (zone.getCloud().dynamicProvisioning() || node.type() != NodeType.host)
                db.removeNodes(List.of(node));
            else {
                // Keep a deprovisioned tombstone so the node's history survives re-provisioning.
                // Capture the moved node so callers see its new state (previously the move result
                // was discarded and the stale pre-move node was returned).
                node = move(node.with(IP.Config.EMPTY), State.deprovisioned, Agent.system, Optional.empty());
            }
            removed.add(node);
            return removed;
        }
        else {
            List<Node> removed = List.of(node);
            db.removeNodes(removed);
            return removed;
        }
    }
}
/** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */
public void forget(Node node) {
    // Only the deprovisioned tombstone may be deleted permanently
    if (node.state() != State.deprovisioned)
        throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
    db.removeNodes(List.of(node));
}
/**
 * Throws if the given node cannot be removed. Removal is allowed if:
 *  - Tenant node: node is unallocated
 *  - Host node: iff in state provisioned|failed|parked
 *  - Child node:
 *      If only removing the container node: node in state ready
 *      If also removing the parent node: child is in state provisioned|failed|parked|dirty|ready
 *
 * @param removingAsChild whether this node is removed as part of removing its parent host
 * @param force when true, all checks are skipped
 */
private void requireRemovable(Node node, boolean removingAsChild, boolean force) {
    if (force) return;

    if (node.type() == NodeType.tenant && node.allocation().isPresent())
        illegal(node + " is currently allocated and cannot be removed");

    if (!node.type().isHost() && !removingAsChild) {
        // Removing just this (non-host) node: it must be ready
        if (node.state() != State.ready)
            illegal(node + " can not be removed as it is not in the state " + State.ready);
    }
    else if (!node.type().isHost()) {
        // Removing this node as a child of a host being removed
        Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked, State.dirty, State.ready);
        if ( ! legalStates.contains(node.state()))
            illegal(node + " can not be removed as it is not in the states " + legalStates);
    }
    else {
        // Removing a host
        Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked);
        if (! legalStates.contains(node.state()))
            illegal(node + " can not be removed as it is not in the states " + legalStates);
    }
}
/**
 * Increases the wanted restart generation of every active node accepted by the filter.
 *
 * @param filter selects the nodes to restart; only nodes in state active are considered
 * @return the affected nodes in their new state
 */
public List<Node> restart(NodeFilter filter) {
    return performOn(StateFilter.from(State.active, filter), (node, lock) -> {
        var generation = node.allocation().get().restartGeneration().withIncreasedWanted();
        return write(node.withRestart(generation), lock);
    });
}
/**
 * Increases the wanted reboot generation of the nodes accepted by the filter.
 *
 * @param filter selects the nodes to reboot
 * @return the affected nodes in their new state
 */
public List<Node> reboot(NodeFilter filter) {
    return performOn(filter, (node, lock) -> {
        var reboot = node.status().reboot().withIncreasedWanted();
        return write(node.withReboot(reboot), lock);
    });
}
/**
 * Sets the target OS version of all nodes accepted by the filter.
 *
 * @param filter  selects the nodes to upgrade
 * @param version the wanted OS version, or empty to clear the target
 * @return the affected nodes in their new state
 */
public List<Node> upgradeOs(NodeFilter filter, Optional<Version> version) {
    return performOn(filter, (node, lock) -> {
        var osVersion = node.status().osVersion().withWanted(version);
        return write(node.with(node.status().withOsVersion(osVersion)), lock);
    });
}
/**
 * Marks every node accepted by the filter as wanting to retire.
 *
 * @param filter  selects the nodes to retire
 * @param agent   the agent requesting the retirement
 * @param instant the time the retirement was requested
 * @return the affected nodes in their new state
 */
public List<Node> retire(NodeFilter filter, Agent agent, Instant instant) {
    return performOn(filter, (node, lock) -> {
        Node retired = node.withWantToRetire(true, agent, instant);
        return write(retired, lock);
    });
}
/**
 * Writes this node after it has changed some internal state but NOT changed its state field.
 * This does NOT lock the node repository implicitly; the caller must already hold the lock.
 *
 * @param lock already acquired lock
 * @return the written node for convenience
 */
public Node write(Node node, Mutex lock) {
    List<Node> written = write(List.of(node), lock);
    return written.get(0);
}
/**
 * Writes these nodes after they have changed some internal state but NOT changed their state field.
 * This does NOT lock the node repository implicitly; the caller must already hold the lock.
 *
 * @param lock already acquired lock
 * @return the written nodes for convenience
 */
public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
    // The lock parameter only documents that the caller holds the required lock; it is not used here.
    return db.writeTo(nodes, Agent.system, Optional.empty());
}
/**
 * Returns whether a tenant node can be allocated on the given host.
 *
 * NOTE(review): the previous Javadoc here described a performOn(filter, action) method and
 * did not match this declaration; it has been rewritten to document the actual behavior.
 *
 * @param host the candidate host node
 * @return true if the host can run tenant nodes, does not want to retire, is not retired,
 *         and is in an allocatable state: active, ready or provisioned when this repository
 *         can provision hosts on demand, otherwise only active
 */
public boolean canAllocateTenantNodeTo(Node host) {
if ( ! host.type().canRun(NodeType.tenant)) return false;
if (host.status().wantToRetire()) return false;
if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false;
if ( canProvisionHosts())
return EnumSet.of(State.active, State.ready, State.provisioned).contains(host.state());
else
return host.state() == State.active;
}
/** Returns whether this repository can provision hosts on demand. */
public boolean canProvisionHosts() {
    return canProvisionHosts;
}
/** Returns the time keeper (clock) of this system. */
public Clock clock() {
    return clock;
}
/** Returns the zone this system runs in. */
public Zone zone() {
    return zone;
}
/** Creates a lock which provides exclusive rights to making changes to the given application. */
public Mutex lock(ApplicationId application) {
    return db.lock(application);
}
/** Creates a lock with a timeout which provides exclusive rights to making changes to the given application. */
public Mutex lock(ApplicationId application, Duration timeout) {
    return db.lock(application, timeout);
}
/** Creates a lock which provides exclusive rights to modifying unallocated nodes. */
public Mutex lockUnallocated() {
    return db.lockInactive();
}
/**
 * Acquires the appropriate lock for this node: its owning application's lock when
 * allocated, otherwise the lock over unallocated nodes.
 */
public Mutex lock(Node node) {
    if (node.allocation().isPresent()) {
        return lock(node.allocation().get().owner());
    }
    return lockUnallocated();
}
/** Throws an IllegalArgumentException carrying the given message. */
private void illegal(String message) {
    throw new IllegalArgumentException(message);
}
} |
The most risky bug in this code is: Exposing sensitive information through accessors You can modify the code like this: ```java // It's not recommended to provide public getters for sensitive information like access keys and secret keys. // If you need to provide read access to this data, rethink your design to ensure it's done securely, // and consider limiting the scope of the access. Perhaps use a more secure method to handle sensitive credentials. public class AliyunCloudCredential { private String accessKey; private String secretKey; private String endpoint; // Existing constructor and other methods... // Remove or limit the getters for sensitive information. // For demonstration purposes here, they're simply removed. @Override public void applyToConfiguration(Configuration configuration) { configuration.set("fs.oss.impl", "com.aliyun.jindodata.oss.JindoOssFileSystem"); } // Additional methods and class implementation... } ``` | public void applyToConfiguration(Configuration configuration) {
configuration.set("fs.oss.impl", "com.aliyun.jindodata.oss.JindoOssFileSystem");
configuration.set("fs.AbstractFileSystem.oss.impl", "com.aliyun.jindodata.oss.OSS");
configuration.set("fs.oss.accessKeyId", accessKey);
configuration.set("fs.oss.accessKeySecret", secretKey);
configuration.set("fs.oss.endpoint", endpoint);
} | configuration.set("fs.oss.impl", "com.aliyun.jindodata.oss.JindoOssFileSystem"); | public void applyToConfiguration(Configuration configuration) {
configuration.set("fs.oss.impl", "com.aliyun.jindodata.oss.JindoOssFileSystem");
configuration.set("fs.AbstractFileSystem.oss.impl", "com.aliyun.jindodata.oss.OSS");
configuration.set("fs.oss.accessKeyId", accessKey);
configuration.set("fs.oss.accessKeySecret", secretKey);
configuration.set("fs.oss.endpoint", endpoint);
} | class AliyunCloudCredential implements CloudCredential {
private final String accessKey;
private final String secretKey;
private final String endpoint;
// Rejects null credential components up front; empty strings are allowed here and are
// only rejected later by validate().
public AliyunCloudCredential(String accessKey, String secretKey, String endpoint) {
Preconditions.checkNotNull(accessKey);
Preconditions.checkNotNull(secretKey);
Preconditions.checkNotNull(endpoint);
this.accessKey = accessKey;
this.secretKey = secretKey;
this.endpoint = endpoint;
}
// NOTE(review): these accessors expose the raw credential values (including the secret
// key) to any caller; consider whether public read access is really required — TODO confirm.
public String getAccessKey() {
return accessKey;
}
public String getSecretKey() {
return secretKey;
}
public String getEndpoint() {
return endpoint;
}
// Fixed: '@Override' appeared twice on this method; repeating a non-repeatable annotation
// is a compile error (the stray copy was left behind when a neighboring method was removed).
@Override
public boolean validate() {
    // The credential is only usable when all three components are non-empty.
    return !this.accessKey.isEmpty() && !this.secretKey.isEmpty() && !this.endpoint.isEmpty();
}
@Override
// Exports this credential into the given thrift property map.
// NOTE(review): the keys reuse AWS_S3_* constants even though this is an Aliyun
// credential — presumably the backend shares S3-compatible property names; verify
// against the consumer of these properties.
public void toThrift(Map<String, String> properties) {
properties.put(CloudConfigurationConstants.AWS_S3_ACCESS_KEY, accessKey);
properties.put(CloudConfigurationConstants.AWS_S3_SECRET_KEY, secretKey);
properties.put(CloudConfigurationConstants.AWS_S3_ENDPOINT, endpoint);
}
@Override
// Builds a human-readable identity string for this credential.
// NOTE(review): the returned string contains the raw secret key; if callers ever log
// it the credential leaks — confirm call sites before relying on this format.
public String toCredString() {
return "AliyunCloudCredential{" +
"accessKey='" + accessKey + '\'' +
", secretKey='" + secretKey + '\'' +
", endpoint='" + endpoint + '\'' +
'}';
}
@Override
// Not supported for Aliyun credentials; callers currently receive null.
// NOTE(review): a null return from an interface method is easy to misuse — consider
// documenting this on the interface or returning a sentinel; TODO confirm callers.
public FileStoreInfo toFileStoreInfo() {
return null;
}
} | class AliyunCloudCredential implements CloudCredential {
private final String accessKey;
private final String secretKey;
private final String endpoint;
public AliyunCloudCredential(String accessKey, String secretKey, String endpoint) {
Preconditions.checkNotNull(accessKey);
Preconditions.checkNotNull(secretKey);
Preconditions.checkNotNull(endpoint);
this.accessKey = accessKey;
this.secretKey = secretKey;
this.endpoint = endpoint;
}
public String getAccessKey() {
return accessKey;
}
public String getSecretKey() {
return secretKey;
}
public String getEndpoint() {
return endpoint;
}
// Fixed: '@Override' appeared twice on this method; repeating a non-repeatable annotation
// is a compile error (the stray copy was left behind when a neighboring method was removed).
@Override
public boolean validate() {
    // The credential is only usable when all three components are non-empty.
    return !this.accessKey.isEmpty() && !this.secretKey.isEmpty() && !this.endpoint.isEmpty();
}
@Override
public void toThrift(Map<String, String> properties) {
properties.put(CloudConfigurationConstants.AWS_S3_ACCESS_KEY, accessKey);
properties.put(CloudConfigurationConstants.AWS_S3_SECRET_KEY, secretKey);
properties.put(CloudConfigurationConstants.AWS_S3_ENDPOINT, endpoint);
}
@Override
public String toCredString() {
return "AliyunCloudCredential{" +
"accessKey='" + accessKey + '\'' +
", secretKey='" + secretKey + '\'' +
", endpoint='" + endpoint + '\'' +
'}';
}
@Override
public FileStoreInfo toFileStoreInfo() {
return null;
}
} |
I think it's better to assign aliases: `SELECT 1 AS x, '1' AS y ...` so that in unlikely case Calcite changes the convention of calling these `EXPR$..` the test doesn't break | public void testValues_selectEmpty() throws Exception {
String sql = "select 1, '1' FROM string_table WHERE false";
PCollection<Row> rows = compilePipeline(sql, pipeline);
PAssert.that(rows)
.containsInAnyOrder(
TestUtils.RowsBuilder.of(
Schema.FieldType.INT32, "EXPR$0",
Schema.FieldType.STRING, "EXPR$1")
.getRows());
pipeline.run();
} | String sql = "select 1, '1' FROM string_table WHERE false"; | public void testValues_selectEmpty() throws Exception {
String sql = "select 1, '1' FROM string_table WHERE false";
PCollection<Row> rows = compilePipeline(sql, pipeline);
PAssert.that(rows)
.containsInAnyOrder(
TestUtils.RowsBuilder.of(
Schema.FieldType.INT32, "EXPR$0",
Schema.FieldType.STRING, "EXPR$1")
.getRows());
pipeline.run();
} | class BeamValuesRelTest extends BaseRelTest {
@Rule public final TestPipeline pipeline = TestPipeline.create();
// Registers the two in-memory tables the tests in this class query against.
@BeforeClass
public static void prepare() {
registerTable(
"string_table",
TestBoundedTable.of(
Schema.FieldType.STRING, "name",
Schema.FieldType.STRING, "description"));
registerTable(
"int_table",
TestBoundedTable.of(
Schema.FieldType.INT32, "c0",
Schema.FieldType.INT32, "c1"));
}
// Verifies that a VALUES-based INSERT yields exactly the inserted rows.
@Test
public void testValues() throws Exception {
String sql =
"insert into string_table(name, description) values "
+ "('hello', 'world'), ('james', 'bond')";
PCollection<Row> rows = compilePipeline(sql, pipeline);
PAssert.that(rows)
.containsInAnyOrder(
TestUtils.RowsBuilder.of(
Schema.FieldType.STRING, "name",
Schema.FieldType.STRING, "description")
.addRows(
"hello", "world",
"james", "bond")
.getRows());
pipeline.run();
}
// Verifies that explicit CASTs in a VALUES clause produce INT32 columns as expected.
@Test
public void testValues_castInt() throws Exception {
String sql = "insert into int_table (c0, c1) values(cast(1 as int), cast(2 as int))";
PCollection<Row> rows = compilePipeline(sql, pipeline);
PAssert.that(rows)
.containsInAnyOrder(
TestUtils.RowsBuilder.of(
Schema.FieldType.INT32, "c0",
Schema.FieldType.INT32, "c1")
.addRows(1, 2)
.getRows());
pipeline.run();
}
// Verifies a bare SELECT of constants (no FROM clause).
// NOTE(review): the expected column names rely on Calcite's EXPR$N naming convention;
// using explicit aliases (e.g. "select 1 AS x, '1' AS y") would make the test robust
// against a change in that convention.
@Test
public void testValues_onlySelect() throws Exception {
String sql = "select 1, '1'";
PCollection<Row> rows = compilePipeline(sql, pipeline);
PAssert.that(rows)
.containsInAnyOrder(
TestUtils.RowsBuilder.of(
Schema.FieldType.INT32, "EXPR$0",
Schema.FieldType.STRING, "EXPR$1")
.addRows(1, "1")
.getRows());
pipeline.run();
}
@Test
} | class BeamValuesRelTest extends BaseRelTest {
@Rule public final TestPipeline pipeline = TestPipeline.create();
@BeforeClass
public static void prepare() {
registerTable(
"string_table",
TestBoundedTable.of(
Schema.FieldType.STRING, "name",
Schema.FieldType.STRING, "description"));
registerTable(
"int_table",
TestBoundedTable.of(
Schema.FieldType.INT32, "c0",
Schema.FieldType.INT32, "c1"));
}
@Test
public void testValues() throws Exception {
String sql =
"insert into string_table(name, description) values "
+ "('hello', 'world'), ('james', 'bond')";
PCollection<Row> rows = compilePipeline(sql, pipeline);
PAssert.that(rows)
.containsInAnyOrder(
TestUtils.RowsBuilder.of(
Schema.FieldType.STRING, "name",
Schema.FieldType.STRING, "description")
.addRows(
"hello", "world",
"james", "bond")
.getRows());
pipeline.run();
}
@Test
public void testValues_castInt() throws Exception {
String sql = "insert into int_table (c0, c1) values(cast(1 as int), cast(2 as int))";
PCollection<Row> rows = compilePipeline(sql, pipeline);
PAssert.that(rows)
.containsInAnyOrder(
TestUtils.RowsBuilder.of(
Schema.FieldType.INT32, "c0",
Schema.FieldType.INT32, "c1")
.addRows(1, 2)
.getRows());
pipeline.run();
}
@Test
public void testValues_onlySelect() throws Exception {
String sql = "select 1, '1'";
PCollection<Row> rows = compilePipeline(sql, pipeline);
PAssert.that(rows)
.containsInAnyOrder(
TestUtils.RowsBuilder.of(
Schema.FieldType.INT32, "EXPR$0",
Schema.FieldType.STRING, "EXPR$1")
.addRows(1, "1")
.getRows());
pipeline.run();
}
@Test
} |
I was thinking of when we have to renew multiple lock Token in one go and need to expose that API, this would be helpful. But for now I have changed this like you said. | private Message createManagementMessage(String operation, String linkName) {
final Duration serverTimeout = MessageUtils.adjustServerTimeout(operationTimeout);
final Map<String, Object> applicationProperties = new HashMap<>();
applicationProperties.put(MANAGEMENT_OPERATION_KEY, operation);
applicationProperties.put(SERVER_TIMEOUT, serverTimeout.toMillis());
if (linkName != null && !linkName.isEmpty()) {
applicationProperties.put(ASSOCIATED_LINK_NAME_KEY, linkName);
}
final Message message = Proton.message();
message.setApplicationProperties(new ApplicationProperties(applicationProperties));
return message;
} | return message; | private Message createManagementMessage(String operation, String linkName) {
final Duration serverTimeout = MessageUtils.adjustServerTimeout(operationTimeout);
final Map<String, Object> applicationProperties = new HashMap<>();
applicationProperties.put(MANAGEMENT_OPERATION_KEY, operation);
applicationProperties.put(SERVER_TIMEOUT, serverTimeout.toMillis());
if (linkName != null && !linkName.isEmpty()) {
applicationProperties.put(ASSOCIATED_LINK_NAME_KEY, linkName);
}
final Message message = Proton.message();
message.setApplicationProperties(new ApplicationProperties(applicationProperties));
return message;
} | class ManagementChannel implements ServiceBusManagementNode {
private final MessageSerializer messageSerializer;
private final TokenManager tokenManager;
private final Duration operationTimeout;
private final Mono<RequestResponseChannel> createRequestResponse;
private final String fullyQualifiedNamespace;
private final ClientLogger logger;
private final String entityPath;
private final AtomicLong lastPeekedSequenceNumber = new AtomicLong();
private volatile boolean isDisposed;
ManagementChannel(Mono<RequestResponseChannel> createRequestResponse, String fullyQualifiedNamespace,
String entityPath, TokenManager tokenManager, MessageSerializer messageSerializer, Duration operationTimeout) {
this.createRequestResponse = createRequestResponse;
this.fullyQualifiedNamespace = fullyQualifiedNamespace;
this.logger = new ClientLogger(String.format("%s<%s>", ManagementChannel.class, entityPath));
this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
this.tokenManager = Objects.requireNonNull(tokenManager, "'tokenManager' cannot be null.");
this.operationTimeout = operationTimeout;
}
/**
 * Updates the disposition of the message identified by the given lock token.
 *
 * Fixed: the dead-letter reason/description and the properties to modify were previously
 * discarded (hard-coded nulls in the createDispositionMessage call), so callers' arguments
 * never reached the service. They are now passed through.
 *
 * @param lockToken the lock token of the message to settle
 * @param dispositionStatus the disposition to apply (complete, abandon, dead-letter, ...)
 * @param deadLetterReason optional dead-letter reason, may be null
 * @param deadLetterErrorDescription optional dead-letter description, may be null
 * @param propertiesToModify optional message properties to modify, may be null
 * @return a Mono that completes when the service acknowledges the disposition
 */
@Override
public Mono<Void> updateDisposition(UUID lockToken, DispositionStatus dispositionStatus, String deadLetterReason,
    String deadLetterErrorDescription, Map<String, Object> propertiesToModify) {
    return isAuthorized(UPDATE_DISPOSITION_OPERATION).then(createRequestResponse.flatMap(channel -> {
        final Message message = createDispositionMessage(new UUID[] {lockToken}, dispositionStatus,
            deadLetterReason, deadLetterErrorDescription, propertiesToModify, channel.getReceiveLinkName());
        return channel.sendWithAck(message);
    }).flatMap(response -> {
        final int statusCode = RequestResponseUtils.getResponseStatusCode(response);
        final AmqpResponseCode responseCode = AmqpResponseCode.fromValue(statusCode);
        if (responseCode == AmqpResponseCode.OK) {
            return Mono.empty();
        } else {
            // Surface any non-OK response as an AMQP exception carrying the error context.
            return Mono.error(ExceptionUtil.amqpResponseCodeToException(statusCode, "", getErrorContext()));
        }
    }));
}
/**
* {@inheritDoc}
*/
@Override
public Mono<Instant> renewMessageLock(UUID lockToken) {
return renewMessageLock(new UUID[]{lockToken})
.next();
}
/**
* {@inheritDoc}
*/
@Override
public Mono<Void> cancelSchedule(long sequenceNumber) {
return cancelSchedule(new Long[]{sequenceNumber});
}
/**
* {@inheritDoc}
*/
@Override
public Mono<Long> schedule(ServiceBusMessage message, Instant scheduledEnqueueTime) {
return scheduleMessage(message, scheduledEnqueueTime)
.next();
}
/**
* {@inheritDoc}
*/
@Override
public Mono<ServiceBusReceivedMessage> peek(long fromSequenceNumber) {
return peek(fromSequenceNumber, 1, null)
.last();
}
/**
* {@inheritDoc}
*/
@Override
public Flux<ServiceBusReceivedMessage> peekBatch(int maxMessages) {
return peek(this.lastPeekedSequenceNumber.get() + 1, maxMessages, null);
}
/**
* {@inheritDoc}
*/
@Override
public Flux<ServiceBusReceivedMessage> peekBatch(int maxMessages, long fromSequenceNumber) {
return peek(fromSequenceNumber, maxMessages, null);
}
/**
* {@inheritDoc}
*/
@Override
public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(ReceiveMode receiveMode, long sequenceNumber) {
return receiveDeferredMessageBatch(receiveMode, null, sequenceNumber)
.next();
}
/**
* {@inheritDoc}
*/
@Override
public Flux<ServiceBusReceivedMessage> receiveDeferredMessageBatch(ReceiveMode receiveMode,
long... sequenceNumbers) {
return receiveDeferredMessageBatch(receiveMode, null, sequenceNumbers);
}
/**
* {@inheritDoc}
*/
@Override
public Mono<ServiceBusReceivedMessage> peek() {
return peek(lastPeekedSequenceNumber.get() + 1);
}
private Flux<ServiceBusReceivedMessage> peek(long fromSequenceNumber, int maxMessages, UUID sessionId) {
return isAuthorized(PEEK_OPERATION).thenMany(createRequestResponse.flatMap(channel -> {
final Message message = createManagementMessage(PEEK_OPERATION, channel.getReceiveLinkName());
final HashMap<String, Object> requestBodyMap = new HashMap<>();
requestBodyMap.put(FROM_SEQUENCE_NUMBER, fromSequenceNumber);
requestBodyMap.put(MESSAGE_COUNT_KEY, maxMessages);
if (!Objects.isNull(sessionId)) {
requestBodyMap.put(ManagementConstants.REQUEST_RESPONSE_SESSION_ID, sessionId);
}
message.setBody(new AmqpValue(requestBodyMap));
return channel.sendWithAck(message);
}).flatMapMany(amqpMessage -> {
final List<ServiceBusReceivedMessage> messageList =
messageSerializer.deserializeList(amqpMessage, ServiceBusReceivedMessage.class);
if (messageList.size() > 0) {
final ServiceBusReceivedMessage receivedMessage = messageList.get(messageList.size() - 1);
logger.info("Setting last peeked sequence number: {}", receivedMessage.getSequenceNumber());
if (receivedMessage.getSequenceNumber() > 0) {
this.lastPeekedSequenceNumber.set(receivedMessage.getSequenceNumber());
}
}
return Flux.fromIterable(messageList);
}));
}
private Flux<ServiceBusReceivedMessage> receiveDeferredMessageBatch(ReceiveMode receiveMode, UUID sessionId,
long... fromSequenceNumbers) {
return isAuthorized(RECEIVE_BY_SEQUENCE_NUMBER_OPERATION).thenMany(createRequestResponse.flatMap(channel -> {
final Message message = createManagementMessage(RECEIVE_BY_SEQUENCE_NUMBER_OPERATION,
channel.getReceiveLinkName());
HashMap<String, Object> requestBodyMap = new HashMap<>();
requestBodyMap.put(SEQUENCE_NUMBERS, Arrays.stream(fromSequenceNumbers)
.boxed().toArray(Long[]::new));
requestBodyMap.put(RECEIVER_SETTLE_MODE,
UnsignedInteger.valueOf(receiveMode == ReceiveMode.RECEIVE_AND_DELETE ? 0 : 1));
if (!Objects.isNull(sessionId)) {
requestBodyMap.put(ManagementConstants.REQUEST_RESPONSE_SESSION_ID, sessionId);
}
message.setBody(new AmqpValue(requestBodyMap));
return channel.sendWithAck(message);
}).flatMapMany(amqpMessage -> {
final List<ServiceBusReceivedMessage> messageList =
messageSerializer.deserializeList(amqpMessage, ServiceBusReceivedMessage.class);
if (messageList.size() > 0) {
final ServiceBusReceivedMessage receivedMessage = messageList.get(messageList.size() - 1);
logger.info("Setting last peeked sequence number: {}", receivedMessage.getSequenceNumber());
if (receivedMessage.getSequenceNumber() > 0) {
this.lastPeekedSequenceNumber.set(receivedMessage.getSequenceNumber());
}
}
return Flux.fromIterable(messageList);
}));
}
private Mono<Void> isAuthorized(String operation) {
return tokenManager.getAuthorizationResults().next().flatMap(response -> {
if (response != AmqpResponseCode.ACCEPTED) {
return Mono.error(new AmqpException(false, String.format(
"User does not have authorization to perform operation [%s] on entity [%s]", operation, entityPath),
getErrorContext()));
} else {
return Mono.empty();
}
});
}
private Message createDispositionMessage(UUID[] lockTokens, DispositionStatus dispositionStatus,
String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify,
String linkName) {
logger.verbose("Update disposition of deliveries '{}' to '{}' on entity '{}', session '{}'",
Arrays.toString(lockTokens), dispositionStatus, entityPath, "n/a");
final Message message = createManagementMessage(UPDATE_DISPOSITION_OPERATION, linkName);
final Map<String, Object> requestBody = new HashMap<>();
requestBody.put(LOCK_TOKENS_KEY, lockTokens);
requestBody.put(ManagementConstants.DISPOSITION_STATUS_KEY, dispositionStatus.getValue());
if (deadLetterReason != null) {
requestBody.put(ManagementConstants.DEADLETTER_REASON_KEY, deadLetterReason);
}
if (deadLetterErrorDescription != null) {
requestBody.put(ManagementConstants.DEADLETTER_DESCRIPTION_KEY, deadLetterErrorDescription);
}
if (propertiesToModify != null && propertiesToModify.size() > 0) {
requestBody.put(ManagementConstants.PROPERTIES_TO_MODIFY_KEY, propertiesToModify);
}
message.setBody(new AmqpValue(requestBody));
return message;
}
/**
 * Renews the locks for the given lock tokens and emits the new lock-expiry instants.
 *
 * Fixed: the authorization check previously used PEEK_OPERATION even though the request
 * performs RENEW_LOCK_OPERATION; it now authorizes the operation actually executed.
 *
 * @param renewLockList lock tokens whose locks should be renewed
 * @return the renewed lock expiration instants, one per token
 */
private Flux<Instant> renewMessageLock(UUID[] renewLockList) {
    return isAuthorized(RENEW_LOCK_OPERATION).thenMany(createRequestResponse.flatMap(channel -> {
        Message requestMessage = createManagementMessage(RENEW_LOCK_OPERATION,
            channel.getReceiveLinkName());
        requestMessage.setBody(new AmqpValue(Collections.singletonMap(LOCK_TOKENS_KEY, renewLockList)));
        return channel.sendWithAck(requestMessage);
    }).flatMapMany(responseMessage -> {
        int statusCode = RequestResponseUtils.getResponseStatusCode(responseMessage);
        if (statusCode != AmqpResponseCode.OK.getValue()) {
            return Mono.error(ExceptionUtil.amqpResponseCodeToException(statusCode, "Could not renew the lock.",
                getErrorContext()));
        }
        return Flux.fromIterable(messageSerializer.deserializeList(responseMessage, Instant.class));
    }));
}
/**
* Creates an AMQP message with the required application properties.
*
* @param operation Management operation to perform (ie. peek, update-disposition, etc.)
* @param linkName Name of receiver link associated with operation.
*
* @return An AMQP message with the required headers.
*/
/***
* Create a Amqp key, value map to be used to create Amqp mesage for scheduling purpose.
*
* @param messageToSchedule The message which needs to be scheduled.
* @return Map of key and value in Amqp format.
* @throws AmqpException When payload exceeded maximum message allowed size.
*/
private Map<String, Object> createScheduleMessgeAmqpValue(ServiceBusMessage messageToSchedule) {
int maxMessageSize = MAX_MESSAGE_LENGTH_SENDER_LINK_BYTES;
Map<String, Object> requestBodyMap = new HashMap<>();
List<Message> messagesToSchedule = new ArrayList<>();
messagesToSchedule.add(messageSerializer.serialize(messageToSchedule));
Collection<HashMap<String, Object>> messageList = new LinkedList<>();
for (Message message : messagesToSchedule) {
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize =
Math.min(payloadSize + MAX_MESSAGING_AMQP_HEADER_SIZE_BYTES, maxMessageSize);
final byte[] bytes = new byte[allocationSize];
int encodedSize;
try {
encodedSize = message.encode(bytes, 0, allocationSize);
} catch (BufferOverflowException exception) {
final String errorMessage =
String.format(Locale.US,
"Error sending. Size of the payload exceeded maximum message size: %s kb",
maxMessageSize / 1024);
throw logger.logExceptionAsWarning(new AmqpException(false,
AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, errorMessage, exception, getErrorContext()));
}
HashMap<String, Object> messageEntry = new HashMap<>();
messageEntry.put(MESSAGE, new Binary(bytes, 0, encodedSize));
messageEntry.put(MESSAGE_ID, message.getMessageId());
messageList.add(messageEntry);
}
requestBodyMap.put(MESSAGES, messageList);
return requestBodyMap;
}
private AmqpErrorContext getErrorContext() {
return new SessionErrorContext(fullyQualifiedNamespace, entityPath);
}
/**
 * Cancels the scheduled messages with the given sequence numbers.
 *
 * Fixed: the response was previously transformed with map(...), which produced a nested
 * Mono that was never subscribed — service failures were silently swallowed. Using
 * flatMap(...) makes the error signal propagate to the subscriber.
 *
 * @param cancelScheduleNumbers sequence numbers of the scheduled messages to cancel
 * @return a Mono that completes when the cancellation is acknowledged
 */
private Mono<Void> cancelSchedule(Long[] cancelScheduleNumbers) {
    return isAuthorized(CANCEL_SCHEDULED_MESSAGE_OPERATION).thenMany(createRequestResponse.flatMap(channel -> {
        Message requestMessage = createManagementMessage(CANCEL_SCHEDULED_MESSAGE_OPERATION,
            channel.getReceiveLinkName());
        requestMessage.setBody(new AmqpValue(Collections.singletonMap(SEQUENCE_NUMBERS, cancelScheduleNumbers)));
        return channel.sendWithAck(requestMessage);
    }).flatMap(responseMessage -> {
        int statusCode = RequestResponseUtils.getResponseStatusCode(responseMessage);
        if (statusCode == AmqpResponseCode.OK.getValue()) {
            return Mono.empty();
        }
        return Mono.error(new AmqpException(false, "Could not cancel schedule message with sequence "
            + Arrays.toString(cancelScheduleNumbers), getErrorContext()));
    })).then();
}
private Flux<Long> scheduleMessage(ServiceBusMessage messageToSchedule, Instant scheduledEnqueueTime) {
messageToSchedule.setScheduledEnqueueTime(scheduledEnqueueTime);
return isAuthorized(SCHEDULE_MESSAGE_OPERATION).thenMany(createRequestResponse.flatMap(channel -> {
Message requestMessage = createManagementMessage(SCHEDULE_MESSAGE_OPERATION, channel.getReceiveLinkName());
Map<String, Object> requestBodyMap;
requestBodyMap = createScheduleMessgeAmqpValue(messageToSchedule);
requestMessage.setBody(new AmqpValue(requestBodyMap));
return channel.sendWithAck(requestMessage);
}).flatMapMany(responseMessage -> {
int statusCode = RequestResponseUtils.getResponseStatusCode(responseMessage);
if (statusCode != AmqpResponseCode.OK.getValue()) {
return Mono.error(ExceptionUtil.amqpResponseCodeToException(statusCode, "Could not schedule message.",
getErrorContext()));
}
return Flux.fromIterable(messageSerializer.deserializeList(responseMessage, Long.class));
}));
}
/**
* {@inheritDoc}
*/
@Override
public void close() {
if (isDisposed) {
return;
}
isDisposed = true;
tokenManager.close();
}
} | class ManagementChannel implements ServiceBusManagementNode {
private final MessageSerializer messageSerializer;
private final TokenManager tokenManager;
private final Duration operationTimeout;
private final Mono<RequestResponseChannel> createRequestResponse;
private final String fullyQualifiedNamespace;
private final ClientLogger logger;
private final String entityPath;
private final AtomicLong lastPeekedSequenceNumber = new AtomicLong();
private volatile boolean isDisposed;
ManagementChannel(Mono<RequestResponseChannel> createRequestResponse, String fullyQualifiedNamespace,
String entityPath, TokenManager tokenManager, MessageSerializer messageSerializer, Duration operationTimeout) {
this.createRequestResponse = createRequestResponse;
this.fullyQualifiedNamespace = fullyQualifiedNamespace;
this.logger = new ClientLogger(String.format("%s<%s>", ManagementChannel.class, entityPath));
this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
this.tokenManager = Objects.requireNonNull(tokenManager, "'tokenManager' cannot be null.");
this.operationTimeout = operationTimeout;
}
/**
 * Updates the disposition of the message identified by the given lock token.
 *
 * Fixed: the dead-letter reason/description and the properties to modify were previously
 * discarded (hard-coded nulls in the createDispositionMessage call), so callers' arguments
 * never reached the service. They are now passed through.
 *
 * @param lockToken the lock token of the message to settle
 * @param dispositionStatus the disposition to apply (complete, abandon, dead-letter, ...)
 * @param deadLetterReason optional dead-letter reason, may be null
 * @param deadLetterErrorDescription optional dead-letter description, may be null
 * @param propertiesToModify optional message properties to modify, may be null
 * @return a Mono that completes when the service acknowledges the disposition
 */
@Override
public Mono<Void> updateDisposition(UUID lockToken, DispositionStatus dispositionStatus, String deadLetterReason,
    String deadLetterErrorDescription, Map<String, Object> propertiesToModify) {
    return isAuthorized(UPDATE_DISPOSITION_OPERATION).then(createRequestResponse.flatMap(channel -> {
        final Message message = createDispositionMessage(new UUID[] {lockToken}, dispositionStatus,
            deadLetterReason, deadLetterErrorDescription, propertiesToModify, channel.getReceiveLinkName());
        return channel.sendWithAck(message);
    }).flatMap(response -> {
        final int statusCode = RequestResponseUtils.getResponseStatusCode(response);
        final AmqpResponseCode responseCode = AmqpResponseCode.fromValue(statusCode);
        if (responseCode == AmqpResponseCode.OK) {
            return Mono.empty();
        } else {
            // Surface any non-OK response as an AMQP exception carrying the error context.
            return Mono.error(ExceptionUtil.amqpResponseCodeToException(statusCode, "", getErrorContext()));
        }
    }));
}
/**
* {@inheritDoc}
*/
@Override
public Mono<ServiceBusReceivedMessage> peek(long fromSequenceNumber) {
return peek(fromSequenceNumber, 1, null)
.last();
}
/**
* {@inheritDoc}
*/
@Override
public Flux<ServiceBusReceivedMessage> peekBatch(int maxMessages) {
return peek(this.lastPeekedSequenceNumber.get() + 1, maxMessages, null);
}
/**
* {@inheritDoc}
*/
@Override
public Flux<ServiceBusReceivedMessage> peekBatch(int maxMessages, long fromSequenceNumber) {
return peek(fromSequenceNumber, maxMessages, null);
}
/**
* {@inheritDoc}
*/
@Override
public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(ReceiveMode receiveMode, long sequenceNumber) {
return receiveDeferredMessageBatch(receiveMode, null, sequenceNumber)
.next();
}
/**
* {@inheritDoc}
*/
@Override
public Flux<ServiceBusReceivedMessage> receiveDeferredMessageBatch(ReceiveMode receiveMode,
long... sequenceNumbers) {
return receiveDeferredMessageBatch(receiveMode, null, sequenceNumbers);
}
/**
* {@inheritDoc}
*/
@Override
public Mono<ServiceBusReceivedMessage> peek() {
return peek(lastPeekedSequenceNumber.get() + 1);
}
private Flux<ServiceBusReceivedMessage> peek(long fromSequenceNumber, int maxMessages, UUID sessionId) {
return isAuthorized(PEEK_OPERATION).thenMany(createRequestResponse.flatMap(channel -> {
final Message message = createManagementMessage(PEEK_OPERATION, channel.getReceiveLinkName());
final HashMap<String, Object> requestBodyMap = new HashMap<>();
requestBodyMap.put(FROM_SEQUENCE_NUMBER, fromSequenceNumber);
requestBodyMap.put(MESSAGE_COUNT_KEY, maxMessages);
if (!Objects.isNull(sessionId)) {
requestBodyMap.put(ManagementConstants.REQUEST_RESPONSE_SESSION_ID, sessionId);
}
message.setBody(new AmqpValue(requestBodyMap));
return channel.sendWithAck(message);
}).flatMapMany(amqpMessage -> {
final List<ServiceBusReceivedMessage> messageList =
messageSerializer.deserializeList(amqpMessage, ServiceBusReceivedMessage.class);
if (messageList.size() > 0) {
final ServiceBusReceivedMessage receivedMessage = messageList.get(messageList.size() - 1);
logger.info("Setting last peeked sequence number: {}", receivedMessage.getSequenceNumber());
if (receivedMessage.getSequenceNumber() > 0) {
this.lastPeekedSequenceNumber.set(receivedMessage.getSequenceNumber());
}
}
return Flux.fromIterable(messageList);
}));
}
private Flux<ServiceBusReceivedMessage> receiveDeferredMessageBatch(ReceiveMode receiveMode, UUID sessionId,
long... fromSequenceNumbers) {
return isAuthorized(RECEIVE_BY_SEQUENCE_NUMBER_OPERATION).thenMany(createRequestResponse.flatMap(channel -> {
final Message message = createManagementMessage(RECEIVE_BY_SEQUENCE_NUMBER_OPERATION,
channel.getReceiveLinkName());
HashMap<String, Object> requestBodyMap = new HashMap<>();
requestBodyMap.put(SEQUENCE_NUMBERS, Arrays.stream(fromSequenceNumbers)
.boxed().toArray(Long[]::new));
requestBodyMap.put(RECEIVER_SETTLE_MODE,
UnsignedInteger.valueOf(receiveMode == ReceiveMode.RECEIVE_AND_DELETE ? 0 : 1));
if (!Objects.isNull(sessionId)) {
requestBodyMap.put(ManagementConstants.REQUEST_RESPONSE_SESSION_ID, sessionId);
}
message.setBody(new AmqpValue(requestBodyMap));
return channel.sendWithAck(message);
}).flatMapMany(amqpMessage -> {
final List<ServiceBusReceivedMessage> messageList =
messageSerializer.deserializeList(amqpMessage, ServiceBusReceivedMessage.class);
if (messageList.size() > 0) {
final ServiceBusReceivedMessage receivedMessage = messageList.get(messageList.size() - 1);
logger.info("Setting last peeked sequence number: {}", receivedMessage.getSequenceNumber());
if (receivedMessage.getSequenceNumber() > 0) {
this.lastPeekedSequenceNumber.set(receivedMessage.getSequenceNumber());
}
}
return Flux.fromIterable(messageList);
}));
}
private Mono<Void> isAuthorized(String operation) {
return tokenManager.getAuthorizationResults().next().flatMap(response -> {
if (response != AmqpResponseCode.ACCEPTED) {
return Mono.error(new AmqpException(false, String.format(
"User does not have authorization to perform operation [%s] on entity [%s]", operation, entityPath),
getErrorContext()));
} else {
return Mono.empty();
}
});
}
private Message createDispositionMessage(UUID[] lockTokens, DispositionStatus dispositionStatus,
String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify,
String linkName) {
logger.verbose("Update disposition of deliveries '{}' to '{}' on entity '{}', session '{}'",
Arrays.toString(lockTokens), dispositionStatus, entityPath, "n/a");
final Message message = createManagementMessage(UPDATE_DISPOSITION_OPERATION, linkName);
final Map<String, Object> requestBody = new HashMap<>();
requestBody.put(LOCK_TOKENS_KEY, lockTokens);
requestBody.put(ManagementConstants.DISPOSITION_STATUS_KEY, dispositionStatus.getValue());
if (deadLetterReason != null) {
requestBody.put(ManagementConstants.DEADLETTER_REASON_KEY, deadLetterReason);
}
if (deadLetterErrorDescription != null) {
requestBody.put(ManagementConstants.DEADLETTER_DESCRIPTION_KEY, deadLetterErrorDescription);
}
if (propertiesToModify != null && propertiesToModify.size() > 0) {
requestBody.put(ManagementConstants.PROPERTIES_TO_MODIFY_KEY, propertiesToModify);
}
message.setBody(new AmqpValue(requestBody));
return message;
}
/**
* {@inheritDoc}
*/
@Override
public Mono<Instant> renewMessageLock(UUID lockToken) {
return isAuthorized(PEEK_OPERATION).then(createRequestResponse.flatMap(channel -> {
Message requestMessage = createManagementMessage(RENEW_LOCK_OPERATION,
channel.getReceiveLinkName());
requestMessage.setBody(new AmqpValue(Collections.singletonMap(LOCK_TOKENS_KEY, new UUID[]{lockToken})));
return channel.sendWithAck(requestMessage);
}).map(responseMessage -> {
int statusCode = RequestResponseUtils.getResponseStatusCode(responseMessage);
if (statusCode != AmqpResponseCode.OK.getValue()) {
throw logger.logExceptionAsError(new AmqpException(false,
String.format("Could not renew the lock for lock token: '%s'.", lockToken.toString()),
getErrorContext()));
}
List<Instant> renewTimeList = messageSerializer.deserializeList(responseMessage, Instant.class);
if (CoreUtils.isNullOrEmpty(renewTimeList)) {
throw logger.logExceptionAsError(new AmqpException(false,
String.format("Service bus response empty. "
+ "Could not renew message with lock token: '%s'.", lockToken.toString()),
getErrorContext()));
}
return renewTimeList.get(0);
}));
}
    /**
     * Creates an AMQP message with the required application properties.
     *
     * @param operation Management operation to perform (i.e. peek, update-disposition, etc.)
     * @param linkName Name of receiver link associated with operation.
     *
     * @return An AMQP message with the required headers.
     */
    // NOTE(review): the createManagementMessage implementation this Javadoc describes is not
    // present in this excerpt; the comment block is retained for its call sites.
/**
* Create a Amqp key, value map to be used to create Amqp mesage for scheduling purpose.
*
* @param messageToSchedule The message which needs to be scheduled.
* @param maxMessageSize The maximum size allowed on send link.
*
* @return Map of key and value in Amqp format.
* @throws AmqpException When payload exceeded maximum message allowed size.
*/
private Map<String, Object> createScheduleMessgeAmqpValue(ServiceBusMessage messageToSchedule, int maxMessageSize) {
Message message = messageSerializer.serialize(messageToSchedule);
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize =
Math.min(payloadSize + MAX_MESSAGING_AMQP_HEADER_SIZE_BYTES, maxMessageSize);
final byte[] bytes = new byte[allocationSize];
int encodedSize;
try {
encodedSize = message.encode(bytes, 0, allocationSize);
} catch (BufferOverflowException exception) {
final String errorMessage =
String.format(Locale.US,
"Error sending. Size of the payload exceeded maximum message size: %s kb",
maxMessageSize / 1024);
throw logger.logExceptionAsWarning(new AmqpException(false,
AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, errorMessage, exception, getErrorContext()));
}
HashMap<String, Object> messageEntry = new HashMap<>();
messageEntry.put(MESSAGE, new Binary(bytes, 0, encodedSize));
messageEntry.put(MESSAGE_ID, message.getMessageId());
Collection<HashMap<String, Object>> messageList = new LinkedList<>();
messageList.add(messageEntry);
Map<String, Object> requestBodyMap = new HashMap<>();
requestBodyMap.put(MESSAGES, messageList);
return requestBodyMap;
}
    // Builds the error context (namespace + entity path) attached to AmqpExceptions raised here.
    private AmqpErrorContext getErrorContext() {
        return new SessionErrorContext(fullyQualifiedNamespace, entityPath);
    }
/**
* {@inheritDoc}
*/
@Override
public Mono<Void> cancelScheduledMessage(long sequenceNumber) {
return isAuthorized(CANCEL_SCHEDULED_MESSAGE_OPERATION).then(createRequestResponse.flatMap(channel -> {
Message requestMessage = createManagementMessage(CANCEL_SCHEDULED_MESSAGE_OPERATION,
channel.getReceiveLinkName());
requestMessage.setBody(new AmqpValue(Collections.singletonMap(SEQUENCE_NUMBERS,
new Long[]{sequenceNumber})));
return channel.sendWithAck(requestMessage);
}).map(responseMessage -> {
int statusCode = RequestResponseUtils.getResponseStatusCode(responseMessage);
if (statusCode == AmqpResponseCode.OK.getValue()) {
return Mono.empty();
}
return Mono.error(new AmqpException(false, "Could not cancel scheduled message with sequence number "
+ sequenceNumber, getErrorContext()));
})).then();
}
/**
* {@inheritDoc}
*/
@Override
public Mono<Long> schedule(ServiceBusMessage messageToSchedule, Instant scheduledEnqueueTime, int maxSendLinkSize) {
messageToSchedule.setScheduledEnqueueTime(scheduledEnqueueTime);
return isAuthorized(SCHEDULE_MESSAGE_OPERATION).then(createRequestResponse.flatMap(channel -> {
Message requestMessage = createManagementMessage(SCHEDULE_MESSAGE_OPERATION, channel.getReceiveLinkName());
Map<String, Object> requestBodyMap = createScheduleMessgeAmqpValue(messageToSchedule, maxSendLinkSize);
requestMessage.setBody(new AmqpValue(requestBodyMap));
return channel.sendWithAck(requestMessage);
}).map(responseMessage -> {
int statusCode = RequestResponseUtils.getResponseStatusCode(responseMessage);
if (statusCode != AmqpResponseCode.OK.getValue()) {
throw logger.logExceptionAsError(new AmqpException(false,
String.format("Could not schedule message with message id: '%s'.",
messageToSchedule.getMessageId()), getErrorContext()));
}
List<Long> sequenceNumberList = messageSerializer.deserializeList(responseMessage, Long.class);
if (CoreUtils.isNullOrEmpty(sequenceNumberList)) {
throw logger.logExceptionAsError(new AmqpException(false,
String.format("Service bus response empty. Could not schedule message with message id: '%s'.",
messageToSchedule.getMessageId()), getErrorContext()));
}
return sequenceNumberList.get(0);
}));
}
    /**
     * {@inheritDoc}
     */
    @Override
    public void close() {
        // NOTE(review): isDisposed is checked and set without synchronization; if close() can
        // race with itself, tokenManager.close() may run more than once — confirm callers
        // serialize disposal or that tokenManager.close() is idempotent.
        if (isDisposed) {
            return;
        }
        isDisposed = true;
        tokenManager.close();
    }
} |
Did you test it? Does `DefaultOidcUser` serialize and deserialize well? | public OidcUser loadUser(OidcUserRequest userRequest) throws OAuth2AuthenticationException {
OidcUser oidcUser = oidcUserService.loadUser(userRequest);
Authentication authentication = SecurityContextHolder.getContext().getAuthentication();
if (authentication != null) {
DefaultOidcUser defaultOidcUser = (DefaultOidcUser) session.getAttribute("defaultOidcUser");
return defaultOidcUser;
}
Set<String> groups = Optional.of(userRequest)
.map(OAuth2UserRequest::getAccessToken)
.map(AbstractOAuth2Token::getTokenValue)
.map(graphClient::getGroupsFromGraph)
.orElseGet(Collections::emptySet);
Set<String> groupRoles = groups.stream()
.filter(properties::isAllowedGroup)
.map(group -> ROLE_PREFIX + group)
.collect(Collectors.toSet());
Set<SimpleGrantedAuthority> authorities = groupRoles.stream()
.map(SimpleGrantedAuthority::new)
.collect(Collectors.toSet());
if (authorities.isEmpty()) {
authorities = DEFAULT_AUTHORITY_SET;
}
String nameAttributeKey =
Optional.of(userRequest)
.map(OAuth2UserRequest::getClientRegistration)
.map(ClientRegistration::getProviderDetails)
.map(ClientRegistration.ProviderDetails::getUserInfoEndpoint)
.map(ClientRegistration.ProviderDetails.UserInfoEndpoint::getUserNameAttributeName)
.filter(StringUtils::hasText)
.orElse(AADTokenClaim.NAME);
DefaultOidcUser defaultOidcUser = new DefaultOidcUser(authorities, oidcUser.getIdToken(), nameAttributeKey);
session.setAttribute("defaultOidcUser", defaultOidcUser);
return defaultOidcUser;
} | DefaultOidcUser defaultOidcUser = (DefaultOidcUser) session.getAttribute("defaultOidcUser"); | public OidcUser loadUser(OidcUserRequest userRequest) throws OAuth2AuthenticationException {
OidcUser oidcUser = oidcUserService.loadUser(userRequest);
Authentication authentication = SecurityContextHolder.getContext().getAuthentication();
ServletRequestAttributes attr = (ServletRequestAttributes) RequestContextHolder.currentRequestAttributes();
HttpSession session = attr.getRequest().getSession(true);
if (authentication != null) {
return (DefaultOidcUser) session.getAttribute(DEFAULT_OIDC_USER);
}
Set<String> groups = Optional.of(userRequest)
.map(OAuth2UserRequest::getAccessToken)
.map(AbstractOAuth2Token::getTokenValue)
.map(graphClient::getGroupsFromGraph)
.orElseGet(Collections::emptySet);
Set<String> groupRoles = groups.stream()
.filter(properties::isAllowedGroup)
.map(group -> ROLE_PREFIX + group)
.collect(Collectors.toSet());
Set<SimpleGrantedAuthority> authorities = groupRoles.stream()
.map(SimpleGrantedAuthority::new)
.collect(Collectors.toSet());
if (authorities.isEmpty()) {
authorities = DEFAULT_AUTHORITY_SET;
}
String nameAttributeKey =
Optional.of(userRequest)
.map(OAuth2UserRequest::getClientRegistration)
.map(ClientRegistration::getProviderDetails)
.map(ClientRegistration.ProviderDetails::getUserInfoEndpoint)
.map(ClientRegistration.ProviderDetails.UserInfoEndpoint::getUserNameAttributeName)
.filter(StringUtils::hasText)
.orElse(AADTokenClaim.NAME);
DefaultOidcUser defaultOidcUser = new DefaultOidcUser(authorities, oidcUser.getIdToken(), nameAttributeKey);
session.setAttribute(DEFAULT_OIDC_USER, defaultOidcUser);
return defaultOidcUser;
} | class AzureActiveDirectoryOAuth2UserService implements OAuth2UserService<OidcUserRequest, OidcUser> {
private final OidcUserService oidcUserService;
private final AADAuthenticationProperties properties;
private final GraphClient graphClient;
@Autowired
private HttpSession session;
public AzureActiveDirectoryOAuth2UserService(
AADAuthenticationProperties properties
) {
this.properties = properties;
this.oidcUserService = new OidcUserService();
this.graphClient = new GraphClient(properties);
}
@Override
} | class AzureActiveDirectoryOAuth2UserService implements OAuth2UserService<OidcUserRequest, OidcUser> {
private final OidcUserService oidcUserService;
private final AADAuthenticationProperties properties;
private final GraphClient graphClient;
private static final String DEFAULT_OIDC_USER = "defaultOidcUser";
public AzureActiveDirectoryOAuth2UserService(
AADAuthenticationProperties properties
) {
this.properties = properties;
this.oidcUserService = new OidcUserService();
this.graphClient = new GraphClient(properties);
}
@Override
} |
This line can be removed if you pass the default client as suggested above. | private HttpPipeline setupPipeline(HttpClient httpClient) {
List<HttpPipelinePolicy> policies = new ArrayList<>();
HttpLogOptions httpLogOptions = new HttpLogOptions();
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(new RetryPolicy());
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
HttpClient client = httpClient != null ? httpClient : HttpClient.createDefault();
return new HttpPipelineBuilder().httpClient(client)
.policies(policies.toArray(new HttpPipelinePolicy[0])).build();
} | HttpClient client = httpClient != null ? httpClient : HttpClient.createDefault(); | private HttpPipeline setupPipeline(HttpClient httpClient) {
List<HttpPipelinePolicy> policies = new ArrayList<>();
HttpLogOptions httpLogOptions = new HttpLogOptions();
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(new RetryPolicy());
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
return new HttpPipelineBuilder().httpClient(httpClient)
.policies(policies.toArray(new HttpPipelinePolicy[0])).build();
} | class IdentityClient {
private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter();
private static final Random RANDOM = new Random();
private final ClientLogger logger = new ClientLogger(IdentityClient.class);
private final IdentityClientOptions options;
private final PublicClientApplication publicClientApplication;
private final String tenantId;
private final String clientId;
private HttpPipelineAdapter httpPipelineAdapter;
/**
* Creates an IdentityClient with the given options.
*
* @param tenantId the tenant ID of the application.
* @param clientId the client ID of the application.
* @param options the options configuring the client.
*/
IdentityClient(String tenantId, String clientId, IdentityClientOptions options) {
if (tenantId == null) {
tenantId = "common";
}
if (options == null) {
options = new IdentityClientOptions();
}
this.tenantId = tenantId;
this.clientId = clientId;
this.options = options;
if (clientId == null) {
this.publicClientApplication = null;
} else {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId;
PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId);
try {
publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl);
} catch (MalformedURLException e) {
throw logger.logExceptionAsWarning(new IllegalStateException(e));
}
HttpPipeline httpPipeline = options.getHttpPipeline();
if (httpPipeline != null) {
httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline);
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
} else {
HttpClient httpClient = options.getHttpClient();
if (httpClient != null) {
httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient));
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
} else {
httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient));
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
}
}
this.publicClientApplication = publicClientApplicationBuilder.build();
}
}
/**
* Asynchronously acquire a token from Active Directory with a client secret.
*
* @param clientSecret the client secret of the application
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
try {
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromSecret(clientSecret))
.authority(authorityUrl);
if (httpPipelineAdapter != null) {
applicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
ConfidentialClientApplication application = applicationBuilder.build();
return Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(),
ZoneOffset.UTC)));
} catch (MalformedURLException e) {
return Mono.error(e);
}
}
    /**
     * Asynchronously acquire a token from Active Directory with a PKCS12 certificate.
     *
     * @param pfxCertificatePath the path to the PKCS12 certificate of the application
     * @param pfxCertificatePassword the password protecting the PFX certificate
     * @param request the details of the token request
     * @return a Publisher that emits an AccessToken
     */
    public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword,
        TokenRequestContext request) {
        String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
        // Defer certificate loading to subscription time via fromCallable.
        // NOTE(review): the FileInputStream is handed to the credential factory and never
        // explicitly closed here — confirm msal4j closes it, or wrap in try-with-resources.
        return Mono.fromCallable(() -> {
            ConfidentialClientApplication.Builder applicationBuilder =
                ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
                    new FileInputStream(pfxCertificatePath), pfxCertificatePassword))
                    .authority(authorityUrl);
            if (httpPipelineAdapter != null) {
                applicationBuilder.httpClient(httpPipelineAdapter);
            } else if (options.getProxyOptions() != null) {
                applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
            }
            return applicationBuilder.build();
        }).flatMap(application -> Mono.fromFuture(application.acquireToken(
            ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build())))
            .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(),
                ZoneOffset.UTC)));
    }
    /**
     * Asynchronously acquire a token from Active Directory with a PEM certificate.
     *
     * @param pemCertificatePath the path to the PEM certificate of the application
     * @param request the details of the token request
     * @return a Publisher that emits an AccessToken
     */
    public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) {
        String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
        try {
            // NOTE(review): unlike the PFX variant, this reads the file eagerly at assembly
            // time (blocking I/O outside the reactive chain) — confirm that is acceptable.
            // The PEM must contain both the private key and the certificate.
            byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath));
            ConfidentialClientApplication.Builder applicationBuilder =
                ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
                    CertificateUtil.privateKeyFromPem(pemCertificateBytes),
                    CertificateUtil.publicKeyFromPem(pemCertificateBytes)))
                    .authority(authorityUrl);
            if (httpPipelineAdapter != null) {
                applicationBuilder.httpClient(httpPipelineAdapter);
            } else if (options.getProxyOptions() != null) {
                applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
            }
            ConfidentialClientApplication application = applicationBuilder.build();
            return Mono.fromFuture(application.acquireToken(
                ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                    .build()))
                .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(),
                    ZoneOffset.UTC)));
        } catch (IOException e) {
            return Mono.error(e);
        }
    }
/**
* Asynchronously acquire a token from Active Directory with a username and a password.
*
* @param request the details of the token request
* @param username the username of the user
* @param password the password of the user
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request,
String username, String password) {
return Mono.fromFuture(publicClientApplication.acquireToken(
UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray())
.build()))
.map(MsalToken::new);
}
    /**
     * Asynchronously acquire a token from the currently logged in client.
     *
     * @param request the details of the token request
     * @param msalToken the cached token whose account (if any) scopes the silent request
     * @return a Publisher that emits an AccessToken
     */
    public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) {
        SilentParameters parameters;
        // When an account is cached on the token, target the silent request at it; otherwise
        // fall back to a silent request across the whole token cache.
        if (msalToken.getAccount() != null) {
            parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build();
        } else {
            parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build();
        }
        // defer so the (possibly throwing) acquireTokenSilently call happens per subscription.
        return Mono.defer(() -> {
            try {
                return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(MsalToken::new);
            } catch (MalformedURLException e) {
                return Mono.error(e);
            }
        });
    }
/**
* Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
* a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
* different device.
*
* @param request the details of the token request
* @param deviceCodeConsumer the user provided closure that will consume the device code challenge
* @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device
* code expires
*/
public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request,
Consumer<DeviceCodeInfo> deviceCodeConsumer) {
return Mono.fromFuture(() -> {
DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()),
dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(),
dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build();
return publicClientApplication.acquireToken(parameters);
}).map(MsalToken::new);
}
/**
* Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow.
*
* @param request the details of the token request
* @param authorizationCode the oauth2 authorization code
* @param redirectUrl the redirectUrl where the authorization code is sent to
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode,
URI redirectUrl) {
return Mono.fromFuture(() -> publicClientApplication.acquireToken(
AuthorizationCodeParameters.builder(authorizationCode, redirectUrl)
.scopes(new HashSet<>(request.getScopes()))
.build()))
.map(MsalToken::new);
}
/**
* Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
* credential will run a minimal local HttpServer at the given port, so {@code http:
* listed as a valid reply URL for the application.
*
* @param request the details of the token request
* @param port the port on which the HTTP server is listening
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
return AuthorizationCodeListener.create(port)
.flatMap(server -> {
URI redirectUri;
String browserUri;
try {
redirectUri = new URI(String.format("http:
browserUri =
String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt"
+ "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s",
authorityUrl,
clientId,
redirectUri.toString(),
UUID.randomUUID(),
String.join(" ", request.getScopes()));
} catch (URISyntaxException e) {
return server.dispose().then(Mono.error(e));
}
return server.listen()
.mergeWith(Mono.<String>fromRunnable(() -> {
try {
openUrl(browserUri);
} catch (IOException e) {
throw logger.logExceptionAsError(new IllegalStateException(e));
}
}).subscribeOn(Schedulers.newSingle("browser")))
.next()
.flatMap(code -> authenticateWithAuthorizationCode(request, code, redirectUri))
.onErrorResume(t -> server.dispose().then(Mono.error(t)))
.flatMap(msalToken -> server.dispose().then(Mono.just(msalToken)));
});
}
/**
* Asynchronously acquire a token from the App Service Managed Service Identity endpoint.
*
* @param msiEndpoint the endpoint to acquire token from
* @param msiSecret the secret to acquire token with
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret,
TokenRequestContext request) {
String resource = ScopeUtil.scopesToResource(request.getScopes());
HttpURLConnection connection = null;
StringBuilder payload = new StringBuilder();
try {
payload.append("resource=");
payload.append(URLEncoder.encode(resource, "UTF-8"));
payload.append("&api-version=");
payload.append(URLEncoder.encode("2017-09-01", "UTF-8"));
if (clientId != null) {
payload.append("&clientid=");
payload.append(URLEncoder.encode(clientId, "UTF-8"));
}
} catch (IOException exception) {
return Mono.error(exception);
}
try {
URL url = new URL(String.format("%s?%s", msiEndpoint, payload));
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
if (msiSecret != null) {
connection.setRequestProperty("Secret", msiSecret);
}
connection.setRequestProperty("Metadata", "true");
connection.connect();
Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A");
String result = s.hasNext() ? s.next() : "";
return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON));
} catch (IOException e) {
return Mono.error(e);
} finally {
if (connection != null) {
connection.disconnect();
}
}
}
/**
* Asynchronously acquire a token from the Virtual Machine IMDS endpoint.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) {
String resource = ScopeUtil.scopesToResource(request.getScopes());
StringBuilder payload = new StringBuilder();
final int imdsUpgradeTimeInMs = 70 * 1000;
try {
payload.append("api-version=");
payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
payload.append("&resource=");
payload.append(URLEncoder.encode(resource, "UTF-8"));
if (clientId != null) {
payload.append("&client_id=");
payload.append(URLEncoder.encode(clientId, "UTF-8"));
}
} catch (IOException exception) {
return Mono.error(exception);
}
return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> {
int retry = 1;
while (retry <= options.getMaxRetry()) {
URL url = null;
HttpURLConnection connection = null;
try {
url =
new URL(String.format("http:
payload.toString()));
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.setRequestProperty("Metadata", "true");
connection.connect();
Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
.useDelimiter("\\A");
String result = s.hasNext() ? s.next() : "";
return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON);
} catch (IOException exception) {
if (connection == null) {
throw logger.logExceptionAsError(new RuntimeException(
String.format("Could not connect to the url: %s.", url), exception));
}
int responseCode = connection.getResponseCode();
if (responseCode == 410
|| responseCode == 429
|| responseCode == 404
|| (responseCode >= 500 && responseCode <= 599)) {
int retryTimeoutInMs = options.getRetryTimeout()
.apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000;
retryTimeoutInMs =
(responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs
: retryTimeoutInMs;
retry++;
if (retry > options.getMaxRetry()) {
break;
} else {
sleep(retryTimeoutInMs);
}
} else {
throw logger.logExceptionAsError(new RuntimeException(
"Couldn't acquire access token from IMDS, verify your objectId, "
+ "clientId or msiResourceId", exception));
}
} finally {
if (connection != null) {
connection.disconnect();
}
}
}
throw logger.logExceptionAsError(new RuntimeException(
String.format("MSI: Failed to acquire tokens after retrying %s times",
options.getMaxRetry())));
}));
}
private Mono<Boolean> checkIMDSAvailable() {
StringBuilder payload = new StringBuilder();
try {
payload.append("api-version=");
payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
} catch (IOException exception) {
return Mono.error(exception);
}
return Mono.fromCallable(() -> {
HttpURLConnection connection = null;
URL url = new URL(String.format("http:
payload.toString()));
try {
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.setConnectTimeout(500);
connection.connect();
} finally {
if (connection != null) {
connection.disconnect();
}
}
return true;
});
}
private static void sleep(int millis) {
try {
Thread.sleep(millis);
} catch (InterruptedException ex) {
throw new IllegalStateException(ex);
}
}
private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) {
switch (options.getType()) {
case SOCKS4:
case SOCKS5:
return new Proxy(Type.SOCKS, options.getAddress());
case HTTP:
default:
return new Proxy(Type.HTTP, options.getAddress());
}
}
    /**
     * Opens {@code url} in the default system browser, using an OS-specific launcher command.
     *
     * @param url the URL to open.
     * @throws IOException if launching the browser command fails.
     */
    void openUrl(String url) throws IOException {
        Runtime rt = Runtime.getRuntime();
        // OS detection by substring of os.name; unrecognized platforms only log a message
        // asking the user to open the URL manually.
        String os = System.getProperty("os.name").toLowerCase(Locale.ROOT);
        if (os.contains("win")) {
            // NOTE(review): Runtime.exec(String) tokenizes on whitespace; prefer ProcessBuilder
            // with an argument list if url can contain spaces or shell metacharacters — confirm
            // url is always internally constructed and trusted.
            rt.exec("rundll32 url.dll,FileProtocolHandler " + url);
        } else if (os.contains("mac")) {
            rt.exec("open " + url);
        } else if (os.contains("nix") || os.contains("nux")) {
            rt.exec("xdg-open " + url);
        } else {
            logger.error("Browser could not be opened - please open {} in a browser on this device.", url);
        }
    }
} | class IdentityClient {
private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter();
private static final Random RANDOM = new Random();
private final ClientLogger logger = new ClientLogger(IdentityClient.class);
private final IdentityClientOptions options;
private final PublicClientApplication publicClientApplication;
private final String tenantId;
private final String clientId;
private HttpPipelineAdapter httpPipelineAdapter;
/**
* Creates an IdentityClient with the given options.
*
* @param tenantId the tenant ID of the application.
* @param clientId the client ID of the application.
* @param options the options configuring the client.
*/
IdentityClient(String tenantId, String clientId, IdentityClientOptions options) {
if (tenantId == null) {
tenantId = "common";
}
if (options == null) {
options = new IdentityClientOptions();
}
this.tenantId = tenantId;
this.clientId = clientId;
this.options = options;
if (clientId == null) {
this.publicClientApplication = null;
} else {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId;
PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId);
try {
publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl);
} catch (MalformedURLException e) {
throw logger.logExceptionAsWarning(new IllegalStateException(e));
}
HttpPipeline httpPipeline = options.getHttpPipeline();
if (httpPipeline != null) {
httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline);
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
} else {
HttpClient httpClient = options.getHttpClient();
if (httpClient != null) {
httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient));
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
} else {
httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault()));
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
}
}
this.publicClientApplication = publicClientApplicationBuilder.build();
}
}
/**
* Asynchronously acquire a token from Active Directory with a client secret.
*
* @param clientSecret the client secret of the application
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) {
    // Authority = host (without trailing slashes) + tenant.
    String authority = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
    try {
        ConfidentialClientApplication.Builder builder = ConfidentialClientApplication
            .builder(clientId, ClientCredentialFactory.createFromSecret(clientSecret))
            .authority(authority);
        // Prefer the configured pipeline adapter; otherwise fall back to plain proxy options.
        if (httpPipelineAdapter != null) {
            builder.httpClient(httpPipelineAdapter);
        } else if (options.getProxyOptions() != null) {
            builder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
        }
        ClientCredentialParameters parameters =
            ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build();
        return Mono.fromFuture(builder.build().acquireToken(parameters))
            .map(result -> new AccessToken(result.accessToken(),
                OffsetDateTime.ofInstant(result.expiresOnDate().toInstant(), ZoneOffset.UTC)));
    } catch (MalformedURLException e) {
        // Thrown by authority(...) for an invalid authority URL.
        return Mono.error(e);
    }
}
/**
* Asynchronously acquire a token from Active Directory with a PKCS12 certificate.
*
* @param pfxCertificatePath the path to the PKCS12 certificate of the application
* @param pfxCertificatePassword the password protecting the PFX certificate
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword,
                                                        TokenRequestContext request) {
    String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
    return Mono.fromCallable(() -> {
        // Fix: the certificate stream was previously opened and never closed,
        // leaking a file handle on every call. MSAL reads the stream while the
        // credential is created, so closing it after build() is safe.
        try (FileInputStream pfxCertificateStream = new FileInputStream(pfxCertificatePath)) {
            ConfidentialClientApplication.Builder applicationBuilder =
                ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
                    pfxCertificateStream, pfxCertificatePassword))
                    .authority(authorityUrl);
            // Transport priority: configured pipeline adapter, then proxy options.
            if (httpPipelineAdapter != null) {
                applicationBuilder.httpClient(httpPipelineAdapter);
            } else if (options.getProxyOptions() != null) {
                applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
            }
            return applicationBuilder.build();
        }
    }).flatMap(application -> Mono.fromFuture(application.acquireToken(
        ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build())))
        .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(),
            ZoneOffset.UTC)));
}
/**
* Asynchronously acquire a token from Active Directory with a PEM certificate.
*
* @param pemCertificatePath the path to the PEM certificate of the application
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) {
    String authority = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
    try {
        // Read the PEM file once; it contains both the private key and the certificate.
        byte[] pemBytes = Files.readAllBytes(Paths.get(pemCertificatePath));
        ConfidentialClientApplication.Builder builder = ConfidentialClientApplication
            .builder(clientId, ClientCredentialFactory.createFromCertificate(
                CertificateUtil.privateKeyFromPem(pemBytes),
                CertificateUtil.publicKeyFromPem(pemBytes)))
            .authority(authority);
        // Prefer the configured pipeline adapter over plain proxy options.
        if (httpPipelineAdapter != null) {
            builder.httpClient(httpPipelineAdapter);
        } else if (options.getProxyOptions() != null) {
            builder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
        }
        return Mono.fromFuture(builder.build().acquireToken(
                ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build()))
            .map(result -> new AccessToken(result.accessToken(),
                OffsetDateTime.ofInstant(result.expiresOnDate().toInstant(), ZoneOffset.UTC)));
    } catch (IOException e) {
        // Covers both the file read and the (malformed) authority URL.
        return Mono.error(e);
    }
}
/**
* Asynchronously acquire a token from Active Directory with a username and a password.
*
* @param request the details of the token request
* @param username the username of the user
* @param password the password of the user
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request,
                                                        String username, String password) {
    // ROPC flow: exchange the user's credentials directly for a token.
    UserNamePasswordParameters parameters = UserNamePasswordParameters
        .builder(new HashSet<>(request.getScopes()), username, password.toCharArray())
        .build();
    return Mono.fromFuture(publicClientApplication.acquireToken(parameters))
        .map(MsalToken::new);
}
/**
* Asynchronously acquire a token from the currently logged in client.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) {
    // Silent acquisition: use the cached account when one is attached to the token.
    SilentParameters parameters = msalToken.getAccount() == null
        ? SilentParameters.builder(new HashSet<>(request.getScopes())).build()
        : SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build();
    // Defer so the (checked) MalformedURLException surfaces per subscription.
    return Mono.defer(() -> {
        try {
            return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters))
                .map(MsalToken::new);
        } catch (MalformedURLException e) {
            return Mono.error(e);
        }
    });
}
/**
* Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
* a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
* different device.
*
* @param request the details of the token request
* @param deviceCodeConsumer the user provided closure that will consume the device code challenge
* @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device
* code expires
*/
public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request,
                                                  Consumer<DeviceCodeInfo> deviceCodeConsumer) {
    // Build the parameters lazily inside the supplier so nothing runs until subscription.
    return Mono.fromFuture(() -> publicClientApplication.acquireToken(
            DeviceCodeFlowParameters.builder(
                new HashSet<>(request.getScopes()),
                // Translate MSAL's device code into the SDK-facing DeviceCodeInfo
                // and hand it to the caller-supplied consumer.
                deviceCode -> deviceCodeConsumer.accept(new DeviceCodeInfo(
                    deviceCode.userCode(), deviceCode.deviceCode(), deviceCode.verificationUri(),
                    OffsetDateTime.now().plusSeconds(deviceCode.expiresIn()), deviceCode.message())))
                .build()))
        .map(MsalToken::new);
}
/**
* Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow.
*
* @param request the details of the token request
* @param authorizationCode the oauth2 authorization code
* @param redirectUrl the redirectUrl where the authorization code is sent to
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode,
                                                         URI redirectUrl) {
    // Exchange the oauth2 authorization code for a token; deferred until subscription.
    return Mono.fromFuture(() -> {
        AuthorizationCodeParameters parameters = AuthorizationCodeParameters
            .builder(authorizationCode, redirectUrl)
            .scopes(new HashSet<>(request.getScopes()))
            .build();
        return publicClientApplication.acquireToken(parameters);
    }).map(MsalToken::new);
}
/**
* Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
* credential will run a minimal local HttpServer at the given port, so {@code http:
* listed as a valid reply URL for the application.
*
* @param request the details of the token request
* @param port the port on which the HTTP server is listening
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
// Start a minimal local HTTP server on the given port to receive the auth code.
return AuthorizationCodeListener.create(port)
.flatMap(server -> {
URI redirectUri;
String browserUri;
try {
// NOTE(review): the string literal below is truncated in this copy of the
// source (text after "//" was stripped); presumably "http://localhost:%s"
// formatted with the port - confirm against the original file.
redirectUri = new URI(String.format("http:
browserUri =
String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt"
+ "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s",
authorityUrl,
clientId,
redirectUri.toString(),
UUID.randomUUID(),
String.join(" ", request.getScopes()));
} catch (URISyntaxException e) {
// Clean up the listener before surfacing the error.
return server.dispose().then(Mono.error(e));
}
// Listen for the redirect while opening the browser on a dedicated thread;
// whichever signal arrives first (the auth code) wins via next().
return server.listen()
.mergeWith(Mono.<String>fromRunnable(() -> {
try {
openUrl(browserUri);
} catch (IOException e) {
throw logger.logExceptionAsError(new IllegalStateException(e));
}
}).subscribeOn(Schedulers.newSingle("browser")))
.next()
.flatMap(code -> authenticateWithAuthorizationCode(request, code, redirectUri))
// Dispose the local server on both error and success paths.
.onErrorResume(t -> server.dispose().then(Mono.error(t)))
.flatMap(msalToken -> server.dispose().then(Mono.just(msalToken)));
});
}
/**
* Asynchronously acquire a token from the App Service Managed Service Identity endpoint.
*
* @param msiEndpoint the endpoint to acquire token from
* @param msiSecret the secret to acquire token with
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret,
                                                               TokenRequestContext request) {
    // App Service MSI protocol (api-version 2017-09-01): GET with the secret header.
    String resource = ScopeUtil.scopesToResource(request.getScopes());
    HttpURLConnection connection = null;
    StringBuilder payload = new StringBuilder();
    try {
        payload.append("resource=");
        payload.append(URLEncoder.encode(resource, "UTF-8"));
        payload.append("&api-version=");
        payload.append(URLEncoder.encode("2017-09-01", "UTF-8"));
        if (clientId != null) {
            // User-assigned identity selector.
            payload.append("&clientid=");
            payload.append(URLEncoder.encode(clientId, "UTF-8"));
        }
    } catch (IOException exception) {
        return Mono.error(exception);
    }
    try {
        URL url = new URL(String.format("%s?%s", msiEndpoint, payload));
        connection = (HttpURLConnection) url.openConnection();
        connection.setRequestMethod("GET");
        if (msiSecret != null) {
            connection.setRequestProperty("Secret", msiSecret);
        }
        connection.setRequestProperty("Metadata", "true");
        connection.connect();
        // Fix: close the Scanner (previously leaked) so the response stream is
        // released; "\\A" makes it slurp the whole body in one token.
        try (Scanner scanner =
                 new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A")) {
            String result = scanner.hasNext() ? scanner.next() : "";
            return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON));
        }
    } catch (IOException e) {
        return Mono.error(e);
    } finally {
        if (connection != null) {
            connection.disconnect();
        }
    }
}
/**
* Asynchronously acquire a token from the Virtual Machine IMDS endpoint.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) {
String resource = ScopeUtil.scopesToResource(request.getScopes());
StringBuilder payload = new StringBuilder();
// Minimum wait applied on HTTP 410 while IMDS upgrades itself (70s).
final int imdsUpgradeTimeInMs = 70 * 1000;
try {
payload.append("api-version=");
payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
payload.append("&resource=");
payload.append(URLEncoder.encode(resource, "UTF-8"));
if (clientId != null) {
// User-assigned identity selector.
payload.append("&client_id=");
payload.append(URLEncoder.encode(clientId, "UTF-8"));
}
} catch (IOException exception) {
return Mono.error(exception);
}
// Probe IMDS first, then poll with bounded retries on transient status codes.
return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> {
int retry = 1;
while (retry <= options.getMaxRetry()) {
URL url = null;
HttpURLConnection connection = null;
try {
// NOTE(review): this URL literal is truncated in this copy of the source
// (text after "//" was stripped); presumably the IMDS token endpoint
// "http://169.254.169.254/metadata/identity/oauth2/token?%s" - confirm.
url =
new URL(String.format("http:
payload.toString()));
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.setRequestProperty("Metadata", "true");
connection.connect();
// NOTE(review): this Scanner is never closed; the stream leaks until GC.
Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
.useDelimiter("\\A");
String result = s.hasNext() ? s.next() : "";
return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON);
} catch (IOException exception) {
if (connection == null) {
throw logger.logExceptionAsError(new RuntimeException(
String.format("Could not connect to the url: %s.", url), exception));
}
int responseCode = connection.getResponseCode();
// Retryable: 410 (upgrading), 429 (throttled), 404, and 5xx.
if (responseCode == 410
|| responseCode == 429
|| responseCode == 404
|| (responseCode >= 500 && responseCode <= 599)) {
// NOTE(review): getNano() returns the nanosecond component of the
// Duration; dividing by 1000 yields microseconds, not milliseconds -
// this looks like a unit bug, confirm the intended backoff.
int retryTimeoutInMs = options.getRetryTimeout()
.apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000;
retryTimeoutInMs =
(responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs
: retryTimeoutInMs;
retry++;
if (retry > options.getMaxRetry()) {
break;
} else {
sleep(retryTimeoutInMs);
}
} else {
throw logger.logExceptionAsError(new RuntimeException(
"Couldn't acquire access token from IMDS, verify your objectId, "
+ "clientId or msiResourceId", exception));
}
} finally {
if (connection != null) {
connection.disconnect();
}
}
}
// All retries exhausted.
throw logger.logExceptionAsError(new RuntimeException(
String.format("MSI: Failed to acquire tokens after retrying %s times",
options.getMaxRetry())));
}));
}
private Mono<Boolean> checkIMDSAvailable() {
// Cheap availability probe: a short-timeout GET against the IMDS endpoint.
StringBuilder payload = new StringBuilder();
try {
payload.append("api-version=");
payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
} catch (IOException exception) {
return Mono.error(exception);
}
return Mono.fromCallable(() -> {
HttpURLConnection connection = null;
// NOTE(review): this URL literal is truncated in this copy of the source
// (text after "//" was stripped); presumably the IMDS base endpoint - confirm.
URL url = new URL(String.format("http:
payload.toString()));
try {
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
// 500ms connect timeout: fail fast when not running on an Azure VM.
connection.setConnectTimeout(500);
connection.connect();
} finally {
if (connection != null) {
connection.disconnect();
}
}
// Reaching here means the endpoint accepted a connection.
return true;
});
}
/**
 * Sleeps the current thread for the given number of milliseconds.
 *
 * @param millis how long to sleep, in milliseconds
 * @throws IllegalStateException if the thread is interrupted while sleeping
 */
private static void sleep(int millis) {
    try {
        Thread.sleep(millis);
    } catch (InterruptedException ex) {
        // Fix: restore the interrupt flag (previously swallowed) so callers and
        // thread pools can still observe the interruption.
        Thread.currentThread().interrupt();
        throw new IllegalStateException(ex);
    }
}
/**
 * Maps the SDK proxy configuration onto a {@link java.net.Proxy}.
 * SOCKS4/SOCKS5 become a SOCKS proxy; HTTP (and anything else) becomes an HTTP proxy.
 */
private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) {
    Type javaProxyType;
    switch (options.getType()) {
        case SOCKS4:
        case SOCKS5:
            javaProxyType = Type.SOCKS;
            break;
        case HTTP:
        default:
            javaProxyType = Type.HTTP;
            break;
    }
    return new Proxy(javaProxyType, options.getAddress());
}
/**
 * Opens the given URL in the platform's default browser, if one can be located.
 * Logs an instruction to open the URL manually on unsupported platforms.
 *
 * @param url the URL to open
 * @throws IOException if launching the browser process fails
 */
void openUrl(String url) throws IOException {
    Runtime rt = Runtime.getRuntime();
    String os = System.getProperty("os.name").toLowerCase(Locale.ROOT);
    // Fix: use the String[] overload of exec. The single-String overload
    // tokenizes on whitespace, so a URL containing unusual characters could be
    // split into extra arguments; the array form passes the URL as one argument.
    if (os.contains("win")) {
        rt.exec(new String[] {"rundll32", "url.dll,FileProtocolHandler", url});
    } else if (os.contains("mac")) {
        rt.exec(new String[] {"open", url});
    } else if (os.contains("nix") || os.contains("nux")) {
        rt.exec(new String[] {"xdg-open", url});
    } else {
        logger.error("Browser could not be opened - please open {} in a browser on this device.", url);
    }
}
} |
Why do we have to add this code block? Shouldn't this implicitly be asserted if the job finishes successfully? | public void testDispatcherProcessFailure() throws Exception {
// Arrange: 2 dispatcher (JobManager) processes, 2 TMs x 2 slots = PARALLELISM.
final Time timeout = Time.seconds(30L);
final File zookeeperStoragePath = temporaryFolder.newFolder();
final int numberOfJobManagers = 2;
final int numberOfTaskManagers = 2;
final int numberOfSlotsPerTaskManager = 2;
assertEquals(PARALLELISM, numberOfTaskManagers * numberOfSlotsPerTaskManager);
final DispatcherProcess[] dispatcherProcesses = new DispatcherProcess[numberOfJobManagers];
TaskManagerRunner[] taskManagerRunners = new TaskManagerRunner[numberOfTaskManagers];
HighAvailabilityServices highAvailabilityServices = null;
LeaderRetrievalService leaderRetrievalService = null;
File coordinateTempDir = null;
Configuration config = ZooKeeperTestUtils.createZooKeeperHAConfig(
zooKeeper.getConnectString(), zookeeperStoragePath.getPath());
config.setString(TaskManagerOptions.MANAGED_MEMORY_SIZE, "4m");
config.setInteger(TaskManagerOptions.NETWORK_NUM_BUFFERS, 100);
config.setInteger(TaskManagerOptions.NUM_TASK_SLOTS, 2);
final RpcService rpcService = AkkaRpcServiceUtils.createRpcService("localhost", 0, config);
try {
final Deadline deadline = TestTimeOut.fromNow();
coordinateTempDir = temporaryFolder.newFolder();
// Start the first dispatcher and the task managers.
dispatcherProcesses[0] = new DispatcherProcess(0, config);
dispatcherProcesses[0].startProcess();
highAvailabilityServices = HighAvailabilityServicesUtils.createAvailableOrEmbeddedServices(
config,
TestingUtils.defaultExecutor());
for (int i = 0; i < numberOfTaskManagers; i++) {
taskManagerRunners[i] = new TaskManagerRunner(config, ResourceID.generate());
taskManagerRunners[i].start();
}
// Resolve the current dispatcher leader through ZooKeeper.
TestingListener leaderListener = new TestingListener();
leaderRetrievalService = highAvailabilityServices.getDispatcherLeaderRetriever();
leaderRetrievalService.start(leaderListener);
leaderListener.waitForNewLeader(deadline.timeLeft().toMillis());
String leaderAddress = leaderListener.getAddress();
UUID leaderId = leaderListener.getLeaderSessionID();
final CompletableFuture<DispatcherGateway> dispatcherGatewayFuture = rpcService.connect(
leaderAddress,
DispatcherId.fromUuid(leaderId),
DispatcherGateway.class);
final DispatcherGateway dispatcherGateway = dispatcherGatewayFuture.get();
waitForTaskManagers(numberOfTaskManagers, dispatcherGateway, deadline.timeLeft());
// Run the HA test program on a separate thread; errors are captured in errorRef.
final File coordinateDirClosure = coordinateTempDir;
final Throwable[] errorRef = new Throwable[1];
Thread programTrigger = new Thread("Program Trigger") {
@Override
public void run() {
try {
testJobManagerFailure(zooKeeper.getConnectString(), coordinateDirClosure, zookeeperStoragePath);
}
catch (Throwable t) {
t.printStackTrace();
errorRef[0] = t;
}
}
};
programTrigger.start();
// Wait until every subtask has written its ready marker, then kill the leader.
AbstractTaskManagerProcessFailureRecoveryTest.waitForMarkerFiles(coordinateTempDir,
READY_MARKER_FILE_PREFIX, PARALLELISM, deadline.timeLeft().toMillis());
dispatcherProcesses[0].destroy();
// Bring up the stand-by dispatcher and wait for it to take over leadership.
dispatcherProcesses[1] = new DispatcherProcess(1, config);
dispatcherProcesses[1].startProcess();
leaderListener.waitForNewLeader(deadline.timeLeft().toMillis());
leaderAddress = leaderListener.getAddress();
leaderId = leaderListener.getLeaderSessionID();
final CompletableFuture<DispatcherGateway> newDispatcherGatewayFuture = rpcService.connect(
leaderAddress,
DispatcherId.fromUuid(leaderId),
DispatcherGateway.class);
final DispatcherGateway newDispatcherGateway = newDispatcherGatewayFuture.get();
waitForTaskManagers(numberOfTaskManagers, newDispatcherGateway, deadline.timeLeft());
// Unblock the program and assert that it finishes without errors.
AbstractTaskManagerProcessFailureRecoveryTest.touchFile(new File(coordinateTempDir, PROCEED_MARKER_FILE));
programTrigger.join(deadline.timeLeft().toMillis());
AbstractTaskManagerProcessFailureRecoveryTest.waitForMarkerFiles(coordinateTempDir,
FINISH_MARKER_FILE_PREFIX, 1, deadline.timeLeft().toMillis());
assertFalse("The program did not finish in time", programTrigger.isAlive());
if (errorRef[0] != null) {
Throwable error = errorRef[0];
error.printStackTrace();
fail("The program encountered a " + error.getClass().getSimpleName() + " : " + error.getMessage());
}
}
catch (Throwable t) {
// Dump process logs before rethrowing to aid diagnosis of CI failures.
t.printStackTrace();
for (DispatcherProcess p : dispatcherProcesses) {
if (p != null) {
p.printProcessLog();
}
}
throw t;
}
finally {
// Best-effort teardown of every resource acquired above.
for (int i = 0; i < numberOfTaskManagers; i++) {
if (taskManagerRunners[i] != null) {
taskManagerRunners[i].close();
}
}
if (leaderRetrievalService != null) {
leaderRetrievalService.stop();
}
for (DispatcherProcess dispatcherProcess : dispatcherProcesses) {
if (dispatcherProcess != null) {
dispatcherProcess.destroy();
}
}
if (highAvailabilityServices != null) {
highAvailabilityServices.closeAndCleanupAllData();
}
RpcUtils.terminateRpcService(rpcService, timeout);
if (coordinateTempDir != null) {
try {
FileUtils.deleteDirectory(coordinateTempDir);
}
catch (Throwable ignored) {
}
}
}
} | waitForTaskManagers(numberOfTaskManagers, newDispatcherGateway, deadline.timeLeft()); | public void testDispatcherProcessFailure() throws Exception {
// Arrange: 2 dispatcher (JobManager) processes, 2 TMs x 2 slots = PARALLELISM.
final Time timeout = Time.seconds(30L);
final File zookeeperStoragePath = temporaryFolder.newFolder();
final int numberOfJobManagers = 2;
final int numberOfTaskManagers = 2;
final int numberOfSlotsPerTaskManager = 2;
assertEquals(PARALLELISM, numberOfTaskManagers * numberOfSlotsPerTaskManager);
final DispatcherProcess[] dispatcherProcesses = new DispatcherProcess[numberOfJobManagers];
TaskManagerRunner[] taskManagerRunners = new TaskManagerRunner[numberOfTaskManagers];
HighAvailabilityServices highAvailabilityServices = null;
LeaderRetrievalService leaderRetrievalService = null;
File coordinateTempDir = null;
Configuration config = ZooKeeperTestUtils.createZooKeeperHAConfig(
zooKeeper.getConnectString(), zookeeperStoragePath.getPath());
config.setString(TaskManagerOptions.MANAGED_MEMORY_SIZE, "4m");
config.setInteger(TaskManagerOptions.NETWORK_NUM_BUFFERS, 100);
config.setInteger(TaskManagerOptions.NUM_TASK_SLOTS, 2);
final RpcService rpcService = AkkaRpcServiceUtils.createRpcService("localhost", 0, config);
try {
final Deadline deadline = TestTimeOut.fromNow();
coordinateTempDir = temporaryFolder.newFolder();
// Start the first dispatcher and the task managers.
dispatcherProcesses[0] = new DispatcherProcess(0, config);
dispatcherProcesses[0].startProcess();
highAvailabilityServices = HighAvailabilityServicesUtils.createAvailableOrEmbeddedServices(
config,
TestingUtils.defaultExecutor());
for (int i = 0; i < numberOfTaskManagers; i++) {
taskManagerRunners[i] = new TaskManagerRunner(config, ResourceID.generate());
taskManagerRunners[i].start();
}
// Resolve the current dispatcher leader through ZooKeeper.
TestingListener leaderListener = new TestingListener();
leaderRetrievalService = highAvailabilityServices.getDispatcherLeaderRetriever();
leaderRetrievalService.start(leaderListener);
leaderListener.waitForNewLeader(deadline.timeLeft().toMillis());
String leaderAddress = leaderListener.getAddress();
UUID leaderId = leaderListener.getLeaderSessionID();
final CompletableFuture<DispatcherGateway> dispatcherGatewayFuture = rpcService.connect(
leaderAddress,
DispatcherId.fromUuid(leaderId),
DispatcherGateway.class);
final DispatcherGateway dispatcherGateway = dispatcherGatewayFuture.get();
waitForTaskManagers(numberOfTaskManagers, dispatcherGateway, deadline.timeLeft());
// Run the HA test program on a separate thread; errors are captured in errorRef.
final File coordinateDirClosure = coordinateTempDir;
final Throwable[] errorRef = new Throwable[1];
Thread programTrigger = new Thread("Program Trigger") {
@Override
public void run() {
try {
testJobManagerFailure(zooKeeper.getConnectString(), coordinateDirClosure, zookeeperStoragePath);
}
catch (Throwable t) {
t.printStackTrace();
errorRef[0] = t;
}
}
};
programTrigger.start();
// Wait until every subtask has written its ready marker, then kill the leader.
AbstractTaskManagerProcessFailureRecoveryTest.waitForMarkerFiles(coordinateTempDir,
READY_MARKER_FILE_PREFIX, PARALLELISM, deadline.timeLeft().toMillis());
dispatcherProcesses[0].destroy();
// Bring up the stand-by dispatcher.
dispatcherProcesses[1] = new DispatcherProcess(1, config);
dispatcherProcesses[1].startProcess();
// NOTE(review): unlike the variant above, this version proceeds without
// waiting for the new leader or for the TMs to re-register - it relies on the
// job's successful completion (FINISH markers below) to prove recovery.
AbstractTaskManagerProcessFailureRecoveryTest.touchFile(new File(coordinateTempDir, PROCEED_MARKER_FILE));
programTrigger.join(deadline.timeLeft().toMillis());
AbstractTaskManagerProcessFailureRecoveryTest.waitForMarkerFiles(coordinateTempDir,
FINISH_MARKER_FILE_PREFIX, 1, deadline.timeLeft().toMillis());
assertFalse("The program did not finish in time", programTrigger.isAlive());
if (errorRef[0] != null) {
Throwable error = errorRef[0];
error.printStackTrace();
fail("The program encountered a " + error.getClass().getSimpleName() + " : " + error.getMessage());
}
}
catch (Throwable t) {
// Dump process logs before rethrowing to aid diagnosis of CI failures.
t.printStackTrace();
for (DispatcherProcess p : dispatcherProcesses) {
if (p != null) {
p.printProcessLog();
}
}
throw t;
}
finally {
// Best-effort teardown of every resource acquired above.
for (int i = 0; i < numberOfTaskManagers; i++) {
if (taskManagerRunners[i] != null) {
taskManagerRunners[i].close();
}
}
if (leaderRetrievalService != null) {
leaderRetrievalService.stop();
}
for (DispatcherProcess dispatcherProcess : dispatcherProcesses) {
if (dispatcherProcess != null) {
dispatcherProcess.destroy();
}
}
if (highAvailabilityServices != null) {
highAvailabilityServices.closeAndCleanupAllData();
}
RpcUtils.terminateRpcService(rpcService, timeout);
if (coordinateTempDir != null) {
try {
FileUtils.deleteDirectory(coordinateTempDir);
}
catch (Throwable ignored) {
}
}
}
} | class JobManagerHAProcessFailureRecoveryITCase extends TestLogger {
private static ZooKeeperTestEnvironment zooKeeper;
private static final FiniteDuration TestTimeOut = new FiniteDuration(5, TimeUnit.MINUTES);
@Rule
public final TemporaryFolder temporaryFolder = new TemporaryFolder();
@BeforeClass
public static void setup() {
// One shared single-node ZooKeeper instance for the whole test class.
zooKeeper = new ZooKeeperTestEnvironment(1);
}
@Before
public void cleanUp() throws Exception {
// Wipe all znodes so individual test runs do not see each other's HA state.
zooKeeper.deleteAll();
}
@AfterClass
public static void tearDown() throws Exception {
// Null-guard: setup() may have failed before assigning the field.
if (zooKeeper != null) {
zooKeeper.shutdown();
}
}
protected static final String READY_MARKER_FILE_PREFIX = "ready_";
protected static final String FINISH_MARKER_FILE_PREFIX = "finish_";
protected static final String PROCEED_MARKER_FILE = "proceed";
protected static final int PARALLELISM = 4;
private final ExecutionMode executionMode;
// Parameterized constructor: the suite runs once per ExecutionMode (see executionMode()).
public JobManagerHAProcessFailureRecoveryITCase(ExecutionMode executionMode) {
this.executionMode = executionMode;
}
@Parameterized.Parameters
public static Collection<Object[]> executionMode() {
    // One parameter row per execution mode; the whole suite runs once for each.
    return Arrays.asList(
        new Object[] {ExecutionMode.PIPELINED},
        new Object[] {ExecutionMode.BATCH});
}
/**
* Test program with JobManager failure.
*
* @param zkQuorum ZooKeeper quorum to connect to
* @param coordinateDir Coordination directory
* @throws Exception
*/
private void testJobManagerFailure(String zkQuorum, final File coordinateDir, final File zookeeperStoragePath) throws Exception {
// Submit a HA-configured batch job against whichever JobManager is leader.
Configuration config = new Configuration();
config.setString(HighAvailabilityOptions.HA_MODE, "ZOOKEEPER");
config.setString(HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM, zkQuorum);
config.setString(HighAvailabilityOptions.HA_STORAGE_PATH, zookeeperStoragePath.getAbsolutePath());
ExecutionEnvironment env = ExecutionEnvironment.createRemoteEnvironment(
"leader", 1, config);
env.setParallelism(PARALLELISM);
// One restart with no delay: the job must survive exactly one leader failure.
env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0L));
env.getConfig().setExecutionMode(executionMode);
env.getConfig().disableSysoutLogging();
final long numElements = 100000L;
final DataSet<Long> result = env.generateSequence(1, numElements)
.rebalance()
.map(new RichMapFunction<Long, Long>() {
private final File proceedFile = new File(coordinateDir, PROCEED_MARKER_FILE);
private boolean markerCreated = false;
private boolean checkForProceedFile = true;
@Override
public Long map(Long value) throws Exception {
// Signal readiness once per subtask via a marker file, then block
// (polling) until the test creates the proceed marker.
if (!markerCreated) {
int taskIndex = getRuntimeContext().getIndexOfThisSubtask();
AbstractTaskManagerProcessFailureRecoveryTest.touchFile(
new File(coordinateDir, READY_MARKER_FILE_PREFIX + taskIndex));
markerCreated = true;
}
if (checkForProceedFile) {
if (proceedFile.exists()) {
checkForProceedFile = false;
}
else {
Thread.sleep(100);
}
}
return value;
}
})
.reduce(new ReduceFunction<Long>() {
@Override
public Long reduce(Long value1, Long value2) {
return value1 + value2;
}
})
.flatMap(new RichFlatMapFunction<Long, Long>() {
@Override
public void flatMap(Long value, Collector<Long> out) throws Exception {
// Verify the Gauss sum, then write the finish marker for the test to see.
assertEquals(numElements * (numElements + 1L) / 2L, (long) value);
int taskIndex = getRuntimeContext().getIndexOfThisSubtask();
AbstractTaskManagerProcessFailureRecoveryTest.touchFile(
new File(coordinateDir, FINISH_MARKER_FILE_PREFIX + taskIndex));
}
});
result.output(new DiscardingOutputFormat<Long>());
env.execute();
}
/**
 * Blocks until at least {@code numberOfTaskManagers} task managers are connected to the
 * given dispatcher, retrying the cluster-overview request every 50ms until the deadline.
 *
 * <p>Fixes: removed the stray {@code @Test} annotation (JUnit 4 rejects private test
 * methods, and this is a helper, not a test), and the single-thread scheduled executor
 * is now shut down instead of leaking its thread on every invocation.
 */
private void waitForTaskManagers(int numberOfTaskManagers, DispatcherGateway dispatcherGateway, FiniteDuration timeLeft) throws ExecutionException, InterruptedException {
    final java.util.concurrent.ScheduledExecutorService retryExecutor =
        Executors.newSingleThreadScheduledExecutor();
    try {
        FutureUtils.retrySuccesfulWithDelay(
            () -> dispatcherGateway.requestClusterOverview(Time.milliseconds(timeLeft.toMillis())),
            Time.milliseconds(50L),
            org.apache.flink.api.common.time.Deadline.fromNow(Duration.ofMillis(timeLeft.toMillis())),
            clusterOverview -> clusterOverview.getNumTaskManagersConnected() >= numberOfTaskManagers,
            new ScheduledExecutorServiceAdapter(retryExecutor))
            .get();
    } finally {
        // The retry future has completed (get() returned or threw), so no more
        // tasks will be scheduled; release the worker thread.
        retryExecutor.shutdown();
    }
}
} | class JobManagerHAProcessFailureRecoveryITCase extends TestLogger {
private static ZooKeeperTestEnvironment zooKeeper;
private static final FiniteDuration TestTimeOut = new FiniteDuration(5, TimeUnit.MINUTES);
@Rule
public final TemporaryFolder temporaryFolder = new TemporaryFolder();
@BeforeClass
public static void setup() {
// One shared single-node ZooKeeper instance for the whole test class.
zooKeeper = new ZooKeeperTestEnvironment(1);
}
@Before
public void cleanUp() throws Exception {
// Wipe all znodes so individual test runs do not see each other's HA state.
zooKeeper.deleteAll();
}
@AfterClass
public static void tearDown() throws Exception {
// Null-guard: setup() may have failed before assigning the field.
if (zooKeeper != null) {
zooKeeper.shutdown();
}
}
protected static final String READY_MARKER_FILE_PREFIX = "ready_";
protected static final String FINISH_MARKER_FILE_PREFIX = "finish_";
protected static final String PROCEED_MARKER_FILE = "proceed";
protected static final int PARALLELISM = 4;
private final ExecutionMode executionMode;
// Parameterized constructor: the suite runs once per ExecutionMode (see executionMode()).
public JobManagerHAProcessFailureRecoveryITCase(ExecutionMode executionMode) {
this.executionMode = executionMode;
}
@Parameterized.Parameters
public static Collection<Object[]> executionMode() {
    // One parameter row per execution mode; the whole suite runs once for each.
    return Arrays.asList(
        new Object[] {ExecutionMode.PIPELINED},
        new Object[] {ExecutionMode.BATCH});
}
/**
* Test program with JobManager failure.
*
* @param zkQuorum ZooKeeper quorum to connect to
* @param coordinateDir Coordination directory
* @throws Exception
*/
private void testJobManagerFailure(String zkQuorum, final File coordinateDir, final File zookeeperStoragePath) throws Exception {
// Submit a HA-configured batch job against whichever JobManager is leader.
Configuration config = new Configuration();
config.setString(HighAvailabilityOptions.HA_MODE, "ZOOKEEPER");
config.setString(HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM, zkQuorum);
config.setString(HighAvailabilityOptions.HA_STORAGE_PATH, zookeeperStoragePath.getAbsolutePath());
ExecutionEnvironment env = ExecutionEnvironment.createRemoteEnvironment(
"leader", 1, config);
env.setParallelism(PARALLELISM);
// One restart with no delay: the job must survive exactly one leader failure.
env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0L));
env.getConfig().setExecutionMode(executionMode);
env.getConfig().disableSysoutLogging();
final long numElements = 100000L;
final DataSet<Long> result = env.generateSequence(1, numElements)
.rebalance()
.map(new RichMapFunction<Long, Long>() {
private final File proceedFile = new File(coordinateDir, PROCEED_MARKER_FILE);
private boolean markerCreated = false;
private boolean checkForProceedFile = true;
@Override
public Long map(Long value) throws Exception {
// Signal readiness once per subtask via a marker file, then block
// (polling) until the test creates the proceed marker.
if (!markerCreated) {
int taskIndex = getRuntimeContext().getIndexOfThisSubtask();
AbstractTaskManagerProcessFailureRecoveryTest.touchFile(
new File(coordinateDir, READY_MARKER_FILE_PREFIX + taskIndex));
markerCreated = true;
}
if (checkForProceedFile) {
if (proceedFile.exists()) {
checkForProceedFile = false;
}
else {
Thread.sleep(100);
}
}
return value;
}
})
.reduce(new ReduceFunction<Long>() {
@Override
public Long reduce(Long value1, Long value2) {
return value1 + value2;
}
})
.flatMap(new RichFlatMapFunction<Long, Long>() {
@Override
public void flatMap(Long value, Collector<Long> out) throws Exception {
// Verify the Gauss sum, then write the finish marker for the test to see.
assertEquals(numElements * (numElements + 1L) / 2L, (long) value);
int taskIndex = getRuntimeContext().getIndexOfThisSubtask();
AbstractTaskManagerProcessFailureRecoveryTest.touchFile(
new File(coordinateDir, FINISH_MARKER_FILE_PREFIX + taskIndex));
}
});
result.output(new DiscardingOutputFormat<Long>());
env.execute();
}
/**
 * Blocks until at least {@code numberOfTaskManagers} task managers are connected to the
 * given dispatcher, retrying the cluster-overview request every 50ms until the deadline.
 *
 * <p>Fixes: removed the stray {@code @Test} annotation (JUnit 4 rejects private test
 * methods, and this is a helper, not a test), and the single-thread scheduled executor
 * is now shut down instead of leaking its thread on every invocation.
 */
private void waitForTaskManagers(int numberOfTaskManagers, DispatcherGateway dispatcherGateway, FiniteDuration timeLeft) throws ExecutionException, InterruptedException {
    final java.util.concurrent.ScheduledExecutorService retryExecutor =
        Executors.newSingleThreadScheduledExecutor();
    try {
        FutureUtils.retrySuccesfulWithDelay(
            () -> dispatcherGateway.requestClusterOverview(Time.milliseconds(timeLeft.toMillis())),
            Time.milliseconds(50L),
            org.apache.flink.api.common.time.Deadline.fromNow(Duration.ofMillis(timeLeft.toMillis())),
            clusterOverview -> clusterOverview.getNumTaskManagersConnected() >= numberOfTaskManagers,
            new ScheduledExecutorServiceAdapter(retryExecutor))
            .get();
    } finally {
        // The retry future has completed (get() returned or threw), so no more
        // tasks will be scheduled; release the worker thread.
        retryExecutor.shutdown();
    }
}
} |
I see 2 paths above for assigning to `from`: ``` String from = String.format("%s/v%s/%s.zip", GITHUB_DOWNLOAD_PREFIX, getSDKVersion(), buildFileName()); if (!Strings.isNullOrEmpty(options.getPrismLocation())) { checkArgument( !options.getPrismLocation().startsWith(GITHUB_TAG_PREFIX), "Provided --prismLocation URL is not an Apache Beam Github " + "Release page URL or download URL: ", from); from = options.getPrismLocation(); } ``` In either of these assignments are followed, won't `from` start with either `GITHUB_DOWNLOAD_PREFIX` or `GITHUB_TAG_PREFIX` which both start with `https://`? | String resolve() throws IOException {
// Default to the canonical Apache Beam GitHub release artifact for this SDK version.
String from =
    String.format("%s/v%s/%s.zip", GITHUB_DOWNLOAD_PREFIX, getSDKVersion(), buildFileName());
if (!Strings.isNullOrEmpty(options.getPrismLocation())) {
    // Reject GitHub release *tag* page URLs; only download URLs or local paths work.
    // Fix: the error message now interpolates the value that was actually rejected.
    // Previously the template had no %s placeholder and the unused default `from`
    // was passed instead of options.getPrismLocation().
    checkArgument(
        !options.getPrismLocation().startsWith(GITHUB_TAG_PREFIX),
        "Provided --prismLocation URL is not an Apache Beam Github "
            + "Release page URL or download URL: %s",
        options.getPrismLocation());
    from = options.getPrismLocation();
}
String fromFileName = getNameWithoutExtension(from);
Path to = Paths.get(userHome(), PRISM_BIN_PATH, fromFileName);
// Reuse a previously downloaded/unpacked binary if it is already cached.
if (Files.exists(to)) {
    return to.toString();
}
createDirectoryIfNeeded(to);
// HTTP(S) locations are fetched remotely; anything else is treated as a local path.
if (from.startsWith("http")) {
    String result = resolve(new URL(from), to);
    checkState(Files.exists(to), "Resolved location does not exist: %s", result);
    return result;
}
String result = resolve(Paths.get(from), to);
checkState(Files.exists(to), "Resolved location does not exist: %s", result);
return result;
} | if (from.startsWith("http")) { | String resolve() throws IOException {
String from =
String.format("%s/v%s/%s.zip", GITHUB_DOWNLOAD_PREFIX, getSDKVersion(), buildFileName());
if (!Strings.isNullOrEmpty(options.getPrismLocation())) {
checkArgument(
!options.getPrismLocation().startsWith(GITHUB_TAG_PREFIX),
"Provided --prismLocation URL is not an Apache Beam Github "
+ "Release page URL or download URL: ",
from);
from = options.getPrismLocation();
}
String fromFileName = getNameWithoutExtension(from);
Path to = Paths.get(userHome(), PRISM_BIN_PATH, fromFileName);
if (Files.exists(to)) {
return to.toString();
}
createDirectoryIfNeeded(to);
if (from.startsWith("http")) {
String result = resolve(new URL(from), to);
checkState(Files.exists(to), "Resolved location does not exist: %s", result);
return result;
}
String result = resolve(Paths.get(from), to);
checkState(Files.exists(to), "Resolved location does not exist: %s", result);
return result;
} | class PrismLocator {
static final String OS_NAME_PROPERTY = "os.name";
static final String ARCH_PROPERTY = "os.arch";
static final String USER_HOME_PROPERTY = "user.home";
private static final String ZIP_EXT = "zip";
private static final String SHA512_EXT = "sha512";
private static final ReleaseInfo RELEASE_INFO = ReleaseInfo.getReleaseInfo();
private static final String PRISM_BIN_PATH = ".apache_beam/cache/prism/bin";
private static final Set<PosixFilePermission> PERMS =
PosixFilePermissions.fromString("rwxr-xr-x");
private static final String GITHUB_DOWNLOAD_PREFIX =
"https:
private static final String GITHUB_TAG_PREFIX = "https:
private final PrismPipelineOptions options;
PrismLocator(PrismPipelineOptions options) {
this.options = options;
}
/**
* Downloads and prepares a Prism executable for use with the {@link PrismRunner}, executed by the
* {@link PrismExecutor}. The returned {@link String} is the absolute path to the Prism
* executable.
*/
/**
 * Fetches the artifact at {@code from} into {@code to} and marks it executable.
 *
 * <p>Official GitHub release downloads ship a companion ".sha512" file, which is used
 * to verify the artifact before installation; other URLs have no checksum to check.
 * Zip archives are expanded while downloading, anything else is copied verbatim.
 */
private String resolve(URL from, Path to) throws IOException {
    if (from.toString().startsWith(GITHUB_DOWNLOAD_PREFIX)) {
        validateShaSum512(new URL(from + "." + SHA512_EXT), from);
    }
    // Pick the transfer strategy from the URL's path suffix.
    BiConsumer<URL, Path> fetch =
            from.getPath().endsWith(ZIP_EXT) ? PrismLocator::unzip : PrismLocator::download;
    fetch.accept(from, to);
    // The result is the prism binary, so it must be executable.
    Files.setPosixFilePermissions(to, PERMS);
    return to.toString();
}
/**
 * Installs a locally supplied prism artifact at {@code from} into {@code to} and marks
 * it executable.
 *
 * <p>Two fixes relative to the original:
 * <ul>
 *   <li>{@code Path.endsWith} matches whole path <em>elements</em>, so a file named
 *       "foo.zip" never matched "zip"; the string suffix is compared instead,
 *       mirroring the check in the URL overload.</li>
 *   <li>The original copied the raw bytes a second time after {@code copyFn} ran,
 *       clobbering an unzipped result with the raw archive bytes; the redundant copy
 *       is removed and the source stream is now closed.</li>
 * </ul>
 */
private String resolve(Path from, Path to) throws IOException {
    BiConsumer<InputStream, Path> copyFn = PrismLocator::copy;
    if (from.toString().endsWith(ZIP_EXT)) {
        copyFn = PrismLocator::unzip;
    }
    try (InputStream in = from.toUri().toURL().openStream()) {
        copyFn.accept(in, to);
    }
    Files.setPosixFilePermissions(to, PERMS);
    return to.toString();
}
/**
 * Builds the release artifact base name for the current platform, mirroring the
 * naming scheme of Beam's published prism binaries:
 * {@code apache_beam-v<version>-prism-<os>-<arch>}.
 */
String buildFileName() {
    return String.format("apache_beam-v%s-prism-%s-%s", getSDKVersion(), os(), arch());
}
/**
 * URL adapter for the stream-based {@link #unzip(InputStream, Path)}; rethrows I/O
 * failures unchecked to fit the {@code BiConsumer} signature.
 *
 * <p>Fix: the opened URL stream is now closed via try-with-resources (the original
 * never closed it).
 */
private static void unzip(URL from, Path to) {
    try (InputStream in = from.openStream()) {
        unzip(in, to);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
/**
 * Unzips {@code from}, concatenating the contents of every archive entry into the
 * single file {@code to} (the prism archive holds one binary). I/O failures are
 * rethrown unchecked to fit the {@code BiConsumer} signature.
 *
 * <p>Fix: {@code ZipEntry.getSize()} returns -1 when the uncompressed size is not
 * recorded, and {@code ByteStreams.limit} rejects negative limits. Since
 * {@code ZipInputStream} already reports EOF at the end of the current entry, the
 * entry stream is read directly when the size is unknown.
 */
private static void unzip(InputStream from, Path to) {
    try (OutputStream out = Files.newOutputStream(to)) {
        ZipInputStream zis = new ZipInputStream(from);
        for (ZipEntry entry = zis.getNextEntry(); entry != null; entry = zis.getNextEntry()) {
            long size = entry.getSize();
            InputStream in = size >= 0 ? ByteStreams.limit(zis, size) : zis;
            ByteStreams.copy(in, out);
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
/**
 * Writes the stream verbatim to {@code to}, rethrowing I/O failures unchecked.
 *
 * <p>Fix: the output stream returned by {@code Files.newOutputStream} was never
 * closed, which can leave the file handle open and buffered bytes unflushed; it is
 * now closed via try-with-resources.
 */
private static void copy(InputStream from, Path to) {
    try (OutputStream out = Files.newOutputStream(to)) {
        ByteStreams.copy(from, out);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
/**
 * Streams the remote artifact at {@code from} into the file {@code to}, rethrowing
 * I/O failures unchecked.
 *
 * <p>Fix: both the URL input stream and the file output stream were leaked by the
 * original; both are now closed via try-with-resources.
 */
private static void download(URL from, Path to) {
    try (InputStream in = from.openStream();
         OutputStream out = Files.newOutputStream(to)) {
        ByteStreams.copy(in, out);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
/**
 * Verifies that the SHA-512 of the artifact at {@code source} matches the published
 * checksum at {@code shaSumReference}. The checksum file follows the conventional
 * "&lt;hex digest&gt; &lt;filename&gt;" layout, so only the first whitespace-delimited
 * token is compared.
 *
 * <p>Fix: {@code source.openStream()} was opened without ever being closed; both
 * streams are now managed with try-with-resources.
 *
 * @throws IOException if either URL cannot be read
 * @throws IllegalStateException if the checksum file is empty or the digests differ
 */
private static void validateShaSum512(URL shaSumReference, URL source) throws IOException {
    try (InputStream in = shaSumReference.openStream()) {
        String rawContent = new String(ByteStreams.toByteArray(in), StandardCharsets.UTF_8);
        checkState(!Strings.isNullOrEmpty(rawContent));
        String reference = "";
        Iterator<String> split = Splitter.onPattern("\\s+").split(rawContent).iterator();
        if (split.hasNext()) {
            reference = split.next();
        }
        checkState(!Strings.isNullOrEmpty(reference));
        HashCode toVerify;
        try (InputStream artifact = source.openStream()) {
            toVerify = Hashing.sha512().hashBytes(ByteStreams.toByteArray(artifact));
        }
        checkState(
            reference.equals(toVerify.toString()),
            "Expected sha512 derived from: %s does not equal expected: %s, got: %s",
            source,
            reference,
            toVerify.toString());
    }
}
/**
 * Delegates to vendored Guava: strips the directory part and the final ".ext"
 * suffix, e.g. {@code "/a/b/file.zip" -> "file"}.
 */
private static String getNameWithoutExtension(String path) {
    String bareName =
            org.apache.beam.vendor.guava.v32_1_2_jre.com.google.common.io.Files
                .getNameWithoutExtension(path);
    return bareName;
}
/**
 * Returns the prism version to install: a user-supplied override wins, otherwise the
 * SDK's own release version is used.
 */
private String getSDKVersion() {
    String override = options.getPrismVersionOverride();
    return Strings.isNullOrEmpty(override) ? RELEASE_INFO.getSdkVersion() : override;
}
/**
 * Normalizes the JVM's OS name to the label used in prism artifact names:
 * anything containing "mac" becomes "darwin", other values are used verbatim.
 */
private static String os() {
    String osName = mustGetPropertyAsLowerCase(OS_NAME_PROPERTY);
    return osName.contains("mac") ? "darwin" : osName;
}
/**
 * Maps the JVM's architecture name to the label used in prism artifact names:
 * anything containing "aarch" (e.g. "aarch64") becomes "arm64", other values are
 * used verbatim.
 */
private static String arch() {
    String archName = mustGetPropertyAsLowerCase(ARCH_PROPERTY);
    return archName.contains("aarch") ? "arm64" : archName;
}
/**
 * Base directory under which the prism cache ({@code PRISM_BIN_PATH}) is created.
 *
 * <p>NOTE(review): lower-casing a filesystem path is suspect on case-sensitive
 * systems — confirm this is intended before relying on it.
 */
private static String userHome() {
    return mustGetPropertyAsLowerCase(USER_HOME_PROPERTY);
}
/**
 * Returns the named system property lower-cased, failing fast with an
 * {@code IllegalStateException} when it is not set.
 *
 * <p>Fix: lower-casing now uses a fixed locale so the result does not depend on the
 * default locale (e.g. under a Turkish locale, {@code "I".toLowerCase()} yields a
 * dotless ı, which would break the "mac"/"aarch" substring matching).
 */
private static String mustGetPropertyAsLowerCase(String name) {
    return checkStateNotNull(System.getProperty(name), "System property: " + name + " not set")
        .toLowerCase(java.util.Locale.ROOT);
}
/**
 * Ensures the parent directory of {@code path} exists; a no-op when {@code path} has
 * no parent.
 *
 * <p>Fix: the explicit {@code parent.toFile().exists()} probe was redundant (and a
 * TOCTOU race) because {@code Files.createDirectories} is already a no-op for
 * directories that exist; the later version of this class in the file drops the probe
 * the same way.
 */
private static void createDirectoryIfNeeded(Path path) throws IOException {
    Path parent = path.getParent();
    if (parent != null) {
        Files.createDirectories(parent);
    }
}
} | class PrismLocator {
static final String OS_NAME_PROPERTY = "os.name";
static final String ARCH_PROPERTY = "os.arch";
static final String USER_HOME_PROPERTY = "user.home";
private static final String ZIP_EXT = "zip";
private static final ReleaseInfo RELEASE_INFO = ReleaseInfo.getReleaseInfo();
private static final String PRISM_BIN_PATH = ".apache_beam/cache/prism/bin";
private static final Set<PosixFilePermission> PERMS =
PosixFilePermissions.fromString("rwxr-xr-x");
private static final String GITHUB_DOWNLOAD_PREFIX =
"https:
private static final String GITHUB_TAG_PREFIX = "https:
private final PrismPipelineOptions options;
PrismLocator(PrismPipelineOptions options) {
this.options = options;
}
/**
* Downloads and prepares a Prism executable for use with the {@link PrismRunner}. The returned
* {@link String} is the absolute path to the Prism executable.
*/
/**
 * Absolute cache location for prism binaries:
 * {@code <user home>/.apache_beam/cache/prism/bin}.
 */
static Path prismBinDirectory() {
    return Paths.get(userHome(), PRISM_BIN_PATH);
}
private String resolve(URL from, Path to) throws IOException {
BiConsumer<URL, Path> downloadFn = PrismLocator::download;
if (from.getPath().endsWith(ZIP_EXT)) {
downloadFn = PrismLocator::unzip;
}
downloadFn.accept(from, to);
Files.setPosixFilePermissions(to, PERMS);
return to.toString();
}
/**
 * Installs a locally supplied prism artifact at {@code from} into {@code to} and marks
 * it executable.
 *
 * <p>Two fixes relative to the original (the same defects exist in the earlier copy of
 * this method in the file):
 * <ul>
 *   <li>{@code Path.endsWith} matches whole path <em>elements</em>, so a file named
 *       "foo.zip" never matched "zip"; the string suffix is compared instead,
 *       mirroring the check in the URL overload.</li>
 *   <li>The original copied the raw bytes a second time after {@code copyFn} ran,
 *       clobbering an unzipped result with the raw archive bytes; the redundant copy
 *       is removed and the source stream is now closed.</li>
 * </ul>
 */
private String resolve(Path from, Path to) throws IOException {
    BiConsumer<InputStream, Path> copyFn = PrismLocator::copy;
    if (from.toString().endsWith(ZIP_EXT)) {
        copyFn = PrismLocator::unzip;
    }
    try (InputStream in = from.toUri().toURL().openStream()) {
        copyFn.accept(in, to);
    }
    Files.setPosixFilePermissions(to, PERMS);
    return to.toString();
}
String buildFileName() {
String version = getSDKVersion();
return String.format("apache_beam-v%s-prism-%s-%s", version, os(), arch());
}
private static void unzip(URL from, Path to) {
try {
unzip(from.openStream(), to);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
private static void unzip(InputStream from, Path to) {
try (OutputStream out = Files.newOutputStream(to)) {
ZipInputStream zis = new ZipInputStream(from);
for (ZipEntry entry = zis.getNextEntry(); entry != null; entry = zis.getNextEntry()) {
InputStream in = ByteStreams.limit(zis, entry.getSize());
ByteStreams.copy(in, out);
}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
private static void copy(InputStream from, Path to) {
try {
ByteStreams.copy(from, Files.newOutputStream(to));
} catch (IOException e) {
throw new RuntimeException(e);
}
}
private static void download(URL from, Path to) {
try {
ByteStreams.copy(from.openStream(), Files.newOutputStream(to));
} catch (IOException e) {
throw new RuntimeException(e);
}
}
private static String getNameWithoutExtension(String path) {
return org.apache.beam.vendor.guava.v32_1_2_jre.com.google.common.io.Files
.getNameWithoutExtension(path);
}
private String getSDKVersion() {
if (Strings.isNullOrEmpty(options.getPrismVersionOverride())) {
return RELEASE_INFO.getSdkVersion();
}
return options.getPrismVersionOverride();
}
private static String os() {
String result = mustGetPropertyAsLowerCase(OS_NAME_PROPERTY);
if (result.contains("mac")) {
return "darwin";
}
return result;
}
private static String arch() {
String result = mustGetPropertyAsLowerCase(ARCH_PROPERTY);
if (result.contains("aarch")) {
return "arm64";
}
return result;
}
private static String userHome() {
return mustGetPropertyAsLowerCase(USER_HOME_PROPERTY);
}
private static String mustGetPropertyAsLowerCase(String name) {
return checkStateNotNull(System.getProperty(name), "System property: " + name + " not set")
.toLowerCase();
}
private static void createDirectoryIfNeeded(Path path) throws IOException {
Path parent = path.getParent();
if (parent == null) {
return;
}
Files.createDirectories(parent);
}
} |
Yeah, agree, will fix in a forthcoming PR | private String inProgressOutput(JsonNode hosts) {
ArrayList<String> statusPerHost = new ArrayList<>();
for (JsonNode host : hosts) {
StringBuilder sb = new StringBuilder();
String status = host.get("status").asText();
sb.append(host.get("hostname").asText()).append(": ").append(status);
if (status.equals(statusUnknown))
sb.append(" (").append(host.get("message").asText()).append(")");
else if (status.equals(statusInProgress)) {
JsonNode fileReferencesArray = host.get("fileReferences");
int size = fileReferencesArray.size();
int finished = 0;
for (JsonNode element : fileReferencesArray) {
for (Iterator<Map.Entry<String, JsonNode>> it = element.fields(); it.hasNext(); ) {
Map.Entry<String, JsonNode> fileReferenceStatus = it.next();
if (fileReferenceStatus.getValue().asDouble() == 1.0)
finished++;
}
}
sb.append(" (" + finished + " of " + size + " finished)");
}
statusPerHost.add(sb.toString());
}
return String.join("\n", statusPerHost);
} | else if (status.equals(statusInProgress)) { | private String inProgressOutput(JsonNode hosts) {
ArrayList<String> statusPerHost = new ArrayList<>();
for (JsonNode host : hosts) {
StringBuilder sb = new StringBuilder();
String status = host.get("status").asText();
sb.append(host.get("hostname").asText()).append(": ").append(status);
if (status.equals(statusUnknown))
sb.append(" (").append(host.get("message").asText()).append(")");
else if (status.equals(statusInProgress)) {
JsonNode fileReferencesArray = host.get("fileReferences");
int size = fileReferencesArray.size();
int finished = 0;
for (JsonNode element : fileReferencesArray) {
for (Iterator<Map.Entry<String, JsonNode>> it = element.fields(); it.hasNext(); ) {
Map.Entry<String, JsonNode> fileReferenceStatus = it.next();
if (fileReferenceStatus.getValue().asDouble() == 1.0)
finished++;
}
}
sb.append(" (" + finished + " of " + size + " finished)");
}
statusPerHost.add(sb.toString());
}
return String.join("\n", statusPerHost);
} | class FileDistributionStatusClient {
private static final String statusUnknown = "UNKNOWN";
private static final String statusInProgress = "IN_PROGRESS";
private static final String statusFinished = "FINISHED";
private final String tenantName;
private final String applicationName;
private final String instanceName;
private final String environment;
private final String region;
private final double timeout;
private final boolean debug;
/**
 * Copies the parsed command line options into final fields; {@code arguments} is
 * expected to have been validated by {@link CommandLineArguments#build}.
 */
FileDistributionStatusClient(CommandLineArguments arguments) {
    tenantName = arguments.getTenantName();
    applicationName = arguments.getApplicationName();
    instanceName = arguments.getInstanceName();
    environment = arguments.getEnvironment();
    region = arguments.getRegion();
    timeout = arguments.getTimeout();
    debug = arguments.getDebugFlag();
}
/**
 * Entry point: parses the command line, queries the config server and prints the
 * file distribution status. Any failure is reported on stderr and exits with
 * status 1.
 */
public static void main(String[] args) {
    try {
        CommandLineArguments arguments = CommandLineArguments.build(args);
        new FileDistributionStatusClient(arguments).run();
    } catch (Exception e) {
        System.err.println(e.getMessage());
        System.exit(1);
    }
}
/**
 * Fetches the status JSON from the config server and prints a human readable
 * summary to stdout.
 */
public void run() {
    System.out.println(parseAndGenerateOutput(doHttpRequest()));
}
/**
 * Performs the GET against the config server's file distribution status API and
 * returns the response body on HTTP 200; any other status or I/O failure is rethrown
 * as a {@code RuntimeException}. Connect, connection-request and socket timeouts all
 * equal the user-supplied timeout.
 *
 * <p>Fix: the {@code CloseableHttpClient} and {@code CloseableHttpResponse} were never
 * closed; both are now managed with try-with-resources.
 */
private String doHttpRequest() {
    int timeoutInMillis = (int) (timeout * 1000);
    RequestConfig config = RequestConfig.custom()
            .setConnectTimeout(timeoutInMillis)
            .setConnectionRequestTimeout(timeoutInMillis)
            .setSocketTimeout(timeoutInMillis)
            .build();
    URI statusUri = createStatusApiUri();
    if (debug)
        System.out.println("URI:" + statusUri);
    try (CloseableHttpClient httpClient =
                 HttpClientBuilder.create().setDefaultRequestConfig(config).build();
         CloseableHttpResponse response = httpClient.execute(new HttpGet(statusUri))) {
        String content = EntityUtils.toString(response.getEntity());
        if (debug)
            System.out.println("response:" + content);
        if (response.getStatusLine().getStatusCode() == 200) {
            return content;
        }
        throw new RuntimeException("Failed to get status for request " + statusUri + ": " +
                response.getStatusLine() + ": " + content);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
/**
 * Turns the status JSON returned by the config server into a human readable message.
 * Package-private for testing.
 *
 * @throws RuntimeException if the JSON cannot be parsed or carries an unknown status
 */
String parseAndGenerateOutput(String json) {
    JsonNode jsonNode;
    try {
        jsonNode = new ObjectMapper().readTree(json);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    String status = jsonNode.get("status").asText();
    if (statusUnknown.equals(status)) {
        return "File distribution status unknown: " + jsonNode.get("message").asText();
    }
    if (statusInProgress.equals(status)) {
        return "File distribution in progress:\n" + inProgressOutput(jsonNode.get("hosts"));
    }
    if (statusFinished.equals(status)) {
        return "File distribution finished";
    }
    throw new RuntimeException("Unknown status " + status);
}
/**
 * Builds the URI of the local config server's file distribution status endpoint
 * (application v2 API on localhost:19071), propagating the user-supplied timeout as a
 * query parameter.
 */
private URI createStatusApiUri() {
    String path = "/application/v2/tenant/" + tenantName
            + "/application/" + applicationName
            + "/environment/" + environment
            + "/region/" + region
            + "/instance/" + instanceName
            + "/filedistributionstatus";
    try {
        return new URIBuilder()
                .setScheme("http")
                .setHost("localhost")
                .setPort(19071)
                .setPath(path)
                .addParameter("timeout", String.valueOf(timeout))
                .build();
    } catch (URISyntaxException e) {
        throw new RuntimeException(e);
    }
}
@Command(name = "vespa-status-filedistribution", description = "Tool for getting file distribution status.")
public static class CommandLineArguments {
/**
 * Parses and validates the command line. On any parse or validation error a message
 * is printed and the JVM terminates, so callers always receive a non-null, validated
 * instance.
 */
static CommandLineArguments build(String[] args) {
    CommandLineArguments arguments = null;
    try {
        arguments = SingleCommand.singleCommand(CommandLineArguments.class).parse(args);
    } catch (Exception e) {
        System.err.println(e.getMessage());
        System.err.println("Use --help to show usage.\n");
        System.exit(1);
    }
    // Safe to dereference: a parse failure above never returns (System.exit(1)).
    if (arguments.helpOption.showHelpIfRequested()) {
        System.exit(0);
    }
    if (arguments.getTenantName() == null) {
        System.err.println("'--tenant' not set.");
        System.exit(1);
    }
    if (arguments.getApplicationName() == null) {
        System.err.println("'--application' not set.");
        System.exit(1);
    }
    return arguments;
}
@Inject
HelpOption helpOption;
@Option(name = {"--tenant"},
description = "tenant name")
private String tenantNameArg;
@Option(name = {"--application"},
description = "application name")
private String applicationNameArg;
@Option(name = {"--instance"},
description = "instance name")
private String instanceNameArg = "default";
@Option(name = {"--environment"},
description = "environment name")
private String environmentArg = "prod";
@Option(name = {"--region"},
description = "region name")
private String regionArg = "default";
@Option(name = {"--timeout"},
description = "The timeout (in seconds).")
private double timeoutArg = 5;
@Option(name = {"--debug"},
description = "Print debug log.")
private boolean debugArg;
public String getTenantName() { return tenantNameArg; }
public String getApplicationName() { return applicationNameArg; }
public String getInstanceName() { return instanceNameArg; }
public String getEnvironment() { return environmentArg; }
public String getRegion() { return regionArg; }
public double getTimeout() { return timeoutArg; }
public boolean getDebugFlag() { return debugArg; }
}
} | class FileDistributionStatusClient {
private static final String statusUnknown = "UNKNOWN";
private static final String statusInProgress = "IN_PROGRESS";
private static final String statusFinished = "FINISHED";
private final String tenantName;
private final String applicationName;
private final String instanceName;
private final String environment;
private final String region;
private final double timeout;
private final boolean debug;
FileDistributionStatusClient(CommandLineArguments arguments) {
tenantName = arguments.getTenantName();
applicationName = arguments.getApplicationName();
instanceName = arguments.getInstanceName();
environment = arguments.getEnvironment();
region = arguments.getRegion();
timeout = arguments.getTimeout();
debug = arguments.getDebugFlag();
}
public static void main(String[] args) {
try {
new FileDistributionStatusClient(CommandLineArguments.build(args)).run();
} catch (Exception e) {
System.err.println(e.getMessage());
System.exit(1);
}
}
public void run() {
String json = doHttpRequest();
System.out.println(parseAndGenerateOutput(json));
}
private String doHttpRequest() {
int timeoutInMillis = (int) (timeout * 1000);
RequestConfig config = RequestConfig.custom()
.setConnectTimeout(timeoutInMillis)
.setConnectionRequestTimeout(timeoutInMillis)
.setSocketTimeout(timeoutInMillis)
.build();
CloseableHttpClient httpClient = HttpClientBuilder.create().setDefaultRequestConfig(config).build();
URI statusUri = createStatusApiUri();
if (debug)
System.out.println("URI:" + statusUri);
try {
CloseableHttpResponse response = httpClient.execute(new HttpGet(statusUri));
String content = EntityUtils.toString(response.getEntity());
if (debug)
System.out.println("response:" + content);
if (response.getStatusLine().getStatusCode() == 200) {
return content;
} else {
throw new RuntimeException("Failed to get status for request " + statusUri + ": " +
response.getStatusLine() + ": " + content);
}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
String parseAndGenerateOutput(String json) {
ObjectMapper objectMapper = new ObjectMapper();
JsonNode jsonNode;
try {
jsonNode = objectMapper.readTree(json);
} catch (IOException e) {
throw new RuntimeException(e);
}
String status = jsonNode.get("status").asText();
switch (status) {
case statusUnknown:
return "File distribution status unknown: " + jsonNode.get("message").asText();
case statusInProgress:
return "File distribution in progress:\n" + inProgressOutput(jsonNode.get("hosts"));
case statusFinished:
return "File distribution finished";
default:
throw new RuntimeException("Unknown status " + status);
}
}
private URI createStatusApiUri() {
String path = String.format("/application/v2/tenant/%s/application/%s/environment/%s/region/%s/instance/%s/filedistributionstatus",
tenantName, applicationName, environment, region, instanceName);
try {
return new URIBuilder()
.setScheme("http")
.setHost("localhost")
.setPort(19071)
.setPath(path)
.addParameter("timeout", String.valueOf(timeout))
.build();
} catch (URISyntaxException e) {
throw new RuntimeException(e);
}
}
@Command(name = "vespa-status-filedistribution", description = "Tool for getting file distribution status.")
public static class CommandLineArguments {
static CommandLineArguments build(String[] args) {
CommandLineArguments arguments = null;
try {
arguments = SingleCommand.singleCommand(CommandLineArguments.class).parse(args);
} catch (Exception e) {
System.err.println(e.getMessage());
System.err.println("Use --help to show usage.\n");
System.exit(1);
}
if (arguments.helpOption.showHelpIfRequested()) {
System.exit(0);
}
if (arguments.getTenantName() == null) {
System.err.println("'--tenant' not set.");
System.exit(1);
}
if (arguments.getApplicationName() == null) {
System.err.println("'--application' not set.");
System.exit(1);
}
return arguments;
}
@Inject
HelpOption helpOption;
@Option(name = {"--tenant"},
description = "tenant name")
private String tenantNameArg;
@Option(name = {"--application"},
description = "application name")
private String applicationNameArg;
@Option(name = {"--instance"},
description = "instance name")
private String instanceNameArg = "default";
@Option(name = {"--environment"},
description = "environment name")
private String environmentArg = "prod";
@Option(name = {"--region"},
description = "region name")
private String regionArg = "default";
@Option(name = {"--timeout"},
description = "The timeout (in seconds).")
private double timeoutArg = 5;
@Option(name = {"--debug"},
description = "Print debug log.")
private boolean debugArg;
public String getTenantName() { return tenantNameArg; }
public String getApplicationName() { return applicationNameArg; }
public String getInstanceName() { return instanceNameArg; }
public String getEnvironment() { return environmentArg; }
public String getRegion() { return regionArg; }
public double getTimeout() { return timeoutArg; }
public boolean getDebugFlag() { return debugArg; }
}
} |
Same with this one. I'm not sure if Yijun fixed this too. | public ServiceBusReceiverAsyncClient buildAsyncClient() {
final MessagingEntityType entityType = validateEntityPaths(logger, connectionStringEntityName, topicName,
queueName);
final String entityPath = getEntityPath(logger, entityType, queueName, topicName, subscriptionName,
SubQueue.NONE);
validateAndThrow(prefetchCount);
validateAndThrow(maxAutoLockRenewDuration);
final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
sessionId, isRollingSessionReceiver(), maxConcurrentSessions, maxAutoLockRenewDuration);
final ServiceBusSessionManager sessionManager = new ServiceBusSessionManager(entityPath, entityType,
connectionProcessor, tracerProvider, messageSerializer, receiverOptions);
return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath,
entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT,
tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose,
maxAutoLockRenewDuration, sessionManager);
} | validateAndThrow(prefetchCount); | public ServiceBusReceiverAsyncClient buildAsyncClient() {
final MessagingEntityType entityType = validateEntityPaths(logger, connectionStringEntityName, topicName,
queueName);
final String entityPath = getEntityPath(logger, entityType, queueName, topicName, subscriptionName,
SubQueue.NONE);
final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer);
if (receiveMode == ReceiveMode.RECEIVE_AND_DELETE) {
maxAutoLockRenewDuration = Duration.ZERO;
}
final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
sessionId, isRollingSessionReceiver(), maxConcurrentSessions, maxAutoLockRenewDuration);
final ServiceBusSessionManager sessionManager = new ServiceBusSessionManager(entityPath, entityType,
connectionProcessor, tracerProvider, messageSerializer, receiverOptions);
return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath,
entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT,
tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, sessionManager);
} | class ServiceBusSessionReceiverClientBuilder {
private Integer maxConcurrentSessions = null;
private int prefetchCount = DEFAULT_PREFETCH_COUNT;
private String queueName;
private ReceiveMode receiveMode = ReceiveMode.PEEK_LOCK;
private String sessionId;
private String subscriptionName;
private String topicName;
private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION;
private ServiceBusSessionReceiverClientBuilder() {
}
/**
* Sets the amount of time to continue auto-renewing the session lock. Setting {@link Duration
* {@code null} disables auto-renewal.
*
* @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the session lock.
* {@link Duration
*
* @return The updated {@link ServiceBusSessionReceiverClientBuilder} object.
*/
public ServiceBusSessionReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
    // Fail fast on invalid (negative) durations, matching the validateAndThrow
    // check buildAsyncClient() applies to this value later; null keeps meaning
    // "auto-renewal disabled". The sibling version of this setter later in the
    // file already validates the same way.
    validateAndThrow(maxAutoLockRenewDuration);
    this.maxAutoLockRenewDuration = maxAutoLockRenewDuration;
    return this;
}
/**
* Enables session processing roll-over by processing at most {@code maxConcurrentSessions}.
*
* @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time.
*
* @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
* @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1.
*/
public ServiceBusSessionReceiverClientBuilder maxConcurrentSessions(int maxConcurrentSessions) {
    // Accept only a positive session cap; anything else is a caller error.
    if (maxConcurrentSessions >= 1) {
        this.maxConcurrentSessions = maxConcurrentSessions;
        return this;
    }
    throw logger.logExceptionAsError(new IllegalArgumentException(
        "maxConcurrentSessions cannot be less than 1."));
}
/**
* Sets the prefetch count of the receiver. For both {@link ReceiveMode
* ReceiveMode
*
* Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when
* and before the application asks for one using {@link ServiceBusReceiverAsyncClient
* Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch off.
*
* @param prefetchCount The prefetch count.
*
* @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
*/
public ServiceBusSessionReceiverClientBuilder prefetchCount(int prefetchCount) {
    // Shared validator rejects invalid counts before the value is recorded.
    validateAndThrow(prefetchCount);
    this.prefetchCount = prefetchCount;
    return this;
}
/**
* Sets the name of the queue to create a receiver for.
*
* @param queueName Name of the queue.
*
* @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
*/
public ServiceBusSessionReceiverClientBuilder queueName(String queueName) {
this.queueName = queueName;
return this;
}
/**
* Sets the receive mode for the receiver.
*
* @param receiveMode Mode for receiving messages.
*
* @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
*/
public ServiceBusSessionReceiverClientBuilder receiveMode(ReceiveMode receiveMode) {
this.receiveMode = receiveMode;
return this;
}
/**
* Sets the session id.
*
* @param sessionId session id.
*
* @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
*/
public ServiceBusSessionReceiverClientBuilder sessionId(String sessionId) {
this.sessionId = sessionId;
return this;
}
/**
* Sets the name of the subscription in the topic to listen to. <b>{@link
* </b>
*
* @param subscriptionName Name of the subscription.
*
* @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
* @see
*/
public ServiceBusSessionReceiverClientBuilder subscriptionName(String subscriptionName) {
this.subscriptionName = subscriptionName;
return this;
}
/**
* Sets the name of the topic. <b>{@link
*
* @param topicName Name of the topic.
*
* @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
* @see
*/
public ServiceBusSessionReceiverClientBuilder topicName(String topicName) {
this.topicName = topicName;
return this;
}
/**
* Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading
* {@link ServiceBusMessage messages} from a specific queue or topic.
*
* @return An new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or topic.
* @throws IllegalStateException if {@link
* topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link
*
* {@link
*
* @throws IllegalArgumentException Queue or topic name are not set via {@link
* queueName()} or {@link
* @throws IllegalArgumentException {
*/
/**
* Creates a <b>synchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading
* {@link ServiceBusMessage messages} from a specific queue or topic.
*
* @return An new {@link ServiceBusReceiverClient} that receives messages from a queue or topic.
* @throws IllegalStateException if {@link
* topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link
*
* {@link
*
* @throws IllegalArgumentException Queue or topic name are not set via {@link
* queueName()} or {@link
*/
/**
 * Builds the synchronous session receiver by wrapping the asynchronous client;
 * blocking operations wait for at most the retry policy's try-timeout.
 */
public ServiceBusReceiverClient buildClient() {
    ServiceBusReceiverAsyncClient asyncClient = buildAsyncClient();
    return new ServiceBusReceiverClient(asyncClient, retryOptions.getTryTimeout());
}
/**
* This is a rolling session receiver only if maxConcurrentSessions is > 0 AND sessionId is null or empty. If
* there is a sessionId, this is going to be a single, named session receiver.
*
* @return {@code true} if this is an unnamed rolling session receiver; {@code false} otherwise.
*/
/**
 * A rolling (unnamed) session receiver applies only when a concurrent-session cap
 * was configured and no specific session id was requested; a session id means a
 * single, named session receiver.
 *
 * @return {@code true} for an unnamed rolling session receiver, {@code false} otherwise
 */
private boolean isRollingSessionReceiver() {
    if (maxConcurrentSessions == null) {
        return false;
    }
    if (maxConcurrentSessions >= 1) {
        return CoreUtils.isNullOrEmpty(sessionId);
    }
    throw logger.logExceptionAsError(
        new IllegalArgumentException("Maximum number of concurrent sessions must be positive."));
}
} | class ServiceBusSessionReceiverClientBuilder {
private Integer maxConcurrentSessions = null;
private int prefetchCount = DEFAULT_PREFETCH_COUNT;
private String queueName;
private ReceiveMode receiveMode = ReceiveMode.PEEK_LOCK;
private String sessionId;
private String subscriptionName;
private String topicName;
private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION;
private ServiceBusSessionReceiverClientBuilder() {
}
/**
* Sets the amount of time to continue auto-renewing the session lock. Setting {@link Duration
* {@code null} disables auto-renewal. For {@link ReceiveMode
* auto-renewal is disabled.
*
* @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the session lock.
* {@link Duration
*
* @return The updated {@link ServiceBusSessionReceiverClientBuilder} object.
* @throws IllegalArgumentException If {code maxAutoLockRenewDuration} is negative.
*/
public ServiceBusSessionReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
validateAndThrow(maxAutoLockRenewDuration);
this.maxAutoLockRenewDuration = maxAutoLockRenewDuration;
return this;
}
/**
* Enables session processing roll-over by processing at most {@code maxConcurrentSessions}.
*
* @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time.
*
* @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
* @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1.
*/
public ServiceBusSessionReceiverClientBuilder maxConcurrentSessions(int maxConcurrentSessions) {
if (maxConcurrentSessions < 1) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"maxConcurrentSessions cannot be less than 1."));
}
this.maxConcurrentSessions = maxConcurrentSessions;
return this;
}
/**
 * Sets the prefetch count of the receiver. Prefetch speeds up the message flow by aiming to have a
 * message readily available for local retrieval when and before the application asks for one.
 * Setting a non-zero value will prefetch that number of messages; setting the value to zero turns
 * prefetch off. (The original javadoc referenced both {@link ReceiveMode} values and a
 * {@link ServiceBusReceiverAsyncClient} receive method, but that text is truncated in this copy.)
 *
 * @param prefetchCount The prefetch count.
 *
 * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
 * @throws IllegalArgumentException If {@code prefetchCount} is negative.
 */
public ServiceBusSessionReceiverClientBuilder prefetchCount(int prefetchCount) {
// validateAndThrow is defined elsewhere in this class; presumably it rejects negative values — see @throws.
validateAndThrow(prefetchCount);
this.prefetchCount = prefetchCount;
return this;
}
/**
 * Sets the name of the queue to create a receiver for. Per the {@link #buildClient()} javadoc, a
 * queue name and a topic name are mutually exclusive: exactly one of the two must be set before
 * building.
 *
 * @param queueName Name of the queue.
 *
 * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
 */
public ServiceBusSessionReceiverClientBuilder queueName(String queueName) {
this.queueName = queueName;
return this;
}
/**
 * Sets the receive mode for the receiver.
 *
 * @param receiveMode Mode for receiving messages.
 *
 * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
 */
public ServiceBusSessionReceiverClientBuilder receiveMode(ReceiveMode receiveMode) {
// No validation here; the value is checked, if at all, when the client is built.
this.receiveMode = receiveMode;
return this;
}
/**
 * Sets the session id. When left null or empty while {@code maxConcurrentSessions} is configured,
 * the receiver becomes an unnamed "rolling" session receiver (see {@code isRollingSessionReceiver()}).
 *
 * @param sessionId session id.
 *
 * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
 */
public ServiceBusSessionReceiverClientBuilder sessionId(String sessionId) {
this.sessionId = sessionId;
return this;
}
/**
 * Sets the name of the subscription in the topic to listen to. (The original javadoc carried a
 * bolded cross-reference here that is truncated in this copy — most likely
 * "{@link #topicName(String)} must also be set"; confirm against the published Azure SDK docs.)
 *
 * @param subscriptionName Name of the subscription.
 *
 * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
 * @see #topicName(String)
 */
public ServiceBusSessionReceiverClientBuilder subscriptionName(String subscriptionName) {
this.subscriptionName = subscriptionName;
return this;
}
/**
 * Sets the name of the topic. (The original javadoc carried a bolded cross-reference here that is
 * truncated in this copy — most likely "{@link #subscriptionName(String)} must also be set";
 * confirm against the published Azure SDK docs.)
 *
 * @param topicName Name of the topic.
 *
 * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
 * @see #subscriptionName(String)
 */
public ServiceBusSessionReceiverClientBuilder topicName(String topicName) {
this.topicName = topicName;
return this;
}
/**
* Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading
* {@link ServiceBusMessage messages} from a specific queue or topic.
*
* @return A new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or topic.
* @throws IllegalStateException if {@link
* topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link
*
* {@link
*
* @throws IllegalArgumentException Queue or topic name are not set via {@link
* queueName()} or {@link
*/
/**
 * Creates a <b>synchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading
 * {@link ServiceBusMessage messages} from a specific queue or topic.
 *
 * @return A new {@link ServiceBusReceiverClient} that receives messages from a queue or topic.
 * @throws IllegalStateException if neither of {@code queueName}/{@code topicName} is set, or both
 * are set. (The original javadoc listed further triggers here but is truncated in this copy;
 * confirm against {@code buildAsyncClient()}.)
 * @throws IllegalArgumentException if the queue or topic name is not set via the corresponding
 * builder method.
 */
public ServiceBusReceiverClient buildClient() {
// Wraps the async client; the retry try-timeout presumably bounds the sync client's blocking calls.
return new ServiceBusReceiverClient(buildAsyncClient(), retryOptions.getTryTimeout());
}
/**
 * Determines whether this is a rolling session receiver: that is the case only when
 * {@code maxConcurrentSessions} is configured (and positive) while no specific session id was named.
 *
 * @return {@code true} if this is an unnamed rolling session receiver; {@code false} otherwise.
 * @throws IllegalArgumentException if {@code maxConcurrentSessions} is set but not positive.
 */
private boolean isRollingSessionReceiver() {
final Integer concurrency = maxConcurrentSessions;
// Not configured at all: plain, single-session receiver.
if (concurrency == null) {
return false;
}
if (concurrency < 1) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Maximum number of concurrent sessions must be positive."));
}
// Rolling only when no specific session was pinned via sessionId.
return CoreUtils.isNullOrEmpty(sessionId);
}
} |
Can we verify that the group doesn't include range deletes, and throw an exception if it does? | public static long countOf(SpannerSchema spannerSchema, MutationGroup mutationGroup) {
long mutatedCells = 0L;
for (Mutation mutation : mutationGroup) {
if (mutation.getOperation() != Op.DELETE) {
for (String column : mutation.getColumns()) {
mutatedCells += spannerSchema.getCellsMutatedPerColumn(mutation.getTable(), column);
}
} else {
final KeySet keySet = mutation.getKeySet();
final long rows = Iterables.size(keySet.getKeys());
if (rows > 0) {
mutatedCells += rows * spannerSchema.getCellsMutatedPerRow(mutation.getTable());
}
}
}
return mutatedCells;
} | public static long countOf(SpannerSchema spannerSchema, MutationGroup mutationGroup) {
long mutatedCells = 0L;
for (Mutation mutation : mutationGroup) {
if (mutation.getOperation() == Op.DELETE) {
if (isPointDelete(mutation)) {
final KeySet keySet = mutation.getKeySet();
final long rows = Iterables.size(keySet.getKeys());
mutatedCells += rows * spannerSchema.getCellsMutatedPerRow(mutation.getTable());
}
} else {
for (String column : mutation.getColumns()) {
mutatedCells += spannerSchema.getCellsMutatedPerColumn(mutation.getTable(), column);
}
}
}
return mutatedCells;
} | class MutationCellCounter {
private MutationCellCounter() {
}
/**
* Count the number of cells modified by {@link MutationGroup}.
*/
} | class MutationCellCounter {
private MutationCellCounter() {
}
/**
* Count the number of cells modified by {@link MutationGroup}.
*/
} | |
But, if we keep it in a constant, the error will be initialised even if there's no error. Wouldn't that be unnecessary? | public static long abs(long n) {
if (n <= Long.MIN_VALUE) {
throw ErrorCreator.createError(getModulePrefixedReason(INT_LANG_LIB,
BallerinaErrorReasons.NUMBER_OVERFLOW_ERROR_IDENTIFIER),
BLangExceptionHelper.getErrorDetails(RuntimeErrors.INT_RANGE_OVERFLOW_ERROR));
}
return Math.abs(n);
} | throw ErrorCreator.createError(getModulePrefixedReason(INT_LANG_LIB, | public static long abs(long n) {
if (n <= Long.MIN_VALUE) {
throw ErrorCreator.createError(getModulePrefixedReason(INT_LANG_LIB,
BallerinaErrorReasons.NUMBER_OVERFLOW_ERROR_IDENTIFIER),
BLangExceptionHelper.getErrorDetails(RuntimeErrors.INT_RANGE_OVERFLOW_ERROR));
}
return Math.abs(n);
} | class Abs {
} | class Abs {
} |
@menghaoranss Why not init workerId in `init` method? | private long getWorkerId() {
if (null == instanceContext) {
return DEFAULT_WORKER_ID;
}
long result = instanceContext.getWorkerId();
Preconditions.checkArgument(result >= 0L && result < WORKER_ID_MAX_VALUE, "Illegal worker id.");
return result;
} | long result = instanceContext.getWorkerId(); | private long getWorkerId() {
if (null == instanceContext) {
return DEFAULT_WORKER_ID;
}
long result = instanceContext.getWorkerId();
Preconditions.checkArgument(result >= 0L && result < WORKER_ID_MAX_VALUE, "Illegal worker id.");
return result;
} | class SnowflakeKeyGenerateAlgorithm implements KeyGenerateAlgorithm, ShardingSphereInstanceRequiredAlgorithm {
public static final long EPOCH;
private static final String MAX_VIBRATION_OFFSET_KEY = "max-vibration-offset";
private static final String MAX_TOLERATE_TIME_DIFFERENCE_MILLISECONDS_KEY = "max-tolerate-time-difference-milliseconds";
private static final long SEQUENCE_BITS = 12L;
private static final long WORKER_ID_BITS = 10L;
private static final long SEQUENCE_MASK = (1 << SEQUENCE_BITS) - 1;
private static final long WORKER_ID_LEFT_SHIFT_BITS = SEQUENCE_BITS;
private static final long TIMESTAMP_LEFT_SHIFT_BITS = WORKER_ID_LEFT_SHIFT_BITS + WORKER_ID_BITS;
private static final long WORKER_ID_MAX_VALUE = 1L << WORKER_ID_BITS;
private static final int DEFAULT_VIBRATION_VALUE = 1;
private static final int MAX_TOLERATE_TIME_DIFFERENCE_MILLISECONDS = 10;
private static final long DEFAULT_WORKER_ID = 0;
@Setter
private static TimeService timeService = new TimeService();
@Getter
@Setter
private Properties props = new Properties();
private int maxVibrationOffset;
private int maxTolerateTimeDifferenceMilliseconds;
private int sequenceOffset = -1;
private long sequence;
private long lastMilliseconds;
private InstanceContext instanceContext;
static {
Calendar calendar = Calendar.getInstance();
calendar.set(2016, Calendar.NOVEMBER, 1);
calendar.set(Calendar.HOUR_OF_DAY, 0);
calendar.set(Calendar.MINUTE, 0);
calendar.set(Calendar.SECOND, 0);
calendar.set(Calendar.MILLISECOND, 0);
EPOCH = calendar.getTimeInMillis();
}
@Override
public void init() {
maxVibrationOffset = getMaxVibrationOffset();
maxTolerateTimeDifferenceMilliseconds = getMaxTolerateTimeDifferenceMilliseconds();
}
private int getMaxVibrationOffset() {
int result = Integer.parseInt(props.getOrDefault(MAX_VIBRATION_OFFSET_KEY, DEFAULT_VIBRATION_VALUE).toString());
Preconditions.checkArgument(result >= 0 && result <= SEQUENCE_MASK, "Illegal max vibration offset.");
return result;
}
private int getMaxTolerateTimeDifferenceMilliseconds() {
return Integer.parseInt(props.getOrDefault(MAX_TOLERATE_TIME_DIFFERENCE_MILLISECONDS_KEY, MAX_TOLERATE_TIME_DIFFERENCE_MILLISECONDS).toString());
}
/**
 * Generates the next snowflake key. Layout (per the shift constants): high bits carry the
 * milliseconds elapsed since {@code EPOCH}, then the 10-bit worker id, then the 12-bit
 * per-millisecond sequence in the lowest bits.
 * Synchronized because {@code sequence} and {@code lastMilliseconds} are shared mutable state.
 */
@Override
public synchronized Comparable<?> generateKey() {
long currentMilliseconds = timeService.getCurrentMillis();
// If the clock moved backwards within tolerance, the helper sleeps — re-read the clock afterwards.
if (waitTolerateTimeDifferenceIfNeed(currentMilliseconds)) {
currentMilliseconds = timeService.getCurrentMillis();
}
if (lastMilliseconds == currentMilliseconds) {
// Same millisecond as the previous key: bump the sequence; on mask wrap-around,
// spin until the next millisecond.
if (0L == (sequence = (sequence + 1) & SEQUENCE_MASK)) {
currentMilliseconds = waitUntilNextTime(currentMilliseconds);
}
} else {
// Fresh millisecond: restart the sequence from the "vibrated" offset
// (presumably to vary low bits between milliseconds — see vibrateSequenceOffset).
vibrateSequenceOffset();
sequence = sequenceOffset;
}
lastMilliseconds = currentMilliseconds;
return ((currentMilliseconds - EPOCH) << TIMESTAMP_LEFT_SHIFT_BITS) | (getWorkerId() << WORKER_ID_LEFT_SHIFT_BITS) | sequence;
}
/**
 * Handles a backwards-moving clock: if the last generation timestamp is ahead of
 * {@code currentMilliseconds} by less than the configured tolerance, sleep until the clock catches
 * up; beyond the tolerance, fail via {@code Preconditions.checkState}.
 *
 * @param currentMilliseconds the freshly read clock value.
 * @return {@code true} if the thread slept (caller should re-read the clock); {@code false} otherwise.
 */
@SneakyThrows(InterruptedException.class)
private boolean waitTolerateTimeDifferenceIfNeed(final long currentMilliseconds) {
// Normal case: clock is at or ahead of the last key's timestamp.
if (lastMilliseconds <= currentMilliseconds) {
return false;
}
long timeDifferenceMilliseconds = lastMilliseconds - currentMilliseconds;
Preconditions.checkState(timeDifferenceMilliseconds < maxTolerateTimeDifferenceMilliseconds,
"Clock is moving backwards, last time is %d milliseconds, current time is %d milliseconds", lastMilliseconds, currentMilliseconds);
Thread.sleep(timeDifferenceMilliseconds);
return true;
}
/**
 * Busy-waits until the clock advances strictly past {@code lastTime}.
 *
 * @param lastTime the millisecond value to move past.
 * @return the first observed clock value greater than {@code lastTime}.
 */
private long waitUntilNextTime(final long lastTime) {
long current;
do {
current = timeService.getCurrentMillis();
} while (current <= lastTime);
return current;
}
// Advances the sequence starting offset for the next millisecond, wrapping back to 0 once it
// passes maxVibrationOffset.
private void vibrateSequenceOffset() {
sequenceOffset = sequenceOffset >= maxVibrationOffset ? 0 : sequenceOffset + 1;
}
@Override
public String getType() {
return "SNOWFLAKE";
}
@Override
public boolean isDefault() {
return true;
}
@Override
public void setInstanceContext(final InstanceContext instanceContext) {
this.instanceContext = instanceContext;
}
} | class SnowflakeKeyGenerateAlgorithm implements KeyGenerateAlgorithm, ShardingSphereInstanceRequiredAlgorithm {
public static final long EPOCH;
private static final String MAX_VIBRATION_OFFSET_KEY = "max-vibration-offset";
private static final String MAX_TOLERATE_TIME_DIFFERENCE_MILLISECONDS_KEY = "max-tolerate-time-difference-milliseconds";
private static final long SEQUENCE_BITS = 12L;
private static final long WORKER_ID_BITS = 10L;
private static final long SEQUENCE_MASK = (1 << SEQUENCE_BITS) - 1;
private static final long WORKER_ID_LEFT_SHIFT_BITS = SEQUENCE_BITS;
private static final long TIMESTAMP_LEFT_SHIFT_BITS = WORKER_ID_LEFT_SHIFT_BITS + WORKER_ID_BITS;
private static final long WORKER_ID_MAX_VALUE = 1L << WORKER_ID_BITS;
private static final int DEFAULT_VIBRATION_VALUE = 1;
private static final int MAX_TOLERATE_TIME_DIFFERENCE_MILLISECONDS = 10;
private static final long DEFAULT_WORKER_ID = 0;
@Setter
private static TimeService timeService = new TimeService();
@Getter
@Setter
private Properties props = new Properties();
private int maxVibrationOffset;
private int maxTolerateTimeDifferenceMilliseconds;
private int sequenceOffset = -1;
private long sequence;
private long lastMilliseconds;
private InstanceContext instanceContext;
static {
Calendar calendar = Calendar.getInstance();
calendar.set(2016, Calendar.NOVEMBER, 1);
calendar.set(Calendar.HOUR_OF_DAY, 0);
calendar.set(Calendar.MINUTE, 0);
calendar.set(Calendar.SECOND, 0);
calendar.set(Calendar.MILLISECOND, 0);
EPOCH = calendar.getTimeInMillis();
}
@Override
public void init() {
maxVibrationOffset = getMaxVibrationOffset();
maxTolerateTimeDifferenceMilliseconds = getMaxTolerateTimeDifferenceMilliseconds();
}
private int getMaxVibrationOffset() {
int result = Integer.parseInt(props.getOrDefault(MAX_VIBRATION_OFFSET_KEY, DEFAULT_VIBRATION_VALUE).toString());
Preconditions.checkArgument(result >= 0 && result <= SEQUENCE_MASK, "Illegal max vibration offset.");
return result;
}
private int getMaxTolerateTimeDifferenceMilliseconds() {
return Integer.parseInt(props.getOrDefault(MAX_TOLERATE_TIME_DIFFERENCE_MILLISECONDS_KEY, MAX_TOLERATE_TIME_DIFFERENCE_MILLISECONDS).toString());
}
@Override
public synchronized Comparable<?> generateKey() {
long currentMilliseconds = timeService.getCurrentMillis();
if (waitTolerateTimeDifferenceIfNeed(currentMilliseconds)) {
currentMilliseconds = timeService.getCurrentMillis();
}
if (lastMilliseconds == currentMilliseconds) {
if (0L == (sequence = (sequence + 1) & SEQUENCE_MASK)) {
currentMilliseconds = waitUntilNextTime(currentMilliseconds);
}
} else {
vibrateSequenceOffset();
sequence = sequenceOffset;
}
lastMilliseconds = currentMilliseconds;
return ((currentMilliseconds - EPOCH) << TIMESTAMP_LEFT_SHIFT_BITS) | (getWorkerId() << WORKER_ID_LEFT_SHIFT_BITS) | sequence;
}
@SneakyThrows(InterruptedException.class)
private boolean waitTolerateTimeDifferenceIfNeed(final long currentMilliseconds) {
if (lastMilliseconds <= currentMilliseconds) {
return false;
}
long timeDifferenceMilliseconds = lastMilliseconds - currentMilliseconds;
Preconditions.checkState(timeDifferenceMilliseconds < maxTolerateTimeDifferenceMilliseconds,
"Clock is moving backwards, last time is %d milliseconds, current time is %d milliseconds", lastMilliseconds, currentMilliseconds);
Thread.sleep(timeDifferenceMilliseconds);
return true;
}
private long waitUntilNextTime(final long lastTime) {
long result = timeService.getCurrentMillis();
while (result <= lastTime) {
result = timeService.getCurrentMillis();
}
return result;
}
private void vibrateSequenceOffset() {
sequenceOffset = sequenceOffset >= maxVibrationOffset ? 0 : sequenceOffset + 1;
}
@Override
public String getType() {
return "SNOWFLAKE";
}
@Override
public boolean isDefault() {
return true;
}
@Override
public void setInstanceContext(final InstanceContext instanceContext) {
this.instanceContext = instanceContext;
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.