language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | spring-projects__spring-framework | spring-webmvc/src/main/java/org/springframework/web/servlet/function/RouterFunctions.java | {
"start": 53241,
"end": 54830
} | class ____<T extends ServerResponse> extends AbstractRouterFunction<T> {
private final RouterFunction<T> delegate;
private final Map<String,Object> attributes;
public AttributesRouterFunction(RouterFunction<T> delegate, Map<String, Object> attributes) {
this.delegate = delegate;
this.attributes = initAttributes(attributes);
}
private static Map<String, Object> initAttributes(Map<String, Object> attributes) {
return (attributes.isEmpty() ?
Collections.emptyMap() : Collections.unmodifiableMap(new LinkedHashMap<>(attributes)));
}
@Override
public Optional<HandlerFunction<T>> route(ServerRequest request) {
return this.delegate.route(request);
}
@Override
public void accept(Visitor visitor) {
visitor.attributes(this.attributes);
this.delegate.accept(visitor);
}
@Override
public RouterFunction<T> withAttribute(String name, Object value) {
Assert.hasLength(name, "Name must not be empty");
Assert.notNull(value, "Value must not be null");
Map<String, Object> attributes = new LinkedHashMap<>(this.attributes);
attributes.put(name, value);
return new AttributesRouterFunction<>(this.delegate, attributes);
}
@Override
public RouterFunction<T> withAttributes(Consumer<Map<String, Object>> attributesConsumer) {
Assert.notNull(attributesConsumer, "AttributesConsumer must not be null");
Map<String, Object> attributes = new LinkedHashMap<>(this.attributes);
attributesConsumer.accept(attributes);
return new AttributesRouterFunction<>(this.delegate, attributes);
}
}
}
| AttributesRouterFunction |
java | apache__kafka | connect/api/src/test/java/org/apache/kafka/connect/data/TimestampTest.java | {
"start": 1151,
"end": 2917
} | class ____ {
private static final GregorianCalendar EPOCH;
private static final GregorianCalendar EPOCH_PLUS_MILLIS;
private static final int NUM_MILLIS = 2000000000;
private static final long TOTAL_MILLIS = ((long) NUM_MILLIS) * 2;
static {
EPOCH = new GregorianCalendar(1970, Calendar.JANUARY, 1, 0, 0, 0);
EPOCH.setTimeZone(TimeZone.getTimeZone("UTC"));
EPOCH_PLUS_MILLIS = new GregorianCalendar(1970, Calendar.JANUARY, 1, 0, 0, 0);
EPOCH_PLUS_MILLIS.setTimeZone(TimeZone.getTimeZone("UTC"));
EPOCH_PLUS_MILLIS.add(Calendar.MILLISECOND, NUM_MILLIS);
EPOCH_PLUS_MILLIS.add(Calendar.MILLISECOND, NUM_MILLIS);
}
@Test
public void testBuilder() {
Schema plain = Date.SCHEMA;
assertEquals(Date.LOGICAL_NAME, plain.name());
assertEquals(1, (Object) plain.version());
}
@Test
public void testFromLogical() {
assertEquals(0L, Timestamp.fromLogical(Timestamp.SCHEMA, EPOCH.getTime()));
assertEquals(TOTAL_MILLIS, Timestamp.fromLogical(Timestamp.SCHEMA, EPOCH_PLUS_MILLIS.getTime()));
}
@Test
public void testFromLogicalInvalidSchema() {
assertThrows(DataException.class,
() -> Timestamp.fromLogical(Timestamp.builder().name("invalid").build(), EPOCH.getTime()));
}
@Test
public void testToLogical() {
assertEquals(EPOCH.getTime(), Timestamp.toLogical(Timestamp.SCHEMA, 0L));
assertEquals(EPOCH_PLUS_MILLIS.getTime(), Timestamp.toLogical(Timestamp.SCHEMA, TOTAL_MILLIS));
}
@Test
public void testToLogicalInvalidSchema() {
assertThrows(DataException.class,
() -> Date.toLogical(Date.builder().name("invalid").build(), 0));
}
}
| TimestampTest |
java | alibaba__nacos | persistence/src/main/java/com/alibaba/nacos/persistence/repository/embedded/hook/EmbeddedApplyHook.java | {
"start": 956,
"end": 1239
} | class ____ {
protected EmbeddedApplyHook() {
EmbeddedApplyHookHolder.getInstance().register(this);
}
/**
* Called after apply finished.
*
* @param log raft log
*/
public abstract void afterApply(WriteRequest log);
}
| EmbeddedApplyHook |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeSourceValuesAggregator.java | {
"start": 1577,
"end": 2341
} | class ____ extends SpatialExtentLongitudeWrappingAggregator {
public static SpatialExtentStateWrappedLongitudeState initSingle() {
return new SpatialExtentStateWrappedLongitudeState();
}
public static SpatialExtentGroupingStateWrappedLongitudeState initGrouping() {
return new SpatialExtentGroupingStateWrappedLongitudeState();
}
public static void combine(SpatialExtentStateWrappedLongitudeState current, BytesRef bytes) {
current.add(SpatialAggregationUtils.decode(bytes));
}
public static void combine(SpatialExtentGroupingStateWrappedLongitudeState current, int groupId, BytesRef bytes) {
current.add(groupId, SpatialAggregationUtils.decode(bytes));
}
}
| SpatialExtentGeoShapeSourceValuesAggregator |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/batch/BatchExecSortAggregate.java | {
"start": 3163,
"end": 8842
} | class ____ extends ExecNodeBase<RowData>
implements InputSortedExecNode<RowData>, SingleTransformationTranslator<RowData> {
public static final String SORT_AGGREGATE_TRANSFORMATION = "sort-aggregate";
public static final String FIELD_NAME_GROUPING = "grouping";
public static final String FIELD_NAME_AUX_GROUPING = "auxGrouping";
public static final String FIELD_NAME_AGG_CALLS = "aggCalls";
public static final String FIELD_NAME_AGG_INPUT_ROW_TYPE = "aggInputRowType";
public static final String FIELD_NAME_IS_MERGE = "isMerge";
public static final String FIELD_NAME_IS_FINAL = "isFinal";
@JsonProperty(FIELD_NAME_GROUPING)
private final int[] grouping;
@JsonProperty(FIELD_NAME_AUX_GROUPING)
private final int[] auxGrouping;
@JsonProperty(FIELD_NAME_AGG_CALLS)
private final AggregateCall[] aggCalls;
@JsonProperty(FIELD_NAME_AGG_INPUT_ROW_TYPE)
private final RowType aggInputRowType;
@JsonProperty(FIELD_NAME_IS_MERGE)
private final boolean isMerge;
@JsonProperty(FIELD_NAME_IS_FINAL)
private final boolean isFinal;
public BatchExecSortAggregate(
ReadableConfig tableConfig,
int[] grouping,
int[] auxGrouping,
AggregateCall[] aggCalls,
RowType aggInputRowType,
boolean isMerge,
boolean isFinal,
InputProperty inputProperty,
RowType outputType,
String description) {
super(
ExecNodeContext.newNodeId(),
ExecNodeContext.newContext(BatchExecSortAggregate.class),
ExecNodeContext.newPersistedConfig(BatchExecSortAggregate.class, tableConfig),
Collections.singletonList(inputProperty),
outputType,
description);
this.grouping = grouping;
this.auxGrouping = auxGrouping;
this.aggCalls = aggCalls;
this.aggInputRowType = aggInputRowType;
this.isMerge = isMerge;
this.isFinal = isFinal;
}
@JsonCreator
public BatchExecSortAggregate(
@JsonProperty(FIELD_NAME_ID) int id,
@JsonProperty(FIELD_NAME_TYPE) ExecNodeContext context,
@JsonProperty(FIELD_NAME_CONFIGURATION) ReadableConfig persistedConfig,
@JsonProperty(FIELD_NAME_GROUPING) int[] grouping,
@JsonProperty(FIELD_NAME_AUX_GROUPING) int[] auxGrouping,
@JsonProperty(FIELD_NAME_AGG_CALLS) AggregateCall[] aggCalls,
@JsonProperty(FIELD_NAME_AGG_INPUT_ROW_TYPE) RowType aggInputRowType,
@JsonProperty(FIELD_NAME_IS_MERGE) boolean isMerge,
@JsonProperty(FIELD_NAME_IS_FINAL) boolean isFinal,
@JsonProperty(FIELD_NAME_INPUT_PROPERTIES) List<InputProperty> inputProperties,
@JsonProperty(FIELD_NAME_OUTPUT_TYPE) RowType outputType,
@JsonProperty(FIELD_NAME_DESCRIPTION) String description) {
super(id, context, persistedConfig, inputProperties, outputType, description);
this.grouping = grouping;
this.auxGrouping = auxGrouping;
this.aggCalls = aggCalls;
this.aggInputRowType = aggInputRowType;
this.isMerge = isMerge;
this.isFinal = isFinal;
}
@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(
PlannerBase planner, ExecNodeConfig config) {
final ExecEdge inputEdge = getInputEdges().get(0);
final Transformation<RowData> inputTransform =
(Transformation<RowData>) inputEdge.translateToPlan(planner);
final RowType inputRowType = (RowType) inputEdge.getOutputType();
final RowType outputRowType = (RowType) getOutputType();
final CodeGeneratorContext ctx =
new CodeGeneratorContext(config, planner.getFlinkContext().getClassLoader());
final AggregateInfoList aggInfos =
AggregateUtil.transformToBatchAggregateInfoList(
planner.getTypeFactory(),
aggInputRowType,
JavaScalaConversionUtil.toScala(Arrays.asList(aggCalls)),
null,
null);
final GeneratedOperator<OneInputStreamOperator<RowData, RowData>> generatedOperator;
if (grouping.length == 0) {
generatedOperator =
AggWithoutKeysCodeGenerator.genWithoutKeys(
ctx,
planner.createRelBuilder(),
aggInfos,
inputRowType,
outputRowType,
isMerge,
isFinal,
"NoGrouping");
} else {
generatedOperator =
SortAggCodeGenerator.genWithKeys(
ctx,
planner.createRelBuilder(),
aggInfos,
inputRowType,
outputRowType,
grouping,
auxGrouping,
isMerge,
isFinal);
}
return ExecNodeUtil.createOneInputTransformation(
inputTransform,
createTransformationMeta(SORT_AGGREGATE_TRANSFORMATION, config),
new CodeGenOperatorFactory<>(generatedOperator),
InternalTypeInfo.of(outputRowType),
inputTransform.getParallelism(),
false);
}
}
| BatchExecSortAggregate |
java | spring-projects__spring-security | web/src/main/java/org/springframework/security/web/servlet/util/matcher/PathPatternRequestMatcher.java | {
"start": 7054,
"end": 11160
} | class ____ {
private final PathPatternParser parser;
private final String basePath;
Builder() {
this(PathPatternParser.defaultInstance);
}
Builder(PathPatternParser parser) {
this(parser, "");
}
Builder(PathPatternParser parser, String basePath) {
this.parser = parser;
this.basePath = basePath;
}
/**
* Match requests starting with this {@code basePath}.
*
* <p>
* Prefixes should be of the form {@code /my/prefix}, starting with a slash, not
* ending in a slash, and not containing and wildcards The special value
* {@code "/"} may be used to indicate the root context.
* @param basePath the path prefix
* @return the {@link Builder} for more configuration
*/
public Builder basePath(String basePath) {
Assert.notNull(basePath, "basePath cannot be null");
Assert.isTrue(basePath.startsWith("/"), "basePath must start with '/'");
Assert.isTrue("/".equals(basePath) || !basePath.endsWith("/"), "basePath must not end with a slash");
Assert.isTrue(!basePath.contains("*"), "basePath must not contain a star");
return new Builder(this.parser, basePath);
}
/**
* Match requests having this path pattern.
*
* <p>
* When the HTTP {@code method} is null, then the matcher does not consider the
* HTTP method
*
* <p>
* Path patterns always start with a slash and may contain placeholders. They can
* also be followed by {@code /**} to signify all URIs under a given path.
*
* <p>
* These must be specified relative to any context path prefix. A
* {@link #basePath} may be specified to reuse a common prefix, for example a
* servlet path.
*
* <p>
* The following are valid patterns and their meaning
* <ul>
* <li>{@code /path} - match exactly and only `/path`</li>
* <li>{@code /path/**} - match `/path` and any of its descendents</li>
* <li>{@code /path/{value}/**} - match `/path/subdirectory` and any of its
* descendents, capturing the value of the subdirectory in
* {@link RequestAuthorizationContext#getVariables()}</li>
* </ul>
*
* <p>
* A more comprehensive list can be found at {@link PathPattern}.
* @param path the path pattern to match
* @return the {@link Builder} for more configuration
*/
public PathPatternRequestMatcher matcher(String path) {
return matcher(null, path);
}
/**
* Match requests having this {@link HttpMethod} and path pattern.
*
* <p>
* When the HTTP {@code method} is null, then the matcher does not consider the
* HTTP method
*
* <p>
* Path patterns always start with a slash and may contain placeholders. They can
* also be followed by {@code /**} to signify all URIs under a given path.
*
* <p>
* These must be specified relative to any context path prefix. A
* {@link #basePath} may be specified to reuse a common prefix, for example a
* servlet path.
*
* <p>
* The following are valid patterns and their meaning
* <ul>
* <li>{@code /path} - match exactly and only `/path`</li>
* <li>{@code /path/**} - match `/path` and any of its descendents</li>
* <li>{@code /path/{value}/**} - match `/path/subdirectory` and any of its
* descendents, capturing the value of the subdirectory in
* {@link RequestAuthorizationContext#getVariables()}</li>
* </ul>
*
* <p>
* A more comprehensive list can be found at {@link PathPattern}.
* @param method the {@link HttpMethod} to match, may be null
* @param path the path pattern to match
* @return the {@link Builder} for more configuration
*/
public PathPatternRequestMatcher matcher(@Nullable HttpMethod method, String path) {
Assert.notNull(path, "pattern cannot be null");
Assert.isTrue(path.startsWith("/"), "pattern must start with a /");
String prefix = ("/".equals(this.basePath)) ? "" : this.basePath;
PathPattern pathPattern = this.parser.parse(prefix + path);
return new PathPatternRequestMatcher(pathPattern,
(method != null) ? new HttpMethodRequestMatcher(method) : AnyRequestMatcher.INSTANCE);
}
}
private static final | Builder |
java | elastic__elasticsearch | x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/mistral/embeddings/MistralEmbeddingsServiceSettingsTests.java | {
"start": 1422,
"end": 8998
} | class ____ extends ESTestCase {
public void testFromMap_Request_CreatesSettingsCorrectly() {
var model = "mistral-embed";
var dims = 1536;
var maxInputTokens = 512;
var serviceSettings = MistralEmbeddingsServiceSettings.fromMap(
createRequestSettingsMap(model, dims, maxInputTokens, SimilarityMeasure.COSINE),
ConfigurationParseContext.REQUEST
);
assertThat(serviceSettings, is(new MistralEmbeddingsServiceSettings(model, dims, maxInputTokens, SimilarityMeasure.COSINE, null)));
}
public void testFromMap_RequestWithRateLimit_CreatesSettingsCorrectly() {
var model = "mistral-embed";
var dims = 1536;
var maxInputTokens = 512;
var settingsMap = createRequestSettingsMap(model, dims, maxInputTokens, SimilarityMeasure.COSINE);
settingsMap.put(RateLimitSettings.FIELD_NAME, new HashMap<>(Map.of(RateLimitSettings.REQUESTS_PER_MINUTE_FIELD, 3)));
var serviceSettings = MistralEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.REQUEST);
assertThat(
serviceSettings,
is(new MistralEmbeddingsServiceSettings(model, dims, maxInputTokens, SimilarityMeasure.COSINE, new RateLimitSettings(3)))
);
}
public void testFromMap_Persistent_CreatesSettingsCorrectly() {
var model = "mistral-embed";
var dims = 1536;
var maxInputTokens = 512;
var settingsMap = createRequestSettingsMap(model, dims, maxInputTokens, SimilarityMeasure.COSINE);
var serviceSettings = MistralEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.PERSISTENT);
assertThat(serviceSettings, is(new MistralEmbeddingsServiceSettings(model, dims, maxInputTokens, SimilarityMeasure.COSINE, null)));
}
public void testFromMap_PersistentContext_DoesNotThrowException_WhenDimensionsIsNull() {
var model = "mistral-embed";
var settingsMap = createRequestSettingsMap(model, null, null, null);
var serviceSettings = MistralEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.PERSISTENT);
assertThat(serviceSettings, is(new MistralEmbeddingsServiceSettings(model, null, null, null, null)));
}
public void testFromMap_ThrowsException_WhenDimensionsAreZero() {
var model = "mistral-embed";
var dimensions = 0;
var settingsMap = createRequestSettingsMap(model, dimensions, null, SimilarityMeasure.COSINE);
var thrownException = expectThrows(
ValidationException.class,
() -> MistralEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.REQUEST)
);
assertThat(
thrownException.getMessage(),
containsString("Validation Failed: 1: [service_settings] Invalid value [0]. [dimensions] must be a positive integer;")
);
}
public void testFromMap_ThrowsException_WhenDimensionsAreNegative() {
var model = "mistral-embed";
var dimensions = randomNegativeInt();
var settingsMap = createRequestSettingsMap(model, dimensions, null, SimilarityMeasure.COSINE);
var thrownException = expectThrows(
ValidationException.class,
() -> MistralEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.REQUEST)
);
assertThat(
thrownException.getMessage(),
containsString(
Strings.format(
"Validation Failed: 1: [service_settings] Invalid value [%d]. [dimensions] must be a positive integer;",
dimensions
)
)
);
}
public void testFromMap_ThrowsException_WhenMaxInputTokensAreZero() {
var model = "mistral-embed";
var maxInputTokens = 0;
var settingsMap = createRequestSettingsMap(model, null, maxInputTokens, SimilarityMeasure.COSINE);
var thrownException = expectThrows(
ValidationException.class,
() -> MistralEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.REQUEST)
);
assertThat(
thrownException.getMessage(),
containsString("Validation Failed: 1: [service_settings] Invalid value [0]. [max_input_tokens] must be a positive integer;")
);
}
public void testFromMap_ThrowsException_WhenMaxInputTokensAreNegative() {
var model = "mistral-embed";
var maxInputTokens = randomNegativeInt();
var settingsMap = createRequestSettingsMap(model, null, maxInputTokens, SimilarityMeasure.COSINE);
var thrownException = expectThrows(
ValidationException.class,
() -> MistralEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.REQUEST)
);
assertThat(
thrownException.getMessage(),
containsString(
Strings.format(
"Validation Failed: 1: [service_settings] Invalid value [%d]. [max_input_tokens] must be a positive integer;",
maxInputTokens
)
)
);
}
public void testFromMap_PersistentContext_DoesNotThrowException_WhenSimilarityIsPresent() {
var model = "mistral-embed";
var settingsMap = createRequestSettingsMap(model, null, null, SimilarityMeasure.DOT_PRODUCT);
var serviceSettings = MistralEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.PERSISTENT);
assertThat(serviceSettings, is(new MistralEmbeddingsServiceSettings(model, null, null, SimilarityMeasure.DOT_PRODUCT, null)));
}
public void testToXContent_WritesAllValues() throws IOException {
var entity = new MistralEmbeddingsServiceSettings("model_name", 1024, 512, null, new RateLimitSettings(3));
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
entity.toXContent(builder, null);
String xContentResult = Strings.toString(builder);
assertThat(xContentResult, CoreMatchers.is("""
{"model":"model_name","dimensions":1024,"max_input_tokens":512,""" + """
"rate_limit":{"requests_per_minute":3}}"""));
}
public void testStreamInputAndOutput_WritesValuesCorrectly() throws IOException {
var outputBuffer = new BytesStreamOutput();
var settings = new MistralEmbeddingsServiceSettings("model_name", 1024, 512, null, new RateLimitSettings(3));
settings.writeTo(outputBuffer);
var outputBufferRef = outputBuffer.bytes();
var inputBuffer = new ByteArrayStreamInput(outputBufferRef.array());
var settingsFromBuffer = new MistralEmbeddingsServiceSettings(inputBuffer);
assertEquals(settings, settingsFromBuffer);
}
public static HashMap<String, Object> createRequestSettingsMap(
String model,
@Nullable Integer dimensions,
@Nullable Integer maxTokens,
@Nullable SimilarityMeasure similarityMeasure
) {
var map = new HashMap<String, Object>(Map.of(MistralConstants.MODEL_FIELD, model));
if (dimensions != null) {
map.put(ServiceFields.DIMENSIONS, dimensions);
}
if (maxTokens != null) {
map.put(ServiceFields.MAX_INPUT_TOKENS, maxTokens);
}
if (similarityMeasure != null) {
map.put(SIMILARITY, similarityMeasure.toString());
}
return map;
}
}
| MistralEmbeddingsServiceSettingsTests |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestPathOutputCommitterFactory.java | {
"start": 4122,
"end": 4335
} | class ____ unknown, you cannot
* create committers.
*/
@Test
public void testCommitterFactoryUnknown() throws Throwable {
Configuration conf = new Configuration();
// set the factory to an unknown | is |
java | hibernate__hibernate-orm | hibernate-testing/src/main/java/org/hibernate/testing/bytecode/enhancement/EnhancementSelector.java | {
"start": 283,
"end": 352
} | interface ____ {
/**
* Determine whether the named | EnhancementSelector |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/web/bind/annotation/ModelAttribute.java | {
"start": 3167,
"end": 3511
} | interface ____ {
/**
* Alias for {@link #name}.
*/
@AliasFor("name")
String value() default "";
/**
* The name of the model attribute to bind to.
* <p>The default model attribute name is inferred from the declared
* attribute type (i.e. the method parameter type or method return type),
* based on the non-qualified | ModelAttribute |
java | apache__commons-lang | src/test/java/org/apache/commons/lang3/builder/MultiLineToStringStyleTest.java | {
"start": 1284,
"end": 11097
} | class ____ extends AbstractLangTest {
private final Integer base = Integer.valueOf(5);
private final String baseStr = base.getClass().getName() + "@" + Integer.toHexString(System.identityHashCode(base));
@BeforeEach
public void setUp() {
ToStringBuilder.setDefaultStyle(ToStringStyle.MULTI_LINE_STYLE);
}
@AfterEach
public void tearDown() {
ToStringBuilder.setDefaultStyle(ToStringStyle.DEFAULT_STYLE);
}
@Test
void testAppendSuper() {
assertEquals(baseStr + "[" + System.lineSeparator() + "]", new ToStringBuilder(base).appendSuper("Integer@8888[" + System.lineSeparator() + "]").toString());
assertEquals(baseStr + "[" + System.lineSeparator() + " <null>" + System.lineSeparator() + "]", new ToStringBuilder(base).appendSuper("Integer@8888[" + System.lineSeparator() + " <null>" + System.lineSeparator() + "]").toString());
assertEquals(baseStr + "[" + System.lineSeparator() + " a=hello" + System.lineSeparator() + "]", new ToStringBuilder(base).appendSuper("Integer@8888[" + System.lineSeparator() + "]").append("a", "hello").toString());
assertEquals(baseStr + "[" + System.lineSeparator() + " <null>" + System.lineSeparator() + " a=hello" + System.lineSeparator() + "]", new ToStringBuilder(base).appendSuper("Integer@8888[" + System.lineSeparator() + " <null>" + System.lineSeparator() + "]").append("a", "hello").toString());
assertEquals(baseStr + "[" + System.lineSeparator() + " a=hello" + System.lineSeparator() + "]", new ToStringBuilder(base).appendSuper(null).append("a", "hello").toString());
}
@Test
void testArray() {
final Integer i3 = Integer.valueOf(3);
final Integer i4 = Integer.valueOf(4);
assertEquals(baseStr + "[" + System.lineSeparator() + " a=<size=0>" + System.lineSeparator() + "]", new ToStringBuilder(base).append("a", (Object) new Integer[0], false).toString());
assertEquals(baseStr + "[" + System.lineSeparator() + " a={}" + System.lineSeparator() + "]", new ToStringBuilder(base).append("a", (Object) new Integer[0], true).toString());
assertEquals(baseStr + "[" + System.lineSeparator() + " a=<size=1>" + System.lineSeparator() + "]", new ToStringBuilder(base).append("a", (Object) new Integer[] {i3}, false).toString());
assertEquals(baseStr + "[" + System.lineSeparator() + " a={3}" + System.lineSeparator() + "]", new ToStringBuilder(base).append("a", (Object) new Integer[] {i3}, true).toString());
assertEquals(baseStr + "[" + System.lineSeparator() + " a=<size=2>" + System.lineSeparator() + "]", new ToStringBuilder(base).append("a", (Object) new Integer[] {i3, i4}, false).toString());
assertEquals(baseStr + "[" + System.lineSeparator() + " a={3,4}" + System.lineSeparator() + "]", new ToStringBuilder(base).append("a", (Object) new Integer[] {i3, i4}, true).toString());
}
@Test
void testBlank() {
assertEquals(baseStr + "[" + System.lineSeparator() + "]", new ToStringBuilder(base).toString());
}
@Test
void testCollection() {
final Integer i3 = Integer.valueOf(3);
final Integer i4 = Integer.valueOf(4);
assertEquals(baseStr + "[" + System.lineSeparator() + " a=<size=0>" + System.lineSeparator() + "]", new ToStringBuilder(base).append("a", Collections.emptyList(), false).toString());
assertEquals(baseStr + "[" + System.lineSeparator() + " a=[]" + System.lineSeparator() + "]", new ToStringBuilder(base).append("a", Collections.emptyList(), true).toString());
assertEquals(baseStr + "[" + System.lineSeparator() + " a=<size=1>" + System.lineSeparator() + "]", new ToStringBuilder(base).append("a", Collections.singletonList(i3), false).toString());
assertEquals(baseStr + "[" + System.lineSeparator() + " a=[3]" + System.lineSeparator() + "]", new ToStringBuilder(base).append("a", Collections.singletonList(i3), true).toString());
assertEquals(baseStr + "[" + System.lineSeparator() + " a=<size=2>" + System.lineSeparator() + "]", new ToStringBuilder(base).append("a", Arrays.asList(i3, i4), false).toString());
assertEquals(baseStr + "[" + System.lineSeparator() + " a=[3, 4]" + System.lineSeparator() + "]", new ToStringBuilder(base).append("a", Arrays.asList(i3, i4), true).toString());
}
@Test
void testLong() {
assertEquals(baseStr + "[" + System.lineSeparator() + " 3" + System.lineSeparator() + "]", new ToStringBuilder(base).append(3L).toString());
assertEquals(baseStr + "[" + System.lineSeparator() + " a=3" + System.lineSeparator() + "]", new ToStringBuilder(base).append("a", 3L).toString());
assertEquals(baseStr + "[" + System.lineSeparator() + " a=3" + System.lineSeparator() + " b=4" + System.lineSeparator() + "]", new ToStringBuilder(base).append("a", 3L).append("b", 4L).toString());
}
@Test
void testLongArray() {
long[] array = {1, 2, -3, 4};
assertEquals(baseStr + "[" + System.lineSeparator() + " {1,2,-3,4}" + System.lineSeparator() + "]", new ToStringBuilder(base).append(array).toString());
assertEquals(baseStr + "[" + System.lineSeparator() + " {1,2,-3,4}" + System.lineSeparator() + "]", new ToStringBuilder(base).append((Object) array).toString());
array = null;
assertEquals(baseStr + "[" + System.lineSeparator() + " <null>" + System.lineSeparator() + "]", new ToStringBuilder(base).append(array).toString());
assertEquals(baseStr + "[" + System.lineSeparator() + " <null>" + System.lineSeparator() + "]", new ToStringBuilder(base).append((Object) array).toString());
}
@Test
void testLongArrayArray() {
long[][] array = {{1, 2}, null, {5}};
assertEquals(baseStr + "[" + System.lineSeparator() + " {{1,2},<null>,{5}}" + System.lineSeparator() + "]", new ToStringBuilder(base).append(array).toString());
assertEquals(baseStr + "[" + System.lineSeparator() + " {{1,2},<null>,{5}}" + System.lineSeparator() + "]", new ToStringBuilder(base).append((Object) array).toString());
array = null;
assertEquals(baseStr + "[" + System.lineSeparator() + " <null>" + System.lineSeparator() + "]", new ToStringBuilder(base).append(array).toString());
assertEquals(baseStr + "[" + System.lineSeparator() + " <null>" + System.lineSeparator() + "]", new ToStringBuilder(base).append((Object) array).toString());
}
@Test
void testMap() {
assertEquals(baseStr + "[" + System.lineSeparator() + " a=<size=0>" + System.lineSeparator() + "]", new ToStringBuilder(base).append("a", Collections.emptyMap(), false).toString());
assertEquals(baseStr + "[" + System.lineSeparator() + " a={}" + System.lineSeparator() + "]", new ToStringBuilder(base).append("a", Collections.emptyMap(), true).toString());
assertEquals(baseStr + "[" + System.lineSeparator() + " a=<size=1>" + System.lineSeparator() + "]", new ToStringBuilder(base).append("a", Collections.singletonMap("k", "v"), false).toString());
assertEquals(baseStr + "[" + System.lineSeparator() + " a={k=v}" + System.lineSeparator() + "]", new ToStringBuilder(base).append("a", Collections.singletonMap("k", "v"), true).toString());
}
@Test
void testObject() {
final Integer i3 = Integer.valueOf(3);
final Integer i4 = Integer.valueOf(4);
assertEquals(baseStr + "[" + System.lineSeparator() + " <null>" + System.lineSeparator() + "]", new ToStringBuilder(base).append((Object) null).toString());
assertEquals(baseStr + "[" + System.lineSeparator() + " 3" + System.lineSeparator() + "]", new ToStringBuilder(base).append(i3).toString());
assertEquals(baseStr + "[" + System.lineSeparator() + " a=<null>" + System.lineSeparator() + "]", new ToStringBuilder(base).append("a", (Object) null).toString());
assertEquals(baseStr + "[" + System.lineSeparator() + " a=3" + System.lineSeparator() + "]", new ToStringBuilder(base).append("a", i3).toString());
assertEquals(baseStr + "[" + System.lineSeparator() + " a=3" + System.lineSeparator() + " b=4" + System.lineSeparator() + "]", new ToStringBuilder(base).append("a", i3).append("b", i4).toString());
assertEquals(baseStr + "[" + System.lineSeparator() + " a=<Integer>" + System.lineSeparator() + "]", new ToStringBuilder(base).append("a", i3, false).toString());
}
@Test
void testObjectArray() {
Object[] array = {null, base, new int[] {3, 6}};
assertEquals(baseStr + "[" + System.lineSeparator() + " {<null>,5,{3,6}}" + System.lineSeparator() + "]", new ToStringBuilder(base).append(array).toString());
assertEquals(baseStr + "[" + System.lineSeparator() + " {<null>,5,{3,6}}" + System.lineSeparator() + "]", new ToStringBuilder(base).append((Object) array).toString());
array = null;
assertEquals(baseStr + "[" + System.lineSeparator() + " <null>" + System.lineSeparator() + "]", new ToStringBuilder(base).append(array).toString());
assertEquals(baseStr + "[" + System.lineSeparator() + " <null>" + System.lineSeparator() + "]", new ToStringBuilder(base).append((Object) array).toString());
}
@Test
void testPerson() {
final Person p = new Person();
p.name = "Jane Doe";
p.age = 25;
p.smoker = true;
final String baseStr = p.getClass().getName() + "@" + Integer.toHexString(System.identityHashCode(p));
assertEquals(baseStr + "[" + System.lineSeparator() + " name=Jane Doe" + System.lineSeparator() + " age=25" + System.lineSeparator() + " smoker=true" + System.lineSeparator() + "]", new ToStringBuilder(p).append("name", p.name).append("age", p.age).append("smoker", p.smoker).toString());
}
}
| MultiLineToStringStyleTest |
java | apache__camel | core/camel-core-languages/src/main/java/org/apache/camel/language/simple/ast/BaseSimpleNode.java | {
"start": 1085,
"end": 1952
} | class ____ implements SimpleNode {
protected final SimpleToken token;
protected BaseSimpleNode(SimpleToken token) {
this.token = token;
}
@Override
public SimpleToken getToken() {
return token;
}
@Override
public String toString() {
return token.getText();
}
protected static String createCode(CamelContext camelContext, String expression, CompositeNodes block)
throws SimpleParserException {
String answer = null;
if (block != null) {
answer = block.createCode(camelContext, expression);
}
// use double quote as this become used as string literals in the generated code
if (answer == null) {
answer = "\"\"";
} else {
answer = "\"" + answer + "\"";
}
return answer;
}
}
| BaseSimpleNode |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/state/internals/ValueAndTimestampSerializer.java | {
"start": 1357,
"end": 5427
} | class ____<V> implements WrappingNullableSerializer<ValueAndTimestamp<V>, Void, V> {
public final Serializer<V> valueSerializer;
private final Serializer<Long> timestampSerializer;
ValueAndTimestampSerializer(final Serializer<V> valueSerializer) {
Objects.requireNonNull(valueSerializer);
this.valueSerializer = valueSerializer;
timestampSerializer = new LongSerializer();
}
public static boolean valuesAreSameAndTimeIsIncreasing(final byte[] oldRecord, final byte[] newRecord) {
if (oldRecord == newRecord) {
// same reference, so they are trivially the same (might both be null)
return true;
} else if (oldRecord == null || newRecord == null) {
// only one is null, so they cannot be the same
return false;
} else if (newRecord.length != oldRecord.length) {
// they are different length, so they cannot be the same
return false;
} else if (timeIsDecreasing(oldRecord, newRecord)) {
// the record time represents the beginning of the validity interval, so if the time
// moves backwards, we need to do the update regardless of whether the value has changed
return false;
} else {
// all other checks have fallen through, so we actually compare the binary data of the two values
return valuesAreSame(oldRecord, newRecord);
}
}
@Override
public void configure(final Map<String, ?> configs,
final boolean isKey) {
valueSerializer.configure(configs, isKey);
timestampSerializer.configure(configs, isKey);
}
@Override
public byte[] serialize(final String topic,
final ValueAndTimestamp<V> data) {
if (data == null) {
return null;
}
return serialize(topic, data.value(), data.timestamp());
}
public byte[] serialize(final String topic,
final V data,
final long timestamp) {
if (data == null) {
return null;
}
final byte[] rawValue = valueSerializer.serialize(topic, data);
// Since we can't control the result of the internal serializer, we make sure that the result
// is not null as well.
// Serializing non-null values to null can be useful when working with Optional-like values
// where the Optional.empty case is serialized to null.
// See the discussion here: https://github.com/apache/kafka/pull/7679
if (rawValue == null) {
return null;
}
final byte[] rawTimestamp = timestampSerializer.serialize(topic, timestamp);
return ByteBuffer
.allocate(rawTimestamp.length + rawValue.length)
.put(rawTimestamp)
.put(rawValue)
.array();
}
@Override
public void close() {
valueSerializer.close();
timestampSerializer.close();
}
private static boolean timeIsDecreasing(final byte[] oldRecord, final byte[] newRecord) {
return extractTimestamp(newRecord) <= extractTimestamp(oldRecord);
}
private static long extractTimestamp(final byte[] bytes) {
final byte[] timestampBytes = new byte[Long.BYTES];
System.arraycopy(bytes, 0, timestampBytes, 0, Long.BYTES);
return ByteBuffer.wrap(timestampBytes).getLong();
}
private static boolean valuesAreSame(final byte[] left, final byte[] right) {
for (int i = Long.BYTES; i < left.length; i++) {
if (left[i] != right[i]) {
return false;
}
}
return true;
}
@Override
public void setIfUnset(final SerdeGetter getter) {
// ValueAndTimestampSerializer never wraps a null serializer (or configure would throw),
// but it may wrap a serializer that itself wraps a null serializer.
initNullableSerializer(valueSerializer, getter);
}
}
| ValueAndTimestampSerializer |
java | dropwizard__dropwizard | dropwizard-jetty/src/test/java/io/dropwizard/jetty/GzipServletHandlerTest.java | {
"start": 4416,
"end": 5663
} | class ____ extends HttpServlet {
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {
resp.setCharacterEncoding(StandardCharsets.UTF_8.toString());
resp.setContentType(PLAIN_TEXT_UTF_8);
resp.getWriter().write(new String(getClass().getResourceAsStream("/assets/banner.txt").readAllBytes(), StandardCharsets.UTF_8));
}
@Override
protected void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {
assertThat(req.getHeader(HttpHeader.CONTENT_TYPE.asString())).isEqualToIgnoringCase(PLAIN_TEXT_UTF_8);
assertThat(req.getHeader(HttpHeader.CONTENT_ENCODING.asString())).isNull();
assertThat(req.getHeader(HttpHeader.CONTENT_LENGTH.asString())).isNull();
assertThat(req.getContentLength()).isEqualTo(-1);
assertThat(req.getContentLengthLong()).isEqualTo(-1L);
assertThat(req.getInputStream())
.hasSameContentAs(getClass().getResourceAsStream("/assets/new-banner.txt"));
resp.setContentType(PLAIN_TEXT_UTF_8);
resp.getWriter().write("Banner has been updated");
}
}
}
| BannerServlet |
java | spring-projects__spring-framework | spring-webflux/src/test/java/org/springframework/web/reactive/BindingContextTests.java | {
"start": 1445,
"end": 3099
} | class ____ {
@Test
void jakartaValidatorExcludedWhenMethodValidationApplicable() throws Exception {
BindingContext bindingContext = new BindingContext(null);
bindingContext.setMethodValidationApplicable(true);
Method method = getClass().getDeclaredMethod("handleValidObject", Foo.class);
ResolvableType targetType = ResolvableType.forMethodParameter(method, 0);
WebDataBinder binder = bindingContext.createDataBinder(
MockServerWebExchange.from(MockServerHttpRequest.get("")), new Foo(), "foo", targetType);
Validator springValidator = mock(Validator.class);
when(springValidator.supports(Foo.class)).thenReturn(true);
binder.addValidators(springValidator);
LocalValidatorFactoryBean beanValidator = new LocalValidatorFactoryBean();
beanValidator.afterPropertiesSet();
binder.addValidators(beanValidator);
WrappedBeanValidator wrappedBeanValidator = new WrappedBeanValidator(beanValidator);
binder.addValidators(wrappedBeanValidator);
assertThat(binder.getValidatorsToApply()).containsExactly(springValidator);
}
@SuppressWarnings("unused")
private void handleValidObject(@Valid Foo foo) {
}
private record WrappedBeanValidator(jakarta.validation.Validator validator) implements SmartValidator {
@Override
public boolean supports(Class<?> clazz) {
return true;
}
@Override
public void validate(Object target, Errors errors, Object... validationHints) {
}
@Override
public void validate(Object target, Errors errors) {
}
@SuppressWarnings("unchecked")
@Override
public <T> T unwrap(Class<T> type) {
return (T) this.validator;
}
}
private static final | BindingContextTests |
java | netty__netty | common/src/test/java/io/netty/util/internal/logging/Log4J2LoggerFactoryTest.java | {
"start": 841,
"end": 1102
} | class ____ {
@Test
public void testCreation() {
InternalLogger logger = Log4J2LoggerFactory.INSTANCE.newInstance("foo");
assertTrue(logger instanceof Log4J2Logger);
assertEquals("foo", logger.name());
}
}
| Log4J2LoggerFactoryTest |
java | redisson__redisson | redisson/src/main/java/org/redisson/config/CommandMapper.java | {
"start": 717,
"end": 1151
} | interface ____ {
/**
* Applies map function to the input Redis command <code>name</code>
*
* @param name - original command name
* @return mapped command name
*/
String map(String name);
/**
* Returns input Redis command name. Used by default
*
* @return NameMapper instance
*/
static CommandMapper direct() {
return new DefaultCommandMapper();
}
}
| CommandMapper |
java | alibaba__nacos | sys/src/test/java/com/alibaba/nacos/sys/env/NacosDuplicateConfigurationBeanPostProcessorTest.java | {
"start": 1465,
"end": 4114
} | class ____ {
@Mock
NacosDuplicateConfigurationBeanPostProcessor processor;
@Mock
ConfigurableApplicationContext context;
@Mock
ConfigurableListableBeanFactory beanFactory;
@Mock
BeanDefinition beanDefinition;
@BeforeEach
void setUp() {
processor = new NacosDuplicateConfigurationBeanPostProcessor(context);
}
@Test
void testPostProcessBeforeInstantiationNonExist() {
Class beanClass = LifecycleProcessor.class;
assertNull(processor.postProcessBeforeInstantiation(beanClass, "lifecycleProcessor"));
verify(context, never()).getBean("lifecycleProcessor");
}
@Test
void testPostProcessBeforeInstantiationForConfigurationAnnotation() {
String beanName = "com.alibaba.nacos.sys.env.mock.MockAutoConfiguration$MockConfiguration";
when(context.containsBean(beanName)).thenReturn(true);
when(context.getBeanFactory()).thenReturn(beanFactory);
when(beanFactory.getBeanDefinition(beanName)).thenReturn(beanDefinition);
Class beanClass = MockAutoConfiguration.MockConfiguration.class;
MockAutoConfiguration.MockConfiguration existBean = new MockAutoConfiguration.MockConfiguration();
when(context.getBean(beanName)).thenReturn(existBean);
assertEquals(existBean, processor.postProcessBeforeInstantiation(beanClass, beanName));
}
@Test
void testPostProcessBeforeInstantiationForAutoConfigurationAnnotation() {
String beanName = "com.alibaba.nacos.sys.env.mock.MockAutoConfiguration";
when(context.containsBean(beanName)).thenReturn(true);
when(context.getBeanFactory()).thenReturn(beanFactory);
when(beanFactory.getBeanDefinition(beanName)).thenReturn(beanDefinition);
Class beanClass = MockAutoConfiguration.class;
MockAutoConfiguration existBean = new MockAutoConfiguration();
when(context.getBean(beanName)).thenReturn(existBean);
assertEquals(existBean, processor.postProcessBeforeInstantiation(beanClass, beanName));
}
@Test
void testPostProcessBeforeInstantiationForNormalBean() {
when(context.containsBean("testBean")).thenReturn(true);
when(context.getBeanFactory()).thenReturn(beanFactory);
when(beanFactory.getBeanDefinition("testBean")).thenReturn(beanDefinition);
Class beanClass = NacosDuplicateConfigurationBeanPostProcessor.class;
assertNull(processor.postProcessBeforeInstantiation(beanClass, "testBean"));
verify(context, never()).getBean("testBean");
}
} | NacosDuplicateConfigurationBeanPostProcessorTest |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/output/ReplayOutput.java | {
"start": 4407,
"end": 4683
} | class ____ extends Signal {
final int depth;
public Complete(int depth) {
this.depth = depth;
}
@Override
protected void replay(CommandOutput<?, ?, ?> target) {
target.complete(depth);
}
}
}
| Complete |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/FetchNotFoundException.java | {
"start": 363,
"end": 907
} | class ____ extends EntityNotFoundException {
private final String entityName;
private final Object identifier;
public FetchNotFoundException(String entityName, Object identifier) {
super(
String.format(
Locale.ROOT,
"Entity `%s` with identifier value `%s` does not exist",
entityName,
identifier
)
);
this.entityName = entityName;
this.identifier = identifier;
}
public String getEntityName() {
return entityName;
}
public Object getIdentifier() {
return identifier;
}
}
| FetchNotFoundException |
java | apache__camel | components/camel-jetty/src/generated/java/org/apache/camel/component/jetty12/JettyHttpEndpoint12Configurer.java | {
"start": 734,
"end": 15165
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
JettyHttpEndpoint12 target = (JettyHttpEndpoint12) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "async": target.setAsync(property(camelContext, boolean.class, value)); return true;
case "bridgeerrorhandler":
case "bridgeErrorHandler": target.setBridgeErrorHandler(property(camelContext, boolean.class, value)); return true;
case "chunked": target.setChunked(property(camelContext, boolean.class, value)); return true;
case "continuationtimeout":
case "continuationTimeout": target.setContinuationTimeout(property(camelContext, java.lang.Long.class, value)); return true;
case "disablestreamcache":
case "disableStreamCache": target.setDisableStreamCache(property(camelContext, boolean.class, value)); return true;
case "eagercheckcontentavailable":
case "eagerCheckContentAvailable": target.setEagerCheckContentAvailable(property(camelContext, boolean.class, value)); return true;
case "enablecors":
case "enableCORS": target.setEnableCORS(property(camelContext, boolean.class, value)); return true;
case "enablejmx":
case "enableJmx": target.setEnableJmx(property(camelContext, boolean.class, value)); return true;
case "enablemultipartfilter":
case "enableMultipartFilter": target.setEnableMultipartFilter(property(camelContext, boolean.class, value)); return true;
case "exceptionhandler":
case "exceptionHandler": target.setExceptionHandler(property(camelContext, org.apache.camel.spi.ExceptionHandler.class, value)); return true;
case "exchangepattern":
case "exchangePattern": target.setExchangePattern(property(camelContext, org.apache.camel.ExchangePattern.class, value)); return true;
case "filesizethreshold":
case "fileSizeThreshold": target.setFileSizeThreshold(property(camelContext, java.lang.Integer.class, value)); return true;
case "fileslocation":
case "filesLocation": target.setFilesLocation(property(camelContext, java.lang.String.class, value)); return true;
case "filterinitparameters":
case "filterInitParameters": target.setFilterInitParameters(property(camelContext, java.util.Map.class, value)); return true;
case "filters": target.setFilters(property(camelContext, java.util.List.class, value)); return true;
case "handlers": target.setHandlers(property(camelContext, java.util.List.class, value)); return true;
case "headerfilterstrategy":
case "headerFilterStrategy": target.setHeaderFilterStrategy(property(camelContext, org.apache.camel.spi.HeaderFilterStrategy.class, value)); return true;
case "httpbinding":
case "httpBinding": target.setHttpBinding(property(camelContext, org.apache.camel.http.common.HttpBinding.class, value)); return true;
case "httpmethodrestrict":
case "httpMethodRestrict": target.setHttpMethodRestrict(property(camelContext, java.lang.String.class, value)); return true;
case "idletimeout":
case "idleTimeout": target.setIdleTimeout(property(camelContext, long.class, value)); return true;
case "logexception":
case "logException": target.setLogException(property(camelContext, boolean.class, value)); return true;
case "maphttpmessagebody":
case "mapHttpMessageBody": target.setMapHttpMessageBody(property(camelContext, boolean.class, value)); return true;
case "maphttpmessageformurlencodedbody":
case "mapHttpMessageFormUrlEncodedBody": target.setMapHttpMessageFormUrlEncodedBody(property(camelContext, boolean.class, value)); return true;
case "maphttpmessageheaders":
case "mapHttpMessageHeaders": target.setMapHttpMessageHeaders(property(camelContext, boolean.class, value)); return true;
case "matchonuriprefix":
case "matchOnUriPrefix": target.setMatchOnUriPrefix(property(camelContext, boolean.class, value)); return true;
case "maxfilesize":
case "maxFileSize": target.setMaxFileSize(property(camelContext, java.lang.Long.class, value)); return true;
case "maxrequestsize":
case "maxRequestSize": target.setMaxRequestSize(property(camelContext, java.lang.Long.class, value)); return true;
case "multipartfilter":
case "multipartFilter": target.setMultipartFilter(property(camelContext, jakarta.servlet.Filter.class, value)); return true;
case "muteexception":
case "muteException": target.setMuteException(property(camelContext, boolean.class, value)); return true;
case "optionsenabled":
case "optionsEnabled": target.setOptionsEnabled(property(camelContext, boolean.class, value)); return true;
case "responsebuffersize":
case "responseBufferSize": target.setResponseBufferSize(property(camelContext, java.lang.Integer.class, value)); return true;
case "senddateheader":
case "sendDateHeader": target.setSendDateHeader(property(camelContext, boolean.class, value)); return true;
case "sendserverversion":
case "sendServerVersion": target.setSendServerVersion(property(camelContext, boolean.class, value)); return true;
case "sessionsupport":
case "sessionSupport": target.setSessionSupport(property(camelContext, boolean.class, value)); return true;
case "sslcontextparameters":
case "sslContextParameters": target.setSslContextParameters(property(camelContext, org.apache.camel.support.jsse.SSLContextParameters.class, value)); return true;
case "traceenabled":
case "traceEnabled": target.setTraceEnabled(property(camelContext, boolean.class, value)); return true;
case "transferexception":
case "transferException": target.setTransferException(property(camelContext, boolean.class, value)); return true;
case "usecontinuation":
case "useContinuation": target.setUseContinuation(property(camelContext, java.lang.Boolean.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "async": return boolean.class;
case "bridgeerrorhandler":
case "bridgeErrorHandler": return boolean.class;
case "chunked": return boolean.class;
case "continuationtimeout":
case "continuationTimeout": return java.lang.Long.class;
case "disablestreamcache":
case "disableStreamCache": return boolean.class;
case "eagercheckcontentavailable":
case "eagerCheckContentAvailable": return boolean.class;
case "enablecors":
case "enableCORS": return boolean.class;
case "enablejmx":
case "enableJmx": return boolean.class;
case "enablemultipartfilter":
case "enableMultipartFilter": return boolean.class;
case "exceptionhandler":
case "exceptionHandler": return org.apache.camel.spi.ExceptionHandler.class;
case "exchangepattern":
case "exchangePattern": return org.apache.camel.ExchangePattern.class;
case "filesizethreshold":
case "fileSizeThreshold": return java.lang.Integer.class;
case "fileslocation":
case "filesLocation": return java.lang.String.class;
case "filterinitparameters":
case "filterInitParameters": return java.util.Map.class;
case "filters": return java.util.List.class;
case "handlers": return java.util.List.class;
case "headerfilterstrategy":
case "headerFilterStrategy": return org.apache.camel.spi.HeaderFilterStrategy.class;
case "httpbinding":
case "httpBinding": return org.apache.camel.http.common.HttpBinding.class;
case "httpmethodrestrict":
case "httpMethodRestrict": return java.lang.String.class;
case "idletimeout":
case "idleTimeout": return long.class;
case "logexception":
case "logException": return boolean.class;
case "maphttpmessagebody":
case "mapHttpMessageBody": return boolean.class;
case "maphttpmessageformurlencodedbody":
case "mapHttpMessageFormUrlEncodedBody": return boolean.class;
case "maphttpmessageheaders":
case "mapHttpMessageHeaders": return boolean.class;
case "matchonuriprefix":
case "matchOnUriPrefix": return boolean.class;
case "maxfilesize":
case "maxFileSize": return java.lang.Long.class;
case "maxrequestsize":
case "maxRequestSize": return java.lang.Long.class;
case "multipartfilter":
case "multipartFilter": return jakarta.servlet.Filter.class;
case "muteexception":
case "muteException": return boolean.class;
case "optionsenabled":
case "optionsEnabled": return boolean.class;
case "responsebuffersize":
case "responseBufferSize": return java.lang.Integer.class;
case "senddateheader":
case "sendDateHeader": return boolean.class;
case "sendserverversion":
case "sendServerVersion": return boolean.class;
case "sessionsupport":
case "sessionSupport": return boolean.class;
case "sslcontextparameters":
case "sslContextParameters": return org.apache.camel.support.jsse.SSLContextParameters.class;
case "traceenabled":
case "traceEnabled": return boolean.class;
case "transferexception":
case "transferException": return boolean.class;
case "usecontinuation":
case "useContinuation": return java.lang.Boolean.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
JettyHttpEndpoint12 target = (JettyHttpEndpoint12) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "async": return target.isAsync();
case "bridgeerrorhandler":
case "bridgeErrorHandler": return target.isBridgeErrorHandler();
case "chunked": return target.isChunked();
case "continuationtimeout":
case "continuationTimeout": return target.getContinuationTimeout();
case "disablestreamcache":
case "disableStreamCache": return target.isDisableStreamCache();
case "eagercheckcontentavailable":
case "eagerCheckContentAvailable": return target.isEagerCheckContentAvailable();
case "enablecors":
case "enableCORS": return target.isEnableCORS();
case "enablejmx":
case "enableJmx": return target.isEnableJmx();
case "enablemultipartfilter":
case "enableMultipartFilter": return target.isEnableMultipartFilter();
case "exceptionhandler":
case "exceptionHandler": return target.getExceptionHandler();
case "exchangepattern":
case "exchangePattern": return target.getExchangePattern();
case "filesizethreshold":
case "fileSizeThreshold": return target.getFileSizeThreshold();
case "fileslocation":
case "filesLocation": return target.getFilesLocation();
case "filterinitparameters":
case "filterInitParameters": return target.getFilterInitParameters();
case "filters": return target.getFilters();
case "handlers": return target.getHandlers();
case "headerfilterstrategy":
case "headerFilterStrategy": return target.getHeaderFilterStrategy();
case "httpbinding":
case "httpBinding": return target.getHttpBinding();
case "httpmethodrestrict":
case "httpMethodRestrict": return target.getHttpMethodRestrict();
case "idletimeout":
case "idleTimeout": return target.getIdleTimeout();
case "logexception":
case "logException": return target.isLogException();
case "maphttpmessagebody":
case "mapHttpMessageBody": return target.isMapHttpMessageBody();
case "maphttpmessageformurlencodedbody":
case "mapHttpMessageFormUrlEncodedBody": return target.isMapHttpMessageFormUrlEncodedBody();
case "maphttpmessageheaders":
case "mapHttpMessageHeaders": return target.isMapHttpMessageHeaders();
case "matchonuriprefix":
case "matchOnUriPrefix": return target.isMatchOnUriPrefix();
case "maxfilesize":
case "maxFileSize": return target.getMaxFileSize();
case "maxrequestsize":
case "maxRequestSize": return target.getMaxRequestSize();
case "multipartfilter":
case "multipartFilter": return target.getMultipartFilter();
case "muteexception":
case "muteException": return target.isMuteException();
case "optionsenabled":
case "optionsEnabled": return target.isOptionsEnabled();
case "responsebuffersize":
case "responseBufferSize": return target.getResponseBufferSize();
case "senddateheader":
case "sendDateHeader": return target.isSendDateHeader();
case "sendserverversion":
case "sendServerVersion": return target.isSendServerVersion();
case "sessionsupport":
case "sessionSupport": return target.isSessionSupport();
case "sslcontextparameters":
case "sslContextParameters": return target.getSslContextParameters();
case "traceenabled":
case "traceEnabled": return target.isTraceEnabled();
case "transferexception":
case "transferException": return target.isTransferException();
case "usecontinuation":
case "useContinuation": return target.getUseContinuation();
default: return null;
}
}
@Override
public Object getCollectionValueType(Object target, String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "filterinitparameters":
case "filterInitParameters": return java.lang.String.class;
case "filters": return jakarta.servlet.Filter.class;
case "handlers": return org.eclipse.jetty.server.Handler.class;
default: return null;
}
}
}
| JettyHttpEndpoint12Configurer |
java | apache__kafka | clients/src/main/java/org/apache/kafka/common/utils/Exit.java | {
"start": 855,
"end": 1003
} | class ____ should be used instead of `System.exit()` and `Runtime.getRuntime().halt()` so that tests can
* easily change the behaviour.
*/
public | that |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/Hamlet.java | {
"start": 563977,
"end": 575466
} | class ____<T extends __> extends EImp<T> implements HamletSpec.STRONG {
public STRONG(String name, T parent, EnumSet<EOpt> opts) {
super(name, parent, opts);
}
@Override
public STRONG<T> $id(String value) {
addAttr("id", value);
return this;
}
@Override
public STRONG<T> $class(String value) {
addAttr("class", value);
return this;
}
@Override
public STRONG<T> $title(String value) {
addAttr("title", value);
return this;
}
@Override
public STRONG<T> $style(String value) {
addAttr("style", value);
return this;
}
@Override
public STRONG<T> $lang(String value) {
addAttr("lang", value);
return this;
}
@Override
public STRONG<T> $dir(Dir value) {
addAttr("dir", value);
return this;
}
@Override
public STRONG<T> $onclick(String value) {
addAttr("onclick", value);
return this;
}
@Override
public STRONG<T> $ondblclick(String value) {
addAttr("ondblclick", value);
return this;
}
@Override
public STRONG<T> $onmousedown(String value) {
addAttr("onmousedown", value);
return this;
}
@Override
public STRONG<T> $onmouseup(String value) {
addAttr("onmouseup", value);
return this;
}
@Override
public STRONG<T> $onmouseover(String value) {
addAttr("onmouseover", value);
return this;
}
@Override
public STRONG<T> $onmousemove(String value) {
addAttr("onmousemove", value);
return this;
}
@Override
public STRONG<T> $onmouseout(String value) {
addAttr("onmouseout", value);
return this;
}
@Override
public STRONG<T> $onkeypress(String value) {
addAttr("onkeypress", value);
return this;
}
@Override
public STRONG<T> $onkeydown(String value) {
addAttr("onkeydown", value);
return this;
}
@Override
public STRONG<T> $onkeyup(String value) {
addAttr("onkeyup", value);
return this;
}
@Override
public STRONG<T> __(Object... lines) {
_p(true, lines);
return this;
}
@Override
public STRONG<T> _r(Object... lines) {
_p(false, lines);
return this;
}
@Override
public B<STRONG<T>> b() {
closeAttrs();
return b_(this, true);
}
@Override
public STRONG<T> b(String cdata) {
return b().__(cdata).__();
}
@Override
public STRONG<T> b(String selector, String cdata) {
return setSelector(b(), selector).__(cdata).__();
}
@Override
public I<STRONG<T>> i() {
closeAttrs();
return i_(this, true);
}
@Override
public STRONG<T> i(String cdata) {
return i().__(cdata).__();
}
@Override
public STRONG<T> i(String selector, String cdata) {
return setSelector(i(), selector).__(cdata).__();
}
@Override
public SMALL<STRONG<T>> small() {
closeAttrs();
return small_(this, true);
}
@Override
public STRONG<T> small(String cdata) {
return small().__(cdata).__();
}
@Override
public STRONG<T> small(String selector, String cdata) {
return setSelector(small(), selector).__(cdata).__();
}
@Override
public STRONG<T> em(String cdata) {
return em().__(cdata).__();
}
@Override
public EM<STRONG<T>> em() {
closeAttrs();
return em_(this, true);
}
@Override
public STRONG<T> em(String selector, String cdata) {
return setSelector(em(), selector).__(cdata).__();
}
@Override
public STRONG<STRONG<T>> strong() {
closeAttrs();
return strong_(this, true);
}
@Override
public STRONG<T> strong(String cdata) {
return strong().__(cdata).__();
}
@Override
public STRONG<T> strong(String selector, String cdata) {
return setSelector(strong(), selector).__(cdata).__();
}
@Override
public DFN<STRONG<T>> dfn() {
closeAttrs();
return dfn_(this, true);
}
@Override
public STRONG<T> dfn(String cdata) {
return dfn().__(cdata).__();
}
@Override
public STRONG<T> dfn(String selector, String cdata) {
return setSelector(dfn(), selector).__(cdata).__();
}
@Override
public CODE<STRONG<T>> code() {
closeAttrs();
return code_(this, true);
}
@Override
public STRONG<T> code(String cdata) {
return code().__(cdata).__();
}
@Override
public STRONG<T> code(String selector, String cdata) {
return setSelector(code(), selector).__(cdata).__();
}
@Override
public STRONG<T> samp(String cdata) {
return samp().__(cdata).__();
}
@Override
public SAMP<STRONG<T>> samp() {
closeAttrs();
return samp_(this, true);
}
@Override
public STRONG<T> samp(String selector, String cdata) {
return setSelector(samp(), selector).__(cdata).__();
}
@Override
public KBD<STRONG<T>> kbd() {
closeAttrs();
return kbd_(this, true);
}
@Override
public STRONG<T> kbd(String cdata) {
return kbd().__(cdata).__();
}
@Override
public STRONG<T> kbd(String selector, String cdata) {
return setSelector(kbd(), selector).__(cdata).__();
}
@Override
public VAR<STRONG<T>> var() {
closeAttrs();
return var_(this, true);
}
@Override
public STRONG<T> var(String cdata) {
return var().__(cdata).__();
}
@Override
public STRONG<T> var(String selector, String cdata) {
return setSelector(var(), selector).__(cdata).__();
}
@Override
public CITE<STRONG<T>> cite() {
closeAttrs();
return cite_(this, true);
}
@Override
public STRONG<T> cite(String cdata) {
return cite().__(cdata).__();
}
@Override
public STRONG<T> cite(String selector, String cdata) {
return setSelector(cite(), selector).__(cdata).__();
}
@Override
public ABBR<STRONG<T>> abbr() {
closeAttrs();
return abbr_(this, true);
}
@Override
public STRONG<T> abbr(String cdata) {
return abbr().__(cdata).__();
}
@Override
public STRONG<T> abbr(String selector, String cdata) {
return setSelector(abbr(), selector).__(cdata).__();
}
@Override
public A<STRONG<T>> a() {
closeAttrs();
return a_(this, true);
}
@Override
public A<STRONG<T>> a(String selector) {
return setSelector(a(), selector);
}
@Override
public STRONG<T> a(String href, String anchorText) {
return a().$href(href).__(anchorText).__();
}
@Override
public STRONG<T> a(String selector, String href, String anchorText) {
return setSelector(a(), selector).$href(href).__(anchorText).__();
}
@Override
public IMG<STRONG<T>> img() {
closeAttrs();
return img_(this, true);
}
@Override
public STRONG<T> img(String src) {
return img().$src(src).__();
}
@Override
public OBJECT<STRONG<T>> object() {
closeAttrs();
return object_(this, true);
}
@Override
public OBJECT<STRONG<T>> object(String selector) {
return setSelector(object(), selector);
}
@Override
public SUB<STRONG<T>> sub() {
closeAttrs();
return sub_(this, true);
}
@Override
public STRONG<T> sub(String cdata) {
return sub().__(cdata).__();
}
@Override
public STRONG<T> sub(String selector, String cdata) {
return setSelector(sub(), selector).__(cdata).__();
}
@Override
public SUP<STRONG<T>> sup() {
closeAttrs();
return sup_(this, true);
}
@Override
public STRONG<T> sup(String cdata) {
return sup().__(cdata).__();
}
@Override
public STRONG<T> sup(String selector, String cdata) {
return setSelector(sup(), selector).__(cdata).__();
}
@Override
public MAP<STRONG<T>> map() {
closeAttrs();
return map_(this, true);
}
@Override
public MAP<STRONG<T>> map(String selector) {
return setSelector(map(), selector);
}
@Override
public STRONG<T> q(String cdata) {
return q().__(cdata).__();
}
@Override
public STRONG<T> q(String selector, String cdata) {
return setSelector(q(), selector).__(cdata).__();
}
@Override
public Q<STRONG<T>> q() {
closeAttrs();
return q_(this, true);
}
@Override
public BR<STRONG<T>> br() {
closeAttrs();
return br_(this, true);
}
@Override
public STRONG<T> br(String selector) {
return setSelector(br(), selector).__();
}
@Override
public BDO<STRONG<T>> bdo() {
closeAttrs();
return bdo_(this, true);
}
@Override
public STRONG<T> bdo(Dir dir, String cdata) {
return bdo().$dir(dir).__(cdata).__();
}
@Override
public SPAN<STRONG<T>> span() {
closeAttrs();
return span_(this, true);
}
@Override
public STRONG<T> span(String cdata) {
return span().__(cdata).__();
}
@Override
public STRONG<T> span(String selector, String cdata) {
return setSelector(span(), selector).__(cdata).__();
}
@Override
public SCRIPT<STRONG<T>> script() {
closeAttrs();
return script_(this, true);
}
@Override
public STRONG<T> script(String src) {
return setScriptSrc(script(), src).__();
}
@Override
public INS<STRONG<T>> ins() {
closeAttrs();
return ins_(this, true);
}
@Override
public STRONG<T> ins(String cdata) {
return ins().__(cdata).__();
}
@Override
public DEL<STRONG<T>> del() {
closeAttrs();
return del_(this, true);
}
@Override
public STRONG<T> del(String cdata) {
return del().__(cdata).__();
}
@Override
public LABEL<STRONG<T>> label() {
closeAttrs();
return label_(this, true);
}
@Override
public STRONG<T> label(String forId, String cdata) {
return label().$for(forId).__(cdata).__();
}
@Override
public INPUT<STRONG<T>> input(String selector) {
return setSelector(input(), selector);
}
@Override
public INPUT<STRONG<T>> input() {
closeAttrs();
return input_(this, true);
}
@Override
public SELECT<STRONG<T>> select() {
closeAttrs();
return select_(this, true);
}
@Override
public SELECT<STRONG<T>> select(String selector) {
return setSelector(select(), selector);
}
@Override
public TEXTAREA<STRONG<T>> textarea(String selector) {
return setSelector(textarea(), selector);
}
@Override
public TEXTAREA<STRONG<T>> textarea() {
closeAttrs();
return textarea_(this, true);
}
@Override
public STRONG<T> textarea(String selector, String cdata) {
return setSelector(textarea(), selector).__(cdata).__();
}
@Override
public BUTTON<STRONG<T>> button() {
closeAttrs();
return button_(this, true);
}
@Override
public BUTTON<STRONG<T>> button(String selector) {
return setSelector(button(), selector);
}
@Override
public STRONG<T> button(String selector, String cdata) {
return setSelector(button(), selector).__(cdata).__();
}
}
public | STRONG |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/VariableNameSameAsType.java | {
"start": 1541,
"end": 2578
} | class ____ extends BugChecker implements VariableTreeMatcher {
@Override
public Description matchVariable(VariableTree varTree, VisitorState state) {
Name varName = varTree.getName();
Matcher<VariableTree> nameSameAsType =
Matchers.variableType(
(typeTree, s) -> {
Symbol typeSymbol = ASTHelpers.getSymbol(typeTree);
if (typeSymbol != null) {
return typeSymbol.getSimpleName().contentEquals(varName);
}
return false;
});
if (!nameSameAsType.matches(varTree, state)) {
return Description.NO_MATCH;
}
String message =
String.format(
"Variable named %s has the type %s. Calling methods using \"%s.something\" are "
+ "difficult to distinguish between static and instance methods.",
varName, SuggestedFixes.prettyType(getType(varTree), /* state= */ null), varName);
return buildDescription(varTree).setMessage(message).build();
}
}
| VariableNameSameAsType |
java | google__guice | core/test/com/google/inject/EagerSingletonTest.java | {
"start": 904,
"end": 2821
} | class ____ extends TestCase {
@Override
public void setUp() {
A.instanceCount = 0;
B.instanceCount = 0;
C.instanceCount = 0;
}
public void testJustInTimeEagerSingletons() {
Guice.createInjector(
Stage.PRODUCTION,
new AbstractModule() {
@Override
protected void configure() {
// create a just-in-time binding for A
getProvider(A.class);
// create a just-in-time binding for C
requestInjection(
new Object() {
@Inject
void inject(Injector injector) {
injector.getInstance(C.class);
}
});
}
});
assertEquals(1, A.instanceCount);
assertEquals(
"Singletons discovered when creating singletons should not be built eagerly",
0,
B.instanceCount);
assertEquals(1, C.instanceCount);
}
public void testJustInTimeSingletonsAreNotEager() {
Injector injector = Guice.createInjector(Stage.PRODUCTION);
injector.getProvider(B.class);
assertEquals(0, B.instanceCount);
}
public void testChildEagerSingletons() {
Injector parent = Guice.createInjector(Stage.PRODUCTION);
parent.createChildInjector(
new AbstractModule() {
@Override
protected void configure() {
bind(D.class).to(C.class);
}
});
assertEquals(1, C.instanceCount);
}
// there used to be a bug that caused a concurrent modification exception if jit bindings were
// loaded during eager singleton creation due to failur to apply the lock when iterating over
// all bindings.
public void testJustInTimeEagerSingletons_multipleThreads() throws Exception {
// in order to make the data race more likely we need a lot of jit bindings. The easiest thing
// is just to 'copy' out | EagerSingletonTest |
java | playframework__playframework | web/play-filters-helpers/src/main/java/play/filters/components/RedirectHttpsComponents.java | {
"start": 520,
"end": 917
} | interface ____ extends ConfigurationComponents {
Environment environment();
default RedirectHttpsConfiguration redirectHttpsConfiguration() {
return new RedirectHttpsConfigurationProvider(configuration(), environment().asScala()).get();
}
default RedirectHttpsFilter redirectHttpsFilter() {
return new RedirectHttpsFilter(redirectHttpsConfiguration());
}
}
| RedirectHttpsComponents |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/InterruptedExceptionSwallowedTest.java | {
"start": 2813,
"end": 3492
} | class ____ {
void test(Future<?> future) {
try {
try {
future.get();
} catch (InterruptedException e) {
throw e;
}
// BUG: Diagnostic contains:
} catch (Exception e) {
throw new IllegalStateException(e);
}
}
}
""")
.doTest();
}
@Test
public void thrownByClose_throwsClauseTooBroad() {
compilationHelper
.addSourceLines(
"Test.java",
"""
import java.util.concurrent.Future;
| Test |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/rest/client/reactive/tls/TlsConfigReloadKeystoreTest.java | {
"start": 1740,
"end": 3368
} | class ____ {
private static final int PORT = 63806;
private static final String EXPECTED_RESPONSE = "HelloWorld";
private static HttpServer testServer;
private static Vertx testVertx;
private static final File temp = new File("target/test-certificates-" + UUID.randomUUID());
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.setArchiveProducer(
() -> ShrinkWrap.create(JavaArchive.class)
.addClasses(Client.class, SSLTools.class))
.overrideRuntimeConfigKey("loc", temp.getAbsolutePath())
.overrideRuntimeConfigKey("quarkus.rest-client.my-client.tls-configuration-name", "my-tls-client")
.overrideRuntimeConfigKey("quarkus.tls.my-tls-client.key-store.p12.path", temp.getAbsolutePath() + "/tls.p12")
.overrideRuntimeConfigKey("quarkus.tls.my-tls-client.key-store.p12.password", "password")
.overrideRuntimeConfigKey("quarkus.rest-client.my-client.url", "https://127.0.0.1:" + PORT)
.overrideRuntimeConfigKey("quarkus.tls.my-tls-client.trust-all", "true")
.setBeforeAllCustomizer(() -> {
try {
temp.mkdirs();
Files.copy(new File("target/certs/wrong-test-reload-client-keystore.p12").toPath(),
new File(temp, "/tls.p12").toPath());
} catch (Exception e) {
throw new RuntimeException(e);
}
});
@RegisterRestClient(configKey = "my-client")
private | TlsConfigReloadKeystoreTest |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/core/publisher/FluxBufferPredicate.java | {
"start": 3529,
"end": 11385
} | class ____<T, C extends Collection<? super T>>
extends AbstractQueue<C>
implements ConditionalSubscriber<T>, InnerOperator<T, C>, BooleanSupplier {
final CoreSubscriber<? super C> actual;
final Supplier<C> bufferSupplier;
final Mode mode;
final Predicate<? super T> predicate;
@Nullable C buffer;
boolean done;
volatile boolean fastpath;
volatile long requestedBuffers;
@SuppressWarnings("rawtypes")
static final AtomicLongFieldUpdater<BufferPredicateSubscriber> REQUESTED_BUFFERS =
AtomicLongFieldUpdater.newUpdater(BufferPredicateSubscriber.class,
"requestedBuffers");
volatile long requestedFromSource;
@SuppressWarnings("rawtypes")
static final AtomicLongFieldUpdater<BufferPredicateSubscriber> REQUESTED_FROM_SOURCE =
AtomicLongFieldUpdater.newUpdater(BufferPredicateSubscriber.class,
"requestedFromSource");
@SuppressWarnings("NotNullFieldNotInitialized") // s initialized in onSubscribe
volatile Subscription s;
// https://github.com/uber/NullAway/issues/1157
@SuppressWarnings({"rawtypes", "DataFlowIssue"})
static final AtomicReferenceFieldUpdater<BufferPredicateSubscriber, @Nullable Subscription> S =
AtomicReferenceFieldUpdater.newUpdater(BufferPredicateSubscriber.class, Subscription.class, "s");
BufferPredicateSubscriber(CoreSubscriber<? super C> actual, C initialBuffer,
Supplier<C> bufferSupplier, Predicate<? super T> predicate, Mode mode) {
this.actual = actual;
this.buffer = initialBuffer;
this.bufferSupplier = bufferSupplier;
this.predicate = predicate;
this.mode = mode;
}
@Override
public void request(long n) {
if (Operators.validate(n)) {
if (n == Long.MAX_VALUE) {
// here we request everything from the source. switching to
// fastpath will avoid unnecessary request(1) during filling
fastpath = true;
REQUESTED_BUFFERS.set(this, Long.MAX_VALUE);
REQUESTED_FROM_SOURCE.set(this, Long.MAX_VALUE);
s.request(Long.MAX_VALUE);
}
else {
// Requesting from source may have been interrupted if downstream
// received enough buffer (requested == 0), so this new request for
// buffer should resume progressive filling from upstream. We can
// directly request the same as the number of needed buffers (if
// buffers turn out 1-sized then we'll have everything, otherwise
// we'll continue requesting one by one)
if (!DrainUtils.postCompleteRequest(n,
actual,
this, REQUESTED_BUFFERS,
this,
this)) {
Operators.addCap(REQUESTED_FROM_SOURCE, this, n);
s.request(n);
}
}
}
}
@Override
public void cancel() {
C b;
synchronized (this) {
b = buffer;
buffer = null;
Operators.onDiscardMultiple(b, actual.currentContext());
}
cleanup();
Operators.terminate(S, this);
}
@Override
public void onSubscribe(Subscription s) {
if (Operators.setOnce(S, this, s)) {
actual.onSubscribe(this);
}
}
@Override
public void onNext(T t) {
if (!tryOnNext(t)) {
s.request(1);
}
}
@Override
public boolean tryOnNext(T t) {
if (done) {
Operators.onNextDropped(t, actual.currentContext());
return true;
}
boolean match;
try {
match = predicate.test(t);
}
catch (Throwable e) {
Context ctx = actual.currentContext();
onError(Operators.onOperatorError(s, e, t, ctx)); //will discard the buffer
Operators.onDiscard(t, ctx);
return true;
}
if (mode == Mode.UNTIL && match) {
if (cancelledWhileAdding(t)) {
return true;
}
onNextNewBuffer();
}
else if (mode == Mode.UNTIL_CUT_BEFORE && match) {
onNextNewBuffer();
if (cancelledWhileAdding(t)) {
return true;
}
}
else if (mode == Mode.WHILE && !match) {
onNextNewBuffer();
}
else {
if (cancelledWhileAdding(t)) {
return true;
}
}
if (fastpath) {
return true;
}
boolean isNotExpectingFromSource = REQUESTED_FROM_SOURCE.decrementAndGet(this) == 0;
boolean isStillExpectingBuffer = REQUESTED_BUFFERS.get(this) > 0;
if (isNotExpectingFromSource && isStillExpectingBuffer
&& REQUESTED_FROM_SOURCE.compareAndSet(this, 0, 1)) {
return false; //explicitly mark as "needing more", either in attached conditional or onNext()
}
return true;
}
boolean cancelledWhileAdding(T value) {
synchronized (this) {
C b = buffer;
if (b == null || s == Operators.cancelledSubscription()) {
Operators.onDiscard(value, actual.currentContext());
return true;
}
else {
b.add(value);
return false;
}
}
}
@Nullable C triggerNewBuffer() {
C b;
synchronized (this) {
b = buffer;
if (b == null || s == Operators.cancelledSubscription()) {
return null;
}
}
if (b.isEmpty()) {
//emit nothing and we'll reuse the same buffer
return null;
}
//we'll create a new buffer
C c;
try {
c = Objects.requireNonNull(bufferSupplier.get(),
"The bufferSupplier returned a null buffer");
}
catch (Throwable e) {
onError(Operators.onOperatorError(s, e, actual.currentContext()));
return null;
}
synchronized (this) {
if (buffer == null) {
return null;
}
buffer = c;
}
return b;
}
private void onNextNewBuffer() {
C b = triggerNewBuffer();
if (b != null) {
if (fastpath) {
actual.onNext(b);
return;
}
long r = REQUESTED_BUFFERS.getAndDecrement(this);
if (r > 0) {
actual.onNext(b);
return;
}
cancel();
actual.onError(Exceptions.failWithOverflow("Could not emit buffer due to lack of requests"));
}
}
@Override
public CoreSubscriber<? super C> actual() {
return actual;
}
@Override
public void onError(Throwable t) {
if (done) {
Operators.onErrorDropped(t, actual.currentContext());
return;
}
done = true;
C b;
synchronized (this) {
b = buffer;
buffer = null;
}
cleanup();
//safe to discard the buffer outside synchronized block
//since onNext MUST NOT happen in parallel with onError
Operators.onDiscardMultiple(b, actual.currentContext());
actual.onError(t);
}
@Override
public void onComplete() {
if (done) {
return;
}
done = true;
cleanup();
DrainUtils.postComplete(actual, this, REQUESTED_BUFFERS, this, this);
}
void cleanup() {
// necessary cleanup if predicate contains a state
if (predicate instanceof Disposable) {
((Disposable) predicate).dispose();
}
}
@Override
public @Nullable Object scanUnsafe(Attr key) {
if (key == Attr.PARENT) return s;
if (key == Attr.TERMINATED) return done;
if (key == Attr.CANCELLED) return getAsBoolean();
if (key == Attr.CAPACITY) {
C b = buffer;
return b != null ? b.size() : 0;
}
if (key == Attr.REQUESTED_FROM_DOWNSTREAM) return requestedBuffers;
if (key == Attr.RUN_STYLE) return Attr.RunStyle.SYNC;
return InnerOperator.super.scanUnsafe(key);
}
@Override
public boolean getAsBoolean() {
return s == Operators.cancelledSubscription();
}
@Override
public Iterator<C> iterator() {
if (isEmpty()) {
return Collections.emptyIterator();
}
return Collections.singleton(buffer).iterator();
}
@Override
public boolean offer(C objects) {
throw new IllegalArgumentException();
}
@Override
public @Nullable C poll() {
C b = buffer;
if (b != null && !b.isEmpty()) {
synchronized (this) {
buffer = null;
}
return b;
}
return null;
}
@Override
public @Nullable C peek() {
return buffer;
}
@Override
public int size() {
C b = buffer;
return b == null || b.isEmpty() ? 0 : 1;
}
@Override
public String toString() {
return "FluxBufferPredicate";
}
}
static | BufferPredicateSubscriber |
java | quarkusio__quarkus | independent-projects/arc/processor/src/main/java/io/quarkus/arc/processor/ClientProxyGenerator.java | {
"start": 13346,
"end": 15238
} | interface ____.
if (Methods.isObjectToString(method)) {
// Always use invokevirtual and the original descriptor for java.lang.Object#toString()
ret = bc.invokeVirtual(originalMethodDesc, delegate.get(), params);
} else if (isInterface) {
// make sure we invoke the method upon the provider type, i.e. don't use the original method descriptor
MethodDesc virtualMethod = InterfaceMethodDesc.of(providerClassDesc,
originalMethodDesc.name(), originalMethodDesc.type());
ret = bc.invokeInterface(virtualMethod, delegate.get(), params);
} else if (isReflectionFallbackNeeded(method, targetPackage)) {
// Reflection fallback
reflectionRegistration.registerMethod(method);
Expr paramTypes = bc.newArray(Class.class, method.parameterTypes()
.stream()
.map(paramType -> Const.of(classDescOf(paramType)))
.toList());
ret = bc.invokeStatic(MethodDescs.REFLECTIONS_INVOKE_METHOD,
Const.of(classDescOf(method.declaringClass())), Const.of(method.name()),
paramTypes, delegate.get(), bc.newArray(Object.class, params));
if (method.returnType().kind() == Type.Kind.VOID) {
ret = Const.ofVoid();
}
} else {
// make sure we do not use the original method descriptor as it could point to
// a default | methods |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/DjlComponentBuilderFactory.java | {
"start": 3995,
"end": 4746
} | class ____
extends AbstractComponentBuilder<DJLComponent>
implements DjlComponentBuilder {
@Override
protected DJLComponent buildConcreteComponent() {
return new DJLComponent();
}
@Override
protected boolean setPropertyOnComponent(
Component component,
String name,
Object value) {
switch (name) {
case "lazyStartProducer": ((DJLComponent) component).setLazyStartProducer((boolean) value); return true;
case "autowiredEnabled": ((DJLComponent) component).setAutowiredEnabled((boolean) value); return true;
default: return false;
}
}
}
} | DjlComponentBuilderImpl |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/core/ResolvableType.java | {
"start": 58563,
"end": 59024
} | class ____ implements VariableResolver {
private final ResolvableType source;
DefaultVariableResolver(ResolvableType resolvableType) {
this.source = resolvableType;
}
@Override
public @Nullable ResolvableType resolveVariable(TypeVariable<?> variable) {
return this.source.resolveVariable(variable);
}
@Override
public Object getSource() {
return this.source;
}
}
@SuppressWarnings("serial")
private static | DefaultVariableResolver |
java | spring-projects__spring-boot | module/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/endpoint/jmx/EndpointMBean.java | {
"start": 3926,
"end": 6029
} | class ____, ignore it and proceed
}
}
return null;
}
private @Nullable Object invoke(JmxOperation operation, Object[] params)
throws MBeanException, ReflectionException {
try {
String[] parameterNames = operation.getParameters()
.stream()
.map(JmxOperationParameter::getName)
.toArray(String[]::new);
Map<String, Object> arguments = getArguments(parameterNames, params);
InvocationContext context = new InvocationContext(SecurityContext.NONE, arguments);
Object result = operation.invoke(context);
if (REACTOR_PRESENT) {
result = ReactiveHandler.handle(result);
}
return this.responseMapper.mapResponse(result);
}
catch (InvalidEndpointRequestException ex) {
throw new ReflectionException(new IllegalArgumentException(ex.getMessage()), ex.getMessage());
}
catch (Exception ex) {
throw new MBeanException(translateIfNecessary(ex), ex.getMessage());
}
}
private Exception translateIfNecessary(Exception exception) {
if (exception.getClass().getName().startsWith("java.")) {
return exception;
}
return new IllegalStateException(exception.getMessage());
}
private Map<String, Object> getArguments(String[] parameterNames, Object[] params) {
Map<String, Object> arguments = new HashMap<>();
for (int i = 0; i < params.length; i++) {
arguments.put(parameterNames[i], params[i]);
}
return arguments;
}
@Override
public Object getAttribute(String attribute)
throws AttributeNotFoundException, MBeanException, ReflectionException {
throw new AttributeNotFoundException("EndpointMBeans do not support attributes");
}
@Override
public void setAttribute(Attribute attribute)
throws AttributeNotFoundException, InvalidAttributeValueException, MBeanException, ReflectionException {
throw new AttributeNotFoundException("EndpointMBeans do not support attributes");
}
@Override
public AttributeList getAttributes(String[] attributes) {
return new AttributeList();
}
@Override
public AttributeList setAttributes(AttributeList attributes) {
return new AttributeList();
}
private static final | loader |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/JCacheEndpointBuilderFactory.java | {
"start": 43515,
"end": 45038
} | interface ____
extends
JCacheEndpointConsumerBuilder,
JCacheEndpointProducerBuilder {
default AdvancedJCacheEndpointBuilder advanced() {
return (AdvancedJCacheEndpointBuilder) this;
}
/**
* The Properties for the javax.cache.spi.CachingProvider to create the
* CacheManager.
*
* The option is a: <code>java.util.Properties</code> type.
*
* Group: common
*
* @param cacheConfigurationProperties the value to set
* @return the dsl builder
*/
default JCacheEndpointBuilder cacheConfigurationProperties(Properties cacheConfigurationProperties) {
doSetProperty("cacheConfigurationProperties", cacheConfigurationProperties);
return this;
}
/**
* The Properties for the javax.cache.spi.CachingProvider to create the
* CacheManager.
*
* The option will be converted to a <code>java.util.Properties</code>
* type.
*
* Group: common
*
* @param cacheConfigurationProperties the value to set
* @return the dsl builder
*/
default JCacheEndpointBuilder cacheConfigurationProperties(String cacheConfigurationProperties) {
doSetProperty("cacheConfigurationProperties", cacheConfigurationProperties);
return this;
}
/**
* The fully qualified | JCacheEndpointBuilder |
java | micronaut-projects__micronaut-core | core-processor/src/main/java/io/micronaut/inject/writer/BeanDefinitionVisitor.java | {
"start": 9641,
"end": 12671
} | class ____ interface.
* @param methodElement The method element
* @param visitorContext The visitor context
* @return The index of a new method
*/
int visitExecutableMethod(TypedElement declaringBean,
MethodElement methodElement,
VisitorContext visitorContext);
/**
* Visits a field injection point.
*
* @param declaringType The declaring type. Either a Class or a string representing the name of the type
* @param fieldElement The field element
* @param requiresReflection Whether accessing the field requires reflection
* @param visitorContext The visitor context
*/
void visitFieldInjectionPoint(TypedElement declaringType,
FieldElement fieldElement,
boolean requiresReflection,
VisitorContext visitorContext);
/**
* Visits an annotation injection point.
*
* @param annotationMemberBeanType The type of the injected bean
* @param annotationMemberProperty Required property of the injected bean
* @param requiredValue Required value of the bean property for the bean to be loaded
* @param notEqualsValue The bean property value which should not be equal to present value for the bean to
* be loaded
*/
void visitAnnotationMemberPropertyInjectionPoint(TypedElement annotationMemberBeanType,
String annotationMemberProperty,
@Nullable String requiredValue,
@Nullable String notEqualsValue);
/**
* Visits a field injection point.
*
* @param declaringType The declaring type. Either a Class or a string representing the name of the type
* @param fieldElement The field element
* @param requiresReflection Whether accessing the field requires reflection
* @param isOptional Is the value optional
*/
void visitFieldValue(TypedElement declaringType,
FieldElement fieldElement,
boolean requiresReflection,
boolean isOptional);
/**
* @return The package name of the bean
*/
String getPackageName();
/**
* @return The short name of the bean
*/
String getBeanSimpleName();
/**
* @return The annotation metadata
*/
AnnotationMetadata getAnnotationMetadata();
/**
* Begin defining a configuration builder.
*
* @param type The type of the builder
* @param field The name of the field that represents the builder
* @param annotationMetadata The annotation metadata associated with the field
* @param isInterface Whether the builder type is an | or |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsJobBlock.java | {
"start": 5591,
"end": 7175
} | class ____ extends JobHistory {
private final int maxAllowedTaskNum;
public JobHistoryStubWithAllOversizeJobs(int maxAllowedTaskNum) {
this.maxAllowedTaskNum = maxAllowedTaskNum;
}
@Override
protected HistoryFileManager createHistoryFileManager() {
HistoryFileManager historyFileManager;
try {
HistoryFileInfo historyFileInfo =
createUnparsedJobHistoryFileInfo(maxAllowedTaskNum);
historyFileManager = mock(HistoryFileManager.class);
when(historyFileManager.getFileInfo(any(JobId.class))).thenReturn(
historyFileInfo);
} catch (IOException ex) {
// this should never happen
historyFileManager = super.createHistoryFileManager();
}
return historyFileManager;
}
private static HistoryFileInfo createUnparsedJobHistoryFileInfo(
int maxAllowedTaskNum) throws IOException {
HistoryFileInfo fileInfo = mock(HistoryFileInfo.class);
// create an instance of UnparsedJob for a large job
UnparsedJob unparsedJob = mock(UnparsedJob.class);
when(unparsedJob.getMaxTasksAllowed()).thenReturn(maxAllowedTaskNum);
when(unparsedJob.getTotalMaps()).thenReturn(maxAllowedTaskNum);
when(unparsedJob.getTotalReduces()).thenReturn(maxAllowedTaskNum);
when(fileInfo.loadJob()).thenReturn(unparsedJob);
return fileInfo;
}
}
/**
* A JobHistory stub that treats all jobs as normal size and therefore will
* return a CompletedJob on HistoryFileInfo.loadJob().
*/
static | JobHistoryStubWithAllOversizeJobs |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/processor/internals/TaskExecutionMetadata.java | {
"start": 1409,
"end": 1466
} | class ____
* shared between all StreamThreads.
*/
public | is |
java | quarkusio__quarkus | integration-tests/devtools-registry-client/src/test/java/io/quarkus/registry/RegistryConfigIT.java | {
"start": 115,
"end": 170
} | class ____ extends RegistryConfigTest {
}
| RegistryConfigIT |
java | apache__kafka | streams/src/test/java/org/apache/kafka/streams/KafkaStreamsTest.java | {
"start": 17631,
"end": 93633
} | class ____ extends KafkaStreams {
KafkaStreamsWithTerminableThread(final Topology topology,
final Properties props,
final KafkaClientSupplier clientSupplier,
final Time time) {
super(topology, props, clientSupplier, time);
}
KafkaStreamsWithTerminableThread(final Topology topology,
final Properties props,
final KafkaClientSupplier clientSupplier) {
super(topology, props, clientSupplier);
}
KafkaStreamsWithTerminableThread(final Topology topology,
final StreamsConfig applicationConfigs) {
super(topology, applicationConfigs);
}
KafkaStreamsWithTerminableThread(final Topology topology,
final StreamsConfig applicationConfigs,
final KafkaClientSupplier clientSupplier) {
super(topology, applicationConfigs, clientSupplier);
}
@Override
public void close() {
terminableThreadBlockingLatch.countDown();
super.close();
}
}
@Test
public void testShouldTransitToNotRunningIfCloseRightAfterCreated() {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
streams.close();
assertEquals(KafkaStreams.State.NOT_RUNNING, streams.state());
}
}
@Test
public void shouldInitializeTasksForLocalStateOnStart() {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
try (final MockedConstruction<StateDirectory> constructed = mockConstruction(StateDirectory.class,
(mock, context) -> when(mock.initializeProcessId()).thenReturn(UUID.randomUUID()))) {
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
assertEquals(1, constructed.constructed().size());
final StateDirectory stateDirectory = constructed.constructed().get(0);
verify(stateDirectory, times(0)).initializeStartupTasks(any(), any(), any());
streams.start();
verify(stateDirectory, times(1)).initializeStartupTasks(any(), any(), any());
}
}
}
@Test
public void shouldCloseStartupTasksAfterFirstRebalance() throws Exception {
prepareStreams();
final AtomicReference<StreamThread.State> state1 = prepareStreamThread(streamThreadOne, 1);
final AtomicReference<StreamThread.State> state2 = prepareStreamThread(streamThreadTwo, 2);
prepareThreadState(streamThreadOne, state1);
prepareThreadState(streamThreadTwo, state2);
try (final MockedConstruction<StateDirectory> constructed = mockConstruction(StateDirectory.class,
(mock, context) -> when(mock.initializeProcessId()).thenReturn(UUID.randomUUID()))) {
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
assertEquals(1, constructed.constructed().size());
final StateDirectory stateDirectory = constructed.constructed().get(0);
streams.setStateListener(streamsStateListener);
streams.start();
waitForCondition(() -> streams.state() == State.RUNNING, "Streams never started.");
verify(stateDirectory, times(1)).closeStartupTasks();
}
}
}
@Test
public void stateShouldTransitToRunningIfNonDeadThreadsBackToRunning() throws Exception {
prepareStreams();
final AtomicReference<StreamThread.State> state1 = prepareStreamThread(streamThreadOne, 1);
final AtomicReference<StreamThread.State> state2 = prepareStreamThread(streamThreadTwo, 2);
prepareThreadState(streamThreadOne, state1);
prepareThreadState(streamThreadTwo, state2);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
streams.setStateListener(streamsStateListener);
assertEquals(0, streamsStateListener.numChanges);
assertEquals(KafkaStreams.State.CREATED, streams.state());
streams.start();
waitForCondition(
() -> streamsStateListener.numChanges == 2,
"Streams never started.");
assertEquals(KafkaStreams.State.RUNNING, streams.state());
waitForCondition(
() -> streamsStateListener.numChanges == 2,
"Streams never started.");
assertEquals(KafkaStreams.State.RUNNING, streams.state());
for (final StreamThread thread : streams.threads) {
threadStateListenerCapture.getValue().onChange(
thread,
StreamThread.State.PARTITIONS_REVOKED,
StreamThread.State.RUNNING);
}
assertEquals(3, streamsStateListener.numChanges);
assertEquals(KafkaStreams.State.REBALANCING, streams.state());
for (final StreamThread thread : streams.threads) {
threadStateListenerCapture.getValue().onChange(
thread,
StreamThread.State.PARTITIONS_ASSIGNED,
StreamThread.State.PARTITIONS_REVOKED);
}
assertEquals(3, streamsStateListener.numChanges);
assertEquals(KafkaStreams.State.REBALANCING, streams.state());
threadStateListenerCapture.getValue().onChange(
streams.threads.get(NUM_THREADS - 1),
StreamThread.State.PENDING_SHUTDOWN,
StreamThread.State.PARTITIONS_ASSIGNED);
threadStateListenerCapture.getValue().onChange(
streams.threads.get(NUM_THREADS - 1),
StreamThread.State.DEAD,
StreamThread.State.PENDING_SHUTDOWN);
assertEquals(3, streamsStateListener.numChanges);
assertEquals(KafkaStreams.State.REBALANCING, streams.state());
for (final StreamThread thread : streams.threads) {
if (thread != streams.threads.get(NUM_THREADS - 1)) {
threadStateListenerCapture.getValue().onChange(
thread,
StreamThread.State.RUNNING,
StreamThread.State.PARTITIONS_ASSIGNED);
}
}
assertEquals(4, streamsStateListener.numChanges);
assertEquals(KafkaStreams.State.RUNNING, streams.state());
streams.close();
waitForCondition(
() -> streamsStateListener.numChanges == 6,
"Streams never closed.");
assertEquals(KafkaStreams.State.NOT_RUNNING, streams.state());
}
}
@Test
public void shouldCleanupResourcesOnCloseWithoutPreviousStart() throws Exception {
prepareStreams();
final AtomicReference<StreamThread.State> state1 = prepareStreamThread(streamThreadOne, 1);
final AtomicReference<StreamThread.State> state2 = prepareStreamThread(streamThreadTwo, 2);
prepareConsumer(streamThreadOne, state1);
prepareConsumer(streamThreadTwo, state2);
final StreamsBuilder builder = getBuilderWithSource();
builder.globalTable("anyTopic");
try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KafkaStreams.class);
final KafkaStreams streams = new KafkaStreams(builder.build(), props, supplier, time)) {
streams.close();
waitForCondition(
() -> streams.state() == KafkaStreams.State.NOT_RUNNING,
"Streams never stopped.");
assertThat(appender.getMessages(), not(hasItem(containsString("ERROR"))));
}
assertTrue(supplier.consumer.closed());
assertTrue(supplier.restoreConsumer.closed());
for (final MockProducer<byte[], byte[]> p : supplier.producers) {
assertTrue(p.closed());
}
}
@Test
public void testStateThreadClose() throws Exception {
prepareStreams();
final AtomicReference<StreamThread.State> state1 = prepareStreamThread(streamThreadOne, 1);
final AtomicReference<StreamThread.State> state2 = prepareStreamThread(streamThreadTwo, 2);
prepareThreadState(streamThreadOne, state1);
prepareThreadState(streamThreadTwo, state2);
prepareConsumer(streamThreadOne, state1);
prepareConsumer(streamThreadTwo, state2);
prepareThreadLock(streamThreadOne);
prepareThreadLock(streamThreadTwo);
// make sure we have the global state thread running too
final StreamsBuilder builder = getBuilderWithSource();
builder.globalTable("anyTopic");
try (final KafkaStreams streams = new KafkaStreams(builder.build(), props, supplier, time)) {
assertEquals(NUM_THREADS, streams.threads.size());
assertEquals(KafkaStreams.State.CREATED, streams.state());
streams.start();
waitForCondition(
() -> streams.state() == KafkaStreams.State.RUNNING,
"Streams never started.");
for (int i = 0; i < NUM_THREADS; i++) {
final StreamThread tmpThread = streams.threads.get(i);
tmpThread.shutdown(CloseOptions.GroupMembershipOperation.REMAIN_IN_GROUP);
waitForCondition(() -> tmpThread.state() == StreamThread.State.DEAD,
"Thread never stopped.");
streams.threads.get(i).join();
}
waitForCondition(
() -> streams.metadataForLocalThreads().stream().allMatch(t -> t.threadState().equals("DEAD")),
"Streams never stopped"
);
streams.close();
waitForCondition(
() -> streams.state() == KafkaStreams.State.NOT_RUNNING,
"Streams never stopped.");
assertNull(streams.globalStreamThread);
}
}
@Test
public void testStateGlobalThreadClose() throws Exception {
prepareStreams();
final AtomicReference<StreamThread.State> state1 = prepareStreamThread(streamThreadOne, 1);
final AtomicReference<StreamThread.State> state2 = prepareStreamThread(streamThreadTwo, 2);
prepareThreadState(streamThreadOne, state1);
prepareThreadState(streamThreadTwo, state2);
// make sure we have the global state thread running too
final StreamsBuilder builder = getBuilderWithSource();
builder.globalTable("anyTopic");
try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KafkaStreams.class);
final KafkaStreams streams = new KafkaStreams(builder.build(), props, supplier, time)) {
streams.start();
waitForCondition(
() -> streams.state() == KafkaStreams.State.RUNNING,
"Streams never started.");
final GlobalStreamThread globalStreamThread = streams.globalStreamThread;
globalStreamThread.shutdown();
waitForCondition(
() -> globalStreamThread.state() == GlobalStreamThread.State.DEAD,
"Thread never stopped.");
// shutting down the global thread from "external" will yield an error in KafkaStreams
waitForCondition(
() -> streams.state() == KafkaStreams.State.ERROR,
"Thread never stopped."
);
streams.close();
assertEquals(KafkaStreams.State.ERROR, streams.state(), "KafkaStreams should remain in ERROR state after close.");
assertThat(appender.getMessages(), hasItem(containsString("State transition from RUNNING to PENDING_ERROR")));
assertThat(appender.getMessages(), hasItem(containsString("State transition from PENDING_ERROR to ERROR")));
assertThat(appender.getMessages(), hasItem(containsString("Streams client is already in the terminal ERROR state")));
}
}
    @Test
    public void testInitializesAndDestroysMetricsReporters() {
        prepareStreams();
        final AtomicReference<StreamThread.State> state1 = prepareStreamThread(streamThreadOne, 1);
        final AtomicReference<StreamThread.State> state2 = prepareStreamThread(streamThreadTwo, 2);
        prepareThreadState(streamThreadOne, state1);
        prepareThreadState(streamThreadTwo, state2);
        // snapshot the static counter first so the diff below isolates reporters created by THIS client
        final int oldInitCount = MockMetricsReporter.INIT_COUNT.get();
        try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
            final int newInitCount = MockMetricsReporter.INIT_COUNT.get();
            final int initDiff = newInitCount - oldInitCount;
            assertEquals(1, initDiff, "some reporters including MockMetricsReporter should be initialized by calling on construction");
            streams.start();
            final int oldCloseCount = MockMetricsReporter.CLOSE_COUNT.get();
            streams.close();
            assertEquals(KafkaStreams.State.NOT_RUNNING, streams.state());
            // every reporter this client initialized must also have been closed by close()
            assertEquals(oldCloseCount + initDiff, MockMetricsReporter.CLOSE_COUNT.get());
        }
    }
@Test
public void testCloseIsIdempotent() {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
streams.close();
final int closeCount = MockMetricsReporter.CLOSE_COUNT.get();
streams.close();
assertEquals(closeCount, MockMetricsReporter.CLOSE_COUNT.get(), "subsequent close() calls should do nothing");
}
}
@Test
public void testPauseResume() {
prepareStreams();
final AtomicReference<StreamThread.State> state1 = prepareStreamThread(streamThreadOne, 1);
final AtomicReference<StreamThread.State> state2 = prepareStreamThread(streamThreadTwo, 2);
prepareThreadState(streamThreadOne, state1);
prepareThreadState(streamThreadTwo, state2);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
streams.start();
streams.pause();
assertTrue(streams.isPaused());
streams.resume();
assertFalse(streams.isPaused());
}
}
@Test
public void testStartingPaused() {
prepareStreams();
final AtomicReference<StreamThread.State> state1 = prepareStreamThread(streamThreadOne, 1);
final AtomicReference<StreamThread.State> state2 = prepareStreamThread(streamThreadTwo, 2);
prepareThreadState(streamThreadOne, state1);
prepareThreadState(streamThreadTwo, state2);
// This test shows that a KafkaStreams instance can be started "paused"
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
streams.pause();
streams.start();
assertTrue(streams.isPaused());
streams.resume();
assertFalse(streams.isPaused());
}
}
@Test
public void testShowPauseResumeAreIdempotent() {
prepareStreams();
final AtomicReference<StreamThread.State> state1 = prepareStreamThread(streamThreadOne, 1);
final AtomicReference<StreamThread.State> state2 = prepareStreamThread(streamThreadTwo, 2);
prepareThreadState(streamThreadOne, state1);
prepareThreadState(streamThreadTwo, state2);
// This test shows that a KafkaStreams instance can be started "paused"
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
streams.start();
streams.pause();
assertTrue(streams.isPaused());
streams.pause();
assertTrue(streams.isPaused());
streams.resume();
assertFalse(streams.isPaused());
streams.resume();
assertFalse(streams.isPaused());
}
}
@Test
public void shouldAddThreadWhenRunning() throws Exception {
prepareStreams();
final AtomicReference<StreamThread.State> state1 = prepareStreamThread(streamThreadOne, 1);
final AtomicReference<StreamThread.State> state2 = prepareStreamThread(streamThreadTwo, 2);
prepareThreadState(streamThreadOne, state1);
prepareThreadState(streamThreadTwo, state2);
props.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, 1);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
streams.start();
final int oldSize = streams.threads.size();
waitForCondition(() -> streams.state() == KafkaStreams.State.RUNNING, 15L, "wait until running");
assertThat(streams.addStreamThread(), equalTo(Optional.of("processId-StreamThread-" + 2)));
assertThat(streams.threads.size(), equalTo(oldSize + 1));
}
}
@Test
public void shouldNotAddThreadWhenCreated() {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
final int oldSize = streams.threads.size();
assertThat(streams.addStreamThread(), equalTo(Optional.empty()));
assertThat(streams.threads.size(), equalTo(oldSize));
}
}
@Test
public void shouldNotAddThreadWhenClosed() {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
final int oldSize = streams.threads.size();
streams.close();
assertThat(streams.addStreamThread(), equalTo(Optional.empty()));
assertThat(streams.threads.size(), equalTo(oldSize));
}
}
@Test
public void shouldNotAddThreadWhenError() {
prepareStreams();
final AtomicReference<StreamThread.State> state1 = prepareStreamThread(streamThreadOne, 1);
final AtomicReference<StreamThread.State> state2 = prepareStreamThread(streamThreadTwo, 2);
prepareThreadState(streamThreadOne, state1);
prepareThreadState(streamThreadTwo, state2);
// make sure we have the global state thread running too
final StreamsBuilder builder = getBuilderWithSource();
builder.globalTable("anyTopic");
try (final KafkaStreams streams = new KafkaStreams(builder.build(), props, supplier, time)) {
final int oldSize = streams.threads.size();
streams.start();
streams.globalStreamThread.shutdown();
assertThat(streams.addStreamThread(), equalTo(Optional.empty()));
assertThat(streams.threads.size(), equalTo(oldSize));
}
}
@Test
public void shouldNotReturnDeadThreads() {
prepareStreams();
final AtomicReference<StreamThread.State> state1 = prepareStreamThread(streamThreadOne, 1);
final AtomicReference<StreamThread.State> state2 = prepareStreamThread(streamThreadTwo, 2);
prepareThreadState(streamThreadOne, state1);
prepareThreadState(streamThreadTwo, state2);
prepareThreadLock(streamThreadOne);
prepareThreadLock(streamThreadTwo);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
streams.start();
streamThreadOne.shutdown(CloseOptions.GroupMembershipOperation.LEAVE_GROUP);
final Set<ThreadMetadata> threads = streams.metadataForLocalThreads();
assertThat(threads.size(), equalTo(1));
assertThat(threads, hasItem(streamThreadTwo.threadMetadata()));
}
}
    @Test
    public void shouldRemoveThread() throws Exception {
        prepareStreams();
        final AtomicReference<StreamThread.State> state1 = prepareStreamThread(streamThreadOne, 1);
        final AtomicReference<StreamThread.State> state2 = prepareStreamThread(streamThreadTwo, 2);
        prepareThreadState(streamThreadOne, state1);
        prepareThreadState(streamThreadTwo, state2);
        // stub thread one so removeStreamThread() sees a live thread that shuts down cleanly
        when(streamThreadOne.groupInstanceID()).thenReturn(Optional.empty());
        when(streamThreadOne.waitOnThreadState(isA(StreamThread.State.class), anyLong())).thenReturn(true);
        when(streamThreadOne.isThreadAlive()).thenReturn(true);
        props.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, 2);
        try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
            streams.start();
            final int oldSize = streams.threads.size();
            waitForCondition(() -> streams.state() == KafkaStreams.State.RUNNING, 15L,
                "Kafka Streams client did not reach state RUNNING");
            // removal reports the removed thread's name and shrinks the thread list by one
            assertThat(streams.removeStreamThread(), equalTo(Optional.of("processId-StreamThread-" + 1)));
            assertThat(streams.threads.size(), equalTo(oldSize - 1));
        }
    }
@Test
public void shouldNotRemoveThreadWhenNotRunning() {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
props.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, 1);
try (final KafkaStreams streams =
new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
assertThat(streams.removeStreamThread(), equalTo(Optional.empty()));
assertThat(streams.threads.size(), equalTo(1));
}
}
@Test
public void testCannotStartOnceClosed() {
prepareStreams();
final AtomicReference<StreamThread.State> state1 = prepareStreamThread(streamThreadOne, 1);
final AtomicReference<StreamThread.State> state2 = prepareStreamThread(streamThreadTwo, 2);
prepareThreadState(streamThreadOne, state1);
prepareThreadState(streamThreadTwo, state2);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
streams.start();
streams.close();
try {
streams.start();
fail("Should have throw IllegalStateException");
} catch (final IllegalStateException expected) {
// this is ok
}
}
}
@Test
public void shouldNotSetGlobalRestoreListenerAfterStarting() {
prepareStreams();
final AtomicReference<StreamThread.State> state1 = prepareStreamThread(streamThreadOne, 1);
final AtomicReference<StreamThread.State> state2 = prepareStreamThread(streamThreadTwo, 2);
prepareThreadState(streamThreadOne, state1);
prepareThreadState(streamThreadTwo, state2);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
streams.start();
try {
streams.setGlobalStateRestoreListener(null);
fail("Should throw an IllegalStateException");
} catch (final IllegalStateException e) {
// expected
}
}
}
@Test
public void shouldThrowExceptionSettingUncaughtExceptionHandlerNotInCreateState() {
prepareStreams();
final AtomicReference<StreamThread.State> state1 = prepareStreamThread(streamThreadOne, 1);
final AtomicReference<StreamThread.State> state2 = prepareStreamThread(streamThreadTwo, 2);
prepareThreadState(streamThreadOne, state1);
prepareThreadState(streamThreadTwo, state2);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
streams.start();
assertThrows(IllegalStateException.class, () -> streams.setUncaughtExceptionHandler(null));
}
}
@Test
public void shouldThrowExceptionSettingStreamsUncaughtExceptionHandlerNotInCreateState() {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
streams.start();
assertThrows(IllegalStateException.class, () -> streams.setUncaughtExceptionHandler(null));
}
}
@Test
public void shouldThrowNullPointerExceptionSettingStreamsUncaughtExceptionHandlerIfNull() {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
assertThrows(NullPointerException.class, () -> streams.setUncaughtExceptionHandler(null));
}
}
@Test
public void shouldThrowExceptionSettingStateListenerNotInCreateState() {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
streams.start();
try {
streams.setStateListener(null);
fail("Should throw IllegalStateException");
} catch (final IllegalStateException e) {
// expected
}
}
}
@Test
public void shouldAllowCleanupBeforeStartAndAfterClose() {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
try {
streams.cleanUp();
streams.start();
} finally {
streams.close();
streams.cleanUp();
}
}
}
@Test
public void shouldThrowOnCleanupWhileRunning() throws Exception {
prepareStreams();
final AtomicReference<StreamThread.State> state1 = prepareStreamThread(streamThreadOne, 1);
final AtomicReference<StreamThread.State> state2 = prepareStreamThread(streamThreadTwo, 2);
prepareThreadState(streamThreadOne, state1);
prepareThreadState(streamThreadTwo, state2);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
streams.start();
waitForCondition(
() -> streams.state() == KafkaStreams.State.RUNNING,
"Streams never started.");
try {
streams.cleanUp();
fail("Should have thrown IllegalStateException");
} catch (final IllegalStateException expected) {
assertEquals("Cannot clean up while running.", expected.getMessage());
}
}
}
@Test
public void shouldThrowOnCleanupWhilePaused() throws Exception {
prepareStreams();
final AtomicReference<StreamThread.State> state1 = prepareStreamThread(streamThreadOne, 1);
final AtomicReference<StreamThread.State> state2 = prepareStreamThread(streamThreadTwo, 2);
prepareThreadState(streamThreadOne, state1);
prepareThreadState(streamThreadTwo, state2);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
streams.start();
waitForCondition(
() -> streams.state() == KafkaStreams.State.RUNNING,
"Streams never started.");
streams.pause();
waitForCondition(
streams::isPaused,
"Streams did not pause.");
assertThrows(IllegalStateException.class, streams::cleanUp, "Cannot clean up while running.");
}
}
@Test
public void shouldThrowOnCleanupWhileShuttingDown() throws Exception {
prepareStreams();
final AtomicReference<StreamThread.State> state1 = prepareStreamThread(streamThreadOne, 1);
final AtomicReference<StreamThread.State> state2 = prepareStreamThread(streamThreadTwo, 2);
prepareThreadState(streamThreadOne, state1);
prepareThreadState(streamThreadTwo, state2);
prepareTerminableThread(streamThreadOne);
try (final KafkaStreams streams = new KafkaStreamsWithTerminableThread(getBuilderWithSource().build(), props, supplier, time)) {
streams.start();
waitForCondition(
() -> streams.state() == KafkaStreams.State.RUNNING,
"Streams never started.");
streams.close(Duration.ZERO);
assertThat(streams.state() == State.PENDING_SHUTDOWN, equalTo(true));
assertThrows(IllegalStateException.class, streams::cleanUp);
assertThat(streams.state() == State.PENDING_SHUTDOWN, equalTo(true));
}
}
@Test
public void shouldThrowOnCleanupWhileShuttingDownStreamClosedWithCloseOptionLeaveGroupFalse() throws Exception {
prepareStreams();
final AtomicReference<StreamThread.State> state1 = prepareStreamThread(streamThreadOne, 1);
final AtomicReference<StreamThread.State> state2 = prepareStreamThread(streamThreadTwo, 2);
prepareThreadState(streamThreadOne, state1);
prepareThreadState(streamThreadTwo, state2);
prepareTerminableThread(streamThreadOne);
final MockClientSupplier mockClientSupplier = spy(MockClientSupplier.class);
when(mockClientSupplier.getAdmin(any())).thenReturn(adminClient);
try (final KafkaStreams streams = new KafkaStreamsWithTerminableThread(getBuilderWithSource().build(), props, mockClientSupplier, time)) {
streams.start();
waitForCondition(
() -> streams.state() == KafkaStreams.State.RUNNING,
"Streams never started.");
final CloseOptions closeOptions = CloseOptions.timeout(Duration.ZERO)
.withGroupMembershipOperation(CloseOptions.GroupMembershipOperation.LEAVE_GROUP);
streams.close(closeOptions);
assertThat(streams.state() == State.PENDING_SHUTDOWN, equalTo(true));
assertThrows(IllegalStateException.class, streams::cleanUp);
assertThat(streams.state() == State.PENDING_SHUTDOWN, equalTo(true));
}
}
@Test
public void shouldThrowOnCleanupWhileShuttingDownStreamClosedWithCloseOptionLeaveGroupTrue() throws Exception {
prepareStreams();
final AtomicReference<StreamThread.State> state1 = prepareStreamThread(streamThreadOne, 1);
final AtomicReference<StreamThread.State> state2 = prepareStreamThread(streamThreadTwo, 2);
prepareThreadState(streamThreadOne, state1);
prepareThreadState(streamThreadTwo, state2);
prepareTerminableThread(streamThreadOne);
try (final KafkaStreams streams = new KafkaStreamsWithTerminableThread(getBuilderWithSource().build(), props, supplier, time)) {
streams.start();
waitForCondition(
() -> streams.state() == KafkaStreams.State.RUNNING,
"Streams never started.");
final CloseOptions closeOptions = CloseOptions.timeout(Duration.ZERO);
streams.close(closeOptions);
assertThat(streams.state() == State.PENDING_SHUTDOWN, equalTo(true));
assertThrows(IllegalStateException.class, streams::cleanUp);
assertThat(streams.state() == State.PENDING_SHUTDOWN, equalTo(true));
}
}
@Test
public void shouldNotGetAllTasksWhenNotRunning() throws Exception {
prepareStreams();
final AtomicReference<StreamThread.State> state1 = prepareStreamThread(streamThreadOne, 1);
final AtomicReference<StreamThread.State> state2 = prepareStreamThread(streamThreadTwo, 2);
prepareThreadState(streamThreadOne, state1);
prepareThreadState(streamThreadTwo, state2);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
assertThrows(StreamsNotStartedException.class, streams::metadataForAllStreamsClients);
streams.start();
waitForApplicationState(Collections.singletonList(streams), KafkaStreams.State.RUNNING, DEFAULT_DURATION);
streams.close();
waitForApplicationState(Collections.singletonList(streams), KafkaStreams.State.NOT_RUNNING, DEFAULT_DURATION);
assertThrows(IllegalStateException.class, streams::metadataForAllStreamsClients);
}
}
@Test
public void shouldNotGetAllTasksWithStoreWhenNotRunning() throws Exception {
prepareStreams();
final AtomicReference<StreamThread.State> state1 = prepareStreamThread(streamThreadOne, 1);
final AtomicReference<StreamThread.State> state2 = prepareStreamThread(streamThreadTwo, 2);
prepareThreadState(streamThreadOne, state1);
prepareThreadState(streamThreadTwo, state2);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
assertThrows(StreamsNotStartedException.class, () -> streams.streamsMetadataForStore("store"));
streams.start();
waitForApplicationState(Collections.singletonList(streams), KafkaStreams.State.RUNNING, DEFAULT_DURATION);
streams.close();
waitForApplicationState(Collections.singletonList(streams), KafkaStreams.State.NOT_RUNNING, DEFAULT_DURATION);
assertThrows(IllegalStateException.class, () -> streams.streamsMetadataForStore("store"));
}
}
@Test
public void shouldNotGetQueryMetadataWithSerializerWhenNotRunningOrRebalancing() throws Exception {
prepareStreams();
final AtomicReference<StreamThread.State> state1 = prepareStreamThread(streamThreadOne, 1);
final AtomicReference<StreamThread.State> state2 = prepareStreamThread(streamThreadTwo, 2);
prepareThreadState(streamThreadOne, state1);
prepareThreadState(streamThreadTwo, state2);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
assertThrows(StreamsNotStartedException.class, () -> streams.queryMetadataForKey("store", "key", new StringSerializer()));
streams.start();
waitForApplicationState(Collections.singletonList(streams), KafkaStreams.State.RUNNING, DEFAULT_DURATION);
streams.close();
waitForApplicationState(Collections.singletonList(streams), KafkaStreams.State.NOT_RUNNING, DEFAULT_DURATION);
assertThrows(IllegalStateException.class, () -> streams.queryMetadataForKey("store", "key", new StringSerializer()));
}
}
@Test
public void shouldGetQueryMetadataWithSerializerWhenRunningOrRebalancing() {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
streams.start();
assertEquals(KeyQueryMetadata.NOT_AVAILABLE, streams.queryMetadataForKey("store", "key", new StringSerializer()));
}
}
@Test
public void shouldNotGetQueryMetadataWithPartitionerWhenNotRunningOrRebalancing() throws Exception {
prepareStreams();
final AtomicReference<StreamThread.State> state1 = prepareStreamThread(streamThreadOne, 1);
final AtomicReference<StreamThread.State> state2 = prepareStreamThread(streamThreadTwo, 2);
prepareThreadState(streamThreadOne, state1);
prepareThreadState(streamThreadTwo, state2);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
assertThrows(StreamsNotStartedException.class, () -> streams.queryMetadataForKey("store", "key", (topic, key, value, numPartitions) -> Optional.of(Collections.singleton(0))));
streams.start();
waitForApplicationState(Collections.singletonList(streams), KafkaStreams.State.RUNNING, DEFAULT_DURATION);
streams.close();
waitForApplicationState(Collections.singletonList(streams), KafkaStreams.State.NOT_RUNNING, DEFAULT_DURATION);
assertThrows(IllegalStateException.class, () -> streams.queryMetadataForKey("store", "key", (topic, key, value, numPartitions) -> Optional.of(Collections.singleton(0))));
}
}
@Test
public void shouldThrowUnknownStateStoreExceptionWhenStoreNotExist() throws Exception {
prepareStreams();
final AtomicReference<StreamThread.State> state1 = prepareStreamThread(streamThreadOne, 1);
final AtomicReference<StreamThread.State> state2 = prepareStreamThread(streamThreadTwo, 2);
prepareThreadState(streamThreadOne, state1);
prepareThreadState(streamThreadTwo, state2);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
streams.start();
waitForApplicationState(Collections.singletonList(streams), KafkaStreams.State.RUNNING, DEFAULT_DURATION);
assertThrows(UnknownStateStoreException.class, () -> streams.store(StoreQueryParameters.fromNameAndType("unknown-store", keyValueStore())));
}
}
@Test
public void shouldNotGetStoreWhenWhenNotRunningOrRebalancing() throws Exception {
prepareStreams();
final AtomicReference<StreamThread.State> state1 = prepareStreamThread(streamThreadOne, 1);
final AtomicReference<StreamThread.State> state2 = prepareStreamThread(streamThreadTwo, 2);
prepareThreadState(streamThreadOne, state1);
prepareThreadState(streamThreadTwo, state2);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
assertThrows(StreamsNotStartedException.class, () -> streams.store(StoreQueryParameters.fromNameAndType("store", keyValueStore())));
streams.start();
waitForApplicationState(Collections.singletonList(streams), KafkaStreams.State.RUNNING, DEFAULT_DURATION);
streams.close();
waitForApplicationState(Collections.singletonList(streams), KafkaStreams.State.NOT_RUNNING, DEFAULT_DURATION);
assertThrows(IllegalStateException.class, () -> streams.store(StoreQueryParameters.fromNameAndType("store", keyValueStore())));
}
}
@Test
public void shouldReturnEmptyLocalStorePartitionLags() {
prepareStreams();
final AtomicReference<StreamThread.State> state1 = prepareStreamThread(streamThreadOne, 1);
final AtomicReference<StreamThread.State> state2 = prepareStreamThread(streamThreadTwo, 2);
prepareThreadState(streamThreadOne, state1);
prepareThreadState(streamThreadTwo, state2);
// Mock all calls made to compute the offset lags,
final KafkaFutureImpl<Map<TopicPartition, ListOffsetsResultInfo>> allFuture = new KafkaFutureImpl<>();
allFuture.complete(Collections.emptyMap());
final MockAdminClient mockAdminClient = spy(MockAdminClient.class);
final MockClientSupplier mockClientSupplier = spy(MockClientSupplier.class);
when(mockClientSupplier.getAdmin(any())).thenReturn(mockAdminClient);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, mockClientSupplier, time)) {
streams.start();
assertEquals(0, streams.allLocalStorePartitionLags().size());
}
}
@Test
public void shouldReturnFalseOnCloseWhenThreadsHaventTerminated() throws Exception {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
prepareTerminableThread(streamThreadOne);
// do not use mock time so that it can really elapse
try (final KafkaStreams streams = new KafkaStreamsWithTerminableThread(getBuilderWithSource().build(), props, supplier)) {
assertFalse(streams.close(Duration.ofMillis(10L)));
}
}
@Test
public void shouldThrowOnNegativeTimeoutForClose() throws Exception {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
prepareTerminableThread(streamThreadOne);
try (final KafkaStreams streams = new KafkaStreamsWithTerminableThread(getBuilderWithSource().build(), props, supplier, time)) {
assertThrows(IllegalArgumentException.class, () -> streams.close(Duration.ofMillis(-1L)));
}
}
@Test
public void shouldNotBlockInCloseForZeroDuration() throws Exception {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
prepareTerminableThread(streamThreadOne);
try (final KafkaStreams streams = new KafkaStreamsWithTerminableThread(getBuilderWithSource().build(), props, supplier, time)) {
// with mock time that does not elapse, close would not return if it ever waits on the state transition
assertFalse(streams.close(Duration.ZERO));
}
}
@Test
public void shouldReturnFalseOnCloseWithCloseOptionWithLeaveGroupFalseWhenThreadsHaventTerminated() throws Exception {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
prepareTerminableThread(streamThreadOne);
final CloseOptions closeOptions = CloseOptions.timeout(Duration.ofMillis(10L));
try (final KafkaStreams streams = new KafkaStreamsWithTerminableThread(getBuilderWithSource().build(), props, supplier)) {
assertFalse(streams.close(closeOptions));
}
}
@Test
public void shouldThrowOnNegativeTimeoutForCloseWithCloseOptionLeaveGroupFalse() throws Exception {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
prepareTerminableThread(streamThreadOne);
final CloseOptions closeOptions = CloseOptions.timeout(Duration.ofMillis(-1L));
try (final KafkaStreams streams = new KafkaStreamsWithTerminableThread(getBuilderWithSource().build(), props, supplier, time)) {
assertThrows(IllegalArgumentException.class, () -> streams.close(closeOptions));
}
}
@Test
public void shouldNotBlockInCloseWithCloseOptionLeaveGroupFalseForZeroDuration() throws Exception {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
prepareTerminableThread(streamThreadOne);
final CloseOptions closeOptions = CloseOptions.timeout(Duration.ZERO);
try (final KafkaStreams streams = new KafkaStreamsWithTerminableThread(getBuilderWithSource().build(), props, supplier)) {
assertFalse(streams.close(closeOptions));
}
}
@Test
public void shouldReturnFalseOnCloseWithCloseOptionWithLeaveGroupTrueWhenThreadsHaventTerminated() throws Exception {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
prepareTerminableThread(streamThreadOne);
final MockClientSupplier mockClientSupplier = spy(MockClientSupplier.class);
when(mockClientSupplier.getAdmin(any())).thenReturn(adminClient);
final CloseOptions closeOptions = CloseOptions.timeout(Duration.ofMillis(10L))
.withGroupMembershipOperation(CloseOptions.GroupMembershipOperation.LEAVE_GROUP);
try (final KafkaStreams streams = new KafkaStreamsWithTerminableThread(getBuilderWithSource().build(), props, mockClientSupplier)) {
assertFalse(streams.close(closeOptions));
}
}
@Test
public void shouldThrowOnNegativeTimeoutForCloseWithCloseOptionLeaveGroupTrue() throws Exception {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
prepareTerminableThread(streamThreadOne);
final MockClientSupplier mockClientSupplier = spy(MockClientSupplier.class);
when(mockClientSupplier.getAdmin(any())).thenReturn(adminClient);
final CloseOptions closeOptions = CloseOptions.timeout(Duration.ofMillis(-1L))
.withGroupMembershipOperation(CloseOptions.GroupMembershipOperation.LEAVE_GROUP);
try (final KafkaStreams streams = new KafkaStreamsWithTerminableThread(getBuilderWithSource().build(), props, mockClientSupplier, time)) {
assertThrows(IllegalArgumentException.class, () -> streams.close(closeOptions));
}
}
@Test
@SuppressWarnings("unchecked")
public void shouldUseDefaultTimeoutForCloseWithNullTimeout() throws Exception {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
prepareTerminableThread(streamThreadOne);
final MockClientSupplier mockClientSupplier = spy(MockClientSupplier.class);
when(mockClientSupplier.getAdmin(any())).thenReturn(adminClient);
final CloseOptions closeOptions = CloseOptions.timeout(null)
.withGroupMembershipOperation(CloseOptions.GroupMembershipOperation.LEAVE_GROUP);
final KafkaStreams streams = spy(new KafkaStreamsWithTerminableThread(
getBuilderWithSource().build(), props, mockClientSupplier, time));
doReturn(false).when(streams).close(any(Optional.class), any());
streams.close(closeOptions);
verify(streams).close(eq(Optional.of(Long.MAX_VALUE)), eq(CloseOptions.GroupMembershipOperation.LEAVE_GROUP));
}
@Test
public void shouldNotBlockInCloseWithCloseOptionLeaveGroupTrueForZeroDuration() throws Exception {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
prepareTerminableThread(streamThreadOne);
final MockClientSupplier mockClientSupplier = spy(MockClientSupplier.class);
when(mockClientSupplier.getAdmin(any())).thenReturn(adminClient);
final CloseOptions closeOptions = CloseOptions.timeout(Duration.ZERO)
.withGroupMembershipOperation(CloseOptions.GroupMembershipOperation.LEAVE_GROUP);
try (final KafkaStreams streams = new KafkaStreamsWithTerminableThread(getBuilderWithSource().build(), props, mockClientSupplier)) {
assertFalse(streams.close(closeOptions));
}
}
@Test
public void shouldTriggerRecordingOfRocksDBMetricsIfRecordingLevelIsDebug() throws Exception {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
prepareTerminableThread(streamThreadOne);
try (final MockedStatic<Executors> executorsMockedStatic = mockStatic(Executors.class)) {
final ScheduledExecutorService cleanupSchedule = mock(ScheduledExecutorService.class);
final ScheduledExecutorService rocksDBMetricsRecordingTriggerThread = mock(ScheduledExecutorService.class);
executorsMockedStatic.when(() -> Executors.newSingleThreadScheduledExecutor(
any(ThreadFactory.class))).thenReturn(cleanupSchedule, rocksDBMetricsRecordingTriggerThread);
final StreamsBuilder builder = new StreamsBuilder();
builder.table("topic", Materialized.as("store"));
props.setProperty(StreamsConfig.METRICS_RECORDING_LEVEL_CONFIG, RecordingLevel.DEBUG.name());
try (final KafkaStreams streams = new KafkaStreamsWithTerminableThread(getBuilderWithSource().build(), props, supplier, time)) {
streams.start();
}
executorsMockedStatic.verify(() -> Executors.newSingleThreadScheduledExecutor(any(ThreadFactory.class)),
times(2));
verify(rocksDBMetricsRecordingTriggerThread).scheduleAtFixedRate(any(RocksDBMetricsRecordingTrigger.class),
eq(0L), eq(1L), eq(TimeUnit.MINUTES));
verify(rocksDBMetricsRecordingTriggerThread).shutdownNow();
}
}
@Test
public void shouldGetClientSupplierFromConfigForConstructor() throws Exception {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
prepareTerminableThread(streamThreadOne);
prepareTerminableThread(streamThreadTwo);
final StreamsConfig config = new StreamsConfig(props);
final StreamsConfig mockConfig = spy(config);
when(mockConfig.getKafkaClientSupplier()).thenReturn(supplier);
try (final KafkaStreams ignored = new KafkaStreamsWithTerminableThread(getBuilderWithSource().build(), mockConfig)) {
// no-op
}
// It's called once in above when mock
verify(mockConfig, times(2)).getKafkaClientSupplier();
}
@Test
public void shouldGetClientSupplierFromConfigForConstructorWithTime() {
prepareStreams();
final AtomicReference<StreamThread.State> state1 = prepareStreamThread(streamThreadOne, 1);
final AtomicReference<StreamThread.State> state2 = prepareStreamThread(streamThreadTwo, 2);
prepareThreadState(streamThreadOne, state1);
prepareThreadState(streamThreadTwo, state2);
final StreamsConfig config = new StreamsConfig(props);
final StreamsConfig mockConfig = spy(config);
when(mockConfig.getKafkaClientSupplier()).thenReturn(supplier);
try (final KafkaStreams ignored = new KafkaStreams(getBuilderWithSource().build(), mockConfig, time)) {
// no-op
}
// It's called once in above when mock
verify(mockConfig, times(2)).getKafkaClientSupplier();
}
@Test
public void shouldUseProvidedClientSupplier() throws Exception {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
prepareTerminableThread(streamThreadOne);
prepareTerminableThread(streamThreadTwo);
final StreamsConfig config = new StreamsConfig(props);
final StreamsConfig mockConfig = spy(config);
try (final KafkaStreams ignored = new KafkaStreamsWithTerminableThread(getBuilderWithSource().build(), mockConfig, supplier)) {
// no-op
}
// It's called once in above when mock
verify(mockConfig, times(0)).getKafkaClientSupplier();
}
@Test
public void shouldNotTriggerRecordingOfRocksDBMetricsIfRecordingLevelIsInfo() {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
try (final MockedStatic<Executors> executorsMockedStatic = mockStatic(Executors.class)) {
final ScheduledExecutorService cleanupSchedule = mock(ScheduledExecutorService.class);
executorsMockedStatic.when(() ->
Executors.newSingleThreadScheduledExecutor(any(ThreadFactory.class))).thenReturn(cleanupSchedule);
final StreamsBuilder builder = new StreamsBuilder();
builder.table("topic", Materialized.as("store"));
props.setProperty(StreamsConfig.METRICS_RECORDING_LEVEL_CONFIG, RecordingLevel.INFO.name());
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
streams.start();
}
executorsMockedStatic.verify(() -> Executors.newSingleThreadScheduledExecutor(any(ThreadFactory.class)));
}
}
@Test
public void shouldCleanupOldStateDirs() {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
try (final MockedStatic<Executors> executorsMockedStatic = mockStatic(Executors.class)) {
final ScheduledExecutorService cleanupSchedule = mock(ScheduledExecutorService.class);
executorsMockedStatic.when(() -> Executors.newSingleThreadScheduledExecutor(
any(ThreadFactory.class)
)).thenReturn(cleanupSchedule);
try (MockedConstruction<StateDirectory> ignored = mockConstruction(StateDirectory.class,
(mock, context) -> when(mock.initializeProcessId()).thenReturn(UUID.randomUUID()))) {
props.setProperty(StreamsConfig.STATE_CLEANUP_DELAY_MS_CONFIG, "1");
final StreamsBuilder builder = new StreamsBuilder();
builder.table("topic", Materialized.as("store"));
try (final KafkaStreams streams = new KafkaStreams(builder.build(), props, supplier, time)) {
streams.start();
}
}
verify(cleanupSchedule).scheduleAtFixedRate(any(Runnable.class), eq(1L), eq(1L), eq(TimeUnit.MILLISECONDS));
verify(cleanupSchedule).shutdownNow();
}
}
@Test
public void statelessTopologyShouldNotCreateStateDirectory(final TestInfo testInfo) {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
final String safeTestName = safeUniqueTestName(testInfo);
final String inputTopic = safeTestName + "-input";
final String outputTopic = safeTestName + "-output";
final Topology topology = new Topology();
topology.addSource("source", new StringDeserializer(), new StringDeserializer(), inputTopic)
.addProcessor("process", () -> new Processor<String, String, String, String>() {
private ProcessorContext<String, String> context;
@Override
public void init(final ProcessorContext<String, String> context) {
this.context = context;
}
@Override
public void process(final Record<String, String> record) {
if (record.value().length() % 2 == 0) {
context.forward(record.withValue(record.key() + record.value()));
}
}
}, "source")
.addSink("sink", outputTopic, new StringSerializer(), new StringSerializer(), "process");
startStreamsAndCheckDirExists(topology, false);
}
@Test
public void inMemoryStatefulTopologyShouldNotCreateStateDirectory(final TestInfo testInfo) {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
final String safeTestName = safeUniqueTestName(testInfo);
final String inputTopic = safeTestName + "-input";
final String outputTopic = safeTestName + "-output";
final String globalTopicName = safeTestName + "-global";
final String storeName = safeTestName + "-counts";
final String globalStoreName = safeTestName + "-globalStore";
final Topology topology = getStatefulTopology(inputTopic, outputTopic, globalTopicName, storeName, globalStoreName, false);
startStreamsAndCheckDirExists(topology, false);
}
@Test
public void statefulTopologyShouldCreateStateDirectory(final TestInfo testInfo) {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
final String safeTestName = safeUniqueTestName(testInfo);
final String inputTopic = safeTestName + "-input";
final String outputTopic = safeTestName + "-output";
final String globalTopicName = safeTestName + "-global";
final String storeName = safeTestName + "-counts";
final String globalStoreName = safeTestName + "-globalStore";
final Topology topology = getStatefulTopology(inputTopic, outputTopic, globalTopicName, storeName, globalStoreName, true);
startStreamsAndCheckDirExists(topology, true);
}
@Test
public void shouldThrowTopologyExceptionOnEmptyTopology() {
prepareStreams();
try (final KafkaStreams ignored = new KafkaStreams(new StreamsBuilder().build(), props, supplier, time)) {
fail("Should have thrown TopologyException");
} catch (final TopologyException e) {
assertThat(
e.getMessage(),
equalTo("Invalid topology: Topology has no stream threads and no global threads, " +
"must subscribe to at least one source topic or global table."));
}
}
@Test
public void shouldNotCreateStreamThreadsForGlobalOnlyTopology() {
prepareStreams();
final StreamsBuilder builder = new StreamsBuilder();
builder.globalTable("anyTopic");
try (final KafkaStreams streams = new KafkaStreams(builder.build(), props, supplier, time)) {
assertThat(streams.threads.size(), equalTo(0));
}
}
@Test
public void shouldTransitToRunningWithGlobalOnlyTopology() throws Exception {
prepareStreams();
final StreamsBuilder builder = new StreamsBuilder();
builder.globalTable("anyTopic");
try (final KafkaStreams streams = new KafkaStreams(builder.build(), props, supplier, time)) {
assertThat(streams.threads.size(), equalTo(0));
assertEquals(KafkaStreams.State.CREATED, streams.state());
streams.start();
waitForCondition(
() -> streams.state() == KafkaStreams.State.RUNNING,
() -> "Streams never started, state is " + streams.state());
streams.close();
waitForCondition(
() -> streams.state() == KafkaStreams.State.NOT_RUNNING,
"Streams never stopped.");
}
}
@Test
public void shouldThrowOnClientInstanceIdsWithNegativeTimeout() {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
final IllegalArgumentException error = assertThrows(
IllegalArgumentException.class,
() -> streams.clientInstanceIds(Duration.ofMillis(-1L))
);
assertThat(
error.getMessage(),
equalTo("The timeout cannot be negative.")
);
}
}
@Test
public void shouldThrowOnClientInstanceIdsWhenNotStarted() {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
final IllegalStateException error = assertThrows(
IllegalStateException.class,
() -> streams.clientInstanceIds(Duration.ZERO)
);
assertThat(
error.getMessage(),
equalTo("KafkaStreams has not been started, you can retry after calling start().")
);
}
}
@Test
public void shouldThrowOnClientInstanceIdsWhenClosed() {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
streams.close();
final IllegalStateException error = assertThrows(
IllegalStateException.class,
() -> streams.clientInstanceIds(Duration.ZERO)
);
assertThat(
error.getMessage(),
equalTo("KafkaStreams has been stopped (NOT_RUNNING).")
);
}
}
@Test
public void shouldThrowStreamsExceptionWhenAdminNotInitialized() {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
streams.start();
final StreamsException error = assertThrows(
StreamsException.class,
() -> streams.clientInstanceIds(Duration.ZERO)
);
assertThat(
error.getMessage(),
equalTo("Could not retrieve admin client instance id.")
);
final Throwable cause = error.getCause();
assertThat(cause, instanceOf(UnsupportedOperationException.class));
assertThat(
cause.getMessage(),
equalTo("clientInstanceId not set")
);
}
}
@Test
public void shouldNotCrashButThrowLaterIfAdminTelemetryDisabled() {
prepareStreams();
adminClient.disableTelemetry();
// set threads to zero to simplify set setup
props.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, 0);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
streams.start();
final ClientInstanceIds clientInstanceIds = streams.clientInstanceIds(Duration.ZERO);
final IllegalStateException error = assertThrows(
IllegalStateException.class,
clientInstanceIds::adminInstanceId
);
assertThat(
error.getMessage(),
equalTo("Telemetry is not enabled on the admin client. Set config `enable.metrics.push` to `true`.")
);
}
}
@Test
public void shouldThrowTimeExceptionWhenAdminTimesOut() {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
adminClient.setClientInstanceId(Uuid.randomUuid());
adminClient.injectTimeoutException(1);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
streams.start();
assertThrows(
TimeoutException.class,
() -> streams.clientInstanceIds(Duration.ZERO)
);
}
}
@Test
public void shouldReturnAdminInstanceID() {
prepareStreams();
final Uuid instanceId = Uuid.randomUuid();
adminClient.setClientInstanceId(instanceId);
// set threads to zero to simplify set setup
props.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, 0);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
streams.start();
assertThat(
streams.clientInstanceIds(Duration.ZERO).adminInstanceId(),
equalTo(instanceId)
);
}
}
@Test
public void shouldReturnProducerAndConsumerInstanceIds() {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
props.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, 1);
final Uuid mainConsumerInstanceId = Uuid.randomUuid();
final Uuid producerInstanceId = Uuid.randomUuid();
final KafkaFutureImpl<Uuid> consumerFuture = new KafkaFutureImpl<>();
final KafkaFutureImpl<Uuid> producerFuture = new KafkaFutureImpl<>();
consumerFuture.complete(mainConsumerInstanceId);
producerFuture.complete(producerInstanceId);
final Uuid adminInstanceId = Uuid.randomUuid();
adminClient.setClientInstanceId(adminInstanceId);
final Map<String, KafkaFuture<Uuid>> expectedClientIds = Map.of("main-consumer", consumerFuture, "some-thread-producer", producerFuture);
when(streamThreadOne.clientInstanceIds(any())).thenReturn(expectedClientIds);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
streams.start();
final ClientInstanceIds clientInstanceIds = streams.clientInstanceIds(Duration.ZERO);
assertThat(clientInstanceIds.consumerInstanceIds().size(), equalTo(1));
assertThat(clientInstanceIds.consumerInstanceIds().get("main-consumer"), equalTo(mainConsumerInstanceId));
assertThat(clientInstanceIds.producerInstanceIds().size(), equalTo(1));
assertThat(clientInstanceIds.producerInstanceIds().get("some-thread-producer"), equalTo(producerInstanceId));
assertThat(clientInstanceIds.adminInstanceId(), equalTo(adminInstanceId));
}
}
@Test
public void shouldThrowTimeoutExceptionWhenAnyClientFutureDoesNotComplete() {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
when(streamThreadOne.clientInstanceIds(any()))
.thenReturn(Collections.singletonMap("some-client", new KafkaFutureImpl<>()));
adminClient.setClientInstanceId(Uuid.randomUuid());
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
streams.start();
final TimeoutException timeoutException = assertThrows(
TimeoutException.class,
() -> streams.clientInstanceIds(Duration.ZERO)
);
assertThat(timeoutException.getMessage(), equalTo("Could not retrieve consumer/producer instance id for some-client."));
assertThat(timeoutException.getCause(), instanceOf(java.util.concurrent.TimeoutException.class));
}
}
@Test
public void shouldThrowTimeoutExceptionWhenGlobalConsumerFutureDoesNotComplete() {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
adminClient.setClientInstanceId(Uuid.randomUuid());
final StreamsBuilder builder = getBuilderWithSource();
builder.globalTable("anyTopic");
try (final KafkaStreams streams = new KafkaStreams(builder.build(), props, supplier, time)) {
streams.start();
when(globalStreamThreadMockedConstruction.constructed().get(0).globalConsumerInstanceId(any()))
.thenReturn(new KafkaFutureImpl<>());
final TimeoutException timeoutException = assertThrows(
TimeoutException.class,
() -> streams.clientInstanceIds(Duration.ZERO)
);
assertThat(timeoutException.getMessage(), equalTo("Could not retrieve global consumer client instance id."));
assertThat(timeoutException.getCause(), instanceOf(java.util.concurrent.TimeoutException.class));
}
}
@Test
public void shouldCountDownTimeoutAcrossClient() {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
adminClient.setClientInstanceId(Uuid.randomUuid());
adminClient.advanceTimeOnClientInstanceId(time, Duration.ofMillis(10L).toMillis());
final Time mockTime = time;
final AtomicLong expectedTimeout = new AtomicLong(50L);
final AtomicBoolean didAssertThreadOne = new AtomicBoolean(false);
final AtomicBoolean didAssertThreadTwo = new AtomicBoolean(false);
final AtomicBoolean didAssertGlobalThread = new AtomicBoolean(false);
when(streamThreadOne.clientInstanceIds(any()))
.thenReturn(Collections.singletonMap("any-client-1", new KafkaFutureImpl<>() {
@Override
public Uuid get(final long timeout, final TimeUnit timeUnit) {
didAssertThreadOne.set(true);
assertThat(timeout, equalTo(expectedTimeout.getAndAdd(-10L)));
mockTime.sleep(10L);
return null;
}
}));
when(streamThreadTwo.clientInstanceIds(any()))
.thenReturn(Collections.singletonMap("any-client-2", new KafkaFutureImpl<>() {
@Override
public Uuid get(final long timeout, final TimeUnit timeUnit) {
didAssertThreadTwo.set(true);
assertThat(timeout, equalTo(expectedTimeout.getAndAdd(-5L)));
mockTime.sleep(5L);
return null;
}
}));
final StreamsBuilder builder = getBuilderWithSource();
builder.globalTable("anyTopic");
try (final KafkaStreams streams = new KafkaStreams(builder.build(), props, supplier, time)) {
streams.start();
when(globalStreamThreadMockedConstruction.constructed().get(0).globalConsumerInstanceId(any()))
.thenReturn(new KafkaFutureImpl<>() {
@Override
public Uuid get(final long timeout, final TimeUnit timeUnit) {
didAssertGlobalThread.set(true);
assertThat(timeout, equalTo(expectedTimeout.getAndAdd(-8L)));
mockTime.sleep(8L);
return null;
}
});
streams.clientInstanceIds(Duration.ofMillis(60L));
}
assertThat(didAssertThreadOne.get(), equalTo(true));
assertThat(didAssertThreadTwo.get(), equalTo(true));
assertThat(didAssertGlobalThread.get(), equalTo(true));
}
@Test
public void shouldThrowIfPatternSubscriptionUsedWithStreamsProtocol() {
final Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "test-app");
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:2018");
props.put(StreamsConfig.GROUP_PROTOCOL_CONFIG, "streams");
// Simulate pattern subscription
final Topology topology = new Topology();
topology.addSource("source", java.util.regex.Pattern.compile("topic-.*"));
final UnsupportedOperationException ex = assertThrows(
UnsupportedOperationException.class,
() -> new KafkaStreams(topology, props)
);
assert ex.getMessage().contains("Pattern subscriptions are not supported with the STREAMS protocol");
}
@Test
public void shouldLogWarningIfNonDefaultClientSupplierUsedWithStreamsProtocol() {
final Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "test-app");
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:2018");
props.put(StreamsConfig.GROUP_PROTOCOL_CONFIG, "streams");
final Topology topology = new Topology();
topology.addSource("source", "topic");
try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KafkaStreams.class)) {
appender.setClassLogger(KafkaStreams.class, Level.WARN);
try (@SuppressWarnings("unused") final KafkaStreams ignored = new KafkaStreams(topology, new StreamsConfig(props), new MockClientSupplier())) {
assertTrue(appender.getMessages().stream()
.anyMatch(msg -> msg.contains("A non-default kafka client supplier was supplied. " +
"Note that supplying a custom main consumer is not supported with the STREAMS protocol.")));
}
}
}
private Topology getStatefulTopology(final String inputTopic,
final String outputTopic,
final String globalTopicName,
final String storeName,
final String globalStoreName,
final boolean isPersistentStore) {
final StoreBuilder<KeyValueStore<String, Long>> storeBuilder = Stores.keyValueStoreBuilder(
isPersistentStore ?
Stores.persistentKeyValueStore(storeName)
: Stores.inMemoryKeyValueStore(storeName),
Serdes.String(),
Serdes.Long());
final Topology topology = new Topology();
topology.addSource("source", new StringDeserializer(), new StringDeserializer(), inputTopic)
.addProcessor("process", () -> new Processor<String, String, String, String>() {
private ProcessorContext<String, String> context;
@Override
public void init(final ProcessorContext<String, String> context) {
this.context = context;
}
@Override
public void process(final Record<String, String> record) {
final KeyValueStore<String, Long> kvStore = context.getStateStore(storeName);
kvStore.put(record.key(), 5L);
context.forward(record.withValue("5"));
context.commit();
}
}, "source")
.addStateStore(storeBuilder, "process")
.addSink("sink", outputTopic, new StringSerializer(), new StringSerializer(), "process");
final StoreBuilder<KeyValueStore<String, String>> globalStoreBuilder = Stores.keyValueStoreBuilder(
isPersistentStore ? Stores.persistentKeyValueStore(globalStoreName) : Stores.inMemoryKeyValueStore(globalStoreName),
Serdes.String(), Serdes.String()).withLoggingDisabled();
topology.addGlobalStore(
globalStoreBuilder,
"global",
new StringDeserializer(),
new StringDeserializer(),
globalTopicName,
globalTopicName + "-processor",
new MockProcessorSupplier<>());
return topology;
}
private StreamsBuilder getBuilderWithSource() {
final StreamsBuilder builder = new StreamsBuilder();
builder.stream("source-topic");
return builder;
}
private void startStreamsAndCheckDirExists(final Topology topology, final boolean shouldFilesExist) {
try (MockedConstruction<StateDirectory> stateDirectoryMockedConstruction = mockConstruction(StateDirectory.class,
(mock, context) -> {
when(mock.initializeProcessId()).thenReturn(UUID.randomUUID());
assertEquals(4, context.arguments().size());
assertEquals(shouldFilesExist, context.arguments().get(2));
})) {
try (final KafkaStreams ignored = new KafkaStreams(topology, props, supplier, time)) {
// verify that stateDirectory constructor was called
assertFalse(stateDirectoryMockedConstruction.constructed().isEmpty());
}
}
}
}
| KafkaStreamsWithTerminableThread |
java | apache__logging-log4j2 | log4j-core-test/src/test/java/org/apache/logging/log4j/core/filter/RegexFilterTest.java | {
"start": 1670,
"end": 5273
} | class ____ {
@BeforeAll
static void before() {
StatusLogger.getLogger().setLevel(Level.OFF);
}
@Test
void testRegexFilterDoesNotThrowWithAllTheParametersExceptRegexEqualNull() {
assertDoesNotThrow(() -> {
RegexFilter.createFilter(".* test .*", null, null, null, null);
});
}
@Test
void testThresholds() throws Exception {
RegexFilter filter = RegexFilter.createFilter(".* test .*", null, false, null, null);
filter.start();
assertTrue(filter.isStarted());
assertSame(
Filter.Result.NEUTRAL, filter.filter(null, Level.DEBUG, null, (Object) "This is a test message", null));
assertSame(Filter.Result.DENY, filter.filter(null, Level.ERROR, null, (Object) "This is not a test", null));
LogEvent event = Log4jLogEvent.newBuilder() //
.setLevel(Level.DEBUG) //
.setMessage(new SimpleMessage("Another test message")) //
.build();
assertSame(Filter.Result.NEUTRAL, filter.filter(event));
event = Log4jLogEvent.newBuilder() //
.setLevel(Level.ERROR) //
.setMessage(new SimpleMessage("test")) //
.build();
assertSame(Filter.Result.DENY, filter.filter(event));
filter = RegexFilter.createFilter(null, null, false, null, null);
assertNull(filter);
}
@Test
void testDotAllPattern() throws Exception {
final String singleLine = "test single line matches";
final String multiLine = "test multi line matches\nsome more lines";
final RegexFilter filter = RegexFilter.createFilter(
".*line.*", new String[] {"DOTALL", "COMMENTS"}, false, Filter.Result.DENY, Filter.Result.ACCEPT);
final Result singleLineResult = filter.filter(null, null, null, (Object) singleLine, null);
final Result multiLineResult = filter.filter(null, null, null, (Object) multiLine, null);
assertThat(singleLineResult, equalTo(Result.DENY));
assertThat(multiLineResult, equalTo(Result.DENY));
}
@Test
void testNoMsg() throws Exception {
final RegexFilter filter = RegexFilter.createFilter(".* test .*", null, false, null, null);
filter.start();
assertTrue(filter.isStarted());
assertSame(Filter.Result.DENY, filter.filter(null, Level.DEBUG, null, (Object) null, null));
assertSame(Filter.Result.DENY, filter.filter(null, Level.DEBUG, null, (Message) null, null));
assertSame(Filter.Result.DENY, filter.filter(null, Level.DEBUG, null, null, (Object[]) null));
}
@Test
void testParameterizedMsg() throws Exception {
final String msg = "params {} {}";
final Object[] params = {"foo", "bar"};
// match against raw message
final RegexFilter rawFilter = RegexFilter.createFilter(
"params \\{\\} \\{\\}",
null,
true, // useRawMsg
Result.ACCEPT,
Result.DENY);
final Result rawResult = rawFilter.filter(null, null, null, msg, params);
assertThat(rawResult, equalTo(Result.ACCEPT));
// match against formatted message
final RegexFilter fmtFilter = RegexFilter.createFilter(
"params foo bar",
null,
false, // useRawMsg
Result.ACCEPT,
Result.DENY);
final Result fmtResult = fmtFilter.filter(null, null, null, msg, params);
assertThat(fmtResult, equalTo(Result.ACCEPT));
}
}
| RegexFilterTest |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/TaskPool.java | {
"start": 3537,
"end": 3914
} | class ____ {
private static final Logger LOG =
LoggerFactory.getLogger(TaskPool.class);
/**
* Interval in milliseconds to await completion.
*/
private static final int SLEEP_INTERVAL_AWAITING_COMPLETION = 10;
private TaskPool() {
}
/**
* Callback invoked to process an item.
* @param <I> item type being processed
* @param <E> exception | TaskPool |
java | lettuce-io__lettuce-core | src/test/java/io/lettuce/core/AsyncConnectionIntegrationTests.java | {
"start": 1612,
"end": 5281
} | class ____ extends TestSupport {
private final RedisAsyncCommands<String, String> async;
@Inject
AsyncConnectionIntegrationTests(@New final StatefulRedisConnection<String, String> connection) {
this.async = connection.async();
connection.sync().flushall();
}
@Test
void multi() {
assertThat(TestFutures.getOrTimeout(async.multi())).isEqualTo("OK");
Future<String> set = async.set(key, value);
Future<Long> rpush = async.rpush("list", "1", "2");
Future<List<String>> lrange = async.lrange("list", 0, -1);
assertThat(!set.isDone() && !rpush.isDone() && !rpush.isDone()).isTrue();
assertThat(TestFutures.getOrTimeout(async.exec())).contains("OK", 2L, list("1", "2"));
assertThat(TestFutures.getOrTimeout(set)).isEqualTo("OK");
assertThat(TestFutures.getOrTimeout(rpush)).isEqualTo(2L);
assertThat(TestFutures.getOrTimeout(lrange)).isEqualTo(list("1", "2"));
}
@Test
void watch() {
assertThat(TestFutures.getOrTimeout(async.watch(key))).isEqualTo("OK");
async.set(key, value + "X");
async.multi();
Future<String> set = async.set(key, value);
Future<Long> append = async.append(key, "foo");
assertThat(TestFutures.getOrTimeout(async.exec())).isEmpty();
assertThat(TestFutures.getOrTimeout(set)).isNull();
assertThat(TestFutures.getOrTimeout(append)).isNull();
}
@Test
void futureListener() {
// using 'key' causes issues for some strange reason so using a fresh key
final String listKey = "list:" + key;
final List<RedisFuture<?>> futures = new ArrayList<>();
for (int i = 0; i < 1000; i++) {
futures.add(async.lpush(listKey, "" + i));
}
TestFutures.awaitOrTimeout(futures);
Long len = TestFutures.getOrTimeout(async.llen(listKey));
assertThat(len.intValue()).isEqualTo(1000);
RedisFuture<List<String>> sort = async.sort(listKey);
assertThat(sort.isCancelled()).isFalse();
final List<Object> run = new ArrayList<>();
sort.thenRun(() -> run.add(new Object()));
TestFutures.awaitOrTimeout(sort);
Delay.delay(Duration.ofMillis(100));
assertThat(run).hasSize(1);
}
@Test
void futureListenerCompleted() {
final RedisFuture<String> set = async.set(key, value);
TestFutures.awaitOrTimeout(set);
final List<Object> run = new ArrayList<>();
set.thenRun(() -> run.add(new Object()));
assertThat(run).hasSize(1);
}
@Test
void discardCompletesFutures() {
async.multi();
Future<String> set = async.set(key, value);
async.discard();
assertThat(TestFutures.getOrTimeout(set)).isNull();
}
@Test
void awaitAll() {
Future<String> get1 = async.get(key);
Future<String> set = async.set(key, value);
Future<String> get2 = async.get(key);
Future<Long> append = async.append(key, value);
assertThat(Futures.awaitAll(1, TimeUnit.SECONDS, get1, set, get2, append)).isTrue();
assertThat(TestFutures.getOrTimeout(get1)).isNull();
assertThat(TestFutures.getOrTimeout(set)).isEqualTo("OK");
assertThat(TestFutures.getOrTimeout(get2)).isEqualTo(value);
assertThat(TestFutures.getOrTimeout(append).longValue()).isEqualTo(value.length() * 2);
}
@Test
void awaitAllTimeout() {
Future<KeyValue<String, String>> blpop = async.blpop(1, key);
assertThat(Futures.await(1, TimeUnit.NANOSECONDS, blpop)).isFalse();
}
}
| AsyncConnectionIntegrationTests |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/types/extraction/ExtractionUtils.java | {
"start": 47193,
"end": 49582
} | class ____ convert, may be <b>null</b>
* @return the corresponding primitive type if {@code cls} is a wrapper class, <b>null</b>
* otherwise
* @see #primitiveToWrapper(Class)
* @since 2.4
*/
public static Class<?> wrapperToPrimitive(Class<?> cls) {
return wrapperPrimitiveMap.get(cls);
}
/** Helper map for {@link #classForName(String, boolean, ClassLoader)}. */
private static final Map<String, Class<?>> primitiveNameMap = new HashMap<>();
static {
primitiveNameMap.put("int", Integer.TYPE);
primitiveNameMap.put("boolean", Boolean.TYPE);
primitiveNameMap.put("float", Float.TYPE);
primitiveNameMap.put("long", Long.TYPE);
primitiveNameMap.put("short", Short.TYPE);
primitiveNameMap.put("byte", Byte.TYPE);
primitiveNameMap.put("double", Double.TYPE);
primitiveNameMap.put("char", Character.TYPE);
primitiveNameMap.put("void", Void.TYPE);
}
/**
* Similar to {@link Class#forName(String, boolean, ClassLoader)} but resolves primitive names
* as well.
*/
public static Class<?> classForName(String name, boolean initialize, ClassLoader classLoader)
throws ClassNotFoundException {
if (primitiveNameMap.containsKey(name)) {
return primitiveNameMap.get(name);
}
return Class.forName(name, initialize, classLoader);
}
/** Utility to know if the type contains a type variable that needs to be resolved. */
private static boolean containsTypeVariable(Type type) {
if (type instanceof TypeVariable) {
return true;
} else if (type instanceof ParameterizedType) {
return Arrays.stream(((ParameterizedType) type).getActualTypeArguments())
.anyMatch(ExtractionUtils::containsTypeVariable);
} else if (type instanceof GenericArrayType) {
return containsTypeVariable(((GenericArrayType) type).getGenericComponentType());
}
// WildcardType does not contain a type variable, and we don't consider it resolvable.
return false;
}
/**
* {@link ParameterizedType} we use for resolving types, so that if you resolve the type
* CompletableFuture<T>, we can create resolve the parameter and return
* CompletableFuture<Long>.
*/
private static | to |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/nestedbeans/mixed/_target/WaterPlantDto.java | {
"start": 249,
"end": 430
} | class ____ {
private String kind;
public String getKind() {
return kind;
}
public void setKind(String kind) {
this.kind = kind;
}
}
| WaterPlantDto |
java | hibernate__hibernate-orm | tooling/metamodel-generator/src/test/java/org/hibernate/processor/test/arraytype/ArrayTestWithTypeUseTest.java | {
"start": 581,
"end": 1837
} | class ____ {
@Test
@TestForIssue(jiraKey = "HHH-12011")
@WithClasses(TestEntity.class)
void testArrayWithBeanValidation() {
assertMetamodelClassGeneratedFor( TestEntity.class );
// Primitive Arrays
assertAttributeTypeInMetaModelFor( TestEntity.class, "primitiveAnnotatedArray", byte[].class, "Wrong type for field." );
assertAttributeTypeInMetaModelFor( TestEntity.class, "primitiveArray", byte[].class, "Wrong type for field." );
// Primitive non-array
assertAttributeTypeInMetaModelFor( TestEntity.class, "primitiveAnnotated", Byte.class, "Wrong type for field." );
assertAttributeTypeInMetaModelFor( TestEntity.class, "primitive", Byte.class, "Wrong type for field." );
// Non-primitive Arrays
assertAttributeTypeInMetaModelFor( TestEntity.class, "nonPrimitiveAnnotatedArray", Byte[].class, "Wrong type for field." );
assertAttributeTypeInMetaModelFor( TestEntity.class, "nonPrimitiveArray", Byte[].class, "Wrong type for field." );
// Non-primitive non-array
assertAttributeTypeInMetaModelFor( TestEntity.class, "nonPrimitiveAnnotated", Byte.class, "Wrong type for field." );
assertAttributeTypeInMetaModelFor( TestEntity.class, "nonPrimitive", Byte.class, "Wrong type for field." );
// Custom | ArrayTestWithTypeUseTest |
java | quarkusio__quarkus | test-framework/common/src/main/java/io/quarkus/test/common/NativeImageStartedNotifier.java | {
"start": 182,
"end": 352
} | class ____ been deprecated and users should use {@link IntegrationTestStartedNotifier} instead when working
* with {@code @QuarkusIntegrationTest}
*/
@Deprecated
public | has |
java | elastic__elasticsearch | x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/writer/AbstractControlMsgWriter.java | {
"start": 479,
"end": 2123
} | class ____ {
/**
* This should be the same size as the buffer in the C++ native process.
*/
public static final int FLUSH_SPACES_LENGTH = 2048;
protected final LengthEncodedWriter lengthEncodedWriter;
private final int numberOfFields;
/**
* Construct the control message writer with a LengthEncodedWriter
*
* @param lengthEncodedWriter The writer
* @param numberOfFields The number of fields the process expects in each record
*/
public AbstractControlMsgWriter(LengthEncodedWriter lengthEncodedWriter, int numberOfFields) {
this.lengthEncodedWriter = Objects.requireNonNull(lengthEncodedWriter);
this.numberOfFields = numberOfFields;
}
// todo(hendrikm): workaround, see
// https://github.com/elastic/machine-learning-cpp/issues/123
protected void fillCommandBuffer() throws IOException {
char[] spaces = new char[FLUSH_SPACES_LENGTH];
Arrays.fill(spaces, ' ');
writeMessage(new String(spaces));
}
/**
* Transform the supplied control message to length encoded values and
* write to the OutputStream.
*
* @param message The control message to write.
*/
protected void writeMessage(String message) throws IOException {
lengthEncodedWriter.writeNumFields(numberOfFields);
// Write blank values for all fields other than the control field
for (int i = 1; i < numberOfFields; ++i) {
lengthEncodedWriter.writeField("");
}
// The control field comes last
lengthEncodedWriter.writeField(message);
}
}
| AbstractControlMsgWriter |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/IdentifierNameTest.java | {
"start": 12247,
"end": 12638
} | class ____ {
// BUG: Diagnostic contains: getRpcPolicy
int getRPCPolicy;
// BUG: Diagnostic contains: getRpc
int getRPC;
}
""")
.doTest();
}
@Test
public void initialismsInVariableNames_magicNamesExempt() {
helper
.addSourceLines(
"Test.java",
"""
| Test |
java | apache__camel | components/camel-guava-eventbus/src/test/java/org/apache/camel/component/guava/eventbus/GuavaEventBusProducerTest.java | {
"start": 1189,
"end": 1990
} | class ____ extends CamelTestSupport {
@BindToRegistry("eventBus")
EventBus eventBus = new EventBus();
Object receivedEvent;
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start").to("guava-eventbus:eventBus");
}
};
}
@Test
public void shouldReceiveMessageFromCamel() {
// Given
String message = "message";
eventBus.register(this);
// When
sendBody("direct:start", message);
// Then
assertEquals(message, receivedEvent);
}
@Subscribe
public void receiveEvent(Object event) {
this.receivedEvent = event;
}
}
| GuavaEventBusProducerTest |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/DisabledPlacementProcessor.java | {
"start": 1528,
"end": 3347
} | class ____ extends AbstractPlacementProcessor {
private static final Logger LOG =
LoggerFactory.getLogger(DisabledPlacementProcessor.class);
@Override
public void registerApplicationMaster(
ApplicationAttemptId applicationAttemptId,
RegisterApplicationMasterRequest request,
RegisterApplicationMasterResponse response)
throws IOException, YarnException {
if (request.getPlacementConstraints() != null && !request
.getPlacementConstraints().isEmpty()) {
String message = "Found non empty placement constraints map in "
+ "RegisterApplicationMasterRequest for application="
+ applicationAttemptId.toString() + ", but the configured "
+ YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_HANDLER
+ " cannot handle placement constraints. Rejecting this "
+ "registerApplicationMaster operation";
LOG.warn(message);
throw new YarnException(message);
}
nextAMSProcessor.registerApplicationMaster(applicationAttemptId, request,
response);
}
@Override
public void allocate(ApplicationAttemptId appAttemptId,
AllocateRequest request, AllocateResponse response) throws YarnException {
if (request.getSchedulingRequests() != null && !request
.getSchedulingRequests().isEmpty()) {
String message = "Found non empty SchedulingRequest in "
+ "AllocateRequest for application="
+ appAttemptId.toString() + ", but the configured "
+ YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_HANDLER
+ " cannot handle placement constraints. Rejecting this "
+ "allocate operation";
LOG.warn(message);
throw new YarnException(message);
}
nextAMSProcessor.allocate(appAttemptId, request, response);
}
}
| DisabledPlacementProcessor |
java | spring-projects__spring-security | web/src/main/java/org/springframework/security/web/firewall/StrictHttpFirewall.java | {
"start": 3407,
"end": 25584
} | class ____ implements HttpFirewall {
/**
* Used to specify to {@link #setAllowedHttpMethods(Collection)} that any HTTP method
* should be allowed.
*/
private static final Set<String> ALLOW_ANY_HTTP_METHOD = Collections.emptySet();
private static final String ENCODED_PERCENT = "%25";
private static final String PERCENT = "%";
private static final List<String> FORBIDDEN_ENCODED_PERIOD = Collections
.unmodifiableList(Arrays.asList("%2e", "%2E"));
private static final List<String> FORBIDDEN_SEMICOLON = Collections
.unmodifiableList(Arrays.asList(";", "%3b", "%3B"));
private static final List<String> FORBIDDEN_FORWARDSLASH = Collections
.unmodifiableList(Arrays.asList("%2f", "%2F"));
private static final List<String> FORBIDDEN_DOUBLE_FORWARDSLASH = Collections
.unmodifiableList(Arrays.asList("//", "%2f%2f", "%2f%2F", "%2F%2f", "%2F%2F"));
private static final List<String> FORBIDDEN_BACKSLASH = Collections
.unmodifiableList(Arrays.asList("\\", "%5c", "%5C"));
private static final List<String> FORBIDDEN_NULL = Collections.unmodifiableList(Arrays.asList("\0", "%00"));
private static final List<String> FORBIDDEN_LF = Collections.unmodifiableList(Arrays.asList("\n", "%0a", "%0A"));
private static final List<String> FORBIDDEN_CR = Collections.unmodifiableList(Arrays.asList("\r", "%0d", "%0D"));
private static final List<String> FORBIDDEN_LINE_SEPARATOR = Collections.unmodifiableList(Arrays.asList("\u2028"));
private static final List<String> FORBIDDEN_PARAGRAPH_SEPARATOR = Collections
.unmodifiableList(Arrays.asList("\u2029"));
private Set<String> encodedUrlBlocklist = new HashSet<>();
private Set<String> decodedUrlBlocklist = new HashSet<>();
private Set<String> allowedHttpMethods = createDefaultAllowedHttpMethods();
private Predicate<String> allowedHostnames = (hostname) -> true;
private static final Pattern ASSIGNED_AND_NOT_ISO_CONTROL_PATTERN = Pattern
.compile("[\\p{IsAssigned}&&[^\\p{IsControl}]]*");
private static final Predicate<String> ASSIGNED_AND_NOT_ISO_CONTROL_PREDICATE = (
s) -> ASSIGNED_AND_NOT_ISO_CONTROL_PATTERN.matcher(s).matches();
private static final Pattern HEADER_VALUE_PATTERN = Pattern.compile("[\\p{IsAssigned}&&[[^\\p{IsControl}]||\\t]]*");
private static final Predicate<String> HEADER_VALUE_PREDICATE = (s) -> HEADER_VALUE_PATTERN.matcher(s).matches();
private Predicate<String> allowedHeaderNames = ALLOWED_HEADER_NAMES;
public static final Predicate<String> ALLOWED_HEADER_NAMES = ASSIGNED_AND_NOT_ISO_CONTROL_PREDICATE;
private Predicate<String> allowedHeaderValues = ALLOWED_HEADER_VALUES;
public static final Predicate<String> ALLOWED_HEADER_VALUES = HEADER_VALUE_PREDICATE;
private Predicate<String> allowedParameterNames = ALLOWED_PARAMETER_NAMES;
public static final Predicate<String> ALLOWED_PARAMETER_NAMES = ASSIGNED_AND_NOT_ISO_CONTROL_PREDICATE;
private Predicate<String> allowedParameterValues = ALLOWED_PARAMETER_VALUES;
public static final Predicate<String> ALLOWED_PARAMETER_VALUES = (value) -> true;
public StrictHttpFirewall() {
urlBlocklistsAddAll(FORBIDDEN_SEMICOLON);
urlBlocklistsAddAll(FORBIDDEN_FORWARDSLASH);
urlBlocklistsAddAll(FORBIDDEN_DOUBLE_FORWARDSLASH);
urlBlocklistsAddAll(FORBIDDEN_BACKSLASH);
urlBlocklistsAddAll(FORBIDDEN_NULL);
urlBlocklistsAddAll(FORBIDDEN_LF);
urlBlocklistsAddAll(FORBIDDEN_CR);
this.encodedUrlBlocklist.add(ENCODED_PERCENT);
this.encodedUrlBlocklist.addAll(FORBIDDEN_ENCODED_PERIOD);
this.decodedUrlBlocklist.add(PERCENT);
this.decodedUrlBlocklist.addAll(FORBIDDEN_LINE_SEPARATOR);
this.decodedUrlBlocklist.addAll(FORBIDDEN_PARAGRAPH_SEPARATOR);
}
/**
* Sets if any HTTP method is allowed. If this set to true, then no validation on the
* HTTP method will be performed. This can open the application up to
* <a href="https://www.owasp.org/index.php/Test_HTTP_Methods_(OTG-CONFIG-006)"> HTTP
* Verb tampering and XST attacks</a>
* @param unsafeAllowAnyHttpMethod if true, disables HTTP method validation, else
* resets back to the defaults. Default is false.
* @since 5.1
* @see #setAllowedHttpMethods(Collection)
*/
public void setUnsafeAllowAnyHttpMethod(boolean unsafeAllowAnyHttpMethod) {
this.allowedHttpMethods = unsafeAllowAnyHttpMethod ? ALLOW_ANY_HTTP_METHOD : createDefaultAllowedHttpMethods();
}
/**
* <p>
* Determines which HTTP methods should be allowed. The default is to allow "DELETE",
* "GET", "HEAD", "OPTIONS", "PATCH", "POST", and "PUT".
* </p>
* @param allowedHttpMethods the case-sensitive collection of HTTP methods that are
* allowed.
* @since 5.1
* @see #setUnsafeAllowAnyHttpMethod(boolean)
*/
public void setAllowedHttpMethods(Collection<String> allowedHttpMethods) {
Assert.notNull(allowedHttpMethods, "allowedHttpMethods cannot be null");
this.allowedHttpMethods = (allowedHttpMethods != ALLOW_ANY_HTTP_METHOD) ? new HashSet<>(allowedHttpMethods)
: ALLOW_ANY_HTTP_METHOD;
}
/**
* <p>
* Determines if semicolon is allowed in the URL (i.e. matrix variables). The default
* is to disable this behavior because it is a common way of attempting to perform
* <a href="https://www.owasp.org/index.php/Reflected_File_Download">Reflected File
* Download Attacks</a>. It is also the source of many exploits which bypass URL based
* security.
* </p>
* <p>
* For example, the following CVEs are a subset of the issues related to ambiguities
* in the Servlet Specification on how to treat semicolons that led to CVEs:
* </p>
* <ul>
* <li><a href="https://pivotal.io/security/cve-2016-5007">cve-2016-5007</a></li>
* <li><a href="https://pivotal.io/security/cve-2016-9879">cve-2016-9879</a></li>
* <li><a href="https://pivotal.io/security/cve-2018-1199">cve-2018-1199</a></li>
* </ul>
*
* <p>
* If you are wanting to allow semicolons, please reconsider as it is a very common
* source of security bypasses. A few common reasons users want semicolons and
* alternatives are listed below:
* </p>
* <ul>
* <li>Including the JSESSIONID in the path - You should not include session id (or
* any sensitive information) in a URL as it can lead to leaking. Instead use Cookies.
* </li>
* <li>Matrix Variables - Users wanting to leverage Matrix Variables should consider
* using HTTP parameters instead.</li>
* </ul>
* @param allowSemicolon should semicolons be allowed in the URL. Default is false
*/
public void setAllowSemicolon(boolean allowSemicolon) {
if (allowSemicolon) {
urlBlocklistsRemoveAll(FORBIDDEN_SEMICOLON);
}
else {
urlBlocklistsAddAll(FORBIDDEN_SEMICOLON);
}
}
/**
* <p>
* Determines if a slash "/" that is URL encoded "%2F" should be allowed in the path
* or not. The default is to not allow this behavior because it is a common way to
* bypass URL based security.
* </p>
* <p>
* For example, due to ambiguities in the servlet specification, the value is not
* parsed consistently which results in different values in {@code HttpServletRequest}
* path related values which allow bypassing certain security constraints.
* </p>
* @param allowUrlEncodedSlash should a slash "/" that is URL encoded "%2F" be allowed
* in the path or not. Default is false.
*/
public void setAllowUrlEncodedSlash(boolean allowUrlEncodedSlash) {
if (allowUrlEncodedSlash) {
urlBlocklistsRemoveAll(FORBIDDEN_FORWARDSLASH);
}
else {
urlBlocklistsAddAll(FORBIDDEN_FORWARDSLASH);
}
}
/**
* <p>
* Determines if double slash "//" that is URL encoded "%2F%2F" should be allowed in
* the path or not. The default is to not allow.
* </p>
* @param allowUrlEncodedDoubleSlash should a slash "//" that is URL encoded "%2F%2F"
* be allowed in the path or not. Default is false.
*/
public void setAllowUrlEncodedDoubleSlash(boolean allowUrlEncodedDoubleSlash) {
if (allowUrlEncodedDoubleSlash) {
urlBlocklistsRemoveAll(FORBIDDEN_DOUBLE_FORWARDSLASH);
}
else {
urlBlocklistsAddAll(FORBIDDEN_DOUBLE_FORWARDSLASH);
}
}
/**
* <p>
* Determines if a period "." that is URL encoded "%2E" should be allowed in the path
* or not. The default is to not allow this behavior because it is a frequent source
* of security exploits.
* </p>
* <p>
* For example, due to ambiguities in the servlet specification a URL encoded period
* might lead to bypassing security constraints through a directory traversal attack.
* This is because the path is not parsed consistently which results in different
* values in {@code HttpServletRequest} path related values which allow bypassing
* certain security constraints.
* </p>
* @param allowUrlEncodedPeriod should a period "." that is URL encoded "%2E" be
* allowed in the path or not. Default is false.
*/
public void setAllowUrlEncodedPeriod(boolean allowUrlEncodedPeriod) {
if (allowUrlEncodedPeriod) {
this.encodedUrlBlocklist.removeAll(FORBIDDEN_ENCODED_PERIOD);
}
else {
this.encodedUrlBlocklist.addAll(FORBIDDEN_ENCODED_PERIOD);
}
}
/**
* <p>
* Determines if a backslash "\" or a URL encoded backslash "%5C" should be allowed in
* the path or not. The default is not to allow this behavior because it is a frequent
* source of security exploits.
* </p>
* <p>
* For example, due to ambiguities in the servlet specification a URL encoded period
* might lead to bypassing security constraints through a directory traversal attack.
* This is because the path is not parsed consistently which results in different
* values in {@code HttpServletRequest} path related values which allow bypassing
* certain security constraints.
* </p>
* @param allowBackSlash a backslash "\" or a URL encoded backslash "%5C" be allowed
* in the path or not. Default is false
*/
public void setAllowBackSlash(boolean allowBackSlash) {
if (allowBackSlash) {
urlBlocklistsRemoveAll(FORBIDDEN_BACKSLASH);
}
else {
urlBlocklistsAddAll(FORBIDDEN_BACKSLASH);
}
}
/**
* <p>
* Determines if a null "\0" or a URL encoded nul "%00" should be allowed in the path
* or not. The default is not to allow this behavior because it is a frequent source
* of security exploits.
* </p>
* @param allowNull a null "\0" or a URL encoded null "%00" be allowed in the path or
* not. Default is false
* @since 5.4
*/
public void setAllowNull(boolean allowNull) {
if (allowNull) {
urlBlocklistsRemoveAll(FORBIDDEN_NULL);
}
else {
urlBlocklistsAddAll(FORBIDDEN_NULL);
}
}
/**
* <p>
* Determines if a percent "%" that is URL encoded "%25" should be allowed in the path
* or not. The default is not to allow this behavior because it is a frequent source
* of security exploits.
* </p>
* <p>
* For example, this can lead to exploits that involve double URL encoding that lead
* to bypassing security constraints.
* </p>
* @param allowUrlEncodedPercent if a percent "%" that is URL encoded "%25" should be
* allowed in the path or not. Default is false
*/
public void setAllowUrlEncodedPercent(boolean allowUrlEncodedPercent) {
if (allowUrlEncodedPercent) {
this.encodedUrlBlocklist.remove(ENCODED_PERCENT);
this.decodedUrlBlocklist.remove(PERCENT);
}
else {
this.encodedUrlBlocklist.add(ENCODED_PERCENT);
this.decodedUrlBlocklist.add(PERCENT);
}
}
/**
* Determines if a URL encoded Carriage Return is allowed in the path or not. The
* default is not to allow this behavior because it is a frequent source of security
* exploits.
* @param allowUrlEncodedCarriageReturn if URL encoded Carriage Return is allowed in
* the URL or not. Default is false.
*/
public void setAllowUrlEncodedCarriageReturn(boolean allowUrlEncodedCarriageReturn) {
if (allowUrlEncodedCarriageReturn) {
urlBlocklistsRemoveAll(FORBIDDEN_CR);
}
else {
urlBlocklistsAddAll(FORBIDDEN_CR);
}
}
/**
* Determines if a URL encoded Line Feed is allowed in the path or not. The default is
* not to allow this behavior because it is a frequent source of security exploits.
* @param allowUrlEncodedLineFeed if URL encoded Line Feed is allowed in the URL or
* not. Default is false.
*/
public void setAllowUrlEncodedLineFeed(boolean allowUrlEncodedLineFeed) {
if (allowUrlEncodedLineFeed) {
urlBlocklistsRemoveAll(FORBIDDEN_LF);
}
else {
urlBlocklistsAddAll(FORBIDDEN_LF);
}
}
/**
* Determines if a URL encoded paragraph separator is allowed in the path or not. The
* default is not to allow this behavior because it is a frequent source of security
* exploits.
* @param allowUrlEncodedParagraphSeparator if URL encoded paragraph separator is
* allowed in the URL or not. Default is false.
*/
public void setAllowUrlEncodedParagraphSeparator(boolean allowUrlEncodedParagraphSeparator) {
if (allowUrlEncodedParagraphSeparator) {
this.decodedUrlBlocklist.removeAll(FORBIDDEN_PARAGRAPH_SEPARATOR);
}
else {
this.decodedUrlBlocklist.addAll(FORBIDDEN_PARAGRAPH_SEPARATOR);
}
}
/**
* Determines if a URL encoded line separator is allowed in the path or not. The
* default is not to allow this behavior because it is a frequent source of security
* exploits.
* @param allowUrlEncodedLineSeparator if URL encoded line separator is allowed in the
* URL or not. Default is false.
*/
public void setAllowUrlEncodedLineSeparator(boolean allowUrlEncodedLineSeparator) {
if (allowUrlEncodedLineSeparator) {
this.decodedUrlBlocklist.removeAll(FORBIDDEN_LINE_SEPARATOR);
}
else {
this.decodedUrlBlocklist.addAll(FORBIDDEN_LINE_SEPARATOR);
}
}
/**
* <p>
* Determines which header names should be allowed. The default is to reject header
* names that contain ISO control characters and characters that are not defined.
* </p>
* @param allowedHeaderNames the predicate for testing header names
* @since 5.4
* @see Character#isISOControl(int)
* @see Character#isDefined(int)
*/
public void setAllowedHeaderNames(Predicate<String> allowedHeaderNames) {
Assert.notNull(allowedHeaderNames, "allowedHeaderNames cannot be null");
this.allowedHeaderNames = allowedHeaderNames;
}
/**
* <p>
* Determines which header values should be allowed. The default is to reject header
* values that contain ISO control characters and characters that are not defined.
* </p>
* @param allowedHeaderValues the predicate for testing hostnames
* @since 5.4
* @see Character#isISOControl(int)
* @see Character#isDefined(int)
*/
public void setAllowedHeaderValues(Predicate<String> allowedHeaderValues) {
Assert.notNull(allowedHeaderValues, "allowedHeaderValues cannot be null");
this.allowedHeaderValues = allowedHeaderValues;
}
/**
* Determines which parameter names should be allowed. The default is to reject header
* names that contain ISO control characters and characters that are not defined.
* @param allowedParameterNames the predicate for testing parameter names
* @since 5.4
* @see Character#isISOControl(int)
* @see Character#isDefined(int)
*/
public void setAllowedParameterNames(Predicate<String> allowedParameterNames) {
Assert.notNull(allowedParameterNames, "allowedParameterNames cannot be null");
this.allowedParameterNames = allowedParameterNames;
}
/**
* <p>
* Determines which parameter values should be allowed. The default is to allow any
* parameter value.
* </p>
* @param allowedParameterValues the predicate for testing parameter values
* @since 5.4
*/
public void setAllowedParameterValues(Predicate<String> allowedParameterValues) {
Assert.notNull(allowedParameterValues, "allowedParameterValues cannot be null");
this.allowedParameterValues = allowedParameterValues;
}
/**
* <p>
* Determines which hostnames should be allowed. The default is to allow any hostname.
* </p>
* @param allowedHostnames the predicate for testing hostnames
* @since 5.2
*/
public void setAllowedHostnames(Predicate<String> allowedHostnames) {
Assert.notNull(allowedHostnames, "allowedHostnames cannot be null");
this.allowedHostnames = allowedHostnames;
}
private void urlBlocklistsAddAll(Collection<String> values) {
this.encodedUrlBlocklist.addAll(values);
this.decodedUrlBlocklist.addAll(values);
}
private void urlBlocklistsRemoveAll(Collection<String> values) {
this.encodedUrlBlocklist.removeAll(values);
this.decodedUrlBlocklist.removeAll(values);
}
@Override
public FirewalledRequest getFirewalledRequest(HttpServletRequest request) throws RequestRejectedException {
rejectForbiddenHttpMethod(request);
rejectedBlocklistedUrls(request);
rejectedUntrustedHosts(request);
if (!isNormalized(request)) {
throw new RequestRejectedException("The request was rejected because the URL was not normalized.");
}
rejectNonPrintableAsciiCharactersInFieldName(request.getRequestURI(), "requestURI");
return new StrictFirewalledRequest(request);
}
private void rejectNonPrintableAsciiCharactersInFieldName(String toCheck, String propertyName) {
if (!containsOnlyPrintableAsciiCharacters(toCheck)) {
throw new RequestRejectedException(String
.format("The %s was rejected because it can only contain printable ASCII characters.", propertyName));
}
}
private void rejectForbiddenHttpMethod(HttpServletRequest request) {
if (this.allowedHttpMethods == ALLOW_ANY_HTTP_METHOD) {
return;
}
if (!this.allowedHttpMethods.contains(request.getMethod())) {
throw new RequestRejectedException(
"The request was rejected because the HTTP method \"" + request.getMethod()
+ "\" was not included within the list of allowed HTTP methods " + this.allowedHttpMethods);
}
}
private void rejectedBlocklistedUrls(HttpServletRequest request) {
for (String forbidden : this.encodedUrlBlocklist) {
if (encodedUrlContains(request, forbidden)) {
throw new RequestRejectedException(
"The request was rejected because the URL contained a potentially malicious String \""
+ forbidden + "\"");
}
}
for (String forbidden : this.decodedUrlBlocklist) {
if (decodedUrlContains(request, forbidden)) {
throw new RequestRejectedException(
"The request was rejected because the URL contained a potentially malicious String \""
+ forbidden + "\"");
}
}
}
private void rejectedUntrustedHosts(HttpServletRequest request) {
String serverName = request.getServerName();
if (serverName != null && !this.allowedHostnames.test(serverName)) {
throw new RequestRejectedException(
"The request was rejected because the domain " + serverName + " is untrusted.");
}
}
@Override
public HttpServletResponse getFirewalledResponse(HttpServletResponse response) {
return new FirewalledResponse(response);
}
private static Set<String> createDefaultAllowedHttpMethods() {
Set<String> result = new HashSet<>();
result.add(HttpMethod.DELETE.name());
result.add(HttpMethod.GET.name());
result.add(HttpMethod.HEAD.name());
result.add(HttpMethod.OPTIONS.name());
result.add(HttpMethod.PATCH.name());
result.add(HttpMethod.POST.name());
result.add(HttpMethod.PUT.name());
return result;
}
private static boolean isNormalized(HttpServletRequest request) {
if (!isNormalized(request.getRequestURI())) {
return false;
}
if (!isNormalized(request.getContextPath())) {
return false;
}
if (!isNormalized(request.getServletPath())) {
return false;
}
if (!isNormalized(request.getPathInfo())) {
return false;
}
return true;
}
private static boolean encodedUrlContains(HttpServletRequest request, String value) {
if (valueContains(request.getContextPath(), value)) {
return true;
}
return valueContains(request.getRequestURI(), value);
}
private static boolean decodedUrlContains(HttpServletRequest request, String value) {
if (valueContains(request.getServletPath(), value)) {
return true;
}
return valueContains(request.getPathInfo(), value);
}
private static boolean containsOnlyPrintableAsciiCharacters(String uri) {
if (uri == null) {
return true;
}
int length = uri.length();
for (int i = 0; i < length; i++) {
char ch = uri.charAt(i);
if (ch < '\u0020' || ch > '\u007e') {
return false;
}
}
return true;
}
private static boolean valueContains(String value, String contains) {
return value != null && value.contains(contains);
}
/**
* Checks whether a path is normalized (doesn't contain path traversal sequences like
* "./", "/../" or "/.")
* @param path the path to test
* @return true if the path doesn't contain any path-traversal character sequences.
*/
private static boolean isNormalized(String path) {
if (path == null) {
return true;
}
for (int i = path.length(); i > 0;) {
int slashIndex = path.lastIndexOf('/', i - 1);
int gap = i - slashIndex;
if (gap == 2 && path.charAt(slashIndex + 1) == '.') {
return false; // ".", "/./" or "/."
}
if (gap == 3 && path.charAt(slashIndex + 1) == '.' && path.charAt(slashIndex + 2) == '.') {
return false;
}
i = slashIndex;
}
return true;
}
/**
* Provides the existing encoded url blocklist which can add/remove entries from
* @return the existing encoded url blocklist, never null
*/
public Set<String> getEncodedUrlBlocklist() {
return this.encodedUrlBlocklist;
}
/**
* Provides the existing decoded url blocklist which can add/remove entries from
* @return the existing decoded url blocklist, never null
*/
public Set<String> getDecodedUrlBlocklist() {
return this.decodedUrlBlocklist;
}
/**
* Provides the existing encoded url blocklist which can add/remove entries from
* @return the existing encoded url blocklist, never null
* @deprecated Use {@link #getEncodedUrlBlocklist()} instead
*/
@Deprecated
public Set<String> getEncodedUrlBlacklist() {
return getEncodedUrlBlocklist();
}
/**
* Provides the existing decoded url blocklist which can add/remove entries from
* @return the existing decoded url blocklist, never null
*
*/
public Set<String> getDecodedUrlBlacklist() {
return getDecodedUrlBlocklist();
}
/**
* Strict {@link FirewalledRequest}.
*/
private | StrictHttpFirewall |
java | mockito__mockito | mockito-integration-tests/java-21-tests/src/test/java/org/mockito/internal/stubbing/answers/DeepStubReturnsEnumJava21Test.java | {
"start": 4668,
"end": 4727
} | enum ____ {
A,
B
}
| TestNonAbstractEnum |
java | apache__camel | components/camel-ai/camel-langchain4j-agent-api/src/main/java/org/apache/camel/component/langchain4j/agent/api/AgentConfiguration.java | {
"start": 7439,
"end": 7867
} | class ____
* @return this configuration instance for method chaining
* @see #parseGuardrailClasses(String)
*/
public AgentConfiguration withOutputGuardrailClassesList(String outputGuardrailClasses) {
return withOutputGuardrailClasses(parseGuardrailClasses(outputGuardrailClasses));
}
/**
* Sets output guardrail classes from an array of | names |
java | apache__camel | components/camel-bindy/src/test/java/org/apache/camel/dataformat/bindy/csv2/BindyMarshalWithQuoteTest.java | {
"start": 1173,
"end": 2230
} | class ____ extends CamelTestSupport {
@Test
public void testBindyMarshalWithQuote() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("\"123\",\"Wednesday, November 9, 2011\",\"Central California\""
+ ConverterUtils.getStringCarriageReturn("WINDOWS"));
WeatherModel model = new WeatherModel();
model.setId(123);
model.setDate("Wednesday, November 9, 2011");
model.setPlace("Central California");
template.sendBody("direct:start", model);
MockEndpoint.assertIsSatisfied(context);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start")
.marshal().bindy(BindyType.Csv, org.apache.camel.dataformat.bindy.csv2.WeatherModel.class)
.to("mock:result");
}
};
}
}
| BindyMarshalWithQuoteTest |
java | apache__camel | components/camel-quartz/src/main/java/org/apache/camel/component/quartz/QuartzEndpoint.java | {
"start": 2464,
"end": 25734
} | class ____ extends DefaultEndpoint {
private static final Logger LOG = LoggerFactory.getLogger(QuartzEndpoint.class);
private TriggerKey triggerKey;
private volatile AsyncProcessor processor;
// An internal variables to track whether a job has been in scheduler or not, and has it paused or not.
private final AtomicBoolean jobAdded = new AtomicBoolean();
private final AtomicBoolean jobPaused = new AtomicBoolean();
@UriPath(description = "The quartz group name to use. The combination of group name and trigger name should be unique.",
defaultValue = "Camel")
private String groupName;
@UriPath(description = "The quartz trigger name to use. The combination of group name and trigger name should be unique.")
@Metadata(required = true)
private String triggerName;
@UriParam
private String cron;
@UriParam
private boolean stateful;
@UriParam(label = "advanced")
private boolean ignoreExpiredNextFireTime;
@UriParam(defaultValue = "true")
private boolean deleteJob = true;
@UriParam
private boolean pauseJob;
@UriParam
private boolean durableJob;
@UriParam
private boolean recoverableJob;
@UriParam(label = "scheduler", defaultValue = "500", javaType = "java.time.Duration")
private long triggerStartDelay = 500;
@UriParam(label = "scheduler", defaultValue = "true")
private boolean autoStartScheduler = true;
@UriParam(label = "advanced")
private boolean usingFixedCamelContextName;
@UriParam(label = "advanced")
private boolean prefixJobNameWithEndpointId;
@UriParam(prefix = "trigger.", multiValue = true, label = "advanced")
private Map<String, Object> triggerParameters;
@UriParam(prefix = "job.", multiValue = true, label = "advanced")
private Map<String, Object> jobParameters;
@UriParam(label = "advanced")
private Calendar customCalendar;
public QuartzEndpoint(String uri, QuartzComponent quartzComponent) {
super(uri, quartzComponent);
}
@Override
public boolean isRemote() {
return false;
}
public String getGroupName() {
return triggerKey.getGroup();
}
public String getTriggerName() {
return triggerKey.getName();
}
public void setTriggerName(String triggerName) {
this.triggerName = triggerName;
}
public String getCron() {
return cron;
}
public boolean isStateful() {
return stateful;
}
public boolean isIgnoreExpiredNextFireTime() {
return ignoreExpiredNextFireTime;
}
/**
* Whether to ignore quartz cannot schedule a trigger because the trigger will never fire in the future. This can
* happen when using a cron trigger that are configured to only run in the past.
*
* By default, Quartz will fail to schedule the trigger and therefore fail to start the Camel route. You can set
* this to true which then logs a WARN and then ignore the problem, meaning that the route will never fire in the
* future.
*/
public void setIgnoreExpiredNextFireTime(boolean ignoreExpiredNextFireTime) {
this.ignoreExpiredNextFireTime = ignoreExpiredNextFireTime;
}
public long getTriggerStartDelay() {
return triggerStartDelay;
}
public boolean isDeleteJob() {
return deleteJob;
}
public boolean isPauseJob() {
return pauseJob;
}
/**
* If set to true, then the trigger automatically pauses when route stop. Else if set to false, it will remain in
* scheduler. When set to false, it will also mean user may reuse pre-configured trigger with camel Uri. Just ensure
* the names match. Notice you cannot have both deleteJob and pauseJob set to true.
*/
public void setPauseJob(boolean pauseJob) {
this.pauseJob = pauseJob;
}
/**
* In case of scheduler has already started, we want the trigger start slightly after current time to ensure
* endpoint is fully started before the job kicks in. Negative value shifts trigger start time in the past.
*/
public void setTriggerStartDelay(long triggerStartDelay) {
this.triggerStartDelay = triggerStartDelay;
}
/**
* If set to true, then the trigger automatically delete when route stop. Else if set to false, it will remain in
* scheduler. When set to false, it will also mean user may reuse pre-configured trigger with camel Uri. Just ensure
* the names match. Notice you cannot have both deleteJob and pauseJob set to true.
*/
public void setDeleteJob(boolean deleteJob) {
this.deleteJob = deleteJob;
}
/**
* Uses a Quartz @PersistJobDataAfterExecution and @DisallowConcurrentExecution instead of the default job.
*/
public void setStateful(boolean stateful) {
this.stateful = stateful;
}
public boolean isDurableJob() {
return durableJob;
}
/**
* Whether or not the job should remain stored after it is orphaned (no triggers point to it).
*/
public void setDurableJob(boolean durableJob) {
this.durableJob = durableJob;
}
public boolean isRecoverableJob() {
return recoverableJob;
}
/**
* Instructs the scheduler whether or not the job should be re-executed if a 'recovery' or 'fail-over' situation is
* encountered.
*/
public void setRecoverableJob(boolean recoverableJob) {
this.recoverableJob = recoverableJob;
}
public boolean isUsingFixedCamelContextName() {
return usingFixedCamelContextName;
}
/**
* If it is true, JobDataMap uses the CamelContext name directly to reference the CamelContext, if it is false,
* JobDataMap uses use the CamelContext management name which could be changed during the deploy time.
*/
public void setUsingFixedCamelContextName(boolean usingFixedCamelContextName) {
this.usingFixedCamelContextName = usingFixedCamelContextName;
}
public Map<String, Object> getTriggerParameters() {
return triggerParameters;
}
/**
* To configure additional options on the trigger. The parameter timeZone is supported if the cron option is
* present. Otherwise the parameters repeatInterval and repeatCount are supported.
* <p>
* <b>Note:</b> When using repeatInterval values of 1000 or less, the first few events after starting the camel
* context may be fired more rapidly than expected.
* </p>
*/
public void setTriggerParameters(Map<String, Object> triggerParameters) {
this.triggerParameters = triggerParameters;
}
public Map<String, Object> getJobParameters() {
return jobParameters;
}
/**
* To configure additional options on the job.
*/
public void setJobParameters(Map<String, Object> jobParameters) {
this.jobParameters = jobParameters;
}
public boolean isAutoStartScheduler() {
return autoStartScheduler;
}
/**
* Whether or not the scheduler should be auto started.
*/
public void setAutoStartScheduler(boolean autoStartScheduler) {
this.autoStartScheduler = autoStartScheduler;
}
public boolean isPrefixJobNameWithEndpointId() {
return prefixJobNameWithEndpointId;
}
/**
* Whether the job name should be prefixed with endpoint id
*
* @param prefixJobNameWithEndpointId
*/
public void setPrefixJobNameWithEndpointId(boolean prefixJobNameWithEndpointId) {
this.prefixJobNameWithEndpointId = prefixJobNameWithEndpointId;
}
/**
* Specifies a cron expression to define when to trigger.
*/
public void setCron(String cron) {
this.cron = cron;
}
public TriggerKey getTriggerKey() {
return triggerKey;
}
public void setTriggerKey(TriggerKey triggerKey) {
this.triggerKey = triggerKey;
}
public Calendar getCustomCalendar() {
return customCalendar;
}
/**
* Specifies a custom calendar to avoid specific range of date
*/
public void setCustomCalendar(Calendar customCalendar) {
this.customCalendar = customCalendar;
}
@Override
public Producer createProducer() throws Exception {
throw new UnsupportedOperationException("Quartz producer is not supported.");
}
@Override
public Consumer createConsumer(Processor processor) throws Exception {
QuartzConsumer result = new QuartzConsumer(this, processor);
configureConsumer(result);
return result;
}
@Override
protected void doStart() throws Exception {
if (isDeleteJob() && isPauseJob()) {
throw new IllegalArgumentException("Cannot have both options deleteJob and pauseJob enabled");
}
if (ObjectHelper.isNotEmpty(customCalendar)) {
getComponent().getScheduler().addCalendar(QuartzConstants.QUARTZ_CAMEL_CUSTOM_CALENDAR, customCalendar, true,
false);
}
addJobInScheduler();
}
@Override
protected void doStop() throws Exception {
removeJobInScheduler();
}
private void removeJobInScheduler() throws Exception {
Scheduler scheduler = getComponent().getScheduler();
if (scheduler == null) {
return;
}
if (deleteJob) {
boolean isClustered = scheduler.getMetaData().isJobStoreClustered();
if (!scheduler.isShutdown() && !isClustered) {
LOG.info("Deleting job {}", triggerKey);
scheduler.unscheduleJob(triggerKey);
jobAdded.set(false);
}
} else if (pauseJob) {
pauseTrigger();
}
// Decrement camel job count for this endpoint
AtomicInteger number = (AtomicInteger) scheduler.getContext().get(QuartzConstants.QUARTZ_CAMEL_JOBS_COUNT);
if (number != null) {
number.decrementAndGet();
}
}
private void addJobInScheduler() throws Exception {
// Add or use existing trigger to/from scheduler
Scheduler scheduler = getComponent().getScheduler();
JobDetail jobDetail;
Trigger oldTrigger = scheduler.getTrigger(triggerKey);
boolean triggerExisted = oldTrigger != null;
if (triggerExisted && !isRecoverableJob()) {
ensureNoDupTriggerKey();
}
jobDetail = createJobDetail();
Trigger trigger = createTrigger(jobDetail);
QuartzHelper.updateJobDataMap(getCamelContext(), jobDetail, getEndpointUri(), isUsingFixedCamelContextName());
boolean scheduled = true;
if (triggerExisted) {
// Reschedule job if trigger settings were changed
if (hasTriggerChanged(oldTrigger, trigger)) {
scheduler.rescheduleJob(triggerKey, trigger);
}
} else {
try {
if (hasTriggerExpired(scheduler, trigger)) {
scheduled = false;
LOG.warn(
"Job {} (cron={}, triggerType={}, jobClass={}) not scheduled, because it will never fire in the future",
trigger.getKey(), cron, trigger.getClass().getSimpleName(),
jobDetail.getJobClass().getSimpleName());
} else {
// Schedule it now. Remember that scheduler might not be started it, but we can schedule now.
scheduler.scheduleJob(jobDetail, trigger);
}
} catch (ObjectAlreadyExistsException ex) {
// some other VM might may have stored the job & trigger in DB in clustered mode, in the mean time
if (!(getComponent().isClustered())) {
throw ex;
} else {
trigger = scheduler.getTrigger(triggerKey);
if (trigger == null) {
throw new SchedulerException("Trigger could not be found in quartz scheduler.");
}
}
}
}
if (scheduled) {
if (LOG.isInfoEnabled()) {
Object nextFireTime = trigger.getNextFireTime();
if (nextFireTime != null) {
nextFireTime = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZ").format(nextFireTime);
}
LOG.info("Job {} (cron={}, triggerType={}, jobClass={}) is scheduled. Next fire date is {}",
trigger.getKey(), cron, trigger.getClass().getSimpleName(),
jobDetail.getJobClass().getSimpleName(), nextFireTime);
}
}
// Increase camel job count for this endpoint
AtomicInteger number = (AtomicInteger) scheduler.getContext().get(QuartzConstants.QUARTZ_CAMEL_JOBS_COUNT);
if (number != null) {
number.incrementAndGet();
}
jobAdded.set(true);
}
private boolean hasTriggerExpired(Scheduler scheduler, Trigger trigger) throws SchedulerException {
Calendar cal = null;
if (trigger.getCalendarName() != null) {
cal = scheduler.getCalendar(trigger.getCalendarName());
}
OperableTrigger ot = (OperableTrigger) trigger;
// check if current time is past the Trigger EndDate
if (ot.getEndTime() != null && new Date().after(ot.getEndTime())) {
return true;
}
// calculate whether the trigger can be triggered in the future
Date ft = ot.computeFirstFireTime(cal);
return (ft == null && ignoreExpiredNextFireTime);
}
private boolean hasTriggerChanged(Trigger oldTrigger, Trigger newTrigger) {
if (newTrigger instanceof CronTrigger && oldTrigger instanceof CronTrigger) {
CronTrigger newCron = (CronTrigger) newTrigger;
CronTrigger oldCron = (CronTrigger) oldTrigger;
return !newCron.getCronExpression().equals(oldCron.getCronExpression());
} else if (newTrigger instanceof SimpleTrigger && oldTrigger instanceof SimpleTrigger) {
SimpleTrigger newSimple = (SimpleTrigger) newTrigger;
SimpleTrigger oldSimple = (SimpleTrigger) oldTrigger;
return newSimple.getRepeatInterval() != oldSimple.getRepeatInterval()
|| newSimple.getRepeatCount() != oldSimple.getRepeatCount();
} else {
return !newTrigger.getClass().equals(oldTrigger.getClass()) || !newTrigger.equals(oldTrigger);
}
}
private void ensureNoDupTriggerKey() {
for (Route route : getCamelContext().getRoutes()) {
if (route.getEndpoint() instanceof QuartzEndpoint) {
QuartzEndpoint quartzEndpoint = (QuartzEndpoint) route.getEndpoint();
TriggerKey checkTriggerKey = quartzEndpoint.getTriggerKey();
if (triggerKey.equals(checkTriggerKey)) {
throw new IllegalArgumentException("Trigger key " + triggerKey + " is already in use by " + quartzEndpoint);
}
}
}
}
private Trigger createTrigger(JobDetail jobDetail) throws Exception {
// use a defensive copy to keep the trigger parameters on the endpoint
final Map<String, Object> copy = new HashMap<>(triggerParameters);
final TriggerBuilder<Trigger> triggerBuilder = TriggerBuilder.newTrigger().withIdentity(triggerKey);
if (getComponent().getScheduler().isStarted() || triggerStartDelay < 0) {
triggerBuilder.startAt(new Date(System.currentTimeMillis() + triggerStartDelay));
}
if (cron != null) {
LOG.debug("Creating CronTrigger: {}", cron);
SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssz");
final String startAt = (String) copy.get("startAt");
if (startAt != null) {
triggerBuilder.startAt(dateFormat.parse(startAt));
}
final String endAt = (String) copy.get("endAt");
if (endAt != null) {
Date endDate = dateFormat.parse(endAt);
if (endDate.before(new Date()) && startAt == null && isIgnoreExpiredNextFireTime()) {
// Trigger Builder sets startAt to current time. Hence if startAt is null, necessary to add a valid value to honor ignoreExpiredNextFireTime
triggerBuilder.startAt(Date.from(endDate.toInstant().minusSeconds(1)));
}
triggerBuilder.endAt(endDate);
}
final String timeZone = (String) copy.get("timeZone");
if (timeZone != null) {
if (ObjectHelper.isNotEmpty(customCalendar)) {
triggerBuilder
.withSchedule(cronSchedule(cron)
.withMisfireHandlingInstructionFireAndProceed()
.inTimeZone(TimeZone.getTimeZone(timeZone)))
.modifiedByCalendar(QuartzConstants.QUARTZ_CAMEL_CUSTOM_CALENDAR);
} else {
triggerBuilder
.withSchedule(cronSchedule(cron)
.withMisfireHandlingInstructionFireAndProceed()
.inTimeZone(TimeZone.getTimeZone(timeZone)));
}
jobDetail.getJobDataMap().put(QuartzConstants.QUARTZ_TRIGGER_CRON_TIMEZONE, timeZone);
} else {
if (ObjectHelper.isNotEmpty(customCalendar)) {
triggerBuilder
.withSchedule(cronSchedule(cron)
.withMisfireHandlingInstructionFireAndProceed())
.modifiedByCalendar(QuartzConstants.QUARTZ_CAMEL_CUSTOM_CALENDAR);
} else {
triggerBuilder
.withSchedule(cronSchedule(cron)
.withMisfireHandlingInstructionFireAndProceed());
}
}
// enrich job map with details
jobDetail.getJobDataMap().put(QuartzConstants.QUARTZ_TRIGGER_TYPE, "cron");
jobDetail.getJobDataMap().put(QuartzConstants.QUARTZ_TRIGGER_CRON_EXPRESSION, cron);
} else {
LOG.debug("Creating SimpleTrigger.");
int repeat = SimpleTrigger.REPEAT_INDEFINITELY;
String repeatString = (String) copy.get("repeatCount");
if (repeatString != null) {
repeat = EndpointHelper.resolveParameter(getCamelContext(), repeatString, Integer.class);
// need to update the parameters
copy.put("repeatCount", repeat);
}
// default use 1 sec interval
long interval = 1000;
String intervalString = (String) copy.get("repeatInterval");
if (intervalString != null) {
interval = EndpointHelper.resolveParameter(getCamelContext(), intervalString, Long.class);
// need to update the parameters
copy.put("repeatInterval", interval);
}
if (ObjectHelper.isNotEmpty(customCalendar)) {
triggerBuilder
.withSchedule(simpleSchedule().withMisfireHandlingInstructionFireNow()
.withRepeatCount(repeat).withIntervalInMilliseconds(interval))
.modifiedByCalendar(QuartzConstants.QUARTZ_CAMEL_CUSTOM_CALENDAR);
} else {
triggerBuilder
.withSchedule(simpleSchedule().withMisfireHandlingInstructionFireNow()
.withRepeatCount(repeat).withIntervalInMilliseconds(interval));
}
// enrich job map with details
jobDetail.getJobDataMap().put(QuartzConstants.QUARTZ_TRIGGER_TYPE, "simple");
jobDetail.getJobDataMap().put(QuartzConstants.QUARTZ_TRIGGER_SIMPLE_REPEAT_COUNTER, String.valueOf(repeat));
jobDetail.getJobDataMap().put(QuartzConstants.QUARTZ_TRIGGER_SIMPLE_REPEAT_INTERVAL, String.valueOf(interval));
}
final Trigger result = triggerBuilder.build();
if (!copy.isEmpty()) {
LOG.debug("Setting user extra triggerParameters {}", copy);
setProperties(result, copy);
}
LOG.debug("Created trigger={}", result);
return result;
}
private JobDetail createJobDetail() {
// Camel endpoint timer will assume one to one for JobDetail and Trigger, so let's use same name as trigger
String name = triggerKey.getName();
String group = triggerKey.getGroup();
Class<? extends Job> jobClass = stateful ? StatefulCamelJob.class : CamelJob.class;
LOG.debug("Creating new {}.", jobClass.getSimpleName());
JobBuilder builder = JobBuilder.newJob(jobClass)
.withIdentity(name, group);
if (durableJob) {
builder = builder.storeDurably();
}
if (recoverableJob) {
builder = builder.requestRecovery();
}
JobDetail result = builder.build();
// Let user parameters to further set JobDetail properties.
if (jobParameters != null && jobParameters.size() > 0) {
// need to use a copy to keep the parameters on the endpoint
Map<String, Object> copy = new HashMap<>(jobParameters);
LOG.debug("Setting user extra jobParameters {}", copy);
setProperties(result, copy);
}
LOG.debug("Created jobDetail={}", result);
return result;
}
@Override
public QuartzComponent getComponent() {
return (QuartzComponent) super.getComponent();
}
public void pauseTrigger() throws Exception {
Scheduler scheduler = getComponent().getScheduler();
boolean isClustered = scheduler.getMetaData().isJobStoreClustered();
if (jobPaused.get() || isClustered) {
return;
}
jobPaused.set(true);
if (!scheduler.isShutdown()) {
LOG.info("Pausing trigger {}", triggerKey);
scheduler.pauseTrigger(triggerKey);
}
}
public void resumeTrigger() throws Exception {
if (!jobPaused.get()) {
return;
}
jobPaused.set(false);
Scheduler scheduler = getComponent().getScheduler();
if (scheduler != null) {
LOG.info("Resuming trigger {}", triggerKey);
scheduler.resumeTrigger(triggerKey);
}
}
public void onConsumerStart(QuartzConsumer quartzConsumer) throws Exception {
this.processor = quartzConsumer.getAsyncProcessor();
if (!jobAdded.get()) {
addJobInScheduler();
} else {
resumeTrigger();
}
}
public void onConsumerStop(QuartzConsumer quartzConsumer) throws Exception {
if (jobAdded.get()) {
pauseTrigger();
}
this.processor = null;
}
AsyncProcessor getProcessor() {
return this.processor;
}
}
| QuartzEndpoint |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/submitted/ancestor_ref/User.java | {
"start": 732,
"end": 1325
} | class ____ {
private Integer id;
private String name;
private User friend;
private List<User> friends;
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public User getFriend() {
return friend;
}
public void setFriend(User friend) {
this.friend = friend;
}
public List<User> getFriends() {
return friends;
}
public void setFriends(List<User> friends) {
this.friends = friends;
}
}
| User |
java | apache__camel | components/camel-jcache/src/generated/java/org/apache/camel/component/jcache/JCacheComponentConfigurer.java | {
"start": 733,
"end": 4732
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
JCacheComponent target = (JCacheComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": target.setAutowiredEnabled(property(camelContext, boolean.class, value)); return true;
case "bridgeerrorhandler":
case "bridgeErrorHandler": target.setBridgeErrorHandler(property(camelContext, boolean.class, value)); return true;
case "cacheconfiguration":
case "cacheConfiguration": target.setCacheConfiguration(property(camelContext, javax.cache.configuration.Configuration.class, value)); return true;
case "cacheconfigurationproperties":
case "cacheConfigurationProperties": target.setCacheConfigurationProperties(property(camelContext, java.util.Map.class, value)); return true;
case "cacheconfigurationpropertiesref":
case "cacheConfigurationPropertiesRef": target.setCacheConfigurationPropertiesRef(property(camelContext, java.lang.String.class, value)); return true;
case "cachingprovider":
case "cachingProvider": target.setCachingProvider(property(camelContext, java.lang.String.class, value)); return true;
case "configurationuri":
case "configurationUri": target.setConfigurationUri(property(camelContext, java.lang.String.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": return boolean.class;
case "bridgeerrorhandler":
case "bridgeErrorHandler": return boolean.class;
case "cacheconfiguration":
case "cacheConfiguration": return javax.cache.configuration.Configuration.class;
case "cacheconfigurationproperties":
case "cacheConfigurationProperties": return java.util.Map.class;
case "cacheconfigurationpropertiesref":
case "cacheConfigurationPropertiesRef": return java.lang.String.class;
case "cachingprovider":
case "cachingProvider": return java.lang.String.class;
case "configurationuri":
case "configurationUri": return java.lang.String.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
JCacheComponent target = (JCacheComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": return target.isAutowiredEnabled();
case "bridgeerrorhandler":
case "bridgeErrorHandler": return target.isBridgeErrorHandler();
case "cacheconfiguration":
case "cacheConfiguration": return target.getCacheConfiguration();
case "cacheconfigurationproperties":
case "cacheConfigurationProperties": return target.getCacheConfigurationProperties();
case "cacheconfigurationpropertiesref":
case "cacheConfigurationPropertiesRef": return target.getCacheConfigurationPropertiesRef();
case "cachingprovider":
case "cachingProvider": return target.getCachingProvider();
case "configurationuri":
case "configurationUri": return target.getConfigurationUri();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
default: return null;
}
}
}
| JCacheComponentConfigurer |
java | elastic__elasticsearch | x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/ClaimParser.java | {
"start": 1026,
"end": 7201
} | class ____ {
private final String setting;
private final String claimName;
private final String regexPattern;
private final Function<JWTClaimsSet, List<String>> parser;
public ClaimParser(String setting, String claimName, String regexPattern, Function<JWTClaimsSet, List<String>> parser) {
this.setting = setting;
this.claimName = claimName;
this.regexPattern = regexPattern;
this.parser = parser;
}
public String getSetting() {
return this.setting;
}
public String getClaimName() {
return this.claimName;
}
public String getRegexPattern() {
return this.regexPattern;
}
public Function<JWTClaimsSet, List<String>> getParser() {
return this.parser;
}
public List<String> getClaimValues(JWTClaimsSet claims) {
return parser.apply(claims);
}
public String getClaimValue(JWTClaimsSet claims) {
List<String> claimValues = parser.apply(claims);
if (claimValues == null || claimValues.isEmpty()) {
return null;
} else {
return claimValues.get(0);
}
}
@Override
public String toString() {
if (this.claimName == null) {
return "No claim for [" + this.setting + "]";
} else if (this.regexPattern == null) {
return "Claim [" + this.claimName + "] for [" + this.setting + "]";
} else {
return "Claim [" + this.claimName + "] with pattern [" + this.regexPattern + "] for [" + this.setting + "]";
}
}
@SuppressWarnings("unchecked")
private static Collection<String> parseClaimValues(JWTClaimsSet claimsSet, FallbackableClaim fallbackableClaim, String settingKey) {
Collection<String> values;
final Object claimValueObject = claimsSet.getClaim(fallbackableClaim.getActualName());
if (claimValueObject == null) {
values = List.of();
} else if (claimValueObject instanceof String) {
values = List.of((String) claimValueObject);
} else if (claimValueObject instanceof Collection
&& ((Collection<?>) claimValueObject).stream().allMatch(c -> c instanceof String)) {
values = (Collection<String>) claimValueObject;
} else {
throw new SettingsException(
"Setting [ " + settingKey + "] expects claim [" + fallbackableClaim + "] with String or a String Array value"
);
}
return values;
}
public static ClaimParser forSetting(Logger logger, ClaimSetting setting, RealmConfig realmConfig, boolean required) {
return forSetting(logger, setting, Map.of(), realmConfig, required);
}
public static ClaimParser forSetting(
Logger logger,
ClaimSetting setting,
Map<String, String> fallbackClaimNames,
RealmConfig realmConfig,
boolean required
) {
if (realmConfig.hasSetting(setting.getClaim())) {
final String claimName = realmConfig.getSetting(setting.getClaim());
if (realmConfig.hasSetting(setting.getPattern())) {
Pattern regex = Pattern.compile(realmConfig.getSetting(setting.getPattern()));
return new ClaimParser(setting.name(realmConfig), claimName, regex.pattern(), claims -> {
final FallbackableClaim fallbackableClaim = new FallbackableClaim(claimName, fallbackClaimNames, claims);
Collection<String> values = parseClaimValues(
claims,
fallbackableClaim,
RealmSettings.getFullSettingKey(realmConfig, setting.getClaim())
);
return values.stream().map(s -> {
if (s == null) {
logger.debug("Claim [{}] is null", fallbackableClaim);
return null;
}
final Matcher matcher = regex.matcher(s);
if (matcher.find() == false) {
logger.debug("Claim [{}] is [{}], which does not match [{}]", fallbackableClaim, s, regex.pattern());
return null;
}
final String value = matcher.group(1);
if (Strings.isNullOrEmpty(value)) {
logger.debug(
"Claim [{}] is [{}], which does match [{}] but group(1) is empty",
fallbackableClaim,
s,
regex.pattern()
);
return null;
}
return value;
}).filter(Objects::nonNull).toList();
});
} else {
return new ClaimParser(setting.name(realmConfig), claimName, null, claims -> {
final FallbackableClaim fallbackableClaim = new FallbackableClaim(claimName, fallbackClaimNames, claims);
return parseClaimValues(claims, fallbackableClaim, RealmSettings.getFullSettingKey(realmConfig, setting.getClaim()))
.stream()
.filter(Objects::nonNull)
.toList();
});
}
} else if (required) {
throw new SettingsException("Setting [" + RealmSettings.getFullSettingKey(realmConfig, setting.getClaim()) + "] is required");
} else if (realmConfig.hasSetting(setting.getPattern())) {
throw new SettingsException(
"Setting ["
+ RealmSettings.getFullSettingKey(realmConfig, setting.getPattern())
+ "] cannot be set unless ["
+ RealmSettings.getFullSettingKey(realmConfig, setting.getClaim())
+ "] is also set"
);
} else {
return new ClaimParser(setting.name(realmConfig), null, null, attributes -> List.of());
}
}
}
| ClaimParser |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/sql/parser/MySQLCharSetTest.java | {
"start": 306,
"end": 2763
} | class ____ extends TestCase {
public void testCreateCharset() {
String targetSql = "CREATE TABLE `test_idb`.`acct_certificate` (\n" +
" `id` bigint(20) NOT NULL auto_increment COMMENT '',\n" +
" `nodeid` varchar(5) CHARSET `gbk` COLLATE `gbk_chinese_ci` NULL COMMENT '',\n" +
" `certificatetype` char(1) CHARSET `gbk` COLLATE `gbk_chinese_ci` NULL COMMENT '',\n" +
" `certificateno` varchar(32) CHARSET `gbk` COLLATE `gbk_chinese_ci` NULL COMMENT '',\n" +
" PRIMARY KEY(`id`),\n" +
" INDEX `id_acct_certificate_nodeid`(`nodeid`),\n" +
" INDEX `id_acct_certificate_certificateno`(`certificateno`)\n" +
"\n" +
") engine= InnoDB DEFAULT CHARSET= `gbk` DEFAULT COLLATE `gbk_chinese_ci` comment= '' ;";
String resultSql = "CREATE TABLE `test_idb`.`acct_certificate` (\n" +
"\t`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT '', \n" +
"\t`nodeid` varchar(5) CHARSET `gbk` COLLATE `gbk_chinese_ci` NULL COMMENT '', \n" +
"\t`certificatetype` char(1) CHARSET `gbk` COLLATE `gbk_chinese_ci` NULL COMMENT '', \n" +
"\t`certificateno` varchar(32) CHARSET `gbk` COLLATE `gbk_chinese_ci` NULL COMMENT '', \n" +
"\tPRIMARY KEY (`id`), \n" +
"\tINDEX `id_acct_certificate_nodeid`(`nodeid`), \n" +
"\tINDEX `id_acct_certificate_certificateno`(`certificateno`)\n" +
") ENGINE = InnoDB CHARSET = `gbk` COLLATE = `gbk_chinese_ci` COMMENT = ''";
equal(targetSql, resultSql);
}
public void testAlterCharset() {
String targetSql = "ALTER TABLE acct_certificate MODIFY COLUMN `nodeid` varchar(5) CHARSET `gbk` COLLATE `gbk_chinese_ci` NULL COMMENT ''";
String resultSql = "ALTER TABLE acct_certificate\n" +
"\tMODIFY COLUMN `nodeid` varchar(5) CHARSET `gbk` COLLATE `gbk_chinese_ci` NULL COMMENT ''";
equal(targetSql, resultSql);
}
private void equal(String targetSql, String resultSql) {
MySqlStatementParser parser = new MySqlStatementParser(targetSql);
List<SQLStatement> sqlStatements = parser.parseStatementList();
System.out.println(sqlStatements.get(0).toString());
Assert.assertTrue(sqlStatements.get(0).toString().equals(resultSql));
}
}
| MySQLCharSetTest |
java | google__dagger | dagger-compiler/main/java/dagger/internal/codegen/xprocessing/XExecutableTypes.java | {
"start": 1441,
"end": 1580
} | class ____ {@link XExecutableType} helper methods. */
// TODO(bcorso): Consider moving these methods into XProcessing library.
public final | for |
java | apache__flink | flink-kubernetes/src/main/java/org/apache/flink/kubernetes/utils/Constants.java | {
"start": 940,
"end": 6464
} | class ____ {
// Kubernetes api version
public static final String API_VERSION = "v1";
public static final String APPS_API_VERSION = "apps/v1";
public static final String DNS_POLICY_DEFAULT = "ClusterFirst";
public static final String DNS_POLICY_HOSTNETWORK = "ClusterFirstWithHostNet";
public static final String CONFIG_FILE_LOGBACK_NAME = "logback-console.xml";
public static final String CONFIG_FILE_LOG4J_NAME = "log4j-console.properties";
public static final List<String> CONFIG_FILE_NAME_LIST =
Arrays.asList(
"logback.xml",
"log4j.properties",
"logback-console.xml",
"log4j-console.properties",
"logback-session.xml",
"log4j-session.properties",
"log4j-cli.properties");
public static final String ENV_FLINK_LOG_DIR = "FLINK_LOG_DIR";
public static final String MAIN_CONTAINER_NAME = "flink-main-container";
public static final String FLINK_CONF_VOLUME = "flink-config-volume";
public static final String CONFIG_MAP_PREFIX = "flink-config-";
public static final String HADOOP_CONF_VOLUME = "hadoop-config-volume";
public static final String HADOOP_CONF_CONFIG_MAP_PREFIX = "hadoop-config-";
public static final String HADOOP_CONF_DIR_IN_POD = "/opt/hadoop/conf";
public static final String ENV_HADOOP_CONF_DIR = "HADOOP_CONF_DIR";
public static final String ENV_HADOOP_HOME = "HADOOP_HOME";
public static final String KERBEROS_KEYTAB_VOLUME = "kerberos-keytab-volume";
public static final String KERBEROS_KEYTAB_SECRET_PREFIX = "kerberos-keytab-";
public static final String KERBEROS_KEYTAB_MOUNT_POINT = "/opt/kerberos/kerberos-keytab";
public static final String KERBEROS_KRB5CONF_VOLUME = "kerberos-krb5conf-volume";
public static final String KERBEROS_KRB5CONF_CONFIG_MAP_PREFIX = "kerberos-krb5conf-";
public static final String KERBEROS_KRB5CONF_MOUNT_DIR = "/etc";
public static final String KERBEROS_KRB5CONF_FILE = "krb5.conf";
public static final String FLINK_REST_SERVICE_SUFFIX = "-rest";
public static final String NAME_SEPARATOR = "-";
// Constants for label builder
public static final String LABEL_TYPE_KEY = "type";
public static final String LABEL_TYPE_NATIVE_TYPE = "flink-native-kubernetes";
public static final String LABEL_APP_KEY = "app";
public static final String LABEL_COMPONENT_KEY = "component";
public static final String LABEL_COMPONENT_JOB_MANAGER = "jobmanager";
public static final String LABEL_COMPONENT_TASK_MANAGER = "taskmanager";
// Use fixed port in kubernetes, it needs to be exposed.
public static final int REST_PORT = 8081;
public static final int BLOB_SERVER_PORT = 6124;
public static final int TASK_MANAGER_RPC_PORT = 6122;
public static final String JOB_MANAGER_RPC_PORT_NAME = "jobmanager-rpc";
public static final String BLOB_SERVER_PORT_NAME = "blobserver";
public static final String REST_PORT_NAME = "rest";
public static final String TASK_MANAGER_RPC_PORT_NAME = "taskmanager-rpc";
public static final String RESOURCE_NAME_MEMORY = "memory";
public static final String RESOURCE_NAME_CPU = "cpu";
public static final String RESOURCE_UNIT_MB = "Mi";
public static final String ENV_FLINK_POD_IP_ADDRESS = "_POD_IP_ADDRESS";
public static final String POD_IP_FIELD_PATH = "status.podIP";
public static final String ENV_FLINK_POD_NODE_ID = "_POD_NODE_ID";
public static final String POD_NODE_ID_FIELD_PATH = "spec.nodeName";
public static final int MAXIMUM_CHARACTERS_OF_CLUSTER_ID = 45;
public static final String RESTART_POLICY_OF_NEVER = "Never";
// Constants for Kubernetes high availability
public static final String LEADER_ADDRESS_KEY = "address";
public static final String LEADER_SESSION_ID_KEY = "sessionId";
public static final String EXECUTION_PLAN_STORE_KEY_PREFIX = "executionPlan-";
public static final String SUBMITTED_EXECUTION_PLAN_FILE_PREFIX = "submittedExecutionPlan";
public static final String CHECKPOINT_COUNTER_KEY = "counter";
public static final String CHECKPOINT_ID_KEY_PREFIX = "checkpointID-";
public static final String COMPLETED_CHECKPOINT_FILE_SUFFIX = "completedCheckpoint";
// The file name of the mounted task manager pod template in the JobManager pod if there was any
// specified.
public static final String TASK_MANAGER_POD_TEMPLATE_FILE_NAME =
"taskmanager-pod-template.yaml";
public static final String POD_TEMPLATE_DIR_IN_POD = "/opt/flink/pod-template";
public static final String POD_TEMPLATE_CONFIG_MAP_PREFIX = "pod-template-";
public static final String POD_TEMPLATE_VOLUME = "pod-template-volume";
// Kubernetes start scripts
public static final String KUBERNETES_JOB_MANAGER_SCRIPT_PATH = "kubernetes-jobmanager.sh";
public static final String KUBERNETES_TASK_MANAGER_SCRIPT_PATH = "kubernetes-taskmanager.sh";
public static final String ENV_TM_JVM_MEM_OPTS = "FLINK_TM_JVM_MEM_OPTS";
// "resourceVersion="0" is any resource version.It saves time to access etcd and improves
// performance.
// https://kubernetes.io/docs/reference/using-api/api-concepts/#the-resourceversion-parameter
public static final String KUBERNETES_ZERO_RESOURCE_VERSION = "0";
public static final String USER_ARTIFACTS_VOLUME = "user-artifacts-volume";
}
| Constants |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/PlacementConstraint.java | {
"start": 7729,
"end": 8823
} | class ____ {\n")
.append(" name: ").append(toIndentedString(name)).append("\n")
.append(" type: ").append(toIndentedString(type)).append("\n")
.append(" scope: ").append(toIndentedString(scope)).append("\n")
.append(" targetTags: ").append(toIndentedString(targetTags))
.append("\n")
.append(" nodeAttributes: ").append(toIndentedString(nodeAttributes))
.append("\n")
.append(" nodePartitions: ").append(toIndentedString(nodePartitions))
.append("\n")
.append(" minCardinality: ").append(toIndentedString(minCardinality))
.append("\n")
.append(" maxCardinality: ").append(toIndentedString(maxCardinality))
.append("\n")
.append("}");
return sb.toString();
}
/**
* Convert the given object to string with each line indented by 4 spaces
* (except the first line).
*/
private String toIndentedString(java.lang.Object o) {
if (o == null) {
return "null";
}
return o.toString().replace("\n", "\n ");
}
}
| PlacementConstraint |
java | spring-projects__spring-boot | documentation/spring-boot-docs/src/main/java/org/springframework/boot/docs/features/developingautoconfiguration/testing/MyServiceAutoConfigurationTests.java | {
"start": 1130,
"end": 2314
} | class ____ {
// tag::runner[]
private final ApplicationContextRunner contextRunner = new ApplicationContextRunner()
.withConfiguration(AutoConfigurations.of(MyServiceAutoConfiguration.class));
// end::runner[]
// tag::test-env[]
@Test
void serviceNameCanBeConfigured() {
this.contextRunner.withPropertyValues("user.name=test123").run((context) -> {
assertThat(context).hasSingleBean(MyService.class);
assertThat(context.getBean(MyService.class).getName()).isEqualTo("test123");
});
}
// end::test-env[]
// tag::test-classloader[]
@Test
void serviceIsIgnoredIfLibraryIsNotPresent() {
this.contextRunner.withClassLoader(new FilteredClassLoader(MyService.class))
.run((context) -> assertThat(context).doesNotHaveBean("myService"));
}
// end::test-classloader[]
// tag::test-user-config[]
@Test
void defaultServiceBacksOff() {
this.contextRunner.withUserConfiguration(UserConfiguration.class).run((context) -> {
assertThat(context).hasSingleBean(MyService.class);
assertThat(context).getBean("myCustomService").isSameAs(context.getBean(MyService.class));
});
}
@Configuration(proxyBeanMethods = false)
static | MyServiceAutoConfigurationTests |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/deser/AbstractSerializeTest.java | {
"start": 1866,
"end": 2059
} | class ____ {
private A a;
public A getA() {
return a;
}
public void setA(A a) {
this.a = a;
}
}
public static abstract | G |
java | apache__flink | flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStSerializerUtils.java | {
"start": 1062,
"end": 2593
} | class ____ {
/**
* Serialize a key and namespace. No user key.
*
* @param contextKey the context key of current request
* @param builder key builder
* @param defaultNamespace default namespace of the state
* @param namespaceSerializer the namespace serializer
* @param enableKeyReuse whether to enable key reuse
*/
public static <K, N> byte[] serializeKeyAndNamespace(
ContextKey<K, N> contextKey,
SerializedCompositeKeyBuilder<K> builder,
N defaultNamespace,
TypeSerializer<N> namespaceSerializer,
boolean enableKeyReuse)
throws IOException {
N namespace = contextKey.getNamespace();
namespace = (namespace == null ? defaultNamespace : namespace);
if (enableKeyReuse && namespace == defaultNamespace) {
// key reuse.
return contextKey.getOrCreateSerializedKey(
ctxKey -> {
builder.setKeyAndKeyGroup(ctxKey.getRawKey(), ctxKey.getKeyGroup());
return builder.buildCompositeKeyNamespace(
defaultNamespace, namespaceSerializer);
});
} else {
// no key reuse, serialize again.
builder.setKeyAndKeyGroup(contextKey.getRawKey(), contextKey.getKeyGroup());
return builder.buildCompositeKeyNamespace(namespace, namespaceSerializer);
}
}
private ForStSerializerUtils() {}
}
| ForStSerializerUtils |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/util/TaskConfig.java | {
"start": 44652,
"end": 47212
} | class ____ found.");
}
if (aggObj == null) {
throw new RuntimeException("Missing config entry for aggregator.");
}
String name = this.config.getString(ITERATION_AGGREGATOR_NAME_PREFIX + i, null);
if (name == null) {
throw new RuntimeException("Missing config entry for aggregator.");
}
list.add(new AggregatorWithName<>(name, aggObj));
}
return list;
}
public void setConvergenceCriterion(
String aggregatorName, ConvergenceCriterion<?> convCriterion) {
try {
InstantiationUtil.writeObjectToConfig(
convCriterion, this.config, ITERATION_CONVERGENCE_CRITERION);
} catch (IOException e) {
throw new RuntimeException(
"Error while writing the convergence criterion object to the task configuration.");
}
this.config.setString(ITERATION_CONVERGENCE_CRITERION_AGG_NAME, aggregatorName);
}
/**
* Sets the default convergence criterion of a {@link DeltaIteration}
*
* @param aggregatorName
* @param convCriterion
*/
public void setImplicitConvergenceCriterion(
String aggregatorName, ConvergenceCriterion<?> convCriterion) {
try {
InstantiationUtil.writeObjectToConfig(
convCriterion, this.config, ITERATION_IMPLICIT_CONVERGENCE_CRITERION);
} catch (IOException e) {
throw new RuntimeException(
"Error while writing the implicit convergence criterion object to the task configuration.");
}
this.config.setString(ITERATION_IMPLICIT_CONVERGENCE_CRITERION_AGG_NAME, aggregatorName);
}
@SuppressWarnings("unchecked")
public <T extends Value> ConvergenceCriterion<T> getConvergenceCriterion(ClassLoader cl) {
ConvergenceCriterion<T> convCriterionObj;
try {
convCriterionObj =
InstantiationUtil.readObjectFromConfig(
this.config, ITERATION_CONVERGENCE_CRITERION, cl);
} catch (IOException e) {
throw new RuntimeException(
"Error while reading the convergence criterion object from the task configuration.");
} catch (ClassNotFoundException e) {
throw new RuntimeException(
"Error while reading the convergence criterion object from the task configuration. "
+ "ConvergenceCriterion | not |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java | {
"start": 2099,
"end": 10911
} | class ____ {
{
GenericTestUtils.setLogLevel(
LoggerFactory.getLogger(BlockPlacementPolicy.class), Level.TRACE);
}
private static MiniDFSCluster cluster;
private static Configuration conf;
private final static int NUM_DATA_NODES = 10;
private final static int NUM_FILES = 10;
private final static byte[] SOME_BYTES = "foo".getBytes();
private static DistributedFileSystem dfs;
private static ArrayList<DataNode> datanodes;
@BeforeAll
public static void setUpBeforeClass() throws Exception {
conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES)
.build();
cluster.waitClusterUp();
dfs = cluster.getFileSystem();
datanodes = cluster.getDataNodes();
}
@AfterAll
public static void tearDownAfterClass() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
@Test
@Timeout(value = 180)
public void testFavoredNodesEndToEnd() throws Exception {
//create 10 files with random preferred nodes
for (int i = 0; i < NUM_FILES; i++) {
Random rand = new Random(System.currentTimeMillis() + i);
//pass a new created rand so as to get a uniform distribution each time
//without too much collisions (look at the do-while loop in getDatanodes)
InetSocketAddress datanode[] = getDatanodes(rand);
Path p = new Path("/filename"+i);
FSDataOutputStream out = dfs.create(p, FsPermission.getDefault(), true,
4096, (short)3, 4096L, null, datanode);
out.write(SOME_BYTES);
out.close();
BlockLocation[] locations = getBlockLocations(p);
//verify the files got created in the right nodes
for (BlockLocation loc : locations) {
String[] hosts = loc.getNames();
String[] hosts1 = getStringForInetSocketAddrs(datanode);
assertTrue(compareNodes(hosts, hosts1));
}
}
}
@Test
@Timeout(value = 180)
public void testWhenFavoredNodesNotPresent() throws Exception {
//when we ask for favored nodes but the nodes are not there, we should
//get some other nodes. In other words, the write to hdfs should not fail
//and if we do getBlockLocations on the file, we should see one blklocation
//and three hosts for that
InetSocketAddress arbitraryAddrs[] = new InetSocketAddress[3];
for (int i = 0; i < 3; i++) {
arbitraryAddrs[i] = getArbitraryLocalHostAddr();
}
Path p = new Path("/filename-foo-bar");
FSDataOutputStream out = dfs.create(p, FsPermission.getDefault(), true,
4096, (short)3, 4096L, null, arbitraryAddrs);
out.write(SOME_BYTES);
out.close();
getBlockLocations(p);
}
@Test
@Timeout(value = 180)
public void testWhenSomeNodesAreNotGood() throws Exception {
// 4 favored nodes
final InetSocketAddress addrs[] = new InetSocketAddress[4];
final String[] hosts = new String[addrs.length];
for (int i = 0; i < addrs.length; i++) {
addrs[i] = datanodes.get(i).getXferAddress();
hosts[i] = addrs[i].getAddress().getHostAddress() + ":" + addrs[i].getPort();
}
//make some datanode not "good" so that even if the client prefers it,
//the namenode would not give it as a replica to write to
DatanodeInfo d = cluster.getNameNode().getNamesystem().getBlockManager()
.getDatanodeManager().getDatanodeByXferAddr(
addrs[0].getAddress().getHostAddress(), addrs[0].getPort());
//set the decommission status to true so that
//BlockPlacementPolicyDefault.isGoodTarget returns false for this dn
d.setDecommissioned();
Path p = new Path("/filename-foo-bar-baz");
final short replication = (short)3;
FSDataOutputStream out = dfs.create(p, FsPermission.getDefault(), true,
4096, replication, 4096L, null, addrs);
out.write(SOME_BYTES);
out.close();
//reset the state
d.stopDecommission();
BlockLocation[] locations = getBlockLocations(p);
assertEquals(replication, locations[0].getNames().length);
//also make sure that the datanode[0] is not in the list of hosts
for (int i = 0; i < replication; i++) {
final String loc = locations[0].getNames()[i];
int j = 0;
for(; j < hosts.length && !loc.equals(hosts[j]); j++);
assertTrue(j > 0, "j=" + j);
assertTrue(j < hosts.length,
"loc=" + loc + " not in host list " + Arrays.asList(hosts) + ", j=" + j);
}
}
@Test
@Timeout(value = 180)
public void testFavoredNodesEndToEndForAppend() throws Exception {
// create 10 files with random preferred nodes
for (int i = 0; i < NUM_FILES; i++) {
Random rand = new Random(System.currentTimeMillis() + i);
// pass a new created rand so as to get a uniform distribution each time
// without too much collisions (look at the do-while loop in getDatanodes)
InetSocketAddress datanode[] = getDatanodes(rand);
Path p = new Path("/filename" + i);
// create and close the file.
dfs.create(p, FsPermission.getDefault(), true, 4096, (short) 3, 4096L,
null, null).close();
// re-open for append
FSDataOutputStream out = dfs.append(p, EnumSet.of(CreateFlag.APPEND),
4096, null, datanode);
out.write(SOME_BYTES);
out.close();
BlockLocation[] locations = getBlockLocations(p);
// verify the files got created in the right nodes
for (BlockLocation loc : locations) {
String[] hosts = loc.getNames();
String[] hosts1 = getStringForInetSocketAddrs(datanode);
assertTrue(compareNodes(hosts, hosts1));
}
}
}
@Test
@Timeout(value = 180)
public void testCreateStreamBuilderFavoredNodesEndToEnd() throws Exception {
//create 10 files with random preferred nodes
for (int i = 0; i < NUM_FILES; i++) {
Random rand = new Random(System.currentTimeMillis() + i);
//pass a new created rand so as to get a uniform distribution each time
//without too much collisions (look at the do-while loop in getDatanodes)
InetSocketAddress[] dns = getDatanodes(rand);
Path p = new Path("/filename"+i);
FSDataOutputStream out =
dfs.createFile(p).favoredNodes(dns).build();
out.write(SOME_BYTES);
out.close();
BlockLocation[] locations = getBlockLocations(p);
//verify the files got created in the right nodes
for (BlockLocation loc : locations) {
String[] hosts = loc.getNames();
String[] hosts1 = getStringForInetSocketAddrs(dns);
assertTrue(compareNodes(hosts, hosts1));
}
}
}
private BlockLocation[] getBlockLocations(Path p) throws Exception {
DFSTestUtil.waitReplication(dfs, p, (short)3);
BlockLocation[] locations = dfs.getClient().getBlockLocations(
p.toUri().getPath(), 0, Long.MAX_VALUE);
assertTrue(locations.length == 1 && locations[0].getHosts().length == 3);
return locations;
}
private String[] getStringForInetSocketAddrs(InetSocketAddress[] datanode) {
String strs[] = new String[datanode.length];
for (int i = 0; i < datanode.length; i++) {
strs[i] = datanode[i].getAddress().getHostAddress() + ":" +
datanode[i].getPort();
}
return strs;
}
private boolean compareNodes(String[] dnList1, String[] dnList2) {
for (int i = 0; i < dnList1.length; i++) {
boolean matched = false;
for (int j = 0; j < dnList2.length; j++) {
if (dnList1[i].equals(dnList2[j])) {
matched = true;
break;
}
}
if (matched == false) {
fail(dnList1[i] + " not a favored node");
}
}
return true;
}
private InetSocketAddress[] getDatanodes(Random rand) {
//Get some unique random indexes
int idx1 = rand.nextInt(NUM_DATA_NODES);
int idx2;
do {
idx2 = rand.nextInt(NUM_DATA_NODES);
} while (idx1 == idx2);
int idx3;
do {
idx3 = rand.nextInt(NUM_DATA_NODES);
} while (idx2 == idx3 || idx1 == idx3);
InetSocketAddress[] addrs = new InetSocketAddress[3];
addrs[0] = datanodes.get(idx1).getXferAddress();
addrs[1] = datanodes.get(idx2).getXferAddress();
addrs[2] = datanodes.get(idx3).getXferAddress();
return addrs;
}
private InetSocketAddress getArbitraryLocalHostAddr()
throws UnknownHostException{
Random rand = new Random(System.currentTimeMillis());
int port = rand.nextInt(65535);
while (true) {
boolean conflict = false;
for (DataNode d : datanodes) {
if (d.getXferAddress().getPort() == port) {
port = rand.nextInt(65535);
conflict = true;
}
}
if (conflict == false) {
break;
}
}
return new InetSocketAddress(InetAddress.getLocalHost(), port);
}
}
| TestFavoredNodesEndToEnd |
java | redisson__redisson | redisson/src/main/java/org/redisson/cache/LocalCachedMapDisabledKey.java | {
"start": 718,
"end": 1197
} | class ____ implements Serializable {
private String requestId;
private long timeout;
public LocalCachedMapDisabledKey() {
}
public LocalCachedMapDisabledKey(String requestId, long timeout) {
super();
this.requestId = requestId;
this.timeout = timeout;
}
public String getRequestId() {
return requestId;
}
public long getTimeout() {
return timeout;
}
}
| LocalCachedMapDisabledKey |
java | netty__netty | microbench/src/main/java/io/netty/microbench/util/ResourceLeakDetectorRecordBenchmark.java | {
"start": 1020,
"end": 2736
} | class ____ extends AbstractMicrobenchmark {
private static final Object TRACKED = new Object();
private static final ResourceLeakHint HINT = new ResourceLeakHint() {
@Override
public String toHintString() {
return "BenchmarkHint";
}
};
@Param({ "8", "16" })
private int recordTimes;
private ResourceLeakDetector.Level level;
ResourceLeakDetector<Object> detector = new ResourceLeakDetector<Object>(
Object.class, 1, Integer.MAX_VALUE) {
@Override
protected void reportTracedLeak(String resourceType, String records) {
// noop
}
@Override
protected void reportUntracedLeak(String resourceType) {
// noop
}
@Override
protected void reportInstancesLeak(String resourceType) {
// noop
}
};
@Setup(Level.Trial)
public void setup() {
level = ResourceLeakDetector.getLevel();
ResourceLeakDetector.setLevel(ResourceLeakDetector.Level.PARANOID);
}
@TearDown(Level.Trial)
public void teardown() {
ResourceLeakDetector.setLevel(level);
}
@Benchmark
public boolean record() {
ResourceLeakTracker<Object> tracker = detector.track(TRACKED);
for (int i = 0 ; i < recordTimes; i++) {
tracker.record();
}
return tracker.close(TRACKED);
}
@Benchmark
public boolean recordWithHint() {
ResourceLeakTracker<Object> tracker = detector.track(TRACKED);
for (int i = 0 ; i < recordTimes; i++) {
tracker.record(HINT);
}
return tracker.close(TRACKED);
}
}
| ResourceLeakDetectorRecordBenchmark |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/mapping/Set.java | {
"start": 869,
"end": 3316
} | class ____ extends Collection {
/**
* Used by hbm.xml binding
*/
public Set(MetadataBuildingContext buildingContext, PersistentClass owner) {
super( buildingContext, owner );
}
/**
* Used by annotation binding
*/
public Set(Supplier<ManagedBean<? extends UserCollectionType>> customTypeBeanResolver, PersistentClass persistentClass, MetadataBuildingContext buildingContext) {
super( customTypeBeanResolver, persistentClass, buildingContext );
}
private Set(Collection original) {
super( original );
}
@Override
public Set copy() {
return new Set( this );
}
public void validate(MappingContext mappingContext) throws MappingException {
super.validate( mappingContext );
//for backward compatibility, disable this:
/*Iterator iter = getElement().getColumnIterator();
while ( iter.hasNext() ) {
Column col = (Column) iter.next();
if ( !col.isNullable() ) {
return;
}
}
throw new MappingException("set element mappings must have at least one non-nullable column: " + getRole() );*/
}
public boolean isSet() {
return true;
}
public CollectionType getDefaultCollectionType() {
if ( isSorted() ) {
return new SortedSetType( getRole(), getReferencedPropertyName(), getComparator() );
}
else if ( hasOrder() ) {
return new OrderedSetType( getRole(), getReferencedPropertyName() );
}
else {
return new SetType( getRole(), getReferencedPropertyName() );
}
}
void createPrimaryKey() {
if ( !isOneToMany() ) {
final var collectionTable = getCollectionTable();
var primaryKey = collectionTable.getPrimaryKey();
if ( primaryKey == null ) {
primaryKey = new PrimaryKey( getCollectionTable() );
primaryKey.addColumns( getKey() );
for ( var selectable : getElement().getSelectables() ) {
if ( selectable instanceof Column col ) {
if ( !col.isNullable() ) {
primaryKey.addColumn( col );
}
else {
return;
}
}
}
if ( primaryKey.getColumnSpan() != getKey().getColumnSpan() ) {
collectionTable.setPrimaryKey( primaryKey );
}
// else {
//for backward compatibility, allow a set with no not-null
//element columns, using all columns in the row locator SQL
//TODO: create an implicit not null constraint on all cols?
// }
}
}
// else {
//create an index on the key columns??
// }
}
public Object accept(ValueVisitor visitor) {
return visitor.accept(this);
}
}
| Set |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/model/FunctionContributions.java | {
"start": 633,
"end": 1135
} | interface ____ {
/**
* The registry into which the contributions should be made.
*/
SqmFunctionRegistry getFunctionRegistry();
/**
* Access to type information.
*/
TypeConfiguration getTypeConfiguration();
/**
* Access to {@linkplain Service services}.
*/
ServiceRegistry getServiceRegistry();
/**
* The {@linkplain Dialect SQL Dialect}.
*/
default Dialect getDialect() {
return getTypeConfiguration().getCurrentBaseSqlTypeIndicators().getDialect();
}
}
| FunctionContributions |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/serialization/SessionFactorySerializationTest.java | {
"start": 833,
"end": 3579
} | class ____ {
public static final String NAME = "mySF";
@BeforeAll
public void clearRegistry() {
SessionFactoryRegistry.INSTANCE.clearRegistrations();
}
@Test
public void testNamedSessionFactorySerialization() {
Configuration cfg = new Configuration()
.setProperty( AvailableSettings.SESSION_FACTORY_NAME, NAME )
.setProperty( AvailableSettings.SESSION_FACTORY_NAME_IS_JNDI, false ); // default is true
ServiceRegistryUtil.applySettings( cfg.getStandardServiceRegistryBuilder() );
try (SessionFactory factory = cfg.buildSessionFactory()) {
// we need to do some tricking here so that Hibernate thinks the deserialization happens in a
// different VM
String uuid = ((SessionFactoryImplementor) factory).getUuid();
// deregister under this uuid...
SessionFactoryRegistry.INSTANCE.removeSessionFactory( uuid, NAME, null, null );
// and then register under a different uuid...
SessionFactoryRegistry.INSTANCE.addSessionFactory(
"some-other-uuid",
NAME,
null,
(SessionFactoryImplementor) factory,
null
);
SessionFactory factory2 = (SessionFactory) SerializationHelper.clone( factory );
assertThat( factory2 ).isSameAs( factory );
SessionFactoryRegistry.INSTANCE.removeSessionFactory( "some-other-uuid", NAME, null, null );
}
assertThat( SessionFactoryRegistry.INSTANCE.hasRegistrations() ).isFalse();
}
@Test
public void testUnNamedSessionFactorySerialization() {
// IMPL NOTE : this test is a control to testNamedSessionFactorySerialization
// here, the test should fail based just on attempted uuid resolution
Configuration cfg = new Configuration()
.setProperty( AvailableSettings.SESSION_FACTORY_NAME_IS_JNDI, false ); // default is true
ServiceRegistryUtil.applySettings( cfg.getStandardServiceRegistryBuilder() );
try (SessionFactory factory = cfg.buildSessionFactory()) {
// we need to do some tricking here so that Hibernate thinks the deserialization happens in a
// different VM
String uuid = ((SessionFactoryImplementor) factory).getUuid();
// deregister under this uuid...
SessionFactoryRegistry.INSTANCE.removeSessionFactory( uuid, null, null, null );
// and then register under a different uuid...
SessionFactoryRegistry.INSTANCE.addSessionFactory(
"some-other-uuid",
null,
null,
(SessionFactoryImplementor) factory,
null
);
try {
SerializationHelper.clone( factory );
fail( "Expecting an error" );
}
catch (SerializationException expected) {
}
SessionFactoryRegistry.INSTANCE.removeSessionFactory( "some-other-uuid", null, null, null );
}
assertThat( SessionFactoryRegistry.INSTANCE.hasRegistrations() ).isFalse();
}
}
| SessionFactorySerializationTest |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/serializer/InetAddressTest.java | {
"start": 222,
"end": 989
} | class ____ extends TestCase {
public void test_inetAddress() throws Exception {
InetAddress address = InetAddress.getLocalHost();
String text = JSON.toJSONString(address);
Assert.assertEquals(JSON.toJSONString(address.getHostAddress()), text);
InetAddress address2 = JSON.parseObject(text, InetAddress.class);
Assert.assertEquals(address, address2);
ParserConfig.getGlobalInstance().getDeserializer(InetAddress.class);
}
public void test_null() throws Exception {
Assert.assertEquals(null, JSON.parseObject("null", InetAddress.class));
}
public void test_empty() throws Exception {
Assert.assertEquals(null, JSON.parseObject("\"\"", InetAddress.class));
}
}
| InetAddressTest |
java | elastic__elasticsearch | x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualTests.java | {
"start": 1055,
"end": 6874
} | class ____ extends AbstractScalarFunctionTestCase {
public LessThanOrEqualTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
this.testCase = testCaseSupplier.get();
}
@ParametersFactory
public static Iterable<Object[]> parameters() {
List<TestCaseSupplier> suppliers = new ArrayList<>();
suppliers.addAll(
TestCaseSupplier.forBinaryComparisonWithWidening(
new TestCaseSupplier.NumericTypeTestConfigs<>(
new TestCaseSupplier.NumericTypeTestConfig<>(
(Integer.MIN_VALUE >> 1) - 1,
(Integer.MAX_VALUE >> 1) - 1,
(l, r) -> l.intValue() <= r.intValue(),
"LessThanOrEqualIntsEvaluator"
),
new TestCaseSupplier.NumericTypeTestConfig<>(
(Long.MIN_VALUE >> 1) - 1,
(Long.MAX_VALUE >> 1) - 1,
(l, r) -> l.longValue() <= r.longValue(),
"LessThanOrEqualLongsEvaluator"
),
new TestCaseSupplier.NumericTypeTestConfig<>(
Double.NEGATIVE_INFINITY,
Double.POSITIVE_INFINITY,
// NB: this has different behavior than Double::equals
(l, r) -> l.doubleValue() <= r.doubleValue(),
"LessThanOrEqualDoublesEvaluator"
)
),
"lhs",
"rhs",
(lhs, rhs) -> List.of(),
false
)
);
// Unsigned Long cases
// TODO: These should be integrated into the type cross product above, but are currently broken
// see https://github.com/elastic/elasticsearch/issues/102935
suppliers.addAll(
TestCaseSupplier.forBinaryNotCasting(
"LessThanOrEqualLongsEvaluator",
"lhs",
"rhs",
(l, r) -> ((BigInteger) l).compareTo((BigInteger) r) <= 0,
DataType.BOOLEAN,
TestCaseSupplier.ulongCases(BigInteger.ZERO, NumericUtils.UNSIGNED_LONG_MAX, true),
TestCaseSupplier.ulongCases(BigInteger.ZERO, NumericUtils.UNSIGNED_LONG_MAX, true),
List.of(),
false
)
);
suppliers.addAll(
TestCaseSupplier.forBinaryNotCasting(
"LessThanOrEqualKeywordsEvaluator",
"lhs",
"rhs",
(l, r) -> ((BytesRef) l).compareTo((BytesRef) r) <= 0,
DataType.BOOLEAN,
TestCaseSupplier.ipCases(),
TestCaseSupplier.ipCases(),
List.of(),
false
)
);
suppliers.addAll(
TestCaseSupplier.forBinaryNotCasting(
"LessThanOrEqualKeywordsEvaluator",
"lhs",
"rhs",
(l, r) -> ((BytesRef) l).compareTo((BytesRef) r) <= 0,
DataType.BOOLEAN,
TestCaseSupplier.versionCases(""),
TestCaseSupplier.versionCases(""),
List.of(),
false
)
);
// Datetime
suppliers.addAll(TestCaseSupplier.forBinaryNotCasting("LessThanOrEqualLongsEvaluator", "lhs", "rhs", (lhs, rhs) -> {
if (lhs instanceof Instant l && rhs instanceof Instant r) {
return l.isBefore(r) || l.equals(r);
}
throw new UnsupportedOperationException("Got some weird types");
}, DataType.BOOLEAN, TestCaseSupplier.dateCases(), TestCaseSupplier.dateCases(), List.of(), false));
suppliers.addAll(TestCaseSupplier.forBinaryNotCasting("LessThanOrEqualLongsEvaluator", "lhs", "rhs", (lhs, rhs) -> {
if (lhs instanceof Instant l && rhs instanceof Instant r) {
return l.isBefore(r) || l.equals(r);
}
throw new UnsupportedOperationException("Got some weird types");
}, DataType.BOOLEAN, TestCaseSupplier.dateNanosCases(), TestCaseSupplier.dateNanosCases(), List.of(), false));
suppliers.addAll(
TestCaseSupplier.forBinaryNotCasting(
"LessThanOrEqualNanosMillisEvaluator",
"lhs",
"rhs",
(l, r) -> (((Instant) l).isBefore((Instant) r) || l.equals(r)),
DataType.BOOLEAN,
TestCaseSupplier.dateNanosCases(),
TestCaseSupplier.dateCases(),
List.of(),
false
)
);
suppliers.addAll(
TestCaseSupplier.forBinaryNotCasting(
"LessThanOrEqualMillisNanosEvaluator",
"lhs",
"rhs",
(l, r) -> (((Instant) l).isBefore((Instant) r) || l.equals(r)),
DataType.BOOLEAN,
TestCaseSupplier.dateCases(),
TestCaseSupplier.dateNanosCases(),
List.of(),
false
)
);
suppliers.addAll(
TestCaseSupplier.stringCases(
(l, r) -> ((BytesRef) l).compareTo((BytesRef) r) <= 0,
(lhsType, rhsType) -> "LessThanOrEqualKeywordsEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]",
List.of(),
DataType.BOOLEAN
)
);
return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers);
}
@Override
protected Expression build(Source source, List<Expression> args) {
return new LessThanOrEqual(source, args.get(0), args.get(1), null);
}
}
| LessThanOrEqualTests |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesResponse.java | {
"start": 5814,
"end": 10426
} | class ____ implements Writeable, ToXContentObject {
/**
* Resolved indices to which the action applies. This duplicates information
* which exists in the action, but is included because the action indices may
* or may not be resolved depending on if the security layer is used or not.
*/
private final List<String> indices;
private final AliasActions action;
private final ElasticsearchException error;
/**
* Build result that could be either a success or failure
* @param indices the resolved indices to which the associated action applies
* @param action the alias action consisting of add/remove, aliases, and indices
* @param numAliasesRemoved the number of aliases remove, if any
* @return the action result
*/
public static AliasActionResult build(List<String> indices, AliasActions action, int numAliasesRemoved) {
if (action.actionType() == AliasActions.Type.REMOVE && numAliasesRemoved == 0) {
return buildRemoveError(indices, action);
}
return buildSuccess(indices, action);
}
/**
* Build an error result for a failed remove action.
*/
private static AliasActionResult buildRemoveError(List<String> indices, AliasActions action) {
return new AliasActionResult(indices, action, new AliasesNotFoundException((action.getOriginalAliases())));
}
/**
* Build a success action result with no errors.
*/
public static AliasActionResult buildSuccess(List<String> indices, AliasActions action) {
return new AliasActionResult(indices, action, null);
}
/**
* The error result if the action failed, null if the action succeeded.
*/
public ElasticsearchException getError() {
return error;
}
private int getStatus() {
return error == null ? 200 : error.status().getStatus();
}
private AliasActionResult(List<String> indices, AliasActions action, ElasticsearchException error) {
assert indices.isEmpty() == false : "Alias action result must be instantiated with at least one index";
this.indices = indices;
this.action = action;
this.error = error;
}
private AliasActionResult(StreamInput in) throws IOException {
this.indices = in.readStringCollectionAsList();
this.action = new AliasActions(in);
this.error = in.readException();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeStringCollection(indices);
action.writeTo(out);
out.writeException(error);
}
public static final String ACTION_FIELD = "action";
public static final String ACTION_TYPE_FIELD = "type";
public static final String ACTION_INDICES_FIELD = "indices";
public static final String ACTION_ALIASES_FIELD = "aliases";
public static final String STATUS_FIELD = "status";
public static final String ERROR_FIELD = "error";
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
// include subset of fields from action request
builder.field(ACTION_FIELD);
builder.startObject();
builder.field(ACTION_TYPE_FIELD, action.actionType().getFieldName());
builder.field(ACTION_INDICES_FIELD, indices.stream().sorted().collect(Collectors.toList()));
builder.array(ACTION_ALIASES_FIELD, action.getOriginalAliases());
builder.endObject();
builder.field(STATUS_FIELD, getStatus());
if (error != null) {
builder.startObject(ERROR_FIELD);
error.toXContent(builder, params);
builder.endObject();
}
builder.endObject();
return builder;
}
@Override
// Only used equals in tests
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
AliasActionResult that = (AliasActionResult) o;
return Objects.equals(indices, that.indices) && Objects.equals(action, that.action)
// ElasticsearchException does not have hashCode() so assume errors are equal iff | AliasActionResult |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/rest/client/reactive/encoded/ClientWithPathParamAndEncodedTest.java | {
"start": 3057,
"end": 3464
} | interface ____ {
@Encoded
@GET
@Path("/{path}")
String call(@PathParam("path") String path);
@Encoded
@Path("/a")
SubClientWithoutEncoded sub1();
@Path("/a")
SubClientWithEncodedInMethod sub2();
@Path("/a")
SubClientWithEncodedInClass sub3();
}
@Encoded
@Path("/server")
public | ClientWithEncodedInMethod |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/io/ExternalResourcesTest.java | {
"start": 1274,
"end": 3166
} | class ____ {
private File sourceFile;
private File destFile;
private File badFile;
private File tempFile;
/**
* @throws java.lang.Exception
*/
@BeforeEach
void setUp() throws Exception {
tempFile = Files.createTempFile("migration", "properties").toFile();
tempFile.canWrite();
sourceFile = Files.createTempFile("test1", "sql").toFile();
destFile = Files.createTempFile("test2", "sql").toFile();
}
@Test
void testcopyExternalResource() {
assertDoesNotThrow(() -> {
ExternalResources.copyExternalResource(sourceFile, destFile);
});
}
@Test
void testcopyExternalResource_fileNotFound() {
try {
badFile = Path.of("/tmp/nofile.sql").toFile();
ExternalResources.copyExternalResource(badFile, destFile);
} catch (Exception e) {
assertTrue(e instanceof NoSuchFileException);
}
}
@Test
void testcopyExternalResource_emptyStringAsFile() {
try {
badFile = Path.of(" ").toFile();
ExternalResources.copyExternalResource(badFile, destFile);
} catch (Exception e) {
assertTrue(e instanceof InvalidPathException || e instanceof NoSuchFileException);
}
}
@Test
void getConfiguredTemplate() {
String templateName = "";
try (BufferedWriter fileWriter = Files.newBufferedWriter(tempFile.toPath(), StandardCharsets.UTF_8)) {
fileWriter.append("new_command.template=templates/col_new_template_migration.sql");
fileWriter.flush();
templateName = ExternalResources.getConfiguredTemplate(tempFile.getAbsolutePath(), "new_command.template");
assertEquals("templates/col_new_template_migration.sql", templateName);
} catch (Exception e) {
fail("Test failed with exception: " + e.getMessage());
}
}
@AfterEach
void cleanUp() {
sourceFile.delete();
destFile.delete();
tempFile.delete();
}
}
| ExternalResourcesTest |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/onexception/OnExceptionLoadBalancerDoubleIssueTest.java | {
"start": 985,
"end": 2366
} | class ____ extends ContextTestSupport {
@Test
public void testNotDouble() throws Exception {
// there should only be 3 processors on the load balancer
getMockEndpoint("mock:error").expectedBodiesReceived("A", "D", "G");
getMockEndpoint("mock:error2").expectedBodiesReceived("B", "E");
getMockEndpoint("mock:error3").expectedBodiesReceived("C", "F");
template.sendBody("direct:foo", "A");
template.sendBody("direct:foo", "B");
template.sendBody("direct:bar", "C");
template.sendBody("direct:bar", "D");
template.sendBody("direct:foo", "E");
template.sendBody("direct:bar", "F");
template.sendBody("direct:foo", "G");
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
onException(Exception.class).handled(true).to("direct:error");
from("direct:error").loadBalance().roundRobin().id("round").to("mock:error", "mock:error2", "mock:error3");
from("direct:foo").throwException(new IllegalArgumentException("Forced"));
from("direct:bar").throwException(new IllegalArgumentException("Also Forced"));
}
};
}
}
| OnExceptionLoadBalancerDoubleIssueTest |
java | quarkusio__quarkus | extensions/schema-registry/confluent/common/deployment/src/main/java/io/quarkus/confluent/registry/common/ConfluentRegistryClientProcessor.java | {
"start": 672,
"end": 4092
} | class ____ {
@BuildStep
public void confluentRegistryClient(
CurateOutcomeBuildItem curateOutcomeBuildItem,
BuildProducer<ReflectiveClassBuildItem> reflectiveClass,
BuildProducer<ServiceProviderBuildItem> serviceProviders,
BuildProducer<ExtensionSslNativeSupportBuildItem> sslNativeSupport) {
if (curateOutcomeBuildItem.getApplicationModel().getDependencies().stream().anyMatch(
x -> x.getGroupId().equals("io.confluent")
&& x.getArtifactId().equals("kafka-schema-serializer"))) {
String nullContextNameStrategy = "io.confluent.kafka.serializers.context.NullContextNameStrategy";
if (QuarkusClassLoader.isClassPresentAtRuntime(nullContextNameStrategy)) {
// Class not present before v7.0.0
reflectiveClass.produce(ReflectiveClassBuildItem.builder(nullContextNameStrategy)
.build());
}
reflectiveClass
.produce(ReflectiveClassBuildItem.builder("io.confluent.kafka.serializers.subject.TopicNameStrategy",
"io.confluent.kafka.serializers.subject.TopicRecordNameStrategy",
"io.confluent.kafka.serializers.subject.RecordNameStrategy").methods()
.build());
}
if (curateOutcomeBuildItem.getApplicationModel().getDependencies().stream().anyMatch(
x -> x.getGroupId().equals("io.confluent")
&& x.getArtifactId().equals("kafka-schema-registry-client"))) {
reflectiveClass
.produce(ReflectiveClassBuildItem
.builder("io.confluent.kafka.schemaregistry.client.rest.entities.ErrorMessage",
"io.confluent.kafka.schemaregistry.client.rest.entities.Schema",
"io.confluent.kafka.schemaregistry.client.rest.entities.Config",
"io.confluent.kafka.schemaregistry.client.rest.entities.SchemaReference",
"io.confluent.kafka.schemaregistry.client.rest.entities.SchemaString",
"io.confluent.kafka.schemaregistry.client.rest.entities.SchemaTypeConverter",
"io.confluent.kafka.schemaregistry.client.rest.entities.ServerClusterId",
"io.confluent.kafka.schemaregistry.client.rest.entities.SubjectVersion")
.methods().build());
reflectiveClass
.produce(ReflectiveClassBuildItem.builder(
"io.confluent.kafka.schemaregistry.client.rest.entities.requests.CompatibilityCheckResponse",
"io.confluent.kafka.schemaregistry.client.rest.entities.requests.ConfigUpdateRequest",
"io.confluent.kafka.schemaregistry.client.rest.entities.requests.ModeUpdateRequest",
"io.confluent.kafka.schemaregistry.client.rest.entities.requests.RegisterSchemaRequest",
"io.confluent.kafka.schemaregistry.client.rest.entities.requests.RegisterSchemaResponse")
.methods().build());
// Make this a weak registration since the | ConfluentRegistryClientProcessor |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/internal/strings/Strings_assertHasSizeGreaterThan_Test.java | {
"start": 1300,
"end": 2644
} | class ____ extends StringsBaseTest {
@Test
void should_fail_if_actual_is_null() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> strings.assertHasSizeGreaterThan(someInfo(), null, 3))
.withMessage(actualIsNull());
}
@Test
void should_fail_if_size_of_actual_is_equal_to_expected_size() {
AssertionInfo info = someInfo();
String actual = "Han";
String errorMessage = shouldHaveSizeGreaterThan(actual, actual.length(), 3).create();
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> strings.assertHasSizeGreaterThan(info, actual, 3))
.withMessage(errorMessage);
}
@Test
void should_fail_if_size_of_actual_is_less_than_expected_size() {
AssertionInfo info = someInfo();
String actual = "Han";
String errorMessage = shouldHaveSizeGreaterThan(actual, actual.length(), 4).create();
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> strings.assertHasSizeGreaterThan(info, actual, 4))
.withMessage(errorMessage);
}
@Test
void should_pass_if_size_of_actual_is_greater_than_expected_size() {
strings.assertHasSizeGreaterThan(someInfo(), "Han", 2);
}
}
| Strings_assertHasSizeGreaterThan_Test |
java | spring-projects__spring-boot | module/spring-boot-devtools/src/test/java/org/springframework/boot/devtools/restart/RestarterTests.java | {
"start": 7917,
"end": 8718
} | class ____ {
private int count;
private static final AtomicBoolean restart = new AtomicBoolean();
@Scheduled(fixedDelay = 200)
void tickBean() {
System.out.println("Tick " + this.count++ + " " + Thread.currentThread());
}
@Scheduled(initialDelay = 500, fixedDelay = 500)
void restart() {
if (SampleApplication.restart.compareAndSet(false, true)) {
Restarter.getInstance().restart();
}
}
static void main(String... args) {
Restarter.initialize(args, false, new MockRestartInitializer(), true);
AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(
SampleApplication.class);
context.addApplicationListener(new CloseCountingApplicationListener());
Restarter.getInstance().prepare(context);
}
}
static | SampleApplication |
java | quarkusio__quarkus | extensions/qute/runtime/src/main/java/io/quarkus/qute/runtime/jsonobject/JsonObjectValueResolver.java | {
"start": 446,
"end": 2095
} | class ____ implements ValueResolver {
@Override
public boolean appliesTo(EvalContext context) {
return ValueResolvers.matchClass(context, JsonObject.class);
}
@Override
public CompletionStage<Object> resolve(EvalContext context) {
JsonObject jsonObject = (JsonObject) context.getBase();
switch (context.getName()) {
case "fieldNames":
case "fields":
return CompletableFuture.completedFuture(jsonObject.fieldNames());
case "size":
return CompletableFuture.completedFuture(jsonObject.size());
case "empty":
case "isEmpty":
return CompletableFuture.completedFuture(jsonObject.isEmpty());
case "get":
if (context.getParams().size() == 1) {
return context.evaluate(context.getParams().get(0)).thenCompose(k -> {
return CompletableFuture.completedFuture(jsonObject.getValue((String) k));
});
}
case "containsKey":
if (context.getParams().size() == 1) {
return context.evaluate(context.getParams().get(0)).thenCompose(k -> {
return CompletableFuture.completedFuture(jsonObject.containsKey((String) k));
});
}
default:
return jsonObject.containsKey(context.getName())
? CompletableFuture.completedFuture(jsonObject.getValue(context.getName()))
: Results.notFound(context);
}
}
}
| JsonObjectValueResolver |
java | alibaba__nacos | config/src/main/java/com/alibaba/nacos/config/server/controller/ConfigServletInner.java | {
"start": 3007,
"end": 15955
} | class ____ {
private static final int TRY_GET_LOCK_TIMES = 9;
private static final int START_LONG_POLLING_VERSION_NUM = 204;
private static final Logger LOGGER = LoggerFactory.getLogger(ConfigServletInner.class);
private final LongPollingService longPollingService;
private final ConfigQueryChainService configQueryChainService;
public ConfigServletInner(LongPollingService longPollingService, ConfigQueryChainService configQueryChainService) {
this.longPollingService = longPollingService;
this.configQueryChainService = configQueryChainService;
}
private static String getDecryptContent(ConfigQueryChainResponse chainResponse, String dataId) {
Pair<String, String> pair = EncryptionHandler.decryptHandler(dataId, chainResponse.getEncryptedDataKey(),
chainResponse.getContent());
return pair.getSecond();
}
/**
* long polling the config.
*/
public String doPollingConfig(HttpServletRequest request, HttpServletResponse response,
Map<String, ConfigListenState> clientMd5Map, int probeRequestSize) throws IOException {
// Long polling.
if (LongPollingService.isSupportLongPolling(request)) {
longPollingService.addLongPollingClient(request, response, clientMd5Map, probeRequestSize);
return HttpServletResponse.SC_OK + "";
}
// Compatible with short polling logic.
Map<String, ConfigListenState> changedGroups = MD5Util.compareMd5(request, response, clientMd5Map);
// Compatible with short polling result.
String oldResult = MD5Util.compareMd5OldResult(changedGroups);
String newResult = MD5Util.compareMd5ResultString(changedGroups);
String version = request.getHeader(Constants.CLIENT_VERSION_HEADER);
if (version == null) {
version = "2.0.0";
}
int versionNum = Protocol.getVersionNumber(version);
// Before 2.0.4 version, return value is put into header.
if (versionNum < START_LONG_POLLING_VERSION_NUM) {
response.addHeader(Constants.PROBE_MODIFY_RESPONSE, oldResult);
response.addHeader(Constants.PROBE_MODIFY_RESPONSE_NEW, newResult);
} else {
request.setAttribute("content", newResult);
}
// Disable cache.
response.setHeader("Pragma", "no-cache");
response.setDateHeader("Expires", 0);
response.setHeader("Cache-Control", "no-cache,no-store");
response.setStatus(HttpServletResponse.SC_OK);
return HttpServletResponse.SC_OK + "";
}
/**
* Execute to get config [API V1] or [API V2].
*/
public String doGetConfig(HttpServletRequest request, HttpServletResponse response, String dataId, String group,
String tenant, String tag, String isNotify, String clientIp, ApiVersionEnum apiVersion) throws IOException {
boolean notify = StringUtils.isNotBlank(isNotify) && Boolean.parseBoolean(isNotify);
String requestIpApp = RequestUtil.getAppName(request);
ConfigQueryChainRequest chainRequest = ConfigChainRequestExtractorService.getExtractor().extract(request);
chainRequest.setTenant(NamespaceUtil.processNamespaceParameter(chainRequest.getTenant()));
ConfigQueryChainResponse chainResponse = configQueryChainService.handle(chainRequest);
if (ResponseCode.FAIL.getCode() == chainResponse.getResultCode()) {
throw new NacosConfigException(chainResponse.getMessage());
}
logPullEvent(dataId, group, tenant, requestIpApp, chainResponse, clientIp, notify, tag);
switch (chainResponse.getStatus()) {
case CONFIG_NOT_FOUND:
case SPECIAL_TAG_CONFIG_NOT_FOUND:
return handlerConfigNotFound(response, apiVersion);
case CONFIG_QUERY_CONFLICT:
return handlerConfigConflict(response, apiVersion);
default:
return handleResponse(response, chainResponse, dataId, group, apiVersion);
}
}
private String handlerConfigNotFound(HttpServletResponse response, ApiVersionEnum apiVersion) throws IOException {
response.setStatus(HttpServletResponse.SC_NOT_FOUND);
if (apiVersion == ApiVersionEnum.V1) {
return writeResponseForV1(response, Result.failure(ErrorCode.RESOURCE_NOT_FOUND, "config data not exist"));
} else {
return writeResponseForV2(response, Result.failure(ErrorCode.RESOURCE_NOT_FOUND, "config data not exist"));
}
}
private String handlerConfigConflict(HttpServletResponse response, ApiVersionEnum apiVersion) throws IOException {
response.setStatus(HttpServletResponse.SC_CONFLICT);
if (apiVersion == ApiVersionEnum.V1) {
return writeResponseForV1(response,
Result.failure(ErrorCode.RESOURCE_CONFLICT, "requested file is being modified, please try later."));
} else {
return writeResponseForV2(response,
Result.failure(ErrorCode.RESOURCE_CONFLICT, "requested file is being modified, please try later."));
}
}
private String handleResponse(HttpServletResponse response, ConfigQueryChainResponse chainResponse, String dataId,
String group, ApiVersionEnum apiVersion) throws IOException {
if (apiVersion == ApiVersionEnum.V1) {
return handleResponseForV1(response, chainResponse, dataId, group);
} else {
return handleResponseForV2(response, chainResponse, dataId, group);
}
}
private String handleResponseForV1(HttpServletResponse response, ConfigQueryChainResponse chainResponse,
String dataId, String tag) throws IOException {
if (chainResponse.getContent() == null) {
return handlerConfigNotFound(response, ApiVersionEnum.V1);
}
setCommonResponseHead(response, chainResponse, tag);
setResponseHeadForV1(response, chainResponse);
writeContentForV1(response, chainResponse, dataId);
return HttpServletResponse.SC_OK + "";
}
private String handleResponseForV2(HttpServletResponse response, ConfigQueryChainResponse chainResponse,
String dataId, String tag) throws IOException {
if (chainResponse.getContent() == null) {
return handlerConfigNotFound(response, ApiVersionEnum.V2);
}
setCommonResponseHead(response, chainResponse, tag);
setResponseHeadForV2(response);
writeContentForV2(response, chainResponse, dataId);
return HttpServletResponse.SC_OK + "";
}
private void setResponseHeadForV1(HttpServletResponse response, ConfigQueryChainResponse chainResponse) {
String contentType = chainResponse.getContentType();
if (StringUtils.isBlank(contentType)) {
contentType = FileTypeEnum.TEXT.getContentType();
}
response.setHeader(HttpHeaderConsts.CONTENT_TYPE, contentType);
}
private void setResponseHeadForV2(HttpServletResponse response) {
response.setHeader(HttpHeaderConsts.CONTENT_TYPE, MediaType.APPLICATION_JSON);
}
private void writeContentForV1(HttpServletResponse response, ConfigQueryChainResponse chainResponse, String dataId)
throws IOException {
PrintWriter out = response.getWriter();
try {
String decryptContent = getDecryptContent(chainResponse, dataId);
out.print(decryptContent);
} finally {
out.flush();
out.close();
}
}
private void writeContentForV2(HttpServletResponse response, ConfigQueryChainResponse chainResponse, String dataId)
throws IOException {
PrintWriter out = response.getWriter();
try {
String decryptContent = getDecryptContent(chainResponse, dataId);
out.print(JacksonUtils.toJson(Result.success(decryptContent)));
} finally {
out.flush();
out.close();
}
}
private String writeResponseForV1(HttpServletResponse response, Result<String> result) throws IOException {
PrintWriter writer = response.getWriter();
writer.println(result.getData());
return response.getStatus() + "";
}
private String writeResponseForV2(HttpServletResponse response, Result<String> result) throws IOException {
PrintWriter writer = response.getWriter();
writer.println(JacksonUtils.toJson(result));
return response.getStatus() + "";
}
private String resolvePullEvent(ConfigQueryChainResponse chainResponse, String tag) {
switch (chainResponse.getStatus()) {
case CONFIG_FOUND_GRAY:
ConfigCacheGray matchedGray = chainResponse.getMatchedGray();
if (matchedGray != null) {
return ConfigTraceService.PULL_EVENT + "-" + matchedGray.getGrayName();
} else {
return ConfigTraceService.PULL_EVENT;
}
case SPECIAL_TAG_CONFIG_NOT_FOUND:
return ConfigTraceService.PULL_EVENT + "-" + TagGrayRule.TYPE_TAG + "-" + tag;
default:
return ConfigTraceService.PULL_EVENT;
}
}
private void logPullEvent(String dataId, String group, String tenant, String requestIpApp,
ConfigQueryChainResponse chainResponse, String clientIp, boolean notify, String tag) {
String pullEvent = resolvePullEvent(chainResponse, tag);
ConfigQueryChainResponse.ConfigQueryStatus status = chainResponse.getStatus();
if (status == ConfigQueryChainResponse.ConfigQueryStatus.CONFIG_QUERY_CONFLICT) {
ConfigTraceService.logPullEvent(dataId, group, tenant, requestIpApp, -1, pullEvent,
ConfigTraceService.PULL_TYPE_CONFLICT, -1, clientIp, notify, "http");
} else if (status == ConfigQueryChainResponse.ConfigQueryStatus.CONFIG_NOT_FOUND
|| chainResponse.getContent() == null) {
ConfigTraceService.logPullEvent(dataId, group, tenant, requestIpApp, -1, pullEvent,
ConfigTraceService.PULL_TYPE_NOTFOUND, -1, clientIp, notify, "http");
} else {
long delayed = System.currentTimeMillis() - chainResponse.getLastModified();
ConfigTraceService.logPullEvent(dataId, group, tenant, requestIpApp, chainResponse.getLastModified(),
pullEvent, ConfigTraceService.PULL_TYPE_OK, delayed, clientIp, notify, "http");
}
}
private void setCommonResponseHead(HttpServletResponse response, ConfigQueryChainResponse chainResponse,
String tag) {
String configType = chainResponse.getConfigType() != null ? chainResponse.getConfigType()
: FileTypeEnum.TEXT.getFileType();
response.setHeader(CONFIG_TYPE, configType);
response.setHeader(CONTENT_MD5, chainResponse.getMd5());
response.setHeader("Pragma", "no-cache");
response.setDateHeader("Expires", 0);
response.setHeader("Cache-Control", "no-cache,no-store");
response.setDateHeader("Last-Modified", chainResponse.getLastModified());
if (chainResponse.getEncryptedDataKey() != null) {
response.setHeader("Encrypted-Data-Key", chainResponse.getEncryptedDataKey());
}
// Check if there is a matched gray rule
if (ConfigQueryChainResponse.ConfigQueryStatus.CONFIG_FOUND_GRAY == chainResponse.getStatus()) {
if (BetaGrayRule.TYPE_BETA.equals(chainResponse.getMatchedGray().getGrayRule().getType())) {
response.setHeader("isBeta", "true");
} else if (TagGrayRule.TYPE_TAG.equals(chainResponse.getMatchedGray().getGrayRule().getType())) {
try {
response.setHeader(TagGrayRule.TYPE_TAG,
URLEncoder.encode(chainResponse.getMatchedGray().getGrayRule().getRawGrayRuleExp(),
StandardCharsets.UTF_8.displayName()));
} catch (Exception e) {
LOGGER.error("Error encoding tag", e);
}
}
}
// Check if there is a special tag
if (ConfigQueryChainResponse.ConfigQueryStatus.SPECIAL_TAG_CONFIG_NOT_FOUND == chainResponse.getStatus()) {
try {
response.setHeader(VIPSERVER_TAG, URLEncoder.encode(tag, StandardCharsets.UTF_8.displayName()));
} catch (Exception e) {
LOGGER.error("Error encoding tag", e);
}
}
}
}
| ConfigServletInner |
java | apache__flink | flink-clients/src/test/java/org/apache/flink/client/cli/util/DummyCustomCommandLine.java | {
"start": 1188,
"end": 1912
} | class ____ implements CustomCommandLine {
@Override
public boolean isActive(CommandLine commandLine) {
return true;
}
@Override
public String getId() {
return DummyClusterClientFactory.ID;
}
@Override
public void addRunOptions(Options baseOptions) {
// nothing to add
}
@Override
public void addGeneralOptions(Options baseOptions) {
// nothing to add
}
@Override
public Configuration toConfiguration(CommandLine commandLine) {
final Configuration configuration = new Configuration();
configuration.set(DeploymentOptions.TARGET, DummyClusterClientFactory.ID);
return configuration;
}
}
| DummyCustomCommandLine |
java | apache__camel | components/camel-univocity-parsers/src/test/java/org/apache/camel/dataformat/univocity/UniVocityFixedDataFormatUnmarshalTest.java | {
"start": 1763,
"end": 1847
} | class ____ the unmarshalling of {@link UniVocityFixedDataFormat}.
*/
public final | tests |
java | elastic__elasticsearch | x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobsAndModelsIT.java | {
"start": 2827,
"end": 12044
} | class ____ extends BaseMlIntegTestCase {
public void testCluster_GivenAnomalyDetectionJobAndTrainedModelDeployment_ShouldNotAllocateBothOnSameNode() throws Exception {
// This test starts 2 ML nodes and then starts an anomaly detection job and a
// trained model deployment that do not both fit in one node. We then proceed
// to stop both ML nodes and start a single ML node back up. We should see
// that both the job and the model cannot be allocated on that node.
internalCluster().ensureAtMostNumDataNodes(0);
logger.info("Starting dedicated master node...");
internalCluster().startMasterOnlyNode();
logger.info("Starting dedicated data node...");
internalCluster().startDataOnlyNode();
logger.info("Starting dedicated ml node...");
internalCluster().startNode(onlyRoles(Set.of(DiscoveryNodeRole.ML_ROLE)));
logger.info("Starting dedicated ml node...");
internalCluster().startNode(onlyRoles(Set.of(DiscoveryNodeRole.ML_ROLE)));
ensureStableCluster();
MlMemoryAction.Response memoryStats = client().execute(MlMemoryAction.INSTANCE, new MlMemoryAction.Request("ml:true")).actionGet();
long maxNativeBytesPerNode = 0;
for (MlMemoryAction.Response.MlMemoryStats stats : memoryStats.getNodes()) {
maxNativeBytesPerNode = stats.getMlMax().getBytes();
}
String jobId = "test-node-goes-down-while-running-job";
Job.Builder job = createJob(jobId, ByteSizeValue.ofBytes((long) (0.8 * maxNativeBytesPerNode)));
PutJobAction.Request putJobRequest = new PutJobAction.Request(job);
client().execute(PutJobAction.INSTANCE, putJobRequest).actionGet();
client().execute(OpenJobAction.INSTANCE, new OpenJobAction.Request(job.getId())).actionGet();
TrainedModelConfig model = TrainedModelConfig.builder()
.setModelId("test_model")
.setModelType(TrainedModelType.PYTORCH)
.setModelSize((long) (0.3 * maxNativeBytesPerNode))
.setInferenceConfig(new PassThroughConfig(new VocabularyConfig(InferenceIndexConstants.nativeDefinitionStore()), null, null))
.setLocation(new IndexLocation(InferenceIndexConstants.nativeDefinitionStore()))
.build();
TrainedModelDefinitionDoc modelDefinitionDoc = new TrainedModelDefinitionDoc(
new BytesArray(""),
model.getModelId(),
0,
model.getModelSize(),
model.getModelSize(),
1,
true
);
try (XContentBuilder builder = JsonXContent.contentBuilder()) {
modelDefinitionDoc.toXContent(builder, null);
client().execute(
TransportIndexAction.TYPE,
new IndexRequest(InferenceIndexConstants.nativeDefinitionStore()).source(builder)
.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
).actionGet();
}
client().execute(PutTrainedModelAction.INSTANCE, new PutTrainedModelAction.Request(model, true)).actionGet();
client().execute(
PutTrainedModelVocabularyAction.INSTANCE,
new PutTrainedModelVocabularyAction.Request(
model.getModelId(),
List.of(
"these",
"are",
"my",
"words",
BertTokenizer.SEPARATOR_TOKEN,
BertTokenizer.CLASS_TOKEN,
BertTokenizer.UNKNOWN_TOKEN,
BertTokenizer.PAD_TOKEN
),
List.of(),
List.of(),
false
)
).actionGet();
logger.info("starting deployment: " + model.getModelId());
client().execute(
StartTrainedModelDeploymentAction.INSTANCE,
new StartTrainedModelDeploymentAction.Request(model.getModelId(), model.getModelId())
).actionGet();
setMlIndicesDelayedNodeLeftTimeoutToZero();
String jobNode = client().execute(GetJobsStatsAction.INSTANCE, new GetJobsStatsAction.Request(job.getId()))
.actionGet()
.getResponse()
.results()
.get(0)
.getNode()
.getName();
String modelNode = client().execute(
GetTrainedModelsStatsAction.INSTANCE,
new GetTrainedModelsStatsAction.Request(model.getModelId())
).actionGet().getResources().results().get(0).getDeploymentStats().getNodeStats().get(0).getNode().getName();
// Assert the job and model were assigned to different nodes as they would not fit in the same node
assertThat(jobNode, not(equalTo(modelNode)));
// Stop both ML nodes
logger.info("Stopping both ml nodes...");
assertThat(internalCluster().stopNode(jobNode), is(true));
assertThat(internalCluster().stopNode(modelNode), is(true));
// Wait for both the job and model to be unassigned
assertBusy(() -> {
GetJobsStatsAction.Response jobStats = client().execute(
GetJobsStatsAction.INSTANCE,
new GetJobsStatsAction.Request(job.getId())
).actionGet();
assertThat(jobStats.getResponse().results().get(0).getNode(), is(nullValue()));
});
assertBusy(() -> {
GetTrainedModelsStatsAction.Response modelStats = client().execute(
GetTrainedModelsStatsAction.INSTANCE,
new GetTrainedModelsStatsAction.Request(model.getModelId())
).actionGet();
assertThat(modelStats.getResources().results().get(0).getDeploymentStats().getNodeStats(), is(empty()));
});
// Start a new ML node
logger.info("Starting dedicated ml node...");
String lastMlNodeName = internalCluster().startNode(onlyRoles(Set.of(DiscoveryNodeRole.ML_ROLE)));
ensureStableCluster();
// Wait until either the job or the model is assigned
assertBusy(() -> {
GetTrainedModelsStatsAction.Response modelStatsResponse = client().execute(
GetTrainedModelsStatsAction.INSTANCE,
new GetTrainedModelsStatsAction.Request(model.getModelId())
).actionGet();
GetTrainedModelsStatsAction.Response.TrainedModelStats modelStats = modelStatsResponse.getResources().results().get(0);
GetJobsStatsAction.Response jobStatsResponse = client().execute(
GetJobsStatsAction.INSTANCE,
new GetJobsStatsAction.Request(job.getId())
).actionGet();
GetJobsStatsAction.Response.JobStats jobStats = jobStatsResponse.getResponse().results().get(0);
boolean isModelAssigned = modelStats.getDeploymentStats().getNodeStats().isEmpty() == false;
boolean isJobAssigned = jobStats.getNode() != null;
assertThat(isJobAssigned ^ isModelAssigned, is(true));
if (isJobAssigned) {
assertThat(jobStats.getNode().getName(), equalTo(lastMlNodeName));
assertThat(modelStats.getDeploymentStats().getReason(), containsString("insufficient available memory"));
} else {
assertThat(modelStats.getDeploymentStats().getNodeStats().get(0).getNode().getName(), equalTo(lastMlNodeName));
assertThat(jobStats.getAssignmentExplanation(), containsString("insufficient available memory"));
}
});
// Start another new ML node
logger.info("Starting dedicated ml node...");
internalCluster().startNode(onlyRoles(Set.of(DiscoveryNodeRole.ML_ROLE)));
ensureStableCluster();
// Wait until both the job and the model are assigned
// and check they are not on the same node
assertBusy(() -> {
GetTrainedModelsStatsAction.Response modelStatsResponse = client().execute(
GetTrainedModelsStatsAction.INSTANCE,
new GetTrainedModelsStatsAction.Request(model.getModelId())
).actionGet();
GetTrainedModelsStatsAction.Response.TrainedModelStats modelStats = modelStatsResponse.getResources().results().get(0);
assertThat(modelStats.getDeploymentStats().getNodeStats().isEmpty(), is(false));
GetJobsStatsAction.Response jobStatsResponse = client().execute(
GetJobsStatsAction.INSTANCE,
new GetJobsStatsAction.Request(job.getId())
).actionGet();
GetJobsStatsAction.Response.JobStats jobStats = jobStatsResponse.getResponse().results().get(0);
assertThat(jobStats.getNode(), is(notNullValue()));
assertThat(jobStats.getNode(), is(not(equalTo(modelStats.getDeploymentStats().getNodeStats().get(0).getNode()))));
});
// Clean up
client().execute(CloseJobAction.INSTANCE, new CloseJobAction.Request(jobId).setForce(true)).actionGet();
client().execute(StopTrainedModelDeploymentAction.INSTANCE, new StopTrainedModelDeploymentAction.Request(model.getModelId()))
.actionGet();
}
}
| JobsAndModelsIT |
java | apache__flink | flink-filesystems/flink-hadoop-fs/src/test/java/org/apache/flink/runtime/fs/hdfs/LimitedConnectionsConfigurationTest.java | {
"start": 1344,
"end": 3323
} | class ____ {
@Test
void testConfiguration() throws Exception {
// nothing configured, we should get a regular file system
FileSystem hdfs = FileSystem.get(URI.create("hdfs://localhost:12345/a/b/c"));
FileSystem ftpfs = FileSystem.get(URI.create("ftp://localhost:12345/a/b/c"));
assertThat(hdfs).isNotInstanceOf(LimitedConnectionsFileSystem.class);
assertThat(ftpfs).isNotInstanceOf(LimitedConnectionsFileSystem.class);
// configure some limits, which should cause "fsScheme" to be limited
final Configuration config = new Configuration();
config.set(getIntConfigOption("fs.hdfs.limit.total"), 40);
config.set(getIntConfigOption("fs.hdfs.limit.input"), 39);
config.set(getIntConfigOption("fs.hdfs.limit.output"), 38);
config.set(getIntConfigOption("fs.hdfs.limit.timeout"), 23456);
config.set(getIntConfigOption("fs.hdfs.limit.stream-timeout"), 34567);
try {
FileSystem.initialize(config);
hdfs = FileSystem.get(URI.create("hdfs://localhost:12345/a/b/c"));
ftpfs = FileSystem.get(URI.create("ftp://localhost:12345/a/b/c"));
assertThat(hdfs).isInstanceOf(LimitedConnectionsFileSystem.class);
assertThat(ftpfs).isNotInstanceOf(LimitedConnectionsFileSystem.class);
LimitedConnectionsFileSystem limitedFs = (LimitedConnectionsFileSystem) hdfs;
assertThat(limitedFs.getMaxNumOpenStreamsTotal()).isEqualTo(40);
assertThat(limitedFs.getMaxNumOpenInputStreams()).isEqualTo(39);
assertThat(limitedFs.getMaxNumOpenOutputStreams()).isEqualTo(38);
assertThat(limitedFs.getStreamOpenTimeout()).isEqualTo(23456);
assertThat(limitedFs.getStreamInactivityTimeout()).isEqualTo(34567);
} finally {
// clear all settings
FileSystem.initialize(new Configuration());
}
}
}
| LimitedConnectionsConfigurationTest |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/vector/VectorSimilarityFunction.java | {
"start": 12497,
"end": 15552
} | class ____ implements VectorValueProvider {
record Factory(EvalOperator.ExpressionEvaluator.Factory expressionEvaluatorFactory) implements VectorValueProviderFactory {
public VectorValueProvider build(DriverContext context) {
return new ExpressionVectorProvider(expressionEvaluatorFactory.get(context));
}
@Override
public String toString() {
return ExpressionVectorProvider.class.getSimpleName() + "[expressionEvaluator=[" + expressionEvaluatorFactory + "]]";
}
}
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ExpressionVectorProvider.class);
private final EvalOperator.ExpressionEvaluator expressionEvaluator;
private FloatBlock block;
private float[] scratch;
ExpressionVectorProvider(EvalOperator.ExpressionEvaluator expressionEvaluator) {
assert expressionEvaluator != null;
this.expressionEvaluator = expressionEvaluator;
}
@Override
public void eval(Page page) {
block = (FloatBlock) expressionEvaluator.eval(page);
}
@Override
public float[] getVector(int position) {
if (block.isNull(position)) {
return null;
}
if (scratch == null) {
int dims = block.getValueCount(position);
if (dims > 0) {
scratch = new float[dims];
}
}
if (scratch != null) {
readFloatArray(block, block.getFirstValueIndex(position), scratch);
}
return scratch;
}
@Override
public int getDimensions() {
for (int p = 0; p < block.getPositionCount(); p++) {
int dims = block.getValueCount(p);
if (dims > 0) {
return dims;
}
}
return 0;
}
@Override
public void finish() {
if (block != null) {
block.close();
block = null;
scratch = null;
}
}
@Override
public long baseRamBytesUsed() {
return BASE_RAM_BYTES_USED + expressionEvaluator.baseRamBytesUsed() + (block == null ? 0 : block.ramBytesUsed())
+ (scratch == null ? 0 : RamUsageEstimator.shallowSizeOf(scratch));
}
@Override
public void close() {
Releasables.close(expressionEvaluator);
}
private static void readFloatArray(FloatBlock block, int firstValueIndex, float[] scratch) {
for (int i = 0; i < scratch.length; i++) {
scratch[i] = block.getFloat(firstValueIndex + i);
}
}
@Override
public String toString() {
return this.getClass().getSimpleName() + "[expressionEvaluator=[" + expressionEvaluator + "]]";
}
}
}
| ExpressionVectorProvider |
java | quarkusio__quarkus | extensions/jdbc/jdbc-postgresql/runtime/src/main/java/io/quarkus/jdbc/postgresql/runtime/graal/UnsupportedTransformerFactoryFeatures.java | {
"start": 749,
"end": 2084
} | class ____ {
@Substitute
private static void setFactoryProperties(Object factory) {
setFeatureQuietly(factory, XMLConstants.FEATURE_SECURE_PROCESSING, true);
// setFeatureQuietly(factory, "http://apache.org/xml/features/disallow-doctype-decl", true);
// setFeatureQuietly(factory, "http://apache.org/xml/features/nonvalidating/load-external-dtd", false);
// setFeatureQuietly(factory, "http://xml.org/sax/features/external-general-entities", false);
// setFeatureQuietly(factory, "http://xml.org/sax/features/external-parameter-entities", false);
// Values from XMLConstants inlined for JDK 1.6 compatibility
setAttributeQuietly(factory, "http://javax.xml.XMLConstants/property/accessExternalDTD", "");
// setAttributeQuietly(factory, "http://javax.xml.XMLConstants/property/accessExternalSchema", "");
setAttributeQuietly(factory, "http://javax.xml.XMLConstants/property/accessExternalStylesheet", "");
}
@Alias
private static void setFeatureQuietly(Object factory, String name, boolean value) {
//no-op : will use the original code
}
@Alias
private static void setAttributeQuietly(Object factory, String name, Object value) {
//no-op : will use the original code
}
}
| UnsupportedTransformerFactoryFeatures |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/AbsLongEvaluator.java | {
"start": 1080,
"end": 3932
} | class ____ implements EvalOperator.ExpressionEvaluator {
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(AbsLongEvaluator.class);
private final Source source;
private final EvalOperator.ExpressionEvaluator fieldVal;
private final DriverContext driverContext;
private Warnings warnings;
public AbsLongEvaluator(Source source, EvalOperator.ExpressionEvaluator fieldVal,
DriverContext driverContext) {
this.source = source;
this.fieldVal = fieldVal;
this.driverContext = driverContext;
}
@Override
public Block eval(Page page) {
try (LongBlock fieldValBlock = (LongBlock) fieldVal.eval(page)) {
LongVector fieldValVector = fieldValBlock.asVector();
if (fieldValVector == null) {
return eval(page.getPositionCount(), fieldValBlock);
}
return eval(page.getPositionCount(), fieldValVector).asBlock();
}
}
@Override
public long baseRamBytesUsed() {
long baseRamBytesUsed = BASE_RAM_BYTES_USED;
baseRamBytesUsed += fieldVal.baseRamBytesUsed();
return baseRamBytesUsed;
}
public LongBlock eval(int positionCount, LongBlock fieldValBlock) {
try(LongBlock.Builder result = driverContext.blockFactory().newLongBlockBuilder(positionCount)) {
position: for (int p = 0; p < positionCount; p++) {
switch (fieldValBlock.getValueCount(p)) {
case 0:
result.appendNull();
continue position;
case 1:
break;
default:
warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value"));
result.appendNull();
continue position;
}
long fieldVal = fieldValBlock.getLong(fieldValBlock.getFirstValueIndex(p));
result.appendLong(Abs.process(fieldVal));
}
return result.build();
}
}
public LongVector eval(int positionCount, LongVector fieldValVector) {
try(LongVector.FixedBuilder result = driverContext.blockFactory().newLongVectorFixedBuilder(positionCount)) {
position: for (int p = 0; p < positionCount; p++) {
long fieldVal = fieldValVector.getLong(p);
result.appendLong(p, Abs.process(fieldVal));
}
return result.build();
}
}
@Override
public String toString() {
return "AbsLongEvaluator[" + "fieldVal=" + fieldVal + "]";
}
@Override
public void close() {
Releasables.closeExpectNoException(fieldVal);
}
private Warnings warnings() {
if (warnings == null) {
this.warnings = Warnings.createWarnings(
driverContext.warningsMode(),
source.source().getLineNumber(),
source.source().getColumnNumber(),
source.text()
);
}
return warnings;
}
static | AbsLongEvaluator |
java | quarkusio__quarkus | integration-tests/oidc-code-flow/src/main/java/io/quarkus/it/keycloak/SecurityEventListener.java | {
"start": 300,
"end": 959
} | class ____ {
public void event(@Observes SecurityEvent event) {
String tenantId = event.getSecurityIdentity().getAttribute("tenant-id");
boolean blockingApiAvailable = event.getSecurityIdentity()
.getAttribute(AuthenticationRequestContext.class.getName()) != null;
RoutingContext vertxContext = event.getSecurityIdentity()
.getAttribute(RoutingContext.class.getName());
vertxContext.put("listener-message",
String.format("event:%s,tenantId:%s,blockingApi:%b", event.getEventType().name(), tenantId,
blockingApiAvailable));
}
}
| SecurityEventListener |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/serializer/SerializationConverterTests.java | {
"start": 1573,
"end": 4519
} | class ____ {
@Test
void serializeAndDeserializeStringWithDefaultSerializer() {
SerializingConverter toBytes = new SerializingConverter();
byte[] bytes = toBytes.convert("Testing");
DeserializingConverter fromBytes = new DeserializingConverter();
assertThat(fromBytes.convert(bytes)).isEqualTo("Testing");
}
@Test
void serializeAndDeserializeStringWithExplicitSerializer() {
SerializingConverter toBytes = new SerializingConverter(new DefaultSerializer());
byte[] bytes = toBytes.convert("Testing");
DeserializingConverter fromBytes = new DeserializingConverter();
assertThat(fromBytes.convert(bytes)).isEqualTo("Testing");
}
@Test
void nonSerializableObject() {
SerializingConverter toBytes = new SerializingConverter();
assertThatExceptionOfType(SerializationFailedException.class)
.isThrownBy(() -> toBytes.convert(new Object()))
.havingCause()
.isInstanceOf(IllegalArgumentException.class)
.withMessageContaining("requires a Serializable payload");
}
@Test
void nonSerializableField() {
SerializingConverter toBytes = new SerializingConverter();
assertThatExceptionOfType(SerializationFailedException.class)
.isThrownBy(() -> toBytes.convert(new UnSerializable()))
.withCauseInstanceOf(NotSerializableException.class);
}
@Test
void deserializationFailure() {
DeserializingConverter fromBytes = new DeserializingConverter();
assertThatExceptionOfType(SerializationFailedException.class)
.isThrownBy(() -> fromBytes.convert("Junk".getBytes()));
}
@Test
void deserializationWithExplicitClassLoader() {
DeserializingConverter fromBytes = new DeserializingConverter(getClass().getClassLoader());
SerializingConverter toBytes = new SerializingConverter();
String expected = "SPRING FRAMEWORK";
assertThat(fromBytes.convert(toBytes.convert(expected))).isEqualTo(expected);
}
@Test
void deserializationWithExplicitDeserializer() {
DeserializingConverter fromBytes = new DeserializingConverter(new DefaultDeserializer());
SerializingConverter toBytes = new SerializingConverter();
String expected = "SPRING FRAMEWORK";
assertThat(fromBytes.convert(toBytes.convert(expected))).isEqualTo(expected);
}
@Test
void deserializationIOException() {
ClassNotFoundException classNotFoundException = new ClassNotFoundException();
try (MockedConstruction<ConfigurableObjectInputStream> mocked =
Mockito.mockConstruction(ConfigurableObjectInputStream.class,
(mock, context) -> given(mock.readObject()).willThrow(classNotFoundException))) {
DefaultDeserializer defaultSerializer = new DefaultDeserializer(getClass().getClassLoader());
assertThat(mocked).isNotNull();
assertThatIOException()
.isThrownBy(() -> defaultSerializer.deserialize(new ByteArrayInputStream("test".getBytes())))
.withMessage("Failed to deserialize object type")
.havingCause().isSameAs(classNotFoundException);
}
}
static | SerializationConverterTests |
java | alibaba__nacos | api/src/main/java/com/alibaba/nacos/api/naming/remote/request/BatchInstanceRequest.java | {
"start": 887,
"end": 1709
} | class ____ extends AbstractNamingRequest {
private String type;
/**
* save all service instance.
*/
private List<Instance> instances;
public BatchInstanceRequest() {
}
public BatchInstanceRequest(String namespace, String serviceName, String groupName, String type,
List<Instance> instances) {
super(namespace, serviceName, groupName);
this.type = type;
this.instances = instances;
}
public void setType(String type) {
this.type = type;
}
public String getType() {
return this.type;
}
public List<Instance> getInstances() {
return instances;
}
public void setInstances(List<Instance> instances) {
this.instances = instances;
}
}
| BatchInstanceRequest |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/cluster/metadata/ProjectMetadata.java | {
"start": 47540,
"end": 97618
} | class ____ {
private final ImmutableOpenMap.Builder<String, IndexMetadata> indices;
private final ImmutableOpenMap.Builder<String, IndexTemplateMetadata> templates;
private final ImmutableOpenMap.Builder<String, Metadata.ProjectCustom> customs;
private SortedMap<String, IndexAbstraction> previousIndicesLookup;
private final Map<String, MappingMetadata> mappingsByHash;
// If this is set to false we can skip checking #mappingsByHash for unused entries in #build(). Used as an optimization to save
// the rather expensive logic for removing unused mappings when building from another instance and we know that no mappings can
// have become unused because no indices were updated or removed from this builder in a way that would cause unused entries in
// #mappingsByHash.
private boolean checkForUnusedMappings = true;
private ProjectId id;
Builder(ProjectMetadata projectMetadata) {
this.id = projectMetadata.id;
this.indices = ImmutableOpenMap.builder(projectMetadata.indices);
this.templates = ImmutableOpenMap.builder(projectMetadata.templates);
this.customs = ImmutableOpenMap.builder(projectMetadata.customs);
this.previousIndicesLookup = projectMetadata.indicesLookup;
this.mappingsByHash = new HashMap<>(projectMetadata.mappingsByHash);
this.checkForUnusedMappings = false;
}
Builder() {
this(Map.of(), 0);
}
Builder(Map<String, MappingMetadata> mappingsByHash, int indexCountHint) {
indices = ImmutableOpenMap.builder(indexCountHint);
templates = ImmutableOpenMap.builder();
customs = ImmutableOpenMap.builder();
previousIndicesLookup = null;
this.mappingsByHash = new HashMap<>(mappingsByHash);
indexGraveyard(IndexGraveyard.builder().build()); // create new empty index graveyard to initialize
}
public Builder id(ProjectId id) {
assert this.id == null : "a project's ID cannot be changed";
this.id = id;
return this;
}
public ProjectId getId() {
return id;
}
public Builder put(IndexMetadata.Builder indexMetadataBuilder) {
// we know its a new one, increment the version and store
indexMetadataBuilder.version(indexMetadataBuilder.version() + 1);
dedupeMapping(indexMetadataBuilder);
IndexMetadata indexMetadata = indexMetadataBuilder.build();
IndexMetadata previous = indices.put(indexMetadata.getIndex().getName(), indexMetadata);
if (unsetPreviousIndicesLookup(previous, indexMetadata)) {
previousIndicesLookup = null;
}
maybeSetMappingPurgeFlag(previous, indexMetadata);
return this;
}
public Builder put(IndexMetadata indexMetadata, boolean incrementVersion) {
final String name = indexMetadata.getIndex().getName();
indexMetadata = dedupeMapping(indexMetadata);
IndexMetadata previous;
if (incrementVersion) {
if (indices.get(name) == indexMetadata) {
return this;
}
// if we put a new index metadata, increment its version
indexMetadata = indexMetadata.withIncrementedVersion();
previous = indices.put(name, indexMetadata);
} else {
previous = indices.put(name, indexMetadata);
if (previous == indexMetadata) {
return this;
}
}
if (unsetPreviousIndicesLookup(previous, indexMetadata)) {
previousIndicesLookup = null;
}
maybeSetMappingPurgeFlag(previous, indexMetadata);
return this;
}
public Builder indices(Map<String, IndexMetadata> indices) {
for (var value : indices.values()) {
put(value, false);
}
return this;
}
/**
* Dedupes {@link MappingMetadata} instance from the provided indexMetadata parameter using the sha256
* hash from the compressed source of the mapping. If there is a mapping with the same sha256 hash then
* a new {@link IndexMetadata} is returned with the found {@link MappingMetadata} instance, otherwise
* the {@link MappingMetadata} instance of the indexMetadata parameter is recorded and the indexMetadata
* parameter is then returned.
*/
private IndexMetadata dedupeMapping(IndexMetadata indexMetadata) {
if (indexMetadata.mapping() == null) {
return indexMetadata;
}
String digest = indexMetadata.mapping().getSha256();
MappingMetadata entry = mappingsByHash.get(digest);
if (entry != null) {
return indexMetadata.withMappingMetadata(entry);
} else {
mappingsByHash.put(digest, indexMetadata.mapping());
return indexMetadata;
}
}
/**
* Similar to {@link #dedupeMapping(IndexMetadata)}.
*/
private void dedupeMapping(IndexMetadata.Builder indexMetadataBuilder) {
if (indexMetadataBuilder.mapping() == null) {
return;
}
String digest = indexMetadataBuilder.mapping().getSha256();
MappingMetadata entry = mappingsByHash.get(digest);
if (entry != null) {
indexMetadataBuilder.putMapping(entry);
} else {
mappingsByHash.put(digest, indexMetadataBuilder.mapping());
}
}
private void maybeSetMappingPurgeFlag(@Nullable IndexMetadata previous, IndexMetadata updated) {
if (checkForUnusedMappings) {
return;
}
if (previous == null) {
return;
}
final MappingMetadata mapping = previous.mapping();
if (mapping == null) {
return;
}
final MappingMetadata updatedMapping = updated.mapping();
if (updatedMapping == null) {
return;
}
if (mapping.getSha256().equals(updatedMapping.getSha256()) == false) {
checkForUnusedMappings = true;
}
}
private static boolean unsetPreviousIndicesLookup(IndexMetadata previous, IndexMetadata current) {
if (previous == null) {
return true;
}
if (previous.getAliases().equals(current.getAliases()) == false) {
return true;
}
if (previous.isHidden() != current.isHidden()) {
return true;
}
if (previous.isSystem() != current.isSystem()) {
return true;
}
if (previous.getState() != current.getState()) {
return true;
}
return false;
}
public IndexMetadata get(String index) {
return indices.get(index);
}
public IndexMetadata getSafe(Index index) {
IndexMetadata indexMetadata = get(index.getName());
if (indexMetadata != null) {
if (indexMetadata.getIndexUUID().equals(index.getUUID())) {
return indexMetadata;
}
throw new IndexNotFoundException(
index,
new IllegalStateException(
"index uuid doesn't match expected: [" + index.getUUID() + "] but got: [" + indexMetadata.getIndexUUID() + "]"
)
);
}
throw new IndexNotFoundException(index);
}
public Builder remove(String index) {
previousIndicesLookup = null;
checkForUnusedMappings = true;
indices.remove(index);
return this;
}
public Builder removeAllIndices() {
previousIndicesLookup = null;
checkForUnusedMappings = true;
indices.clear();
mappingsByHash.clear();
return this;
}
public Builder put(IndexTemplateMetadata.Builder template) {
return put(template.build());
}
public Builder put(IndexTemplateMetadata template) {
templates.put(template.name(), template);
return this;
}
public Builder removeTemplate(String templateName) {
templates.remove(templateName);
return this;
}
public Builder templates(Map<String, IndexTemplateMetadata> templates) {
this.templates.putAllFromMap(templates);
return this;
}
public Builder put(String name, ComponentTemplate componentTemplate) {
Objects.requireNonNull(componentTemplate, "it is invalid to add a null component template: " + name);
var ctm = (ComponentTemplateMetadata) this.customs.get(ComponentTemplateMetadata.TYPE);
Map<String, ComponentTemplate> existingTemplates = ctm != null ? new HashMap<>(ctm.componentTemplates()) : new HashMap<>();
existingTemplates.put(name, componentTemplate);
this.customs.put(ComponentTemplateMetadata.TYPE, new ComponentTemplateMetadata(existingTemplates));
return this;
}
public Builder removeComponentTemplate(String name) {
var ctm = (ComponentTemplateMetadata) this.customs.get(ComponentTemplateMetadata.TYPE);
if (ctm != null) {
var existingTemplates = new HashMap<>(ctm.componentTemplates());
if (existingTemplates.remove(name) != null) {
this.customs.put(ComponentTemplateMetadata.TYPE, new ComponentTemplateMetadata(existingTemplates));
}
}
return this;
}
public Builder componentTemplates(Map<String, ComponentTemplate> componentTemplates) {
this.customs.put(ComponentTemplateMetadata.TYPE, new ComponentTemplateMetadata(componentTemplates));
return this;
}
public Builder indexTemplates(Map<String, ComposableIndexTemplate> indexTemplates) {
this.customs.put(ComposableIndexTemplateMetadata.TYPE, new ComposableIndexTemplateMetadata(indexTemplates));
return this;
}
public Builder put(String name, ComposableIndexTemplate indexTemplate) {
Objects.requireNonNull(indexTemplate, "it is invalid to add a null index template: " + name);
var itmd = (ComposableIndexTemplateMetadata) this.customs.get(ComposableIndexTemplateMetadata.TYPE);
Map<String, ComposableIndexTemplate> existingTemplates = itmd != null ? new HashMap<>(itmd.indexTemplates()) : new HashMap<>();
existingTemplates.put(name, indexTemplate);
this.customs.put(ComposableIndexTemplateMetadata.TYPE, new ComposableIndexTemplateMetadata(existingTemplates));
return this;
}
public Builder removeIndexTemplate(String name) {
var itmd = (ComposableIndexTemplateMetadata) this.customs.get(ComposableIndexTemplateMetadata.TYPE);
if (itmd != null) {
var existingTemplates = new HashMap<>(itmd.indexTemplates());
if (existingTemplates.remove(name) != null) {
this.customs.put(ComposableIndexTemplateMetadata.TYPE, new ComposableIndexTemplateMetadata(existingTemplates));
}
}
return this;
}
public DataStream dataStream(String dataStreamName) {
return dataStreamMetadata().dataStreams().get(dataStreamName);
}
public Builder dataStreams(Map<String, DataStream> dataStreams, Map<String, DataStreamAlias> dataStreamAliases) {
previousIndicesLookup = null;
// Only perform data stream validation only when data streams are modified in Metadata:
for (DataStream dataStream : dataStreams.values()) {
dataStream.validate(indices::get);
}
this.customs.put(
DataStreamMetadata.TYPE,
new DataStreamMetadata(
ImmutableOpenMap.<String, DataStream>builder().putAllFromMap(dataStreams).build(),
ImmutableOpenMap.<String, DataStreamAlias>builder().putAllFromMap(dataStreamAliases).build()
)
);
return this;
}
public Builder put(DataStream dataStream) {
Objects.requireNonNull(dataStream, "it is invalid to add a null data stream");
previousIndicesLookup = null;
// Every time the backing indices of a data stream is modified a new instance will be created and
// that instance needs to be added here. So this is a good place to do data stream validation for
// the data stream and all of its backing indices. Doing this validation in the build() method would
// trigger this validation on each new Metadata creation, even if there are no changes to data streams.
dataStream.validate(indices::get);
this.customs.put(DataStreamMetadata.TYPE, dataStreamMetadata().withAddedDatastream(dataStream));
return this;
}
public DataStreamMetadata dataStreamMetadata() {
return (DataStreamMetadata) this.customs.getOrDefault(DataStreamMetadata.TYPE, DataStreamMetadata.EMPTY);
}
public boolean put(String aliasName, String dataStream, Boolean isWriteDataStream, String filter) {
previousIndicesLookup = null;
DataStreamMetadata existing = dataStreamMetadata();
DataStreamMetadata updated = existing.withAlias(aliasName, dataStream, isWriteDataStream, filter);
if (existing == updated) {
return false;
}
this.customs.put(DataStreamMetadata.TYPE, updated);
return true;
}
public Builder removeDataStream(String name) {
previousIndicesLookup = null;
this.customs.put(DataStreamMetadata.TYPE, dataStreamMetadata().withRemovedDataStream(name));
return this;
}
public boolean removeDataStreamAlias(String aliasName, String dataStreamName, boolean mustExist) {
previousIndicesLookup = null;
DataStreamMetadata existing = dataStreamMetadata();
DataStreamMetadata updated = existing.withRemovedAlias(aliasName, dataStreamName, mustExist);
if (existing == updated) {
return false;
}
this.customs.put(DataStreamMetadata.TYPE, updated);
return true;
}
@SuppressWarnings("unchecked")
public <T extends Metadata.ProjectCustom> T getCustom(String type) {
return (T) customs.get(type);
}
public Builder putCustom(String type, Metadata.ProjectCustom custom) {
customs.put(type, Objects.requireNonNull(custom, type));
return this;
}
public Builder removeCustom(String type) {
customs.remove(type);
return this;
}
public Builder removeCustomIf(BiPredicate<String, ? super Metadata.ProjectCustom> p) {
customs.removeAll(p);
return this;
}
public Builder customs(Map<String, Metadata.ProjectCustom> customs) {
customs.forEach((key, value) -> Objects.requireNonNull(value, key));
this.customs.putAllFromMap(customs);
return this;
}
public Builder indexGraveyard(final IndexGraveyard indexGraveyard) {
return putCustom(IndexGraveyard.TYPE, indexGraveyard);
}
public IndexGraveyard indexGraveyard() {
return (IndexGraveyard) getCustom(IndexGraveyard.TYPE);
}
public Builder updateSettings(Settings settings, String... indices) {
if (indices == null || indices.length == 0) {
indices = this.indices.keys().toArray(String[]::new);
}
for (String index : indices) {
IndexMetadata indexMetadata = this.indices.get(index);
if (indexMetadata == null) {
throw new IndexNotFoundException(index);
}
// Updating version is required when updating settings.
// Otherwise, settings changes may not be replicated to remote clusters.
long newVersion = indexMetadata.getSettingsVersion() + 1;
put(
IndexMetadata.builder(indexMetadata)
.settings(Settings.builder().put(indexMetadata.getSettings()).put(settings))
.settingsVersion(newVersion)
);
}
return this;
}
/**
* Update the number of replicas for the specified indices.
*
* @param numberOfReplicas the number of replicas
* @param indices the indices to update the number of replicas for
* @return the builder
*/
public Builder updateNumberOfReplicas(int numberOfReplicas, String... indices) {
for (String index : indices) {
IndexMetadata indexMetadata = this.indices.get(index);
if (indexMetadata == null) {
throw new IndexNotFoundException(index);
}
put(IndexMetadata.builder(indexMetadata).numberOfReplicas(numberOfReplicas));
}
return this;
}
public ProjectMetadata build() {
return build(false);
}
public ProjectMetadata build(boolean skipNameCollisionChecks) {
// TODO: We should move these datastructures to IndexNameExpressionResolver, this will give the following benefits:
// 1) The datastructures will be rebuilt only when needed. Now during serializing we rebuild these datastructures
// while these datastructures aren't even used.
// 2) The aliasAndIndexLookup can be updated instead of rebuilding it all the time.
final List<String> visibleIndices = new ArrayList<>();
final List<String> allOpenIndices = new ArrayList<>();
final List<String> visibleOpenIndices = new ArrayList<>();
final List<String> allClosedIndices = new ArrayList<>();
final List<String> visibleClosedIndices = new ArrayList<>();
final ImmutableOpenMap<String, IndexMetadata> indicesMap = indices.build();
int oldestIndexVersionId = IndexVersion.current().id();
int totalNumberOfShards = 0;
int totalOpenIndexShards = 0;
ImmutableOpenMap.Builder<String, Set<Index>> aliasedIndicesBuilder = ImmutableOpenMap.builder();
final String[] allIndicesArray = new String[indicesMap.size()];
int i = 0;
final Set<String> sha256HashesInUse = checkForUnusedMappings ? Sets.newHashSetWithExpectedSize(mappingsByHash.size()) : null;
for (var entry : indicesMap.entrySet()) {
allIndicesArray[i++] = entry.getKey();
final IndexMetadata indexMetadata = entry.getValue();
totalNumberOfShards += indexMetadata.getTotalNumberOfShards();
final String name = indexMetadata.getIndex().getName();
final boolean visible = indexMetadata.isHidden() == false;
if (visible) {
visibleIndices.add(name);
}
if (indexMetadata.getState() == IndexMetadata.State.OPEN) {
totalOpenIndexShards += indexMetadata.getTotalNumberOfShards();
allOpenIndices.add(name);
if (visible) {
visibleOpenIndices.add(name);
}
} else if (indexMetadata.getState() == IndexMetadata.State.CLOSE) {
allClosedIndices.add(name);
if (visible) {
visibleClosedIndices.add(name);
}
}
oldestIndexVersionId = Math.min(oldestIndexVersionId, indexMetadata.getCompatibilityVersion().id());
if (sha256HashesInUse != null) {
final var mapping = indexMetadata.mapping();
if (mapping != null) {
sha256HashesInUse.add(mapping.getSha256());
}
}
for (var alias : indexMetadata.getAliases().keySet()) {
var indices = aliasedIndicesBuilder.get(alias);
if (indices == null) {
indices = new HashSet<>();
aliasedIndicesBuilder.put(alias, indices);
}
indices.add(indexMetadata.getIndex());
}
}
for (String alias : aliasedIndicesBuilder.keys()) {
aliasedIndicesBuilder.put(alias, Collections.unmodifiableSet(aliasedIndicesBuilder.get(alias)));
}
var aliasedIndices = aliasedIndicesBuilder.build();
for (var entry : aliasedIndices.entrySet()) {
List<IndexMetadata> aliasIndices = entry.getValue().stream().map(idx -> indicesMap.get(idx.getName())).toList();
validateAlias(entry.getKey(), aliasIndices);
}
SortedMap<String, IndexAbstraction> indicesLookup = null;
if (previousIndicesLookup != null) {
// no changes to the names of indices, datastreams, and their aliases so we can reuse the previous lookup
assert previousIndicesLookup.equals(buildIndicesLookup(dataStreamMetadata(), indicesMap));
indicesLookup = previousIndicesLookup;
} else if (skipNameCollisionChecks == false) {
// we have changes to the entity names so we ensure we have no naming collisions
ensureNoNameCollisions(aliasedIndices.keySet(), indicesMap, dataStreamMetadata());
}
assert assertDataStreams(indicesMap, dataStreamMetadata());
if (sha256HashesInUse != null) {
mappingsByHash.keySet().retainAll(sha256HashesInUse);
}
// build all concrete indices arrays:
// TODO: I think we can remove these arrays. it isn't worth the effort, for operations on all indices.
// When doing an operation across all indices, most of the time is spent on actually going to all shards and
// do the required operations, the bottleneck isn't resolving expressions into concrete indices.
String[] visibleIndicesArray = visibleIndices.toArray(String[]::new);
String[] allOpenIndicesArray = allOpenIndices.toArray(String[]::new);
String[] visibleOpenIndicesArray = visibleOpenIndices.toArray(String[]::new);
String[] allClosedIndicesArray = allClosedIndices.toArray(String[]::new);
String[] visibleClosedIndicesArray = visibleClosedIndices.toArray(String[]::new);
return new ProjectMetadata(
id,
indicesMap,
aliasedIndices,
templates.build(),
customs.build(),
totalNumberOfShards,
totalOpenIndexShards,
allIndicesArray,
visibleIndicesArray,
allOpenIndicesArray,
visibleOpenIndicesArray,
allClosedIndicesArray,
visibleClosedIndicesArray,
indicesLookup,
Collections.unmodifiableMap(mappingsByHash),
IndexVersion.fromId(oldestIndexVersionId)
);
}
static void ensureNoNameCollisions(
Set<String> indexAliases,
ImmutableOpenMap<String, IndexMetadata> indicesMap,
DataStreamMetadata dataStreamMetadata
) {
List<String> duplicates = new ArrayList<>();
Set<String> aliasDuplicatesWithIndices = new HashSet<>();
Set<String> aliasDuplicatesWithDataStreams = new HashSet<>();
var allDataStreams = dataStreamMetadata.dataStreams();
// Adding data stream aliases:
for (String dataStreamAlias : dataStreamMetadata.getDataStreamAliases().keySet()) {
if (indexAliases.contains(dataStreamAlias)) {
duplicates.add("data stream alias and indices alias have the same name (" + dataStreamAlias + ")");
}
if (indicesMap.containsKey(dataStreamAlias)) {
aliasDuplicatesWithIndices.add(dataStreamAlias);
}
if (allDataStreams.containsKey(dataStreamAlias)) {
aliasDuplicatesWithDataStreams.add(dataStreamAlias);
}
}
for (String alias : indexAliases) {
if (allDataStreams.containsKey(alias)) {
aliasDuplicatesWithDataStreams.add(alias);
}
if (indicesMap.containsKey(alias)) {
aliasDuplicatesWithIndices.add(alias);
}
}
allDataStreams.forEach((key, value) -> {
if (indicesMap.containsKey(key)) {
duplicates.add("data stream [" + key + "] conflicts with index");
}
});
if (aliasDuplicatesWithIndices.isEmpty() == false) {
collectAliasDuplicates(indicesMap, aliasDuplicatesWithIndices, duplicates);
}
if (aliasDuplicatesWithDataStreams.isEmpty() == false) {
collectAliasDuplicates(indicesMap, dataStreamMetadata, aliasDuplicatesWithDataStreams, duplicates);
}
if (duplicates.isEmpty() == false) {
throw new IllegalStateException(
"index, alias, and data stream names need to be unique, but the following duplicates "
+ "were found ["
+ Strings.collectionToCommaDelimitedString(duplicates)
+ "]"
);
}
}
/**
* Iterates the detected duplicates between datastreams and aliases and collects them into the duplicates list as helpful messages.
*/
private static void collectAliasDuplicates(
ImmutableOpenMap<String, IndexMetadata> indicesMap,
DataStreamMetadata dataStreamMetadata,
Set<String> aliasDuplicatesWithDataStreams,
List<String> duplicates
) {
for (String alias : aliasDuplicatesWithDataStreams) {
// reported var avoids adding a message twice if an index alias has the same name as a data stream.
boolean reported = false;
for (IndexMetadata cursor : indicesMap.values()) {
if (cursor.getAliases().containsKey(alias)) {
duplicates.add(alias + " (alias of " + cursor.getIndex() + ") conflicts with data stream");
reported = true;
}
}
// This is for adding an error message for when a data stream alias has the same name as a data stream.
if (reported == false && dataStreamMetadata != null && dataStreamMetadata.dataStreams().containsKey(alias)) {
duplicates.add("data stream alias and data stream have the same name (" + alias + ")");
}
}
}
/**
* Collect all duplicate names across indices and aliases that were detected into a list of helpful duplicate failure messages.
*/
private static void collectAliasDuplicates(
ImmutableOpenMap<String, IndexMetadata> indicesMap,
Set<String> aliasDuplicatesWithIndices,
List<String> duplicates
) {
for (IndexMetadata cursor : indicesMap.values()) {
for (String alias : aliasDuplicatesWithIndices) {
if (cursor.getAliases().containsKey(alias)) {
duplicates.add(alias + " (alias of " + cursor.getIndex() + ") conflicts with index");
}
}
}
}
static SortedMap<String, IndexAbstraction> buildIndicesLookup(
DataStreamMetadata dataStreamMetadata,
ImmutableOpenMap<String, IndexMetadata> indices
) {
if (indices.isEmpty()) {
return Collections.emptySortedMap();
}
Map<String, IndexAbstraction> indicesLookup = new HashMap<>();
Map<String, DataStream> indexToDataStreamLookup = new HashMap<>();
collectDataStreams(dataStreamMetadata, indicesLookup, indexToDataStreamLookup);
Map<String, List<IndexMetadata>> aliasToIndices = new HashMap<>();
collectIndices(indices, indexToDataStreamLookup, indicesLookup, aliasToIndices);
collectAliases(aliasToIndices, indicesLookup);
// We do a ton of lookups on this map but also need its sorted properties at times.
// Using this hybrid of a sorted and a hash-map trades some heap overhead relative to just using a TreeMap
// for much faster O(1) lookups in large clusters.
return new SortedMap<>() {
private final SortedMap<String, IndexAbstraction> sortedMap = Collections.unmodifiableSortedMap(
new TreeMap<>(indicesLookup)
);
@Override
public Comparator<? super String> comparator() {
return sortedMap.comparator();
}
@Override
public SortedMap<String, IndexAbstraction> subMap(String fromKey, String toKey) {
return sortedMap.subMap(fromKey, toKey);
}
@Override
public SortedMap<String, IndexAbstraction> headMap(String toKey) {
return sortedMap.headMap(toKey);
}
@Override
public SortedMap<String, IndexAbstraction> tailMap(String fromKey) {
return sortedMap.tailMap(fromKey);
}
@Override
public String firstKey() {
return sortedMap.firstKey();
}
@Override
public String lastKey() {
return sortedMap.lastKey();
}
@Override
public Set<String> keySet() {
return sortedMap.keySet();
}
@Override
public Collection<IndexAbstraction> values() {
return sortedMap.values();
}
@Override
public Set<Entry<String, IndexAbstraction>> entrySet() {
return sortedMap.entrySet();
}
@Override
public int size() {
return indicesLookup.size();
}
@Override
public boolean isEmpty() {
return indicesLookup.isEmpty();
}
@Override
public boolean containsKey(Object key) {
return indicesLookup.containsKey(key);
}
@Override
public boolean containsValue(Object value) {
return indicesLookup.containsValue(value);
}
@Override
public IndexAbstraction get(Object key) {
return indicesLookup.get(key);
}
@Override
public IndexAbstraction put(String key, IndexAbstraction value) {
throw new UnsupportedOperationException();
}
@Override
public IndexAbstraction remove(Object key) {
throw new UnsupportedOperationException();
}
@Override
public void putAll(Map<? extends String, ? extends IndexAbstraction> m) {
throw new UnsupportedOperationException();
}
@Override
public void clear() {
throw new UnsupportedOperationException();
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
return indicesLookup.equals(obj);
}
@Override
public int hashCode() {
return indicesLookup.hashCode();
}
};
}
private static void collectAliases(Map<String, List<IndexMetadata>> aliasToIndices, Map<String, IndexAbstraction> indicesLookup) {
for (var entry : aliasToIndices.entrySet()) {
AliasMetadata alias = entry.getValue().get(0).getAliases().get(entry.getKey());
IndexAbstraction existing = indicesLookup.put(entry.getKey(), new IndexAbstraction.Alias(alias, entry.getValue()));
assert existing == null : "duplicate for " + entry.getKey();
}
}
private static void collectIndices(
Map<String, IndexMetadata> indices,
Map<String, DataStream> indexToDataStreamLookup,
Map<String, IndexAbstraction> indicesLookup,
Map<String, List<IndexMetadata>> aliasToIndices
) {
for (var entry : indices.entrySet()) {
String name = entry.getKey();
IndexMetadata indexMetadata = entry.getValue();
DataStream parent = indexToDataStreamLookup.get(name);
assert assertContainsIndexIfDataStream(parent, indexMetadata);
IndexAbstraction existing = indicesLookup.put(name, new IndexAbstraction.ConcreteIndex(indexMetadata, parent));
assert existing == null : "duplicate for " + indexMetadata.getIndex();
for (var aliasMetadata : indexMetadata.getAliases().values()) {
List<IndexMetadata> aliasIndices = aliasToIndices.computeIfAbsent(aliasMetadata.getAlias(), k -> new ArrayList<>());
aliasIndices.add(indexMetadata);
}
}
}
private static boolean assertContainsIndexIfDataStream(DataStream parent, IndexMetadata indexMetadata) {
assert parent == null
|| parent.getIndices().stream().anyMatch(index -> indexMetadata.getIndex().getName().equals(index.getName()))
|| parent.getFailureComponent()
.getIndices()
.stream()
.anyMatch(index -> indexMetadata.getIndex().getName().equals(index.getName()))
: "Expected data stream [" + parent.getName() + "] to contain index " + indexMetadata.getIndex();
return true;
}
private static void collectDataStreams(
DataStreamMetadata dataStreamMetadata,
Map<String, IndexAbstraction> indicesLookup,
Map<String, DataStream> indexToDataStreamLookup
) {
var dataStreams = dataStreamMetadata.dataStreams();
for (DataStreamAlias alias : dataStreamMetadata.getDataStreamAliases().values()) {
IndexAbstraction existing = indicesLookup.put(alias.getName(), makeDsAliasAbstraction(dataStreams, alias));
assert existing == null : "duplicate data stream alias for " + alias.getName();
}
for (DataStream dataStream : dataStreams.values()) {
IndexAbstraction existing = indicesLookup.put(dataStream.getName(), dataStream);
assert existing == null : "duplicate data stream for " + dataStream.getName();
for (Index i : dataStream.getIndices()) {
indexToDataStreamLookup.put(i.getName(), dataStream);
}
for (Index i : dataStream.getFailureIndices()) {
indexToDataStreamLookup.put(i.getName(), dataStream);
}
}
}
private static IndexAbstraction.Alias makeDsAliasAbstraction(Map<String, DataStream> dataStreams, DataStreamAlias alias) {
Index writeIndexOfWriteDataStream = null;
if (alias.getWriteDataStream() != null) {
DataStream writeDataStream = dataStreams.get(alias.getWriteDataStream());
writeIndexOfWriteDataStream = writeDataStream.getWriteIndex();
}
return new IndexAbstraction.Alias(
alias,
alias.getDataStreams().stream().flatMap(name -> dataStreams.get(name).getIndices().stream()).toList(),
writeIndexOfWriteDataStream,
alias.getDataStreams()
);
}
private static boolean isNonEmpty(List<IndexMetadata> idxMetas) {
return (Objects.isNull(idxMetas) || idxMetas.isEmpty()) == false;
}
static void validateAlias(String aliasName, List<IndexMetadata> indexMetadatas) {
// Validate write indices
List<String> writeIndices = indexMetadatas.stream()
.filter(idxMeta -> Boolean.TRUE.equals(idxMeta.getAliases().get(aliasName).writeIndex()))
.map(im -> im.getIndex().getName())
.toList();
if (writeIndices.size() > 1) {
throw new IllegalStateException(
"alias ["
+ aliasName
+ "] has more than one write index ["
+ Strings.collectionToCommaDelimitedString(writeIndices)
+ "]"
);
}
// Validate hidden status
Map<Boolean, List<IndexMetadata>> groupedByHiddenStatus = indexMetadatas.stream()
.collect(Collectors.groupingBy(idxMeta -> Boolean.TRUE.equals(idxMeta.getAliases().get(aliasName).isHidden())));
if (isNonEmpty(groupedByHiddenStatus.get(true)) && isNonEmpty(groupedByHiddenStatus.get(false))) {
List<String> hiddenOn = groupedByHiddenStatus.get(true).stream().map(idx -> idx.getIndex().getName()).toList();
List<String> nonHiddenOn = groupedByHiddenStatus.get(false).stream().map(idx -> idx.getIndex().getName()).toList();
throw new IllegalStateException(
"alias ["
+ aliasName
+ "] has is_hidden set to true on indices ["
+ Strings.collectionToCommaDelimitedString(hiddenOn)
+ "] but does not have is_hidden set to true on indices ["
+ Strings.collectionToCommaDelimitedString(nonHiddenOn)
+ "]; alias must have the same is_hidden setting "
+ "on all indices"
);
}
// Validate system status
Map<Boolean, List<IndexMetadata>> groupedBySystemStatus = indexMetadatas.stream()
.collect(Collectors.groupingBy(IndexMetadata::isSystem));
// If the alias has either all system or all non-system, then no more validation is required
if (isNonEmpty(groupedBySystemStatus.get(false)) && isNonEmpty(groupedBySystemStatus.get(true))) {
final List<String> newVersionSystemIndices = groupedBySystemStatus.get(true)
.stream()
.filter(i -> i.getCreationVersion().onOrAfter(IndexNameExpressionResolver.SYSTEM_INDEX_ENFORCEMENT_INDEX_VERSION))
.map(i -> i.getIndex().getName())
.sorted() // reliable error message for testing
.toList();
if (newVersionSystemIndices.isEmpty() == false) {
List<String> nonSystemIndices = groupedBySystemStatus.get(false)
.stream()
.map(i -> i.getIndex().getName())
.sorted() // reliable error message for testing
.toList();
throw new IllegalStateException(
"alias ["
+ aliasName
+ "] refers to both system indices "
+ newVersionSystemIndices
+ " and non-system indices: "
+ nonSystemIndices
+ ", but aliases must refer to either system or"
+ " non-system indices, not both"
);
}
}
}
static boolean assertDataStreams(Map<String, IndexMetadata> indices, DataStreamMetadata dsMetadata) {
// Sanity check, because elsewhere a more user friendly error should have occurred:
List<String> conflictingAliases = dsMetadata.dataStreams()
.values()
.stream()
.flatMap(ds -> ds.getIndices().stream())
.map(index -> indices.get(index.getName()))
.filter(Objects::nonNull)
.flatMap(im -> im.getAliases().values().stream())
.map(AliasMetadata::alias)
.toList();
if (conflictingAliases.isEmpty() == false) {
throw new AssertionError("aliases " + conflictingAliases + " cannot refer to backing indices of data streams");
}
return true;
}
@FixForMultiProject(description = "Remove reading reserved_state and settings") // ES-12795
public static ProjectMetadata fromXContent(XContentParser parser) throws IOException {
XContentParser.Token token = parser.currentToken();
if (token == null) {
token = parser.nextToken();
}
String currentFieldName = null;
XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser);
ProjectMetadata.Builder projectBuilder = new Builder();
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token.isValue()) {
switch (currentFieldName) {
case "id" -> projectBuilder.id(ProjectId.fromXContent(parser));
default -> throw new IllegalArgumentException("Unexpected field [" + currentFieldName + "]");
}
} else if (token == XContentParser.Token.START_OBJECT) {
switch (currentFieldName) {
// Remove this (ES-12795)
case "reserved_state" -> {
while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
ReservedStateMetadata.fromXContent(parser);
}
}
case "indices" -> {
while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
projectBuilder.put(IndexMetadata.Builder.fromXContent(parser), false);
}
}
case "templates" -> {
while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
projectBuilder.put(IndexTemplateMetadata.Builder.fromXContent(parser, parser.currentName()));
}
}
// Remove this (ES-12795)
case "settings" -> {
Settings.fromXContent(parser);
}
default -> Metadata.Builder.parseCustomObject(
parser,
currentFieldName,
Metadata.ProjectCustom.class,
projectBuilder::putCustom
);
}
} else {
throw new IllegalArgumentException("Unexpected token " + token);
}
}
return projectBuilder.build();
}
}
@Override
public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params p) {
Metadata.XContentContext context = Metadata.XContentContext.from(p);
Iterator<? extends ToXContent> indices = context == Metadata.XContentContext.API
? ChunkedToXContentHelper.object("indices", indices().values().iterator())
: Collections.emptyIterator();
final var multiProject = p.paramAsBoolean("multi-project", false);
Iterator<ToXContent> customs = Iterators.flatMap(customs().entrySet().iterator(), entry -> {
if (entry.getValue().context().contains(context)
// Include persistent tasks in the output only when multi-project=true.
// In single-project-mode (multi-project=false), we already output them in Metadata.
&& (multiProject || PersistentTasksCustomMetadata.TYPE.equals(entry.getKey()) == false)) {
return ChunkedToXContentHelper.object(entry.getKey(), entry.getValue().toXContentChunked(p));
} else {
return Collections.emptyIterator();
}
});
return Iterators.concat(
ChunkedToXContentHelper.object(
"templates",
Iterators.map(
templates().values().iterator(),
template -> (builder, params) -> IndexTemplateMetadata.Builder.toXContentWithTypes(template, builder, params)
)
),
indices,
customs
);
}
public static ProjectMetadata readFrom(StreamInput in) throws IOException {
ProjectId id = ProjectId.readFrom(in);
Builder builder = builder(id);
Function<String, MappingMetadata> mappingLookup;
Map<String, MappingMetadata> mappingMetadataMap = in.readMapValues(MappingMetadata::new, MappingMetadata::getSha256);
if (mappingMetadataMap.isEmpty() == false) {
mappingLookup = mappingMetadataMap::get;
} else {
mappingLookup = null;
}
int size = in.readVInt();
for (int i = 0; i < size; i++) {
builder.put(IndexMetadata.readFrom(in, mappingLookup), false);
}
size = in.readVInt();
for (int i = 0; i < size; i++) {
builder.put(IndexTemplateMetadata.readFrom(in));
}
readProjectCustoms(in, builder);
if (in.getTransportVersion().supports(PROJECT_RESERVED_STATE_MOVE_TO_REGISTRY) == false) {
int reservedStateSize = in.readVInt();
for (int i = 0; i < reservedStateSize; i++) {
ReservedStateMetadata.readFrom(in);
}
}
if (in.getTransportVersion().supports(PROJECT_METADATA_SETTINGS)
&& in.getTransportVersion().supports(CLUSTER_STATE_PROJECTS_SETTINGS) == false) {
Settings.readSettingsFromStream(in);
}
return builder.build();
}
private static void readProjectCustoms(StreamInput in, Builder builder) throws IOException {
Set<String> clusterScopedNames = in.namedWriteableRegistry().getReaders(Metadata.ProjectCustom.class).keySet();
int count = in.readVInt();
for (int i = 0; i < count; i++) {
String name = in.readString();
if (clusterScopedNames.contains(name)) {
Metadata.ProjectCustom custom = in.readNamedWriteable(Metadata.ProjectCustom.class, name);
builder.putCustom(custom.getWriteableName(), custom);
} else {
throw new IllegalArgumentException("Unknown project custom name [" + name + "]");
}
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
id.writeTo(out);
// we write the mapping metadata first and then write the indices without metadata so that
// we avoid writing duplicate mappings twice
out.writeMapValues(mappingsByHash);
out.writeVInt(indices.size());
for (IndexMetadata indexMetadata : this) {
indexMetadata.writeTo(out, true);
}
out.writeCollection(templates.values());
VersionedNamedWriteable.writeVersionedWriteables(out, customs.values());
if (out.getTransportVersion().supports(PROJECT_RESERVED_STATE_MOVE_TO_REGISTRY) == false) {
out.writeCollection(Collections.emptySet());
}
if (out.getTransportVersion().supports(PROJECT_METADATA_SETTINGS)
&& out.getTransportVersion().supports(CLUSTER_STATE_PROJECTS_SETTINGS) == false) {
Settings.EMPTY.writeTo(out);
}
}
// this needs to be package accessible for bwc serialization in Metadata.java
static | Builder |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetJobModelSnapshotsUpgradeStatsActionRequestTests.java | {
"start": 521,
"end": 1159
} | class ____ extends AbstractWireSerializingTestCase<Request> {
@Override
protected Request createTestInstance() {
Request request = new Request(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20));
request.setAllowNoMatch(randomBoolean());
return request;
}
@Override
protected Request mutateInstance(Request instance) {
return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929
}
@Override
protected Writeable.Reader<Request> instanceReader() {
return Request::new;
}
}
| GetJobModelSnapshotsUpgradeStatsActionRequestTests |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/support/task/task/BackgroundTaskTest.java | {
"start": 1893,
"end": 9653
} | class ____ extends TaskTestSupport {
@DisplayName("Test that the task does not run for more than the max duration when using a supplier with no delay")
@Test
@Timeout(10)
void testRunNoMoreSupplier() {
/*
* It should run at most 5 times in 4 seconds because:
* 1) there is no delay.
* 2) the interval is of 1 second
*/
BackgroundTask task = Tasks.backgroundTask()
.withScheduledExecutor(Executors.newSingleThreadScheduledExecutor())
.withBudget(Budgets.timeBudget()
.withInterval(Duration.ofSeconds(1))
.withInitialDelay(Duration.ZERO)
.withMaxDuration(Duration.ofSeconds(4))
.build())
.build();
boolean completed = task.run(camelContext, this::booleanSupplier);
assertTrue(taskCount.intValue() <= maxIterations);
assertFalse(completed, "The task did not complete, the return should be false");
Duration duration = task.elapsed();
assertNotNull(duration);
assertFalse(duration.isNegative());
assertFalse(duration.isZero());
assertTrue(duration.getSeconds() >= 4);
assertTrue(duration.getSeconds() <= 5);
}
@DisplayName("Test that the task does not run for more than the max duration when using a supplier with delay")
@Test
@Timeout(10)
void testRunNoMoreSupplierWithDelay() {
/*
* It should run approx most 4 times in 4 seconds because:
* 1) there is a delay.
* 2) the interval is of 1 second
*/
BackgroundTask task = Tasks.backgroundTask()
.withScheduledExecutor(Executors.newSingleThreadScheduledExecutor())
.withBudget(Budgets.timeBudget()
.withInterval(Duration.ofSeconds(1))
.withInitialDelay(Duration.ofSeconds(1))
.withMaxDuration(Duration.ofSeconds(4))
.build())
.build();
boolean completed = task.run(camelContext, this::booleanSupplier);
assertTrue(taskCount.intValue() < maxIterations, "number of runs: " + taskCount.intValue());
assertFalse(completed, "The task did not complete, the return should be false");
Duration duration = task.elapsed();
assertNotNull(duration);
assertFalse(duration.isNegative());
assertFalse(duration.isZero());
assertTrue(duration.getSeconds() >= 4);
assertTrue(duration.getSeconds() <= 5);
}
@DisplayName("Test that the task does not run for more than the max duration when using a predicate and an initial delay")
@Test
@Timeout(10)
void testRunNoMorePredicate() {
/*
* It should run at most 5 times in 4 seconds because:
* 1) there is no delay.
* 2) the interval is of 1 second
*/
BackgroundTask task = Tasks.backgroundTask()
.withScheduledExecutor(Executors.newSingleThreadScheduledExecutor())
.withBudget(Budgets.timeBudget()
.withInterval(Duration.ofSeconds(1))
.withInitialDelay(Duration.ZERO)
.withMaxDuration(Duration.ofSeconds(4))
.build())
.build();
boolean completed = task.run(camelContext, this::taskPredicate, new Object());
assertTrue(taskCount.intValue() <= maxIterations);
assertFalse(completed, "The task did not complete, the return should be false");
Duration duration = task.elapsed();
assertNotNull(duration);
assertFalse(duration.isNegative());
assertFalse(duration.isZero());
assertTrue(duration.getSeconds() >= 4);
assertTrue(duration.getSeconds() <= 5);
}
@DisplayName("Test that the task stops running once the predicate is true")
@Test
@Timeout(10)
void testRunNoMorePredicateWithSuccess() {
/*
* It should run 3 times in 4 seconds because when the task return successfully, the result must be
* deterministic.
*/
BackgroundTask task = Tasks.backgroundTask()
.withScheduledExecutor(Executors.newSingleThreadScheduledExecutor())
.withBudget(Budgets.timeBudget()
.withInterval(Duration.ofSeconds(1))
.withInitialDelay(Duration.ZERO)
.withMaxDuration(Duration.ofSeconds(4))
.build())
.build();
boolean completed = task.run(camelContext, this::taskPredicateWithDeterministicStop, Integer.valueOf(3));
assertEquals(3, taskCount.intValue());
assertTrue(completed, "The task did complete, the return should be true");
}
@DisplayName("Test that the task stops running once the predicate is true when the test is slow")
@Test
@Timeout(10)
void testRunNoMorePredicateWithTimeout() {
/*
* Each execution takes 2 seconds to complete. Therefore, running the task every second means that the task
* count should not exceed 2 because anything greater than that means that the timeout was exceeded.
*/
BackgroundTask task = Tasks.backgroundTask()
.withScheduledExecutor(Executors.newSingleThreadScheduledExecutor())
.withBudget(Budgets.timeBudget()
.withInterval(Duration.ofSeconds(1))
.withInitialDelay(Duration.ZERO)
.withMaxDuration(Duration.ofSeconds(4))
.build())
.build();
boolean completed = task.run(camelContext, this::taskPredicateWithDeterministicStopSlow, Integer.valueOf(3));
assertTrue(taskCount.intValue() <= 2, "Slow task: it should not run more than 2 times in 4 seconds");
Duration duration = task.elapsed();
assertNotNull(duration);
assertFalse(duration.isNegative());
assertFalse(duration.isZero());
assertTrue(duration.getSeconds() >= 4);
assertTrue(duration.getSeconds() <= 5);
assertFalse(completed, "The task did not complete because of timeout, the return should be false");
}
@DisplayName("Test that the task stops running once the predicate is true when the test is slow")
@Test
@Timeout(10)
void testRunNoMorePredicateWithTimeoutAndDelay() {
/*
* Each execution takes 2 seconds to complete, but it has a 1-second delay. Therefore, running the task every
* second means that the task count should not exceed 1 because anything greater than that means that the
* timeout was exceeded.
*/
BackgroundTask task = Tasks.backgroundTask()
.withScheduledExecutor(Executors.newSingleThreadScheduledExecutor())
.withBudget(Budgets.timeBudget()
.withInterval(Duration.ofSeconds(1))
.withInitialDelay(Duration.ofSeconds(1))
.withMaxDuration(Duration.ofSeconds(4))
.build())
.build();
boolean completed = task.run(camelContext, this::taskPredicateWithDeterministicStopSlow, Integer.valueOf(3));
Duration duration = task.elapsed();
assertNotNull(duration);
assertFalse(duration.isNegative());
assertFalse(duration.isZero());
assertTrue(duration.getSeconds() >= 4);
assertTrue(duration.getSeconds() <= 5);
assertFalse(completed, "The task did not complete because of timeout, the return should be false");
}
}
| BackgroundTaskTest |
java | quarkusio__quarkus | extensions/oidc-client-reactive-filter/deployment/src/test/java/io/quarkus/oidc/client/reactive/filter/OidcClientRequestFilterRevokedTokenDevModeTest.java | {
"start": 1854,
"end": 2183
} | class ____ extends AbstractOidcClientRequestReactiveFilter {
@Override
protected boolean refreshOnUnauthorized() {
return true;
}
}
@RegisterRestClient
@RegisterProvider(value = NamedClientRefreshEnabled.class)
@Path(MY_SERVER_RESOURCE_PATH)
public | DefaultClientRefreshEnabled |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/manytoone/lazy/ManyToOneLazyDeleteTest.java | {
"start": 1115,
"end": 1530
} | class ____ anymore.
*
* @author Luke Chen
*/
@JiraKey(value = "HHH-13945")
@RequiresDialectFeature(feature = DialectFeatureChecks.SupportsIdentityColumns.class)
@EnversTest
@DomainModel(annotatedClasses = { Shipment.class, Address.class, AddressVersion.class, User.class, ChildUser.class })
@ServiceRegistry(settings = @Setting(name = EnversSettings.STORE_DATA_AT_DELETE, value = "true"))
@SessionFactory
public | name |
java | apache__camel | components/camel-as2/camel-as2-api/src/main/java/org/apache/camel/component/as2/api/NotFoundHttpRequestHandler.java | {
"start": 1146,
"end": 1452
} | class ____ implements HttpRequestHandler {
@Override
public void handle(ClassicHttpRequest classicHttpRequest, ClassicHttpResponse classicHttpResponse, HttpContext httpContext)
throws HttpException, IOException {
classicHttpResponse.setCode(404);
}
}
| NotFoundHttpRequestHandler |
java | apache__camel | components/camel-aws/camel-aws2-kinesis/src/main/java/org/apache/camel/component/aws2/firehose/KinesisFirehose2Producer.java | {
"start": 2273,
"end": 10443
} | class ____ extends DefaultProducer {
private static final Logger LOG = LoggerFactory.getLogger(KinesisFirehose2Producer.class);
public KinesisFirehose2Producer(KinesisFirehose2Endpoint endpoint) {
super(endpoint);
}
@Override
public KinesisFirehose2Endpoint getEndpoint() {
return (KinesisFirehose2Endpoint) super.getEndpoint();
}
@Override
public void process(Exchange exchange) throws Exception {
KinesisFirehose2Operations operation = determineOperation(exchange);
if (ObjectHelper.isEmpty(operation)) {
processSingleRecord(exchange);
} else {
switch (operation) {
case sendBatchRecord:
sendBatchRecord(getClient(), exchange);
break;
case createDeliveryStream:
createDeliveryStream(getClient(), exchange);
break;
case deleteDeliveryStream:
deleteDeliveryStream(getClient(), exchange);
break;
case updateDestination:
updateDestination(getClient(), exchange);
break;
case describeDeliveryStream:
describeDeliveryStream(getClient(), exchange);
break;
default:
throw new IllegalArgumentException("Unsupported operation");
}
}
}
private void createDeliveryStream(FirehoseClient client, Exchange exchange) {
if (exchange.getIn().getBody() instanceof CreateDeliveryStreamRequest) {
CreateDeliveryStreamRequest req = exchange.getIn().getBody(CreateDeliveryStreamRequest.class);
CreateDeliveryStreamResponse result = client.createDeliveryStream(req);
Message message = getMessageForResponse(exchange);
message.setBody(result);
} else {
throw new IllegalArgumentException(
"The createDeliveryStream operation expects a CreateDeliveryStream instance as body");
}
}
private void deleteDeliveryStream(FirehoseClient client, Exchange exchange) {
if (exchange.getIn().getBody() instanceof DeleteDeliveryStreamRequest) {
DeleteDeliveryStreamRequest req = exchange.getIn().getBody(DeleteDeliveryStreamRequest.class);
DeleteDeliveryStreamResponse result = client.deleteDeliveryStream(req);
Message message = getMessageForResponse(exchange);
message.setBody(result);
} else {
if (ObjectHelper.isNotEmpty(exchange.getIn().getHeader(KinesisFirehose2Constants.KINESIS_FIREHOSE_STREAM_NAME))) {
DeleteDeliveryStreamRequest req = DeleteDeliveryStreamRequest.builder()
.deliveryStreamName(exchange.getIn().getHeader(KinesisFirehose2Constants.KINESIS_FIREHOSE_STREAM_NAME,
String.class))
.build();
DeleteDeliveryStreamResponse result = client.deleteDeliveryStream(req);
Message message = getMessageForResponse(exchange);
message.setBody(result);
} else {
throw new IllegalArgumentException(
"The deleteDeliveryStream operation expects at least an delivery stream name header or a DeleteDeliveryStreamRequest instance");
}
}
}
private void updateDestination(FirehoseClient client, Exchange exchange) {
if (exchange.getIn().getBody() instanceof CreateDeliveryStreamRequest) {
UpdateDestinationRequest req = exchange.getIn().getBody(UpdateDestinationRequest.class);
UpdateDestinationResponse result = client.updateDestination(req);
Message message = getMessageForResponse(exchange);
message.setBody(result);
} else {
throw new IllegalArgumentException(
"The updateDestination operation expects an UpdateDestinationRequest instance as body");
}
}
private void describeDeliveryStream(FirehoseClient client, Exchange exchange) {
if (exchange.getIn().getBody() instanceof DescribeDeliveryStreamRequest) {
DescribeDeliveryStreamRequest req = exchange.getIn().getBody(DescribeDeliveryStreamRequest.class);
DescribeDeliveryStreamResponse result = client.describeDeliveryStream(req);
Message message = getMessageForResponse(exchange);
message.setBody(result);
} else {
if (ObjectHelper.isNotEmpty(exchange.getIn().getHeader(KinesisFirehose2Constants.KINESIS_FIREHOSE_STREAM_NAME))) {
DescribeDeliveryStreamRequest req = DescribeDeliveryStreamRequest.builder()
.deliveryStreamName(exchange.getIn().getHeader(KinesisFirehose2Constants.KINESIS_FIREHOSE_STREAM_NAME,
String.class))
.build();
DescribeDeliveryStreamResponse result = client.describeDeliveryStream(req);
Message message = getMessageForResponse(exchange);
message.setBody(result);
} else {
throw new IllegalArgumentException(
"The describeDeliveryStream operation expects at least an delivery stream name header or a DeleteDeliveryStreamRequest instance");
}
}
}
private void sendBatchRecord(FirehoseClient client, Exchange exchange) {
if (exchange.getIn().getBody() instanceof Iterable) {
Iterable c = exchange.getIn().getBody(Iterable.class);
PutRecordBatchRequest.Builder batchRequest = PutRecordBatchRequest.builder();
batchRequest.deliveryStreamName(getEndpoint().getConfiguration().getStreamName());
batchRequest.records((Collection<Record>) c);
PutRecordBatchResponse result = client.putRecordBatch(batchRequest.build());
Message message = getMessageForResponse(exchange);
message.setBody(result);
} else {
PutRecordBatchRequest req = exchange.getIn().getBody(PutRecordBatchRequest.class);
PutRecordBatchResponse result = client.putRecordBatch(req);
Message message = getMessageForResponse(exchange);
message.setBody(result);
}
}
public void processSingleRecord(final Exchange exchange) {
PutRecordRequest request = createRequest(exchange);
LOG.trace("Sending request [{}] from exchange [{}]...", request, exchange);
PutRecordResponse putRecordResult = getEndpoint().getClient().putRecord(request);
LOG.trace("Received result [{}]", putRecordResult);
Message message = getMessageForResponse(exchange);
message.setHeader(KinesisFirehose2Constants.RECORD_ID, putRecordResult.recordId());
}
private PutRecordRequest createRequest(Exchange exchange) {
ByteBuffer body = exchange.getIn().getBody(ByteBuffer.class);
Record.Builder builder = Record.builder();
builder.data(SdkBytes.fromByteBuffer(body));
PutRecordRequest.Builder putRecordRequest = PutRecordRequest.builder();
putRecordRequest.deliveryStreamName(getEndpoint().getConfiguration().getStreamName());
putRecordRequest.record(builder.build());
return putRecordRequest.build();
}
public static Message getMessageForResponse(final Exchange exchange) {
return exchange.getMessage();
}
protected FirehoseClient getClient() {
return getEndpoint().getClient();
}
protected KinesisFirehose2Configuration getConfiguration() {
return getEndpoint().getConfiguration();
}
private KinesisFirehose2Operations determineOperation(Exchange exchange) {
KinesisFirehose2Operations operation = exchange.getIn().getHeader(KinesisFirehose2Constants.KINESIS_FIREHOSE_OPERATION,
KinesisFirehose2Operations.class);
if (operation == null) {
operation = getConfiguration().getOperation();
}
return operation;
}
}
| KinesisFirehose2Producer |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.