language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | google__gson | gson/src/test/java/com/google/gson/GsonBuilderTest.java | {
"start": 10541,
"end": 15234
} | class ____"
+ " interface");
}
@Ignore(
"Registering adapter for JsonElement is allowed (for now) for backward compatibility, see"
+ " https://github.com/google/gson/issues/2787")
@Test
public void testRegisterTypeHierarchyAdapterJsonElements() {
String errorMessage = "Cannot override built-in adapter for ";
Class<?>[] types = {
JsonElement.class, JsonArray.class,
};
GsonBuilder gsonBuilder = new GsonBuilder();
for (Class<?> type : types) {
IllegalArgumentException e =
assertThrows(
IllegalArgumentException.class,
() -> gsonBuilder.registerTypeHierarchyAdapter(type, NULL_TYPE_ADAPTER));
assertThat(e).hasMessageThat().isEqualTo(errorMessage + type);
}
// But registering type hierarchy adapter for Object should be allowed
gsonBuilder.registerTypeHierarchyAdapter(Object.class, NULL_TYPE_ADAPTER);
}
/**
* Verifies that (for now) registering hierarchy adapter for {@link JsonElement} and subclasses is
* possible, but has no effect. See {@link #testRegisterTypeHierarchyAdapterJsonElements()}.
*/
@Test
public void testRegisterTypeHierarchyAdapterJsonElements_Allowed() {
Gson gson =
new GsonBuilder().registerTypeHierarchyAdapter(JsonArray.class, NULL_TYPE_ADAPTER).create();
TypeAdapter<JsonArray> adapter = gson.getAdapter(JsonArray.class);
// Does not use registered adapter
assertThat(adapter).isNotSameInstanceAs(NULL_TYPE_ADAPTER);
assertThat(adapter.toJson(new JsonArray())).isEqualTo("[]");
}
@Test
public void testSetDateFormatWithInvalidPattern() {
GsonBuilder builder = new GsonBuilder();
String invalidPattern = "This is an invalid Pattern";
IllegalArgumentException e =
assertThrows(IllegalArgumentException.class, () -> builder.setDateFormat(invalidPattern));
assertThat(e)
.hasMessageThat()
.isEqualTo("The date pattern '" + invalidPattern + "' is not valid");
}
@Test
public void testSetDateFormatWithValidPattern() {
GsonBuilder builder = new GsonBuilder();
String validPattern = "yyyy-MM-dd";
// Should not throw an exception
builder.setDateFormat(validPattern);
}
@Test
public void testSetDateFormatNullPattern() {
GsonBuilder builder = new GsonBuilder();
@SuppressWarnings("JavaUtilDate")
Date date = new Date(0);
String originalFormatted = builder.create().toJson(date);
String customFormatted = builder.setDateFormat("yyyy-MM-dd").create().toJson(date);
assertThat(customFormatted).isNotEqualTo(originalFormatted);
// `null` should reset the format to the default
String resetFormatted = builder.setDateFormat(null).create().toJson(date);
assertThat(resetFormatted).isEqualTo(originalFormatted);
}
/**
* Tests behavior for an empty date pattern; this behavior is not publicly documented at the
* moment.
*/
@Test
public void testSetDateFormatEmptyPattern() {
GsonBuilder builder = new GsonBuilder();
@SuppressWarnings("JavaUtilDate")
Date date = new Date(0);
String originalFormatted = builder.create().toJson(date);
String emptyFormatted = builder.setDateFormat(" ").create().toJson(date);
// Empty pattern was ignored
assertThat(emptyFormatted).isEqualTo(originalFormatted);
}
@SuppressWarnings("deprecation") // for GsonBuilder.setDateFormat(int)
@Test
public void testSetDateFormatValidStyle() {
GsonBuilder builder = new GsonBuilder();
int[] validStyles = {DateFormat.FULL, DateFormat.LONG, DateFormat.MEDIUM, DateFormat.SHORT};
for (int style : validStyles) {
// Should not throw an exception
builder.setDateFormat(style);
builder.setDateFormat(style, style);
}
}
@SuppressWarnings("deprecation") // for GsonBuilder.setDateFormat(int)
@Test
public void testSetDateFormatInvalidStyle() {
GsonBuilder builder = new GsonBuilder();
IllegalArgumentException e =
assertThrows(IllegalArgumentException.class, () -> builder.setDateFormat(-1));
assertThat(e).hasMessageThat().isEqualTo("Invalid style: -1");
e = assertThrows(IllegalArgumentException.class, () -> builder.setDateFormat(4));
assertThat(e).hasMessageThat().isEqualTo("Invalid style: 4");
e =
assertThrows(
IllegalArgumentException.class, () -> builder.setDateFormat(-1, DateFormat.FULL));
assertThat(e).hasMessageThat().isEqualTo("Invalid style: -1");
e =
assertThrows(
IllegalArgumentException.class, () -> builder.setDateFormat(DateFormat.FULL, -1));
assertThat(e).hasMessageThat().isEqualTo("Invalid style: -1");
}
}
| or |
java | google__dagger | javatests/dagger/hilt/processor/internal/aggregateddeps/AggregatedDepsProcessorErrorsTest.java | {
"start": 9312,
"end": 9435
} | class ____ {",
" @Module",
" @InstallIn(SingletonComponent.class)",
" static final | Outer |
java | google__error-prone | core/src/test/java/com/google/errorprone/fixes/SuggestedFixesTest.java | {
"start": 53444,
"end": 54207
} | class ____ {
void e() {}
void f() throws Exception {}
void g() throws RuntimeException {}
void h() throws RuntimeException, IOException {}
}
""")
.doTest();
}
@Test
public void unusedImportInPackageInfo() {
CompilationTestHelper.newInstance(RemoveUnusedImports.class, getClass())
.addSourceLines(
"in/com/example/package-info.java",
"""
package com.example;
// BUG: Diagnostic contains: Did you mean to remove this line?
import java.util.Map;
""")
.doTest();
}
/** Test checker that renames variables. */
@BugPattern(summary = "", severity = ERROR)
public static | Test |
java | spring-projects__spring-framework | spring-jms/src/main/java/org/springframework/jms/core/MessageCreator.java | {
"start": 1277,
"end": 1638
} | interface ____ {
/**
* Create a {@link Message} to be sent.
* @param session the JMS {@link Session} to be used to create the
* {@code Message} (never {@code null})
* @return the {@code Message} to be sent
* @throws jakarta.jms.JMSException if thrown by JMS API methods
*/
Message createMessage(Session session) throws JMSException;
}
| MessageCreator |
java | netty__netty | common/src/main/java/io/netty/util/ResourceLeakDetector.java | {
"start": 1599,
"end": 2607
} | class ____<T> {
private static final String PROP_LEVEL_OLD = "io.netty.leakDetectionLevel";
private static final String PROP_LEVEL = "io.netty.leakDetection.level";
private static final Level DEFAULT_LEVEL = Level.SIMPLE;
private static final String PROP_TARGET_RECORDS = "io.netty.leakDetection.targetRecords";
private static final int DEFAULT_TARGET_RECORDS = 4;
private static final String PROP_SAMPLING_INTERVAL = "io.netty.leakDetection.samplingInterval";
// There is a minor performance benefit in TLR if this is a power of 2.
private static final int DEFAULT_SAMPLING_INTERVAL = 128;
private static final String PROP_TRACK_CLOSE = "io.netty.leakDetection.trackClose";
private static final boolean DEFAULT_TRACK_CLOSE = true;
private static final int TARGET_RECORDS;
static final int SAMPLING_INTERVAL;
private static final boolean TRACK_CLOSE;
/**
* Represents the level of resource leak detection.
*/
public | ResourceLeakDetector |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialResponse.java | {
"start": 622,
"end": 688
} | class ____ extends ActionResponse {
public | PostStartTrialResponse |
java | apache__camel | components/camel-jackson-avro/src/main/java/org/apache/camel/component/jackson/avro/transform/AvroSchemaResolver.java | {
"start": 2141,
"end": 2277
} | class ____ is either set as an explicit exchange property
* or it gets resolved from exchange body type information.
*/
public | information |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/scripting/config/ScriptBeanDefinitionParser.java | {
"start": 2547,
"end": 3544
} | class ____ extends AbstractBeanDefinitionParser {
private static final String ENGINE_ATTRIBUTE = "engine";
private static final String SCRIPT_SOURCE_ATTRIBUTE = "script-source";
private static final String INLINE_SCRIPT_ELEMENT = "inline-script";
private static final String SCOPE_ATTRIBUTE = "scope";
private static final String AUTOWIRE_ATTRIBUTE = "autowire";
private static final String DEPENDS_ON_ATTRIBUTE = "depends-on";
private static final String INIT_METHOD_ATTRIBUTE = "init-method";
private static final String DESTROY_METHOD_ATTRIBUTE = "destroy-method";
private static final String SCRIPT_INTERFACES_ATTRIBUTE = "script-interfaces";
private static final String REFRESH_CHECK_DELAY_ATTRIBUTE = "refresh-check-delay";
private static final String PROXY_TARGET_CLASS_ATTRIBUTE = "proxy-target-class";
private static final String CUSTOMIZER_REF_ATTRIBUTE = "customizer-ref";
/**
* The {@link org.springframework.scripting.ScriptFactory} | ScriptBeanDefinitionParser |
java | quarkusio__quarkus | extensions/agroal/runtime/src/main/java/io/quarkus/agroal/runtime/DataSourceJdbcBuildTimeConfig.java | {
"start": 322,
"end": 546
} | interface ____ {
/**
* If we create a JDBC datasource for this datasource.
*/
@WithParentName
@WithDefault("true")
boolean enabled();
/**
* The datasource driver | DataSourceJdbcBuildTimeConfig |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/operators/testutils/types/IntPairSerializer.java | {
"start": 1259,
"end": 2934
} | class ____ extends TypeSerializer<IntPair> {
private static final long serialVersionUID = 1L;
@Override
public boolean isImmutableType() {
return false;
}
@Override
public IntPairSerializer duplicate() {
return this;
}
@Override
public IntPair createInstance() {
return new IntPair();
}
@Override
public IntPair copy(IntPair from) {
return new IntPair(from.getKey(), from.getValue());
}
@Override
public IntPair copy(IntPair from, IntPair reuse) {
reuse.setKey(from.getKey());
reuse.setValue(from.getValue());
return reuse;
}
@Override
public int getLength() {
return 8;
}
@Override
public void serialize(IntPair record, DataOutputView target) throws IOException {
target.writeInt(record.getKey());
target.writeInt(record.getValue());
}
@Override
public IntPair deserialize(DataInputView source) throws IOException {
return new IntPair(source.readInt(), source.readInt());
}
@Override
public IntPair deserialize(IntPair reuse, DataInputView source) throws IOException {
reuse.setKey(source.readInt());
reuse.setValue(source.readInt());
return reuse;
}
@Override
public void copy(DataInputView source, DataOutputView target) throws IOException {
target.write(source, 8);
}
@Override
public boolean equals(Object obj) {
return obj instanceof IntPairSerializer;
}
@Override
public int hashCode() {
return IntPairSerializer.class.hashCode();
}
public static final | IntPairSerializer |
java | apache__kafka | group-coordinator/src/main/java/org/apache/kafka/coordinator/group/ConsumerGroupMigrationPolicy.java | {
"start": 2281,
"end": 2794
} | enum ____, in a case-insensitive manner.
*
* @return The {{@link ConsumerGroupMigrationPolicy}} according to the string passed. None is returned if
* the string doesn't correspond to a valid policy.
*/
public static ConsumerGroupMigrationPolicy parse(String name) {
if (name == null) {
return DISABLED;
}
ConsumerGroupMigrationPolicy policy = NAME_TO_ENUM.get(name.toLowerCase(Locale.ROOT));
return policy == null ? DISABLED : policy;
}
}
| value |
java | quarkusio__quarkus | extensions/vertx-http/deployment/src/test/java/io/quarkus/vertx/http/security/CustomNamedHttpSecPolicy.java | {
"start": 313,
"end": 950
} | class ____ implements HttpSecurityPolicy {
@Override
public Uni<CheckResult> checkPermission(RoutingContext request, Uni<SecurityIdentity> identity,
AuthorizationRequestContext requestContext) {
if (isRequestAuthorized(request)) {
return Uni.createFrom().item(CheckResult.PERMIT);
}
return Uni.createFrom().item(CheckResult.DENY);
}
private static boolean isRequestAuthorized(RoutingContext request) {
return request.request().headers().contains("hush-hush");
}
@Override
public String name() {
return "custom123";
}
}
| CustomNamedHttpSecPolicy |
java | elastic__elasticsearch | x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZipErrorTests.java | {
"start": 803,
"end": 1430
} | class ____ extends ErrorsForCasesWithoutExamplesTestCase {
@Override
protected List<TestCaseSupplier> cases() {
return paramsToSuppliers(MvZipTests.parameters());
}
@Override
protected Expression build(Source source, List<Expression> args) {
return new MvZip(source, args.get(0), args.get(1), args.size() > 2 ? args.get(2) : null);
}
@Override
protected Matcher<String> expectedTypeErrorMatcher(List<Set<DataType>> validPerPosition, List<DataType> signature) {
return equalTo(typeErrorMessage(true, validPerPosition, signature, (v, p) -> "string"));
}
}
| MvZipErrorTests |
java | apache__logging-log4j2 | log4j-core-test/src/test/java/org/apache/logging/log4j/core/filter/AbstractFilterTest.java | {
"start": 1255,
"end": 2819
} | class ____ {
@Test
void testUnrolledBackwardsCompatible() {
final ConcreteFilter filter = new ConcreteFilter();
final Filter.Result expected = Filter.Result.DENY;
verifyMethodsWithUnrolledVarargs(filter, Filter.Result.DENY);
filter.testResult = Filter.Result.ACCEPT;
verifyMethodsWithUnrolledVarargs(filter, Filter.Result.ACCEPT);
}
private void verifyMethodsWithUnrolledVarargs(final ConcreteFilter filter, final Filter.Result expected) {
final Logger logger = null;
final Level level = null;
final Marker marker = null;
assertEquals(expected, filter.filter(logger, level, marker, "", 1));
assertEquals(expected, filter.filter(logger, level, marker, "", 1, 2));
assertEquals(expected, filter.filter(logger, level, marker, "", 1, 2, 3));
assertEquals(expected, filter.filter(logger, level, marker, "", 1, 2, 3, 4));
assertEquals(expected, filter.filter(logger, level, marker, "", 1, 2, 3, 4, 5));
assertEquals(expected, filter.filter(logger, level, marker, "", 1, 2, 3, 4, 5, 6));
assertEquals(expected, filter.filter(logger, level, marker, "", 1, 2, 3, 4, 5, 6, 7));
assertEquals(expected, filter.filter(logger, level, marker, "", 1, 2, 3, 4, 5, 6, 7, 8));
assertEquals(expected, filter.filter(logger, level, marker, "", 1, 2, 3, 4, 5, 6, 7, 8, 9));
assertEquals(expected, filter.filter(logger, level, marker, "", 1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
}
/**
* Concreted filter | AbstractFilterTest |
java | apache__rocketmq | common/src/main/java/org/apache/rocketmq/common/utils/NetworkUtil.java | {
"start": 1599,
"end": 3543
} | class ____ {
public static final String OS_NAME = System.getProperty("os.name");
private static final Logger log = LoggerFactory.getLogger(LoggerName.COMMON_LOGGER_NAME);
private static boolean isLinuxPlatform = false;
private static boolean isWindowsPlatform = false;
static {
if (OS_NAME != null && OS_NAME.toLowerCase().contains("linux")) {
isLinuxPlatform = true;
}
if (OS_NAME != null && OS_NAME.toLowerCase().contains("windows")) {
isWindowsPlatform = true;
}
}
public static boolean isWindowsPlatform() {
return isWindowsPlatform;
}
public static Selector openSelector() throws IOException {
Selector result = null;
if (isLinuxPlatform()) {
try {
final Class<?> providerClazz = Class.forName("sun.nio.ch.EPollSelectorProvider");
try {
final Method method = providerClazz.getMethod("provider");
final SelectorProvider selectorProvider = (SelectorProvider) method.invoke(null);
if (selectorProvider != null) {
result = selectorProvider.openSelector();
}
} catch (final Exception e) {
log.warn("Open ePoll Selector for linux platform exception", e);
}
} catch (final Exception e) {
// ignore
}
}
if (result == null) {
result = Selector.open();
}
return result;
}
public static boolean isLinuxPlatform() {
return isLinuxPlatform;
}
public static List<InetAddress> getLocalInetAddressList() throws SocketException {
Enumeration<NetworkInterface> enumeration = NetworkInterface.getNetworkInterfaces();
List<InetAddress> inetAddressList = new ArrayList<>();
// Traversal Network | NetworkUtil |
java | quarkusio__quarkus | extensions/qute/deployment/src/test/java/io/quarkus/qute/deployment/enums/TemplateEnumValidationFailureTest.java | {
"start": 490,
"end": 1627
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot(root -> root
.addClasses(TransactionType.class)
.addAsResource(new StringAsset(
"{TransactionType:FOO}{TransactionType:BAR.scores}"),
"templates/foo.txt"))
.assertException(t -> {
Throwable e = t;
TemplateException te = null;
while (e != null) {
if (e instanceof TemplateException) {
te = (TemplateException) e;
break;
}
e = e.getCause();
}
assertNotNull(te);
assertTrue(te.getMessage().contains("Found incorrect expressions (1)"), te.getMessage());
assertTrue(te.getMessage().contains("TransactionType:BAR.scores"), te.getMessage());
});
@Test
public void test() {
fail();
}
@TemplateEnum
public static | TemplateEnumValidationFailureTest |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCollect.java | {
"start": 3078,
"end": 4972
} | class ____
implements Reducer<IntWritable, IntWritable, IntWritable, IntWritable> {
static int numSeen;
static int actualSum;
public void configure(JobConf job) { }
public void reduce(IntWritable key, Iterator<IntWritable> val,
OutputCollector<IntWritable, IntWritable> out,
Reporter reporter) throws IOException {
actualSum += key.get(); // keep the running count of the seen values
numSeen++; // number of values seen so far
// using '1+2+3+...n = n*(n+1)/2' to validate
int expectedSum = numSeen * (numSeen + 1) / 2;
if (expectedSum != actualSum) {
throw new IOException("Collect test failed!! Ordering mismatch.");
}
}
public void close() { }
}
public void configure(JobConf conf) throws IOException {
conf.setJobName("TestCollect");
conf.setJarByClass(TestCollect.class);
conf.setInputFormat(RandomInputFormat.class); // for self data generation
conf.setOutputKeyClass(IntWritable.class);
conf.setOutputValueClass(IntWritable.class);
FileOutputFormat.setOutputPath(conf, OUTPUT_DIR);
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
conf.setMapperClass(Map.class);
conf.setReducerClass(Reduce.class);
conf.setNumMapTasks(1);
conf.setNumReduceTasks(1);
}
@Test
public void testCollect() throws IOException {
JobConf conf = new JobConf();
configure(conf);
try {
JobClient.runJob(conf);
// check if all the values were seen by the reducer
if (Reduce.numSeen != (NUM_COLLECTS_PER_THREAD * NUM_FEEDERS)) {
throw new IOException("Collect test failed!! Total does not match.");
}
} catch (IOException ioe) {
throw ioe;
} finally {
FileSystem fs = FileSystem.get(conf);
fs.delete(OUTPUT_DIR, true);
}
}
}
| Reduce |
java | grpc__grpc-java | xds/src/main/java/io/grpc/xds/XdsLbPolicies.java | {
"start": 629,
"end": 1223
} | class ____ {
static final String CLUSTER_MANAGER_POLICY_NAME = "cluster_manager_experimental";
static final String CDS_POLICY_NAME = "cds_experimental";
static final String CLUSTER_RESOLVER_POLICY_NAME = "cluster_resolver_experimental";
static final String PRIORITY_POLICY_NAME = "priority_experimental";
static final String CLUSTER_IMPL_POLICY_NAME = "cluster_impl_experimental";
static final String WEIGHTED_TARGET_POLICY_NAME = "weighted_target_experimental";
static final String WRR_LOCALITY_POLICY_NAME = "wrr_locality_experimental";
private XdsLbPolicies() {}
}
| XdsLbPolicies |
java | elastic__elasticsearch | x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/promql/AutomatonUtilsTests.java | {
"start": 1644,
"end": 9989
} | class ____ extends ESTestCase {
@BeforeClass
public static void checkPromqlEnabled() {
assumeTrue("requires snapshot build with promql feature enabled", PromqlFeatures.isEnabled());
}
public void testExtractFragments_ExactString() {
// Single exact string (no wildcards)
List<PatternFragment> fragments = extractFragments("api");
assertThat(fragments, notNullValue());
assertThat(fragments, hasSize(1));
assertFragment(fragments.get(0), EXACT, "api");
}
public void testExtractFragments_Prefix() {
// Prefix pattern: prefix.*
List<PatternFragment> fragments = extractFragments("prod-.*");
assertThat(fragments, notNullValue());
assertThat(fragments, hasSize(1));
assertFragment(fragments.get(0), PREFIX, "prod-");
}
public void testExtractFragments_Suffix() {
// Suffix pattern: .*suffix
List<PatternFragment> fragments = extractFragments(".*-prod");
assertThat(fragments, notNullValue());
assertThat(fragments, hasSize(1));
assertFragment(fragments.get(0), SUFFIX, "-prod");
}
public void testExtractFragments_MixedAlternation() {
// Mixed alternation: prefix|exact|suffix
List<PatternFragment> fragments = extractFragments("prod-.*|staging|.*-dev");
assertThat(fragments, notNullValue());
assertThat(fragments, hasSize(3));
Object[][] expected = { { PREFIX, "prod-" }, { EXACT, "staging" }, { SUFFIX, "-dev" } };
assertFragments(fragments, expected);
}
public void testExtractFragments_ProperPrefixSuffixAlternation() {
List<PatternFragment> fragments = extractFragments("prod-.+|.+-dev");
assertThat(fragments, notNullValue());
assertThat(fragments, hasSize(2));
Object[][] expected = { { PROPER_PREFIX, "prod-" }, { PROPER_SUFFIX, "-dev" } };
assertFragments(fragments, expected);
}
public void testExtractFragments_HomogeneousExactAlternation() {
// All exact values
List<PatternFragment> fragments = extractFragments("api|web|service");
assertThat(fragments, notNullValue());
assertThat(fragments, hasSize(3));
Object[][] expected = { { EXACT, "api" }, { EXACT, "web" }, { EXACT, "service" } };
assertFragments(fragments, expected);
}
public void testExtractFragments_HomogeneousPrefixAlternation() {
// All prefixes
List<PatternFragment> fragments = extractFragments("prod-.*|staging-.*|dev-.*");
assertThat(fragments, notNullValue());
assertThat(fragments, hasSize(3));
Object[][] expected = { { PREFIX, "prod-" }, { PREFIX, "staging-" }, { PREFIX, "dev-" } };
assertFragments(fragments, expected);
}
public void testExtractFragments_HomogeneousSuffixAlternation() {
// All suffixes
List<PatternFragment> fragments = extractFragments(".*-prod|.*-staging|.*-dev");
assertThat(fragments, notNullValue());
assertThat(fragments, hasSize(3));
Object[][] expected = { { SUFFIX, "-prod" }, { SUFFIX, "-staging" }, { SUFFIX, "-dev" } };
assertFragments(fragments, expected);
}
public void testExtractFragments_WithAnchors() {
// Pattern with anchors should be normalized
List<PatternFragment> fragments = extractFragments("^prod-.*|staging|.*-dev$");
assertThat(fragments, notNullValue());
assertThat(fragments, hasSize(3));
Object[][] expected = { { PREFIX, "prod-" }, { EXACT, "staging" }, { SUFFIX, "-dev" } };
assertFragments(fragments, expected);
}
public void testExtractFragments_ContainsPattern() {
// Contains pattern (.*substring.*) should return REGEX type
List<PatternFragment> fragments = extractFragments(".*error.*");
assertThat(fragments, notNullValue());
assertThat(fragments, hasSize(1));
assertFragment(fragments.get(0), REGEX, ".*error.*");
}
public void testExtractFragments_ComplexRegex() {
// Complex regex with character classes should return REGEX type
List<PatternFragment> fragments = extractFragments("[0-9]+");
assertThat(fragments, notNullValue());
assertThat(fragments, hasSize(1));
assertFragment(fragments.get(0), REGEX, "[0-9]+");
}
public void testExtractFragments_MixedWithRegex() {
// Mixed alternation with some REGEX fragments: exact|prefix|regex|suffix
List<PatternFragment> fragments = extractFragments("api|prod-.*|[0-9]+|.*-dev");
assertThat(fragments, notNullValue());
assertThat(fragments, hasSize(4));
Object[][] expected = { { EXACT, "api" }, { PREFIX, "prod-" }, { REGEX, "[0-9]+" }, { SUFFIX, "-dev" } };
assertFragments(fragments, expected);
}
public void testExtractFragments_ComplexPrefixPattern() {
// Prefix with complex part should return REGEX
List<PatternFragment> fragments = extractFragments("test[0-9]+.*");
assertThat(fragments, notNullValue());
assertThat(fragments, hasSize(1));
assertFragment(fragments.get(0), REGEX, "test[0-9]+.*");
}
public void testExtractFragments_ComplexSuffixPattern() {
// Suffix with complex part should return REGEX
List<PatternFragment> fragments = extractFragments(".*[a-z]{3}");
assertThat(fragments, notNullValue());
assertThat(fragments, hasSize(1));
assertFragment(fragments.get(0), REGEX, ".*[a-z]{3}");
}
public void testExtractFragments_NonMatchingPattern_NestedGroups() {
// Nested groups should return null
List<PatternFragment> fragments = extractFragments("(a(b|c))");
assertThat(fragments, nullValue());
}
public void testExtractFragments_NonMatchingPattern_EscapedPipe() {
// Escaped pipe should return null (too complex)
List<PatternFragment> fragments = extractFragments("a\\|b");
assertThat(fragments, nullValue());
}
public void testExtractFragments_RegexMetacharactersInAlternation() {
// Pattern with regex metacharacters - should classify correctly
List<PatternFragment> fragments = extractFragments("test.*|prod[0-9]");
assertThat(fragments, notNullValue());
assertThat(fragments, hasSize(2));
Object[][] expected = { { PREFIX, "test" }, { REGEX, "prod[0-9]" } };
assertFragments(fragments, expected);
}
public void testExtractFragments_NullPattern() {
// Null pattern should return null
List<PatternFragment> fragments = extractFragments(null);
assertThat(fragments, nullValue());
}
public void testExtractFragments_EmptyPattern() {
// Empty pattern should return single EXACT fragment with empty value
List<PatternFragment> fragments = extractFragments("");
assertThat(fragments, notNullValue());
assertThat(fragments, hasSize(1));
assertFragment(fragments.get(0), EXACT, "");
}
public void testExtractFragments_TooManyAlternations() {
// Create a pattern with more than MAX_IN_VALUES (256) alternations
StringBuilder pattern = new StringBuilder();
for (int i = 0; i < 300; i++) {
if (i > 0) {
pattern.append("|");
}
pattern.append("a").append(i);
}
List<PatternFragment> fragments = extractFragments(pattern.toString());
// Should return null because it exceeds MAX_IN_VALUES
assertThat(fragments, nullValue());
}
private void assertFragment(PatternFragment fragment, PatternFragment.Type expectedType, String expectedValue) {
assertThat(fragment.type(), equalTo(expectedType));
assertThat(fragment.value(), equalTo(expectedValue));
}
private void assertFragments(List<PatternFragment> fragments, Object[][] expected) {
assertThat(fragments, hasSize(expected.length));
for (int i = 0; i < expected.length; i++) {
PatternFragment.Type expectedType = (PatternFragment.Type) expected[i][0];
String expectedValue = (String) expected[i][1];
assertFragment(fragments.get(i), expectedType, expectedValue);
}
}
}
| AutomatonUtilsTests |
java | quarkusio__quarkus | integration-tests/smallrye-graphql-client-keycloak/src/main/java/io/quarkus/io/smallrye/graphql/keycloak/GraphQLAuthExpiryTester.java | {
"start": 1044,
"end": 3188
} | class ____ {
@GET
@Path("/dynamic-subscription-auth-expiry/{clientinit}/{subprotocol}/{token}/{url}")
@Blocking
public void dynamicSubscription(@PathParam("clientinit") boolean clientInit, @PathParam("subprotocol") String subprotocol,
@PathParam("token") String token, @PathParam("url") String url) throws Exception {
DynamicGraphQLClientBuilder clientBuilder = DynamicGraphQLClientBuilder.newBuilder()
.url(url + "/graphql")
.executeSingleOperationsOverWebsocket(true)
.subprotocols(WebsocketSubprotocol.valueOf(subprotocol));
if (clientInit) {
clientBuilder.initPayload(Map.of("Authorization", "Bearer " + token));
} else {
clientBuilder.header("Authorization", "Bearer " + token);
}
try (DynamicGraphQLClient client = clientBuilder.build()) {
CompletableFuture<Void> authenticationExpired = new CompletableFuture<>();
AtomicBoolean receivedValue = new AtomicBoolean(false);
client.subscription("subscription { sub { value } }").subscribe().with(item -> {
if (item.hasData()) {
receivedValue.set(true);
} else {
authenticationExpired.completeExceptionally(new RuntimeException("Subscription provided no data"));
}
}, cause -> {
if (cause.getMessage().contains("Authentication expired")) {
authenticationExpired.complete(null);
} else {
authenticationExpired
.completeExceptionally(new RuntimeException("Invalid close response from server.", cause));
}
}, () -> authenticationExpired
.completeExceptionally(new RuntimeException("Subscription should not complete successfully")));
authenticationExpired.get(10, TimeUnit.SECONDS);
if (!receivedValue.get()) {
throw new RuntimeException("Did not receive subscription value");
}
}
}
}
| GraphQLAuthExpiryTester |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/engine/extension/BeforeAndAfterEachTests.java | {
"start": 13448,
"end": 13835
} | class ____ {
@BeforeEach
void beforeEach() {
callSequence.add("beforeEachMethod");
}
@Test
void test() {
callSequence.add("test");
throw new EnigmaException("@Test");
}
@AfterEach
void afterEach() {
callSequence.add("afterEachMethod");
}
}
// -------------------------------------------------------------------------
static | ExceptionInTestMethodTestCase |
java | spring-projects__spring-boot | core/spring-boot-autoconfigure/src/test/java/org/springframework/boot/autoconfigure/condition/ConditionalOnMissingBeanTests.java | {
"start": 33255,
"end": 33687
} | class ____ implements FactoryBean<ExampleBean> {
ExampleFactoryBean(String value) {
Assert.state(!value.contains("$"), "value should not contain '$'");
}
@Override
public ExampleBean getObject() {
return new ExampleBean("fromFactory");
}
@Override
public Class<?> getObjectType() {
return ExampleBean.class;
}
@Override
public boolean isSingleton() {
return false;
}
}
static | ExampleFactoryBean |
java | apache__camel | components/camel-thrift/src/test/java/org/apache/camel/component/thrift/ThriftConsumerConcurrentTest.java | {
"start": 2232,
"end": 5836
} | class ____ extends CamelTestSupport {
private static final Logger LOG = LoggerFactory.getLogger(ThriftConsumerConcurrentTest.class);
private static final int THRIFT_SYNC_REQUEST_TEST_PORT = AvailablePortFinder.getNextAvailable();
private static final int THRIFT_ASYNC_REQUEST_TEST_PORT = AvailablePortFinder.getNextAvailable();
private static final int THRIFT_TEST_NUM1 = 12;
private static final int CONCURRENT_THREAD_COUNT = 30;
private static final int ROUNDS_PER_THREAD_COUNT = 10;
private static AtomicInteger idCounter = new AtomicInteger();
public static Integer createId() {
return idCounter.getAndIncrement();
}
public static Integer getId() {
return idCounter.get();
}
@Test
public void testSyncWithConcurrentThreads() {
RunnableAssert ra = new RunnableAssert("testSyncWithConcurrentThreads") {
@Override
public void run() throws TTransportException {
TTransport transport = new TSocket("localhost", THRIFT_SYNC_REQUEST_TEST_PORT);
transport.open();
TProtocol protocol = new TBinaryProtocol(new TFramedTransport(transport));
Calculator.Client client = (new Calculator.Client.Factory()).getClient(protocol);
int instanceId = createId();
int calculateResponse = 0;
try {
calculateResponse = client.calculate(1, new Work(instanceId, THRIFT_TEST_NUM1, Operation.MULTIPLY));
} catch (TException e) {
LOG.info("Exception", e);
}
assertNotEquals(0, calculateResponse, "instanceId = " + instanceId);
assertEquals(instanceId * THRIFT_TEST_NUM1, calculateResponse);
transport.close();
}
};
new MultithreadingTester().add(ra).numThreads(CONCURRENT_THREAD_COUNT).numRoundsPerThread(ROUNDS_PER_THREAD_COUNT)
.run();
}
@Test
public void testAsyncWithConcurrentThreads() {
RunnableAssert ra = new RunnableAssert("testAsyncWithConcurrentThreads") {
@Override
public void run() throws TTransportException, IOException, InterruptedException {
final CountDownLatch latch = new CountDownLatch(1);
TNonblockingTransport transport = new TNonblockingSocket("localhost", THRIFT_ASYNC_REQUEST_TEST_PORT);
Calculator.AsyncClient client
= (new Calculator.AsyncClient.Factory(new TAsyncClientManager(), new TBinaryProtocol.Factory()))
.getAsyncClient(transport);
int instanceId = createId();
CalculateAsyncMethodCallback calculateCallback = new CalculateAsyncMethodCallback(latch);
try {
client.calculate(1, new Work(instanceId, THRIFT_TEST_NUM1, Operation.MULTIPLY), calculateCallback);
} catch (TException e) {
LOG.info("Exception", e);
}
latch.await(5, TimeUnit.SECONDS);
int calculateResponse = calculateCallback.getCalculateResponse();
LOG.debug("instanceId = {}", instanceId);
assertEquals(instanceId * THRIFT_TEST_NUM1, calculateResponse);
transport.close();
}
};
new MultithreadingTester().add(ra).numThreads(CONCURRENT_THREAD_COUNT).numRoundsPerThread(ROUNDS_PER_THREAD_COUNT)
.run();
}
public | ThriftConsumerConcurrentTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/FutureReturnValueIgnoredTest.java | {
"start": 9231,
"end": 11050
} | class ____ implements CanIgnoreMethod {
@Override
public Future<Object> getFuture() {
return null;
}
}
static void callIgnoredInterfaceMethod() {
new CanIgnoreImpl().getFuture();
}
@CanIgnoreReturnValue
static ListenableFuture<Object> getFutureIgnore() {
return immediateFuture(null);
}
static void putInMap() {
Map<Object, Future<?>> map = new HashMap<>();
map.put(new Object(), immediateFuture(null));
Map map2 = new HashMap();
map2.put(new Object(), immediateFuture(null));
}
static void preconditions()
throws NoSuchMethodException, InvocationTargetException, IllegalAccessException {
Preconditions.checkNotNull(getFuture());
Preconditions.checkNotNull(new Object());
FutureReturnValueIgnoredNegativeCases.class.getDeclaredMethod("preconditions").invoke(null);
}
static void checkIgnore() {
getFutureIgnore();
}
void ignoreForkJoinTaskFork(ForkJoinTask<?> t) {
t.fork();
}
void ignoreForkJoinTaskFork_subclass(RecursiveAction t) {
t.fork();
}
void ignoreExecutorCompletionServiceSubmit(ExecutorCompletionService s) {
s.submit(() -> null);
}
void ignoreChannelFutureAddListener(ChannelFuture cf) {
cf.addListener((ChannelFuture f) -> {});
}
void ignoreChannelFutureAddListeners(ChannelFuture cf) {
cf.addListeners((ChannelFuture f) -> {}, (ChannelFuture f) -> {});
}
<V> ListenableFuture<V> ignoreVarArgs(
Callable<V> combiner, Executor executor, ListenableFuture<?>... futures) {
return combine(combiner, executor, Arrays.asList(futures));
}
public static <V> ListenableFuture<V> combine(
final Callable<V> combiner,
Executor executor,
Iterable<? extends ListenableFuture<?>> futures) {
return null;
}
private static final | CanIgnoreImpl |
java | apache__camel | components/camel-ftp/src/test/java/org/apache/camel/component/file/remote/integration/FromFtpKeepLastModifiedNotStepwiseIT.java | {
"start": 925,
"end": 1181
} | class ____ extends FromFtpKeepLastModifiedIT {
@Override
protected String getFtpUrl() {
return "ftp://admin@localhost:{{ftp.server.port}}/keep?password=admin&binary=false&noop=true&stepwise=false";
}
}
| FromFtpKeepLastModifiedNotStepwiseIT |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/junit/jupiter/web/MultipleWebRequestsSpringExtensionTests.java | {
"start": 2036,
"end": 2683
} | class ____ {
MockMvc mockMvc;
@BeforeEach
void setUpMockMvc(WebApplicationContext wac) {
this.mockMvc = webAppContextSetup(wac)
.alwaysExpect(status().isOk())
.alwaysExpect(content().contentTypeCompatibleWith(APPLICATION_JSON))
.build();
}
@Test
void getPerson42() throws Exception {
this.mockMvc.perform(get("/person/42").accept(MediaType.APPLICATION_JSON))
.andExpect(jsonPath("$.name", is("Dilbert")));
}
@Test
void getPerson99() throws Exception {
this.mockMvc.perform(get("/person/99").accept(MediaType.APPLICATION_JSON))
.andExpect(jsonPath("$.name", is("Wally")));
}
}
| MultipleWebRequestsSpringExtensionTests |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/co/LegacyKeyedCoProcessOperatorTest.java | {
"start": 16681,
"end": 18416
} | class ____
extends CoProcessFunction<Integer, String, String> {
private static final long serialVersionUID = 1L;
private final ValueStateDescriptor<String> state =
new ValueStateDescriptor<>("seen-element", StringSerializer.INSTANCE);
@Override
public void processElement1(Integer value, Context ctx, Collector<String> out)
throws Exception {
handleValue(value, out, ctx.timerService(), 1);
}
@Override
public void processElement2(String value, Context ctx, Collector<String> out)
throws Exception {
handleValue(value, out, ctx.timerService(), 2);
}
private void handleValue(
Object value, Collector<String> out, TimerService timerService, int channel)
throws IOException {
final ValueState<String> state = getRuntimeContext().getState(this.state);
if (state.value() == null) {
out.collect("INPUT" + channel + ":" + value);
state.update(String.valueOf(value));
timerService.registerEventTimeTimer(timerService.currentWatermark() + 5);
} else {
state.clear();
timerService.deleteEventTimeTimer(timerService.currentWatermark() + 4);
}
}
@Override
public void onTimer(long timestamp, OnTimerContext ctx, Collector<String> out)
throws Exception {
assertThat(ctx.timeDomain()).isEqualTo(TimeDomain.EVENT_TIME);
out.collect("STATE:" + getRuntimeContext().getState(state).value());
}
}
private static | EventTimeTriggeringStatefulProcessFunction |
java | google__guava | android/guava-tests/test/com/google/common/collect/FilteredCollectionsTestUtil.java | {
"start": 7261,
"end": 9350
} | class ____<C extends SortedSet<Integer>>
extends AbstractFilteredSetTest<C> {
public void testFirst() {
for (List<Integer> contents : SAMPLE_INPUTS) {
C filtered = filter(createUnfiltered(contents), EVEN);
try {
Integer first = filtered.first();
assertFalse(filtered.isEmpty());
assertEquals(Ordering.natural().min(filtered), first);
} catch (NoSuchElementException e) {
assertTrue(filtered.isEmpty());
}
}
}
public void testLast() {
for (List<Integer> contents : SAMPLE_INPUTS) {
C filtered = filter(createUnfiltered(contents), EVEN);
try {
Integer first = filtered.last();
assertFalse(filtered.isEmpty());
assertEquals(Ordering.natural().max(filtered), first);
} catch (NoSuchElementException e) {
assertTrue(filtered.isEmpty());
}
}
}
@SuppressWarnings("unchecked")
public void testHeadSet() {
for (List<Integer> contents : SAMPLE_INPUTS) {
for (int i = 0; i < 10; i++) {
assertEquals(
filter((C) createUnfiltered(contents).headSet(i), EVEN),
filter(createUnfiltered(contents), EVEN).headSet(i));
}
}
}
@SuppressWarnings("unchecked")
public void testTailSet() {
for (List<Integer> contents : SAMPLE_INPUTS) {
for (int i = 0; i < 10; i++) {
assertEquals(
filter((C) createUnfiltered(contents).tailSet(i), EVEN),
filter(createUnfiltered(contents), EVEN).tailSet(i));
}
}
}
@SuppressWarnings("unchecked")
public void testSubSet() {
for (List<Integer> contents : SAMPLE_INPUTS) {
for (int i = 0; i < 10; i++) {
for (int j = i; j < 10; j++) {
assertEquals(
filter((C) createUnfiltered(contents).subSet(i, j), EVEN),
filter(createUnfiltered(contents), EVEN).subSet(i, j));
}
}
}
}
}
public abstract static | AbstractFilteredSortedSetTest |
java | quarkusio__quarkus | extensions/qute/deployment/src/main/java/io/quarkus/qute/deployment/TemplateDataBuildItem.java | {
"start": 531,
"end": 4444
} | class ____ extends MultiBuildItem {
private final ClassInfo targetClass;
private final String namespace;
private final String[] ignore;
private final Pattern[] ignorePatterns;
private final boolean ignoreSuperclasses;
private final boolean properties;
private final AnnotationInstance annotationInstance;
public TemplateDataBuildItem(AnnotationInstance annotationInstance, ClassInfo targetClass) {
this.annotationInstance = annotationInstance;
AnnotationValue ignoreValue = annotationInstance.value(ValueResolverGenerator.IGNORE);
AnnotationValue propertiesValue = annotationInstance.value(ValueResolverGenerator.PROPERTIES);
AnnotationValue namespaceValue = annotationInstance.value(ValueResolverGenerator.NAMESPACE);
AnnotationValue ignoreSuperclassesValue = annotationInstance.value(ValueResolverGenerator.IGNORE_SUPERCLASSES);
this.targetClass = targetClass;
String namespace = namespaceValue != null ? namespaceValue.asString() : TemplateData.UNDERSCORED_FQCN;
if (namespace.equals(TemplateData.UNDERSCORED_FQCN)) {
namespace = ValueResolverGenerator
.underscoredFullyQualifiedName(targetClass.name().toString());
} else if (namespace.equals(TemplateData.SIMPLENAME)) {
namespace = ValueResolverGenerator.simpleName(targetClass);
}
this.namespace = namespace;
this.ignore = ignoreValue != null ? ignoreValue.asStringArray() : new String[] {};
if (ignore.length > 0) {
ignorePatterns = new Pattern[ignore.length];
for (int i = 0; i < ignore.length; i++) {
ignorePatterns[i] = Pattern.compile(ignore[i]);
}
} else {
ignorePatterns = null;
}
this.ignoreSuperclasses = ignoreSuperclassesValue != null ? ignoreSuperclassesValue.asBoolean() : false;
this.properties = propertiesValue != null ? propertiesValue.asBoolean() : false;
}
public boolean isTargetAnnotatedType() {
AnnotationValue targetValue = annotationInstance.value(ValueResolverGenerator.TARGET);
return targetValue == null || targetValue.asClass().name().equals(ValueResolverGenerator.TEMPLATE_DATA);
}
public ClassInfo getTargetClass() {
return targetClass;
}
public boolean hasNamespace() {
return namespace != null;
}
public String getNamespace() {
return namespace;
}
public String[] getIgnore() {
return ignore;
}
public boolean isIgnoreSuperclasses() {
return ignoreSuperclasses;
}
public boolean isProperties() {
return properties;
}
public AnnotationInstance getAnnotationInstance() {
return annotationInstance;
}
boolean filter(AnnotationTarget target) {
String name = null;
if (target.kind() == Kind.METHOD) {
MethodInfo method = target.asMethod();
if (properties && !method.parameterTypes().isEmpty()) {
return false;
}
name = method.name();
} else if (target.kind() == Kind.FIELD) {
FieldInfo field = target.asField();
name = field.name();
}
if (ignorePatterns != null) {
for (Pattern ignorePattern : ignorePatterns) {
if (ignorePattern.matcher(name).matches()) {
return false;
}
}
}
return true;
}
@Override
public String toString() {
return "TemplateDataBuildItem [targetClass=" + targetClass + ", namespace=" + namespace + ", ignore="
+ Arrays.toString(ignore) + ", ignorePatterns=" + Arrays.toString(ignorePatterns) + ", ignoreSuperclasses="
+ ignoreSuperclasses + ", properties=" + properties + "]";
}
}
| TemplateDataBuildItem |
java | junit-team__junit5 | platform-tests/src/test/java/org/junit/platform/engine/support/hierarchical/ForkJoinDeadLockTests.java | {
"start": 5123,
"end": 6161
} | class ____
implements BeforeTestExecutionCallback, AfterTestExecutionCallback, BeforeAllCallback, AfterAllCallback {
@Override
public void beforeAll(ExtensionContext context) {
log("starting class " + context.getTestClass().orElseThrow().getSimpleName());
}
@Override
public void beforeTestExecution(ExtensionContext context) {
log("starting method " + context.getTestClass().orElseThrow().getSimpleName() + "."
+ context.getTestMethod().orElseThrow().getName());
}
@Override
public void afterTestExecution(ExtensionContext context) {
log("finishing method " + context.getTestClass().orElseThrow().getSimpleName() + "."
+ context.getTestMethod().orElseThrow().getName());
}
@Override
public void afterAll(ExtensionContext context) {
log("finishing class " + context.getTestClass().orElseThrow().getSimpleName());
}
}
private static void log(String message) {
System.out.println("[" + LocalTime.now() + "] " + Thread.currentThread().getName() + " - " + message);
}
}
| StartFinishLogger |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/common/util/concurrent/AsyncIOProcessor.java | {
"start": 1436,
"end": 6133
} | class ____<Item> {
private final Logger logger;
private final ArrayBlockingQueue<Tuple<Item, Consumer<Exception>>> queue;
private final ThreadContext threadContext;
private final Semaphore promiseSemaphore = new Semaphore(1);
protected AsyncIOProcessor(Logger logger, int queueSize, ThreadContext threadContext) {
this.logger = logger;
this.queue = new ArrayBlockingQueue<>(queueSize);
this.threadContext = threadContext;
}
/**
* Adds the given item to the queue. The listener is notified once the item is processed
*/
public final void put(Item item, Consumer<Exception> listener) {
Objects.requireNonNull(item, "item must not be null");
Objects.requireNonNull(listener, "listener must not be null");
// the algorithm here tires to reduce the load on each individual caller.
// we try to have only one caller that processes pending items to disc while others just add to the queue but
// at the same time never overload the node by pushing too many items into the queue.
// we first try make a promise that we are responsible for the processing
final boolean promised = promiseSemaphore.tryAcquire();
if (promised == false) {
// in this case we are not responsible and can just block until there is space
try {
queue.put(new Tuple<>(item, preserveContext(listener)));
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
listener.accept(e);
}
}
// here we have to try to make the promise again otherwise there is a race when a thread puts an entry without making the promise
// while we are draining that mean we might exit below too early in the while loop if the drainAndSync call is fast.
if (promised || promiseSemaphore.tryAcquire()) {
final List<Tuple<Item, Consumer<Exception>>> candidates = new ArrayList<>();
if (promised) {
// we are responsible for processing we don't need to add the tuple to the queue we can just add it to the candidates
// no need to preserve context for listener since it runs in current thread.
candidates.add(new Tuple<>(item, listener));
}
// since we made the promise to process we gotta do it here at least once
drainAndProcessAndRelease(candidates);
while (queue.isEmpty() == false && promiseSemaphore.tryAcquire()) {
// yet if the queue is not empty AND nobody else has yet made the promise to take over we continue processing
drainAndProcessAndRelease(candidates);
}
}
}
private void drainAndProcessAndRelease(List<Tuple<Item, Consumer<Exception>>> candidates) {
Exception exception;
try {
queue.drainTo(candidates);
exception = processList(candidates);
} finally {
promiseSemaphore.release();
}
notifyList(candidates, exception);
candidates.clear();
}
private Exception processList(List<Tuple<Item, Consumer<Exception>>> candidates) {
Exception exception = null;
if (candidates.isEmpty() == false) {
try {
write(candidates);
} catch (Exception ex) { // if this fails we are in deep shit - fail the request
logger.debug("failed to write candidates", ex);
// this exception is passed to all listeners - we don't retry. if this doesn't work we are in deep shit
exception = ex;
}
}
return exception;
}
private void notifyList(List<Tuple<Item, Consumer<Exception>>> candidates, Exception exception) {
for (Tuple<Item, Consumer<Exception>> tuple : candidates) {
Consumer<Exception> consumer = tuple.v2();
try {
consumer.accept(exception);
} catch (Exception ex) {
logger.warn("failed to notify callback", ex);
}
}
}
private Consumer<Exception> preserveContext(Consumer<Exception> consumer) {
Supplier<ThreadContext.StoredContext> restorableContext = threadContext.newRestorableContext(false);
return e -> {
try (ThreadContext.StoredContext ignore = restorableContext.get()) {
consumer.accept(e);
}
};
}
/**
* Writes or processes the items out or to disk.
*/
protected abstract void write(List<Tuple<Item, Consumer<Exception>>> candidates) throws IOException;
}
| AsyncIOProcessor |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/cache/spi/support/DomainDataRegionImpl.java | {
"start": 716,
"end": 1961
} | class ____ extends DomainDataRegionTemplate {
public DomainDataRegionImpl(
DomainDataRegionConfig regionConfig,
RegionFactoryTemplate regionFactory,
DomainDataStorageAccess domainDataStorageAccess,
CacheKeysFactory defaultKeysFactory,
DomainDataRegionBuildingContext buildingContext) {
super(
regionConfig,
regionFactory,
domainDataStorageAccess,
defaultKeysFactory,
buildingContext
);
}
@Override
protected EntityDataAccess generateTransactionalEntityDataAccess(EntityDataCachingConfig entityAccessConfig) {
return new EntityTransactionalAccess(
this,
getEffectiveKeysFactory(),
getCacheStorageAccess(),
entityAccessConfig
);
}
@Override
protected NaturalIdDataAccess generateTransactionalNaturalIdDataAccess(NaturalIdDataCachingConfig accessConfig) {
return new NaturalIdTransactionalAccess(
this,
getEffectiveKeysFactory(),
getCacheStorageAccess(),
accessConfig
);
}
@Override
protected CollectionDataAccess generateTransactionalCollectionDataAccess(CollectionDataCachingConfig accessConfig) {
return new CollectionTransactionAccess(
this,
getEffectiveKeysFactory(),
getCacheStorageAccess(),
accessConfig
);
}
}
| DomainDataRegionImpl |
java | google__error-prone | core/src/test/java/com/google/errorprone/matchers/JUnitMatchersTest.java | {
"start": 2562,
"end": 2841
} | class ____ {
@Before
public void someTest() {}
}
""")
.addSourceLines(
"BeforeClassAnnotationOnMethod.java",
"""
import org.junit.BeforeClass;
public | BeforeAnnotationOnMethod |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/ContainerRequest.java | {
"start": 1055,
"end": 1785
} | class ____ capture resource requests associated with a
* Container, this will be used by scheduler to recover resource requests if the
* container preempted or cancelled before AM acquire the container.
*
* It should include deducted resource requests when the container allocated.
*
* Lifecycle of the ContainerRequest is:
*
* <pre>
* 1) It is instantiated when container created.
* 2) It will be set to ContainerImpl by scheduler.
* 3) When container preempted or cancelled because of whatever reason before
* container acquired by AM. ContainerRequest will be added back to pending
* request pool.
* 4) It will be cleared from ContainerImpl if the container already acquired by
* AM.
* </pre>
*/
public | to |
java | spring-projects__spring-boot | loader/spring-boot-loader/src/test/java/org/springframework/boot/loader/nio/file/NestedFileStoreTests.java | {
"start": 4686,
"end": 4762
} | class ____ implements FileStoreAttributeView {
}
}
| TestFileStoreAttributeView |
java | elastic__elasticsearch | modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthIndicatorServiceTests.java | {
"start": 1862,
"end": 8934
} | class ____ extends ESTestCase {
private DataStreamLifecycleHealthIndicatorService service;
@Before
public void setupService() {
service = new DataStreamLifecycleHealthIndicatorService(TestProjectResolvers.singleProjectOnly(randomProjectIdOrDefault()));
}
public void testGreenWhenNoDSLHealthData() {
HealthIndicatorResult result = service.calculate(true, constructHealthInfo(null));
assertThat(result.status(), is(HealthStatus.GREEN));
assertThat(
result.symptom(),
is("No data stream lifecycle health data available yet. Health information will be reported after the first run.")
);
assertThat(result.details(), is(HealthIndicatorDetails.EMPTY));
assertThat(result.impacts(), is(List.of()));
assertThat(result.diagnosisList(), is(List.of()));
}
public void testGreenWhenEmptyListOfStagnatingIndices() {
HealthIndicatorResult result = service.calculate(true, constructHealthInfo(new DataStreamLifecycleHealthInfo(List.of(), 15)));
assertThat(result.status(), is(HealthStatus.GREEN));
assertThat(result.symptom(), is("Data streams are executing their lifecycles without issues"));
assertThat(result.details(), is(not(HealthIndicatorDetails.EMPTY)));
assertThat(Strings.toString(result.details()), containsString("\"total_backing_indices_in_error\":15"));
assertThat(result.impacts(), is(List.of()));
assertThat(result.diagnosisList(), is(List.of()));
}
public void testYellowWhenStagnatingIndicesPresent() {
String secondGenerationIndex = DataStream.getDefaultBackingIndexName("foo", 2L);
String firstGenerationIndex = DataStream.getDefaultBackingIndexName("foo", 1L);
HealthIndicatorResult result = service.calculate(
true,
constructHealthInfo(
new DataStreamLifecycleHealthInfo(
List.of(new DslErrorInfo(secondGenerationIndex, 1L, 200), new DslErrorInfo(firstGenerationIndex, 3L, 100)),
15
)
)
);
assertThat(result.status(), is(HealthStatus.YELLOW));
assertThat(result.symptom(), is("2 backing indices have repeatedly encountered errors whilst trying to advance in its lifecycle"));
assertThat(result.details(), is(not(HealthIndicatorDetails.EMPTY)));
String detailsAsString = Strings.toString(result.details());
assertThat(detailsAsString, containsString("\"total_backing_indices_in_error\":15"));
assertThat(detailsAsString, containsString("\"stagnating_backing_indices_count\":2"));
assertThat(
detailsAsString,
containsString(
String.format(
Locale.ROOT,
"\"index_name\":\"%s\","
+ "\"first_occurrence_timestamp\":1,\"retry_count\":200},{\"index_name\":\"%s\","
+ "\"first_occurrence_timestamp\":3,\"retry_count\":100",
secondGenerationIndex,
firstGenerationIndex
)
)
);
assertThat(result.impacts(), is(STAGNATING_INDEX_IMPACT));
Diagnosis diagnosis = result.diagnosisList().get(0);
assertThat(diagnosis.definition(), is(STAGNATING_BACKING_INDICES_DIAGNOSIS_DEF));
assertThat(diagnosis.affectedResources().get(0).getValues(), containsInAnyOrder(secondGenerationIndex, firstGenerationIndex));
}
public void testSkippingFieldsWhenVerboseIsFalse() {
String secondGenerationIndex = DataStream.getDefaultBackingIndexName("foo", 2L);
String firstGenerationIndex = DataStream.getDefaultBackingIndexName("foo", 1L);
HealthIndicatorResult result = service.calculate(
false,
constructHealthInfo(
new DataStreamLifecycleHealthInfo(
List.of(new DslErrorInfo(secondGenerationIndex, 1L, 200), new DslErrorInfo(firstGenerationIndex, 3L, 100)),
15
)
)
);
assertThat(result.status(), is(HealthStatus.YELLOW));
assertThat(result.symptom(), is("2 backing indices have repeatedly encountered errors whilst trying to advance in its lifecycle"));
assertThat(result.details(), is(HealthIndicatorDetails.EMPTY));
assertThat(result.impacts(), is(STAGNATING_INDEX_IMPACT));
assertThat(result.diagnosisList().isEmpty(), is(true));
}
public void testMultiProject() {
service = new DataStreamLifecycleHealthIndicatorService(TestProjectResolvers.allProjects());
ProjectId projectId1 = randomProjectIdOrDefault();
ProjectId projectId2 = randomUniqueProjectId();
String index1 = DataStream.getDefaultBackingIndexName("foo", 1L);
String index2 = DataStream.getDefaultBackingIndexName("boo", 1L);
String index1DisplayName = projectId1 + ProjectIndexName.DELIMITER + index1;
String index2DisplayName = projectId2 + ProjectIndexName.DELIMITER + index2;
HealthIndicatorResult result = service.calculate(
true,
constructHealthInfo(
new DataStreamLifecycleHealthInfo(
List.of(new DslErrorInfo(index1, 1L, 100, projectId1), new DslErrorInfo(index2, 3L, 100, projectId2)),
15
)
)
);
assertThat(result.status(), is(HealthStatus.YELLOW));
assertThat(result.symptom(), is("2 backing indices have repeatedly encountered errors whilst trying to advance in its lifecycle"));
assertThat(result.details(), is(not(HealthIndicatorDetails.EMPTY)));
String detailsAsString = Strings.toString(result.details());
assertThat(detailsAsString, containsString("\"total_backing_indices_in_error\":15"));
assertThat(detailsAsString, containsString("\"stagnating_backing_indices_count\":2"));
assertThat(
detailsAsString,
containsString(
String.format(
Locale.ROOT,
"\"index_name\":\"%s\","
+ "\"first_occurrence_timestamp\":1,\"retry_count\":100},{\"index_name\":\"%s\","
+ "\"first_occurrence_timestamp\":3,\"retry_count\":100",
index1DisplayName,
index2DisplayName
)
)
);
assertThat(result.impacts(), is(STAGNATING_INDEX_IMPACT));
Diagnosis diagnosis = result.diagnosisList().get(0);
assertThat(diagnosis.definition(), is(STAGNATING_BACKING_INDICES_DIAGNOSIS_DEF));
assertThat(diagnosis.affectedResources().get(0).getValues(), containsInAnyOrder(index1DisplayName, index2DisplayName));
}
private HealthInfo constructHealthInfo(DataStreamLifecycleHealthInfo dslHealthInfo) {
return new HealthInfo(Map.of(), dslHealthInfo, Map.of(), FileSettingsHealthInfo.INDETERMINATE);
}
}
| DataStreamLifecycleHealthIndicatorServiceTests |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/abilities/source/AggregatePushDownSpec.java | {
"start": 2601,
"end": 2885
} | class ____ {@link SourceAbilitySpec} that can not only serialize/deserialize the aggregation
* to/from JSON, but also can push the local aggregate into a {@link SupportsAggregatePushDown}.
*/
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonTypeName("AggregatePushDown")
public final | of |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/WireTapCustomPool2Test.java | {
"start": 1222,
"end": 2790
} | class ____ extends ContextTestSupport {
protected MockEndpoint tap;
protected MockEndpoint result;
protected ExecutorService pool;
@Override
@AfterEach
public void tearDown() throws Exception {
super.tearDown();
if (pool != null) {
pool.shutdownNow();
}
}
@Test
public void testSend() throws Exception {
// hello must come first, as we have delay on the tapped route
result.expectedBodiesReceived("Hello World", "Tapped");
tap.expectedBodiesReceived("Tapped");
template.sendBody("direct:start", "Hello World");
assertMockEndpointsSatisfied();
}
@Override
@BeforeEach
public void setUp() throws Exception {
super.setUp();
tap = getMockEndpoint("mock:tap");
result = getMockEndpoint("mock:result");
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
// START SNIPPET: e1
// use a custom thread pool for sending tapped messages
pool = Executors.newFixedThreadPool(2);
from("direct:start").to("log:foo")
// pass in the custom pool to the wireTap DSL
.wireTap("direct:tap").executorService(pool).to("mock:result");
// END SNIPPET: e1
from("direct:tap").delay(1000).setBody().constant("Tapped").to("mock:result", "mock:tap");
}
};
}
}
| WireTapCustomPool2Test |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/StringFieldTest_special_2.java | {
"start": 191,
"end": 2385
} | class ____ extends TestCase {
public void test_special() throws Exception {
Model model = new Model();
StringBuilder buf = new StringBuilder();
for (int i = Character.MIN_VALUE; i < Character.MAX_VALUE; ++i) {
buf.append((char) i);
}
model.name = buf.toString();
String text = JSON.toJSONString(model);
Model model2 = JSON.parseObject(text, Model.class);
Assert.assertEquals(model.name, model2.name);
}
public void test_special_browsecue() throws Exception {
Model model = new Model();
StringBuilder buf = new StringBuilder();
for (int i = Character.MIN_VALUE; i < Character.MAX_VALUE; ++i) {
buf.append((char) i);
}
model.name = buf.toString();
String text = JSON.toJSONString(model, SerializerFeature.BrowserSecure);
text = text.replaceAll("<", "<");
text = text.replaceAll(">", ">");
// text = text.replaceAll("\\\\/", "/");
Model model2 = JSON.parseObject(text, Model.class);
for (int i = 0; i < model.name.length() && i < model2.name.length(); ++i) {
char c1 = model.name.charAt(i);
char c2 = model.name.charAt(i);
if (c1 != c2) {
System.out.println("diff : " + c1 + " -> " + c2);
break;
}
}
// String str = model2.name.substring(65535);
// System.out.println(str);
Assert.assertEquals(model.name.length(), model2.name.length());
Assert.assertEquals(model.name, model2.name);
}
public void test_special_browsecompatible() throws Exception {
Model model = new Model();
StringBuilder buf = new StringBuilder();
for (int i = Character.MIN_VALUE; i < Character.MAX_VALUE; ++i) {
buf.append((char) i);
}
model.name = buf.toString();
String text = JSON.toJSONString(model, SerializerFeature.BrowserCompatible);
Model model2 = JSON.parseObject(text, Model.class);
Assert.assertEquals(model.name, model2.name);
}
private static | StringFieldTest_special_2 |
java | hibernate__hibernate-orm | hibernate-spatial/src/test/java/org/hibernate/spatial/mapping/GeographyMappingTest.java | {
"start": 1140,
"end": 2160
} | class ____ {
@Test
public void testSimpleEntity(SessionFactoryScope scope) {
final EntityPersister entityDescriptor = scope.getSessionFactory()
.getMappingMetamodel()
.getEntityDescriptor( PointEntity.class );
final JdbcTypeRegistry jdbcTypeRegistry = entityDescriptor.getFactory()
.getTypeConfiguration()
.getJdbcTypeRegistry();
BasicValuedModelPart part = (BasicValuedModelPart) entityDescriptor.findSubPart( "location" );
assertThat( part.getJdbcMapping().getJdbcType(), equalTo( jdbcTypeRegistry.getDescriptor( SqlTypes.GEOGRAPHY ) ) );
scope.inTransaction(
s -> {
s.persist(
new PointEntity(
1,
"test",
(Point<G2D>) Wkt.fromWkt(
"SRID=4326;POINT(48.2083736 16.3724441)"
)
)
);
s.flush();
s.clear();
PointEntity pointEntity = s.find( PointEntity.class, 1 );
assertThat( pointEntity.location, is( notNullValue() ) );
}
);
}
@Entity(name = "MLEntity")
public static | GeographyMappingTest |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java | {
"start": 41906,
"end": 58224
} | class ____ implements AppContext {
private final Map<JobId, Job> jobs = new ConcurrentHashMap<JobId, Job>();
private final Configuration conf;
private final ClusterInfo clusterInfo = new ClusterInfo();
private final ClientToAMTokenSecretManager clientToAMTokenSecretManager;
private TimelineClient timelineClient = null;
private TimelineV2Client timelineV2Client = null;
private String historyUrl = null;
private final TaskAttemptFinishingMonitor taskAttemptFinishingMonitor;
public RunningAppContext(Configuration config,
TaskAttemptFinishingMonitor taskAttemptFinishingMonitor) {
this.conf = config;
this.clientToAMTokenSecretManager =
new ClientToAMTokenSecretManager(appAttemptID, null);
this.taskAttemptFinishingMonitor = taskAttemptFinishingMonitor;
if (conf.getBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA,
MRJobConfig.DEFAULT_MAPREDUCE_JOB_EMIT_TIMELINE_DATA)
&& YarnConfiguration.timelineServiceEnabled(conf)) {
if (YarnConfiguration.timelineServiceV2Enabled(conf)) {
// create new version TimelineClient
timelineV2Client = TimelineV2Client.createTimelineClient(
appAttemptID.getApplicationId());
} else {
timelineClient = TimelineClient.createTimelineClient();
}
}
}
@Override
public ApplicationAttemptId getApplicationAttemptId() {
return appAttemptID;
}
@Override
public ApplicationId getApplicationID() {
return appAttemptID.getApplicationId();
}
@Override
public String getApplicationName() {
return appName;
}
@Override
public long getStartTime() {
return startTime;
}
@Override
public Job getJob(JobId jobID) {
return jobs.get(jobID);
}
@Override
public Map<JobId, Job> getAllJobs() {
return jobs;
}
@Override
public EventHandler<Event> getEventHandler() {
return dispatcher.getEventHandler();
}
@Override
public CharSequence getUser() {
return this.conf.get(MRJobConfig.USER_NAME);
}
@Override
public Clock getClock() {
return clock;
}
@Override
public ClusterInfo getClusterInfo() {
return this.clusterInfo;
}
@Override
public Set<String> getBlacklistedNodes() {
return ((RMContainerRequestor) containerAllocator).getBlacklistedNodes();
}
@Override
public ClientToAMTokenSecretManager getClientToAMTokenSecretManager() {
return clientToAMTokenSecretManager;
}
@Override
public boolean isLastAMRetry(){
return isLastAMRetry;
}
@Override
public boolean hasSuccessfullyUnregistered() {
return successfullyUnregistered.get();
}
public void markSuccessfulUnregistration() {
successfullyUnregistered.set(true);
}
public void resetIsLastAMRetry() {
isLastAMRetry = false;
}
@Override
public String getNMHostname() {
return nmHost;
}
@Override
public TaskAttemptFinishingMonitor getTaskAttemptFinishingMonitor() {
return taskAttemptFinishingMonitor;
}
public TimelineClient getTimelineClient() {
return timelineClient;
}
// Get Timeline Collector's address (get sync from RM)
public TimelineV2Client getTimelineV2Client() {
return timelineV2Client;
}
@Override
public String getHistoryUrl() {
return historyUrl;
}
@Override
public void setHistoryUrl(String historyUrl) {
this.historyUrl = historyUrl;
}
}
@SuppressWarnings("unchecked")
@Override
protected void serviceStart() throws Exception {
amInfos = new LinkedList<AMInfo>();
completedTasksFromPreviousRun = new HashMap<TaskId, TaskInfo>();
processRecovery();
cleanUpPreviousJobOutput();
// Current an AMInfo for the current AM generation.
AMInfo amInfo =
MRBuilderUtils.newAMInfo(appAttemptID, startTime, containerID, nmHost,
nmPort, nmHttpPort);
// /////////////////// Create the job itself.
job = createJob(getConfig(), forcedState, shutDownMessage);
// End of creating the job.
// Send out an MR AM inited event for all previous AMs.
for (AMInfo info : amInfos) {
dispatcher.getEventHandler().handle(
new JobHistoryEvent(job.getID(), new AMStartedEvent(info
.getAppAttemptId(), info.getStartTime(), info.getContainerId(),
info.getNodeManagerHost(), info.getNodeManagerPort(), info
.getNodeManagerHttpPort(), appSubmitTime)));
}
// Send out an MR AM inited event for this AM.
dispatcher.getEventHandler().handle(
new JobHistoryEvent(job.getID(), new AMStartedEvent(amInfo
.getAppAttemptId(), amInfo.getStartTime(), amInfo.getContainerId(),
amInfo.getNodeManagerHost(), amInfo.getNodeManagerPort(), amInfo
.getNodeManagerHttpPort(), this.forcedState == null ? null
: this.forcedState.toString(), appSubmitTime)));
amInfos.add(amInfo);
// metrics system init is really init & start.
// It's more test friendly to put it here.
DefaultMetricsSystem.initialize("MRAppMaster");
boolean initFailed = false;
if (!errorHappenedShutDown) {
// create a job event for job initialization
JobEvent initJobEvent = new JobEvent(job.getID(), JobEventType.JOB_INIT);
// Send init to the job (this does NOT trigger job execution)
// This is a synchronous call, not an event through dispatcher. We want
// job-init to be done completely here.
jobEventDispatcher.handle(initJobEvent);
// If job is still not initialized, an error happened during
// initialization. Must complete starting all of the services so failure
// events can be processed.
initFailed = (((JobImpl)job).getInternalState() != JobStateInternal.INITED);
// JobImpl's InitTransition is done (call above is synchronous), so the
// "uber-decision" (MR-1220) has been made. Query job and switch to
// ubermode if appropriate (by registering different container-allocator
// and container-launcher services/event-handlers).
if (job.isUber()) {
speculatorEventDispatcher.disableSpeculation();
LOG.info("MRAppMaster uberizing job " + job.getID()
+ " in local container (\"uber-AM\") on node "
+ nmHost + ":" + nmPort + ".");
} else {
// send init to speculator only for non-uber jobs.
// This won't yet start as dispatcher isn't started yet.
dispatcher.getEventHandler().handle(
new SpeculatorEvent(job.getID(), clock.getTime()));
LOG.info("MRAppMaster launching normal, non-uberized, multi-container "
+ "job " + job.getID() + ".");
}
// Start ClientService here, since it's not initialized if
// errorHappenedShutDown is true
clientService.start();
}
//start all the components
super.serviceStart();
// finally set the job classloader
MRApps.setClassLoader(jobClassLoader, getConfig());
if (initFailed) {
JobEvent initFailedEvent = new JobEvent(job.getID(), JobEventType.JOB_INIT_FAILED);
jobEventDispatcher.handle(initFailedEvent);
} else {
// All components have started, start the job.
startJobs();
}
}
@Override
public void stop() {
super.stop();
}
private boolean isRecoverySupported() throws IOException {
boolean isSupported = false;
Configuration conf = getConfig();
if (committer != null) {
final JobContext _jobContext = getJobContextFromConf(conf);
isSupported = callWithJobClassLoader(conf,
new ExceptionAction<Boolean>() {
public Boolean call(Configuration conf) throws IOException {
return committer.isRecoverySupported(_jobContext);
}
});
}
return isSupported;
}
private void processRecovery() throws IOException{
boolean attemptRecovery = shouldAttemptRecovery();
boolean recoverySucceeded = true;
if (attemptRecovery) {
LOG.info("Attempting to recover.");
try {
parsePreviousJobHistory();
} catch (IOException e) {
LOG.warn("Unable to parse prior job history, aborting recovery", e);
recoverySucceeded = false;
}
}
if (!isFirstAttempt() && (!attemptRecovery || !recoverySucceeded)) {
amInfos.addAll(readJustAMInfos());
}
}
private boolean isFirstAttempt() {
return appAttemptID.getAttemptId() == 1;
}
/**
* Check if the current job attempt should try to recover from previous
* job attempts if any.
*/
private boolean shouldAttemptRecovery() throws IOException {
if (isFirstAttempt()) {
return false; // no need to recover on the first attempt
}
boolean recoveryEnabled = getConfig().getBoolean(
MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,
MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE_DEFAULT);
if (!recoveryEnabled) {
LOG.info("Not attempting to recover. Recovery disabled. To enable " +
"recovery, set " + MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE);
return false;
}
boolean recoverySupportedByCommitter = isRecoverySupported();
if (!recoverySupportedByCommitter) {
LOG.info("Not attempting to recover. Recovery is not supported by " +
committer.getClass() + ". Use an OutputCommitter that supports" +
" recovery.");
return false;
}
int reducerCount = getConfig().getInt(MRJobConfig.NUM_REDUCES, 0);
// If a shuffle secret was not provided by the job client, one will be
// generated in this job attempt. However, that disables recovery if
// there are reducers as the shuffle secret would be job attempt specific.
boolean shuffleKeyValidForRecovery =
TokenCache.getShuffleSecretKey(jobCredentials) != null;
if (reducerCount > 0 && !shuffleKeyValidForRecovery) {
LOG.info("Not attempting to recover. The shuffle key is invalid for " +
"recovery.");
return false;
}
// If the intermediate data is encrypted, recovering the job requires the
// access to the key. Until the encryption key is persisted, we should
// avoid attempts to recover.
boolean spillEncrypted = CryptoUtils.isEncryptedSpillEnabled(getConfig());
if (reducerCount > 0 && spillEncrypted) {
LOG.info("Not attempting to recover. Intermediate spill encryption" +
" is enabled.");
return false;
}
return true;
}
private void cleanUpPreviousJobOutput() {
// recovered application masters should not remove data from previous job
if (!isFirstAttempt() && !recovered()) {
JobContext jobContext = getJobContextFromConf(getConfig());
try {
LOG.info("Starting to clean up previous job's temporary files");
this.committer.abortJob(jobContext, State.FAILED);
LOG.info("Finished cleaning up previous job temporary files");
} catch (FileNotFoundException e) {
LOG.info("Previous job temporary files do not exist, " +
"no clean up was necessary.");
} catch (Exception e) {
// the clean up of a previous attempt is not critical to the success
// of this job - only logging the error
LOG.error("Error while trying to clean up previous job's temporary " +
"files", e);
}
}
}
private static FSDataInputStream getPreviousJobHistoryStream(
Configuration conf, ApplicationAttemptId appAttemptId)
throws IOException {
Path historyFile = JobHistoryUtils.getPreviousJobHistoryPath(conf,
appAttemptId);
LOG.info("Previous history file is at " + historyFile);
return historyFile.getFileSystem(conf).open(historyFile);
}
private void parsePreviousJobHistory() throws IOException {
FSDataInputStream in = getPreviousJobHistoryStream(getConfig(),
appAttemptID);
JobHistoryParser parser = new JobHistoryParser(in);
JobInfo jobInfo = parser.parse();
Exception parseException = parser.getParseException();
if (parseException != null) {
LOG.info("Got an error parsing job-history file" +
", ignoring incomplete events.", parseException);
}
Map<org.apache.hadoop.mapreduce.TaskID, TaskInfo> taskInfos = jobInfo
.getAllTasks();
for (TaskInfo taskInfo : taskInfos.values()) {
if (TaskState.SUCCEEDED.toString().equals(taskInfo.getTaskStatus())) {
Iterator<Entry<TaskAttemptID, TaskAttemptInfo>> taskAttemptIterator =
taskInfo.getAllTaskAttempts().entrySet().iterator();
while (taskAttemptIterator.hasNext()) {
Map.Entry<TaskAttemptID, TaskAttemptInfo> currentEntry = taskAttemptIterator.next();
if (!jobInfo.getAllCompletedTaskAttempts().containsKey(currentEntry.getKey())) {
taskAttemptIterator.remove();
}
}
completedTasksFromPreviousRun
.put(TypeConverter.toYarn(taskInfo.getTaskId()), taskInfo);
LOG.info("Read from history task "
+ TypeConverter.toYarn(taskInfo.getTaskId()));
}
}
LOG.info("Read completed tasks from history "
+ completedTasksFromPreviousRun.size());
recoveredJobStartTime = jobInfo.getLaunchTime();
// recover AMInfos
List<JobHistoryParser.AMInfo> jhAmInfoList = jobInfo.getAMInfos();
if (jhAmInfoList != null) {
for (JobHistoryParser.AMInfo jhAmInfo : jhAmInfoList) {
AMInfo amInfo = MRBuilderUtils.newAMInfo(jhAmInfo.getAppAttemptId(),
jhAmInfo.getStartTime(), jhAmInfo.getContainerId(),
jhAmInfo.getNodeManagerHost(), jhAmInfo.getNodeManagerPort(),
jhAmInfo.getNodeManagerHttpPort());
amInfos.add(amInfo);
}
}
}
private List<AMInfo> readJustAMInfos() {
List<AMInfo> amInfos = new ArrayList<AMInfo>();
try (FSDataInputStream inputStream =
getPreviousJobHistoryStream(getConfig(), appAttemptID)) {
EventReader jobHistoryEventReader = new EventReader(inputStream);
// All AMInfos are contiguous. Track when the first AMStartedEvent
// appears.
boolean amStartedEventsBegan = false;
HistoryEvent event;
while ((event = jobHistoryEventReader.getNextEvent()) != null) {
if (event.getEventType() == EventType.AM_STARTED) {
if (!amStartedEventsBegan) {
// First AMStartedEvent.
amStartedEventsBegan = true;
}
AMStartedEvent amStartedEvent = (AMStartedEvent) event;
amInfos.add(MRBuilderUtils.newAMInfo(
amStartedEvent.getAppAttemptId(), amStartedEvent.getStartTime(),
amStartedEvent.getContainerId(),
StringInterner.weakIntern(amStartedEvent.getNodeManagerHost()),
amStartedEvent.getNodeManagerPort(),
amStartedEvent.getNodeManagerHttpPort()));
} else if (amStartedEventsBegan) {
// This means AMStartedEvents began and this event is a
// non-AMStarted event.
// No need to continue reading all the other events.
break;
}
}
} catch (IOException e) {
LOG.warn("Could not parse the old history file. "
+ "Will not have old AMinfos ", e);
}
return amInfos;
}
public boolean recovered() {
return recoveredJobStartTime > 0;
}
/**
* This can be overridden to instantiate multiple jobs and create a
* workflow.
*
* TODO: Rework the design to actually support this. Currently much of the
* job stuff has been moved to init() above to support uberization (MR-1220).
* In a typical workflow, one presumably would want to uberize only a subset
* of the jobs (the "small" ones), which is awkward with the current design.
*/
@SuppressWarnings("unchecked")
protected void startJobs() {
/** create a job-start event to get this ball rolling */
JobEvent startJobEvent = new JobStartEvent(job.getID(),
recoveredJobStartTime);
/** send the job-start event. this triggers the job execution. */
dispatcher.getEventHandler().handle(startJobEvent);
}
private | RunningAppContext |
java | alibaba__nacos | api/src/main/java/com/alibaba/nacos/api/config/remote/request/ClientConfigMetricRequest.java | {
"start": 1502,
"end": 3219
} | class ____ implements Serializable {
private static final long serialVersionUID = -2731160029960311757L;
String type;
String key;
public static final String CACHE_DATA = "cacheData";
public static final String SNAPSHOT_DATA = "snapshotData";
/**
* build metrics key.
*
* @param type type.
* @param key key.
* @return metric key.
*/
public static MetricsKey build(String type, String key) {
MetricsKey metricsKey = new MetricsKey();
metricsKey.type = type;
metricsKey.key = key;
return metricsKey;
}
public String getType() {
return type;
}
public void setType(String type) {
this.type = type;
}
public String getKey() {
return key;
}
public void setKey(String key) {
this.key = key;
}
@Override
public String toString() {
return "MetricsKey{" + "type='" + type + '\'' + ", key='" + key + '\'' + '}';
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
MetricsKey that = (MetricsKey) o;
return Objects.equals(type, that.type) && Objects.equals(key, that.key);
}
@Override
public int hashCode() {
return Objects.hash(type, key);
}
}
}
| MetricsKey |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/converted/converter/map/MapElementConversionTest.java | {
"start": 1220,
"end": 1833
} | class ____ {
@Test
public void testElementCollectionConversion(SessionFactoryScope scope) {
scope.inTransaction(
(session) -> {
Customer customer = new Customer( 1 );
customer.colors.put( "eyes", ColorType.BLUE );
session.persist( customer );
}
);
scope.inTransaction(
session -> assertEquals( 1, session.find( Customer.class, 1 ).colors.size() )
);
}
@AfterEach
public void dropTestData(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
}
@Entity( name = "Customer" )
@Table( name = "CUST" )
public static | MapElementConversionTest |
java | redisson__redisson | redisson/src/main/java/org/redisson/RedissonLiveObjectService.java | {
"start": 38162,
"end": 39003
} | class ____ declaration.");
}
FieldList<FieldDescription.InDefinedShape> fields = Introspectior.getFieldsWithAnnotation(entityClass, RIndex.class);
fields = fields.filter(ElementMatchers.fieldType(ElementMatchers.hasSuperType(
ElementMatchers.anyOf(Map.class, Collection.class, RObject.class))));
for (InDefinedShape field : fields) {
throw new IllegalArgumentException("RIndex annotation couldn't be defined for field '" + field.getName() + "' with type '" + field.getType() + "'");
}
FieldList<FieldDescription.InDefinedShape> fieldsWithRIdAnnotation
= Introspectior.getFieldsWithAnnotation(entityClass, RId.class);
if (fieldsWithRIdAnnotation.size() == 0) {
throw new IllegalArgumentException("RId annotation is missing from | type |
java | quarkusio__quarkus | extensions/qute/deployment/src/test/java/io/quarkus/qute/deployment/i18n/MessageBundleValidationTest.java | {
"start": 549,
"end": 1821
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(Hellos.class)
.addAsResource(new StringAsset(
"hello=Hallo {foo}!\nhello_never=Ball!"),
"messages/msg_de.properties"))
.assertException(t -> {
Throwable e = t;
MessageBundleException me = null;
while (e != null) {
if (e instanceof MessageBundleException) {
me = (MessageBundleException) e;
break;
}
e = e.getCause();
}
if (me == null) {
fail("No message bundle exception thrown: " + t);
}
assertTrue(me.getMessage().contains(
"Message bundle method hello_never() not found on: io.quarkus.qute.deployment.i18n.MessageBundleValidationTest$Hellos"),
me.getMessage());
});
@Test
public void testValidation() {
fail();
}
@MessageBundle(DEFAULT_NAME)
public | MessageBundleValidationTest |
java | hibernate__hibernate-orm | local-build-plugins/src/main/java/org/hibernate/orm/antlr/SplitGrammarGenerationTask.java | {
"start": 777,
"end": 5084
} | class ____ extends DefaultTask {
private final SplitGrammarDescriptor grammarDescriptor;
private final ExecOperations execOperations;
private final Provider<RegularFile> lexerGrammarFile;
private final Provider<RegularFile> parserGrammarFile;
private final Provider<Directory> generationDirectory;
private final Provider<Directory> outputDirectory;
@Inject
public SplitGrammarGenerationTask(
SplitGrammarDescriptor grammarDescriptor,
AntlrSpec antlrSpec,
ExecOperations execOperations) {
this.grammarDescriptor = grammarDescriptor;
this.execOperations = execOperations;
lexerGrammarFile = getProject().provider( () -> {
final Directory grammarBaseDirectory = antlrSpec.getGrammarBaseDirectory().get();
final Directory grammarDirectory = grammarBaseDirectory.dir( grammarDescriptor.getPackageName().get().replace( '.', '/' ) );
return grammarDirectory.file( grammarDescriptor.getLexerFileName().get() );
} );
parserGrammarFile = getProject().provider( () -> {
final Directory grammarBaseDirectory = antlrSpec.getGrammarBaseDirectory().get();
final Directory grammarDirectory = grammarBaseDirectory.dir( grammarDescriptor.getPackageName().get().replace( '.', '/' ) );
return grammarDirectory.file( grammarDescriptor.getParserFileName().get() );
} );
generationDirectory = getProject().provider( () -> {
final Directory baseDirectory = getProject().getLayout().getBuildDirectory().dir( "tmp/antlr" ).get();
return baseDirectory.dir( grammarDescriptor.getPackageName().get().replace( '.', '/' ) );
} );
outputDirectory = getProject().provider( () -> {
final Directory outputBaseDirectory = antlrSpec.getOutputBaseDirectory().get();
return outputBaseDirectory.dir( grammarDescriptor.getPackageName().get().replace( '.', '/' ) );
} );
}
@InputFile
@PathSensitive( PathSensitivity.RELATIVE )
public Provider<RegularFile> getLexerGrammarFile() {
return lexerGrammarFile;
}
@InputFile
@PathSensitive( PathSensitivity.RELATIVE )
public Provider<RegularFile> getParserGrammarFile() {
return parserGrammarFile;
}
@OutputDirectory
public Provider<Directory> getGenerationDirectory() {
return generationDirectory;
}
@OutputDirectory
public Provider<Directory> getOutputDirectory() {
return outputDirectory;
}
@TaskAction
public void generateLexerAndParser() {
final File generationDir = generationDirectory.get().getAsFile();
generationDir.mkdirs();
final File outputDir = outputDirectory.get().getAsFile();
outputDir.mkdirs();
generateLexer( generationDir );
generateParser( generationDir );
stripSillyGeneratedFromLines( generationDir, outputDir, getProject() );
}
private void generateLexer(File outputDir) {
final File lexerFile = getLexerGrammarFile().get().getAsFile();
getProject().getLogger().lifecycle(
"Starting Antlr lexer grammar generation `{}` : `{}` -> `{}`",
grammarDescriptor.getName(),
lexerFile.getAbsolutePath(),
outputDir.getAbsolutePath()
);
execOperations.javaexec(
(javaExecSpec) -> {
javaExecSpec.getMainClass().set( "org.antlr.v4.Tool" );
javaExecSpec.classpath( getProject().getConfigurations().getByName( "antlr" ) );
javaExecSpec.args(
"-o", getProject().relativePath( outputDir.getAbsolutePath() ),
"-long-messages",
lexerFile.getAbsolutePath()
);
}
);
}
private void generateParser(File outputDir) {
final File parserFile = getParserGrammarFile().get().getAsFile();
getProject().getLogger().lifecycle(
"Starting Antlr parser grammar generation `{}` : `{}` -> `{}`",
grammarDescriptor.getName(),
parserFile.getAbsolutePath(),
outputDir.getAbsolutePath()
);
execOperations.javaexec(
(javaExecSpec) -> {
javaExecSpec.getMainClass().set( "org.antlr.v4.Tool" );
javaExecSpec.classpath( getProject().getConfigurations().named( "antlr" ) );
javaExecSpec.args(
"-o", getProject().relativePath( outputDir.getAbsolutePath() ),
"-long-messages",
parserFile.getAbsolutePath()
);
if ( grammarDescriptor.generateListener().get() ) {
javaExecSpec.args( "-listener" );
}
if ( grammarDescriptor.generateVisitor().get() ) {
javaExecSpec.args( "-visitor" );
}
}
);
}
}
| SplitGrammarGenerationTask |
java | spring-projects__spring-boot | smoke-test/spring-boot-smoke-test-jackson2-mixed/src/main/java/smoketest/jackson2/mixed/SampleJsonComponent.java | {
"start": 1004,
"end": 1261
} | class ____ extends JsonSerializer<Name> {
@Override
public void serialize(Name value, JsonGenerator gen, SerializerProvider serializers) throws IOException {
gen.writeString("JACKSON2:%s:%s".formatted(value.first(), value.last()));
}
}
}
| Serializer |
java | elastic__elasticsearch | x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/persistence/ProfilingDataStreamManagerTests.java | {
"start": 3308,
"end": 23981
} | class ____ extends ESTestCase {
private final AtomicBoolean templatesCreated = new AtomicBoolean();
private ProfilingDataStreamManager datastreamManager;
private ClusterService clusterService;
private ThreadPool threadPool;
private VerifyingClient client;
private List<ProfilingDataStreamManager.ProfilingDataStream> managedDataStreams;
private int indexTemplateVersion;
private IndexStateResolver indexStateResolver;
@Before
public void createRegistryAndClient() {
templatesCreated.set(false);
threadPool = new TestThreadPool(this.getClass().getName());
client = new VerifyingClient(threadPool);
clusterService = ClusterServiceUtils.createClusterService(threadPool);
managedDataStreams = ProfilingDataStreamManager.PROFILING_DATASTREAMS;
indexTemplateVersion = ProfilingIndexTemplateRegistry.INDEX_TEMPLATE_VERSION;
indexStateResolver = new IndexStateResolver(true) {
@Override
protected int getIndexTemplateVersion() {
return indexTemplateVersion;
}
};
datastreamManager = new ProfilingDataStreamManager(threadPool, client, clusterService, indexStateResolver) {
@Override
protected boolean areAllIndexTemplatesCreated(ClusterChangedEvent event, Settings settings) {
return templatesCreated.get();
}
@Override
protected Iterable<ProfilingDataStream> getManagedIndices() {
return managedDataStreams;
}
};
datastreamManager.setTemplatesEnabled(true);
}
@After
@Override
public void tearDown() throws Exception {
super.tearDown();
threadPool.shutdownNow();
}
public void testThatMissingMasterNodeDoesNothing() {
DiscoveryNode localNode = DiscoveryNodeUtils.create("node");
DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").add(localNode).build();
client.setVerifier((a, r, l) -> {
fail("if the master is missing nothing should happen");
return null;
});
ClusterChangedEvent event = createClusterChangedEvent(Collections.emptyList(), nodes);
datastreamManager.clusterChanged(event);
}
public void testThatMissingTemplatesDoesNothing() {
DiscoveryNode node = DiscoveryNodeUtils.create("node");
DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build();
client.setVerifier((a, r, l) -> {
fail("if any templates are missing nothing should happen");
return null;
});
ClusterChangedEvent event = createClusterChangedEvent(Collections.emptyList(), nodes);
datastreamManager.clusterChanged(event);
}
public void testThatNonExistingDataStreamsAreAddedImmediately() throws Exception {
DiscoveryNode node = DiscoveryNodeUtils.create("node");
DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build();
templatesCreated.set(true);
ClusterChangedEvent event = createClusterChangedEvent(Collections.emptyList(), nodes);
AtomicInteger calledTimes = new AtomicInteger(0);
client.setVerifier((action, request, listener) -> verifyDataStreamInstalled(calledTimes, action, request, listener));
datastreamManager.clusterChanged(event);
assertBusy(() -> assertThat(calledTimes.get(), equalTo(ProfilingDataStreamManager.PROFILING_DATASTREAMS.size())));
calledTimes.set(0);
}
public void testThatRedIndexIsNotTouched() throws Exception {
DiscoveryNode node = DiscoveryNodeUtils.create("node");
DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build();
templatesCreated.set(true);
// This data stream is a rollover candidate
ProfilingDataStreamManager.ProfilingDataStream existingDataStream = randomFrom(ProfilingDataStreamManager.PROFILING_DATASTREAMS);
ClusterChangedEvent event = createClusterChangedEvent(
List.of(existingDataStream.withVersion(0)),
nodes,
IndexMetadata.State.OPEN,
IndexVersion.current(),
false
);
AtomicInteger calledTimes = new AtomicInteger(0);
client.setVerifier((action, request, listener) -> verifyDataStreamInstalled(calledTimes, action, request, listener));
datastreamManager.clusterChanged(event);
// should not create the index because a newer generation with the correct version exists
assertBusy(() -> assertThat(calledTimes.get(), equalTo(ProfilingDataStreamManager.PROFILING_DATASTREAMS.size() - 1)));
calledTimes.set(0);
}
public void testThatOutdatedDataStreamIsDetectedIfCheckEnabled() throws Exception {
DiscoveryNode node = DiscoveryNodeUtils.create("node");
DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build();
templatesCreated.set(true);
ProfilingDataStreamManager.ProfilingDataStream existingDataStream = randomFrom(ProfilingDataStreamManager.PROFILING_DATASTREAMS);
ClusterChangedEvent event = createClusterChangedEvent(
List.of(existingDataStream),
nodes,
IndexMetadata.State.OPEN,
// This is an outdated version that requires indices to be deleted upon migration
IndexVersions.V_8_8_2,
true
);
AtomicInteger calledTimes = new AtomicInteger(0);
client.setVerifier((action, request, listener) -> verifyDataStreamInstalled(calledTimes, action, request, listener));
datastreamManager.clusterChanged(event);
// should not create this index because the one that has changed is too old. Depending on the point at which the index is
// evaluated, other indices may have already been created.
assertBusy(
() -> assertThat(
calledTimes.get(),
allOf(greaterThanOrEqualTo(0), Matchers.lessThan(ProfilingDataStreamManager.PROFILING_DATASTREAMS.size()))
)
);
calledTimes.set(0);
}
public void testThatOutdatedDataStreamIsIgnoredIfCheckDisabled() throws Exception {
// disable the check
indexStateResolver.setCheckOutdatedIndices(false);
DiscoveryNode node = DiscoveryNodeUtils.create("node");
DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build();
templatesCreated.set(true);
ProfilingDataStreamManager.ProfilingDataStream existingDataStream = randomFrom(ProfilingDataStreamManager.PROFILING_DATASTREAMS);
ClusterChangedEvent event = createClusterChangedEvent(
List.of(existingDataStream),
nodes,
IndexMetadata.State.OPEN,
IndexVersions.V_8_8_2,
true
);
AtomicInteger calledTimes = new AtomicInteger(0);
client.setVerifier((action, request, listener) -> verifyDataStreamInstalled(calledTimes, action, request, listener));
datastreamManager.clusterChanged(event);
// should create all indices but consider the current one up-to-date
assertBusy(() -> assertThat(calledTimes.get(), equalTo(ProfilingDataStreamManager.PROFILING_DATASTREAMS.size() - 1)));
calledTimes.set(0);
}
public void testThatClosedIndexIsNotTouched() throws Exception {
DiscoveryNode node = DiscoveryNodeUtils.create("node");
DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build();
templatesCreated.set(true);
// This index is an upgrade candidate
ProfilingDataStreamManager.ProfilingDataStream existingDataStream = randomFrom(ProfilingDataStreamManager.PROFILING_DATASTREAMS);
ClusterChangedEvent event = createClusterChangedEvent(
List.of(existingDataStream.withVersion(0)),
nodes,
IndexMetadata.State.CLOSE,
IndexVersion.current(),
true
);
AtomicInteger calledTimes = new AtomicInteger(0);
client.setVerifier((action, request, listener) -> verifyDataStreamInstalled(calledTimes, action, request, listener));
datastreamManager.clusterChanged(event);
// should not create the index because a newer generation with the correct version exists
assertBusy(() -> assertThat(calledTimes.get(), equalTo(ProfilingDataStreamManager.PROFILING_DATASTREAMS.size() - 1)));
calledTimes.set(0);
}
public void testThatExistingIndicesAreNotCreatedTwice() throws Exception {
DiscoveryNode node = DiscoveryNodeUtils.create("node");
DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build();
templatesCreated.set(true);
ProfilingDataStreamManager.ProfilingDataStream existingDataStream = randomFrom(ProfilingDataStreamManager.PROFILING_DATASTREAMS);
ClusterChangedEvent event = createClusterChangedEvent(List.of(existingDataStream), nodes);
AtomicInteger calledTimes = new AtomicInteger(0);
client.setVerifier((action, request, listener) -> verifyDataStreamInstalled(calledTimes, action, request, listener));
datastreamManager.clusterChanged(event);
// should not create the existing index
assertBusy(() -> assertThat(calledTimes.get(), equalTo(ProfilingDataStreamManager.PROFILING_DATASTREAMS.size() - 1)));
calledTimes.set(0);
}
public void testThatDataStreamIsRolledOver() throws Exception {
DiscoveryNode node = DiscoveryNodeUtils.create("node");
DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build();
templatesCreated.set(true);
ProfilingDataStreamManager.ProfilingDataStream dataStreamToRollover = randomFrom(ProfilingDataStreamManager.PROFILING_DATASTREAMS);
List<ProfilingDataStreamManager.ProfilingDataStream> existingDataStreams = new ArrayList<>(
ProfilingDataStreamManager.PROFILING_DATASTREAMS
);
existingDataStreams.remove(dataStreamToRollover);
existingDataStreams.add(dataStreamToRollover.withVersion(0));
ClusterChangedEvent event = createClusterChangedEvent(existingDataStreams, nodes);
AtomicInteger calledTimes = new AtomicInteger(0);
client.setVerifier((action, request, listener) -> verifyDataStreamRolledOver(calledTimes, action, request, listener));
datastreamManager.clusterChanged(event);
assertBusy(() -> assertThat(calledTimes.get(), equalTo(1)));
calledTimes.set(0);
}
public void testNoMigrationsIfIndexTemplateVersionMatches() throws Exception {
DiscoveryNode node = DiscoveryNodeUtils.create("node");
DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build();
templatesCreated.set(true);
int nextIndexTemplateVersion = ProfilingIndexTemplateRegistry.INDEX_TEMPLATE_VERSION + 1;
ProfilingDataStreamManager.ProfilingDataStream ds = ProfilingDataStreamManager.ProfilingDataStream.of(
"profiling-test",
1,
new Migration.Builder().migrateToIndexTemplateVersion(nextIndexTemplateVersion).addProperty("test", "keyword")
);
managedDataStreams = List.of(ds);
ClusterChangedEvent event = createClusterChangedEvent(managedDataStreams, nodes);
client.setVerifier((a, r, l) -> {
fail("all data streams should be up-to-date; nothing should happen");
return null;
});
datastreamManager.clusterChanged(event);
}
public void testMigratesIfIndexTemplateVersionIsBehind() throws Exception {
DiscoveryNode node = DiscoveryNodeUtils.create("node");
DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build();
templatesCreated.set(true);
int nextIndexTemplateVersion = ProfilingIndexTemplateRegistry.INDEX_TEMPLATE_VERSION + 1;
ProfilingDataStreamManager.ProfilingDataStream ds = ProfilingDataStreamManager.ProfilingDataStream.of(
"profiling-test",
1,
new Migration.Builder().migrateToIndexTemplateVersion(nextIndexTemplateVersion).addProperty("test", "keyword")
);
ProfilingDataStreamManager.ProfilingDataStream ds2 = ProfilingDataStreamManager.ProfilingDataStream.of("profiling-no-change", 1
// no migration specified, should not be changed
);
managedDataStreams = List.of(ds, ds2);
// index is out of date and should be migrated
indexTemplateVersion = nextIndexTemplateVersion;
ClusterChangedEvent event = createClusterChangedEvent(managedDataStreams, nodes);
AtomicInteger mappingUpdates = new AtomicInteger(0);
AtomicInteger settingsUpdates = new AtomicInteger(0);
client.setVerifier(
(action, request, listener) -> verifyIndexMigrated(
".ds-profiling-test",
mappingUpdates,
settingsUpdates,
action,
request,
listener
)
);
datastreamManager.clusterChanged(event);
// one mapping update is the one we specified, the other one is because we need to update _meta
assertBusy(() -> assertThat(mappingUpdates.get(), equalTo(2)));
assertBusy(() -> assertThat(settingsUpdates.get(), equalTo(0)));
mappingUpdates.set(0);
settingsUpdates.set(0);
}
private ActionResponse verifyDataStreamInstalled(
AtomicInteger calledTimes,
ActionType<?> action,
ActionRequest request,
ActionListener<?> listener
) {
if (action instanceof CreateDataStreamAction) {
calledTimes.incrementAndGet();
assertThat(action, instanceOf(CreateDataStreamAction.class));
assertThat(request, instanceOf(CreateDataStreamAction.Request.class));
assertNotNull(listener);
return AcknowledgedResponse.TRUE;
} else {
fail("client called with unexpected request:" + request.toString());
return null;
}
}
private ActionResponse verifyDataStreamRolledOver(
AtomicInteger calledTimes,
ActionType<?> action,
ActionRequest request,
ActionListener<?> listener
) {
if (action instanceof RolloverAction) {
calledTimes.incrementAndGet();
assertThat(action, instanceOf(RolloverAction.class));
assertThat(request, instanceOf(RolloverRequest.class));
assertNotNull(listener);
RolloverRequest rolloverRequest = (RolloverRequest) request;
return new RolloverResponse(
rolloverRequest.getRolloverTarget(),
rolloverRequest.getNewIndexName(),
Map.of(),
false,
true,
true,
true,
false
);
} else {
fail("client called with unexpected request:" + request.toString());
return null;
}
}
private ActionResponse verifyIndexMigrated(
String indexName,
AtomicInteger mappingUpdates,
AtomicInteger settingsUpdates,
ActionType<?> action,
ActionRequest request,
ActionListener<?> listener
) {
if (action == TransportPutMappingAction.TYPE) {
mappingUpdates.incrementAndGet();
assertThat(request, instanceOf(PutMappingRequest.class));
assertThat(((PutMappingRequest) request).indices(), equalTo(new String[] { indexName }));
assertNotNull(listener);
return AcknowledgedResponse.TRUE;
} else if (action == TransportUpdateSettingsAction.TYPE) {
settingsUpdates.incrementAndGet();
assertThat(request, instanceOf(UpdateSettingsRequest.class));
assertNotNull(listener);
return AcknowledgedResponse.TRUE;
} else {
fail("client called with unexpected request:" + request.toString());
return null;
}
}
private ClusterChangedEvent createClusterChangedEvent(
Iterable<ProfilingDataStreamManager.ProfilingDataStream> existingDataStreams,
DiscoveryNodes nodes
) {
return createClusterChangedEvent(existingDataStreams, nodes, IndexMetadata.State.OPEN, IndexVersion.current(), true);
}
private ClusterChangedEvent createClusterChangedEvent(
Iterable<ProfilingDataStreamManager.ProfilingDataStream> existingDataStreams,
DiscoveryNodes nodes,
IndexMetadata.State state,
IndexVersion indexVersion,
boolean allShardsAssigned
) {
ClusterState cs = createClusterState(Settings.EMPTY, existingDataStreams, nodes, state, indexVersion, allShardsAssigned);
ClusterChangedEvent realEvent = new ClusterChangedEvent(
"created-from-test",
cs,
ClusterState.builder(new ClusterName("test")).build()
);
ClusterChangedEvent event = spy(realEvent);
when(event.localNodeMaster()).thenReturn(nodes.isLocalNodeElectedMaster());
return event;
}
private ClusterState createClusterState(
Settings nodeSettings,
Iterable<ProfilingDataStreamManager.ProfilingDataStream> existingDataStreams,
DiscoveryNodes nodes,
IndexMetadata.State state,
IndexVersion indexVersion,
boolean allShardsAssigned
) {
Metadata.Builder metadataBuilder = Metadata.builder();
RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
Map<String, IndexMetadata> indices = new HashMap<>();
for (ProfilingDataStreamManager.ProfilingDataStream existingDataStream : existingDataStreams) {
String writeIndexName = String.format(Locale.ROOT, ".ds-%s", existingDataStream.getName());
Index writeIndex = new Index(writeIndexName, writeIndexName);
DataStream ds = DataStream.builder(existingDataStream.getName(), List.of(writeIndex))
.setMetadata(Map.of())
.setIndexMode(IndexMode.STANDARD)
.build();
metadataBuilder.put(ds);
IndexMetadata.Builder builder = new IndexMetadata.Builder(writeIndexName);
builder.state(state);
builder.settings(indexSettings(indexVersion, 1, 1).put(IndexMetadata.SETTING_INDEX_UUID, writeIndex.getUUID()));
builder.putMapping(
new MappingMetadata(
MapperService.SINGLE_MAPPING_NAME,
Map.of(
"_meta",
Map.of(
"index-version",
existingDataStream.getVersion(),
"index-template-version",
ProfilingIndexTemplateRegistry.INDEX_TEMPLATE_VERSION
)
)
)
);
builder.numberOfReplicas(0);
builder.numberOfShards(1);
IndexMetadata indexMetadata = builder.build();
indices.put(writeIndexName, indexMetadata);
ShardRouting shardRouting = ShardRouting.newUnassigned(
new ShardId(writeIndex, 0),
true,
RecoverySource.ExistingStoreRecoverySource.INSTANCE,
new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, ""),
ShardRouting.Role.DEFAULT
);
if (allShardsAssigned) {
shardRouting = shardRouting.initialize("node0", null, 0).moveToStarted(0);
}
routingTableBuilder.add(
IndexRoutingTable.builder(writeIndex)
.addIndexShard(IndexShardRoutingTable.builder(shardRouting.shardId()).addShard(shardRouting))
);
}
return ClusterState.builder(new ClusterName("test"))
.metadata(metadataBuilder.indices(indices).transientSettings(nodeSettings).build())
.blocks(new ClusterBlocks.Builder().build())
.nodes(nodes)
.routingTable(routingTableBuilder)
.build();
}
}
| ProfilingDataStreamManagerTests |
java | grpc__grpc-java | binder/src/test/java/io/grpc/binder/BinderChannelCredentialsTest.java | {
"start": 351,
"end": 1219
} | class ____ {
private final Context appContext = ApplicationProvider.getApplicationContext();
@Test
public void defaultBinderChannelCredentials() {
BinderChannelCredentials channelCredentials = BinderChannelCredentials.forDefault();
assertThat(channelCredentials.getDevicePolicyAdminComponentName()).isNull();
}
@Test
public void binderChannelCredentialsForDevicePolicyAdmin() {
String deviceAdminClassName = "DevicePolicyAdmin";
BinderChannelCredentials channelCredentials =
BinderChannelCredentials.forDevicePolicyAdmin(
new ComponentName(appContext, deviceAdminClassName));
assertThat(channelCredentials.getDevicePolicyAdminComponentName()).isNotNull();
assertThat(channelCredentials.getDevicePolicyAdminComponentName().getClassName())
.isEqualTo(deviceAdminClassName);
}
}
| BinderChannelCredentialsTest |
java | hibernate__hibernate-orm | tooling/metamodel-generator/src/main/java/org/hibernate/processor/xml/XmlMetaEntity.java | {
"start": 3794,
"end": 4101
} | class ____ is embedding or sub-classing the entity or super-class. This might not be known until
* annotations are processed.
* <p>
* Also note, that if two different classes with different access types embed this entity or extend this mapped
* super-class, the access type of the embeddable/super- | which |
java | apache__flink | flink-clients/src/test/java/org/apache/flink/client/program/rest/RestClusterClientTest.java | {
"start": 27265,
"end": 27652
} | class ____ {
private final TriggerId triggerId = new TriggerId();
private final IntermediateDataSetID intermediateDataSetID;
private TestClusterDatasetDeleteHandlers(IntermediateDataSetID intermediateDatasetId) {
this.intermediateDataSetID = Preconditions.checkNotNull(intermediateDatasetId);
}
private | TestClusterDatasetDeleteHandlers |
java | spring-projects__spring-security | core/src/main/java/org/springframework/security/authentication/InternalAuthenticationServiceException.java | {
"start": 1602,
"end": 1978
} | class ____ extends AuthenticationServiceException {
@Serial
private static final long serialVersionUID = -6029644854192497840L;
public InternalAuthenticationServiceException(@Nullable String message, Throwable cause) {
super(message, cause);
}
public InternalAuthenticationServiceException(String message) {
super(message);
}
}
| InternalAuthenticationServiceException |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/event/PreInsertEventListenerVetoUnidirectionalTest.java | {
"start": 1994,
"end": 2426
} | class ____ {
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
private Integer id;
@OneToOne(cascade = CascadeType.ALL)
private Parent parent;
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public Parent getParent() {
return parent;
}
public void setParent(Parent parent) {
this.parent = parent;
}
}
@Entity(name = "Parent")
public static | Child |
java | apache__hadoop | hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/RackNode.java | {
"start": 959,
"end": 1670
} | class ____ extends Node {
public RackNode(String name, int level) {
// Hack: ensuring rack name starts with "/".
super(name.startsWith("/") ? name : "/" + name, level);
}
@Override
public synchronized boolean addChild(Node child) {
if (!(child instanceof MachineNode)) {
throw new IllegalArgumentException(
"Only MachineNode can be added to RackNode");
}
return super.addChild(child);
}
/**
* Get the machine nodes that belong to the rack.
* @return The machine nodes that belong to the rack.
*/
@SuppressWarnings({ "cast", "unchecked" })
public Set<MachineNode> getMachinesInRack() {
return (Set<MachineNode>)(Set)getChildren();
}
}
| RackNode |
java | alibaba__nacos | core/src/main/java/com/alibaba/nacos/core/model/form/PageForm.java | {
"start": 931,
"end": 2025
} | class ____ implements NacosForm {
private static final long serialVersionUID = -8912131925234465033L;
private int pageNo = 1;
private int pageSize = 100;
@Override
public void validate() throws NacosApiException {
if (pageNo < 1) {
throw new NacosApiException(HttpStatus.BAD_REQUEST.value(), ErrorCode.PARAMETER_VALIDATE_ERROR,
String.format("Required parameter 'pageNo' should be positive integer, current is %d", pageNo));
}
if (pageSize < 1) {
throw new NacosApiException(HttpStatus.BAD_REQUEST.value(), ErrorCode.PARAMETER_VALIDATE_ERROR,
String.format("Required parameter 'pageSize' should be positive integer, current is %d", pageSize));
}
}
public int getPageNo() {
return pageNo;
}
public void setPageNo(int pageNo) {
this.pageNo = pageNo;
}
public int getPageSize() {
return pageSize;
}
public void setPageSize(int pageSize) {
this.pageSize = pageSize;
}
}
| PageForm |
java | alibaba__nacos | config/src/main/java/com/alibaba/nacos/config/server/service/repository/ConfigInfoGrayPersistService.java | {
"start": 1206,
"end": 7259
} | interface ____ {
/**
* create Pagination utils.
*
* @param <E> Generic object
* @return {@link PaginationHelper}
*/
<E> PaginationHelper<E> createPaginationHelper();
//------------------------------------------insert---------------------------------------------//
/**
* get gray config info state.
*
* @param dataId dataId.
* @param group group.
* @param tenant tenant.
* @param grayName gray name.
* @return config info state.
*/
ConfigInfoStateWrapper findConfigInfo4GrayState(final String dataId, final String group, final String tenant,
String grayName);
/**
* Add gray configuration information and publish data change events.
*
* @param configInfo config info
* @param grayName gray name
* @param grayRule gray rule
* @param srcIp remote ip
* @param srcUser user
* @return config operation result.
*/
ConfigOperateResult addConfigInfo4Gray(ConfigInfo configInfo, String grayName, String grayRule,
String srcIp, String srcUser);
/**
* Adds configuration information with database atomic operations, minimizing SQL actions and avoiding business
* encapsulation.
*
* @param configGrayId the ID for the gray configuration
* @param configInfo the configuration information to be added
* @param grayName the name of the gray configuration
* @param grayRule the rule of the gray configuration
* @param srcIp the IP address of the source
* @param srcUser the user who performs the addition
*/
void addConfigInfoGrayAtomic(final long configGrayId, final ConfigInfo configInfo, final String grayName, final String grayRule,
final String srcIp, final String srcUser);
/**
* insert or update gray config.
*
* @param configInfo config info
* @param grayName gray name
* @param grayRule gray rule
* @param srcIp remote ip
* @param srcUser user
* @return config operation result.
*/
ConfigOperateResult insertOrUpdateGray(final ConfigInfo configInfo, final String grayName, final String grayRule,
final String srcIp, final String srcUser);
/**
* insert or update gray config cas.
*
* @param configInfo config info.
* @param grayName gray name
* @param grayRule gray rule
* @param srcIp remote ip.
* @param srcUser user.
* @return config operation result.
*/
ConfigOperateResult insertOrUpdateGrayCas(final ConfigInfo configInfo, final String grayName, final String grayRule,
final String srcIp, final String srcUser);
//------------------------------------------delete---------------------------------------------//
/**
* Delete configuration; database atomic operation, minimum SQL action, no business encapsulation.
*
* @param dataId dataId
* @param group group
* @param tenant tenant
* @param grayName gray name
* @param srcIp remote ip
* @param srcUser user
*/
void removeConfigInfoGray(final String dataId, final String group, final String tenant, final String grayName,
final String srcIp, final String srcUser);
//------------------------------------------update---------------------------------------------//
/**
* Update gray configuration information.
*
* @param configInfo config info
* @param grayName gray name
* @param grayRule gray rule
* @param srcIp remote ip
* @param srcUser user
* @return config operation result.
*/
ConfigOperateResult updateConfigInfo4Gray(ConfigInfo configInfo, String grayName, String grayRule,
String srcIp, String srcUser);
/**
* Update gray configuration information.
*
* @param configInfo config info
* @param grayName gray name
* @param grayRule gray rule
* @param srcIp remote ip
* @param srcUser user
* @return success or not.
*/
ConfigOperateResult updateConfigInfo4GrayCas(ConfigInfo configInfo, String grayName, String grayRule,
String srcIp, String srcUser);
//------------------------------------------select---------------------------------------------//
/**
* Query gray configuration information based on dataId and group.
*
* @param dataId data id
* @param group group
* @param tenant tenant
* @param grayName gray name
* @return ConfigInfoGrayWrapper gray model instance.
*/
ConfigInfoGrayWrapper findConfigInfo4Gray(final String dataId, final String group, final String tenant,
final String grayName);
/**
* Returns the number of gray configuration items.
*
* @return number of configuration items.
*/
int configInfoGrayCount();
/**
* Query all gray config info for dump task.
*
* @param pageNo page numbser
* @param pageSize page sizxe
* @return {@link Page} with {@link ConfigInfoGrayWrapper} generation
*/
Page<ConfigInfoGrayWrapper> findAllConfigInfoGrayForDumpAll(final int pageNo, final int pageSize);
/**
* Query all gray config info for dump task.
*
* @param startTime startTime
* @param lastMaxId lastMaxId
* @param pageSize pageSize
* @return {@link Page} with {@link ConfigInfoGrayWrapper} generation
*/
List<ConfigInfoGrayWrapper> findChangeConfig(final Timestamp startTime, long lastMaxId, final int pageSize);
/**
* found all config grays.
*
* @param dataId dataId.
* @param group group.
* @param tenant tenant.
* @return
*/
List<String> findConfigInfoGrays(final String dataId, final String group, final String tenant);
}
| ConfigInfoGrayPersistService |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java | {
"start": 2243,
"end": 2664
} | class ____ the required record or null if no record is required
* for this interface.
*/
public Class<R> getRecordClass() {
return this.recordClass;
}
/**
* Get the State Store driver.
*
* @return State Store driver.
*/
public StateStoreDriver getDriver() {
return this.driver;
}
/**
* Build a state store API implementation interface.
*
* @param clazz The specific | of |
java | apache__rocketmq | client/src/main/java/org/apache/rocketmq/client/producer/RequestResponseFuture.java | {
"start": 990,
"end": 3714
} | class ____ {
private final String correlationId;
private final RequestCallback requestCallback;
private final long beginTimestamp = System.currentTimeMillis();
private final Message requestMsg = null;
private long timeoutMillis;
private CountDownLatch countDownLatch = new CountDownLatch(1);
private volatile Message responseMsg = null;
private volatile boolean sendRequestOk = true;
private volatile Throwable cause = null;
public RequestResponseFuture(String correlationId, long timeoutMillis, RequestCallback requestCallback) {
this.correlationId = correlationId;
this.timeoutMillis = timeoutMillis;
this.requestCallback = requestCallback;
}
public void executeRequestCallback() {
if (requestCallback != null) {
if (sendRequestOk && cause == null) {
requestCallback.onSuccess(responseMsg);
} else {
requestCallback.onException(cause);
}
}
}
public boolean isTimeout() {
long diff = System.currentTimeMillis() - this.beginTimestamp;
return diff > this.timeoutMillis;
}
public Message waitResponseMessage(final long timeout) throws InterruptedException {
this.countDownLatch.await(timeout, TimeUnit.MILLISECONDS);
return this.responseMsg;
}
public void putResponseMessage(final Message responseMsg) {
this.responseMsg = responseMsg;
this.countDownLatch.countDown();
}
public String getCorrelationId() {
return correlationId;
}
public long getTimeoutMillis() {
return timeoutMillis;
}
public void setTimeoutMillis(long timeoutMillis) {
this.timeoutMillis = timeoutMillis;
}
public RequestCallback getRequestCallback() {
return requestCallback;
}
public long getBeginTimestamp() {
return beginTimestamp;
}
public CountDownLatch getCountDownLatch() {
return countDownLatch;
}
public void setCountDownLatch(CountDownLatch countDownLatch) {
this.countDownLatch = countDownLatch;
}
public Message getResponseMsg() {
return responseMsg;
}
public void setResponseMsg(Message responseMsg) {
this.responseMsg = responseMsg;
}
public boolean isSendRequestOk() {
return sendRequestOk;
}
public void setSendRequestOk(boolean sendRequestOk) {
this.sendRequestOk = sendRequestOk;
}
public Message getRequestMsg() {
return requestMsg;
}
public Throwable getCause() {
return cause;
}
public void setCause(Throwable cause) {
this.cause = cause;
}
}
| RequestResponseFuture |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/nullness/ParameterMissingNullableTest.java | {
"start": 2940,
"end": 3253
} | class ____ {
void foo(Object o) {
while (true) checkArgument(o != null);
}
}
""")
.doTest();
}
@Test
public void positiveTernary() {
aggressiveHelper
.addSourceLines(
"Foo.java",
"""
| Foo |
java | apache__flink | flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/factories/TableFactoryUtil.java | {
"start": 2334,
"end": 9831
} | class ____ {
/** Returns a table source matching the descriptor. */
@SuppressWarnings("unchecked")
public static <T> TableSource<T> findAndCreateTableSource(TableSourceFactory.Context context) {
try {
return TableFactoryService.find(
TableSourceFactory.class,
((ResolvedCatalogTable) context.getTable())
.toProperties(DefaultSqlFactory.INSTANCE))
.createTableSource(context);
} catch (Throwable t) {
throw new TableException("findAndCreateTableSource failed.", t);
}
}
/**
* Creates a {@link TableSource} from a {@link CatalogTable}.
*
* <p>It considers {@link Catalog#getFactory()} if provided.
*/
@SuppressWarnings("unchecked")
public static <T> TableSource<T> findAndCreateTableSource(
ObjectIdentifier objectIdentifier,
CatalogTable catalogTable,
ReadableConfig configuration,
boolean isTemporary) {
TableSourceFactory.Context context =
new TableSourceFactoryContextImpl(
objectIdentifier, catalogTable, configuration, isTemporary);
return findAndCreateTableSource(context);
}
/** Returns a table sink matching the context. */
@SuppressWarnings("unchecked")
public static <T> TableSink<T> findAndCreateTableSink(TableSinkFactory.Context context) {
try {
return TableFactoryService.find(
TableSinkFactory.class,
((ResolvedCatalogTable) context.getTable())
.toProperties(DefaultSqlFactory.INSTANCE))
.createTableSink(context);
} catch (Throwable t) {
throw new TableException("findAndCreateTableSink failed.", t);
}
}
/**
* Creates a {@link TableSink} from a {@link CatalogTable}.
*
* <p>It considers {@link Catalog#getFactory()} if provided.
*/
@SuppressWarnings("unchecked")
public static <T> TableSink<T> findAndCreateTableSink(
ObjectIdentifier objectIdentifier,
CatalogTable catalogTable,
ReadableConfig configuration,
boolean isStreamingMode,
boolean isTemporary) {
TableSinkFactory.Context context =
new TableSinkFactoryContextImpl(
objectIdentifier,
catalogTable,
configuration,
!isStreamingMode,
isTemporary);
return findAndCreateTableSink(context);
}
/** Checks whether the {@link CatalogTable} uses legacy connector sink options. */
public static boolean isLegacyConnectorOptions(
ReadableConfig configuration,
boolean isStreamingMode,
ObjectIdentifier objectIdentifier,
CatalogTable catalogTable,
boolean isTemporary) {
// normalize option keys
DescriptorProperties properties = new DescriptorProperties(true);
properties.putProperties(catalogTable.getOptions());
if (properties.containsKey(ConnectorDescriptorValidator.CONNECTOR_TYPE)) {
return true;
} else {
try {
// try to create legacy table source using the options,
// some legacy factories may use the 'type' key
TableFactoryUtil.findAndCreateTableSink(
objectIdentifier,
catalogTable,
configuration,
isStreamingMode,
isTemporary);
// success, then we will use the legacy factories
return true;
} catch (Throwable ignore) {
// fail, then we will use new factories
return false;
}
}
}
/** Find and create modification listener list from configuration. */
public static List<CatalogModificationListener> findCatalogModificationListenerList(
final ReadableConfig configuration, final ClassLoader classLoader) {
return configuration
.getOptional(TableConfigOptions.TABLE_CATALOG_MODIFICATION_LISTENERS)
.orElse(Collections.emptyList())
.stream()
.map(
identifier ->
FactoryUtil.discoverFactory(
classLoader,
CatalogModificationListenerFactory.class,
identifier)
.createListener(
new CatalogModificationListenerFactory.Context() {
@Override
public ReadableConfig getConfiguration() {
return configuration;
}
@Override
public ClassLoader getUserClassLoader() {
return classLoader;
}
}))
.collect(Collectors.toList());
}
/**
* Finds and creates a {@link CatalogStoreFactory} using the provided {@link Configuration} and
* user classloader.
*
* <p>The configuration format should be as follows:
*
* <pre>{@code
* table.catalog-store.kind: {identifier}
* table.catalog-store.{identifier}.{param1}: xxx
* table.catalog-store.{identifier}.{param2}: xxx
* }</pre>
*/
public static CatalogStoreFactory findAndCreateCatalogStoreFactory(
Configuration configuration, ClassLoader classLoader) {
String identifier = configuration.get(CommonCatalogOptions.TABLE_CATALOG_STORE_KIND);
CatalogStoreFactory catalogStoreFactory =
FactoryUtil.discoverFactory(classLoader, CatalogStoreFactory.class, identifier);
return catalogStoreFactory;
}
/**
* Build a {@link CatalogStoreFactory.Context} for opening the {@link CatalogStoreFactory}.
*
* <p>The configuration format should be as follows:
*
* <pre>{@code
* table.catalog-store.kind: {identifier}
* table.catalog-store.{identifier}.{param1}: xxx
* table.catalog-store.{identifier}.{param2}: xxx
* }</pre>
*/
public static CatalogStoreFactory.Context buildCatalogStoreFactoryContext(
Configuration configuration, ClassLoader classLoader) {
String identifier = configuration.get(CommonCatalogOptions.TABLE_CATALOG_STORE_KIND);
String catalogStoreOptionPrefix =
CommonCatalogOptions.TABLE_CATALOG_STORE_OPTION_PREFIX + identifier + ".";
Map<String, String> options =
new DelegatingConfiguration(configuration, catalogStoreOptionPrefix).toMap();
CatalogStoreFactory.Context context =
new FactoryUtil.DefaultCatalogStoreContext(options, configuration, classLoader);
return context;
}
}
| TableFactoryUtil |
java | apache__rocketmq | test/src/test/java/org/apache/rocketmq/test/container/AddAndRemoveBrokerIT.java | {
"start": 1426,
"end": 3172
} | class ____ extends ContainerIntegrationTestBase {
private static BrokerContainer brokerContainer4;
@BeforeClass
public static void beforeClass() {
brokerContainer4 = createAndStartBrokerContainer(nsAddr);
}
@AfterClass
public static void afterClass() {
brokerContainer4.shutdown();
}
@Test
public void addBrokerTest()
throws InterruptedException, RemotingTimeoutException, RemotingSendRequestException,
RemotingConnectException {
String remark = null;
int code = 0;
try {
defaultMQAdminExt.addBrokerToContainer(brokerContainer4.getBrokerContainerAddr(), "");
} catch (MQBrokerException e) {
code = e.getResponseCode();
remark = e.getErrorMessage();
}
assertThat(code).isEqualTo(ResponseCode.SYSTEM_ERROR);
assertThat(remark).isEqualTo("addBroker properties empty");
}
@Test
public void removeBrokerTest()
throws InterruptedException, RemotingTimeoutException, RemotingSendRequestException, RemotingConnectException {
boolean exceptionCaught = false;
try {
defaultMQAdminExt.removeBrokerFromContainer(brokerContainer1.getBrokerContainerAddr(),
master3With3Replicas.getBrokerConfig().getBrokerClusterName(),
master3With3Replicas.getBrokerConfig().getBrokerName(), 1);
} catch (MQBrokerException e) {
exceptionCaught = true;
}
assertThat(exceptionCaught).isFalse();
assertThat(brokerContainer1.getSlaveBrokers().size()).isEqualTo(1);
createAndAddSlave(1, brokerContainer1, master3With3Replicas);
awaitUntilSlaveOK();
}
}
| AddAndRemoveBrokerIT |
java | apache__logging-log4j2 | log4j-api/src/main/java/org/apache/logging/log4j/spi/LoggerContextFactory.java | {
"start": 1966,
"end": 2541
} | class ____ of the caller.
* @param loader The ClassLoader to use or null.
* @param currentContext If true returns the current Context, if false returns the Context appropriate
* for the caller if a more appropriate Context can be determined.
* @return true if a LoggerContext has been installed, false otherwise.
* @since 2.13.0
*/
default boolean hasContext(String fqcn, ClassLoader loader, boolean currentContext) {
return false;
}
/**
* Creates a {@link LoggerContext}.
*
* @param fqcn The fully qualified | name |
java | apache__camel | components/camel-sjms/src/main/java/org/apache/camel/component/sjms/TransactionOnCompletion.java | {
"start": 1238,
"end": 2350
} | class ____ extends SynchronizationAdapter {
// TODO: close session, connection
private final Session session;
private final Message message;
public TransactionOnCompletion(Session session, Message message) {
this.session = session;
this.message = message;
}
@Override
public void onDone(Exchange exchange) {
try {
if (exchange.isFailed() || exchange.isRollbackOnly()) {
rollbackIfNeeded(session);
} else {
commitIfNeeded(session, message);
}
} catch (Exception e) {
// ignore
} finally {
closeSession(session);
}
}
@Override
public boolean equals(Object o) {
if (!(o instanceof TransactionOnCompletion)) {
return false;
}
TransactionOnCompletion that = (TransactionOnCompletion) o;
return session == that.session && message == that.message;
}
@Override
public int hashCode() {
return Objects.hash(super.hashCode(), session, message);
}
}
| TransactionOnCompletion |
java | apache__kafka | metadata/src/main/java/org/apache/kafka/metadata/publisher/ScramPublisher.java | {
"start": 1201,
"end": 2812
} | class ____ implements MetadataPublisher {
private final int nodeId;
private final FaultHandler faultHandler;
private final String nodeType;
private final CredentialProvider credentialProvider;
public ScramPublisher(int nodeId, FaultHandler faultHandler, String nodeType, CredentialProvider credentialProvider) {
this.nodeId = nodeId;
this.faultHandler = faultHandler;
this.nodeType = nodeType;
this.credentialProvider = credentialProvider;
}
@Override
public final String name() {
return "ScramPublisher " + nodeType + " id=" + nodeId;
}
@Override
public void onMetadataUpdate(MetadataDelta delta, MetadataImage newImage, LoaderManifest manifest) {
try {
// Apply changes to SCRAM credentials.
ScramDelta scramDelta = delta.scramDelta();
if (scramDelta != null) {
scramDelta.changes().forEach((mechanism, userChanges) -> {
userChanges.forEach((userName, change) -> {
if (change.isPresent())
credentialProvider.updateCredential(mechanism, userName, change.get().toCredential());
else
credentialProvider.removeCredentials(mechanism, userName);
});
});
}
} catch (Throwable t) {
faultHandler.handleFault("Uncaught exception while publishing SCRAM changes from MetadataDelta up to "
+ newImage.highestOffsetAndEpoch().offset(), t);
}
}
}
| ScramPublisher |
java | apache__flink | flink-core-api/src/main/java/org/apache/flink/util/function/BiFunctionWithException.java | {
"start": 913,
"end": 1165
} | interface ____ can throw exceptions.
*
* @param <T> type of the first parameter
* @param <U> type of the second parameter
* @param <R> type of the return type
* @param <E> type of the exception which can be thrown
*/
@FunctionalInterface
public | which |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/cache/interceptor/CacheAspectSupport.java | {
"start": 12072,
"end": 29433
} | class ____ method is on
* @return log message identifying this method
* @see org.springframework.util.ClassUtils#getQualifiedMethodName
*/
protected String methodIdentification(Method method, Class<?> targetClass) {
Method specificMethod = ClassUtils.getMostSpecificMethod(method, targetClass);
return ClassUtils.getQualifiedMethodName(specificMethod);
}
protected Collection<? extends Cache> getCaches(
CacheOperationInvocationContext<CacheOperation> context, CacheResolver cacheResolver) {
Collection<? extends Cache> caches = cacheResolver.resolveCaches(context);
if (caches.isEmpty()) {
throw new IllegalStateException("No cache could be resolved for '" +
context.getOperation() + "' using resolver '" + cacheResolver +
"'. At least one cache should be provided per cache operation.");
}
return caches;
}
protected CacheOperationContext getOperationContext(
CacheOperation operation, Method method, @Nullable Object[] args, Object target, Class<?> targetClass) {
CacheOperationMetadata metadata = getCacheOperationMetadata(operation, method, targetClass);
return new CacheOperationContext(metadata, args, target);
}
/**
* Return the {@link CacheOperationMetadata} for the specified operation.
* <p>Resolve the {@link CacheResolver} and the {@link KeyGenerator} to be
* used for the operation.
* @param operation the operation
* @param method the method on which the operation is invoked
* @param targetClass the target type
* @return the resolved metadata for the operation
*/
protected CacheOperationMetadata getCacheOperationMetadata(
CacheOperation operation, Method method, Class<?> targetClass) {
CacheOperationCacheKey cacheKey = new CacheOperationCacheKey(operation, method, targetClass);
CacheOperationMetadata metadata = this.metadataCache.get(cacheKey);
if (metadata == null) {
KeyGenerator operationKeyGenerator;
if (StringUtils.hasText(operation.getKeyGenerator())) {
operationKeyGenerator = getBean(operation.getKeyGenerator(), KeyGenerator.class);
}
else {
operationKeyGenerator = getKeyGenerator();
}
CacheResolver operationCacheResolver;
if (StringUtils.hasText(operation.getCacheResolver())) {
operationCacheResolver = getBean(operation.getCacheResolver(), CacheResolver.class);
}
else if (StringUtils.hasText(operation.getCacheManager())) {
CacheManager cacheManager = getBean(operation.getCacheManager(), CacheManager.class);
operationCacheResolver = new SimpleCacheResolver(cacheManager);
}
else {
operationCacheResolver = getCacheResolver();
Assert.state(operationCacheResolver != null, "No CacheResolver/CacheManager set");
}
metadata = new CacheOperationMetadata(operation, method, targetClass,
operationKeyGenerator, operationCacheResolver);
this.metadataCache.put(cacheKey, metadata);
}
return metadata;
}
/**
* Retrieve a bean with the specified name and type.
* Used to resolve services that are referenced by name in a {@link CacheOperation}.
* @param name the name of the bean, as defined by the cache operation
* @param serviceType the type expected by the operation's service reference
* @return the bean matching the expected type, qualified by the given name
* @throws org.springframework.beans.factory.NoSuchBeanDefinitionException if such bean does not exist
* @see CacheOperation#getKeyGenerator()
* @see CacheOperation#getCacheManager()
* @see CacheOperation#getCacheResolver()
*/
protected <T> T getBean(String name, Class<T> serviceType) {
if (this.beanFactory == null) {
throw new IllegalStateException(
"BeanFactory must be set on cache aspect for " + serviceType.getSimpleName() + " retrieval");
}
return BeanFactoryAnnotationUtils.qualifiedBeanOfType(this.beanFactory, serviceType, name);
}
	/**
	 * Clear the cached operation metadata and the cached expression evaluation state,
	 * forcing re-resolution on the next intercepted cache operation.
	 */
	protected void clearMetadataCache() {
		this.metadataCache.clear();
		this.evaluator.clear();
	}
protected @Nullable Object execute(CacheOperationInvoker invoker, Object target, Method method, @Nullable Object[] args) {
// Check whether aspect is enabled (to cope with cases where the AJ is pulled in automatically)
if (this.initialized) {
Class<?> targetClass = AopProxyUtils.ultimateTargetClass(target);
CacheOperationSource cacheOperationSource = getCacheOperationSource();
if (cacheOperationSource != null) {
Collection<CacheOperation> operations = cacheOperationSource.getCacheOperations(method, targetClass);
if (!CollectionUtils.isEmpty(operations)) {
return execute(invoker, method,
new CacheOperationContexts(operations, method, args, target, targetClass));
}
}
}
return invokeOperation(invoker);
}
/**
* Execute the underlying operation (typically in case of cache miss) and return
* the result of the invocation. If an exception occurs it will be wrapped in a
* {@link CacheOperationInvoker.ThrowableWrapper}: the exception can be handled
* or modified but it <em>must</em> be wrapped in a
* {@link CacheOperationInvoker.ThrowableWrapper} as well.
* @param invoker the invoker handling the operation being cached
* @return the result of the invocation
* @see CacheOperationInvoker#invoke()
*/
	protected @Nullable Object invokeOperation(CacheOperationInvoker invoker) {
		// Straight delegation; any exception surfaces as a ThrowableWrapper per the contract above.
		return invoker.invoke();
	}
	private @Nullable Object execute(CacheOperationInvoker invoker, Method method, CacheOperationContexts contexts) {
		if (contexts.isSynchronized()) {
			// Special handling of synchronized invocation
			return executeSynchronized(invoker, method, contexts);
		}
		// Process any early evictions
		processCacheEvicts(contexts.get(CacheEvictOperation.class), true,
				CacheOperationExpressionEvaluator.NO_RESULT);
		// Check if we have a cached value matching the conditions
		Object cacheHit = findCachedValue(invoker, method, contexts);
		if (cacheHit == null || cacheHit instanceof Cache.ValueWrapper) {
			return evaluate(cacheHit, invoker, method, contexts);
		}
		// Any other hit (e.g. an async/reactive adapter produced by findInCaches) is
		// already the final return value and is passed through untouched.
		return cacheHit;
	}
	@SuppressWarnings({ "unchecked", "rawtypes" })
	private @Nullable Object executeSynchronized(CacheOperationInvoker invoker, Method method, CacheOperationContexts contexts) {
		// sync=true implies a single @Cacheable operation on a single cache
		// (presumably validated upstream — both iterators are consumed unchecked).
		CacheOperationContext context = contexts.get(CacheableOperation.class).iterator().next();
		if (isConditionPassing(context, CacheOperationExpressionEvaluator.NO_RESULT)) {
			Object key = generateKey(context, CacheOperationExpressionEvaluator.NO_RESULT);
			Cache cache = context.getCaches().iterator().next();
			if (CompletableFuture.class.isAssignableFrom(method.getReturnType())) {
				// Async path: retrieve-or-compute via the cache's CompletableFuture API.
				AtomicBoolean invokeFailure = new AtomicBoolean(false);
				CompletableFuture<?> result = doRetrieve(cache, key,
						() -> {
							CompletableFuture<?> invokeResult = ((CompletableFuture<?>) invokeOperation(invoker));
							if (invokeResult == null) {
								throw new IllegalStateException("Returned CompletableFuture must not be null: " + method);
							}
							return invokeResult.exceptionallyCompose(ex -> {
								// Record that the failure came from the user method, not the cache.
								invokeFailure.set(true);
								return CompletableFuture.failedFuture(ex);
							});
						});
				return result.exceptionallyCompose(ex -> {
					if (!(ex instanceof RuntimeException rex)) {
						return CompletableFuture.failedFuture(ex);
					}
					try {
						getErrorHandler().handleCacheGetError(rex, cache, key);
						if (invokeFailure.get()) {
							// The user method itself failed: propagate, do not re-invoke.
							return CompletableFuture.failedFuture(ex);
						}
						// Cache access failed before the method ran: fall back to direct invocation.
						return (CompletableFuture) invokeOperation(invoker);
					}
					catch (Throwable ex2) {
						return CompletableFuture.failedFuture(ex2);
					}
				});
			}
			if (this.reactiveCachingHandler != null) {
				// Reactive return types are delegated to the reactive caching handler, if it applies.
				Object returnValue = this.reactiveCachingHandler.executeSynchronized(invoker, method, cache, key);
				if (returnValue != ReactiveCachingHandler.NOT_HANDLED) {
					return returnValue;
				}
			}
			try {
				// Imperative path: Cache#get(key, valueLoader) provides the synchronized compute.
				return wrapCacheValue(method, doGet(cache, key, () -> unwrapReturnValue(invokeOperation(invoker))));
			}
			catch (Cache.ValueRetrievalException ex) {
				// Directly propagate ThrowableWrapper from the invoker,
				// or potentially also an IllegalArgumentException etc.
				ReflectionUtils.rethrowRuntimeException(ex.getCause());
				// Never reached
				return null;
			}
		}
		else {
			// No caching required, just call the underlying method
			return invokeOperation(invoker);
		}
	}
	/**
	 * Find a cached value only for {@link CacheableOperation} that passes the condition.
	 * @param invoker the invoker for a potential late re-invocation
	 * @param method the method being cached
	 * @param contexts the cacheable operations
	 * @return a {@link Cache.ValueWrapper} holding the cached value (or, for async
	 * caches, a {@link java.util.concurrent.CompletableFuture} produced downstream),
	 * or {@code null} if none is found
	 */
	private @Nullable Object findCachedValue(CacheOperationInvoker invoker, Method method, CacheOperationContexts contexts) {
		for (CacheOperationContext context : contexts.get(CacheableOperation.class)) {
			if (isConditionPassing(context, CacheOperationExpressionEvaluator.NO_RESULT)) {
				Object key = generateKey(context, CacheOperationExpressionEvaluator.NO_RESULT);
				Object cached = findInCaches(context, key, invoker, method, contexts);
				if (cached != null) {
					if (logger.isTraceEnabled()) {
						logger.trace("Cache entry for key '" + key + "' found in cache(s) " + context.getCacheNames());
					}
					// First hit wins across all @Cacheable operations.
					return cached;
				}
				else {
					if (logger.isTraceEnabled()) {
						logger.trace("No cache entry for key '" + key + "' in cache(s) " + context.getCacheNames());
					}
				}
			}
		}
		return null;
	}
	private @Nullable Object findInCaches(CacheOperationContext context, Object key,
			CacheOperationInvoker invoker, Method method, CacheOperationContexts contexts) {
		for (Cache cache : context.getCaches()) {
			if (CompletableFuture.class.isAssignableFrom(context.getMethod().getReturnType())) {
				// Async path: CompletableFuture-returning method, look up via Cache#retrieve.
				CompletableFuture<?> result = doRetrieve(cache, key);
				if (result != null) {
					return result.exceptionallyCompose(ex -> {
						if (!(ex instanceof RuntimeException rex)) {
							return CompletableFuture.failedFuture(ex);
						}
						try {
							// A cache-access failure is degraded to a miss (null) ...
							getErrorHandler().handleCacheGetError(rex, cache, key);
							return CompletableFuture.completedFuture(null);
						}
						catch (Throwable ex2) {
							// ... unless the error handler itself rethrows.
							return CompletableFuture.failedFuture(ex2);
						}
					}).thenCompose(value -> (CompletableFuture<?>) evaluate(
							(value != null ? CompletableFuture.completedFuture(unwrapCacheValue(value)) : null),
							invoker, method, contexts));
				}
				else {
					// null future: no async result from this cache — try the next one.
					// (Presumably covers caches without retrieve support as well — TODO confirm.)
					continue;
				}
			}
			if (this.reactiveCachingHandler != null) {
				// Reactive return types are handled by the reactive adapter, if applicable.
				Object returnValue = this.reactiveCachingHandler.findInCaches(
						context, cache, key, invoker, method, contexts);
				if (returnValue != ReactiveCachingHandler.NOT_HANDLED) {
					return returnValue;
				}
			}
			// Imperative lookup: a non-null ValueWrapper signals a hit (possibly wrapping null).
			Cache.ValueWrapper result = doGet(cache, key);
			if (result != null) {
				return result;
			}
		}
		return null;
	}
	private @Nullable Object evaluate(@Nullable Object cacheHit, CacheOperationInvoker invoker, Method method,
			CacheOperationContexts contexts) {
		// Re-invocation in reactive pipeline after late cache hit determination?
		if (contexts.processed) {
			return cacheHit;
		}
		Object cacheValue;
		Object returnValue;
		if (cacheHit != null && !hasCachePut(contexts)) {
			// If there are no put requests, just use the cache hit
			cacheValue = unwrapCacheValue(cacheHit);
			returnValue = wrapCacheValue(method, cacheValue);
		}
		else {
			// Invoke the method if we don't have a cache hit
			returnValue = invokeOperation(invoker);
			cacheValue = unwrapReturnValue(returnValue);
		}
		// Collect puts from any @Cacheable miss, if no cached value is found
		List<CachePutRequest> cachePutRequests = new ArrayList<>(1);
		if (cacheHit == null) {
			collectPutRequests(contexts.get(CacheableOperation.class), cacheValue, cachePutRequests);
		}
		// Collect any explicit @CachePuts
		collectPutRequests(contexts.get(CachePutOperation.class), cacheValue, cachePutRequests);
		// Process any collected put requests, either from @CachePut or a @Cacheable miss
		for (CachePutRequest cachePutRequest : cachePutRequests) {
			// A non-null result from apply() overrides the value to return.
			Object returnOverride = cachePutRequest.apply(cacheValue);
			if (returnOverride != null) {
				returnValue = returnOverride;
			}
		}
		// Process any late evictions
		Object returnOverride = processCacheEvicts(
				contexts.get(CacheEvictOperation.class), false, returnValue);
		if (returnOverride != null) {
			returnValue = returnOverride;
		}
		// Mark as processed for re-invocation after late cache hit determination
		contexts.processed = true;
		return returnValue;
	}
private @Nullable Object unwrapCacheValue(@Nullable Object cacheValue) {
return (cacheValue instanceof Cache.ValueWrapper wrapper ? wrapper.get() : cacheValue);
}
private @Nullable Object wrapCacheValue(Method method, @Nullable Object cacheValue) {
if (method.getReturnType() == Optional.class &&
(cacheValue == null || cacheValue.getClass() != Optional.class)) {
return Optional.ofNullable(cacheValue);
}
return cacheValue;
}
	private @Nullable Object unwrapReturnValue(@Nullable Object returnValue) {
		// Cache the Optional's payload (or null), never the Optional itself.
		return ObjectUtils.unwrapOptional(returnValue);
	}
private boolean hasCachePut(CacheOperationContexts contexts) {
// Evaluate the conditions *without* the result object because we don't have it yet...
Collection<CacheOperationContext> cachePutContexts = contexts.get(CachePutOperation.class);
Collection<CacheOperationContext> excluded = new ArrayList<>(1);
for (CacheOperationContext context : cachePutContexts) {
try {
if (!context.isConditionPassing(CacheOperationExpressionEvaluator.RESULT_UNAVAILABLE)) {
excluded.add(context);
}
}
catch (VariableNotAvailableException ex) {
// Ignoring failure due to missing result, consider the cache put has to proceed
}
}
// Check if all puts have been excluded by condition
return (cachePutContexts.size() != excluded.size());
}
	private @Nullable Object processCacheEvicts(Collection<CacheOperationContext> contexts, boolean beforeInvocation,
			@Nullable Object result) {
		if (contexts.isEmpty()) {
			return null;
		}
		// Only evict operations matching the requested phase (before vs. after invocation).
		List<CacheOperationContext> applicable = contexts.stream()
				.filter(context -> (context.metadata.operation instanceof CacheEvictOperation evict &&
						beforeInvocation == evict.isBeforeInvocation())).toList();
		if (applicable.isEmpty()) {
			return null;
		}
		if (result instanceof CompletableFuture<?> future) {
			// Async result: defer eviction until (and only upon) successful completion.
			return future.whenComplete((value, ex) -> {
				if (ex == null) {
					performCacheEvicts(applicable, value);
				}
			});
		}
		if (this.reactiveCachingHandler != null) {
			Object returnValue = this.reactiveCachingHandler.processCacheEvicts(applicable, result);
			if (returnValue != ReactiveCachingHandler.NOT_HANDLED) {
				return returnValue;
			}
		}
		// Synchronous result: evict immediately; null means "no return value override".
		performCacheEvicts(applicable, result);
		return null;
	}
	private void performCacheEvicts(List<CacheOperationContext> contexts, @Nullable Object result) {
		for (CacheOperationContext context : contexts) {
			CacheEvictOperation operation = (CacheEvictOperation) context.metadata.operation;
			if (isConditionPassing(context, result)) {
				// Reuse a key generated earlier in this invocation if available;
				// otherwise generate lazily (only when a keyed evict actually needs it).
				Object key = context.getGeneratedKey();
				for (Cache cache : context.getCaches()) {
					if (operation.isCacheWide()) {
						logInvalidating(context, operation, null);
						doClear(cache, operation.isBeforeInvocation());
					}
					else {
						if (key == null) {
							key = generateKey(context, result);
						}
						logInvalidating(context, operation, key);
						doEvict(cache, key, operation.isBeforeInvocation());
					}
				}
			}
		}
	}
private void logInvalidating(CacheOperationContext context, CacheEvictOperation operation, @Nullable Object key) {
if (logger.isTraceEnabled()) {
logger.trace("Invalidating " + (key != null ? "cache key [" + key + "]" : "entire cache") +
" for operation " + operation + " on method " + context.metadata.method);
}
}
/**
* Collect a {@link CachePutRequest} for every {@link CacheOperation}
* using the specified result value.
* @param contexts the contexts to handle
* @param result the result value
* @param putRequests the collection to update
*/
private void collectPutRequests(Collection<CacheOperationContext> contexts,
@Nullable Object result, Collection<CachePutRequest> putRequests) {
for (CacheOperationContext context : contexts) {
if (isConditionPassing(context, result)) {
putRequests.add(new CachePutRequest(context));
}
}
}
private boolean isConditionPassing(CacheOperationContext context, @Nullable Object result) {
boolean passing = context.isConditionPassing(result);
if (!passing && logger.isTraceEnabled()) {
logger.trace("Cache condition failed on method " + context.metadata.method +
" for operation " + context.metadata.operation);
}
return passing;
}
	private Object generateKey(CacheOperationContext context, @Nullable Object result) {
		Object key = context.generateKey(result);
		if (key == null) {
			// A null key is almost always a configuration problem (e.g. SpEL referencing
			// parameter names compiled without '-parameters').
			throw new IllegalArgumentException("""
					Null key returned for cache operation [%s]. If you are using named parameters, \
					ensure that the compiler uses the '-parameters' flag."""
					.formatted(context.metadata.operation));
		}
		if (logger.isTraceEnabled()) {
			logger.trace("Computed cache key '" + key + "' for operation " + context.metadata.operation);
		}
		return key;
	}
private | the |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/adaptive/AdaptiveSchedulerTest.java | {
"start": 9260,
"end": 113695
} | class ____ extends AdaptiveSchedulerTestBase {
private static final Logger LOG = LoggerFactory.getLogger(AdaptiveSchedulerTest.class);
    /** A newly built scheduler must start out in the {@code Created} state. */
    @Test
    void testInitialState() throws Exception {
        scheduler =
                new AdaptiveSchedulerBuilder(
                                createJobGraph(),
                                singleThreadMainThreadExecutor,
                                EXECUTOR_RESOURCE.getExecutor())
                        .build();
        assertThat(scheduler.getState()).isInstanceOf(Created.class);
    }
    /** Checkpointing settings must carry over into the archived graph when checkpointing is enabled. */
    @Test
    void testArchivedCheckpointingSettingsNotNullIfCheckpointingIsEnabled() throws Exception {
        final JobGraph jobGraph = createJobGraph();
        jobGraph.setSnapshotSettings(
                new JobCheckpointingSettings(
                        CheckpointCoordinatorConfiguration.builder().build(), null));
        scheduler =
                new AdaptiveSchedulerBuilder(
                                jobGraph,
                                singleThreadMainThreadExecutor,
                                EXECUTOR_RESOURCE.getExecutor())
                        .build();
        final ArchivedExecutionGraph archivedExecutionGraph =
                scheduler.getArchivedExecutionGraph(JobStatus.INITIALIZING, null);
        ArchivedExecutionGraphTest.assertContainsCheckpointSettings(archivedExecutionGraph);
    }
    /**
     * The archived graph must expose the job vertices with the configured parallelism and the
     * scheduler-derived max parallelism.
     */
    @Test
    void testArchivedJobVerticesPresent() throws Exception {
        final JobGraph jobGraph = createJobGraph();
        jobGraph.setSnapshotSettings(
                new JobCheckpointingSettings(
                        CheckpointCoordinatorConfiguration.builder().build(), null));
        scheduler =
                new AdaptiveSchedulerBuilder(
                                jobGraph,
                                singleThreadMainThreadExecutor,
                                EXECUTOR_RESOURCE.getExecutor())
                        .build();
        final ArchivedExecutionGraph archivedExecutionGraph =
                scheduler.getArchivedExecutionGraph(JobStatus.INITIALIZING, null);
        ArchivedExecutionJobVertex jobVertex =
                archivedExecutionGraph.getJobVertex(JOB_VERTEX.getID());
        assertThat(jobVertex)
                .isNotNull()
                .satisfies(
                        archived -> {
                            assertThat(archived.getParallelism())
                                    .isEqualTo(JOB_VERTEX.getParallelism());
                            // JOB_VERTEX.maxP == -1, but we want the actual maxP determined by the
                            // scheduler
                            assertThat(archived.getMaxParallelism()).isEqualTo(128);
                        });
        ArchivedExecutionGraphTest.assertContainsCheckpointSettings(archivedExecutionGraph);
    }
    /** {@code isState} must match by identity: the current state only, never an unrelated one. */
    @Test
    void testIsState() throws Exception {
        scheduler =
                new AdaptiveSchedulerBuilder(
                                createJobGraph(),
                                singleThreadMainThreadExecutor,
                                EXECUTOR_RESOURCE.getExecutor())
                        .build();
        final State state = scheduler.getState();
        assertThat(scheduler.isState(state)).isTrue();
        assertThat(scheduler.isState(new DummyState(scheduler))).isFalse();
    }
    /** {@code runIfState} must execute the action when the expected state is the current one. */
    @Test
    void testRunIfState() throws Exception {
        scheduler =
                new AdaptiveSchedulerBuilder(
                                createJobGraph(),
                                singleThreadMainThreadExecutor,
                                EXECUTOR_RESOURCE.getExecutor())
                        .build();
        AtomicBoolean ran = new AtomicBoolean(false);
        scheduler.runIfState(scheduler.getState(), () -> ran.set(true));
        assertThat(ran.get()).isTrue();
    }
    /** {@code runIfState} must NOT execute the action when the expected state does not match. */
    @Test
    void testRunIfStateWithStateMismatch() throws Exception {
        scheduler =
                new AdaptiveSchedulerBuilder(
                                createJobGraph(),
                                singleThreadMainThreadExecutor,
                                EXECUTOR_RESOURCE.getExecutor())
                        .build();
        AtomicBoolean ran = new AtomicBoolean(false);
        scheduler.runIfState(new DummyState(scheduler), () -> ran.set(true));
        assertThat(ran.get()).isFalse();
    }
    /** With no free slots at all, the desired-resources check must fail. */
    @Test
    void testHasEnoughResourcesReturnsFalseIfUnsatisfied() {
        final ResourceCounter resourceRequirement =
                ResourceCounter.withResource(ResourceProfile.UNKNOWN, 1);
        assertThat(
                        AdaptiveScheduler.hasDesiredResources(
                                resourceRequirement, Collections.emptyList()))
                .isFalse();
    }
    /** With exactly matching free slots available, the desired-resources check must pass. */
    @Test
    void testHasEnoughResourcesReturnsTrueIfSatisfied() {
        final ResourceCounter resourceRequirement =
                ResourceCounter.withResource(ResourceProfile.UNKNOWN, 1);
        final Collection<TestingSlot> freeSlots =
                createSlotsForResourceRequirements(resourceRequirement);
        assertThat(AdaptiveScheduler.hasDesiredResources(resourceRequirement, freeSlots)).isTrue();
    }
private Collection<TestingSlot> createSlotsForResourceRequirements(
ResourceCounter resourceRequirements) {
final Collection<TestingSlot> slotInfos = new ArrayList<>();
for (Map.Entry<ResourceProfile, Integer> resourceProfileCount :
resourceRequirements.getResourcesWithCount()) {
for (int i = 0; i < resourceProfileCount.getValue(); i++) {
slotInfos.add(new TestingSlot(resourceProfileCount.getKey()));
}
}
return slotInfos;
}
    /**
     * Slots whose profile does not match a requirement exactly must still count towards
     * UNKNOWN-profile requirements.
     */
    @Test
    void testHasEnoughResourcesUsesUnmatchedSlotsAsUnknown() {
        final int numRequiredSlots = 1;
        final ResourceCounter requiredResources =
                ResourceCounter.withResource(ResourceProfile.UNKNOWN, numRequiredSlots);
        final ResourceCounter providedResources =
                ResourceCounter.withResource(
                        ResourceProfile.newBuilder().setCpuCores(1).build(), numRequiredSlots);
        final Collection<TestingSlot> freeSlots =
                createSlotsForResourceRequirements(providedResources);
        assertThat(AdaptiveScheduler.hasDesiredResources(requiredResources, freeSlots)).isTrue();
    }
    /**
     * With fewer slots than the configured parallelism, the scheduler must still deploy,
     * scaling the vertex down to the number of available slots.
     */
    @Test
    void testExecutionGraphGenerationWithAvailableResources() throws Exception {
        final JobGraph jobGraph = createJobGraph();
        final DefaultDeclarativeSlotPool declarativeSlotPool =
                createDeclarativeSlotPool(jobGraph.getJobID(), singleThreadMainThreadExecutor);
        final Configuration configuration = new Configuration();
        // Tiny wait timeout: proceed with whatever resources arrive almost immediately.
        configuration.set(
                JobManagerOptions.SCHEDULER_SUBMISSION_RESOURCE_WAIT_TIMEOUT,
                Duration.ofMillis(1L));
        scheduler =
                new AdaptiveSchedulerBuilder(
                                jobGraph,
                                singleThreadMainThreadExecutor,
                                EXECUTOR_RESOURCE.getExecutor())
                        .setDeclarativeSlotPool(declarativeSlotPool)
                        .setJobMasterConfiguration(configuration)
                        .build();
        final int numAvailableSlots = 2;
        final SubmissionBufferingTaskManagerGateway taskManagerGateway =
                new SubmissionBufferingTaskManagerGateway(numAvailableSlots);
        singleThreadMainThreadExecutor.execute(
                () -> {
                    scheduler.startScheduling();
                    offerSlots(
                            declarativeSlotPool,
                            createSlotOffersForResourceRequirements(
                                    ResourceCounter.withResource(
                                            ResourceProfile.UNKNOWN, numAvailableSlots)),
                            taskManagerGateway);
                });
        // wait for all tasks to be submitted
        taskManagerGateway.waitForSubmissions(numAvailableSlots);
        final ArchivedExecutionGraph executionGraph =
                CompletableFuture.supplyAsync(
                                () -> scheduler.requestJob().getArchivedExecutionGraph(),
                                singleThreadMainThreadExecutor)
                        .join();
        assertThat(executionGraph.getJobVertex(JOB_VERTEX.getID()).getParallelism())
                .isEqualTo(numAvailableSlots);
        assertThat(executionGraph.getPlan().getNodes().size()).isOne();
    }
    /**
     * The initialization timestamp passed to the builder must be reflected as the
     * INITIALIZING status timestamp of the generated execution graph.
     */
    @Test
    void testExecutionGraphGenerationSetsInitializationTimestamp() throws Exception {
        final long initializationTimestamp = 42L;
        final JobGraph jobGraph = createJobGraph();
        final DefaultDeclarativeSlotPool declarativeSlotPool =
                createDeclarativeSlotPool(jobGraph.getJobID(), singleThreadMainThreadExecutor);
        final Configuration configuration = new Configuration();
        configuration.set(
                JobManagerOptions.SCHEDULER_SUBMISSION_RESOURCE_WAIT_TIMEOUT,
                Duration.ofMillis(1L));
        scheduler =
                new AdaptiveSchedulerBuilder(
                                jobGraph,
                                singleThreadMainThreadExecutor,
                                EXECUTOR_RESOURCE.getExecutor())
                        .setInitializationTimestamp(initializationTimestamp)
                        .setDeclarativeSlotPool(declarativeSlotPool)
                        .setJobMasterConfiguration(configuration)
                        .build();
        final SubmissionBufferingTaskManagerGateway taskManagerGateway =
                new SubmissionBufferingTaskManagerGateway(PARALLELISM);
        singleThreadMainThreadExecutor.execute(
                () -> {
                    scheduler.startScheduling();
                    offerSlots(
                            declarativeSlotPool,
                            createSlotOffersForResourceRequirements(
                                    ResourceCounter.withResource(
                                            ResourceProfile.UNKNOWN, PARALLELISM)),
                            taskManagerGateway);
                });
        // Wait for just the first submission to indicate the execution graph is ready
        taskManagerGateway.waitForSubmissions(1);
        final ArchivedExecutionGraph executionGraph =
                CompletableFuture.supplyAsync(
                                () -> scheduler.requestJob().getArchivedExecutionGraph(),
                                singleThreadMainThreadExecutor)
                        .join();
        assertThat(executionGraph.getStatusTimestamp(JobStatus.INITIALIZING))
                .isEqualTo(initializationTimestamp);
    }
    /** The initialization timestamp must be forwarded even before scheduling starts. */
    @Test
    void testInitializationTimestampForwarding() throws Exception {
        final long expectedInitializationTimestamp = 42L;
        scheduler =
                new AdaptiveSchedulerBuilder(
                                createJobGraph(),
                                singleThreadMainThreadExecutor,
                                EXECUTOR_RESOURCE.getExecutor())
                        .setInitializationTimestamp(expectedInitializationTimestamp)
                        .build();
        final long initializationTimestamp =
                scheduler
                        .requestJob()
                        .getArchivedExecutionGraph()
                        .getStatusTimestamp(JobStatus.INITIALIZING);
        assertThat(initializationTimestamp).isEqualTo(expectedInitializationTimestamp);
    }
    /** Exceptions thrown from state actions must reach the configured fatal error handler. */
    @Test
    void testFatalErrorsForwardedToFatalErrorHandler() throws Exception {
        final TestingFatalErrorHandler fatalErrorHandler = new TestingFatalErrorHandler();
        scheduler =
                new AdaptiveSchedulerBuilder(
                                createJobGraph(),
                                singleThreadMainThreadExecutor,
                                EXECUTOR_RESOURCE.getExecutor())
                        .setFatalErrorHandler(fatalErrorHandler)
                        .build();
        final RuntimeException exception = new RuntimeException();
        scheduler.runIfState(
                scheduler.getState(),
                () -> {
                    throw exception;
                });
        assertThat(fatalErrorHandler.getException()).isEqualTo(exception);
    }
    /**
     * Starting scheduling must arm a resource-wait timeout task with the configured delay.
     * NOTE(review): this scheduler is a local variable (not the {@code scheduler} field),
     * so it is presumably not closed by the shared teardown — confirm this is intentional.
     */
    @Test
    void testResourceTimeout() throws Exception {
        final ManuallyTriggeredComponentMainThreadExecutor mainThreadExecutor =
                new ManuallyTriggeredComponentMainThreadExecutor(Thread.currentThread());
        final Duration resourceTimeout = Duration.ofMinutes(1234);
        final Configuration configuration = new Configuration();
        configuration.set(
                JobManagerOptions.SCHEDULER_SUBMISSION_RESOURCE_WAIT_TIMEOUT, resourceTimeout);
        final AdaptiveScheduler scheduler =
                new AdaptiveSchedulerBuilder(
                                createJobGraph(),
                                mainThreadExecutor,
                                EXECUTOR_RESOURCE.getExecutor())
                        .setJobMasterConfiguration(configuration)
                        .build();
        scheduler.startScheduling();
        // check whether some task was scheduled with the expected timeout
        // this is technically not really safe, but the chosen timeout value
        // is odd enough that it realistically won't cause issues.
        // With this approach we don't have to make assumption as to how many
        // tasks are being scheduled.
        final boolean b =
                mainThreadExecutor.getActiveNonPeriodicScheduledTask().stream()
                        .anyMatch(
                                scheduledTask ->
                                        scheduledTask.getDelay(TimeUnit.MINUTES)
                                                == resourceTimeout.toMinutes());
        assertThat(b).isTrue();
    }
    /**
     * A rescale triggered by newly offered slots must bump both the numRescales and
     * numRestarts metrics exactly once.
     * NOTE(review): the slot pool is built on {@code mainThreadExecutor} while the scheduler
     * runs on {@code singleThreadMainThreadExecutor} — confirm these refer to the same
     * executor in the test base, otherwise this mixes thread contexts.
     */
    @Test
    void testNumRescalesAndStartsMetricForRescale() throws Exception {
        final CompletableFuture<Gauge<Long>> numRescalesMetricFuture = new CompletableFuture<>();
        final CompletableFuture<Gauge<Long>> numRestartsMetricFuture = new CompletableFuture<>();
        // Capture the two counters as they get registered with the metric group.
        final MetricRegistry metricRegistry =
                TestingMetricRegistry.builder()
                        .setRegisterConsumer(
                                (metric, name, group) -> {
                                    if (MetricNames.NUM_RESCALES.equals(name)) {
                                        numRescalesMetricFuture.complete((Gauge<Long>) metric);
                                    } else if (MetricNames.NUM_RESTARTS.equals(name)) {
                                        numRestartsMetricFuture.complete((Gauge<Long>) metric);
                                    }
                                })
                        .build();
        final JobGraph jobGraph = createJobGraph();
        final DefaultDeclarativeSlotPool declarativeSlotPool =
                new DefaultDeclarativeSlotPool(
                        jobGraph.getJobID(),
                        new DefaultAllocatedSlotPool(),
                        ignored -> {},
                        Duration.ofMinutes(10),
                        Duration.ofMinutes(10),
                        Duration.ZERO,
                        mainThreadExecutor);
        scheduler =
                new AdaptiveSchedulerBuilder(
                                jobGraph,
                                singleThreadMainThreadExecutor,
                                EXECUTOR_RESOURCE.getExecutor())
                        .setJobMasterConfiguration(createConfigurationWithNoTimeouts())
                        .setJobManagerJobMetricGroup(
                                JobManagerMetricGroup.createJobManagerMetricGroup(
                                                metricRegistry, "localhost")
                                        .addJob(new JobID(), "jobName"))
                        .setDeclarativeSlotPool(declarativeSlotPool)
                        .build();
        final Gauge<Long> numRescalesMetric = numRescalesMetricFuture.get();
        final Gauge<Long> numRestartsMetric = numRestartsMetricFuture.get();
        final SubmissionBufferingTaskManagerGateway taskManagerGateway =
                new SubmissionBufferingTaskManagerGateway(1 + PARALLELISM);
        taskManagerGateway.setCancelConsumer(createCancelConsumer(scheduler));
        startTestInstanceInMainThread();
        // Offer a single slot first: the job starts with parallelism 1.
        runInMainThread(
                () ->
                        declarativeSlotPool.offerSlots(
                                createSlotOffersForResourceRequirements(
                                        ResourceCounter.withResource(ResourceProfile.UNKNOWN, 1)),
                                new LocalTaskManagerLocation(),
                                taskManagerGateway,
                                System.currentTimeMillis()));
        // wait for the first task submission
        taskManagerGateway.waitForSubmissions(1);
        assertThat(numRescalesMetric.getValue()).isEqualTo(0L);
        assertThat(numRestartsMetric.getValue()).isEqualTo(0L);
        // offer more slots, which will cause a restart in order to scale up
        runInMainThread(
                () ->
                        offerSlots(
                                declarativeSlotPool,
                                createSlotOffersForResourceRequirements(
                                        ResourceCounter.withResource(
                                                ResourceProfile.UNKNOWN, PARALLELISM)),
                                taskManagerGateway));
        // wait for the second task submissions
        taskManagerGateway.waitForSubmissions(PARALLELISM);
        assertThat(numRescalesMetric.getValue()).isEqualTo(1L);
        assertThat(numRestartsMetric.getValue()).isEqualTo(1L);
    }
    /**
     * A restart caused by a failure (not a rescale) must bump numRestarts but leave
     * numRescales untouched.
     */
    @Test
    void testNumRescalesAndStartsMetricForFailureRecovery() throws Exception {
        final CompletableFuture<Gauge<Long>> numRescalesMetricFuture = new CompletableFuture<>();
        final CompletableFuture<Gauge<Long>> numRestartsMetricFuture = new CompletableFuture<>();
        // Capture the two counters as they get registered with the metric group.
        final MetricRegistry metricRegistry =
                TestingMetricRegistry.builder()
                        .setRegisterConsumer(
                                (metric, name, group) -> {
                                    if (MetricNames.NUM_RESCALES.equals(name)) {
                                        numRescalesMetricFuture.complete((Gauge<Long>) metric);
                                    } else if (MetricNames.NUM_RESTARTS.equals(name)) {
                                        numRestartsMetricFuture.complete((Gauge<Long>) metric);
                                    }
                                })
                        .build();
        scheduler =
                new AdaptiveSchedulerBuilder(
                                createJobGraph(),
                                singleThreadMainThreadExecutor,
                                EXECUTOR_RESOURCE.getExecutor())
                        .setJobMasterConfiguration(createConfigurationWithNoTimeouts())
                        .setJobManagerJobMetricGroup(
                                JobManagerMetricGroup.createJobManagerMetricGroup(
                                                metricRegistry, "localhost")
                                        .addJob(new JobID(), "jobName"))
                        .build();
        try (MockRestartingContext ctx = new MockRestartingContext()) {
            StateTrackingMockExecutionGraph executionGraph = new StateTrackingMockExecutionGraph();
            final ExecutionGraphHandler executionGraphHandler =
                    new ExecutionGraphHandler(
                            executionGraph,
                            LOG,
                            ctx.getMainThreadExecutor(),
                            ctx.getMainThreadExecutor());
            final OperatorCoordinatorHandler operatorCoordinatorHandler =
                    new TestingOperatorCoordinatorHandler();
            executionGraph.transitionToRunning();
            // A non-empty failure collection marks this transition as failure recovery.
            List<ExceptionHistoryEntry> failureCollection =
                    List.of(
                            ExceptionHistoryEntry.createGlobal(
                                    new Exception("test"),
                                    CompletableFuture.completedFuture(Collections.emptyMap())));
            runInMainThread(
                    () ->
                            scheduler.goToRestarting(
                                    executionGraph,
                                    executionGraphHandler,
                                    operatorCoordinatorHandler,
                                    Duration.ZERO,
                                    new VertexParallelism(
                                            Collections.singletonMap(JOB_VERTEX.getID(), 1)),
                                    failureCollection));
        }
        final Gauge<Long> numRestartsMetric = numRestartsMetricFuture.get();
        assertThat(numRestartsMetric.getValue()).isEqualTo(1L);
        final Gauge<Long> numScalesMetric = numRescalesMetricFuture.get();
        assertThat(numScalesMetric.getValue()).isEqualTo(0L);
    }
    /**
     * upTime must grow while running, downTime stay zero, and restartingTime be non-negative
     * across a slot-driven rescale.
     */
    @Test
    void testStatusMetrics() throws Exception {
        final CompletableFuture<UpTimeGauge> upTimeMetricFuture = new CompletableFuture<>();
        final CompletableFuture<DownTimeGauge> downTimeMetricFuture = new CompletableFuture<>();
        // restartingTime acts as a stand-in for generic status time metrics
        final CompletableFuture<Gauge<Long>> restartTimeMetricFuture = new CompletableFuture<>();
        final MetricRegistry metricRegistry =
                TestingMetricRegistry.builder()
                        .setRegisterConsumer(
                                (metric, name, group) -> {
                                    switch (name) {
                                        case UpTimeGauge.METRIC_NAME:
                                            upTimeMetricFuture.complete((UpTimeGauge) metric);
                                            break;
                                        case DownTimeGauge.METRIC_NAME:
                                            downTimeMetricFuture.complete((DownTimeGauge) metric);
                                            break;
                                        case "restartingTimeTotal":
                                            restartTimeMetricFuture.complete((Gauge<Long>) metric);
                                            break;
                                    }
                                })
                        .build();
        final JobGraph jobGraph = createJobGraph();
        final DefaultDeclarativeSlotPool declarativeSlotPool =
                createDeclarativeSlotPool(jobGraph.getJobID(), singleThreadMainThreadExecutor);
        final Configuration configuration = createConfigurationWithNoTimeouts();
        configuration.set(
                JobManagerOptions.SCHEDULER_SUBMISSION_RESOURCE_WAIT_TIMEOUT,
                Duration.ofMillis(10L));
        // Enable TOTAL_TIME job status metrics so "restartingTimeTotal" gets registered.
        configuration.set(
                MetricOptions.JOB_STATUS_METRICS,
                Arrays.asList(MetricOptions.JobStatusMetrics.TOTAL_TIME));
        scheduler =
                new AdaptiveSchedulerBuilder(
                                jobGraph,
                                singleThreadMainThreadExecutor,
                                EXECUTOR_RESOURCE.getExecutor())
                        .setJobMasterConfiguration(configuration)
                        .setJobManagerJobMetricGroup(
                                JobManagerMetricGroup.createJobManagerMetricGroup(
                                                metricRegistry, "localhost")
                                        .addJob(new JobID(), "jobName"))
                        .setDeclarativeSlotPool(declarativeSlotPool)
                        .build();
        final UpTimeGauge upTimeGauge = upTimeMetricFuture.get();
        final DownTimeGauge downTimeGauge = downTimeMetricFuture.get();
        final Gauge<Long> restartTimeGauge = restartTimeMetricFuture.get();
        final SubmissionBufferingTaskManagerGateway taskManagerGateway =
                new SubmissionBufferingTaskManagerGateway(1 + PARALLELISM);
        taskManagerGateway.setCancelConsumer(createCancelConsumer(scheduler));
        startTestInstanceInMainThread();
        runInMainThread(
                () ->
                        offerSlots(
                                declarativeSlotPool,
                                createSlotOffersForResourceRequirements(
                                        ResourceCounter.withResource(ResourceProfile.UNKNOWN, 1)),
                                taskManagerGateway));
        // wait for the first task submission
        taskManagerGateway.waitForSubmissions(1);
        CommonTestUtils.waitUntilCondition(() -> upTimeGauge.getValue() > 0L);
        assertThat(downTimeGauge.getValue()).isEqualTo(0L);
        assertThat(restartTimeGauge.getValue()).isEqualTo(0L);
        // offer more slots, which will cause a restart in order to scale up
        runInMainThread(
                () ->
                        offerSlots(
                                declarativeSlotPool,
                                createSlotOffersForResourceRequirements(
                                        ResourceCounter.withResource(ResourceProfile.UNKNOWN, 1)),
                                taskManagerGateway));
        // wait for the second task submissions
        taskManagerGateway.waitForSubmissions(2);
        CommonTestUtils.waitUntilCondition(() -> upTimeGauge.getValue() > 0L);
        assertThat(downTimeGauge.getValue()).isEqualTo(0L);
        // can be zero if the restart is very quick
        assertThat(restartTimeGauge.getValue()).isGreaterThanOrEqualTo(0L);
    }
// ---------------------------------------------------------------------------------------------
// State transition tests
// ---------------------------------------------------------------------------------------------
    /** Starting scheduling must move the scheduler from Created to WaitingForResources. */
    @Test
    void testStartSchedulingTransitionsToWaitingForResources() throws Exception {
        scheduler =
                new AdaptiveSchedulerBuilder(
                                createJobGraph(),
                                singleThreadMainThreadExecutor,
                                EXECUTOR_RESOURCE.getExecutor())
                        .build();
        startTestInstanceInMainThread();
        assertThat(scheduler.getState()).isInstanceOf(WaitingForResources.class);
    }
    /** In default mode, resource requirements must equal the configured parallelism. */
    @Test
    void testStartSchedulingSetsResourceRequirementsForDefaultMode() throws Exception {
        final JobGraph jobGraph = createJobGraph();
        final DefaultDeclarativeSlotPool declarativeSlotPool =
                createDeclarativeSlotPool(jobGraph.getJobID(), singleThreadMainThreadExecutor);
        scheduler =
                new AdaptiveSchedulerBuilder(
                                jobGraph,
                                singleThreadMainThreadExecutor,
                                EXECUTOR_RESOURCE.getExecutor())
                        .setDeclarativeSlotPool(declarativeSlotPool)
                        .build();
        startTestInstanceInMainThread();
        assertThat(declarativeSlotPool.getResourceRequirements())
                .contains(ResourceRequirement.create(ResourceProfile.UNKNOWN, PARALLELISM));
    }
    /** In reactive mode, resource requirements must be raised to the default max parallelism. */
    @Test
    void testStartSchedulingSetsResourceRequirementsForReactiveMode() throws Exception {
        final JobGraph jobGraph = createJobGraph();
        final DefaultDeclarativeSlotPool declarativeSlotPool =
                createDeclarativeSlotPool(jobGraph.getJobID(), singleThreadMainThreadExecutor);
        final Configuration configuration = new Configuration();
        configuration.set(JobManagerOptions.SCHEDULER_MODE, SchedulerExecutionMode.REACTIVE);
        scheduler =
                new AdaptiveSchedulerBuilder(
                                jobGraph,
                                singleThreadMainThreadExecutor,
                                EXECUTOR_RESOURCE.getExecutor())
                        .setDeclarativeSlotPool(declarativeSlotPool)
                        .setJobMasterConfiguration(configuration)
                        .build();
        startTestInstanceInMainThread();
        // should request the max possible resources
        final int expectedParallelism =
                KeyGroupRangeAssignment.computeDefaultMaxParallelism(PARALLELISM);
        assertThat(declarativeSlotPool.getResourceRequirements())
                .contains(ResourceRequirement.create(ResourceProfile.UNKNOWN, expectedParallelism));
    }
    /** Tests that the listener for new slots is properly set up. */
    @Test
    void testResourceAcquisitionTriggersJobExecution() throws Exception {
        final JobGraph jobGraph = createJobGraph();
        final DefaultDeclarativeSlotPool declarativeSlotPool =
                createDeclarativeSlotPool(jobGraph.getJobID(), singleThreadMainThreadExecutor);
        final Configuration configuration = new Configuration();
        // minimal wait timeout so the scheduler reacts to offered slots right away
        configuration.set(
                JobManagerOptions.SCHEDULER_SUBMISSION_RESOURCE_WAIT_TIMEOUT,
                Duration.ofMillis(1L));
        scheduler =
                new AdaptiveSchedulerBuilder(
                                jobGraph,
                                singleThreadMainThreadExecutor,
                                EXECUTOR_RESOURCE.getExecutor())
                        .setDeclarativeSlotPool(declarativeSlotPool)
                        .setJobMasterConfiguration(configuration)
                        .build();
        final SubmissionBufferingTaskManagerGateway taskManagerGateway =
                new SubmissionBufferingTaskManagerGateway(PARALLELISM);
        CompletableFuture<State> startingStateFuture = new CompletableFuture<>();
        // start scheduling and offer slots from the scheduler's main thread; capture the state
        // right after startScheduling() to verify the intermediate WaitingForResources state
        singleThreadMainThreadExecutor.execute(
                () -> {
                    scheduler.startScheduling();
                    startingStateFuture.complete(scheduler.getState());
                    offerSlots(
                            declarativeSlotPool,
                            createSlotOffersForResourceRequirements(
                                    ResourceCounter.withResource(
                                            ResourceProfile.UNKNOWN, PARALLELISM)),
                            taskManagerGateway);
                });
        assertThat(startingStateFuture.get()).isInstanceOf(WaitingForResources.class);
        // Wait for all tasks to be submitted
        taskManagerGateway.waitForSubmissions(PARALLELISM);
        // read the execution graph on the main thread to avoid racing with state transitions
        final ArchivedExecutionGraph executionGraph =
                CompletableFuture.supplyAsync(
                                () -> scheduler.requestJob().getArchivedExecutionGraph(),
                                singleThreadMainThreadExecutor)
                        .get();
        assertThat(executionGraph.getJobVertex(JOB_VERTEX.getID()).getParallelism())
                .isEqualTo(PARALLELISM);
    }
@Test
void testGoToFinished() throws Exception {
scheduler =
new AdaptiveSchedulerBuilder(
createJobGraph(),
singleThreadMainThreadExecutor,
EXECUTOR_RESOURCE.getExecutor())
.build();
final ArchivedExecutionGraph archivedExecutionGraph =
new ArchivedExecutionGraphBuilder().setState(JobStatus.FAILED).build();
runInMainThread(() -> scheduler.goToFinished(archivedExecutionGraph));
assertThat(scheduler.getState()).isInstanceOf(Finished.class);
}
    /**
     * The job status listener must not be invoked for state transitions that keep the same
     * {@link JobStatus} (here: a transition between two INITIALIZING states).
     */
    @Test
    void testJobStatusListenerOnlyCalledIfJobStatusChanges() throws Exception {
        final AtomicInteger numStatusUpdates = new AtomicInteger();
        scheduler =
                new AdaptiveSchedulerBuilder(
                                createJobGraph(),
                                singleThreadMainThreadExecutor,
                                EXECUTOR_RESOURCE.getExecutor())
                        .setJobStatusListener(
                                (jobId, newJobStatus, timestamp) ->
                                        numStatusUpdates.incrementAndGet())
                        .build();
        // sanity check
        assertThat(scheduler.requestJobStatus())
                .withFailMessage("Assumption about job status for Scheduler@Created is incorrect.")
                .isEqualTo(JobStatus.INITIALIZING);
        // transition into next state, for which the job state is still INITIALIZING
        runInMainThread(
                () ->
                        scheduler.transitionToState(
                                new DummyState.Factory(scheduler, JobStatus.INITIALIZING)));
        // no notification expected because the JobStatus did not change
        assertThat(numStatusUpdates).hasValue(0);
    }
@Test
void testJobStatusListenerNotifiedOfJobStatusChanges() throws Exception {
final JobGraph jobGraph = createJobGraph();
final DefaultDeclarativeSlotPool declarativeSlotPool =
createDeclarativeSlotPool(jobGraph.getJobID(), singleThreadMainThreadExecutor);
final Configuration configuration = new Configuration();
configuration.set(
JobManagerOptions.SCHEDULER_SUBMISSION_RESOURCE_WAIT_TIMEOUT,
Duration.ofMillis(1L));
final CompletableFuture<Void> jobCreatedNotification = new CompletableFuture<>();
final CompletableFuture<Void> jobRunningNotification = new CompletableFuture<>();
final CompletableFuture<Void> jobFinishedNotification = new CompletableFuture<>();
final CompletableFuture<JobStatus> unexpectedJobStatusNotification =
new CompletableFuture<>();
scheduler =
new AdaptiveSchedulerBuilder(
jobGraph,
singleThreadMainThreadExecutor,
EXECUTOR_RESOURCE.getExecutor())
.setJobMasterConfiguration(configuration)
.setJobStatusListener(
(jobId, newJobStatus, timestamp) -> {
switch (newJobStatus) {
case CREATED:
jobCreatedNotification.complete(null);
break;
case RUNNING:
jobRunningNotification.complete(null);
break;
case FINISHED:
jobFinishedNotification.complete(null);
break;
default:
unexpectedJobStatusNotification.complete(newJobStatus);
}
})
.setDeclarativeSlotPool(declarativeSlotPool)
.build();
final SubmissionBufferingTaskManagerGateway taskManagerGateway =
new SubmissionBufferingTaskManagerGateway(1 + PARALLELISM);
runInMainThread(
() -> {
scheduler.startScheduling();
offerSlots(
declarativeSlotPool,
createSlotOffersForResourceRequirements(
ResourceCounter.withResource(ResourceProfile.UNKNOWN, 1)),
taskManagerGateway);
});
// wait for the task submission
final TaskDeploymentDescriptor submittedTask = taskManagerGateway.submittedTasks.take();
// let the job finish
runInMainThread(
() ->
scheduler.updateTaskExecutionState(
new TaskExecutionState(
submittedTask.getExecutionAttemptId(),
ExecutionState.FINISHED)));
jobCreatedNotification.get();
jobRunningNotification.get();
jobFinishedNotification.get();
assertThat(unexpectedJobStatusNotification.isDone()).isFalse();
}
    /**
     * Closing the scheduler after a terminal failure must shut down both checkpointing components
     * (completed checkpoint store and checkpoint ID counter) with the terminal job status FAILED.
     */
    @Test
    void testCloseShutsDownCheckpointingComponents() throws Exception {
        final CompletableFuture<JobStatus> completedCheckpointStoreShutdownFuture =
                new CompletableFuture<>();
        final CompletedCheckpointStore completedCheckpointStore =
                TestingCompletedCheckpointStore
                        .createStoreWithShutdownCheckAndNoCompletedCheckpoints(
                                completedCheckpointStoreShutdownFuture);
        final CompletableFuture<JobStatus> checkpointIdCounterShutdownFuture =
                new CompletableFuture<>();
        final CheckpointIDCounter checkpointIdCounter =
                TestingCheckpointIDCounter.createStoreWithShutdownCheckAndNoStartAction(
                        checkpointIdCounterShutdownFuture);
        final JobGraph jobGraph = createJobGraph();
        // checkpointing components are only created if checkpointing is enabled
        jobGraph.setSnapshotSettings(
                new JobCheckpointingSettings(
                        CheckpointCoordinatorConfiguration.builder().build(), null));
        scheduler =
                new AdaptiveSchedulerBuilder(
                                jobGraph,
                                singleThreadMainThreadExecutor,
                                EXECUTOR_RESOURCE.getExecutor())
                        .setCheckpointRecoveryFactory(
                                new TestingCheckpointRecoveryFactory(
                                        completedCheckpointStore, checkpointIdCounter))
                        .build();
        startTestInstanceInMainThread();
        final CompletableFuture<Void> closeFuture = new CompletableFuture<>();
        runInMainThread(
                () -> {
                    // transition into the FAILED state
                    scheduler.handleGlobalFailure(new FlinkException("Test exception"));
                    // we shouldn't block the closeAsync call here because it will trigger
                    // additional task on the main thread internally
                    FutureUtils.forward(scheduler.closeAsync(), closeFuture);
                });
        closeFuture.join();
        // both components must have observed the terminal status on shutdown
        assertThat(completedCheckpointStoreShutdownFuture.get()).isEqualTo(JobStatus.FAILED);
        assertThat(checkpointIdCounterShutdownFuture.get()).isEqualTo(JobStatus.FAILED);
    }
    /**
     * Delegates to the shared {@link DefaultSchedulerTest} helper to verify that {@code
     * closeAsync()} returns a future that completes on the scheduler's main thread.
     */
    @Test
    void testCloseAsyncReturnsMainThreadFuture() throws Exception {
        DefaultSchedulerTest.runCloseAsyncCompletesInMainThreadTest(
                TEST_EXECUTOR_RESOURCE.getExecutor(),
                (mainThreadExecutor, checkpointsCleaner) ->
                        new AdaptiveSchedulerBuilder(
                                        singleNoOpJobGraph(),
                                        mainThreadExecutor,
                                        EXECUTOR_RESOURCE.getExecutor())
                                .setCheckpointCleaner(checkpointsCleaner)
                                .build());
    }
@Test
void testTransitionToStateCallsOnLeave() throws Exception {
scheduler =
new AdaptiveSchedulerBuilder(
createJobGraph(),
singleThreadMainThreadExecutor,
EXECUTOR_RESOURCE.getExecutor())
.build();
final LifecycleMethodCapturingState firstState =
new LifecycleMethodCapturingState(scheduler);
runInMainThread(() -> scheduler.transitionToState(new StateInstanceFactory(firstState)));
firstState.reset();
runInMainThread(() -> scheduler.transitionToState(new DummyState.Factory(scheduler)));
assertThat(firstState.onLeaveCalled).isTrue();
assertThat(firstState.onLeaveNewStateArgument.equals(DummyState.class)).isTrue();
}
    /**
     * The max parallelism must stay constant across rescales: it is derived from the configured
     * parallelism and must be re-used even when the job initially runs at a lower parallelism.
     */
    @Test
    void testConsistentMaxParallelism() throws Exception {
        final int parallelism = 240;
        final int expectedMaxParallelism =
                KeyGroupRangeAssignment.computeDefaultMaxParallelism(parallelism);
        final JobVertex vertex = createNoOpVertex(parallelism);
        final JobGraph jobGraph = streamingJobGraph(vertex);
        final DefaultDeclarativeSlotPool declarativeSlotPool =
                createDeclarativeSlotPool(jobGraph.getJobID(), singleThreadMainThreadExecutor);
        scheduler =
                new AdaptiveSchedulerBuilder(
                                jobGraph,
                                singleThreadMainThreadExecutor,
                                EXECUTOR_RESOURCE.getExecutor())
                        .setDeclarativeSlotPool(declarativeSlotPool)
                        .setJobMasterConfiguration(createConfigurationWithNoTimeouts())
                        .build();
        final SubmissionBufferingTaskManagerGateway taskManagerGateway =
                new SubmissionBufferingTaskManagerGateway(1 + parallelism);
        taskManagerGateway.setCancelConsumer(createCancelConsumer(scheduler));
        // offer just enough resources to run at the lowest possible parallelism
        singleThreadMainThreadExecutor.execute(
                () -> {
                    scheduler.startScheduling();
                    offerSlots(
                            declarativeSlotPool,
                            createSlotOffersForResourceRequirements(
                                    ResourceCounter.withResource(ResourceProfile.UNKNOWN, 1)),
                            taskManagerGateway);
                });
        // Wait for task to be submitted
        taskManagerGateway.waitForSubmissions(1);
        ArchivedExecutionGraph executionGraph =
                getArchivedExecutionGraphForRunningJob(scheduler).get();
        ArchivedExecutionJobVertex archivedVertex = executionGraph.getJobVertex(vertex.getID());
        // ensure that the parallelism was submitted based on what is available
        assertThat(archivedVertex.getParallelism()).isOne();
        // and that the max parallelism was submitted based on what was configured
        assertThat(archivedVertex.getMaxParallelism()).isEqualTo(expectedMaxParallelism);
        // offer the resources to run at full parallelism
        singleThreadMainThreadExecutor.execute(
                () -> {
                    offerSlots(
                            declarativeSlotPool,
                            createSlotOffersForResourceRequirements(
                                    ResourceCounter.withResource(
                                            ResourceProfile.UNKNOWN, parallelism)),
                            taskManagerGateway);
                });
        // wait for the job to be re-submitted
        taskManagerGateway.waitForSubmissions(parallelism);
        ArchivedExecutionGraph resubmittedExecutionGraph =
                getArchivedExecutionGraphForRunningJob(scheduler).get();
        ArchivedExecutionJobVertex resubmittedArchivedVertex =
                resubmittedExecutionGraph.getJobVertex(vertex.getID());
        // ensure that the parallelism was submitted based on what is available
        assertThat(resubmittedArchivedVertex.getParallelism()).isEqualTo(parallelism);
        // and that the max parallelism was submitted based on what was configured
        assertThat(resubmittedArchivedVertex.getMaxParallelism()).isEqualTo(expectedMaxParallelism);
    }
    /**
     * Raising the upper parallelism bound of a running job and then offering matching slots must
     * trigger a scale-up to the new bound.
     */
    @Test
    void testRequirementIncreaseTriggersScaleUp() throws Exception {
        final JobGraph jobGraph = createJobGraph();
        final DefaultDeclarativeSlotPool declarativeSlotPool =
                createDeclarativeSlotPool(jobGraph.getJobID(), singleThreadMainThreadExecutor);
        scheduler = createSchedulerWithNoTimeouts(jobGraph, declarativeSlotPool);
        final int scaledUpParallelism = PARALLELISM * 2;
        final SubmissionBufferingTaskManagerGateway taskManagerGateway =
                createSubmissionBufferingTaskManagerGateway(scaledUpParallelism, scheduler);
        startJobWithSlotsMatchingParallelism(
                scheduler, declarativeSlotPool, taskManagerGateway, PARALLELISM);
        awaitJobReachingParallelism(taskManagerGateway, scheduler, PARALLELISM);
        JobResourceRequirements newJobResourceRequirements =
                createRequirementsWithUpperParallelism(scaledUpParallelism);
        // both steps must happen on the main thread, and in this order
        singleThreadMainThreadExecutor.execute(
                () -> {
                    // first update requirements as otherwise slots are rejected!
                    scheduler.updateJobResourceRequirements(newJobResourceRequirements);
                    offerSlots(
                            declarativeSlotPool,
                            createSlotOffersForResourceRequirements(
                                    ResourceCounter.withResource(
                                            ResourceProfile.UNKNOWN, PARALLELISM)),
                            taskManagerGateway);
                });
        awaitJobReachingParallelism(taskManagerGateway, scheduler, scaledUpParallelism);
    }
@Test
void testRequirementDecreaseTriggersScaleDown() throws Exception {
final JobGraph jobGraph = createJobGraph();
final DefaultDeclarativeSlotPool declarativeSlotPool =
createDeclarativeSlotPool(jobGraph.getJobID(), singleThreadMainThreadExecutor);
scheduler = createSchedulerWithNoTimeouts(jobGraph, declarativeSlotPool);
final SubmissionBufferingTaskManagerGateway taskManagerGateway =
createSubmissionBufferingTaskManagerGateway(PARALLELISM, scheduler);
startJobWithSlotsMatchingParallelism(
scheduler, declarativeSlotPool, taskManagerGateway, PARALLELISM);
awaitJobReachingParallelism(taskManagerGateway, scheduler, PARALLELISM);
int scaledDownParallelism = PARALLELISM - 1;
JobResourceRequirements newJobResourceRequirements =
createRequirementsWithUpperParallelism(scaledDownParallelism);
singleThreadMainThreadExecutor.execute(
() -> scheduler.updateJobResourceRequirements(newJobResourceRequirements));
awaitJobReachingParallelism(taskManagerGateway, scheduler, scaledDownParallelism);
}
    /**
     * Raising the lower parallelism bound up to (but not beyond) the currently running
     * parallelism must not trigger a rescale: state and submitted tasks stay untouched.
     */
    @Test
    void testRequirementLowerBoundIncreaseBelowCurrentParallelismDoesNotTriggerRescale()
            throws Exception {
        final JobGraph jobGraph = createJobGraph();
        final DefaultDeclarativeSlotPool declarativeSlotPool =
                createDeclarativeSlotPool(jobGraph.getJobID(), singleThreadMainThreadExecutor);
        scheduler = createSchedulerWithNoTimeouts(jobGraph, declarativeSlotPool);
        final SubmissionBufferingTaskManagerGateway taskManagerGateway =
                createSubmissionBufferingTaskManagerGateway(PARALLELISM, scheduler);
        startJobWithSlotsMatchingParallelism(
                scheduler, declarativeSlotPool, taskManagerGateway, PARALLELISM);
        awaitJobReachingParallelism(taskManagerGateway, scheduler, PARALLELISM);
        final JobResourceRequirements newJobResourceRequirements =
                createRequirementsWithEqualLowerAndUpperParallelism(PARALLELISM);
        // update and assertions must run on the scheduler's main thread
        final CompletableFuture<Void> asyncAssertion =
                CompletableFuture.runAsync(
                        () -> {
                            State state = scheduler.getState();
                            scheduler.updateJobResourceRequirements(newJobResourceRequirements);
                            // scheduler shouldn't change states
                            assertThat(scheduler.getState()).isSameAs(state);
                            // no new tasks should have been scheduled
                            assertThat(taskManagerGateway.submittedTasks).isEmpty();
                        },
                        singleThreadMainThreadExecutor);
        FlinkAssertions.assertThatFuture(asyncAssertion).eventuallySucceeds();
    }
    /**
     * Raising the lower parallelism bound above the current parallelism must keep the job running
     * at its current parallelism; the job only rescales once enough slots for the new minimum
     * become available.
     */
    @Test
    void testRequirementLowerBoundIncreaseBeyondCurrentParallelismKeepsJobRunning()
            throws Exception {
        final JobGraph jobGraph = createJobGraph();
        final DefaultDeclarativeSlotPool declarativeSlotPool =
                createDeclarativeSlotPool(jobGraph.getJobID(), singleThreadMainThreadExecutor);
        scheduler = createSchedulerWithNoTimeouts(jobGraph, declarativeSlotPool);
        int scaledUpParallelism = PARALLELISM * 10;
        final SubmissionBufferingTaskManagerGateway taskManagerGateway =
                createSubmissionBufferingTaskManagerGateway(scaledUpParallelism, scheduler);
        startJobWithSlotsMatchingParallelism(
                scheduler, declarativeSlotPool, taskManagerGateway, PARALLELISM);
        awaitJobReachingParallelism(taskManagerGateway, scheduler, PARALLELISM);
        JobResourceRequirements newJobResourceRequirements =
                createRequirementsWithEqualLowerAndUpperParallelism(scaledUpParallelism);
        // raising the lower bound alone must not change the scheduler state
        FlinkAssertions.assertThatFuture(
                        CompletableFuture.runAsync(
                                () -> {
                                    final State originalState = scheduler.getState();
                                    scheduler.updateJobResourceRequirements(
                                            newJobResourceRequirements);
                                    assertThat(scheduler.getState()).isSameAs(originalState);
                                },
                                singleThreadMainThreadExecutor))
                .eventuallySucceeds();
        // adding a few slots does not cause rescale or failure
        FlinkAssertions.assertThatFuture(
                        CompletableFuture.runAsync(
                                () -> {
                                    final State originalState = scheduler.getState();
                                    offerSlots(
                                            declarativeSlotPool,
                                            createSlotOffersForResourceRequirements(
                                                    ResourceCounter.withResource(
                                                            ResourceProfile.UNKNOWN, PARALLELISM)),
                                            taskManagerGateway);
                                    assertThat(scheduler.getState()).isSameAs(originalState);
                                },
                                singleThreadMainThreadExecutor))
                .eventuallySucceeds();
        // adding enough slots to reach minimum causes rescale
        FlinkAssertions.assertThatFuture(
                        CompletableFuture.runAsync(
                                () ->
                                        offerSlots(
                                                declarativeSlotPool,
                                                createSlotOffersForResourceRequirements(
                                                        ResourceCounter.withResource(
                                                                ResourceProfile.UNKNOWN,
                                                                PARALLELISM * 8)),
                                                taskManagerGateway),
                                singleThreadMainThreadExecutor))
                .eventuallySucceeds();
        awaitJobReachingParallelism(taskManagerGateway, scheduler, scaledUpParallelism);
    }
    /**
     * If the initial lower parallelism bound cannot be satisfied by the available slots, the job
     * must fail fast (given a tiny resource wait timeout) without ever submitting a task.
     */
    @Test
    void testInitialRequirementLowerBoundBeyondAvailableSlotsCausesImmediateFailure()
            throws Exception {
        final JobGraph jobGraph = createJobGraph();
        final DefaultDeclarativeSlotPool declarativeSlotPool =
                createDeclarativeSlotPool(jobGraph.getJobID(), singleThreadMainThreadExecutor);
        final int availableSlots = 1;
        // lower bound == PARALLELISM, but only a single slot will be offered
        JobResourceRequirements initialJobResourceRequirements =
                createRequirementsWithEqualLowerAndUpperParallelism(PARALLELISM);
        scheduler =
                prepareSchedulerWithNoTimeouts(jobGraph, declarativeSlotPool)
                        .withConfigurationOverride(
                                conf -> {
                                    // shrink the wait timeout so the failure surfaces immediately
                                    conf.set(
                                            JobManagerOptions
                                                    .SCHEDULER_SUBMISSION_RESOURCE_WAIT_TIMEOUT,
                                            Duration.ofMillis(1));
                                    return conf;
                                })
                        .setJobResourceRequirements(initialJobResourceRequirements)
                        .build();
        final SubmissionBufferingTaskManagerGateway taskManagerGateway =
                createSubmissionBufferingTaskManagerGateway(PARALLELISM, scheduler);
        startJobWithSlotsMatchingParallelism(
                scheduler, declarativeSlotPool, taskManagerGateway, availableSlots);
        // the job will fail because not enough slots are available
        FlinkAssertions.assertThatFuture(scheduler.getJobTerminationFuture())
                .eventuallySucceeds()
                .isEqualTo(JobStatus.FAILED);
        // no task was ever submitted because we failed immediately
        assertThat(taskManagerGateway.submittedTasks).isEmpty();
    }
    /**
     * A job stuck waiting for resources (lower bound above the available slots) must start
     * running once the lower bound is lowered to match the slots that are actually available.
     */
    @Test
    void testRequirementLowerBoundDecreaseAfterResourceScarcityBelowAvailableSlots()
            throws Exception {
        final JobGraph jobGraph = createJobGraph();
        final DefaultDeclarativeSlotPool declarativeSlotPool =
                createDeclarativeSlotPool(jobGraph.getJobID(), singleThreadMainThreadExecutor);
        final int availableSlots = 1;
        // initially require more slots than will be offered, so the job cannot start
        JobResourceRequirements initialJobResourceRequirements =
                createRequirementsWithEqualLowerAndUpperParallelism(PARALLELISM);
        scheduler =
                prepareSchedulerWithNoTimeouts(jobGraph, declarativeSlotPool)
                        .setJobResourceRequirements(initialJobResourceRequirements)
                        .build();
        final SubmissionBufferingTaskManagerGateway taskManagerGateway =
                createSubmissionBufferingTaskManagerGateway(PARALLELISM, scheduler);
        startJobWithSlotsMatchingParallelism(
                scheduler, declarativeSlotPool, taskManagerGateway, availableSlots);
        // unlock job by decreasing the parallelism
        JobResourceRequirements newJobResourceRequirements =
                createRequirementsWithLowerAndUpperParallelism(availableSlots, PARALLELISM);
        singleThreadMainThreadExecutor.execute(
                () -> scheduler.updateJobResourceRequirements(newJobResourceRequirements));
        awaitJobReachingParallelism(taskManagerGateway, scheduler, availableSlots);
    }
private static Configuration createConfigurationWithNoTimeouts() {
return new Configuration()
.set(
JobManagerOptions.SCHEDULER_SUBMISSION_RESOURCE_WAIT_TIMEOUT,
Duration.ofMillis(-1L))
.set(
JobManagerOptions.SCHEDULER_SUBMISSION_RESOURCE_STABILIZATION_TIMEOUT,
Duration.ofMillis(1L))
.set(
JobManagerOptions.SCHEDULER_EXECUTING_COOLDOWN_AFTER_RESCALING,
Duration.ofMillis(1L))
.set(
JobManagerOptions.SCHEDULER_EXECUTING_RESOURCE_STABILIZATION_TIMEOUT,
Duration.ofMillis(1L))
.set(JobManagerOptions.SCHEDULER_RESCALE_TRIGGER_MAX_DELAY, Duration.ZERO);
}
private AdaptiveSchedulerBuilder prepareSchedulerWithNoTimeouts(
JobGraph jobGraph, DeclarativeSlotPool declarativeSlotPool) {
return new AdaptiveSchedulerBuilder(
jobGraph, singleThreadMainThreadExecutor, EXECUTOR_RESOURCE.getExecutor())
.setDeclarativeSlotPool(declarativeSlotPool)
.setJobMasterConfiguration(createConfigurationWithNoTimeouts());
}
private AdaptiveScheduler createSchedulerWithNoTimeouts(
JobGraph jobGraph, DeclarativeSlotPool declarativeSlotPool) throws Exception {
return prepareSchedulerWithNoTimeouts(jobGraph, declarativeSlotPool).build();
}
private SubmissionBufferingTaskManagerGateway createSubmissionBufferingTaskManagerGateway(
int parallelism, SchedulerNG scheduler) {
SubmissionBufferingTaskManagerGateway taskManagerGateway =
new SubmissionBufferingTaskManagerGateway(parallelism);
taskManagerGateway.setCancelConsumer(
executionAttemptID ->
singleThreadMainThreadExecutor.execute(
() ->
scheduler.updateTaskExecutionState(
new TaskExecutionState(
executionAttemptID,
ExecutionState.CANCELED))));
return taskManagerGateway;
}
private void startJobWithSlotsMatchingParallelism(
SchedulerNG scheduler,
DeclarativeSlotPool declarativeSlotPool,
TaskManagerGateway taskManagerGateway,
int parallelism) {
singleThreadMainThreadExecutor.execute(
() -> {
scheduler.startScheduling();
offerSlots(
declarativeSlotPool,
createSlotOffersForResourceRequirements(
ResourceCounter.withResource(
ResourceProfile.UNKNOWN, parallelism)),
taskManagerGateway);
});
}
private void awaitJobReachingParallelism(
SubmissionBufferingTaskManagerGateway taskManagerGateway,
SchedulerNG scheduler,
int parallelism)
throws Exception {
// Wait for all tasks to be submitted
taskManagerGateway.waitForSubmissions(parallelism);
final ArchivedExecutionGraph executionGraph =
CompletableFuture.supplyAsync(
() -> scheduler.requestJob().getArchivedExecutionGraph(),
singleThreadMainThreadExecutor)
.get();
assertThat(executionGraph.getJobVertex(JOB_VERTEX.getID()).getParallelism())
.isEqualTo(parallelism);
}
private static JobResourceRequirements createRequirementsWithUpperParallelism(int parallelism) {
return createRequirementsWithLowerAndUpperParallelism(1, parallelism);
}
private static JobResourceRequirements createRequirementsWithEqualLowerAndUpperParallelism(
int parallelism) {
return createRequirementsWithLowerAndUpperParallelism(parallelism, parallelism);
}
private static JobResourceRequirements createRequirementsWithLowerAndUpperParallelism(
int lowerParallelism, int upperParallelism) {
return new JobResourceRequirements(
Collections.singletonMap(
JOB_VERTEX.getID(),
new JobVertexResourceRequirements(
new JobVertexResourceRequirements.Parallelism(
lowerParallelism, upperParallelism))));
}
// ---------------------------------------------------------------------------------------------
// Failure handling tests
// ---------------------------------------------------------------------------------------------
@Test
void testHowToHandleFailureRejectedByStrategy() throws Exception {
final Configuration configuration = new Configuration();
final List<Event> eventCollector = new ArrayList<>(1);
final UnregisteredMetricGroups.UnregisteredJobManagerJobMetricGroup testMetricGroup =
createTestMetricGroup(eventCollector);
final AdaptiveScheduler scheduler =
new AdaptiveSchedulerBuilder(
createJobGraph(),
mainThreadExecutor,
EXECUTOR_RESOURCE.getExecutor())
.setRestartBackoffTimeStrategy(NoRestartBackoffTimeStrategy.INSTANCE)
.setJobMasterConfiguration(configuration)
.setJobManagerJobMetricGroup(testMetricGroup)
.build();
assertThat(
scheduler
.howToHandleFailure(
new Exception("test"), createFailureLabelsFuture())
.canRestart())
.isFalse();
assertThat(eventCollector).isEmpty();
mainThreadExecutor.trigger();
checkMetrics(eventCollector, false);
}
@Test
void testHowToHandleFailureAllowedByStrategy() throws Exception {
final Configuration configuration = new Configuration();
final List<Event> eventCollector = new ArrayList<>(1);
final UnregisteredMetricGroups.UnregisteredJobManagerJobMetricGroup testMetricGroup =
createTestMetricGroup(eventCollector);
final TestRestartBackoffTimeStrategy restartBackoffTimeStrategy =
new TestRestartBackoffTimeStrategy(true, 1234);
final AdaptiveScheduler scheduler =
new AdaptiveSchedulerBuilder(
createJobGraph(),
mainThreadExecutor,
EXECUTOR_RESOURCE.getExecutor())
.setRestartBackoffTimeStrategy(restartBackoffTimeStrategy)
.setJobMasterConfiguration(configuration)
.setJobManagerJobMetricGroup(testMetricGroup)
.build();
final FailureResult failureResult =
scheduler.howToHandleFailure(new Exception("test"), createFailureLabelsFuture());
assertThat(failureResult.canRestart()).isTrue();
assertThat(failureResult.getBackoffTime().toMillis())
.isEqualTo(restartBackoffTimeStrategy.getBackoffTime());
assertThat(eventCollector).isEmpty();
mainThreadExecutor.trigger();
checkMetrics(eventCollector, true);
}
@Test
void testHowToHandleFailureUnrecoverableFailure() throws Exception {
final Configuration configuration = new Configuration();
final List<Event> eventCollector = new ArrayList<>(1);
final UnregisteredMetricGroups.UnregisteredJobManagerJobMetricGroup testMetricGroup =
createTestMetricGroup(eventCollector);
final AdaptiveScheduler scheduler =
new AdaptiveSchedulerBuilder(
createJobGraph(),
mainThreadExecutor,
EXECUTOR_RESOURCE.getExecutor())
.setJobMasterConfiguration(configuration)
.setJobManagerJobMetricGroup(testMetricGroup)
.build();
assertThat(
scheduler
.howToHandleFailure(
new SuppressRestartsException(new Exception("test")),
createFailureLabelsFuture())
.canRestart())
.isFalse();
assertThat(eventCollector).isEmpty();
mainThreadExecutor.trigger();
checkMetrics(eventCollector, false);
}
@Test
void testExceptionHistoryWithGlobalFailureLabels() throws Exception {
final Exception expectedException = new Exception("Global Exception to label");
BiConsumer<AdaptiveScheduler, List<ExecutionAttemptID>> testLogic =
(scheduler, attemptIds) -> scheduler.handleGlobalFailure(expectedException);
final TestingFailureEnricher failureEnricher = new TestingFailureEnricher();
final Iterable<RootExceptionHistoryEntry> actualExceptionHistory =
new ExceptionHistoryTester(singleThreadMainThreadExecutor)
.withTestLogic(testLogic)
.withFailureEnrichers(Collections.singletonList(failureEnricher))
.run();
assertThat(actualExceptionHistory).hasSize(1);
final RootExceptionHistoryEntry failure = actualExceptionHistory.iterator().next();
assertThat(failure.getTaskManagerLocation()).isNull();
assertThat(failure.getFailingTaskName()).isNull();
assertThat(failureEnricher.getSeenThrowables()).containsExactly(expectedException);
assertThat(failure.getFailureLabels()).isEqualTo(failureEnricher.getFailureLabels());
assertThat(failure.getException().deserializeError(classLoader))
.isEqualTo(expectedException);
}
@Test
void testExceptionHistoryWithGlobalFailure() throws Exception {
final Exception expectedException = new Exception("Expected Global Exception");
BiConsumer<AdaptiveScheduler, List<ExecutionAttemptID>> testLogic =
(scheduler, attemptIds) -> scheduler.handleGlobalFailure(expectedException);
final Iterable<RootExceptionHistoryEntry> actualExceptionHistory =
new ExceptionHistoryTester(singleThreadMainThreadExecutor)
.withTestLogic(testLogic)
.run();
assertThat(actualExceptionHistory).hasSize(1);
final RootExceptionHistoryEntry failure = actualExceptionHistory.iterator().next();
assertThat(failure.getTaskManagerLocation()).isNull();
assertThat(failure.getFailingTaskName()).isNull();
assertThat(failure.getException().deserializeError(classLoader))
.isEqualTo(expectedException);
}
    /** Verify AdaptiveScheduler propagates failure labels as generated by Failure Enrichers. */
    @Test
    void testExceptionHistoryWithTaskFailureLabels() throws Exception {
        final Exception taskException = new Exception("Task Exception");
        // fail the second attempt so the failure is attributable to a concrete task
        BiConsumer<AdaptiveScheduler, List<ExecutionAttemptID>> testLogic =
                (scheduler, attemptIds) -> {
                    final ExecutionAttemptID attemptId = attemptIds.get(1);
                    scheduler.updateTaskExecutionState(
                            new TaskExecutionStateTransition(
                                    new TaskExecutionState(
                                            attemptId, ExecutionState.FAILED, taskException)));
                };
        final TestingFailureEnricher failureEnricher = new TestingFailureEnricher();
        final Iterable<RootExceptionHistoryEntry> actualExceptionHistory =
                new ExceptionHistoryTester(singleThreadMainThreadExecutor)
                        .withFailureEnrichers(Collections.singletonList(failureEnricher))
                        .withTestLogic(testLogic)
                        .run();
        assertThat(actualExceptionHistory).hasSize(1);
        final RootExceptionHistoryEntry failure = actualExceptionHistory.iterator().next();
        assertThat(failure.getException().deserializeError(classLoader)).isEqualTo(taskException);
        // the labels produced by the enricher must be attached to the history entry
        assertThat(failure.getFailureLabels()).isEqualTo(failureEnricher.getFailureLabels());
    }
@Test
void testExceptionHistoryWithTaskFailure() throws Exception {
final Exception expectedException = new Exception("Expected Local Exception");
BiConsumer<AdaptiveScheduler, List<ExecutionAttemptID>> testLogic =
(scheduler, attemptIds) -> {
final ExecutionAttemptID attemptId = attemptIds.get(1);
scheduler.updateTaskExecutionState(
new TaskExecutionStateTransition(
new TaskExecutionState(
attemptId, ExecutionState.FAILED, expectedException)));
};
final Iterable<RootExceptionHistoryEntry> actualExceptionHistory =
new ExceptionHistoryTester(singleThreadMainThreadExecutor)
.withTestLogic(testLogic)
.run();
assertThat(actualExceptionHistory).hasSize(1);
final RootExceptionHistoryEntry failure = actualExceptionHistory.iterator().next();
assertThat(failure.getException().deserializeError(classLoader))
.isEqualTo(expectedException);
}
    /**
     * A task failure followed by a restart (fixed-delay strategy) must still surface as a single
     * root entry in the exception history.
     */
    @Test
    void testExceptionHistoryWithTaskFailureWithRestart() throws Exception {
        final Exception expectedException = new Exception("Expected Local Exception");
        // allow exactly one restart with a short backoff so the job recovers after the failure
        final Consumer<AdaptiveSchedulerBuilder> setupScheduler =
                builder ->
                        builder.setRestartBackoffTimeStrategy(
                                new FixedDelayRestartBackoffTimeStrategy
                                                .FixedDelayRestartBackoffTimeStrategyFactory(1, 100)
                                        .create());
        final BiConsumer<AdaptiveScheduler, List<ExecutionAttemptID>> testLogic =
                (scheduler, attemptIds) -> {
                    final ExecutionAttemptID attemptId = attemptIds.get(1);
                    scheduler.updateTaskExecutionState(
                            new TaskExecutionStateTransition(
                                    new TaskExecutionState(
                                            attemptId, ExecutionState.FAILED, expectedException)));
                };
        final Iterable<RootExceptionHistoryEntry> actualExceptionHistory =
                new ExceptionHistoryTester(singleThreadMainThreadExecutor)
                        .withTestLogic(testLogic)
                        .withModifiedScheduler(setupScheduler)
                        .run();
        assertThat(actualExceptionHistory).hasSize(1);
        final RootExceptionHistoryEntry failure = actualExceptionHistory.iterator().next();
        assertThat(failure.getException().deserializeError(classLoader))
                .isEqualTo(expectedException);
    }
    /**
     * A task failure that happens while a stop-with-savepoint is in flight must be recorded in
     * the exception history; the savepoint itself is failed via declined checkpoints so the job
     * can terminate.
     */
    @Test
    void testExceptionHistoryWithTaskFailureFromStopWithSavepoint() throws Exception {
        final Exception expectedException = new Exception("Expected Local Exception");
        Consumer<JobGraph> setupJobGraph =
                jobGraph ->
                        jobGraph.setSnapshotSettings(
                                new JobCheckpointingSettings(
                                        // set a large checkpoint interval so we can easily deduce
                                        // the savepoints checkpoint id
                                        CheckpointCoordinatorConfiguration.builder()
                                                .setCheckpointInterval(Long.MAX_VALUE)
                                                .build(),
                                        null));
        final CompletedCheckpointStore completedCheckpointStore =
                new StandaloneCompletedCheckpointStore(1);
        final CheckpointIDCounter checkpointIDCounter = new StandaloneCheckpointIDCounter();
        final CheckpointsCleaner checkpointCleaner = new CheckpointsCleaner();
        TestingCheckpointRecoveryFactory checkpointRecoveryFactory =
                new TestingCheckpointRecoveryFactory(completedCheckpointStore, checkpointIDCounter);
        Consumer<AdaptiveSchedulerBuilder> setupScheduler =
                builder ->
                        builder.setCheckpointRecoveryFactory(checkpointRecoveryFactory)
                                .setCheckpointCleaner(checkpointCleaner);
        BiConsumer<AdaptiveScheduler, List<ExecutionAttemptID>> testLogic =
                (scheduler, attemptIds) -> {
                    final ExecutionAttemptID attemptId = attemptIds.get(1);
                    scheduler.stopWithSavepoint(
                            "file:///tmp/target", true, SavepointFormatType.CANONICAL);
                    scheduler.updateTaskExecutionState(
                            new TaskExecutionStateTransition(
                                    new TaskExecutionState(
                                            attemptId, ExecutionState.FAILED, expectedException)));
                    // fail the savepoint so that the job terminates
                    for (ExecutionAttemptID id : attemptIds) {
                        // counter.get() - 1 is the id of the savepoint triggered above, because
                        // the large checkpoint interval prevents any other checkpoint in between
                        scheduler.declineCheckpoint(
                                new DeclineCheckpoint(
                                        scheduler.requestJob().getJobId(),
                                        id,
                                        checkpointIDCounter.get() - 1,
                                        new CheckpointException(
                                                CheckpointFailureReason.IO_EXCEPTION)));
                    }
                };
        final Iterable<RootExceptionHistoryEntry> actualExceptionHistory =
                new ExceptionHistoryTester(singleThreadMainThreadExecutor)
                        .withTestLogic(testLogic)
                        .withModifiedScheduler(setupScheduler)
                        .withModifiedJobGraph(setupJobGraph)
                        .run();
        assertThat(actualExceptionHistory).hasSize(1);
        final RootExceptionHistoryEntry failure = actualExceptionHistory.iterator().next();
        assertThat(failure.getException().deserializeError(classLoader))
                .isEqualTo(expectedException);
    }
@Test
void testExceptionHistoryWithTaskConcurrentGlobalFailure() throws Exception {
final Exception expectedException1 = new Exception("Expected Global Exception 1");
final Exception expectedException2 = new Exception("Expected Global Exception 2");
final BiConsumer<AdaptiveScheduler, List<ExecutionAttemptID>> testLogic =
(scheduler, attemptIds) -> {
scheduler.handleGlobalFailure(expectedException1);
scheduler.handleGlobalFailure(expectedException2);
};
final Iterable<RootExceptionHistoryEntry> entries =
new ExceptionHistoryTester(singleThreadMainThreadExecutor)
.withTestLogic(testLogic)
.run();
assertThat(entries).hasSize(1);
final RootExceptionHistoryEntry failure = entries.iterator().next();
assertThat(failure.getException().deserializeError(classLoader))
.isEqualTo(expectedException1);
final Iterable<ExceptionHistoryEntry> concurrentExceptions =
failure.getConcurrentExceptions();
final List<Throwable> foundExceptions =
IterableUtils.toStream(concurrentExceptions)
.map(ExceptionHistoryEntry::getException)
.map(exception -> exception.deserializeError(classLoader))
.collect(Collectors.toList());
assertThat(foundExceptions).containsExactly(expectedException2);
}
@Test
void testExceptionHistoryWithTaskConcurrentFailure() throws Exception {
final Exception expectedException1 = new Exception("Expected Local Exception 1");
final Exception expectedException2 = new Exception("Expected Local Exception 2");
BiConsumer<AdaptiveScheduler, List<ExecutionAttemptID>> testLogic =
(scheduler, attemptIds) -> {
final ExecutionAttemptID attemptId = attemptIds.remove(0);
final ExecutionAttemptID attemptId2 = attemptIds.remove(0);
scheduler.updateTaskExecutionState(
new TaskExecutionStateTransition(
new TaskExecutionState(
attemptId, ExecutionState.FAILED, expectedException1)));
scheduler.updateTaskExecutionState(
new TaskExecutionStateTransition(
new TaskExecutionState(
attemptId2,
ExecutionState.FAILED,
expectedException2)));
};
final Iterable<RootExceptionHistoryEntry> entries =
new ExceptionHistoryTester(singleThreadMainThreadExecutor)
.withTestLogic(testLogic)
.run();
assertThat(entries).hasSize(1);
final RootExceptionHistoryEntry failure = entries.iterator().next();
assertThat(failure.getException().deserializeError(classLoader))
.isEqualTo(expectedException1);
final Iterable<ExceptionHistoryEntry> concurrentExceptions =
failure.getConcurrentExceptions();
final List<Throwable> foundExceptions =
IterableUtils.toStream(concurrentExceptions)
.map(ExceptionHistoryEntry::getException)
.map(exception -> exception.deserializeError(classLoader))
.collect(Collectors.toList());
// In the future, concurrent local failures should be stored.
assertThat(foundExceptions).isEmpty();
}
@Test
void testRepeatedTransitionIntoCurrentStateFails() throws Exception {
scheduler =
new AdaptiveSchedulerBuilder(
createJobGraph(),
singleThreadMainThreadExecutor,
EXECUTOR_RESOURCE.getExecutor())
.build();
final State state = scheduler.getState();
// safeguard for this test
assertThat(state).isInstanceOf(Created.class);
assertThatThrownBy(() -> scheduler.transitionToState(new Created.Factory(scheduler, LOG)))
.isInstanceOf(IllegalStateException.class);
}
// ---------------------------------------------------------------------------------------------
// Illegal state behavior tests
// ---------------------------------------------------------------------------------------------
@Test
void testTriggerSavepointFailsInIllegalState() throws Exception {
scheduler =
new AdaptiveSchedulerBuilder(
createJobGraph(),
singleThreadMainThreadExecutor,
EXECUTOR_RESOURCE.getExecutor())
.build();
assertThatFuture(
scheduler.triggerSavepoint(
"some directory", false, SavepointFormatType.CANONICAL))
.eventuallyFailsWith(ExecutionException.class)
.withCauseInstanceOf(CheckpointException.class);
}
@Test
void testStopWithSavepointFailsInIllegalState() throws Exception {
scheduler =
new AdaptiveSchedulerBuilder(
createJobGraph(),
singleThreadMainThreadExecutor,
EXECUTOR_RESOURCE.getExecutor())
.build();
assertThatFuture(
scheduler.triggerSavepoint(
"some directory", false, SavepointFormatType.CANONICAL))
.eventuallyFailsWith(ExecutionException.class)
.withCauseInstanceOf(CheckpointException.class);
}
@Test
void testDeliverOperatorEventToCoordinatorFailsInIllegalState() throws Exception {
scheduler =
new AdaptiveSchedulerBuilder(
createJobGraph(),
singleThreadMainThreadExecutor,
EXECUTOR_RESOURCE.getExecutor())
.build();
assertThatThrownBy(
() ->
scheduler.deliverOperatorEventToCoordinator(
createExecutionAttemptId(),
new OperatorID(),
new TestOperatorEvent()))
.isInstanceOf(TaskNotRunningException.class);
}
@Test
void testDeliverCoordinationRequestToCoordinatorFailsInIllegalState() throws Exception {
scheduler =
new AdaptiveSchedulerBuilder(
createJobGraph(),
singleThreadMainThreadExecutor,
EXECUTOR_RESOURCE.getExecutor())
.build();
assertThatFuture(
scheduler.deliverCoordinationRequestToCoordinator(
new OperatorID(), new CoordinationRequest() {}))
.eventuallyFailsWith(ExecutionException.class)
.withCauseInstanceOf(FlinkException.class);
}
@Test
void testUpdateTaskExecutionStateReturnsFalseInIllegalState() throws Throwable {
final JobGraph jobGraph = createJobGraph();
scheduler =
new AdaptiveSchedulerBuilder(
jobGraph,
singleThreadMainThreadExecutor,
EXECUTOR_RESOURCE.getExecutor())
.build();
assertThat(
scheduler.updateTaskExecutionState(
new TaskExecutionStateTransition(
new TaskExecutionState(
createExecutionAttemptId(),
ExecutionState.FAILED))))
.isFalse();
}
@Test
void testRequestNextInputSplitFailsInIllegalState() throws Exception {
scheduler =
new AdaptiveSchedulerBuilder(
createJobGraph(),
singleThreadMainThreadExecutor,
EXECUTOR_RESOURCE.getExecutor())
.build();
assertThatThrownBy(
() ->
scheduler.requestNextInputSplit(
JOB_VERTEX.getID(), createExecutionAttemptId()))
.isInstanceOf(IOException.class);
}
@Test
void testRequestPartitionStateFailsInIllegalState() throws Exception {
scheduler =
new AdaptiveSchedulerBuilder(
createJobGraph(),
singleThreadMainThreadExecutor,
EXECUTOR_RESOURCE.getExecutor())
.build();
assertThatThrownBy(
() ->
scheduler.requestPartitionState(
new IntermediateDataSetID(), new ResultPartitionID()))
.isInstanceOf(PartitionProducerDisposedException.class);
}
@Test
void testTryToAssignSlotsReturnsNotPossibleIfExpectedResourcesAreNotAvailable()
throws Exception {
final TestingSlotAllocator slotAllocator = TestingSlotAllocator.newBuilder().build();
scheduler =
new AdaptiveSchedulerBuilder(
createJobGraph(),
singleThreadMainThreadExecutor,
EXECUTOR_RESOURCE.getExecutor())
.setSlotAllocator(slotAllocator)
.build();
final CreatingExecutionGraph.AssignmentResult assignmentResult =
scheduler.tryToAssignSlots(
CreatingExecutionGraph.ExecutionGraphWithVertexParallelism.create(
new StateTrackingMockExecutionGraph(), JobSchedulingPlan.empty()));
assertThat(assignmentResult.isSuccess()).isFalse();
}
@Test
void testComputeVertexParallelismStoreForExecutionInReactiveMode() {
JobVertex v1 = createNoOpVertex("v1", 1, 50);
JobVertex v2 = createNoOpVertex("v2", 50, 50);
JobGraph graph = streamingJobGraph(v1, v2);
VertexParallelismStore parallelismStore =
AdaptiveScheduler.computeVertexParallelismStoreForExecution(
graph,
SchedulerExecutionMode.REACTIVE,
SchedulerBase::getDefaultMaxParallelism);
for (JobVertex vertex : graph.getVertices()) {
VertexParallelismInformation info = parallelismStore.getParallelismInfo(vertex.getID());
assertThat(info.getParallelism()).isEqualTo(vertex.getParallelism());
assertThat(info.getMaxParallelism()).isEqualTo(vertex.getMaxParallelism());
}
}
@Test
void testComputeVertexParallelismStoreForExecutionInDefaultMode() {
JobVertex v1 = createNoOpVertex("v1", 1, 50);
JobVertex v2 = createNoOpVertex("v2", 50, 50);
JobGraph graph = streamingJobGraph(v1, v2);
VertexParallelismStore parallelismStore =
AdaptiveScheduler.computeVertexParallelismStoreForExecution(
graph, null, SchedulerBase::getDefaultMaxParallelism);
for (JobVertex vertex : graph.getVertices()) {
VertexParallelismInformation info = parallelismStore.getParallelismInfo(vertex.getID());
assertThat(info.getParallelism()).isEqualTo(vertex.getParallelism());
assertThat(info.getMaxParallelism()).isEqualTo(vertex.getMaxParallelism());
}
}
@Test
void testCheckpointCleanerIsClosedAfterCheckpointServices() throws Exception {
final ScheduledExecutorService executorService =
Executors.newSingleThreadScheduledExecutor();
try {
DefaultSchedulerTest.doTestCheckpointCleanerIsClosedAfterCheckpointServices(
(checkpointRecoveryFactory, checkpointCleaner) -> {
final JobGraph jobGraph = createJobGraph();
enableCheckpointing(jobGraph);
try {
return new AdaptiveSchedulerBuilder(
jobGraph,
ComponentMainThreadExecutorServiceAdapter
.forSingleThreadExecutor(executorService),
EXECUTOR_RESOURCE.getExecutor())
.setCheckpointRecoveryFactory(checkpointRecoveryFactory)
.setCheckpointCleaner(checkpointCleaner)
.build();
} catch (Exception e) {
throw new RuntimeException(e);
}
},
executorService,
LOG);
} finally {
executorService.shutdownNow();
}
}
@Test
void testIdleSlotsAreReleasedAfterDownScalingTriggeredByLoweredResourceRequirements()
throws Exception {
final JobGraph jobGraph = createJobGraph();
final Duration slotIdleTimeout = Duration.ofMillis(10);
final Configuration configuration = createConfigurationWithNoTimeouts();
configuration.set(JobManagerOptions.SLOT_IDLE_TIMEOUT, slotIdleTimeout);
final DeclarativeSlotPool declarativeSlotPool =
createDeclarativeSlotPool(
jobGraph.getJobID(), singleThreadMainThreadExecutor, slotIdleTimeout);
scheduler =
new AdaptiveSchedulerBuilder(
jobGraph,
singleThreadMainThreadExecutor,
EXECUTOR_RESOURCE.getExecutor())
.setDeclarativeSlotPool(declarativeSlotPool)
.setJobMasterConfiguration(configuration)
.build();
final int numInitialSlots = 4;
final int numSlotsAfterDownscaling = 2;
final SubmissionBufferingTaskManagerGateway taskManagerGateway =
new SubmissionBufferingTaskManagerGateway(numInitialSlots);
taskManagerGateway.setCancelConsumer(createCancelConsumer(scheduler));
singleThreadMainThreadExecutor.execute(
() -> {
scheduler.startScheduling();
offerSlots(
declarativeSlotPool,
createSlotOffersForResourceRequirements(
ResourceCounter.withResource(
ResourceProfile.UNKNOWN, numInitialSlots)),
taskManagerGateway);
});
// wait for all tasks to be submitted
taskManagerGateway.waitForSubmissions(numInitialSlots);
// lower the resource requirements
singleThreadMainThreadExecutor.execute(
() ->
scheduler.updateJobResourceRequirements(
JobResourceRequirements.newBuilder()
.setParallelismForJobVertex(
JOB_VERTEX.getID(), 1, numSlotsAfterDownscaling)
.build()));
// job should be resubmitted with lower parallelism
taskManagerGateway.waitForSubmissions(numSlotsAfterDownscaling);
// and excessive slots should be freed
taskManagerGateway.waitForFreedSlots(numInitialSlots - numSlotsAfterDownscaling);
final CompletableFuture<JobStatus> jobStatusFuture = new CompletableFuture<>();
singleThreadMainThreadExecutor.execute(
() -> jobStatusFuture.complete(scheduler.getState().getJobStatus()));
assertThatFuture(jobStatusFuture).eventuallySucceeds().isEqualTo(JobStatus.RUNNING);
// make sure we haven't freed up any more slots
assertThat(taskManagerGateway.freedSlots).isEmpty();
}
@Test
void testUpdateResourceRequirementsInReactiveModeIsNotSupported() throws Exception {
final Configuration configuration = new Configuration();
configuration.set(JobManagerOptions.SCHEDULER_MODE, SchedulerExecutionMode.REACTIVE);
scheduler =
new AdaptiveSchedulerBuilder(
createJobGraph(),
singleThreadMainThreadExecutor,
EXECUTOR_RESOURCE.getExecutor())
.setJobMasterConfiguration(configuration)
.build();
assertThatThrownBy(
() ->
scheduler.updateJobResourceRequirements(
JobResourceRequirements.empty()))
.isInstanceOf(UnsupportedOperationException.class);
}
@Test
void testRequestDefaultResourceRequirements() throws Exception {
final JobGraph jobGraph = createJobGraph();
final Configuration configuration = new Configuration();
scheduler =
new AdaptiveSchedulerBuilder(
jobGraph,
singleThreadMainThreadExecutor,
EXECUTOR_RESOURCE.getExecutor())
.setJobMasterConfiguration(configuration)
.build();
assertThat(scheduler.requestJobResourceRequirements())
.isEqualTo(
JobResourceRequirements.newBuilder()
.setParallelismForJobVertex(
JOB_VERTEX.getID(), 1, JOB_VERTEX.getParallelism())
.build());
}
@Test
void testRequestDefaultResourceRequirementsInReactiveMode() throws Exception {
final JobGraph jobGraph = createJobGraph();
final Configuration configuration = new Configuration();
configuration.set(JobManagerOptions.SCHEDULER_MODE, SchedulerExecutionMode.REACTIVE);
scheduler =
new AdaptiveSchedulerBuilder(
jobGraph,
singleThreadMainThreadExecutor,
EXECUTOR_RESOURCE.getExecutor())
.setJobMasterConfiguration(configuration)
.build();
assertThat(scheduler.requestJobResourceRequirements())
.isEqualTo(
JobResourceRequirements.newBuilder()
.setParallelismForJobVertex(
JOB_VERTEX.getID(),
1,
SchedulerBase.getDefaultMaxParallelism(JOB_VERTEX))
.build());
}
@Test
void testRequestUpdatedResourceRequirements() throws Exception {
final JobGraph jobGraph = createJobGraph();
final Configuration configuration = new Configuration();
scheduler =
new AdaptiveSchedulerBuilder(
jobGraph,
singleThreadMainThreadExecutor,
EXECUTOR_RESOURCE.getExecutor())
.setJobMasterConfiguration(configuration)
.build();
final JobResourceRequirements newJobResourceRequirements =
JobResourceRequirements.newBuilder()
.setParallelismForJobVertex(JOB_VERTEX.getID(), 1, 12)
.build();
assertThat(scheduler.requestJobResourceRequirements())
.isNotEqualTo(newJobResourceRequirements);
scheduler.updateJobResourceRequirements(newJobResourceRequirements);
assertThat(scheduler.requestJobResourceRequirements())
.isEqualTo(newJobResourceRequirements);
final JobResourceRequirements newJobResourceRequirements2 =
JobResourceRequirements.newBuilder()
.setParallelismForJobVertex(JOB_VERTEX.getID(), 4, 12)
.build();
assertThat(scheduler.requestJobResourceRequirements())
.isNotEqualTo(newJobResourceRequirements2);
scheduler.updateJobResourceRequirements(newJobResourceRequirements2);
assertThat(scheduler.requestJobResourceRequirements())
.isEqualTo(newJobResourceRequirements2);
}
@Test
void testScalingIntervalConfigurationIsRespected() throws ConfigurationException {
final Duration executingCooldownTimeout = Duration.ofMillis(1337);
final Duration scalingStabilizationTimeout = Duration.ofMillis(7331);
final Configuration configuration = createConfigurationWithNoTimeouts();
configuration.set(
JobManagerOptions.SCHEDULER_EXECUTING_COOLDOWN_AFTER_RESCALING,
executingCooldownTimeout);
configuration.set(
JobManagerOptions.SCHEDULER_EXECUTING_RESOURCE_STABILIZATION_TIMEOUT,
scalingStabilizationTimeout);
final AdaptiveScheduler.Settings settings = AdaptiveScheduler.Settings.of(configuration);
assertThat(settings.getExecutingCooldownTimeout()).isEqualTo(executingCooldownTimeout);
assertThat(settings.getExecutingResourceStabilizationTimeout())
.isEqualTo(scalingStabilizationTimeout);
}
@Test
void testOnCompletedCheckpointIsHandledInMainThread() throws Exception {
testCheckpointStatsEventBeingExecutedInTheMainThread(
CheckpointStatsListener::onCompletedCheckpoint, 1, Integer.MAX_VALUE);
}
@Test
void testOnFailedCheckpointIsHandledInMainThread() throws Exception {
testCheckpointStatsEventBeingExecutedInTheMainThread(
CheckpointStatsListener::onFailedCheckpoint, 2, 2);
}
private void testCheckpointStatsEventBeingExecutedInTheMainThread(
Consumer<CheckpointStatsListener> eventCallback,
int eventRepetitions,
int triggerOnFailedCheckpointCount)
throws Exception {
final CompletableFuture<CheckpointStatsListener> statsListenerInstantiatedFuture =
new CompletableFuture<>();
final BlockingQueue<Integer> eventQueue = new ArrayBlockingQueue<>(1);
scheduler =
createSchedulerThatReachesExecutingState(
PARALLELISM,
triggerOnFailedCheckpointCount,
eventQueue,
statsListenerInstantiatedFuture);
// start scheduling to reach Executing state
singleThreadMainThreadExecutor.execute(scheduler::startScheduling);
final CheckpointStatsListener statsListener = statsListenerInstantiatedFuture.get();
assertThat(statsListener)
.as("The CheckpointStatsListener should have been instantiated.")
.isNotNull();
// the first trigger happens in the Executing initialization - let's wait for that event
// to pass
assertThat(eventQueue.take())
.as(
"The first event should have been appeared during Executing state initialization and should be ignored.")
.isEqualTo(0);
// counting the failed checkpoints only starts on a change event
scheduler.updateJobResourceRequirements(
JobResourceRequirements.newBuilder()
.setParallelismForJobVertex(JOB_VERTEX.getID(), 1, PARALLELISM - 1)
.build());
for (int i = 0; i < eventRepetitions; i++) {
assertThatNoException()
.as(
"Triggering the event from outside the main thread should not have caused an error.")
.isThrownBy(() -> eventCallback.accept(statsListener));
}
assertThat(eventQueue.take()).as("Only one event should have been observed.").isEqualTo(1);
}
@Test
void testGoToWaitingForResourcesConfiguresStateTransitionManagerFactory() throws Exception {
final OneShotLatch latch = new OneShotLatch();
final TestingStateTransitionManagerFactory factory =
new TestingStateTransitionManagerFactory(
ctx ->
TestingStateTransitionManager.withOnChangeEventOnly(
() -> {
if (ctx instanceof WaitingForResources) {
latch.trigger();
}
}));
final Configuration configuration = new Configuration();
final Duration resourceStabilizationTimeout = Duration.ofMillis(10L);
configuration.set(
JobManagerOptions.SCHEDULER_SUBMISSION_RESOURCE_STABILIZATION_TIMEOUT,
resourceStabilizationTimeout);
scheduler =
new AdaptiveSchedulerBuilder(
createJobGraph(),
singleThreadMainThreadExecutor,
EXECUTOR_RESOURCE.getExecutor())
.setStateTransitionManagerFactory(factory)
.setJobMasterConfiguration(configuration)
.build();
// start scheduling to reach Executing state
singleThreadMainThreadExecutor.execute(scheduler::startScheduling);
// let's wait for the onChange event in Executing state.
latch.await();
assertThat(scheduler.getState()).isInstanceOf(WaitingForResources.class);
assertThat(factory.cooldownTimeout).isEqualTo(Duration.ZERO);
assertThat(factory.maximumDelayForTrigger).isEqualTo(Duration.ZERO);
assertThat(factory.resourceStabilizationTimeout).isEqualTo(resourceStabilizationTimeout);
}
@Test
void testGoToExecutingConfiguresStateTransitionManagerFactory() throws Exception {
final OneShotLatch latch = new OneShotLatch();
final TestingStateTransitionManagerFactory factory =
new TestingStateTransitionManagerFactory(
ctx ->
TestingStateTransitionManager.withOnChangeEventOnly(
() -> {
if (ctx instanceof WaitingForResources) {
ctx.transitionToSubsequentState();
}
if (ctx instanceof Executing) {
latch.trigger();
}
}));
final Configuration configuration = new Configuration();
final Duration executingCooldownTimeout = Duration.ofMillis(1L);
final Duration executingResourceStabilizationTimeout = Duration.ofMillis(5L);
final Duration maxDelayForTrigger = Duration.ofMillis(10L);
configuration.set(
JobManagerOptions.SCHEDULER_EXECUTING_COOLDOWN_AFTER_RESCALING,
executingCooldownTimeout);
configuration.set(
JobManagerOptions.SCHEDULER_RESCALE_TRIGGER_MAX_DELAY, maxDelayForTrigger);
configuration.set(
JobManagerOptions.SCHEDULER_EXECUTING_RESOURCE_STABILIZATION_TIMEOUT,
executingResourceStabilizationTimeout);
scheduler =
new AdaptiveSchedulerBuilder(
createJobGraph(),
singleThreadMainThreadExecutor,
EXECUTOR_RESOURCE.getExecutor())
.setJobMasterConfiguration(configuration)
.setDeclarativeSlotPool(getSlotPoolWithFreeSlots(PARALLELISM))
.setStateTransitionManagerFactory(factory)
.build();
// start scheduling to reach Executing state
singleThreadMainThreadExecutor.execute(scheduler::startScheduling);
// let's wait for the onChange event in Executing state.
latch.await();
assertThat(scheduler.getState()).isInstanceOf(Executing.class);
assertThat(factory.cooldownTimeout).isEqualTo(executingCooldownTimeout);
assertThat(factory.maximumDelayForTrigger).isEqualTo(maxDelayForTrigger);
assertThat(factory.resourceStabilizationTimeout)
.isEqualTo(executingResourceStabilizationTimeout);
}
// ---------------------------------------------------------------------------------------------
// Utils
// ---------------------------------------------------------------------------------------------
private static | AdaptiveSchedulerTest |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDFSOpsCountStatistics.java | {
"start": 1983,
"end": 3561
} | class ____ {
private static final Logger LOG = LoggerFactory.getLogger(
TestDFSOpsCountStatistics.class);
private static final String NO_SUCH_OP = "no-such-dfs-operation-dude";
private final DFSOpsCountStatistics statistics =
new DFSOpsCountStatistics();
private final Map<OpType, AtomicLong> expectedOpsCountMap = new HashMap<>();
@BeforeEach
public void setup() {
for (OpType opType : OpType.values()) {
expectedOpsCountMap.put(opType, new AtomicLong());
}
incrementOpsCountByRandomNumbers();
}
/**
* This is to test the the {@link OpType} symbols are unique.
*/
@Test
public void testOpTypeSymbolsAreUnique() {
final Set<String> opTypeSymbols = new HashSet<>();
for (OpType opType : OpType.values()) {
assertFalse(opTypeSymbols.contains(opType.getSymbol()));
opTypeSymbols.add(opType.getSymbol());
}
assertEquals(OpType.values().length, opTypeSymbols.size());
}
@Test
public void testGetLongStatistics() {
short iterations = 0; // number of the iter.hasNext()
final Iterator<LongStatistic> iter = statistics.getLongStatistics();
while (iter.hasNext()) {
final LongStatistic longStat = iter.next();
assertNotNull(longStat);
final OpType opType = OpType.fromSymbol(longStat.getName());
assertNotNull(opType);
assertTrue(expectedOpsCountMap.containsKey(opType));
assertEquals(expectedOpsCountMap.get(opType).longValue(),
longStat.getValue());
iterations++;
}
// check that all the OpType | TestDFSOpsCountStatistics |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java | {
"start": 19666,
"end": 22335
} | class ____ extends ExtractFromSource {
private final Predicate<String> isRoutingPath;
ForRoutingPath(IndexMetadata metadata) {
super(metadata, metadata.getRoutingPaths());
isRoutingPath = Regex.simpleMatcher(metadata.getRoutingPaths().toArray(String[]::new));
}
@Override
protected int hashSource(IndexRequest indexRequest) {
return hashRoutingFields(indexRequest.getContentType(), indexRequest.source()).buildHash(
IndexRouting.ExtractFromSource::defaultOnEmpty
);
}
public String createId(XContentType sourceType, BytesReference source, byte[] suffix) {
return hashRoutingFields(sourceType, source).createId(suffix, IndexRouting.ExtractFromSource::defaultOnEmpty);
}
public RoutingHashBuilder builder() {
return new RoutingHashBuilder(isRoutingPath);
}
private RoutingHashBuilder hashRoutingFields(XContentType sourceType, BytesReference source) {
RoutingHashBuilder b = builder();
try (XContentParser parser = XContentHelper.createParserNotCompressed(parserConfig, source, sourceType)) {
parser.nextToken(); // Move to first token
if (parser.currentToken() == null) {
throw new IllegalArgumentException("Error extracting routing: source didn't contain any routing fields");
}
parser.nextToken();
b.extractObject(null, parser);
ensureExpectedToken(null, parser.nextToken(), parser);
} catch (IOException | ParsingException e) {
throw new IllegalArgumentException("Error extracting routing: " + e.getMessage(), e);
}
return b;
}
public boolean matchesField(String fieldName) {
return isRoutingPath.test(fieldName);
}
}
/**
* Strategy for time-series indices that use {@link IndexMetadata#INDEX_DIMENSIONS} to extract the tsid from the source.
* This strategy avoids double hashing of dimensions during indexing.
* It requires that the index was created with {@link IndexVersions#TSID_CREATED_DURING_ROUTING} or later.
* It creates the tsid during routing and makes the routing decision based on the tsid.
* The tsid gets attached to the index request so that the data node can reuse it instead of rebuilding it.
*/
public static | ForRoutingPath |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/query/Query.java | {
"start": 1240,
"end": 1520
} | interface ____ providing custom stores that handle
* them (via {@link org.apache.kafka.streams.state.StoreSupplier}s.
* <p>
* See KIP-796 (https://cwiki.apache.org/confluence/x/34xnCw) for more details.
*
* @param <R> The type of the result returned by this query.
*/
public | and |
java | apache__dubbo | dubbo-config/dubbo-config-spring/src/main/java/org/apache/dubbo/config/spring/util/EnvironmentUtils.java | {
"start": 1488,
"end": 4830
} | class ____ {
/**
* The separator of property name
*/
public static final String PROPERTY_NAME_SEPARATOR = ".";
/**
* The prefix of property name of Dubbo
*/
public static final String DUBBO_PREFIX = "dubbo";
/**
* Extras The properties from {@link ConfigurableEnvironment}
*
* @param environment {@link ConfigurableEnvironment}
* @return Read-only Map
*/
public static Map<String, Object> extractProperties(ConfigurableEnvironment environment) {
return Collections.unmodifiableMap(doExtraProperties(environment));
}
private static Map<String, Object> doExtraProperties(ConfigurableEnvironment environment) {
Map<String, Object> properties = new LinkedHashMap<>(); // orderly
Map<String, PropertySource<?>> map = doGetPropertySources(environment);
for (PropertySource<?> source : map.values()) {
if (source instanceof EnumerablePropertySource) {
EnumerablePropertySource propertySource = (EnumerablePropertySource) source;
String[] propertyNames = propertySource.getPropertyNames();
if (ObjectUtils.isEmpty(propertyNames)) {
continue;
}
for (String propertyName : propertyNames) {
if (!properties.containsKey(propertyName)) { // put If absent
properties.put(propertyName, propertySource.getProperty(propertyName));
}
}
}
}
return properties;
}
private static Map<String, PropertySource<?>> doGetPropertySources(ConfigurableEnvironment environment) {
Map<String, PropertySource<?>> map = new LinkedHashMap<>();
MutablePropertySources sources = environment.getPropertySources();
for (PropertySource<?> source : sources) {
extract("", map, source);
}
return map;
}
private static void extract(String root, Map<String, PropertySource<?>> map, PropertySource<?> source) {
if (source instanceof CompositePropertySource) {
for (PropertySource<?> nest : ((CompositePropertySource) source).getPropertySources()) {
extract(source.getName() + ":", map, nest);
}
} else {
map.put(root + source.getName(), source);
}
}
/**
* Filters Dubbo Properties from {@link ConfigurableEnvironment}
*
* @param environment {@link ConfigurableEnvironment}
* @return Read-only SortedMap
*/
public static SortedMap<String, String> filterDubboProperties(ConfigurableEnvironment environment) {
SortedMap<String, String> dubboProperties = new TreeMap<>();
Map<String, Object> properties = extractProperties(environment);
for (Map.Entry<String, Object> entry : properties.entrySet()) {
String propertyName = entry.getKey();
if (propertyName.startsWith(DUBBO_PREFIX + PROPERTY_NAME_SEPARATOR) && entry.getValue() != null) {
dubboProperties.put(
propertyName,
environment.resolvePlaceholders(entry.getValue().toString()));
}
}
return Collections.unmodifiableSortedMap(dubboProperties);
}
}
| EnvironmentUtils |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/introspect/TestNamingStrategyStd.java | {
"start": 3282,
"end": 3356
} | class ____ {
public int someValue = 3;
}
static | DefaultNaming |
java | google__auto | value/src/main/java/com/google/auto/value/extension/serializable/processor/PropertyMirror.java | {
"start": 966,
"end": 1505
} | class ____ {
private final TypeMirror type;
private final String name;
private final String method;
PropertyMirror(TypeMirror type, String name, String method) {
this.type = type;
this.name = name;
this.method = method;
}
/** Gets the AutoValue property's type. */
TypeMirror getType() {
return type;
}
/** Gets the AutoValue property's name. */
String getName() {
return name;
}
/** Gets the AutoValue property accessor method. */
String getMethod() {
return method;
}
}
| PropertyMirror |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/Aws2TranslateComponentBuilderFactory.java | {
"start": 17519,
"end": 22294
} | class ____
extends AbstractComponentBuilder<Translate2Component>
implements Aws2TranslateComponentBuilder {
@Override
protected Translate2Component buildConcreteComponent() {
return new Translate2Component();
}
private org.apache.camel.component.aws2.translate.Translate2Configuration getOrCreateConfiguration(Translate2Component component) {
if (component.getConfiguration() == null) {
component.setConfiguration(new org.apache.camel.component.aws2.translate.Translate2Configuration());
}
return component.getConfiguration();
}
@Override
protected boolean setPropertyOnComponent(
Component component,
String name,
Object value) {
switch (name) {
case "autodetectSourceLanguage": getOrCreateConfiguration((Translate2Component) component).setAutodetectSourceLanguage((boolean) value); return true;
case "configuration": ((Translate2Component) component).setConfiguration((org.apache.camel.component.aws2.translate.Translate2Configuration) value); return true;
case "lazyStartProducer": ((Translate2Component) component).setLazyStartProducer((boolean) value); return true;
case "operation": getOrCreateConfiguration((Translate2Component) component).setOperation((org.apache.camel.component.aws2.translate.Translate2Operations) value); return true;
case "overrideEndpoint": getOrCreateConfiguration((Translate2Component) component).setOverrideEndpoint((boolean) value); return true;
case "pojoRequest": getOrCreateConfiguration((Translate2Component) component).setPojoRequest((boolean) value); return true;
case "region": getOrCreateConfiguration((Translate2Component) component).setRegion((java.lang.String) value); return true;
case "sourceLanguage": getOrCreateConfiguration((Translate2Component) component).setSourceLanguage((java.lang.String) value); return true;
case "targetLanguage": getOrCreateConfiguration((Translate2Component) component).setTargetLanguage((java.lang.String) value); return true;
case "uriEndpointOverride": getOrCreateConfiguration((Translate2Component) component).setUriEndpointOverride((java.lang.String) value); return true;
case "autowiredEnabled": ((Translate2Component) component).setAutowiredEnabled((boolean) value); return true;
case "translateClient": getOrCreateConfiguration((Translate2Component) component).setTranslateClient((software.amazon.awssdk.services.translate.TranslateClient) value); return true;
case "healthCheckConsumerEnabled": ((Translate2Component) component).setHealthCheckConsumerEnabled((boolean) value); return true;
case "healthCheckProducerEnabled": ((Translate2Component) component).setHealthCheckProducerEnabled((boolean) value); return true;
case "proxyHost": getOrCreateConfiguration((Translate2Component) component).setProxyHost((java.lang.String) value); return true;
case "proxyPort": getOrCreateConfiguration((Translate2Component) component).setProxyPort((java.lang.Integer) value); return true;
case "proxyProtocol": getOrCreateConfiguration((Translate2Component) component).setProxyProtocol((software.amazon.awssdk.core.Protocol) value); return true;
case "accessKey": getOrCreateConfiguration((Translate2Component) component).setAccessKey((java.lang.String) value); return true;
case "profileCredentialsName": getOrCreateConfiguration((Translate2Component) component).setProfileCredentialsName((java.lang.String) value); return true;
case "secretKey": getOrCreateConfiguration((Translate2Component) component).setSecretKey((java.lang.String) value); return true;
case "sessionToken": getOrCreateConfiguration((Translate2Component) component).setSessionToken((java.lang.String) value); return true;
case "trustAllCertificates": getOrCreateConfiguration((Translate2Component) component).setTrustAllCertificates((boolean) value); return true;
case "useDefaultCredentialsProvider": getOrCreateConfiguration((Translate2Component) component).setUseDefaultCredentialsProvider((boolean) value); return true;
case "useProfileCredentialsProvider": getOrCreateConfiguration((Translate2Component) component).setUseProfileCredentialsProvider((boolean) value); return true;
case "useSessionCredentials": getOrCreateConfiguration((Translate2Component) component).setUseSessionCredentials((boolean) value); return true;
default: return false;
}
}
}
} | Aws2TranslateComponentBuilderImpl |
java | google__guava | android/guava-testlib/src/com/google/common/testing/TearDown.java | {
"start": 885,
"end": 1887
} | interface ____ {
/**
* Performs a <b>single</b> tear-down operation. See test-libraries-for-java's {@code
* com.google.common.testing.junit3.TearDownTestCase} and {@code
* com.google.common.testing.junit4.TearDownTestCase} for example.
*
* <p>A failing {@link TearDown} may or may not fail a tl4j test, depending on the version of
* JUnit test case you are running under. To avoid failing in the face of an exception regardless
* of JUnit version, implement a {@link SloppyTearDown} instead.
*
* <p>tl4j details: For backwards compatibility, {@code junit3.TearDownTestCase} currently does
* not fail a test when an exception is thrown from one of its {@link TearDown} instances, but
* this is subject to change. Also, {@code junit4.TearDownTestCase} will.
*
* @throws Exception for any reason. {@code TearDownTestCase} ensures that any exception thrown
* will not interfere with other TearDown operations.
*/
void tearDown() throws Exception;
}
| TearDown |
java | spring-projects__spring-boot | module/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/audit/AuditEventsEndpoint.java | {
"start": 2044,
"end": 2313
} | class ____ implements OperationResponseBody {
private final List<AuditEvent> events;
private AuditEventsDescriptor(List<AuditEvent> events) {
this.events = events;
}
public List<AuditEvent> getEvents() {
return this.events;
}
}
}
| AuditEventsDescriptor |
java | apache__logging-log4j2 | log4j-api/src/main/java/org/apache/logging/log4j/util/LambdaUtil.java | {
"start": 964,
"end": 1021
} | class ____ lambda support.
* @since 2.4
*/
public final | for |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/LocalFSContract.java | {
"start": 1567,
"end": 3513
} | class ____ extends AbstractFSContract {
public static final String CONTRACT_XML = "contract/localfs.xml";
private FileSystem fs;
private String testDataDir = new FileSystemTestHelper().getTestRootDir();
public LocalFSContract(Configuration conf) {
super(conf);
//insert the base features
addConfResource(getContractXml());
}
/**
* Return the contract file for this filesystem
* @return the XML
*/
protected String getContractXml() {
return CONTRACT_XML;
}
@Override
public void init() throws IOException {
super.init();
fs = getLocalFS();
adjustContractToLocalEnvironment();
}
/**
* tweak some of the contract parameters based on the local system
* state
*/
protected void adjustContractToLocalEnvironment() {
if (Shell.WINDOWS) {
//NTFS doesn't do case sensitivity, and its permissions are ACL-based
getConf().setBoolean(getConfKey(ContractOptions.IS_CASE_SENSITIVE), false);
getConf().setBoolean(getConfKey(ContractOptions.SUPPORTS_UNIX_PERMISSIONS), false);
} else if (ContractTestUtils.isOSX()) {
//OSX HFS+ is not case sensitive
getConf().setBoolean(getConfKey(ContractOptions.IS_CASE_SENSITIVE),
false);
}
}
/**
* Get the local filesystem. This may be overridden
* @return the filesystem
* @throws IOException
*/
protected FileSystem getLocalFS() throws IOException {
return FileSystem.getLocal(getConf());
}
@Override
public FileSystem getTestFileSystem() throws IOException {
return fs;
}
@Override
public String getScheme() {
return "file";
}
@Override
public Path getTestPath() {
Path path = fs.makeQualified(new Path(
getTestDataDir()));
return path;
}
/**
* Get the test data directory
* @return the directory for test data
*/
protected String getTestDataDir() {
return testDataDir;
}
}
| LocalFSContract |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java | {
"start": 40798,
"end": 49438
} | class ____ does not match the file");
}
if (reader.getVersion() != VERSION[3]) {
throw new VersionMismatchException(VERSION[3],
reader.getVersion());
}
if (metadataOption != null) {
LOG.info("MetaData Option is ignored during append");
}
metadataOption = (MetadataOption) SequenceFile.Writer
.metadata(reader.getMetadata());
CompressionOption readerCompressionOption = new CompressionOption(
reader.getCompressionType(), reader.getCompressionCodec());
// Codec comparison will be ignored if the compression is NONE
if (readerCompressionOption.value != compressionTypeOption.value
|| (readerCompressionOption.value != CompressionType.NONE
&& readerCompressionOption.codec
.getClass() != compressionTypeOption.codec
.getClass())) {
throw new IllegalArgumentException(
"Compression option provided does not match the file");
}
sync = reader.getSync();
} finally {
reader.close();
}
out = fs.append(p, bufferSize, progress);
this.appendMode = true;
} else {
out = fs
.create(p, true, bufferSize, replication, blockSize, progress);
}
} else {
out = streamOption.getValue();
}
Class<?> keyClass = keyClassOption == null ?
Object.class : keyClassOption.getValue();
Class<?> valueClass = valueClassOption == null ?
Object.class : valueClassOption.getValue();
Metadata metadata = metadataOption == null ?
new Metadata() : metadataOption.getValue();
this.compress = compressionTypeOption.getValue();
final CompressionCodec codec = compressionTypeOption.getCodec();
this.syncInterval = (syncIntervalOption == null) ?
SYNC_INTERVAL :
syncIntervalOption.getValue();
init(
conf, out, ownStream, keyClass, valueClass,
codec, metadata, syncInterval);
}
/**
* Create the named file.
* @deprecated Use
* {@link SequenceFile#createWriter(Configuration, Writer.Option...)}
* instead.
* @param fs input filesystem.
* @param conf input configuration.
* @param name input name.
* @param keyClass input keyClass.
* @param valClass input valClass.
* @throws IOException raised on errors performing I/O.
*/
@Deprecated
public Writer(FileSystem fs, Configuration conf, Path name,
Class keyClass, Class valClass) throws IOException {
this.compress = CompressionType.NONE;
init(conf, fs.create(name), true, keyClass, valClass, null,
new Metadata(), SYNC_INTERVAL);
}
/**
* Create the named file with write-progress reporter.
* @deprecated Use
* {@link SequenceFile#createWriter(Configuration, Writer.Option...)}
* instead.
* @param fs input filesystem.
* @param conf input configuration.
* @param name input name.
* @param keyClass input keyClass.
* @param valClass input valClass.
* @param progress input progress.
* @param metadata input metadata.
* @throws IOException raised on errors performing I/O.
*/
@Deprecated
public Writer(FileSystem fs, Configuration conf, Path name,
Class keyClass, Class valClass,
Progressable progress, Metadata metadata) throws IOException {
this.compress = CompressionType.NONE;
init(conf, fs.create(name, progress), true, keyClass, valClass,
null, metadata, SYNC_INTERVAL);
}
/**
* Create the named file with write-progress reporter.
* @deprecated Use
* {@link SequenceFile#createWriter(Configuration, Writer.Option...)}
* instead.
* @param fs input filesystem.
* @param conf input configuration.
* @param name input name.
* @param keyClass input keyClass.
* @param valClass input valClass.
* @param bufferSize input bufferSize.
* @param replication input replication.
* @param blockSize input blockSize.
* @param progress input progress.
* @param metadata input metadata.
* @throws IOException raised on errors performing I/O.
*/
@Deprecated
public Writer(FileSystem fs, Configuration conf, Path name,
Class keyClass, Class valClass,
int bufferSize, short replication, long blockSize,
Progressable progress, Metadata metadata) throws IOException {
this.compress = CompressionType.NONE;
init(conf,
fs.create(name, true, bufferSize, replication, blockSize, progress),
true, keyClass, valClass, null, metadata, SYNC_INTERVAL);
}
boolean isCompressed() { return compress != CompressionType.NONE; }
boolean isBlockCompressed() { return compress == CompressionType.BLOCK; }
Writer ownStream() { this.ownOutputStream = true; return this; }
/** Write and flush the file header. */
private void writeFileHeader()
throws IOException {
out.write(VERSION);
Text.writeString(out, keyClass.getName());
Text.writeString(out, valClass.getName());
out.writeBoolean(this.isCompressed());
out.writeBoolean(this.isBlockCompressed());
if (this.isCompressed()) {
Text.writeString(out, (codec.getClass()).getName());
}
this.metadata.write(out);
out.write(sync); // write the sync bytes
out.flush(); // flush header
}
/** Initialize. */
@SuppressWarnings("unchecked")
void init(Configuration config, FSDataOutputStream outStream,
boolean ownStream, Class key, Class val,
CompressionCodec compCodec, Metadata meta,
int syncIntervalVal)
throws IOException {
this.conf = config;
this.out = outStream;
this.ownOutputStream = ownStream;
this.keyClass = key;
this.valClass = val;
this.codec = compCodec;
this.metadata = meta;
this.syncInterval = syncIntervalVal;
SerializationFactory serializationFactory =
new SerializationFactory(config);
this.keySerializer = serializationFactory.getSerializer(keyClass);
if (this.keySerializer == null) {
throw new IOException(
"Could not find a serializer for the Key class: '"
+ keyClass.getCanonicalName() + "'. "
+ "Please ensure that the configuration '" +
CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
+ "properly configured, if you're using"
+ "custom serialization.");
}
this.keySerializer.open(buffer);
this.uncompressedValSerializer = serializationFactory.getSerializer(valClass);
if (this.uncompressedValSerializer == null) {
throw new IOException(
"Could not find a serializer for the Value class: '"
+ valClass.getCanonicalName() + "'. "
+ "Please ensure that the configuration '" +
CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
+ "properly configured, if you're using"
+ "custom serialization.");
}
this.uncompressedValSerializer.open(buffer);
if (this.codec != null) {
ReflectionUtils.setConf(this.codec, this.conf);
this.compressor = CodecPool.getCompressor(this.codec);
this.deflateFilter = this.codec.createOutputStream(buffer, compressor);
this.deflateOut =
new DataOutputStream(new BufferedOutputStream(deflateFilter));
this.compressedValSerializer = serializationFactory.getSerializer(valClass);
if (this.compressedValSerializer == null) {
throw new IOException(
"Could not find a serializer for the Value class: '"
+ valClass.getCanonicalName() + "'. "
+ "Please ensure that the configuration '" +
CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
+ "properly configured, if you're using"
+ "custom serialization.");
}
this.compressedValSerializer.open(deflateOut);
}
if (appendMode) {
sync();
} else {
writeFileHeader();
}
}
/** @return Returns the | provided |
java | apache__kafka | streams/src/test/java/org/apache/kafka/streams/processor/internals/RepartitionTopicsTest.java | {
"start": 2954,
"end": 23316
} | class ____ {
private static final String SOURCE_TOPIC_NAME1 = "source1";
private static final String SOURCE_TOPIC_NAME2 = "source2";
private static final String SOURCE_TOPIC_NAME3 = "source3";
private static final String SINK_TOPIC_NAME1 = "sink1";
private static final String SINK_TOPIC_NAME2 = "sink2";
private static final String REPARTITION_TOPIC_NAME1 = "repartition1";
private static final String REPARTITION_TOPIC_NAME2 = "repartition2";
private static final String REPARTITION_TOPIC_NAME3 = "repartition3";
private static final String REPARTITION_TOPIC_NAME4 = "repartition4";
private static final String REPARTITION_WITHOUT_PARTITION_COUNT = "repartitionWithoutPartitionCount";
private static final String SOME_OTHER_TOPIC = "someOtherTopic";
private static final Map<String, String> TOPIC_CONFIG1 = Collections.singletonMap("config1", "val1");
private static final Map<String, String> TOPIC_CONFIG2 = Collections.singletonMap("config2", "val2");
private static final Map<String, String> TOPIC_CONFIG5 = Collections.singletonMap("config5", "val5");
private static final RepartitionTopicConfig REPARTITION_TOPIC_CONFIG1 =
new RepartitionTopicConfig(REPARTITION_TOPIC_NAME1, TOPIC_CONFIG1, 4, true);
private static final RepartitionTopicConfig REPARTITION_TOPIC_CONFIG2 =
new RepartitionTopicConfig(REPARTITION_TOPIC_NAME2, TOPIC_CONFIG2, 2, true);
private static final TopicsInfo TOPICS_INFO1 = new TopicsInfo(
Set.of(REPARTITION_TOPIC_NAME1),
Set.of(SOURCE_TOPIC_NAME1, SOURCE_TOPIC_NAME2),
mkMap(
mkEntry(REPARTITION_TOPIC_NAME1, REPARTITION_TOPIC_CONFIG1),
mkEntry(REPARTITION_TOPIC_NAME2, REPARTITION_TOPIC_CONFIG2)
),
Collections.emptyMap()
);
private static final TopicsInfo TOPICS_INFO2 = new TopicsInfo(
Set.of(SINK_TOPIC_NAME1),
Set.of(REPARTITION_TOPIC_NAME1),
mkMap(mkEntry(REPARTITION_TOPIC_NAME1, REPARTITION_TOPIC_CONFIG1)),
Collections.emptyMap()
);
final StreamsConfig config = new DummyStreamsConfig();
@Mock
InternalTopologyBuilder internalTopologyBuilder;
@Mock
InternalTopicManager internalTopicManager;
@Mock
CopartitionedTopicsEnforcer copartitionedTopicsEnforcer;
@Mock
Cluster clusterMetadata;
@BeforeEach
public void setup() {
when(internalTopologyBuilder.hasNamedTopology()).thenReturn(false);
when(internalTopologyBuilder.topologyName()).thenReturn(null);
}
@Test
public void shouldSetupRepartitionTopics() {
when(internalTopologyBuilder.subtopologyToTopicsInfo())
.thenReturn(mkMap(mkEntry(SUBTOPOLOGY_0, TOPICS_INFO1), mkEntry(SUBTOPOLOGY_1, TOPICS_INFO2)));
final Set<String> coPartitionGroup1 = Set.of(SOURCE_TOPIC_NAME1, SOURCE_TOPIC_NAME2);
final Set<String> coPartitionGroup2 = Set.of(REPARTITION_TOPIC_NAME1, REPARTITION_TOPIC_NAME2);
final List<Set<String>> coPartitionGroups = Arrays.asList(coPartitionGroup1, coPartitionGroup2);
when(internalTopologyBuilder.copartitionGroups()).thenReturn(coPartitionGroups);
when(internalTopicManager.makeReady(
mkMap(
mkEntry(REPARTITION_TOPIC_NAME1, REPARTITION_TOPIC_CONFIG1),
mkEntry(REPARTITION_TOPIC_NAME2, REPARTITION_TOPIC_CONFIG2)
))
).thenReturn(Collections.emptySet());
setupCluster(false);
final RepartitionTopics repartitionTopics = new RepartitionTopics(
new TopologyMetadata(internalTopologyBuilder, config),
internalTopicManager,
copartitionedTopicsEnforcer,
clusterMetadata,
"[test] "
);
repartitionTopics.setup();
final Map<TopicPartition, PartitionInfo> topicPartitionsInfo = repartitionTopics.topicPartitionsInfo();
assertThat(topicPartitionsInfo.size(), is(6));
verifyRepartitionTopicPartitionInfo(topicPartitionsInfo, REPARTITION_TOPIC_NAME1, 0);
verifyRepartitionTopicPartitionInfo(topicPartitionsInfo, REPARTITION_TOPIC_NAME1, 1);
verifyRepartitionTopicPartitionInfo(topicPartitionsInfo, REPARTITION_TOPIC_NAME1, 2);
verifyRepartitionTopicPartitionInfo(topicPartitionsInfo, REPARTITION_TOPIC_NAME1, 3);
verifyRepartitionTopicPartitionInfo(topicPartitionsInfo, REPARTITION_TOPIC_NAME2, 0);
verifyRepartitionTopicPartitionInfo(topicPartitionsInfo, REPARTITION_TOPIC_NAME2, 1);
assertThat(repartitionTopics.topologiesWithMissingInputTopics().isEmpty(), is(true));
assertThat(repartitionTopics.missingSourceTopicExceptions().isEmpty(), is(true));
verify(copartitionedTopicsEnforcer).enforce(eq(coPartitionGroup1), any(), eq(clusterMetadata));
verify(copartitionedTopicsEnforcer).enforce(eq(coPartitionGroup2), any(), eq(clusterMetadata));
}
@Test
public void shouldReturnMissingSourceTopics() {
final Set<String> missingSourceTopics = Set.of(SOURCE_TOPIC_NAME1);
when(internalTopologyBuilder.subtopologyToTopicsInfo())
.thenReturn(mkMap(mkEntry(SUBTOPOLOGY_0, TOPICS_INFO1), mkEntry(SUBTOPOLOGY_1, TOPICS_INFO2)));
setupClusterWithMissingTopics(missingSourceTopics, false);
final RepartitionTopics repartitionTopics = new RepartitionTopics(
new TopologyMetadata(internalTopologyBuilder, config),
internalTopicManager,
copartitionedTopicsEnforcer,
clusterMetadata,
"[test] "
);
repartitionTopics.setup();
assertThat(
repartitionTopics.topologiesWithMissingInputTopics(),
equalTo(Collections.singleton(UNNAMED_TOPOLOGY))
);
final StreamsException exception = repartitionTopics.missingSourceTopicExceptions().poll();
assertThat(exception, notNullValue());
assertThat(exception.taskId().isPresent(), is(true));
assertThat(exception.taskId().get(), equalTo(new TaskId(0, 0)));
}
@Test
public void shouldThrowTaskAssignmentExceptionIfPartitionCountCannotBeComputedForAllRepartitionTopics() {
final RepartitionTopicConfig repartitionTopicConfigWithoutPartitionCount =
new RepartitionTopicConfig(REPARTITION_WITHOUT_PARTITION_COUNT, TOPIC_CONFIG5);
when(internalTopologyBuilder.subtopologyToTopicsInfo())
.thenReturn(mkMap(
mkEntry(SUBTOPOLOGY_0, TOPICS_INFO1),
mkEntry(SUBTOPOLOGY_1, setupTopicInfoWithRepartitionTopicWithoutPartitionCount(repartitionTopicConfigWithoutPartitionCount))
));
setupCluster(false);
final RepartitionTopics repartitionTopics = new RepartitionTopics(
new TopologyMetadata(internalTopologyBuilder, config),
internalTopicManager,
copartitionedTopicsEnforcer,
clusterMetadata,
"[test] "
);
final TaskAssignmentException exception = assertThrows(TaskAssignmentException.class, repartitionTopics::setup);
assertThat(exception.getMessage(), is("Failed to compute number of partitions for all repartition topics, make sure all user input topics are created and all Pattern subscriptions match at least one topic in the cluster"));
assertThat(repartitionTopics.topologiesWithMissingInputTopics().isEmpty(), is(true));
assertThat(repartitionTopics.missingSourceTopicExceptions().isEmpty(), is(true));
}
@Test
public void shouldThrowTaskAssignmentExceptionIfSourceTopicHasNoPartitionCount() {
final RepartitionTopicConfig repartitionTopicConfigWithoutPartitionCount =
new RepartitionTopicConfig(REPARTITION_WITHOUT_PARTITION_COUNT, TOPIC_CONFIG5);
final TopicsInfo topicsInfo = new TopicsInfo(
Set.of(REPARTITION_WITHOUT_PARTITION_COUNT),
Set.of(SOURCE_TOPIC_NAME1),
mkMap(
mkEntry(REPARTITION_WITHOUT_PARTITION_COUNT, repartitionTopicConfigWithoutPartitionCount)
),
Collections.emptyMap()
);
when(internalTopologyBuilder.subtopologyToTopicsInfo())
.thenReturn(mkMap(
mkEntry(SUBTOPOLOGY_0, topicsInfo),
mkEntry(SUBTOPOLOGY_1, setupTopicInfoWithRepartitionTopicWithoutPartitionCount(repartitionTopicConfigWithoutPartitionCount))
));
setupClusterWithMissingPartitionCounts(Set.of(SOURCE_TOPIC_NAME1), true);
final RepartitionTopics repartitionTopics = new RepartitionTopics(
new TopologyMetadata(internalTopologyBuilder, config),
internalTopicManager,
copartitionedTopicsEnforcer,
clusterMetadata,
"[test] "
);
final TaskAssignmentException exception = assertThrows(TaskAssignmentException.class, repartitionTopics::setup);
assertThat(
exception.getMessage(),
is("No partition count found for source topic " + SOURCE_TOPIC_NAME1 + ", but it should have been.")
);
assertThat(repartitionTopics.topologiesWithMissingInputTopics().isEmpty(), is(true));
assertThat(repartitionTopics.missingSourceTopicExceptions().isEmpty(), is(true));
}
@Test
public void shouldSetRepartitionTopicPartitionCountFromUpstreamExternalSourceTopic() {
final RepartitionTopicConfig repartitionTopicConfigWithoutPartitionCount =
new RepartitionTopicConfig(REPARTITION_WITHOUT_PARTITION_COUNT, TOPIC_CONFIG5);
final TopicsInfo topicsInfo = new TopicsInfo(
Set.of(REPARTITION_TOPIC_NAME1, REPARTITION_WITHOUT_PARTITION_COUNT),
Set.of(SOURCE_TOPIC_NAME1, REPARTITION_TOPIC_NAME2),
mkMap(
mkEntry(REPARTITION_TOPIC_NAME1, REPARTITION_TOPIC_CONFIG1),
mkEntry(REPARTITION_TOPIC_NAME2, REPARTITION_TOPIC_CONFIG2),
mkEntry(REPARTITION_WITHOUT_PARTITION_COUNT, repartitionTopicConfigWithoutPartitionCount)
),
Collections.emptyMap()
);
when(internalTopologyBuilder.subtopologyToTopicsInfo())
.thenReturn(mkMap(
mkEntry(SUBTOPOLOGY_0, topicsInfo),
mkEntry(SUBTOPOLOGY_1, setupTopicInfoWithRepartitionTopicWithoutPartitionCount(repartitionTopicConfigWithoutPartitionCount))
));
when(internalTopologyBuilder.copartitionGroups()).thenReturn(Collections.emptyList());
when(internalTopicManager.makeReady(
mkMap(
mkEntry(REPARTITION_TOPIC_NAME1, REPARTITION_TOPIC_CONFIG1),
mkEntry(REPARTITION_TOPIC_NAME2, REPARTITION_TOPIC_CONFIG2),
mkEntry(REPARTITION_WITHOUT_PARTITION_COUNT, repartitionTopicConfigWithoutPartitionCount)
))
).thenReturn(Collections.emptySet());
setupCluster(true);
final RepartitionTopics repartitionTopics = new RepartitionTopics(
new TopologyMetadata(internalTopologyBuilder, config),
internalTopicManager,
copartitionedTopicsEnforcer,
clusterMetadata,
"[test] "
);
repartitionTopics.setup();
final Map<TopicPartition, PartitionInfo> topicPartitionsInfo = repartitionTopics.topicPartitionsInfo();
assertThat(topicPartitionsInfo.size(), is(9));
verifyRepartitionTopicPartitionInfo(topicPartitionsInfo, REPARTITION_TOPIC_NAME1, 0);
verifyRepartitionTopicPartitionInfo(topicPartitionsInfo, REPARTITION_TOPIC_NAME1, 1);
verifyRepartitionTopicPartitionInfo(topicPartitionsInfo, REPARTITION_TOPIC_NAME1, 2);
verifyRepartitionTopicPartitionInfo(topicPartitionsInfo, REPARTITION_TOPIC_NAME1, 3);
verifyRepartitionTopicPartitionInfo(topicPartitionsInfo, REPARTITION_TOPIC_NAME2, 0);
verifyRepartitionTopicPartitionInfo(topicPartitionsInfo, REPARTITION_TOPIC_NAME2, 1);
verifyRepartitionTopicPartitionInfo(topicPartitionsInfo, REPARTITION_WITHOUT_PARTITION_COUNT, 0);
verifyRepartitionTopicPartitionInfo(topicPartitionsInfo, REPARTITION_WITHOUT_PARTITION_COUNT, 1);
verifyRepartitionTopicPartitionInfo(topicPartitionsInfo, REPARTITION_WITHOUT_PARTITION_COUNT, 2);
assertThat(repartitionTopics.topologiesWithMissingInputTopics().isEmpty(), is(true));
assertThat(repartitionTopics.missingSourceTopicExceptions().isEmpty(), is(true));
}
@Test
public void shouldSetRepartitionTopicPartitionCountFromUpstreamInternalRepartitionSourceTopic() {
final RepartitionTopicConfig repartitionTopicConfigWithoutPartitionCount =
new RepartitionTopicConfig(REPARTITION_WITHOUT_PARTITION_COUNT, TOPIC_CONFIG5);
final TopicsInfo topicsInfo = new TopicsInfo(
Set.of(REPARTITION_TOPIC_NAME2, REPARTITION_WITHOUT_PARTITION_COUNT),
Set.of(SOURCE_TOPIC_NAME1, REPARTITION_TOPIC_NAME1),
mkMap(
mkEntry(REPARTITION_TOPIC_NAME1, REPARTITION_TOPIC_CONFIG1),
mkEntry(REPARTITION_TOPIC_NAME2, REPARTITION_TOPIC_CONFIG2),
mkEntry(REPARTITION_WITHOUT_PARTITION_COUNT, repartitionTopicConfigWithoutPartitionCount)
),
Collections.emptyMap()
);
when(internalTopologyBuilder.subtopologyToTopicsInfo())
.thenReturn(mkMap(
mkEntry(SUBTOPOLOGY_0, topicsInfo),
mkEntry(SUBTOPOLOGY_1, setupTopicInfoWithRepartitionTopicWithoutPartitionCount(repartitionTopicConfigWithoutPartitionCount))
));
when(internalTopologyBuilder.copartitionGroups()).thenReturn(Collections.emptyList());
when(internalTopicManager.makeReady(
mkMap(
mkEntry(REPARTITION_TOPIC_NAME1, REPARTITION_TOPIC_CONFIG1),
mkEntry(REPARTITION_TOPIC_NAME2, REPARTITION_TOPIC_CONFIG2),
mkEntry(REPARTITION_WITHOUT_PARTITION_COUNT, repartitionTopicConfigWithoutPartitionCount)
))
).thenReturn(Collections.emptySet());
setupCluster(true);
final RepartitionTopics repartitionTopics = new RepartitionTopics(
new TopologyMetadata(internalTopologyBuilder, config),
internalTopicManager,
copartitionedTopicsEnforcer,
clusterMetadata,
"[test] "
);
repartitionTopics.setup();
final Map<TopicPartition, PartitionInfo> topicPartitionsInfo = repartitionTopics.topicPartitionsInfo();
assertThat(topicPartitionsInfo.size(), is(10));
verifyRepartitionTopicPartitionInfo(topicPartitionsInfo, REPARTITION_TOPIC_NAME1, 0);
verifyRepartitionTopicPartitionInfo(topicPartitionsInfo, REPARTITION_TOPIC_NAME1, 1);
verifyRepartitionTopicPartitionInfo(topicPartitionsInfo, REPARTITION_TOPIC_NAME1, 2);
verifyRepartitionTopicPartitionInfo(topicPartitionsInfo, REPARTITION_TOPIC_NAME1, 3);
verifyRepartitionTopicPartitionInfo(topicPartitionsInfo, REPARTITION_TOPIC_NAME2, 0);
verifyRepartitionTopicPartitionInfo(topicPartitionsInfo, REPARTITION_TOPIC_NAME2, 1);
verifyRepartitionTopicPartitionInfo(topicPartitionsInfo, REPARTITION_WITHOUT_PARTITION_COUNT, 0);
verifyRepartitionTopicPartitionInfo(topicPartitionsInfo, REPARTITION_WITHOUT_PARTITION_COUNT, 1);
verifyRepartitionTopicPartitionInfo(topicPartitionsInfo, REPARTITION_WITHOUT_PARTITION_COUNT, 2);
verifyRepartitionTopicPartitionInfo(topicPartitionsInfo, REPARTITION_WITHOUT_PARTITION_COUNT, 3);
assertThat(repartitionTopics.topologiesWithMissingInputTopics().isEmpty(), is(true));
assertThat(repartitionTopics.missingSourceTopicExceptions().isEmpty(), is(true));
}
@Test
public void shouldNotSetupRepartitionTopicsWhenTopologyDoesNotContainAnyRepartitionTopics() {
final TopicsInfo topicsInfo = new TopicsInfo(
Set.of(SINK_TOPIC_NAME1),
Set.of(SOURCE_TOPIC_NAME1),
Collections.emptyMap(),
Collections.emptyMap()
);
when(internalTopologyBuilder.subtopologyToTopicsInfo())
.thenReturn(mkMap(mkEntry(SUBTOPOLOGY_0, topicsInfo)));
setupCluster(false);
final RepartitionTopics repartitionTopics = new RepartitionTopics(
new TopologyMetadata(internalTopologyBuilder, config),
internalTopicManager,
copartitionedTopicsEnforcer,
clusterMetadata,
"[test] "
);
repartitionTopics.setup();
final Map<TopicPartition, PartitionInfo> topicPartitionsInfo = repartitionTopics.topicPartitionsInfo();
assertThat(topicPartitionsInfo, is(Collections.emptyMap()));
assertThat(repartitionTopics.topologiesWithMissingInputTopics().isEmpty(), is(true));
assertThat(repartitionTopics.missingSourceTopicExceptions().isEmpty(), is(true));
}
private void verifyRepartitionTopicPartitionInfo(final Map<TopicPartition, PartitionInfo> topicPartitionsInfo,
final String topic,
final int partition) {
final TopicPartition repartitionTopicPartition = new TopicPartition(topic, partition);
assertThat(topicPartitionsInfo.containsKey(repartitionTopicPartition), is(true));
final PartitionInfo repartitionTopicInfo = topicPartitionsInfo.get(repartitionTopicPartition);
assertThat(repartitionTopicInfo.topic(), is(topic));
assertThat(repartitionTopicInfo.partition(), is(partition));
assertThat(repartitionTopicInfo.inSyncReplicas(), is(new Node[0]));
assertThat(repartitionTopicInfo.leader(), nullValue());
assertThat(repartitionTopicInfo.offlineReplicas(), is(new Node[0]));
assertThat(repartitionTopicInfo.replicas(), is(new Node[0]));
}
private void setupCluster(final boolean mockPartitionCount) {
setupClusterWithMissingTopicsAndMissingPartitionCounts(Collections.emptySet(), Collections.emptySet(), mockPartitionCount);
}
private void setupClusterWithMissingTopics(final Set<String> missingTopics, final boolean mockPartitionCount) {
setupClusterWithMissingTopicsAndMissingPartitionCounts(missingTopics, Collections.emptySet(), mockPartitionCount);
}
private void setupClusterWithMissingPartitionCounts(final Set<String> topicsWithMissingPartitionCounts, final boolean mockPartitionCount) {
setupClusterWithMissingTopicsAndMissingPartitionCounts(Collections.emptySet(),
topicsWithMissingPartitionCounts,
mockPartitionCount);
}
private void setupClusterWithMissingTopicsAndMissingPartitionCounts(final Set<String> missingTopics,
final Set<String> topicsWithMissingPartitionCounts,
final boolean mockPartitionCount) {
final Set<String> topics = new HashSet<>(List.of(
SOURCE_TOPIC_NAME1,
SOURCE_TOPIC_NAME2,
SOURCE_TOPIC_NAME3,
SINK_TOPIC_NAME1,
SINK_TOPIC_NAME2,
REPARTITION_TOPIC_NAME1,
REPARTITION_TOPIC_NAME2,
REPARTITION_TOPIC_NAME3,
REPARTITION_TOPIC_NAME4,
SOME_OTHER_TOPIC
));
topics.removeAll(missingTopics);
when(clusterMetadata.topics()).thenReturn(topics);
if (mockPartitionCount) {
when(clusterMetadata.partitionCountForTopic(SOURCE_TOPIC_NAME1))
.thenReturn(topicsWithMissingPartitionCounts.contains(SOURCE_TOPIC_NAME1) ? null : 3);
}
}
private TopicsInfo setupTopicInfoWithRepartitionTopicWithoutPartitionCount(final RepartitionTopicConfig repartitionTopicConfigWithoutPartitionCount) {
return new TopicsInfo(
Set.of(SINK_TOPIC_NAME2),
Set.of(REPARTITION_TOPIC_NAME1, REPARTITION_WITHOUT_PARTITION_COUNT),
mkMap(
mkEntry(REPARTITION_TOPIC_NAME1, REPARTITION_TOPIC_CONFIG1),
mkEntry(REPARTITION_WITHOUT_PARTITION_COUNT, repartitionTopicConfigWithoutPartitionCount)
),
Collections.emptyMap()
);
}
} | RepartitionTopicsTest |
java | elastic__elasticsearch | x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/predicate/Predicates.java | {
"start": 709,
"end": 3917
} | class ____ {
public static List<Expression> splitAnd(Expression exp) {
if (exp instanceof And and) {
List<Expression> list = new ArrayList<>();
list.addAll(splitAnd(and.left()));
list.addAll(splitAnd(and.right()));
return list;
}
return singletonList(exp);
}
public static List<Expression> splitOr(Expression exp) {
if (exp instanceof Or or) {
List<Expression> list = new ArrayList<>();
list.addAll(splitOr(or.left()));
list.addAll(splitOr(or.right()));
return list;
}
return singletonList(exp);
}
public static Expression combineOr(List<Expression> exps) {
return combine(exps, (l, r) -> new Or(l.source(), l, r));
}
public static Expression combineAnd(List<Expression> exps) {
return combine(exps, (l, r) -> new And(l.source(), l, r));
}
/**
* Build a binary 'pyramid' from the given list:
* <pre>
* AND
* / \
* AND AND
* / \ / \
* A B C D
* </pre>
*
* using the given combiner.
*
* While a bit longer, this method creates a balanced tree as oppose to a plain
* recursive approach which creates an unbalanced one (either to the left or right).
*/
private static Expression combine(List<Expression> exps, BiFunction<Expression, Expression, Expression> combiner) {
if (exps.isEmpty()) {
return null;
}
// clone the list (to modify it)
List<Expression> result = new ArrayList<>(exps);
while (result.size() > 1) {
// combine (in place) expressions in pairs
// NB: this loop modifies the list (just like an array)
for (int i = 0; i < result.size() - 1; i++) {
// keep the current element to update it in place
Expression l = result.get(i);
// remove the next element due to combining
Expression r = result.remove(i + 1);
result.set(i, combiner.apply(l, r));
}
}
return result.get(0);
}
public static List<Expression> inCommon(List<Expression> l, List<Expression> r) {
List<Expression> common = new ArrayList<>(Math.min(l.size(), r.size()));
for (Expression lExp : l) {
for (Expression rExp : r) {
if (lExp.semanticEquals(rExp)) {
common.add(lExp);
}
}
}
return common.isEmpty() ? emptyList() : common;
}
public static List<Expression> subtract(List<Expression> from, List<Expression> list) {
List<Expression> diff = new ArrayList<>(Math.min(from.size(), list.size()));
for (Expression f : from) {
boolean found = false;
for (Expression l : list) {
if (f.semanticEquals(l)) {
found = true;
break;
}
}
if (found == false) {
diff.add(f);
}
}
return diff.isEmpty() ? emptyList() : diff;
}
}
| Predicates |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java | {
"start": 19608,
"end": 20507
} | class ____ extends BaseTasksResponse {
private List<TestTaskResponse> tasks;
public TestTasksResponse(
List<TestTaskResponse> tasks,
List<TaskOperationFailure> taskFailures,
List<? extends FailedNodeException> nodeFailures
) {
super(taskFailures, nodeFailures);
this.tasks = tasks == null ? Collections.emptyList() : List.copyOf(tasks);
}
public TestTasksResponse(StreamInput in) throws IOException {
super(in);
tasks = in.readCollectionAsList(TestTaskResponse::new);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeCollection(tasks);
}
public List<TestTaskResponse> getTasks() {
return tasks;
}
}
public static | TestTasksResponse |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/cluster/ClusteredRoutePolicyTest.java | {
"start": 1427,
"end": 5808
} | class ____ extends ContextTestSupport {
private ClusteredRoutePolicy policy;
private TestClusterService cs;
@Override
protected CamelContext createCamelContext() throws Exception {
CamelContext context = super.createCamelContext();
cs = new TestClusterService("my-cluster-service");
context.addService(cs);
policy = ClusteredRoutePolicy.forNamespace("my-ns");
return context;
}
@Test
public void testClusteredRoutePolicy() throws Exception {
// route is stopped as we are not leader yet
assertEquals(ServiceStatus.Stopped, context.getRouteController().getRouteStatus("foo"));
MockEndpoint mock = getMockEndpoint("mock:foo");
mock.expectedBodiesReceived("Hello Foo");
cs.getView().setLeader(true);
template.sendBody("seda:foo", "Hello Foo");
assertMockEndpointsSatisfied();
assertEquals(ServiceStatus.Started, context.getRouteController().getRouteStatus("foo"));
}
@Test
public void testClusteredRoutePolicyRemoveAllRoutes() throws Exception {
cs.getView().setLeader(true);
context.getRouteController().stopRoute("foo");
context.getRouteController().stopRoute("baz");
context.removeRoute("foo");
context.removeRoute("baz");
assertFalse(cs.getView().isRunning());
}
@Test
public void testClusteredRoutePolicyDontStartAutoStartFalseRoutes() {
cs.getView().setLeader(true);
assertEquals(ServiceStatus.Stopped, context.getRouteController().getRouteStatus("baz"));
}
@Test
public void testClusteredRoutePolicyAddRoute() throws Exception {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
from("seda:bar").routeId("bar").routePolicy(policy)
.to("mock:bar");
}
});
// route is stopped as we are not leader yet
assertEquals(ServiceStatus.Stopped, context.getRouteController().getRouteStatus("foo"));
assertEquals(ServiceStatus.Stopped, context.getRouteController().getRouteStatus("bar"));
getMockEndpoint("mock:foo").expectedBodiesReceived("Hello Foo");
getMockEndpoint("mock:bar").expectedBodiesReceived("Hello Bar");
cs.getView().setLeader(true);
template.sendBody("seda:foo", "Hello Foo");
template.sendBody("seda:bar", "Hello Bar");
assertMockEndpointsSatisfied();
assertEquals(ServiceStatus.Started, context.getRouteController().getRouteStatus("foo"));
assertEquals(ServiceStatus.Started, context.getRouteController().getRouteStatus("bar"));
}
@Test
public void testClusteredRoutePolicyAddRouteAlreadyLeader() throws Exception {
cs.getView().setLeader(true);
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
from("seda:bar").routeId("bar").routePolicy(policy)
.to("mock:bar");
}
});
// route is started as we are leader
assertEquals(ServiceStatus.Started, context.getRouteController().getRouteStatus("foo"));
assertEquals(ServiceStatus.Started, context.getRouteController().getRouteStatus("bar"));
getMockEndpoint("mock:foo").expectedBodiesReceived("Hello Foo");
getMockEndpoint("mock:bar").expectedBodiesReceived("Hello Bar");
template.sendBody("seda:foo", "Hello Foo");
template.sendBody("seda:bar", "Hello Bar");
assertMockEndpointsSatisfied();
assertEquals(ServiceStatus.Started, context.getRouteController().getRouteStatus("foo"));
assertEquals(ServiceStatus.Started, context.getRouteController().getRouteStatus("bar"));
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("seda:foo").routeId("foo").routePolicy(policy)
.to("mock:foo");
from("seda:baz").autoStartup(false).routeId("baz").routePolicy(policy)
.to("mock:baz");
}
};
}
// *********************************
// Helpers
// *********************************
private static | ClusteredRoutePolicyTest |
java | apache__flink | flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/typeutils/GenericRecordAvroTypeInfo.java | {
"start": 1413,
"end": 3567
} | class ____ extends TypeInformation<GenericRecord> {
private static final long serialVersionUID = 4141977586453820650L;
private transient Schema schema;
public GenericRecordAvroTypeInfo(Schema schema) {
this.schema = checkNotNull(schema);
}
@Override
public boolean isBasicType() {
return false;
}
@Override
public boolean isTupleType() {
return false;
}
@Override
public int getArity() {
return 1;
}
@Override
public int getTotalFields() {
return 1;
}
@Override
public Class<GenericRecord> getTypeClass() {
return GenericRecord.class;
}
@Override
public boolean isKeyType() {
return false;
}
@Override
public TypeSerializer<GenericRecord> createSerializer(SerializerConfig config) {
return new AvroSerializer<>(GenericRecord.class, schema);
}
@Override
public String toString() {
return String.format("GenericRecord(\"%s\")", schema.toString());
}
@Override
public boolean equals(Object obj) {
if (obj instanceof GenericRecordAvroTypeInfo) {
GenericRecordAvroTypeInfo avroTypeInfo = (GenericRecordAvroTypeInfo) obj;
return Objects.equals(avroTypeInfo.schema, schema);
} else {
return false;
}
}
@Override
public int hashCode() {
return Objects.hashCode(schema);
}
@Override
public boolean canEqual(Object obj) {
return obj instanceof GenericRecordAvroTypeInfo;
}
private void writeObject(ObjectOutputStream oos) throws IOException {
byte[] schemaStrInBytes = schema.toString(false).getBytes(StandardCharsets.UTF_8);
oos.writeInt(schemaStrInBytes.length);
oos.write(schemaStrInBytes);
}
private void readObject(ObjectInputStream ois) throws ClassNotFoundException, IOException {
int len = ois.readInt();
byte[] content = new byte[len];
ois.readFully(content);
this.schema = new Schema.Parser().parse(new String(content, StandardCharsets.UTF_8));
}
}
| GenericRecordAvroTypeInfo |
java | google__dagger | dagger-compiler/main/java/dagger/internal/codegen/bindinggraphvalidation/MissingBindingValidator.java | {
"start": 2424,
"end": 11656
} | class ____ extends ValidationBindingGraphPlugin {
// Limit on the number of similar bindings to print
private static final int SIMILAR_BINDINGS_LIMIT = 20;
private final InjectBindingRegistry injectBindingRegistry;
private final DiagnosticMessageGenerator.Factory diagnosticMessageGeneratorFactory;
@Inject
MissingBindingValidator(
InjectBindingRegistry injectBindingRegistry,
DiagnosticMessageGenerator.Factory diagnosticMessageGeneratorFactory) {
this.injectBindingRegistry = injectBindingRegistry;
this.diagnosticMessageGeneratorFactory = diagnosticMessageGeneratorFactory;
}
@Override
public String pluginName() {
return "Dagger/MissingBinding";
}
@Override
public void visitGraph(BindingGraph graph, DiagnosticReporter diagnosticReporter) {
// Don't report missing bindings when validating a full binding graph or a graph built from a
// subcomponent.
if (graph.isFullBindingGraph() || graph.rootComponentNode().isSubcomponent()) {
return;
}
// A missing binding might exist in a different component as unused binding, thus getting
// stripped. Therefore, full graph needs to be traversed to capture the stripped bindings.
if (!graph.missingBindings().isEmpty()) {
requestVisitFullGraph(graph);
}
}
@Override
public void revisitFullGraph(
BindingGraph prunedGraph, BindingGraph fullGraph, DiagnosticReporter diagnosticReporter) {
prunedGraph
.missingBindings()
.forEach(
missingBinding -> reportMissingBinding(missingBinding, fullGraph, diagnosticReporter));
}
private void reportMissingBinding(
MissingBinding missingBinding,
BindingGraph graph,
DiagnosticReporter diagnosticReporter) {
diagnosticReporter.reportComponent(
ERROR,
graph.componentNode(missingBinding.componentPath()).get(),
missingBindingErrorMessage(missingBinding, graph)
+ diagnosticMessageGeneratorFactory.create(graph).getMessage(missingBinding)
+ alternativeBindingsMessage(missingBinding, graph)
+ similarBindingsMessage(missingBinding, graph));
}
private static ImmutableSet<Binding> getSimilarTypeBindings(
BindingGraph graph, Key missingBindingKey) {
return graph.bindings().stream()
// Filter out multibinding contributions (users can't request these directly).
.filter(binding -> binding.key().multibindingContributionIdentifier().isEmpty())
// Filter out keys that are identical to the missing key (i.e. the binding exists in another
// component, but we don't need to include those here because they're reported elsewhere).
.filter(binding -> !binding.key().equals(missingBindingKey))
.filter(binding -> isSimilar(binding.key(), missingBindingKey))
.collect(toImmutableSet());
}
/**
* Returns {@code true} if the two keys are similar.
*
* <p>Two keys are considered similar if they are equal except for the following differences:
*
* <ul>
* <li> qualifiers: (e.g. {@code @Qualified Foo} and {@code Foo} are similar)
* <li> variances: (e.g. {@code List<Foo>} and {@code List<? extends Foo>} are similar)
* <li> raw types: (e.g. {@code Set} and {@code Set<Foo>} are similar)
* </ul>
*/
private static boolean isSimilar(Key key, Key otherKey) {
TypeDfsIterator typeIterator = new TypeDfsIterator(key.type().xprocessing());
TypeDfsIterator otherTypeIterator = new TypeDfsIterator(otherKey.type().xprocessing());
while (typeIterator.hasNext() || otherTypeIterator.hasNext()) {
if (typeIterator.stack.size() != otherTypeIterator.stack.size()) {
// Exit early if the stacks don't align. This implies the types have a different number
// of type arguments, so we know the types must be dissimilar without checking further.
return false;
}
// If next type is a raw type, don't add the type arguments of either type to the stack.
boolean skipTypeArguments = typeIterator.isNextTypeRaw() || otherTypeIterator.isNextTypeRaw();
TypeName typeName = typeIterator.next(skipTypeArguments);
TypeName otherTypeName = otherTypeIterator.next(skipTypeArguments);
if (!typeName.equals(otherTypeName)) {
return false;
}
}
return true;
}
private String missingBindingErrorMessage(MissingBinding missingBinding, BindingGraph graph) {
Key key = missingBinding.key();
StringBuilder errorMessage = new StringBuilder();
// Wildcards should have already been checked by DependencyRequestValidator.
verify(!isWildcard(key.type().xprocessing()), "unexpected wildcard request: %s", key);
// TODO(ronshapiro): replace "provided" with "satisfied"?
errorMessage.append(key).append(" cannot be provided without ");
if (isValidImplicitProvisionKey(key)) {
errorMessage.append("an @Inject constructor or ");
}
errorMessage.append("an @Provides-"); // TODO(dpb): s/an/a
if (allIncomingDependenciesCanUseProduction(missingBinding, graph)) {
errorMessage.append(" or @Produces-");
}
errorMessage.append("annotated method.");
if (isValidMembersInjectionKey(key) && typeHasInjectionSites(key)) {
errorMessage.append(
" This type supports members injection but cannot be implicitly provided.");
}
return errorMessage.append("\n").toString();
}
private String alternativeBindingsMessage(
MissingBinding missingBinding, BindingGraph graph) {
ImmutableSet<Binding> alternativeBindings = graph.bindings(missingBinding.key());
if (alternativeBindings.isEmpty()) {
return "";
}
StringBuilder message = new StringBuilder();
message.append("\n\nNote: ")
.append(missingBinding.key())
.append(" is provided in the following other components:");
for (Binding alternativeBinding : alternativeBindings) {
// Some alternative bindings appear multiple times because they were re-resolved in multiple
// components (e.g. due to multibinding contributions). To avoid the noise, we only report
// the binding where the module is contributed.
if (alternativeBinding.contributingModule().isPresent()
&& !((ComponentNodeImpl) graph.componentNode(alternativeBinding.componentPath()).get())
.componentDescriptor()
.moduleTypes()
.contains(alternativeBinding.contributingModule().get().xprocessing())) {
continue;
}
message.append("\n").append(INDENT).append(asString(alternativeBinding));
}
return message.toString();
}
private String similarBindingsMessage(
MissingBinding missingBinding, BindingGraph graph) {
ImmutableSet<Binding> similarBindings =
getSimilarTypeBindings(graph, missingBinding.key());
if (similarBindings.isEmpty()) {
return "";
}
StringBuilder message =
new StringBuilder(
"\n\nNote: A similar binding is provided in the following other components:");
int count = 0;
for (Binding similarBinding : similarBindings) {
if (count >= SIMILAR_BINDINGS_LIMIT) {
message
.append("\n")
.append(INDENT)
.append("...and ")
.append(similarBindings.size() - SIMILAR_BINDINGS_LIMIT)
.append(" other bindings not shown");
break;
}
message
.append("\n")
.append(INDENT)
.append(similarBinding.key())
.append(" is provided at:")
.append("\n")
.append(DOUBLE_INDENT)
.append(asString(similarBinding));
count++;
}
message.append("\n")
.append(
"(For Kotlin sources, you may need to use '@JvmSuppressWildcards' or '@JvmWildcard' if "
+ "you need to explicitly control the wildcards at a particular usage site.)");
return message.toString();
}
private String asString(Binding binding) {
return String.format(
"[%s] %s",
binding.componentPath().currentComponent().xprocessing().getQualifiedName(),
binding.bindingElement().isPresent()
? elementToString(
binding.bindingElement().get().xprocessing(),
/* elideMethodParameterTypes= */ true)
// For synthetic bindings just print the Binding#toString()
: binding);
}
private boolean allIncomingDependenciesCanUseProduction(
MissingBinding missingBinding, BindingGraph graph) {
return graph.network().inEdges(missingBinding).stream()
.flatMap(instancesOf(DependencyEdge.class))
.allMatch(edge -> dependencyCanBeProduction(edge, graph));
}
private boolean typeHasInjectionSites(Key key) {
return injectBindingRegistry
.getOrFindMembersInjectionBinding(key)
.map(binding -> !binding.injectionSites().isEmpty())
.orElse(false);
}
/**
* An iterator over a list of {@link TypeName}s produced by flattening a parameterized type. e.g.
* {@code Map<Foo, List<Bar>>} to {@code [Map, Foo, List, Bar]}.
*
* <p>The iterator returns the bound when encounters a wildcard type.
*/
private static | MissingBindingValidator |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java | {
"start": 13254,
"end": 17533
} | class ____ implements Setting.Validator<RelativeByteSizeValue> {
@Override
public void validate(RelativeByteSizeValue value) {
}
@Override
public void validate(final RelativeByteSizeValue value, final Map<Setting<?>, Object> settings) {
final RelativeByteSizeValue low = (RelativeByteSizeValue) settings.get(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING);
final RelativeByteSizeValue high = (RelativeByteSizeValue) settings.get(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING);
final RelativeByteSizeValue flood = (RelativeByteSizeValue) settings.get(
CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING
);
if (low.isAbsolute() == false && high.isAbsolute() == false && flood.isAbsolute() == false) { // Validate as percentages
final double lowWatermarkThreshold = low.getRatio().getAsPercent();
final double highWatermarkThreshold = high.getRatio().getAsPercent();
final double floodThreshold = flood.getRatio().getAsPercent();
if (lowWatermarkThreshold > highWatermarkThreshold) {
throw new IllegalArgumentException(
"low disk watermark [" + low.getStringRep() + "] more than high disk watermark [" + high.getStringRep() + "]"
);
}
if (highWatermarkThreshold > floodThreshold) {
throw new IllegalArgumentException(
"high disk watermark ["
+ high.getStringRep()
+ "] more than flood stage disk watermark ["
+ flood.getStringRep()
+ "]"
);
}
} else if (low.isAbsolute() && high.isAbsolute() && flood.isAbsolute()) { // Validate as absolute values
final ByteSizeValue lowWatermarkBytes = low.getAbsolute();
final ByteSizeValue highWatermarkBytes = high.getAbsolute();
final ByteSizeValue floodStageBytes = flood.getAbsolute();
if (lowWatermarkBytes.getBytes() < highWatermarkBytes.getBytes()) {
throw new IllegalArgumentException(
"low disk watermark [" + low.getStringRep() + "] less than high disk watermark [" + high.getStringRep() + "]"
);
}
if (highWatermarkBytes.getBytes() < floodStageBytes.getBytes()) {
throw new IllegalArgumentException(
"high disk watermark ["
+ high.getStringRep()
+ "] less than flood stage disk watermark ["
+ flood.getStringRep()
+ "]"
);
}
} else {
final String message = Strings.format(
"unable to consistently parse [%s=%s], [%s=%s], and [%s=%s] as percentage or bytes",
CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(),
low.getStringRep(),
CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(),
high.getStringRep(),
CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(),
flood.getStringRep()
);
throw new IllegalArgumentException(message);
}
}
@Override
public Iterator<Setting<?>> settings() {
return WATERMARK_VALIDATOR_SETTINGS_LIST.iterator();
}
}
/**
* Validates that low, high and flood stage max headrooms adhere to the comparison: flood < high < low.
* Also validates that if the low max headroom is set, then the high max headroom must be set as well.
* Also validates that if the high max headroom is set, then the flood stage max headroom must be set as well.
* Also validates that if max headrooms are set, the respective watermark values should be ratios/percentages.
* Else, throws an exception.
*/
static | WatermarkValidator |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/ser/SerializationOrderTest.java | {
"start": 1051,
"end": 1387
} | class ____ extends BeanWithOrder
{
public SubBeanWithOrder(int a, int b, int c, int d) {
super(a, b, c, d);
}
}
@JsonPropertyOrder({"b", "a",
// note: including non-existant properties is fine (has no effect, but not an error)
"foobar",
"c"
})
static | SubBeanWithOrder |
java | apache__camel | core/camel-util/src/main/java/org/apache/camel/util/StringHelper.java | {
"start": 34095,
"end": 41888
} | class ____.
*/
public static String normalizeClassName(String name) {
StringBuilder sb = new StringBuilder(name.length());
for (char ch : name.toCharArray()) {
if (ch == '.' || ch == '[' || ch == ']' || ch == '-' || Character.isJavaIdentifierPart(ch)) {
sb.append(ch);
}
}
return sb.toString();
}
/**
* Compares old and new text content and report back which lines are changed
*
* @param oldText the old text
* @param newText the new text
* @return a list of line numbers that are changed in the new text
*/
public static List<Integer> changedLines(String oldText, String newText) {
if (oldText == null || oldText.equals(newText)) {
return Collections.emptyList();
}
List<Integer> changed = new ArrayList<>();
String[] oldLines = oldText.split("\n");
String[] newLines = newText.split("\n");
for (int i = 0; i < newLines.length; i++) {
String newLine = newLines[i];
String oldLine = i < oldLines.length ? oldLines[i] : null;
if (oldLine == null) {
changed.add(i);
} else if (!newLine.equals(oldLine)) {
changed.add(i);
}
}
return changed;
}
/**
* Removes the leading and trailing whitespace and if the resulting string is empty returns {@code null}. Examples:
* <p>
* Examples: <blockquote>
*
* <pre>
* trimToNull("abc") -> "abc"
* trimToNull(" abc") -> "abc"
* trimToNull(" abc ") -> "abc"
* trimToNull(" ") -> null
* trimToNull("") -> null
* </pre>
*
* </blockquote>
*/
public static String trimToNull(final String given) {
if (given == null) {
return null;
}
final String trimmed = given.trim();
if (trimmed.isEmpty()) {
return null;
}
return trimmed;
}
/**
* Checks if the src string contains what
*
* @param src is the source string to be checked
* @param what is the string which will be looked up in the src argument
* @return true/false
*/
public static boolean containsIgnoreCase(String src, String what) {
if (src == null || what == null) {
return false;
}
final int length = what.length();
if (length == 0) {
return true; // Empty string is contained
}
final char firstLo = Character.toLowerCase(what.charAt(0));
final char firstUp = Character.toUpperCase(what.charAt(0));
for (int i = src.length() - length; i >= 0; i--) {
// Quick check before calling the more expensive regionMatches() method:
final char ch = src.charAt(i);
if (ch != firstLo && ch != firstUp) {
continue;
}
if (src.regionMatches(true, i, what, 0, length)) {
return true;
}
}
return false;
}
/**
* Outputs the bytes in human-readable format in units of KB,MB,GB etc.
*
* @param locale The locale to apply during formatting. If l is {@code null} then no localization is applied.
* @param bytes number of bytes
* @return human readable output
* @see java.lang.String#format(Locale, String, Object...)
*/
public static String humanReadableBytes(Locale locale, long bytes) {
int unit = 1024;
if (bytes < unit) {
return bytes + " B";
}
int exp = (int) (Math.log(bytes) / Math.log(unit));
String pre = String.valueOf("KMGTPE".charAt(exp - 1));
return String.format(locale, "%.1f %sB", bytes / Math.pow(unit, exp), pre);
}
/**
* Outputs the bytes in human-readable format in units of KB,MB,GB etc.
*
* The locale always used is the one returned by {@link java.util.Locale#getDefault()}.
*
* @param bytes number of bytes
* @return human readable output
* @see org.apache.camel.util.StringHelper#humanReadableBytes(Locale, long)
*/
public static String humanReadableBytes(long bytes) {
return humanReadableBytes(Locale.getDefault(), bytes);
}
/**
* Check for string pattern matching with a number of strategies in the following order:
*
* - equals - null pattern always matches - * always matches - Ant style matching - Regexp
*
* @param pattern the pattern
* @param target the string to test
* @return true if target matches the pattern
*/
public static boolean matches(String pattern, String target) {
if (Objects.equals(pattern, target)) {
return true;
}
if (Objects.isNull(pattern)) {
return true;
}
if (Objects.equals("*", pattern)) {
return true;
}
if (AntPathMatcher.INSTANCE.match(pattern, target)) {
return true;
}
Pattern p = Pattern.compile(pattern);
Matcher m = p.matcher(target);
return m.matches();
}
/**
* Converts the string from camel case into dot format (helloGreatWorld -> hello.great.world)
*
* @param text the string
* @return the string dot cased
*/
public static String camelCaseToDot(String text) {
if (text == null || text.isEmpty()) {
return text;
}
text = camelCaseToDash(text);
return text.replace('-', '.');
}
/**
* Converts the string from camel case into dash format (helloGreatWorld -> hello-great-world)
*
* @param text the string
* @return the string camel cased
*/
public static String camelCaseToDash(String text) {
if (text == null || text.isEmpty()) {
return text;
}
char prev = 0;
char[] arr = text.toCharArray();
StringBuilder answer = new StringBuilder(arr.length < 13 ? 16 : arr.length + 8);
for (int i = 0; i < arr.length; i++) {
char ch = arr[i];
if (ch == '-' || ch == '_') {
answer.append("-");
} else {
if (Character.isUpperCase(ch) && prev != 0) {
char next;
if (i < arr.length - 1) {
next = arr[i + 1];
} else {
next = 0;
}
if (!Character.isUpperCase(prev) || next != 0 && Character.isLowerCase(next)) {
applyDashPrefix(prev, answer, ch);
} else {
answer.append(Character.toLowerCase(ch));
}
} else {
answer.append(Character.toLowerCase(ch));
}
}
prev = ch;
}
return answer.toString();
}
private static void applyDashPrefix(char prev, StringBuilder answer, char ch) {
if (prev != '-' && prev != '_') {
answer.append("-");
}
answer.append(Character.toLowerCase(ch));
}
/**
* Does the string start with the given prefix (ignoring the case).
*
* @param text the string
* @param prefix the prefix
*/
public static boolean startsWithIgnoreCase(String text, String prefix) {
if (text != null && prefix != null) {
return prefix.length() <= text.length() && text.regionMatches(true, 0, prefix, 0, prefix.length());
} else {
return text == null && prefix == null;
}
}
/**
* Converts the value to an | loader |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/FutureReturnValueIgnored.java | {
"start": 2134,
"end": 6777
} | class ____ extends AbstractReturnValueIgnored
implements ReturnTreeMatcher {
private static final Matcher<ExpressionTree> IGNORED_METHODS =
anyOf(
// ForkJoinTask#fork has side-effects and returns 'this', so it's reasonable to ignore
// the return value.
instanceMethod()
.onDescendantOf(ForkJoinTask.class.getName())
.named("fork")
.withNoParameters(),
// CompletionService is intended to be used in a way where the Future returned
// from submit is discarded, because the Futures are available later via e.g. take()
instanceMethod().onDescendantOf(CompletionService.class.getName()).named("submit"),
// IntelliJ's executeOnPooledThread wraps the Callable/Runnable in one that catches
// Throwable, so it can't fail (unless logging the Throwable also throws, but there's
// nothing much to be done at that point).
instanceMethod()
.onDescendantOf("com.intellij.openapi.application.Application")
.named("executeOnPooledThread"),
// ChannelFuture#addListener(s) returns itself for chaining. Any exception during the
// future execution should be dealt by the listener(s).
instanceMethod()
.onDescendantOf("io.netty.util.concurrent.Future")
.namedAnyOf(
"addListener",
"addListeners",
"removeListener",
"removeListeners",
"sync",
"syncUninterruptibly",
"await",
"awaitUninterruptibly"),
instanceMethod()
.onDescendantOf("io.netty.util.concurrent.Promise")
.namedAnyOf("setSuccess", "setFailure"),
instanceMethod()
.onExactClass("java.util.concurrent.CompletableFuture")
.namedAnyOf("exceptionally", "completeAsync", "orTimeout", "completeOnTimeout"));
private static final Matcher<ExpressionTree> MATCHER =
new Matcher<ExpressionTree>() {
@Override
public boolean matches(ExpressionTree tree, VisitorState state) {
Type futureType = JAVA_UTIL_CONCURRENT_FUTURE.get(state);
if (futureType == null) {
return false;
}
if (!(ASTHelpers.getSymbol(tree) instanceof MethodSymbol sym)) {
Type resultType = ASTHelpers.getResultType(tree);
return resultType != null
&& ASTHelpers.isSubtype(
ASTHelpers.getUpperBound(resultType, state.getTypes()), futureType, state);
}
if (hasAnnotation(sym, CAN_IGNORE_RETURN_VALUE_ANNOTATION, state)) {
return false;
}
for (MethodSymbol superSym : ASTHelpers.findSuperMethods(sym, state.getTypes())) {
// There are interfaces annotated with @CanIgnoreReturnValue (like Guava's Function)
// whose return value really shouldn't be ignored - as a heuristic, check if the super's
// method is returning a future subtype.
if (hasAnnotation(superSym, CAN_IGNORE_RETURN_VALUE_ANNOTATION, state)
&& ASTHelpers.isSubtype(
ASTHelpers.getUpperBound(superSym.getReturnType(), state.getTypes()),
futureType,
state)) {
return false;
}
}
if (IGNORED_METHODS.matches(tree, state)) {
return false;
}
Type returnType = sym.getReturnType();
return ASTHelpers.isSubtype(
ASTHelpers.getUpperBound(returnType, state.getTypes()), futureType, state);
}
};
@Inject
FutureReturnValueIgnored(ConstantExpressions constantExpressions) {
super(constantExpressions);
}
@Override
public Matcher<ExpressionTree> specializedMatcher() {
return MATCHER;
}
@Override
protected Optional<Type> lostType(VisitorState state) {
return Optional.ofNullable(futureType.get(state));
}
@Override
protected String lostTypeMessage(String returnedType, String declaredReturnType) {
return String.format(
"Returning %s from method that returns %s. Errors from the returned future may be ignored.",
returnedType, declaredReturnType);
}
private final Supplier<Type> futureType = Suppliers.typeFromString("java.util.concurrent.Future");
private static final Supplier<Type> JAVA_UTIL_CONCURRENT_FUTURE =
VisitorState.memoize(state -> state.getTypeFromString("java.util.concurrent.Future"));
}
| FutureReturnValueIgnored |
java | apache__camel | components/camel-twilio/src/generated/java/org/apache/camel/component/twilio/internal/KeyApiMethod.java | {
"start": 642,
"end": 2572
} | enum ____ implements ApiMethod {
DELETER(
com.twilio.rest.api.v2010.account.KeyDeleter.class,
"deleter",
arg("pathSid", String.class)),
DELETER_1(
com.twilio.rest.api.v2010.account.KeyDeleter.class,
"deleter",
arg("pathAccountSid", String.class),
arg("pathSid", String.class)),
FETCHER(
com.twilio.rest.api.v2010.account.KeyFetcher.class,
"fetcher",
arg("pathSid", String.class)),
FETCHER_1(
com.twilio.rest.api.v2010.account.KeyFetcher.class,
"fetcher",
arg("pathAccountSid", String.class),
arg("pathSid", String.class)),
READER(
com.twilio.rest.api.v2010.account.KeyReader.class,
"reader"),
READER_1(
com.twilio.rest.api.v2010.account.KeyReader.class,
"reader",
arg("pathAccountSid", String.class)),
UPDATER(
com.twilio.rest.api.v2010.account.KeyUpdater.class,
"updater",
arg("pathSid", String.class)),
UPDATER_1(
com.twilio.rest.api.v2010.account.KeyUpdater.class,
"updater",
arg("pathAccountSid", String.class),
arg("pathSid", String.class));
private final ApiMethod apiMethod;
KeyApiMethod(Class<?> resultType, String name, ApiMethodArg... args) {
this.apiMethod = new ApiMethodImpl(Key.class, resultType, name, args);
}
@Override
public String getName() { return apiMethod.getName(); }
@Override
public Class<?> getResultType() { return apiMethod.getResultType(); }
@Override
public List<String> getArgNames() { return apiMethod.getArgNames(); }
@Override
public List<String> getSetterArgNames() { return apiMethod.getSetterArgNames(); }
@Override
public List<Class<?>> getArgTypes() { return apiMethod.getArgTypes(); }
@Override
public Method getMethod() { return apiMethod.getMethod(); }
}
| KeyApiMethod |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java | {
"start": 8518,
"end": 17578
} | class ____ {
private int maxShardsPerNode;
private int maxShardsPerNodeFrozen;
private int shardCapacityUnhealthyThresholdYellow;
private int shardCapacityUnhealthyThresholdRed;
private Builder() {}
private Builder(ShardLimits shardLimits) {
this.maxShardsPerNode = shardLimits.maxShardsPerNode;
this.maxShardsPerNodeFrozen = shardLimits.maxShardsPerNodeFrozen;
this.shardCapacityUnhealthyThresholdYellow = shardLimits.shardCapacityUnhealthyThresholdYellow;
this.shardCapacityUnhealthyThresholdRed = shardLimits.shardCapacityUnhealthyThresholdRed;
}
public Builder maxShardsPerNode(int maxShardsPerNode) {
this.maxShardsPerNode = maxShardsPerNode;
return this;
}
public Builder maxShardsPerNodeFrozen(int maxShardsPerNodeFrozen) {
this.maxShardsPerNodeFrozen = maxShardsPerNodeFrozen;
return this;
}
public Builder shardCapacityUnhealthyThresholdYellow(int shardCapacityUnhealthyThresholdYellow) {
this.shardCapacityUnhealthyThresholdYellow = shardCapacityUnhealthyThresholdYellow;
return this;
}
public Builder shardCapacityUnhealthyThresholdRed(int shardCapacityUnhealthyThresholdRed) {
this.shardCapacityUnhealthyThresholdRed = shardCapacityUnhealthyThresholdRed;
return this;
}
public ShardLimits build() {
return new ShardLimits(
maxShardsPerNode,
maxShardsPerNodeFrozen,
shardCapacityUnhealthyThresholdYellow,
shardCapacityUnhealthyThresholdRed
);
}
}
}
/**
* Contains the thresholds necessary to determine the health of the disk space of a node. The thresholds are determined by the elected
* master.
*/
public record Disk(
RelativeByteSizeValue highWatermark,
ByteSizeValue highMaxHeadroom,
RelativeByteSizeValue floodStageWatermark,
ByteSizeValue floodStageMaxHeadroom,
RelativeByteSizeValue frozenFloodStageWatermark,
ByteSizeValue frozenFloodStageMaxHeadroom
) implements ToXContentFragment, Writeable {
public static final String TYPE = "disk";
public static final TransportVersion VERSION_SUPPORTING_HEADROOM_FIELDS = TransportVersions.V_8_5_0;
private static final ParseField HIGH_WATERMARK_FIELD = new ParseField("high_watermark");
private static final ParseField HIGH_MAX_HEADROOM_FIELD = new ParseField("high_max_headroom");
private static final ParseField FLOOD_STAGE_WATERMARK_FIELD = new ParseField("flood_stage_watermark");
private static final ParseField FLOOD_STAGE_MAX_HEADROOM_FIELD = new ParseField("flood_stage_max_headroom");
private static final ParseField FROZEN_FLOOD_STAGE_WATERMARK_FIELD = new ParseField("frozen_flood_stage_watermark");
private static final ParseField FROZEN_FLOOD_STAGE_MAX_HEADROOM_FIELD = new ParseField("frozen_flood_stage_max_headroom");
static Disk readFrom(StreamInput in) throws IOException {
RelativeByteSizeValue highWatermark = RelativeByteSizeValue.parseRelativeByteSizeValue(
in.readString(),
HIGH_WATERMARK_FIELD.getPreferredName()
);
RelativeByteSizeValue floodStageWatermark = RelativeByteSizeValue.parseRelativeByteSizeValue(
in.readString(),
FLOOD_STAGE_WATERMARK_FIELD.getPreferredName()
);
RelativeByteSizeValue frozenFloodStageWatermark = RelativeByteSizeValue.parseRelativeByteSizeValue(
in.readString(),
FROZEN_FLOOD_STAGE_WATERMARK_FIELD.getPreferredName()
);
ByteSizeValue frozenFloodStageMaxHeadroom = ByteSizeValue.readFrom(in);
ByteSizeValue highMaxHeadroom = in.getTransportVersion().onOrAfter(VERSION_SUPPORTING_HEADROOM_FIELDS)
? ByteSizeValue.readFrom(in)
: ByteSizeValue.MINUS_ONE;
ByteSizeValue floodStageMaxHeadroom = in.getTransportVersion().onOrAfter(VERSION_SUPPORTING_HEADROOM_FIELDS)
? ByteSizeValue.readFrom(in)
: ByteSizeValue.MINUS_ONE;
return new Disk(
highWatermark,
highMaxHeadroom,
floodStageWatermark,
floodStageMaxHeadroom,
frozenFloodStageWatermark,
frozenFloodStageMaxHeadroom
);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(describeHighWatermark());
out.writeString(describeFloodStageWatermark());
out.writeString(describeFrozenFloodStageWatermark());
frozenFloodStageMaxHeadroom.writeTo(out);
if (out.getTransportVersion().onOrAfter(VERSION_SUPPORTING_HEADROOM_FIELDS)) {
highMaxHeadroom.writeTo(out);
floodStageMaxHeadroom.writeTo(out);
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field(HIGH_WATERMARK_FIELD.getPreferredName(), describeHighWatermark());
builder.field(HIGH_MAX_HEADROOM_FIELD.getPreferredName(), highMaxHeadroom);
builder.field(FLOOD_STAGE_WATERMARK_FIELD.getPreferredName(), describeFloodStageWatermark());
builder.field(FLOOD_STAGE_MAX_HEADROOM_FIELD.getPreferredName(), floodStageMaxHeadroom);
builder.field(FROZEN_FLOOD_STAGE_WATERMARK_FIELD.getPreferredName(), describeFrozenFloodStageWatermark());
builder.field(FROZEN_FLOOD_STAGE_MAX_HEADROOM_FIELD.getPreferredName(), frozenFloodStageMaxHeadroom);
return builder;
}
private static ByteSizeValue getFreeBytes(ByteSizeValue total, RelativeByteSizeValue watermark, ByteSizeValue maxHeadroom) {
if (watermark.isAbsolute()) {
return watermark.getAbsolute();
}
return ByteSizeValue.subtract(total, watermark.calculateValue(total, maxHeadroom));
}
public ByteSizeValue getFreeBytesHighWatermark(ByteSizeValue total) {
return getFreeBytes(total, highWatermark, highMaxHeadroom);
}
public ByteSizeValue getFreeBytesFloodStageWatermark(ByteSizeValue total) {
return getFreeBytes(total, floodStageWatermark, floodStageMaxHeadroom);
}
public ByteSizeValue getFreeBytesFrozenFloodStageWatermark(ByteSizeValue total) {
return getFreeBytes(total, frozenFloodStageWatermark, frozenFloodStageMaxHeadroom);
}
private static String getThresholdStringRep(RelativeByteSizeValue relativeByteSizeValue) {
if (relativeByteSizeValue.isAbsolute()) {
return relativeByteSizeValue.getAbsolute().getStringRep();
} else {
return relativeByteSizeValue.getRatio().formatNoTrailingZerosPercent();
}
}
public String describeHighWatermark() {
return getThresholdStringRep(highWatermark);
}
public String describeFloodStageWatermark() {
return getThresholdStringRep(floodStageWatermark);
}
public String describeFrozenFloodStageWatermark() {
return getThresholdStringRep(frozenFloodStageWatermark);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Disk disk = (Disk) o;
return Objects.equals(describeHighWatermark(), disk.describeHighWatermark())
&& Objects.equals(highMaxHeadroom, disk.highMaxHeadroom)
&& Objects.equals(describeFloodStageWatermark(), disk.describeFloodStageWatermark())
&& Objects.equals(floodStageMaxHeadroom, disk.floodStageMaxHeadroom)
&& Objects.equals(describeFrozenFloodStageWatermark(), disk.describeFrozenFloodStageWatermark())
&& Objects.equals(frozenFloodStageMaxHeadroom, disk.frozenFloodStageMaxHeadroom);
}
@Override
public int hashCode() {
return Objects.hash(
describeHighWatermark(),
highMaxHeadroom,
describeFloodStageWatermark(),
floodStageMaxHeadroom,
describeFrozenFloodStageWatermark(),
frozenFloodStageMaxHeadroom
);
}
public static Builder newBuilder() {
return new Builder();
}
public static Builder newBuilder(Disk disk) {
return new Builder(disk);
}
public static | Builder |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/StreamFlatMapTest.java | {
"start": 5082,
"end": 6023
} | class ____ extends RichFlatMapFunction<String, String> {
private static final long serialVersionUID = 1L;
public static boolean openCalled = false;
public static boolean closeCalled = false;
@Override
public void open(OpenContext openContext) throws Exception {
super.open(openContext);
assertThat(closeCalled).as("Close called before open.").isFalse();
openCalled = true;
}
@Override
public void close() throws Exception {
super.close();
assertThat(openCalled).as("Open was not called before close.").isTrue();
closeCalled = true;
}
@Override
public void flatMap(String value, Collector<String> out) throws Exception {
assertThat(openCalled).as("Open was not called before run.").isTrue();
out.collect(value);
}
}
}
| TestOpenCloseFlatMapFunction |
java | apache__flink | flink-table/flink-table-runtime/src/test/java/org/apache/flink/table/runtime/operators/join/RandomSortMergeOuterJoinTest.java | {
"start": 2210,
"end": 9947
} | class ____ {
// random seeds for the left and right input data generators
private static final long SEED1 = 561349061987311L;
private static final long SEED2 = 231434613412342L;
@Test
void testFullOuterJoinWithHighNumberOfCommonKeys() throws Exception {
testOuterJoinWithHighNumberOfCommonKeys(
FlinkJoinType.FULL, 200, 500, 2048, 0.02f, 200, 500, 2048, 0.02f);
}
@Test
void testLeftOuterJoinWithHighNumberOfCommonKeys() throws Exception {
testOuterJoinWithHighNumberOfCommonKeys(
FlinkJoinType.LEFT, 200, 10, 4096, 0.02f, 100, 4000, 2048, 0.02f);
}
@Test
void testRightOuterJoinWithHighNumberOfCommonKeys() throws Exception {
testOuterJoinWithHighNumberOfCommonKeys(
FlinkJoinType.RIGHT, 100, 10, 2048, 0.02f, 200, 4000, 4096, 0.02f);
}
@SuppressWarnings("unchecked, rawtypes")
protected void testOuterJoinWithHighNumberOfCommonKeys(
FlinkJoinType outerJoinType,
int input1Size,
int input1Duplicates,
int input1ValueLength,
float input1KeyDensity,
int input2Size,
int input2Duplicates,
int input2ValueLength,
float input2KeyDensity)
throws Exception {
TypeComparator<Tuple2<Integer, String>> comparator1 =
new TupleComparator<>(
new int[] {0},
new TypeComparator<?>[] {new IntComparator(true)},
new TypeSerializer<?>[] {IntSerializer.INSTANCE});
TypeComparator<Tuple2<Integer, String>> comparator2 =
new TupleComparator<>(
new int[] {0},
new TypeComparator<?>[] {new IntComparator(true)},
new TypeSerializer<?>[] {IntSerializer.INSTANCE});
final int duplicateKey = 13;
final TupleGenerator generator1 =
new TupleGenerator(
SEED1,
500,
input1KeyDensity,
input1ValueLength,
KeyMode.SORTED_SPARSE,
ValueMode.RANDOM_LENGTH,
null);
final TupleGenerator generator2 =
new TupleGenerator(
SEED2,
500,
input2KeyDensity,
input2ValueLength,
KeyMode.SORTED_SPARSE,
ValueMode.RANDOM_LENGTH,
null);
final TupleGeneratorIterator gen1Iter = new TupleGeneratorIterator(generator1, input1Size);
final TupleGeneratorIterator gen2Iter = new TupleGeneratorIterator(generator2, input2Size);
final TupleConstantValueIterator const1Iter =
new TupleConstantValueIterator(
duplicateKey, "LEFT String for Duplicate Keys", input1Duplicates);
final TupleConstantValueIterator const2Iter =
new TupleConstantValueIterator(
duplicateKey, "RIGHT String for Duplicate Keys", input2Duplicates);
final List<MutableObjectIterator<Tuple2<Integer, String>>> inList1 = new ArrayList<>();
inList1.add(gen1Iter);
inList1.add(const1Iter);
final List<MutableObjectIterator<Tuple2<Integer, String>>> inList2 = new ArrayList<>();
inList2.add(gen2Iter);
inList2.add(const2Iter);
MutableObjectIterator<Tuple2<Integer, String>> input1 =
new MergeIterator<>(inList1, comparator1.duplicate());
MutableObjectIterator<Tuple2<Integer, String>> input2 =
new MergeIterator<>(inList2, comparator2.duplicate());
// collect expected data
final Map<Integer, Collection<Match>> expectedMatchesMap =
joinValues(
RandomSortMergeInnerJoinTest.collectData(input1),
RandomSortMergeInnerJoinTest.collectData(input2),
outerJoinType);
// re-create the whole thing for actual processing
// reset the generators and iterators
generator1.reset();
generator2.reset();
const1Iter.reset();
const2Iter.reset();
gen1Iter.reset();
gen2Iter.reset();
inList1.clear();
inList1.add(gen1Iter);
inList1.add(const1Iter);
inList2.clear();
inList2.add(gen2Iter);
inList2.add(const2Iter);
input1 = new MergeIterator<>(inList1, comparator1.duplicate());
input2 = new MergeIterator<>(inList2, comparator2.duplicate());
StreamOperator operator = getOperator(outerJoinType);
RandomSortMergeInnerJoinTest.match(
expectedMatchesMap,
RandomSortMergeInnerJoinTest.transformToBinary(myJoin(operator, input1, input2)));
// assert that each expected match was seen
assertThat(expectedMatchesMap).allSatisfy((i, e) -> assertThat(e).isEmpty());
}
public LinkedBlockingQueue<Object> myJoin(
StreamOperator operator,
MutableObjectIterator<Tuple2<Integer, String>> input1,
MutableObjectIterator<Tuple2<Integer, String>> input2)
throws Exception {
return RandomSortMergeInnerJoinTest.join(operator, input1, input2);
}
// --------------------------------------------------------------------------------------------
// Utilities
// --------------------------------------------------------------------------------------------
private Map<Integer, Collection<Match>> joinValues(
Map<Integer, Collection<String>> leftMap,
Map<Integer, Collection<String>> rightMap,
FlinkJoinType outerJoinType) {
Map<Integer, Collection<Match>> map = new HashMap<>();
for (Integer key : leftMap.keySet()) {
Collection<String> leftValues = leftMap.get(key);
Collection<String> rightValues = rightMap.get(key);
if (outerJoinType == FlinkJoinType.RIGHT && rightValues == null) {
continue;
}
if (!map.containsKey(key)) {
map.put(key, new ArrayList<>());
}
Collection<Match> joinedValues = map.get(key);
for (String leftValue : leftValues) {
if (rightValues != null) {
for (String rightValue : rightValues) {
joinedValues.add(new Match(leftValue, rightValue));
}
} else {
joinedValues.add(new Match(leftValue, null));
}
}
}
if (outerJoinType == FlinkJoinType.RIGHT || outerJoinType == FlinkJoinType.FULL) {
for (Integer key : rightMap.keySet()) {
Collection<String> leftValues = leftMap.get(key);
Collection<String> rightValues = rightMap.get(key);
if (leftValues != null) {
continue;
}
if (!map.containsKey(key)) {
map.put(key, new ArrayList<>());
}
Collection<Match> joinedValues = map.get(key);
for (String rightValue : rightValues) {
joinedValues.add(new Match(null, rightValue));
}
}
}
return map;
}
protected StreamOperator getOperator(FlinkJoinType outerJoinType) {
return Int2SortMergeJoinOperatorTest.newOperator(outerJoinType, false);
}
}
| RandomSortMergeOuterJoinTest |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/core/publisher/FluxSubscribeOnCallable.java | {
"start": 2527,
"end": 8159
} | class ____<T>
implements QueueSubscription<T>, InnerProducer<T>, Runnable {
final CoreSubscriber<? super T> actual;
final Callable<? extends @Nullable T> callable;
final Scheduler scheduler;
volatile int state;
@SuppressWarnings("rawtypes")
static final AtomicIntegerFieldUpdater<CallableSubscribeOnSubscription> STATE =
AtomicIntegerFieldUpdater.newUpdater(CallableSubscribeOnSubscription.class,
"state");
@Nullable T value;
static final int NO_REQUEST_HAS_VALUE = 1;
static final int HAS_REQUEST_NO_VALUE = 2;
static final int HAS_REQUEST_HAS_VALUE = 3;
static final int HAS_CANCELLED = 4;
int fusionState;
static final int NO_VALUE = 1;
static final int HAS_VALUE = 2;
static final int COMPLETE = 3;
volatile @Nullable Disposable mainFuture;
// https://github.com/uber/NullAway/issues/1157
@SuppressWarnings({"rawtypes", "DataFlowIssue"})
static final AtomicReferenceFieldUpdater<CallableSubscribeOnSubscription, @Nullable Disposable>
MAIN_FUTURE = AtomicReferenceFieldUpdater.newUpdater(
CallableSubscribeOnSubscription.class,
Disposable.class,
"mainFuture");
volatile @Nullable Disposable requestFuture;
// https://github.com/uber/NullAway/issues/1157
@SuppressWarnings({"rawtypes", "DataFlowIssue"})
static final AtomicReferenceFieldUpdater<CallableSubscribeOnSubscription, @Nullable Disposable>
REQUEST_FUTURE = AtomicReferenceFieldUpdater.newUpdater(
CallableSubscribeOnSubscription.class,
Disposable.class,
"requestFuture");
CallableSubscribeOnSubscription(CoreSubscriber<? super T> actual,
Callable<? extends @Nullable T> callable,
Scheduler scheduler) {
this.actual = actual;
this.callable = callable;
this.scheduler = scheduler;
}
@Override
public CoreSubscriber<? super T> actual() {
return actual;
}
@Override
public @Nullable Object scanUnsafe(Attr key) {
if (key == Attr.CANCELLED) return state == HAS_CANCELLED;
if (key == Attr.BUFFERED) return value != null ? 1 : 0;
if (key == Attr.RUN_ON) return scheduler;
if (key == Attr.RUN_STYLE) return Attr.RunStyle.ASYNC;
return InnerProducer.super.scanUnsafe(key);
}
@Override
public void cancel() {
state = HAS_CANCELLED;
fusionState = COMPLETE;
Disposable a = mainFuture;
if (a != OperatorDisposables.DISPOSED) {
a = MAIN_FUTURE.getAndSet(this, OperatorDisposables.DISPOSED);
if (a != null && a != OperatorDisposables.DISPOSED) {
a.dispose();
}
}
a = requestFuture;
if (a != OperatorDisposables.DISPOSED) {
a = REQUEST_FUTURE.getAndSet(this, OperatorDisposables.DISPOSED);
if (a != null && a != OperatorDisposables.DISPOSED) {
a.dispose();
}
}
}
@Override
public void clear() {
value = null;
fusionState = COMPLETE;
}
@Override
public boolean isEmpty() {
return fusionState == COMPLETE;
}
@Override
public @Nullable T poll() {
if (fusionState == HAS_VALUE) {
fusionState = COMPLETE;
return value;
}
return null;
}
@Override
public int requestFusion(int requestedMode) {
if ((requestedMode & ASYNC) != 0 && (requestedMode & THREAD_BARRIER) == 0) {
fusionState = NO_VALUE;
return ASYNC;
}
return NONE;
}
@Override
public int size() {
return isEmpty() ? 0 : 1;
}
void setMainFuture(Disposable c) {
for (; ; ) {
Disposable a = mainFuture;
if (a == OperatorDisposables.DISPOSED) {
c.dispose();
return;
}
if (MAIN_FUTURE.compareAndSet(this, a, c)) {
return;
}
}
}
void setRequestFuture(Disposable c) {
for (; ; ) {
Disposable a = requestFuture;
if (a == OperatorDisposables.DISPOSED) {
c.dispose();
return;
}
if (REQUEST_FUTURE.compareAndSet(this, a, c)) {
return;
}
}
}
@Override
public void run() {
T v;
try {
v = callable.call();
}
catch (Throwable ex) {
actual.onError(Operators.onOperatorError(this, ex,
actual.currentContext()));
return;
}
if (v == null) {
fusionState = COMPLETE;
actual.onComplete();
return;
}
for (; ; ) {
int s = state;
if (s == HAS_CANCELLED || s == HAS_REQUEST_HAS_VALUE || s == NO_REQUEST_HAS_VALUE) {
return;
}
if (s == HAS_REQUEST_NO_VALUE) {
if (fusionState == NO_VALUE) {
this.value = v;
this.fusionState = HAS_VALUE;
}
actual.onNext(v);
if (state != HAS_CANCELLED) {
actual.onComplete();
}
return;
}
this.value = v;
if (STATE.compareAndSet(this, s, NO_REQUEST_HAS_VALUE)) {
return;
}
}
}
@Override
public void request(long n) {
if (Operators.validate(n)) {
for (; ; ) {
int s = state;
if (s == HAS_CANCELLED || s == HAS_REQUEST_NO_VALUE || s == HAS_REQUEST_HAS_VALUE) {
return;
}
if (s == NO_REQUEST_HAS_VALUE) {
if (STATE.compareAndSet(this, s, HAS_REQUEST_HAS_VALUE)) {
try {
Disposable f = scheduler.schedule(this::emitValue);
setRequestFuture(f);
}
catch (RejectedExecutionException ree) {
actual.onError(Operators.onRejectedExecution(ree,
actual.currentContext()));
}
}
return;
}
if (STATE.compareAndSet(this, s, HAS_REQUEST_NO_VALUE)) {
return;
}
}
}
}
void emitValue() {
if (fusionState == NO_VALUE) {
this.fusionState = HAS_VALUE;
}
T v = value;
clear();
if (v != null) {
actual.onNext(v);
}
if (state != HAS_CANCELLED) {
actual.onComplete();
}
}
}
}
| CallableSubscribeOnSubscription |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/superclass/auditoverride/EmbeddableTest.java | {
"start": 8668,
"end": 8920
} | class ____ extends SimpleAbstractMappedSuperclass {
}
// an embedddable that introduces all audited values base don audit overrides locally.
@Embeddable
@AuditOverride(forClass = SimpleAbstractMappedSuperclass.class)
public static | OverrideEmbeddable |
java | apache__flink | flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/util/collections/binary/BytesMultiMap.java | {
"start": 1367,
"end": 1873
} | class ____ extends AbstractBytesMultiMap<BinaryRowData> {
public BytesMultiMap(
Object owner,
MemoryManager memoryManager,
long memorySize,
LogicalType[] keyTypes,
LogicalType[] valueTypes) {
super(
owner,
memoryManager,
memorySize,
new BinaryRowDataSerializer(keyTypes.length),
valueTypes);
checkArgument(keyTypes.length > 0);
}
}
| BytesMultiMap |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/DatasetTestComponentBuilderFactory.java | {
"start": 5469,
"end": 6529
} | class ____
extends AbstractComponentBuilder<DataSetTestComponent>
implements DatasetTestComponentBuilder {
@Override
protected DataSetTestComponent buildConcreteComponent() {
return new DataSetTestComponent();
}
@Override
protected boolean setPropertyOnComponent(
Component component,
String name,
Object value) {
switch (name) {
case "lazyStartProducer": ((DataSetTestComponent) component).setLazyStartProducer((boolean) value); return true;
case "log": ((DataSetTestComponent) component).setLog((boolean) value); return true;
case "autowiredEnabled": ((DataSetTestComponent) component).setAutowiredEnabled((boolean) value); return true;
case "exchangeFormatter": ((DataSetTestComponent) component).setExchangeFormatter((org.apache.camel.spi.ExchangeFormatter) value); return true;
default: return false;
}
}
}
} | DatasetTestComponentBuilderImpl |
java | netty__netty | handler/src/main/java/io/netty/handler/logging/LoggingHandler.java | {
"start": 3645,
"end": 3945
} | class ____ to generate the logger for
* @param level the log level
*/
public LoggingHandler(Class<?> clazz, LogLevel level) {
this(clazz, level, ByteBufFormat.HEX_DUMP);
}
/**
* Creates a new instance with the specified logger name.
*
* @param clazz the | type |
java | google__dagger | javatests/dagger/internal/codegen/ComponentProcessorTest.java | {
"start": 35774,
"end": 36183
} | class ____ {",
" @Inject InjectedType(",
" String stringInjection,",
" int intInjection,",
" AComponent aComponent,",
" Class<AComponent> aClass) {}",
"}");
Source aComponentFile =
CompilerTests.javaSource(
"test.AComponent",
"package test;",
"",
" | InjectedType |
java | apache__flink | flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/ClientTest.java | {
"start": 33731,
"end": 34480
} | class ____ extends ChannelInboundHandlerAdapter {
private final AtomicReference<Channel> channel;
private final LinkedBlockingQueue<ByteBuf> received;
private ChannelDataCollectingHandler(
AtomicReference<Channel> channel, LinkedBlockingQueue<ByteBuf> received) {
this.channel = channel;
this.received = received;
}
@Override
public void channelActive(ChannelHandlerContext ctx) {
channel.set(ctx.channel());
}
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) {
received.add((ByteBuf) msg);
}
}
@ChannelHandler.Sharable
private static final | ChannelDataCollectingHandler |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.