language
stringclasses
1 value
repo
stringclasses
60 values
path
stringlengths
22
294
class_span
dict
source
stringlengths
13
1.16M
target
stringlengths
1
113
java
elastic__elasticsearch
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/realm/TransportClearRealmCacheActionTests.java
{ "start": 8104, "end": 8259 }
class ____ extends Realm implements CachingRealm { TestCachingRealm(RealmConfig config) { super(config); } } }
TestCachingRealm
java
apache__hadoop
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java
{ "start": 4298, "end": 5303 }
interface ____ us to convert to unchecked throw new RuntimeException(RELOAD_ERROR_MESSAGE, ex); } return this; } X509TrustManager loadTrustManager(Path path) throws IOException, GeneralSecurityException { X509TrustManager trustManager = null; KeyStore ks = KeyStore.getInstance(type); InputStream in = Files.newInputStream(path); try { ks.load(in, (password == null) ? null : password.toCharArray()); LOG.debug("Loaded truststore '" + path + "'"); } finally { in.close(); } TrustManagerFactory trustManagerFactory = TrustManagerFactory.getInstance( SSLFactory.TRUST_MANAGER_SSLCERTIFICATE); trustManagerFactory.init(ks); TrustManager[] trustManagers = trustManagerFactory.getTrustManagers(); for (TrustManager trustManager1 : trustManagers) { if (trustManager1 instanceof X509TrustManager) { trustManager = (X509TrustManager) trustManager1; break; } } return trustManager; } }
forces
java
google__error-prone
core/src/test/java/com/google/errorprone/bugpatterns/InjectOnBugCheckersTest.java
{ "start": 831, "end": 1412 }
class ____ { private final CompilationTestHelper compilationTestHelper = CompilationTestHelper.newInstance(InjectOnBugCheckers.class, getClass()); @Test public void positive() { compilationTestHelper .addSourceLines( "Test.java", """ import com.google.errorprone.BugPattern; import com.google.errorprone.ErrorProneFlags; import com.google.errorprone.bugpatterns.BugChecker; @BugPattern(summary = "", severity = BugPattern.SeverityLevel.WARNING) public
InjectOnBugCheckersTest
java
apache__flink
flink-datastream/src/test/java/org/apache/flink/datastream/impl/operators/KeyedTwoOutputProcessOperatorTest.java
{ "start": 1867, "end": 9443 }
class ____ { @Test void testProcessRecord() throws Exception { OutputTag<Long> sideOutputTag = new OutputTag<Long>("side-output") {}; KeyedTwoOutputProcessOperator<Integer, Integer, Integer, Long> processOperator = new KeyedTwoOutputProcessOperator<>( new TwoOutputStreamProcessFunction<Integer, Integer, Long>() { @Override public void processRecord( Integer record, Collector<Integer> output1, Collector<Long> output2, TwoOutputPartitionedContext<Integer, Long> ctx) { output1.collect(record); output2.collect((long) (record * 2)); } }, sideOutputTag); try (AsyncKeyedOneInputStreamOperatorTestHarness<Integer, Integer, Integer> testHarness = AsyncKeyedOneInputStreamOperatorTestHarness.create( processOperator, (KeySelector<Integer, Integer>) value -> value, Types.INT)) { testHarness.open(); testHarness.processElement(new StreamRecord<>(1)); testHarness.processElement(new StreamRecord<>(2)); testHarness.processElement(new StreamRecord<>(3)); Collection<StreamRecord<Integer>> firstOutput = testHarness.getRecordOutput(); ConcurrentLinkedQueue<StreamRecord<Long>> secondOutput = testHarness.getSideOutput(sideOutputTag); assertThat(firstOutput) .containsExactly( new StreamRecord<>(1), new StreamRecord<>(2), new StreamRecord<>(3)); assertThat(secondOutput) .containsExactly( new StreamRecord<>(2L), new StreamRecord<>(4L), new StreamRecord<>(6L)); } } @Test void testEndInput() throws Exception { AtomicInteger counter = new AtomicInteger(); OutputTag<Long> sideOutputTag = new OutputTag<Long>("side-output") {}; KeyedTwoOutputProcessOperator<Integer, Integer, Integer, Long> processOperator = new KeyedTwoOutputProcessOperator<>( new TwoOutputStreamProcessFunction<Integer, Integer, Long>() { @Override public void processRecord( Integer record, Collector<Integer> output1, Collector<Long> output2, TwoOutputPartitionedContext<Integer, Long> ctx) { // do nothing. 
} @Override public void endInput( TwoOutputNonPartitionedContext<Integer, Long> ctx) { try { ctx.applyToAllPartitions( (firstOutput, secondOutput, context) -> { counter.incrementAndGet(); Integer currentKey = context.getStateManager().getCurrentKey(); firstOutput.collect(currentKey); secondOutput.collect(Long.valueOf(currentKey)); }); } catch (Exception e) { throw new RuntimeException(e); } } }, sideOutputTag); try (AsyncKeyedOneInputStreamOperatorTestHarness<Integer, Integer, Integer> testHarness = AsyncKeyedOneInputStreamOperatorTestHarness.create( processOperator, (KeySelector<Integer, Integer>) value -> value, Types.INT)) { testHarness.open(); testHarness.processElement(new StreamRecord<>(1)); // key is 1 testHarness.processElement(new StreamRecord<>(2)); // key is 2 testHarness.endInput(); assertThat(counter).hasValue(2); Collection<StreamRecord<Integer>> firstOutput = testHarness.getRecordOutput(); ConcurrentLinkedQueue<StreamRecord<Long>> secondOutput = testHarness.getSideOutput(sideOutputTag); assertThat(firstOutput).containsExactly(new StreamRecord<>(1), new StreamRecord<>(2)); assertThat(secondOutput) .containsExactly(new StreamRecord<>(1L), new StreamRecord<>(2L)); } } @Test void testKeyCheck() throws Exception { OutputTag<Long> sideOutputTag = new OutputTag<Long>("side-output") {}; AtomicBoolean emitToFirstOutput = new AtomicBoolean(true); KeyedTwoOutputProcessOperator<Integer, Integer, Integer, Long> processOperator = new KeyedTwoOutputProcessOperator<>( new TwoOutputStreamProcessFunction<Integer, Integer, Long>() { @Override public void processRecord( Integer record, Collector<Integer> output1, Collector<Long> output2, TwoOutputPartitionedContext<Integer, Long> ctx) { if (emitToFirstOutput.get()) { output1.collect(record); } else { output2.collect((long) (record)); } } }, sideOutputTag, // -1 is an invalid key in this suite. (KeySelector<Integer, Integer>) value -> -1, // -1 is an invalid key in this suite. 
(KeySelector<Long, Integer>) value -> -1); try (AsyncKeyedOneInputStreamOperatorTestHarness<Integer, Integer, Integer> testHarness = AsyncKeyedOneInputStreamOperatorTestHarness.create( processOperator, (KeySelector<Integer, Integer>) value -> value, Types.INT)) { testHarness.open(); assertThatThrownBy(() -> testHarness.processElement(new StreamRecord<>(1))) .isInstanceOf(IllegalStateException.class); } try (AsyncKeyedOneInputStreamOperatorTestHarness<Integer, Integer, Integer> testHarness = AsyncKeyedOneInputStreamOperatorTestHarness.create( processOperator, (KeySelector<Integer, Integer>) value -> value, Types.INT)) { testHarness.open(); emitToFirstOutput.set(false); assertThatThrownBy(() -> testHarness.processElement(new StreamRecord<>(1))) .isInstanceOf(IllegalStateException.class); } } }
KeyedTwoOutputProcessOperatorTest
java
reactor__reactor-core
reactor-core/src/test/java/reactor/util/concurrent/SpscArrayQueueTest.java
{ "start": 1052, "end": 4346 }
class ____ { @Test void spscArrayQueuesAPI() { assertThat(Queues.xs().get()).isInstanceOf(SpscArrayQueue.class); } @Test void shouldRejectNullableValues() { SpscArrayQueue<Object> q = new SpscArrayQueue<>(32); assertThatExceptionOfType(NullPointerException.class).isThrownBy(() -> { q.offer(null); }); } @Test void shouldNotAllowIteratingWithIterator() { SpscArrayQueue<Object> q = new SpscArrayQueue<>(32); assertThatExceptionOfType(UnsupportedOperationException.class).isThrownBy(() -> { q.iterator(); }); } @Test void shouldNotAllowElementsRemoving() { SpscArrayQueue<Object> q = new SpscArrayQueue<>(32); q.offer(1); assertThatExceptionOfType(UnsupportedOperationException.class).isThrownBy(() -> { q.remove(1); }); } @Test void shouldNotAllowAllElementsRemoving() { SpscArrayQueue<Object> q = new SpscArrayQueue<>(32); q.offer(1); q.offer(2); assertThatExceptionOfType(UnsupportedOperationException.class).isThrownBy(() -> { q.removeAll(Arrays.asList(1, 2)); }); } @Test void shouldNotAllowAllElementsRetaining() { SpscArrayQueue<Object> q = new SpscArrayQueue<>(32); q.offer(1); q.offer(2); assertThatExceptionOfType(UnsupportedOperationException.class).isThrownBy(() -> { q.retainAll(Arrays.asList(1, 2)); }); } @Test void shouldNotAllowAdd() { SpscArrayQueue<Object> q = new SpscArrayQueue<>(32); assertThatExceptionOfType(UnsupportedOperationException.class).isThrownBy(() -> { q.add(1); }); } @Test void shouldNotAllowAddAll() { SpscArrayQueue<Object> q = new SpscArrayQueue<>(32); assertThatExceptionOfType(UnsupportedOperationException.class).isThrownBy(() -> { q.addAll(Arrays.asList(1, 2, 3)); }); } @Test void shouldClearQueue() { SpscArrayQueue<Object> q = new SpscArrayQueue<>(32); q.offer(1); q.offer(2); assertThat(q.isEmpty()).as("isEmpty() false").isFalse(); assertThat(q.size()).isEqualTo(2); q.clear(); assertThat(q.isEmpty()).as("isEmpty() true").isTrue(); assertThat(q.size()).isEqualTo(0); } @Test void shouldNotRemoveElementOnPeek() { SpscArrayQueue<Object> q = new 
SpscArrayQueue<>(32); q.offer(1); q.offer(2); for (int i = 0; i < 100; i++) { assertThat(q.peek()).isEqualTo(1); assertThat(q.size()).isEqualTo(2); } } @Test @Tag("slow") void objectPadding() { ClassLayout layout = ClassLayout.parseClass(SpscArrayQueue.class); AtomicLong currentPaddingSize = new AtomicLong(); List<String> interestingFields = new ArrayList<>(); List<Long> paddingSizes = new ArrayList<>(); layout.fields().forEach(field -> { if (field.name().startsWith("pad")) { currentPaddingSize.addAndGet(field.size()); } else { if (currentPaddingSize.get() > 0) { interestingFields.add("[padding]"); paddingSizes.add(currentPaddingSize.getAndSet(0)); } interestingFields.add(field.name()); } }); if (currentPaddingSize.get() > 0) { interestingFields.add("[padding]"); paddingSizes.add(currentPaddingSize.getAndSet(0)); } assertThat(interestingFields).containsExactly( "array", "mask", "[padding]", "producerIndex", "[padding]", "consumerIndex", "[padding]" ); assertThat(paddingSizes).containsExactly(132L, 128L, 128L); } }
SpscArrayQueueTest
java
spring-projects__spring-framework
spring-test/src/test/java/org/springframework/test/context/support/ActiveProfilesUtilsTests.java
{ "start": 7093, "end": 7246 }
class ____ extends DuplicatedProfiles { } @ActiveProfiles(profiles = { "dog", "cat" }, inheritProfiles = false) private static
ExtendedDuplicatedProfiles
java
processing__processing4
java/src/processing/mode/java/debug/FieldNode.java
{ "start": 1227, "end": 1909 }
class ____ extends VariableNode { protected Field field; protected ObjectReference obj; /** * Construct a {@link FieldNode}. * @param obj a reference to the object containing the field */ public FieldNode(String name, String type, Value value, Field field, ObjectReference obj) { super(name, type, value); this.field = field; this.obj = obj; } @Override public void setValue(Value value) { try { obj.setValue(field, value); } catch (InvalidTypeException ite) { Messages.err(null, ite); } catch (ClassNotLoadedException cnle) { Messages.err(null, cnle); } this.value = value; } }
FieldNode
java
elastic__elasticsearch
x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/BasicEnrichTests.java
{ "start": 2209, "end": 20495 }
class ____ extends ESSingleNodeTestCase { static final String SOURCE_INDEX_NAME = "users"; static final String MATCH_FIELD = "email"; static final String[] DECORATE_FIELDS = new String[] { "address", "city", "country" }; @Override protected Collection<Class<? extends Plugin>> getPlugins() { return Arrays.asList( LocalStateEnrich.class, ReindexPlugin.class, IngestCommonPlugin.class, MustachePlugin.class, SpatialPlugin.class ); } @Override protected boolean resetNodeAfterTest() { return true; } @Override protected Settings nodeSettings() { return Settings.builder() // TODO Fix the test so that it runs with security enabled // https://github.com/elastic/elasticsearch/issues/75940 .put(XPackSettings.SECURITY_ENABLED.getKey(), false) .build(); } public void testIngestDataWithMatchProcessor() { int numDocs = 32; int maxMatches = randomIntBetween(2, 8); List<String> keys = createSourceMatchIndex(numDocs, maxMatches); String policyName = "my-policy"; EnrichPolicy enrichPolicy = new EnrichPolicy( EnrichPolicy.MATCH_TYPE, null, List.of(SOURCE_INDEX_NAME), MATCH_FIELD, List.of(DECORATE_FIELDS) ); PutEnrichPolicyAction.Request request = new PutEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, policyName, enrichPolicy); client().execute(PutEnrichPolicyAction.INSTANCE, request).actionGet(); client().execute(ExecuteEnrichPolicyAction.INSTANCE, new ExecuteEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, policyName)) .actionGet(); String pipelineName = "my-pipeline"; putJsonPipeline(pipelineName, Strings.format(""" { "processors": [ { "enrich": { "policy_name": "%s", "field": "%s", "target_field": "users", "max_matches": %s } } ] }""", policyName, MATCH_FIELD, maxMatches)); BulkRequest bulkRequest = new BulkRequest("my-index"); for (int i = 0; i < numDocs; i++) { IndexRequest indexRequest = new IndexRequest(); indexRequest.id(Integer.toString(i)); indexRequest.setPipeline(pipelineName); indexRequest.source(Map.of(MATCH_FIELD, keys.get(i))); bulkRequest.add(indexRequest); } 
BulkResponse bulkResponse = client().bulk(bulkRequest).actionGet(); assertThat("Expected no failure, but " + bulkResponse.buildFailureMessage(), bulkResponse.hasFailures(), is(false)); int expectedId = 0; for (BulkItemResponse itemResponse : bulkResponse) { assertThat(itemResponse.getId(), equalTo(Integer.toString(expectedId++))); } for (int doc = 0; doc < numDocs; doc++) { GetResponse getResponse = client().get(new GetRequest("my-index", Integer.toString(doc))).actionGet(); Map<String, Object> source = getResponse.getSourceAsMap(); List<?> userEntries = (List<?>) source.get("users"); assertThat(userEntries, notNullValue()); assertThat(userEntries.size(), equalTo(maxMatches)); for (int i = 0; i < maxMatches; i++) { Map<?, ?> userEntry = (Map<?, ?>) userEntries.get(i); assertThat(userEntry.size(), equalTo(DECORATE_FIELDS.length + 1)); for (int j = 0; j < 3; j++) { String field = DECORATE_FIELDS[j]; assertThat(userEntry.get(field), equalTo(keys.get(doc) + j)); } assertThat(keys.contains(userEntry.get(MATCH_FIELD)), is(true)); } } EnrichStatsAction.Response statsResponse = client().execute( EnrichStatsAction.INSTANCE, new EnrichStatsAction.Request(TEST_REQUEST_TIMEOUT) ).actionGet(); assertThat(statsResponse.getCoordinatorStats().size(), equalTo(1)); String localNodeId = getInstanceFromNode(ClusterService.class).localNode().getId(); assertThat(statsResponse.getCoordinatorStats().get(0).nodeId(), equalTo(localNodeId)); assertThat(statsResponse.getCoordinatorStats().get(0).remoteRequestsTotal(), greaterThanOrEqualTo(1L)); assertThat(statsResponse.getCoordinatorStats().get(0).executedSearchesTotal(), equalTo((long) numDocs)); } public void testIngestDataWithGeoMatchProcessor() { String matchField = "location"; String enrichField = "zipcode"; // create enrich index { IndexRequest indexRequest = new IndexRequest(SOURCE_INDEX_NAME); indexRequest.source( Map.of( matchField, "POLYGON((" + "-122.08592534065245 37.38501746624134," + "-122.08193421363829 37.38501746624134," + 
"-122.08193421363829 37.3879329075567," + "-122.08592534065245 37.3879329075567," + "-122.08592534065245 37.38501746624134))", "zipcode", "94040" ) ); client().index(indexRequest).actionGet(); client().admin().indices().refresh(new RefreshRequest(SOURCE_INDEX_NAME)).actionGet(); } String policyName = "my-policy"; EnrichPolicy enrichPolicy = new EnrichPolicy( EnrichPolicy.GEO_MATCH_TYPE, null, List.of(SOURCE_INDEX_NAME), matchField, List.of(enrichField) ); PutEnrichPolicyAction.Request request = new PutEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, policyName, enrichPolicy); client().execute(PutEnrichPolicyAction.INSTANCE, request).actionGet(); client().execute(ExecuteEnrichPolicyAction.INSTANCE, new ExecuteEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, policyName)) .actionGet(); String pipelineName = "my-pipeline"; putJsonPipeline(pipelineName, Strings.format(""" { "processors": [ { "enrich": { "policy_name": "%s", "field": "%s", "target_field": "enriched", "max_matches": 1 } } ] }""", policyName, matchField)); BulkRequest bulkRequest = new BulkRequest("my-index"); IndexRequest indexRequest = new IndexRequest(); indexRequest.id("_id"); indexRequest.setPipeline(pipelineName); indexRequest.source(Map.of(matchField, "37.386444, -122.083863")); // point within match boundary bulkRequest.add(indexRequest); BulkResponse bulkResponse = client().bulk(bulkRequest).actionGet(); assertThat("Expected no failure, but " + bulkResponse.buildFailureMessage(), bulkResponse.hasFailures(), is(false)); assertThat(bulkResponse.getItems().length, equalTo(1)); assertThat(bulkResponse.getItems()[0].getId(), equalTo("_id")); GetResponse getResponse = client().get(new GetRequest("my-index", "_id")).actionGet(); Map<String, Object> source = getResponse.getSourceAsMap(); Map<?, ?> entries = (Map) source.get("enriched"); assertThat(entries, notNullValue()); assertThat(entries.size(), equalTo(2)); assertThat(entries.containsKey(matchField), is(true)); assertThat(entries.get(enrichField), 
equalTo("94040")); EnrichStatsAction.Response statsResponse = client().execute( EnrichStatsAction.INSTANCE, new EnrichStatsAction.Request(TEST_REQUEST_TIMEOUT) ).actionGet(); assertThat(statsResponse.getCoordinatorStats().size(), equalTo(1)); String localNodeId = getInstanceFromNode(ClusterService.class).localNode().getId(); assertThat(statsResponse.getCoordinatorStats().get(0).nodeId(), equalTo(localNodeId)); assertThat(statsResponse.getCoordinatorStats().get(0).remoteRequestsTotal(), greaterThanOrEqualTo(1L)); assertThat(statsResponse.getCoordinatorStats().get(0).executedSearchesTotal(), equalTo(1L)); } public void testMultiplePolicies() { int numPolicies = 8; for (int i = 0; i < numPolicies; i++) { String policyName = "policy" + i; IndexRequest indexRequest = new IndexRequest("source-" + i); indexRequest.source("key", "key", "value", "val" + i); client().index(indexRequest).actionGet(); client().admin().indices().refresh(new RefreshRequest("source-" + i)).actionGet(); EnrichPolicy enrichPolicy = new EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of("source-" + i), "key", List.of("value")); PutEnrichPolicyAction.Request request = new PutEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, policyName, enrichPolicy); client().execute(PutEnrichPolicyAction.INSTANCE, request).actionGet(); client().execute(ExecuteEnrichPolicyAction.INSTANCE, new ExecuteEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, policyName)) .actionGet(); String pipelineName = "pipeline" + i; putJsonPipeline(pipelineName, Strings.format(""" { "processors": [ { "enrich": { "policy_name": "%s", "field": "key", "target_field": "target" } } ] }""", policyName)); } BulkRequest bulkRequest = new BulkRequest("my-index"); for (int i = 0; i < numPolicies; i++) { IndexRequest indexRequest = new IndexRequest(); indexRequest.id(Integer.toString(i)); indexRequest.setPipeline("pipeline" + i); indexRequest.source(Map.of("key", "key")); bulkRequest.add(indexRequest); } BulkResponse bulkResponse = 
client().bulk(bulkRequest).actionGet(); assertThat("Expected no failure, but " + bulkResponse.buildFailureMessage(), bulkResponse.hasFailures(), is(false)); for (int i = 0; i < numPolicies; i++) { GetResponse getResponse = client().get(new GetRequest("my-index", Integer.toString(i))).actionGet(); Map<String, Object> source = getResponse.getSourceAsMap(); assertThat(source.size(), equalTo(2)); assertThat(source.get("target"), equalTo(Map.of("key", "key", "value", "val" + i))); } } public void testAsyncTaskExecute() throws Exception { String policyName = "async-policy"; String sourceIndexName = "async-policy-source"; { IndexRequest indexRequest = new IndexRequest(sourceIndexName); indexRequest.source("key", "key", "value", "val1"); client().index(indexRequest).actionGet(); client().admin().indices().refresh(new RefreshRequest(sourceIndexName)).actionGet(); } EnrichPolicy enrichPolicy = new EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of(sourceIndexName), "key", List.of("value")); PutEnrichPolicyAction.Request request = new PutEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, policyName, enrichPolicy); client().execute(PutEnrichPolicyAction.INSTANCE, request).actionGet(); ExecuteEnrichPolicyAction.Response executeResponse = client().execute( ExecuteEnrichPolicyAction.INSTANCE, new ExecuteEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, policyName).setWaitForCompletion(false) ).actionGet(); assertThat(executeResponse.getStatus(), is(nullValue())); assertThat(executeResponse.getTaskId(), is(not(nullValue()))); GetTaskRequest getPolicyTaskRequest = new GetTaskRequest().setTaskId(executeResponse.getTaskId()).setWaitForCompletion(true); assertBusy(() -> { GetTaskResponse taskResponse = client().execute(TransportGetTaskAction.TYPE, getPolicyTaskRequest).actionGet(); assertThat( ((ExecuteEnrichPolicyStatus) taskResponse.getTask().getTask().status()).getPhase(), is(ExecuteEnrichPolicyStatus.PolicyPhases.COMPLETE) ); }); String pipelineName = "test-pipeline"; 
putJsonPipeline(pipelineName, Strings.format(""" { "processors": [ { "enrich": { "policy_name": "%s", "field": "key", "target_field": "target" } } ] }""", policyName)); BulkRequest bulkRequest = new BulkRequest("my-index"); int numTestDocs = randomIntBetween(3, 10); for (int i = 0; i < numTestDocs; i++) { IndexRequest indexRequest = new IndexRequest("my-index"); indexRequest.id(Integer.toString(i)); indexRequest.setPipeline(pipelineName); indexRequest.source(Map.of("key", "key")); bulkRequest.add(indexRequest); } BulkResponse bulkResponse = client().bulk(bulkRequest).actionGet(); assertThat("Expected no failure, but " + bulkResponse.buildFailureMessage(), bulkResponse.hasFailures(), is(false)); for (int i = 0; i < numTestDocs; i++) { GetResponse getResponse = client().get(new GetRequest("my-index", Integer.toString(i))).actionGet(); Map<String, Object> source = getResponse.getSourceAsMap(); assertThat(source.size(), equalTo(2)); assertThat(source.get("target"), equalTo(Map.of("key", "key", "value", "val1"))); } } public void testTemplating() throws Exception { List<String> keys = createSourceMatchIndex(1, 1); String policyName = "my-policy"; EnrichPolicy enrichPolicy = new EnrichPolicy( EnrichPolicy.MATCH_TYPE, null, List.of(SOURCE_INDEX_NAME), MATCH_FIELD, List.of(DECORATE_FIELDS) ); PutEnrichPolicyAction.Request request = new PutEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, policyName, enrichPolicy); client().execute(PutEnrichPolicyAction.INSTANCE, request).actionGet(); client().execute(ExecuteEnrichPolicyAction.INSTANCE, new ExecuteEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, policyName)) .actionGet(); String pipelineName = "my-pipeline"; putJsonPipeline( pipelineName, Strings.format( """ {"processors": [{"enrich": {"policy_name":"%s", "field": "{{indirection1}}", "target_field": "{{indirection2}}"}}]}""", policyName ) ); IndexRequest indexRequest = new IndexRequest("my-index").id("1") .setPipeline(pipelineName) .source(Map.of("indirection1", MATCH_FIELD, 
"indirection2", "users", MATCH_FIELD, keys.get(0))); client().index(indexRequest).get(); GetResponse getResponse = client().get(new GetRequest("my-index", "1")).actionGet(); Map<String, Object> source = getResponse.getSourceAsMap(); Map<?, ?> userEntry = (Map<?, ?>) source.get("users"); assertThat(userEntry.size(), equalTo(DECORATE_FIELDS.length + 1)); for (int j = 0; j < 3; j++) { String field = DECORATE_FIELDS[j]; assertThat(userEntry.get(field), equalTo(keys.get(0) + j)); } assertThat(keys.contains(userEntry.get(MATCH_FIELD)), is(true)); } public void testFailureAfterEnrich() throws Exception { List<String> keys = createSourceMatchIndex(1, 1); String policyName = "my-policy"; EnrichPolicy enrichPolicy = new EnrichPolicy( EnrichPolicy.MATCH_TYPE, null, List.of(SOURCE_INDEX_NAME), MATCH_FIELD, Arrays.asList(DECORATE_FIELDS) ); PutEnrichPolicyAction.Request request = new PutEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, policyName, enrichPolicy); client().execute(PutEnrichPolicyAction.INSTANCE, request).actionGet(); client().execute(ExecuteEnrichPolicyAction.INSTANCE, new ExecuteEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, policyName)) .actionGet(); // A pipeline with a foreach that uses a non existing field that is specified after enrich has run: String pipelineName = "my-pipeline"; String pipelineBody = "{\"processors\": [{\"enrich\": {\"policy_name\":\"" + policyName + "\", \"field\": \"email\", \"target_field\": \"users\"}}," + "{ \"foreach\": {\"field\":\"users\", \"processor\":{\"append\":{\"field\":\"matched2\",\"value\":\"{{_ingest._value}}\"}}}}" + "]}"; putJsonPipeline(pipelineName, pipelineBody); for (int i = 0; i < 5; i++) { IndexRequest indexRequest = new IndexRequest("my-index").id("1") .setPipeline(pipelineName) .source(Map.of(MATCH_FIELD, "non_existing")); Exception e = expectThrows(IllegalArgumentException.class, client().index(indexRequest)); assertThat(e.getMessage(), equalTo("field [users] not present as part of path [users]")); } } 
private List<String> createSourceMatchIndex(int numKeys, int numDocsPerKey) { Set<String> keys = new HashSet<>(); for (int id = 0; id < numKeys; id++) { String key; do { key = randomAlphaOfLength(16); } while (keys.add(key) == false); for (int doc = 0; doc < numDocsPerKey; doc++) { IndexRequest indexRequest = new IndexRequest(SOURCE_INDEX_NAME); indexRequest.source( Map.of(MATCH_FIELD, key, DECORATE_FIELDS[0], key + "0", DECORATE_FIELDS[1], key + "1", DECORATE_FIELDS[2], key + "2") ); client().index(indexRequest).actionGet(); } } client().admin().indices().refresh(new RefreshRequest(SOURCE_INDEX_NAME)).actionGet(); return List.copyOf(keys); } }
BasicEnrichTests
java
FasterXML__jackson-databind
src/test/java/tools/jackson/databind/jsontype/TestSubtypes.java
{ "start": 2555, "end": 2862 }
class ____ { public Base1125 value; public Issue1125Wrapper() { } public Issue1125Wrapper(Base1125 v) { value = v; } } @JsonTypeInfo(use=JsonTypeInfo.Id.NAME, defaultImpl=Default1125.class) @JsonSubTypes({ @JsonSubTypes.Type(Interm1125.class) }) static
Issue1125Wrapper
java
hibernate__hibernate-orm
hibernate-core/src/test/java/org/hibernate/orm/test/jpa/compliance/tck2_2/caching/CachingWithSecondaryTablesTests.java
{ "start": 1382, "end": 5253 }
class ____ { private SessionFactoryImplementor sessionFactory; @Test public void testUnstrictUnversioned() { sessionFactory = buildSessionFactory( Person.class, false ); final StatisticsImplementor statistics = sessionFactory.getStatistics(); inTransaction( sessionFactory, s -> s.persist( new Person( "1", "John Doe", true ) ) ); // it should not be in the cache because it should be invalidated instead assertEquals( 0, statistics.getSecondLevelCachePutCount() ); assertFalse( sessionFactory.getCache().contains( Person.class, "1" ) ); inTransaction( sessionFactory, s -> { statistics.clear(); final Person person = s.find( Person.class, "1" ); assertTrue( Hibernate.isInitialized( person ) ); assertThat( statistics.getSecondLevelCacheHitCount(), CoreMatchers.is( 0L) ); statistics.clear(); } ); } @Test public void testStrictUnversioned() { sessionFactory = buildSessionFactory( Person.class, true ); final StatisticsImplementor statistics = sessionFactory.getStatistics(); inTransaction( sessionFactory, s -> s.persist( new Person( "1", "John Doe", true ) ) ); // this time it should be iun the cache because we enabled JPA compliance assertEquals( 1, statistics.getSecondLevelCachePutCount() ); assertTrue( sessionFactory.getCache().contains( Person.class, "1" ) ); inTransaction( sessionFactory, s -> { statistics.clear(); final Person person = s.find( Person.class, "1" ); assertTrue( Hibernate.isInitialized( person ) ); assertThat( statistics.getSecondLevelCacheHitCount(), CoreMatchers.is( 1L) ); statistics.clear(); } ); } @Test public void testVersioned() { sessionFactory = buildSessionFactory( VersionedPerson.class, false ); final StatisticsImplementor statistics = sessionFactory.getStatistics(); inTransaction( sessionFactory, s -> s.persist( new VersionedPerson( "1", "John Doe", true ) ) ); // versioned data should be cacheable regardless assertEquals( 1, statistics.getSecondLevelCachePutCount() ); assertTrue( sessionFactory.getCache().contains( VersionedPerson.class, "1" ) 
); inTransaction( sessionFactory, s -> { statistics.clear(); final VersionedPerson person = s.find( VersionedPerson.class, "1" ); assertTrue( Hibernate.isInitialized( person ) ); assertThat( statistics.getSecondLevelCacheHitCount(), CoreMatchers.is( 1L ) ); statistics.clear(); } ); } private SessionFactoryImplementor buildSessionFactory(Class<?> entityClass, boolean strict) { final Map<String,Object> settings = new HashMap<>(); settings.put( AvailableSettings.USE_SECOND_LEVEL_CACHE, "true" ); settings.put( AvailableSettings.JPA_SHARED_CACHE_MODE, SharedCacheMode.ENABLE_SELECTIVE ); settings.put( AvailableSettings.GENERATE_STATISTICS, "true" ); settings.put( AvailableSettings.HBM2DDL_AUTO, "create-drop" ); if ( strict ) { settings.put( AvailableSettings.JPA_CACHING_COMPLIANCE, "true" ); } final StandardServiceRegistry serviceRegistry = ServiceRegistryUtil.serviceRegistryBuilder() .applySettings( settings ) .build(); try { return (SessionFactoryImplementor) new MetadataSources( serviceRegistry ) .addAnnotatedClass( Person.class ) .addAnnotatedClass( VersionedPerson.class ) .buildMetadata() .buildSessionFactory(); } catch (Throwable t) { serviceRegistry.close(); throw t; } } @AfterEach public void cleanupData() { if ( sessionFactory == null ) { return; } inTransaction( sessionFactory, s -> { s.createQuery( "delete from Person" ).executeUpdate(); } ); sessionFactory.close(); } @Entity( name = "Person" ) @Table( name = "persons" ) @Cacheable() @SecondaryTable( name = "crm_persons" ) public static
CachingWithSecondaryTablesTests
java
quarkusio__quarkus
independent-projects/tools/devtools-common/src/main/java/io/quarkus/cli/plugin/CatalogService.java
{ "start": 474, "end": 8551 }
class ____<T extends Catalog<T>> { protected static final Path USER_HOME = Paths.get(System.getProperty("user.home")); protected static final Predicate<Path> EXISTS_AND_WRITABLE = p -> p != null && p.toFile().exists() && p.toFile().canRead() && p.toFile().canWrite(); protected static final Predicate<Path> IS_USER_HOME = p -> USER_HOME.equals(p); protected static final Predicate<Path> IS_ELIGIBLE_PROJECT_ROOT = EXISTS_AND_WRITABLE.and(Predicate.not(IS_USER_HOME)); protected static final Predicate<Path> HAS_POM_XML = p -> p != null && p.resolve("pom.xml").toFile().exists(); protected static final Predicate<Path> HAS_BUILD_GRADLE = p -> p != null && p.resolve("build.gradle").toFile().exists(); protected static final Predicate<Path> GIT_ROOT = p -> p != null && p.resolve(".git").toFile().exists(); protected final ObjectMapper objectMapper = new ObjectMapper() .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false) .enable(SerializationFeature.INDENT_OUTPUT) .registerModule(new Jdk8Module()); protected final Class<T> catalogType; protected final Predicate<Path> projectRoot; protected final Function<Path, Path> relativePath; public CatalogService(Class<T> catalogType, Predicate<Path> projectRoot, Function<Path, Path> relativePath) { this.catalogType = catalogType; this.projectRoot = projectRoot; this.relativePath = relativePath; } /** * Reads the plguin catalog from the user home. * * @param userdir An optional path pointing to the user directory. * @return a catalog wrapped in optional or empty if the catalog is not present. */ public Optional<T> readUserCatalog(Optional<Path> userDir) { Path userCatalogPath = getUserCatalogPath(userDir); return Optional.of(userCatalogPath).map(this::readCatalog); } public Optional<T> readProjectCatalog(Optional<Path> dir) { Optional<Path> projectCatalogPath = findProjectCatalogPath(dir); return projectCatalogPath.map(this::readCatalog); } /** * Get the project catalog path relative to the specified path. 
* The method will traverse from the specified path up to upmost directory that the user can write and * is under version control seeking for a `.quarkus/cli/plugins/catalog.json`. * * @param dir the specified path * @return the catalog path wrapped as {@link Optional} or empty if the catalog does not exist. */ public Optional<Path> findProjectCatalogPath(Path dir) { return findProjectRoot(dir).map(relativePath); } public Optional<Path> findProjectCatalogPath(Optional<Path> dir) { return dir.flatMap(this::findProjectCatalogPath); } /** * Read the catalog from project or fallback to global catalog. * * @param projectDir An optional path pointing to the project directory. * @param userdir An optional path pointing to the user directory * @return the catalog */ public Optional<T> readCatalog(Optional<Path> projectDir, Optional<Path> userDir) { return readProjectCatalog(projectDir).or(() -> readUserCatalog(userDir)); } /** * Read the catalog from the specified path. * * @param path the path to read the catalog from. * @return the catalog */ public T readCatalog(Path path) { try { return (path.toFile().length() == 0 ? catalogType.getConstructor().newInstance() : objectMapper.readValue(path.toFile(), catalogType)).withCatalogLocation(path); } catch (Exception e) { throw new RuntimeException(e); } } /** * Write the catalog to the specified {@link Path}. * The method will create the directory structure if missing. 
* * @param catalog the catalog * @param path the path */ public void writeCatalog(T catalog) { try { File catalogFile = catalog.getCatalogLocation().map(Path::toFile) .orElseThrow(() -> new IllegalStateException("Don't know where to save catalog!")); if (!catalogFile.exists() && !catalogFile.getParentFile().mkdirs() && !catalogFile.createNewFile()) { throw new IOException("Failed to create catalog at: " + catalogFile.getAbsolutePath()); } objectMapper.writeValue(catalogFile, catalog.refreshLastUpdate()); } catch (IOException e) { throw new RuntimeException(e); } } /** * Get the global catalog path that is under `.quarkus/cli/plugins/catalog.json` under the specified user home directory. * The specified directory is optional and the method will fallback to the `user.home` system property. * Using a different value if mostly needed for testing. * * @param userDir An optional user directory to use as a base path for the catalog lookup * * @return the catalog path wrapped as {@link Optional} or empty if the catalog does not exist. */ public Path getUserCatalogPath(Optional<Path> userDir) { return relativePath.apply(userDir.orElse(USER_HOME)); } /** * Get the global catalog path that is under `~/.quarkus/cli/plugins/catalog.json` * * @return the catalog path wrapped as {@link Optional} or empty if the catalog does not exist. */ public Path getUserCatalogPath() { return getUserCatalogPath(Optional.empty()); } /** * Get the catalog relative to the specified path. * * @param dir the specified path * * @return the catalog path wrapped as {@link Optional} or empty if the catalog does not exist. */ public Optional<Path> getRelativeCatalogPath(Path dir) { return getRelativeCatalogPath(Optional.of(dir)); } /** * Get the catalog relative to the current dir. * * @param output an {@link OutputOptionMixin} that can be used for tests to substitute current dir with a test directory. * @return the catalog path wrapped as {@link Optional} or empty if the catalog does not exist. 
*/ public Optional<Path> getRelativeCatalogPath(Optional<Path> dir) { return dir.or(() -> Optional.ofNullable(Paths.get(System.getProperty("user.dir")))).map(relativePath); } /** * Get the project or user catalog path. * The method with lookup the relative catalog path to the current dir and will fallback to the user catalog path. * * @param projectDir An optional path pointing to the project directory. * @param userdir An optional path pointing to the user directory * @return the catalog path wrapped as {@link Optional} or empty if the catalog does not exist. */ public Optional<Path> getCatalogPath(Optional<Path> projectDir, Optional<Path> userDir) { return getRelativeCatalogPath(projectDir).filter(EXISTS_AND_WRITABLE) .or(() -> Optional.of(getUserCatalogPath(userDir))); } /** * Get the project root of the specified path. * The method will traverse from the specified path up to upmost directory that the user can write and * is under version control. * * @param dir the specified path * @return the project path wrapped as {@link Optional} or empty if the catalog does not exist. */ public static Optional<Path> findProjectRoot(Path dir) { Optional<Path> lastKnownProjectDirectory = Optional.empty(); for (Path current = dir; IS_ELIGIBLE_PROJECT_ROOT.test(current); current = current.getParent()) { if (GIT_ROOT.test(current)) { return Optional.of(current); } if (HAS_POM_XML.test(current)) { lastKnownProjectDirectory = Optional.of(current); } if (HAS_BUILD_GRADLE.test(current)) { lastKnownProjectDirectory = Optional.of(current); } } return lastKnownProjectDirectory; } }
CatalogService
java
assertj__assertj-core
assertj-core/src/test/java/org/assertj/core/api/objectarray/ObjectArrayAssert_satisfiesOnlyOnce_with_ThrowingConsumer_Test.java
{ "start": 1238, "end": 2480 }
class ____ extends ObjectArrayAssertBaseTest { private ThrowingConsumer<Object> requirements = element -> assertThat(element).isNotNull(); @Override protected ObjectArrayAssert<Object> invoke_api_method() { return assertions.satisfiesOnlyOnce(requirements); } @Override protected void verify_internal_effects() { verify(iterables).assertSatisfiesOnlyOnce(getInfo(assertions), list(getActual(assertions)), requirements); } @Test void should_rethrow_throwables_as_runtime_exceptions() { // GIVEN Throwable exception = new Throwable("boom!"); // WHEN Throwable throwable = catchThrowable(() -> assertThat(array("foo")).satisfiesOnlyOnce(throwingConsumer(exception))); // THEN then(throwable).isInstanceOf(RuntimeException.class) .cause().isSameAs(exception); } @Test void should_propagate_RuntimeException_as_is() { // GIVEN RuntimeException runtimeException = new RuntimeException("boom!"); // WHEN Throwable throwable = catchThrowable(() -> assertThat(array("foo")).satisfiesOnlyOnce(throwingConsumer(runtimeException))); // THEN then(throwable).isSameAs(runtimeException); } }
ObjectArrayAssert_satisfiesOnlyOnce_with_ThrowingConsumer_Test
java
apache__hadoop
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletSpec.java
{ "start": 57309, "end": 57656 }
interface ____ extends _Child { /** * Add a LEGEND element. * @return a new LEGEND element builder */ LEGEND legend(); /** * Add a LEGEND element. * @param cdata the content of the element. * @return the current element builder */ _Legend legend(String cdata); } /** * */ public
_Legend
java
apache__kafka
raft/src/main/java/org/apache/kafka/raft/RequestManager.java
{ "start": 1711, "end": 10050 }
class ____ { private final Map<String, ConnectionState> connections = new HashMap<>(); private final ArrayList<Node> bootstrapServers; private final int retryBackoffMs; private final int requestTimeoutMs; private final Random random; public RequestManager( Collection<Node> bootstrapServers, int retryBackoffMs, int requestTimeoutMs, Random random ) { this.bootstrapServers = new ArrayList<>(bootstrapServers); this.retryBackoffMs = retryBackoffMs; this.requestTimeoutMs = requestTimeoutMs; this.random = random; } /** * Returns true if there are any connections with pending requests. * * This is useful for satisfying the invariant that there is only one pending Fetch request. * If there are more than one pending fetch request, it is possible for the follower to write * the same offset twice. * * @param currentTimeMs the current time * @return true if the request manager is tracking at least one request */ public boolean hasAnyInflightRequest(long currentTimeMs) { boolean result = false; Iterator<ConnectionState> iterator = connections.values().iterator(); while (iterator.hasNext()) { ConnectionState connection = iterator.next(); if (connection.hasRequestTimedOut(currentTimeMs)) { // Mark the node as ready after request timeout iterator.remove(); } else if (connection.isBackoffComplete(currentTimeMs)) { // Mark the node as ready after completed backoff iterator.remove(); } else if (connection.hasInflightRequest(currentTimeMs)) { // If there is at least one inflight request, it is enough // to stop checking the rest of the connections result = true; break; } } return result; } /** * Returns a random bootstrap node that is ready to receive a request. * * This method doesn't return a node if there is at least one request pending. In general this * method is used to send Fetch requests. Fetch requests have the invariant that there can * only be one pending Fetch request for the LEO. 
* * @param currentTimeMs the current time * @return a random ready bootstrap node */ public Optional<Node> findReadyBootstrapServer(long currentTimeMs) { // Check that there are no inflight requests across any of the known nodes not just // the bootstrap servers if (hasAnyInflightRequest(currentTimeMs)) { return Optional.empty(); } int startIndex = random.nextInt(bootstrapServers.size()); Optional<Node> result = Optional.empty(); for (int i = 0; i < bootstrapServers.size(); i++) { int index = (startIndex + i) % bootstrapServers.size(); Node node = bootstrapServers.get(index); if (isReady(node, currentTimeMs)) { result = Optional.of(node); break; } } return result; } /** * Computes the amount of time needed to wait before a bootstrap server is ready for a Fetch * request. * * If there is a connection with a pending request it returns the amount of time to wait until * the request times out. * * Returns zero, if there are no pending requests and at least one of the bootstrap servers is * ready. * * If all the bootstrap servers are backing off and there are no pending requests, return * the minimum amount of time until a bootstrap server becomes ready. 
* * @param currentTimeMs the current time * @return the amount of time to wait until bootstrap server can accept a Fetch request */ public long backoffBeforeAvailableBootstrapServer(long currentTimeMs) { long minBackoffMs = retryBackoffMs; Iterator<ConnectionState> iterator = connections.values().iterator(); while (iterator.hasNext()) { ConnectionState connection = iterator.next(); if (connection.hasRequestTimedOut(currentTimeMs)) { // Mark the node as ready after request timeout iterator.remove(); } else if (connection.isBackoffComplete(currentTimeMs)) { // Mark the node as ready after completed backoff iterator.remove(); } else if (connection.hasInflightRequest(currentTimeMs)) { // There can be at most one inflight fetch request return connection.remainingRequestTimeMs(currentTimeMs); } else if (connection.isBackingOff(currentTimeMs)) { minBackoffMs = Math.min(minBackoffMs, connection.remainingBackoffMs(currentTimeMs)); } } // There are no inflight fetch requests so check if there is a ready bootstrap server for (Node node : bootstrapServers) { if (isReady(node, currentTimeMs)) { return 0L; } } // There are no ready bootstrap servers and inflight fetch requests, return the backoff return minBackoffMs; } public boolean hasRequestTimedOut(Node node, long timeMs) { ConnectionState state = connections.get(node.idString()); if (state == null) { return false; } return state.hasRequestTimedOut(timeMs); } public boolean isReady(Node node, long timeMs) { ConnectionState state = connections.get(node.idString()); if (state == null) { return true; } boolean ready = state.isReady(timeMs); if (ready) { reset(node); } return ready; } public boolean isBackingOff(Node node, long timeMs) { ConnectionState state = connections.get(node.idString()); if (state == null) { return false; } return state.isBackingOff(timeMs); } public long remainingRequestTimeMs(Node node, long timeMs) { ConnectionState state = connections.get(node.idString()); if (state == null) { return 0; } return 
state.remainingRequestTimeMs(timeMs); } public long remainingBackoffMs(Node node, long timeMs) { ConnectionState state = connections.get(node.idString()); if (state == null) { return 0; } return state.remainingBackoffMs(timeMs); } public boolean isResponseExpected(Node node, long correlationId) { ConnectionState state = connections.get(node.idString()); if (state == null) { return false; } return state.isResponseExpected(correlationId); } /** * Updates the manager when a response is received. * * @param node the source of the response * @param correlationId the correlation id of the response * @param success true if the request was successful, false otherwise * @param timeMs the current time */ public void onResponseResult(Node node, long correlationId, boolean success, long timeMs) { if (isResponseExpected(node, correlationId)) { if (success) { // Mark the connection as ready by resetting it reset(node); } else { // Backoff the connection connections.get(node.idString()).onResponseError(correlationId, timeMs); } } } /** * Updates the manager when a request is sent. * * @param node the destination of the request * @param correlationId the correlation id of the request * @param timeMs the current time */ public void onRequestSent(Node node, long correlationId, long timeMs) { ConnectionState state = connections.computeIfAbsent( node.idString(), key -> new ConnectionState(node, retryBackoffMs, requestTimeoutMs) ); state.onRequestSent(correlationId, timeMs); } public void reset(Node node) { connections.remove(node.idString()); } public void resetAll() { connections.clear(); } private
RequestManager
java
netty__netty
codec-mqtt/src/test/java/io/netty/handler/codec/mqtt/MqttMessageBuildersTest.java
{ "start": 858, "end": 1637 }
class ____ { @Test public void testConnAckWithProperties() { final MqttConnAckMessage ackMsg = MqttMessageBuilders.connAck() .properties(new PropertiesInitializer<MqttMessageBuilders.ConnAckPropertiesBuilder>() { @Override public void apply(MqttMessageBuilders.ConnAckPropertiesBuilder builder) { builder.assignedClientId("client1234"); builder.userProperty("custom_property", "value"); } }).build(); final String clientId = (String) ackMsg.variableHeader() .properties() .getProperty(MqttProperties.ASSIGNED_CLIENT_IDENTIFIER) .value(); assertEquals("client1234", clientId); } }
MqttMessageBuildersTest
java
alibaba__druid
core/src/test/java/com/alibaba/druid/bvt/pool/basic/PoolableStatementTest2.java
{ "start": 1207, "end": 27877 }
class ____ extends TestCase { private MockDriver driver; private DruidDataSource dataSource; protected void setUp() throws Exception { DruidDataSourceStatManager.clear(); driver = new MockDriver(); dataSource = new DruidDataSource(); dataSource.setUrl("jdbc:mock:xxx"); dataSource.setDriver(driver); dataSource.setInitialSize(1); dataSource.setMaxActive(2); dataSource.setMaxIdle(2); dataSource.setMinIdle(1); dataSource.setMinEvictableIdleTimeMillis(300 * 1000); // 300 / 10 dataSource.setTimeBetweenEvictionRunsMillis(180 * 1000); // 180 / 10 dataSource.setTestWhileIdle(true); dataSource.setTestOnBorrow(false); dataSource.setValidationQuery("SELECT 1"); dataSource.setFilters("stat,trace"); dataSource.setRemoveAbandoned(true); dataSource.setExceptionSorterClassName(null); assertTrue(dataSource.getExceptionSorter() instanceof NullExceptionSorter); dataSource.setExceptionSorterClassName(""); assertTrue(dataSource.getExceptionSorter() instanceof NullExceptionSorter); JdbcStatContext context = new JdbcStatContext(); context.setTraceEnable(true); JdbcStatManager.getInstance().setStatContext(context); } protected void tearDown() throws Exception { assertEquals(true, dataSource.getCreateTimespanNano() > 0); dataSource.close(); assertEquals(0, DruidDataSourceStatManager.getInstance().getDataSourceList().size()); JdbcStatManager.getInstance().setStatContext(null); } public void test_dupClose() throws Exception { Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); stmt.close(); stmt.close(); conn.close(); } public void test_executeUpdate() throws Exception { Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); stmt.executeUpdate("SET @VAR = 1"); stmt.close(); conn.close(); } public void test_executeUpdate_error() throws Exception { Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { 
stmt.executeUpdate("SET @VAR = 1"); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_execute_error() throws Exception { Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.execute("SET @VAR = 1"); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_executeQuery_error() throws Exception { Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.executeQuery("SELECT 1"); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_setEscapeProcessing() throws Exception { Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); stmt.setEscapeProcessing(true); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.setEscapeProcessing(true); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_getMaxFieldSize() throws Exception { Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); stmt.setMaxFieldSize(23); assertEquals(23, stmt.getMaxFieldSize()); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.getMaxFieldSize(); } catch (SQLException ex) { error = ex; } assertNotNull(error); } { SQLException error = null; try { stmt.setMaxFieldSize(23); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_QueryTimeout() throws Exception { Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); stmt.setQueryTimeout(33); assertEquals(33, stmt.getQueryTimeout()); 
((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.getQueryTimeout(); } catch (SQLException ex) { error = ex; } assertNotNull(error); } { SQLException error = null; try { stmt.setQueryTimeout(23); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_MaxRows() throws Exception { Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); stmt.setMaxRows(44); assertEquals(44, stmt.getMaxRows()); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.getMaxRows(); } catch (SQLException ex) { error = ex; } assertNotNull(error); } { SQLException error = null; try { stmt.setMaxRows(23); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_FetchDirection() throws Exception { Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); stmt.setFetchDirection(144); assertEquals(144, stmt.getFetchDirection()); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.getFetchDirection(); } catch (SQLException ex) { error = ex; } assertNotNull(error); } { SQLException error = null; try { stmt.setFetchDirection(23); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_FetchSize() throws Exception { Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); stmt.setFetchSize(144); assertEquals(144, stmt.getFetchSize()); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.getFetchSize(); } catch (SQLException ex) { error = ex; } assertNotNull(error); } { SQLException error = null; try { stmt.setFetchSize(23); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_cancel() throws Exception { Connection conn = 
dataSource.getConnection(); Statement stmt = conn.createStatement(); stmt.cancel(); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.cancel(); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_getWarnings() throws Exception { Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); stmt.getWarnings(); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.getWarnings(); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_clearWarnings() throws Exception { Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); stmt.clearWarnings(); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.clearWarnings(); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_setCursorName() throws Exception { Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); stmt.setCursorName("c_name"); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.setCursorName("c_name"); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_getResultSet() throws Exception { Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); stmt.getResultSet(); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.getResultSet(); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_getUpdateCount() throws Exception { Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); stmt.getUpdateCount(); stmt.executeQuery("select 1"); ((DruidPooledStatement) 
stmt).getStatement().close(); { SQLException error = null; try { stmt.getUpdateCount(); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_getMoreResults() throws Exception { Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); stmt.getMoreResults(); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.getMoreResults(); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_getResultSetConcurrency() throws Exception { Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); stmt.getResultSetConcurrency(); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.getResultSetConcurrency(); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_getResultSetType() throws Exception { Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); stmt.getResultSetType(); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.getResultSetType(); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_addBatch() throws Exception { Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); stmt.addBatch("select 1"); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.addBatch("select 1"); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_clearBatch() throws Exception { Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); stmt.clearBatch(); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.clearBatch(); } catch (SQLException ex) { error 
= ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_executeBatch() throws Exception { Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); stmt.executeBatch(); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.executeBatch(); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_getMoreResults_1() throws Exception { Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); stmt.getMoreResults(1); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.getMoreResults(1); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_getGeneratedKeys() throws Exception { Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); stmt.getGeneratedKeys(); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.getGeneratedKeys(); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_getResultSetHoldability() throws Exception { Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); stmt.getResultSetHoldability(); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.getResultSetHoldability(); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_execute() throws Exception { Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); stmt.execute("SELECT 1", new String[0]); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.execute("SELECT 1", new String[0]); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void 
test_execute_1() throws Exception { Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); stmt.execute("SELECT 1", new int[0]); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.execute("SELECT 1", new int[0]); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_execute_2() throws Exception { Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); stmt.execute("SELECT 1", Statement.NO_GENERATED_KEYS); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.execute("SELECT 1", Statement.NO_GENERATED_KEYS); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_executeUpdate_1() throws Exception { Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); stmt.executeUpdate("SELECT 1", new String[0]); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.execute("SELECT 1", new String[0]); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_executeUpdate_2() throws Exception { Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); stmt.executeUpdate("SELECT 1", new int[0]); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.execute("SELECT 1", new int[0]); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_executeUpdate_3() throws Exception { Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); stmt.executeUpdate("SELECT 1", Statement.NO_GENERATED_KEYS); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.execute("SELECT 1", Statement.NO_GENERATED_KEYS); } catch 
(SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_getMeta() throws Exception { Connection conn = dataSource.getConnection(); PreparedStatement stmt = conn.prepareStatement("SELELCT 1"); stmt.getMetaData(); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.getMetaData(); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_getParameterMetaData() throws Exception { Connection conn = dataSource.getConnection(); PreparedStatement stmt = conn.prepareStatement("SELELCT 1"); stmt.getParameterMetaData(); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.getParameterMetaData(); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_wasNull() throws Exception { Connection conn = dataSource.getConnection(); CallableStatement stmt = conn.prepareCall("SELELCT 1"); stmt.wasNull(); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.wasNull(); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_executeQuery() throws Exception { Connection conn = dataSource.getConnection(); PreparedStatement stmt = conn.prepareStatement("SELECT 1"); stmt.executeQuery(); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.executeQuery(); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_executeUpdate_4() throws Exception { Connection conn = dataSource.getConnection(); PreparedStatement stmt = conn.prepareStatement("SELECT 1"); stmt.executeQuery(); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.executeUpdate(); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); 
conn.close(); } public void test_execute_3() throws Exception { Connection conn = dataSource.getConnection(); PreparedStatement stmt = conn.prepareStatement("SELELCT 1"); stmt.execute(); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.execute(); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_clearParameters() throws Exception { Connection conn = dataSource.getConnection(); PreparedStatement stmt = conn.prepareStatement("SELELCT 1"); stmt.clearParameters(); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.clearParameters(); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_addBatch_1() throws Exception { Connection conn = dataSource.getConnection(); PreparedStatement stmt = conn.prepareStatement("SELELCT 1"); stmt.addBatch(); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.addBatch(); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_executeUpdate_5() throws Exception { Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); stmt.executeUpdate("SET 1", Statement.RETURN_GENERATED_KEYS); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.executeUpdate("SET 1", Statement.RETURN_GENERATED_KEYS); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_executeUpdate_6() throws Exception { Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); stmt.executeUpdate("SET 1", new String[0]); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.executeUpdate("SET 1", new String[0]); } catch (SQLException ex) { error = ex; } assertNotNull(error); } 
stmt.close(); conn.close(); } public void test_executeUpdate_7() throws Exception { Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); stmt.executeUpdate("SET 1", new int[0]); ((DruidPooledStatement) stmt).getStatement().close(); { SQLException error = null; try { stmt.executeUpdate("SET 1", new int[0]); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } public void test_setPoolable() throws Exception { Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); stmt.setPoolable(true); { SQLException error = null; try { stmt.setPoolable(false); } catch (SQLException ex) { error = ex; } assertNotNull(error); } stmt.close(); conn.close(); } }
PoolableStatementTest2
java
apache__camel
test-infra/camel-test-infra-ollama/src/test/java/org/apache/camel/test/infra/ollama/services/OllamaLocalContainerService.java
{ "start": 864, "end": 1168 }
class ____ extends OllamaLocalContainerInfraService implements OllamaService { public OllamaLocalContainerService() { super(); } public OllamaLocalContainerService(OllamaServiceConfiguration serviceConfiguration) { super(serviceConfiguration); } }
OllamaLocalContainerService
java
apache__flink
flink-core/src/main/java/org/apache/flink/api/common/typeutils/base/array/BytePrimitiveArraySerializer.java
{ "start": 1323, "end": 3416 }
class ____ extends TypeSerializerSingleton<byte[]> { private static final long serialVersionUID = 1L; private static final byte[] EMPTY = new byte[0]; public static final BytePrimitiveArraySerializer INSTANCE = new BytePrimitiveArraySerializer(); @Override public boolean isImmutableType() { return false; } @Override public byte[] createInstance() { return EMPTY; } @Override public byte[] copy(byte[] from) { byte[] copy = new byte[from.length]; System.arraycopy(from, 0, copy, 0, from.length); return copy; } @Override public byte[] copy(byte[] from, byte[] reuse) { return copy(from); } @Override public int getLength() { return -1; } @Override public void serialize(byte[] record, DataOutputView target) throws IOException { if (record == null) { throw new IllegalArgumentException("The record must not be null."); } final int len = record.length; target.writeInt(len); target.write(record); } @Override public byte[] deserialize(DataInputView source) throws IOException { final int len = source.readInt(); byte[] result = new byte[len]; source.readFully(result); return result; } @Override public byte[] deserialize(byte[] reuse, DataInputView source) throws IOException { return deserialize(source); } @Override public void copy(DataInputView source, DataOutputView target) throws IOException { final int len = source.readInt(); target.writeInt(len); target.write(source, len); } @Override public TypeSerializerSnapshot<byte[]> snapshotConfiguration() { return new BytePrimitiveArraySerializerSnapshot(); } // ------------------------------------------------------------------------ /** Serializer configuration snapshot for compatibility and format evolution. */ @SuppressWarnings("WeakerAccess") public static final
BytePrimitiveArraySerializer
java
hibernate__hibernate-orm
hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/ids/embeddedid/NestedEmbeddedIdentifiersTest.java
{ "start": 789, "end": 3086 }
class ____ { private OwnerOfRelationCodeId id; @BeforeClassTemplate public void initData(EntityManagerFactoryScope scope) { // Revision 1, test insert final OwnerOfRelationCode owner = scope.fromTransaction( session -> { CompositeEntity compositeEntity = new CompositeEntity(); compositeEntity.setFirstCode( "firstCode" ); compositeEntity.setSecondCode( "secondCode" ); session.persist( compositeEntity ); OwnerOfRelationCode ownerEntity = new OwnerOfRelationCode(); ownerEntity.setCompositeEntity( compositeEntity ); ownerEntity.setSecondIdentifier( "secondIdentifier" ); session.persist( ownerEntity ); return ownerEntity; } ); this.id = owner.getCodeObject(); // Revision 2, test update scope.inTransaction( session -> { OwnerOfRelationCode ownerEntity = session.find( OwnerOfRelationCode.class, id ); ownerEntity.setDescription( "first description" ); } ); } @Test public void testRevisionCounts(EntityManagerFactoryScope scope) { scope.inEntityManager( em -> { assertEquals( Arrays.asList( 1, 2 ), AuditReaderFactory.get( em ).getRevisions( OwnerOfRelationCode.class, id ) ); } ); } @Test public void testIdentifierAtRevision1(EntityManagerFactoryScope scope) { scope.inEntityManager( em -> { final OwnerOfRelationCode rev1 = AuditReaderFactory.get( em ) .find( OwnerOfRelationCode.class, id, 1 ); assertEquals( rev1.getCodeObject().getSecondIdentifier(), "secondIdentifier" ); assertEquals( rev1.getCodeObject().getCompositeEntity().getFirstCode(), "firstCode" ); assertEquals( rev1.getCodeObject().getCompositeEntity().getSecondCode(), "secondCode" ); assertNull( rev1.getDescription() ); } ); } @Test public void testIdentifierAtRevision2(EntityManagerFactoryScope scope) { scope.inEntityManager( em -> { final OwnerOfRelationCode rev2 = AuditReaderFactory.get( em ) .find( OwnerOfRelationCode.class, id, 2 ); assertEquals( rev2.getCodeObject().getSecondIdentifier(), "secondIdentifier" ); assertEquals( rev2.getCodeObject().getCompositeEntity().getFirstCode(), "firstCode" ); assertEquals( 
rev2.getCodeObject().getCompositeEntity().getSecondCode(), "secondCode" ); assertEquals( rev2.getDescription(), "first description" ); } ); } }
NestedEmbeddedIdentifiersTest
java
quarkusio__quarkus
extensions/tls-registry/deployment/src/test/java/io/quarkus/tls/ExpiredTrustStoreWithMTLSTest.java
{ "start": 1148, "end": 5299 }
class ____ { private static final String configuration = """ # Server quarkus.tls.key-store.p12.path=target/certs/expired-mtls-keystore.p12 quarkus.tls.key-store.p12.password=password quarkus.tls.trust-store.p12.path=target/certs/expired-mtls-server-truststore.p12 quarkus.tls.trust-store.p12.password=password # The server will ignore the expired client certificates # Clients quarkus.tls.warn.trust-store.p12.path=target/certs/expired-mtls-client-truststore.p12 quarkus.tls.warn.trust-store.p12.password=password quarkus.tls.warn.trust-store.certificate-expiration-policy=warn quarkus.tls.warn.key-store.p12.path=target/certs/expired-mtls-client-keystore.p12 quarkus.tls.warn.key-store.p12.password=password quarkus.tls.reject.trust-store.p12.path=target/certs/expired-mtls-client-truststore.p12 quarkus.tls.reject.trust-store.p12.password=password quarkus.tls.reject.trust-store.certificate-expiration-policy=reject quarkus.tls.reject.key-store.p12.path=target/certs/expired-mtls-client-keystore.p12 quarkus.tls.reject.key-store.p12.password=password """; @RegisterExtension static final QuarkusUnitTest config = new QuarkusUnitTest().setArchiveProducer( () -> ShrinkWrap.create(JavaArchive.class) .add(new StringAsset(configuration), "application.properties")); @Inject TlsConfigurationRegistry certificates; @Inject Vertx vertx; private HttpServer server; @AfterEach void cleanup() { if (server != null) { server.close().toCompletionStage().toCompletableFuture().join(); } } @Test void testWarn() throws InterruptedException { TlsConfiguration cf = certificates.get("warn").orElseThrow(); assertThat(cf.getTrustStoreOptions()).isNotNull(); WebClient client = WebClient.create(vertx, new WebClientOptions() .setSsl(true) .setKeyCertOptions(cf.getKeyStoreOptions()) .setTrustOptions(cf.getTrustStoreOptions())); server = vertx.createHttpServer(new HttpServerOptions() .setSsl(true) .setClientAuth(ClientAuth.REQUIRED) 
.setTrustOptions(certificates.getDefault().orElseThrow().getTrustStoreOptions()) .setKeyCertOptions(certificates.getDefault().orElseThrow().getKeyStoreOptions())) .requestHandler(rc -> rc.response().end("Hello")).listen(8081).toCompletionStage().toCompletableFuture().join(); CountDownLatch latch = new CountDownLatch(1); client.get(8081, "localhost", "/").send(ar -> { assertThat(ar.succeeded()).isTrue(); assertThat(ar.result().bodyAsString()).isEqualTo("Hello"); latch.countDown(); }); assertThat(latch.await(10, java.util.concurrent.TimeUnit.SECONDS)).isTrue(); } @Test void testReject() { TlsConfiguration cf = certificates.get("reject").orElseThrow(); assertThat(cf.getTrustStoreOptions()).isNotNull(); WebClient client = WebClient.create(vertx, new WebClientOptions() .setSsl(true) .setKeyCertOptions(cf.getKeyStoreOptions()) .setTrustOptions(cf.getTrustStoreOptions())); server = vertx.createHttpServer(new HttpServerOptions() .setSsl(true) .setClientAuth(ClientAuth.REQUIRED) .setTrustOptions(certificates.getDefault().orElseThrow().getTrustStoreOptions()) .setKeyCertOptions(certificates.getDefault().orElseThrow().getKeyStoreOptions())) .requestHandler(rc -> rc.response().end("Hello")).listen(8081).toCompletionStage().toCompletableFuture().join(); assertThatThrownBy(() -> client.get(8081, "localhost", "/") .send().toCompletionStage().toCompletableFuture().join()).hasCauseInstanceOf(SSLHandshakeException.class); } }
ExpiredTrustStoreWithMTLSTest
java
spring-projects__spring-framework
spring-context/src/test/java/org/springframework/validation/DataBinderTests.java
{ "start": 88861, "end": 89253 }
class ____ { private String id; private Optional<String> name; public String getId() { return id; } @SuppressWarnings("unused") public void setId(String id) { this.id = id; } public Optional<String> getName() { return name; } @SuppressWarnings("unused") public void setName(Optional<String> name) { this.name = name; } } private static
OptionalHolder
java
quarkusio__quarkus
extensions/funqy/funqy-google-cloud-functions/deployment/src/main/java/io/quarkus/funqy/gcp/functions/deployment/bindings/ConfigurationCustomizer.java
{ "start": 503, "end": 1261 }
class ____ implements SmallRyeConfigBuilderCustomizer { public void configBuilder(final SmallRyeConfigBuilder builder) { builder.withInterceptorFactories(new ConfigSourceInterceptorFactory() { public ConfigSourceInterceptor getInterceptor(final ConfigSourceInterceptorContext context) { return (ic, name) -> switch (name) { case "quarkus.package.jar.type" -> ConfigValue.builder().withName(name).withValue("uber-jar").build(); default -> ic.proceed(name); }; } public OptionalInt getPriority() { return OptionalInt.of(Integer.MIN_VALUE + 100); } }); } }
ConfigurationCustomizer
java
alibaba__fastjson
src/test/java/com/alibaba/json/test/benchmark/BenchmarkExecutor.java
{ "start": 6804, "end": 8048 }
class ____ { private String name; private long millis; private long youngGC; private long youngGCTime; private long fullGC; private Throwable error; public String getName() { return name; } public void setName(String name) { this.name = name; } public long getMillis() { return millis; } public void setMillis(long millis) { this.millis = millis; } public long getYoungGC() { return youngGC; } public void setYoungGC(long youngGC) { this.youngGC = youngGC; } public long getYoungGCTime() { return youngGCTime; } public void setYoungGCTime(long youngGCTime) { this.youngGCTime = youngGCTime; } public long getFullGC() { return fullGC; } public void setFullGC(long fullGC) { this.fullGC = fullGC; } public Throwable getError() { return error; } public void setError(Throwable error) { this.error = error; } } }
Result
java
google__dagger
javatests/dagger/internal/codegen/ComponentProcessorTest.java
{ "start": 31604, "end": 31847 }
class ____ {", " @Inject A() {}", "}"); Source a2 = CompilerTests.javaSource("pkg2.A", "package pkg2;", "", "import javax.inject.Inject;", "", "public final
A
java
apache__hadoop
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
{ "start": 4142, "end": 4281 }
interface ____ the {@link Router} implemented by * {@link RouterAdminServer}. */ @SuppressWarnings("checkstyle:visibilitymodifier") public
of
java
apache__hadoop
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingApplicationFinishEvent.java
{ "start": 1027, "end": 1621 }
class ____ extends WritingApplicationHistoryEvent { private ApplicationId appId; private ApplicationFinishData appFinish; public WritingApplicationFinishEvent(ApplicationId appId, ApplicationFinishData appFinish) { super(WritingHistoryEventType.APP_FINISH); this.appId = appId; this.appFinish = appFinish; } @Override public int hashCode() { return appId.hashCode(); } public ApplicationId getApplicationId() { return appId; } public ApplicationFinishData getApplicationFinishData() { return appFinish; } }
WritingApplicationFinishEvent
java
apache__kafka
connect/runtime/src/test/java/org/apache/kafka/connect/test/util/MockitoUtils.java
{ "start": 1070, "end": 2803 }
class ____ { /** * Create a verification mode that accepts any number of method invocations, including zero. * <p> * Sample usage: * <p> * {@code verify(sourceTask, anyTimes()).poll();} * @return the verification mode; never null */ public static VerificationMode anyTimes() { return atLeast(0); } /** * Count the number of times a method has been invoked on a mock object. * <p> * Sample usage: * <p> * <pre> * Producer<byte[], byte[]> producer = mock(Producer.class); * // ... run through some test case that uses the mocked producer * assertEquals( * "Producer should have aborted every record it sent", * countInvocations(producer, "abortTransaction"), * countInvocations(producer, "send", ProducerRecord.class, Callback.class) * ); * </pre> * @param mock the mock object whose method invocations should be counted; may not be null * @param methodName the name of the method whose invocations should be counted; may not be null * @param parameters the types of the parameters for the method whose invocations should be counted; * may be empty, but may not contain any null elements * @return the number of times the method was invoked on the mock */ public static long countInvocations(Object mock, String methodName, Class<?>... parameters) { return mockingDetails(mock).getInvocations().stream() .map(InvocationOnMock::getMethod) .filter(m -> methodName.equals(m.getName())) .filter(m -> Arrays.equals(parameters, m.getParameterTypes())) .count(); } }
MockitoUtils
java
spring-projects__spring-boot
test-support/spring-boot-docker-test-support/src/main/java/org/springframework/boot/testsupport/container/TestImage.java
{ "start": 2440, "end": 14313 }
enum ____ { /** * A container image suitable for testing ActiveMQ. */ ACTIVE_MQ("symptoma/activemq", "5.18.3", () -> SymptomaActiveMQContainer.class), /** * A container image suitable for testing ActiveMQ classic. */ ACTIVE_MQ_CLASSIC("apache/activemq-classic", "5.18.3", () -> ActiveMQContainer.class), /** * A container image suitable for testing Apache Kafka. */ APACHE_KAFKA("apache/kafka", "3.7.0", () -> org.testcontainers.kafka.KafkaContainer.class), /** * A container image suitable for testing Artemis. */ ARTEMIS("apache/activemq-artemis", "2.34.0", () -> ArtemisContainer.class), /** * A container image suitable for testing Cassandra. */ CASSANDRA("cassandra", "3.11.10", () -> CassandraContainer.class, (container) -> ((CassandraContainer) container).withStartupTimeout(Duration.ofMinutes(10))), /** * A container image suitable for testing ClickHouse. */ CLICKHOUSE("clickhouse/clickhouse-server", "24.3"), /** * A container image suitable for testing Couchbase. */ COUCHBASE("couchbase/server", "7.1.4", () -> CouchbaseContainer.class, (container) -> ((CouchbaseContainer) container).withStartupAttempts(5) .withStartupTimeout(Duration.ofMinutes(10))), /** * A container image suitable for testing Elasticsearch 7. */ ELASTICSEARCH("docker.elastic.co/elasticsearch/elasticsearch", "7.17.28", () -> ElasticsearchContainer.class, (container) -> ((ElasticsearchContainer) container).withEnv("ES_JAVA_OPTS", "-Xms32m -Xmx512m") .withStartupAttempts(5) .withStartupTimeout(Duration.ofMinutes(10))), /** * A container image suitable for testing Elasticsearch 8. */ ELASTICSEARCH_8("elasticsearch", "8.17.1"), /** * A container image suitable for testing Elasticsearch 9. */ ELASTICSEARCH_9("elasticsearch", "9.0.2"), /** * A container image suitable for testing Grafana OTel LGTM. 
*/ GRAFANA_OTEL_LGTM("grafana/otel-lgtm", "0.6.0", () -> LgtmStackContainer.class, (container) -> ((LgtmStackContainer) container).withStartupTimeout(Duration.ofMinutes(2))), /** * A container image suitable for testing Hazelcast. */ HAZELCAST("hazelcast/hazelcast", "5.5.0-slim-jdk17", () -> HazelcastContainer.class), /** * A container image suitable for testing Confluent's distribution of Kafka. */ CONFLUENT_KAFKA("confluentinc/cp-kafka", "7.4.0", () -> ConfluentKafkaContainer.class), /** * A container image suitable for testing LLDAP. */ LLDAP("lldap/lldap", "v0.6.1-alpine", () -> LLdapContainer.class), /** * A container image suitable for testing OpenLDAP. */ OPEN_LDAP("osixia/openldap", "1.5.0", () -> OpenLdapContainer.class), /** * A container image suitable for testing SMTP. */ MAILPIT("axllent/mailpit", "v1.19.0", () -> MailpitContainer.class), /** * A container image suitable for testing MariaDB. */ MARIADB("mariadb", "10.10", () -> MariaDBContainer.class), /** * A container image suitable for testing MongoDB. */ MONGODB("mongo", "5.0.17", () -> MongoDBContainer.class, (container) -> ((MongoDBContainer) container).withStartupAttempts(5) .withStartupTimeout(Duration.ofMinutes(5))), /** * A container image suitable for testing MongoDB using the deprecated * {@link org.testcontainers.containers.MongoDBContainer}. * @deprecated since 4.0.0 for removal in 4.2.0 in favor of {@link #MONGODB} */ @SuppressWarnings("deprecation") @Deprecated(since = "3.4.0", forRemoval = true) MONGODB_DEPRECATED("mongo", "5.0.17", () -> org.testcontainers.containers.MongoDBContainer.class, (container) -> ((org.testcontainers.containers.MongoDBContainer) container).withStartupAttempts(5) .withStartupTimeout(Duration.ofMinutes(5))), /** * A container image suitable for testing MongoDB Atlas. 
*/ MONGODB_ATLAS("mongodb/mongodb-atlas-local", "8.0.4", () -> MongoDBAtlasLocalContainer.class, (container) -> ((MongoDBAtlasLocalContainer) container).withStartupAttempts(5) .withStartupTimeout(Duration.ofMinutes(5))), /** * A container image suitable for testing MySQL. */ MYSQL("mysql", "8.0", () -> MySQLContainer.class), /** * A container image suitable for testing Neo4j. */ NEO4J("neo4j", "5.26.11", () -> Neo4jContainer.class, (container) -> ((Neo4jContainer) container).withStartupAttempts(5) .withStartupTimeout(Duration.ofMinutes(10))), /** * A container image suitable for testing Neo4j using the deprecated * {@link org.testcontainers.containers.Neo4jContainer}. * @deprecated since 4.0.0 for removal in 4.2.0 in favor of {@link #NEO4J} */ @SuppressWarnings({ "deprecation", "rawtypes" }) @Deprecated(since = "3.4.0", forRemoval = true) NEO4J_DEPRECATED("neo4j", "5.26.11", () -> org.testcontainers.containers.Neo4jContainer.class, (container) -> ((org.testcontainers.containers.Neo4jContainer) container).withStartupAttempts(5) .withStartupTimeout(Duration.ofMinutes(10))), /** * A container image suitable for testing Oracle Free. */ ORACLE_FREE("gvenzl/oracle-free", "23.6-slim", () -> org.testcontainers.oracle.OracleContainer.class, (container) -> ((org.testcontainers.oracle.OracleContainer) container) .withStartupTimeout(Duration.ofMinutes(2))), /** * A container image suitable for testing Oracle XA. */ ORACLE_XE("gvenzl/oracle-xe", "18.4.0-slim", () -> org.testcontainers.containers.OracleContainer.class, (container) -> ((org.testcontainers.containers.OracleContainer) container) .withStartupTimeout(Duration.ofMinutes(2))), /** * A container image suitable for testing OpenTelemetry using the OpenTelemetry * collector. */ OTEL_COLLECTOR("otel/opentelemetry-collector-contrib", "0.75.0"), /** * A container image suitable for testing Postgres. */ POSTGRESQL("postgres", "14.0", () -> PostgreSQLContainer.class), /** * A container image suitable for testing Pulsar. 
*/ PULSAR("apachepulsar/pulsar", "3.3.3", () -> PulsarContainer.class, (container) -> ((PulsarContainer) container).withStartupAttempts(2) .withStartupTimeout(Duration.ofMinutes(3))), /** * A container image suitable for testing Pulsar using the deprecated * {@link org.testcontainers.containers.PulsarContainer}. * @deprecated since 4.0.0 for removal in 4.2.0 in favor of {@link #PULSAR} */ @SuppressWarnings("deprecation") @Deprecated(since = "3.4.0", forRemoval = true) PULSAR_DEPRECATED("apachepulsar/pulsar", "3.3.3", () -> org.testcontainers.containers.PulsarContainer.class, (container) -> ((org.testcontainers.containers.PulsarContainer) container).withStartupAttempts(2) .withStartupTimeout(Duration.ofMinutes(3))), /** * A container image suitable for testing RabbitMQ. */ RABBITMQ("rabbitmq", "3.11-alpine", () -> RabbitMQContainer.class, (container) -> ((RabbitMQContainer) container).withStartupTimeout(Duration.ofMinutes(4))), /** * A container image suitable for testing RabbitMQ using the deprecated * {@link org.testcontainers.containers.RabbitMQContainer}. * @deprecated since 4.0.0 for removal in 4.2.0 in favor of {@link #RABBITMQ} */ @SuppressWarnings("deprecation") @Deprecated(since = "3.4.0", forRemoval = true) RABBITMQ_DEPRECATED("rabbitmq", "3.11-alpine", () -> org.testcontainers.containers.RabbitMQContainer.class, (container) -> ((org.testcontainers.containers.RabbitMQContainer) container) .withStartupTimeout(Duration.ofMinutes(4))), /** * A container image suitable for testing Redis. */ REDIS("redis", "7.0.11", () -> RedisContainer.class, (container) -> ((RedisContainer) container).withStartupAttempts(5) .withStartupTimeout(Duration.ofMinutes(10))), /** * A container image suitable for testing Redis Stack. 
*/ REDIS_STACK("redis/redis-stack", "7.2.0-v11", () -> RedisStackContainer.class, (container) -> ((RedisStackContainer) container).withStartupAttempts(5) .withStartupTimeout(Duration.ofMinutes(10))), /** * A container image suitable for testing Redis Stack Server. */ REDIS_STACK_SERVER("redis/redis-stack-server", "7.2.0-v11", () -> RedisStackServerContainer.class, (container) -> ((RedisStackServerContainer) container).withStartupAttempts(5) .withStartupTimeout(Duration.ofMinutes(10))), /** * A container image suitable for testing Redpanda. */ REDPANDA("redpandadata/redpanda", "v23.1.2", () -> RedpandaContainer.class, (container) -> ((RedpandaContainer) container).withStartupTimeout(Duration.ofMinutes(5))), /** * A container image suitable for testing Docker Registry. */ REGISTRY("registry", "2.7.1", () -> RegistryContainer.class, (container) -> ((RegistryContainer) container).withStartupAttempts(5) .withStartupTimeout(Duration.ofMinutes(3))), /** * A container image suitable for testing MS SQL Server. */ SQL_SERVER("mcr.microsoft.com/mssql/server"), /** * A container image suitable for testing Zipkin. */ ZIPKIN("openzipkin/zipkin", "3.0.6", () -> ZipkinContainer.class); private final String name; private final String tag; private final Class<?> containerClass; private final Consumer<?> containerSetup; TestImage(String name) { this(name, null); } TestImage(String name, String tag) { this(name, tag, null, null); } TestImage(String name, String tag, Supplier<Class<?>> containerClass) { this(name, tag, containerClass, null); } TestImage(String name, String tag, Consumer<?> containerSetup) { this(name, tag, null, containerSetup); } TestImage(String name, String tag, Supplier<Class<?>> containerClass, Consumer<?> containerSetup) { this.name = name; this.tag = tag; this.containerClass = getIfPossible(containerClass); this.containerSetup = containerSetup; } static Class<?> getIfPossible(Supplier<Class<?>> supplier) { try { return (supplier != null) ? 
supplier.get() : null; } catch (NoClassDefFoundError ex) { return null; } } private boolean matchesContainerClass(Class<?> containerClass) { return this.containerClass != null && this.containerClass.isAssignableFrom(containerClass); } /** * Create a {@link GenericContainer} for the given {@link TestImage}. * @return a generic container for the test image */ public GenericContainer<?> genericContainer() { return createContainer(GenericContainer.class); } @SuppressWarnings({ "rawtypes", "unchecked" }) private <C extends Container<?>> C createContainer(Class<C> containerClass) { DockerImageName dockerImageName = DockerImageName.parse(toString()); try { Constructor<C> constructor = containerClass.getDeclaredConstructor(DockerImageName.class); constructor.setAccessible(true); C container = constructor.newInstance(dockerImageName); if (this.containerSetup != null) { ((Consumer) this.containerSetup).accept(container); } return container; } catch (Exception ex) { throw new IllegalStateException("Unable to create container " + containerClass, ex); } } public String getTag() { return this.tag; } @Override public String toString() { return (this.tag != null) ? this.name + ":" + this.tag : this.name; } /** * Factory method to create and configure a {@link Container} using a deduced * {@link TestImage}. * @param <C> the container type * @param containerClass the container type * @return a container instance */ public static <C extends Container<?>> C container(Class<C> containerClass) { return forContainerClass(containerClass).createContainer(containerClass); } private static TestImage forContainerClass(Class<?> containerClass) { List<TestImage> images = Arrays.stream(values()) .filter((image) -> image.matchesContainerClass(containerClass)) .toList(); Assert.state(!images.isEmpty(), () -> "Unknown container class " + containerClass); Assert.state(images.size() == 1, () -> "Multiple test images match container class " + containerClass); return images.get(0); } }
TestImage
java
ReactiveX__RxJava
src/main/java/io/reactivex/rxjava3/internal/schedulers/NewThreadWorker.java
{ "start": 934, "end": 1071 }
class ____ manages a single-threaded ScheduledExecutorService as a * worker but doesn't perform task-tracking operations. * */ public
that
java
apache__hadoop
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/ApplicationAttemptEntity.java
{ "start": 1107, "end": 1568 }
class ____ extends HierarchicalTimelineEntity { public ApplicationAttemptEntity() { super(TimelineEntityType.YARN_APPLICATION_ATTEMPT.toString()); } public ApplicationAttemptEntity(TimelineEntity entity) { super(entity); if (!entity.getType().equals( TimelineEntityType.YARN_APPLICATION_ATTEMPT.toString())) { throw new IllegalArgumentException("Incompatible entity type: " + getId()); } } }
ApplicationAttemptEntity
java
apache__camel
core/camel-management/src/main/java/org/apache/camel/management/mbean/StatisticValue.java
{ "start": 902, "end": 1430 }
class ____ extends Statistic { private final AtomicLong value = new AtomicLong(-1); @Override public void updateValue(long newValue) { value.set(newValue); } @Override public long getValue() { return value.get(); } @Override public String toString() { return Long.toString(value.get()); } @Override public boolean isUpdated() { return value.get() != -1; } @Override public void reset() { value.set(-1); } }
StatisticValue
java
quarkusio__quarkus
extensions/hibernate-validator/deployment/src/test/java/io/quarkus/hibernate/validator/test/CustomConfigurationViaBeansTest.java
{ "start": 5202, "end": 5509 }
class ____ implements ScriptEvaluatorFactory { @Override public void clear() { } @Override public ScriptEvaluator getScriptEvaluatorByLanguageName(String arg0) { return null; } } @ApplicationScoped public static
MyScriptEvaluatorFactory
java
apache__camel
components/camel-github/src/main/java/org/apache/camel/component/github/GitHubType.java
{ "start": 854, "end": 1050 }
enum ____ { CLOSEPULLREQUEST, PULLREQUESTCOMMENT, COMMIT, PULLREQUEST, TAG, PULLREQUESTSTATE, PULLREQUESTFILES, GETCOMMITFILE, CREATEISSUE, EVENT; }
GitHubType
java
FasterXML__jackson-core
src/test/java/tools/jackson/core/unittest/read/NumberDeferredReadTest.java
{ "start": 466, "end": 5794 }
class ____ extends JacksonCoreTestBase { protected JsonFactory jsonFactory() { return sharedStreamFactory(); } /* /********************************************************************** /* Tests, integral types /********************************************************************** */ // Int, long eagerly decoded, always @Test void deferredInt() throws Exception { _testDeferredInt(MODE_INPUT_STREAM); _testDeferredInt(MODE_INPUT_STREAM_THROTTLED); _testDeferredInt(MODE_READER); _testDeferredInt(MODE_DATA_INPUT); } private void _testDeferredInt(int mode) throws Exception { // trailing space to avoid problems with DataInput try (JsonParser p = createParser(jsonFactory(), mode, " 12345 ")) { assertToken(JsonToken.VALUE_NUMBER_INT, p.nextToken()); assertEquals(Integer.valueOf(12345), p.getNumberValueDeferred()); assertEquals(NumberType.INT, p.getNumberType()); assertNull(p.nextToken()); } } @Test void deferredLong() throws Exception { _testDeferredLong(MODE_INPUT_STREAM); _testDeferredLong(MODE_INPUT_STREAM_THROTTLED); _testDeferredLong(MODE_READER); _testDeferredLong(MODE_DATA_INPUT); } private void _testDeferredLong(int mode) throws Exception { final long value = 100L + Integer.MAX_VALUE; try (JsonParser p = createParser(jsonFactory(), mode, " "+value+" ")) { assertToken(JsonToken.VALUE_NUMBER_INT, p.nextToken()); assertEquals(Long.valueOf(value), p.getNumberValueDeferred()); assertEquals(NumberType.LONG, p.getNumberType()); assertNull(p.nextToken()); } } @Test void deferredBigInteger() throws Exception { _testDeferredBigInteger(MODE_INPUT_STREAM); _testDeferredBigInteger(MODE_INPUT_STREAM_THROTTLED); _testDeferredBigInteger(MODE_READER); _testDeferredBigInteger(MODE_DATA_INPUT); } private void _testDeferredBigInteger(int mode) throws Exception { BigInteger value = BigInteger.valueOf(Long.MAX_VALUE).add(BigInteger.TEN); try (JsonParser p = createParser(jsonFactory(), mode, " "+value+" ")) { assertToken(JsonToken.VALUE_NUMBER_INT, p.nextToken()); 
assertEquals(NumberType.BIG_INTEGER, p.getNumberType()); Object nr = p.getNumberValueDeferred(); assertEquals(String.class, nr.getClass()); assertEquals(value.toString(), nr); // But if forced to, we'll get BigInteger assertEquals(value, p.getBigIntegerValue()); assertEquals(value, p.getNumberValueDeferred()); } } /* /********************************************************************** /* Tests, floating point types /********************************************************************** */ @Test void deferredFloatingPoint() throws Exception { _testDeferredFloatingPoint(MODE_INPUT_STREAM); _testDeferredFloatingPoint(MODE_INPUT_STREAM_THROTTLED); _testDeferredFloatingPoint(MODE_READER); _testDeferredFloatingPoint(MODE_DATA_INPUT); } private void _testDeferredFloatingPoint(int mode) throws Exception { // Try with BigDecimal/Double/Float; work very similarly try (JsonParser p = createParser(jsonFactory(), mode, " 0.25 ")) { BigDecimal value = new BigDecimal("0.25"); assertToken(JsonToken.VALUE_NUMBER_FLOAT, p.nextToken()); // NOTE! Important NOT to call "p.getNumberType()" as that'll fix // type to Double... 
Object nr = p.getNumberValueDeferred(); assertEquals(String.class, nr.getClass()); assertEquals(value.toString(), nr); // But if forced to, we'll get BigInteger assertEquals(value, p.getDecimalValue()); assertEquals(value, p.getNumberValueDeferred()); assertEquals(NumberType.BIG_DECIMAL, p.getNumberType()); } try (JsonParser p = createParser(jsonFactory(), mode, " 0.25 ")) { Double value = 0.25d; assertToken(JsonToken.VALUE_NUMBER_FLOAT, p.nextToken()); Object nr = p.getNumberValueDeferred(); assertEquals(String.class, nr.getClass()); assertEquals(value.toString(), nr); // But if forced to, we'll get BigInteger assertEquals(value, p.getDoubleValue()); assertEquals(value, p.getNumberValueDeferred()); assertEquals(NumberType.DOUBLE, p.getNumberType()); } try (JsonParser p = createParser(jsonFactory(), mode, " 0.25 ")) { Float value = 0.25f; assertToken(JsonToken.VALUE_NUMBER_FLOAT, p.nextToken()); Object nr = p.getNumberValueDeferred(); assertEquals(String.class, nr.getClass()); assertEquals(value.toString(), nr); // But if forced to, we'll get BigInteger assertEquals(value, p.getFloatValue()); assertEquals(value, p.getNumberValueDeferred()); assertEquals(NumberType.FLOAT, p.getNumberType()); } } }
NumberDeferredReadTest
java
spring-projects__spring-framework
spring-core/src/test/java/org/springframework/util/MethodInvokerTests.java
{ "start": 5280, "end": 6242 }
class ____ { public static int _staticField1; public int _field1 = 0; public int method1() { return ++_field1; } public static int staticMethod1() { return ++TestClass1._staticField1; } public static void voidRetvalMethod() { } public static void nullArgument(Object arg) { } public static void intArgument(int arg) { } public static void intArguments(int[] arg) { } public static String supertypes(Collection<?> c, Integer i) { return i.toString(); } public static String supertypes(Collection<?> c, List<?> l, String s) { return s; } public static String supertypes2(Collection<?> c, List<?> l, Integer i) { return i.toString(); } public static String supertypes2(Collection<?> c, List<?> l, String s, Integer i) { return s; } public static String supertypes2(Collection<?> c, List<?> l, String s, String s2) { return s; } } @SuppressWarnings("unused") public static
TestClass1
java
apache__hadoop
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterface.java
{ "start": 2412, "end": 4203 }
class ____ { /** * Sets the timeout to use when making requests to the storage service. * <p> * The server timeout interval begins at the time that the complete request * has been received by the service, and the server begins processing the * response. If the timeout interval elapses before the response is returned * to the client, the operation times out. The timeout interval resets with * each retry, if the request is retried. * * The default timeout interval for a request made via the service client is * 90 seconds. You can change this value on the service client by setting this * property, so that all subsequent requests made via the service client will * use the new timeout interval. You can also change this value for an * individual request, by setting the * {@link com.microsoft.azure.storage.RequestOptions#timeoutIntervalInMs} * property. * * If you are downloading a large blob, you should increase the value of the * timeout beyond the default value. * * @param timeoutInMs * The timeout, in milliseconds, to use when making requests to the * storage service. */ public abstract void setTimeoutInMs(int timeoutInMs); /** * Sets the RetryPolicyFactory object to use when making service requests. * * @param retryPolicyFactory * the RetryPolicyFactory object to use when making service requests. */ public abstract void setRetryPolicyFactory( final RetryPolicyFactory retryPolicyFactory); /** * Creates a new Blob service client. * * @param account cloud storage account. */ public abstract void createBlobClient(CloudStorageAccount account); /** * Creates an instance of the <code>CloudBlobClient</code>
StorageInterface
java
elastic__elasticsearch
server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java
{ "start": 2873, "end": 26168 }
class ____ extends ESIntegTestCase { @Override protected boolean addMockInternalEngine() { // testRecoverBrokenIndexMetadata replies on the flushing on shutdown behavior which can be randomly disabled in MockInternalEngine. return false; } public void testMappingMetadataParsed() throws Exception { logger.info("--> starting 1 nodes"); internalCluster().startNode(); logger.info("--> creating test index, with meta routing"); indicesAdmin().prepareCreate("test") .setMapping( XContentFactory.jsonBuilder() .startObject() .startObject("_doc") .startObject("_routing") .field("required", true) .endObject() .endObject() .endObject() ) .get(); logger.info("--> verify meta _routing required exists"); MappingMetadata mappingMd = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .get() .getState() .metadata() .getProject() .index("test") .mapping(); assertThat(mappingMd.routingRequired(), equalTo(true)); logger.info("--> restarting nodes..."); internalCluster().fullRestart(); logger.info("--> waiting for yellow status"); ensureYellow(); logger.info("--> verify meta _routing required exists"); mappingMd = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().getProject().index("test").mapping(); assertThat(mappingMd.routingRequired(), equalTo(true)); } public void testSimpleOpenClose() throws Exception { logger.info("--> starting 2 nodes"); internalCluster().startNodes(2); logger.info("--> creating test index"); createIndex("test"); NumShards test = getNumShards("test"); logger.info("--> waiting for green status"); ensureGreen(); ClusterStateResponse stateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); assertThat(stateResponse.getState().metadata().getProject().index("test").getState(), equalTo(IndexMetadata.State.OPEN)); assertThat(stateResponse.getState().routingTable().index("test").size(), equalTo(test.numPrimaries)); assertThat( stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(), 
equalTo(test.totalNumShards) ); logger.info("--> indexing a simple document"); prepareIndex("test").setId("1").setSource("field1", "value1").get(); logger.info("--> closing test index..."); assertAcked(indicesAdmin().prepareClose("test")); stateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); assertThat(stateResponse.getState().metadata().getProject().index("test").getState(), equalTo(IndexMetadata.State.CLOSE)); assertThat(stateResponse.getState().routingTable().index("test"), notNullValue()); logger.info("--> verifying that the state is green"); ensureGreen(); logger.info("--> trying to index into a closed index ..."); try { prepareIndex("test").setId("1").setSource("field1", "value1").get(); fail(); } catch (IndexClosedException e) { // all is well } logger.info("--> creating another index (test2) by indexing into it"); prepareIndex("test2").setId("1").setSource("field1", "value1").get(); logger.info("--> verifying that the state is green"); ensureGreen(); logger.info("--> opening the first index again..."); assertAcked(indicesAdmin().prepareOpen("test")); logger.info("--> verifying that the state is green"); ensureGreen(); stateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); assertThat(stateResponse.getState().metadata().getProject().index("test").getState(), equalTo(IndexMetadata.State.OPEN)); assertThat(stateResponse.getState().routingTable().index("test").size(), equalTo(test.numPrimaries)); assertThat( stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(), equalTo(test.totalNumShards) ); logger.info("--> trying to get the indexed document on the first index"); GetResponse getResponse = client().prepareGet("test", "1").get(); assertThat(getResponse.isExists(), equalTo(true)); logger.info("--> closing test index..."); assertAcked(indicesAdmin().prepareClose("test")); stateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); 
assertThat(stateResponse.getState().metadata().getProject().index("test").getState(), equalTo(IndexMetadata.State.CLOSE)); assertThat(stateResponse.getState().routingTable().index("test"), notNullValue()); logger.info("--> restarting nodes..."); internalCluster().fullRestart(); logger.info("--> waiting for two nodes and green status"); ensureGreen(); stateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); assertThat(stateResponse.getState().metadata().getProject().index("test").getState(), equalTo(IndexMetadata.State.CLOSE)); assertThat(stateResponse.getState().routingTable().index("test"), notNullValue()); logger.info("--> trying to index into a closed index ..."); try { prepareIndex("test").setId("1").setSource("field1", "value1").get(); fail(); } catch (IndexClosedException e) { // all is well } logger.info("--> opening index..."); indicesAdmin().prepareOpen("test").get(); logger.info("--> waiting for green status"); ensureGreen(); stateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); assertThat(stateResponse.getState().metadata().getProject().index("test").getState(), equalTo(IndexMetadata.State.OPEN)); assertThat(stateResponse.getState().routingTable().index("test").size(), equalTo(test.numPrimaries)); assertThat( stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(), equalTo(test.totalNumShards) ); logger.info("--> trying to get the indexed document on the first round (before close and shutdown)"); getResponse = client().prepareGet("test", "1").get(); assertThat(getResponse.isExists(), equalTo(true)); logger.info("--> indexing a simple document"); prepareIndex("test").setId("2").setSource("field1", "value1").get(); } public void testJustMasterNode() throws Exception { logger.info("--> cleaning nodes"); logger.info("--> starting 1 master node non data"); internalCluster().startNode(nonDataNode()); logger.info("--> create an index"); 
indicesAdmin().prepareCreate("test").setWaitForActiveShards(ActiveShardCount.NONE).get(); logger.info("--> restarting master node"); internalCluster().fullRestart(new RestartCallback() { @Override public Settings onNodeStopped(String nodeName) { return nonDataNode(); } }); logger.info("--> waiting for test index to be created"); ClusterHealthResponse health = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setIndices("test") .get(); assertThat(health.isTimedOut(), equalTo(false)); logger.info("--> verify we have an index"); ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setIndices("test").get(); assertThat(clusterStateResponse.getState().metadata().getProject().hasIndex("test"), equalTo(true)); } public void testJustMasterNodeAndJustDataNode() { logger.info("--> cleaning nodes"); logger.info("--> starting 1 master node non data"); internalCluster().startMasterOnlyNode(); internalCluster().startDataOnlyNode(); logger.info("--> create an index"); indicesAdmin().prepareCreate("test").get(); prepareIndex("test").setSource("field1", "value1").get(); } public void testTwoNodesSingleDoc() throws Exception { logger.info("--> cleaning nodes"); logger.info("--> starting 2 nodes"); internalCluster().startNodes(2); logger.info("--> indexing a simple document"); prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get(); logger.info("--> waiting for green status"); ClusterHealthResponse health = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForNodes("2") .get(); assertThat(health.isTimedOut(), equalTo(false)); logger.info("--> verify 1 doc in the index"); for (int i = 0; i < 10; i++) { assertHitCount(prepareSearch().setQuery(matchAllQuery()), 1L); } logger.info("--> closing test index..."); assertAcked(indicesAdmin().prepareClose("test")); ClusterStateResponse stateResponse = 
clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); assertThat(stateResponse.getState().metadata().getProject().index("test").getState(), equalTo(IndexMetadata.State.CLOSE)); assertThat(stateResponse.getState().routingTable().index("test"), notNullValue()); logger.info("--> opening the index..."); indicesAdmin().prepareOpen("test").get(); logger.info("--> waiting for green status"); health = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForNodes("2") .get(); assertThat(health.isTimedOut(), equalTo(false)); logger.info("--> verify 1 doc in the index"); assertHitCount(prepareSearch().setQuery(matchAllQuery()), 1L); for (int i = 0; i < 10; i++) { assertHitCount(prepareSearch().setQuery(matchAllQuery()), 1L); } } /** * This test ensures that when an index deletion takes place while a node is offline, when that * node rejoins the cluster, it deletes the index locally instead of importing it as a dangling index. */ public void testIndexDeletionWhenNodeRejoins() throws Exception { final String indexName = "test-index-del-on-node-rejoin-idx"; final int numNodes = 2; final List<String> nodes; logger.info("--> starting a cluster with " + numNodes + " nodes"); nodes = internalCluster().startNodes( numNodes, Settings.builder().put(IndexGraveyard.SETTING_MAX_TOMBSTONES.getKey(), randomIntBetween(10, 100)).build() ); logger.info("--> create an index"); createIndex(indexName); logger.info("--> waiting for green status"); ensureGreen(); final String indexUUID = resolveIndex(indexName).getUUID(); logger.info("--> restart a random date node, deleting the index in between stopping and restarting"); internalCluster().restartRandomDataNode(new RestartCallback() { @Override public Settings onNodeStopped(final String nodeName) throws Exception { nodes.remove(nodeName); logger.info("--> stopped node[{}], remaining nodes {}", nodeName, nodes); assert nodes.size() > 0; final String otherNode = nodes.get(0); 
logger.info("--> delete index and verify it is deleted"); final Client client = client(otherNode); client.admin().indices().prepareDelete(indexName).get(); assertFalse(indexExists(indexName, client)); logger.info("--> index deleted"); return super.onNodeStopped(nodeName); } }); logger.info("--> wait until all nodes are back online"); clusterAdmin().health( new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, new String[] {}).waitForEvents(Priority.LANGUID) .waitForNodes(Integer.toString(numNodes)) ).actionGet(); logger.info("--> waiting for green status"); ensureGreen(); logger.info("--> verify that the deleted index is removed from the cluster and not reimported as dangling by the restarted node"); assertFalse(indexExists(indexName)); assertBusy(() -> { final NodeEnvironment nodeEnv = internalCluster().getInstance(NodeEnvironment.class); try { assertFalse("index folder " + indexUUID + " should be deleted", nodeEnv.availableIndexFolders().contains(indexUUID)); } catch (IOException e) { logger.error("Unable to retrieve available index folders from the node", e); fail("Unable to retrieve available index folders from the node"); } }); } /** * This test really tests worst case scenario where we have a broken setting or any setting that prevents an index from being * allocated in our metadata that we recover. In that case we now have the ability to check the index on local recovery from disk * if it is sane and if we can successfully create an IndexService. This also includes plugins etc. 
*/ public void testRecoverBrokenIndexMetadata() throws Exception { logger.info("--> starting one node"); internalCluster().startNode(); logger.info("--> indexing a simple document"); prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get(); logger.info("--> waiting for green status"); if (usually()) { ensureYellow(); } else { internalCluster().startNode(); clusterAdmin().health( new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, new String[] {}).waitForGreenStatus() .waitForEvents(Priority.LANGUID) .waitForNoRelocatingShards(true) .waitForNodes("2") ).actionGet(); } ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final IndexMetadata metadata = state.getMetadata().getProject().index("test"); final IndexMetadata.Builder brokenMeta = IndexMetadata.builder(metadata) .settings( Settings.builder() .put(metadata.getSettings()) .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersions.MINIMUM_COMPATIBLE) // this is invalid but should be archived .put("index.similarity.BM25.type", "boolean") // this one is not validated ahead of time and breaks allocation .put("index.analysis.filter.myCollator.type", "icu_collation") ); restartNodesOnBrokenClusterState(ClusterState.builder(state).metadata(Metadata.builder(state.getMetadata()).put(brokenMeta))); // check that the cluster does not keep reallocating shards assertBusy(() -> { final RoutingTable routingTable = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().routingTable(); final IndexRoutingTable indexRoutingTable = routingTable.index("test"); assertNotNull(indexRoutingTable); for (int i = 0; i < indexRoutingTable.size(); i++) { IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(i); assertTrue(shardRoutingTable.primaryShard().unassigned()); assertEquals( UnassignedInfo.AllocationStatus.DECIDERS_NO, shardRoutingTable.primaryShard().unassignedInfo().lastAllocationStatus() ); 
assertThat(shardRoutingTable.primaryShard().unassignedInfo().failedAllocations(), greaterThan(0)); } }, 60, TimeUnit.SECONDS); indicesAdmin().prepareClose("test").get(); state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertEquals(IndexMetadata.State.CLOSE, state.getMetadata().getProject().index(metadata.getIndex()).getState()); assertEquals( "boolean", state.getMetadata().getProject().index(metadata.getIndex()).getSettings().get("archived.index.similarity.BM25.type") ); // try to open it with the broken setting - fail again! ElasticsearchException ex = expectThrows(ElasticsearchException.class, indicesAdmin().prepareOpen("test")); assertEquals(ex.getMessage(), "Failed to verify index " + metadata.getIndex()); assertNotNull(ex.getCause()); assertEquals(IllegalArgumentException.class, ex.getCause().getClass()); assertEquals(ex.getCause().getMessage(), "Unknown filter type [icu_collation] for [myCollator]"); } /** * This test really tests worst case scenario where we have a missing analyzer setting. * In that case we now have the ability to check the index on local recovery from disk * if it is sane and if we can successfully create an IndexService. * This also includes plugins etc. 
*/ public void testRecoverMissingAnalyzer() throws Exception { logger.info("--> starting one node"); internalCluster().startNode(); prepareCreate("test").setSettings( Settings.builder().put("index.analysis.analyzer.test.tokenizer", "standard").put("index.number_of_shards", "1") ).setMapping(""" { "properties": { "field1": { "type": "text", "analyzer": "test" } } }""").get(); logger.info("--> indexing a simple document"); prepareIndex("test").setId("1").setSource("field1", "value one").setRefreshPolicy(IMMEDIATE).get(); logger.info("--> waiting for green status"); if (usually()) { ensureYellow(); } else { internalCluster().startNode(); clusterAdmin().health( new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, new String[] {}).waitForGreenStatus() .waitForEvents(Priority.LANGUID) .waitForNoRelocatingShards(true) .waitForNodes("2") ).actionGet(); } ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final IndexMetadata metadata = state.getMetadata().getProject().index("test"); final IndexMetadata.Builder brokenMeta = IndexMetadata.builder(metadata) .settings(metadata.getSettings().filter((s) -> "index.analysis.analyzer.test.tokenizer".equals(s) == false)); restartNodesOnBrokenClusterState(ClusterState.builder(state).metadata(Metadata.builder(state.getMetadata()).put(brokenMeta))); // check that the cluster does not keep reallocating shards assertBusy(() -> { final RoutingTable routingTable = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().routingTable(); final IndexRoutingTable indexRoutingTable = routingTable.index("test"); assertNotNull(indexRoutingTable); for (int i = 0; i < indexRoutingTable.size(); i++) { IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(i); assertTrue(shardRoutingTable.primaryShard().unassigned()); assertEquals( UnassignedInfo.AllocationStatus.DECIDERS_NO, shardRoutingTable.primaryShard().unassignedInfo().lastAllocationStatus() ); 
assertThat(shardRoutingTable.primaryShard().unassignedInfo().failedAllocations(), greaterThan(0)); } }, 60, TimeUnit.SECONDS); indicesAdmin().prepareClose("test").get(); // try to open it with the broken setting - fail again! ElasticsearchException ex = expectThrows(ElasticsearchException.class, indicesAdmin().prepareOpen("test")); assertEquals(ex.getMessage(), "Failed to verify index " + metadata.getIndex()); assertNotNull(ex.getCause()); assertEquals(MapperParsingException.class, ex.getCause().getClass()); assertThat(ex.getCause().getMessage(), containsString("analyzer [test] has not been configured in mappings")); } public void testArchiveBrokenClusterSettings() throws Exception { logger.info("--> starting one node"); internalCluster().startNode(); prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get(); logger.info("--> waiting for green status"); if (usually()) { ensureYellow(); } else { internalCluster().startNode(); clusterAdmin().health( new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, new String[] {}).waitForGreenStatus() .waitForEvents(Priority.LANGUID) .waitForNoRelocatingShards(true) .waitForNodes("2") ).actionGet(); } ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final Metadata metadata = state.getMetadata(); final Metadata brokenMeta = Metadata.builder(metadata) .persistentSettings( Settings.builder() .put(metadata.persistentSettings()) .put("this.is.unknown", true) .put(ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), "broken") .build() ) .build(); restartNodesOnBrokenClusterState(ClusterState.builder(state).metadata(brokenMeta)); ensureYellow("test"); // wait for state recovery state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertEquals("true", state.metadata().persistentSettings().get("archived.this.is.unknown")); assertEquals( "broken", state.metadata().persistentSettings().get("archived." 
+ ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey()) ); // delete these settings updateClusterSettings(Settings.builder().putNull("archived.*")); state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertNull(state.metadata().persistentSettings().get("archived.this.is.unknown")); assertNull( state.metadata().persistentSettings().get("archived." + ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey()) ); assertHitCount(prepareSearch().setQuery(matchAllQuery()), 1L); } }
GatewayIndexStateIT
java
assertj__assertj-core
assertj-core/src/test/java/org/assertj/core/api/GenericComparableAssertBaseTest.java
{ "start": 795, "end": 1192 }
class ____ extends ComparableAssertBaseTest<GenericComparableAssert<Integer>, Integer> { @Override protected GenericComparableAssert<Integer> create_assertions() { return new GenericComparableAssert<>(8); } @Override protected Comparables getComparables(GenericComparableAssert<Integer> someAssertions) { return someAssertions.comparables; } }
GenericComparableAssertBaseTest
java
elastic__elasticsearch
server/src/main/java/org/elasticsearch/action/downsample/DownsampleTask.java
{ "start": 876, "end": 1628 }
class ____ extends CancellableTask { private static final String ROLLUP_FIELD_NAME = "rollup"; private final String downsampleIndex; private final DownsampleConfig config; public DownsampleTask( long id, String type, String action, TaskId parentTask, String downsampleIndex, DownsampleConfig config, Map<String, String> headers ) { super(id, type, action, ROLLUP_FIELD_NAME + "_" + downsampleIndex, parentTask, headers); this.downsampleIndex = downsampleIndex; this.config = config; } public String getDownsampleIndex() { return downsampleIndex; } public DownsampleConfig config() { return config; } }
DownsampleTask
java
apache__flink
flink-table/flink-table-common/src/main/java/org/apache/flink/table/factories/FactoryUtil.java
{ "start": 3258, "end": 24229 }
class ____ { private static final Logger LOG = LoggerFactory.getLogger(FactoryUtil.class); /** * Describes the property version. This can be used for backwards compatibility in case the * property format changes. */ public static final ConfigOption<Integer> PROPERTY_VERSION = ConfigOptions.key("property-version") .intType() .defaultValue(1) .withDescription( "Version of the overall property design. This option is meant for future backwards compatibility."); public static final ConfigOption<String> CONNECTOR = ConfigOptions.key("connector") .stringType() .noDefaultValue() .withDescription( "Uniquely identifies the connector of a dynamic table that is used for accessing data in " + "an external system. Its value is used during table source and table sink discovery."); public static final ConfigOption<String> PROVIDER = ConfigOptions.key("provider") .stringType() .noDefaultValue() .withDescription( "Uniquely identifies the provider of a model that is used for model inference." + " Its value is used during model provider discovery."); public static final ConfigOption<String> FORMAT = ConfigOptions.key("format") .stringType() .noDefaultValue() .withDescription( "Defines the format identifier for encoding data. " + "The identifier is used to discover a suitable format factory."); public static final ConfigOption<Integer> SINK_PARALLELISM = ConfigOptions.key("sink.parallelism") .intType() .noDefaultValue() .withDescription( "Defines a custom parallelism for the sink. 
" + "By default, if this option is not defined, the planner will derive the parallelism " + "for each statement individually by also considering the global configuration."); public static final ConfigOption<List<String>> SQL_GATEWAY_ENDPOINT_TYPE = ConfigOptions.key("sql-gateway.endpoint.type") .stringType() .asList() .defaultValues("rest") .withDescription("Specify the endpoints that are used."); public static final ConfigOption<Integer> SOURCE_PARALLELISM = ConfigOptions.key("scan.parallelism") .intType() .noDefaultValue() .withDescription( "Defines a custom parallelism for the source. " + "By default, if this option is not defined, the planner will derive the parallelism " + "for each statement individually by also considering the global configuration."); public static final ConfigOption<WatermarkEmitStrategy> WATERMARK_EMIT_STRATEGY = ConfigOptions.key("scan.watermark.emit.strategy") .enumType(WatermarkEmitStrategy.class) .defaultValue(WatermarkEmitStrategy.ON_PERIODIC) .withDescription( "The strategy for emitting watermark. " + "'on-event' means emitting watermark for every event. " + "'on-periodic' means emitting watermark periodically. 
" + "The default strategy is 'on-periodic'"); public static final ConfigOption<String> WATERMARK_ALIGNMENT_GROUP = ConfigOptions.key("scan.watermark.alignment.group") .stringType() .noDefaultValue() .withDescription("The watermark alignment group name."); public static final ConfigOption<Duration> WATERMARK_ALIGNMENT_MAX_DRIFT = ConfigOptions.key("scan.watermark.alignment.max-drift") .durationType() .noDefaultValue() .withDescription("The max allowed watermark drift."); public static final ConfigOption<Duration> WATERMARK_ALIGNMENT_UPDATE_INTERVAL = ConfigOptions.key("scan.watermark.alignment.update-interval") .durationType() .defaultValue(Duration.ofMillis(1000)) .withDescription("Update interval to align watermark."); public static final ConfigOption<Duration> SOURCE_IDLE_TIMEOUT = ConfigOptions.key("scan.watermark.idle-timeout") .durationType() .noDefaultValue() .withDescription( "When a source do not receive any elements for the timeout time, " + "it will be marked as temporarily idle. This allows downstream " + "tasks to advance their watermarks without the need to wait for " + "watermarks from this source while it is idle."); public static final ConfigOption<String> WORKFLOW_SCHEDULER_TYPE = ConfigOptions.key("workflow-scheduler.type") .stringType() .noDefaultValue() .withDescription( "Specify the workflow scheduler type that is used for materialized table."); /** * Suffix for keys of {@link ConfigOption} in case a connector requires multiple formats (e.g. * for both key and value). * * <p>See {@link #createTableFactoryHelper(DynamicTableFactory, DynamicTableFactory.Context)} * for more information. */ public static final String FORMAT_SUFFIX = ".format"; /** * The placeholder symbol to be used for keys of options which can be templated. See {@link * Factory} for details. 
*/ public static final String PLACEHOLDER_SYMBOL = "#"; private static final Set<ConfigOption<?>> watermarkOptionSet; static { Set<ConfigOption<?>> set = new HashSet<>(); set.add(WATERMARK_EMIT_STRATEGY); set.add(WATERMARK_ALIGNMENT_GROUP); set.add(WATERMARK_ALIGNMENT_MAX_DRIFT); set.add(WATERMARK_ALIGNMENT_UPDATE_INTERVAL); set.add(SOURCE_IDLE_TIMEOUT); watermarkOptionSet = Collections.unmodifiableSet(set); } /** * Creates a {@link DynamicTableSource} from a {@link CatalogTable}. * * <p>If {@param preferredFactory} is passed, the table source is created from that factory. * Otherwise, an attempt is made to discover a matching factory using Java SPI (see {@link * Factory} for details). */ public static DynamicTableSource createDynamicTableSource( @Nullable DynamicTableSourceFactory preferredFactory, ObjectIdentifier objectIdentifier, ResolvedCatalogTable catalogTable, Map<String, String> enrichmentOptions, ReadableConfig configuration, ClassLoader classLoader, boolean isTemporary) { final DefaultDynamicTableContext context = new DefaultDynamicTableContext( objectIdentifier, catalogTable, enrichmentOptions, configuration, classLoader, isTemporary); try { final DynamicTableSourceFactory factory = preferredFactory != null ? preferredFactory : discoverTableFactory(DynamicTableSourceFactory.class, context); return factory.createDynamicTableSource(context); } catch (Throwable t) { throw new ValidationException( String.format( "Unable to create a source for reading table '%s'.\n\n" + "Table options are:\n\n" + "%s", objectIdentifier.asSummaryString(), catalogTable.getOptions().entrySet().stream() .map(e -> stringifyOption(e.getKey(), e.getValue())) .sorted() .collect(Collectors.joining("\n"))), t); } } /** * Creates a {@link DynamicTableSink} from a {@link CatalogTable}. * * <p>If {@param preferredFactory} is passed, the table sink is created from that factory. 
* Otherwise, an attempt is made to discover a matching factory using Java SPI (see {@link * Factory} for details). */ public static DynamicTableSink createDynamicTableSink( @Nullable DynamicTableSinkFactory preferredFactory, ObjectIdentifier objectIdentifier, ResolvedCatalogTable catalogTable, Map<String, String> enrichmentOptions, ReadableConfig configuration, ClassLoader classLoader, boolean isTemporary) { final DefaultDynamicTableContext context = new DefaultDynamicTableContext( objectIdentifier, catalogTable, enrichmentOptions, configuration, classLoader, isTemporary); try { final DynamicTableSinkFactory factory = preferredFactory != null ? preferredFactory : discoverTableFactory(DynamicTableSinkFactory.class, context); return factory.createDynamicTableSink(context); } catch (Throwable t) { throw new ValidationException( String.format( "Unable to create a sink for writing table '%s'.\n\n" + "Table options are:\n\n" + "%s", objectIdentifier.asSummaryString(), catalogTable.getOptions().entrySet().stream() .map(e -> stringifyOption(e.getKey(), e.getValue())) .sorted() .collect(Collectors.joining("\n"))), t); } } /** * Creates a {@link ModelProvider} from a {@link ResolvedCatalogModel}. * * <p>If {@param preferredFactory} is passed, the model provider is created from that factory. * Otherwise, an attempt is made to discover a matching factory using Java SPI (see {@link * Factory} for details). */ public static ModelProvider createModelProvider( @Nullable ModelProviderFactory preferredFactory, ObjectIdentifier objectIdentifier, ResolvedCatalogModel catalogModel, ReadableConfig configuration, ClassLoader classLoader, boolean isTemporary) { final DefaultModelProviderContext context = new DefaultModelProviderContext( objectIdentifier, catalogModel, configuration, classLoader, isTemporary); try { final ModelProviderFactory factory = preferredFactory != null ? 
preferredFactory : discoverModelProviderFactory(context); return factory.createModelProvider(context); } catch (Throwable t) { throw new ValidationException( String.format( "Unable to create a model provider for model '%s'.\n\n" + "Model options are:\n\n" + "%s", objectIdentifier.asSummaryString(), catalogModel.getOptions().entrySet().stream() .map(e -> stringifyOption(e.getKey(), e.getValue())) .sorted() .collect(Collectors.joining("\n"))), t); } } /** * Creates a utility that helps validating options for a {@link CatalogFactory}. * * <p>Note: This utility checks for left-over options in the final step. */ public static CatalogFactoryHelper createCatalogFactoryHelper( CatalogFactory factory, CatalogFactory.Context context) { return new CatalogFactoryHelper(factory, context); } /** * Creates a utility that helps validating options for a {@link CatalogStoreFactory}. * * <p>Note: This utility checks for left-over options in the final step. */ public static CatalogStoreFactoryHelper createCatalogStoreFactoryHelper( CatalogStoreFactory factory, CatalogStoreFactory.Context context) { return new CatalogStoreFactoryHelper(factory, context); } /** * Creates a utility that helps validating options for a {@link ModuleFactory}. * * <p>Note: This utility checks for left-over options in the final step. */ public static ModuleFactoryHelper createModuleFactoryHelper( ModuleFactory factory, ModuleFactory.Context context) { return new ModuleFactoryHelper(factory, context); } /** * Creates a utility that helps in discovering formats, merging options with {@link * DynamicTableFactory.Context#getEnrichmentOptions()} and validating them all for a {@link * DynamicTableFactory}. 
* * <p>The following example sketches the usage: * * <pre>{@code * // in createDynamicTableSource() * helper = FactoryUtil.createTableFactoryHelper(this, context); * * keyFormat = helper.discoverDecodingFormat(DeserializationFormatFactory.class, KEY_FORMAT); * valueFormat = helper.discoverDecodingFormat(DeserializationFormatFactory.class, VALUE_FORMAT); * * helper.validate(); * * ... // construct connector with discovered formats * }</pre> * * <p>Note: The format option parameter of {@link * TableFactoryHelper#discoverEncodingFormat(Class, ConfigOption)} and {@link * TableFactoryHelper#discoverDecodingFormat(Class, ConfigOption)} must be {@link #FORMAT} or * end with {@link #FORMAT_SUFFIX}. The discovery logic will replace 'format' with the factory * identifier value as the format prefix. For example, assuming the identifier is 'json', if the * format option key is 'format', then the format prefix is 'json.'. If the format option key is * 'value.format', then the format prefix is 'value.json'. The format prefix is used to project * the options for the format factory. * * <p>Note: When created, this utility merges the options from {@link * DynamicTableFactory.Context#getEnrichmentOptions()} using {@link * DynamicTableFactory#forwardOptions()}. When invoking {@link TableFactoryHelper#validate()}, * this utility checks for left-over options in the final step. */ public static TableFactoryHelper createTableFactoryHelper( DynamicTableFactory factory, DynamicTableFactory.Context context) { return new TableFactoryHelper(factory, context); } /** * Creates a utility that helps validate options for a {@link ModelProviderFactory}. * * <p>Note: This utility checks for left-over options in the final step. 
*/ public static ModelProviderFactoryHelper createModelProviderFactoryHelper( ModelProviderFactory factory, ModelProviderFactory.Context context) { return new ModelProviderFactoryHelper(factory, context); } /** * Attempts to discover an appropriate catalog factory and creates an instance of the catalog. * * <p>This first uses the legacy {@link TableFactory} stack to discover a matching {@link * CatalogFactory}. If none is found, it falls back to the new stack using {@link Factory} * instead. */ public static Catalog createCatalog( String catalogName, Map<String, String> options, ReadableConfig configuration, ClassLoader classLoader) { final DefaultCatalogContext discoveryContext = new DefaultCatalogContext(catalogName, options, configuration, classLoader); try { final CatalogFactory factory = getCatalogFactory(discoveryContext); // The type option is only used for discovery, we don't actually want to forward it // to the catalog factory itself. final Map<String, String> factoryOptions = options.entrySet().stream() .filter( entry -> !CommonCatalogOptions.CATALOG_TYPE .key() .equals(entry.getKey())) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); final DefaultCatalogContext context = new DefaultCatalogContext( catalogName, factoryOptions, configuration, classLoader); return factory.createCatalog(context); } catch (Throwable t) { throw new ValidationException( String.format( "Unable to create catalog '%s'.%n%nCatalog options are:%n%s", catalogName, options.entrySet().stream() .map( optionEntry -> stringifyOption( optionEntry.getKey(), optionEntry.getValue())) .sorted() .collect(Collectors.joining("\n"))), t); } } /** * Discovers a matching module factory and creates an instance of it. * * <p>This first uses the legacy {@link TableFactory} stack to discover a matching {@link * ModuleFactory}. If none is found, it falls back to the new stack using {@link Factory} * instead. 
*/ public static Module createModule( String moduleName, Map<String, String> options, ReadableConfig configuration, ClassLoader classLoader) { if (options.containsKey(MODULE_TYPE.key())) { throw new ValidationException( String.format( "Option '%s' = '%s' is not supported since module name " + "is used to find module", MODULE_TYPE.key(), options.get(MODULE_TYPE.key()))); } final DefaultModuleContext discoveryContext = new DefaultModuleContext(options, configuration, classLoader); try { final ModuleFactory factory = discoverFactory( ((ModuleFactory.Context) discoveryContext).getClassLoader(), ModuleFactory.class, moduleName); final DefaultModuleContext context = new DefaultModuleContext(options, configuration, classLoader); return factory.createModule(context); } catch (Throwable t) { throw new ValidationException( String.format( "Unable to create module '%s'.%n%nModule options are:%n%s", moduleName, options.entrySet().stream() .map( optionEntry -> stringifyOption( optionEntry.getKey(), optionEntry.getValue())) .sorted() .collect(Collectors.joining("\n"))), t); } } /** * Discovers a factory using the given factory base
FactoryUtil
java
elastic__elasticsearch
x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlParser.java
{ "start": 878, "end": 2622 }
class ____ { private static final Logger log = LogManager.getLogger(KqlParser.class); public QueryBuilder parseKqlQuery(String kqlQuery, KqlParsingContext kqlParserContext) { log.trace("Parsing KQL query: {}", kqlQuery); return invokeParser(kqlQuery, kqlParserContext, KqlBaseParser::topLevelQuery, KqlAstBuilder::toQueryBuilder); } private <T> T invokeParser( String kqlQuery, KqlParsingContext kqlParsingContext, Function<KqlBaseParser, ParserRuleContext> parseFunction, BiFunction<KqlAstBuilder, ParserRuleContext, T> visitor ) { KqlBaseLexer lexer = new KqlBaseLexer(CharStreams.fromString(kqlQuery)); lexer.removeErrorListeners(); lexer.addErrorListener(ERROR_LISTENER); CommonTokenStream tokenStream = new CommonTokenStream(lexer); KqlBaseParser parser = new KqlBaseParser(tokenStream); parser.removeErrorListeners(); parser.addErrorListener(ERROR_LISTENER); parser.getInterpreter().setPredictionMode(PredictionMode.SLL); ParserRuleContext tree = parseFunction.apply(parser); log.trace(() -> Strings.format("Parse tree: %s", tree.toStringTree())); return visitor.apply(new KqlAstBuilder(kqlParsingContext), tree); } private static final BaseErrorListener ERROR_LISTENER = new BaseErrorListener() { @Override public void syntaxError( Recognizer<?, ?> recognizer, Object offendingSymbol, int line, int charPositionInLine, String message, RecognitionException e ) { throw new KqlParsingException(message, line, charPositionInLine, e); } }; }
KqlParser
java
quarkusio__quarkus
extensions/vertx/runtime/src/main/java/io/quarkus/vertx/runtime/jackson/JsonUtil.java
{ "start": 1784, "end": 4112 }
enum ____ {@code String}</li> * </ul> * * @param val java type * @return wrapped type or {@code val} if not applicable. */ public static Object wrapJsonValue(Object val) { if (val == null) { return null; } // perform wrapping if (val instanceof Map) { val = new JsonObject((Map) val); } else if (val instanceof List) { val = new JsonArray((List) val); } else if (val instanceof Instant) { val = ISO_INSTANT.format((Instant) val); } else if (val instanceof byte[]) { val = BASE64_ENCODER.encodeToString((byte[]) val); } else if (val instanceof Buffer) { val = BASE64_ENCODER.encodeToString(((Buffer) val).getBytes()); } else if (val instanceof Enum) { val = ((Enum) val).name(); } return val; } @SuppressWarnings("unchecked") public static Object checkAndCopy(Object val) { if (val == null) { // OK } else if (val instanceof Number) { // OK } else if (val instanceof Boolean) { // OK } else if (val instanceof String) { // OK } else if (val instanceof Character) { // OK } else if (val instanceof CharSequence) { // CharSequences are not immutable, so we force toString() to become immutable val = val.toString(); } else if (val instanceof Shareable) { // Shareable objects know how to copy themselves, this covers: // JsonObject, JsonArray or any user defined type that can shared across the cluster val = ((Shareable) val).copy(); } else if (val instanceof Map) { val = (new JsonObject((Map) val)).copy(); } else if (val instanceof List) { val = (new JsonArray((List) val)).copy(); } else if (val instanceof Buffer) { val = ((Buffer) val).copy(); } else if (val instanceof byte[]) { // OK } else if (val instanceof Instant) { // OK } else if (val instanceof Enum) { // OK } else { throw new IllegalStateException("Illegal type in Json: " + val.getClass()); } return val; } }
name
java
quarkusio__quarkus
extensions/quartz/deployment/src/test/java/io/quarkus/quartz/test/SuccessfulExecutionTest.java
{ "start": 1171, "end": 1308 }
class ____ { @Scheduled(identity = "successful_schedule", every = "0.2s") void successfulSchedule() { } } }
Jobs
java
spring-projects__spring-security
oauth2/oauth2-authorization-server/src/main/java/org/springframework/security/oauth2/server/authorization/JdbcOAuth2AuthorizationService.java
{ "start": 33358, "end": 38435 }
class ____ implements Function<OAuth2Authorization, List<SqlParameterValue>> { private AbstractOAuth2AuthorizationParametersMapper() { } @Override public List<SqlParameterValue> apply(OAuth2Authorization authorization) { List<SqlParameterValue> parameters = new ArrayList<>(); parameters.add(new SqlParameterValue(Types.VARCHAR, authorization.getId())); parameters.add(new SqlParameterValue(Types.VARCHAR, authorization.getRegisteredClientId())); parameters.add(new SqlParameterValue(Types.VARCHAR, authorization.getPrincipalName())); parameters.add(new SqlParameterValue(Types.VARCHAR, authorization.getAuthorizationGrantType().getValue())); String authorizedScopes = null; if (!CollectionUtils.isEmpty(authorization.getAuthorizedScopes())) { authorizedScopes = StringUtils.collectionToDelimitedString(authorization.getAuthorizedScopes(), ","); } parameters.add(new SqlParameterValue(Types.VARCHAR, authorizedScopes)); String attributes = writeMap(authorization.getAttributes()); parameters.add(mapToSqlParameter("attributes", attributes)); String state = null; String authorizationState = authorization.getAttribute(OAuth2ParameterNames.STATE); if (StringUtils.hasText(authorizationState)) { state = authorizationState; } parameters.add(new SqlParameterValue(Types.VARCHAR, state)); OAuth2Authorization.Token<OAuth2AuthorizationCode> authorizationCode = authorization .getToken(OAuth2AuthorizationCode.class); List<SqlParameterValue> authorizationCodeSqlParameters = toSqlParameterList(AUTHORIZATION_CODE_VALUE, AUTHORIZATION_CODE_METADATA, authorizationCode); parameters.addAll(authorizationCodeSqlParameters); OAuth2Authorization.Token<OAuth2AccessToken> accessToken = authorization.getToken(OAuth2AccessToken.class); List<SqlParameterValue> accessTokenSqlParameters = toSqlParameterList(ACCESS_TOKEN_VALUE, ACCESS_TOKEN_METADATA, accessToken); parameters.addAll(accessTokenSqlParameters); String accessTokenType = null; String accessTokenScopes = null; if (accessToken != null) { 
accessTokenType = accessToken.getToken().getTokenType().getValue(); if (!CollectionUtils.isEmpty(accessToken.getToken().getScopes())) { accessTokenScopes = StringUtils.collectionToDelimitedString(accessToken.getToken().getScopes(), ","); } } parameters.add(new SqlParameterValue(Types.VARCHAR, accessTokenType)); parameters.add(new SqlParameterValue(Types.VARCHAR, accessTokenScopes)); OAuth2Authorization.Token<OidcIdToken> oidcIdToken = authorization.getToken(OidcIdToken.class); List<SqlParameterValue> oidcIdTokenSqlParameters = toSqlParameterList(OIDC_ID_TOKEN_VALUE, OIDC_ID_TOKEN_METADATA, oidcIdToken); parameters.addAll(oidcIdTokenSqlParameters); OAuth2Authorization.Token<OAuth2RefreshToken> refreshToken = authorization.getRefreshToken(); List<SqlParameterValue> refreshTokenSqlParameters = toSqlParameterList(REFRESH_TOKEN_VALUE, REFRESH_TOKEN_METADATA, refreshToken); parameters.addAll(refreshTokenSqlParameters); OAuth2Authorization.Token<OAuth2UserCode> userCode = authorization.getToken(OAuth2UserCode.class); List<SqlParameterValue> userCodeSqlParameters = toSqlParameterList(USER_CODE_VALUE, USER_CODE_METADATA, userCode); parameters.addAll(userCodeSqlParameters); OAuth2Authorization.Token<OAuth2DeviceCode> deviceCode = authorization.getToken(OAuth2DeviceCode.class); List<SqlParameterValue> deviceCodeSqlParameters = toSqlParameterList(DEVICE_CODE_VALUE, DEVICE_CODE_METADATA, deviceCode); parameters.addAll(deviceCodeSqlParameters); return parameters; } private <T extends OAuth2Token> List<SqlParameterValue> toSqlParameterList(String tokenColumnName, String tokenMetadataColumnName, OAuth2Authorization.Token<T> token) { List<SqlParameterValue> parameters = new ArrayList<>(); String tokenValue = null; Timestamp tokenIssuedAt = null; Timestamp tokenExpiresAt = null; String metadata = null; if (token != null) { tokenValue = token.getToken().getTokenValue(); if (token.getToken().getIssuedAt() != null) { tokenIssuedAt = Timestamp.from(token.getToken().getIssuedAt()); } if 
(token.getToken().getExpiresAt() != null) { tokenExpiresAt = Timestamp.from(token.getToken().getExpiresAt()); } metadata = writeMap(token.getMetadata()); } parameters.add(mapToSqlParameter(tokenColumnName, tokenValue)); parameters.add(new SqlParameterValue(Types.TIMESTAMP, tokenIssuedAt)); parameters.add(new SqlParameterValue(Types.TIMESTAMP, tokenExpiresAt)); parameters.add(mapToSqlParameter(tokenMetadataColumnName, metadata)); return parameters; } private String writeMap(Map<String, Object> data) { try { return writeValueAsString(data); } catch (Exception ex) { throw new IllegalArgumentException(ex.getMessage(), ex); } } abstract String writeValueAsString(Map<String, Object> data) throws Exception; } /** * Nested
AbstractOAuth2AuthorizationParametersMapper
java
assertj__assertj-core
assertj-core/src/test/java/org/assertj/core/util/Introspection_getProperty_Test.java
{ "start": 3267, "end": 3325 }
class ____ { public void getSurname() {} } }
VoidGetter
java
apache__flink
flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/WindowTranslationTest.java
{ "start": 4329, "end": 83750 }
class ____ {

    // ------------------------------------------------------------------------
    //  Rich Pre-Aggregation Functions
    // ------------------------------------------------------------------------

    /**
     * .reduce() does not support RichReduceFunction, since the reduce function is used internally
     * in a {@code ReducingState}.
     */
    @Test
    void testReduceWithRichReducerFails() {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<Tuple2<String, Integer>> source =
                env.fromData(Tuple2.of("hello", 1), Tuple2.of("hello", 2));

        // The rich variant must be rejected eagerly at translation time.
        assertThatThrownBy(
                        () ->
                                source.keyBy(x -> x.f0)
                                        .window(
                                                SlidingEventTimeWindows.of(
                                                        Duration.ofSeconds(1),
                                                        Duration.ofMillis(100)))
                                        .reduce(
                                                new RichReduceFunction<Tuple2<String, Integer>>() {
                                                    @Override
                                                    public Tuple2<String, Integer> reduce(
                                                            Tuple2<String, Integer> value1,
                                                            Tuple2<String, Integer> value2) {
                                                        return null;
                                                    }
                                                }))
                .isInstanceOf(UnsupportedOperationException.class);
    }

    /**
     * .aggregate() does not support RichAggregateFunction, since the AggregationFunction is used
     * internally in a {@code AggregatingState}.
     */
    @Test
    void testAggregateWithRichFunctionFails() {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<Tuple2<String, Integer>> source =
                env.fromData(Tuple2.of("hello", 1), Tuple2.of("hello", 2));

        // Same guard as above, but for the aggregate path.
        assertThatThrownBy(
                        () ->
                                source.keyBy(x -> x.f0)
                                        .window(
                                                SlidingEventTimeWindows.of(
                                                        Duration.ofSeconds(1),
                                                        Duration.ofMillis(100)))
                                        .aggregate(new DummyRichAggregationFunction<>()))
                .isInstanceOf(UnsupportedOperationException.class);
    }

    // ------------------------------------------------------------------------
    //  Merging Windows Support
    // ------------------------------------------------------------------------

    /**
     * A merging window assigner (session windows) must reject a trigger whose {@code canMerge()}
     * returns {@code false}.
     */
    @Test
    void testMergingAssignerWithNonMergingTriggerFails() {
        // verify that we check for trigger compatibility
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        WindowedStream<String, String, TimeWindow> windowedStream =
                env.fromData("Hello", "Ciao")
                        .keyBy(
                                new KeySelector<String, String>() {
                                    private static final long serialVersionUID =
                                            598309916882894293L;

                                    @Override
                                    public String getKey(String value) throws Exception {
                                        return value;
                                    }
                                })
                        .window(EventTimeSessionWindows.withGap(Duration.ofSeconds(5)));

        assertThatThrownBy(
                        () ->
                                windowedStream.trigger(
                                        new Trigger<String, TimeWindow>() {
                                            private static final long serialVersionUID =
                                                    6558046711583024443L;

                                            @Override
                                            public TriggerResult onElement(
                                                    String element,
                                                    long timestamp,
                                                    TimeWindow window,
                                                    TriggerContext ctx)
                                                    throws Exception {
                                                return null;
                                            }

                                            @Override
                                            public TriggerResult onProcessingTime(
                                                    long time,
                                                    TimeWindow window,
                                                    TriggerContext ctx)
                                                    throws Exception {
                                                return null;
                                            }

                                            @Override
                                            public TriggerResult onEventTime(
                                                    long time,
                                                    TimeWindow window,
                                                    TriggerContext ctx)
                                                    throws Exception {
                                                return null;
                                            }

                                            @Override
                                            public boolean canMerge() {
                                                // Non-merging trigger: incompatible with session
                                                // windows, so trigger(...) must throw.
                                                return false;
                                            }

                                            @Override
                                            public void clear(TimeWindow window, TriggerContext ctx)
                                                    throws Exception {}
                                        }))
                .isInstanceOf(UnsupportedOperationException.class);
    }

    /**
     * Session windows combined with an evictor must translate to a {@link WindowOperator} backed
     * by list state (the evictor requires buffering the raw window contents).
     */
    @Test
    @SuppressWarnings("rawtypes")
    void testMergingWindowsWithEvictor() throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<Integer> source = env.fromData(1, 2);

        DataStream<String> window1 =
                source.keyBy(
                                new KeySelector<Integer, String>() {
                                    @Override
                                    public String getKey(Integer value) throws Exception {
                                        return value.toString();
                                    }
                                })
                        .window(EventTimeSessionWindows.withGap(Duration.ofSeconds(5)))
                        .evictor(CountEvictor.of(5))
                        .process(new TestProcessWindowFunction());

        final OneInputTransformation<Integer, String> transform =
                (OneInputTransformation<Integer, String>) window1.getTransformation();
        final OneInputStreamOperator<Integer, String> operator = transform.getOperator();
        assertThat(operator).isInstanceOf(WindowOperator.class);
        WindowOperator<String, Integer, ?, ?, ?> winOperator =
                (WindowOperator<String, Integer, ?, ?, ?>) operator;
        assertThat(winOperator.getTrigger()).isInstanceOf(EventTimeTrigger.class);
        assertThat(winOperator.getWindowAssigner()).isInstanceOf(EventTimeSessionWindows.class);
        // Evictor path buffers elements, hence ListStateDescriptor rather than reducing state.
        assertThat(winOperator.getStateDescriptor()).isInstanceOf(ListStateDescriptor.class);

        // Smoke test: push one element through the operator and expect some output.
        processElementAndEnsureOutput(
                winOperator, winOperator.getKeySelector(), BasicTypeInfo.STRING_TYPE_INFO, 1);
    }

    // ------------------------------------------------------------------------
    //  Reduce Translation Tests
    // ------------------------------------------------------------------------

    /**
     * reduce() on sliding event-time windows translates to a {@link WindowOperator} with an
     * {@code EventTimeTrigger} and reducing state.
     */
    @Test
    @SuppressWarnings("rawtypes")
    void testReduceEventTime() throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<Tuple2<String, Integer>> source =
                env.fromData(Tuple2.of("hello", 1), Tuple2.of("hello", 2));

        DataStream<Tuple2<String, Integer>> window1 =
                source.keyBy(new TupleKeySelector())
                        .window(
                                SlidingEventTimeWindows.of(
                                        Duration.ofSeconds(1), Duration.ofMillis(100)))
                        .reduce(new DummyReducer());

        OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>> transform =
                (OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>>)
                        window1.getTransformation();
        OneInputStreamOperator<Tuple2<String, Integer>, Tuple2<String, Integer>> operator =
                transform.getOperator();
        assertThat(operator).isInstanceOf(WindowOperator.class);
        WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?> winOperator =
                (WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?>) operator;
        assertThat(winOperator.getTrigger()).isInstanceOf(EventTimeTrigger.class);
        assertThat(winOperator.getWindowAssigner()).isInstanceOf(SlidingEventTimeWindows.class);
        assertThat(winOperator.getStateDescriptor()).isInstanceOf(ReducingStateDescriptor.class);

        processElementAndEnsureOutput(
                winOperator,
                winOperator.getKeySelector(),
                BasicTypeInfo.STRING_TYPE_INFO,
                new Tuple2<>("hello", 1));
    }

    /** Same as {@code testReduceEventTime} but for sliding processing-time windows. */
    @Test
    @SuppressWarnings("rawtypes")
    void testReduceProcessingTime() throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<Tuple2<String, Integer>> source =
                env.fromData(Tuple2.of("hello", 1), Tuple2.of("hello", 2));

        DataStream<Tuple2<String, Integer>> window1 =
                source.keyBy(new TupleKeySelector())
                        .window(
                                SlidingProcessingTimeWindows.of(
                                        Duration.ofSeconds(1), Duration.ofMillis(100)))
                        .reduce(new DummyReducer());

        OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>> transform =
                (OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>>)
                        window1.getTransformation();
        OneInputStreamOperator<Tuple2<String, Integer>, Tuple2<String, Integer>> operator =
                transform.getOperator();
        assertThat(operator).isInstanceOf(WindowOperator.class);
        WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?> winOperator =
                (WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?>) operator;
        assertThat(winOperator.getTrigger()).isInstanceOf(ProcessingTimeTrigger.class);
        assertThat(winOperator.getWindowAssigner())
                .isInstanceOf(SlidingProcessingTimeWindows.class);
        assertThat(winOperator.getStateDescriptor()).isInstanceOf(ReducingStateDescriptor.class);

        processElementAndEnsureOutput(
                winOperator,
                winOperator.getKeySelector(),
                BasicTypeInfo.STRING_TYPE_INFO,
                new Tuple2<>("hello", 1));
    }

    /**
     * reduce(ReduceFunction, WindowFunction) on tumbling event-time windows still uses reducing
     * state for the pre-aggregation.
     */
    @Test
    @SuppressWarnings("rawtypes")
    void testReduceWithWindowFunctionEventTime() throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<Tuple2<String, Integer>> source =
                env.fromData(Tuple2.of("hello", 1), Tuple2.of("hello", 2));

        DummyReducer reducer = new DummyReducer();

        DataStream<Tuple3<String, String, Integer>> window =
                source.keyBy(new TupleKeySelector())
                        .window(TumblingEventTimeWindows.of(Duration.ofSeconds(1)))
                        .reduce(
                                reducer,
                                new WindowFunction<
                                        Tuple2<String, Integer>,
                                        Tuple3<String, String, Integer>,
                                        String,
                                        TimeWindow>() {
                                    private static final long serialVersionUID = 1L;

                                    @Override
                                    public void apply(
                                            String key,
                                            TimeWindow window,
                                            Iterable<Tuple2<String, Integer>> values,
                                            Collector<Tuple3<String, String, Integer>> out)
                                            throws Exception {
                                        for (Tuple2<String, Integer> in : values) {
                                            out.collect(new Tuple3<>(in.f0, in.f0, in.f1));
                                        }
                                    }
                                });

        OneInputTransformation<Tuple2<String, Integer>, Tuple3<String, String, Integer>> transform =
                (OneInputTransformation<Tuple2<String, Integer>, Tuple3<String, String, Integer>>)
                        window.getTransformation();
        OneInputStreamOperator<Tuple2<String, Integer>, Tuple3<String, String, Integer>> operator =
                transform.getOperator();
        assertThat(operator).isInstanceOf(WindowOperator.class);
        WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?> winOperator =
                (WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?>) operator;
        assertThat(winOperator.getTrigger()).isInstanceOf(EventTimeTrigger.class);
        assertThat(winOperator.getWindowAssigner()).isInstanceOf(TumblingEventTimeWindows.class);
        assertThat(winOperator.getStateDescriptor()).isInstanceOf(ReducingStateDescriptor.class);

        processElementAndEnsureOutput(
                operator,
                winOperator.getKeySelector(),
                BasicTypeInfo.STRING_TYPE_INFO,
                new Tuple2<>("hello", 1));
    }

    /** Same as above but for tumbling processing-time windows. */
    @Test
    @SuppressWarnings("rawtypes")
    void testReduceWithWindowFunctionProcessingTime() throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<Tuple2<String, Integer>> source =
                env.fromData(Tuple2.of("hello", 1), Tuple2.of("hello", 2));

        DataStream<Tuple3<String, String, Integer>> window =
                source.keyBy(new TupleKeySelector())
                        .window(TumblingProcessingTimeWindows.of(Duration.ofSeconds(1)))
                        .reduce(
                                new DummyReducer(),
                                new WindowFunction<
                                        Tuple2<String, Integer>,
                                        Tuple3<String, String, Integer>,
                                        String,
                                        TimeWindow>() {
                                    private static final long serialVersionUID = 1L;

                                    @Override
                                    public void apply(
                                            String tuple,
                                            TimeWindow window,
                                            Iterable<Tuple2<String, Integer>> values,
                                            Collector<Tuple3<String, String, Integer>> out)
                                            throws Exception {
                                        for (Tuple2<String, Integer> in : values) {
                                            out.collect(new Tuple3<>(in.f0, in.f0, in.f1));
                                        }
                                    }
                                });

        OneInputTransformation<Tuple2<String, Integer>, Tuple3<String, String, Integer>> transform =
                (OneInputTransformation<Tuple2<String, Integer>, Tuple3<String, String, Integer>>)
                        window.getTransformation();
        OneInputStreamOperator<Tuple2<String, Integer>, Tuple3<String, String, Integer>> operator =
                transform.getOperator();
        assertThat(operator).isInstanceOf(WindowOperator.class);
        WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?> winOperator =
                (WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?>) operator;
        assertThat(winOperator.getTrigger()).isInstanceOf(ProcessingTimeTrigger.class);
        assertThat(winOperator.getWindowAssigner())
                .isInstanceOf(TumblingProcessingTimeWindows.class);
        assertThat(winOperator.getStateDescriptor()).isInstanceOf(ReducingStateDescriptor.class);

        processElementAndEnsureOutput(
                operator,
                winOperator.getKeySelector(),
                BasicTypeInfo.STRING_TYPE_INFO,
                new Tuple2<>("hello", 1));
    }

    /**
     * reduce(ReduceFunction, ProcessWindowFunction) on tumbling event-time windows.
     *
     * <p>NOTE(review): "Proces" in the method name looks like a typo for "Process" — consider
     * renaming.
     */
    @Test
    @SuppressWarnings("rawtypes")
    void testReduceWithProcesWindowFunctionEventTime() throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<Tuple2<String, Integer>> source =
                env.fromData(Tuple2.of("hello", 1), Tuple2.of("hello", 2));

        DummyReducer reducer = new DummyReducer();

        DataStream<Tuple3<String, String, Integer>> window =
                source.keyBy(new TupleKeySelector())
                        .window(TumblingEventTimeWindows.of(Duration.ofSeconds(1)))
                        .reduce(
                                reducer,
                                new ProcessWindowFunction<
                                        Tuple2<String, Integer>,
                                        Tuple3<String, String, Integer>,
                                        String,
                                        TimeWindow>() {
                                    private static final long serialVersionUID = 1L;

                                    @Override
                                    public void process(
                                            String key,
                                            Context ctx,
                                            Iterable<Tuple2<String, Integer>> values,
                                            Collector<Tuple3<String, String, Integer>> out)
                                            throws Exception {
                                        for (Tuple2<String, Integer> in : values) {
                                            out.collect(new Tuple3<>(in.f0, in.f0, in.f1));
                                        }
                                    }
                                });

        OneInputTransformation<Tuple2<String, Integer>, Tuple3<String, String, Integer>> transform =
                (OneInputTransformation<Tuple2<String, Integer>, Tuple3<String, String, Integer>>)
                        window.getTransformation();
        OneInputStreamOperator<Tuple2<String, Integer>, Tuple3<String, String, Integer>> operator =
                transform.getOperator();
        assertThat(operator).isInstanceOf(WindowOperator.class);
        WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?> winOperator =
                (WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?>) operator;
        assertThat(winOperator.getTrigger()).isInstanceOf(EventTimeTrigger.class);
        assertThat(winOperator.getWindowAssigner()).isInstanceOf(TumblingEventTimeWindows.class);
        assertThat(winOperator.getStateDescriptor()).isInstanceOf(ReducingStateDescriptor.class);

        processElementAndEnsureOutput(
                operator,
                winOperator.getKeySelector(),
                BasicTypeInfo.STRING_TYPE_INFO,
                new Tuple2<>("hello", 1));
    }

    /** Same as above but for tumbling processing-time windows. */
    @Test
    @SuppressWarnings("rawtypes")
    void testReduceWithProcessWindowFunctionProcessingTime() throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<Tuple2<String, Integer>> source =
                env.fromData(Tuple2.of("hello", 1), Tuple2.of("hello", 2));

        DataStream<Tuple3<String, String, Integer>> window =
                source.keyBy(new TupleKeySelector())
                        .window(TumblingProcessingTimeWindows.of(Duration.ofSeconds(1)))
                        .reduce(
                                new DummyReducer(),
                                new ProcessWindowFunction<
                                        Tuple2<String, Integer>,
                                        Tuple3<String, String, Integer>,
                                        String,
                                        TimeWindow>() {
                                    private static final long serialVersionUID = 1L;

                                    @Override
                                    public void process(
                                            String tuple,
                                            Context ctx,
                                            Iterable<Tuple2<String, Integer>> values,
                                            Collector<Tuple3<String, String, Integer>> out)
                                            throws Exception {
                                        for (Tuple2<String, Integer> in : values) {
                                            out.collect(new Tuple3<>(in.f0, in.f0, in.f1));
                                        }
                                    }
                                });

        OneInputTransformation<Tuple2<String, Integer>, Tuple3<String, String, Integer>> transform =
                (OneInputTransformation<Tuple2<String, Integer>, Tuple3<String, String, Integer>>)
                        window.getTransformation();
        OneInputStreamOperator<Tuple2<String, Integer>, Tuple3<String, String, Integer>> operator =
                transform.getOperator();
        assertThat(operator).isInstanceOf(WindowOperator.class);
        WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?> winOperator =
                (WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?>) operator;
        assertThat(winOperator.getTrigger()).isInstanceOf(ProcessingTimeTrigger.class);
        assertThat(winOperator.getWindowAssigner())
                .isInstanceOf(TumblingProcessingTimeWindows.class);
        assertThat(winOperator.getStateDescriptor()).isInstanceOf(ReducingStateDescriptor.class);

        processElementAndEnsureOutput(
                operator,
                winOperator.getKeySelector(),
                BasicTypeInfo.STRING_TYPE_INFO,
                new Tuple2<>("hello", 1));
    }

    /** Test for the deprecated .apply(Reducer, WindowFunction).
     */
    @Test
    @SuppressWarnings("rawtypes")
    void testApplyWithPreReducerEventTime() throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<Tuple2<String, Integer>> source =
                env.fromData(Tuple2.of("hello", 1), Tuple2.of("hello", 2));

        DummyReducer reducer = new DummyReducer();

        DataStream<Tuple3<String, String, Integer>> window =
                source.keyBy(new TupleKeySelector())
                        .window(TumblingEventTimeWindows.of(Duration.ofSeconds(1)))
                        // Pre-reducer + window function: expects reducing state below.
                        .reduce(
                                reducer,
                                new WindowFunction<
                                        Tuple2<String, Integer>,
                                        Tuple3<String, String, Integer>,
                                        String,
                                        TimeWindow>() {
                                    private static final long serialVersionUID = 1L;

                                    @Override
                                    public void apply(
                                            String key,
                                            TimeWindow window,
                                            Iterable<Tuple2<String, Integer>> values,
                                            Collector<Tuple3<String, String, Integer>> out)
                                            throws Exception {
                                        for (Tuple2<String, Integer> in : values) {
                                            out.collect(new Tuple3<>(in.f0, in.f0, in.f1));
                                        }
                                    }
                                });

        OneInputTransformation<Tuple2<String, Integer>, Tuple3<String, String, Integer>> transform =
                (OneInputTransformation<Tuple2<String, Integer>, Tuple3<String, String, Integer>>)
                        window.getTransformation();
        OneInputStreamOperator<Tuple2<String, Integer>, Tuple3<String, String, Integer>> operator =
                transform.getOperator();
        assertThat(operator).isInstanceOf(WindowOperator.class);
        WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?> winOperator =
                (WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?>) operator;
        assertThat(winOperator.getTrigger()).isInstanceOf(EventTimeTrigger.class);
        assertThat(winOperator.getWindowAssigner()).isInstanceOf(TumblingEventTimeWindows.class);
        assertThat(winOperator.getStateDescriptor()).isInstanceOf(ReducingStateDescriptor.class);

        // Smoke test: push one element through the operator and expect some output.
        processElementAndEnsureOutput(
                operator,
                winOperator.getKeySelector(),
                BasicTypeInfo.STRING_TYPE_INFO,
                new Tuple2<>("hello", 1));
    }

    /** Test for the deprecated .apply(Reducer, WindowFunction).
     */
    @Test
    @SuppressWarnings("rawtypes")
    void testApplyWithPreReducerAndEvictor() throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<Tuple2<String, Integer>> source =
                env.fromData(Tuple2.of("hello", 1), Tuple2.of("hello", 2));

        DummyReducer reducer = new DummyReducer();

        DataStream<Tuple3<String, String, Integer>> window =
                source.keyBy(new TupleKeySelector())
                        .window(TumblingEventTimeWindows.of(Duration.ofSeconds(1)))
                        // An evictor forces buffering of raw elements, so list state is expected
                        // below even though a pre-reducer is supplied.
                        .evictor(CountEvictor.of(100))
                        .reduce(
                                reducer,
                                new WindowFunction<
                                        Tuple2<String, Integer>,
                                        Tuple3<String, String, Integer>,
                                        String,
                                        TimeWindow>() {
                                    private static final long serialVersionUID = 1L;

                                    @Override
                                    public void apply(
                                            String key,
                                            TimeWindow window,
                                            Iterable<Tuple2<String, Integer>> values,
                                            Collector<Tuple3<String, String, Integer>> out)
                                            throws Exception {
                                        for (Tuple2<String, Integer> in : values) {
                                            out.collect(new Tuple3<>(in.f0, in.f0, in.f1));
                                        }
                                    }
                                });

        OneInputTransformation<Tuple2<String, Integer>, Tuple3<String, String, Integer>> transform =
                (OneInputTransformation<Tuple2<String, Integer>, Tuple3<String, String, Integer>>)
                        window.getTransformation();
        OneInputStreamOperator<Tuple2<String, Integer>, Tuple3<String, String, Integer>> operator =
                transform.getOperator();
        assertThat(operator).isInstanceOf(WindowOperator.class);
        WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?> winOperator =
                (WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?>) operator;
        assertThat(winOperator.getTrigger()).isInstanceOf(EventTimeTrigger.class);
        assertThat(winOperator.getWindowAssigner()).isInstanceOf(TumblingEventTimeWindows.class);
        assertThat(winOperator.getStateDescriptor()).isInstanceOf(ListStateDescriptor.class);

        processElementAndEnsureOutput(
                operator,
                winOperator.getKeySelector(),
                BasicTypeInfo.STRING_TYPE_INFO,
                new Tuple2<>("hello", 1));
    }

    // ------------------------------------------------------------------------
    //  Aggregate Translation Tests
    //
    // ------------------------------------------------------------------------

    /**
     * aggregate() on sliding event-time windows translates to a {@link WindowOperator} backed by
     * aggregating state.
     */
    @Test
    void testAggregateEventTime() throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<Tuple3<String, String, Integer>> source =
                env.fromData(Tuple3.of("hello", "hallo", 1), Tuple3.of("hello", "hallo", 2));

        DataStream<Integer> window1 =
                source.keyBy(new Tuple3KeySelector())
                        .window(
                                SlidingEventTimeWindows.of(
                                        Duration.ofSeconds(1), Duration.ofMillis(100)))
                        .aggregate(new DummyAggregationFunction());

        final OneInputTransformation<Tuple3<String, String, Integer>, Integer> transform =
                (OneInputTransformation<Tuple3<String, String, Integer>, Integer>)
                        window1.getTransformation();

        final OneInputStreamOperator<Tuple3<String, String, Integer>, Integer> operator =
                transform.getOperator();

        assertThat(operator).isInstanceOf(WindowOperator.class);
        WindowOperator<String, Tuple3<String, String, Integer>, ?, ?, ?> winOperator =
                (WindowOperator<String, Tuple3<String, String, Integer>, ?, ?, ?>) operator;

        assertThat(winOperator.getTrigger()).isInstanceOf(EventTimeTrigger.class);
        assertThat(winOperator.getWindowAssigner()).isInstanceOf(SlidingEventTimeWindows.class);
        assertThat(winOperator.getStateDescriptor()).isInstanceOf(AggregatingStateDescriptor.class);

        // Smoke test: push one element through the operator and expect some output.
        processElementAndEnsureOutput(
                winOperator,
                winOperator.getKeySelector(),
                BasicTypeInfo.STRING_TYPE_INFO,
                new Tuple3<>("hello", "hallo", 1));
    }

    /** Same as {@code testAggregateEventTime} but for sliding processing-time windows. */
    @Test
    void testAggregateProcessingTime() throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<Tuple3<String, String, Integer>> source =
                env.fromData(Tuple3.of("hello", "hallo", 1), Tuple3.of("hello", "hallo", 2));

        DataStream<Integer> window1 =
                source.keyBy(new Tuple3KeySelector())
                        .window(
                                SlidingProcessingTimeWindows.of(
                                        Duration.ofSeconds(1), Duration.ofMillis(100)))
                        .aggregate(new DummyAggregationFunction());

        final OneInputTransformation<Tuple3<String, String, Integer>, Integer> transform =
                (OneInputTransformation<Tuple3<String, String, Integer>, Integer>)
                        window1.getTransformation();

        final OneInputStreamOperator<Tuple3<String, String, Integer>, Integer> operator =
                transform.getOperator();

        assertThat(operator).isInstanceOf(WindowOperator.class);
        WindowOperator<String, Tuple3<String, String, Integer>, ?, ?, ?> winOperator =
                (WindowOperator<String, Tuple3<String, String, Integer>, ?, ?, ?>) operator;

        assertThat(winOperator.getTrigger()).isInstanceOf(ProcessingTimeTrigger.class);
        assertThat(winOperator.getWindowAssigner())
                .isInstanceOf(SlidingProcessingTimeWindows.class);
        assertThat(winOperator.getStateDescriptor()).isInstanceOf(AggregatingStateDescriptor.class);

        processElementAndEnsureOutput(
                winOperator,
                winOperator.getKeySelector(),
                BasicTypeInfo.STRING_TYPE_INFO,
                new Tuple3<>("hello", "hallo", 1));
    }

    /** aggregate(AggregateFunction, WindowFunction) on tumbling event-time windows. */
    @Test
    void testAggregateWithWindowFunctionEventTime() throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<Tuple3<String, String, Integer>> source =
                env.fromData(Tuple3.of("hello", "hallo", 1), Tuple3.of("hello", "hallo", 2));

        // NOTE(review): 'reducer' is never used in this test — likely a copy-paste leftover.
        DummyReducer reducer = new DummyReducer();

        DataStream<String> window =
                source.keyBy(new Tuple3KeySelector())
                        .window(TumblingEventTimeWindows.of(Duration.ofSeconds(1)))
                        .aggregate(new DummyAggregationFunction(), new TestWindowFunction());

        final OneInputTransformation<Tuple3<String, String, Integer>, String> transform =
                (OneInputTransformation<Tuple3<String, String, Integer>, String>)
                        window.getTransformation();

        final OneInputStreamOperator<Tuple3<String, String, Integer>, String> operator =
                transform.getOperator();

        assertThat(operator).isInstanceOf(WindowOperator.class);
        WindowOperator<String, Tuple3<String, String, Integer>, ?, ?, ?> winOperator =
                (WindowOperator<String, Tuple3<String, String, Integer>, ?, ?, ?>) operator;

        assertThat(winOperator.getTrigger()).isInstanceOf(EventTimeTrigger.class);
        assertThat(winOperator.getWindowAssigner()).isInstanceOf(TumblingEventTimeWindows.class);
        assertThat(winOperator.getStateDescriptor()).isInstanceOf(AggregatingStateDescriptor.class);

        processElementAndEnsureOutput(
                operator,
                winOperator.getKeySelector(),
                BasicTypeInfo.STRING_TYPE_INFO,
                new Tuple3<>("hello", "hallo", 1));
    }

    /** Same as above but for tumbling processing-time windows. */
    @Test
    void testAggregateWithWindowFunctionProcessingTime() throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<Tuple3<String, String, Integer>> source =
                env.fromData(Tuple3.of("hello", "hallo", 1), Tuple3.of("hello", "hallo", 2));

        DataStream<String> window =
                source.keyBy(new Tuple3KeySelector())
                        .window(TumblingProcessingTimeWindows.of(Duration.ofSeconds(1)))
                        .aggregate(new DummyAggregationFunction(), new TestWindowFunction());

        final OneInputTransformation<Tuple3<String, String, Integer>, String> transform =
                (OneInputTransformation<Tuple3<String, String, Integer>, String>)
                        window.getTransformation();

        final OneInputStreamOperator<Tuple3<String, String, Integer>, String> operator =
                transform.getOperator();

        assertThat(operator).isInstanceOf(WindowOperator.class);
        WindowOperator<String, Tuple3<String, String, Integer>, ?, ?, ?> winOperator =
                (WindowOperator<String, Tuple3<String, String, Integer>, ?, ?, ?>) operator;

        assertThat(winOperator.getTrigger()).isInstanceOf(ProcessingTimeTrigger.class);
        assertThat(winOperator.getWindowAssigner())
                .isInstanceOf(TumblingProcessingTimeWindows.class);
        assertThat(winOperator.getStateDescriptor()).isInstanceOf(AggregatingStateDescriptor.class);

        processElementAndEnsureOutput(
                operator,
                winOperator.getKeySelector(),
                BasicTypeInfo.STRING_TYPE_INFO,
                new Tuple3<>("hello", "hallo", 1));
    }

    /** aggregate(AggregateFunction, ProcessWindowFunction) on tumbling event-time windows. */
    @Test
    void testAggregateWithProcessWindowFunctionEventTime() throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<Tuple3<String, String, Integer>> source =
                env.fromData(Tuple3.of("hello", "hallo", 1), Tuple3.of("hello", "hallo", 2));

        DataStream<String> window =
                source.keyBy(new Tuple3KeySelector())
                        .window(TumblingEventTimeWindows.of(Duration.ofSeconds(1)))
                        .aggregate(new DummyAggregationFunction(), new TestProcessWindowFunction());

        final OneInputTransformation<Tuple3<String, String, Integer>, String> transform =
                (OneInputTransformation<Tuple3<String, String, Integer>, String>)
                        window.getTransformation();

        final OneInputStreamOperator<Tuple3<String, String, Integer>, String> operator =
                transform.getOperator();

        assertThat(operator).isInstanceOf(WindowOperator.class);
        WindowOperator<String, Tuple3<String, String, Integer>, ?, ?, ?> winOperator =
                (WindowOperator<String, Tuple3<String, String, Integer>, ?, ?, ?>) operator;

        assertThat(winOperator.getTrigger()).isInstanceOf(EventTimeTrigger.class);
        assertThat(winOperator.getWindowAssigner()).isInstanceOf(TumblingEventTimeWindows.class);
        assertThat(winOperator.getStateDescriptor()).isInstanceOf(AggregatingStateDescriptor.class);

        processElementAndEnsureOutput(
                operator,
                winOperator.getKeySelector(),
                BasicTypeInfo.STRING_TYPE_INFO,
                new Tuple3<>("hello", "hallo", 1));
    }

    /** Same as above but for tumbling processing-time windows. */
    @Test
    void testAggregateWithProcessWindowFunctionProcessingTime() throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<Tuple3<String, String, Integer>> source =
                env.fromData(Tuple3.of("hello", "hallo", 1), Tuple3.of("hello", "hallo", 2));

        DataStream<String> window =
                source.keyBy(new Tuple3KeySelector())
                        .window(TumblingProcessingTimeWindows.of(Duration.ofSeconds(1)))
                        .aggregate(new DummyAggregationFunction(), new TestProcessWindowFunction());

        final OneInputTransformation<Tuple3<String, String, Integer>, String> transform =
                (OneInputTransformation<Tuple3<String, String, Integer>, String>)
                        window.getTransformation();

        final OneInputStreamOperator<Tuple3<String, String, Integer>, String> operator =
                transform.getOperator();

        assertThat(operator).isInstanceOf(WindowOperator.class);
        WindowOperator<String, Tuple3<String, String, Integer>, ?, ?, ?> winOperator =
                (WindowOperator<String, Tuple3<String, String, Integer>, ?, ?, ?>) operator;

        assertThat(winOperator.getTrigger()).isInstanceOf(ProcessingTimeTrigger.class);
        assertThat(winOperator.getWindowAssigner())
                .isInstanceOf(TumblingProcessingTimeWindows.class);
        assertThat(winOperator.getStateDescriptor()).isInstanceOf(AggregatingStateDescriptor.class);

        processElementAndEnsureOutput(
                operator,
                winOperator.getKeySelector(),
                BasicTypeInfo.STRING_TYPE_INFO,
                new Tuple3<>("hello", "hallo", 1));
    }

    // ------------------------------------------------------------------------
    //  Apply Translation Tests
    // ------------------------------------------------------------------------

    /**
     * apply(WindowFunction) on tumbling event-time windows buffers all elements, hence list state.
     */
    @Test
    @SuppressWarnings("rawtypes")
    void testApplyEventTime() throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<Tuple2<String, Integer>> source =
                env.fromData(Tuple2.of("hello", 1), Tuple2.of("hello", 2));

        DataStream<Tuple2<String, Integer>> window1 =
                source.keyBy(new TupleKeySelector())
                        .window(TumblingEventTimeWindows.of(Duration.ofSeconds(1)))
                        .apply(
                                new WindowFunction<
                                        Tuple2<String, Integer>,
                                        Tuple2<String, Integer>,
                                        String,
                                        TimeWindow>() {
                                    private static final long serialVersionUID = 1L;

                                    @Override
                                    public void apply(
                                            String key,
                                            TimeWindow window,
                                            Iterable<Tuple2<String, Integer>> values,
                                            Collector<Tuple2<String, Integer>> out)
                                            throws Exception {
                                        for (Tuple2<String, Integer> in : values) {
                                            out.collect(in);
                                        }
                                    }
                                });

        OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>> transform =
                (OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>>)
                        window1.getTransformation();
        OneInputStreamOperator<Tuple2<String, Integer>, Tuple2<String, Integer>> operator =
                transform.getOperator();
        assertThat(operator).isInstanceOf(WindowOperator.class);
        WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?> winOperator =
                (WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?>) operator;
        assertThat(winOperator.getTrigger()).isInstanceOf(EventTimeTrigger.class);
        assertThat(winOperator.getWindowAssigner()).isInstanceOf(TumblingEventTimeWindows.class);
        assertThat(winOperator.getStateDescriptor()).isInstanceOf(ListStateDescriptor.class);

        processElementAndEnsureOutput(
                winOperator,
                winOperator.getKeySelector(),
                BasicTypeInfo.STRING_TYPE_INFO,
                new Tuple2<>("hello", 1));
    }

    /** Same as {@code testApplyEventTime} but for tumbling processing-time windows. */
    @Test
    @SuppressWarnings("rawtypes")
    void testApplyProcessingTime() throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<Tuple2<String, Integer>> source =
                env.fromData(Tuple2.of("hello", 1), Tuple2.of("hello", 2));

        DataStream<Tuple2<String, Integer>> window1 =
                source.keyBy(new TupleKeySelector())
                        .window(TumblingProcessingTimeWindows.of(Duration.ofSeconds(1)))
                        .apply(
                                new WindowFunction<
                                        Tuple2<String, Integer>,
                                        Tuple2<String, Integer>,
                                        String,
                                        TimeWindow>() {
                                    private static final long serialVersionUID = 1L;

                                    @Override
                                    public void apply(
                                            String key,
                                            TimeWindow window,
                                            Iterable<Tuple2<String, Integer>> values,
                                            Collector<Tuple2<String, Integer>> out)
                                            throws Exception {
                                        for (Tuple2<String, Integer> in : values) {
                                            out.collect(in);
                                        }
                                    }
                                });

        OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>> transform =
                (OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>>)
                        window1.getTransformation();
        OneInputStreamOperator<Tuple2<String, Integer>, Tuple2<String, Integer>> operator =
                transform.getOperator();
        assertThat(operator).isInstanceOf(WindowOperator.class);
        WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?> winOperator =
                (WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?>) operator;
        assertThat(winOperator.getTrigger()).isInstanceOf(ProcessingTimeTrigger.class);
        assertThat(winOperator.getWindowAssigner())
                .isInstanceOf(TumblingProcessingTimeWindows.class);
        assertThat(winOperator.getStateDescriptor()).isInstanceOf(ListStateDescriptor.class);

        processElementAndEnsureOutput(
                winOperator,
                winOperator.getKeySelector(),
                BasicTypeInfo.STRING_TYPE_INFO,
                new Tuple2<>("hello", 1));
    }

    /** process(ProcessWindowFunction) on tumbling event-time windows also uses list state. */
    @Test
    @SuppressWarnings("rawtypes")
    void testProcessEventTime() throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<Tuple2<String, Integer>> source =
                env.fromData(Tuple2.of("hello", 1), Tuple2.of("hello", 2));

        DataStream<Tuple2<String, Integer>> window1 =
                source.keyBy(new TupleKeySelector())
                        .window(TumblingEventTimeWindows.of(Duration.ofSeconds(1)))
                        .process(
                                new ProcessWindowFunction<
                                        Tuple2<String, Integer>,
                                        Tuple2<String, Integer>,
                                        String,
                                        TimeWindow>() {
                                    private static final long serialVersionUID = 1L;

                                    @Override
                                    public void process(
                                            String key,
                                            Context ctx,
                                            Iterable<Tuple2<String, Integer>> values,
                                            Collector<Tuple2<String, Integer>> out)
                                            throws Exception {
                                        for (Tuple2<String, Integer> in : values) {
                                            out.collect(in);
                                        }
                                    }
                                });

        OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>> transform =
                (OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>>)
                        window1.getTransformation();
        OneInputStreamOperator<Tuple2<String, Integer>, Tuple2<String, Integer>> operator =
                transform.getOperator();
        assertThat(operator).isInstanceOf(WindowOperator.class);
        WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?> winOperator =
                (WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?>) operator;
        assertThat(winOperator.getTrigger()).isInstanceOf(EventTimeTrigger.class);
        assertThat(winOperator.getWindowAssigner()).isInstanceOf(TumblingEventTimeWindows.class);
        assertThat(winOperator.getStateDescriptor()).isInstanceOf(ListStateDescriptor.class);

        processElementAndEnsureOutput(
                winOperator,
                winOperator.getKeySelector(),
                BasicTypeInfo.STRING_TYPE_INFO,
                new Tuple2<>("hello", 1));
    }

    /** Same as {@code testProcessEventTime} but for tumbling processing-time windows. */
    @Test
    @SuppressWarnings("rawtypes")
    void testProcessProcessingTime() throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<Tuple2<String, Integer>> source =
                env.fromData(Tuple2.of("hello", 1), Tuple2.of("hello", 2));

        DataStream<Tuple2<String, Integer>> window1 =
                source.keyBy(new TupleKeySelector())
                        .window(TumblingProcessingTimeWindows.of(Duration.ofSeconds(1)))
                        .process(
                                new ProcessWindowFunction<
                                        Tuple2<String, Integer>,
                                        Tuple2<String, Integer>,
                                        String,
                                        TimeWindow>() {
                                    private static final long serialVersionUID = 1L;

                                    @Override
                                    public void process(
                                            String key,
                                            Context ctx,
                                            Iterable<Tuple2<String, Integer>> values,
                                            Collector<Tuple2<String, Integer>> out)
                                            throws Exception {
                                        for (Tuple2<String, Integer> in : values) {
                                            out.collect(in);
                                        }
                                    }
                                });

        OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>> transform =
                (OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>>)
                        window1.getTransformation();
        OneInputStreamOperator<Tuple2<String, Integer>, Tuple2<String, Integer>> operator =
                transform.getOperator();
        assertThat(operator).isInstanceOf(WindowOperator.class);
        WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?> winOperator =
                (WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?>) operator;
        assertThat(winOperator.getTrigger()).isInstanceOf(ProcessingTimeTrigger.class);
        assertThat(winOperator.getWindowAssigner())
                .isInstanceOf(TumblingProcessingTimeWindows.class);
        assertThat(winOperator.getStateDescriptor()).isInstanceOf(ListStateDescriptor.class);

        processElementAndEnsureOutput(
                winOperator,
                winOperator.getKeySelector(),
                BasicTypeInfo.STRING_TYPE_INFO,
                new Tuple2<>("hello", 1));
    }

    /**
     * reduce() with a custom trigger, parameterized over the async-state code path: the async
     * variant must translate to an {@code AsyncWindowOperator} with state-v2 descriptors, the sync
     * variant to the classic {@code WindowOperator}.
     */
    @ParameterizedTest(name = "Enable async state = {0}")
    @ValueSource(booleans = {false, true})
    @SuppressWarnings("rawtypes")
    void testReduceWithCustomTrigger(boolean enableAsyncState) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<Tuple2<String, Integer>> source =
                env.fromData(Tuple2.of("hello", 1), Tuple2.of("hello", 2));

        DummyReducer reducer = new DummyReducer();

        DataStream<Tuple2<String, Integer>> window1 =
                enableAsyncState
                        ? source.keyBy(x -> x.f0)
                                .window(
                                        SlidingEventTimeWindows.of(
                                                Duration.ofSeconds(1), Duration.ofMillis(100)))
                                .trigger(AsyncCountTrigger.of(1))
                                .reduce(reducer)
                        : source.keyBy(x -> x.f0)
                                .window(
                                        SlidingEventTimeWindows.of(
                                                Duration.ofSeconds(1), Duration.ofMillis(100)))
                                .trigger(CountTrigger.of(1))
                                .reduce(reducer);

        OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>> transform =
                (OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>>)
                        window1.getTransformation();
        OneInputStreamOperator<Tuple2<String, Integer>, Tuple2<String, Integer>> operator =
                transform.getOperator();
        assertThat(((AbstractStreamOperator<?>) operator).isAsyncKeyOrderedProcessingEnabled())
                .isEqualTo(enableAsyncState);
        KeySelector<Tuple2<String, Integer>, String> keySelector;
        if (enableAsyncState) {
            assertThat(operator).isInstanceOf(AsyncWindowOperator.class);
            AsyncWindowOperator<String, Tuple2<String, Integer>, ?, ?, ?> winOperator =
                    (AsyncWindowOperator<String, Tuple2<String, Integer>, ?, ?, ?>) operator;
            assertThat(winOperator.getTrigger()).isInstanceOf(AsyncCountTrigger.class);
            assertThat(winOperator.getWindowAssigner()).isInstanceOf(SlidingEventTimeWindows.class);
            // Async path uses the state-v2 descriptor hierarchy.
            assertThat(winOperator.getStateDescriptor())
                    .isInstanceOf(
                            org.apache.flink.api.common.state.v2.ReducingStateDescriptor.class);
            keySelector = winOperator.getKeySelector();
        } else {
            assertThat(operator).isInstanceOf(WindowOperator.class);
            WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?> winOperator =
                    (WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?>) operator;
            assertThat(winOperator.getTrigger()).isInstanceOf(CountTrigger.class);
            assertThat(winOperator.getWindowAssigner()).isInstanceOf(SlidingEventTimeWindows.class);
            assertThat(winOperator.getStateDescriptor())
                    .isInstanceOf(ReducingStateDescriptor.class);
            keySelector = winOperator.getKeySelector();
        }

        processElementAndEnsureOutput(
                operator, keySelector, BasicTypeInfo.STRING_TYPE_INFO, new Tuple2<>("hello", 1));
    }

    /** apply() with a custom trigger, parameterized over the async-state code path. */
    @ParameterizedTest(name = "Enable async state = {0}")
    @ValueSource(booleans = {false, true})
    @SuppressWarnings("rawtypes")
    void testApplyWithCustomTrigger(boolean enableAsyncState) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<Tuple2<String, Integer>> source =
                env.fromData(Tuple2.of("hello", 1), Tuple2.of("hello", 2));

        WindowFunction<Tuple2<String, Integer>, Tuple2<String, Integer>, String, TimeWindow>
                windowFunc =
                        new WindowFunction<>() {
                            private static final long serialVersionUID = 1L;

                            @Override
                            public void apply(
                                    String key,
                                    TimeWindow window,
                                    Iterable<Tuple2<String, Integer>> values,
                                    Collector<Tuple2<String, Integer>> out) {
                                for (Tuple2<String, Integer> in : values) {
                                    out.collect(in);
                                }
                            }
                        };
        DataStream<Tuple2<String, Integer>> window1 =
                enableAsyncState
                        ? source.keyBy(new TupleKeySelector())
                                .window(TumblingEventTimeWindows.of(Duration.ofSeconds(1)))
                                .trigger(AsyncCountTrigger.of(1))
                                .apply(windowFunc)
                        : source.keyBy(new TupleKeySelector())
                                .window(TumblingEventTimeWindows.of(Duration.ofSeconds(1)))
                                .trigger(CountTrigger.of(1))
                                .apply(windowFunc);

        OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>> transform =
                (OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>>)
                        window1.getTransformation();
        OneInputStreamOperator<Tuple2<String, Integer>, Tuple2<String, Integer>> operator =
                transform.getOperator();
        assertThat(((AbstractStreamOperator<?>) operator).isAsyncKeyOrderedProcessingEnabled())
                .isEqualTo(enableAsyncState);
        KeySelector<Tuple2<String, Integer>, String> keySelector;
        if (enableAsyncState) {
            assertThat(operator).isInstanceOf(AsyncWindowOperator.class);
            AsyncWindowOperator<String, Tuple2<String, Integer>, ?, ?, ?> winOperator =
                    (AsyncWindowOperator<String, Tuple2<String, Integer>, ?, ?, ?>) operator;
            assertThat(winOperator.getTrigger()).isInstanceOf(AsyncCountTrigger.class);
            assertThat(winOperator.getWindowAssigner())
                    .isInstanceOf(TumblingEventTimeWindows.class);
            // Async path uses the state-v2 descriptor hierarchy.
            assertThat(winOperator.getStateDescriptor())
                    .isInstanceOf(org.apache.flink.api.common.state.v2.ListStateDescriptor.class);
            keySelector = winOperator.getKeySelector();
        } else {
            assertThat(operator).isInstanceOf(WindowOperator.class);
            WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?> winOperator =
                    (WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?>) operator;
            assertThat(winOperator.getTrigger()).isInstanceOf(CountTrigger.class);
            assertThat(winOperator.getWindowAssigner())
                    .isInstanceOf(TumblingEventTimeWindows.class);
            assertThat(winOperator.getStateDescriptor()).isInstanceOf(ListStateDescriptor.class);
            keySelector = winOperator.getKeySelector();
        }

        processElementAndEnsureOutput(
                operator, keySelector, BasicTypeInfo.STRING_TYPE_INFO, new Tuple2<>("hello", 1));
    }

    /** process() with a custom trigger, parameterized over the async-state code path. */
    @ParameterizedTest(name = "Enable async state = {0}")
    @ValueSource(booleans = {false, true})
    @SuppressWarnings("rawtypes")
    void testProcessWithCustomTrigger(boolean enableAsyncState) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<Tuple2<String, Integer>> source =
                env.fromData(Tuple2.of("hello", 1), Tuple2.of("hello", 2));

        ProcessWindowFunction<Tuple2<String, Integer>, Tuple2<String, Integer>, String, TimeWindow>
                windowFunc =
                        new ProcessWindowFunction<>() {
                            private static final long serialVersionUID = 1L;

                            @Override
                            public void process(
                                    String key,
                                    Context ctx,
                                    Iterable<Tuple2<String, Integer>> values,
                                    Collector<Tuple2<String, Integer>> out)
                                    throws Exception {
                                for (Tuple2<String, Integer> in : values) {
                                    out.collect(in);
                                }
                            }
                        };
        DataStream<Tuple2<String, Integer>> window1 =
                enableAsyncState
                        ?
source.keyBy(new TupleKeySelector()) .window(TumblingEventTimeWindows.of(Duration.ofSeconds(1))) .trigger(AsyncCountTrigger.of(1)) .process(windowFunc) : source.keyBy(new TupleKeySelector()) .window(TumblingEventTimeWindows.of(Duration.ofSeconds(1))) .trigger(CountTrigger.of(1)) .process(windowFunc); OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>> transform = (OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>>) window1.getTransformation(); OneInputStreamOperator<Tuple2<String, Integer>, Tuple2<String, Integer>> operator = transform.getOperator(); assertThat(((AbstractStreamOperator<?>) operator).isAsyncKeyOrderedProcessingEnabled()) .isEqualTo(enableAsyncState); KeySelector<Tuple2<String, Integer>, String> keySelector; if (enableAsyncState) { assertThat(operator).isInstanceOf(AsyncWindowOperator.class); AsyncWindowOperator<String, Tuple2<String, Integer>, ?, ?, ?> winOperator = (AsyncWindowOperator<String, Tuple2<String, Integer>, ?, ?, ?>) operator; assertThat(winOperator.getTrigger()).isInstanceOf(AsyncCountTrigger.class); assertThat(winOperator.getWindowAssigner()) .isInstanceOf(TumblingEventTimeWindows.class); assertThat(winOperator.getStateDescriptor()) .isInstanceOf(org.apache.flink.api.common.state.v2.ListStateDescriptor.class); keySelector = winOperator.getKeySelector(); } else { assertThat(operator).isInstanceOf(WindowOperator.class); WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?> winOperator = (WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?>) operator; assertThat(winOperator.getTrigger()).isInstanceOf(CountTrigger.class); assertThat(winOperator.getWindowAssigner()) .isInstanceOf(TumblingEventTimeWindows.class); assertThat(winOperator.getStateDescriptor()).isInstanceOf(ListStateDescriptor.class); keySelector = winOperator.getKeySelector(); } processElementAndEnsureOutput( operator, keySelector, BasicTypeInfo.STRING_TYPE_INFO, new Tuple2<>("hello", 1)); } @Test @SuppressWarnings("rawtypes") 
void testReduceWithEvictor() throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); DataStream<Tuple2<String, Integer>> source = env.fromData(Tuple2.of("hello", 1), Tuple2.of("hello", 2)); DummyReducer reducer = new DummyReducer(); DataStream<Tuple2<String, Integer>> window1 = source.keyBy(x -> x.f0) .window( SlidingEventTimeWindows.of( Duration.ofSeconds(1), Duration.ofMillis(100))) .evictor(CountEvictor.of(100)) .reduce(reducer); OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>> transform = (OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>>) window1.getTransformation(); OneInputStreamOperator<Tuple2<String, Integer>, Tuple2<String, Integer>> operator = transform.getOperator(); assertThat(operator).isInstanceOf(EvictingWindowOperator.class); EvictingWindowOperator<String, Tuple2<String, Integer>, ?, ?> winOperator = (EvictingWindowOperator<String, Tuple2<String, Integer>, ?, ?>) operator; assertThat(winOperator.getTrigger()).isInstanceOf(EventTimeTrigger.class); assertThat(winOperator.getEvictor()).isInstanceOf(CountEvictor.class); assertThat(winOperator.getWindowAssigner()).isInstanceOf(SlidingEventTimeWindows.class); assertThat(winOperator.getStateDescriptor()).isInstanceOf(ListStateDescriptor.class); processElementAndEnsureOutput( winOperator, winOperator.getKeySelector(), BasicTypeInfo.STRING_TYPE_INFO, new Tuple2<>("hello", 1)); } @Test @SuppressWarnings("rawtypes") void testReduceWithEvictorAndProcessFunction() throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); DataStream<Tuple2<String, Integer>> source = env.fromData(Tuple2.of("hello", 1), Tuple2.of("hello", 2)); DummyReducer reducer = new DummyReducer(); DataStream<Tuple2<String, Integer>> window1 = source.keyBy(x -> x.f0) .window( SlidingEventTimeWindows.of( Duration.ofSeconds(1), Duration.ofMillis(100))) .evictor(CountEvictor.of(100)) .reduce( reducer, 
new ProcessWindowFunction< Tuple2<String, Integer>, Tuple2<String, Integer>, String, TimeWindow>() { @Override public void process( String str, Context context, Iterable<Tuple2<String, Integer>> elements, Collector<Tuple2<String, Integer>> out) throws Exception { for (Tuple2<String, Integer> in : elements) { out.collect(in); } } }); OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>> transform = (OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>>) window1.getTransformation(); OneInputStreamOperator<Tuple2<String, Integer>, Tuple2<String, Integer>> operator = transform.getOperator(); assertThat(operator).isInstanceOf(EvictingWindowOperator.class); EvictingWindowOperator<String, Tuple2<String, Integer>, ?, ?> winOperator = (EvictingWindowOperator<String, Tuple2<String, Integer>, ?, ?>) operator; assertThat(winOperator.getTrigger()).isInstanceOf(EventTimeTrigger.class); assertThat(winOperator.getEvictor()).isInstanceOf(CountEvictor.class); assertThat(winOperator.getWindowAssigner()).isInstanceOf(SlidingEventTimeWindows.class); assertThat(winOperator.getStateDescriptor()).isInstanceOf(ListStateDescriptor.class); processElementAndEnsureOutput( winOperator, winOperator.getKeySelector(), BasicTypeInfo.STRING_TYPE_INFO, new Tuple2<>("hello", 1)); } @Test void testAggregateWithEvictor() throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); DataStream<Tuple3<String, String, Integer>> source = env.fromData(Tuple3.of("hello", "hallo", 1), Tuple3.of("hello", "hallo", 2)); DataStream<Integer> window1 = source.keyBy(new Tuple3KeySelector()) .window( SlidingEventTimeWindows.of( Duration.ofSeconds(1), Duration.ofMillis(100))) .evictor(CountEvictor.of(100)) .aggregate(new DummyAggregationFunction()); final OneInputTransformation<Tuple3<String, String, Integer>, Integer> transform = (OneInputTransformation<Tuple3<String, String, Integer>, Integer>) window1.getTransformation(); final 
OneInputStreamOperator<Tuple3<String, String, Integer>, Integer> operator = transform.getOperator(); assertThat(operator).isInstanceOf(WindowOperator.class); WindowOperator<String, Tuple3<String, String, Integer>, ?, ?, ?> winOperator = (WindowOperator<String, Tuple3<String, String, Integer>, ?, ?, ?>) operator; assertThat(winOperator.getTrigger()).isInstanceOf(EventTimeTrigger.class); assertThat(winOperator.getWindowAssigner()).isInstanceOf(SlidingEventTimeWindows.class); assertThat(winOperator.getStateDescriptor()).isInstanceOf(ListStateDescriptor.class); processElementAndEnsureOutput( winOperator, winOperator.getKeySelector(), BasicTypeInfo.STRING_TYPE_INFO, new Tuple3<>("hello", "hallo", 1)); } @Test void testAggregateWithEvictorAndProcessFunction() throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); DataStream<Tuple3<String, String, Integer>> source = env.fromData(Tuple3.of("hello", "hallo", 1), Tuple3.of("hello", "hallo", 2)); DataStream<String> window1 = source.keyBy(new Tuple3KeySelector()) .window( SlidingEventTimeWindows.of( Duration.ofSeconds(1), Duration.ofMillis(100))) .evictor(CountEvictor.of(100)) .aggregate(new DummyAggregationFunction(), new TestProcessWindowFunction()); final OneInputTransformation<Tuple3<String, String, Integer>, String> transform = (OneInputTransformation<Tuple3<String, String, Integer>, String>) window1.getTransformation(); final OneInputStreamOperator<Tuple3<String, String, Integer>, String> operator = transform.getOperator(); assertThat(operator).isInstanceOf(WindowOperator.class); WindowOperator<String, Tuple3<String, String, Integer>, ?, ?, ?> winOperator = (WindowOperator<String, Tuple3<String, String, Integer>, ?, ?, ?>) operator; assertThat(winOperator.getTrigger()).isInstanceOf(EventTimeTrigger.class); assertThat(winOperator.getWindowAssigner()).isInstanceOf(SlidingEventTimeWindows.class); 
assertThat(winOperator.getStateDescriptor()).isInstanceOf(ListStateDescriptor.class); processElementAndEnsureOutput( winOperator, winOperator.getKeySelector(), BasicTypeInfo.STRING_TYPE_INFO, new Tuple3<>("hello", "hallo", 1)); } @Test @SuppressWarnings("rawtypes") void testApplyWithEvictor() throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); DataStream<Tuple2<String, Integer>> source = env.fromData(Tuple2.of("hello", 1), Tuple2.of("hello", 2)); DataStream<Tuple2<String, Integer>> window1 = source.keyBy(new TupleKeySelector()) .window(TumblingEventTimeWindows.of(Duration.ofSeconds(1))) .trigger(CountTrigger.of(1)) .evictor(TimeEvictor.of(Duration.ofMillis(100))) .apply( new WindowFunction< Tuple2<String, Integer>, Tuple2<String, Integer>, String, TimeWindow>() { private static final long serialVersionUID = 1L; @Override public void apply( String key, TimeWindow window, Iterable<Tuple2<String, Integer>> values, Collector<Tuple2<String, Integer>> out) throws Exception { for (Tuple2<String, Integer> in : values) { out.collect(in); } } }); OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>> transform = (OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>>) window1.getTransformation(); OneInputStreamOperator<Tuple2<String, Integer>, Tuple2<String, Integer>> operator = transform.getOperator(); assertThat(operator).isInstanceOf(EvictingWindowOperator.class); EvictingWindowOperator<String, Tuple2<String, Integer>, ?, ?> winOperator = (EvictingWindowOperator<String, Tuple2<String, Integer>, ?, ?>) operator; assertThat(winOperator.getTrigger()).isInstanceOf(CountTrigger.class); assertThat(winOperator.getEvictor()).isInstanceOf(TimeEvictor.class); assertThat(winOperator.getWindowAssigner()).isInstanceOf(TumblingEventTimeWindows.class); assertThat(winOperator.getStateDescriptor()).isInstanceOf(ListStateDescriptor.class); processElementAndEnsureOutput( winOperator, 
winOperator.getKeySelector(), BasicTypeInfo.STRING_TYPE_INFO, new Tuple2<>("hello", 1)); } @Test @SuppressWarnings("rawtypes") void testProcessWithEvictor() throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); DataStream<Tuple2<String, Integer>> source = env.fromData(Tuple2.of("hello", 1), Tuple2.of("hello", 2)); DataStream<Tuple2<String, Integer>> window1 = source.keyBy(new TupleKeySelector()) .window(TumblingEventTimeWindows.of(Duration.ofSeconds(1))) .trigger(CountTrigger.of(1)) .evictor(TimeEvictor.of(Duration.ofMillis(100))) .process( new ProcessWindowFunction< Tuple2<String, Integer>, Tuple2<String, Integer>, String, TimeWindow>() { private static final long serialVersionUID = 1L; @Override public void process( String key, Context ctx, Iterable<Tuple2<String, Integer>> values, Collector<Tuple2<String, Integer>> out) throws Exception { for (Tuple2<String, Integer> in : values) { out.collect(in); } } }); OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>> transform = (OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>>) window1.getTransformation(); OneInputStreamOperator<Tuple2<String, Integer>, Tuple2<String, Integer>> operator = transform.getOperator(); assertThat(operator).isInstanceOf(EvictingWindowOperator.class); EvictingWindowOperator<String, Tuple2<String, Integer>, ?, ?> winOperator = (EvictingWindowOperator<String, Tuple2<String, Integer>, ?, ?>) operator; assertThat(winOperator.getTrigger()).isInstanceOf(CountTrigger.class); assertThat(winOperator.getEvictor()).isInstanceOf(TimeEvictor.class); assertThat(winOperator.getWindowAssigner()).isInstanceOf(TumblingEventTimeWindows.class); assertThat(winOperator.getStateDescriptor()).isInstanceOf(ListStateDescriptor.class); processElementAndEnsureOutput( winOperator, winOperator.getKeySelector(), BasicTypeInfo.STRING_TYPE_INFO, new Tuple2<>("hello", 1)); } /** * Ensure that we get some output from the given 
operator when pushing in an element and setting * watermark and processing time to {@code Long.MAX_VALUE}. */ private static <K, IN, OUT> void processElementAndEnsureOutput( OneInputStreamOperator<IN, OUT> operator, KeySelector<IN, K> keySelector, TypeInformation<K> keyType, IN element) throws Exception { boolean enableAsyncState = ((AbstractStreamOperator<?>) operator).isAsyncKeyOrderedProcessingEnabled(); KeyedOneInputStreamOperatorTestHarness<K, IN, OUT> testHarness = enableAsyncState ? AsyncKeyedOneInputStreamOperatorTestHarness.create( operator, keySelector, keyType) : new KeyedOneInputStreamOperatorTestHarness<>( operator, keySelector, keyType); if (operator instanceof OutputTypeConfigurable) { // use a dummy type since window functions just need the ExecutionConfig // this is also only needed for Fold, which we're getting rid off soon. ((OutputTypeConfigurable) operator) .setOutputType(BasicTypeInfo.STRING_TYPE_INFO, new ExecutionConfig()); } testHarness.open(); testHarness.setProcessingTime(0); testHarness.processWatermark(Long.MIN_VALUE); testHarness.processElement(new StreamRecord<>(element, 0)); // provoke any processing-time/event-time triggers testHarness.setProcessingTime(Long.MAX_VALUE); testHarness.processWatermark(Long.MAX_VALUE); // we at least get the two watermarks and should also see an output element assertThat(testHarness.getOutput()).hasSizeGreaterThanOrEqualTo(3); testHarness.close(); } // ------------------------------------------------------------------------ // UDFs // ------------------------------------------------------------------------ private static
WindowTranslationTest
java
alibaba__nacos
naming/src/main/java/com/alibaba/nacos/naming/remote/rpc/handler/PersistentInstanceRequestHandler.java
{ "start": 2216, "end": 5188 }
class ____ extends RequestHandler<PersistentInstanceRequest, InstanceResponse> { private final PersistentClientOperationServiceImpl clientOperationService; public PersistentInstanceRequestHandler(PersistentClientOperationServiceImpl clientOperationService) { this.clientOperationService = clientOperationService; } @Override @NamespaceValidation @TpsControl(pointName = "RemoteNamingInstanceRegisterDeregister", name = "RemoteNamingInstanceRegisterDeregister") @Secured(action = ActionTypes.WRITE) @ExtractorManager.Extractor(rpcExtractor = PersistentInstanceRequestParamExtractor.class) public InstanceResponse handle(PersistentInstanceRequest request, RequestMeta meta) throws NacosException { Service service = Service.newService(request.getNamespace(), request.getGroupName(), request.getServiceName(), false); InstanceUtil.setInstanceIdIfEmpty(request.getInstance(), service.getGroupedServiceName()); switch (request.getType()) { case NamingRemoteConstants.REGISTER_INSTANCE: return registerInstance(service, request, meta); case NamingRemoteConstants.DE_REGISTER_INSTANCE: return deregisterInstance(service, request, meta); default: throw new NacosException(NacosException.INVALID_PARAM, String.format("Unsupported request type %s", request.getType())); } } private InstanceResponse registerInstance(Service service, PersistentInstanceRequest request, RequestMeta meta) { Instance instance = request.getInstance(); String clientId = IpPortBasedClient.getClientId(instance.toInetAddr(), false); clientOperationService.registerInstance(service, instance, clientId); NotifyCenter.publishEvent(new RegisterInstanceTraceEvent(System.currentTimeMillis(), NamingRequestUtil.getSourceIpForGrpcRequest(meta), true, service.getNamespace(), service.getGroup(), service.getName(), instance.getIp(), instance.getPort())); return new InstanceResponse(NamingRemoteConstants.REGISTER_INSTANCE); } private InstanceResponse deregisterInstance(Service service, PersistentInstanceRequest request, RequestMeta 
meta) { Instance instance = request.getInstance(); String clientId = IpPortBasedClient.getClientId(instance.toInetAddr(), false); clientOperationService.deregisterInstance(service, instance, clientId); NotifyCenter.publishEvent(new DeregisterInstanceTraceEvent(System.currentTimeMillis(), NamingRequestUtil.getSourceIpForGrpcRequest(meta), true, DeregisterInstanceReason.REQUEST, service.getNamespace(), service.getGroup(), service.getName(), instance.getIp(), instance.getPort())); return new InstanceResponse(NamingRemoteConstants.DE_REGISTER_INSTANCE); } }
PersistentInstanceRequestHandler
java
redisson__redisson
redisson/src/main/java/org/redisson/api/redisnode/RedisClusterMasterAsync.java
{ "start": 738, "end": 806 }
interface ____ extends RedisClusterNodeAsync { }
RedisClusterMasterAsync
java
spring-projects__spring-security
config/src/main/java/org/springframework/security/config/http/HttpSecurityBeanDefinitionParser.java
{ "start": 22297, "end": 23913 }
class ____ implements FactoryBean<AuthenticationManager> { private final ProviderManager delegate; private AuthenticationEventPublisher authenticationEventPublisher = new DefaultAuthenticationEventPublisher(); private boolean eraseCredentialsAfterAuthentication = true; private ObservationRegistry observationRegistry = ObservationRegistry.NOOP; public ChildAuthenticationManagerFactoryBean(List<AuthenticationProvider> providers, AuthenticationManager parent) { this.delegate = new ProviderManager(providers, parent); } @Override public AuthenticationManager getObject() throws Exception { this.delegate.setAuthenticationEventPublisher(this.authenticationEventPublisher); this.delegate.setEraseCredentialsAfterAuthentication(this.eraseCredentialsAfterAuthentication); if (!this.observationRegistry.isNoop()) { return new ObservationAuthenticationManager(this.observationRegistry, this.delegate); } return this.delegate; } @Override public Class<?> getObjectType() { return AuthenticationManager.class; } public void setEraseCredentialsAfterAuthentication(boolean eraseCredentialsAfterAuthentication) { this.eraseCredentialsAfterAuthentication = eraseCredentialsAfterAuthentication; } public void setAuthenticationEventPublisher(AuthenticationEventPublisher authenticationEventPublisher) { this.authenticationEventPublisher = authenticationEventPublisher; } public void setObservationRegistry(ObservationRegistry observationRegistry) { this.observationRegistry = observationRegistry; } } static
ChildAuthenticationManagerFactoryBean
java
apache__hadoop
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
{ "start": 46821, "end": 49755 }
class ____ implements Runnable { private final long tid; private final boolean run; private final ThreadMXBean tmxb; private final ClusterMetrics clusterMetrics = ClusterMetrics.getMetrics(); private final int samples; EventProcessorMonitor(long id, int samplesPerMin) { assert samplesPerMin > 0; this.tid = id; this.samples = samplesPerMin; this.tmxb = ManagementFactory.getThreadMXBean(); if (clusterMetrics != null && tmxb != null && tmxb.isThreadCpuTimeSupported()) { this.run = true; clusterMetrics.setRmEventProcMonitorEnable(true); } else { this.run = false; } } public void run() { int index = 0; long[] values = new long[samples]; int sleepMs = (60 * 1000) / samples; while (run && !isStopped() && !Thread.currentThread().isInterrupted()) { try { long cpuBefore = tmxb.getThreadCpuTime(tid); long wallClockBefore = Time.monotonicNow(); Thread.sleep(sleepMs); long wallClockDelta = Time.monotonicNow() - wallClockBefore; long cpuDelta = tmxb.getThreadCpuTime(tid) - cpuBefore; // Nanoseconds / Milliseconds = usec per second values[index] = cpuDelta / wallClockDelta; index = (index + 1) % samples; long max = 0; long sum = 0; for (int i = 0; i < samples; i++) { sum += values[i]; max = Math.max(max, values[i]); } clusterMetrics.setRmEventProcCPUAvg(sum / samples); clusterMetrics.setRmEventProcCPUMax(max); } catch (InterruptedException e) { LOG.error("Returning, interrupted : " + e); return; } } } } @Override protected void serviceStart() throws Exception { super.serviceStart(); this.eventProcessorMonitor.start(); } @Override protected void serviceStop() throws Exception { super.serviceStop(); this.eventProcessorMonitor.interrupt(); try { this.eventProcessorMonitor.join(); } catch (InterruptedException e) { throw new YarnRuntimeException(e); } } } /** * Transition to standby state in a new thread. The transition operation is * asynchronous to avoid deadlock caused by cyclic dependency. 
*/ private synchronized void handleTransitionToStandByInNewThread() { if (rmContext.getHAServiceState() == HAServiceProtocol.HAServiceState.STANDBY) { LOG.info("RM already in standby state"); return; } Thread standByTransitionThread = new SubjectInheritingThread(activeServices.standByTransitionRunnable); standByTransitionThread.setName("StandByTransitionThread"); standByTransitionThread.start(); } /** * The
EventProcessorMonitor
java
hibernate__hibernate-orm
hibernate-core/src/main/java/org/hibernate/event/monitor/spi/EventMonitor.java
{ "start": 1277, "end": 6581 }
interface ____ { DiagnosticEvent beginSessionOpenEvent(); void completeSessionOpenEvent( DiagnosticEvent sessionOpenEvent, SharedSessionContractImplementor session); DiagnosticEvent beginSessionClosedEvent(); void completeSessionClosedEvent( DiagnosticEvent sessionClosedEvent, SharedSessionContractImplementor session); DiagnosticEvent beginJdbcConnectionAcquisitionEvent(); void completeJdbcConnectionAcquisitionEvent( DiagnosticEvent jdbcConnectionAcquisitionEvent, SharedSessionContractImplementor session, Object tenantId); DiagnosticEvent beginJdbcConnectionReleaseEvent(); void completeJdbcConnectionReleaseEvent( DiagnosticEvent jdbcConnectionReleaseEvent, SharedSessionContractImplementor session, Object tenantId); DiagnosticEvent beginJdbcPreparedStatementCreationEvent(); void completeJdbcPreparedStatementCreationEvent( DiagnosticEvent jdbcPreparedStatementCreation, String preparedStatementSql); DiagnosticEvent beginJdbcPreparedStatementExecutionEvent(); void completeJdbcPreparedStatementExecutionEvent( DiagnosticEvent jdbcPreparedStatementExecutionEvent, String preparedStatementSql); DiagnosticEvent beginJdbcBatchExecutionEvent(); void completeJdbcBatchExecutionEvent( DiagnosticEvent jdbcBatchExecutionEvent, String statementSql); DiagnosticEvent beginCachePutEvent(); void completeCachePutEvent( DiagnosticEvent cachePutEvent, SharedSessionContractImplementor session, Region region, boolean cacheContentChanged, CacheActionDescription description); void completeCachePutEvent( DiagnosticEvent cachePutEvent, SharedSessionContractImplementor session, CachedDomainDataAccess cachedDomainDataAccess, EntityPersister persister, boolean cacheContentChanged, CacheActionDescription description); void completeCachePutEvent( DiagnosticEvent cachePutEvent, SharedSessionContractImplementor session, CachedDomainDataAccess cachedDomainDataAccess, EntityPersister persister, boolean cacheContentChanged, boolean isNatualId, CacheActionDescription description); void 
completeCachePutEvent( DiagnosticEvent cachePutEvent, SharedSessionContractImplementor session, CachedDomainDataAccess cachedDomainDataAccess, CollectionPersister persister, boolean cacheContentChanged, CacheActionDescription description); DiagnosticEvent beginCacheGetEvent(); void completeCacheGetEvent( DiagnosticEvent cacheGetEvent, SharedSessionContractImplementor session, Region region, boolean hit); void completeCacheGetEvent( DiagnosticEvent cacheGetEvent, SharedSessionContractImplementor session, Region region, EntityPersister persister, boolean isNaturalKey, boolean hit); void completeCacheGetEvent( DiagnosticEvent cacheGetEvent, SharedSessionContractImplementor session, Region region, CollectionPersister persister, boolean hit); DiagnosticEvent beginFlushEvent(); void completeFlushEvent( DiagnosticEvent flushEvent, org.hibernate.event.spi.FlushEvent event); void completeFlushEvent( DiagnosticEvent flushEvent, org.hibernate.event.spi.FlushEvent event, boolean autoFlush); DiagnosticEvent beginPartialFlushEvent(); void completePartialFlushEvent( DiagnosticEvent flushEvent, AutoFlushEvent event); DiagnosticEvent beginDirtyCalculationEvent(); void completeDirtyCalculationEvent( DiagnosticEvent dirtyCalculationEvent, SharedSessionContractImplementor session, EntityPersister persister, EntityEntry entry, int[] dirtyProperties); DiagnosticEvent beginPrePartialFlush(); void completePrePartialFlush( DiagnosticEvent prePartialFlush, SharedSessionContractImplementor session ); DiagnosticEvent beginEntityInsertEvent(); void completeEntityInsertEvent(DiagnosticEvent event, Object id, String entityName, boolean success, SharedSessionContractImplementor session); DiagnosticEvent beginEntityUpdateEvent(); void completeEntityUpdateEvent(DiagnosticEvent event, Object id, String entityName, boolean success, SharedSessionContractImplementor session); DiagnosticEvent beginEntityUpsertEvent(); void completeEntityUpsertEvent(DiagnosticEvent event, Object id, String entityName, 
boolean success, SharedSessionContractImplementor session); DiagnosticEvent beginEntityDeleteEvent(); void completeEntityDeleteEvent(DiagnosticEvent event, Object id, String entityName, boolean success, SharedSessionContractImplementor session); DiagnosticEvent beginEntityLockEvent(); void completeEntityLockEvent(DiagnosticEvent event, Object id, String entityName, LockMode lockMode, boolean success, SharedSessionContractImplementor session); DiagnosticEvent beginCollectionRecreateEvent(); void completeCollectionRecreateEvent(DiagnosticEvent event, Object id, String role, boolean success, SharedSessionContractImplementor session); DiagnosticEvent beginCollectionUpdateEvent(); void completeCollectionUpdateEvent(DiagnosticEvent event, Object id, String role, boolean success, SharedSessionContractImplementor session); DiagnosticEvent beginCollectionRemoveEvent(); void completeCollectionRemoveEvent(DiagnosticEvent event, Object id, String role, boolean success, SharedSessionContractImplementor session);
EventMonitor
java
quarkusio__quarkus
extensions/hibernate-orm/deployment/src/test/java/io/quarkus/hibernate/orm/quoting_strategies/Group.java
{ "start": 473, "end": 999 }
class ____ { private Long id; private String name; private String value; @Id public Long getId() { return id; } public void setId(Long id) { this.id = id; } public String getName() { return name; } public void setName(String name) { this.name = name; } @Column(columnDefinition = "varchar(255)") public String getValue() { return value; } public void setValue(String value) { this.value = value; } }
Group
java
apache__camel
components/camel-micrometer/src/main/java/org/apache/camel/component/micrometer/json/MicrometerModule.java
{ "start": 6688, "end": 7216 }
class ____ extends MeterSerializer<AbstractTimer> { private final TimeUnit timeUnit; private TimerSerializer(TimeUnit timeUnit) { super(AbstractTimer.class); this.timeUnit = timeUnit; } @Override protected void serializeStatistics(AbstractTimer timer, JsonGenerator json, SerializerProvider provider) throws IOException { serializeSnapshot(json, timer.takeSnapshot(), timeUnit); } } private static final
TimerSerializer
java
micronaut-projects__micronaut-core
test-suite/src/test/java/io/micronaut/test/lombok/SimpleEntity.java
{ "start": 864, "end": 974 }
class ____ { String compartmentId; Long timeCreated; } }
CompartmentCreationTimeIndexPrefix
java
apache__camel
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/IgniteMessagingEndpointBuilderFactory.java
{ "start": 4681, "end": 10581 }
interface ____ extends EndpointConsumerBuilder { default IgniteMessagingEndpointConsumerBuilder basic() { return (IgniteMessagingEndpointConsumerBuilder) this; } /** * Allows for bridging the consumer to the Camel routing Error Handler, * which mean any exceptions (if possible) occurred while the Camel * consumer is trying to pickup incoming messages, or the likes, will * now be processed as a message and handled by the routing Error * Handler. Important: This is only possible if the 3rd party component * allows Camel to be alerted if an exception was thrown. Some * components handle this internally only, and therefore * bridgeErrorHandler is not possible. In other situations we may * improve the Camel component to hook into the 3rd party component and * make this possible for future releases. By default the consumer will * use the org.apache.camel.spi.ExceptionHandler to deal with * exceptions, that will be logged at WARN or ERROR level and ignored. * * The option is a: <code>boolean</code> type. * * Default: false * Group: consumer (advanced) * * @param bridgeErrorHandler the value to set * @return the dsl builder */ default AdvancedIgniteMessagingEndpointConsumerBuilder bridgeErrorHandler(boolean bridgeErrorHandler) { doSetProperty("bridgeErrorHandler", bridgeErrorHandler); return this; } /** * Allows for bridging the consumer to the Camel routing Error Handler, * which mean any exceptions (if possible) occurred while the Camel * consumer is trying to pickup incoming messages, or the likes, will * now be processed as a message and handled by the routing Error * Handler. Important: This is only possible if the 3rd party component * allows Camel to be alerted if an exception was thrown. Some * components handle this internally only, and therefore * bridgeErrorHandler is not possible. In other situations we may * improve the Camel component to hook into the 3rd party component and * make this possible for future releases. 
By default the consumer will * use the org.apache.camel.spi.ExceptionHandler to deal with * exceptions, that will be logged at WARN or ERROR level and ignored. * * The option will be converted to a <code>boolean</code> type. * * Default: false * Group: consumer (advanced) * * @param bridgeErrorHandler the value to set * @return the dsl builder */ default AdvancedIgniteMessagingEndpointConsumerBuilder bridgeErrorHandler(String bridgeErrorHandler) { doSetProperty("bridgeErrorHandler", bridgeErrorHandler); return this; } /** * To let the consumer use a custom ExceptionHandler. Notice if the * option bridgeErrorHandler is enabled then this option is not in use. * By default the consumer will deal with exceptions, that will be * logged at WARN or ERROR level and ignored. * * The option is a: <code>org.apache.camel.spi.ExceptionHandler</code> * type. * * Group: consumer (advanced) * * @param exceptionHandler the value to set * @return the dsl builder */ default AdvancedIgniteMessagingEndpointConsumerBuilder exceptionHandler(org.apache.camel.spi.ExceptionHandler exceptionHandler) { doSetProperty("exceptionHandler", exceptionHandler); return this; } /** * To let the consumer use a custom ExceptionHandler. Notice if the * option bridgeErrorHandler is enabled then this option is not in use. * By default the consumer will deal with exceptions, that will be * logged at WARN or ERROR level and ignored. * * The option will be converted to a * <code>org.apache.camel.spi.ExceptionHandler</code> type. * * Group: consumer (advanced) * * @param exceptionHandler the value to set * @return the dsl builder */ default AdvancedIgniteMessagingEndpointConsumerBuilder exceptionHandler(String exceptionHandler) { doSetProperty("exceptionHandler", exceptionHandler); return this; } /** * Sets the exchange pattern when the consumer creates an exchange. * * The option is a: <code>org.apache.camel.ExchangePattern</code> type. 
* * Group: consumer (advanced) * * @param exchangePattern the value to set * @return the dsl builder */ default AdvancedIgniteMessagingEndpointConsumerBuilder exchangePattern(org.apache.camel.ExchangePattern exchangePattern) { doSetProperty("exchangePattern", exchangePattern); return this; } /** * Sets the exchange pattern when the consumer creates an exchange. * * The option will be converted to a * <code>org.apache.camel.ExchangePattern</code> type. * * Group: consumer (advanced) * * @param exchangePattern the value to set * @return the dsl builder */ default AdvancedIgniteMessagingEndpointConsumerBuilder exchangePattern(String exchangePattern) { doSetProperty("exchangePattern", exchangePattern); return this; } } /** * Builder for endpoint producers for the Ignite Messaging component. */ public
AdvancedIgniteMessagingEndpointConsumerBuilder
java
apache__spark
core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeInMemorySorter.java
{ "start": 9471, "end": 14029 }
class ____ extends UnsafeSorterIterator implements Cloneable { private final int numRecords; private int position; private int offset; private Object baseObject; private long baseOffset; private long keyPrefix; private int recordLength; private long currentPageNumber; private final TaskContext taskContext = TaskContext.get(); private SortedIterator(int numRecords, int offset) { this.numRecords = numRecords; this.position = 0; this.offset = offset; } @Override public SortedIterator clone() { SortedIterator iter = new SortedIterator(numRecords, offset); iter.position = position; iter.baseObject = baseObject; iter.baseOffset = baseOffset; iter.keyPrefix = keyPrefix; iter.recordLength = recordLength; iter.currentPageNumber = currentPageNumber; return iter; } @Override public int getNumRecords() { return numRecords; } @Override public boolean hasNext() { return position / 2 < numRecords; } @Override public void loadNext() { // Kill the task in case it has been marked as killed. This logic is from // InterruptibleIterator, but we inline it here instead of wrapping the iterator in order // to avoid performance overhead. This check is added here in `loadNext()` instead of in // `hasNext()` because it's technically possible for the caller to be relying on // `getNumRecords()` instead of `hasNext()` to know when to stop. 
if (taskContext != null) { taskContext.killTaskIfInterrupted(); } // This pointer points to a 4-byte record length, followed by the record's bytes final long recordPointer = array.get(offset + position); currentPageNumber = TaskMemoryManager.decodePageNumber(recordPointer); int uaoSize = UnsafeAlignedOffset.getUaoSize(); baseObject = memoryManager.getPage(recordPointer); // Skip over record length baseOffset = memoryManager.getOffsetInPage(recordPointer) + uaoSize; recordLength = UnsafeAlignedOffset.getSize(baseObject, baseOffset - uaoSize); keyPrefix = array.get(offset + position + 1); position += 2; } @Override public Object getBaseObject() { return baseObject; } @Override public long getBaseOffset() { return baseOffset; } @Override public long getCurrentPageNumber() { return currentPageNumber; } @Override public int getRecordLength() { return recordLength; } @Override public long getKeyPrefix() { return keyPrefix; } } /** * Return an iterator over record pointers in sorted order. For efficiency, all calls to * {@code next()} will return the same mutable object. */ public UnsafeSorterIterator getSortedIterator() { if (numRecords() == 0) { // `array` might be null, so make sure that it is not accessed by returning early. 
return new SortedIterator(0, 0); } int offset = 0; long start = System.nanoTime(); if (sortComparator != null) { if (this.radixSortSupport != null) { offset = RadixSort.sortKeyPrefixArray( array, nullBoundaryPos, (pos - nullBoundaryPos) / 2L, 0, 7, radixSortSupport.sortDescending(), radixSortSupport.sortSigned()); } else { MemoryBlock unused = new MemoryBlock( array.getBaseObject(), array.getBaseOffset() + pos * 8L, (array.size() - pos) * 8L); LongArray buffer = new LongArray(unused); Sorter<RecordPointerAndKeyPrefix, LongArray> sorter = new Sorter<>(new UnsafeSortDataFormat(buffer)); sorter.sort(array, 0, pos / 2, sortComparator); } } totalSortTimeNanos += System.nanoTime() - start; if (nullBoundaryPos > 0) { assert radixSortSupport != null : "Nulls are only stored separately with radix sort"; LinkedList<UnsafeSorterIterator> queue = new LinkedList<>(); // The null order is either LAST or FIRST, regardless of sorting direction (ASC|DESC) if (radixSortSupport.nullsFirst()) { queue.add(new SortedIterator(nullBoundaryPos / 2, 0)); queue.add(new SortedIterator((pos - nullBoundaryPos) / 2, offset)); } else { queue.add(new SortedIterator((pos - nullBoundaryPos) / 2, offset)); queue.add(new SortedIterator(nullBoundaryPos / 2, 0)); } return new UnsafeExternalSorter.ChainedIterator(queue); } else { return new SortedIterator(pos / 2, offset); } } }
SortedIterator
java
quarkusio__quarkus
core/deployment/src/main/java/io/quarkus/deployment/recording/RecorderContext.java
{ "start": 342, "end": 1011 }
interface ____ { /** * Registers a way to construct an object via a non-default constructor. Each object may only have at most one * non-default constructor registered * * @param constructor The constructor * @param parameters A function that maps the object to a list of constructor parameters * @param <T> The type of the object */ <T> void registerNonDefaultConstructor(Constructor<T> constructor, Function<T, List<Object>> parameters); /** * Registers a substitution to allow objects that are not serializable to bytecode to be substituted for an object * that is. * * @param from The
RecorderContext
java
apache__camel
dsl/camel-yaml-dsl/camel-yaml-dsl-deserializers/src/generated/java/org/apache/camel/dsl/yaml/deserializers/ModelDeserializers.java
{ "start": 742464, "end": 746635 }
class ____ extends YamlDeserializerBase<PolicyDefinition> { public PolicyDefinitionDeserializer() { super(PolicyDefinition.class); } @Override protected PolicyDefinition newInstance() { return new PolicyDefinition(); } @Override protected boolean setProperty(PolicyDefinition target, String propertyKey, String propertyName, Node node) { propertyKey = org.apache.camel.util.StringHelper.dashToCamelCase(propertyKey); switch(propertyKey) { case "disabled": { String val = asText(node); target.setDisabled(val); break; } case "ref": { String val = asText(node); target.setRef(val); break; } case "id": { String val = asText(node); target.setId(val); break; } case "description": { String val = asText(node); target.setDescription(val); break; } case "note": { String val = asText(node); target.setNote(val); break; } case "steps": { setSteps(target, node); break; } default: { return false; } } return true; } } @YamlType( nodes = "poll", inline = true, types = org.apache.camel.model.PollDefinition.class, order = org.apache.camel.dsl.yaml.common.YamlDeserializerResolver.ORDER_LOWEST - 1, displayName = "Poll", description = "Polls a message from a static endpoint", deprecated = false, properties = { @YamlProperty(name = "description", type = "string", description = "Sets the description of this node", displayName = "Description"), @YamlProperty(name = "disabled", type = "boolean", defaultValue = "false", description = "Disables this EIP from the route.", displayName = "Disabled"), @YamlProperty(name = "id", type = "string", description = "Sets the id of this node", displayName = "Id"), @YamlProperty(name = "note", type = "string", description = "Sets the note of this node", displayName = "Note"), @YamlProperty(name = "parameters", type = "object"), @YamlProperty(name = "timeout", type = "string", defaultValue = "20000", description = "Timeout in millis when polling from the external service. The timeout has influence about the poll enrich behavior. 
It basically operations in three different modes: negative value - Waits until a message is available and then returns it. Warning that this method could block indefinitely if no messages are available. 0 - Attempts to receive a message exchange immediately without waiting and returning null if a message exchange is not available yet. positive value - Attempts to receive a message exchange, waiting up to the given timeout to expire if a message is not yet available. Returns null if timed out The default value is 20000 (20 seconds).", displayName = "Timeout"), @YamlProperty(name = "uri", type = "string", required = true, description = "Sets the uri of the endpoint to poll from.", displayName = "Uri"), @YamlProperty(name = "variableReceive", type = "string", description = "To use a variable to store the received message body (only body, not headers). This makes it handy to use variables for user data and to easily control what data to use for sending and receiving. Important: When using receive variable then the received body is stored only in this variable and not on the current message.", displayName = "Variable Receive") } ) public static
PolicyDefinitionDeserializer
java
ReactiveX__RxJava
src/test/java/io/reactivex/rxjava3/internal/operators/maybe/MaybeFlatMapSingleElementTest.java
{ "start": 883, "end": 5111 }
class ____ extends RxJavaTest { @Test public void flatMapSingleValue() { Maybe.just(1).flatMapSingle(new Function<Integer, SingleSource<Integer>>() { @Override public SingleSource<Integer> apply(final Integer integer) throws Exception { if (integer == 1) { return Single.just(2); } return Single.just(1); } }) .test() .assertResult(2); } @Test public void flatMapSingleValueDifferentType() { Maybe.just(1).flatMapSingle(new Function<Integer, SingleSource<String>>() { @Override public SingleSource<String> apply(final Integer integer) throws Exception { if (integer == 1) { return Single.just("2"); } return Single.just("1"); } }) .test() .assertResult("2"); } @Test public void flatMapSingleValueNull() { Maybe.just(1).flatMapSingle(new Function<Integer, SingleSource<Integer>>() { @Override public SingleSource<Integer> apply(final Integer integer) throws Exception { return null; } }) .to(TestHelper.<Integer>testConsumer()) .assertNoValues() .assertError(NullPointerException.class) .assertErrorMessage("The mapper returned a null SingleSource"); } @Test public void flatMapSingleValueErrorThrown() { Maybe.just(1).flatMapSingle(new Function<Integer, SingleSource<Integer>>() { @Override public SingleSource<Integer> apply(final Integer integer) throws Exception { throw new RuntimeException("something went terribly wrong!"); } }) .to(TestHelper.<Integer>testConsumer()) .assertNoValues() .assertError(RuntimeException.class) .assertErrorMessage("something went terribly wrong!"); } @Test public void flatMapSingleError() { RuntimeException exception = new RuntimeException("test"); Maybe.error(exception).flatMapSingle(new Function<Object, SingleSource<Object>>() { @Override public SingleSource<Object> apply(final Object integer) throws Exception { return Single.just(new Object()); } }) .test() .assertError(exception); } @Test public void flatMapSingleEmpty() { Maybe.<Integer>empty().flatMapSingle(new Function<Integer, SingleSource<Integer>>() { @Override public SingleSource<Integer> 
apply(final Integer integer) throws Exception { return Single.just(2); } }) .test() .assertNoValues() .assertResult(); } @Test public void dispose() { TestHelper.checkDisposed(Maybe.just(1).flatMapSingle(new Function<Integer, SingleSource<Integer>>() { @Override public SingleSource<Integer> apply(final Integer integer) throws Exception { return Single.just(2); } })); } @Test public void doubleOnSubscribe() { TestHelper.checkDoubleOnSubscribeMaybe(new Function<Maybe<Integer>, Maybe<Integer>>() { @Override public Maybe<Integer> apply(Maybe<Integer> m) throws Exception { return m.flatMapSingle(new Function<Integer, SingleSource<Integer>>() { @Override public SingleSource<Integer> apply(final Integer integer) throws Exception { return Single.just(2); } }); } }); } @Test public void singleErrors() { Maybe.just(1) .flatMapSingle(new Function<Integer, SingleSource<Integer>>() { @Override public SingleSource<Integer> apply(final Integer integer) throws Exception { return Single.error(new TestException()); } }) .test() .assertFailure(TestException.class); } }
MaybeFlatMapSingleElementTest
java
elastic__elasticsearch
server/src/main/java/org/elasticsearch/index/codec/perfield/XPerFieldDocValuesFormat.java
{ "start": 1739, "end": 3110 }
class ____ extends DocValuesFormat { /** Name of this {@link DocValuesFormat}. */ public static final String PER_FIELD_NAME = "ESPerFieldDV819"; /** {@link FieldInfo} attribute name used to store the format name for each field. */ // FORK note: usage of PerFieldDocValuesFormat is needed for bwc purposes. // (Otherwise, we load no fields from indices that use PerFieldDocValuesFormat) public static final String PER_FIELD_FORMAT_KEY = PerFieldDocValuesFormat.class.getSimpleName() + ".format"; /** {@link FieldInfo} attribute name used to store the segment suffix name for each field. */ // FORK note: usage of PerFieldDocValuesFormat is needed for bwc purposes. public static final String PER_FIELD_SUFFIX_KEY = PerFieldDocValuesFormat.class.getSimpleName() + ".suffix"; /** Sole constructor. */ protected XPerFieldDocValuesFormat() { super(PER_FIELD_NAME); } @Override public final DocValuesConsumer fieldsConsumer(SegmentWriteState state) throws IOException { return new FieldsWriter(state); } record ConsumerAndSuffix(DocValuesConsumer consumer, int suffix) implements Closeable { @Override public void close() throws IOException { consumer.close(); } } @SuppressForbidden(reason = "forked from Lucene") private
XPerFieldDocValuesFormat
java
assertj__assertj-core
assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/internal/short2darrays/Short2DArrays_assertNumberOfRows_Test.java
{ "start": 752, "end": 1030 }
class ____ extends Short2DArraysBaseTest { @Test void should_delegate_to_Arrays2D() { // WHEN short2DArrays.assertNumberOfRows(info, actual, 2); // THEN verify(arrays2d).assertNumberOfRows(info, failures, actual, 2); } }
Short2DArrays_assertNumberOfRows_Test
java
ReactiveX__RxJava
src/main/java/io/reactivex/rxjava3/internal/operators/completable/CompletablePeek.java
{ "start": 955, "end": 2062 }
class ____ extends Completable { final CompletableSource source; final Consumer<? super Disposable> onSubscribe; final Consumer<? super Throwable> onError; final Action onComplete; final Action onTerminate; final Action onAfterTerminate; final Action onDispose; public CompletablePeek(CompletableSource source, Consumer<? super Disposable> onSubscribe, Consumer<? super Throwable> onError, Action onComplete, Action onTerminate, Action onAfterTerminate, Action onDispose) { this.source = source; this.onSubscribe = onSubscribe; this.onError = onError; this.onComplete = onComplete; this.onTerminate = onTerminate; this.onAfterTerminate = onAfterTerminate; this.onDispose = onDispose; } @Override protected void subscribeActual(final CompletableObserver observer) { source.subscribe(new CompletableObserverImplementation(observer)); } final
CompletablePeek
java
apache__camel
core/camel-core/src/test/java/org/apache/camel/component/bean/BeanNoCacheTest.java
{ "start": 1872, "end": 2177 }
class ____ { private final int count; public MyCoolBean() { count = COUNTER.incrementAndGet(); } public int getCount() { return count; } public String doSomething(String s) { return s + count; } } }
MyCoolBean
java
spring-projects__spring-boot
module/spring-boot-web-server/src/test/java/org/springframework/boot/web/server/context/WebServerApplicationContextTests.java
{ "start": 1000, "end": 1905 }
class ____ { @Test void hasServerNamespaceWhenContextIsNotWebServerApplicationContextReturnsFalse() { ApplicationContext context = mock(ApplicationContext.class); assertThat(WebServerApplicationContext.hasServerNamespace(context, "test")).isFalse(); } @Test void hasServerNamespaceWhenContextIsWebServerApplicationContextAndNamespaceDoesNotMatchReturnsFalse() { ApplicationContext context = mock(WebServerApplicationContext.class); assertThat(WebServerApplicationContext.hasServerNamespace(context, "test")).isFalse(); } @Test void hasServerNamespaceWhenContextIsWebServerApplicationContextAndNamespaceMatchesReturnsTrue() { WebServerApplicationContext context = mock(WebServerApplicationContext.class); given(context.getServerNamespace()).willReturn("test"); assertThat(WebServerApplicationContext.hasServerNamespace(context, "test")).isTrue(); } }
WebServerApplicationContextTests
java
quarkusio__quarkus
extensions/qute/deployment/src/test/java/io/quarkus/qute/deployment/devmode/SourceRoute.java
{ "start": 214, "end": 547 }
class ____ { @Inject Template test; @Route(path = "test") public void test(RoutingContext ctx) { URI source = test.getSource().orElse(null); if (source == null) { ctx.response().setStatusCode(500).end(); } else { ctx.end(source.toString()); } } }
SourceRoute
java
apache__camel
components/camel-pqc/src/test/java/org/apache/camel/component/pqc/HashicorpVaultKeyLifecycleIT.java
{ "start": 2219, "end": 11230 }
class ____ extends CamelTestSupport { @RegisterExtension public static HashicorpVaultService service = HashicorpServiceFactory.createService(); private HashicorpVaultKeyLifecycleManager keyManager; @EndpointInject("mock:signed") private MockEndpoint mockSigned; @EndpointInject("mock:verified") private MockEndpoint mockVerified; @BeforeAll public static void startup() { if (Security.getProvider(BouncyCastleProvider.PROVIDER_NAME) == null) { Security.addProvider(new BouncyCastleProvider()); } if (Security.getProvider(BouncyCastlePQCProvider.PROVIDER_NAME) == null) { Security.addProvider(new BouncyCastlePQCProvider()); } } @Override protected CamelContext createCamelContext() throws Exception { CamelContext context = super.createCamelContext(); // Create HashicorpVaultKeyLifecycleManager using Vault test infrastructure keyManager = new HashicorpVaultKeyLifecycleManager( service.host(), service.port(), "http", // Test container uses http service.token(), "secret", "pqc/test-keys"); // Register the manager in the registry context.getRegistry().bind("keyLifecycleManager", keyManager); return context; } @Test public void testGenerateAndStoreKeyInVault() throws Exception { // Generate a Dilithium key KeyPair keyPair = keyManager.generateKeyPair("DILITHIUM", "test-dilithium-key", DilithiumParameterSpec.dilithium2); assertNotNull(keyPair); assertNotNull(keyPair.getPublic()); assertNotNull(keyPair.getPrivate()); // Verify metadata was created KeyMetadata metadata = keyManager.getKeyMetadata("test-dilithium-key"); assertNotNull(metadata); assertEquals("test-dilithium-key", metadata.getKeyId()); assertEquals("DILITHIUM", metadata.getAlgorithm()); assertEquals(KeyMetadata.KeyStatus.ACTIVE, metadata.getStatus()); } @Test public void testRetrieveKeyFromVault() throws Exception { // Generate and store key keyManager.generateKeyPair("FALCON", "test-falcon-key", FalconParameterSpec.falcon_512); // Clear cache to force Vault read // (In production this would simulate a different 
process/server accessing the key) // Retrieve key from Vault KeyPair retrieved = keyManager.getKey("test-falcon-key"); assertNotNull(retrieved); assertNotNull(retrieved.getPublic()); assertNotNull(retrieved.getPrivate()); // Verify metadata KeyMetadata metadata = keyManager.getKeyMetadata("test-falcon-key"); assertEquals("FALCON", metadata.getAlgorithm()); } @Test public void testKeyRotation() throws Exception { // Generate initial key keyManager.generateKeyPair("DILITHIUM", "rotation-key-old", DilithiumParameterSpec.dilithium2); KeyMetadata oldMetadata = keyManager.getKeyMetadata("rotation-key-old"); assertEquals(KeyMetadata.KeyStatus.ACTIVE, oldMetadata.getStatus()); // Rotate the key KeyPair newKeyPair = keyManager.rotateKey("rotation-key-old", "rotation-key-new", "DILITHIUM"); assertNotNull(newKeyPair); // Verify old key is deprecated oldMetadata = keyManager.getKeyMetadata("rotation-key-old"); assertEquals(KeyMetadata.KeyStatus.DEPRECATED, oldMetadata.getStatus()); // Verify new key is active KeyMetadata newMetadata = keyManager.getKeyMetadata("rotation-key-new"); assertEquals(KeyMetadata.KeyStatus.ACTIVE, newMetadata.getStatus()); } @Test public void testNeedsRotation() throws Exception { keyManager.generateKeyPair("DILITHIUM", "rotation-check-key", DilithiumParameterSpec.dilithium2); // New key should not need rotation assertFalse(keyManager.needsRotation("rotation-check-key", Duration.ofDays(90), 10000)); // Simulate old key by setting next rotation time in the past KeyMetadata metadata = keyManager.getKeyMetadata("rotation-check-key"); metadata.setNextRotationAt(java.time.Instant.now().minusSeconds(1)); keyManager.updateKeyMetadata("rotation-check-key", metadata); // Now it should need rotation assertTrue(keyManager.needsRotation("rotation-check-key", Duration.ofDays(90), 10000)); } @Test public void testListKeys() throws Exception { // Generate multiple keys keyManager.generateKeyPair("DILITHIUM", "list-key-1", DilithiumParameterSpec.dilithium2); 
keyManager.generateKeyPair("FALCON", "list-key-2", FalconParameterSpec.falcon_512); keyManager.generateKeyPair("DILITHIUM", "list-key-3", DilithiumParameterSpec.dilithium3); // List all keys List<KeyMetadata> keys = keyManager.listKeys(); assertTrue(keys.size() >= 3, "Should have at least 3 keys"); // Verify all our keys are present assertTrue(keys.stream().anyMatch(k -> k.getKeyId().equals("list-key-1"))); assertTrue(keys.stream().anyMatch(k -> k.getKeyId().equals("list-key-2"))); assertTrue(keys.stream().anyMatch(k -> k.getKeyId().equals("list-key-3"))); } @Test public void testExpireAndRevokeKey() throws Exception { // Test expiration keyManager.generateKeyPair("DILITHIUM", "expire-key", DilithiumParameterSpec.dilithium2); keyManager.expireKey("expire-key"); KeyMetadata expiredMetadata = keyManager.getKeyMetadata("expire-key"); assertEquals(KeyMetadata.KeyStatus.EXPIRED, expiredMetadata.getStatus()); // Test revocation keyManager.generateKeyPair("DILITHIUM", "revoke-key", DilithiumParameterSpec.dilithium2); keyManager.revokeKey("revoke-key", "Key compromised in test"); KeyMetadata revokedMetadata = keyManager.getKeyMetadata("revoke-key"); assertEquals(KeyMetadata.KeyStatus.REVOKED, revokedMetadata.getStatus()); assertTrue(revokedMetadata.getDescription().contains("Revoked: Key compromised in test")); } @Test public void testDeleteKey() throws Exception { keyManager.generateKeyPair("DILITHIUM", "delete-key", DilithiumParameterSpec.dilithium2); assertNotNull(keyManager.getKey("delete-key")); keyManager.deleteKey("delete-key"); // Should throw exception when trying to get deleted key assertThrows(IllegalArgumentException.class, () -> keyManager.getKey("delete-key")); } @Test public void testExportAndImportKey() throws Exception { KeyPair keyPair = keyManager.generateKeyPair("DILITHIUM", "export-key", DilithiumParameterSpec.dilithium2); // Export public key as PEM byte[] exported = keyManager.exportPublicKey(keyPair, KeyLifecycleManager.KeyFormat.PEM); 
assertNotNull(exported); assertTrue(exported.length > 0); String pemString = new String(exported); assertTrue(pemString.contains("-----BEGIN PUBLIC KEY-----")); assertTrue(pemString.contains("-----END PUBLIC KEY-----")); // Import the key KeyPair imported = keyManager.importKey(exported, KeyLifecycleManager.KeyFormat.PEM, "DILITHIUM"); assertNotNull(imported); assertNotNull(imported.getPublic()); } @Test public void testMetadataTracking() throws Exception { // Generate key keyManager.generateKeyPair("DILITHIUM", "tracking-key", DilithiumParameterSpec.dilithium2); // Get initial metadata KeyMetadata metadata = keyManager.getKeyMetadata("tracking-key"); assertEquals(0, metadata.getUsageCount()); assertEquals(KeyMetadata.KeyStatus.ACTIVE, metadata.getStatus()); // Simulate usage by updating metadata for (int i = 0; i < 5; i++) { metadata.updateLastUsed(); } keyManager.updateKeyMetadata("tracking-key", metadata); // Verify usage was tracked metadata = keyManager.getKeyMetadata("tracking-key"); assertEquals(5, metadata.getUsageCount()); assertNotNull(metadata.getLastUsedAt()); // Verify age calculation long ageInDays = metadata.getAgeInDays(); assertEquals(0, ageInDays); // Should be 0 for a newly created key } @Override protected RouteBuilder createRouteBuilder() { return new RouteBuilder() { @Override public void configure() { // Signing route using PQC component with Vault-stored key from("direct:sign") .to("pqc:sign?operation=sign&signatureAlgorithm=DILITHIUM") .to("mock:signed") .to("pqc:verify?operation=verify&signatureAlgorithm=DILITHIUM") .to("mock:verified"); } }; } }
HashicorpVaultKeyLifecycleIT
java
apache__flink
flink-runtime/src/main/java/org/apache/flink/runtime/operators/coordination/ComponentClosingUtils.java
{ "start": 1399, "end": 7778 }
class ____ { private static Clock clock = SystemClock.getInstance(); /** Utility class, not meant to be instantiated. */ private ComponentClosingUtils() {} /** * Close a component with a timeout. * * @param componentName the name of the component. * @param closingSequence the closing logic which is a callable that can throw exceptions. * @param closeTimeout the timeout to wait for the component to close. * @return An optional throwable which is non-empty if an error occurred when closing the * component. */ public static CompletableFuture<Void> closeAsyncWithTimeout( String componentName, Runnable closingSequence, Duration closeTimeout) { return closeAsyncWithTimeout( componentName, (ThrowingRunnable<Exception>) closingSequence::run, closeTimeout); } /** * Close a component with a timeout. * * @param componentName the name of the component. * @param closingSequence the closing logic. * @param closeTimeout the timeout to wait for the component to close. * @return An optional throwable which is non-empty if an error occurred when closing the * component. */ public static CompletableFuture<Void> closeAsyncWithTimeout( String componentName, ThrowingRunnable<Exception> closingSequence, Duration closeTimeout) { final CompletableFuture<Void> future = new CompletableFuture<>(); // Start a dedicate thread to close the component. 
final Thread t = new Thread( () -> { try { closingSequence.run(); future.complete(null); } catch (Throwable error) { future.completeExceptionally(error); } }); t.start(); // if the future fails due to a timeout, we interrupt the thread future.exceptionally( (error) -> { if (error instanceof TimeoutException && t.isAlive()) { abortThread(t); } return null; }); FutureUtils.orTimeout( future, closeTimeout.toMillis(), TimeUnit.MILLISECONDS, String.format( "Failed to close the %s before timeout of %d ms", componentName, closeTimeout.toMillis())); return future; } /** * A util method that tries to shut down an {@link ExecutorService} elegantly within the given * timeout. If the executor has not been shut down before it hits timeout or the thread is * interrupted when waiting for the termination, a forceful shutdown will be attempted on the * executor. * * @param executor the {@link ExecutorService} to shut down. * @param timeout the timeout duration. * @return true if the given executor has been successfully closed, false otherwise. */ @SuppressWarnings("ResultOfMethodCallIgnored") public static boolean tryShutdownExecutorElegantly(ExecutorService executor, Duration timeout) { try { executor.shutdown(); executor.awaitTermination(timeout.toMillis(), TimeUnit.MILLISECONDS); } catch (InterruptedException ie) { // Let it go. } if (!executor.isTerminated()) { shutdownExecutorForcefully(executor, Duration.ZERO, false); } return executor.isTerminated(); } /** * Shutdown the given executor forcefully within the given timeout. The method returns if it is * interrupted. * * @param executor the executor to shut down. * @param timeout the timeout duration. * @return true if the given executor is terminated, false otherwise. */ public static boolean shutdownExecutorForcefully(ExecutorService executor, Duration timeout) { return shutdownExecutorForcefully(executor, timeout, true); } /** * Shutdown the given executor forcefully within the given timeout. 
* * @param executor the executor to shut down. * @param timeout the timeout duration. * @param interruptable when set to true, the method can be interrupted. Each interruption to * the thread results in another {@code ExecutorService.shutdownNow()} call to the shutting * down executor. * @return true if the given executor is terminated, false otherwise. */ public static boolean shutdownExecutorForcefully( ExecutorService executor, Duration timeout, boolean interruptable) { Deadline deadline = Deadline.fromNowWithClock(timeout, clock); boolean isInterrupted = false; do { executor.shutdownNow(); try { executor.awaitTermination(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS); } catch (InterruptedException e) { isInterrupted = interruptable; } } while (!isInterrupted && deadline.hasTimeLeft() && !executor.isTerminated()); return executor.isTerminated(); } private static void abortThread(Thread t) { // Try our best here to ensure the thread is aborted. Keep interrupting the // thread for 10 times with 10 ms intervals. This helps handle the case // where the shutdown sequence consists of a bunch of closeQuietly() calls // that will swallow the InterruptedException so the thread to be aborted // may block multiple times. If the thread is still alive after all the // attempts, just let it go. The caller of closeAsyncWithTimeout() should // have received a TimeoutException in this case. int i = 0; while (t.isAlive() && i < 10) { t.interrupt(); i++; try { Thread.sleep(10); } catch (InterruptedException e) { // Let it go. } } } // ========= Method visible for testing ======== @VisibleForTesting static void setClock(Clock clock) { ComponentClosingUtils.clock = clock; } }
ComponentClosingUtils
java
elastic__elasticsearch
x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/predicate/fulltext/MultiMatchQueryPredicate.java
{ "start": 601, "end": 1936 }
class ____ extends FullTextPredicate { private final String fieldString; private final Map<String, Float> fields; public MultiMatchQueryPredicate(Source source, String fieldString, String query, String options) { super(source, query, options, emptyList()); this.fieldString = fieldString; // inferred this.fields = FullTextUtils.parseFields(fieldString, source); } @Override protected NodeInfo<MultiMatchQueryPredicate> info() { return NodeInfo.create(this, MultiMatchQueryPredicate::new, fieldString, query(), options()); } @Override public Expression replaceChildren(List<Expression> newChildren) { throw new UnsupportedOperationException("this type of node doesn't have any children to replace"); } public String fieldString() { return fieldString; } public Map<String, Float> fields() { return fields; } @Override public int hashCode() { return Objects.hash(fieldString, super.hashCode()); } @Override public boolean equals(Object obj) { if (super.equals(obj)) { MultiMatchQueryPredicate other = (MultiMatchQueryPredicate) obj; return Objects.equals(fieldString, other.fieldString); } return false; } }
MultiMatchQueryPredicate
java
apache__hadoop
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverBackwardsCompat.java
{ "start": 2726, "end": 3322 }
class ____ { private void failWithException(String message, Exception exception) { ByteArrayOutputStream buffer = new ByteArrayOutputStream(); exception.printStackTrace(new PrintStream(buffer)); String stacktrace = buffer.toString(); fail(message + ": " + exception + "; " + exception.getMessage() + "\n" + stacktrace); } /** * Used for mocking DataNode. Mockito does not provide a way to mock * properties (like data or saslClient) so we have to manually set up mocks * of those properties inside our own class. */ public
TestDataXceiverBackwardsCompat
java
quarkusio__quarkus
extensions/smallrye-reactive-messaging/deployment/src/test/java/io/quarkus/smallrye/reactivemessaging/wiring/MissingIncomingConnectorDetectionTest.java
{ "start": 1049, "end": 1152 }
class ____ { @Incoming("a") public void consume(Integer integer) { } } }
Foo
java
apache__hadoop
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java
{ "start": 9072, "end": 9264 }
class ____ extends LightService { public ServiceB() { super("B", 'B', 66, ByteBuffer.wrap("B".getBytes())); } } // Override getMetaData() method to return current //
ServiceB
java
google__dagger
javatests/dagger/internal/codegen/MapMultibindingValidationTest.java
{ "start": 9066, "end": 9668 }
class ____ {", " @Provides @IntoMap @StringKey(\"AKey\") Object provideObjectForAKey() {", " return \"one\";", " }", "", " @Provides @IntoMap @StringKeyTwo(\"BKey\") Object provideObjectForBKey() {", " return \"two\";", " }", "}"); Source stringKeyTwoFile = CompilerTests.javaSource( "test.StringKeyTwo", "package test;", "", "import dagger.MapKey;", "", "@MapKey(unwrapValue = true)", "public @
MapModule
java
apache__camel
components/camel-gson/src/test/java/org/apache/camel/component/gson/PersonPojo.java
{ "start": 852, "end": 1379 }
class ____ { private int id; private String firstName; private String lastName; public int getId() { return id; } public void setId(int id) { this.id = id; } public String getFirstName() { return firstName; } public void setFirstName(String firstName) { this.firstName = firstName; } public String getLastName() { return lastName; } public void setLastName(String lastName) { this.lastName = lastName; } }
PersonPojo
java
apache__avro
lang/java/avro/src/main/java/org/apache/avro/SchemaBuilder.java
{ "start": 82303, "end": 84109 }
class ____<R> { private final Completion<R> context; private final NameContext names; private final List<Schema> schemas; private UnionAccumulator(Completion<R> context, NameContext names, List<Schema> schemas) { this.context = context; this.names = names; this.schemas = schemas; } /** Add an additional type to this union **/ public BaseTypeBuilder<UnionAccumulator<R>> and() { return new UnionBuilder<>(context, names, schemas); } /** Complete this union **/ public R endUnion() { Schema schema = Schema.createUnion(schemas); return context.complete(schema); } } // create default value JsonNodes from objects private static JsonNode toJsonNode(Object o) { try { String s; if (o instanceof ByteBuffer) { // special case since GenericData.toString() is incorrect for bytes // note that this does not handle the case of a default value with nested bytes ByteBuffer bytes = ((ByteBuffer) o); ((Buffer) bytes).mark(); byte[] data = new byte[bytes.remaining()]; bytes.get(data); ((Buffer) bytes).reset(); // put the buffer back the way we got it s = new String(data, StandardCharsets.ISO_8859_1); char[] quoted = JsonStringEncoder.getInstance().quoteAsString(s); s = "\"" + new String(quoted) + "\""; } else if (o instanceof byte[]) { s = new String((byte[]) o, StandardCharsets.ISO_8859_1); char[] quoted = JsonStringEncoder.getInstance().quoteAsString(s); s = '\"' + new String(quoted) + '\"'; } else { s = GenericData.get().toString(o); } return MAPPER.readTree(s); } catch (IOException e) { throw new SchemaBuilderException(e); } } }
UnionAccumulator
java
micronaut-projects__micronaut-core
inject/src/main/java/io/micronaut/context/LocalizedMessageSource.java
{ "start": 828, "end": 3415 }
interface ____ { /** * Resolve a message for the given code. * @param code The code * @return A message if present */ @NonNull Optional<String> getMessage(@NonNull String code); /** * Resolve a message for the given code and variables for the messages. * @param code The code * @param variables to be used to interpolate the message * @return A message if present */ @NonNull Optional<String> getMessage(@NonNull String code, Object... variables); /** * Resolve a message for the given code and variables for the messages. * @param code The code * @param variables to be used to interpolate the message * @return A message if present */ @NonNull Optional<String> getMessage(@NonNull String code, Map<String, Object> variables); /** * Resolve a message for the given code. If the message is not present then default message is returned. * @param code The code * @param defaultMessage The default message to use if no other message is found * @return A message if present. If the message is not present then default message supplied is returned. */ default @NonNull String getMessageOrDefault(@NonNull String code, @NonNull String defaultMessage) { return getMessage(code).orElse(defaultMessage); } /** * Resolve a message for the given code. If the message is not present then default message is returned. * @param code The code * @param defaultMessage The default message to use if no other message is found * @param variables to be used to interpolate the message * @return A message if present. If the message is not present then default message supplied is returned. */ default @NonNull String getMessageOrDefault(@NonNull String code, @NonNull String defaultMessage, Object... variables) { return getMessage(code, variables).orElse(defaultMessage); } /** * Resolve a message for the given code. If the message is not present then default message is returned. 
* @param code The code * @param defaultMessage The default message to use if no other message is found * @param variables to be used to interpolate the message * @return A message if present. If the message is not present then default message supplied is returned. */ default @NonNull String getMessageOrDefault(@NonNull String code, @NonNull String defaultMessage, Map<String, Object> variables) { return getMessage(code, variables).orElse(defaultMessage); } }
LocalizedMessageSource
java
apache__camel
components/camel-ftp/src/test/java/org/apache/camel/component/file/remote/integration/FromFtpThirdPoolOkIT.java
{ "start": 1371, "end": 3385 }
class ____ extends FtpServerTestSupport { private final AtomicInteger counter = new AtomicInteger(); private String getFtpUrl() { return "ftp://admin@localhost:{{ftp.server.port}}/thirdpool?password=admin&delete=true"; } @Test void testPollFileAndShouldBeDeletedAtThirdPoll() throws Exception { String body = "Hello World this file will be deleted"; template.sendBodyAndHeader(getFtpUrl(), body, Exchange.FILE_NAME, "hello.txt"); getMockEndpoint("mock:result").expectedBodiesReceived(body); // 2 first attempt should fail getMockEndpoint("mock:error").expectedMessageCount(2); MockEndpoint.assertIsSatisfied(context); // give time to delete file await().atMost(200, TimeUnit.MILLISECONDS) .untilAsserted(() -> assertEquals(3, counter.get())); // assert the file is deleted File file = service.ftpFile("thirdpool/hello.txt").toFile(); await().atMost(1, TimeUnit.MINUTES) .untilAsserted(() -> assertFalse(file.exists(), "The file should have been deleted")); } @Override protected RouteBuilder createRouteBuilder() { return new RouteBuilder() { public void configure() { // no redeliveries as we want the ftp consumer to try again // use no delay for fast unit testing onException(IllegalArgumentException.class).logStackTrace(false).to("mock:error"); from(getFtpUrl()).process(exchange -> { if (counter.incrementAndGet() < 3) { // file should exist File file = service.ftpFile("thirdpool/hello.txt").toFile(); assertTrue(file.exists(), "The file should NOT have been deleted"); throw new IllegalArgumentException("Forced by unit test"); } }).to("mock:result"); } }; } }
FromFtpThirdPoolOkIT
java
grpc__grpc-java
interop-testing/src/generated/main/grpc/io/grpc/testing/integration/UnimplementedServiceGrpc.java
{ "start": 13405, "end": 13593 }
class ____ extends UnimplementedServiceBaseDescriptorSupplier { UnimplementedServiceFileDescriptorSupplier() {} } private static final
UnimplementedServiceFileDescriptorSupplier
java
hibernate__hibernate-orm
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/converted/converter/ExplicitJavaTypeDescriptorTest.java
{ "start": 8469, "end": 9798 }
class ____ implements JavaType<PseudoMutableState> { /** * Singleton access */ public static final PseudoMutableStateJavaType INSTANCE = new PseudoMutableStateJavaType(); @Override public Class<PseudoMutableState> getJavaTypeClass() { return PseudoMutableState.class; } @Override public MutabilityPlan<PseudoMutableState> getMutabilityPlan() { return ImmutableMutabilityPlan.instance(); } @Override public JdbcType getRecommendedJdbcType(JdbcTypeIndicators context) { return context.getJdbcType( Types.VARCHAR ); } @Override public PseudoMutableState fromString(CharSequence string) { return string == null ? null : new PseudoMutableState( string.toString() ); } @Override public <X> X unwrap(PseudoMutableState value, Class<X> type, WrapperOptions options) { if ( value == null ) { return null; } if ( PseudoMutableState.class.equals( type ) ) { return (X) value; } if ( String.class.equals( type ) ) { return (X) value.state; } throw new IllegalArgumentException( String.format( Locale.ROOT, "Cannot convert value '%s' to type `%s`", value.state, type ) ); } @Override public <X> PseudoMutableState wrap(X value, WrapperOptions options) { return null; } } }
PseudoMutableStateJavaType
java
apache__avro
lang/java/thrift/src/test/java/org/apache/avro/thrift/TestThrift.java
{ "start": 1331, "end": 3028 }
class ____ { @org.junit.jupiter.api.Test void testStruct() throws Exception { System.out.println(ThriftData.get().getSchema(Test.class).toString(true)); Test test = new Test(); test.setBoolField(true); test.setByteField((byte) 2); test.setI16Field((short) 3); test.setI16OptionalField((short) 14); test.setI32Field(4); test.setI64Field(5L); test.setDoubleField(2.0); test.setStringField("foo"); test.setBinaryField(ByteBuffer.wrap(new byte[] { 0, -1 })); test.setMapField(Collections.singletonMap("x", 1)); test.setListField(Collections.singletonList(7)); test.setSetField(Collections.singleton(8)); test.setEnumField(E.X); test.setStructField(new Nested(9)); test.setFooOrBar(FooOrBar.foo("x")); System.out.println(test); check(test); } @org.junit.jupiter.api.Test void testOptionals() throws Exception { Test test = new Test(); test.setBoolField(true); test.setByteField((byte) 2); test.setByteOptionalField((byte) 4); test.setI16Field((short) 3); test.setI16OptionalField((short) 15); test.setI64Field(5L); test.setDoubleField(2.0); System.out.println(test); check(test); } private void check(Test test) throws Exception { ByteArrayOutputStream bao = new ByteArrayOutputStream(); ThriftDatumWriter<Test> w = new ThriftDatumWriter<>(Test.class); Encoder e = EncoderFactory.get().binaryEncoder(bao, null); w.write(test, e); e.flush(); Object o = new ThriftDatumReader<>(Test.class).read(null, DecoderFactory.get().binaryDecoder(new ByteArrayInputStream(bao.toByteArray()), null)); assertEquals(test, o); } }
TestThrift
java
apache__hadoop
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/hash/MurmurHash.java
{ "start": 1312, "end": 2545 }
class ____ extends Hash { private static MurmurHash _instance = new MurmurHash(); public static Hash getInstance() { return _instance; } @Override public int hash(byte[] data, int length, int seed) { return hash(data, 0, length, seed); } public int hash(byte[] data, int offset, int length, int seed) { int m = 0x5bd1e995; int r = 24; int h = seed ^ length; int len_4 = length >> 2; for (int i = 0; i < len_4; i++) { int i_4 = offset + (i << 2); int k = data[i_4 + 3]; k = k << 8; k = k | (data[i_4 + 2] & 0xff); k = k << 8; k = k | (data[i_4 + 1] & 0xff); k = k << 8; k = k | (data[i_4 + 0] & 0xff); k *= m; k ^= k >>> r; k *= m; h *= m; h ^= k; } // avoid calculating modulo int len_m = len_4 << 2; int left = length - len_m; if (left != 0) { length += offset; if (left >= 3) { h ^= (int) data[length - 3] << 16; } if (left >= 2) { h ^= (int) data[length - 2] << 8; } if (left >= 1) { h ^= (int) data[length - 1]; } h *= m; } h ^= h >>> 13; h *= m; h ^= h >>> 15; return h; } }
MurmurHash
java
spring-projects__spring-boot
module/spring-boot-micrometer-tracing-opentelemetry/src/test/java/org/springframework/boot/micrometer/tracing/opentelemetry/autoconfigure/otlp/OtlpTracingAutoConfigurationTests.java
{ "start": 13334, "end": 13546 }
class ____ { @Bean OtlpGrpcSpanExporter customOtlpGrpcSpanExporter() { return OtlpGrpcSpanExporter.builder().build(); } } @Configuration(proxyBeanMethods = false) static
CustomGrpcExporterConfiguration
java
hibernate__hibernate-orm
hibernate-core/src/test/java/org/hibernate/orm/test/type/UUIDTypeConverterTest.java
{ "start": 2948, "end": 3152 }
class ____ { @Column(unique = true, length = 16, nullable = false) @jakarta.persistence.Id private UUID id = safeRandomUUID(); } @Entity(name = "Image") @Table(name = "TEST_IMAGE") public static
Id
java
google__error-prone
core/src/test/java/com/google/errorprone/bugpatterns/nullness/NullablePrimitiveTest.java
{ "start": 3483, "end": 4116 }
class ____ { // BUG: Diagnostic contains: List<@Nullable int[]> xs; } """) .doTest(); } // regression test for #418 @Test public void typeParameter() { compilationHelper .addSourceLines( "Nullable.java", """ import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; @Retention(RetentionPolicy.RUNTIME) @Target(ElementType.TYPE_USE) public @
Test
java
apache__flink
flink-runtime/src/test/java/org/apache/flink/runtime/operators/testutils/Match.java
{ "start": 877, "end": 978 }
class ____ keeping track of matches in join operator tests. * * @see MatchRemovingJoiner */ public
for
java
apache__flink
flink-runtime/src/main/java/org/apache/flink/runtime/security/token/DelegationTokenManager.java
{ "start": 1276, "end": 1514 }
interface ____ { /** * Listener for events in the {@link DelegationTokenManager}. * * <p>By registering it in the manager one can receive callbacks when events are happening. */ @Internal
DelegationTokenManager
java
spring-projects__spring-boot
module/spring-boot-jooq/src/main/java/org/springframework/boot/jooq/autoconfigure/JooqAutoConfiguration.java
{ "start": 6092, "end": 7108 }
class ____ { private Settings load(InputStream inputStream) { try { SAXParser parser = createParserFactory().newSAXParser(); Source source = new SAXSource(parser.getXMLReader(), new InputSource(inputStream)); JAXBContext context = JAXBContext.newInstance(Settings.class); return context.createUnmarshaller().unmarshal(source, Settings.class).getValue(); } catch (ParserConfigurationException | JAXBException | SAXException ex) { throw new IllegalStateException("Failed to unmarshal settings", ex); } } private SAXParserFactory createParserFactory() throws ParserConfigurationException, SAXNotRecognizedException, SAXNotSupportedException { SAXParserFactory factory = SAXParserFactory.newInstance(); factory.setFeature("http://apache.org/xml/features/disallow-doctype-decl", true); factory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true); factory.setNamespaceAware(true); factory.setXIncludeAware(false); return factory; } } }
JaxbSettingsLoader
java
apache__flink
flink-core/src/test/java/org/apache/flink/api/common/typeutils/CompositeTypeSerializerSnapshotTest.java
{ "start": 23257, "end": 23791 }
class ____ extends NestedSerializer { private static final long serialVersionUID = -1396401178636869659L; public ReconfiguredNestedSerializer(TargetCompatibility targetCompatibility) { super(targetCompatibility); } } /** * A variant of the {@link NestedSerializer} used only when creating a restored instance of the * serializer. This is used in tests as a tag to identify that the correct serializer instances * are being used. */ static
ReconfiguredNestedSerializer
java
apache__camel
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/SpringLdapEndpointBuilderFactory.java
{ "start": 3472, "end": 5978 }
interface ____ extends EndpointProducerBuilder { default SpringLdapEndpointBuilder basic() { return (SpringLdapEndpointBuilder) this; } /** * Whether the producer should be started lazy (on the first message). * By starting lazy you can use this to allow CamelContext and routes to * startup in situations where a producer may otherwise fail during * starting and cause the route to fail being started. By deferring this * startup to be lazy then the startup failure can be handled during * routing messages via Camel's routing error handlers. Beware that when * the first message is processed then creating and starting the * producer may take a little time and prolong the total processing time * of the processing. * * The option is a: <code>boolean</code> type. * * Default: false * Group: producer (advanced) * * @param lazyStartProducer the value to set * @return the dsl builder */ default AdvancedSpringLdapEndpointBuilder lazyStartProducer(boolean lazyStartProducer) { doSetProperty("lazyStartProducer", lazyStartProducer); return this; } /** * Whether the producer should be started lazy (on the first message). * By starting lazy you can use this to allow CamelContext and routes to * startup in situations where a producer may otherwise fail during * starting and cause the route to fail being started. By deferring this * startup to be lazy then the startup failure can be handled during * routing messages via Camel's routing error handlers. Beware that when * the first message is processed then creating and starting the * producer may take a little time and prolong the total processing time * of the processing. * * The option will be converted to a <code>boolean</code> type. * * Default: false * Group: producer (advanced) * * @param lazyStartProducer the value to set * @return the dsl builder */ default AdvancedSpringLdapEndpointBuilder lazyStartProducer(String lazyStartProducer) { doSetProperty("lazyStartProducer", lazyStartProducer); return this; } } public
AdvancedSpringLdapEndpointBuilder
java
apache__hadoop
hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/BufferPool.java
{ "start": 1294, "end": 1497 }
class ____ used to manage the buffers during program execution. * It is provided in a thread-safe singleton mode,and * keeps the program's memory and disk consumption at a stable value. */ public final
is
java
quarkusio__quarkus
integration-tests/simple with space/src/main/java/io/quarkus/it/spaces/GreetingResource.java
{ "start": 215, "end": 535 }
class ____ { // make sure we reference something from Primefaces so GraalVM doesn't throw out the entire jar private static final String PRIMEFACES_DOWNLOAD_COOKIE = Constants.DOWNLOAD_COOKIE; @GET @Produces(MediaType.TEXT_PLAIN) public String hello() { return "hello"; } }
GreetingResource
java
mockito__mockito
mockito-core/src/testFixtures/java/org/mockitoutil/ClassLoaders.java
{ "start": 1368, "end": 3093 }
class ____ { protected ClassLoader parent = currentClassLoader(); protected ClassLoaders() {} public static IsolatedURLClassLoaderBuilder isolatedClassLoader() { return new IsolatedURLClassLoaderBuilder(); } public static ExcludingURLClassLoaderBuilder excludingClassLoader() { return new ExcludingURLClassLoaderBuilder(); } public static InMemoryClassLoaderBuilder inMemoryClassLoader() { return new InMemoryClassLoaderBuilder(); } public static ReachableClassesFinder in(ClassLoader classLoader) { return new ReachableClassesFinder(classLoader); } public static ClassLoader jdkClassLoader() { return String.class.getClassLoader(); } public static ClassLoader systemClassLoader() { return ClassLoader.getSystemClassLoader(); } public static ClassLoader currentClassLoader() { return ClassLoaders.class.getClassLoader(); } public abstract ClassLoader build(); public static Class<?>[] coverageTool() { HashSet<Class<?>> classes = new HashSet<Class<?>>(); classes.add(safeGetClass("net.sourceforge.cobertura.coveragedata.TouchCollector")); classes.add(safeGetClass("org.slf4j.LoggerFactory")); classes.remove(null); return classes.toArray(new Class<?>[classes.size()]); } private static Class<?> safeGetClass(String className) { try { return Class.forName(className); } catch (ClassNotFoundException e) { return null; } } public static ClassLoaderExecutor using(final ClassLoader classLoader) { return new ClassLoaderExecutor(classLoader); } public static
ClassLoaders
java
google__guice
core/src/com/google/inject/internal/aop/AnonymousClassDefiner.java
{ "start": 861, "end": 1042 }
class ____ implements ClassDefiner { private static final sun.misc.Unsafe THE_UNSAFE; private static final Method ANONYMOUS_DEFINE_METHOD; /** True if this
AnonymousClassDefiner
java
apache__camel
components/camel-http/src/generated/java/org/apache/camel/component/http/LoggingHttpActivityListenerConfigurer.java
{ "start": 725, "end": 5467 }
class ____ extends org.apache.camel.support.component.PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter { @Override public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) { org.apache.camel.component.http.LoggingHttpActivityListener target = (org.apache.camel.component.http.LoggingHttpActivityListener) obj; switch (ignoreCase ? name.toLowerCase() : name) { case "logmask": case "logMask": target.setLogMask(property(camelContext, java.lang.Boolean.class, value)); return true; case "logginglevel": case "loggingLevel": target.setLoggingLevel(property(camelContext, java.lang.String.class, value)); return true; case "maxchars": case "maxChars": target.setMaxChars(property(camelContext, int.class, value)); return true; case "multiline": target.setMultiline(property(camelContext, boolean.class, value)); return true; case "showbinary": case "showBinary": target.setShowBinary(property(camelContext, boolean.class, value)); return true; case "showbody": case "showBody": target.setShowBody(property(camelContext, boolean.class, value)); return true; case "showexchangeid": case "showExchangeId": target.setShowExchangeId(property(camelContext, boolean.class, value)); return true; case "showheaders": case "showHeaders": target.setShowHeaders(property(camelContext, boolean.class, value)); return true; case "showroutegroup": case "showRouteGroup": target.setShowRouteGroup(property(camelContext, boolean.class, value)); return true; case "showrouteid": case "showRouteId": target.setShowRouteId(property(camelContext, boolean.class, value)); return true; case "showstreams": case "showStreams": target.setShowStreams(property(camelContext, boolean.class, value)); return true; case "sourcelocationloggername": case "sourceLocationLoggerName": target.setSourceLocationLoggerName(property(camelContext, boolean.class, value)); return true; default: return false; } } @Override public Class<?> 
getOptionType(String name, boolean ignoreCase) { switch (ignoreCase ? name.toLowerCase() : name) { case "logmask": case "logMask": return java.lang.Boolean.class; case "logginglevel": case "loggingLevel": return java.lang.String.class; case "maxchars": case "maxChars": return int.class; case "multiline": return boolean.class; case "showbinary": case "showBinary": return boolean.class; case "showbody": case "showBody": return boolean.class; case "showexchangeid": case "showExchangeId": return boolean.class; case "showheaders": case "showHeaders": return boolean.class; case "showroutegroup": case "showRouteGroup": return boolean.class; case "showrouteid": case "showRouteId": return boolean.class; case "showstreams": case "showStreams": return boolean.class; case "sourcelocationloggername": case "sourceLocationLoggerName": return boolean.class; default: return null; } } @Override public Object getOptionValue(Object obj, String name, boolean ignoreCase) { org.apache.camel.component.http.LoggingHttpActivityListener target = (org.apache.camel.component.http.LoggingHttpActivityListener) obj; switch (ignoreCase ? 
name.toLowerCase() : name) { case "logmask": case "logMask": return target.getLogMask(); case "logginglevel": case "loggingLevel": return target.getLoggingLevel(); case "maxchars": case "maxChars": return target.getMaxChars(); case "multiline": return target.isMultiline(); case "showbinary": case "showBinary": return target.isShowBinary(); case "showbody": case "showBody": return target.isShowBody(); case "showexchangeid": case "showExchangeId": return target.isShowExchangeId(); case "showheaders": case "showHeaders": return target.isShowHeaders(); case "showroutegroup": case "showRouteGroup": return target.isShowRouteGroup(); case "showrouteid": case "showRouteId": return target.isShowRouteId(); case "showstreams": case "showStreams": return target.isShowStreams(); case "sourcelocationloggername": case "sourceLocationLoggerName": return target.isSourceLocationLoggerName(); default: return null; } } }
LoggingHttpActivityListenerConfigurer
java
google__auto
value/src/test/java/com/google/auto/value/processor/ExtensionTest.java
{ "start": 40416, "end": 40861 }
interface ____<T> {", " T thing();", " ImmutableList<T> list();", "}"); JavaFileObject autoValueClass = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "import com.google.common.collect.ImmutableList;", "", "@AutoValue", "abstract
Parent
java
spring-projects__spring-data-jpa
spring-data-jpa/src/main/java/org/springframework/data/jpa/mapping/JpaPersistentEntityImpl.java
{ "start": 3423, "end": 4388 }
class ____ extends IdPropertyIdentifierAccessor { private final Object bean; private final ProxyIdAccessor proxyIdAccessor; /** * Creates a new {@link JpaProxyAwareIdentifierAccessor} for the given {@link JpaPersistentEntity}, target bean and * {@link ProxyIdAccessor}. * * @param entity must not be {@literal null}. * @param bean must not be {@literal null}. * @param proxyIdAccessor must not be {@literal null}. */ JpaProxyAwareIdentifierAccessor(JpaPersistentEntity<?> entity, Object bean, ProxyIdAccessor proxyIdAccessor) { super(entity, bean); Assert.notNull(proxyIdAccessor, "Proxy identifier accessor must not be null"); this.proxyIdAccessor = proxyIdAccessor; this.bean = bean; } @Override public @Nullable Object getIdentifier() { return proxyIdAccessor.shouldUseAccessorFor(bean) // ? proxyIdAccessor.getIdentifierFrom(bean)// : super.getIdentifier(); } } }
JpaProxyAwareIdentifierAccessor
java
apache__flink
flink-table/flink-sql-parser/src/main/java/org/apache/flink/sql/parser/ddl/SqlDropTable.java
{ "start": 1158, "end": 2018 }
class ____ extends SqlDropObject { private static final SqlOperator OPERATOR = new SqlSpecialOperator("DROP TABLE", SqlKind.DROP_TABLE); private final boolean isTemporary; public SqlDropTable( SqlParserPos pos, SqlIdentifier tableName, boolean ifExists, boolean isTemporary) { super(OPERATOR, pos, tableName, ifExists); this.isTemporary = isTemporary; } public boolean isTemporary() { return this.isTemporary; } @Override public void unparse(SqlWriter writer, int leftPrec, int rightPrec) { writer.keyword("DROP"); if (isTemporary) { writer.keyword("TEMPORARY"); } writer.keyword("TABLE"); if (ifExists) { writer.keyword("IF EXISTS"); } name.unparse(writer, leftPrec, rightPrec); } }
SqlDropTable
java
apache__hadoop
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/nonsorttest/NonSortTest.java
{ "start": 2053, "end": 5166 }
class ____ { @Test public void nonSortTest() throws Exception { Configuration nativeConf = ScenarioConfiguration.getNativeConfiguration(); nativeConf.addResource(TestConstants.NONSORT_TEST_CONF); nativeConf.set(TestConstants.NATIVETASK_MAP_OUTPUT_SORT, "false"); final Job nativeNonSort = getJob(nativeConf, "NativeNonSort", TestConstants.NATIVETASK_NONSORT_TEST_INPUTDIR, TestConstants.NATIVETASK_NONSORT_TEST_NATIVE_OUTPUT); assertThat(nativeNonSort.waitForCompletion(true)).isTrue(); Configuration normalConf = ScenarioConfiguration.getNormalConfiguration(); normalConf.addResource(TestConstants.NONSORT_TEST_CONF); final Job hadoopWithSort = getJob(normalConf, "NormalJob", TestConstants.NATIVETASK_NONSORT_TEST_INPUTDIR, TestConstants.NATIVETASK_NONSORT_TEST_NORMAL_OUTPUT); assertThat(hadoopWithSort.waitForCompletion(true)).isTrue(); final boolean compareRet = ResultVerifier.verify( TestConstants.NATIVETASK_NONSORT_TEST_NATIVE_OUTPUT, TestConstants.NATIVETASK_NONSORT_TEST_NORMAL_OUTPUT); assertThat(compareRet) .withFailMessage( "file compare result: if they are the same ,then return true") .isTrue(); ResultVerifier.verifyCounters(hadoopWithSort, nativeNonSort); } @BeforeEach public void startUp() throws Exception { assumeTrue(NativeCodeLoader.isNativeCodeLoaded()); assumeTrue(NativeRuntime.isNativeLibraryLoaded()); final ScenarioConfiguration conf = new ScenarioConfiguration(); conf.addNonSortTestConf(); final FileSystem fs = FileSystem.get(conf); final Path path = new Path(TestConstants.NATIVETASK_NONSORT_TEST_INPUTDIR); if (!fs.exists(path)) { int filesize = conf.getInt(TestConstants.NATIVETASK_NONSORTTEST_FILESIZE, 10000000); new TestInputFile(filesize, Text.class.getName(), Text.class.getName(), conf).createSequenceTestFile(path.toString()); } fs.close(); } @AfterAll public static void cleanUp() throws IOException { final FileSystem fs = FileSystem.get(new ScenarioConfiguration()); fs.delete(new Path(TestConstants.NATIVETASK_NONSORT_TEST_DIR), true); fs.close(); } 
private Job getJob(Configuration conf, String jobName, String inputpath, String outputpath) throws IOException { final FileSystem fs = FileSystem.get(conf); if (fs.exists(new Path(outputpath))) { fs.delete(new Path(outputpath), true); } fs.close(); final Job job = Job.getInstance(conf, jobName); job.setJarByClass(NonSortTestMR.class); job.setMapperClass(NonSortTestMR.Map.class); job.setReducerClass(NonSortTestMR.KeyHashSumReduce.class); job.setOutputKeyClass(Text.class); job.setMapOutputValueClass(IntWritable.class); job.setOutputValueClass(LongWritable.class); job.setInputFormatClass(SequenceFileInputFormat.class); job.setOutputFormatClass(TextOutputFormat.class); FileInputFormat.addInputPath(job, new Path(inputpath)); FileOutputFormat.setOutputPath(job, new Path(outputpath)); return job; } }
NonSortTest
java
apache__dubbo
dubbo-remoting/dubbo-remoting-api/src/test/java/org/apache/dubbo/remoting/transport/dispatcher/ChannelEventRunnableTest.java
{ "start": 1157, "end": 3388 }
class ____ { @Test void test() throws Exception { ChannelEventRunnable.ChannelState[] values = ChannelEventRunnable.ChannelState.values(); Assertions.assertEquals(Arrays.toString(values), "[CONNECTED, DISCONNECTED, SENT, RECEIVED, CAUGHT]"); Channel channel = Mockito.mock(Channel.class); ChannelHandler handler = Mockito.mock(ChannelHandler.class); ChannelEventRunnable connectRunnable = new ChannelEventRunnable(channel, handler, ChannelEventRunnable.ChannelState.CONNECTED); ChannelEventRunnable disconnectRunnable = new ChannelEventRunnable(channel, handler, ChannelEventRunnable.ChannelState.DISCONNECTED); ChannelEventRunnable sentRunnable = new ChannelEventRunnable(channel, handler, ChannelEventRunnable.ChannelState.SENT); ChannelEventRunnable receivedRunnable = new ChannelEventRunnable(channel, handler, ChannelEventRunnable.ChannelState.RECEIVED, ""); ChannelEventRunnable caughtRunnable = new ChannelEventRunnable( channel, handler, ChannelEventRunnable.ChannelState.CAUGHT, new RuntimeException()); connectRunnable.run(); disconnectRunnable.run(); sentRunnable.run(); receivedRunnable.run(); caughtRunnable.run(); ArgumentCaptor<Channel> channelArgumentCaptor = ArgumentCaptor.forClass(Channel.class); ArgumentCaptor<Throwable> throwableArgumentCaptor = ArgumentCaptor.forClass(Throwable.class); ArgumentCaptor<Object> objectArgumentCaptor = ArgumentCaptor.forClass(Object.class); Mockito.verify(handler, Mockito.times(1)).connected(channelArgumentCaptor.capture()); Mockito.verify(handler, Mockito.times(1)).disconnected(channelArgumentCaptor.capture()); Mockito.verify(handler, Mockito.times(1)).sent(channelArgumentCaptor.capture(), Mockito.any()); Mockito.verify(handler, Mockito.times(1)) .received(channelArgumentCaptor.capture(), objectArgumentCaptor.capture()); Mockito.verify(handler, Mockito.times(1)) .caught(channelArgumentCaptor.capture(), throwableArgumentCaptor.capture()); } }
ChannelEventRunnableTest
java
junit-team__junit5
junit-platform-commons/src/main/java/org/junit/platform/commons/util/ReflectionUtils.java
{ "start": 3250, "end": 3895 }
enum ____ { /** * Traverse the hierarchy using top-down semantics. */ TOP_DOWN, /** * Traverse the hierarchy using bottom-up semantics. */ BOTTOM_UP } // Pattern: "java.lang.String[]", "int[]", "int[][][][]", etc. // ?> => non-capturing atomic group // ++ => possessive quantifier private static final Pattern SOURCE_CODE_SYNTAX_ARRAY_PATTERN = Pattern.compile("^([^\\[\\]]+)((?>\\[\\])++)$"); static final Class<?>[] EMPTY_CLASS_ARRAY = new Class<?>[0]; private static final ClasspathScanner classpathScanner = ClasspathScannerLoader.getInstance(); /** * Cache for equivalent methods on an
HierarchyTraversalMode
java
apache__hadoop
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcDetailedMetrics.java
{ "start": 1265, "end": 1477 }
class ____ for maintaining RPC method related statistics * and publishing them through the metrics interfaces. */ @InterfaceAudience.Private @Metrics(about="Per method RPC metrics", context="rpcdetailed") public
is