language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
apache__flink
|
flink-tests/src/test/java/org/apache/flink/test/recovery/UpdateJobResourceRequirementsRecoveryITCase.java
|
{
"start": 2466,
"end": 6154
}
|
class ____ {
private static final Logger LOG =
LoggerFactory.getLogger(UpdateJobResourceRequirementsRecoveryITCase.class);
@RegisterExtension
private static final AllCallbackWrapper<ZooKeeperExtension> ZOOKEEPER_EXTENSION =
new AllCallbackWrapper<>(new ZooKeeperExtension());
/** Tests that a rescaled job graph will be recovered with the latest parallelism. */
@Test
void testRescaledJobGraphsWillBeRecoveredCorrectly(@TempDir Path tmpFolder) throws Exception {
final Configuration configuration = new Configuration();
final JobVertex jobVertex = new JobVertex("operator");
jobVertex.setParallelism(1);
jobVertex.setInvokableClass(BlockingNoOpInvokable.class);
final JobGraph jobGraph = JobGraphTestUtils.streamingJobGraph(jobVertex);
final JobID jobId = jobGraph.getJobID();
// We need to have a restart strategy set, to prevent the job from failing during the first
// cluster shutdown when TM disconnects.
configuration.set(RestartStrategyOptions.RESTART_STRATEGY, FIXED_DELAY.getMainValue());
configuration.set(
RestartStrategyOptions.RESTART_STRATEGY_FIXED_DELAY_ATTEMPTS, Integer.MAX_VALUE);
configuration.set(
RestartStrategyOptions.RESTART_STRATEGY_FIXED_DELAY_DELAY, Duration.ofMillis(100));
// The test is only supposed to pass with AdaptiveScheduler enabled.
configuration.set(JobManagerOptions.SCHEDULER, JobManagerOptions.SchedulerType.Adaptive);
// High-Availability settings.
configuration.set(HighAvailabilityOptions.HA_MODE, "zookeeper");
configuration.set(
HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM,
ZOOKEEPER_EXTENSION.getCustomExtension().getConnectString());
configuration.set(
HighAvailabilityOptions.HA_STORAGE_PATH, tmpFolder.toFile().getAbsolutePath());
final MiniClusterConfiguration miniClusterConfiguration =
new MiniClusterConfiguration.Builder()
.setConfiguration(configuration)
.setNumSlotsPerTaskManager(2)
.build();
final RestClusterClient<?> restClusterClient =
new RestClusterClient<>(configuration, "foobar");
final MiniCluster miniCluster = new MiniCluster(miniClusterConfiguration);
miniCluster.start();
assertThatFuture(restClusterClient.submitJob(jobGraph)).eventuallySucceeds();
ClientUtils.waitUntilJobInitializationFinished(
() -> restClusterClient.getJobStatus(jobId).get(),
() -> restClusterClient.requestJobResult(jobId).get(),
getClass().getClassLoader());
assertThatFuture(
restClusterClient.updateJobResourceRequirements(
jobGraph.getJobID(),
JobResourceRequirements.newBuilder()
.setParallelismForJobVertex(jobVertex.getID(), 1, 2)
.build()))
.eventuallySucceeds();
assertThatFuture(miniCluster.closeAsyncWithoutCleaningHighAvailabilityData())
.eventuallySucceeds();
LOG.info("Start second mini cluster to recover the persisted job.");
try (final MiniCluster recoveredMiniCluster = new MiniCluster(miniClusterConfiguration)) {
recoveredMiniCluster.start();
UpdateJobResourceRequirementsITCase.waitForRunningTasks(restClusterClient, jobId, 2);
}
}
}
|
UpdateJobResourceRequirementsRecoveryITCase
|
java
|
apache__camel
|
components/camel-metrics/src/test/java/org/apache/camel/component/metrics/HistogramEndpointTest.java
|
{
"start": 1508,
"end": 2859
}
|
class ____ {
private static final String METRICS_NAME = "metrics.name";
private static final Long VALUE = System.currentTimeMillis();
@Mock
private MetricRegistry registry;
private MetricsEndpoint endpoint;
private InOrder inOrder;
@BeforeEach
public void setUp() {
endpoint = new MetricsEndpoint(null, null, registry, MetricsType.HISTOGRAM, METRICS_NAME);
inOrder = Mockito.inOrder(registry);
}
@AfterEach
public void tearDown() {
inOrder.verifyNoMoreInteractions();
}
@Test
public void testHistogramEndpoint() {
assertThat(endpoint, is(notNullValue()));
assertThat(endpoint.getRegistry(), is(registry));
assertThat(endpoint.getMetricsName(), is(METRICS_NAME));
}
@Test
public void testCreateProducer() throws Exception {
Producer producer = endpoint.createProducer();
assertThat(producer, is(notNullValue()));
assertThat(producer, is(instanceOf(HistogramProducer.class)));
}
@Test
public void testGetValue() {
assertThat(endpoint.getValue(), is(nullValue()));
}
@Test
public void testSetValue() {
assertThat(endpoint.getValue(), is(nullValue()));
endpoint.setValue(VALUE);
assertThat(endpoint.getValue(), is(VALUE));
}
}
|
HistogramEndpointTest
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/SmooksEndpointBuilderFactory.java
|
{
"start": 8906,
"end": 9670
}
|
class ____ {
/**
* The internal instance of the builder used to access to all the
* methods representing the name of headers.
*/
private static final SmooksHeaderNameBuilder INSTANCE = new SmooksHeaderNameBuilder();
/**
* The Smooks execution context.
*
* The option is a: {@code org.smooks.api.ExecutionContext} type.
*
* Group: advanced
*
* @return the name of the header {@code SmooksExecutionContext}.
*/
public String smooksExecutionContext() {
return "CamelSmooksExecutionContext";
}
}
static SmooksEndpointBuilder endpointBuilder(String componentName, String path) {
|
SmooksHeaderNameBuilder
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/io/disk/iomanager/AsynchronousFileIOChannel.java
|
{
"start": 16036,
"end": 17569
}
|
class ____ implements ReadRequest {
private final AsynchronousFileIOChannel<FileSegment, ReadRequest> channel;
private final AtomicBoolean hasReachedEndOfFile;
private FileSegment fileSegment;
protected FileSegmentReadRequest(
AsynchronousFileIOChannel<FileSegment, ReadRequest> targetChannel,
AtomicBoolean hasReachedEndOfFile) {
this.channel = targetChannel;
this.hasReachedEndOfFile = hasReachedEndOfFile;
}
@Override
public void read() throws IOException {
final FileChannel fileChannel = channel.fileChannel;
if (fileChannel.size() - fileChannel.position() > 0) {
final ByteBuffer header = ByteBuffer.allocateDirect(8);
fileChannel.read(header);
header.flip();
final long position = fileChannel.position();
final boolean isBuffer = header.getInt() == 1;
final int length = header.getInt();
fileSegment = new FileSegment(fileChannel, position, length, isBuffer);
// Skip the binary data
fileChannel.position(position + length);
hasReachedEndOfFile.set(fileChannel.size() - fileChannel.position() == 0);
} else {
hasReachedEndOfFile.set(true);
}
}
@Override
public void requestDone(IOException error) {
channel.handleProcessedBuffer(fileSegment, error);
}
}
/** Request that seeks the underlying file channel to the given position. */
final
|
FileSegmentReadRequest
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/ext/jdk8/OptionalTest.java
|
{
"start": 3349,
"end": 9011
}
|
class ____ extends StdScalarDeserializer<String>
{
public LowerCasingDeserializer() { super(String.class); }
@Override
public String deserialize(JsonParser p, DeserializationContext ctxt)
{
return p.getString().toLowerCase();
}
}
/*
/**********************************************************
/* Test methods
/**********************************************************
*/
private final ObjectMapper MAPPER = newJsonMapper();
@Test
public void testStringAbsent() throws Exception
{
assertFalse(roundtrip(Optional.empty(), OPTIONAL_STRING_TYPE).isPresent());
}
@Test
public void testStringPresent() throws Exception
{
assertEquals("test", roundtrip(Optional.of("test"), OPTIONAL_STRING_TYPE).get());
}
@Test
public void testBeanAbsent() throws Exception
{
assertFalse(roundtrip(Optional.empty(), OPTIONAL_BEAN_TYPE).isPresent());
}
@Test
public void testBeanPresent() throws Exception
{
final TestBean bean = new TestBean(Integer.MAX_VALUE, "woopwoopwoopwoopwoop");
assertEquals(bean, roundtrip(Optional.of(bean), OPTIONAL_BEAN_TYPE).get());
}
@Test
public void testBeanWithCreator() throws Exception
{
final Issue4Entity emptyEntity = new Issue4Entity(Optional.empty());
final String json = MAPPER.writeValueAsString(emptyEntity);
final Issue4Entity deserialisedEntity = MAPPER.readValue(json, Issue4Entity.class);
if (!deserialisedEntity.equals(emptyEntity)) {
throw new IOException("Entities not equal");
}
}
@Test
public void testOptionalStringInBean() throws Exception
{
OptionalStringBean bean = MAPPER.readValue("{\"value\":\"xyz\"}", OptionalStringBean.class);
assertNotNull(bean.value);
assertEquals("xyz", bean.value.get());
}
// To support [datatype-jdk8#8]
@Test
public void testExcludeIfOptionalAbsent() throws Exception
{
ObjectMapper mapper = jsonMapperBuilder()
.changeDefaultPropertyInclusion(incl -> incl.withValueInclusion(JsonInclude.Include.NON_NULL))
.build();
assertEquals(a2q("{'value':'foo'}"),
mapper.writeValueAsString(new OptionalStringBean("foo")));
// absent is not strictly null so
assertEquals(a2q("{'value':null}"),
mapper.writeValueAsString(new OptionalStringBean(null)));
// however:
mapper = jsonMapperBuilder()
.changeDefaultPropertyInclusion(incl -> incl.withValueInclusion(JsonInclude.Include.NON_ABSENT))
.build();
assertEquals(a2q("{'value':'foo'}"),
mapper.writeValueAsString(new OptionalStringBean("foo")));
assertEquals(a2q("{}"),
mapper.writeValueAsString(new OptionalStringBean(null)));
}
@Test
public void testWithCustomDeserializer() throws Exception
{
CaseChangingStringWrapper w = MAPPER.readValue(a2q("{'value':'FoobaR'}"),
CaseChangingStringWrapper.class);
assertEquals("foobar", w.value.get());
}
// [modules-java8#36]
@Test
public void testWithCustomDeserializerIfOptionalAbsent() throws Exception
{
// 10-Aug-2017, tatu: Actually this is not true: missing value does not trigger
// specific handling
/*
assertEquals(Optional.empty(), MAPPER.readValue("{}",
CaseChangingStringWrapper.class).value);
*/
assertEquals(Optional.empty(), MAPPER.readValue(a2q("{'value':null}"),
CaseChangingStringWrapper.class).value);
}
@Test
public void testCustomSerializer() throws Exception
{
final String VALUE = "fooBAR";
String json = MAPPER.writeValueAsString(new CaseChangingStringWrapper(VALUE));
assertEquals(json, a2q("{'value':'FOOBAR'}"));
}
@Test
public void testCustomSerializerIfOptionalAbsent() throws Exception
{
ObjectMapper mapper = jsonMapperBuilder()
.changeDefaultPropertyInclusion(incl -> incl.withValueInclusion(JsonInclude.Include.NON_NULL))
.build();
assertEquals(a2q("{'value':'FOO'}"),
mapper.writeValueAsString(new CaseChangingStringWrapper("foo")));
// absent is not strictly null so
assertEquals(a2q("{'value':null}"),
mapper.writeValueAsString(new CaseChangingStringWrapper(null)));
// however:
mapper = jsonMapperBuilder()
.changeDefaultPropertyInclusion(incl -> incl.withValueInclusion(JsonInclude.Include.NON_ABSENT))
.build();
assertEquals(a2q("{'value':'FOO'}"),
mapper.writeValueAsString(new CaseChangingStringWrapper("foo")));
assertEquals(a2q("{}"),
mapper.writeValueAsString(new CaseChangingStringWrapper(null)));
}
// [modules-java8#33]: Verify against regression...
@Test
public void testOtherRefSerializers() throws Exception
{
String json = MAPPER.writeValueAsString(new AtomicReference<String>("foo"));
assertEquals(q("foo"), json);
}
/*
/**********************************************************
/* Helper methods
/**********************************************************
*/
private <T> Optional<T> roundtrip(Optional<T> obj, TypeReference<Optional<T>> type) throws IOException
{
return MAPPER.readValue(MAPPER.writeValueAsBytes(obj), type);
}
}
|
LowerCasingDeserializer
|
java
|
quarkusio__quarkus
|
integration-tests/elytron-resteasy/src/test/java/io/quarkus/it/resteasy/elytron/BaseAuthIT.java
|
{
"start": 126,
"end": 168
}
|
class ____ extends BaseAuthTest {
}
|
BaseAuthIT
|
java
|
google__auto
|
value/src/main/java/com/google/auto/value/processor/TypeEncoder.java
|
{
"start": 5353,
"end": 5792
}
|
class ____ for {@link TypeEncoder}
* covers the details of annotation encoding.
*
* @param extraAnnotations additional type annotations to include with the type
*/
static String encodeWithAnnotations(
TypeMirror type, ImmutableList<AnnotationMirror> extraAnnotations) {
return encodeWithAnnotations(type, extraAnnotations, ImmutableSet.of());
}
/**
* Encodes the given type and its type annotations. The
|
comment
|
java
|
google__guice
|
core/src/com/google/inject/internal/ProvidesMethodScanner.java
|
{
"start": 1584,
"end": 4123
}
|
class ____ extends ModuleAnnotatedMethodScanner {
static final ProvidesMethodScanner INSTANCE = new ProvidesMethodScanner();
private static final ImmutableSet<Class<? extends Annotation>> ANNOTATIONS =
ImmutableSet.of(
Provides.class, ProvidesIntoSet.class, ProvidesIntoMap.class, ProvidesIntoOptional.class);
private ProvidesMethodScanner() {}
@Override
public Set<? extends Class<? extends Annotation>> annotationClasses() {
return ANNOTATIONS;
}
@SuppressWarnings({"unchecked", "rawtypes"}) // mapKey doesn't know its key type
@Override
public <T> Key<T> prepareMethod(
Binder binder, Annotation annotation, Key<T> key, InjectionPoint injectionPoint) {
Method method = (Method) injectionPoint.getMember();
AnnotationOrError mapKey = findMapKeyAnnotation(binder, method);
if (annotation instanceof Provides) {
if (mapKey.annotation != null) {
binder.addError("Found a MapKey annotation on non map binding at %s.", method);
}
// no key rewriting for plain old @Provides
return key;
}
if (annotation instanceof ProvidesIntoSet) {
if (mapKey.annotation != null) {
binder.addError("Found a MapKey annotation on non map binding at %s.", method);
}
return RealMultibinder.newRealSetBinder(binder, key).getKeyForNewItem();
} else if (annotation instanceof ProvidesIntoMap) {
if (mapKey.error) {
// Already failed on the MapKey, don't bother doing more work.
return key;
}
if (mapKey.annotation == null) {
// If no MapKey, make an error and abort.
binder.addError("No MapKey found for map binding at %s.", method);
return key;
}
TypeAndValue typeAndValue = typeAndValueOfMapKey(mapKey.annotation);
return RealMapBinder.newRealMapBinder(binder, typeAndValue.type, key)
.getKeyForNewValue(typeAndValue.value);
} else if (annotation instanceof ProvidesIntoOptional) {
if (mapKey.annotation != null) {
binder.addError("Found a MapKey annotation on non map binding at %s.", method);
}
switch (((ProvidesIntoOptional) annotation).value()) {
case DEFAULT:
return RealOptionalBinder.newRealOptionalBinder(binder, key).getKeyForDefaultBinding();
case ACTUAL:
return RealOptionalBinder.newRealOptionalBinder(binder, key).getKeyForActualBinding();
}
}
throw new IllegalStateException("Invalid annotation: " + annotation);
}
private static
|
ProvidesMethodScanner
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/common/geo/GeoFormatterFactory.java
|
{
"start": 996,
"end": 3161
}
|
interface ____<T> {
/**
* Format name
*/
String getName();
/**
* Generates a formatter builder that parses the formatter configuration and generates a formatter
*/
Function<String, Function<List<T>, List<Object>>> getFormatterBuilder();
}
private final Map<String, Function<String, Function<List<T>, List<Object>>>> factories;
/**
* Creates an extensible geo formatter. The extension points can be added as a list of factories
*/
public GeoFormatterFactory(List<FormatterFactory<T>> factories) {
Map<String, Function<String, Function<List<T>, List<Object>>>> factoriesBuilder = new HashMap<>();
for (FormatterFactory<T> factory : factories) {
if (factoriesBuilder.put(factory.getName(), factory.getFormatterBuilder()) != null) {
throw new IllegalArgumentException(
"More then one formatter factory with the name [" + factory.getName() + "] was configured"
);
}
}
this.factories = Collections.unmodifiableMap(factoriesBuilder);
}
/**
* Returns a formatter by name
*
* The format can contain an optional parameters in parentheses such as "mvt(1/2/3)". Parameterless formats are getting resolved
* using standard GeometryFormatterFactory and formats with parameters are getting resolved using factories specified during
* construction.
*/
public Function<List<T>, List<Object>> getFormatter(String format, Function<T, Geometry> toGeometry) {
final int start = format.indexOf('(');
if (start == -1) {
return GeometryFormatterFactory.getFormatter(format, toGeometry);
}
final String formatName = format.substring(0, start);
Function<String, Function<List<T>, List<Object>>> factory = factories.get(formatName);
if (factory == null) {
throw new IllegalArgumentException("Invalid format: " + formatName);
}
final String param = format.substring(start + 1, format.length() - 1);
return factory.apply(param);
}
}
|
FormatterFactory
|
java
|
spring-projects__spring-framework
|
spring-core/src/test/java/org/springframework/core/io/support/MyDummyFactory1.java
|
{
"start": 822,
"end": 932
}
|
class ____ implements DummyFactory {
@Override
public String getString() {
return "Foo";
}
}
|
MyDummyFactory1
|
java
|
spring-projects__spring-framework
|
spring-test/src/main/java/org/springframework/test/context/support/ContextLoaderUtils.java
|
{
"start": 13430,
"end": 14159
}
|
class ____ then adding the attributes to the supplied list.
*/
private static void convertContextConfigToConfigAttributesAndAddToList(ContextConfiguration contextConfiguration,
Class<?> declaringClass, List<ContextConfigurationAttributes> attributesList) {
if (logger.isTraceEnabled()) {
logger.trace(String.format("Retrieved @ContextConfiguration [%s] for declaring class [%s].",
contextConfiguration, declaringClass.getName()));
}
ContextConfigurationAttributes attributes =
new ContextConfigurationAttributes(declaringClass, contextConfiguration);
if (logger.isTraceEnabled()) {
logger.trace("Resolved context configuration attributes: " + attributes);
}
attributesList.add(attributes);
}
}
|
and
|
java
|
apache__kafka
|
streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/EosIntegrationTest.java
|
{
"start": 6087,
"end": 48983
}
|
class ____ {
private static final Logger LOG = LoggerFactory.getLogger(EosIntegrationTest.class);
private static final int NUM_BROKERS = 3;
private static final int MAX_POLL_INTERVAL_MS = 30_000;
private static final int MAX_WAIT_TIME_MS = 120_000;
public static final EmbeddedKafkaCluster CLUSTER = new EmbeddedKafkaCluster(
NUM_BROKERS,
Utils.mkProperties(mkMap(
mkEntry("auto.create.topics.enable", "true"),
mkEntry("transaction.max.timeout.ms", "" + Integer.MAX_VALUE)
))
);
@BeforeAll
public static void startCluster() throws IOException {
CLUSTER.start();
}
@AfterAll
public static void closeCluster() {
CLUSTER.stop();
}
private String applicationId;
private static final int NUM_TOPIC_PARTITIONS = 2;
private static final String CONSUMER_GROUP_ID = "readCommitted";
private static final String SINGLE_PARTITION_INPUT_TOPIC = "singlePartitionInputTopic";
private static final String SINGLE_PARTITION_THROUGH_TOPIC = "singlePartitionThroughTopic";
private static final String SINGLE_PARTITION_OUTPUT_TOPIC = "singlePartitionOutputTopic";
private static final String MULTI_PARTITION_INPUT_TOPIC = "multiPartitionInputTopic";
private static final String MULTI_PARTITION_THROUGH_TOPIC = "multiPartitionThroughTopic";
private static final String MULTI_PARTITION_OUTPUT_TOPIC = "multiPartitionOutputTopic";
private final String storeName = "store";
private AtomicBoolean errorInjected;
private AtomicBoolean stallInjected;
private AtomicReference<String> stallingHost;
private volatile boolean doStall = true;
private AtomicInteger commitRequested;
private Throwable uncaughtException;
private static final AtomicInteger TEST_NUMBER = new AtomicInteger(0);
private volatile boolean hasUnexpectedError = false;
private String stateTmpDir;
private static java.util.stream.Stream<Arguments> groupProtocolAndProcessingThreadsParameters() {
return java.util.stream.Stream.of(
Arguments.of("classic", true),
Arguments.of("classic", false),
Arguments.of("streams", true),
Arguments.of("streams", false)
);
}
@BeforeEach
public void createTopics() throws Exception {
applicationId = "appId-" + TEST_NUMBER.getAndIncrement();
CLUSTER.deleteTopics(
SINGLE_PARTITION_INPUT_TOPIC, MULTI_PARTITION_INPUT_TOPIC,
SINGLE_PARTITION_THROUGH_TOPIC, MULTI_PARTITION_THROUGH_TOPIC,
SINGLE_PARTITION_OUTPUT_TOPIC, MULTI_PARTITION_OUTPUT_TOPIC);
CLUSTER.createTopics(SINGLE_PARTITION_INPUT_TOPIC, SINGLE_PARTITION_THROUGH_TOPIC, SINGLE_PARTITION_OUTPUT_TOPIC);
CLUSTER.createTopic(MULTI_PARTITION_INPUT_TOPIC, NUM_TOPIC_PARTITIONS, 1);
CLUSTER.createTopic(MULTI_PARTITION_THROUGH_TOPIC, NUM_TOPIC_PARTITIONS, 1);
CLUSTER.createTopic(MULTI_PARTITION_OUTPUT_TOPIC, NUM_TOPIC_PARTITIONS, 1);
CLUSTER.setGroupStandbyReplicas(applicationId, 1);
}
@ParameterizedTest
@ValueSource(strings = {"classic", "streams"})
public void shouldBeAbleToRunWithEosEnabled(final String groupProtocol) throws Exception {
runSimpleCopyTest(1, SINGLE_PARTITION_INPUT_TOPIC, null, SINGLE_PARTITION_OUTPUT_TOPIC, false, groupProtocol);
}
@ParameterizedTest
@ValueSource(strings = {"classic", "streams"})
public void shouldCommitCorrectOffsetIfInputTopicIsTransactional(final String groupProtocol) throws Exception {
runSimpleCopyTest(1, SINGLE_PARTITION_INPUT_TOPIC, null, SINGLE_PARTITION_OUTPUT_TOPIC, true, groupProtocol);
try (final Admin adminClient = Admin.create(mkMap(mkEntry(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers())));
final Consumer<byte[], byte[]> consumer = new KafkaConsumer<>(mkMap(
mkEntry(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()),
mkEntry(ConsumerConfig.GROUP_ID_CONFIG, applicationId),
mkEntry(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class),
mkEntry(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class)))) {
waitForEmptyConsumerGroup(adminClient, applicationId, 5 * MAX_POLL_INTERVAL_MS);
final TopicPartition topicPartition = new TopicPartition(SINGLE_PARTITION_INPUT_TOPIC, 0);
final Collection<TopicPartition> topicPartitions = Collections.singleton(topicPartition);
final long committedOffset = adminClient.listConsumerGroupOffsets(applicationId).partitionsToOffsetAndMetadata().get().get(topicPartition).offset();
consumer.assign(topicPartitions);
final long consumerPosition = consumer.position(topicPartition);
final long endOffset = consumer.endOffsets(topicPartitions).get(topicPartition);
assertThat(committedOffset, equalTo(consumerPosition));
assertThat(committedOffset, equalTo(endOffset));
}
}
@ParameterizedTest
@ValueSource(strings = {"classic", "streams"})
public void shouldBeAbleToRestartAfterClose(final String groupProtocol) throws Exception {
runSimpleCopyTest(2, SINGLE_PARTITION_INPUT_TOPIC, null, SINGLE_PARTITION_OUTPUT_TOPIC, false, groupProtocol);
}
@ParameterizedTest
@ValueSource(strings = {"classic", "streams"})
public void shouldBeAbleToCommitToMultiplePartitions(final String groupProtocol) throws Exception {
runSimpleCopyTest(1, SINGLE_PARTITION_INPUT_TOPIC, null, MULTI_PARTITION_OUTPUT_TOPIC, false, groupProtocol);
}
@ParameterizedTest
@ValueSource(strings = {"classic", "streams"})
public void shouldBeAbleToCommitMultiplePartitionOffsets(final String groupProtocol) throws Exception {
runSimpleCopyTest(1, MULTI_PARTITION_INPUT_TOPIC, null, SINGLE_PARTITION_OUTPUT_TOPIC, false, groupProtocol);
}
@ParameterizedTest
@ValueSource(strings = {"classic", "streams"})
public void shouldBeAbleToRunWithTwoSubtopologies(final String groupProtocol) throws Exception {
runSimpleCopyTest(1, SINGLE_PARTITION_INPUT_TOPIC, SINGLE_PARTITION_THROUGH_TOPIC, SINGLE_PARTITION_OUTPUT_TOPIC, false, groupProtocol);
}
@ParameterizedTest
@ValueSource(strings = {"classic", "streams"})
public void shouldBeAbleToRunWithTwoSubtopologiesAndMultiplePartitions(final String groupProtocol) throws Exception {
runSimpleCopyTest(1, MULTI_PARTITION_INPUT_TOPIC, MULTI_PARTITION_THROUGH_TOPIC, MULTI_PARTITION_OUTPUT_TOPIC, false, groupProtocol);
}
private void runSimpleCopyTest(final int numberOfRestarts,
final String inputTopic,
final String throughTopic,
final String outputTopic,
final boolean inputTopicTransactional,
final String groupProtocol) throws Exception {
final StreamsBuilder builder = new StreamsBuilder();
final KStream<Long, Long> input = builder.stream(inputTopic);
KStream<Long, Long> output = input;
if (throughTopic != null) {
input.to(throughTopic);
output = builder.stream(throughTopic);
}
output.to(outputTopic);
final Properties properties = new Properties();
properties.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE_V2);
properties.put(StreamsConfig.STATESTORE_CACHE_MAX_BYTES_CONFIG, 0);
properties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100L);
properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.MAX_POLL_RECORDS_CONFIG), 1);
properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.METADATA_MAX_AGE_CONFIG), "1000");
properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG), "earliest");
properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG), MAX_POLL_INTERVAL_MS - 1);
properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG), MAX_POLL_INTERVAL_MS);
properties.put(StreamsConfig.GROUP_PROTOCOL_CONFIG, groupProtocol);
for (int i = 0; i < numberOfRestarts; ++i) {
final Properties config = StreamsTestUtils.getStreamsConfig(
applicationId,
CLUSTER.bootstrapServers(),
Serdes.LongSerde.class.getName(),
Serdes.LongSerde.class.getName(),
properties);
final List<KeyValue<Long, Long>> inputData = prepareData(i * 100, i * 100 + 10L, 0L, 1L);
final Properties producerConfigs = new Properties();
if (inputTopicTransactional) {
producerConfigs.setProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, applicationId + "-input-producer");
}
IntegrationTestUtils.produceKeyValuesSynchronously(
inputTopic,
inputData,
TestUtils.producerConfig(CLUSTER.bootstrapServers(), LongSerializer.class, LongSerializer.class, producerConfigs),
CLUSTER.time,
inputTopicTransactional
);
try (final KafkaStreams streams = new KafkaStreams(builder.build(), config)) {
startApplicationAndWaitUntilRunning(streams);
final List<KeyValue<Long, Long>> committedRecords = readResult(outputTopic, inputData.size(), CONSUMER_GROUP_ID);
checkResultPerKey(committedRecords, inputData, "The committed records do not match what expected");
}
}
}
private void checkResultPerKey(final List<KeyValue<Long, Long>> result,
final List<KeyValue<Long, Long>> expectedResult,
final String reason) {
final Set<Long> allKeys = new HashSet<>();
addAllKeys(allKeys, result);
addAllKeys(allKeys, expectedResult);
for (final Long key : allKeys) {
assertThat(reason, getAllRecordPerKey(key, result), equalTo(getAllRecordPerKey(key, expectedResult)));
}
}
private void addAllKeys(final Set<Long> allKeys, final List<KeyValue<Long, Long>> records) {
for (final KeyValue<Long, Long> record : records) {
allKeys.add(record.key);
}
}
private List<KeyValue<Long, Long>> getAllRecordPerKey(final Long key, final List<KeyValue<Long, Long>> records) {
final List<KeyValue<Long, Long>> recordsPerKey = new ArrayList<>(records.size());
for (final KeyValue<Long, Long> record : records) {
if (record.key.equals(key)) {
recordsPerKey.add(record);
}
}
return recordsPerKey;
}
@ParameterizedTest
@ValueSource(strings = {"classic", "streams"})
public void shouldBeAbleToPerformMultipleTransactions(final String groupProtocol) throws Exception {
final StreamsBuilder builder = new StreamsBuilder();
builder.stream(SINGLE_PARTITION_INPUT_TOPIC).to(SINGLE_PARTITION_OUTPUT_TOPIC);
final Properties properties = new Properties();
properties.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE_V2);
properties.put(StreamsConfig.STATESTORE_CACHE_MAX_BYTES_CONFIG, 0);
properties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100L);
properties.put(ConsumerConfig.METADATA_MAX_AGE_CONFIG, "1000");
properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
properties.put(StreamsConfig.GROUP_PROTOCOL_CONFIG, groupProtocol);
final Properties config = StreamsTestUtils.getStreamsConfig(
applicationId,
CLUSTER.bootstrapServers(),
Serdes.LongSerde.class.getName(),
Serdes.LongSerde.class.getName(),
properties);
try (final KafkaStreams streams = new KafkaStreams(builder.build(), config)) {
startApplicationAndWaitUntilRunning(streams);
final List<KeyValue<Long, Long>> firstBurstOfData = prepareData(0L, 5L, 0L);
final List<KeyValue<Long, Long>> secondBurstOfData = prepareData(5L, 8L, 0L);
IntegrationTestUtils.produceKeyValuesSynchronously(
SINGLE_PARTITION_INPUT_TOPIC,
firstBurstOfData,
TestUtils.producerConfig(CLUSTER.bootstrapServers(), LongSerializer.class, LongSerializer.class),
CLUSTER.time
);
final List<KeyValue<Long, Long>> firstCommittedRecords = readResult(SINGLE_PARTITION_OUTPUT_TOPIC, firstBurstOfData.size(), CONSUMER_GROUP_ID);
assertThat(firstCommittedRecords, equalTo(firstBurstOfData));
IntegrationTestUtils.produceKeyValuesSynchronously(
SINGLE_PARTITION_INPUT_TOPIC,
secondBurstOfData,
TestUtils.producerConfig(CLUSTER.bootstrapServers(), LongSerializer.class, LongSerializer.class),
CLUSTER.time
);
final List<KeyValue<Long, Long>> secondCommittedRecords = readResult(SINGLE_PARTITION_OUTPUT_TOPIC, secondBurstOfData.size(), CONSUMER_GROUP_ID);
assertThat(secondCommittedRecords, equalTo(secondBurstOfData));
}
}
@ParameterizedTest
@MethodSource("groupProtocolAndProcessingThreadsParameters")
public void shouldNotViolateEosIfOneTaskFails(final String groupProtocol, final boolean processingThreadsEnabled) throws Exception {
// this test writes 10 + 5 + 5 records per partition (running with 2 partitions)
// the app is supposed to copy all 40 records into the output topic
//
// the app first commits after each 10 records per partition(total 20 records), and thus will have 2 * 5 uncommitted writes
//
// the failure gets inject after 20 committed and 30 uncommitted records got received
// -> the failure only kills one thread
// after fail over, we should read 40 committed records (even if 50 record got written)
try (final KafkaStreams streams = getKafkaStreams("dummy", false, "appDir", 2, groupProtocol, processingThreadsEnabled)) {
startApplicationAndWaitUntilRunning(streams);
final List<KeyValue<Long, Long>> committedDataBeforeFailure = prepareData(0L, 10L, 0L, 1L);
final List<KeyValue<Long, Long>> uncommittedDataBeforeFailure = prepareData(10L, 15L, 0L, 1L);
final List<KeyValue<Long, Long>> dataBeforeFailure = new ArrayList<>(
committedDataBeforeFailure.size() + uncommittedDataBeforeFailure.size());
dataBeforeFailure.addAll(committedDataBeforeFailure);
dataBeforeFailure.addAll(uncommittedDataBeforeFailure);
final List<KeyValue<Long, Long>> dataAfterFailure = prepareData(15L, 20L, 0L, 1L);
writeInputData(committedDataBeforeFailure);
waitForCondition(
() -> commitRequested.get() == 2, MAX_WAIT_TIME_MS,
"StreamsTasks did not request commit.");
// expected end state per output partition (C == COMMIT; A == ABORT; ---> indicate the changes):
//
// p-0: ---> 10 rec + C
// p-1: ---> 10 rec + C
final List<KeyValue<Long, Long>> committedRecords = readResult(SINGLE_PARTITION_OUTPUT_TOPIC, committedDataBeforeFailure.size(), CONSUMER_GROUP_ID);
checkResultPerKey(
committedRecords,
committedDataBeforeFailure,
"The committed records before failure do not match what expected");
writeInputData(uncommittedDataBeforeFailure);
// expected end state per output partition (C == COMMIT; A == ABORT; ---> indicate the changes):
//
// p-0: ---> 10 rec + C + 5 rec (pending)
// p-1: ---> 10 rec + C + 5 rec (pending)
final List<KeyValue<Long, Long>> uncommittedRecords = readResult(SINGLE_PARTITION_OUTPUT_TOPIC, dataBeforeFailure.size(), null);
checkResultPerKey(
uncommittedRecords,
dataBeforeFailure,
"The uncommitted records before failure do not match what expected");
errorInjected.set(true);
writeInputData(dataAfterFailure);
waitForCondition(
() -> uncaughtException != null, MAX_WAIT_TIME_MS,
"Should receive uncaught exception from one StreamThread.");
// expected end state per output partition (C == COMMIT; A == ABORT; ---> indicate the changes):
//
// p-0: ---> 10 rec + C + 5 rec + C + 5 rec + C
// p-1: ---> 10 rec + C + 5 rec + C + 5 rec + C
final List<KeyValue<Long, Long>> allCommittedRecords = readResult(
SINGLE_PARTITION_OUTPUT_TOPIC,
committedDataBeforeFailure.size() + uncommittedDataBeforeFailure.size() + dataAfterFailure.size(),
CONSUMER_GROUP_ID + "_ALL");
final List<KeyValue<Long, Long>> committedRecordsAfterFailure = readResult(
SINGLE_PARTITION_OUTPUT_TOPIC,
uncommittedDataBeforeFailure.size() + dataAfterFailure.size(),
CONSUMER_GROUP_ID);
final int allCommittedRecordsAfterRecoverySize = committedDataBeforeFailure.size() +
uncommittedDataBeforeFailure.size() + dataAfterFailure.size();
final List<KeyValue<Long, Long>> allExpectedCommittedRecordsAfterRecovery = new ArrayList<>(allCommittedRecordsAfterRecoverySize);
allExpectedCommittedRecordsAfterRecovery.addAll(committedDataBeforeFailure);
allExpectedCommittedRecordsAfterRecovery.addAll(uncommittedDataBeforeFailure);
allExpectedCommittedRecordsAfterRecovery.addAll(dataAfterFailure);
final int committedRecordsAfterRecoverySize = uncommittedDataBeforeFailure.size() + dataAfterFailure.size();
final List<KeyValue<Long, Long>> expectedCommittedRecordsAfterRecovery = new ArrayList<>(committedRecordsAfterRecoverySize);
expectedCommittedRecordsAfterRecovery.addAll(uncommittedDataBeforeFailure);
expectedCommittedRecordsAfterRecovery.addAll(dataAfterFailure);
checkResultPerKey(
allCommittedRecords,
allExpectedCommittedRecordsAfterRecovery,
"The all committed records after recovery do not match what expected");
checkResultPerKey(
committedRecordsAfterFailure,
expectedCommittedRecordsAfterRecovery,
"The committed records after recovery do not match what expected");
assertThat("Should only get one uncaught exception from Streams.", hasUnexpectedError, is(false));
}
}
/**
 * Exactly-once (EOS) failure test for a <em>stateful</em> topology: an injected error kills one
 * StreamThread mid-batch, and after fail-over the application must still produce exactly-once
 * output and a state store that reflects every input record exactly once.
 *
 * @param groupProtocol            consumer group protocol variant under test — values come from
 *                                 {@code groupProtocolAndProcessingThreadsParameters}
 * @param processingThreadsEnabled whether the internal processing-thread mode is enabled
 */
@ParameterizedTest
@MethodSource("groupProtocolAndProcessingThreadsParameters")
public void shouldNotViolateEosIfOneTaskFailsWithState(final String groupProtocol, final boolean processingThreadsEnabled) throws Exception {
// this test updates a store with 10 + 5 + 5 records per partition (running with 2 partitions)
// the app is supposed to emit all 40 update records into the output topic
//
// the app first commits after each 10 records per partition (total 20 records), and thus will have 2 * 5 uncommitted writes
// and store updates (ie, another 5 uncommitted writes to a changelog topic per partition)
// in the uncommitted batch, sending some data for the new key to validate that upon resuming they will not be shown up in the store
//
// the failure gets inject after 20 committed and 30 uncommitted records got received
// -> the failure only kills one thread
// after fail over, we should read 40 committed records and the state stores should contain the correct sums
// per key (even if some records got processed twice)
// We need more processing time under "with state" situation, so increasing the max.poll.interval.ms
// to avoid unexpected rebalance during test, which will cause unexpected fail over triggered
try (final KafkaStreams streams = getKafkaStreams("dummy", true, "appDir", 2, groupProtocol, processingThreadsEnabled)) {
startApplicationAndWaitUntilRunning(streams);
final List<KeyValue<Long, Long>> committedDataBeforeFailure = prepareData(0L, 10L, 0L, 1L);
// the uncommitted batch also writes keys 2 and 3, to verify aborted writes never surface in the store
final List<KeyValue<Long, Long>> uncommittedDataBeforeFailure = prepareData(10L, 15L, 0L, 1L, 2L, 3L);
final List<KeyValue<Long, Long>> dataBeforeFailure = new ArrayList<>(
committedDataBeforeFailure.size() + uncommittedDataBeforeFailure.size());
dataBeforeFailure.addAll(committedDataBeforeFailure);
dataBeforeFailure.addAll(uncommittedDataBeforeFailure);
final List<KeyValue<Long, Long>> dataAfterFailure = prepareData(15L, 20L, 0L, 1L);
writeInputData(committedDataBeforeFailure);
// one commit request per task (2 partitions -> 2 tasks) must be observed before reading results
waitForCondition(
() -> commitRequested.get() == 2, MAX_WAIT_TIME_MS,
"StreamsTasks did not request commit.");
// expected end state per output partition (C == COMMIT; A == ABORT; ---> indicate the changes):
//
// p-0: ---> 10 rec + C
// p-1: ---> 10 rec + C
final List<KeyValue<Long, Long>> committedRecords = readResult(SINGLE_PARTITION_OUTPUT_TOPIC, committedDataBeforeFailure.size(), CONSUMER_GROUP_ID);
checkResultPerKey(
committedRecords,
computeExpectedResult(committedDataBeforeFailure),
"The committed records before failure do not match what expected");
writeInputData(uncommittedDataBeforeFailure);
// expected end state per output partition (C == COMMIT; A == ABORT; ---> indicate the changes):
//
// p-0: ---> 10 rec + C + 5 rec (pending)
// p-1: ---> 10 rec + C + 5 rec (pending)
// NOTE(review): a null group id presumably makes readResult use isolation.level=read_uncommitted — confirm in readResult
final List<KeyValue<Long, Long>> uncommittedRecords = readResult(SINGLE_PARTITION_OUTPUT_TOPIC, dataBeforeFailure.size(), null);
final List<KeyValue<Long, Long>> expectedResultBeforeFailure = computeExpectedResult(dataBeforeFailure);
checkResultPerKey(
uncommittedRecords,
expectedResultBeforeFailure,
"The uncommitted records before failure do not match what expected");
verifyStateStore(
streams,
getMaxPerKey(expectedResultBeforeFailure),
"The state store content before failure do not match what expected");
// arm the one-thread failure described in the header comment, then feed more data to trigger it
errorInjected.set(true);
writeInputData(dataAfterFailure);
waitForCondition(
() -> uncaughtException != null, MAX_WAIT_TIME_MS,
"Should receive uncaught exception from one StreamThread.");
// expected end state per output partition (C == COMMIT; A == ABORT; ---> indicate the changes):
//
// p-0: ---> 10 rec + C + 5 rec + C + 5 rec + C
// p-1: ---> 10 rec + C + 5 rec + C + 5 rec + C
// a fresh consumer group ("_ALL") reads the topic from the beginning ...
final List<KeyValue<Long, Long>> allCommittedRecords = readResult(
SINGLE_PARTITION_OUTPUT_TOPIC,
committedDataBeforeFailure.size() + uncommittedDataBeforeFailure.size() + dataAfterFailure.size(),
CONSUMER_GROUP_ID + "_ALL");
// ... while the original group continues from its previous committed position
final List<KeyValue<Long, Long>> committedRecordsAfterFailure = readResult(
SINGLE_PARTITION_OUTPUT_TOPIC,
uncommittedDataBeforeFailure.size() + dataAfterFailure.size(),
CONSUMER_GROUP_ID);
final int allCommittedRecordsAfterRecoverySize = committedDataBeforeFailure.size() +
uncommittedDataBeforeFailure.size() + dataAfterFailure.size();
final List<KeyValue<Long, Long>> allExpectedCommittedRecordsAfterRecovery = new ArrayList<>(allCommittedRecordsAfterRecoverySize);
allExpectedCommittedRecordsAfterRecovery.addAll(committedDataBeforeFailure);
allExpectedCommittedRecordsAfterRecovery.addAll(uncommittedDataBeforeFailure);
allExpectedCommittedRecordsAfterRecovery.addAll(dataAfterFailure);
final List<KeyValue<Long, Long>> expectedResult = computeExpectedResult(allExpectedCommittedRecordsAfterRecovery);
checkResultPerKey(
allCommittedRecords,
expectedResult,
"The all committed records after recovery do not match what expected");
checkResultPerKey(
committedRecordsAfterFailure,
expectedResult.subList(committedDataBeforeFailure.size(), expectedResult.size()),
"The committed records after recovery do not match what expected");
verifyStateStore(
streams,
getMaxPerKey(expectedResult),
"The state store content after recovery do not match what expected");
assertThat("Should only get one uncaught exception from Streams.", hasUnexpectedError, is(false));
}
}
/**
 * Exactly-once (EOS) fencing test across two isolated application instances: one instance is
 * stalled, forcing a rebalance that moves both partitions to the surviving instance; when the
 * stalled instance resumes, a second rebalance restores the two-instance assignment. Output must
 * remain exactly-once through both rebalances.
 *
 * @param groupProtocol            consumer group protocol variant under test — values come from
 *                                 {@code groupProtocolAndProcessingThreadsParameters}
 * @param processingThreadsEnabled whether the internal processing-thread mode is enabled
 */
@ParameterizedTest
@MethodSource("groupProtocolAndProcessingThreadsParameters")
public void shouldNotViolateEosIfOneTaskGetsFencedUsingIsolatedAppInstances(final String groupProtocol, final boolean processingThreadsEnabled) throws Exception {
// this test writes 10 + 5 + 5 + 10 records per partition (running with 2 partitions)
// the app is supposed to copy all 60 records into the output topic
//
// the app first commits after each 10 records per partition, and thus will have 2 * 5 uncommitted writes
//
// Then, a stall gets injected after 20 committed and 30 uncommitted records got received
// -> the stall only affects one thread and should trigger a rebalance
// after rebalancing, we should read 40 committed records (even if 50 record got written)
//
// afterward, the "stalling" thread resumes, and another rebalance should get triggered
// we write the remaining 20 records and verify to read 60 result records
try (
final KafkaStreams streams1 = getKafkaStreams("streams1", false, "appDir1", 1, groupProtocol, processingThreadsEnabled);
final KafkaStreams streams2 = getKafkaStreams("streams2", false, "appDir2", 1, groupProtocol, processingThreadsEnabled)
) {
startApplicationAndWaitUntilRunning(streams1);
startApplicationAndWaitUntilRunning(streams2);
final List<KeyValue<Long, Long>> committedDataBeforeStall = prepareData(0L, 10L, 0L, 1L);
final List<KeyValue<Long, Long>> uncommittedDataBeforeStall = prepareData(10L, 15L, 0L, 1L);
final List<KeyValue<Long, Long>> dataBeforeStall = new ArrayList<>(
committedDataBeforeStall.size() + uncommittedDataBeforeStall.size());
dataBeforeStall.addAll(committedDataBeforeStall);
dataBeforeStall.addAll(uncommittedDataBeforeStall);
final List<KeyValue<Long, Long>> dataToTriggerFirstRebalance = prepareData(15L, 20L, 0L, 1L);
final List<KeyValue<Long, Long>> dataAfterSecondRebalance = prepareData(20L, 30L, 0L, 1L);
writeInputData(committedDataBeforeStall);
// one commit request per task (2 partitions -> 2 tasks) must be observed before reading results
waitForCondition(
() -> commitRequested.get() == 2, MAX_WAIT_TIME_MS,
"StreamsTasks did not request commit.");
// expected end state per output partition (C == COMMIT; A == ABORT; ---> indicate the changes):
//
// p-0: ---> 10 rec + C
// p-1: ---> 10 rec + C
final List<KeyValue<Long, Long>> committedRecords = readResult(SINGLE_PARTITION_OUTPUT_TOPIC, committedDataBeforeStall.size(), CONSUMER_GROUP_ID);
checkResultPerKey(
committedRecords,
committedDataBeforeStall,
"The committed records before stall do not match what expected");
writeInputData(uncommittedDataBeforeStall);
// expected end state per output partition (C == COMMIT; A == ABORT; ---> indicate the changes):
//
// p-0: ---> 10 rec + C + 5 rec (pending)
// p-1: ---> 10 rec + C + 5 rec (pending)
// NOTE(review): a null group id presumably makes readResult use isolation.level=read_uncommitted — confirm in readResult
final List<KeyValue<Long, Long>> uncommittedRecords = readResult(SINGLE_PARTITION_OUTPUT_TOPIC, dataBeforeStall.size(), null);
checkResultPerKey(
uncommittedRecords,
dataBeforeStall,
"The uncommitted records before stall do not match what expected");
LOG.info("Injecting Stall");
stallInjected.set(true);
writeInputData(dataToTriggerFirstRebalance);
LOG.info("Input Data Written");
waitForCondition(
() -> stallingHost.get() != null,
MAX_WAIT_TIME_MS,
"Expected a host to start stalling"
);
// either instance may be the one that stalls; pick the survivor for the assertions below
final String observedStallingHost = stallingHost.get();
final KafkaStreams remainingInstance;
if ("streams1".equals(observedStallingHost)) {
remainingInstance = streams2;
} else if ("streams2".equals(observedStallingHost)) {
remainingInstance = streams1;
} else {
throw new IllegalArgumentException("unexpected host name: " + observedStallingHost);
}
// the stalling instance won't have an updated view, and it doesn't matter what it thinks
// the assignment is. We only really care that the remaining instance only sees one host
// that owns both partitions.
waitForCondition(
() -> remainingInstance.metadataForAllStreamsClients().size() == 1
&& remainingInstance.metadataForAllStreamsClients().iterator().next().topicPartitions().size() == 2,
MAX_WAIT_TIME_MS,
() -> "Should have rebalanced.\n" +
"Streams1[" + streams1.metadataForAllStreamsClients() + "]\n" +
"Streams2[" + streams2.metadataForAllStreamsClients() + "]");
// expected end state per output partition (C == COMMIT; A == ABORT; ---> indicate the changes):
//
// p-0: ---> 10 rec + C + 5 rec + C + 5 rec + C
// p-1: ---> 10 rec + C + 5 rec + C + 5 rec + C
final List<KeyValue<Long, Long>> committedRecordsAfterRebalance = readResult(
SINGLE_PARTITION_OUTPUT_TOPIC,
uncommittedDataBeforeStall.size() + dataToTriggerFirstRebalance.size(),
CONSUMER_GROUP_ID);
final List<KeyValue<Long, Long>> expectedCommittedRecordsAfterRebalance = new ArrayList<>(
uncommittedDataBeforeStall.size() + dataToTriggerFirstRebalance.size());
expectedCommittedRecordsAfterRebalance.addAll(uncommittedDataBeforeStall);
expectedCommittedRecordsAfterRebalance.addAll(dataToTriggerFirstRebalance);
checkResultPerKey(
committedRecordsAfterRebalance,
expectedCommittedRecordsAfterRebalance,
"The all committed records after rebalance do not match what expected");
LOG.info("Releasing Stall");
doStall = false;
// Once the stalling host rejoins the group, we expect both instances to see both instances.
// It doesn't really matter what the assignment is, but we might as well also assert that they
// both see both partitions assigned exactly once
waitForCondition(
() -> streams1.metadataForAllStreamsClients().size() == 2
&& streams2.metadataForAllStreamsClients().size() == 2
&& streams1.metadataForAllStreamsClients().stream().mapToLong(meta -> meta.topicPartitions().size()).sum() == 2
&& streams2.metadataForAllStreamsClients().stream().mapToLong(meta -> meta.topicPartitions().size()).sum() == 2,
MAX_WAIT_TIME_MS,
() -> "Should have rebalanced.\n" +
"Streams1[" + streams1.metadataForAllStreamsClients() + "]\n" +
"Streams2[" + streams2.metadataForAllStreamsClients() + "]");
writeInputData(dataAfterSecondRebalance);
// expected end state per output partition (C == COMMIT; A == ABORT; ---> indicate the changes):
//
// p-0: ---> 10 rec + C + 5 rec + C + 5 rec + C + 10 rec + C
// p-1: ---> 10 rec + C + 5 rec + C + 5 rec + C + 10 rec + C
// a fresh consumer group ("_ALL") re-reads the full topic to verify the end-to-end result
final List<KeyValue<Long, Long>> allCommittedRecords = readResult(
SINGLE_PARTITION_OUTPUT_TOPIC,
committedDataBeforeStall.size() + uncommittedDataBeforeStall.size()
+ dataToTriggerFirstRebalance.size() + dataAfterSecondRebalance.size(),
CONSUMER_GROUP_ID + "_ALL");
final int allCommittedRecordsAfterRecoverySize = committedDataBeforeStall.size() +
uncommittedDataBeforeStall.size() + dataToTriggerFirstRebalance.size() + dataAfterSecondRebalance.size();
final List<KeyValue<Long, Long>> allExpectedCommittedRecordsAfterRecovery = new ArrayList<>(allCommittedRecordsAfterRecoverySize);
allExpectedCommittedRecordsAfterRecovery.addAll(committedDataBeforeStall);
allExpectedCommittedRecordsAfterRecovery.addAll(uncommittedDataBeforeStall);
allExpectedCommittedRecordsAfterRecovery.addAll(dataToTriggerFirstRebalance);
allExpectedCommittedRecordsAfterRecovery.addAll(dataAfterSecondRebalance);
checkResultPerKey(
allCommittedRecords,
allExpectedCommittedRecordsAfterRecovery,
"The all committed records after recovery do not match what expected");
}
}
/**
 * Verifies that a clean shutdown of a stateful EOS application leaves the latest offsets in the
 * local checkpoint files: the app processes one batch, the committed output and store content are
 * validated, and after closing both task checkpoints are inspected.
 *
 * @param groupProtocol            consumer group protocol variant under test
 * @param processingThreadsEnabled whether the internal processing-thread mode is enabled
 */
@ParameterizedTest
@MethodSource("groupProtocolAndProcessingThreadsParameters")
public void shouldWriteLatestOffsetsToCheckpointOnShutdown(final String groupProtocol, final boolean processingThreadsEnabled) throws Exception {
final List<KeyValue<Long, Long>> inputRecords = prepareData(0L, 10L, 0L, 1L);
final List<KeyValue<Long, Long>> expectedOutput = computeExpectedResult(inputRecords);
try (final KafkaStreams app = getKafkaStreams("streams", true, "appDir", 1, groupProtocol, processingThreadsEnabled)) {
writeInputData(inputRecords);
startApplicationAndWaitUntilRunning(app);
// both tasks (one per partition) must have requested a commit before we read results
waitForCondition(
() -> commitRequested.get() == 2, MAX_WAIT_TIME_MS,
"StreamsTasks did not request commit.");
final List<KeyValue<Long, Long>> observedCommitted =
readResult(SINGLE_PARTITION_OUTPUT_TOPIC, inputRecords.size(), CONSUMER_GROUP_ID);
checkResultPerKey(
observedCommitted,
expectedOutput,
"The committed records do not match what expected");
verifyStateStore(
app,
getMaxPerKey(expectedOutput),
"The state store content do not match what expected");
}
// the try-with-resources close above is the clean shutdown under test
verifyOffsetsAreInCheckpoint(0);
verifyOffsetsAreInCheckpoint(1);
}
/**
 * Verifies that when an application is closed cleanly while a store is still restoring (a
 * {@link TaskCorruptedException} forces the restore), the checkpoint file records exactly the
 * offset up to which restoration had progressed — no more, no less.
 *
 * @param groupProtocol            consumer group protocol variant under test
 * @param processingThreadsEnabled whether the internal processing-thread mode is enabled
 */
@ParameterizedTest
@MethodSource("groupProtocolAndProcessingThreadsParameters")
public void shouldCheckpointRestoredOffsetsWhenClosingCleanDuringRestoring(
final String groupProtocol, final boolean processingThreadsEnabled) throws Exception {
final Properties streamsConfiguration = new Properties();
streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, applicationId);
streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.IntegerSerde.class);
streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.StringSerde.class);
streamsConfiguration.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE_V2);
streamsConfiguration.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, 1);
streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory(applicationId).getPath());
streamsConfiguration.put(InternalConfig.PROCESSING_THREADS_ENABLED, processingThreadsEnabled);
streamsConfiguration.put(StreamsConfig.GROUP_PROTOCOL_CONFIG, groupProtocol);
// small restore batches keep the restore slow enough to close mid-restore deterministically
streamsConfiguration.put(StreamsConfig.restoreConsumerPrefix(ConsumerConfig.MAX_POLL_RECORDS_CONFIG), 100);
final String stateStoreName = "stateStore";
purgeLocalStreamsState(streamsConfiguration);
final int startKey = 1;
final int endKey = 30001;
// first batch: keys [1, 29000); the remaining 1000 keys are produced after the corruption is armed
final List<KeyValue<Integer, Integer>> recordBatch1 = IntStream.range(startKey, endKey - 1000).mapToObj(i -> KeyValue.pair(i, 0)).collect(Collectors.toList());
IntegrationTestUtils.produceKeyValuesSynchronously(MULTI_PARTITION_INPUT_TOPIC,
recordBatch1,
TestUtils.producerConfig(CLUSTER.bootstrapServers(),
IntegerSerializer.class,
IntegerSerializer.class),
CLUSTER.time);
final StoreBuilder<KeyValueStore<Integer, String>> stateStore = Stores.keyValueStoreBuilder(
Stores.persistentKeyValueStore(stateStoreName),
Serdes.Integer(),
Serdes.String()).withCachingEnabled();
final int partitionToVerify = 0;
// released once restoration of partition 0 has demonstrably progressed (batchEndOffset > 100)
final CountDownLatch latch = new CountDownLatch(1);
// when set, the next record on partitionToVerify throws TaskCorruptedException exactly once
final AtomicBoolean throwException = new AtomicBoolean(false);
final TaskId task00 = new TaskId(0, partitionToVerify);
final AtomicLong restoredOffsetsForPartition0 = new AtomicLong(0);
final Topology topology = new Topology();
topology
.addSource("source", MULTI_PARTITION_INPUT_TOPIC)
.addProcessor("processor", () -> new Processor<Integer, String, Integer, String>() {
KeyValueStore<Integer, String> stateStore;
ProcessorContext<Integer, String> context;
@Override
public void init(final ProcessorContext<Integer, String> context) {
Processor.super.init(context);
this.context = context;
stateStore = context.getStateStore(stateStoreName);
}
@Override
public void process(final Record<Integer, String> record) {
context.recordMetadata().ifPresent(recordMetadata -> {
if (recordMetadata.partition() == partitionToVerify) {
// compareAndSet ensures the corruption is thrown only once, then processing resumes
if (throwException.compareAndSet(true, false)) {
throw new TaskCorruptedException(Collections.singleton(task00));
}
stateStore.put(record.key(), record.value());
} else {
stateStore.put(record.key(), record.value());
}
});
}
@Override
public void close() {
Processor.super.close();
}
}, "source")
.addStateStore(stateStore, "processor");
final KafkaStreams kafkaStreams = new KafkaStreams(topology, streamsConfiguration);
// track restore progress of partition 0 so the final checkpoint can be compared against it
kafkaStreams.setGlobalStateRestoreListener(new StateRestoreListener() {
@Override
public void onRestoreStart(final TopicPartition topicPartition,
final String storeName,
final long startingOffset,
final long endingOffset) {}
@Override
public void onBatchRestored(final TopicPartition topicPartition,
final String storeName,
final long batchEndOffset,
final long numRestored) {
if (topicPartition.partition() == 0) {
restoredOffsetsForPartition0.set(batchEndOffset);
if (batchEndOffset > 100) {
latch.countDown();
}
}
}
@Override
public void onRestoreEnd(final TopicPartition topicPartition,
final String storeName,
final long totalRestored) {}
});
startApplicationAndWaitUntilRunning(Collections.singletonList(kafkaStreams), Duration.ofSeconds(60));
// make sure enough changelog data exists so the triggered restore takes a measurable time
ensureCommittedRecordsInTopicPartition(
applicationId + "-" + stateStoreName + "-changelog",
partitionToVerify,
2000
);
throwException.set(true);
final List<KeyValue<Integer, Integer>> recordBatch2 = IntStream.range(endKey - 1000, endKey).mapToObj(i -> KeyValue.pair(i, 0)).collect(Collectors.toList());
IntegrationTestUtils.produceKeyValuesSynchronously(MULTI_PARTITION_INPUT_TOPIC,
recordBatch2,
TestUtils.producerConfig(CLUSTER.bootstrapServers(),
IntegerSerializer.class,
IntegerSerializer.class),
CLUSTER.time);
// wait until restoration is in flight, then close cleanly in the middle of it
latch.await();
kafkaStreams.close();
waitForApplicationState(Collections.singletonList(kafkaStreams), KafkaStreams.State.NOT_RUNNING, Duration.ofSeconds(60));
final File checkpointFile = Paths.get(
streamsConfiguration.getProperty(StreamsConfig.STATE_DIR_CONFIG),
streamsConfiguration.getProperty(StreamsConfig.APPLICATION_ID_CONFIG),
task00.toString(),
".checkpoint"
).toFile();
assertTrue(checkpointFile.exists());
// the checkpointed offset must equal the last restored offset observed by the listener
final Map<TopicPartition, Long> checkpoints = new OffsetCheckpoint(checkpointFile).read();
assertEquals(
Long.valueOf(restoredOffsetsForPartition0.get()),
new ArrayList<>(checkpoints.values()).get(0)
);
}
// Last observed transactional producer id — presumably captured by a producer/client interceptor
// defined elsewhere in this class; TODO confirm where it is written and read.
private final AtomicReference<String> transactionalProducerId = new AtomicReference<>();
private
|
EosIntegrationTest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/mapping/List.java
|
{
"start": 617,
"end": 1621
}
|
class ____ extends IndexedCollection {
private int baseIndex;
/**
* hbm.xml binding
*/
public List(MetadataBuildingContext buildingContext, PersistentClass owner) {
super( buildingContext, owner );
}
/**
* annotation binding
*/
public List(Supplier<ManagedBean<? extends UserCollectionType>> customTypeBeanResolver, PersistentClass owner, MetadataBuildingContext buildingContext) {
super( customTypeBeanResolver, owner, buildingContext );
}
protected List(List original) {
super( original );
this.baseIndex = original.baseIndex;
}
@Override
public List copy() {
return new List( this );
}
public boolean isList() {
return true;
}
public CollectionType getDefaultCollectionType() {
return new ListType( getRole(), getReferencedPropertyName() );
}
public Object accept(ValueVisitor visitor) {
return visitor.accept(this);
}
public int getBaseIndex() {
return baseIndex;
}
public void setBaseIndex(int baseIndex) {
this.baseIndex = baseIndex;
}
}
|
List
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/bugs/_2839/Issue2839Exception.java
|
{
"start": 229,
"end": 357
}
|
class ____ extends Exception {
public Issue2839Exception(String message) {
super( message );
}
}
|
Issue2839Exception
|
java
|
apache__camel
|
components/camel-test/camel-test-main-junit5/src/main/java/org/apache/camel/test/main/junit5/CamelMainTest.java
|
{
"start": 2640,
"end": 2686
}
|
class ____ used.
*
* @return the main
|
is
|
java
|
spring-projects__spring-framework
|
spring-core/src/main/java/org/springframework/cglib/core/NamingPolicy.java
|
{
"start": 765,
"end": 939
}
|
interface ____ {
/**
* Choose a name for a generated class.
* @param prefix a dotted-name chosen by the generating class (possibly to put the generated
|
NamingPolicy
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/scheduler/adaptivebatch/AdaptiveBatchSchedulerFactory.java
|
{
"start": 5493,
"end": 21384
}
|
class ____ implements SchedulerNGFactory {
private static final Logger LOG = LoggerFactory.getLogger(AdaptiveBatchSchedulerFactory.class);
@Override
public SchedulerNG createInstance(
Logger log,
ExecutionPlan executionPlan,
Executor ioExecutor,
Configuration jobMasterConfiguration,
SlotPoolService slotPoolService,
ScheduledExecutorService futureExecutor,
ClassLoader userCodeLoader,
CheckpointRecoveryFactory checkpointRecoveryFactory,
Duration rpcTimeout,
BlobWriter blobWriter,
JobManagerJobMetricGroup jobManagerJobMetricGroup,
Duration slotRequestTimeout,
ShuffleMaster<?> shuffleMaster,
JobMasterPartitionTracker partitionTracker,
ExecutionDeploymentTracker executionDeploymentTracker,
long initializationTimestamp,
ComponentMainThreadExecutor mainThreadExecutor,
FatalErrorHandler fatalErrorHandler,
JobStatusListener jobStatusListener,
Collection<FailureEnricher> failureEnrichers,
BlocklistOperations blocklistOperations)
throws Exception {
ExecutionConfig executionConfig;
if (executionPlan instanceof JobGraph) {
executionConfig =
executionPlan.getSerializedExecutionConfig().deserializeValue(userCodeLoader);
} else if (executionPlan instanceof StreamGraph) {
executionConfig = ((StreamGraph) executionPlan).getExecutionConfig();
} else {
throw new FlinkException(
"Unsupported execution plan " + executionPlan.getClass().getCanonicalName());
}
final SlotPool slotPool =
slotPoolService
.castInto(SlotPool.class)
.orElseThrow(
() ->
new IllegalStateException(
"The AdaptiveBatchScheduler requires a SlotPool."));
final ExecutionSlotAllocatorFactory allocatorFactory =
createExecutionSlotAllocatorFactory(jobMasterConfiguration, slotPool);
final RestartBackoffTimeStrategy restartBackoffTimeStrategy =
RestartBackoffTimeStrategyFactoryLoader.createRestartBackoffTimeStrategyFactory(
executionPlan.getJobConfiguration(),
jobMasterConfiguration,
executionPlan.isCheckpointingEnabled())
.create();
log.info(
"Using restart back off time strategy {} for {} ({}).",
restartBackoffTimeStrategy,
executionPlan.getName(),
executionPlan.getJobID());
final boolean isJobRecoveryEnabled =
jobMasterConfiguration.get(BatchExecutionOptions.JOB_RECOVERY_ENABLED)
&& shuffleMaster.supportsBatchSnapshot();
BatchJobRecoveryHandler jobRecoveryHandler;
if (isJobRecoveryEnabled) {
FileSystemJobEventStore jobEventStore =
new FileSystemJobEventStore(executionPlan.getJobID(), jobMasterConfiguration);
JobEventManager jobEventManager = new JobEventManager(jobEventStore);
jobRecoveryHandler =
new DefaultBatchJobRecoveryHandler(
jobEventManager, jobMasterConfiguration, executionPlan.getJobID());
} else {
jobRecoveryHandler = new DummyBatchJobRecoveryHandler();
}
return createScheduler(
log,
executionPlan,
executionConfig,
ioExecutor,
jobMasterConfiguration,
futureExecutor,
userCodeLoader,
checkpointRecoveryFactory,
new CheckpointsCleaner(),
rpcTimeout,
blobWriter,
jobManagerJobMetricGroup,
shuffleMaster,
partitionTracker,
executionDeploymentTracker,
initializationTimestamp,
mainThreadExecutor,
jobStatusListener,
failureEnrichers,
blocklistOperations,
new DefaultExecutionOperations(),
allocatorFactory,
restartBackoffTimeStrategy,
new ScheduledExecutorServiceAdapter(futureExecutor),
DefaultVertexParallelismAndInputInfosDecider.from(
getDefaultMaxParallelism(jobMasterConfiguration, executionConfig),
executionPlan
.getJobConfiguration()
.get(
BatchExecutionOptionsInternal
.ADAPTIVE_SKEWED_OPTIMIZATION_SKEWED_FACTOR),
executionPlan
.getJobConfiguration()
.get(
BatchExecutionOptionsInternal
.ADAPTIVE_SKEWED_OPTIMIZATION_SKEWED_THRESHOLD)
.getBytes(),
jobMasterConfiguration),
jobRecoveryHandler);
}
@VisibleForTesting
public static AdaptiveBatchScheduler createScheduler(
Logger log,
ExecutionPlan executionPlan,
ExecutionConfig executionConfig,
Executor ioExecutor,
Configuration jobMasterConfiguration,
ScheduledExecutorService futureExecutor,
ClassLoader userCodeLoader,
CheckpointRecoveryFactory checkpointRecoveryFactory,
CheckpointsCleaner checkpointsCleaner,
Duration rpcTimeout,
BlobWriter blobWriter,
JobManagerJobMetricGroup jobManagerJobMetricGroup,
ShuffleMaster<?> shuffleMaster,
JobMasterPartitionTracker partitionTracker,
ExecutionDeploymentTracker executionDeploymentTracker,
long initializationTimestamp,
ComponentMainThreadExecutor mainThreadExecutor,
JobStatusListener jobStatusListener,
Collection<FailureEnricher> failureEnrichers,
BlocklistOperations blocklistOperations,
ExecutionOperations executionOperations,
ExecutionSlotAllocatorFactory allocatorFactory,
RestartBackoffTimeStrategy restartBackoffTimeStrategy,
ScheduledExecutor delayExecutor,
VertexParallelismAndInputInfosDecider vertexParallelismAndInputInfosDecider,
BatchJobRecoveryHandler jobRecoveryHandler)
throws Exception {
checkState(
executionPlan.getJobType() == JobType.BATCH,
"Adaptive batch scheduler only supports batch jobs");
checkAllExchangesAreSupported(executionPlan);
final boolean enableSpeculativeExecution =
jobMasterConfiguration.get(BatchExecutionOptions.SPECULATIVE_ENABLED);
final HybridPartitionDataConsumeConstraint hybridPartitionDataConsumeConstraint =
getOrDecideHybridPartitionDataConsumeConstraint(
jobMasterConfiguration, enableSpeculativeExecution);
final ExecutionGraphFactory executionGraphFactory =
new DefaultExecutionGraphFactory(
jobMasterConfiguration,
userCodeLoader,
executionDeploymentTracker,
futureExecutor,
ioExecutor,
rpcTimeout,
jobManagerJobMetricGroup,
blobWriter,
shuffleMaster,
partitionTracker,
true,
createExecutionJobVertexFactory(enableSpeculativeExecution),
hybridPartitionDataConsumeConstraint == ONLY_FINISHED_PRODUCERS);
final SchedulingStrategyFactory schedulingStrategyFactory =
new VertexwiseSchedulingStrategy.Factory(
loadInputConsumableDeciderFactory(hybridPartitionDataConsumeConstraint));
int defaultMaxParallelism =
getDefaultMaxParallelism(jobMasterConfiguration, executionConfig);
AdaptiveExecutionHandler adaptiveExecutionHandler =
AdaptiveExecutionHandlerFactory.create(
executionPlan,
jobRecoveryHandler instanceof DefaultBatchJobRecoveryHandler,
userCodeLoader,
futureExecutor);
return new AdaptiveBatchScheduler(
log,
adaptiveExecutionHandler,
ioExecutor,
jobMasterConfiguration,
componentMainThreadExecutor -> {},
delayExecutor,
userCodeLoader,
checkpointsCleaner,
checkpointRecoveryFactory,
jobManagerJobMetricGroup,
schedulingStrategyFactory,
FailoverStrategyFactoryLoader.loadFailoverStrategyFactory(jobMasterConfiguration),
restartBackoffTimeStrategy,
executionOperations,
new ExecutionVertexVersioner(),
allocatorFactory,
initializationTimestamp,
mainThreadExecutor,
jobStatusListener,
failureEnrichers,
executionGraphFactory,
shuffleMaster,
rpcTimeout,
vertexParallelismAndInputInfosDecider,
defaultMaxParallelism,
blocklistOperations,
hybridPartitionDataConsumeConstraint,
jobRecoveryHandler,
adaptiveExecutionHandler.createExecutionPlanSchedulingContext(
defaultMaxParallelism));
}
public static InputConsumableDecider.Factory loadInputConsumableDeciderFactory(
HybridPartitionDataConsumeConstraint hybridPartitionDataConsumeConstraint) {
switch (hybridPartitionDataConsumeConstraint) {
case ALL_PRODUCERS_FINISHED:
return AllFinishedInputConsumableDecider.Factory.INSTANCE;
case ONLY_FINISHED_PRODUCERS:
return PartialFinishedInputConsumableDecider.Factory.INSTANCE;
case UNFINISHED_PRODUCERS:
return DefaultInputConsumableDecider.Factory.INSTANCE;
default:
throw new IllegalStateException(
hybridPartitionDataConsumeConstraint + "is not supported.");
}
}
public static HybridPartitionDataConsumeConstraint
getOrDecideHybridPartitionDataConsumeConstraint(
Configuration configuration, boolean enableSpeculativeExecution) {
final HybridPartitionDataConsumeConstraint hybridPartitionDataConsumeConstraint =
configuration
.getOptional(JobManagerOptions.HYBRID_PARTITION_DATA_CONSUME_CONSTRAINT)
.orElseGet(
() -> {
HybridPartitionDataConsumeConstraint defaultConstraint =
enableSpeculativeExecution
? ONLY_FINISHED_PRODUCERS
: UNFINISHED_PRODUCERS;
LOG.info(
"Set {} to {} as it is not configured",
JobManagerOptions
.HYBRID_PARTITION_DATA_CONSUME_CONSTRAINT
.key(),
defaultConstraint.name());
return defaultConstraint;
});
if (enableSpeculativeExecution) {
Preconditions.checkState(
hybridPartitionDataConsumeConstraint != UNFINISHED_PRODUCERS,
"For speculative execution, only supports consume finished partition now.");
}
return hybridPartitionDataConsumeConstraint;
}
private static ExecutionSlotAllocatorFactory createExecutionSlotAllocatorFactory(
Configuration configuration, SlotPool slotPool) {
final SlotSelectionStrategy slotSelectionStrategy =
SlotSelectionStrategyUtils.selectSlotSelectionStrategy(
JobType.BATCH, configuration);
final PhysicalSlotProvider physicalSlotProvider =
new PhysicalSlotProviderImpl(slotSelectionStrategy, slotPool);
return new SimpleExecutionSlotAllocator.Factory(physicalSlotProvider, false);
}
private static ExecutionJobVertex.Factory createExecutionJobVertexFactory(
boolean enableSpeculativeExecution) {
if (enableSpeculativeExecution) {
return new SpeculativeExecutionJobVertex.Factory();
} else {
return new ExecutionJobVertex.Factory();
}
}
private static void checkAllExchangesAreSupported(final ExecutionPlan executionPlan) {
String errMsg =
String.format(
"At the moment, adaptive batch scheduler requires batch workloads "
+ "to be executed with types of all edges being BLOCKING or HYBRID_FULL/HYBRID_SELECTIVE. "
+ "To do that, you need to configure '%s' to '%s' or '%s/%s'. ",
ExecutionOptions.BATCH_SHUFFLE_MODE.key(),
BatchShuffleMode.ALL_EXCHANGES_BLOCKING,
BatchShuffleMode.ALL_EXCHANGES_HYBRID_FULL,
BatchShuffleMode.ALL_EXCHANGES_HYBRID_SELECTIVE);
if (executionPlan instanceof JobGraph) {
for (JobVertex jobVertex : ((JobGraph) executionPlan).getVertices()) {
for (IntermediateDataSet dataSet : jobVertex.getProducedDataSets()) {
checkState(
dataSet.getResultType().isBlockingOrBlockingPersistentResultPartition()
|| dataSet.getResultType().isHybridResultPartition(),
errMsg);
}
}
} else {
for (StreamNode streamNode : ((StreamGraph) executionPlan).getStreamNodes()) {
for (StreamEdge edge : streamNode.getOutEdges()) {
checkState(
!edge.getExchangeMode().equals(StreamExchangeMode.PIPELINED), errMsg);
}
}
}
}
static int getDefaultMaxParallelism(
Configuration configuration, ExecutionConfig executionConfig) {
return configuration
.getOptional(BatchExecutionOptions.ADAPTIVE_AUTO_PARALLELISM_MAX_PARALLELISM)
.orElse(
executionConfig.getParallelism() == ExecutionConfig.PARALLELISM_DEFAULT
? BatchExecutionOptions.ADAPTIVE_AUTO_PARALLELISM_MAX_PARALLELISM
.defaultValue()
: executionConfig.getParallelism());
}
@Override
public JobManagerOptions.SchedulerType getSchedulerType() {
return JobManagerOptions.SchedulerType.AdaptiveBatch;
}
}
|
AdaptiveBatchSchedulerFactory
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/impl/PojoProduceProxyInterceptEndpointTest.java
|
{
"start": 3168,
"end": 3251
}
|
interface ____ {
String echo(String word);
}
public static
|
EchoService
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/component/log/ConsumingAppender.java
|
{
"start": 1469,
"end": 3403
}
|
class ____ extends AbstractAppender {
private final Consumer<LogEvent> consumer;
public ConsumingAppender(String name, Consumer<LogEvent> consumer) {
this(name, PatternLayout.SIMPLE_CONVERSION_PATTERN, consumer);
}
public ConsumingAppender(String name, String pattern, Consumer<LogEvent> consumer) {
super(name, null, PatternLayout.newBuilder().withPattern(pattern).build(), true,
Property.EMPTY_ARRAY);
this.consumer = consumer;
}
@Override
public void append(LogEvent event) {
this.consumer.accept(event);
}
// *******************
// Helpers
// *******************
public static Appender newAppender(String loggerName, String appenderName, Level level, Consumer<LogEvent> consumer) {
return newAppender(loggerName, appenderName, PatternLayout.SIMPLE_CONVERSION_PATTERN, level, consumer);
}
public static Appender newAppender(
String loggerName, String appenderName, String patter, Level level, Consumer<LogEvent> consumer) {
final LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
final Configuration config = ctx.getConfiguration();
config.removeLogger(loggerName);
ConsumingAppender appender = new ConsumingAppender(appenderName, patter, consumer);
appender.start();
LoggerConfig loggerConfig = LoggerConfig.newBuilder()
.withIncludeLocation("true")
.withLoggerName(loggerName)
.withLevel(level)
.withAdditivity(true)
.withConfig(config)
.withRefs(new AppenderRef[] { AppenderRef.createAppenderRef(appenderName, null, null) })
.build();
loggerConfig.addAppender(appender, null, null);
config.addLogger(loggerName, loggerConfig);
ctx.updateLoggers();
return appender;
}
}
|
ConsumingAppender
|
java
|
assertj__assertj-core
|
assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/api/future/CompletableFutureAssert_isCompleted_Test.java
|
{
"start": 1155,
"end": 2780
}
|
class ____ {
@Test
void should_pass_if_completable_future_is_completed() {
// GIVEN
CompletableFuture<String> future = completedFuture("done");
// THEN
then(future).isCompleted();
}
@Test
void should_fail_when_completable_future_is_null() {
// GIVEN
CompletableFuture<String> future = null;
// WHEN
var assertionError = expectAssertionError(() -> assertThat(future).isCompleted());
// THEN
then(assertionError).hasMessage(actualIsNull());
}
@Test
void should_fail_if_completable_future_is_incomplete() {
// GIVEN
CompletableFuture<String> future = new CompletableFuture<>();
// WHEN
var assertionError = expectAssertionError(() -> assertThat(future).isCompleted());
// THEN
then(assertionError).hasMessage(shouldBeCompleted(future).create());
}
@Test
void should_fail_if_completable_future_has_failed() {
// GIVEN
CompletableFuture<String> future = new CompletableFuture<>();
future.completeExceptionally(new RuntimeException("boom!"));
// WHEN
var assertionError = expectAssertionError(() -> assertThat(future).isCompleted());
// THEN
then(assertionError).hasMessage(shouldBeCompleted(future).create());
}
@Test
void should_fail_if_completable_future_was_cancelled() {
// GIVEN
CompletableFuture<String> future = new CompletableFuture<>();
future.cancel(true);
// WHEN
var assertionError = expectAssertionError(() -> assertThat(future).isCompleted());
// THEN
then(assertionError).hasMessage(shouldBeCompleted(future).create());
}
}
|
CompletableFutureAssert_isCompleted_Test
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/SetBindingRequestFulfillmentTest.java
|
{
"start": 6994,
"end": 7442
}
|
class ____ {",
" @Provides @IntoSet static Object parentObject() {",
" return \"parent object\";",
" }",
"}");
JavaFileObject child =
JavaFileObjects.forSourceLines(
"test.Child",
"package test;",
"",
"import dagger.Subcomponent;",
"import java.util.Set;",
"",
"@Subcomponent",
"
|
ParentModule
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshottableDirListing.java
|
{
"start": 1829,
"end": 9101
}
|
class ____ {
static final long seed = 0;
static final short REPLICATION = 3;
static final long BLOCKSIZE = 1024;
private final Path root = new Path("/");
private final Path dir1 = new Path("/TestSnapshot1");
private final Path dir2 = new Path("/TestSnapshot2");
Configuration conf;
MiniDFSCluster cluster;
FSNamesystem fsn;
DistributedFileSystem hdfs;
@BeforeEach
public void setUp() throws Exception {
conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
.build();
cluster.waitActive();
fsn = cluster.getNamesystem();
hdfs = cluster.getFileSystem();
hdfs.mkdirs(dir1);
hdfs.mkdirs(dir2);
}
@AfterEach
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
/**
* Test listing all the snapshottable directories
*/
@Test
@Timeout(value = 60)
public void testListSnapshottableDir() throws Exception {
cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
// Initially there is no snapshottable directories in the system
SnapshottableDirectoryStatus[] dirs = hdfs.getSnapshottableDirListing();
assertNull(dirs);
// Make root as snapshottable
final Path root = new Path("/");
hdfs.allowSnapshot(root);
dirs = hdfs.getSnapshottableDirListing();
assertEquals(1, dirs.length);
assertEquals("", dirs[0].getDirStatus().getLocalName());
assertEquals(root, dirs[0].getFullPath());
// Make root non-snaphsottable
hdfs.disallowSnapshot(root);
dirs = hdfs.getSnapshottableDirListing();
assertNull(dirs);
// Make dir1 as snapshottable
hdfs.allowSnapshot(dir1);
dirs = hdfs.getSnapshottableDirListing();
assertEquals(1, dirs.length);
assertEquals(dir1.getName(), dirs[0].getDirStatus().getLocalName());
assertEquals(dir1, dirs[0].getFullPath());
// There is no snapshot for dir1 yet
assertEquals(0, dirs[0].getSnapshotNumber());
// Make dir2 as snapshottable
hdfs.allowSnapshot(dir2);
dirs = hdfs.getSnapshottableDirListing();
assertEquals(2, dirs.length);
assertEquals(dir1.getName(), dirs[0].getDirStatus().getLocalName());
assertEquals(dir1, dirs[0].getFullPath());
assertEquals(dir2.getName(), dirs[1].getDirStatus().getLocalName());
assertEquals(dir2, dirs[1].getFullPath());
// There is no snapshot for dir2 yet
assertEquals(0, dirs[1].getSnapshotNumber());
// Create dir3
final Path dir3 = new Path("/TestSnapshot3");
hdfs.mkdirs(dir3);
// Rename dir3 to dir2
hdfs.rename(dir3, dir2, Rename.OVERWRITE);
// Now we only have one snapshottable dir: dir1
dirs = hdfs.getSnapshottableDirListing();
assertEquals(1, dirs.length);
assertEquals(dir1, dirs[0].getFullPath());
// Make dir2 snapshottable again
hdfs.allowSnapshot(dir2);
// Create a snapshot for dir2
hdfs.createSnapshot(dir2, "s1");
hdfs.createSnapshot(dir2, "s2");
dirs = hdfs.getSnapshottableDirListing();
// There are now 2 snapshots for dir2
assertEquals(dir2, dirs[1].getFullPath());
assertEquals(2, dirs[1].getSnapshotNumber());
// Create sub-dirs under dir1
Path sub1 = new Path(dir1, "sub1");
Path file1 = new Path(sub1, "file1");
Path sub2 = new Path(dir1, "sub2");
Path file2 = new Path(sub2, "file2");
DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, file2, BLOCKSIZE, REPLICATION, seed);
// Make sub1 and sub2 snapshottable
hdfs.allowSnapshot(sub1);
hdfs.allowSnapshot(sub2);
dirs = hdfs.getSnapshottableDirListing();
assertEquals(4, dirs.length);
assertEquals(dir1, dirs[0].getFullPath());
assertEquals(dir2, dirs[1].getFullPath());
assertEquals(sub1, dirs[2].getFullPath());
assertEquals(sub2, dirs[3].getFullPath());
// reset sub1
hdfs.disallowSnapshot(sub1);
dirs = hdfs.getSnapshottableDirListing();
assertEquals(3, dirs.length);
assertEquals(dir1, dirs[0].getFullPath());
assertEquals(dir2, dirs[1].getFullPath());
assertEquals(sub2, dirs[2].getFullPath());
// Remove dir1, both dir1 and sub2 will be removed
hdfs.delete(dir1, true);
dirs = hdfs.getSnapshottableDirListing();
assertEquals(1, dirs.length);
assertEquals(dir2.getName(), dirs[0].getDirStatus().getLocalName());
assertEquals(dir2, dirs[0].getFullPath());
}
/**
* Test the listing with different user names to make sure only directories
* that are owned by the user are listed.
*/
@Test
@Timeout(value = 60)
public void testListWithDifferentUser() throws Exception {
cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
// first make dir1 and dir2 snapshottable
hdfs.allowSnapshot(dir1);
hdfs.allowSnapshot(dir2);
hdfs.setPermission(root, FsPermission.valueOf("-rwxrwxrwx"));
// create two dirs and make them snapshottable under the name of user1
UserGroupInformation ugi1 = UserGroupInformation.createUserForTesting(
"user1", new String[] { "group1" });
DistributedFileSystem fs1 = (DistributedFileSystem) DFSTestUtil
.getFileSystemAs(ugi1, conf);
Path dir1_user1 = new Path("/dir1_user1");
Path dir2_user1 = new Path("/dir2_user1");
fs1.mkdirs(dir1_user1);
fs1.mkdirs(dir2_user1);
hdfs.allowSnapshot(dir1_user1);
hdfs.allowSnapshot(dir2_user1);
// user2
UserGroupInformation ugi2 = UserGroupInformation.createUserForTesting(
"user2", new String[] { "group2" });
DistributedFileSystem fs2 = (DistributedFileSystem) DFSTestUtil
.getFileSystemAs(ugi2, conf);
Path dir_user2 = new Path("/dir_user2");
Path subdir_user2 = new Path(dir_user2, "subdir");
fs2.mkdirs(dir_user2);
fs2.mkdirs(subdir_user2);
hdfs.allowSnapshot(dir_user2);
hdfs.allowSnapshot(subdir_user2);
// super user
String supergroup = conf.get(DFS_PERMISSIONS_SUPERUSERGROUP_KEY,
DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT);
UserGroupInformation superUgi = UserGroupInformation.createUserForTesting(
"superuser", new String[] { supergroup });
DistributedFileSystem fs3 = (DistributedFileSystem) DFSTestUtil
.getFileSystemAs(superUgi, conf);
// list the snapshottable dirs for superuser
SnapshottableDirectoryStatus[] dirs = fs3.getSnapshottableDirListing();
// 6 snapshottable dirs: dir1, dir2, dir1_user1, dir2_user1, dir_user2, and
// subdir_user2
assertEquals(6, dirs.length);
// list the snapshottable dirs for user1
dirs = fs1.getSnapshottableDirListing();
// 2 dirs owned by user1: dir1_user1 and dir2_user1
assertEquals(2, dirs.length);
assertEquals(dir1_user1, dirs[0].getFullPath());
assertEquals(dir2_user1, dirs[1].getFullPath());
// list the snapshottable dirs for user2
dirs = fs2.getSnapshottableDirListing();
// 2 dirs owned by user2: dir_user2 and subdir_user2
assertEquals(2, dirs.length);
assertEquals(dir_user2, dirs[0].getFullPath());
assertEquals(subdir_user2, dirs[1].getFullPath());
}
}
|
TestSnapshottableDirListing
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/annotations/comment/CommentTest.java
|
{
"start": 1850,
"end": 2129
}
|
class ____ {
@Id
@GeneratedValue
@Comment("I am id")
private Long id;
@Comment("I am name")
@jakarta.persistence.Column(length = 50)
private String name;
@ManyToOne
@JoinColumn(name = "other")
@Comment("I am other")
private TestEntity other;
}
}
|
TestEntity
|
java
|
apache__camel
|
components/camel-tracing/src/main/java/org/apache/camel/tracing/Tracer.java
|
{
"start": 14958,
"end": 15658
}
|
class ____ implements LogListener {
@Override
public String onLog(Exchange exchange, CamelLogger camelLogger, String message) {
try {
SpanAdapter span = ActiveSpanManager.getSpan(exchange);
if (span != null) {
Map<String, String> fields = new HashMap<>();
fields.put("message", message);
span.log(fields);
}
} catch (Exception t) {
// This exception is ignored
LOG.warn("Tracing: Failed to capture tracing data. This exception is ignored.", t);
}
return message;
}
}
}
|
TracingLogListener
|
java
|
mockito__mockito
|
mockito-core/src/test/java/org/mockitousage/PlaygroundTest.java
|
{
"start": 2485,
"end": 2833
}
|
interface ____ {
// <T extends Foo & Colored> T getColoredPoint();
// }
//
// @Test
// public void testname() throws Exception {
// when(mock.get()).then(returnArgument());
//
// Bar mock = mock(Bar.class);
// when(mock.getColoredPoint()).thenReturn(new Foo());
// }
}
|
Bar
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/search/vectors/QueryProfilerProvider.java
|
{
"start": 874,
"end": 1115
}
|
interface ____ {
/**
* Store the profiling information in the {@link QueryProfiler}
* @param queryProfiler an instance of {@link KnnFloatVectorField}.
*/
void profile(QueryProfiler queryProfiler);
}
|
QueryProfilerProvider
|
java
|
spring-projects__spring-framework
|
spring-web/src/main/java/org/springframework/http/client/reactive/ReactorClientHttpRequest.java
|
{
"start": 1656,
"end": 4883
}
|
class ____ extends AbstractClientHttpRequest implements ZeroCopyHttpOutputMessage {
private final HttpMethod httpMethod;
private final URI uri;
private final HttpClientRequest request;
private final NettyOutbound outbound;
private final NettyDataBufferFactory bufferFactory;
public ReactorClientHttpRequest(HttpMethod method, URI uri, HttpClientRequest request, NettyOutbound outbound) {
this.httpMethod = method;
this.uri = uri;
this.request = request;
this.outbound = outbound;
this.bufferFactory = new NettyDataBufferFactory(outbound.alloc());
}
@Override
public HttpMethod getMethod() {
return this.httpMethod;
}
@Override
public URI getURI() {
return this.uri;
}
@Override
public DataBufferFactory bufferFactory() {
return this.bufferFactory;
}
@Override
@SuppressWarnings("unchecked")
public <T> T getNativeRequest() {
return (T) this.request;
}
@Override
public Mono<Void> writeWith(Publisher<? extends DataBuffer> body) {
return doCommit(() -> {
// Send as Mono if possible as an optimization hint to Reactor Netty
if (body instanceof Mono) {
Mono<ByteBuf> byteBufMono = Mono.from(body).map(NettyDataBufferFactory::toByteBuf);
return this.outbound.send(byteBufMono).then();
}
else {
Flux<ByteBuf> byteBufFlux = Flux.from(body).map(NettyDataBufferFactory::toByteBuf);
return this.outbound.send(byteBufFlux).then();
}
});
}
@Override
public Mono<Void> writeAndFlushWith(Publisher<? extends Publisher<? extends DataBuffer>> body) {
Publisher<Publisher<ByteBuf>> byteBufs = Flux.from(body).map(ReactorClientHttpRequest::toByteBufs);
return doCommit(() -> this.outbound.sendGroups(byteBufs).then());
}
private static Publisher<ByteBuf> toByteBufs(Publisher<? extends DataBuffer> dataBuffers) {
return Flux.from(dataBuffers).map(NettyDataBufferFactory::toByteBuf);
}
@Override
public Mono<Void> writeWith(Path file, long position, long count) {
return doCommit(() -> this.outbound.sendFile(file, position, count).then());
}
@Override
public Mono<Void> setComplete() {
// NettyOutbound#then() expects a body
// Use null as the write action for a more optimal send
return doCommit(null);
}
@Override
protected void applyHeaders() {
getHeaders().forEach((key, value) -> this.request.requestHeaders().set(key, value));
}
@Override
protected void applyCookies() {
getCookies().values().forEach(values -> values.forEach(value -> {
DefaultCookie cookie = new DefaultCookie(value.getName(), value.getValue());
this.request.addCookie(cookie);
}));
}
/**
* Saves the {@link #getAttributes() request attributes} to the
* {@link reactor.netty.channel.ChannelOperations#channel() channel} as a single map
* attribute under the key {@link ReactorClientHttpConnector#ATTRIBUTES_KEY}.
*/
@Override
protected void applyAttributes() {
if (!getAttributes().isEmpty()) {
((ChannelOperations<?, ?>) this.request).channel()
.attr(ReactorClientHttpConnector.ATTRIBUTES_KEY).set(getAttributes());
}
}
@Override
protected HttpHeaders initReadOnlyHeaders() {
return HttpHeaders.readOnlyHttpHeaders(new Netty4HeadersAdapter(this.request.requestHeaders()));
}
}
|
ReactorClientHttpRequest
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/util/DummyMXBean.java
|
{
"start": 865,
"end": 912
}
|
interface ____ test JMX registration.
*/
public
|
to
|
java
|
google__dagger
|
javatests/artifacts/dagger/build-tests/src/test/java/buildtests/TransitiveSubcomponentModulesTest.java
|
{
"start": 3932,
"end": 6692
}
|
class ____ {",
" public abstract int getInt();",
"}"));
BuildResult result;
switch (transitiveDependencyType) {
case "implementation":
result = runner.buildAndFail();
assertThat(result.getOutput()).contains("Task :app:compileJava FAILED");
String expectedErrorMsg =
"error: ComponentProcessingStep was unable to process 'app.MyComponent' because"
+ " 'library2.TransitiveModule' could not be resolved."
+ "\n "
+ "\n Dependency trace:"
+ "\n => element (INTERFACE): library1.IncludesTransitiveModule"
+ "\n => annotation type: dagger.Module"
+ "\n => annotation: "
+ "@dagger.Module(includes={library2.TransitiveModule}, subcomponents={})"
+ "\n => annotation value (TYPE_ARRAY): includes={library2.TransitiveModule}"
+ "\n => annotation value (TYPE): includes=library2.TransitiveModule";
assertThat(result.getOutput()).contains(expectedErrorMsg);
break;
case "api":
result = runner.build();
assertThat(result.task(":app:assemble").getOutcome()).isEqualTo(SUCCESS);
break;
}
}
private GradleRunner setupRunner(GradleFile subcomponent) throws IOException {
File projectDir = folder.getRoot();
GradleModule.create(projectDir)
.addSettingsFile("include 'app'", "include 'library1'", "include 'library2'")
.addBuildFile(
"buildscript {",
" ext {",
String.format("dagger_version = \"%s\"", System.getProperty("dagger_version")),
" }",
"}",
"",
"allprojects {",
" repositories {",
" mavenCentral()",
" mavenLocal()",
" }",
"}");
GradleModule.create(projectDir, "app")
.addBuildFile(
"plugins {",
" id 'java'",
" id 'application'",
"}",
"tasks.withType(JavaCompile) {",
" options.compilerArgs += '-Adagger.experimentalDaggerErrorMessages=ENABLED'",
"}",
"dependencies {",
" implementation project(':library1')",
" implementation \"com.google.dagger:dagger:$dagger_version\"",
" annotationProcessor \"com.google.dagger:dagger-compiler:$dagger_version\"",
"}")
.addSrcFile(
"MyComponent.java",
"package app;",
"",
"import dagger.Component;",
"import library1.MySubcomponent;",
"",
"@Component",
"public
|
MySubcomponent
|
java
|
spring-projects__spring-boot
|
loader/spring-boot-loader/src/test/java/org/springframework/boot/loader/net/protocol/jar/UrlJarEntryTests.java
|
{
"start": 1046,
"end": 1690
}
|
class ____ {
@Test
void ofWhenEntryIsNullReturnsNull() {
assertThat(UrlJarEntry.of(null, null)).isNull();
}
@Test
void ofReturnsUrlJarEntry() {
JarEntry entry = new JarEntry("test");
assertThat(UrlJarEntry.of(entry, null)).isNotNull();
}
@Test
void getAttributesDelegatesToUrlJarManifest() throws Exception {
JarEntry entry = new JarEntry("test");
UrlJarManifest manifest = mock(UrlJarManifest.class);
Attributes attributes = mock(Attributes.class);
given(manifest.getEntryAttributes(any())).willReturn(attributes);
assertThat(UrlJarEntry.of(entry, manifest).getAttributes()).isSameAs(attributes);
}
}
|
UrlJarEntryTests
|
java
|
spring-projects__spring-security
|
config/src/test/java/org/springframework/security/config/annotation/method/configuration/NamespaceGlobalMethodSecurityTests.java
|
{
"start": 16078,
"end": 16256
}
|
class ____ extends GlobalMethodSecurityConfiguration {
}
@Configuration
@EnableGlobalMethodSecurity(prePostEnabled = true)
public static
|
DefaultOrderExtendsMethodSecurityConfig
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/sql/mysql/alterTable/MySqlAlterTableTest10.java
|
{
"start": 986,
"end": 1962
}
|
class ____ extends TestCase {
public void test_alter_first() throws Exception {
String sql = "ALTER TABLE t1 CHANGE a b INTEGER;";
MySqlStatementParser parser = new MySqlStatementParser(sql);
SQLStatement stmt = parser.parseStatementList().get(0);
parser.match(Token.EOF);
MySqlSchemaStatVisitor visitor = new MySqlSchemaStatVisitor();
stmt.accept(visitor);
// System.out.println("Tables : " + visitor.getTables());
// System.out.println("fields : " + visitor.getColumns());
// System.out.println("coditions : " + visitor.getConditions());
// System.out.println("orderBy : " + visitor.getOrderByColumns());
String output = SQLUtils.toMySqlString(stmt);
assertEquals("ALTER TABLE t1" +
"\n\tCHANGE COLUMN a b INTEGER;", output);
assertEquals(1, visitor.getTables().size());
assertEquals(1, visitor.getColumns().size());
}
}
|
MySqlAlterTableTest10
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/issue_1200/Issue1281.java
|
{
"start": 575,
"end": 625
}
|
class ____<T> {
public T value;
}
}
|
Result
|
java
|
netty__netty
|
codec-http3/src/main/java/io/netty/handler/codec/http3/Http3ClientConnectionHandler.java
|
{
"start": 914,
"end": 4263
}
|
class ____ extends Http3ConnectionHandler {
private final LongFunction<ChannelHandler> pushStreamHandlerFactory;
/**
* Create a new instance.
*/
public Http3ClientConnectionHandler() {
this(null, null, null, null, true);
}
/**
* Create a new instance.
*
* @param inboundControlStreamHandler the {@link ChannelHandler} which will be notified about
* {@link Http3RequestStreamFrame}s or {@code null} if the user is not
* interested in these.
* @param pushStreamHandlerFactory the {@link LongFunction} that will provide a custom
* {@link ChannelHandler} for push streams {@code null} if no special
* handling should be done. When present, push ID will be passed as an
* argument to the {@link LongFunction}.
* @param unknownInboundStreamHandlerFactory the {@link LongFunction} that will provide a custom
* {@link ChannelHandler} for unknown inbound stream types or
* {@code null} if no special handling should be done.
* @param localSettings the local {@link Http3SettingsFrame} that should be sent to the
* remote peer or {@code null} if the default settings should be used.
* @param disableQpackDynamicTable If QPACK dynamic table should be disabled.
*/
public Http3ClientConnectionHandler(@Nullable ChannelHandler inboundControlStreamHandler,
@Nullable LongFunction<ChannelHandler> pushStreamHandlerFactory,
@Nullable LongFunction<ChannelHandler> unknownInboundStreamHandlerFactory,
@Nullable Http3SettingsFrame localSettings, boolean disableQpackDynamicTable) {
super(false, inboundControlStreamHandler, unknownInboundStreamHandlerFactory, localSettings,
disableQpackDynamicTable);
this.pushStreamHandlerFactory = pushStreamHandlerFactory;
}
@Override
void initBidirectionalStream(ChannelHandlerContext ctx, QuicStreamChannel channel) {
// See https://tools.ietf.org/html/draft-ietf-quic-http-32#section-6.1
Http3CodecUtils.connectionError(ctx, Http3ErrorCode.H3_STREAM_CREATION_ERROR,
"Server initiated bidirectional streams are not allowed", true);
}
@Override
void initUnidirectionalStream(ChannelHandlerContext ctx, QuicStreamChannel streamChannel) {
final long maxTableCapacity = maxTableCapacity();
streamChannel.pipeline().addLast(
new Http3UnidirectionalStreamInboundClientHandler(codecFactory,
localControlStreamHandler, remoteControlStreamHandler,
unknownInboundStreamHandlerFactory, pushStreamHandlerFactory,
() -> new QpackEncoderHandler(maxTableCapacity, qpackDecoder),
() -> new QpackDecoderHandler(qpackEncoder)));
}
}
|
Http3ClientConnectionHandler
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/ser/filter/TestSimpleSerializationIgnore.java
|
{
"start": 1314,
"end": 1625
}
|
class ____
extends BaseClassIgnore
{
// Annotations to disable ignorance, in sub-class; note that
// we must still get "JsonProperty" fro super class
@Override
@JsonIgnore(false)
public int x() { return 3; }
}
@JsonIgnoreType
static
|
SubClassNonIgnore
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/token/TestZKDelegationTokenSecretManagerImpl.java
|
{
"start": 2317,
"end": 9360
}
|
class ____
extends TestZKDelegationTokenSecretManager {
private static final Logger LOG =
LoggerFactory.getLogger(TestZKDelegationTokenSecretManagerImpl.class);
@Override
@AfterEach
public void tearDown() throws Exception {
super.tearDown();
// Prevent a STOPPED Curator from leaking into the next test.
ZKDelegationTokenSecretManager.setCurator(null);
}
@SuppressWarnings("unchecked")
@Test
public void testMultiNodeOperationWithoutWatch() throws Exception {
String connectString = zkServer.getConnectString();
Configuration conf = getSecretConf(connectString);
// disable watch
conf.setBoolean(ZK_DTSM_TOKEN_WATCHER_ENABLED, false);
conf.setInt(ZK_DTSM_ROUTER_TOKEN_SYNC_INTERVAL, 3);
for (int i = 0; i < TEST_RETRIES; i++) {
ZKDelegationTokenSecretManagerImpl dtsm1 =
new ZKDelegationTokenSecretManagerImpl(conf);
ZKDelegationTokenSecretManagerImpl dtsm2 =
new ZKDelegationTokenSecretManagerImpl(conf);
DelegationTokenManager tm1, tm2;
tm1 = new DelegationTokenManager(conf, new Text("bla"));
tm1.setExternalDelegationTokenSecretManager(dtsm1);
tm2 = new DelegationTokenManager(conf, new Text("bla"));
tm2.setExternalDelegationTokenSecretManager(dtsm2);
// common token operation without watchers should still be working
Token<DelegationTokenIdentifier> token =
(Token<DelegationTokenIdentifier>) tm1.createToken(
UserGroupInformation.getCurrentUser(), "foo");
assertNotNull(token);
tm2.verifyToken(token);
tm2.renewToken(token, "foo");
tm1.verifyToken(token);
tm1.cancelToken(token, "foo");
try {
verifyTokenFail(tm2, token);
fail("Expected InvalidToken");
} catch (SecretManager.InvalidToken it) {
// Ignore
}
token = (Token<DelegationTokenIdentifier>) tm2.createToken(
UserGroupInformation.getCurrentUser(), "bar");
assertNotNull(token);
tm1.verifyToken(token);
tm1.renewToken(token, "bar");
tm2.verifyToken(token);
tm2.cancelToken(token, "bar");
try {
verifyTokenFail(tm1, token);
fail("Expected InvalidToken");
} catch (SecretManager.InvalidToken it) {
// Ignore
}
dtsm1.stopThreads();
dtsm2.stopThreads();
verifyDestroy(tm1, conf);
verifyDestroy(tm2, conf);
}
}
@Test
public void testMultiNodeTokenRemovalShortSyncWithoutWatch()
throws Exception {
String connectString = zkServer.getConnectString();
Configuration conf = getSecretConf(connectString);
// disable watch
conf.setBoolean(ZK_DTSM_TOKEN_WATCHER_ENABLED, false);
// make sync quick
conf.setInt(ZK_DTSM_ROUTER_TOKEN_SYNC_INTERVAL, 3);
// set the renewal window and removal interval to be a
// short time to trigger the background cleanup
conf.setInt(RENEW_INTERVAL, 10);
conf.setInt(REMOVAL_SCAN_INTERVAL, 10);
for (int i = 0; i < TEST_RETRIES; i++) {
ZKDelegationTokenSecretManagerImpl dtsm1 =
new ZKDelegationTokenSecretManagerImpl(conf);
ZKDelegationTokenSecretManagerImpl dtsm2 =
new ZKDelegationTokenSecretManagerImpl(conf);
DelegationTokenManager tm1, tm2;
tm1 = new DelegationTokenManager(conf, new Text("bla"));
tm1.setExternalDelegationTokenSecretManager(dtsm1);
tm2 = new DelegationTokenManager(conf, new Text("bla"));
tm2.setExternalDelegationTokenSecretManager(dtsm2);
// time: X
// token expiry time:
// tm1: X + 10
// tm2: X + 10
Token<DelegationTokenIdentifier> token =
(Token<DelegationTokenIdentifier>) tm1.createToken(
UserGroupInformation.getCurrentUser(), "foo");
assertNotNull(token);
tm2.verifyToken(token);
// time: X + 9
// token expiry time:
// tm1: X + 10
// tm2: X + 19
Thread.sleep(9 * 1000);
tm2.renewToken(token, "foo");
tm1.verifyToken(token);
// time: X + 13
// token expiry time: (sync happened)
// tm1: X + 19
// tm2: X + 19
Thread.sleep(4 * 1000);
tm1.verifyToken(token);
tm2.verifyToken(token);
dtsm1.stopThreads();
dtsm2.stopThreads();
verifyDestroy(tm1, conf);
verifyDestroy(tm2, conf);
}
}
// This is very unlikely to happen in real case, but worth putting
// the case out
@Test
public void testMultiNodeTokenRemovalLongSyncWithoutWatch()
throws Exception {
String connectString = zkServer.getConnectString();
Configuration conf = getSecretConf(connectString);
// disable watch
conf.setBoolean(ZK_DTSM_TOKEN_WATCHER_ENABLED, false);
// make sync quick
conf.setInt(ZK_DTSM_ROUTER_TOKEN_SYNC_INTERVAL, 20);
// set the renewal window and removal interval to be a
// short time to trigger the background cleanup
conf.setInt(RENEW_INTERVAL, 10);
conf.setInt(REMOVAL_SCAN_INTERVAL, 10);
for (int i = 0; i < TEST_RETRIES; i++) {
ZKDelegationTokenSecretManagerImpl dtsm1 =
new ZKDelegationTokenSecretManagerImpl(conf);
ZKDelegationTokenSecretManagerImpl dtsm2 =
new ZKDelegationTokenSecretManagerImpl(conf);
ZKDelegationTokenSecretManagerImpl dtsm3 =
new ZKDelegationTokenSecretManagerImpl(conf);
DelegationTokenManager tm1, tm2, tm3;
tm1 = new DelegationTokenManager(conf, new Text("bla"));
tm1.setExternalDelegationTokenSecretManager(dtsm1);
tm2 = new DelegationTokenManager(conf, new Text("bla"));
tm2.setExternalDelegationTokenSecretManager(dtsm2);
tm3 = new DelegationTokenManager(conf, new Text("bla"));
tm3.setExternalDelegationTokenSecretManager(dtsm3);
// time: X
// token expiry time:
// tm1: X + 10
// tm2: X + 10
// tm3: No token due to no sync
Token<DelegationTokenIdentifier> token =
(Token<DelegationTokenIdentifier>) tm1.createToken(
UserGroupInformation.getCurrentUser(), "foo");
assertNotNull(token);
tm2.verifyToken(token);
// time: X + 9
// token expiry time:
// tm1: X + 10
// tm2: X + 19
// tm3: No token due to no sync
Thread.sleep(9 * 1000);
long renewalTime = tm2.renewToken(token, "foo");
LOG.info("Renew for token {} at current time {} renewal time {}",
token.getIdentifier(), Time.formatTime(Time.now()),
Time.formatTime(renewalTime));
tm1.verifyToken(token);
// time: X + 13
// token expiry time: (sync din't happen)
// tm1: X + 10
// tm2: X + 19
// tm3: X + 19 due to fetch from zk
Thread.sleep(4 * 1000);
tm2.verifyToken(token);
tm3.verifyToken(token);
dtsm1.stopThreads();
dtsm2.stopThreads();
dtsm3.stopThreads();
verifyDestroy(tm1, conf);
verifyDestroy(tm2, conf);
verifyDestroy(tm3, conf);
}
}
}
|
TestZKDelegationTokenSecretManagerImpl
|
java
|
apache__camel
|
test-infra/camel-test-infra-torchserve/src/main/java/org/apache/camel/test/infra/torchserve/services/TorchServeInfraService.java
|
{
"start": 943,
"end": 1087
}
|
interface ____ extends InfrastructureService {
int inferencePort();
int managementPort();
int metricsPort();
}
|
TorchServeInfraService
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/sql/ast/spi/SqlAstTreeHelper.java
|
{
"start": 339,
"end": 1694
}
|
class ____ {
private SqlAstTreeHelper() {
}
public static Predicate combinePredicates(Predicate baseRestriction, Predicate incomingRestriction) {
if ( baseRestriction == null ) {
return incomingRestriction;
}
if ( incomingRestriction == null ) {
return baseRestriction;
}
final Junction combinedPredicate;
if ( baseRestriction instanceof Junction junction ) {
if ( junction.isEmpty() ) {
return incomingRestriction;
}
if ( junction.getNature() == Junction.Nature.CONJUNCTION ) {
combinedPredicate = junction;
}
else {
combinedPredicate = new Junction( Junction.Nature.CONJUNCTION );
combinedPredicate.add( baseRestriction );
}
}
else {
combinedPredicate = new Junction( Junction.Nature.CONJUNCTION );
combinedPredicate.add( baseRestriction );
}
final Junction secondJunction;
if ( incomingRestriction instanceof Junction junction
&& ( secondJunction = junction).getNature() == Junction.Nature.CONJUNCTION ) {
for ( Predicate predicate : secondJunction.getPredicates() ) {
combinedPredicate.add( predicate );
}
}
else {
combinedPredicate.add( incomingRestriction );
}
return combinedPredicate;
}
public static boolean hasAggregateFunctions(QuerySpec querySpec) {
return AggregateFunctionChecker.hasAggregateFunctions( querySpec );
}
}
|
SqlAstTreeHelper
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/aop/aspectj/autoproxy/benchmark/BenchmarkTests.java
|
{
"start": 1825,
"end": 6216
}
|
class ____ {
private static final Class<?> CLASS = BenchmarkTests.class;
private static final String ASPECTJ_CONTEXT = CLASS.getSimpleName() + "-aspectj.xml";
private static final String SPRING_AOP_CONTEXT = CLASS.getSimpleName() + "-springAop.xml";
@Test
void repeatedAroundAdviceInvocationsWithAspectJ() {
testRepeatedAroundAdviceInvocations(ASPECTJ_CONTEXT, getCount(), "AspectJ");
}
@Test
void repeatedAroundAdviceInvocationsWithSpringAop() {
testRepeatedAroundAdviceInvocations(SPRING_AOP_CONTEXT, getCount(), "Spring AOP");
}
@Test
void repeatedBeforeAdviceInvocationsWithAspectJ() {
testBeforeAdviceWithoutJoinPoint(ASPECTJ_CONTEXT, getCount(), "AspectJ");
}
@Test
void repeatedBeforeAdviceInvocationsWithSpringAop() {
testBeforeAdviceWithoutJoinPoint(SPRING_AOP_CONTEXT, getCount(), "Spring AOP");
}
@Test
void repeatedAfterReturningAdviceInvocationsWithAspectJ() {
testAfterReturningAdviceWithoutJoinPoint(ASPECTJ_CONTEXT, getCount(), "AspectJ");
}
@Test
void repeatedAfterReturningAdviceInvocationsWithSpringAop() {
testAfterReturningAdviceWithoutJoinPoint(SPRING_AOP_CONTEXT, getCount(), "Spring AOP");
}
@Test
void repeatedMixWithAspectJ() {
testMix(ASPECTJ_CONTEXT, getCount(), "AspectJ");
}
@Test
void repeatedMixWithSpringAop() {
testMix(SPRING_AOP_CONTEXT, getCount(), "Spring AOP");
}
/**
* Change the return number to a higher number to make this test useful.
*/
protected int getCount() {
return 10;
}
private long testRepeatedAroundAdviceInvocations(String file, int howmany, String technology) {
ClassPathXmlApplicationContext ac = new ClassPathXmlApplicationContext(file, CLASS);
StopWatch sw = new StopWatch();
sw.start(howmany + " repeated around advice invocations with " + technology);
ITestBean adrian = (ITestBean) ac.getBean("adrian");
assertThat(AopUtils.isAopProxy(adrian)).isTrue();
assertThat(adrian.getAge()).isEqualTo(68);
for (int i = 0; i < howmany; i++) {
adrian.getAge();
}
sw.stop();
// System.out.println(sw.prettyPrint());
ac.close();
return sw.getTotalTimeMillis();
}
private long testBeforeAdviceWithoutJoinPoint(String file, int howmany, String technology) {
ClassPathXmlApplicationContext ac = new ClassPathXmlApplicationContext(file, CLASS);
StopWatch sw = new StopWatch();
sw.start(howmany + " repeated before advice invocations with " + technology);
ITestBean adrian = (ITestBean) ac.getBean("adrian");
assertThat(AopUtils.isAopProxy(adrian)).isTrue();
Advised a = (Advised) adrian;
assertThat(a.getAdvisors()).hasSizeGreaterThanOrEqualTo(3);
assertThat(adrian.getName()).isEqualTo("adrian");
for (int i = 0; i < howmany; i++) {
adrian.getName();
}
sw.stop();
// System.out.println(sw.prettyPrint());
ac.close();
return sw.getTotalTimeMillis();
}
private long testAfterReturningAdviceWithoutJoinPoint(String file, int howmany, String technology) {
ClassPathXmlApplicationContext ac = new ClassPathXmlApplicationContext(file, CLASS);
StopWatch sw = new StopWatch();
sw.start(howmany + " repeated after returning advice invocations with " + technology);
ITestBean adrian = (ITestBean) ac.getBean("adrian");
assertThat(AopUtils.isAopProxy(adrian)).isTrue();
Advised a = (Advised) adrian;
assertThat(a.getAdvisors()).hasSizeGreaterThanOrEqualTo(3);
// Hits joinpoint
adrian.setAge(25);
for (int i = 0; i < howmany; i++) {
adrian.setAge(i);
}
sw.stop();
// System.out.println(sw.prettyPrint());
ac.close();
return sw.getTotalTimeMillis();
}
private long testMix(String file, int howmany, String technology) {
ClassPathXmlApplicationContext ac = new ClassPathXmlApplicationContext(file, CLASS);
StopWatch sw = new StopWatch();
sw.start(howmany + " repeated mixed invocations with " + technology);
ITestBean adrian = (ITestBean) ac.getBean("adrian");
assertThat(AopUtils.isAopProxy(adrian)).isTrue();
Advised a = (Advised) adrian;
assertThat(a.getAdvisors()).hasSizeGreaterThanOrEqualTo(3);
for (int i = 0; i < howmany; i++) {
// Hit all 3 joinpoints
adrian.getAge();
adrian.getName();
adrian.setAge(i);
// Invoke three non-advised methods
adrian.getDoctor();
adrian.getLawyer();
adrian.getSpouse();
}
sw.stop();
// System.out.println(sw.prettyPrint());
ac.close();
return sw.getTotalTimeMillis();
}
}
|
BenchmarkTests
|
java
|
apache__avro
|
lang/java/ipc/src/test/java/org/apache/avro/specific/TestSpecificBuilderTree.java
|
{
"start": 1413,
"end": 14426
}
|
class ____ {
private Request.Builder createPartialBuilder() {
Request.Builder requestBuilder = Request.newBuilder();
requestBuilder.setTimestamp(1234567890);
requestBuilder.getConnectionBuilder().setNetworkType(NetworkType.IPv4);
requestBuilder.getHttpRequestBuilder().getUserAgentBuilder().setUseragent("Chrome 123").setId("Foo");
requestBuilder.getHttpRequestBuilder().getURIBuilder().setMethod(HttpMethod.GET).setPath("/index.html");
if (!requestBuilder.getHttpRequestBuilder().getURIBuilder().hasParameters()) {
requestBuilder.getHttpRequestBuilder().getURIBuilder().setParameters(new ArrayList<>());
}
requestBuilder.getHttpRequestBuilder().getURIBuilder().getParameters()
.add(QueryParameter.newBuilder().setName("Foo").setValue("Bar").build());
return requestBuilder;
}
@Test
void failOnIncompleteTree() {
assertThrows(AvroMissingFieldException.class, () -> {
try {
createPartialBuilder().build();
} catch (AvroMissingFieldException amfe) {
assertEquals("Field networkAddress type:STRING pos:1 not set and has no default value", amfe.getMessage());
assertEquals("Path in schema: --> connection --> networkAddress", amfe.toString());
throw amfe;
}
fail("Should NEVER get here");
});
}
@Test
void copyBuilder() {
Request.Builder requestBuilder1 = createPartialBuilder();
Request.Builder requestBuilder2 = Request.newBuilder(requestBuilder1);
requestBuilder1.getConnectionBuilder().setNetworkAddress("1.1.1.1");
requestBuilder2.getConnectionBuilder().setNetworkAddress("2.2.2.2");
requestBuilder2.getHttpRequestBuilder().getUserAgentBuilder().setId("Bar");
Request request1 = requestBuilder1.build();
Request request2 = requestBuilder2.build();
assertEquals(NetworkType.IPv4, request1.getConnection().getNetworkType());
assertEquals("1.1.1.1", request1.getConnection().getNetworkAddress());
assertEquals("Chrome 123", request1.getHttpRequest().getUserAgent().getUseragent());
assertEquals("Foo", request1.getHttpRequest().getUserAgent().getId());
assertEquals(HttpMethod.GET, request1.getHttpRequest().getURI().getMethod());
assertEquals("/index.html", request1.getHttpRequest().getURI().getPath());
assertEquals(1, request1.getHttpRequest().getURI().getParameters().size());
assertEquals("Foo", request1.getHttpRequest().getURI().getParameters().get(0).getName());
assertEquals("Bar", request1.getHttpRequest().getURI().getParameters().get(0).getValue());
assertEquals(NetworkType.IPv4, request2.getConnection().getNetworkType());
assertEquals("2.2.2.2", request2.getConnection().getNetworkAddress());
assertEquals("Chrome 123", request2.getHttpRequest().getUserAgent().getUseragent());
assertEquals("Bar", request2.getHttpRequest().getUserAgent().getId());
assertEquals(HttpMethod.GET, request2.getHttpRequest().getURI().getMethod());
assertEquals("/index.html", request2.getHttpRequest().getURI().getPath());
assertEquals(1, request2.getHttpRequest().getURI().getParameters().size());
assertEquals("Foo", request2.getHttpRequest().getURI().getParameters().get(0).getName());
assertEquals("Bar", request2.getHttpRequest().getURI().getParameters().get(0).getValue());
}
@Test
void createBuilderFromInstance() {
Request.Builder requestBuilder1 = createPartialBuilder();
requestBuilder1.getConnectionBuilder().setNetworkAddress("1.1.1.1");
Request request1 = requestBuilder1.build();
Request.Builder requestBuilder2 = Request.newBuilder(request1);
requestBuilder2.getConnectionBuilder().setNetworkAddress("2.2.2.2");
requestBuilder2.getHttpRequestBuilder().getUserAgentBuilder().setId("Bar");
requestBuilder2.getHttpRequestBuilder().getURIBuilder().setMethod(HttpMethod.POST);
requestBuilder2.getHttpRequestBuilder().getUserAgentBuilder().setUseragent("Firefox 456");
Request request2 = requestBuilder2.build();
assertEquals(NetworkType.IPv4, request1.getConnection().getNetworkType());
assertEquals("1.1.1.1", request1.getConnection().getNetworkAddress());
assertEquals("Chrome 123", request1.getHttpRequest().getUserAgent().getUseragent());
assertEquals("Foo", request1.getHttpRequest().getUserAgent().getId());
assertEquals(HttpMethod.GET, request1.getHttpRequest().getURI().getMethod());
assertEquals("/index.html", request1.getHttpRequest().getURI().getPath());
assertEquals(1, request1.getHttpRequest().getURI().getParameters().size());
assertEquals("Foo", request1.getHttpRequest().getURI().getParameters().get(0).getName());
assertEquals("Bar", request1.getHttpRequest().getURI().getParameters().get(0).getValue());
assertEquals(NetworkType.IPv4, request2.getConnection().getNetworkType());
assertEquals("2.2.2.2", request2.getConnection().getNetworkAddress());
assertEquals("Firefox 456", request2.getHttpRequest().getUserAgent().getUseragent());
assertEquals("Bar", request2.getHttpRequest().getUserAgent().getId());
assertEquals(HttpMethod.POST, request2.getHttpRequest().getURI().getMethod());
assertEquals("/index.html", request2.getHttpRequest().getURI().getPath());
assertEquals(1, request2.getHttpRequest().getURI().getParameters().size());
assertEquals("Foo", request2.getHttpRequest().getURI().getParameters().get(0).getName());
assertEquals("Bar", request2.getHttpRequest().getURI().getParameters().get(0).getValue());
}
private Request.Builder createLastOneTestsBuilder() {
Request.Builder requestBuilder = Request.newBuilder();
requestBuilder.setTimestamp(1234567890);
requestBuilder.getConnectionBuilder().setNetworkType(NetworkType.IPv4).setNetworkAddress("1.1.1.1");
return requestBuilder;
}
@Test
void lastOneWins_Setter() {
Request.Builder requestBuilder = createLastOneTestsBuilder();
requestBuilder.getHttpRequestBuilder().getURIBuilder().setMethod(HttpMethod.GET).setPath("/index.html");
requestBuilder.getHttpRequestBuilder().getUserAgentBuilder().setUseragent("Chrome 123").setId("Foo");
HttpRequest httpRequest = HttpRequest.newBuilder().setUserAgent(new UserAgent("Bar", "Firefox 321"))
.setURI(HttpURI.newBuilder().setMethod(HttpMethod.POST).setPath("/login.php").build()).build();
Request request = requestBuilder.setHttpRequest(httpRequest).build();
assertEquals(NetworkType.IPv4, request.getConnection().getNetworkType());
assertEquals("1.1.1.1", request.getConnection().getNetworkAddress());
assertEquals(0, request.getHttpRequest().getURI().getParameters().size());
assertEquals("Firefox 321", request.getHttpRequest().getUserAgent().getUseragent());
assertEquals("Bar", request.getHttpRequest().getUserAgent().getId());
assertEquals(HttpMethod.POST, request.getHttpRequest().getURI().getMethod());
assertEquals("/login.php", request.getHttpRequest().getURI().getPath());
}
@Test
void lastOneWins_Builder() {
Request.Builder requestBuilder = createLastOneTestsBuilder();
HttpRequest httpRequest = HttpRequest.newBuilder().setUserAgent(new UserAgent("Bar", "Firefox 321"))
.setURI(HttpURI.newBuilder().setMethod(HttpMethod.POST).setPath("/login.php").build()).build();
requestBuilder.setHttpRequest(httpRequest);
requestBuilder.getHttpRequestBuilder().getURIBuilder().setMethod(HttpMethod.GET).setPath("/index.html");
requestBuilder.getHttpRequestBuilder().getUserAgentBuilder().setUseragent("Chrome 123").setId("Foo");
Request request = requestBuilder.build();
assertEquals(NetworkType.IPv4, request.getConnection().getNetworkType());
assertEquals("1.1.1.1", request.getConnection().getNetworkAddress());
assertEquals("Chrome 123", request.getHttpRequest().getUserAgent().getUseragent());
assertEquals("Foo", request.getHttpRequest().getUserAgent().getId());
assertEquals(0, request.getHttpRequest().getURI().getParameters().size());
assertEquals(HttpMethod.GET, request.getHttpRequest().getURI().getMethod());
assertEquals("/index.html", request.getHttpRequest().getURI().getPath());
}
@Test
void copyBuilderWithNullables() {
RecordWithNullables.Builder builder = RecordWithNullables.newBuilder();
assertFalse(builder.hasNullableRecordBuilder());
assertFalse(builder.hasNullableRecord());
assertFalse(builder.hasNullableString());
assertFalse(builder.hasNullableLong());
assertFalse(builder.hasNullableInt());
assertFalse(builder.hasNullableMap());
assertFalse(builder.hasNullableArray());
RecordWithNullables.Builder builderCopy = RecordWithNullables.newBuilder(builder);
assertFalse(builderCopy.hasNullableRecordBuilder());
assertFalse(builderCopy.hasNullableRecord());
assertFalse(builderCopy.hasNullableString());
assertFalse(builderCopy.hasNullableLong());
assertFalse(builderCopy.hasNullableInt());
assertFalse(builderCopy.hasNullableMap());
assertFalse(builderCopy.hasNullableArray());
builderCopy.getNullableRecordBuilder();
}
@Test
void copyBuilderWithNullablesAndSetToNull() {
// Create builder with all values default to null, yet unset.
RecordWithNullables.Builder builder = RecordWithNullables.newBuilder();
// Ensure all values have not been set
assertFalse(builder.hasNullableRecordBuilder());
assertFalse(builder.hasNullableRecord());
assertFalse(builder.hasNullableString());
assertFalse(builder.hasNullableLong());
assertFalse(builder.hasNullableInt());
assertFalse(builder.hasNullableMap());
assertFalse(builder.hasNullableArray());
// Set all values to null
builder.setNullableRecordBuilder(null);
builder.setNullableRecord(null);
builder.setNullableString(null);
builder.setNullableLong(null);
builder.setNullableInt(null);
builder.setNullableMap(null);
builder.setNullableArray(null);
// A Builder remains False because it is null
assertFalse(builder.hasNullableRecordBuilder());
// Ensure all values have been set
assertTrue(builder.hasNullableRecord());
assertTrue(builder.hasNullableString());
assertTrue(builder.hasNullableLong());
assertTrue(builder.hasNullableInt());
assertTrue(builder.hasNullableMap());
assertTrue(builder.hasNullableArray());
// Implicitly create a builder instance and clear the actual value.
builder.getNullableRecordBuilder();
assertTrue(builder.hasNullableRecordBuilder());
assertFalse(builder.hasNullableRecord());
// Create a copy of this builder.
RecordWithNullables.Builder builderCopy = RecordWithNullables.newBuilder(builder);
// Ensure all values are still the same
assertTrue(builder.hasNullableRecordBuilder());
assertFalse(builder.hasNullableRecord());
assertTrue(builder.hasNullableString());
assertTrue(builder.hasNullableLong());
assertTrue(builder.hasNullableInt());
assertTrue(builder.hasNullableMap());
assertTrue(builder.hasNullableArray());
}
@Test
void getBuilderForRecordWithNullRecord() {
// Create a record with all nullable fields set to the default value : null
RecordWithNullables recordWithNullables = RecordWithNullables.newBuilder().build();
// Now create a Builder using this record as the base
RecordWithNullables.Builder builder = RecordWithNullables.newBuilder(recordWithNullables);
// In the past this caused an NPE
builder.getNullableRecordBuilder();
}
@Test
void getBuilderForNullRecord() {
// In the past this caused an NPE
RecordWithNullables.newBuilder((RecordWithNullables) null);
}
@Test
void getBuilderForNullBuilder() {
// In the past this caused an NPE
RecordWithNullables.newBuilder((RecordWithNullables.Builder) null);
}
@Test
void validateBrowsingOptionals() {
Request.Builder requestBuilder = Request.newBuilder();
requestBuilder.setTimestamp(1234567890);
requestBuilder.getHttpRequestBuilder().getUserAgentBuilder().setUseragent("Chrome 123");
requestBuilder.getHttpRequestBuilder().getURIBuilder().setMethod(HttpMethod.GET).setPath("/index.html");
Request request = requestBuilder.build();
assertEquals("Chrome 123", Optional.of(request).flatMap(Request::getOptionalHttpRequest)
.flatMap(HttpRequest::getOptionalUserAgent).flatMap(UserAgent::getOptionalUseragent).orElse("UNKNOWN"));
assertFalse(Optional.of(request).flatMap(Request::getOptionalHttpRequest).flatMap(HttpRequest::getOptionalUserAgent)
.flatMap(UserAgent::getOptionalId).isPresent());
assertEquals(HttpMethod.GET, Optional.of(request).flatMap(Request::getOptionalHttpRequest)
.flatMap(HttpRequest::getOptionalURI).flatMap(HttpURI::getOptionalMethod).orElse(null));
assertEquals("/index.html", Optional.of(request).flatMap(Request::getOptionalHttpRequest)
.flatMap(HttpRequest::getOptionalURI).flatMap(HttpURI::getOptionalPath).orElse(null));
}
}
|
TestSpecificBuilderTree
|
java
|
apache__camel
|
components/camel-jms/src/main/java/org/apache/camel/component/jms/JmsKeyFormatStrategy.java
|
{
"start": 952,
"end": 1431
}
|
interface ____ {
/**
* Encodes the key before its sent as a {@link jakarta.jms.Message} message.
*
* @param key the original key
* @return the encoded key
*/
String encodeKey(String key);
/**
* Decodes the key after its received from a {@link jakarta.jms.Message} message.
*
* @param key the encoded key
* @return the decoded key as the original key
*/
String decodeKey(String key);
}
|
JmsKeyFormatStrategy
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/inheritance/joined/JoinedInheritanceWithExplicitDiscriminatorTest.java
|
{
"start": 4082,
"end": 4598
}
|
class ____ {
private Integer id;
private String name;
public Customer() {
}
public Customer(Integer id, String name) {
this.id = id;
this.name = name;
}
@Id
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
@Entity(name = "DomesticCustomer")
@Table(name = "DomesticCustomer")
@DiscriminatorValue( "dc" )
public static
|
Customer
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/VarifierTest.java
|
{
"start": 6551,
"end": 6853
}
|
class ____ {
public void t() {
var now = Instant.ofEpochMilli(1);
}
}
""")
.doTest();
}
@Test
public void varUnused() {
refactoringHelper
.addInputLines(
"Test.java",
"""
|
Test
|
java
|
apache__dubbo
|
dubbo-common/src/test/java/org/apache/dubbo/rpc/model/ScopeModelAwareExtensionProcessorTest.java
|
{
"start": 1108,
"end": 3677
}
|
class ____ {
private FrameworkModel frameworkModel;
private ApplicationModel applicationModel;
private ModuleModel moduleModel;
@BeforeEach
public void setUp() {
frameworkModel = new FrameworkModel();
applicationModel = frameworkModel.newApplication();
moduleModel = applicationModel.newModule();
}
@AfterEach
public void reset() {
frameworkModel.destroy();
}
@Test
void testInitialize() {
ScopeModelAwareExtensionProcessor processor1 = new ScopeModelAwareExtensionProcessor(frameworkModel);
Assertions.assertEquals(processor1.getFrameworkModel(), frameworkModel);
Assertions.assertEquals(processor1.getScopeModel(), frameworkModel);
Assertions.assertNull(processor1.getApplicationModel());
Assertions.assertNull(processor1.getModuleModel());
ScopeModelAwareExtensionProcessor processor2 = new ScopeModelAwareExtensionProcessor(applicationModel);
Assertions.assertEquals(processor2.getApplicationModel(), applicationModel);
Assertions.assertEquals(processor2.getScopeModel(), applicationModel);
Assertions.assertEquals(processor2.getFrameworkModel(), frameworkModel);
Assertions.assertNull(processor2.getModuleModel());
ScopeModelAwareExtensionProcessor processor3 = new ScopeModelAwareExtensionProcessor(moduleModel);
Assertions.assertEquals(processor3.getModuleModel(), moduleModel);
Assertions.assertEquals(processor3.getScopeModel(), moduleModel);
Assertions.assertEquals(processor2.getApplicationModel(), applicationModel);
Assertions.assertEquals(processor2.getFrameworkModel(), frameworkModel);
}
@Test
void testPostProcessAfterInitialization() throws Exception {
ScopeModelAwareExtensionProcessor processor = new ScopeModelAwareExtensionProcessor(moduleModel);
MockScopeModelAware mockScopeModelAware = new MockScopeModelAware();
Object object = processor.postProcessAfterInitialization(
mockScopeModelAware, mockScopeModelAware.getClass().getName());
Assertions.assertEquals(object, mockScopeModelAware);
Assertions.assertEquals(mockScopeModelAware.getScopeModel(), moduleModel);
Assertions.assertEquals(mockScopeModelAware.getFrameworkModel(), frameworkModel);
Assertions.assertEquals(mockScopeModelAware.getApplicationModel(), applicationModel);
Assertions.assertEquals(mockScopeModelAware.getModuleModel(), moduleModel);
}
}
|
ScopeModelAwareExtensionProcessorTest
|
java
|
resilience4j__resilience4j
|
resilience4j-core/src/main/java/io/github/resilience4j/core/Registry.java
|
{
"start": 1083,
"end": 2773
}
|
interface ____<E, C> {
/**
* Adds a configuration to the registry
*
* @param configName the configuration name
* @param configuration the added configuration
*/
void addConfiguration(String configName, C configuration);
/**
* Find a named entry in the Registry
*
* @param name the name
*/
Optional<E> find(String name);
/**
* Remove an entry from the Registry
*
* @param name the name
*/
Optional<E> remove(String name);
/**
* Replace an existing entry in the Registry by a new one.
*
* @param name the existing name
* @param newEntry a new entry
*/
Optional<E> replace(String name, E newEntry);
/**
* Get a configuration by name
*
* @param configName the configuration name
* @return the found configuration if any
*/
Optional<C> getConfiguration(String configName);
/**
* Get the default configuration
*
* @return the default configuration
*/
C getDefaultConfig();
/**
* @return global configured registry tags
*/
Map<String, String> getTags();
/**
* Returns an EventPublisher which can be used to register event consumers.
*
* @return an EventPublisher
*/
EventPublisher<E> getEventPublisher();
/**
+ * Removes a configuration from the registry
+ *
+ * @param configName the configuration name
+ * @return configuration mapped to the configName.
+ */
C removeConfiguration(String configName);
/**
* An EventPublisher can be used to register event consumers.
*/
|
Registry
|
java
|
spring-projects__spring-security
|
config/src/test/java/org/springframework/security/config/annotation/web/configuration/OAuth2AuthorizedClientManagerConfigurationTests.java
|
{
"start": 16837,
"end": 18499
}
|
class ____ extends OAuth2ClientBaseConfig {
@Bean
AuthorizationCodeOAuth2AuthorizedClientProvider authorizationCodeProvider() {
return spy(new AuthorizationCodeOAuth2AuthorizedClientProvider());
}
@Bean
RefreshTokenOAuth2AuthorizedClientProvider refreshTokenProvider() {
RefreshTokenOAuth2AuthorizedClientProvider authorizedClientProvider = new RefreshTokenOAuth2AuthorizedClientProvider();
authorizedClientProvider.setAccessTokenResponseClient(new MockAccessTokenResponseClient<>());
return authorizedClientProvider;
}
@Bean
ClientCredentialsOAuth2AuthorizedClientProvider clientCredentialsProvider() {
ClientCredentialsOAuth2AuthorizedClientProvider authorizedClientProvider = new ClientCredentialsOAuth2AuthorizedClientProvider();
authorizedClientProvider.setAccessTokenResponseClient(new MockAccessTokenResponseClient<>());
return authorizedClientProvider;
}
@Bean
JwtBearerOAuth2AuthorizedClientProvider jwtBearerAuthorizedClientProvider() {
JwtBearerOAuth2AuthorizedClientProvider authorizedClientProvider = new JwtBearerOAuth2AuthorizedClientProvider();
authorizedClientProvider.setAccessTokenResponseClient(new MockAccessTokenResponseClient<>());
return authorizedClientProvider;
}
@Bean
TokenExchangeOAuth2AuthorizedClientProvider tokenExchangeAuthorizedClientProvider() {
TokenExchangeOAuth2AuthorizedClientProvider authorizedClientProvider = new TokenExchangeOAuth2AuthorizedClientProvider();
authorizedClientProvider.setAccessTokenResponseClient(new MockAccessTokenResponseClient<>());
return authorizedClientProvider;
}
}
abstract static
|
CustomAuthorizedClientProvidersConfig
|
java
|
apache__kafka
|
server/src/main/java/org/apache/kafka/server/AssignmentsManager.java
|
{
"start": 2747,
"end": 8882
}
|
class ____ {
static final ExponentialBackoff STANDARD_BACKOFF = new ExponentialBackoff(
TimeUnit.MILLISECONDS.toNanos(100),
2,
TimeUnit.SECONDS.toNanos(10),
0.02);
/**
* The minimum amount of time we will wait before logging individual assignment failures.
*/
static final long MIN_NOISY_FAILURE_INTERVAL_NS = TimeUnit.MINUTES.toNanos(2);
@Deprecated(since = "4.2")
static final MetricName DEPRECATED_QUEUED_REPLICA_TO_DIR_ASSIGNMENTS_METRIC =
KafkaYammerMetrics.getMetricName("org.apache.kafka.server", "AssignmentsManager", "QueuedReplicaToDirAssignments");
/**
* The metric reflecting the number of pending assignments.
*/
static final MetricName QUEUED_REPLICA_TO_DIR_ASSIGNMENTS_METRIC =
KafkaYammerMetrics.getMetricName("kafka.server", "AssignmentsManager", "QueuedReplicaToDirAssignments");
/**
* The event at which we send assignments, if appropriate.
*/
static final String MAYBE_SEND_ASSIGNMENTS_EVENT = "MaybeSendAssignmentsEvent";
/**
* The log4j object.
*/
private final Logger log;
/**
* The exponential backoff strategy to use.
*/
private final ExponentialBackoff backoff;
/**
* The clock object to use.
*/
private final Time time;
/**
* Used to send messages to the controller.
*/
private final NodeToControllerChannelManager channelManager;
/**
* The node ID.
*/
private final int nodeId;
/**
* Supplies the latest MetadataImage.
*/
private final Supplier<MetadataImage> metadataImageSupplier;
/**
* Maps directory IDs to descriptions for logging purposes.
*/
private final Function<Uuid, String> directoryIdToDescription;
/**
* Maps partitions to assignments that are ready to send.
*/
private final ConcurrentHashMap<TopicIdPartition, Assignment> ready;
/**
* Maps partitions to assignments that are in-flight. Older entries come first.
*/
private volatile Map<TopicIdPartition, Assignment> inflight;
/**
* The registry to register our metrics with.
*/
private final MetricsRegistry metricsRegistry;
/**
* The number of global failures we had previously (cleared after any success).
*/
private int previousGlobalFailures;
/**
* The event queue.
*/
private final KafkaEventQueue eventQueue;
public AssignmentsManager(
Time time,
NodeToControllerChannelManager channelManager,
int nodeId,
Supplier<MetadataImage> metadataImageSupplier,
Function<Uuid, String> directoryIdToDescription
) {
this(STANDARD_BACKOFF,
time,
channelManager,
nodeId,
metadataImageSupplier,
directoryIdToDescription,
KafkaYammerMetrics.defaultRegistry());
}
AssignmentsManager(
ExponentialBackoff backoff,
Time time,
NodeToControllerChannelManager channelManager,
int nodeId,
Supplier<MetadataImage> metadataImageSupplier,
Function<Uuid, String> directoryIdToDescription,
MetricsRegistry metricsRegistry
) {
this.log = new LogContext("[AssignmentsManager id=" + nodeId + "] ").
logger(AssignmentsManager.class);
this.backoff = backoff;
this.time = time;
this.channelManager = channelManager;
this.nodeId = nodeId;
this.directoryIdToDescription = directoryIdToDescription;
this.metadataImageSupplier = metadataImageSupplier;
this.ready = new ConcurrentHashMap<>();
this.inflight = Map.of();
this.metricsRegistry = metricsRegistry;
this.metricsRegistry.newGauge(DEPRECATED_QUEUED_REPLICA_TO_DIR_ASSIGNMENTS_METRIC, new Gauge<Integer>() {
@Override
public Integer value() {
return numPending();
}
});
this.metricsRegistry.newGauge(QUEUED_REPLICA_TO_DIR_ASSIGNMENTS_METRIC, new Gauge<Integer>() {
@Override
public Integer value() {
return numPending();
}
});
this.previousGlobalFailures = 0;
this.eventQueue = new KafkaEventQueue(time,
new LogContext("[AssignmentsManager id=" + nodeId + "]"),
"broker-" + nodeId + "-directory-assignments-manager-",
new ShutdownEvent());
channelManager.start();
}
public int numPending() {
return ready.size() + inflight.size();
}
public void close() throws InterruptedException {
eventQueue.close();
}
public void onAssignment(
TopicIdPartition topicIdPartition,
Uuid directoryId,
String reason,
Runnable successCallback
) {
long nowNs = time.nanoseconds();
Assignment assignment = new Assignment(
topicIdPartition, directoryId, nowNs, successCallback);
ready.put(topicIdPartition, assignment);
if (log.isTraceEnabled()) {
String topicDescription = Optional.ofNullable(metadataImageSupplier.get().topics().
getTopic(assignment.topicIdPartition().topicId())).
map(TopicImage::name).orElse(assignment.topicIdPartition().topicId().toString());
log.trace("Registered assignment {}: {}, moving {}-{} into {}",
assignment,
reason,
topicDescription,
topicIdPartition.partitionId(),
directoryIdToDescription.apply(assignment.directoryId()));
}
rescheduleMaybeSendAssignmentsEvent(nowNs);
}
void rescheduleMaybeSendAssignmentsEvent(long nowNs) {
eventQueue.scheduleDeferred(MAYBE_SEND_ASSIGNMENTS_EVENT,
new AssignmentsManagerDeadlineFunction(backoff,
nowNs, previousGlobalFailures, !inflight.isEmpty(), ready.size()),
new MaybeSendAssignmentsEvent());
}
/**
* Handles shutdown.
*/
private
|
AssignmentsManager
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/AutoValueBoxedValuesTest.java
|
{
"start": 5149,
"end": 6197
}
|
class ____ {",
" abstract Builder setLongId(long value);",
" abstract Builder setIntId(int value);",
" abstract Builder setByteId(byte value);",
" abstract Builder setShortId(short value);",
" abstract Builder setFloatId(float value);",
" abstract Builder setDoubleId(double value);",
" abstract Builder setBooleanId(boolean value);",
" abstract Builder setCharId(char value);",
" abstract Test build();",
" }"),
lines("}")))
.doTest();
}
@Test
public void nullableBoxedTypes() {
compilationHelper
.addSourceLines(
"in/Test.java",
mergeLines(
lines(
"import com.google.auto.value.AutoValue;",
"import javax.annotation.Nullable;",
"@AutoValue",
"abstract
|
Builder
|
java
|
elastic__elasticsearch
|
x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankBuilder.java
|
{
"start": 3971,
"end": 6157
}
|
class ____
builder.field(FIELD_FIELD.getPreferredName(), field);
if (seed != null) {
builder.field(SEED_FIELD.getPreferredName(), seed);
}
}
@Override
public boolean isCompoundBuilder() {
return false;
}
@Override
public Explanation explainHit(Explanation baseExplanation, RankDoc scoreDoc, List<String> queryNames) {
if (scoreDoc == null) {
return baseExplanation;
}
if (false == baseExplanation.isMatch()) {
return baseExplanation;
}
assert scoreDoc instanceof RankFeatureDoc : "ScoreDoc is not an instance of RankFeatureDoc";
RankFeatureDoc rankFeatureDoc = (RankFeatureDoc) scoreDoc;
return Explanation.match(
rankFeatureDoc.score,
"rank after reranking: [" + rankFeatureDoc.rank + "] using seed [" + seed + "] with score: [" + rankFeatureDoc.score + "]",
baseExplanation
);
}
@Override
public QueryPhaseRankShardContext buildQueryPhaseShardContext(List<Query> queries, int from) {
return new RerankingQueryPhaseRankShardContext(queries, rankWindowSize());
}
@Override
public QueryPhaseRankCoordinatorContext buildQueryPhaseCoordinatorContext(int size, int from) {
return new RerankingQueryPhaseRankCoordinatorContext(rankWindowSize());
}
@Override
public RankFeaturePhaseRankShardContext buildRankFeaturePhaseShardContext() {
return new RerankingRankFeaturePhaseRankShardContext(field);
}
@Override
public RankFeaturePhaseRankCoordinatorContext buildRankFeaturePhaseCoordinatorContext(int size, int from, Client client) {
return new RandomRankFeaturePhaseRankCoordinatorContext(size, from, rankWindowSize(), seed);
}
public String field() {
return field;
}
@Override
protected boolean doEquals(RankBuilder other) {
RandomRankBuilder that = (RandomRankBuilder) other;
return Objects.equals(field, that.field) && Objects.equals(seed, that.seed);
}
@Override
protected int doHashCode() {
return Objects.hash(field, seed);
}
}
|
RankBuilder
|
java
|
assertj__assertj-core
|
assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/api/recursive/comparison/fields/RecursiveComparisonAssert_isIn_Test.java
|
{
"start": 2352,
"end": 4080
}
|
class ____ {
private final Integer personId;
private final List<String> personInfo;
private final Integer age;
public PersonDto(Integer personId, List<String> personInfo, Integer age) {
this.personId = personId;
this.personInfo = personInfo;
this.age = age;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
PersonDto personDto = (PersonDto) o;
return Objects.equals(personId, personDto.personId) && Objects.equals(personInfo, personDto.personInfo)
&& Objects.equals(age, personDto.age);
}
@Override
public int hashCode() {
return Objects.hash(personId, personInfo, age);
}
}
@Test
void should_succeed_running_issue_2843_test_case() {
// GIVEN
PersonDto person1 = new PersonDto(10, list("John"), 25);
PersonDto person2 = new PersonDto(20, list("Ben"), 30);
PersonDto person3 = new PersonDto(30, list("Rick", "Sanchez"), 35);
// Here is personInfo has the same value as in person3, but in another order
PersonDto person4 = new PersonDto(30, list("Sanchez", "Rick"), 35);
List<PersonDto> list1 = list(person1, person2, person3);
List<PersonDto> list2 = list(person1, person2, person4);
SoftAssertions softAssertions = new SoftAssertions();
// WHEN/THEN
list2.forEach(person -> softAssertions.assertThat(person)
.usingRecursiveComparison(recursiveComparisonConfiguration)
.ignoringCollectionOrder()
.isIn(list1));
softAssertions.assertAll();
}
}
|
PersonDto
|
java
|
quarkusio__quarkus
|
independent-projects/arc/processor/src/main/java/io/quarkus/arc/processor/ContextRegistrar.java
|
{
"start": 426,
"end": 696
}
|
interface ____ extends BuildContext {
/**
*
* @param scopeAnnotation
* @return a new custom context configurator
*/
ContextConfigurator configure(Class<? extends Annotation> scopeAnnotation);
}
}
|
RegistrationContext
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/ModuleFactoryGeneratorTest.java
|
{
"start": 26912,
"end": 27316
}
|
class ____ {",
" @Provides",
" int provideInt() {",
" return 42;",
" }",
"}");
Source okNonPublicModuleFile =
CompilerTests.javaSource("test.OkNonPublicModule",
"package test;",
"",
"import dagger.Module;",
"import dagger.Provides;",
"",
"@Module",
"final
|
BadNonPublicModule
|
java
|
apache__kafka
|
group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/TopicIds.java
|
{
"start": 1496,
"end": 3346
}
|
interface ____ {
/**
* @return The TopicsImage used by the resolver.
*/
CoordinatorMetadataImage image();
/**
* Converts a topic id to a topic name.
*
* @param id The topic id.
* @return The topic name for the given topic id, or null if the topic does not exist.
*/
String name(Uuid id);
/**
* Converts a topic name to a topic id.
*
* @param name The topic name.
* @return The topic id for the given topic name, or null if the topic does not exist.
*/
Uuid id(String name);
/**
* Clears any cached data.
*
* Used for benchmarking purposes.
*/
void clear();
}
/**
* A TopicResolver without any caching.
*/
public record DefaultTopicResolver(CoordinatorMetadataImage image) implements TopicResolver {
public DefaultTopicResolver(
CoordinatorMetadataImage image
) {
this.image = Objects.requireNonNull(image);
}
@Override
public String name(Uuid id) {
return image.topicMetadata(id).map(CoordinatorMetadataImage.TopicMetadata::name).orElse(null);
}
@Override
public Uuid id(String name) {
return image.topicMetadata(name).map(CoordinatorMetadataImage.TopicMetadata::id).orElse(null);
}
@Override
public void clear() {
}
@Override
public String toString() {
return "DefaultTopicResolver(image=" + image + ")";
}
}
/**
* A TopicResolver that caches results.
*
* This cache is expected to be short-lived and only used within a single
* TargetAssignmentBuilder.build() call.
*/
public static
|
TopicResolver
|
java
|
spring-projects__spring-framework
|
spring-core-test/src/main/java/org/springframework/core/test/tools/SourceFile.java
|
{
"start": 2170,
"end": 2540
}
|
class ____ to get the source from
* @return a {@link SourceFile} instance
*/
public static SourceFile forTestClass(Class<?> type) {
return forClass(TEST_SOURCE_DIRECTORY, type);
}
/**
* Factory method to create a new {@link SourceFile} by looking up source
* for the given {@code Class}.
* @param sourceDirectory the source directory
* @param type the
|
file
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/jpa/graphs/queryhint/EmbeddableQueryHintEntityGraphTest.java
|
{
"start": 5440,
"end": 6150
}
|
class ____ {
@Embedded
private AnotherEmbeddable nestedEmbedded;
@ManyToOne(fetch = FetchType.LAZY)
private Child child;
@OneToMany(fetch = FetchType.LAZY)
private List<Child> children = new ArrayList<>();
public AnEmbeddable() {
}
public AnEmbeddable(AnotherEmbeddable nestedEmbeddedObject, Child child) {
this.nestedEmbedded = nestedEmbeddedObject;
this.child = child;
}
public AnotherEmbeddable getNestedEmbedded() {
return nestedEmbedded;
}
public Child getChild() {
return child;
}
public List<Child> getChildren() {
return children;
}
public void addChild(Child child) {
this.children.add( child );
}
}
@Embeddable
public static
|
AnEmbeddable
|
java
|
apache__camel
|
core/camel-core-engine/src/generated/java/org/apache/camel/impl/CamelContextConfigurer.java
|
{
"start": 685,
"end": 24111
}
|
class ____ extends org.apache.camel.support.component.PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
org.apache.camel.CamelContext target = (org.apache.camel.CamelContext) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "allowuseoriginalmessage":
case "allowUseOriginalMessage": target.setAllowUseOriginalMessage(property(camelContext, java.lang.Boolean.class, value)); return true;
case "applicationcontextclassloader":
case "applicationContextClassLoader": target.setApplicationContextClassLoader(property(camelContext, java.lang.ClassLoader.class, value)); return true;
case "autostartup":
case "autoStartup": target.setAutoStartup(property(camelContext, java.lang.Boolean.class, value)); return true;
case "autostartupexcludepattern":
case "autoStartupExcludePattern": target.setAutoStartupExcludePattern(property(camelContext, java.lang.String.class, value)); return true;
case "autowiredenabled":
case "autowiredEnabled": target.setAutowiredEnabled(property(camelContext, java.lang.Boolean.class, value)); return true;
case "backlogtracing":
case "backlogTracing": target.setBacklogTracing(property(camelContext, java.lang.Boolean.class, value)); return true;
case "backlogtracingrests":
case "backlogTracingRests": target.setBacklogTracingRests(property(camelContext, boolean.class, value)); return true;
case "backlogtracingstandby":
case "backlogTracingStandby": target.setBacklogTracingStandby(property(camelContext, boolean.class, value)); return true;
case "backlogtracingtemplates":
case "backlogTracingTemplates": target.setBacklogTracingTemplates(property(camelContext, boolean.class, value)); return true;
case "caseinsensitiveheaders":
case "caseInsensitiveHeaders": target.setCaseInsensitiveHeaders(property(camelContext, java.lang.Boolean.class, value)); return true;
case "classresolver":
case "classResolver": target.setClassResolver(property(camelContext, org.apache.camel.spi.ClassResolver.class, value)); return true;
case "debugstandby":
case "debugStandby": target.setDebugStandby(property(camelContext, boolean.class, value)); return true;
case "debugger": target.setDebugger(property(camelContext, org.apache.camel.spi.Debugger.class, value)); return true;
case "debugging": target.setDebugging(property(camelContext, java.lang.Boolean.class, value)); return true;
case "delayer": target.setDelayer(property(camelContext, java.lang.Long.class, value)); return true;
case "devconsole":
case "devConsole": target.setDevConsole(property(camelContext, java.lang.Boolean.class, value)); return true;
case "dumproutes":
case "dumpRoutes": target.setDumpRoutes(property(camelContext, java.lang.String.class, value)); return true;
case "executorservicemanager":
case "executorServiceManager": target.setExecutorServiceManager(property(camelContext, org.apache.camel.spi.ExecutorServiceManager.class, value)); return true;
case "globaloptions":
case "globalOptions": target.setGlobalOptions(property(camelContext, java.util.Map.class, value)); return true;
case "inflightrepository":
case "inflightRepository": target.setInflightRepository(property(camelContext, org.apache.camel.spi.InflightRepository.class, value)); return true;
case "injector": target.setInjector(property(camelContext, org.apache.camel.spi.Injector.class, value)); return true;
case "loadhealthchecks":
case "loadHealthChecks": target.setLoadHealthChecks(property(camelContext, java.lang.Boolean.class, value)); return true;
case "loadtypeconverters":
case "loadTypeConverters": target.setLoadTypeConverters(property(camelContext, java.lang.Boolean.class, value)); return true;
case "logexhaustedmessagebody":
case "logExhaustedMessageBody": target.setLogExhaustedMessageBody(property(camelContext, java.lang.Boolean.class, value)); return true;
case "logmask":
case "logMask": target.setLogMask(property(camelContext, java.lang.Boolean.class, value)); return true;
case "mdcloggingkeyspattern":
case "mDCLoggingKeysPattern": target.setMDCLoggingKeysPattern(property(camelContext, java.lang.String.class, value)); return true;
case "managementname":
case "managementName": target.setManagementName(property(camelContext, java.lang.String.class, value)); return true;
case "managementnamestrategy":
case "managementNameStrategy": target.setManagementNameStrategy(property(camelContext, org.apache.camel.spi.ManagementNameStrategy.class, value)); return true;
case "managementstrategy":
case "managementStrategy": target.setManagementStrategy(property(camelContext, org.apache.camel.spi.ManagementStrategy.class, value)); return true;
case "messagehistory":
case "messageHistory": target.setMessageHistory(property(camelContext, java.lang.Boolean.class, value)); return true;
case "messagehistoryfactory":
case "messageHistoryFactory": target.setMessageHistoryFactory(property(camelContext, org.apache.camel.spi.MessageHistoryFactory.class, value)); return true;
case "modeline": target.setModeline(property(camelContext, java.lang.Boolean.class, value)); return true;
case "namestrategy":
case "nameStrategy": target.setNameStrategy(property(camelContext, org.apache.camel.spi.CamelContextNameStrategy.class, value)); return true;
case "propertiescomponent":
case "propertiesComponent": target.setPropertiesComponent(property(camelContext, org.apache.camel.spi.PropertiesComponent.class, value)); return true;
case "restconfiguration":
case "restConfiguration": target.setRestConfiguration(property(camelContext, org.apache.camel.spi.RestConfiguration.class, value)); return true;
case "restregistry":
case "restRegistry": target.setRestRegistry(property(camelContext, org.apache.camel.spi.RestRegistry.class, value)); return true;
case "routecontroller":
case "routeController": target.setRouteController(property(camelContext, org.apache.camel.spi.RouteController.class, value)); return true;
case "runtimeendpointregistry":
case "runtimeEndpointRegistry": target.setRuntimeEndpointRegistry(property(camelContext, org.apache.camel.spi.RuntimeEndpointRegistry.class, value)); return true;
case "sslcontextparameters":
case "sSLContextParameters": target.setSSLContextParameters(property(camelContext, org.apache.camel.support.jsse.SSLContextParameters.class, value)); return true;
case "shutdownroute":
case "shutdownRoute": target.setShutdownRoute(property(camelContext, org.apache.camel.ShutdownRoute.class, value)); return true;
case "shutdownrunningtask":
case "shutdownRunningTask": target.setShutdownRunningTask(property(camelContext, org.apache.camel.ShutdownRunningTask.class, value)); return true;
case "shutdownstrategy":
case "shutdownStrategy": target.setShutdownStrategy(property(camelContext, org.apache.camel.spi.ShutdownStrategy.class, value)); return true;
case "sourcelocationenabled":
case "sourceLocationEnabled": target.setSourceLocationEnabled(property(camelContext, java.lang.Boolean.class, value)); return true;
case "startupsummarylevel":
case "startupSummaryLevel": target.setStartupSummaryLevel(property(camelContext, org.apache.camel.StartupSummaryLevel.class, value)); return true;
case "streamcaching":
case "streamCaching": target.setStreamCaching(property(camelContext, java.lang.Boolean.class, value)); return true;
case "streamcachingstrategy":
case "streamCachingStrategy": target.setStreamCachingStrategy(property(camelContext, org.apache.camel.spi.StreamCachingStrategy.class, value)); return true;
case "tracer": target.setTracer(property(camelContext, org.apache.camel.spi.Tracer.class, value)); return true;
case "tracing": target.setTracing(property(camelContext, java.lang.Boolean.class, value)); return true;
case "tracingloggingformat":
case "tracingLoggingFormat": target.setTracingLoggingFormat(property(camelContext, java.lang.String.class, value)); return true;
case "tracingpattern":
case "tracingPattern": target.setTracingPattern(property(camelContext, java.lang.String.class, value)); return true;
case "tracingrests":
case "tracingRests": target.setTracingRests(property(camelContext, boolean.class, value)); return true;
case "tracingstandby":
case "tracingStandby": target.setTracingStandby(property(camelContext, boolean.class, value)); return true;
case "tracingtemplates":
case "tracingTemplates": target.setTracingTemplates(property(camelContext, boolean.class, value)); return true;
case "typeconverterregistry":
case "typeConverterRegistry": target.setTypeConverterRegistry(property(camelContext, org.apache.camel.spi.TypeConverterRegistry.class, value)); return true;
case "typeconverterstatisticsenabled":
case "typeConverterStatisticsEnabled": target.setTypeConverterStatisticsEnabled(property(camelContext, java.lang.Boolean.class, value)); return true;
case "usebreadcrumb":
case "useBreadcrumb": target.setUseBreadcrumb(property(camelContext, java.lang.Boolean.class, value)); return true;
case "usedatatype":
case "useDataType": target.setUseDataType(property(camelContext, java.lang.Boolean.class, value)); return true;
case "usemdclogging":
case "useMDCLogging": target.setUseMDCLogging(property(camelContext, java.lang.Boolean.class, value)); return true;
case "uuidgenerator":
case "uuidGenerator": target.setUuidGenerator(property(camelContext, org.apache.camel.spi.UuidGenerator.class, value)); return true;
case "vaultconfiguration":
case "vaultConfiguration": target.setVaultConfiguration(property(camelContext, org.apache.camel.vault.VaultConfiguration.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "allowuseoriginalmessage":
case "allowUseOriginalMessage": return java.lang.Boolean.class;
case "applicationcontextclassloader":
case "applicationContextClassLoader": return java.lang.ClassLoader.class;
case "autostartup":
case "autoStartup": return java.lang.Boolean.class;
case "autostartupexcludepattern":
case "autoStartupExcludePattern": return java.lang.String.class;
case "autowiredenabled":
case "autowiredEnabled": return java.lang.Boolean.class;
case "backlogtracing":
case "backlogTracing": return java.lang.Boolean.class;
case "backlogtracingrests":
case "backlogTracingRests": return boolean.class;
case "backlogtracingstandby":
case "backlogTracingStandby": return boolean.class;
case "backlogtracingtemplates":
case "backlogTracingTemplates": return boolean.class;
case "caseinsensitiveheaders":
case "caseInsensitiveHeaders": return java.lang.Boolean.class;
case "classresolver":
case "classResolver": return org.apache.camel.spi.ClassResolver.class;
case "debugstandby":
case "debugStandby": return boolean.class;
case "debugger": return org.apache.camel.spi.Debugger.class;
case "debugging": return java.lang.Boolean.class;
case "delayer": return java.lang.Long.class;
case "devconsole":
case "devConsole": return java.lang.Boolean.class;
case "dumproutes":
case "dumpRoutes": return java.lang.String.class;
case "executorservicemanager":
case "executorServiceManager": return org.apache.camel.spi.ExecutorServiceManager.class;
case "globaloptions":
case "globalOptions": return java.util.Map.class;
case "inflightrepository":
case "inflightRepository": return org.apache.camel.spi.InflightRepository.class;
case "injector": return org.apache.camel.spi.Injector.class;
case "loadhealthchecks":
case "loadHealthChecks": return java.lang.Boolean.class;
case "loadtypeconverters":
case "loadTypeConverters": return java.lang.Boolean.class;
case "logexhaustedmessagebody":
case "logExhaustedMessageBody": return java.lang.Boolean.class;
case "logmask":
case "logMask": return java.lang.Boolean.class;
case "mdcloggingkeyspattern":
case "mDCLoggingKeysPattern": return java.lang.String.class;
case "managementname":
case "managementName": return java.lang.String.class;
case "managementnamestrategy":
case "managementNameStrategy": return org.apache.camel.spi.ManagementNameStrategy.class;
case "managementstrategy":
case "managementStrategy": return org.apache.camel.spi.ManagementStrategy.class;
case "messagehistory":
case "messageHistory": return java.lang.Boolean.class;
case "messagehistoryfactory":
case "messageHistoryFactory": return org.apache.camel.spi.MessageHistoryFactory.class;
case "modeline": return java.lang.Boolean.class;
case "namestrategy":
case "nameStrategy": return org.apache.camel.spi.CamelContextNameStrategy.class;
case "propertiescomponent":
case "propertiesComponent": return org.apache.camel.spi.PropertiesComponent.class;
case "restconfiguration":
case "restConfiguration": return org.apache.camel.spi.RestConfiguration.class;
case "restregistry":
case "restRegistry": return org.apache.camel.spi.RestRegistry.class;
case "routecontroller":
case "routeController": return org.apache.camel.spi.RouteController.class;
case "runtimeendpointregistry":
case "runtimeEndpointRegistry": return org.apache.camel.spi.RuntimeEndpointRegistry.class;
case "sslcontextparameters":
case "sSLContextParameters": return org.apache.camel.support.jsse.SSLContextParameters.class;
case "shutdownroute":
case "shutdownRoute": return org.apache.camel.ShutdownRoute.class;
case "shutdownrunningtask":
case "shutdownRunningTask": return org.apache.camel.ShutdownRunningTask.class;
case "shutdownstrategy":
case "shutdownStrategy": return org.apache.camel.spi.ShutdownStrategy.class;
case "sourcelocationenabled":
case "sourceLocationEnabled": return java.lang.Boolean.class;
case "startupsummarylevel":
case "startupSummaryLevel": return org.apache.camel.StartupSummaryLevel.class;
case "streamcaching":
case "streamCaching": return java.lang.Boolean.class;
case "streamcachingstrategy":
case "streamCachingStrategy": return org.apache.camel.spi.StreamCachingStrategy.class;
case "tracer": return org.apache.camel.spi.Tracer.class;
case "tracing": return java.lang.Boolean.class;
case "tracingloggingformat":
case "tracingLoggingFormat": return java.lang.String.class;
case "tracingpattern":
case "tracingPattern": return java.lang.String.class;
case "tracingrests":
case "tracingRests": return boolean.class;
case "tracingstandby":
case "tracingStandby": return boolean.class;
case "tracingtemplates":
case "tracingTemplates": return boolean.class;
case "typeconverterregistry":
case "typeConverterRegistry": return org.apache.camel.spi.TypeConverterRegistry.class;
case "typeconverterstatisticsenabled":
case "typeConverterStatisticsEnabled": return java.lang.Boolean.class;
case "usebreadcrumb":
case "useBreadcrumb": return java.lang.Boolean.class;
case "usedatatype":
case "useDataType": return java.lang.Boolean.class;
case "usemdclogging":
case "useMDCLogging": return java.lang.Boolean.class;
case "uuidgenerator":
case "uuidGenerator": return org.apache.camel.spi.UuidGenerator.class;
case "vaultconfiguration":
case "vaultConfiguration": return org.apache.camel.vault.VaultConfiguration.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
org.apache.camel.CamelContext target = (org.apache.camel.CamelContext) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "allowuseoriginalmessage":
case "allowUseOriginalMessage": return target.isAllowUseOriginalMessage();
case "applicationcontextclassloader":
case "applicationContextClassLoader": return target.getApplicationContextClassLoader();
case "autostartup":
case "autoStartup": return target.isAutoStartup();
case "autostartupexcludepattern":
case "autoStartupExcludePattern": return target.getAutoStartupExcludePattern();
case "autowiredenabled":
case "autowiredEnabled": return target.isAutowiredEnabled();
case "backlogtracing":
case "backlogTracing": return target.isBacklogTracing();
case "backlogtracingrests":
case "backlogTracingRests": return target.isBacklogTracingRests();
case "backlogtracingstandby":
case "backlogTracingStandby": return target.isBacklogTracingStandby();
case "backlogtracingtemplates":
case "backlogTracingTemplates": return target.isBacklogTracingTemplates();
case "caseinsensitiveheaders":
case "caseInsensitiveHeaders": return target.isCaseInsensitiveHeaders();
case "classresolver":
case "classResolver": return target.getClassResolver();
case "debugstandby":
case "debugStandby": return target.isDebugStandby();
case "debugger": return target.getDebugger();
case "debugging": return target.isDebugging();
case "delayer": return target.getDelayer();
case "devconsole":
case "devConsole": return target.isDevConsole();
case "dumproutes":
case "dumpRoutes": return target.getDumpRoutes();
case "executorservicemanager":
case "executorServiceManager": return target.getExecutorServiceManager();
case "globaloptions":
case "globalOptions": return target.getGlobalOptions();
case "inflightrepository":
case "inflightRepository": return target.getInflightRepository();
case "injector": return target.getInjector();
case "loadhealthchecks":
case "loadHealthChecks": return target.isLoadHealthChecks();
case "loadtypeconverters":
case "loadTypeConverters": return target.isLoadTypeConverters();
case "logexhaustedmessagebody":
case "logExhaustedMessageBody": return target.isLogExhaustedMessageBody();
case "logmask":
case "logMask": return target.isLogMask();
case "mdcloggingkeyspattern":
case "mDCLoggingKeysPattern": return target.getMDCLoggingKeysPattern();
case "managementname":
case "managementName": return target.getManagementName();
case "managementnamestrategy":
case "managementNameStrategy": return target.getManagementNameStrategy();
case "managementstrategy":
case "managementStrategy": return target.getManagementStrategy();
case "messagehistory":
case "messageHistory": return target.isMessageHistory();
case "messagehistoryfactory":
case "messageHistoryFactory": return target.getMessageHistoryFactory();
case "modeline": return target.isModeline();
case "namestrategy":
case "nameStrategy": return target.getNameStrategy();
case "propertiescomponent":
case "propertiesComponent": return target.getPropertiesComponent();
case "restconfiguration":
case "restConfiguration": return target.getRestConfiguration();
case "restregistry":
case "restRegistry": return target.getRestRegistry();
case "routecontroller":
case "routeController": return target.getRouteController();
case "runtimeendpointregistry":
case "runtimeEndpointRegistry": return target.getRuntimeEndpointRegistry();
case "sslcontextparameters":
case "sSLContextParameters": return target.getSSLContextParameters();
case "shutdownroute":
case "shutdownRoute": return target.getShutdownRoute();
case "shutdownrunningtask":
case "shutdownRunningTask": return target.getShutdownRunningTask();
case "shutdownstrategy":
case "shutdownStrategy": return target.getShutdownStrategy();
case "sourcelocationenabled":
case "sourceLocationEnabled": return target.isSourceLocationEnabled();
case "startupsummarylevel":
case "startupSummaryLevel": return target.getStartupSummaryLevel();
case "streamcaching":
case "streamCaching": return target.isStreamCaching();
case "streamcachingstrategy":
case "streamCachingStrategy": return target.getStreamCachingStrategy();
case "tracer": return target.getTracer();
case "tracing": return target.isTracing();
case "tracingloggingformat":
case "tracingLoggingFormat": return target.getTracingLoggingFormat();
case "tracingpattern":
case "tracingPattern": return target.getTracingPattern();
case "tracingrests":
case "tracingRests": return target.isTracingRests();
case "tracingstandby":
case "tracingStandby": return target.isTracingStandby();
case "tracingtemplates":
case "tracingTemplates": return target.isTracingTemplates();
case "typeconverterregistry":
case "typeConverterRegistry": return target.getTypeConverterRegistry();
case "typeconverterstatisticsenabled":
case "typeConverterStatisticsEnabled": return target.isTypeConverterStatisticsEnabled();
case "usebreadcrumb":
case "useBreadcrumb": return target.isUseBreadcrumb();
case "usedatatype":
case "useDataType": return target.isUseDataType();
case "usemdclogging":
case "useMDCLogging": return target.isUseMDCLogging();
case "uuidgenerator":
case "uuidGenerator": return target.getUuidGenerator();
case "vaultconfiguration":
case "vaultConfiguration": return target.getVaultConfiguration();
default: return null;
}
}
@Override
public Object getCollectionValueType(Object target, String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "globaloptions":
case "globalOptions": return java.lang.String.class;
default: return null;
}
}
}
|
CamelContextConfigurer
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/LambdaFunctionalInterfaceTest.java
|
{
"start": 7774,
"end": 8234
}
|
class ____ {
public static double findOptimalMu(Function<Double, Long> costFunc, double mid) {
return costFunc.apply(mid);
}
// call site: anonymous Function
public Double getMu() {
return findOptimalMu(
new Function<Double, Long>() {
@Override
public Long apply(Double mu) {
return 0L;
}
},
3.0);
}
}
public static
|
WithCallSiteAnonymousFunction
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/engine/transaction/jta/platform/spi/JtaPlatformException.java
|
{
"start": 309,
"end": 516
}
|
class ____ extends HibernateException {
public JtaPlatformException(String s) {
super( s );
}
public JtaPlatformException(String string, Throwable root) {
super( string, root );
}
}
|
JtaPlatformException
|
java
|
apache__dubbo
|
dubbo-config/dubbo-config-api/src/test/java/org/apache/dubbo/config/AbstractConfigTest.java
|
{
"start": 29019,
"end": 29219
}
|
interface ____ {
String value() default "";
}
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.FIELD, ElementType.METHOD, ElementType.ANNOTATION_TYPE})
public @
|
ConfigField
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetJobReportRequestPBImpl.java
|
{
"start": 1406,
"end": 3237
}
|
class ____ extends ProtoBase<GetJobReportRequestProto> implements GetJobReportRequest {
GetJobReportRequestProto proto = GetJobReportRequestProto.getDefaultInstance();
GetJobReportRequestProto.Builder builder = null;
boolean viaProto = false;
private JobId jobId = null;
public GetJobReportRequestPBImpl() {
builder = GetJobReportRequestProto.newBuilder();
}
public GetJobReportRequestPBImpl(GetJobReportRequestProto proto) {
this.proto = proto;
viaProto = true;
}
public GetJobReportRequestProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.jobId != null) {
builder.setJobId(convertToProtoFormat(this.jobId));
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = GetJobReportRequestProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public JobId getJobId() {
GetJobReportRequestProtoOrBuilder p = viaProto ? proto : builder;
if (this.jobId != null) {
return this.jobId;
}
if (!p.hasJobId()) {
return null;
}
this.jobId = convertFromProtoFormat(p.getJobId());
return this.jobId;
}
@Override
public void setJobId(JobId jobId) {
maybeInitBuilder();
if (jobId == null)
builder.clearJobId();
this.jobId = jobId;
}
private JobIdPBImpl convertFromProtoFormat(JobIdProto p) {
return new JobIdPBImpl(p);
}
private JobIdProto convertToProtoFormat(JobId t) {
return ((JobIdPBImpl)t).getProto();
}
}
|
GetJobReportRequestPBImpl
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/index/mapper/blockloader/docvalues/BytesRefsFromCustomBinaryBlockLoader.java
|
{
"start": 2834,
"end": 4315
}
|
class ____ extends AbstractBytesRefsFromBinary {
private final ByteArrayStreamInput in = new ByteArrayStreamInput();
private final BytesRef scratch = new BytesRef();
BytesRefsFromCustomBinary(BinaryDocValues docValues) {
super(docValues);
}
@Override
public void read(int doc, BytesRefBuilder builder) throws IOException {
if (false == docValues.advanceExact(doc)) {
builder.appendNull();
return;
}
BytesRef bytes = docValues.binaryValue();
assert bytes.length > 0;
in.reset(bytes.bytes, bytes.offset, bytes.length);
int count = in.readVInt();
scratch.bytes = bytes.bytes;
if (count == 1) {
scratch.length = in.readVInt();
scratch.offset = in.getPosition();
builder.appendBytesRef(scratch);
return;
}
builder.beginPositionEntry();
for (int v = 0; v < count; v++) {
scratch.length = in.readVInt();
scratch.offset = in.getPosition();
in.setPosition(scratch.offset + scratch.length);
builder.appendBytesRef(scratch);
}
builder.endPositionEntry();
}
@Override
public String toString() {
return "BlockDocValuesReader.BytesCustom";
}
}
}
|
BytesRefsFromCustomBinary
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/local/LocalFs.java
|
{
"start": 1351,
"end": 1888
}
|
class ____ extends ChecksumFs {
LocalFs(final Configuration conf) throws IOException, URISyntaxException {
super(new RawLocalFs(conf));
}
/**
* This constructor has the signature needed by
* {@link AbstractFileSystem#createFileSystem(URI, Configuration)}.
*
* @param theUri which must be that of localFs
* @param conf
* @throws IOException
* @throws URISyntaxException
*/
LocalFs(final URI theUri, final Configuration conf) throws IOException,
URISyntaxException {
this(conf);
}
}
|
LocalFs
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/ClassNamedLikeTypeParameterTest.java
|
{
"start": 1213,
"end": 1290
}
|
class ____ {
// BUG: Diagnostic contains:
static
|
Test
|
java
|
playframework__playframework
|
core/play-integration-test/src/test/java/play/it/http/ActionCompositionOrderTest.java
|
{
"start": 4003,
"end": 4087
}
|
interface ____ {
SomeRepeatable[] value();
}
public static
|
SomeActionAnnotation
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/context/DynamicPropertyRegistrarIntegrationTests.java
|
{
"start": 5485,
"end": 5748
}
|
class ____ implements ApiUrlClient {
private String apiUrl;
@Autowired
void setApiUrl(@Value("${api.url.1}") String apiUrl) {
this.apiUrl = apiUrl;
}
@Override
public String getApiUrl() {
return this.apiUrl;
}
}
static
|
SetterInjectedService
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/jpa/cascade/FetchTest2.java
|
{
"start": 449,
"end": 1750
}
|
class ____ {
@AfterEach
public void tearDown(EntityManagerFactoryScope scope) {
scope.getEntityManagerFactory().getSchemaManager().truncate();
}
@Test
public void testProxyTransientStuff(EntityManagerFactoryScope scope) {
Troop2 disney = new Troop2();
disney.setName( "Disney" );
Soldier2 mickey = new Soldier2();
mickey.setName( "Mickey" );
mickey.setTroop( disney );
scope.inTransaction(
entityManager -> {
entityManager.persist( disney );
entityManager.persist( mickey );
}
);
scope.inTransaction(
entityManager -> {
Soldier2 _soldier = entityManager.find( Soldier2.class, mickey.getId() );
_soldier.getTroop().getId();
try {
entityManager.flush();
}
catch (IllegalStateException e) {
fail( "Should not raise an exception" );
}
}
);
scope.inTransaction(
entityManager -> {
//load troop wo a proxy
Troop2 _troop = entityManager.find( Troop2.class, disney.getId() );
Soldier2 _soldier = entityManager.find( Soldier2.class, mickey.getId() );
try {
entityManager.flush();
}
catch (IllegalStateException e) {
fail( "Should not raise an exception" );
}
entityManager.remove( _troop );
entityManager.remove( _soldier );
}
);
}
}
|
FetchTest2
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/metamodel/mapping/ModelPart.java
|
{
"start": 1054,
"end": 6986
}
|
interface ____ extends MappingModelExpressible {
/**
* @asciidoc
*
* The path for this fetchable back to an entity in the domain model. Acts as a unique
* identifier for individual parts.
*
* Some examples:
*
* For an entity, the role name is simply the entity name.
*
* For embeddable the role name is the path back to the root entity. E.g. a Person's address
* would be a path `Person#address`.
*
* For a collection the path would be the same as the "collection role". E.g. an Order's lineItems
* would be `Order#lineItems`. This is the same as the historical `CollectionPersister#getRoleName`.
*
* For the (model)parts of a collection the role is either `{element}` or `{index}` depending. E.g.
* `Order#lineItems.{element}`. Attributes of the element or index type (embeddable or entity typed)
* would be based on this role. E.g. `Order#lineItems.{element}.quantity`
*
* For an attribute of an embedded, the role would be relative to its "container". E.g. `Person#address.city` or
* `Person#addresses.{element}.city`
*
* @apiNote Whereas {@link #getPartName()} is local to this part, NavigableRole can be a compound path
*
* @see #getPartName()
*/
NavigableRole getNavigableRole();
/**
* The local part name, which is generally the unqualified role name
*/
String getPartName();
/**
* The type for this part.
*/
MappingType getPartMappingType();
/**
* The Java type for this part. Generally equivalent to
* {@link MappingType#getMappedJavaType()} relative to
* {@link #getPartMappingType()}
*/
JavaType<?> getJavaType();
/**
* Whether this model part describes something that physically
* exists in the domain model.
* <p/>
* For example, an entity's {@linkplain EntityDiscriminatorMapping discriminator}
* is part of the model, but is not a physical part of the domain model - there
* is no "discriminator attribute".
* <p/>
* Also indicates whether the part is castable to {@link VirtualModelPart}
*/
default boolean isVirtual() {
return false;
}
default boolean isEntityIdentifierMapping() {
return false;
}
boolean hasPartitionedSelectionMapping();
/**
* Create a DomainResult for a specific reference to this ModelPart.
*/
<T> DomainResult<T> createDomainResult(
NavigablePath navigablePath,
TableGroup tableGroup,
String resultVariable,
DomainResultCreationState creationState);
/**
* Apply SQL selections for a specific reference to this ModelPart outside the domain query's root select clause.
*/
void applySqlSelections(
NavigablePath navigablePath,
TableGroup tableGroup,
DomainResultCreationState creationState);
/**
* Apply SQL selections for a specific reference to this ModelPart outside the domain query's root select clause.
*/
void applySqlSelections(
NavigablePath navigablePath,
TableGroup tableGroup,
DomainResultCreationState creationState,
BiConsumer<SqlSelection,JdbcMapping> selectionConsumer);
/**
* A short hand form of {@link #forEachSelectable(int, SelectableConsumer)}, that passes 0 as offset.
*/
default int forEachSelectable(SelectableConsumer consumer) {
return forEachSelectable( 0, consumer );
}
/**
* Visits each selectable mapping with the selectable index offset by the given value.
* Returns the amount of jdbc types that have been visited.
*/
default int forEachSelectable(int offset, SelectableConsumer consumer) {
return 0;
}
default AttributeMapping asAttributeMapping() {
return null;
}
default EntityMappingType asEntityMappingType(){
return null;
}
@Nullable default BasicValuedModelPart asBasicValuedModelPart() {
return null;
}
/**
* A short hand form of {@link #breakDownJdbcValues(Object, int, Object, Object, JdbcValueBiConsumer, SharedSessionContractImplementor)},
* that passes 0 as offset and null for the two values {@code X} and {@code Y}.
*/
default int breakDownJdbcValues(
Object domainValue,
JdbcValueConsumer valueConsumer,
SharedSessionContractImplementor session) {
return breakDownJdbcValues( domainValue, 0, null, null, valueConsumer, session );
}
/**
* Breaks down the domain value to its constituent JDBC values.
*
* Think of it as breaking the multi-dimensional array into a visitable flat array.
* Additionally, it passes through the values {@code X} and {@code Y} to the consumer.
* Returns the amount of jdbc types that have been visited.
*/
<X, Y> int breakDownJdbcValues(
Object domainValue,
int offset,
X x,
Y y,
JdbcValueBiConsumer<X, Y> valueConsumer,
SharedSessionContractImplementor session);
/**
* A short hand form of {@link #decompose(Object, int, Object, Object, JdbcValueBiConsumer, SharedSessionContractImplementor)},
* that passes 0 as offset and null for the two values {@code X} and {@code Y}.
*/
default int decompose(
Object domainValue,
JdbcValueConsumer valueConsumer,
SharedSessionContractImplementor session) {
return decompose( domainValue, 0, null, null, valueConsumer, session );
}
/**
* Similar to {@link #breakDownJdbcValues(Object, int, Object, Object, JdbcValueBiConsumer, SharedSessionContractImplementor)},
* but this method is supposed to be used for decomposing values for assignment expressions.
* Returns the amount of jdbc types that have been visited.
*/
default <X, Y> int decompose(
Object domainValue,
int offset,
X x,
Y y,
JdbcValueBiConsumer<X, Y> valueConsumer,
SharedSessionContractImplementor session) {
return breakDownJdbcValues( domainValue, offset, x, y, valueConsumer, session );
}
EntityMappingType findContainingEntityMapping();
default boolean areEqual(@Nullable Object one, @Nullable Object other, SharedSessionContractImplementor session) {
// NOTE : deepEquals to account for arrays (compound natural-id)
return Objects.deepEquals( one, other );
}
/**
* Functional
|
ModelPart
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/boot/database/qualfiedTableNaming/DefaultCatalogAndSchemaTest.java
|
{
"start": 20582,
"end": 34710
}
|
class ____ longer work,
// because these methods generally assume the native entity name is the FQCN.
// Thus we use custom code.
AbstractEntityPersister persister = (AbstractEntityPersister) sessionFactory.getRuntimeMetamodels().getMappingMetamodel().streamEntityDescriptors()
.filter( p -> p.getMappedClass().equals( entityClass ) )
.findFirst()
.orElseThrow( () -> new IllegalStateException( "Cannot find persister for " + entityClass ) );
String jpaEntityName = sessionFactory.getJpaMetamodel().getEntities()
.stream()
.filter( p -> p.getJavaType().equals( entityClass ) )
.findFirst()
.orElseThrow( () -> new IllegalStateException( "Cannot find entity metamodel for " + entityClass ) )
.getName();
// Table names are what's used for Query, in particular.
verifyOnlyQualifier( persister.getTableName(), SqlType.RUNTIME,
jpaEntityName, expectedQualifier );
// Here, to simplify assertions, we assume all derived entity types have:
// - an entity name prefixed with the name of their super entity type
// - the same explicit catalog and schema, if any, as their super entity type
verifyOnlyQualifier( persister.getTableNames(), SqlType.RUNTIME,
jpaEntityName, expectedQualifier );
// This will include SQL generated by ID generators in some cases, which will be validated here
// because ID generators table/sequence names are prefixed with the owning entity name.
{
final MutationOperationGroup staticSqlInsertGroup = persister.getInsertCoordinator().getStaticMutationOperationGroup();
final String[] insertSqls = new String[staticSqlInsertGroup.getNumberOfOperations()];
for ( int tablePosition = 0;
tablePosition < staticSqlInsertGroup.getNumberOfOperations();
tablePosition++ ) {
final MutationOperation insertOperation = staticSqlInsertGroup.getOperation( tablePosition );
if ( insertOperation instanceof PreparableMutationOperation ) {
insertSqls[tablePosition] = ( (PreparableMutationOperation) insertOperation ).getSqlString();
}
}
verifyOnlyQualifier( insertSqls, SqlType.RUNTIME, jpaEntityName, expectedQualifier );
}
String identitySelectString = persister.getIdentitySelectString();
if ( identitySelectString != null ) {
verifyOnlyQualifierOptional( identitySelectString, SqlType.RUNTIME, jpaEntityName, expectedQualifier );
}
{
final MutationOperationGroup staticSqlUpdateGroup = persister.getUpdateCoordinator().getStaticMutationOperationGroup();
final String[] sqlUpdateStrings = new String[staticSqlUpdateGroup.getNumberOfOperations()];
for ( int tablePosition = 0;
tablePosition < staticSqlUpdateGroup.getNumberOfOperations();
tablePosition++ ) {
final MutationOperation operation = staticSqlUpdateGroup.getOperation( tablePosition );
if ( operation instanceof PreparableMutationOperation ) {
sqlUpdateStrings[tablePosition] = ( (PreparableMutationOperation) operation ).getSqlString();
}
}
verifyOnlyQualifier( sqlUpdateStrings, SqlType.RUNTIME, jpaEntityName, expectedQualifier );
}
{
final MutationOperationGroup staticDeleteGroup = persister.getDeleteCoordinator().getStaticMutationOperationGroup();
final String[] sqlDeleteStrings = new String[staticDeleteGroup.getNumberOfOperations()];
for ( int tablePosition = 0; tablePosition < staticDeleteGroup.getNumberOfOperations(); tablePosition++ ) {
final MutationOperation operation = staticDeleteGroup.getOperation( tablePosition );
if ( operation instanceof PreparableMutationOperation ) {
sqlDeleteStrings[tablePosition] = ( (PreparableMutationOperation) operation ).getSqlString();
}
}
verifyOnlyQualifier( sqlDeleteStrings, SqlType.RUNTIME, jpaEntityName, expectedQualifier );
}
// This is used in the "select" id generator in particular.
verifyOnlyQualifierOptional( persister.getSelectByUniqueKeyString( "basic" ), SqlType.RUNTIME,
jpaEntityName, expectedQualifier );
}
@Test
public void tableGenerator() {
org.hibernate.id.enhanced.TableGenerator generator = idGenerator(
org.hibernate.id.enhanced.TableGenerator.class,
EntityWithDefaultQualifiersWithTableGenerator.class );
verifyOnlyQualifier( generator.getAllSqlForTests(), SqlType.RUNTIME,
EntityWithDefaultQualifiersWithTableGenerator.NAME, expectedDefaultQualifier() );
generator = idGenerator( org.hibernate.id.enhanced.TableGenerator.class,
EntityWithExplicitQualifiersWithTableGenerator.class );
verifyOnlyQualifier( generator.getAllSqlForTests(), SqlType.RUNTIME,
EntityWithExplicitQualifiersWithTableGenerator.NAME, expectedExplicitQualifier() );
}
@Test
public void enhancedTableGenerator() {
org.hibernate.id.enhanced.TableGenerator generator = idGenerator(
org.hibernate.id.enhanced.TableGenerator.class,
EntityWithDefaultQualifiersWithTableGenerator.class );
verifyOnlyQualifier( generator.getAllSqlForTests(), SqlType.RUNTIME,
EntityWithDefaultQualifiersWithTableGenerator.NAME, expectedDefaultQualifier() );
generator = idGenerator( org.hibernate.id.enhanced.TableGenerator.class,
EntityWithExplicitQualifiersWithTableGenerator.class );
verifyOnlyQualifier( generator.getAllSqlForTests(), SqlType.RUNTIME,
EntityWithExplicitQualifiersWithTableGenerator.NAME, expectedExplicitQualifier() );
}
@Test
public void sequenceGenerator() {
org.hibernate.id.enhanced.SequenceStyleGenerator generator = idGenerator(
org.hibernate.id.enhanced.SequenceStyleGenerator.class,
EntityWithDefaultQualifiersWithSequenceGenerator.class );
verifyOnlyQualifier( generator.getDatabaseStructure().getAllSqlForTests(), SqlType.RUNTIME,
EntityWithDefaultQualifiersWithSequenceGenerator.NAME, expectedDefaultQualifier() );
generator = idGenerator( org.hibernate.id.enhanced.SequenceStyleGenerator.class,
EntityWithExplicitQualifiersWithSequenceGenerator.class );
verifyOnlyQualifier( generator.getDatabaseStructure().getAllSqlForTests(), SqlType.RUNTIME,
EntityWithExplicitQualifiersWithSequenceGenerator.NAME, expectedExplicitQualifier() );
}
@Test
public void enhancedSequenceGenerator() {
org.hibernate.id.enhanced.SequenceStyleGenerator generator = idGenerator(
org.hibernate.id.enhanced.SequenceStyleGenerator.class,
EntityWithDefaultQualifiersWithEnhancedSequenceGenerator.class );
verifyOnlyQualifier( generator.getDatabaseStructure().getAllSqlForTests(), SqlType.RUNTIME,
EntityWithDefaultQualifiersWithEnhancedSequenceGenerator.NAME, expectedDefaultQualifier() );
generator = idGenerator( org.hibernate.id.enhanced.SequenceStyleGenerator.class,
EntityWithExplicitQualifiersWithEnhancedSequenceGenerator.class );
verifyOnlyQualifier( generator.getDatabaseStructure().getAllSqlForTests(), SqlType.RUNTIME,
EntityWithExplicitQualifiersWithEnhancedSequenceGenerator.NAME, expectedExplicitQualifier() );
}
@Test
public void incrementGenerator() {
org.hibernate.id.IncrementGenerator generator = idGenerator( org.hibernate.id.IncrementGenerator.class,
EntityWithDefaultQualifiersWithIncrementGenerator.class );
verifyOnlyQualifier( generator.getAllSqlForTests(), SqlType.RUNTIME,
EntityWithDefaultQualifiersWithIncrementGenerator.NAME, expectedDefaultQualifier() );
generator = idGenerator( org.hibernate.id.IncrementGenerator.class,
EntityWithExplicitQualifiersWithIncrementGenerator.class );
verifyOnlyQualifier( generator.getAllSqlForTests(), SqlType.RUNTIME,
EntityWithExplicitQualifiersWithIncrementGenerator.NAME, expectedExplicitQualifier() );
}
private <T extends IdentifierGenerator> T idGenerator(Class<T> expectedType, Class<?> entityClass) {
final AbstractEntityPersister persister = (AbstractEntityPersister) factoryScope.getSessionFactory().getRuntimeMetamodels()
.getMappingMetamodel()
.getEntityDescriptor( entityClass );
return expectedType.cast( persister.getIdentifierGenerator() );
}
private void verifyDDLCreateCatalogOrSchema(String sql) {
final SessionFactoryImplementor sessionFactory = factoryScope.getSessionFactory();
final Dialect dialect = sessionFactory.getJdbcServices().getDialect();
if ( sessionFactory.getJdbcServices().getDialect().canCreateCatalog() ) {
assertThat( sql ).contains( dialect.getCreateCatalogCommand( EXPLICIT_CATALOG ) );
assertThat( sql ).contains( dialect.getCreateCatalogCommand( IMPLICIT_FILE_LEVEL_CATALOG ) );
if ( options.expectedDefaultCatalog != null ) {
assertThat( sql ).contains( dialect.getCreateCatalogCommand( options.expectedDefaultCatalog ) );
}
}
if ( sessionFactory.getJdbcServices().getDialect().canCreateSchema() ) {
assertThat( sql ).contains( dialect.getCreateSchemaCommand( EXPLICIT_SCHEMA ) );
assertThat( sql ).contains( dialect.getCreateSchemaCommand( IMPLICIT_FILE_LEVEL_SCHEMA ) );
if ( options.expectedDefaultSchema != null ) {
assertThat( sql ).contains( dialect.getCreateSchemaCommand( options.expectedDefaultSchema ) );
}
}
}
private void verifyDDLDropCatalogOrSchema(String sql) {
final SessionFactoryImplementor sessionFactory = factoryScope.getSessionFactory();
final Dialect dialect = sessionFactory.getJdbcServices().getDialect();
if ( sessionFactory.getJdbcServices().getDialect().canCreateCatalog() ) {
assertThat( sql ).contains( dialect.getDropCatalogCommand( EXPLICIT_CATALOG ) );
assertThat( sql ).contains( dialect.getDropCatalogCommand( IMPLICIT_FILE_LEVEL_CATALOG ) );
if ( options.expectedDefaultCatalog != null ) {
assertThat( sql ).contains( dialect.getDropCatalogCommand( options.expectedDefaultCatalog ) );
}
}
if ( sessionFactory.getJdbcServices().getDialect().canCreateSchema() ) {
assertThat( sql ).contains( dialect.getDropSchemaCommand( EXPLICIT_SCHEMA ) );
assertThat( sql ).contains( dialect.getDropSchemaCommand( IMPLICIT_FILE_LEVEL_SCHEMA ) );
if ( options.expectedDefaultSchema != null ) {
assertThat( sql ).contains( dialect.getDropSchemaCommand( options.expectedDefaultSchema ) );
}
}
}
private void verifyDDLQualifiers(String sql) {
// Here, to simplify assertions, we assume:
// - that all entity types have a table name identical to the entity name
// - that all association tables have a name prefixed with the name of their owning entity type
// - that all association tables have the same explicit catalog and schema, if any, as their owning entity type
// - that all ID generator tables/sequences have a name prefixed with the name of their owning entity type
verifyOnlyQualifier( sql, SqlType.DDL, EntityWithExplicitQualifiers.NAME, expectedExplicitQualifier() );
verifyOnlyQualifier( sql, SqlType.DDL, EntityWithDefaultQualifiers.NAME, expectedDefaultQualifier() );
verifyOnlyQualifier( sql, SqlType.DDL, EntityWithOrmXmlImplicitFileLevelQualifiers.NAME, expectedImplicitFileLevelQualifier() );
verifyOnlyQualifier( sql, SqlType.DDL, EntityWithHbmXmlImplicitFileLevelQualifiers.NAME, expectedImplicitFileLevelQualifier() );
verifyOnlyQualifier( sql, SqlType.DDL, EntityWithOrmXmlNoFileLevelQualifiers.NAME, expectedDefaultQualifier() );
verifyOnlyQualifier( sql, SqlType.DDL, EntityWithHbmXmlNoFileLevelQualifiers.NAME, expectedDefaultQualifier() );
verifyOnlyQualifier( sql, SqlType.DDL, EntityWithJoinedInheritanceWithDefaultQualifiers.NAME, expectedDefaultQualifier() );
verifyOnlyQualifier( sql, SqlType.DDL, EntityWithJoinedInheritanceWithDefaultQualifiersSubclass.NAME, expectedDefaultQualifier() );
verifyOnlyQualifier( sql, SqlType.DDL, EntityWithJoinedInheritanceWithExplicitQualifiers.NAME, expectedExplicitQualifier() );
verifyOnlyQualifier( sql, SqlType.DDL, EntityWithJoinedInheritanceWithExplicitQualifiersSubclass.NAME, expectedExplicitQualifier() );
verifyOnlyQualifier( sql, SqlType.DDL, EntityWithTablePerClassInheritanceWithDefaultQualifiers.NAME, expectedDefaultQualifier() );
verifyOnlyQualifier( sql, SqlType.DDL, EntityWithTablePerClassInheritanceWithDefaultQualifiersSubclass.NAME, expectedDefaultQualifier() );
verifyOnlyQualifier( sql, SqlType.DDL, EntityWithTablePerClassInheritanceWithExplicitQualifiers.NAME, expectedExplicitQualifier() );
verifyOnlyQualifier( sql, SqlType.DDL, EntityWithTablePerClassInheritanceWithExplicitQualifiersSubclass.NAME, expectedExplicitQualifier() );
verifyOnlyQualifier( sql, SqlType.DDL, EntityWithDefaultQualifiersWithCustomSql.NAME, expectedDefaultQualifier() );
verifyOnlyQualifier( sql, SqlType.DDL, EntityWithDefaultQualifiersWithIdentityGenerator.NAME, expectedDefaultQualifier() );
verifyOnlyQualifier( sql, SqlType.DDL, EntityWithExplicitQualifiersWithIdentityGenerator.NAME, expectedExplicitQualifier() );
verifyOnlyQualifier( sql, SqlType.DDL, EntityWithDefaultQualifiersWithTableGenerator.NAME, expectedDefaultQualifier() );
verifyOnlyQualifier( sql, SqlType.DDL, EntityWithExplicitQualifiersWithTableGenerator.NAME, expectedExplicitQualifier() );
verifyOnlyQualifier( sql, SqlType.DDL, EntityWithDefaultQualifiersWithSequenceGenerator.NAME, expectedDefaultQualifier() );
verifyOnlyQualifier( sql, SqlType.DDL, EntityWithExplicitQualifiersWithSequenceGenerator.NAME, expectedExplicitQualifier() );
verifyOnlyQualifier( sql, SqlType.DDL, EntityWithDefaultQualifiersWithIncrementGenerator.NAME, expectedDefaultQualifier() );
verifyOnlyQualifier( sql, SqlType.DDL, EntityWithExplicitQualifiersWithIncrementGenerator.NAME, expectedExplicitQualifier() );
verifyOnlyQualifier( sql, SqlType.DDL, EntityWithDefaultQualifiersWithEnhancedSequenceGenerator.NAME, expectedDefaultQualifier() );
verifyOnlyQualifier( sql, SqlType.DDL, EntityWithExplicitQualifiersWithEnhancedSequenceGenerator.NAME, expectedExplicitQualifier() );
if ( dbSupportsCatalogs && options.expectedDefaultCatalog != null ) {
verifyOnlyQualifier( sql, SqlType.DDL, "catalogPrefixedAuxObject",
expectedQualifier( options.expectedDefaultCatalog, null ) );
}
if ( dbSupportsSchemas && options.expectedDefaultSchema != null ) {
verifyOnlyQualifier( sql, SqlType.DDL, "schemaPrefixedAuxObject",
expectedQualifier( null, options.expectedDefaultSchema ) );
}
}
private
|
no
|
java
|
spring-projects__spring-framework
|
spring-core/src/testFixtures/java/org/springframework/core/testfixture/io/buffer/AbstractDataBufferAllocatingTests.java
|
{
"start": 2176,
"end": 2397
}
|
class ____ tests that read or write data buffers with an extension to check
* that allocated buffers have been released.
*
* @author Arjen Poutsma
* @author Rossen Stoyanchev
* @author Sam Brannen
*/
public abstract
|
for
|
java
|
netty__netty
|
microbench/src/main/java/io/netty/microbenchmark/common/IntObjectHashMapBenchmark.java
|
{
"start": 3710,
"end": 4638
}
|
class ____ extends Environment {
private final Int2ObjectHashMap<Long> map = new Int2ObjectHashMap<Long>();
AgronaEnvironment() {
for (int key : keys) {
map.put(key, VALUE);
}
}
@Override
void put(Blackhole bh) {
Int2ObjectHashMap<Long> map = new Int2ObjectHashMap<Long>();
for (int key : keys) {
bh.consume(map.put(key, VALUE));
}
}
@Override
void lookup(Blackhole bh) {
for (int key : keys) {
bh.consume(map.get(key));
}
}
@Override
void remove(Blackhole bh) {
Int2ObjectHashMap<Long> copy = new Int2ObjectHashMap<Long>();
copy.putAll(map);
for (int key : keys) {
bh.consume(copy.remove(key));
}
}
}
private
|
AgronaEnvironment
|
java
|
micronaut-projects__micronaut-core
|
test-suite/src/test/java/io/micronaut/docs/annotation/headers/HeaderSpec.java
|
{
"start": 1038,
"end": 1569
}
|
class ____ {
@Test
void testSenderHeaders() {
Map<String, Object> config = Collections.singletonMap(
"pet.client.id", "11"
);
try (EmbeddedServer embeddedServer = ApplicationContext.run(EmbeddedServer.class, config)) {
PetClient client = embeddedServer.getApplicationContext().getBean(PetClient.class);
Pet pet = Mono.from(client.get("Fred")).block();
assertNotNull(pet);
assertEquals(11, pet.getAge());
}
}
}
|
HeaderSpec
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/dispatcher/ConfigurationNotAllowedMessage.java
|
{
"start": 1199,
"end": 3139
}
|
class ____ {
private ConfigurationNotAllowedMessage() {}
public static String ofConfigurationAdded(String configKey, String configValue) {
return String.format("Configuration %s:%s not allowed.", configKey, configValue);
}
public static String ofConfigurationRemoved(String configKey, String configValue) {
return String.format("Configuration %s:%s was removed.", configKey, configValue);
}
public static String ofConfigurationChanged(String configKey, ValueDifference<String> change) {
return String.format(
"Configuration %s was changed from %s to %s.",
configKey, change.leftValue(), change.rightValue());
}
public static String ofConfigurationObjectAdded(
String configurationObject, String configKey, String configValue) {
return String.format(
"Configuration %s:%s not allowed in the configuration object %s.",
configKey, configValue, configurationObject);
}
public static String ofConfigurationObjectChanged(
String configurationObject, String configKey, ValueDifference<String> change) {
return String.format(
"Configuration %s was changed from %s to %s in the configuration object %s.",
configKey, change.leftValue(), change.rightValue(), configurationObject);
}
public static String ofConfigurationObjectRemoved(
String configurationObject, String configKey, String configValue) {
return String.format(
"Configuration %s:%s was removed from the configuration object %s.",
configKey, configValue, configurationObject);
}
public static String ofConfigurationObjectSetterUsed(
String configurationObject, String setter) {
return String.format("Setter %s#%s has been used", configurationObject, setter);
}
}
|
ConfigurationNotAllowedMessage
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerAllocator.java
|
{
"start": 2389,
"end": 2547
}
|
class ____ Opportunistic container allocations, that provides
* common functions required for Opportunistic container allocation.
* </p>
*/
public abstract
|
for
|
java
|
spring-projects__spring-boot
|
documentation/spring-boot-docs/src/main/java/org/springframework/boot/docs/web/servlet/springmvc/errorhandling/MyErrorBody.java
|
{
"start": 702,
"end": 771
}
|
class ____ {
MyErrorBody(int value, String message) {
}
}
|
MyErrorBody
|
java
|
apache__flink
|
flink-core/src/test/java/org/apache/flink/api/common/typeutils/base/DateComparatorTest.java
|
{
"start": 1097,
"end": 1917
}
|
class ____ extends ComparatorTestBase<Date> {
@Override
protected TypeComparator<Date> createComparator(boolean ascending) {
return new DateComparator(ascending);
}
@Override
protected TypeSerializer<Date> createSerializer() {
return new DateSerializer();
}
@Override
protected Date[] getSortedTestData() {
Random rnd = new Random(874597969123412338L);
long rndLong = rnd.nextLong();
if (rndLong < 0) {
rndLong = -rndLong;
}
if (rndLong == Long.MAX_VALUE) {
rndLong -= 3;
}
if (rndLong <= 2) {
rndLong += 3;
}
return new Date[] {
new Date(0L), new Date(1L), new Date(2L), new Date(rndLong), new Date(Long.MAX_VALUE)
};
}
}
|
DateComparatorTest
|
java
|
apache__spark
|
common/unsafe/src/main/java/org/apache/spark/sql/catalyst/util/CollationSupport.java
|
{
"start": 1367,
"end": 2877
}
|
class ____ {
public static UTF8String[] exec(final UTF8String s, UTF8String d, final int collationId) {
CollationFactory.Collation collation = CollationFactory.fetchCollation(collationId);
if (collation.supportsSpaceTrimming) {
d = CollationFactory.applyTrimmingPolicy(d, collationId);
}
if (collation.isUtf8BinaryType) {
return execBinary(s, d);
} else if (collation.isUtf8LcaseType) {
return execLowercase(s, d);
} else {
return execICU(s, d, collationId);
}
}
public static String genCode(final String s, final String d, final int collationId) {
String expr = "CollationSupport.StringSplitSQL.exec";
if (collationId == CollationFactory.UTF8_BINARY_COLLATION_ID) {
return String.format(expr + "Binary(%s, %s)", s, d);
} else {
return String.format(expr + "(%s, %s, %d)", s, d, collationId);
}
}
public static UTF8String[] execBinary(final UTF8String string, final UTF8String delimiter) {
return string.splitSQL(delimiter, -1);
}
public static UTF8String[] execLowercase(final UTF8String string, final UTF8String delimiter) {
return CollationAwareUTF8String.lowercaseSplitSQL(string, delimiter, -1);
}
public static UTF8String[] execICU(final UTF8String string, final UTF8String delimiter,
final int collationId) {
return CollationAwareUTF8String.icuSplitSQL(string, delimiter, -1, collationId);
}
}
public static
|
StringSplitSQL
|
java
|
quarkusio__quarkus
|
integration-tests/cache/src/test/java/io/quarkus/it/cache/CaffeineTestCase.java
|
{
"start": 218,
"end": 445
}
|
class ____ {
@Test
public void test() {
given().when().get("/caffeine/hasLoadAll").then().statusCode(200).body(
"loader", is(false),
"bulkLoader", is(true));
}
}
|
CaffeineTestCase
|
java
|
micronaut-projects__micronaut-core
|
http-client/src/test/groovy/io/micronaut/http/client/docs/basics/HelloController.java
|
{
"start": 1274,
"end": 2281
}
|
class ____ {
private final HttpClient httpClient;
public HelloController(@Client("/endpoint") HttpClient httpClient) {
this.httpClient = httpClient;
}
// tag::nonblocking[]
@Get("/hello/{name}")
@SingleResult
Publisher<String> hello(String name) { // <1>
return Mono.from(httpClient.retrieve(GET("/hello/" + name))); // <2>
}
// end::nonblocking[]
@Get("/endpoint/hello/{name}")
String helloEndpoint(String name) {
return "Hello " + name;
}
// tag::json[]
@Get("/greet/{name}")
Message greet(String name) {
return new Message("Hello " + name);
}
// end::json[]
@Post("/greet")
@Status(HttpStatus.CREATED)
Message echo(@Body Message message) {
return message;
}
@Post(value = "/hello", consumes = MediaType.TEXT_PLAIN, produces = MediaType.TEXT_PLAIN)
@Status(HttpStatus.CREATED)
String echoHello(@Body String message) {
return message;
}
}
|
HelloController
|
java
|
apache__commons-lang
|
src/test/java/org/apache/commons/lang3/builder/ReflectionDiffBuilderTest.java
|
{
"start": 1759,
"end": 1955
}
|
class ____ {
private /* not final might not matter for the test. */ float value;
FloatWrapper(final float a) {
value = a;
}
}
private static
|
FloatWrapper
|
java
|
FasterXML__jackson-core
|
src/test/java/tools/jackson/core/unittest/read/loc/LocationOfError1180Test.java
|
{
"start": 6484,
"end": 7186
}
|
class ____
{
InvalidJson(String name, String input, int byteOffset, int charOffset,
int lineNr, int columnNr)
{
_name = name;
this.input = input;
this.byteOffset = byteOffset;
this.charOffset = charOffset;
this.lineNr = lineNr;
this.columnNr = columnNr;
}
@Override
public String toString()
{
return _name;
}
protected final String _name;
public final String input;
public final int byteOffset;
public final int charOffset;
public final int lineNr;
public final int columnNr;
}
}
|
InvalidJson
|
java
|
micronaut-projects__micronaut-core
|
core/src/main/java/io/micronaut/core/graal/GraalReflectionConfigurer.java
|
{
"start": 7715,
"end": 7785
}
|
class ____ name.
* @param name The name
* @return The
|
by
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/RobotFrameworkEndpointBuilderFactory.java
|
{
"start": 121162,
"end": 124431
}
|
interface ____ {
/**
* Robot Framework (camel-robotframework)
* Pass camel exchanges to acceptance test written in Robot DSL.
*
* Category: testing
* Since: 3.0
* Maven coordinates: org.apache.camel:camel-robotframework
*
* @return the dsl builder for the headers' name.
*/
default RobotFrameworkHeaderNameBuilder robotframework() {
return RobotFrameworkHeaderNameBuilder.INSTANCE;
}
/**
* Robot Framework (camel-robotframework)
* Pass camel exchanges to acceptance test written in Robot DSL.
*
* Category: testing
* Since: 3.0
* Maven coordinates: org.apache.camel:camel-robotframework
*
* Syntax: <code>robotframework:resourceUri</code>
*
* Path parameter: resourceUri (required)
* Path to the resource. You can prefix with: classpath, file, http,
* ref, or bean. classpath, file and http loads the resource using these
* protocols (classpath is default). ref will lookup the resource in the
* registry. bean will call a method on a bean to be used as the
* resource. For bean you can specify the method name after dot, eg
* bean:myBean.myMethod.
* This option can also be loaded from an existing file, by prefixing
* with file: or classpath: followed by the location of the file.
*
* @param path resourceUri
* @return the dsl builder
*/
default RobotFrameworkEndpointBuilder robotframework(String path) {
return RobotFrameworkEndpointBuilderFactory.endpointBuilder("robotframework", path);
}
/**
* Robot Framework (camel-robotframework)
* Pass camel exchanges to acceptance test written in Robot DSL.
*
* Category: testing
* Since: 3.0
* Maven coordinates: org.apache.camel:camel-robotframework
*
* Syntax: <code>robotframework:resourceUri</code>
*
* Path parameter: resourceUri (required)
* Path to the resource. You can prefix with: classpath, file, http,
* ref, or bean. classpath, file and http loads the resource using these
* protocols (classpath is default). ref will lookup the resource in the
* registry. bean will call a method on a bean to be used as the
* resource. For bean you can specify the method name after dot, eg
* bean:myBean.myMethod.
* This option can also be loaded from an existing file, by prefixing
* with file: or classpath: followed by the location of the file.
*
* @param componentName to use a custom component name for the endpoint
* instead of the default name
* @param path resourceUri
* @return the dsl builder
*/
default RobotFrameworkEndpointBuilder robotframework(String componentName, String path) {
return RobotFrameworkEndpointBuilderFactory.endpointBuilder(componentName, path);
}
}
/**
* The builder of headers' name for the Robot Framework component.
*/
public static
|
RobotFrameworkBuilders
|
java
|
quarkusio__quarkus
|
independent-projects/arc/processor/src/main/java/io/quarkus/arc/processor/AnnotationsTransformer.java
|
{
"start": 7944,
"end": 15575
}
|
class ____<THIS extends AbstractBuilder<THIS>>
implements Predicate<Kind> {
protected int priority;
protected Predicate<TransformationContext> predicate;
private AbstractBuilder() {
this.priority = DEFAULT_PRIORITY;
}
/**
*
* @param priority
* @return self
*/
public THIS priority(int priority) {
this.priority = priority;
return self();
}
/**
* {@link TransformationContext#getAnnotations()} must contain ALL of the given annotations.
*
* @param annotationNames
* @return self
*/
public THIS whenContainsAll(List<DotName> annotationNames) {
return when(context -> {
for (DotName annotationName : annotationNames) {
if (!Annotations.contains(context.getAnnotations(), annotationName)) {
return false;
}
}
return true;
});
}
/**
* {@link TransformationContext#getAnnotations()} must contain ALL of the given annotations.
*
* @param annotationNames
* @return self
*/
public THIS whenContainsAll(DotName... annotationNames) {
return whenContainsAll(List.of(annotationNames));
}
/**
* {@link TransformationContext#getAnnotations()} must contain ALL of the given annotations.
*
* @param annotationNames
* @return self
*/
@SafeVarargs
public final THIS whenContainsAll(Class<? extends Annotation>... annotationNames) {
return whenContainsAll(
Arrays.stream(annotationNames).map(a -> DotName.createSimple(a.getName())).collect(Collectors.toList()));
}
/**
* {@link TransformationContext#getAnnotations()} must contain ANY of the given annotations.
*
* @param annotationNames
* @return self
*/
public THIS whenContainsAny(List<DotName> annotationNames) {
return when(context -> Annotations.containsAny(context.getAnnotations(), annotationNames));
}
/**
* {@link TransformationContext#getAnnotations()} must contain ANY of the given annotations.
*
* @param annotationNames
* @return self
*/
public THIS whenContainsAny(DotName... annotationNames) {
return whenContainsAny(List.of(annotationNames));
}
/**
* {@link TransformationContext#getAnnotations()} must contain ANY of the given annotations.
*
* @param annotationNames
* @return self
*/
@SafeVarargs
public final THIS whenContainsAny(Class<? extends Annotation>... annotationNames) {
return whenContainsAny(
Arrays.stream(annotationNames).map(a -> DotName.createSimple(a.getName())).collect(Collectors.toList()));
}
/**
* {@link TransformationContext#getAnnotations()} must NOT contain any of the given annotations.
*
* @param annotationNames
* @return self
*/
public THIS whenContainsNone(List<DotName> annotationNames) {
return when(context -> !Annotations.containsAny(context.getAnnotations(), annotationNames));
}
/**
* {@link TransformationContext#getAnnotations()} must NOT contain any of the given annotations.
*
* @param annotationNames
* @return self
*/
public THIS whenContainsNone(DotName... annotationNames) {
return whenContainsNone(List.of(annotationNames));
}
/**
* {@link TransformationContext#getAnnotations()} must NOT contain any of the given annotations.
*
* @param annotationNames
* @return self
*/
@SafeVarargs
public final THIS whenContainsNone(Class<? extends Annotation>... annotationNames) {
return whenContainsNone(
Arrays.stream(annotationNames).map(a -> DotName.createSimple(a.getName())).collect(Collectors.toList()));
}
/**
* The transformation logic is only performed if the given predicate is evaluated to true. Multiple predicates are
* logically-ANDed.
*
* @param predicate
* @return self
*/
public THIS when(Predicate<TransformationContext> when) {
if (predicate == null) {
predicate = when;
} else {
predicate = predicate.and(when);
}
return self();
}
/**
* If all conditions are met then apply the transformation logic.
* <p>
* Unlike in {@link #transform(Consumer)} the transformation is automatically applied, i.e. a {@link Transformation}
* is created and the {@link Transformation#done()} method is called automatically.
*
* @param consumer
* @return a new annotation transformer
*/
public AnnotationsTransformer thenTransform(Consumer<Transformation> consumer) {
Consumer<Transformation> transform = Objects.requireNonNull(consumer);
return transform(new Consumer<TransformationContext>() {
@Override
public void accept(TransformationContext context) {
Transformation transformation = context.transform();
transform.accept(transformation);
transformation.done();
}
});
}
/**
* The transformation logic is performed only if all conditions are met.
* <p>
* This method should be used if you need to access the transformation context directly. Otherwise, the
* {@link #thenTransform(Consumer)} is more convenient.
*
* @param consumer
* @return a new annotation transformer
* @see #thenTransform(Consumer)
*/
public AnnotationsTransformer transform(Consumer<TransformationContext> consumer) {
int priority = this.priority;
Consumer<TransformationContext> transform = Objects.requireNonNull(consumer);
Predicate<TransformationContext> predicate = this.predicate;
return new AnnotationsTransformer() {
@Override
public int getPriority() {
return priority;
}
@Override
public boolean appliesTo(Kind kind) {
return test(kind);
}
@Override
public void transform(TransformationContext context) {
if (predicate == null || predicate.test(context)) {
transform.accept(context);
}
}
};
}
@SuppressWarnings("unchecked")
protected THIS self() {
return (THIS) this;
}
protected <TARGET> Predicate<TransformationContext> wrap(Predicate<TARGET> condition,
Function<TransformationContext, TARGET> extractor) {
return new Predicate<TransformationContext>() {
@Override
public boolean test(TransformationContext ctx) {
return condition.test(extractor.apply(ctx));
}
};
}
}
}
|
AbstractBuilder
|
java
|
quarkusio__quarkus
|
integration-tests/jaxb/src/main/java/io/quarkus/it/jaxb/BookWithParent.java
|
{
"start": 237,
"end": 576
}
|
class ____ extends BookIBANField {
@XmlElement
private String title;
public BookWithParent() {
}
public BookWithParent(String title) {
this.title = title;
}
public String getTitle() {
return title;
}
public void setTitle(String title) {
this.title = title;
}
}
|
BookWithParent
|
java
|
FasterXML__jackson-databind
|
src/main/java/tools/jackson/databind/jsonFormatVisitors/JsonAnyFormatVisitor.java
|
{
"start": 59,
"end": 327
}
|
interface ____
{
/**
* Default "empty" implementation, useful as the base to start on;
* especially as it is guaranteed to implement all the method
* of the interface, even if new methods are getting added.
*/
public static
|
JsonAnyFormatVisitor
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/lazytoone/LazyToOneTest.java
|
{
"start": 855,
"end": 2479
}
|
class ____ {
@BeforeEach
protected void prepareTest(SessionFactoryScope scope) throws Exception {
scope.inTransaction(
session -> {
final Airport austin = new Airport( 1, "AUS" );
final Airport baltimore = new Airport( 2, "BWI" );
final Flight flight1 = new Flight( 1, "ABC-123", austin, baltimore );
final Flight flight2 = new Flight( 2, "ABC-987", baltimore, austin );
session.persist( austin );
session.persist( baltimore );
session.persist( flight1 );
session.persist( flight2 );
}
);
}
@AfterEach
protected void cleanupTestData(SessionFactoryScope scope) throws Exception {
scope.dropData();
}
@Test
public void testNonEnhanced(SessionFactoryScope scope) {
final StatisticsImplementor statistics = scope.getSessionFactory().getStatistics();
statistics.clear();
scope.inTransaction(
session -> {
final Flight flight1 = session.byId( Flight.class ).load( 1 );
assertThat( statistics.getPrepareStatementCount(), is( 1L ) );
assertThat( Hibernate.isInitialized( flight1 ), is( true ) );
assertThat( Hibernate.isPropertyInitialized( flight1, "origination" ), is( true ) );
assertThat( Hibernate.isInitialized( flight1.getOrigination() ), is( false ) );
assertThat( flight1.getOrigination(), instanceOf( HibernateProxy.class ) );
assertThat( Hibernate.isPropertyInitialized( flight1, "destination" ), is( true ) );
assertThat( Hibernate.isInitialized( flight1.getDestination() ), is( false ) );
assertThat( flight1.getDestination(), instanceOf( HibernateProxy.class ) );
}
);
}
}
|
LazyToOneTest
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/AuthorizationStateTests.java
|
{
"start": 717,
"end": 3116
}
|
class ____ extends AbstractSerializingTransformTestCase<AuthorizationState> {
public static AuthorizationState randomAuthorizationState() {
return new AuthorizationState(
randomNonNegativeLong(),
randomFrom(HealthStatus.values()),
randomBoolean() ? null : randomAlphaOfLengthBetween(1, 100)
);
}
@Override
protected Writeable.Reader<AuthorizationState> instanceReader() {
return AuthorizationState::new;
}
@Override
protected AuthorizationState createTestInstance() {
return randomAuthorizationState();
}
@Override
protected AuthorizationState mutateInstance(AuthorizationState instance) {
int statusCount = HealthStatus.values().length;
assert statusCount > 1;
return new AuthorizationState(
instance.getTimestamp().toEpochMilli() + 1,
HealthStatus.values()[(instance.getStatus().ordinal() + 1) % statusCount],
instance.getLastAuthError() == null ? randomAlphaOfLengthBetween(1, 100) : null
);
}
@Override
protected AuthorizationState doParseInstance(XContentParser parser) throws IOException {
return AuthorizationState.PARSER.apply(parser, null);
}
public void testGreen() {
AuthorizationState authState = AuthorizationState.green();
assertThat(authState.getStatus(), is(equalTo(HealthStatus.GREEN)));
assertThat(authState.getLastAuthError(), is(nullValue()));
}
public void testRed() {
Exception e = new Exception("some exception");
AuthorizationState authState = AuthorizationState.red(e);
assertThat(authState.getStatus(), is(equalTo(HealthStatus.RED)));
assertThat(authState.getLastAuthError(), is(equalTo("some exception")));
authState = AuthorizationState.red(null);
assertThat(authState.getStatus(), is(equalTo(HealthStatus.RED)));
assertThat(authState.getLastAuthError(), is(equalTo("unknown exception")));
}
public void testIsNullOrGreen() {
assertThat(AuthorizationState.isNullOrGreen(null), is(true));
assertThat(AuthorizationState.isNullOrGreen(AuthorizationState.green()), is(true));
Exception e = new Exception("some exception");
assertThat(AuthorizationState.isNullOrGreen(AuthorizationState.red(e)), is(false));
}
}
|
AuthorizationStateTests
|
java
|
spring-projects__spring-framework
|
spring-expression/src/main/java/org/springframework/expression/spel/support/StandardTypeConverter.java
|
{
"start": 1494,
"end": 3244
}
|
class ____ implements TypeConverter {
private final Supplier<ConversionService> conversionService;
/**
* Create a StandardTypeConverter for the default ConversionService.
* @see DefaultConversionService#getSharedInstance()
*/
public StandardTypeConverter() {
this.conversionService = DefaultConversionService::getSharedInstance;
}
/**
* Create a StandardTypeConverter for the given ConversionService.
* @param conversionService the ConversionService to delegate to
*/
public StandardTypeConverter(ConversionService conversionService) {
Assert.notNull(conversionService, "ConversionService must not be null");
this.conversionService = () -> conversionService;
}
/**
* Create a StandardTypeConverter for the given ConversionService.
* @param conversionService a Supplier for the ConversionService to delegate to
* @since 5.3.11
*/
public StandardTypeConverter(Supplier<ConversionService> conversionService) {
Assert.notNull(conversionService, "Supplier must not be null");
this.conversionService = conversionService;
}
@Override
public boolean canConvert(@Nullable TypeDescriptor sourceType, TypeDescriptor targetType) {
return this.conversionService.get().canConvert(sourceType, targetType);
}
@Override
public @Nullable Object convertValue(@Nullable Object value, @Nullable TypeDescriptor sourceType, TypeDescriptor targetType) {
try {
return this.conversionService.get().convert(value, sourceType, targetType);
}
catch (ConversionException ex) {
throw new SpelEvaluationException(ex, SpelMessage.TYPE_CONVERSION_ERROR,
(sourceType != null ? sourceType.toString() : (value != null ? value.getClass().getName() : "null")),
targetType.toString());
}
}
}
|
StandardTypeConverter
|
java
|
apache__flink
|
flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/util/SegmentsUtil.java
|
{
"start": 1181,
"end": 36013
}
|
class ____ {
/** Constant that flags the byte order. */
public static final boolean LITTLE_ENDIAN = ByteOrder.nativeOrder() == ByteOrder.LITTLE_ENDIAN;
private static final int ADDRESS_BITS_PER_WORD = 3;
private static final int BIT_BYTE_INDEX_MASK = 7;
/**
* SQL execution threads is limited, not too many, so it can bear the overhead of 64K per
* thread.
*/
private static final int MAX_BYTES_LENGTH = 1024 * 64;
private static final int MAX_CHARS_LENGTH = 1024 * 32;
private static final int BYTE_ARRAY_BASE_OFFSET = UNSAFE.arrayBaseOffset(byte[].class);
private static final ThreadLocal<byte[]> BYTES_LOCAL = new ThreadLocal<>();
private static final ThreadLocal<char[]> CHARS_LOCAL = new ThreadLocal<>();
/**
* Allocate bytes that is only for temporary usage, it should not be stored in somewhere else.
* Use a {@link ThreadLocal} to reuse bytes to avoid overhead of byte[] new and gc.
*
* <p>If there are methods that can only accept a byte[], instead of a MemorySegment[]
* parameter, we can allocate a reuse bytes and copy the MemorySegment data to byte[], then call
* the method. Such as String deserialization.
*/
public static byte[] allocateReuseBytes(int length) {
byte[] bytes = BYTES_LOCAL.get();
if (bytes == null) {
if (length <= MAX_BYTES_LENGTH) {
bytes = new byte[MAX_BYTES_LENGTH];
BYTES_LOCAL.set(bytes);
} else {
bytes = new byte[length];
}
} else if (bytes.length < length) {
bytes = new byte[length];
}
return bytes;
}
public static char[] allocateReuseChars(int length) {
char[] chars = CHARS_LOCAL.get();
if (chars == null) {
if (length <= MAX_CHARS_LENGTH) {
chars = new char[MAX_CHARS_LENGTH];
CHARS_LOCAL.set(chars);
} else {
chars = new char[length];
}
} else if (chars.length < length) {
chars = new char[length];
}
return chars;
}
/**
* Copy segments to a new byte[].
*
* @param segments Source segments.
* @param offset Source segments offset.
* @param numBytes the number bytes to copy.
*/
public static byte[] copyToBytes(MemorySegment[] segments, int offset, int numBytes) {
return copyToBytes(segments, offset, new byte[numBytes], 0, numBytes);
}
/**
* Copy segments to target byte[].
*
* @param segments Source segments.
* @param offset Source segments offset.
* @param bytes target byte[].
* @param bytesOffset target byte[] offset.
* @param numBytes the number bytes to copy.
*/
public static byte[] copyToBytes(
MemorySegment[] segments, int offset, byte[] bytes, int bytesOffset, int numBytes) {
if (inFirstSegment(segments, offset, numBytes)) {
segments[0].get(offset, bytes, bytesOffset, numBytes);
} else {
copyMultiSegmentsToBytes(segments, offset, bytes, bytesOffset, numBytes);
}
return bytes;
}
public static void copyMultiSegmentsToBytes(
MemorySegment[] segments, int offset, byte[] bytes, int bytesOffset, int numBytes) {
int remainSize = numBytes;
for (MemorySegment segment : segments) {
int remain = segment.size() - offset;
if (remain > 0) {
int nCopy = Math.min(remain, remainSize);
segment.get(offset, bytes, numBytes - remainSize + bytesOffset, nCopy);
remainSize -= nCopy;
// next new segment.
offset = 0;
if (remainSize == 0) {
return;
}
} else {
// remain is negative, let's advance to next segment
// now the offset = offset - segmentSize (-remain)
offset = -remain;
}
}
}
/**
* Copy segments to target unsafe pointer.
*
* @param segments Source segments.
* @param offset The position where the bytes are started to be read from these memory segments.
* @param target The unsafe memory to copy the bytes to.
* @param pointer The position in the target unsafe memory to copy the chunk to.
* @param numBytes the number bytes to copy.
*/
public static void copyToUnsafe(
MemorySegment[] segments, int offset, Object target, int pointer, int numBytes) {
if (inFirstSegment(segments, offset, numBytes)) {
segments[0].copyToUnsafe(offset, target, pointer, numBytes);
} else {
copyMultiSegmentsToUnsafe(segments, offset, target, pointer, numBytes);
}
}
private static void copyMultiSegmentsToUnsafe(
MemorySegment[] segments, int offset, Object target, int pointer, int numBytes) {
int remainSize = numBytes;
for (MemorySegment segment : segments) {
int remain = segment.size() - offset;
if (remain > 0) {
int nCopy = Math.min(remain, remainSize);
segment.copyToUnsafe(offset, target, numBytes - remainSize + pointer, nCopy);
remainSize -= nCopy;
// next new segment.
offset = 0;
if (remainSize == 0) {
return;
}
} else {
// remain is negative, let's advance to next segment
// now the offset = offset - segmentSize (-remain)
offset = -remain;
}
}
}
/**
* Copy bytes of segments to output view. Note: It just copies the data in, not include the
* length.
*
* @param segments source segments
* @param offset offset for segments
* @param sizeInBytes size in bytes
* @param target target output view
*/
public static void copyToView(
MemorySegment[] segments, int offset, int sizeInBytes, DataOutputView target)
throws IOException {
for (MemorySegment sourceSegment : segments) {
int curSegRemain = sourceSegment.size() - offset;
if (curSegRemain > 0) {
int copySize = Math.min(curSegRemain, sizeInBytes);
byte[] bytes = allocateReuseBytes(copySize);
sourceSegment.get(offset, bytes, 0, copySize);
target.write(bytes, 0, copySize);
sizeInBytes -= copySize;
offset = 0;
} else {
offset -= sourceSegment.size();
}
if (sizeInBytes == 0) {
return;
}
}
if (sizeInBytes != 0) {
throw new RuntimeException(
"No copy finished, this should be a bug, "
+ "The remaining length is: "
+ sizeInBytes);
}
}
/**
* Copy target segments from source byte[].
*
* @param segments target segments.
* @param offset target segments offset.
* @param bytes source byte[].
* @param bytesOffset source byte[] offset.
* @param numBytes the number bytes to copy.
*/
public static void copyFromBytes(
MemorySegment[] segments, int offset, byte[] bytes, int bytesOffset, int numBytes) {
if (segments.length == 1) {
segments[0].put(offset, bytes, bytesOffset, numBytes);
} else {
copyMultiSegmentsFromBytes(segments, offset, bytes, bytesOffset, numBytes);
}
}
private static void copyMultiSegmentsFromBytes(
MemorySegment[] segments, int offset, byte[] bytes, int bytesOffset, int numBytes) {
int remainSize = numBytes;
for (MemorySegment segment : segments) {
int remain = segment.size() - offset;
if (remain > 0) {
int nCopy = Math.min(remain, remainSize);
segment.put(offset, bytes, numBytes - remainSize + bytesOffset, nCopy);
remainSize -= nCopy;
// next new segment.
offset = 0;
if (remainSize == 0) {
return;
}
} else {
// remain is negative, let's advance to next segment
// now the offset = offset - segmentSize (-remain)
offset = -remain;
}
}
}
/** Maybe not copied, if want copy, please use copyTo. */
public static byte[] getBytes(MemorySegment[] segments, int baseOffset, int sizeInBytes) {
// avoid copy if `base` is `byte[]`
if (segments.length == 1) {
byte[] heapMemory = segments[0].getHeapMemory();
if (baseOffset == 0 && heapMemory != null && heapMemory.length == sizeInBytes) {
return heapMemory;
} else {
byte[] bytes = new byte[sizeInBytes];
segments[0].get(baseOffset, bytes, 0, sizeInBytes);
return bytes;
}
} else {
byte[] bytes = new byte[sizeInBytes];
copyMultiSegmentsToBytes(segments, baseOffset, bytes, 0, sizeInBytes);
return bytes;
}
}
/**
* Equals two memory segments regions.
*
* @param segments1 Segments 1
* @param offset1 Offset of segments1 to start equaling
* @param segments2 Segments 2
* @param offset2 Offset of segments2 to start equaling
* @param len Length of the equaled memory region
* @return true if equal, false otherwise
*/
public static boolean equals(
MemorySegment[] segments1,
int offset1,
MemorySegment[] segments2,
int offset2,
int len) {
if (inFirstSegment(segments1, offset1, len) && inFirstSegment(segments2, offset2, len)) {
return segments1[0].equalTo(segments2[0], offset1, offset2, len);
} else {
return equalsMultiSegments(segments1, offset1, segments2, offset2, len);
}
}
@VisibleForTesting
static boolean equalsMultiSegments(
MemorySegment[] segments1,
int offset1,
MemorySegment[] segments2,
int offset2,
int len) {
if (len == 0) {
// quick way and avoid segSize is zero.
return true;
}
int segSize1 = segments1[0].size();
int segSize2 = segments2[0].size();
// find first segIndex and segOffset of segments.
int segIndex1 = offset1 / segSize1;
int segIndex2 = offset2 / segSize2;
int segOffset1 = offset1 - segSize1 * segIndex1; // equal to %
int segOffset2 = offset2 - segSize2 * segIndex2; // equal to %
while (len > 0) {
int equalLen = Math.min(Math.min(len, segSize1 - segOffset1), segSize2 - segOffset2);
if (!segments1[segIndex1].equalTo(
segments2[segIndex2], segOffset1, segOffset2, equalLen)) {
return false;
}
len -= equalLen;
segOffset1 += equalLen;
if (segOffset1 == segSize1) {
segOffset1 = 0;
segIndex1++;
}
segOffset2 += equalLen;
if (segOffset2 == segSize2) {
segOffset2 = 0;
segIndex2++;
}
}
return true;
}
/**
* hash segments to int, numBytes must be aligned to 4 bytes.
*
* @param segments Source segments.
* @param offset Source segments offset.
* @param numBytes the number bytes to hash.
*/
public static int hashByWords(MemorySegment[] segments, int offset, int numBytes) {
if (inFirstSegment(segments, offset, numBytes)) {
return MurmurHashUtil.hashBytesByWords(segments[0], offset, numBytes);
} else {
return hashMultiSegByWords(segments, offset, numBytes);
}
}
private static int hashMultiSegByWords(MemorySegment[] segments, int offset, int numBytes) {
byte[] bytes = allocateReuseBytes(numBytes);
copyMultiSegmentsToBytes(segments, offset, bytes, 0, numBytes);
return MurmurHashUtil.hashUnsafeBytesByWords(bytes, BYTE_ARRAY_BASE_OFFSET, numBytes);
}
/**
* hash segments to int.
*
* @param segments Source segments.
* @param offset Source segments offset.
* @param numBytes the number bytes to hash.
*/
public static int hash(MemorySegment[] segments, int offset, int numBytes) {
if (inFirstSegment(segments, offset, numBytes)) {
return MurmurHashUtil.hashBytes(segments[0], offset, numBytes);
} else {
return hashMultiSeg(segments, offset, numBytes);
}
}
private static int hashMultiSeg(MemorySegment[] segments, int offset, int numBytes) {
byte[] bytes = allocateReuseBytes(numBytes);
copyMultiSegmentsToBytes(segments, offset, bytes, 0, numBytes);
return MurmurHashUtil.hashUnsafeBytes(bytes, BYTE_ARRAY_BASE_OFFSET, numBytes);
}
/** Is it just in first MemorySegment, we use quick way to do something. */
private static boolean inFirstSegment(MemorySegment[] segments, int offset, int numBytes) {
return numBytes + offset <= segments[0].size();
}
/**
* Given a bit index, return the byte index containing it.
*
* @param bitIndex the bit index.
* @return the byte index.
*/
private static int byteIndex(int bitIndex) {
return bitIndex >>> ADDRESS_BITS_PER_WORD;
}
/**
* unset bit.
*
* @param segment target segment.
* @param baseOffset bits base offset.
* @param index bit index from base offset.
*/
public static void bitUnSet(MemorySegment segment, int baseOffset, int index) {
int offset = baseOffset + byteIndex(index);
byte current = segment.get(offset);
current &= ~(1 << (index & BIT_BYTE_INDEX_MASK));
segment.put(offset, current);
}
/**
* set bit.
*
* @param segment target segment.
* @param baseOffset bits base offset.
* @param index bit index from base offset.
*/
public static void bitSet(MemorySegment segment, int baseOffset, int index) {
int offset = baseOffset + byteIndex(index);
byte current = segment.get(offset);
current |= (1 << (index & BIT_BYTE_INDEX_MASK));
segment.put(offset, current);
}
/**
* read bit.
*
* @param segment target segment.
* @param baseOffset bits base offset.
* @param index bit index from base offset.
*/
public static boolean bitGet(MemorySegment segment, int baseOffset, int index) {
int offset = baseOffset + byteIndex(index);
byte current = segment.get(offset);
return (current & (1 << (index & BIT_BYTE_INDEX_MASK))) != 0;
}
/**
* unset bit from segments.
*
* @param segments target segments.
* @param baseOffset bits base offset.
* @param index bit index from base offset.
*/
public static void bitUnSet(MemorySegment[] segments, int baseOffset, int index) {
if (segments.length == 1) {
MemorySegment segment = segments[0];
int offset = baseOffset + byteIndex(index);
byte current = segment.get(offset);
current &= ~(1 << (index & BIT_BYTE_INDEX_MASK));
segment.put(offset, current);
} else {
bitUnSetMultiSegments(segments, baseOffset, index);
}
}
private static void bitUnSetMultiSegments(MemorySegment[] segments, int baseOffset, int index) {
int offset = baseOffset + byteIndex(index);
int segSize = segments[0].size();
int segIndex = offset / segSize;
int segOffset = offset - segIndex * segSize; // equal to %
MemorySegment segment = segments[segIndex];
byte current = segment.get(segOffset);
current &= ~(1 << (index & BIT_BYTE_INDEX_MASK));
segment.put(segOffset, current);
}
/**
* set bit from segments.
*
* @param segments target segments.
* @param baseOffset bits base offset.
* @param index bit index from base offset.
*/
public static void bitSet(MemorySegment[] segments, int baseOffset, int index) {
if (segments.length == 1) {
int offset = baseOffset + byteIndex(index);
MemorySegment segment = segments[0];
byte current = segment.get(offset);
current |= (1 << (index & BIT_BYTE_INDEX_MASK));
segment.put(offset, current);
} else {
bitSetMultiSegments(segments, baseOffset, index);
}
}
private static void bitSetMultiSegments(MemorySegment[] segments, int baseOffset, int index) {
int offset = baseOffset + byteIndex(index);
int segSize = segments[0].size();
int segIndex = offset / segSize;
int segOffset = offset - segIndex * segSize; // equal to %
MemorySegment segment = segments[segIndex];
byte current = segment.get(segOffset);
current |= (1 << (index & BIT_BYTE_INDEX_MASK));
segment.put(segOffset, current);
}
/**
* read bit from segments.
*
* @param segments target segments.
* @param baseOffset bits base offset.
* @param index bit index from base offset.
*/
public static boolean bitGet(MemorySegment[] segments, int baseOffset, int index) {
int offset = baseOffset + byteIndex(index);
byte current = getByte(segments, offset);
return (current & (1 << (index & BIT_BYTE_INDEX_MASK))) != 0;
}
/**
* get boolean from segments.
*
* @param segments target segments.
* @param offset value offset.
*/
public static boolean getBoolean(MemorySegment[] segments, int offset) {
if (inFirstSegment(segments, offset, 1)) {
return segments[0].getBoolean(offset);
} else {
return getBooleanMultiSegments(segments, offset);
}
}
private static boolean getBooleanMultiSegments(MemorySegment[] segments, int offset) {
int segSize = segments[0].size();
int segIndex = offset / segSize;
int segOffset = offset - segIndex * segSize; // equal to %
return segments[segIndex].getBoolean(segOffset);
}
/**
* set boolean from segments.
*
* @param segments target segments.
* @param offset value offset.
*/
public static void setBoolean(MemorySegment[] segments, int offset, boolean value) {
if (inFirstSegment(segments, offset, 1)) {
segments[0].putBoolean(offset, value);
} else {
setBooleanMultiSegments(segments, offset, value);
}
}
private static void setBooleanMultiSegments(
MemorySegment[] segments, int offset, boolean value) {
int segSize = segments[0].size();
int segIndex = offset / segSize;
int segOffset = offset - segIndex * segSize; // equal to %
segments[segIndex].putBoolean(segOffset, value);
}
/**
* get byte from segments.
*
* @param segments target segments.
* @param offset value offset.
*/
public static byte getByte(MemorySegment[] segments, int offset) {
if (inFirstSegment(segments, offset, 1)) {
return segments[0].get(offset);
} else {
return getByteMultiSegments(segments, offset);
}
}
private static byte getByteMultiSegments(MemorySegment[] segments, int offset) {
int segSize = segments[0].size();
int segIndex = offset / segSize;
int segOffset = offset - segIndex * segSize; // equal to %
return segments[segIndex].get(segOffset);
}
/**
* set byte from segments.
*
* @param segments target segments.
* @param offset value offset.
*/
public static void setByte(MemorySegment[] segments, int offset, byte value) {
if (inFirstSegment(segments, offset, 1)) {
segments[0].put(offset, value);
} else {
setByteMultiSegments(segments, offset, value);
}
}
private static void setByteMultiSegments(MemorySegment[] segments, int offset, byte value) {
int segSize = segments[0].size();
int segIndex = offset / segSize;
int segOffset = offset - segIndex * segSize; // equal to %
segments[segIndex].put(segOffset, value);
}
/**
* get int from segments.
*
* @param segments target segments.
* @param offset value offset.
*/
public static int getInt(MemorySegment[] segments, int offset) {
if (inFirstSegment(segments, offset, 4)) {
return segments[0].getInt(offset);
} else {
return getIntMultiSegments(segments, offset);
}
}
private static int getIntMultiSegments(MemorySegment[] segments, int offset) {
int segSize = segments[0].size();
int segIndex = offset / segSize;
int segOffset = offset - segIndex * segSize; // equal to %
if (segOffset < segSize - 3) {
return segments[segIndex].getInt(segOffset);
} else {
return getIntSlowly(segments, segSize, segIndex, segOffset);
}
}
    /**
     * Assembles an int byte-by-byte from a region that straddles a segment boundary.
     *
     * @param segments target segments.
     * @param segSize size of each segment (all segments share one size).
     * @param segNum index of the segment containing the first byte.
     * @param segOffset offset of the first byte within that segment.
     * @return the assembled int, honoring the platform byte order ({@code LITTLE_ENDIAN}).
     */
    private static int getIntSlowly(
            MemorySegment[] segments, int segSize, int segNum, int segOffset) {
        MemorySegment segment = segments[segNum];
        int ret = 0;
        for (int i = 0; i < 4; i++) {
            // Hop to the next segment when the current one is exhausted.
            if (segOffset == segSize) {
                segment = segments[++segNum];
                segOffset = 0;
            }
            // Mask to avoid sign extension of the raw byte.
            int unsignedByte = segment.get(segOffset) & 0xff;
            if (LITTLE_ENDIAN) {
                ret |= (unsignedByte << (i * 8));
            } else {
                ret |= (unsignedByte << ((3 - i) * 8));
            }
            segOffset++;
        }
        return ret;
    }
/**
* set int from segments.
*
* @param segments target segments.
* @param offset value offset.
*/
public static void setInt(MemorySegment[] segments, int offset, int value) {
if (inFirstSegment(segments, offset, 4)) {
segments[0].putInt(offset, value);
} else {
setIntMultiSegments(segments, offset, value);
}
}
private static void setIntMultiSegments(MemorySegment[] segments, int offset, int value) {
int segSize = segments[0].size();
int segIndex = offset / segSize;
int segOffset = offset - segIndex * segSize; // equal to %
if (segOffset < segSize - 3) {
segments[segIndex].putInt(segOffset, value);
} else {
setIntSlowly(segments, segSize, segIndex, segOffset, value);
}
}
    /**
     * Writes an int byte-by-byte into a region that straddles a segment boundary.
     *
     * @param segments target segments.
     * @param segSize size of each segment (all segments share one size).
     * @param segNum index of the segment containing the first byte.
     * @param segOffset offset of the first byte within that segment.
     * @param value the int to write, honoring the platform byte order ({@code LITTLE_ENDIAN}).
     */
    private static void setIntSlowly(
            MemorySegment[] segments, int segSize, int segNum, int segOffset, int value) {
        MemorySegment segment = segments[segNum];
        for (int i = 0; i < 4; i++) {
            // Hop to the next segment when the current one is exhausted.
            if (segOffset == segSize) {
                segment = segments[++segNum];
                segOffset = 0;
            }
            int unsignedByte;
            if (LITTLE_ENDIAN) {
                unsignedByte = value >> (i * 8);
            } else {
                unsignedByte = value >> ((3 - i) * 8);
            }
            // The cast keeps only the low 8 bits of the shifted value.
            segment.put(segOffset, (byte) unsignedByte);
            segOffset++;
        }
    }
/**
* get long from segments.
*
* @param segments target segments.
* @param offset value offset.
*/
public static long getLong(MemorySegment[] segments, int offset) {
if (inFirstSegment(segments, offset, 8)) {
return segments[0].getLong(offset);
} else {
return getLongMultiSegments(segments, offset);
}
}
private static long getLongMultiSegments(MemorySegment[] segments, int offset) {
int segSize = segments[0].size();
int segIndex = offset / segSize;
int segOffset = offset - segIndex * segSize; // equal to %
if (segOffset < segSize - 7) {
return segments[segIndex].getLong(segOffset);
} else {
return getLongSlowly(segments, segSize, segIndex, segOffset);
}
}
    /**
     * Assembles a long byte-by-byte from a region that straddles a segment boundary.
     *
     * @param segments target segments.
     * @param segSize size of each segment (all segments share one size).
     * @param segNum index of the segment containing the first byte.
     * @param segOffset offset of the first byte within that segment.
     * @return the assembled long, honoring the platform byte order ({@code LITTLE_ENDIAN}).
     */
    private static long getLongSlowly(
            MemorySegment[] segments, int segSize, int segNum, int segOffset) {
        MemorySegment segment = segments[segNum];
        long ret = 0;
        for (int i = 0; i < 8; i++) {
            // Hop to the next segment when the current one is exhausted.
            if (segOffset == segSize) {
                segment = segments[++segNum];
                segOffset = 0;
            }
            // Mask to avoid sign extension of the raw byte.
            long unsignedByte = segment.get(segOffset) & 0xff;
            if (LITTLE_ENDIAN) {
                ret |= (unsignedByte << (i * 8));
            } else {
                ret |= (unsignedByte << ((7 - i) * 8));
            }
            segOffset++;
        }
        return ret;
    }
/**
* set long from segments.
*
* @param segments target segments.
* @param offset value offset.
*/
public static void setLong(MemorySegment[] segments, int offset, long value) {
if (inFirstSegment(segments, offset, 8)) {
segments[0].putLong(offset, value);
} else {
setLongMultiSegments(segments, offset, value);
}
}
private static void setLongMultiSegments(MemorySegment[] segments, int offset, long value) {
int segSize = segments[0].size();
int segIndex = offset / segSize;
int segOffset = offset - segIndex * segSize; // equal to %
if (segOffset < segSize - 7) {
segments[segIndex].putLong(segOffset, value);
} else {
setLongSlowly(segments, segSize, segIndex, segOffset, value);
}
}
    /**
     * Writes a long byte-by-byte into a region that straddles a segment boundary.
     *
     * @param segments target segments.
     * @param segSize size of each segment (all segments share one size).
     * @param segNum index of the segment containing the first byte.
     * @param segOffset offset of the first byte within that segment.
     * @param value the long to write, honoring the platform byte order ({@code LITTLE_ENDIAN}).
     */
    private static void setLongSlowly(
            MemorySegment[] segments, int segSize, int segNum, int segOffset, long value) {
        MemorySegment segment = segments[segNum];
        for (int i = 0; i < 8; i++) {
            // Hop to the next segment when the current one is exhausted.
            if (segOffset == segSize) {
                segment = segments[++segNum];
                segOffset = 0;
            }
            long unsignedByte;
            if (LITTLE_ENDIAN) {
                unsignedByte = value >> (i * 8);
            } else {
                unsignedByte = value >> ((7 - i) * 8);
            }
            // The cast keeps only the low 8 bits of the shifted value.
            segment.put(segOffset, (byte) unsignedByte);
            segOffset++;
        }
    }
/**
* get short from segments.
*
* @param segments target segments.
* @param offset value offset.
*/
public static short getShort(MemorySegment[] segments, int offset) {
if (inFirstSegment(segments, offset, 2)) {
return segments[0].getShort(offset);
} else {
return getShortMultiSegments(segments, offset);
}
}
private static short getShortMultiSegments(MemorySegment[] segments, int offset) {
int segSize = segments[0].size();
int segIndex = offset / segSize;
int segOffset = offset - segIndex * segSize; // equal to %
if (segOffset < segSize - 1) {
return segments[segIndex].getShort(segOffset);
} else {
return (short) getTwoByteSlowly(segments, segSize, segIndex, segOffset);
}
}
/**
* set short from segments.
*
* @param segments target segments.
* @param offset value offset.
*/
public static void setShort(MemorySegment[] segments, int offset, short value) {
if (inFirstSegment(segments, offset, 2)) {
segments[0].putShort(offset, value);
} else {
setShortMultiSegments(segments, offset, value);
}
}
private static void setShortMultiSegments(MemorySegment[] segments, int offset, short value) {
int segSize = segments[0].size();
int segIndex = offset / segSize;
int segOffset = offset - segIndex * segSize; // equal to %
if (segOffset < segSize - 1) {
segments[segIndex].putShort(segOffset, value);
} else {
setTwoByteSlowly(segments, segSize, segIndex, segOffset, value, value >> 8);
}
}
/**
* get float from segments.
*
* @param segments target segments.
* @param offset value offset.
*/
public static float getFloat(MemorySegment[] segments, int offset) {
if (inFirstSegment(segments, offset, 4)) {
return segments[0].getFloat(offset);
} else {
return getFloatMultiSegments(segments, offset);
}
}
private static float getFloatMultiSegments(MemorySegment[] segments, int offset) {
int segSize = segments[0].size();
int segIndex = offset / segSize;
int segOffset = offset - segIndex * segSize; // equal to %
if (segOffset < segSize - 3) {
return segments[segIndex].getFloat(segOffset);
} else {
return Float.intBitsToFloat(getIntSlowly(segments, segSize, segIndex, segOffset));
}
}
/**
* set float from segments.
*
* @param segments target segments.
* @param offset value offset.
*/
public static void setFloat(MemorySegment[] segments, int offset, float value) {
if (inFirstSegment(segments, offset, 4)) {
segments[0].putFloat(offset, value);
} else {
setFloatMultiSegments(segments, offset, value);
}
}
private static void setFloatMultiSegments(MemorySegment[] segments, int offset, float value) {
int segSize = segments[0].size();
int segIndex = offset / segSize;
int segOffset = offset - segIndex * segSize; // equal to %
if (segOffset < segSize - 3) {
segments[segIndex].putFloat(segOffset, value);
} else {
setIntSlowly(segments, segSize, segIndex, segOffset, Float.floatToRawIntBits(value));
}
}
/**
* get double from segments.
*
* @param segments target segments.
* @param offset value offset.
*/
public static double getDouble(MemorySegment[] segments, int offset) {
if (inFirstSegment(segments, offset, 8)) {
return segments[0].getDouble(offset);
} else {
return getDoubleMultiSegments(segments, offset);
}
}
private static double getDoubleMultiSegments(MemorySegment[] segments, int offset) {
int segSize = segments[0].size();
int segIndex = offset / segSize;
int segOffset = offset - segIndex * segSize; // equal to %
if (segOffset < segSize - 7) {
return segments[segIndex].getDouble(segOffset);
} else {
return Double.longBitsToDouble(getLongSlowly(segments, segSize, segIndex, segOffset));
}
}
/**
* set double from segments.
*
* @param segments target segments.
* @param offset value offset.
*/
public static void setDouble(MemorySegment[] segments, int offset, double value) {
if (inFirstSegment(segments, offset, 8)) {
segments[0].putDouble(offset, value);
} else {
setDoubleMultiSegments(segments, offset, value);
}
}
private static void setDoubleMultiSegments(MemorySegment[] segments, int offset, double value) {
int segSize = segments[0].size();
int segIndex = offset / segSize;
int segOffset = offset - segIndex * segSize; // equal to %
if (segOffset < segSize - 7) {
segments[segIndex].putDouble(segOffset, value);
} else {
setLongSlowly(
segments, segSize, segIndex, segOffset, Double.doubleToRawLongBits(value));
}
}
    /**
     * Assembles a two-byte value from a region that straddles a segment boundary.
     *
     * @param segments target segments.
     * @param segSize size of each segment (all segments share one size).
     * @param segNum index of the segment containing the first byte.
     * @param segOffset offset of the first byte within that segment.
     * @return the two bytes combined into an int (low 16 bits), honoring the platform byte
     *     order ({@code LITTLE_ENDIAN}); callers cast to {@code short} as needed.
     */
    private static int getTwoByteSlowly(
            MemorySegment[] segments, int segSize, int segNum, int segOffset) {
        MemorySegment segment = segments[segNum];
        int ret = 0;
        for (int i = 0; i < 2; i++) {
            // Hop to the next segment when the current one is exhausted.
            if (segOffset == segSize) {
                segment = segments[++segNum];
                segOffset = 0;
            }
            // Mask to avoid sign extension of the raw byte.
            int unsignedByte = segment.get(segOffset) & 0xff;
            if (LITTLE_ENDIAN) {
                ret |= (unsignedByte << (i * 8));
            } else {
                ret |= (unsignedByte << ((1 - i) * 8));
            }
            segOffset++;
        }
        return ret;
    }
    /**
     * Writes two bytes into a region that straddles a segment boundary.
     *
     * @param segments target segments.
     * @param segSize size of each segment (all segments share one size).
     * @param segNum index of the segment containing the first byte.
     * @param segOffset offset of the first byte within that segment; must be strictly less
     *     than {@code segSize} on entry (callers guarantee this).
     * @param b1 low-order byte of the value (only its low 8 bits are used).
     * @param b2 high-order byte of the value (only its low 8 bits are used).
     */
    private static void setTwoByteSlowly(
            MemorySegment[] segments, int segSize, int segNum, int segOffset, int b1, int b2) {
        MemorySegment segment = segments[segNum];
        // Little-endian stores the low byte first; big-endian the high byte first.
        segment.put(segOffset, (byte) (LITTLE_ENDIAN ? b1 : b2));
        segOffset++;
        // Hop to the next segment if the first write landed on the segment's last byte.
        if (segOffset == segSize) {
            segment = segments[++segNum];
            segOffset = 0;
        }
        segment.put(segOffset, (byte) (LITTLE_ENDIAN ? b2 : b1));
    }
/**
* Find equal segments2 in segments1.
*
* @param segments1 segs to find.
* @param segments2 sub segs.
* @return Return the found offset, return -1 if not find.
*/
public static int find(
MemorySegment[] segments1,
int offset1,
int numBytes1,
MemorySegment[] segments2,
int offset2,
int numBytes2) {
if (numBytes2 == 0) { // quick way 1.
return offset1;
}
if (inFirstSegment(segments1, offset1, numBytes1)
&& inFirstSegment(segments2, offset2, numBytes2)) {
byte first = segments2[0].get(offset2);
int end = numBytes1 - numBytes2 + offset1;
for (int i = offset1; i <= end; i++) {
// quick way 2: equal first byte.
if (segments1[0].get(i) == first
&& segments1[0].equalTo(segments2[0], i, offset2, numBytes2)) {
return i;
}
}
return -1;
} else {
return findInMultiSegments(
segments1, offset1, numBytes1, segments2, offset2, numBytes2);
}
}
private static int findInMultiSegments(
MemorySegment[] segments1,
int offset1,
int numBytes1,
MemorySegment[] segments2,
int offset2,
int numBytes2) {
int end = numBytes1 - numBytes2 + offset1;
for (int i = offset1; i <= end; i++) {
if (equalsMultiSegments(segments1, i, segments2, offset2, numBytes2)) {
return i;
}
}
return -1;
}
}
|
SegmentsUtil
|
java
|
apache__flink
|
flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/Tuple0SerializerSnapshot.java
|
{
"start": 1148,
"end": 1323
}
|
class ____ extends SimpleTypeSerializerSnapshot<Tuple0> {
public Tuple0SerializerSnapshot() {
super(() -> Tuple0Serializer.INSTANCE);
}
}
|
Tuple0SerializerSnapshot
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/locking/options/ScopeAndSecondaryTableTests.java
|
{
"start": 1072,
"end": 2492
}
|
class ____ {
@BeforeEach
void createTestData(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( (session) -> {
session.persist( new Detail( 1, "heeby", "jeeby" ) );
} );
}
@AfterEach
void dropTestData(SessionFactoryScope factoryScope) {
factoryScope.dropData();
}
@Test
@RequiresDialectFeature(feature=DialectFeatureChecks.SupportsLockingJoins.class, comment = "Come back and rework this to account for follow-on testing")
@SkipForDialect( dialectClass = InformixDialect.class, reason = "Cursor must be on simple SELECT for FOR UPDATE")
void simpleTest(SessionFactoryScope factoryScope) {
final SQLStatementInspector sqlCollector = factoryScope.getCollectingStatementInspector();
factoryScope.inTransaction( (session) -> {
sqlCollector.clear();
session.find( Detail.class, 1 );
session.clear();
sqlCollector.clear();
final Detail detail = session.find( Detail.class, 1, LockModeType.PESSIMISTIC_WRITE );
assertThat( sqlCollector.getSqlQueries() ).hasSize( 1 );
Helper.checkSql( sqlCollector.getSqlQueries().get( 0 ), session.getDialect(), Tables.DETAILS, Tables.SUPPLEMENTALS );
TransactionUtil.assertRowLock( factoryScope, Tables.DETAILS.getTableName(), "name", "id", detail.getId(), true );
TransactionUtil.assertRowLock( factoryScope, Tables.SUPPLEMENTALS.getTableName(), "txt", "detail_fk", detail.getId(), true );
} );
}
|
ScopeAndSecondaryTableTests
|
java
|
quarkusio__quarkus
|
independent-projects/arc/runtime/src/main/java/io/quarkus/arc/BeanCreator.java
|
{
"start": 220,
"end": 387
}
|
interface ____ used by synthetic beans to produce a contextual instance.
*
* @param <T>
* @see InjectableBean
* @see Contextual#create(CreationalContext)
*/
public
|
is
|
java
|
apache__camel
|
components/camel-json-validator/src/main/java/org/apache/camel/component/jsonvalidator/DefaultJsonValidationErrorHandler.java
|
{
"start": 1051,
"end": 1544
}
|
class ____ implements JsonValidatorErrorHandler {
@Override
public void handleErrors(Exchange exchange, JsonSchema schema, Set<ValidationMessage> errors) throws ValidationException {
throw new JsonValidationException(exchange, schema, errors);
}
@Override
public void handleErrors(Exchange exchange, JsonSchema schema, Exception e) throws ValidationException {
throw new JsonValidationException(exchange, schema, e);
}
}
|
DefaultJsonValidationErrorHandler
|
java
|
elastic__elasticsearch
|
modules/lang-painless/src/main/java/org/elasticsearch/painless/ir/DefInterfaceReferenceNode.java
|
{
"start": 616,
"end": 1146
}
|
class ____ extends ExpressionNode {
/* ---- begin visitor ---- */
@Override
public <Scope> void visit(IRTreeVisitor<Scope> irTreeVisitor, Scope scope) {
irTreeVisitor.visitDefInterfaceReference(this, scope);
}
@Override
public <Scope> void visitChildren(IRTreeVisitor<Scope> irTreeVisitor, Scope scope) {
// do nothing; terminal node
}
/* ---- end visitor ---- */
public DefInterfaceReferenceNode(Location location) {
super(location);
}
}
|
DefInterfaceReferenceNode
|
java
|
mockito__mockito
|
mockito-core/src/main/java/org/mockito/stubbing/Answer6.java
|
{
"start": 166,
"end": 1467
}
|
interface ____ be used for configuring mock's answer for a six argument invocation.
*
* Answer specifies an action that is executed and a return value that is returned when you interact with the mock.
* <p>
* Example of stubbing a mock with this custom answer:
*
* <pre class="code"><code class="java">
* import static org.mockito.AdditionalAnswers.answer;
*
* when(mock.someMethod(anyInt(), anyString(), anyChar(), any(), any(), anyBoolean())).then(answer(
* new Answer6<StringBuilder, Integer, String, Character, Object, Object, Boolean>() {
* public StringBuilder answer(Integer i, String s, Character c, Object o1, Object o2, Boolean isIt) {
* return new StringBuilder().append(i).append(s).append(c).append(o1.hashCode()).append(o2.hashCode()).append(isIt);
* }
* }));
*
* //Following will print a string like "3xyz131635550true"
* System.out.println(mock.someMethod(3, "xy", 'z', new Object(), new Object(), true));
* </code></pre>
*
* @param <T> return type
* @param <A0> type of the first argument
* @param <A1> type of the second argument
* @param <A2> type of the third argument
* @param <A3> type of the fourth argument
* @param <A4> type of the fifth argument
* @param <A5> type of the sixth argument
* @see Answer
*/
public
|
to
|
java
|
google__guava
|
android/guava/src/com/google/common/util/concurrent/AsyncCallable.java
|
{
"start": 1113,
"end": 1531
}
|
interface ____<V extends @Nullable Object> {
/**
* Computes a result {@code Future}. The output {@code Future} need not be {@linkplain
* Future#isDone done}, making {@code AsyncCallable} suitable for asynchronous derivations.
*
* <p>Throwing an exception from this method is equivalent to returning a failing {@link
* ListenableFuture}.
*/
ListenableFuture<V> call() throws Exception;
}
|
AsyncCallable
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/PrioritizedDequeTest.java
|
{
"start": 1196,
"end": 2764
}
|
class ____ {
@Test
void testPrioritizeOnAdd() {
final PrioritizedDeque<Integer> deque = new PrioritizedDeque<>();
deque.add(0);
deque.add(1);
deque.add(2);
deque.add(3);
deque.add(3, true, true);
assertThat(deque.asUnmodifiableCollection()).containsExactly(3, 0, 1, 2);
}
@Test
void testPrioritize() {
final PrioritizedDeque<Integer> deque = new PrioritizedDeque<>();
deque.add(0);
deque.add(1);
deque.add(2);
deque.add(3);
deque.prioritize(3);
assertThat(deque.asUnmodifiableCollection()).containsExactly(3, 0, 1, 2);
}
@Test
void testGetAndRemove() {
final PrioritizedDeque<Integer> deque = new PrioritizedDeque<>();
deque.add(0);
deque.add(1);
deque.add(2);
deque.add(1);
deque.add(3);
assertThat(deque.getAndRemove(v -> v == 1).intValue()).isOne();
assertThat(deque.asUnmodifiableCollection()).containsExactly(0, 2, 1, 3);
assertThat(deque.getAndRemove(v -> v == 1).intValue()).isOne();
assertThat(deque.asUnmodifiableCollection()).containsExactly(0, 2, 3);
try {
int removed = deque.getAndRemove(v -> v == 1);
fail(
String.format(
"This should not happen. Item [%s] was removed, but it shouldn't be found",
removed));
} catch (NoSuchElementException ex) {
// expected
}
}
}
|
PrioritizedDequeTest
|
java
|
quarkusio__quarkus
|
independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/buildextension/beans/SyntheticInjectionPointMetadataTest.java
|
{
"start": 1909,
"end": 2790
}
|
class ____ implements BeanRegistrar {
@Override
public void register(RegistrationContext context) {
context.configure(List.class)
// List, List<MyDependentFoo>
.addType(Type.create(DotName.createSimple(List.class.getName()), Kind.CLASS))
.addType(ParameterizedType.create(DotName.createSimple(List.class),
new Type[] { ClassType.create(DotName.createSimple(MyDependentFoo.class)) }, null))
.creator(ListCreator.class)
.addInjectionPoint(ClassType.create(DotName.createSimple(MyDependentFoo.class)))
.addInjectionPoint(ClassType.create(DotName.createSimple(InjectionPoint.class)))
.unremovable()
.done();
}
}
@Singleton
public static
|
TestRegistrar
|
java
|
apache__flink
|
flink-core/src/main/java/org/apache/flink/core/fs/BackPressuringExecutor.java
|
{
"start": 2372,
"end": 3026
}
|
interface ____ transparent drop-in.
try {
permits.acquire();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new FlinkRuntimeException("interrupted:", e);
}
final SemaphoreReleasingRunnable runnable =
new SemaphoreReleasingRunnable(command, permits);
try {
delegate.execute(runnable);
} catch (Throwable e) {
runnable.release();
ExceptionUtils.rethrow(e, e.getMessage());
}
}
// ------------------------------------------------------------------------
private static
|
for
|
java
|
apache__maven
|
impl/maven-cli/src/main/java/org/apache/maven/cling/invoker/mvn/LayeredMavenOptions.java
|
{
"start": 1268,
"end": 5489
}
|
class ____<O extends MavenOptions> extends LayeredOptions<O> implements MavenOptions {
public static MavenOptions layerMavenOptions(Collection<MavenOptions> options) {
List<MavenOptions> o = options.stream().filter(Objects::nonNull).toList();
if (o.isEmpty()) {
throw new IllegalArgumentException("No options specified (or all were null)");
} else if (o.size() == 1) {
return o.get(0);
} else {
return new LayeredMavenOptions<>(o);
}
}
protected LayeredMavenOptions(List<O> options) {
super(options);
}
@Override
public Optional<String> alternatePomFile() {
return returnFirstPresentOrEmpty(MavenOptions::alternatePomFile);
}
@Override
public Optional<Boolean> nonRecursive() {
return returnFirstPresentOrEmpty(MavenOptions::nonRecursive);
}
@Override
public Optional<Boolean> updateSnapshots() {
return returnFirstPresentOrEmpty(MavenOptions::updateSnapshots);
}
@Override
public Optional<List<String>> activatedProfiles() {
return collectListIfPresentOrEmpty(MavenOptions::activatedProfiles);
}
@Override
public Optional<Boolean> suppressSnapshotUpdates() {
return returnFirstPresentOrEmpty(MavenOptions::suppressSnapshotUpdates);
}
@Override
public Optional<Boolean> strictChecksums() {
return returnFirstPresentOrEmpty(MavenOptions::strictChecksums);
}
@Override
public Optional<Boolean> relaxedChecksums() {
return returnFirstPresentOrEmpty(MavenOptions::relaxedChecksums);
}
@Override
public Optional<Boolean> failFast() {
return returnFirstPresentOrEmpty(MavenOptions::failFast);
}
@Override
public Optional<Boolean> failAtEnd() {
return returnFirstPresentOrEmpty(MavenOptions::failAtEnd);
}
@Override
public Optional<Boolean> failNever() {
return returnFirstPresentOrEmpty(MavenOptions::failNever);
}
@Override
public Optional<Boolean> resume() {
return returnFirstPresentOrEmpty(MavenOptions::resume);
}
@Override
public Optional<String> resumeFrom() {
return returnFirstPresentOrEmpty(MavenOptions::resumeFrom);
}
@Override
public Optional<List<String>> projects() {
return collectListIfPresentOrEmpty(MavenOptions::projects);
}
@Override
public Optional<Boolean> alsoMake() {
return returnFirstPresentOrEmpty(MavenOptions::alsoMake);
}
@Override
public Optional<Boolean> alsoMakeDependents() {
return returnFirstPresentOrEmpty(MavenOptions::alsoMakeDependents);
}
@Override
public Optional<String> threads() {
return returnFirstPresentOrEmpty(MavenOptions::threads);
}
@Override
public Optional<String> builder() {
return returnFirstPresentOrEmpty(MavenOptions::builder);
}
@Override
public Optional<Boolean> noTransferProgress() {
return returnFirstPresentOrEmpty(MavenOptions::noTransferProgress);
}
@Override
public Optional<Boolean> cacheArtifactNotFound() {
return returnFirstPresentOrEmpty(MavenOptions::cacheArtifactNotFound);
}
@Override
public Optional<Boolean> strictArtifactDescriptorPolicy() {
return returnFirstPresentOrEmpty(MavenOptions::strictArtifactDescriptorPolicy);
}
@Override
public Optional<Boolean> ignoreTransitiveRepositories() {
return returnFirstPresentOrEmpty(MavenOptions::ignoreTransitiveRepositories);
}
@Override
public Optional<String> atFile() {
return returnFirstPresentOrEmpty(MavenOptions::atFile);
}
@Override
public Optional<List<String>> goals() {
return collectListIfPresentOrEmpty(MavenOptions::goals);
}
@Override
public MavenOptions interpolate(UnaryOperator<String> callback) {
ArrayList<MavenOptions> interpolatedOptions = new ArrayList<>(options.size());
for (MavenOptions o : options) {
interpolatedOptions.add((MavenOptions) o.interpolate(callback));
}
return layerMavenOptions(interpolatedOptions);
}
}
|
LayeredMavenOptions
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.