language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/CoGroupedStreams.java | {
"start": 8766,
"end": 15437
} | class ____<T1, T2, KEY, W extends Window> {
private final DataStream<T1> input1;
private final DataStream<T2> input2;
private final KeySelector<T1, KEY> keySelector1;
private final KeySelector<T2, KEY> keySelector2;
private final TypeInformation<KEY> keyType;
private final WindowAssigner<? super TaggedUnion<T1, T2>, W> windowAssigner;
private final Trigger<? super TaggedUnion<T1, T2>, ? super W> trigger;
private final Evictor<? super TaggedUnion<T1, T2>, ? super W> evictor;
@Nullable private final Duration allowedLateness;
private WindowedStream<TaggedUnion<T1, T2>, KEY, W> windowedStream;
protected WithWindow(
DataStream<T1> input1,
DataStream<T2> input2,
KeySelector<T1, KEY> keySelector1,
KeySelector<T2, KEY> keySelector2,
TypeInformation<KEY> keyType,
WindowAssigner<? super TaggedUnion<T1, T2>, W> windowAssigner,
Trigger<? super TaggedUnion<T1, T2>, ? super W> trigger,
Evictor<? super TaggedUnion<T1, T2>, ? super W> evictor,
@Nullable Duration allowedLateness) {
this.input1 = input1;
this.input2 = input2;
this.keySelector1 = keySelector1;
this.keySelector2 = keySelector2;
this.keyType = keyType;
this.windowAssigner = windowAssigner;
this.trigger = trigger;
this.evictor = evictor;
this.allowedLateness = allowedLateness;
}
/** Sets the {@code Trigger} that should be used to trigger window emission. */
@PublicEvolving
public WithWindow<T1, T2, KEY, W> trigger(
Trigger<? super TaggedUnion<T1, T2>, ? super W> newTrigger) {
return new WithWindow<>(
input1,
input2,
keySelector1,
keySelector2,
keyType,
windowAssigner,
newTrigger,
evictor,
allowedLateness);
}
/**
* Sets the {@code Evictor} that should be used to evict elements from a window before
* emission.
*
* <p>Note: When using an evictor window performance will degrade significantly, since
* pre-aggregation of window results cannot be used.
*/
@PublicEvolving
public WithWindow<T1, T2, KEY, W> evictor(
Evictor<? super TaggedUnion<T1, T2>, ? super W> newEvictor) {
return new WithWindow<>(
input1,
input2,
keySelector1,
keySelector2,
keyType,
windowAssigner,
trigger,
newEvictor,
allowedLateness);
}
/**
* Sets the time by which elements are allowed to be late.
*
* @see WindowedStream#allowedLateness(Duration)
*/
@PublicEvolving
public WithWindow<T1, T2, KEY, W> allowedLateness(@Nullable Duration newLateness) {
return new WithWindow<>(
input1,
input2,
keySelector1,
keySelector2,
keyType,
windowAssigner,
trigger,
evictor,
newLateness);
}
/**
* Completes the co-group operation with the user function that is executed for windowed
* groups.
*/
public <T> SingleOutputStreamOperator<T> apply(CoGroupFunction<T1, T2, T> function) {
TypeInformation<T> resultType =
TypeExtractor.getCoGroupReturnTypes(
function, input1.getType(), input2.getType(), "CoGroup", false);
return apply(function, resultType);
}
/**
* Completes the co-group operation with the user function that is executed for windowed
* groups.
*/
public <T> SingleOutputStreamOperator<T> apply(
CoGroupFunction<T1, T2, T> function, TypeInformation<T> resultType) {
// clean the closure
function = input1.getExecutionEnvironment().clean(function);
UnionTypeInfo<T1, T2> unionType =
new UnionTypeInfo<>(input1.getType(), input2.getType());
UnionKeySelector<T1, T2, KEY> unionKeySelector =
new UnionKeySelector<>(keySelector1, keySelector2);
SingleOutputStreamOperator<TaggedUnion<T1, T2>> taggedInput1 =
input1.map(new Input1Tagger<T1, T2>());
taggedInput1.getTransformation().setParallelism(input1.getParallelism(), false);
taggedInput1.returns(unionType);
SingleOutputStreamOperator<TaggedUnion<T1, T2>> taggedInput2 =
input2.map(new Input2Tagger<T1, T2>());
taggedInput2.getTransformation().setParallelism(input2.getParallelism(), false);
taggedInput2.returns(unionType);
DataStream<TaggedUnion<T1, T2>> unionStream = taggedInput1.union(taggedInput2);
// we explicitly create the keyed stream to manually pass the key type information in
windowedStream =
new KeyedStream<TaggedUnion<T1, T2>, KEY>(
unionStream, unionKeySelector, keyType)
.window(windowAssigner);
if (trigger != null) {
windowedStream.trigger(trigger);
}
if (evictor != null) {
windowedStream.evictor(evictor);
}
if (allowedLateness != null) {
windowedStream.allowedLateness(allowedLateness);
}
return windowedStream.apply(
new CoGroupWindowFunction<T1, T2, T, KEY, W>(function), resultType);
}
@VisibleForTesting
Optional<Duration> getAllowedLatenessDuration() {
return Optional.ofNullable(allowedLateness);
}
@VisibleForTesting
WindowedStream<TaggedUnion<T1, T2>, KEY, W> getWindowedStream() {
return windowedStream;
}
}
// ------------------------------------------------------------------------
// Type information for Tagged Union
// ------------------------------------------------------------------------
private static | WithWindow |
java | apache__logging-log4j2 | log4j-core-test/src/test/java/org/apache/logging/log4j/core/appender/SyslogAppenderTest.java | {
"start": 1549,
"end": 5255
} | class ____ extends SyslogAppenderTestBase {
public SyslogAppenderTest() {
root = ctx.getLogger("SyslogAppenderTest");
}
@BeforeEach
public void setUp() {
sentMessages.clear();
}
@AfterEach
public void teardown() {
removeAppenders();
if (syslogServer != null) {
syslogServer.shutdown();
}
}
@Test
public void testTCPAppender() throws Exception {
initTCPTestEnvironment(LEGACY_BSD);
sendAndCheckLegacyBsdMessage("This is a test message");
sendAndCheckLegacyBsdMessage("This is a test message 2");
}
@Test
public void testDefaultAppender() throws Exception {
initTCPTestEnvironment(LEGACY_BSD);
sendAndCheckLegacyBsdMessage("This is a test message");
sendAndCheckLegacyBsdMessage("This is a test message 2");
}
@Test
public void testTCPStructuredAppender() throws Exception {
initTCPTestEnvironment(SYSLOG);
sendAndCheckStructuredMessage();
}
@Test
public void testUDPAppender() throws Exception {
initUDPTestEnvironment(LEGACY_BSD);
sendAndCheckLegacyBsdMessage("This is a test message");
root.removeAppender(appender);
appender.stop();
}
@Test
public void testUDPStructuredAppender() throws Exception {
initUDPTestEnvironment(SYSLOG);
sendAndCheckStructuredMessage();
root.removeAppender(appender);
appender.stop();
}
protected void initUDPTestEnvironment(final TlsSyslogMessageFormat messageFormat) throws SocketException {
syslogServer = MockSyslogServerFactory.createUDPSyslogServer();
syslogServer.start();
initAppender(Protocol.UDP, messageFormat, syslogServer.getLocalPort());
}
protected void initTCPTestEnvironment(final TlsSyslogMessageFormat messageFormat) throws IOException {
syslogServer = MockSyslogServerFactory.createTCPSyslogServer();
syslogServer.start();
initAppender(Protocol.TCP, messageFormat, syslogServer.getLocalPort());
}
protected void initAppender(final Protocol protocol, final TlsSyslogMessageFormat messageFormat, final int port) {
appender = createAppender(protocol, messageFormat, port);
validate(appender);
appender.start();
initRootLogger(appender);
}
protected SyslogAppender createAppender(
final Protocol protocol, final TlsSyslogMessageFormat format, final int port) {
return newSyslogAppenderBuilder(protocol, format, includeNewLine, port).build();
}
protected Builder<?> newSyslogAppenderBuilder(
final Protocol protocol, final TlsSyslogMessageFormat format, final boolean newLine, final int port) {
// @formatter:off
return SyslogAppender.newSyslogAppenderBuilder()
.setHost("localhost")
.setPort(port)
.setProtocol(protocol)
.setReconnectDelayMillis(-1)
.setImmediateFail(true)
.setName("Test")
.setImmediateFlush(true)
.setIgnoreExceptions(false)
.setFacility(Facility.LOCAL0)
.setId("Audit")
.setEnterpriseNumber("18060")
.setIncludeMdc(true)
.setMdcId("RequestContext")
.setNewLine(includeNewLine)
.setAppName("TestApp")
.setMsgId("Test")
.setIncludes("ipAddress,loginId")
.setFormat(format == SYSLOG ? SyslogAppender.RFC5424 : null)
.setAdvertise(false);
// @formatter:on
}
}
| SyslogAppenderTest |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/test/src/main/java/org/elasticsearch/compute/test/TestBlockBuilder.java | {
"start": 11164,
"end": 12858
} | class ____ extends TestBlockBuilder {
private final BooleanBlock.Builder builder;
TestBooleanBlockBuilder(BlockFactory blockFactory, int estimatedSize) {
builder = blockFactory.newBooleanBlockBuilder(estimatedSize);
}
@Override
public TestBlockBuilder appendObject(Object object) {
if (object instanceof Number number) {
object = number.intValue() % 2 == 0;
}
builder.appendBoolean((boolean) object);
return this;
}
@Override
public TestBlockBuilder appendNull() {
builder.appendNull();
return this;
}
@Override
public TestBlockBuilder beginPositionEntry() {
builder.beginPositionEntry();
return this;
}
@Override
public TestBlockBuilder endPositionEntry() {
builder.endPositionEntry();
return this;
}
@Override
public TestBlockBuilder copyFrom(Block block, int beginInclusive, int endExclusive) {
builder.copyFrom(block, beginInclusive, endExclusive);
return this;
}
@Override
public TestBlockBuilder mvOrdering(Block.MvOrdering mvOrdering) {
builder.mvOrdering(mvOrdering);
return this;
}
@Override
public long estimatedBytes() {
return builder.estimatedBytes();
}
@Override
public BooleanBlock build() {
return builder.build();
}
@Override
public void close() {
builder.close();
}
}
}
| TestBooleanBlockBuilder |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/boot/database/qualfiedTableNaming/DefaultCatalogAndSchemaTest.java | {
"start": 58503,
"end": 59010
} | class ____ {
public static final String NAME = "EntityWithExplicitQualifiersWithTableGenerator";
@Id
@GeneratedValue(strategy = GenerationType.TABLE, generator = NAME + "_generator")
@TableGenerator(name = NAME + "_generator", table = NAME + "_tableseq",
catalog = EXPLICIT_CATALOG, schema = EXPLICIT_SCHEMA)
private Long id;
@Basic
private String basic;
}
@Entity(name = EntityWithDefaultQualifiersWithIncrementGenerator.NAME)
public static | EntityWithExplicitQualifiersWithTableGenerator |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/nullvaluemapping/NullValueMappingTest.java | {
"start": 1361,
"end": 11263
} | class ____ {
@ProcessorTest
public void shouldProvideMapperInstance() {
assertThat( CarMapper.INSTANCE ).isNotNull();
}
@ProcessorTest
public void shouldMapExpressionAndConstantRegardlessNullArg() {
//given
Car car = new Car( "Morris", 2 );
//when
CarDto carDto1 = CarMapper.INSTANCE.carToCarDto( car );
//then
assertThat( carDto1 ).isNotNull();
assertThat( carDto1.getMake() ).isEqualTo( car.getMake() );
assertThat( carDto1.getSeatCount() ).isEqualTo( car.getNumberOfSeats() );
assertThat( carDto1.getModel() ).isEqualTo( "ModelT" );
assertThat( carDto1.getCatalogId() ).isNotEmpty();
//when
CarDto carDto2 = CarMapper.INSTANCE.carToCarDto( null );
//then
assertThat( carDto2 ).isNotNull();
assertThat( carDto2.getMake() ).isNull();
assertThat( carDto2.getSeatCount() ).isEqualTo( 0 );
assertThat( carDto2.getModel() ).isEqualTo( "ModelT" );
assertThat( carDto2.getCatalogId() ).isNotEmpty();
}
@ProcessorTest
public void shouldMapExpressionAndConstantRegardlessNullArgSeveralSources() {
//given
Car car = new Car( "Morris", 2 );
//when
CarDto carDto1 = CarMapper.INSTANCE.carToCarDto( car, "ModelT" );
//then
assertThat( carDto1 ).isNotNull();
assertThat( carDto1.getMake() ).isEqualTo( car.getMake() );
assertThat( carDto1.getModel() ).isEqualTo( "ModelT" );
assertThat( carDto1.getSeatCount() ).isEqualTo( car.getNumberOfSeats() );
assertThat( carDto1.getCatalogId() ).isNotEmpty();
//when
CarDto carDto2 = CarMapper.INSTANCE.carToCarDto( null, "ModelT" );
//then
assertThat( carDto2 ).isNotNull();
assertThat( carDto2.getMake() ).isNull();
assertThat( carDto2.getSeatCount() ).isEqualTo( 0 );
assertThat( carDto2.getModel() ).isEqualTo( "ModelT" );
assertThat( carDto2.getCatalogId() ).isNotEmpty();
}
@ProcessorTest
public void shouldMapIterableWithNullArg() {
//given
Car car = new Car( "Morris", 2 );
//when
List<CarDto> carDtos1 = CarMapper.INSTANCE.carsToCarDtos( Arrays.asList( car ) );
//then
assertThat( carDtos1 ).isNotNull();
assertThat( carDtos1.size() ).isEqualTo( 1 );
//when
List<CarDto> carDtos2 = CarMapper.INSTANCE.carsToCarDtos( null );
//then
assertThat( carDtos2 ).isNotNull();
assertThat( carDtos2.isEmpty() ).isTrue();
}
@ProcessorTest
@SuppressWarnings({ "rawtypes", "unchecked" })
public void shouldMapMapWithNullArg() {
//given
Car car = new Car( "Morris", 2 );
Map carMap = new HashMap();
carMap.put( 1, car );
//when
Map<Integer, CarDto> carDtoMap1 = CarMapper.INSTANCE.carsToCarDtoMap( carMap );
//then
assertThat( carDtoMap1 ).isNotNull();
assertThat( carDtoMap1.size() ).isEqualTo( 1 );
//when
Map<Integer, CarDto> carDtoMap2 = CarMapper.INSTANCE.carsToCarDtoMap( null );
//then
assertThat( carDtoMap2 ).isNotNull();
assertThat( carDtoMap2.isEmpty() ).isTrue();
}
@ProcessorTest
public void shouldMapExpressionAndConstantRegardlessNullArgOnMapper() {
//when
CarDto carDto = CarMapperSettingOnMapper.INSTANCE.carToCarDto( null );
//then
assertThat( carDto ).isNotNull();
assertThat( carDto.getMake() ).isNull();
assertThat( carDto.getSeatCount() ).isEqualTo( 0 );
assertThat( carDto.getModel() ).isEqualTo( "ModelT" );
assertThat( carDto.getCatalogId() ).isNotEmpty();
}
@ProcessorTest
public void shouldMapIterableWithNullArgOnMapper() {
//when
List<CarDto> carDtos = CarMapperSettingOnMapper.INSTANCE.carsToCarDtos( null );
//then
assertThat( carDtos ).isEmpty();
}
@ProcessorTest
public void shouldMapMapWithNullArgOnMapper() {
//when
Map<Integer, CarDto> carDtoMap = CarMapperSettingOnMapper.INSTANCE.carsToCarDtoMap( null );
//then
assertThat( carDtoMap ).isNull();
}
@ProcessorTest
public void shouldMapExpressionAndConstantRegardlessOfIterableNullArgOnMapper() {
//when
CarDto carDto = CarMapperIterableSettingOnMapper.INSTANCE.carToCarDto( null );
//then
assertThat( carDto ).isNotNull();
assertThat( carDto.getMake() ).isNull();
assertThat( carDto.getSeatCount() ).isEqualTo( 0 );
assertThat( carDto.getModel() ).isEqualTo( "ModelT" );
assertThat( carDto.getCatalogId() ).isNotEmpty();
}
@ProcessorTest
public void shouldMapIterableToNullWithIterableNullArgOnMapper() {
//when
List<CarDto> carDtos = CarMapperIterableSettingOnMapper.INSTANCE.carsToCarDtos( null );
//then
assertThat( carDtos ).isNull();
}
@ProcessorTest
public void shouldMapMapRegardlessOfIterableNullArgOnMapper() {
//when
Map<Integer, CarDto> carDtoMap = CarMapperIterableSettingOnMapper.INSTANCE.carsToCarDtoMap( null );
//then
assertThat( carDtoMap ).isEmpty();
}
@ProcessorTest
public void shouldMapExpressionAndConstantRegardlessMapNullArgOnMapper() {
//when
CarDto carDto = CarMapperMapSettingOnMapper.INSTANCE.carToCarDto( null );
//then
assertThat( carDto ).isNotNull();
assertThat( carDto.getMake() ).isNull();
assertThat( carDto.getSeatCount() ).isEqualTo( 0 );
assertThat( carDto.getModel() ).isEqualTo( "ModelT" );
assertThat( carDto.getCatalogId() ).isNotEmpty();
}
@ProcessorTest
public void shouldMapIterableRegardlessOfMapNullArgOnMapper() {
//when
List<CarDto> carDtos = CarMapperMapSettingOnMapper.INSTANCE.carsToCarDtos( null );
//then
assertThat( carDtos ).isEmpty();
}
@ProcessorTest
public void shouldMapMapToWithMapNullArgOnMapper() {
//when
Map<Integer, CarDto> carDtoMap = CarMapperMapSettingOnMapper.INSTANCE.carsToCarDtoMap( null );
//then
assertThat( carDtoMap ).isNull();
}
@ProcessorTest
public void shouldMapExpressionAndConstantRegardlessNullArgOnConfig() {
//when
CarDto carDto = CarMapperSettingOnConfig.INSTANCE.carToCarDto( null );
//then
assertThat( carDto ).isNotNull();
assertThat( carDto.getMake() ).isNull();
assertThat( carDto.getSeatCount() ).isEqualTo( 0 );
assertThat( carDto.getModel() ).isEqualTo( "ModelT" );
assertThat( carDto.getCatalogId() ).isNotEmpty();
}
@ProcessorTest
public void shouldMapIterableWithNullArgOnConfig() {
//when
List<CarDto> carDtos = CarMapperSettingOnConfig.INSTANCE.carsToCarDtos( null );
//then
assertThat( carDtos ).isEmpty();
}
@ProcessorTest
public void shouldMapMapWithNullArgOnConfig() {
//when
Map<Integer, CarDto> carDtoMap = CarMapperSettingOnConfig.INSTANCE.carsToCarDtoMap( null );
//then
assertThat( carDtoMap ).isNull();
}
@ProcessorTest
public void shouldMapExpressionAndConstantRegardlessOfIterableNullArgOnConfig() {
//when
CarDto carDto = CarMapperIterableSettingOnConfig.INSTANCE.carToCarDto( null );
//then
assertThat( carDto ).isNotNull();
assertThat( carDto.getMake() ).isNull();
assertThat( carDto.getSeatCount() ).isEqualTo( 0 );
assertThat( carDto.getModel() ).isEqualTo( "ModelT" );
assertThat( carDto.getCatalogId() ).isNotEmpty();
}
@ProcessorTest
public void shouldMapIterableToNullWithIterableNullArgOnConfig() {
//when
List<CarDto> carDtos = CarMapperIterableSettingOnConfig.INSTANCE.carsToCarDtos( null );
//then
assertThat( carDtos ).isNull();
}
@ProcessorTest
public void shouldMapMapRegardlessOfIterableNullArgOnConfig() {
//when
Map<Integer, CarDto> carDtoMap = CarMapperIterableSettingOnConfig.INSTANCE.carsToCarDtoMap( null );
//then
assertThat( carDtoMap ).isEmpty();
}
@ProcessorTest
public void shouldMapExpressionAndConstantRegardlessOfMapNullArgOnConfig() {
//when
CarDto carDto = CarMapperMapSettingOnConfig.INSTANCE.carToCarDto( null );
//then
assertThat( carDto ).isNotNull();
assertThat( carDto.getMake() ).isNull();
assertThat( carDto.getSeatCount() ).isEqualTo( 0 );
assertThat( carDto.getModel() ).isEqualTo( "ModelT" );
assertThat( carDto.getCatalogId() ).isNotEmpty();
}
@ProcessorTest
public void shouldMapIterableRegardlessOfMapNullArgOnConfig() {
//when
List<CarDto> carDtos = CarMapperMapSettingOnConfig.INSTANCE.carsToCarDtos( null );
//then
assertThat( carDtos ).isEmpty();
}
@ProcessorTest
public void shouldMapMapToNullWithMapNullArgOnConfig() {
//when
Map<Integer, CarDto> carDtoMap = CarMapperMapSettingOnConfig.INSTANCE.carsToCarDtoMap( null );
//then
assertThat( carDtoMap ).isNull();
}
@ProcessorTest
public void shouldApplyConfiguredStrategyForMethodWithSeveralSourceParams() {
//when
DriverAndCarDto result = CarMapper.INSTANCE.driverAndCarToDto( null, null );
//then
assertThat( result ).isNotNull();
assertThat( result.getMake() ).isNull();
assertThat( result.getName() ).isNull();
//when
result = CarMapper.INSTANCE.driverAndCarToDtoReturningNull( null, null );
//then
assertThat( result ).isNull();
}
}
| NullValueMappingTest |
java | alibaba__nacos | ai/src/test/java/com/alibaba/nacos/ai/utils/AgentCardUtilTest.java | {
"start": 1533,
"end": 13733
} | class ____ {
@Test
void testBuildAgentCardDetailInfo() {
// Given
AgentCard agentCard = createTestAgentCard();
String registrationType = AiConstants.A2a.A2A_ENDPOINT_TYPE_SERVICE;
// When
AgentCardDetailInfo result = AgentCardUtil.buildAgentCardDetailInfo(agentCard, registrationType);
// Then
assertNotNull(result);
assertEquals(registrationType, result.getRegistrationType());
assertAgentCardEquals(agentCard, result);
}
@Test
void testBuildAgentCardVersionInfoWithLatest() {
// Given
AgentCard agentCard = createTestAgentCard();
String registrationType = AiConstants.A2a.A2A_ENDPOINT_TYPE_SERVICE;
boolean isLatest = true;
// When
AgentCardVersionInfo result = AgentCardUtil.buildAgentCardVersionInfo(agentCard, registrationType, isLatest);
// Then
assertNotNull(result);
assertEquals(registrationType, result.getRegistrationType());
assertEquals(agentCard.getVersion(), result.getLatestPublishedVersion());
assertNotNull(result.getVersionDetails());
assertEquals(1, result.getVersionDetails().size());
AgentVersionDetail versionDetail = result.getVersionDetails().get(0);
assertEquals(agentCard.getVersion(), versionDetail.getVersion());
assertTrue(versionDetail.isLatest());
assertNotNull(versionDetail.getCreatedAt());
assertNotNull(versionDetail.getUpdatedAt());
}
@Test
void testBuildAgentCardVersionInfoWithoutLatest() {
// Given
AgentCard agentCard = createTestAgentCard();
String registrationType = AiConstants.A2a.A2A_ENDPOINT_TYPE_SERVICE;
boolean isLatest = false;
// When
AgentCardVersionInfo result = AgentCardUtil.buildAgentCardVersionInfo(agentCard, registrationType, isLatest);
// Then
assertNotNull(result);
assertEquals(registrationType, result.getRegistrationType());
assertNotNull(result.getVersionDetails());
assertEquals(1, result.getVersionDetails().size());
AgentVersionDetail versionDetail = result.getVersionDetails().get(0);
assertEquals(agentCard.getVersion(), versionDetail.getVersion());
assertEquals(isLatest, versionDetail.isLatest());
assertNotNull(versionDetail.getCreatedAt());
assertNotNull(versionDetail.getUpdatedAt());
}
@Test
void testBuildAgentVersionDetail() {
// Given
AgentCard agentCard = createTestAgentCard();
boolean isLatest = true;
// When
AgentVersionDetail result = AgentCardUtil.buildAgentVersionDetail(agentCard, isLatest);
// Then
assertNotNull(result);
assertEquals(agentCard.getVersion(), result.getVersion());
assertEquals(isLatest, result.isLatest());
assertNotNull(result.getCreatedAt());
assertNotNull(result.getUpdatedAt());
}
@Test
void testUpdateUpdateTime() {
// Given
AgentVersionDetail versionDetail = new AgentVersionDetail();
versionDetail.setUpdatedAt("2023-01-01T00:00:00Z");
// When
AgentCardUtil.updateUpdateTime(versionDetail);
// Then
assertNotNull(versionDetail.getUpdatedAt());
// We can't assert exact value since it's based on current time, but we can ensure it's not null
}
@Test
void testBuildAgentInterfaceWithTlsSupport() {
// Given
Instance instance = new Instance();
instance.setIp("127.0.0.1");
instance.setPort(8080);
Map<String, String> metadata = new HashMap<>();
metadata.put(Constants.A2A.NACOS_AGENT_ENDPOINT_SUPPORT_TLS, "true");
metadata.put(Constants.A2A.AGENT_ENDPOINT_TRANSPORT_KEY, "JSONRPC");
metadata.put(Constants.A2A.AGENT_ENDPOINT_PATH_KEY, "/agent");
instance.setMetadata(metadata);
// When
AgentInterface result = AgentCardUtil.buildAgentInterface(instance);
// Then
assertNotNull(result);
assertEquals("https://127.0.0.1:8080/agent", result.getUrl());
assertEquals("JSONRPC", result.getTransport());
}
@Test
void testBuildAgentInterfaceWithoutTlsSupport() {
// Given
Instance instance = new Instance();
instance.setIp("127.0.0.1");
instance.setPort(8080);
Map<String, String> metadata = new HashMap<>();
metadata.put(Constants.A2A.NACOS_AGENT_ENDPOINT_SUPPORT_TLS, "false");
metadata.put(Constants.A2A.AGENT_ENDPOINT_TRANSPORT_KEY, "JSONRPC");
metadata.put(Constants.A2A.AGENT_ENDPOINT_PATH_KEY, "/agent");
instance.setMetadata(metadata);
// When
AgentInterface result = AgentCardUtil.buildAgentInterface(instance);
// Then
assertNotNull(result);
assertEquals("http://127.0.0.1:8080/agent", result.getUrl());
assertEquals("JSONRPC", result.getTransport());
}
@Test
void testBuildAgentInterfaceWithoutPath() {
// Given
Instance instance = new Instance();
instance.setIp("127.0.0.1");
instance.setPort(8080);
Map<String, String> metadata = new HashMap<>();
metadata.put(Constants.A2A.NACOS_AGENT_ENDPOINT_SUPPORT_TLS, "false");
metadata.put(Constants.A2A.AGENT_ENDPOINT_TRANSPORT_KEY, "JSONRPC");
instance.setMetadata(metadata);
// When
AgentInterface result = AgentCardUtil.buildAgentInterface(instance);
// Then
assertNotNull(result);
assertEquals("http://127.0.0.1:8080", result.getUrl());
assertEquals("JSONRPC", result.getTransport());
}
@Test
void testBuildAgentInterfaceWithPathWithoutLeadingSlash() {
// Given
Instance instance = new Instance();
instance.setIp("127.0.0.1");
instance.setPort(8080);
Map<String, String> metadata = new HashMap<>();
metadata.put(Constants.A2A.NACOS_AGENT_ENDPOINT_SUPPORT_TLS, "false");
metadata.put(Constants.A2A.AGENT_ENDPOINT_TRANSPORT_KEY, "JSONRPC");
metadata.put(Constants.A2A.AGENT_ENDPOINT_PATH_KEY, "agent");
instance.setMetadata(metadata);
// When
AgentInterface result = AgentCardUtil.buildAgentInterface(instance);
// Then
assertNotNull(result);
assertEquals("http://127.0.0.1:8080/agent", result.getUrl());
assertEquals("JSONRPC", result.getTransport());
}
@Test
void testBuildAgentInterfaceWithProtocolField() {
// Given
Instance instance = new Instance();
instance.setIp("127.0.0.1");
instance.setPort(8080);
Map<String, String> metadata = new HashMap<>();
metadata.put(Constants.A2A.NACOS_AGENT_ENDPOINT_SUPPORT_TLS, "false");
metadata.put(Constants.A2A.AGENT_ENDPOINT_TRANSPORT_KEY, "GRPC");
metadata.put(Constants.A2A.AGENT_ENDPOINT_PATH_KEY, "/agent");
metadata.put(Constants.A2A.NACOS_AGENT_ENDPOINT_PROTOCOL_KEY, "grpc");
instance.setMetadata(metadata);
// When
AgentInterface result = AgentCardUtil.buildAgentInterface(instance);
// Then
assertNotNull(result);
assertEquals("grpc://127.0.0.1:8080/agent", result.getUrl());
assertEquals("GRPC", result.getTransport());
}
@Test
void testBuildAgentInterfaceWithQueryField() {
// Given
Instance instance = new Instance();
instance.setIp("127.0.0.1");
instance.setPort(8080);
Map<String, String> metadata = new HashMap<>();
metadata.put(Constants.A2A.NACOS_AGENT_ENDPOINT_SUPPORT_TLS, "false");
metadata.put(Constants.A2A.AGENT_ENDPOINT_TRANSPORT_KEY, "JSONRPC");
metadata.put(Constants.A2A.AGENT_ENDPOINT_PATH_KEY, "/agent");
metadata.put(Constants.A2A.NACOS_AGENT_ENDPOINT_QUERY_KEY, "param1=value1¶m2=value2");
instance.setMetadata(metadata);
// When
AgentInterface result = AgentCardUtil.buildAgentInterface(instance);
// Then
assertNotNull(result);
assertEquals("http://127.0.0.1:8080/agent?param1=value1¶m2=value2", result.getUrl());
assertEquals("JSONRPC", result.getTransport());
}
@Test
void testBuildAgentInterfaceWithProtocolAndQueryFields() {
// Given
Instance instance = new Instance();
instance.setIp("127.0.0.1");
instance.setPort(8080);
Map<String, String> metadata = new HashMap<>();
metadata.put(Constants.A2A.NACOS_AGENT_ENDPOINT_SUPPORT_TLS, "true");
metadata.put(Constants.A2A.AGENT_ENDPOINT_TRANSPORT_KEY, "JSONRPC");
metadata.put(Constants.A2A.AGENT_ENDPOINT_PATH_KEY, "/agent");
metadata.put(Constants.A2A.NACOS_AGENT_ENDPOINT_PROTOCOL_KEY, "https");
metadata.put(Constants.A2A.NACOS_AGENT_ENDPOINT_QUERY_KEY, "token=abc123");
instance.setMetadata(metadata);
// When
AgentInterface result = AgentCardUtil.buildAgentInterface(instance);
// Then
assertNotNull(result);
assertEquals("https://127.0.0.1:8080/agent?token=abc123", result.getUrl());
assertEquals("JSONRPC", result.getTransport());
}
@Test
void testBuildAgentInterfaceWithEmptyMetadata() {
// Given
Instance instance = new Instance();
instance.setIp("127.0.0.1");
instance.setPort(8080);
instance.setMetadata(Collections.emptyMap());
// When
AgentInterface result = AgentCardUtil.buildAgentInterface(instance);
// Then
assertNotNull(result);
assertEquals("http://127.0.0.1:8080", result.getUrl());
assertEquals(null, result.getTransport());
}
@Test
void testGetCurrentTimeDoesNotThrow() {
// This test ensures the private method getCurrentTime works without exceptions
assertDoesNotThrow(() -> {
// We can't directly test the private method, but we can test methods that use it
AgentVersionDetail versionDetail = new AgentVersionDetail();
AgentCardUtil.updateUpdateTime(versionDetail);
assertNotNull(versionDetail.getUpdatedAt());
});
}
private AgentCard createTestAgentCard() {
AgentCard agentCard = new AgentCard();
agentCard.setProtocolVersion("1.0");
agentCard.setName("test-agent");
agentCard.setDescription("Test Agent");
agentCard.setVersion("1.0.0");
agentCard.setIconUrl("http://example.com/icon.png");
agentCard.setCapabilities(new AgentCapabilities());
agentCard.setSkills(Collections.emptyList());
agentCard.setUrl("http://example.com/agent");
agentCard.setPreferredTransport("JSONRPC");
agentCard.setAdditionalInterfaces(Collections.emptyList());
agentCard.setDocumentationUrl("http://example.com/docs");
agentCard.setDefaultInputModes(Collections.emptyList());
agentCard.setDefaultOutputModes(Collections.emptyList());
agentCard.setSupportsAuthenticatedExtendedCard(false);
return agentCard;
}
private void assertAgentCardEquals(AgentCard expected, AgentCard actual) {
assertEquals(expected.getProtocolVersion(), actual.getProtocolVersion());
assertEquals(expected.getName(), actual.getName());
assertEquals(expected.getDescription(), actual.getDescription());
assertEquals(expected.getVersion(), actual.getVersion());
assertEquals(expected.getIconUrl(), actual.getIconUrl());
assertEquals(expected.getUrl(), actual.getUrl());
assertEquals(expected.getPreferredTransport(), actual.getPreferredTransport());
assertEquals(expected.getDocumentationUrl(), actual.getDocumentationUrl());
}
} | AgentCardUtilTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/embeddable/naming/EmbeddedColumnNamingNestedTests.java | {
"start": 1076,
"end": 2486
} | class ____ {
/**
* Test use of {@code @EmbeddedColumnNaming} one 2 separate embedded attributes
*/
@Test
@ServiceRegistry
@DomainModel(annotatedClasses = {Address.class, Person.class})
@SessionFactory
void testNestedNamingPattern(SessionFactoryScope sfScope) {
final SessionFactoryImplementor sessionFactory = sfScope.getSessionFactory();
final MappingMetamodelImplementor mappingMetamodel = sessionFactory.getMappingMetamodel();
final EntityPersister persister = mappingMetamodel.getEntityDescriptor( Person.class );
final EmbeddedAttributeMapping homeAddressMapping = (EmbeddedAttributeMapping) persister.findAttributeMapping( "homeAddress" );
verifyColumnNames( homeAddressMapping, "home_" );
final EmbeddedAttributeMapping homeZipMapping = (EmbeddedAttributeMapping) homeAddressMapping.getEmbeddableTypeDescriptor().findAttributeMapping( "zip" );
verifyColumnNames( homeZipMapping, "home_zip" );
final EmbeddedAttributeMapping workAddressMapping = (EmbeddedAttributeMapping) persister.findAttributeMapping( "workAddress" );
verifyColumnNames( workAddressMapping, "work_" );
final EmbeddedAttributeMapping workZipMapping = (EmbeddedAttributeMapping) workAddressMapping.getEmbeddableTypeDescriptor().findAttributeMapping( "zip" );
verifyColumnNames( workZipMapping, "work_zip" );
}
@Entity(name="Person")
@Table(name="person")
public static | EmbeddedColumnNamingNestedTests |
java | spring-projects__spring-boot | module/spring-boot-cloudfoundry/src/main/java/org/springframework/boot/cloudfoundry/autoconfigure/actuate/endpoint/servlet/SkipSslVerificationHttpRequestFactory.java | {
"start": 2389,
"end": 2742
} | class ____ implements X509TrustManager {
@Override
public X509Certificate[] getAcceptedIssuers() {
return new X509Certificate[0];
}
@Override
public void checkClientTrusted(X509Certificate[] chain, String authType) {
}
@Override
public void checkServerTrusted(X509Certificate[] chain, String authType) {
}
}
}
| SkipX509TrustManager |
java | apache__spark | common/kvstore/src/main/java/org/apache/spark/util/kvstore/KVStoreView.java | {
"start": 1520,
"end": 3505
} | class ____<T> implements Iterable<T> {
boolean ascending = true;
String index = KVIndex.NATURAL_INDEX_NAME;
Object first = null;
Object last = null;
Object parent = null;
long skip = 0L;
long max = Long.MAX_VALUE;
/**
* Reverses the order of iteration. By default, iterates in ascending order.
*/
public KVStoreView<T> reverse() {
ascending = !ascending;
return this;
}
/**
* Iterates according to the given index.
*/
public KVStoreView<T> index(String name) {
this.index = Objects.requireNonNull(name);
return this;
}
/**
* Defines the value of the parent index when iterating over a child index. Only elements that
* match the parent index's value will be included in the iteration.
*
* <p>
* Required for iterating over child indices, will generate an error if iterating over a
* parent-less index.
* </p>
*/
public KVStoreView<T> parent(Object value) {
this.parent = value;
return this;
}
/**
* Iterates starting at the given value of the chosen index (inclusive).
*/
public KVStoreView<T> first(Object value) {
this.first = value;
return this;
}
/**
* Stops iteration at the given value of the chosen index (inclusive).
*/
public KVStoreView<T> last(Object value) {
this.last = value;
return this;
}
/**
* Stops iteration after a number of elements has been retrieved.
*/
public KVStoreView<T> max(long max) {
JavaUtils.checkArgument(max > 0L, "max must be positive.");
this.max = max;
return this;
}
/**
* Skips a number of elements at the start of iteration. Skipped elements are not accounted
* when using {@link #max(long)}.
*/
public KVStoreView<T> skip(long n) {
this.skip = n;
return this;
}
/**
* Returns an iterator for the current configuration.
*/
public KVStoreIterator<T> closeableIterator() throws Exception {
return (KVStoreIterator<T>) iterator();
}
}
| KVStoreView |
java | netty__netty | resolver-dns/src/main/java/io/netty/resolver/dns/DnsResolveContext.java | {
"start": 33804,
"end": 54639
} | class ____ extends AbstractList<InetSocketAddress> {
private final DnsServerAddressStream duplicate;
private List<InetSocketAddress> addresses;
DnsAddressStreamList(DnsServerAddressStream stream) {
duplicate = stream.duplicate();
}
@Override
public InetSocketAddress get(int index) {
if (addresses == null) {
DnsServerAddressStream stream = duplicate.duplicate();
addresses = new ArrayList<InetSocketAddress>(size());
for (int i = 0; i < stream.size(); i++) {
addresses.add(stream.next());
}
}
return addresses.get(index);
}
@Override
public int size() {
return duplicate.size();
}
@Override
public Iterator<InetSocketAddress> iterator() {
return new Iterator<InetSocketAddress>() {
private final DnsServerAddressStream stream = duplicate.duplicate();
private int i;
@Override
public boolean hasNext() {
return i < stream.size();
}
@Override
public InetSocketAddress next() {
if (!hasNext()) {
throw new NoSuchElementException();
}
i++;
return stream.next();
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
}
}
/**
* Returns the {@code {@link AuthoritativeNameServerList} which were included in {@link DnsSection#AUTHORITY}
* or {@code null} if non are found.
*/
private static AuthoritativeNameServerList extractAuthoritativeNameServers(String questionName, DnsResponse res) {
int authorityCount = res.count(DnsSection.AUTHORITY);
if (authorityCount == 0) {
return null;
}
AuthoritativeNameServerList serverNames = new AuthoritativeNameServerList(questionName);
for (int i = 0; i < authorityCount; i++) {
serverNames.add(res.recordAt(DnsSection.AUTHORITY, i));
}
return serverNames.isEmpty() ? null : serverNames;
}
private void onExpectedResponse(
DnsQuestion question, AddressedEnvelope<DnsResponse, InetSocketAddress> envelope,
final DnsQueryLifecycleObserver queryLifecycleObserver, Promise<List<T>> promise) {
// We often get a bunch of CNAMES as well when we asked for A/AAAA.
final DnsResponse response = envelope.content();
final Map<String, String> cnames = buildAliasMap(response, cnameCache(), parent.executor());
final int answerCount = response.count(DnsSection.ANSWER);
boolean found = false;
boolean completeEarly = this.completeEarly;
boolean cnameNeedsFollow = !cnames.isEmpty();
for (int i = 0; i < answerCount; i ++) {
final DnsRecord r = response.recordAt(DnsSection.ANSWER, i);
final DnsRecordType type = r.type();
boolean matches = false;
for (DnsRecordType expectedType : expectedTypes) {
if (type == expectedType) {
matches = true;
break;
}
}
if (!matches) {
continue;
}
final String questionName = question.name().toLowerCase(Locale.US);
final String recordName = r.name().toLowerCase(Locale.US);
// Make sure the record is for the questioned domain.
if (!recordName.equals(questionName)) {
Map<String, String> cnamesCopy = new HashMap<String, String>(cnames);
// Even if the record's name is not exactly same, it might be an alias defined in the CNAME records.
String resolved = questionName;
do {
resolved = cnamesCopy.remove(resolved);
if (recordName.equals(resolved)) {
// We followed a CNAME chain that was part of the response without any extra queries.
cnameNeedsFollow = false;
break;
}
} while (resolved != null);
if (resolved == null) {
assert questionName.isEmpty() || questionName.charAt(questionName.length() - 1) == '.';
for (String searchDomain : parent.searchDomains()) {
if (searchDomain.isEmpty()) {
continue;
}
final String fqdn;
if (searchDomain.charAt(searchDomain.length() - 1) == '.') {
fqdn = questionName + searchDomain;
} else {
fqdn = questionName + searchDomain + '.';
}
if (recordName.equals(fqdn)) {
resolved = recordName;
break;
}
}
if (resolved == null) {
if (logger.isDebugEnabled()) {
logger.debug("{} Ignoring record {} for [{}: {}] as it contains a different name than " +
"the question name [{}]. Cnames: {}, Search domains: {}",
channel, r.toString(), response.id(), envelope.sender(),
questionName, cnames, parent.searchDomains());
}
continue;
}
}
}
final T converted = convertRecord(r, hostname, additionals, parent.executor());
if (converted == null) {
if (logger.isDebugEnabled()) {
logger.debug("{} Ignoring record {} for [{}: {}] as the converted record is null. "
+ "Hostname [{}], Additionals: {}",
channel, r.toString(), response.id(),
envelope.sender(), hostname, additionals);
}
continue;
}
boolean shouldRelease = false;
// Check if we did determine we wanted to complete early before. If this is the case we want to not
// include the result
if (!completeEarly) {
completeEarly = isCompleteEarly(converted);
}
// Check if the promise was done already, and only if not add things to the finalResult. Otherwise lets
// just release things after we cached it.
if (!promise.isDone()) {
// We want to ensure we do not have duplicates in finalResult as this may be unexpected.
//
// While using a LinkedHashSet or HashSet may sound like the perfect fit for this we will use an
// ArrayList here as duplicates should be found quite unfrequently in the wild and we dont want to pay
// for the extra memory copy and allocations in this cases later on.
if (finalResult == null) {
finalResult = new ArrayList<T>(8);
finalResult.add(converted);
} else if (isDuplicateAllowed() || !finalResult.contains(converted)) {
finalResult.add(converted);
} else {
shouldRelease = true;
}
} else {
shouldRelease = true;
}
cache(hostname, additionals, r, converted);
found = true;
if (shouldRelease) {
ReferenceCountUtil.release(converted);
}
// Note that we do not break from the loop here, so we decode/cache all A/AAAA records.
}
if (found && !cnameNeedsFollow) {
// If we found the correct result we can just stop here without following any extra CNAME records in the
// response.
if (completeEarly) {
this.completeEarly = true;
}
queryLifecycleObserver.querySucceed();
} else if (cnames.isEmpty()) {
queryLifecycleObserver.queryFailed(NO_MATCHING_RECORD_QUERY_FAILED_EXCEPTION);
} else {
queryLifecycleObserver.querySucceed();
// We also got a CNAME so we need to ensure we also query it.
onResponseCNAME(question, cnames, newDnsQueryLifecycleObserver(question), promise);
}
}
private void onResponseCNAME(
DnsQuestion question, Map<String, String> cnames,
final DnsQueryLifecycleObserver queryLifecycleObserver,
Promise<List<T>> promise) {
// Resolve the host name in the question into the real host name.
String resolved = question.name().toLowerCase(Locale.US);
boolean found = false;
while (!cnames.isEmpty()) { // Do not attempt to call Map.remove() when the Map is empty
// because it can be Collections.emptyMap()
// whose remove() throws a UnsupportedOperationException.
final String next = cnames.remove(resolved);
if (next != null) {
found = true;
resolved = next;
} else {
break;
}
}
if (found) {
followCname(question, resolved, queryLifecycleObserver, promise);
} else {
queryLifecycleObserver.queryFailed(CNAME_NOT_FOUND_QUERY_FAILED_EXCEPTION);
}
}
private static Map<String, String> buildAliasMap(DnsResponse response, DnsCnameCache cache, EventLoop loop) {
final int answerCount = response.count(DnsSection.ANSWER);
Map<String, String> cnames = null;
for (int i = 0; i < answerCount; i ++) {
final DnsRecord r = response.recordAt(DnsSection.ANSWER, i);
final DnsRecordType type = r.type();
if (type != DnsRecordType.CNAME) {
continue;
}
if (!(r instanceof DnsRawRecord)) {
continue;
}
final ByteBuf recordContent = ((ByteBufHolder) r).content();
final String domainName = decodeDomainName(recordContent);
if (domainName == null) {
continue;
}
if (cnames == null) {
cnames = new HashMap<String, String>(min(8, answerCount));
}
String name = r.name().toLowerCase(Locale.US);
String mapping = domainName.toLowerCase(Locale.US);
// Cache the CNAME as well.
String nameWithDot = hostnameWithDot(name);
String mappingWithDot = hostnameWithDot(mapping);
if (!nameWithDot.equalsIgnoreCase(mappingWithDot)) {
cache.cache(nameWithDot, mappingWithDot, r.timeToLive(), loop);
cnames.put(name, mapping);
}
}
return cnames != null? cnames : Collections.<String, String>emptyMap();
}
private void tryToFinishResolve(final DnsServerAddressStream nameServerAddrStream,
final int nameServerAddrStreamIndex,
final DnsQuestion question,
final DnsQueryLifecycleObserver queryLifecycleObserver,
final Promise<List<T>> promise,
final Throwable cause) {
// There are no queries left to try.
if (!completeEarly && !queriesInProgress.isEmpty()) {
queryLifecycleObserver.queryCancelled(allowedQueries);
// There are still some queries in process, we will try to notify once the next one finishes until
// all are finished.
return;
}
// There are no queries left to try.
if (finalResult == null) {
if (nameServerAddrStreamIndex < nameServerAddrStream.size()) {
if (queryLifecycleObserver == NoopDnsQueryLifecycleObserver.INSTANCE) {
// If the queryLifecycleObserver has already been terminated we should create a new one for this
// fresh query.
query(nameServerAddrStream, nameServerAddrStreamIndex + 1, question,
newDnsQueryLifecycleObserver(question), true, promise, cause);
} else {
query(nameServerAddrStream, nameServerAddrStreamIndex + 1, question, queryLifecycleObserver,
true, promise, cause);
}
return;
}
queryLifecycleObserver.queryFailed(NAME_SERVERS_EXHAUSTED_EXCEPTION);
// .. and we could not find any expected records.
// The following is of questionable benefit, but has been around for a while that
// it may be risky to remove. Reference https://datatracker.ietf.org/doc/html/rfc8020
// - If we receive NXDOMAIN we know the domain doesn't exist, any other lookup type is meaningless.
// - If we receive SERVFAIL, and we attempt a CNAME that returns NOERROR with 0 answers, it may lead the
// call-site to invalidate previously advertised addresses.
// Having said that, there is the case of DNS services that don't respect the protocol either
// - An A lookup could result in NXDOMAIN but a CNAME may succeed with answers.
// It's an imperfect world. Accept it.
// Guarding it with a system property, as an opt-in functionality.
if (TRY_FINAL_CNAME_ON_ADDRESS_LOOKUPS) {
// If cause != null we know this was caused by a timeout / cancel / transport exception. In this case we
// won't try to resolve the CNAME as we only should do this if we could not get the expected records
// because they do not exist and the DNS server did probably signal it.
final boolean isValidResponse =
cause == NXDOMAIN_CAUSE_QUERY_FAILED_EXCEPTION || cause == SERVFAIL_QUERY_FAILED_EXCEPTION;
if ((cause == null || isValidResponse) && !triedCNAME &&
(question.type() == DnsRecordType.A || question.type() == DnsRecordType.AAAA)) {
// As the last resort, try to query CNAME, just in case the name server has it.
triedCNAME = true;
query(hostname, DnsRecordType.CNAME, getNameServers(hostname), true, promise);
return;
}
}
} else {
queryLifecycleObserver.queryCancelled(allowedQueries);
}
// We have at least one resolved record or tried CNAME as the last resort.
finishResolve(promise, cause);
}
private void finishResolve(Promise<List<T>> promise, Throwable cause) {
// If completeEarly was true we still want to continue processing the queries to ensure we still put everything
// in the cache eventually.
if (!completeEarly && !queriesInProgress.isEmpty()) {
// If there are queries in progress, we should cancel it because we already finished the resolution.
for (Iterator<Future<AddressedEnvelope<DnsResponse, InetSocketAddress>>> i = queriesInProgress.iterator();
i.hasNext();) {
Future<AddressedEnvelope<DnsResponse, InetSocketAddress>> f = i.next();
i.remove();
f.cancel(false);
}
}
if (finalResult != null) {
if (!promise.isDone()) {
// Found at least one resolved record.
final List<T> result = filterResults(finalResult);
// Lets replace the previous stored result.
finalResult = Collections.emptyList();
if (!DnsNameResolver.trySuccess(promise, result)) {
for (T item : result) {
ReferenceCountUtil.safeRelease(item);
}
}
} else {
// This should always be the case as we replaced the list once notify the promise with an empty one
// and never add to it again.
assert finalResult.isEmpty();
}
return;
}
// No resolved address found.
final int maxAllowedQueries = parent.maxQueriesPerResolve();
final int tries = maxAllowedQueries - allowedQueries;
final StringBuilder buf = new StringBuilder(64);
buf.append("Failed to resolve '").append(hostname).append("' ").append(Arrays.toString(expectedTypes));
if (tries > 1) {
if (tries < maxAllowedQueries) {
buf.append(" after ")
.append(tries)
.append(" queries ");
} else {
buf.append(". Exceeded max queries per resolve ")
.append(maxAllowedQueries)
.append(' ');
}
}
final UnknownHostException unknownHostException = new UnknownHostException(buf.toString());
if (cause == null) {
// Only cache if the failure was not because of an IO error / timeout that was caused by the query
// itself.
cache(hostname, additionals, unknownHostException);
} else {
unknownHostException.initCause(cause);
}
promise.tryFailure(unknownHostException);
}
static String decodeDomainName(ByteBuf in) {
in.markReaderIndex();
try {
return DefaultDnsRecordDecoder.decodeName(in);
} catch (CorruptedFrameException e) {
// In this case we just return null.
return null;
} finally {
in.resetReaderIndex();
}
}
private DnsServerAddressStream getNameServers(String name) {
DnsServerAddressStream stream = getNameServersFromCache(name);
if (stream == null) {
// We need to obtain a new stream from the parent DnsNameResolver if the hostname is not the same as
// for the original query (for example we may follow CNAMEs). Otherwise let's just duplicate the
// original nameservers so we correctly update the internal index
if (name.equals(hostname)) {
return nameServerAddrs.duplicate();
}
return parent.newNameServerAddressStream(name);
}
return stream;
}
private void followCname(DnsQuestion question, String cname, DnsQueryLifecycleObserver queryLifecycleObserver,
Promise<List<T>> promise) {
final DnsQuestion cnameQuestion;
final DnsServerAddressStream stream;
try {
cname = cnameResolveFromCache(cnameCache(), cname);
stream = getNameServers(cname);
cnameQuestion = new DefaultDnsQuestion(cname, question.type(), dnsClass);
} catch (Throwable cause) {
queryLifecycleObserver.queryFailed(cause);
PlatformDependent.throwException(cause);
return;
}
query(stream, 0, cnameQuestion, queryLifecycleObserver.queryCNAMEd(cnameQuestion),
true, promise, null);
}
private boolean query(String hostname, DnsRecordType type, DnsServerAddressStream dnsServerAddressStream,
boolean flush, Promise<List<T>> promise) {
final DnsQuestion question;
try {
question = new DefaultDnsQuestion(hostname, type, dnsClass);
} catch (Throwable cause) {
// Assume a single failure means that queries will succeed. If the hostname is invalid for one type
// there is no case where it is known to be valid for another type.
promise.tryFailure(new IllegalArgumentException("Unable to create DNS Question for: [" + hostname + ", " +
type + ']', cause));
return false;
}
query(dnsServerAddressStream, 0, question, newDnsQueryLifecycleObserver(question), flush, promise, null);
return true;
}
private DnsQueryLifecycleObserver newDnsQueryLifecycleObserver(DnsQuestion question) {
return parent.dnsQueryLifecycleObserverFactory().newDnsQueryLifecycleObserver(question);
}
private final | DnsAddressStreamList |
java | spring-projects__spring-security | oauth2/oauth2-resource-server/src/test/java/org/springframework/security/oauth2/server/resource/authentication/JwtBearerTokenAuthenticationConverterTests.java | {
"start": 1226,
"end": 3346
} | class ____ {
private final JwtBearerTokenAuthenticationConverter converter = new JwtBearerTokenAuthenticationConverter();
@Test
public void convertWhenJwtThenBearerTokenAuthentication() {
// @formatter:off
Jwt jwt = Jwt.withTokenValue("token-value")
.claim("claim", "value")
.header("header", "value")
.build();
// @formatter:on
AbstractAuthenticationToken token = this.converter.convert(jwt);
assertThat(token).isInstanceOf(BearerTokenAuthentication.class);
BearerTokenAuthentication bearerToken = (BearerTokenAuthentication) token;
assertThat(bearerToken.getToken().getTokenValue()).isEqualTo("token-value");
assertThat(bearerToken.getTokenAttributes()).containsOnlyKeys("claim");
assertThat(bearerToken.getAuthorities()).noneMatch(isScope());
}
@Test
public void convertWhenJwtWithScopeAttributeThenBearerTokenAuthentication() {
// @formatter:off
Jwt jwt = Jwt.withTokenValue("token-value")
.claim("scope", "message:read message:write")
.header("header", "value")
.build();
// @formatter:on
AbstractAuthenticationToken token = this.converter.convert(jwt);
assertThat(token).isInstanceOf(BearerTokenAuthentication.class);
BearerTokenAuthentication bearerToken = (BearerTokenAuthentication) token;
SecurityAssertions.assertThat(bearerToken).hasAuthorities("SCOPE_message:read", "SCOPE_message:write");
}
@Test
public void convertWhenJwtWithScpAttributeThenBearerTokenAuthentication() {
// @formatter:off
Jwt jwt = Jwt.withTokenValue("token-value")
.claim("scp", Arrays.asList("message:read", "message:write"))
.header("header", "value")
.build();
// @formatter:on
AbstractAuthenticationToken token = this.converter.convert(jwt);
assertThat(token).isInstanceOf(BearerTokenAuthentication.class);
BearerTokenAuthentication bearerToken = (BearerTokenAuthentication) token;
SecurityAssertions.assertThat(bearerToken).hasAuthorities("SCOPE_message:read", "SCOPE_message:write");
}
static Predicate<GrantedAuthority> isScope() {
return (a) -> a.getAuthority().startsWith("SCOPE_");
}
}
| JwtBearerTokenAuthenticationConverterTests |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/metamodel/model/domain/internal/JpaMetamodelImpl.java | {
"start": 15477,
"end": 16566
} | class ____ - add a marker entry so we do not keep trying later
// note that ConcurrentHashMap does not support null value so a marker entry is needed
// [HHH-14948] But only add it if the cache size isn't getting too large, as in some use cases
// the queries are dynamically generated and this cache could lead to memory leaks when left unbounded.
if ( knownInvalidnameToImportMap.size() < 1_000 ) {
//TODO this collection might benefit from a LRU eviction algorithm,
//we currently have no evidence for this need but this could be explored further.
//To consider that we don't have a hard dependency on a cache implementation providing LRU semantics.
//Alternatively - even better - would be to precompute all possible valid options and
//store them in nameToImportMap on bootstrap: if that can be filled with all (comprehensive)
//valid values, then there is no need for ever bothering the classloader.
knownInvalidnameToImportMap.put( name, name );
}
return null;
}
else {
// it is a fully-qualified | name |
java | spring-projects__spring-framework | spring-websocket/src/test/java/org/springframework/web/socket/messaging/StompWebSocketIntegrationTests.java | {
"start": 13595,
"end": 13828
} | class ____ implements ScopedBean {
private final String value;
public ScopedBeanImpl(String value) {
this.value = value;
}
@Override
public String getValue() {
return this.value;
}
}
private static | ScopedBeanImpl |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/monitor/jvm/JvmGcMonitorServiceSettingsTests.java | {
"start": 1216,
"end": 9853
} | class ____ extends ESTestCase {
public void testEmptySettingsAreOkay() throws InterruptedException {
AtomicBoolean scheduled = new AtomicBoolean();
execute(Settings.EMPTY, (command, interval, executor) -> {
scheduled.set(true);
return new MockCancellable();
}, () -> assertTrue(scheduled.get()));
}
public void testDisabledSetting() throws InterruptedException {
Settings settings = Settings.builder().put("monitor.jvm.gc.enabled", "false").build();
AtomicBoolean scheduled = new AtomicBoolean();
execute(settings, (command, interval, executor) -> {
scheduled.set(true);
return new MockCancellable();
}, () -> assertFalse(scheduled.get()));
}
public void testNegativeSetting() throws InterruptedException {
String collector = randomAlphaOfLength(5);
final String timeValue = "-" + randomTimeValue(2, 1000).getStringRep(); // -1 is handled separately
Settings settings = Settings.builder().put("monitor.jvm.gc.collector." + collector + ".warn", timeValue).build();
execute(settings, (command, interval, name) -> null, e -> {
assertThat(e, instanceOf(IllegalArgumentException.class));
assertThat(
e.getMessage(),
equalTo(
"failed to parse setting [monitor.jvm.gc.collector."
+ collector
+ ".warn] "
+ "with value ["
+ timeValue
+ "] as a time value"
)
);
}, true, null);
}
public void testNegativeOneSetting() throws InterruptedException {
String collector = randomAlphaOfLength(5);
final String timeValue = "-1" + randomFrom("", "d", "h", "m", "s", "ms", "nanos");
Settings settings = Settings.builder().put("monitor.jvm.gc.collector." + collector + ".warn", timeValue).build();
execute(settings, (command, interval, name) -> null, e -> {
assertThat(e, instanceOf(IllegalArgumentException.class));
assertThat(
e.getMessage(),
equalTo(
"invalid gc_threshold [monitor.jvm.gc.collector."
+ collector
+ ".warn] "
+ "value ["
+ timeValue
+ "]: value cannot be negative"
)
);
}, true, null);
}
public void testMissingSetting() throws InterruptedException {
String collector = randomAlphaOfLength(5);
Set<Tuple<String, String>> entries = new HashSet<>();
entries.add(Tuple.tuple("monitor.jvm.gc.collector." + collector + ".warn", randomPositiveTimeValue().getStringRep()));
entries.add(Tuple.tuple("monitor.jvm.gc.collector." + collector + ".info", randomPositiveTimeValue().getStringRep()));
entries.add(Tuple.tuple("monitor.jvm.gc.collector." + collector + ".debug", randomPositiveTimeValue().getStringRep()));
Settings.Builder builder = Settings.builder();
// drop a random setting or two
for (final var entry : randomSubsetOf(randomIntBetween(1, 2), entries)) {
builder.put(entry.v1(), entry.v2());
}
// we should get an exception that a setting is missing
execute(builder.build(), (command, interval, name) -> null, e -> {
assertThat(e, instanceOf(IllegalArgumentException.class));
assertThat(e.getMessage(), containsString("missing gc_threshold for [monitor.jvm.gc.collector." + collector + "."));
}, true, null);
}
public void testIllegalOverheadSettings() throws InterruptedException {
for (final String threshold : new String[] { "warn", "info", "debug" }) {
final Settings.Builder builder = Settings.builder();
builder.put("monitor.jvm.gc.overhead." + threshold, randomIntBetween(Integer.MIN_VALUE, -1));
execute(builder.build(), (command, interval, name) -> null, e -> {
assertThat(e, instanceOf(IllegalArgumentException.class));
assertThat(e.getMessage(), containsString("setting [monitor.jvm.gc.overhead." + threshold + "] must be >= 0"));
}, true, null);
}
for (final String threshold : new String[] { "warn", "info", "debug" }) {
final Settings.Builder builder = Settings.builder();
builder.put("monitor.jvm.gc.overhead." + threshold, randomIntBetween(100 + 1, Integer.MAX_VALUE));
execute(builder.build(), (command, interval, name) -> null, e -> {
assertThat(e, instanceOf(IllegalArgumentException.class));
assertThat(e.getMessage(), containsString("setting [monitor.jvm.gc.overhead." + threshold + "] must be <= 100"));
}, true, null);
}
final Settings.Builder infoWarnOutOfOrderBuilder = Settings.builder();
final int info = randomIntBetween(2, 98);
infoWarnOutOfOrderBuilder.put("monitor.jvm.gc.overhead.info", info);
final int warn = randomIntBetween(1, info - 1);
infoWarnOutOfOrderBuilder.put("monitor.jvm.gc.overhead.warn", warn);
execute(infoWarnOutOfOrderBuilder.build(), (command, interval, name) -> null, e -> {
assertThat(e, instanceOf(IllegalArgumentException.class));
assertThat(
e.getMessage(),
containsString(
"[monitor.jvm.gc.overhead.warn] must be greater than "
+ "[monitor.jvm.gc.overhead.info] ["
+ info
+ "] but was ["
+ warn
+ "]"
)
);
}, true, null);
final Settings.Builder debugInfoOutOfOrderBuilder = Settings.builder();
debugInfoOutOfOrderBuilder.put("monitor.jvm.gc.overhead.info", info);
final int debug = randomIntBetween(info + 1, 99);
debugInfoOutOfOrderBuilder.put("monitor.jvm.gc.overhead.debug", debug);
debugInfoOutOfOrderBuilder.put("monitor.jvm.gc.overhead.warn", randomIntBetween(debug + 1, 100)); // or the test will fail for the
// wrong reason
execute(debugInfoOutOfOrderBuilder.build(), (command, interval, name) -> null, e -> {
assertThat(e, instanceOf(IllegalArgumentException.class));
assertThat(
e.getMessage(),
containsString(
"[monitor.jvm.gc.overhead.info] must be greater than "
+ "[monitor.jvm.gc.overhead.debug] ["
+ debug
+ "] but was ["
+ info
+ "]"
)
);
}, true, null);
}
private static void execute(Settings settings, TriFunction<Runnable, TimeValue, Executor, Cancellable> scheduler, Runnable asserts)
throws InterruptedException {
execute(settings, scheduler, null, false, asserts);
}
private static void execute(
Settings settings,
TriFunction<Runnable, TimeValue, Executor, Cancellable> scheduler,
Consumer<Throwable> consumer,
boolean constructionShouldFail,
Runnable asserts
) throws InterruptedException {
assert constructionShouldFail == (consumer != null);
assert constructionShouldFail == (asserts == null);
ThreadPool threadPool = null;
try {
threadPool = new TestThreadPool(JvmGcMonitorServiceSettingsTests.class.getCanonicalName()) {
@Override
public Cancellable scheduleWithFixedDelay(Runnable command, TimeValue interval, Executor executor) {
return scheduler.apply(command, interval, executor);
}
};
try {
JvmGcMonitorService service = new JvmGcMonitorService(settings, threadPool);
if (constructionShouldFail) {
fail("construction of jvm gc service should have failed");
}
service.doStart();
asserts.run();
service.doStop();
} catch (Exception t) {
consumer.accept(t);
}
} finally {
ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS);
}
}
| JvmGcMonitorServiceSettingsTests |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/iterable/IterableAssert_map_Test.java | {
"start": 1211,
"end": 2825
} | class ____ {
private final List<TolkienCharacter> hobbits = new ArrayList<>();
@BeforeEach
void setUp() {
hobbits.add(TolkienCharacter.of("Frodo", 33, HOBBIT));
hobbits.add(TolkienCharacter.of("Sam", 38, HOBBIT));
hobbits.add(TolkienCharacter.of("Pippin", 28, HOBBIT));
}
@Test
void should_allow_assertions_on_values_extracted_by_given_using_function() {
then(hobbits).map(TolkienCharacter::getName)
.containsExactly("Frodo", "Sam", "Pippin");
}
@Test
void should_allow_assertions_on_tuple_values_extracted_by_given_throwing_extractors() {
then(hobbits).map(TolkienCharacter::getName, TolkienCharacter::getRace)
.containsExactly(tuple("Frodo", HOBBIT),
tuple("Sam", HOBBIT),
tuple("Pippin", HOBBIT));
}
@Test
void should_throw_assertion_error_if_actual_is_null() {
// GIVEN
List<TolkienCharacter> elves = null;
// WHEN
var assertionError = expectAssertionError(() -> assertThat(elves).map(TolkienCharacter::getName));
// THEN
then(assertionError).hasMessage(actualIsNull());
}
@Test
void should_throw_assertion_error_if_actual_is_null_when_passing_multiple_functions() {
// GIVEN
List<TolkienCharacter> elves = null;
// WHEN
var assertionError = expectAssertionError(() -> assertThat(elves).map(TolkienCharacter::getName,
TolkienCharacter::getRace));
// THEN
then(assertionError).hasMessage(actualIsNull());
}
}
| IterableAssert_map_Test |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/formula/ManyToManyNotIgnoreLazyFetchingTest.java | {
"start": 3216,
"end": 3904
} | class ____ implements Serializable {
@Id
@Column(name = "ID")
private Long id;
@ManyToMany
@NotFound(action = NotFoundAction.IGNORE)
@JoinTable(name = "STOCK_BY_CODE",
joinColumns = @JoinColumn(name = "STOCK_ID", referencedColumnName = "ID"),
inverseJoinColumns = {
@JoinColumn(name = "CODE_ID", referencedColumnName = "ID"),
@JoinColumn(name = "CODE_TYPE", referencedColumnName = "TYPE")
})
private List<StockCode> codes = new ArrayList<>();
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public List<StockCode> getCodes() {
return codes;
}
}
@Entity(name = "StockCode")
public static | Stock |
java | quarkusio__quarkus | integration-tests/smallrye-config/src/main/java/io/quarkus/it/smallrye/config/ConfigurableExceptionMapperResource.java | {
"start": 255,
"end": 430
} | class ____ {
@GET
public Response get() {
throw new ConfigurableExceptionMapper.ConfigurableExceptionMapperException();
}
}
| ConfigurableExceptionMapperResource |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/criteria/size/CriteriaSelectSizeCollectionTest.java | {
"start": 3067,
"end": 3501
} | class ____ implements java.io.Serializable {
@Id
private String id;
private String alias;
public Alias() {
}
public Alias(String id, String alias) {
this.id = id;
this.alias = alias;
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getAlias() {
return alias;
}
public void setAlias(String alias) {
this.alias = alias;
}
}
}
| Alias |
java | apache__kafka | server-common/src/main/java/org/apache/kafka/server/immutable/pcollections/PCollectionsImmutableMap.java | {
"start": 1238,
"end": 7003
} | class ____<K, V> implements ImmutableMap<K, V> {
private final HashPMap<K, V> underlying;
/**
* @return a wrapped hash-based persistent map that is empty
* @param <K> the key type
* @param <V> the value type
*/
public static <K, V> PCollectionsImmutableMap<K, V> empty() {
return new PCollectionsImmutableMap<>(HashTreePMap.empty());
}
/**
* @param key the key
* @param value the value
* @return a wrapped hash-based persistent map that has a single mapping
* @param <K> the key type
* @param <V> the value type
*/
public static <K, V> PCollectionsImmutableMap<K, V> singleton(K key, V value) {
return new PCollectionsImmutableMap<>(HashTreePMap.singleton(key, value));
}
public PCollectionsImmutableMap(HashPMap<K, V> map) {
this.underlying = Objects.requireNonNull(map);
}
@Override
public ImmutableMap<K, V> updated(K key, V value) {
return new PCollectionsImmutableMap<>(underlying().plus(key, value));
}
@Override
public ImmutableMap<K, V> removed(K key) {
return new PCollectionsImmutableMap<>(underlying().minus(key));
}
@Override
public int size() {
return underlying().size();
}
@Override
public boolean isEmpty() {
return underlying().isEmpty();
}
@Override
public boolean containsKey(Object key) {
return underlying().containsKey(key);
}
@Override
public boolean containsValue(Object value) {
return underlying().containsValue(value);
}
@Override
public V get(Object key) {
return underlying().get(key);
}
@Override
public V put(K key, V value) {
// will throw UnsupportedOperationException; delegate anyway for testability
return underlying().put(key, value);
}
@Override
public V remove(Object key) {
// will throw UnsupportedOperationException; delegate anyway for testability
return underlying().remove(key);
}
@Override
public void putAll(Map<? extends K, ? extends V> m) {
// will throw UnsupportedOperationException; delegate anyway for testability
underlying().putAll(m);
}
@Override
public void clear() {
// will throw UnsupportedOperationException; delegate anyway for testability
underlying().clear();
}
@Override
public Set<K> keySet() {
return underlying().keySet();
}
@Override
public Collection<V> values() {
return underlying().values();
}
@Override
public Set<Entry<K, V>> entrySet() {
return underlying().entrySet();
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
PCollectionsImmutableMap<?, ?> that = (PCollectionsImmutableMap<?, ?>) o;
return underlying().equals(that.underlying());
}
@Override
public int hashCode() {
return underlying().hashCode();
}
@Override
public V getOrDefault(Object key, V defaultValue) {
return underlying().getOrDefault(key, defaultValue);
}
@Override
public void forEach(BiConsumer<? super K, ? super V> action) {
underlying().forEach(action);
}
@Override
public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
// will throw UnsupportedOperationException; delegate anyway for testability
underlying().replaceAll(function);
}
@Override
public V putIfAbsent(K key, V value) {
// will throw UnsupportedOperationException; delegate anyway for testability
return underlying().putIfAbsent(key, value);
}
@Override
public boolean remove(Object key, Object value) {
// will throw UnsupportedOperationException; delegate anyway for testability
return underlying().remove(key, value);
}
@Override
public boolean replace(K key, V oldValue, V newValue) {
// will throw UnsupportedOperationException; delegate anyway for testability
return underlying().replace(key, oldValue, newValue);
}
@Override
public V replace(K key, V value) {
// will throw UnsupportedOperationException; delegate anyway for testability
return underlying().replace(key, value);
}
@Override
public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) {
// will throw UnsupportedOperationException; delegate anyway for testability
return underlying().computeIfAbsent(key, mappingFunction);
}
@Override
public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
// will throw UnsupportedOperationException; delegate anyway for testability
return underlying().computeIfPresent(key, remappingFunction);
}
@Override
public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
// will throw UnsupportedOperationException; delegate anyway for testability
return underlying().compute(key, remappingFunction);
}
@Override
public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
// will throw UnsupportedOperationException; delegate anyway for testability
return underlying().merge(key, value, remappingFunction);
}
@Override
public String toString() {
return "PCollectionsImmutableMap{" +
"underlying=" + underlying() +
'}';
}
// package-private for testing
HashPMap<K, V> underlying() {
return underlying;
}
}
| PCollectionsImmutableMap |
java | apache__flink | flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/api/PlanReference.java | {
"start": 5624,
"end": 6624
} | class ____ extends PlanReference {
private final byte[] content;
private BytesContentPlanReference(byte[] content) {
this.content = content;
}
public byte[] getContent() {
return content;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
BytesContentPlanReference that = (BytesContentPlanReference) o;
return Arrays.equals(content, that.content);
}
@Override
public int hashCode() {
return Arrays.hashCode(content);
}
@Override
public String toString() {
return "Plan:\n" + Arrays.toString(content);
}
}
/** Plan reference to a file in the provided {@link ClassLoader}. */
@Experimental
public static | BytesContentPlanReference |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/referencedcolumnname/Postman.java | {
"start": 307,
"end": 702
} | class ____ implements Serializable {
private String name;
private String id;
public Postman() {
}
@Id
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public Postman(String name, String id) {
this.name = name;
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
| Postman |
java | apache__avro | lang/java/avro/src/test/java/org/apache/avro/ParseContextTest.java | {
"start": 1415,
"end": 4740
} | class ____ {
Schema fooRecord, fooRecordCopy, barEnum, bazFixed, mehRecord;
ParseContext fooBarBaz;
@BeforeEach
public void setUp() throws Exception {
fooRecord = SchemaBuilder.record("ns.Foo").fields().endRecord();
fooRecordCopy = SchemaBuilder.record("ns.Foo").fields().endRecord();
barEnum = SchemaBuilder.enumeration("ns.Bar").symbols();
bazFixed = SchemaBuilder.fixed("ns.Baz").size(8);
mehRecord = SchemaBuilder.record("ns.Meh").fields().endRecord();
fooBarBaz = new ParseContext();
fooBarBaz.put(fooRecord);
fooBarBaz.put(barEnum);
fooBarBaz.put(bazFixed);
}
@Test
public void checkNewNameContextContainsPrimitives() {
EnumSet<Schema.Type> complexTypes = EnumSet.of(Schema.Type.RECORD, Schema.Type.ENUM, Schema.Type.FIXED,
Schema.Type.UNION, Schema.Type.ARRAY, Schema.Type.MAP);
EnumSet<Schema.Type> primitives = EnumSet.complementOf(complexTypes);
ParseContext context = new ParseContext();
for (Schema.Type type : complexTypes) {
assertFalse(context.contains(type.getName()));
}
for (Schema.Type type : primitives) {
assertTrue(context.contains(type.getName()));
}
}
@Test
public void primitivesAreNotCached() {
EnumSet<Schema.Type> primitives = EnumSet.complementOf(EnumSet.of(Schema.Type.RECORD, Schema.Type.ENUM,
Schema.Type.FIXED, Schema.Type.UNION, Schema.Type.ARRAY, Schema.Type.MAP));
ParseContext context = new ParseContext();
for (Schema.Type type : primitives) {
Schema first = context.find(type.getName(), null);
Schema second = context.find(type.getName(), null);
assertEquals(first, second);
assertNotSame(first, second);
first.addProp("logicalType", "brick");
assertNotEquals(first, second);
}
}
@Test
public void validateSchemaRetrievalFailure() {
Schema unknown = Schema.createFixed("unknown", null, null, 0);
Schema unresolved = fooBarBaz.find("unknown", null);
assertTrue(SchemaResolver.isUnresolvedSchema(unresolved));
assertEquals(unknown.getFullName(), SchemaResolver.getUnresolvedSchemaName(unresolved));
}
@Test
public void validateSchemaRetrievalByFullName() {
assertSame(fooRecord, fooBarBaz.find(fooRecord.getFullName(), null));
}
@Test
public void validateSchemaRetrievalBySimpleName() {
assertSame(fooRecord, fooBarBaz.find(fooRecord.getName(), fooRecord.getNamespace()));
}
@Test
public void verifyPutIsIdempotent() {
ParseContext context = new ParseContext();
assertNotEquals(fooRecord, context.find(fooRecord.getFullName(), null));
context.put(fooRecord);
assertEquals(fooRecord, context.find(fooRecord.getFullName(), null));
context.put(fooRecord);
assertEquals(fooRecord, context.find(fooRecord.getFullName(), null));
}
@Test
public void verifyPutOnlyAcceptsNamedSchemas() {
ParseContext context = new ParseContext();
assertThrows(AvroRuntimeException.class, () -> context.put(Schema.create(Schema.Type.STRING)));
}
@Test
public void verifyAddDoesNotAllowChangingSchemas() {
Schema fooEnum = SchemaBuilder.enumeration("ns.Foo").symbols();
ParseContext context = new ParseContext();
context.put(fooRecord);
assertThrows(AvroRuntimeException.class, () -> context.put(fooEnum));
}
}
| ParseContextTest |
java | apache__spark | mllib/src/test/java/org/apache/spark/mllib/clustering/JavaLDASuite.java | {
"start": 1397,
"end": 7227
} | class ____ extends SharedSparkSession {
@Override
@BeforeEach
public void setUp() throws IOException {
super.setUp();
List<Tuple2<Long, Vector>> tinyCorpus = new ArrayList<>();
for (int i = 0; i < LDASuite.tinyCorpus().length; i++) {
tinyCorpus.add(new Tuple2<>((Long) LDASuite.tinyCorpus()[i]._1(),
LDASuite.tinyCorpus()[i]._2()));
}
JavaRDD<Tuple2<Long, Vector>> tmpCorpus = jsc.parallelize(tinyCorpus, 2);
corpus = JavaPairRDD.fromJavaRDD(tmpCorpus);
}
@Test
public void localLDAModel() {
Matrix topics = LDASuite.tinyTopics();
double[] topicConcentration = new double[topics.numRows()];
Arrays.fill(topicConcentration, 1.0D / topics.numRows());
LocalLDAModel model = new LocalLDAModel(topics, Vectors.dense(topicConcentration), 1.0, 100.0);
// Check: basic parameters
assertEquals(model.k(), tinyK);
assertEquals(model.vocabSize(), tinyVocabSize);
assertEquals(model.topicsMatrix(), tinyTopics);
// Check: describeTopics() with all terms
Tuple2<int[], double[]>[] fullTopicSummary = model.describeTopics();
assertEquals(fullTopicSummary.length, tinyK);
for (int i = 0; i < fullTopicSummary.length; i++) {
assertArrayEquals(fullTopicSummary[i]._1(), tinyTopicDescription[i]._1());
assertArrayEquals(fullTopicSummary[i]._2(), tinyTopicDescription[i]._2(), 1e-5);
}
}
@Test
public void distributedLDAModel() {
int k = 3;
double topicSmoothing = 1.2;
double termSmoothing = 1.2;
// Train a model
LDA lda = new LDA();
lda.setK(k)
.setDocConcentration(topicSmoothing)
.setTopicConcentration(termSmoothing)
.setMaxIterations(5)
.setSeed(12345);
DistributedLDAModel model = (DistributedLDAModel) lda.run(corpus);
// Check: basic parameters
LocalLDAModel localModel = model.toLocal();
assertEquals(k, model.k());
assertEquals(k, localModel.k());
assertEquals(tinyVocabSize, model.vocabSize());
assertEquals(tinyVocabSize, localModel.vocabSize());
assertEquals(localModel.topicsMatrix(), model.topicsMatrix());
// Check: topic summaries
Tuple2<int[], double[]>[] roundedTopicSummary = model.describeTopics();
assertEquals(k, roundedTopicSummary.length);
Tuple2<int[], double[]>[] roundedLocalTopicSummary = localModel.describeTopics();
assertEquals(k, roundedLocalTopicSummary.length);
// Check: log probabilities
assertTrue(model.logLikelihood() < 0.0);
assertTrue(model.logPrior() < 0.0);
// Check: topic distributions
JavaPairRDD<Long, Vector> topicDistributions = model.javaTopicDistributions();
// SPARK-5562. since the topicDistribution returns the distribution of the non empty docs
// over topics. Compare it against nonEmptyCorpus instead of corpus
JavaPairRDD<Long, Vector> nonEmptyCorpus =
corpus.filter(tuple2 -> Vectors.norm(tuple2._2(), 1.0) != 0.0);
assertEquals(topicDistributions.count(), nonEmptyCorpus.count());
// Check: javaTopTopicsPerDocuments
Tuple3<Long, int[], double[]> topTopics = model.javaTopTopicsPerDocument(3).first();
Long docId = topTopics._1(); // confirm doc ID type
int[] topicIndices = topTopics._2();
double[] topicWeights = topTopics._3();
assertEquals(3, topicIndices.length);
assertEquals(3, topicWeights.length);
// Check: topTopicAssignments
Tuple3<Long, int[], int[]> topicAssignment = model.javaTopicAssignments().first();
Long docId2 = topicAssignment._1();
int[] termIndices2 = topicAssignment._2();
int[] topicIndices2 = topicAssignment._3();
assertEquals(termIndices2.length, topicIndices2.length);
}
@Test
public void onlineOptimizerCompatibility() {
int k = 3;
double topicSmoothing = 1.2;
double termSmoothing = 1.2;
// Train a model
OnlineLDAOptimizer op = new OnlineLDAOptimizer()
.setTau0(1024)
.setKappa(0.51)
.setGammaShape(1e40)
.setMiniBatchFraction(0.5);
LDA lda = new LDA();
lda.setK(k)
.setDocConcentration(topicSmoothing)
.setTopicConcentration(termSmoothing)
.setMaxIterations(5)
.setSeed(12345)
.setOptimizer(op);
LDAModel model = lda.run(corpus);
// Check: basic parameters
assertEquals(k, model.k());
assertEquals(tinyVocabSize, model.vocabSize());
// Check: topic summaries
Tuple2<int[], double[]>[] roundedTopicSummary = model.describeTopics();
assertEquals(k, roundedTopicSummary.length);
Tuple2<int[], double[]>[] roundedLocalTopicSummary = model.describeTopics();
assertEquals(k, roundedLocalTopicSummary.length);
}
@Test
public void localLdaMethods() {
JavaRDD<Tuple2<Long, Vector>> docs = jsc.parallelize(toyData, 2);
JavaPairRDD<Long, Vector> pairedDocs = JavaPairRDD.fromJavaRDD(docs);
// check: topicDistributions
assertEquals(toyModel.topicDistributions(pairedDocs).count(), pairedDocs.count());
// check: logPerplexity
double logPerplexity = toyModel.logPerplexity(pairedDocs);
// check: logLikelihood.
List<Tuple2<Long, Vector>> docsSingleWord = new ArrayList<>();
docsSingleWord.add(new Tuple2<>(0L, Vectors.dense(1.0, 0.0, 0.0)));
JavaPairRDD<Long, Vector> single = JavaPairRDD.fromJavaRDD(jsc.parallelize(docsSingleWord));
double logLikelihood = toyModel.logLikelihood(single);
}
private static int tinyK = LDASuite.tinyK();
private static int tinyVocabSize = LDASuite.tinyVocabSize();
private static Matrix tinyTopics = LDASuite.tinyTopics();
private static Tuple2<int[], double[]>[] tinyTopicDescription =
LDASuite.tinyTopicDescription();
private JavaPairRDD<Long, Vector> corpus;
private LocalLDAModel toyModel = LDASuite.toyModel();
private List<Tuple2<Long, Vector>> toyData = LDASuite.javaToyData();
}
| JavaLDASuite |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/IdentifierNameTest.java | {
"start": 4036,
"end": 4099
} | class ____ {
public void get() {
| Test |
java | netty__netty | handler/src/main/java/io/netty/handler/traffic/GlobalChannelTrafficShapingHandler.java | {
"start": 4730,
"end": 6459
} | class ____ extends AbstractTrafficShapingHandler {
private static final InternalLogger logger =
InternalLoggerFactory.getInstance(GlobalChannelTrafficShapingHandler.class);
/**
* All queues per channel
*/
final ConcurrentMap<Integer, PerChannel> channelQueues = new ConcurrentHashMap<>();
/**
* Global queues size
*/
private final AtomicLong queuesSize = new AtomicLong();
/**
* Maximum cumulative writing bytes for one channel among all (as long as channels stay the same)
*/
private final AtomicLong cumulativeWrittenBytes = new AtomicLong();
/**
* Maximum cumulative read bytes for one channel among all (as long as channels stay the same)
*/
private final AtomicLong cumulativeReadBytes = new AtomicLong();
/**
* Max size in the list before proposing to stop writing new objects from next handlers
* for all channel (global)
*/
volatile long maxGlobalWriteSize = DEFAULT_MAX_SIZE * 100; // default 400MB
/**
* Limit in B/s to apply to write
*/
private volatile long writeChannelLimit;
/**
* Limit in B/s to apply to read
*/
private volatile long readChannelLimit;
private static final float DEFAULT_DEVIATION = 0.1F;
private static final float MAX_DEVIATION = 0.4F;
private static final float DEFAULT_SLOWDOWN = 0.4F;
private static final float DEFAULT_ACCELERATION = -0.1F;
private volatile float maxDeviation;
private volatile float accelerationFactor;
private volatile float slowDownFactor;
private volatile boolean readDeviationActive;
private volatile boolean writeDeviationActive;
static final | GlobalChannelTrafficShapingHandler |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/text/translate/JavaUnicodeEscaper.java | {
"start": 1276,
"end": 4247
} | class ____ extends UnicodeEscaper {
/**
* Constructs a {@link JavaUnicodeEscaper} above the specified value (exclusive).
*
* @param codePoint
* above which to escape.
* @return the newly created {@link UnicodeEscaper} instance.
*/
public static JavaUnicodeEscaper above(final int codePoint) {
return outsideOf(0, codePoint);
}
/**
* Constructs a {@link JavaUnicodeEscaper} below the specified value (exclusive).
*
* @param codePoint
* below which to escape.
* @return the newly created {@link UnicodeEscaper} instance.
*/
public static JavaUnicodeEscaper below(final int codePoint) {
return outsideOf(codePoint, Integer.MAX_VALUE);
}
/**
* Constructs a {@link JavaUnicodeEscaper} between the specified values (inclusive).
*
* @param codePointLow
* above which to escape.
* @param codePointHigh
* below which to escape.
* @return the newly created {@link UnicodeEscaper} instance.
*/
public static JavaUnicodeEscaper between(final int codePointLow, final int codePointHigh) {
return new JavaUnicodeEscaper(codePointLow, codePointHigh, true);
}
/**
* Constructs a {@link JavaUnicodeEscaper} outside of the specified values (exclusive).
*
* @param codePointLow
* below which to escape.
* @param codePointHigh
* above which to escape.
* @return the newly created {@link UnicodeEscaper} instance.
*/
public static JavaUnicodeEscaper outsideOf(final int codePointLow, final int codePointHigh) {
return new JavaUnicodeEscaper(codePointLow, codePointHigh, false);
}
/**
* Constructs a {@link JavaUnicodeEscaper} for the specified range. This is the underlying method for the
* other constructors/builders. The {@code below} and {@code above} boundaries are inclusive when
* {@code between} is {@code true} and exclusive when it is {@code false}.
*
* @param below
* int value representing the lowest code point boundary.
* @param above
* int value representing the highest code point boundary.
* @param between
* whether to escape between the boundaries or outside them.
*/
public JavaUnicodeEscaper(final int below, final int above, final boolean between) {
super(below, above, between);
}
/**
* Converts the given code point to a hexadecimal string of the form {@code "\\uXXXX\\uXXXX"}
*
* @param codePoint
* a Unicode code point.
* @return the hexadecimal string for the given code point.
*/
@Override
protected String toUtf16Escape(final int codePoint) {
final char[] surrogatePair = Character.toChars(codePoint);
return "\\u" + hex(surrogatePair[0]) + "\\u" + hex(surrogatePair[1]);
}
}
| JavaUnicodeEscaper |
java | apache__camel | components/camel-geocoder/src/test/java/org/apache/camel/component/geocoder/GeoCoderComponentAddressTest.java | {
"start": 1139,
"end": 2148
} | class ____ extends GeoCoderApiKeyTestBase {
@Test
public void testGeoCoder() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMessageCount(1);
mock.expectedHeaderReceived(GeoCoderConstants.LATLNG, "55.67609680,12.56833720");
// the address header overrides the endpoint configuration
template.sendBodyAndHeader("direct:start", "Hello", GeoCoderConstants.ADDRESS, " ");
MockEndpoint.assertIsSatisfied(context);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from("direct:start").to("geocoder:address: empty?apiKey=" + getApiKey()).to("log:result")
.log("Location ${header.CamelGeocoderAddress} is at lat/lng: ${header.CamelGeocoderLatlng} in city ${header.CamelGeocoderCity}")
.to("mock:result");
}
};
}
}
| GeoCoderComponentAddressTest |
java | quarkusio__quarkus | extensions/opentelemetry/deployment/src/test/java/io/quarkus/opentelemetry/deployment/traces/OpenTelemetryHttpCDITest.java | {
"start": 4189,
"end": 4552
} | class ____ {
@Inject
HelloBean helloBean;
@GET
public String hello() {
return helloBean.hello();
}
@GET
@Path("/withSpan")
@WithSpan("withSpan")
public String withSpan() {
return helloBean.hello();
}
}
@ApplicationScoped
public static | HelloResource |
java | apache__camel | components/camel-debezium/camel-debezium-mysql/src/generated/java/org/apache/camel/component/debezium/mysql/DebeziumMySqlEndpointUriFactory.java | {
"start": 524,
"end": 7879
} | class ____ extends org.apache.camel.support.component.EndpointUriFactorySupport implements EndpointUriFactory {
private static final String BASE = ":name";
private static final Set<String> PROPERTY_NAMES;
private static final Set<String> SECRET_PROPERTY_NAMES;
private static final Map<String, String> MULTI_VALUE_PREFIXES;
static {
Set<String> props = new HashSet<>(123);
props.add("additionalProperties");
props.add("bigintUnsignedHandlingMode");
props.add("binlogBufferSize");
props.add("bridgeErrorHandler");
props.add("columnExcludeList");
props.add("columnIncludeList");
props.add("columnPropagateSourceType");
props.add("connectKeepAlive");
props.add("connectKeepAliveIntervalMs");
props.add("connectTimeoutMs");
props.add("connectionValidationTimeoutMs");
props.add("converters");
props.add("customMetricTags");
props.add("databaseExcludeList");
props.add("databaseHostname");
props.add("databaseIncludeList");
props.add("databaseInitialStatements");
props.add("databaseJdbcDriver");
props.add("databasePassword");
props.add("databasePort");
props.add("databaseProtocol");
props.add("databaseQueryTimeoutMs");
props.add("databaseServerId");
props.add("databaseServerIdOffset");
props.add("databaseSslKeystore");
props.add("databaseSslKeystorePassword");
props.add("databaseSslMode");
props.add("databaseSslTruststore");
props.add("databaseSslTruststorePassword");
props.add("databaseUser");
props.add("datatypePropagateSourceType");
props.add("decimalHandlingMode");
props.add("enableTimeAdjuster");
props.add("errorsMaxRetries");
props.add("eventDeserializationFailureHandlingMode");
props.add("eventProcessingFailureHandlingMode");
props.add("exceptionHandler");
props.add("exchangePattern");
props.add("executorShutdownTimeoutMs");
props.add("extendedHeadersEnabled");
props.add("gtidSourceExcludes");
props.add("gtidSourceFilterDmlEvents");
props.add("gtidSourceIncludes");
props.add("guardrailCollectionsLimitAction");
props.add("guardrailCollectionsMax");
props.add("heartbeatActionQuery");
props.add("heartbeatIntervalMs");
props.add("heartbeatTopicsPrefix");
props.add("includeQuery");
props.add("includeSchemaChanges");
props.add("includeSchemaComments");
props.add("inconsistentSchemaHandlingMode");
props.add("incrementalSnapshotAllowSchemaChanges");
props.add("incrementalSnapshotChunkSize");
props.add("incrementalSnapshotWatermarkingStrategy");
props.add("internalKeyConverter");
props.add("internalValueConverter");
props.add("maxBatchSize");
props.add("maxQueueSize");
props.add("maxQueueSizeInBytes");
props.add("messageKeyColumns");
props.add("minRowCountToStreamResults");
props.add("name");
props.add("notificationEnabledChannels");
props.add("notificationSinkTopicName");
props.add("offsetCommitPolicy");
props.add("offsetCommitTimeoutMs");
props.add("offsetFlushIntervalMs");
props.add("offsetStorage");
props.add("offsetStorageFileName");
props.add("offsetStoragePartitions");
props.add("offsetStorageReplicationFactor");
props.add("offsetStorageTopic");
props.add("openlineageIntegrationConfigFilePath");
props.add("openlineageIntegrationDatasetKafkaBootstrapServers");
props.add("openlineageIntegrationEnabled");
props.add("openlineageIntegrationJobDescription");
props.add("openlineageIntegrationJobNamespace");
props.add("openlineageIntegrationJobOwners");
props.add("openlineageIntegrationJobTags");
props.add("pollIntervalMs");
props.add("postProcessors");
props.add("provideTransactionMetadata");
props.add("queryFetchSize");
props.add("retriableRestartConnectorWaitMs");
props.add("schemaHistoryInternal");
props.add("schemaHistoryInternalFileFilename");
props.add("schemaHistoryInternalSkipUnparseableDdl");
props.add("schemaHistoryInternalStoreOnlyCapturedDatabasesDdl");
props.add("schemaHistoryInternalStoreOnlyCapturedTablesDdl");
props.add("schemaNameAdjustmentMode");
props.add("signalDataCollection");
props.add("signalEnabledChannels");
props.add("signalPollIntervalMs");
props.add("skippedOperations");
props.add("snapshotDelayMs");
props.add("snapshotFetchSize");
props.add("snapshotIncludeCollectionList");
props.add("snapshotLockTimeoutMs");
props.add("snapshotLockingMode");
props.add("snapshotMaxThreads");
props.add("snapshotMode");
props.add("snapshotModeConfigurationBasedSnapshotData");
props.add("snapshotModeConfigurationBasedSnapshotOnDataError");
props.add("snapshotModeConfigurationBasedSnapshotOnSchemaError");
props.add("snapshotModeConfigurationBasedSnapshotSchema");
props.add("snapshotModeConfigurationBasedStartStream");
props.add("snapshotModeCustomName");
props.add("snapshotQueryMode");
props.add("snapshotQueryModeCustomName");
props.add("snapshotSelectStatementOverrides");
props.add("snapshotTablesOrderByRowCount");
props.add("sourceinfoStructMaker");
props.add("streamingDelayMs");
props.add("tableExcludeList");
props.add("tableIgnoreBuiltin");
props.add("tableIncludeList");
props.add("timePrecisionMode");
props.add("tombstonesOnDelete");
props.add("topicNamingStrategy");
props.add("topicPrefix");
props.add("transactionMetadataFactory");
props.add("useNongracefulDisconnect");
PROPERTY_NAMES = Collections.unmodifiableSet(props);
SECRET_PROPERTY_NAMES = Collections.emptySet();
Map<String, String> prefixes = new HashMap<>(1);
prefixes.put("additionalProperties", "additionalProperties.");
MULTI_VALUE_PREFIXES = Collections.unmodifiableMap(prefixes);
}
@Override
public boolean isEnabled(String scheme) {
return "debezium-mysql".equals(scheme);
}
@Override
public String buildUri(String scheme, Map<String, Object> properties, boolean encode) throws URISyntaxException {
String syntax = scheme + BASE;
String uri = syntax;
Map<String, Object> copy = new HashMap<>(properties);
uri = buildPathParameter(syntax, uri, "name", null, true, copy);
uri = buildQueryParameters(uri, copy, encode);
return uri;
}
@Override
public Set<String> propertyNames() {
return PROPERTY_NAMES;
}
@Override
public Set<String> secretPropertyNames() {
return SECRET_PROPERTY_NAMES;
}
@Override
public Map<String, String> multiValuePrefixes() {
return MULTI_VALUE_PREFIXES;
}
@Override
public boolean isLenientProperties() {
return false;
}
}
| DebeziumMySqlEndpointUriFactory |
java | quarkusio__quarkus | extensions/devservices/common/src/main/java/io/quarkus/devservices/common/ContainerShutdownCloseable.java | {
"start": 811,
"end": 2064
} | class ____ implements Closeable {
private static final Logger LOG = Logger.getLogger(ContainerShutdownCloseable.class);
private final GenericContainer<?> container;
private final String friendlyServiceName;
/**
* @param container the container to be eventually closed
* @param friendlyServiceName for logging purposes
*/
public ContainerShutdownCloseable(GenericContainer<?> container, String friendlyServiceName) {
Objects.requireNonNull(container);
Objects.requireNonNull(friendlyServiceName);
this.container = container;
this.friendlyServiceName = friendlyServiceName;
}
@Override
public void close() {
if (TestcontainersConfiguration.getInstance().environmentSupportsReuse()
&& container.isShouldBeReused()) {
LOG.infof(
"Dev Services for %s is no longer needed by this Quarkus instance, but is not shut down as 'testcontainers.reuse.enable' is enabled in your Testcontainers configuration file",
friendlyServiceName);
} else {
container.stop();
LOG.infof("Dev Services for %s shut down.", this.friendlyServiceName);
}
}
}
| ContainerShutdownCloseable |
java | google__auto | factory/src/main/java/com/google/auto/factory/AutoFactory.java | {
"start": 3302,
"end": 3482
} | class ____ {...}
* }</pre>
*
* Then the generated {@code FooFactory} would look like this:
* <pre>{@code
* @Immutable
* @SuppressWarnings("Immutable")
* public | Foo |
java | google__error-prone | core/src/test/java/com/google/errorprone/testdata/MultipleTopLevelClassesWithNoErrors.java | {
"start": 878,
"end": 1001
} | class ____ {
int foo;
int bar;
public Foo1(int foo, int bar) {
this.foo = foo;
this.bar = bar;
}
}
final | Foo1 |
java | netty__netty | handler/src/test/java/io/netty/handler/address/ResolveAddressHandlerTest.java | {
"start": 3920,
"end": 5080
} | class ____ extends AddressResolverGroup<SocketAddress> {
private final boolean fail;
TestResolverGroup(boolean fail) {
this.fail = fail;
}
@Override
protected AddressResolver<SocketAddress> newResolver(EventExecutor executor) {
return new AbstractAddressResolver<SocketAddress>(executor) {
@Override
protected boolean doIsResolved(SocketAddress address) {
return address == RESOLVED;
}
@Override
protected void doResolve(SocketAddress unresolvedAddress, Promise<SocketAddress> promise) {
assertSame(UNRESOLVED, unresolvedAddress);
if (fail) {
promise.setFailure(ERROR);
} else {
promise.setSuccess(RESOLVED);
}
}
@Override
protected void doResolveAll(SocketAddress unresolvedAddress, Promise<List<SocketAddress>> promise) {
fail();
}
};
}
}
}
| TestResolverGroup |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/serializer/label/LabelIncludeTest2.java | {
"start": 247,
"end": 634
} | class ____ extends TestCase {
public void test_includes() throws Exception {
VO vo = new VO();
vo.setId(123);
vo.setName("wenshao");
vo.setPassword("ooxxx");
String text = JSON.toJSONString(vo, Labels.includes("normal"));
Assert.assertEquals("{\"id\":123,\"name\":\"wenshao\"}", text);
}
private static | LabelIncludeTest2 |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/argumentselectiondefects/EnclosedByReverseHeuristicTest.java | {
"start": 2475,
"end": 3024
} | class ____ {
abstract void target(Object first, Object second);
void test(Object first, Object second) {
// BUG: Diagnostic contains: false
target(second, first);
}
}
""")
.doTest();
}
@Test
public void enclosedByReverse_returnsTrue_withinReverseMethod() {
CompilationTestHelper.newInstance(EnclosedByReverseHeuristicChecker.class, getClass())
.addSourceLines(
"Test.java",
"""
abstract | Test |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/scheduling/concurrent/ThreadPoolExecutorFactoryBeanTests.java | {
"start": 3053,
"end": 3231
} | class ____ {
@Bean
ThreadPoolExecutorFactoryBean executor() {
return new ThreadPoolExecutorFactoryBean();
}
}
@SuppressWarnings("serial")
private static | ExecutorConfig |
java | mockito__mockito | mockito-core/src/test/java/org/mockito/internal/util/reflection/GenericTypeExtractorTest.java | {
"start": 915,
"end": 958
} | class ____ implements IBase {}
| INonGeneric |
java | apache__avro | lang/java/avro/src/test/java/org/apache/avro/reflect/TestReflect.java | {
"start": 31825,
"end": 35140
} | class ____ {
int i;
}
/** Test nesting of reflect data within generic. */
@Test
void reflectWithinGeneric() throws Exception {
ReflectData data = ReflectData.get();
// define a record with a field that's a specific Y
Schema schema = Schema.createRecord("Foo", "", "x.y.z", false);
List<Schema.Field> fields = new ArrayList<>();
fields.add(new Schema.Field("f", data.getSchema(Y.class), "", null));
schema.setFields(fields);
// create a generic instance of this record
Y y = new Y();
y.i = 1;
GenericData.Record record = new GenericData.Record(schema);
record.put("f", y);
// test that this instance can be written & re-read
checkBinary(schema, record);
}
@Test
void primitiveArray() throws Exception {
testPrimitiveArrays(false);
}
@Test
void primitiveArrayBlocking() throws Exception {
testPrimitiveArrays(true);
}
private void testPrimitiveArrays(boolean blocking) throws Exception {
testPrimitiveArray(boolean.class, blocking);
testPrimitiveArray(byte.class, blocking);
testPrimitiveArray(short.class, blocking);
testPrimitiveArray(char.class, blocking);
testPrimitiveArray(int.class, blocking);
testPrimitiveArray(long.class, blocking);
testPrimitiveArray(float.class, blocking);
testPrimitiveArray(double.class, blocking);
}
private void testPrimitiveArray(Class<?> c, boolean blocking) throws Exception {
ReflectData data = new ReflectData();
Random r = new Random();
int size = 200;
Object array = Array.newInstance(c, size);
Schema s = data.getSchema(array.getClass());
for (int i = 0; i < size; i++) {
Array.set(array, i, randomFor(c, r));
}
checkBinary(data, s, array, false, blocking);
}
private Object randomFor(Class<?> c, Random r) {
if (c == boolean.class)
return r.nextBoolean();
if (c == int.class)
return r.nextInt();
if (c == long.class)
return r.nextLong();
if (c == byte.class)
return (byte) r.nextInt();
if (c == float.class)
return r.nextFloat();
if (c == double.class)
return r.nextDouble();
if (c == char.class)
return (char) r.nextInt();
if (c == short.class)
return (short) r.nextInt();
return null;
}
/** Test union of null and an array. */
@Test
void nullArray() throws Exception {
String json = "[{\"type\":\"array\", \"items\": \"long\"}, \"null\"]";
Schema schema = new Schema.Parser().parse(json);
checkBinary(schema, null);
}
/** Test stringable classes. */
@Test
void stringables() throws Exception {
checkStringable(java.math.BigDecimal.class, "10");
checkStringable(java.math.BigInteger.class, "20");
checkStringable(java.net.URI.class, "foo://bar:9000/baz");
checkStringable(java.net.URL.class, "http://bar:9000/baz");
checkStringable(java.io.File.class, "foo.bar");
}
@SuppressWarnings({ "unchecked", "rawtypes" })
public void checkStringable(Class c, String value) throws Exception {
ReflectData data = new ReflectData();
Schema schema = data.getSchema(c);
assertEquals("{\"type\":\"string\",\"java-class\":\"" + c.getName() + "\"}", schema.toString());
checkBinary(schema, c.getConstructor(String.class).newInstance(value));
}
public static | Y |
java | apache__dubbo | dubbo-remoting/dubbo-remoting-http12/src/main/java/org/apache/dubbo/remoting/http12/CompositeInputStream.java | {
"start": 1052,
"end": 3828
} | class ____ extends InputStream {
private final Queue<InputStream> inputStreams = new ConcurrentLinkedQueue<>();
private int totalAvailable = 0;
private int readIndex = 0;
public void addInputStream(InputStream inputStream) {
inputStreams.offer(inputStream);
try {
totalAvailable += inputStream.available();
} catch (IOException e) {
throw new DecodeException(e);
}
}
@Override
public int read() throws IOException {
InputStream inputStream;
while ((inputStream = inputStreams.peek()) != null) {
int available = inputStream.available();
if (available == 0) {
releaseHeadStream();
continue;
}
int read = inputStream.read();
if (read != -1) {
++readIndex;
releaseIfNecessary(inputStream);
return read;
}
releaseHeadStream();
}
return -1;
}
@Override
public int read(byte[] b, int off, int len) throws IOException {
if (b == null) {
throw new NullPointerException();
} else if (off < 0 || len < 0 || len > b.length - off) {
throw new IndexOutOfBoundsException();
} else if (len == 0) {
return 0;
}
int total = 0;
InputStream inputStream;
while ((inputStream = inputStreams.peek()) != null) {
int available = inputStream.available();
if (available == 0) {
releaseHeadStream();
continue;
}
int read = inputStream.read(b, off + total, Math.min(len - total, available));
if (read != -1) {
total += read;
readIndex += read;
releaseIfNecessary(inputStream);
if (total == len) {
return total;
}
} else {
releaseHeadStream();
}
}
return total > 0 ? total : -1;
}
@Override
public int available() {
return totalAvailable - readIndex;
}
@Override
public void close() throws IOException {
InputStream inputStream;
while ((inputStream = inputStreams.poll()) != null) {
inputStream.close();
}
}
private void releaseHeadStream() throws IOException {
InputStream removeStream = inputStreams.remove();
removeStream.close();
}
private void releaseIfNecessary(InputStream inputStream) throws IOException {
int available = inputStream.available();
if (available == 0) {
releaseHeadStream();
}
}
}
| CompositeInputStream |
java | bumptech__glide | library/src/main/java/com/bumptech/glide/load/model/ResourceLoader.java | {
"start": 4044,
"end": 4692
} | class ____
implements ModelLoaderFactory<Integer, AssetFileDescriptor> {
private final Resources resources;
public AssetFileDescriptorFactory(Resources resources) {
this.resources = resources;
}
@Override
public ModelLoader<Integer, AssetFileDescriptor> build(MultiModelLoaderFactory multiFactory) {
return new ResourceLoader<>(
resources, multiFactory.build(Uri.class, AssetFileDescriptor.class));
}
@Override
public void teardown() {
// Do nothing.
}
}
/** Factory for loading resource {@link Uri}s from Android resource ids. */
public static | AssetFileDescriptorFactory |
java | apache__avro | lang/java/avro/src/main/java/org/apache/avro/Protocol.java | {
"start": 2710,
"end": 5366
} | class ____ extends JsonProperties {
private final String name;
private final String doc;
private final Schema request;
/** Construct a message. */
private Message(String name, String doc, JsonProperties propMap, Schema request) {
super(MESSAGE_RESERVED);
this.name = name;
this.doc = doc;
this.request = request;
if (propMap != null)
// copy props
addAllProps(propMap);
}
private Message(String name, String doc, Map<String, ?> propMap, Schema request) {
super(MESSAGE_RESERVED, propMap);
this.name = name;
this.doc = doc;
this.request = request;
}
/** The name of this message. */
public String getName() {
return name;
}
/** The parameters of this message. */
public Schema getRequest() {
return request;
}
/** The returned data. */
public Schema getResponse() {
return Schema.create(Schema.Type.NULL);
}
/** Errors that might be thrown. */
public Schema getErrors() {
return Schema.createUnion(Collections.emptyList());
}
/** Returns true if this is a one-way message, with no response or errors. */
public boolean isOneWay() {
return true;
}
@Override
public String toString() {
try {
StringWriter writer = new StringWriter();
JsonGenerator gen = Schema.FACTORY.createGenerator(writer);
toJson(new HashSet<>(), gen);
gen.flush();
return writer.toString();
} catch (IOException e) {
throw new AvroRuntimeException(e);
}
}
void toJson(Set<String> knownNames, JsonGenerator gen) throws IOException {
gen.writeStartObject();
if (doc != null)
gen.writeStringField("doc", doc);
writeProps(gen); // write out properties
gen.writeFieldName("request");
request.fieldsToJson(knownNames, namespace, gen);
toJson1(knownNames, gen);
gen.writeEndObject();
}
void toJson1(Set<String> knownNames, JsonGenerator gen) throws IOException {
gen.writeStringField("response", "null");
gen.writeBooleanField("one-way", true);
}
@Override
public boolean equals(Object o) {
if (o == this)
return true;
if (!(o instanceof Message))
return false;
Message that = (Message) o;
return this.name.equals(that.name) && this.request.equals(that.request) && propsEqual(that);
}
@Override
public int hashCode() {
return name.hashCode() + request.hashCode() + propsHashCode();
}
public String getDoc() {
return doc;
}
}
private final | Message |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/rest/messages/job/metrics/MetricCollectionResponseBody.java | {
"start": 2915,
"end": 3594
} | class ____ extends StdSerializer<MetricCollectionResponseBody> {
private static final long serialVersionUID = 1L;
protected Serializer() {
super(MetricCollectionResponseBody.class);
}
@Override
public void serialize(
MetricCollectionResponseBody metricCollectionResponseBody,
JsonGenerator jsonGenerator,
SerializerProvider serializerProvider)
throws IOException {
jsonGenerator.writeObject(metricCollectionResponseBody.getMetrics());
}
}
/** JSON deserializer for {@link MetricCollectionResponseBody}. */
public static | Serializer |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CombinedHostFileManager.java | {
"start": 2658,
"end": 5686
} | class ____ {
private Multimap<InetAddress, DatanodeAdminProperties> allDNs =
HashMultimap.create();
// optimization. If every node in the file isn't in service, it implies
// any node is allowed to register with nn. This is equivalent to having
// an empty "include" file.
private boolean emptyInServiceNodeLists = true;
synchronized void add(InetAddress addr,
DatanodeAdminProperties properties) {
allDNs.put(addr, properties);
if (properties.getAdminState().equals(
AdminStates.NORMAL)) {
emptyInServiceNodeLists = false;
}
}
// If the includes list is empty, act as if everything is in the
// includes list.
synchronized boolean isIncluded(final InetSocketAddress address) {
return emptyInServiceNodeLists || allDNs.get(address.getAddress())
.stream().anyMatch(
input -> input.getPort() == 0 ||
input.getPort() == address.getPort());
}
synchronized boolean isExcluded(final InetSocketAddress address) {
return allDNs.get(address.getAddress()).stream().anyMatch(
input -> input.getAdminState().equals(
AdminStates.DECOMMISSIONED) &&
(input.getPort() == 0 ||
input.getPort() == address.getPort()));
}
synchronized String getUpgradeDomain(final InetSocketAddress address) {
Iterable<DatanodeAdminProperties> datanode =
allDNs.get(address.getAddress()).stream().filter(
input -> (input.getPort() == 0 ||
input.getPort() == address.getPort())).collect(
Collectors.toList());
return datanode.iterator().hasNext() ?
datanode.iterator().next().getUpgradeDomain() : null;
}
Iterable<InetSocketAddress> getIncludes() {
return new Iterable<InetSocketAddress>() {
@Override
public Iterator<InetSocketAddress> iterator() {
return new HostIterator(allDNs.entries());
}
};
}
Iterable<InetSocketAddress> getExcludes() {
return () -> new HostIterator(
allDNs.entries().stream().filter(
entry -> entry.getValue().getAdminState().equals(
AdminStates.DECOMMISSIONED)).collect(
Collectors.toList()));
}
synchronized long getMaintenanceExpireTimeInMS(
final InetSocketAddress address) {
Iterable<DatanodeAdminProperties> datanode =
allDNs.get(address.getAddress()).stream().filter(
input -> input.getAdminState().equals(
AdminStates.IN_MAINTENANCE) &&
(input.getPort() == 0 ||
input.getPort() == address.getPort())).collect(
Collectors.toList());
// if DN isn't set to maintenance state, ignore MaintenanceExpireTimeInMS
// set in the config.
return datanode.iterator().hasNext() ?
datanode.iterator().next().getMaintenanceExpireTimeInMS() : 0;
}
static | HostProperties |
java | apache__camel | components/camel-vertx/camel-vertx-http/src/test/java/org/apache/camel/component/vertx/http/VertxHttpMethodTest.java | {
"start": 1113,
"end": 3687
} | class ____ extends VertxHttpTestSupport {
@Test
public void testDefaultMethodGet() {
String result = template.requestBody(getProducerUri(), null, String.class);
assertEquals(HttpMethod.GET.name(), result);
}
@Test
public void testMethodSetFromEndpoint() {
String result = template.requestBody(getProducerUri() + "?httpMethod=DELETE", null, String.class);
assertEquals(HttpMethod.DELETE.name(), result);
}
@Test
public void testMethodSetFromHeader() {
String result = template.requestBodyAndHeader(getProducerUri(), null, Exchange.HTTP_METHOD, HttpMethod.PUT.name(),
String.class);
assertEquals(HttpMethod.PUT.name(), result);
}
@Test
public void testDefaultMethodGetWhenQueryStringProvided() {
String result = template.requestBody(getProducerUri() + "/?foo=bar&cheese=wine", null, String.class);
assertEquals(HttpMethod.GET.name(), result);
}
@Test
public void testDefaultMethodGetWhenQueryStringProvidedFromHeader() {
String result = template.requestBodyAndHeader(getProducerUri(), null, Exchange.HTTP_QUERY, "foo=bar&cheese=wine",
String.class);
assertEquals(HttpMethod.GET.name(), result);
}
@Test
public void testDefaultMethodPostWhenBodyNotNull() {
String result = template.requestBody(getProducerUri(), "Test Body", String.class);
assertEquals(HttpMethod.POST.name(), result);
}
@Test
public void testEndpointConfigurationPrecedence() {
String result = template.requestBody(getProducerUri() + "?httpMethod=DELETE&foo=bar", null, String.class);
assertEquals(HttpMethod.DELETE.name(), result);
}
@Test
public void testHeaderConfigurationPrecedence() {
String result = template.requestBodyAndHeader(getProducerUri() + "?foo=bar", null, Exchange.HTTP_METHOD,
HttpMethod.PUT.name(), String.class);
assertEquals(HttpMethod.PUT.name(), result);
}
@Test
public void testQueryStringPrecedence() {
String result = template.requestBody(getProducerUri() + "/?foo=bar&cheese=wine", "Test Body", String.class);
assertEquals(HttpMethod.GET.name(), result);
}
@Override
protected RoutesBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from(getTestServerUri())
.setBody(header(Exchange.HTTP_METHOD));
}
};
}
}
| VertxHttpMethodTest |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/DataBlocks.java | {
"start": 15890,
"end": 16737
} | class ____ extends ByteArrayOutputStream {
DataBlockByteArrayOutputStream(int size) {
super(size);
}
/**
* InputStream backed by the internal byte array.
*
* @return ByteArrayInputStream instance.
*/
ByteArrayInputStream getInputStream() {
ByteArrayInputStream bin = new ByteArrayInputStream(this.buf, 0, count);
this.reset();
this.buf = null;
return bin;
}
}
/**
* Stream to memory via a {@code ByteArrayOutputStream}.
* <p>
* It can consume a lot of heap space
* proportional to the mismatch between writes to the stream and
* the JVM-wide upload bandwidth to a Store's endpoint.
* The memory consumption can be limited by tuning the filesystem settings
* to restrict the number of queued/active uploads.
*/
static | DataBlockByteArrayOutputStream |
java | spring-projects__spring-data-jpa | spring-data-jpa/src/main/java/org/springframework/data/jpa/repository/Query.java | {
"start": 1429,
"end": 3044
} | interface ____ {
/**
* Defines the JPA query to be executed when the annotated method is called.
*/
String value() default "";
/**
* Defines a special count query that shall be used for pagination queries to look up the total number of elements for
* a page. If none is configured we will derive the count query from the original query or {@link #countProjection()}
* query if any.
*/
String countQuery() default "";
/**
* Defines the projection part of the count query that is generated for pagination. If neither {@link #countQuery()}
* nor {@code countProjection()} is configured we will derive the count query from the original query.
*
* @return
* @since 1.6
*/
String countProjection() default "";
/**
* Configures whether the given query is a native one. Defaults to {@literal false}.
*/
boolean nativeQuery() default false;
/**
* The named query to be used. If not defined, a {@link jakarta.persistence.NamedQuery} with name of
* {@code ${domainClass}.${queryMethodName}} will be used.
*/
String name() default "";
/**
* Returns the name of the {@link jakarta.persistence.NamedQuery} to be used to execute count queries when pagination
* is used. Will default to the named query name configured suffixed by {@code .count}.
*
* @see #name()
* @return
*/
String countName() default "";
/**
* Define a {@link QueryRewriter} that should be applied to the query string after the query is fully assembled.
*
* @return
* @since 3.0
*/
Class<? extends QueryRewriter> queryRewriter() default QueryRewriter.IdentityQueryRewriter.class;
}
| Query |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/error/ShouldBeUpperCase_create_Test.java | {
"start": 990,
"end": 1592
} | class ____ {
@Test
void should_create_error_message_for_character() {
// WHEN
String message = shouldBeUpperCase('a').create(new TextDescription("Test"), STANDARD_REPRESENTATION);
// THEN
then(message).isEqualTo("[Test] %nExpecting 'a' to be uppercase".formatted());
}
@Test
void should_create_error_message_for_string() {
// WHEN
String message = shouldBeUpperCase("abc").create(new TextDescription("Test"), STANDARD_REPRESENTATION);
// THEN
then(message).isEqualTo("[Test] %nExpecting \"abc\" to be uppercase".formatted());
}
}
| ShouldBeUpperCase_create_Test |
java | quarkusio__quarkus | core/runtime/src/main/java/io/quarkus/runtime/Startup.java | {
"start": 536,
"end": 973
} | class ____ annotated then a contextual instance is created and the {@link jakarta.annotation.PostConstruct}
* callbacks are invoked.</li>
* <li>If a producer method is annotated then a contextual instance is created, i.e. the producer method is invoked.</li>
* <li>If a producer field is annotated then a contextual instance is created, i.e. the producer field is read.</li>
* <li>If a non-static non-producer no-args method of a bean | is |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/any/xml2/NamedIntegerProperty.java | {
"start": 315,
"end": 1042
} | class ____ implements NamedProperty {
private Integer id;
private String name;
private Integer value;
public NamedIntegerProperty() {
super();
}
public NamedIntegerProperty(int id, String name, Integer value) {
super();
this.id = id;
this.name = name;
this.value = value;
}
public String asString() {
return Integer.toString(value);
}
public String getName() {
return name;
}
@Id
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
@Column(name = "`value`")
public Integer getValue() {
return value;
}
public void setValue(Integer value) {
this.value = value;
}
public void setName(String name) {
this.name = name;
}
}
| NamedIntegerProperty |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/LogWebServiceUtils.java | {
"start": 2378,
"end": 7907
} | class ____ {
private static final Logger LOG =
LoggerFactory.getLogger(LogWebServiceUtils.class);
private LogWebServiceUtils() {
}
private static final Joiner DOT_JOINER = Joiner.on(". ");
public static Response sendStreamOutputResponse(
LogAggregationFileControllerFactory factory, ApplicationId appId,
String appOwner, String nodeId, String containerIdStr, String fileName,
String format, long bytes, boolean printEmptyLocalContainerLog) {
String contentType = WebAppUtils.getDefaultLogContentType();
if (format != null && !format.isEmpty()) {
contentType = WebAppUtils.getSupportedLogContentType(format);
if (contentType == null) {
String errorMessage =
"The valid values for the parameter : format " + "are "
+ WebAppUtils.listSupportedLogContentType();
return Response.status(Response.Status.BAD_REQUEST).entity(errorMessage)
.build();
}
}
StreamingOutput stream = null;
try {
stream =
getStreamingOutput(factory, appId, appOwner, nodeId, containerIdStr,
fileName, bytes, printEmptyLocalContainerLog);
} catch (Exception ex) {
LOG.debug("Exception", ex);
return createBadResponse(Response.Status.INTERNAL_SERVER_ERROR,
ex.getMessage());
}
Response.ResponseBuilder response = Response.ok(stream);
response.header("Content-Type", contentType);
// Sending the X-Content-Type-Options response header with the value
// nosniff will prevent Internet Explorer from MIME-sniffing a response
// away from the declared content-type.
response.header("X-Content-Type-Options", "nosniff");
return response.build();
}
private static StreamingOutput getStreamingOutput(
final LogAggregationFileControllerFactory factory,
final ApplicationId appId, final String appOwner, final String nodeId,
final String containerIdStr, final String logFile, final long bytes,
final boolean printEmptyLocalContainerLog) throws IOException {
StreamingOutput stream = new StreamingOutput() {
@Override public void write(OutputStream os)
throws IOException, WebApplicationException {
ContainerLogsRequest request = new ContainerLogsRequest();
request.setAppId(appId);
request.setAppOwner(appOwner);
request.setContainerId(containerIdStr);
request.setBytes(bytes);
request.setNodeId(nodeId);
Set<String> logTypes = new HashSet<>();
logTypes.add(logFile);
request.setLogTypes(logTypes);
boolean findLogs = factory.getFileControllerForRead(appId, appOwner)
.readAggregatedLogs(request, os);
if (!findLogs) {
os.write(("Can not find logs for container:" + containerIdStr)
.getBytes(StandardCharsets.UTF_8));
} else {
if (printEmptyLocalContainerLog) {
StringBuilder sb = new StringBuilder();
sb.append(containerIdStr + "\n");
sb.append("LogAggregationType: " + ContainerLogAggregationType.LOCAL
+ "\n");
sb.append("LogContents:\n");
sb.append(getNoRedirectWarning() + "\n");
os.write(sb.toString().getBytes(StandardCharsets.UTF_8));
}
}
}
};
return stream;
}
public static String getNoRedirectWarning() {
return "We do not have NodeManager web address, so we can not "
+ "re-direct the request to related NodeManager "
+ "for local container logs.";
}
public static void rewrapAndThrowException(Exception e) {
if (e instanceof UndeclaredThrowableException) {
rewrapAndThrowThrowable(e.getCause());
} else {
rewrapAndThrowThrowable(e);
}
}
public static void rewrapAndThrowThrowable(Throwable t) {
if (t instanceof AuthorizationException) {
throw new ForbiddenException(t);
} else {
throw new WebApplicationException(t);
}
}
public static long parseLongParam(String bytes) {
if (bytes == null || bytes.isEmpty()) {
return Long.MAX_VALUE;
}
return Long.parseLong(bytes);
}
public static Response createBadResponse(Response.Status status,
String errMessage) {
Response response = Response.status(status)
.entity(DOT_JOINER.join(status.toString(), errMessage)).build();
return response;
}
public static boolean isRunningState(YarnApplicationState appState) {
return appState == YarnApplicationState.RUNNING;
}
protected static UserGroupInformation getUser(HttpServletRequest req) {
String remoteUser = req.getRemoteUser();
UserGroupInformation callerUGI = null;
if (remoteUser != null) {
callerUGI = UserGroupInformation.createRemoteUser(remoteUser);
}
return callerUGI;
}
public static String getNMWebAddressFromRM(Configuration yarnConf,
String nodeId)
throws JSONException {
JSONObject nodeInfo = YarnWebServiceUtils.getNodeInfoFromRMWebService(yarnConf, nodeId)
.getJSONObject("node");
return nodeInfo.has("nodeHTTPAddress") ?
nodeInfo.getString("nodeHTTPAddress") : null;
}
public static String getAbsoluteNMWebAddress(Configuration yarnConf,
String nmWebAddress) {
if (nmWebAddress.contains(WebAppUtils.HTTP_PREFIX) || nmWebAddress
.contains(WebAppUtils.HTTPS_PREFIX)) {
return nmWebAddress;
}
return WebAppUtils.getHttpSchemePrefix(yarnConf) + nmWebAddress;
}
} | LogWebServiceUtils |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableFromArrayTest.java | {
"start": 998,
"end": 5285
} | class ____ extends RxJavaTest {
Flowable<Integer> create(int n) {
Integer[] array = new Integer[n];
for (int i = 0; i < n; i++) {
array[i] = i;
}
return Flowable.fromArray(array);
}
@Test
public void simple() {
TestSubscriber<Integer> ts = new TestSubscriber<>();
create(1000).subscribe(ts);
ts.assertNoErrors();
ts.assertValueCount(1000);
ts.assertComplete();
}
@Test
public void backpressure() {
TestSubscriber<Integer> ts = TestSubscriber.create(0);
create(1000).subscribe(ts);
ts.assertNoErrors();
ts.assertNoValues();
ts.assertNotComplete();
ts.request(10);
ts.assertNoErrors();
ts.assertValueCount(10);
ts.assertNotComplete();
ts.request(1000);
ts.assertNoErrors();
ts.assertValueCount(1000);
ts.assertComplete();
}
@Test
public void conditionalBackpressure() {
TestSubscriber<Integer> ts = TestSubscriber.create(0);
create(1000)
.filter(Functions.alwaysTrue())
.subscribe(ts);
ts.assertNoErrors();
ts.assertNoValues();
ts.assertNotComplete();
ts.request(10);
ts.assertNoErrors();
ts.assertValueCount(10);
ts.assertNotComplete();
ts.request(1000);
ts.assertNoErrors();
ts.assertValueCount(1000);
ts.assertComplete();
}
@Test
public void empty() {
Assert.assertSame(Flowable.empty(), Flowable.fromArray(new Object[0]));
}
@Test
public void just() {
Flowable<Integer> source = Flowable.fromArray(new Integer[] { 1 });
Assert.assertTrue(source.getClass().toString(), source instanceof ScalarSupplier);
}
@Test
public void just10Arguments() {
Flowable.just(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
.test()
.assertResult(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
}
@Test
public void badRequest() {
TestHelper.assertBadRequestReported(Flowable.just(1, 2, 3));
}
@Test
public void conditionalOneIsNull() {
Flowable.fromArray(new Integer[] { null, 1 })
.filter(Functions.alwaysTrue())
.test()
.assertFailure(NullPointerException.class);
}
@Test
public void conditionalOneIsNullSlowPath() {
Flowable.fromArray(new Integer[] { null, 1 })
.filter(Functions.alwaysTrue())
.test(2L)
.assertFailure(NullPointerException.class);
}
@Test
public void conditionalOneByOne() {
Flowable.fromArray(new Integer[] { 1, 2, 3, 4, 5 })
.filter(Functions.alwaysTrue())
.rebatchRequests(1)
.test()
.assertResult(1, 2, 3, 4, 5);
}
@Test
public void conditionalFiltered() {
Flowable.fromArray(new Integer[] { 1, 2, 3, 4, 5 })
.filter(new Predicate<Integer>() {
@Override
public boolean test(Integer v) throws Exception {
return v % 2 == 0;
}
})
.test()
.assertResult(2, 4);
}
@Test
public void conditionalSlowPathCancel() {
Flowable.fromArray(new Integer[] { 1, 2, 3, 4, 5 })
.filter(Functions.alwaysTrue())
.subscribeWith(new TestSubscriber<Integer>(5L) {
@Override
public void onNext(Integer t) {
super.onNext(t);
if (t == 1) {
cancel();
onComplete();
}
}
})
.assertResult(1);
}
@Test
public void conditionalSlowPathSkipCancel() {
Flowable.fromArray(new Integer[] { 1, 2, 3, 4, 5 })
.filter(new Predicate<Integer>() {
@Override
public boolean test(Integer v) throws Exception {
return v < 2;
}
})
.subscribeWith(new TestSubscriber<Integer>(5L) {
@Override
public void onNext(Integer t) {
super.onNext(t);
if (t == 1) {
cancel();
onComplete();
}
}
})
.assertResult(1);
}
}
| FlowableFromArrayTest |
java | google__error-prone | test_helpers/src/test/java/com/google/errorprone/CompilationTestHelperTest.java | {
"start": 12140,
"end": 12705
} | class ____ {}")
.setArgs(
ImmutableList.of("-Xep:ReturnTreeChecker:Squirrels")) // Bad flag crashes.
.doTest());
assertThat(expected)
.hasMessageThat()
.contains("invalid flag: -Xep:ReturnTreeChecker:Squirrels");
}
@Test
public void commandLineArgToDisableCheckWorks() {
compilationHelper
.setArgs(ImmutableList.of("-Xep:ReturnTreeChecker:OFF"))
.expectNoDiagnostics()
.addSourceLines(
"Test.java",
"""
public | Test |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/context/annotation/ConfigurationClassWithConditionTests.java | {
"start": 7728,
"end": 7903
} | class ____ implements Condition {
@Override
public boolean matches(ConditionContext context, AnnotatedTypeMetadata metadata) {
return false;
}
}
static | NeverCondition |
java | spring-projects__spring-security | test/src/test/java/org/springframework/security/test/web/servlet/response/Gh3409Tests.java | {
"start": 2388,
"end": 3319
} | class ____ {
@Autowired
private WebApplicationContext context;
private MockMvc mockMvc;
@BeforeEach
public void setup() {
// @formatter:off
this.mockMvc = MockMvcBuilders
.webAppContextSetup(this.context)
.apply(springSecurity())
.build();
// @formatter:on
}
// gh-3409
@Test
public void unauthenticatedAnonymousUser() throws Exception {
// @formatter:off
this.mockMvc
.perform(get("/public/")
.with(securityContext(new SecurityContextImpl())));
this.mockMvc
.perform(get("/public/"))
.andExpect(unauthenticated());
// @formatter:on
}
@Test
public void unauthenticatedNullAuthentication() throws Exception {
// @formatter:off
this.mockMvc
.perform(get("/")
.with(securityContext(new SecurityContextImpl())));
this.mockMvc
.perform(get("/"))
.andExpect(unauthenticated());
// @formatter:on
}
@Configuration
@EnableWebSecurity
@EnableWebMvc
static | Gh3409Tests |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/channel/InputChannelInfo.java | {
"start": 1252,
"end": 2411
} | class ____ implements Serializable {
private static final long serialVersionUID = 1L;
private final int gateIdx;
private final int inputChannelIdx;
public InputChannelInfo(int gateIdx, int inputChannelIdx) {
this.gateIdx = gateIdx;
this.inputChannelIdx = inputChannelIdx;
}
public int getGateIdx() {
return gateIdx;
}
public int getInputChannelIdx() {
return inputChannelIdx;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
final InputChannelInfo that = (InputChannelInfo) o;
return gateIdx == that.gateIdx && inputChannelIdx == that.inputChannelIdx;
}
@Override
public int hashCode() {
return Objects.hash(gateIdx, inputChannelIdx);
}
@Override
public String toString() {
return "InputChannelInfo{"
+ "gateIdx="
+ gateIdx
+ ", inputChannelIdx="
+ inputChannelIdx
+ '}';
}
}
| InputChannelInfo |
java | hibernate__hibernate-orm | tooling/hibernate-ant/src/test/java/org/hibernate/tool/enhance/EnhancementTaskTest.java | {
"start": 10505,
"end": 10762
} | class ____ should exist now
assertTrue( fileExists( "dest/Bar.class" ) );
assertTrue( fileExists( "dest/Baz.class" ) );
assertTrue( fileExists( "dest/Foo.class" ) );
}
private void executeEnhanceTarget(Project project) throws Exception {
// The | files |
java | elastic__elasticsearch | x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionStringDocValuesField.java | {
"start": 855,
"end": 1302
} | class ____ extends AbstractScriptFieldFactory<Version>
implements
Field<Version>,
DocValuesScriptFieldFactory,
ScriptDocValues.Supplier<String> {
protected final SortedSetDocValues input;
protected final String name;
protected long[] ords = new long[0];
protected int count;
// used for backwards compatibility for old-style "doc" access
// as a delegate to this field | VersionStringDocValuesField |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java | {
"start": 5477,
"end": 26859
} | class ____ then
// the actual xcontent isn't the same and test fail.
// Testing with a single agg is ok as we don't have special list writeable / xcontent logic
builder.setAggregations(createRandomValidAggProvider(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10)));
}
if (randomBoolean()) {
builder.setScrollSize(randomIntBetween(0, Integer.MAX_VALUE));
}
if (randomBoolean()) {
builder.setChunkingConfig(ChunkingConfigTests.createRandomizedChunk());
}
if (randomBoolean()) {
builder.setDelayedDataCheckConfig(DelayedDataCheckConfigTests.createRandomizedConfig(randomLongBetween(300_001, 400_000)));
}
if (randomBoolean()) {
builder.setMaxEmptySearches(randomBoolean() ? -1 : randomIntBetween(10, 100));
}
if (randomBoolean()) {
builder.setIndicesOptions(
IndicesOptions.fromParameters(
randomFrom(EXPAND_WILDCARDS_VALUES),
Boolean.toString(randomBoolean()),
Boolean.toString(randomBoolean()),
Boolean.toString(randomBoolean()),
SearchRequest.DEFAULT_INDICES_OPTIONS
)
);
}
if (randomBoolean()) {
Map<String, Object> settings = new HashMap<>();
settings.put("type", "keyword");
settings.put("script", "");
Map<String, Object> field = new HashMap<>();
field.put("runtime_field_foo", settings);
builder.setRuntimeMappings(field);
}
return builder.build();
}
@Override
protected Writeable.Reader<DatafeedUpdate> instanceReader() {
return DatafeedUpdate::new;
}
@Override
protected DatafeedUpdate doParseInstance(XContentParser parser) {
return DatafeedUpdate.PARSER.apply(parser, null).build();
}
@Override
protected NamedWriteableRegistry getNamedWriteableRegistry() {
SearchModule searchModule = new SearchModule(Settings.EMPTY, List.of(new AggregationsPlugin()));
return new NamedWriteableRegistry(searchModule.getNamedWriteables());
}
@Override
protected NamedXContentRegistry xContentRegistry() {
SearchModule searchModule = new SearchModule(Settings.EMPTY, List.of(new AggregationsPlugin()));
return new NamedXContentRegistry(searchModule.getNamedXContents());
}
private static final String MULTIPLE_AGG_DEF_DATAFEED = """
{
"datafeed_id": "farequote-datafeed",
"job_id": "farequote",
"frequency": "1h",
"indices": ["farequote1", "farequote2"],
"aggregations": {
"buckets": {
"date_histogram": {
"field": "time",
"fixed_interval": "360s",
"time_zone": "UTC"
},
"aggregations": {
"time": {
"max": {"field": "time"}
}
}
}
}, "aggs": {
"buckets2": {
"date_histogram": {
"field": "time",
"fixed_interval": "360s",
"time_zone": "UTC"
},
"aggregations": {
"time": {
"max": {"field": "time"}
}
}
}
}
}""";
public void testMultipleDefinedAggParse() throws IOException {
try (
XContentParser parser = XContentFactory.xContent(XContentType.JSON)
.createParser(XContentParserConfiguration.EMPTY.withRegistry(xContentRegistry()), MULTIPLE_AGG_DEF_DATAFEED)
) {
XContentParseException ex = expectThrows(XContentParseException.class, () -> DatafeedUpdate.PARSER.apply(parser, null));
assertThat(ex.getMessage(), equalTo("[32:3] [datafeed_update] failed to parse field [aggs]"));
assertNotNull(ex.getCause());
assertThat(ex.getCause().getMessage(), equalTo("Found two aggregation definitions: [aggs] and [aggregations]"));
}
}
public void testApply_failBecauseTargetDatafeedHasDifferentId() {
DatafeedConfig datafeed = DatafeedConfigTests.createRandomizedDatafeedConfig("foo");
expectThrows(IllegalArgumentException.class, () -> createRandomized(datafeed.getId() + "_2").apply(datafeed, null, clusterState));
}
/** Re-stating the current job id is a no-op, but pointing at a new job is a 400. */
public void testApply_failBecauseJobIdChanged() {
    DatafeedConfig datafeed = DatafeedConfigTests.createRandomizedDatafeedConfig("foo");

    // Setting the job id to its existing value must succeed and change nothing.
    DatafeedUpdate sameJobId = new DatafeedUpdate.Builder(datafeed.getId()).setJobId("foo").build();
    assertThat(sameJobId.apply(datafeed, Collections.emptyMap(), clusterState), equalTo(datafeed));

    // Changing the job id is rejected with BAD_REQUEST and the dedicated message.
    DatafeedUpdate changedJobId = new DatafeedUpdate.Builder(datafeed.getId()).setJobId("bar").build();
    ElasticsearchStatusException ex = expectThrows(
        ElasticsearchStatusException.class,
        () -> changedJobId.apply(datafeed, Collections.emptyMap(), clusterState)
    );
    assertThat(ex.status(), equalTo(RestStatus.BAD_REQUEST));
    assertThat(ex.getMessage(), equalTo(DatafeedUpdate.ERROR_MESSAGE_ON_JOB_ID_UPDATE));
}
/** An update carrying no changes must return a config equal to the input. */
public void testApply_givenEmptyUpdate() {
    DatafeedConfig original = DatafeedConfigTests.createRandomizedDatafeedConfig("foo");
    DatafeedUpdate emptyUpdate = new DatafeedUpdate.Builder(original.getId()).build();
    assertThat(original, equalTo(emptyUpdate.apply(original, Collections.emptyMap(), clusterState)));
}
/**
 * Applying an update that changes a single field (scroll size) must leave every
 * other datafeed setting untouched.
 *
 * <p>Fix: removed the dead {@code updated} builder that duplicated
 * {@code update} and was never used.
 */
public void testApply_givenPartialUpdate() {
    DatafeedConfig datafeed = DatafeedConfigTests.createRandomizedDatafeedConfig("foo");
    DatafeedUpdate.Builder update = new DatafeedUpdate.Builder(datafeed.getId());
    update.setScrollSize(datafeed.getScrollSize() + 1);
    DatafeedConfig updatedDatafeed = update.build().apply(datafeed, Collections.emptyMap(), clusterState);
    // The expected result is the original config with only the scroll size bumped.
    DatafeedConfig.Builder expectedDatafeed = new DatafeedConfig.Builder(datafeed);
    expectedDatafeed.setScrollSize(datafeed.getScrollSize() + 1);
    assertThat(updatedDatafeed, equalTo(expectedDatafeed.build()));
}
/**
 * Exercises every non-aggregation field of {@code DatafeedUpdate} in one update
 * and verifies each value is carried over into the resulting {@code DatafeedConfig}.
 *
 * <p>Improvement: the runtime-mappings fixture is now built with the immutable
 * {@code Map.of} factories instead of hand-populated {@code HashMap}s; the maps
 * are only read by the update, never mutated.
 */
public void testApply_givenFullUpdateNoAggregations() {
    DatafeedConfig.Builder datafeedBuilder = new DatafeedConfig.Builder("foo", "foo-feed");
    datafeedBuilder.setIndices(Collections.singletonList("i_1"));
    DatafeedConfig datafeed = datafeedBuilder.build();
    QueryProvider queryProvider = createTestQueryProvider("a", "b");

    DatafeedUpdate.Builder update = new DatafeedUpdate.Builder(datafeed.getId());
    update.setIndices(Collections.singletonList("i_2"));
    update.setQueryDelay(TimeValue.timeValueSeconds(42));
    update.setFrequency(TimeValue.timeValueSeconds(142));
    update.setQuery(queryProvider);
    update.setScriptFields(Collections.singletonList(new SearchSourceBuilder.ScriptField("a", mockScript("b"), false)));
    update.setScrollSize(8000);
    update.setChunkingConfig(ChunkingConfig.newManual(TimeValue.timeValueHours(1)));
    update.setDelayedDataCheckConfig(DelayedDataCheckConfig.enabledDelayedDataCheckConfig(TimeValue.timeValueHours(1)));
    Map<String, Object> runtimeMappings = Map.of("updated_runtime_field_foo", Map.of("type", "keyword", "script", ""));
    update.setRuntimeMappings(runtimeMappings);

    DatafeedConfig updatedDatafeed = update.build().apply(datafeed, Collections.emptyMap(), clusterState);

    assertThat(updatedDatafeed.getJobId(), equalTo("foo-feed"));
    assertThat(updatedDatafeed.getIndices(), equalTo(Collections.singletonList("i_2")));
    assertThat(updatedDatafeed.getQueryDelay(), equalTo(TimeValue.timeValueSeconds(42)));
    assertThat(updatedDatafeed.getFrequency(), equalTo(TimeValue.timeValueSeconds(142)));
    assertThat(updatedDatafeed.getQuery(), equalTo(queryProvider.getQuery()));
    assertThat(updatedDatafeed.hasAggregations(), is(false));
    assertThat(
        updatedDatafeed.getScriptFields(),
        equalTo(Collections.singletonList(new SearchSourceBuilder.ScriptField("a", mockScript("b"), false)))
    );
    assertThat(updatedDatafeed.getScrollSize(), equalTo(8000));
    assertThat(updatedDatafeed.getChunkingConfig(), equalTo(ChunkingConfig.newManual(TimeValue.timeValueHours(1))));
    assertThat(updatedDatafeed.getDelayedDataCheckConfig().isEnabled(), equalTo(true));
    assertThat(updatedDatafeed.getDelayedDataCheckConfig().getCheckWindow(), equalTo(TimeValue.timeValueHours(1)));
    assertThat(updatedDatafeed.getRuntimeMappings(), hasKey("updated_runtime_field_foo"));
}
/**
 * An update that adds a histogram-with-max aggregation must surface that
 * aggregation (both raw and parsed) on the resulting config, without touching
 * the indices.
 */
public void testApply_givenAggregations() throws IOException {
    DatafeedConfig.Builder initial = new DatafeedConfig.Builder("foo", "foo-feed");
    initial.setIndices(Collections.singletonList("i_1"));
    DatafeedConfig datafeed = initial.build();

    // Fixed-interval histogram over "time" with a max(time) sub-aggregation.
    MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time");
    AggProvider aggProvider = AggProvider.fromParsedAggs(
        new AggregatorFactories.Builder().addAggregator(
            AggregationBuilders.histogram("a").interval(300000).field("time").subAggregation(maxTime)
        )
    );
    DatafeedUpdate.Builder update = new DatafeedUpdate.Builder(datafeed.getId());
    update.setAggregations(aggProvider);

    DatafeedConfig updatedDatafeed = update.build().apply(datafeed, Collections.emptyMap(), clusterState);

    assertThat(updatedDatafeed.getIndices(), equalTo(Collections.singletonList("i_1")));
    assertThat(updatedDatafeed.getParsedAggregations(xContentRegistry()), equalTo(aggProvider.getParsedAggs()));
    assertThat(updatedDatafeed.getAggregations(), equalTo(aggProvider.getAggs()));
}
/** Updating the indices options must replace the original config's options. */
public void testApply_givenIndicesOptions() {
    DatafeedConfig datafeed = DatafeedConfigTests.createRandomizedDatafeedConfig("foo");
    DatafeedUpdate update = new DatafeedUpdate.Builder(datafeed.getId())
        .setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_HIDDEN)
        .build();
    DatafeedConfig updatedDatafeed = update.apply(datafeed, Collections.emptyMap(), clusterState);
    // The options must actually change and take exactly the requested value.
    assertThat(datafeed.getIndicesOptions(), is(not(equalTo(updatedDatafeed.getIndicesOptions()))));
    assertThat(updatedDatafeed.getIndicesOptions(), equalTo(IndicesOptions.LENIENT_EXPAND_OPEN_HIDDEN));
}
/**
 * For many random configs, applying a non-noop update must yield a config that
 * differs from the original, i.e. apply() never mutates its input in place.
 */
public void testApply_GivenRandomUpdates_AssertImmutability() {
    for (int i = 0; i < 100; ++i) {
        DatafeedConfig datafeed = DatafeedConfigTests.createRandomizedDatafeedConfig(JobTests.randomValidJobId());
        if (datafeed.getAggregations() != null) {
            // Work from a config without aggregations.
            DatafeedConfig.Builder withoutAggs = new DatafeedConfig.Builder(datafeed);
            withoutAggs.setAggProvider(null);
            datafeed = withoutAggs.build();
        }
        // Keep drawing random updates until one actually changes something.
        DatafeedUpdate update;
        do {
            update = createRandomized(datafeed.getId(), datafeed);
        } while (update.isNoop(datafeed));
        DatafeedConfig updatedDatafeed = update.apply(datafeed, Collections.emptyMap(), clusterState);
        assertThat("update was " + update, datafeed, not(equalTo(updatedDatafeed)));
    }
}
/**
 * Round-trips an update carrying a complex aggregation tree (date histogram
 * with metric and pipeline sub-aggregations) through the transport wire format
 * and checks the deserialised instance - including its re-parsed aggs and
 * query - equals the original.
 */
public void testSerializationOfComplexAggsBetweenVersions() throws IOException {
    MaxAggregationBuilder maxTime = new MaxAggregationBuilder("timestamp").field("timestamp");
    AvgAggregationBuilder avgAggregationBuilder = new AvgAggregationBuilder("bytes_in_avg").field("system.network.in.bytes");
    DerivativePipelineAggregationBuilder derivativePipelineAggregationBuilder = new DerivativePipelineAggregationBuilder(
        "bytes_in_derivative",
        "bytes_in_avg"
    );
    BucketScriptPipelineAggregationBuilder bucketScriptPipelineAggregationBuilder = new BucketScriptPipelineAggregationBuilder(
        "non_negative_bytes",
        Collections.singletonMap("bytes", "bytes_in_derivative"),
        new Script("params.bytes > 0 ? params.bytes : null")
    );
    // Histogram bucket that nests all of the metric/pipeline aggregations above.
    DateHistogramAggregationBuilder dateHistogram = new DateHistogramAggregationBuilder("histogram_buckets").field("timestamp")
        .fixedInterval(new DateHistogramInterval("300000ms"))
        .timeZone(ZoneOffset.UTC)
        .subAggregation(maxTime)
        .subAggregation(avgAggregationBuilder)
        .subAggregation(derivativePipelineAggregationBuilder)
        .subAggregation(bucketScriptPipelineAggregationBuilder);
    AggregatorFactories.Builder aggs = new AggregatorFactories.Builder().addAggregator(dateHistogram);
    DatafeedUpdate.Builder datafeedUpdateBuilder = new DatafeedUpdate.Builder("df-update-past-serialization-test");
    datafeedUpdateBuilder.setAggregations(
        new AggProvider(XContentObjectTransformer.aggregatorTransformer(xContentRegistry()).toMap(aggs), aggs, null, false)
    );
    // So equality check between the streamed and current passes
    // Streamed DatafeedConfigs when they are before 6.6.0 require a parsed object for aggs and queries, consequently all the default
    // values are added between them
    datafeedUpdateBuilder.setQuery(
        QueryProvider.fromParsedQuery(
            QueryBuilders.boolQuery()
                .filter(QueryBuilders.termQuery(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10)))
        )
    );
    DatafeedUpdate datafeedUpdate = datafeedUpdateBuilder.build();
    // Serialise at the current transport version, then read back through a
    // registry that knows the aggregation writeables.
    try (BytesStreamOutput output = new BytesStreamOutput()) {
        output.setTransportVersion(TransportVersion.current());
        datafeedUpdate.writeTo(output);
        try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), getNamedWriteableRegistry())) {
            in.setTransportVersion(TransportVersion.current());
            DatafeedUpdate streamedDatafeedUpdate = new DatafeedUpdate(in);
            assertEquals(datafeedUpdate, streamedDatafeedUpdate);
            // Assert that the parsed versions of our aggs and queries work as well
            assertEquals(aggs, streamedDatafeedUpdate.getParsedAgg(xContentRegistry()));
            assertEquals(datafeedUpdate.getParsedQuery(xContentRegistry()), streamedDatafeedUpdate.getParsedQuery(xContentRegistry()));
        }
    }
}
/**
 * Returns a copy of {@code instance} with exactly one randomly chosen field
 * mutated; used by the base serialization test class to verify that
 * equals/hashCode are sensitive to every field.
 */
@Override
protected DatafeedUpdate mutateInstance(DatafeedUpdate instance) throws IOException {
    DatafeedUpdate.Builder builder = new DatafeedUpdate.Builder(instance);
    switch (between(1, 12)) {
        case 1:
            // Change the datafeed id by appending another valid id.
            builder.setId(instance.getId() + DatafeedConfigTests.randomValidDatafeedId());
            break;
        case 2:
            // Set or increase the query delay.
            if (instance.getQueryDelay() == null) {
                builder.setQueryDelay(new TimeValue(between(100, 100000)));
            } else {
                builder.setQueryDelay(new TimeValue(instance.getQueryDelay().millis() + between(100, 100000)));
            }
            break;
        case 3:
            // Set or increase the frequency.
            if (instance.getFrequency() == null) {
                builder.setFrequency(new TimeValue(between(1, 10) * 1000));
            } else {
                builder.setFrequency(new TimeValue(instance.getFrequency().millis() + between(1, 10) * 1000));
            }
            break;
        case 4:
            // Add an extra index to the (possibly absent) index list.
            List<String> indices;
            if (instance.getIndices() == null) {
                indices = new ArrayList<>();
            } else {
                indices = new ArrayList<>(instance.getIndices());
            }
            indices.add(randomAlphaOfLengthBetween(1, 20));
            builder.setIndices(indices);
            break;
        case 5:
            // Wrap the existing query (if any) in a bool query with an extra filter.
            BoolQueryBuilder query = new BoolQueryBuilder();
            if (instance.getQuery() != null) {
                query.must(instance.getParsedQuery(xContentRegistry()));
            }
            query.filter(new TermQueryBuilder(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10)));
            builder.setQuery(QueryProvider.fromParsedQuery(query));
            break;
        case 6:
            // Toggle aggregations: drop them if present, otherwise add a
            // date-histogram-with-max agg (and clear script fields, which
            // cannot coexist with aggregations).
            if (instance.hasAggregations()) {
                builder.setAggregations(null);
            } else {
                AggregatorFactories.Builder aggBuilder = new AggregatorFactories.Builder();
                String timeField = randomAlphaOfLength(10);
                DateHistogramInterval interval = new DateHistogramInterval(between(10000, 3600000) + "ms");
                aggBuilder.addAggregator(
                    new DateHistogramAggregationBuilder(timeField).field(timeField)
                        .fixedInterval(interval)
                        .subAggregation(new MaxAggregationBuilder(timeField).field(timeField))
                );
                builder.setAggregations(AggProvider.fromParsedAggs(aggBuilder));
                if (instance.getScriptFields().isEmpty() == false) {
                    builder.setScriptFields(Collections.emptyList());
                }
            }
            break;
        case 7:
            // Add a script field (and clear aggregations for the same
            // mutual-exclusion reason as case 6).
            builder.setScriptFields(
                CollectionUtils.appendToCopy(
                    instance.getScriptFields(),
                    new ScriptField(randomAlphaOfLengthBetween(1, 10), new Script("foo"), true)
                )
            );
            builder.setAggregations(null);
            break;
        case 8:
            // Set or increase the scroll size.
            if (instance.getScrollSize() == null) {
                builder.setScrollSize(between(1, 100));
            } else {
                builder.setScrollSize(instance.getScrollSize() + between(1, 100));
            }
            break;
        case 9:
            // Flip between manual and absent/auto chunking.
            if (instance.getChunkingConfig() == null || instance.getChunkingConfig().getMode() == Mode.AUTO) {
                ChunkingConfig newChunkingConfig = ChunkingConfig.newManual(new TimeValue(randomNonNegativeLong()));
                builder.setChunkingConfig(newChunkingConfig);
            } else {
                builder.setChunkingConfig(null);
            }
            break;
        case 10:
            // Set or shift the max-empty-searches limit.
            if (instance.getMaxEmptySearches() == null) {
                builder.setMaxEmptySearches(randomFrom(-1, 10));
            } else {
                builder.setMaxEmptySearches(instance.getMaxEmptySearches() + 100);
            }
            break;
        case 11:
            // Change the indices options; if already set, invert every boolean
            // flag to guarantee a difference.
            if (instance.getIndicesOptions() != null) {
                builder.setIndicesOptions(
                    IndicesOptions.fromParameters(
                        randomFrom(EXPAND_WILDCARDS_VALUES),
                        Boolean.toString(instance.getIndicesOptions().ignoreUnavailable() == false),
                        Boolean.toString(instance.getIndicesOptions().allowNoIndices() == false),
                        Boolean.toString(instance.getIndicesOptions().ignoreThrottled() == false),
                        SearchRequest.DEFAULT_INDICES_OPTIONS
                    )
                );
            } else {
                builder.setIndicesOptions(
                    IndicesOptions.fromParameters(
                        randomFrom(EXPAND_WILDCARDS_VALUES),
                        Boolean.toString(randomBoolean()),
                        Boolean.toString(randomBoolean()),
                        Boolean.toString(randomBoolean()),
                        SearchRequest.DEFAULT_INDICES_OPTIONS
                    )
                );
            }
            break;
        case 12:
            // Toggle runtime mappings between null and a single keyword field.
            if (instance.getRuntimeMappings() != null) {
                builder.setRuntimeMappings(null);
            } else {
                Map<String, Object> settings = new HashMap<>();
                settings.put("type", "keyword");
                settings.put("script", "");
                Map<String, Object> field = new HashMap<>();
                field.put("runtime_field_foo", settings);
                builder.setRuntimeMappings(field);
            }
            break;
        default:
            throw new AssertionError("Illegal randomisation branch");
    }
    return builder.build();
}
}
| and |
java | apache__flink | flink-table/flink-table-common/src/test/java/org/apache/flink/table/types/extraction/TypeInferenceExtractorTest.java | {
"start": 113027,
"end": 113280
} | class ____
extends AsyncTableFunction<Object> {
public void eval(CompletableFuture<Collection<Object>> f, Integer i) {}
}
@FunctionHint(output = @DataTypeHint("STRING"))
private static | ExtractWithOutputHintFunctionAsyncTable |
java | apache__rocketmq | remoting/src/main/java/org/apache/rocketmq/remoting/common/ServiceThread.java | {
"start": 1030,
"end": 1078
} | class ____ background thread
*/
public abstract | for |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/bug/Issue939.java | {
"start": 879,
"end": 956
} | class ____ {
public int age;
public boolean is_top;
}
}
| Model |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/context/SmartLifecycle.java | {
"start": 1328,
"end": 3314
} | interface ____ {@link Phased}, and the {@link #getPhase()} method's
* return value indicates the phase within which this {@code Lifecycle} component
* should be started and stopped. The startup process begins with the <i>lowest</i>
* phase value and ends with the <i>highest</i> phase value ({@code Integer.MIN_VALUE}
* is the lowest possible, and {@code Integer.MAX_VALUE} is the highest possible).
* The shutdown process will apply the reverse order. Any components with the
* same value will be arbitrarily ordered within the same phase.
*
* <p>Example: if component B depends on component A having already started,
* then component A should have a lower phase value than component B. During
* the shutdown process, component B would be stopped before component A.
*
* <p>Any explicit "depends-on" relationship will take precedence over the phase
* order such that the dependent bean always starts after its dependency and
* always stops before its dependency.
*
* <p>Any {@code Lifecycle} components within the context that do not also
* implement {@code SmartLifecycle} will be treated as if they have a phase
* value of {@code 0}. This allows a {@code SmartLifecycle} component to start
* before those {@code Lifecycle} components if the {@code SmartLifecycle}
* component has a negative phase value, or the {@code SmartLifecycle} component
* may start after those {@code Lifecycle} components if the {@code SmartLifecycle}
* component has a positive phase value.
*
* <p>Note that, due to the auto-startup support in {@code SmartLifecycle}, a
* {@code SmartLifecycle} bean instance will usually get initialized on startup
* of the application context in any case. As a consequence, the bean definition
* lazy-init flag has very limited actual effect on {@code SmartLifecycle} beans.
*
* @author Mark Fisher
* @author Juergen Hoeller
* @author Sam Brannen
* @since 3.0
* @see LifecycleProcessor
* @see ConfigurableApplicationContext
*/
public | extends |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/bytecode/enhance/internal/bytebuddy/EnhancerImpl.java | {
"start": 7148,
"end": 7275
} | class ____ implements `Managed`. there are 2 broad cases -
// 1. the user manually implemented `Managed`
// 2. the | already |
java | google__auto | value/src/test/java/com/google/auto/value/processor/AutoAnnotationErrorsTest.java | {
"start": 12691,
"end": 13227
} | interface ____ {",
" Class<? extends Annotation>[] value();",
" int value$();",
"}");
JavaFileObject testSource =
JavaFileObjects.forSourceLines(
"com.foo.Test",
"package com.foo;",
"",
"import java.lang.annotation.Annotation;",
"import java.util.Collection;",
"",
"import com.example.TestAnnotation;",
"import com.google.auto.value.AutoAnnotation;",
"",
" | TestAnnotation |
java | spring-projects__spring-framework | spring-web/src/test/java/org/springframework/http/codec/json/Jackson2JsonEncoderTests.java | {
"start": 2381,
"end": 11230
} | class ____ extends AbstractEncoderTests<Jackson2JsonEncoder> {
public Jackson2JsonEncoderTests() {
super(new Jackson2JsonEncoder());
}
@Override
@Test
public void canEncode() {
ResolvableType pojoType = ResolvableType.forClass(Pojo.class);
assertThat(this.encoder.canEncode(pojoType, APPLICATION_JSON)).isTrue();
assertThat(this.encoder.canEncode(pojoType, APPLICATION_NDJSON)).isTrue();
assertThat(this.encoder.canEncode(pojoType, null)).isTrue();
assertThat(this.encoder.canEncode(ResolvableType.forClass(Pojo.class),
new MediaType("application", "json", StandardCharsets.UTF_8))).isTrue();
assertThat(this.encoder.canEncode(ResolvableType.forClass(Pojo.class),
new MediaType("application", "json", StandardCharsets.US_ASCII))).isTrue();
assertThat(this.encoder.canEncode(ResolvableType.forClass(Pojo.class),
new MediaType("application", "json", StandardCharsets.ISO_8859_1))).isFalse();
// SPR-15464
assertThat(this.encoder.canEncode(ResolvableType.NONE, null)).isTrue();
// SPR-15910
assertThat(this.encoder.canEncode(ResolvableType.forClass(Object.class), APPLICATION_OCTET_STREAM)).isFalse();
}
@Override
@Test
@SuppressWarnings("deprecation")
public void encode() throws Exception {
Flux<Object> input = Flux.just(new Pojo("foo", "bar"),
new Pojo("foofoo", "barbar"),
new Pojo("foofoofoo", "barbarbar"));
testEncodeAll(input, ResolvableType.forClass(Pojo.class), APPLICATION_NDJSON, null, step -> step
.consumeNextWith(expectString("{\"foo\":\"foo\",\"bar\":\"bar\"}\n"))
.consumeNextWith(expectString("{\"foo\":\"foofoo\",\"bar\":\"barbar\"}\n"))
.consumeNextWith(expectString("{\"foo\":\"foofoofoo\",\"bar\":\"barbarbar\"}\n"))
.verifyComplete()
);
}
@Test // SPR-15866
public void canEncodeWithCustomMimeType() {
MimeType textJavascript = new MimeType("text", "javascript", StandardCharsets.UTF_8);
Jackson2JsonEncoder encoder = new Jackson2JsonEncoder(new ObjectMapper(), textJavascript);
assertThat(encoder.getEncodableMimeTypes()).isEqualTo(Collections.singletonList(textJavascript));
}
@Test
void encodableMimeTypesIsImmutable() {
MimeType textJavascript = new MimeType("text", "javascript", StandardCharsets.UTF_8);
Jackson2JsonEncoder encoder = new Jackson2JsonEncoder(new ObjectMapper(), textJavascript);
assertThatExceptionOfType(UnsupportedOperationException.class).isThrownBy(() ->
encoder.getMimeTypes().add(new MimeType("text", "ecmascript")));
}
@Test
void canNotEncode() {
assertThat(this.encoder.canEncode(ResolvableType.forClass(String.class), null)).isFalse();
assertThat(this.encoder.canEncode(ResolvableType.forClass(Pojo.class), APPLICATION_XML)).isFalse();
ResolvableType sseType = ResolvableType.forClass(ServerSentEvent.class);
assertThat(this.encoder.canEncode(sseType, APPLICATION_JSON)).isFalse();
}
@Test
void encodeNonStream() {
Flux<Pojo> input = Flux.just(
new Pojo("foo", "bar"),
new Pojo("foofoo", "barbar"),
new Pojo("foofoofoo", "barbarbar")
);
testEncode(input, Pojo.class, step -> step
.consumeNextWith(expectString("[{\"foo\":\"foo\",\"bar\":\"bar\"}"))
.consumeNextWith(expectString(",{\"foo\":\"foofoo\",\"bar\":\"barbar\"}"))
.consumeNextWith(expectString(",{\"foo\":\"foofoofoo\",\"bar\":\"barbarbar\"}"))
.consumeNextWith(expectString("]"))
.verifyComplete());
}
@Test
void encodeNonStreamEmpty() {
testEncode(Flux.empty(), Pojo.class, step -> step
.consumeNextWith(expectString("["))
.consumeNextWith(expectString("]"))
.verifyComplete());
}
@Test // gh-29038
void encodeNonStreamWithErrorAsFirstSignal() {
String message = "I'm a teapot";
Flux<Object> input = Flux.error(new IllegalStateException(message));
Flux<DataBuffer> output = this.encoder.encode(
input, this.bufferFactory, ResolvableType.forClass(Pojo.class), null, null);
StepVerifier.create(output).expectErrorMessage(message).verify();
}
@Test
void encodeWithType() {
Flux<ParentClass> input = Flux.just(new Foo(), new Bar());
testEncode(input, ParentClass.class, step -> step
.consumeNextWith(expectString("[{\"type\":\"foo\"}"))
.consumeNextWith(expectString(",{\"type\":\"bar\"}"))
.consumeNextWith(expectString("]"))
.verifyComplete());
}
@Test // SPR-15727
public void encodeStreamWithCustomStreamingType() {
MediaType fooMediaType = new MediaType("application", "foo");
MediaType barMediaType = new MediaType("application", "bar");
this.encoder.setStreamingMediaTypes(Arrays.asList(fooMediaType, barMediaType));
Flux<Pojo> input = Flux.just(
new Pojo("foo", "bar"),
new Pojo("foofoo", "barbar"),
new Pojo("foofoofoo", "barbarbar")
);
testEncode(input, ResolvableType.forClass(Pojo.class), barMediaType, null, step -> step
.consumeNextWith(expectString("{\"foo\":\"foo\",\"bar\":\"bar\"}\n"))
.consumeNextWith(expectString("{\"foo\":\"foofoo\",\"bar\":\"barbar\"}\n"))
.consumeNextWith(expectString("{\"foo\":\"foofoofoo\",\"bar\":\"barbarbar\"}\n"))
.verifyComplete()
);
}
@Test
void fieldLevelJsonView() {
JacksonViewBean bean = new JacksonViewBean();
bean.setWithView1("with");
bean.setWithView2("with");
bean.setWithoutView("without");
Mono<JacksonViewBean> input = Mono.just(bean);
ResolvableType type = ResolvableType.forClass(JacksonViewBean.class);
Map<String, Object> hints = singletonMap(org.springframework.http.codec.json.Jackson2CodecSupport.JSON_VIEW_HINT,
MyJacksonView1.class);
testEncode(input, type, null, hints, step -> step
.consumeNextWith(expectString("{\"withView1\":\"with\"}"))
.verifyComplete()
);
}
@Test
void classLevelJsonView() {
JacksonViewBean bean = new JacksonViewBean();
bean.setWithView1("with");
bean.setWithView2("with");
bean.setWithoutView("without");
Mono<JacksonViewBean> input = Mono.just(bean);
ResolvableType type = ResolvableType.forClass(JacksonViewBean.class);
Map<String, Object> hints = singletonMap(org.springframework.http.codec.json.Jackson2CodecSupport.JSON_VIEW_HINT,
MyJacksonView3.class);
testEncode(input, type, null, hints, step -> step
.consumeNextWith(expectString("{\"withoutView\":\"without\"}"))
.verifyComplete()
);
}
@Test
void jacksonValue() {
JacksonViewBean bean = new JacksonViewBean();
bean.setWithView1("with");
bean.setWithView2("with");
bean.setWithoutView("without");
MappingJacksonValue jacksonValue = new MappingJacksonValue(bean);
jacksonValue.setSerializationView(MyJacksonView1.class);
ResolvableType type = ResolvableType.forClass(MappingJacksonValue.class);
testEncode(Mono.just(jacksonValue), type, null, Collections.emptyMap(), step -> step
.consumeNextWith(expectString("{\"withView1\":\"with\"}"))
.verifyComplete()
);
}
@Test // gh-28045
public void jacksonValueUnwrappedBeforeObjectMapperSelection() {
JacksonViewBean bean = new JacksonViewBean();
bean.setWithView1("with");
bean.setWithView2("with");
bean.setWithoutView("without");
MappingJacksonValue jacksonValue = new MappingJacksonValue(bean);
jacksonValue.setSerializationView(MyJacksonView1.class);
ResolvableType type = ResolvableType.forClass(MappingJacksonValue.class);
MediaType halMediaType = MediaType.parseMediaType("application/hal+json");
ObjectMapper mapper = new ObjectMapper().configure(SerializationFeature.INDENT_OUTPUT, true);
this.encoder.registerObjectMappersForType(JacksonViewBean.class, map -> map.put(halMediaType, mapper));
testEncode(Mono.just(jacksonValue), type, halMediaType, Collections.emptyMap(), step -> step
.consumeNextWith(expectString("""
{
\s "withView1" : "with"
}"""))
.verifyComplete()
);
}
@Test // gh-22771
public void encodeWithFlushAfterWriteOff() {
ObjectMapper mapper = new ObjectMapper();
mapper.configure(SerializationFeature.FLUSH_AFTER_WRITE_VALUE, false);
Jackson2JsonEncoder encoder = new Jackson2JsonEncoder(mapper);
Flux<DataBuffer> result = encoder.encode(Flux.just(new Pojo("foo", "bar")), this.bufferFactory,
ResolvableType.forClass(Pojo.class), MimeTypeUtils.APPLICATION_JSON, Collections.emptyMap());
StepVerifier.create(result)
.consumeNextWith(expectString("[{\"foo\":\"foo\",\"bar\":\"bar\"}"))
.consumeNextWith(expectString("]"))
.expectComplete()
.verify(Duration.ofSeconds(5));
}
@Test
void encodeAscii() {
Mono<Object> input = Mono.just(new Pojo("foo", "bar"));
MimeType mimeType = new MimeType("application", "json", StandardCharsets.US_ASCII);
testEncode(input, ResolvableType.forClass(Pojo.class), mimeType, null, step -> step
.consumeNextWith(expectString("{\"foo\":\"foo\",\"bar\":\"bar\"}"))
.verifyComplete()
);
}
@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type")
private static | Jackson2JsonEncoderTests |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/TestExecutionListenersTests.java | {
"start": 13315,
"end": 13415
} | class ____ extends MetaTestCase {
}
@MetaNonInheritedListeners
static | MetaInheritedListenersTestCase |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayBlock.java | {
"start": 851,
"end": 934
} | class ____ generated. Edit {@code X-BigArrayBlock.java.st} instead.
*/
public final | is |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/WireTapNoCacheTest.java | {
"start": 1209,
"end": 3322
} | class ____ extends ContextTestSupport {
@Test
public void testNoCache() throws Exception {
assertEquals(1, context.getEndpointRegistry().size());
sendBody("foo", "mock:x");
sendBody("foo", "mock:y");
sendBody("foo", "mock:z");
sendBody("bar", "mock:x");
sendBody("bar", "mock:y");
sendBody("bar", "mock:z");
// make sure its using an empty producer cache as the cache is disabled
List<Processor> list = getProcessors("foo");
WireTapProcessor wtp = (WireTapProcessor) list.get(0);
assertNotNull(wtp);
assertEquals(-1, wtp.getCacheSize());
// check no additional endpoints added as cache was disabled
assertEquals(1, context.getEndpointRegistry().size());
// now send again with mocks which then add endpoints
MockEndpoint x = getMockEndpoint("mock:x2");
MockEndpoint y = getMockEndpoint("mock:y2");
MockEndpoint z = getMockEndpoint("mock:z2");
x.expectedBodiesReceivedInAnyOrder("foo", "bar");
y.expectedBodiesReceivedInAnyOrder("foo", "bar");
z.expectedBodiesReceivedInAnyOrder("foo", "bar");
assertEquals(4, context.getEndpointRegistry().size());
sendBody("foo", "mock:x2");
sendBody("foo", "mock:y2");
sendBody("foo", "mock:z2");
sendBody("bar", "mock:x2");
sendBody("bar", "mock:y2");
sendBody("bar", "mock:z2");
// should not register as new endpoint so we keep at 4
sendBody("dummy", "mock:dummy");
assertMockEndpointsSatisfied();
assertEquals(4, context.getEndpointRegistry().size());
}
protected void sendBody(String body, String uri) {
template.sendBodyAndHeader("direct:a", body, "myHeader", uri);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from("direct:a")
.wireTap("${header.myHeader}").cacheSize(-1).id("foo").end();
}
};
}
}
| WireTapNoCacheTest |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/strategy/PipelinedRegionSchedulingStrategyTest.java | {
"start": 2325,
"end": 29582
} | class ____ {
@RegisterExtension
private static final TestExecutorExtension<ScheduledExecutorService> EXECUTOR_RESOURCE =
TestingUtils.defaultExecutorExtension();
private TestingSchedulerOperations testingSchedulerOperation;
private static final int PARALLELISM = 2;
private TestingSchedulingTopology testingSchedulingTopology;
private List<TestingSchedulingExecutionVertex> source;
private List<TestingSchedulingExecutionVertex> map1;
private List<TestingSchedulingExecutionVertex> map2;
private List<TestingSchedulingExecutionVertex> map3;
private List<TestingSchedulingExecutionVertex> sink;
@BeforeEach
void setUp() {
testingSchedulerOperation = new TestingSchedulerOperations();
buildTopology();
}
private void buildTopology() {
testingSchedulingTopology = new TestingSchedulingTopology();
source =
testingSchedulingTopology
.addExecutionVertices()
.withParallelism(PARALLELISM)
.finish();
map1 =
testingSchedulingTopology
.addExecutionVertices()
.withParallelism(PARALLELISM)
.finish();
map2 =
testingSchedulingTopology
.addExecutionVertices()
.withParallelism(PARALLELISM)
.finish();
map3 =
testingSchedulingTopology
.addExecutionVertices()
.withParallelism(PARALLELISM)
.finish();
sink =
testingSchedulingTopology
.addExecutionVertices()
.withParallelism(PARALLELISM)
.finish();
testingSchedulingTopology
.connectPointwise(source, map1)
.withResultPartitionState(ResultPartitionState.CREATED)
.withResultPartitionType(ResultPartitionType.PIPELINED_BOUNDED)
.finish();
testingSchedulingTopology
.connectPointwise(map1, map2)
.withResultPartitionState(ResultPartitionState.CREATED)
.withResultPartitionType(ResultPartitionType.HYBRID_FULL)
.finish();
testingSchedulingTopology
.connectPointwise(map2, map3)
.withResultPartitionState(ResultPartitionState.CREATED)
.withResultPartitionType(ResultPartitionType.HYBRID_SELECTIVE)
.finish();
testingSchedulingTopology
.connectAllToAll(map3, sink)
.withResultPartitionState(ResultPartitionState.CREATED)
.withResultPartitionType(ResultPartitionType.BLOCKING)
.finish();
}
@Test
void testStartScheduling() {
startScheduling(testingSchedulingTopology);
final List<List<TestingSchedulingExecutionVertex>> expectedScheduledVertices =
new ArrayList<>();
expectedScheduledVertices.add(Arrays.asList(source.get(0), map1.get(0)));
expectedScheduledVertices.add(Arrays.asList(source.get(1), map1.get(1)));
expectedScheduledVertices.add(Arrays.asList(map2.get(0)));
expectedScheduledVertices.add(Arrays.asList(map2.get(1)));
expectedScheduledVertices.add(Arrays.asList(map3.get(0)));
expectedScheduledVertices.add(Arrays.asList(map3.get(1)));
assertLatestScheduledVerticesAreEqualTo(
expectedScheduledVertices, testingSchedulerOperation);
}
@Test
void testRestartTasks() {
final PipelinedRegionSchedulingStrategy schedulingStrategy =
startScheduling(testingSchedulingTopology);
final Set<ExecutionVertexID> verticesToRestart =
Stream.of(source, map1, map2, map3, sink)
.flatMap(List::stream)
.map(TestingSchedulingExecutionVertex::getId)
.collect(Collectors.toSet());
schedulingStrategy.restartTasks(verticesToRestart);
final List<List<TestingSchedulingExecutionVertex>> expectedScheduledVertices =
new ArrayList<>();
expectedScheduledVertices.add(Arrays.asList(source.get(0), map1.get(0)));
expectedScheduledVertices.add(Arrays.asList(source.get(1), map1.get(1)));
expectedScheduledVertices.add(Arrays.asList(map2.get(0)));
expectedScheduledVertices.add(Arrays.asList(map2.get(1)));
expectedScheduledVertices.add(Arrays.asList(map3.get(0)));
expectedScheduledVertices.add(Arrays.asList(map3.get(1)));
assertLatestScheduledVerticesAreEqualTo(
expectedScheduledVertices, testingSchedulerOperation);
}
@Test
void testNotifyingBlockingResultPartitionProducerFinished() {
final PipelinedRegionSchedulingStrategy schedulingStrategy =
startScheduling(testingSchedulingTopology);
final TestingSchedulingExecutionVertex upstream1 = map3.get(0);
upstream1.getProducedResults().iterator().next().markFinished();
schedulingStrategy.onExecutionStateChange(upstream1.getId(), ExecutionState.FINISHED);
// sinks' inputs are not all consumable yet so they are not scheduled
assertThat(testingSchedulerOperation.getScheduledVertices()).hasSize(6);
final TestingSchedulingExecutionVertex upstream2 = map3.get(1);
upstream2.getProducedResults().iterator().next().markFinished();
schedulingStrategy.onExecutionStateChange(upstream2.getId(), ExecutionState.FINISHED);
assertThat(testingSchedulerOperation.getScheduledVertices()).hasSize(8);
final List<List<TestingSchedulingExecutionVertex>> expectedScheduledVertices =
new ArrayList<>();
expectedScheduledVertices.add(Arrays.asList(sink.get(0)));
expectedScheduledVertices.add(Arrays.asList(sink.get(1)));
assertLatestScheduledVerticesAreEqualTo(
expectedScheduledVertices, testingSchedulerOperation);
}
@Test
void testSchedulingTopologyWithPersistentBlockingEdges() {
final TestingSchedulingTopology topology = new TestingSchedulingTopology();
final List<TestingSchedulingExecutionVertex> v1 =
topology.addExecutionVertices().withParallelism(1).finish();
final List<TestingSchedulingExecutionVertex> v2 =
topology.addExecutionVertices().withParallelism(1).finish();
topology.connectPointwise(v1, v2)
.withResultPartitionState(ResultPartitionState.CREATED)
.withResultPartitionType(ResultPartitionType.BLOCKING_PERSISTENT)
.finish();
startScheduling(topology);
final List<List<TestingSchedulingExecutionVertex>> expectedScheduledVertices =
new ArrayList<>();
expectedScheduledVertices.add(Arrays.asList(v1.get(0)));
assertLatestScheduledVerticesAreEqualTo(
expectedScheduledVertices, testingSchedulerOperation);
}
@Test
void testComputingCrossRegionConsumedPartitionGroupsCorrectly() throws Exception {
final JobVertex v1 = createJobVertex("v1", 4);
final JobVertex v2 = createJobVertex("v2", 3);
final JobVertex v3 = createJobVertex("v3", 2);
connectNewDataSetAsInput(
v2, v1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
connectNewDataSetAsInput(
v3, v2, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
connectNewDataSetAsInput(
v3, v1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
final List<JobVertex> ordered = new ArrayList<>(Arrays.asList(v1, v2, v3));
final JobGraph jobGraph =
JobGraphBuilder.newBatchJobGraphBuilder().addJobVertices(ordered).build();
final ExecutionGraph executionGraph =
TestingDefaultExecutionGraphBuilder.newBuilder()
.setJobGraph(jobGraph)
.build(EXECUTOR_RESOURCE.getExecutor());
final SchedulingTopology schedulingTopology = executionGraph.getSchedulingTopology();
final PipelinedRegionSchedulingStrategy schedulingStrategy =
new PipelinedRegionSchedulingStrategy(
testingSchedulerOperation, schedulingTopology);
final Set<ConsumedPartitionGroup> crossRegionConsumedPartitionGroups =
schedulingStrategy.getCrossRegionConsumedPartitionGroups();
assertThat(crossRegionConsumedPartitionGroups).hasSize(1);
final ConsumedPartitionGroup expected =
executionGraph
.getJobVertex(v3.getID())
.getTaskVertices()[1]
.getAllConsumedPartitionGroups()
.get(0);
assertThat(crossRegionConsumedPartitionGroups).contains(expected);
}
@Test
void testNoCrossRegionConsumedPartitionGroupsWithAllToAllBlockingEdge() {
final TestingSchedulingTopology topology = new TestingSchedulingTopology();
final List<TestingSchedulingExecutionVertex> producer =
topology.addExecutionVertices().withParallelism(4).finish();
final List<TestingSchedulingExecutionVertex> consumer =
topology.addExecutionVertices().withParallelism(4).finish();
topology.connectAllToAll(producer, consumer)
.withResultPartitionType(ResultPartitionType.BLOCKING)
.finish();
final PipelinedRegionSchedulingStrategy schedulingStrategy =
new PipelinedRegionSchedulingStrategy(testingSchedulerOperation, topology);
final Set<ConsumedPartitionGroup> crossRegionConsumedPartitionGroups =
schedulingStrategy.getCrossRegionConsumedPartitionGroups();
assertThat(crossRegionConsumedPartitionGroups).isEmpty();
}
    /**
     * Verifies scheduling with a blocking cross-region {@link ConsumedPartitionGroup}: the
     * downstream region must only be scheduled once the blocking partitions it consumes —
     * some of which belong to another region — are finished.
     */
    @Test
    void testSchedulingTopologyWithBlockingCrossRegionConsumedPartitionGroups() throws Exception {
        // v1(4) -> v2(3) pipelined, v2 -> v3(2) blocking, v1 -> v3 pipelined; the pointwise
        // parallelism mismatch yields two pipelined regions linked by blocking partitions.
        final JobVertex v1 = createJobVertex("v1", 4);
        final JobVertex v2 = createJobVertex("v2", 3);
        final JobVertex v3 = createJobVertex("v3", 2);
        connectNewDataSetAsInput(
                v2, v1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
        connectNewDataSetAsInput(
                v3, v2, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
        connectNewDataSetAsInput(
                v3, v1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
        final List<JobVertex> ordered = new ArrayList<>(Arrays.asList(v1, v2, v3));
        final JobGraph jobGraph =
                JobGraphBuilder.newBatchJobGraphBuilder().addJobVertices(ordered).build();
        final ExecutionGraph executionGraph =
                TestingDefaultExecutionGraphBuilder.newBuilder()
                        .setJobGraph(jobGraph)
                        .build(EXECUTOR_RESOURCE.getExecutor());
        final SchedulingTopology schedulingTopology = executionGraph.getSchedulingTopology();
        // Test whether the topology is built correctly
        final List<SchedulingPipelinedRegion> regions = new ArrayList<>();
        schedulingTopology.getAllPipelinedRegions().forEach(regions::add);
        assertThat(regions).hasSize(2);
        // Region containing the first v3 subtask is expected to hold 5 vertices.
        final ExecutionVertex v31 = executionGraph.getJobVertex(v3.getID()).getTaskVertices()[0];
        final Set<ExecutionVertexID> region1 = new HashSet<>();
        schedulingTopology
                .getPipelinedRegionOfVertex(v31.getID())
                .getVertices()
                .forEach(vertex -> region1.add(vertex.getId()));
        assertThat(region1).hasSize(5);
        // Region containing the second v3 subtask is expected to hold 4 vertices.
        final ExecutionVertex v32 = executionGraph.getJobVertex(v3.getID()).getTaskVertices()[1];
        final Set<ExecutionVertexID> region2 = new HashSet<>();
        schedulingTopology
                .getPipelinedRegionOfVertex(v32.getID())
                .getVertices()
                .forEach(vertex -> region2.add(vertex.getId()));
        assertThat(region2).hasSize(4);
        // Test whether region 1 is scheduled correctly
        PipelinedRegionSchedulingStrategy schedulingStrategy = startScheduling(schedulingTopology);
        assertThat(testingSchedulerOperation.getScheduledVertices()).hasSize(1);
        final List<ExecutionVertexID> scheduledVertices1 =
                testingSchedulerOperation.getScheduledVertices().get(0);
        assertThat(scheduledVertices1).hasSize(5);
        for (ExecutionVertexID vertexId : scheduledVertices1) {
            assertThat(region1).contains(vertexId);
        }
        // Test whether the region 2 is scheduled correctly when region 1 is finished
        final ExecutionVertex v22 = executionGraph.getJobVertex(v2.getID()).getTaskVertices()[1];
        v22.finishPartitionsIfNeeded();
        schedulingStrategy.onExecutionStateChange(v22.getID(), ExecutionState.FINISHED);
        assertThat(testingSchedulerOperation.getScheduledVertices()).hasSize(2);
        final List<ExecutionVertexID> scheduledVertices2 =
                testingSchedulerOperation.getScheduledVertices().get(1);
        assertThat(scheduledVertices2).hasSize(4);
        for (ExecutionVertexID vertexId : scheduledVertices2) {
            assertThat(region2).contains(vertexId);
        }
    }
@Test
void testSchedulingTopologyWithHybridCrossRegionConsumedPartitionGroups() throws Exception {
final JobVertex v1 = createJobVertex("v1", 4);
final JobVertex v2 = createJobVertex("v2", 3);
final JobVertex v3 = createJobVertex("v3", 2);
connectNewDataSetAsInput(
v2, v1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
connectNewDataSetAsInput(
v3, v2, DistributionPattern.POINTWISE, ResultPartitionType.HYBRID_FULL);
connectNewDataSetAsInput(
v3, v1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
final List<JobVertex> ordered = new ArrayList<>(Arrays.asList(v1, v2, v3));
final JobGraph jobGraph =
JobGraphBuilder.newBatchJobGraphBuilder().addJobVertices(ordered).build();
final ExecutionGraph executionGraph =
TestingDefaultExecutionGraphBuilder.newBuilder()
.setJobGraph(jobGraph)
.build(EXECUTOR_RESOURCE.getExecutor());
final SchedulingTopology schedulingTopology = executionGraph.getSchedulingTopology();
// Test whether the topology is built correctly
final List<SchedulingPipelinedRegion> regions = new ArrayList<>();
schedulingTopology.getAllPipelinedRegions().forEach(regions::add);
assertThat(regions).hasSize(2);
final ExecutionVertex v31 = executionGraph.getJobVertex(v3.getID()).getTaskVertices()[0];
final Set<ExecutionVertexID> region1 = new HashSet<>();
schedulingTopology
.getPipelinedRegionOfVertex(v31.getID())
.getVertices()
.forEach(vertex -> region1.add(vertex.getId()));
assertThat(region1).hasSize(5);
final ExecutionVertex v32 = executionGraph.getJobVertex(v3.getID()).getTaskVertices()[1];
final Set<ExecutionVertexID> region2 = new HashSet<>();
schedulingTopology
.getPipelinedRegionOfVertex(v32.getID())
.getVertices()
.forEach(vertex -> region2.add(vertex.getId()));
assertThat(region2).hasSize(4);
startScheduling(schedulingTopology);
assertThat(testingSchedulerOperation.getScheduledVertices()).hasSize(2);
// Test whether region 1 is scheduled correctly
final List<ExecutionVertexID> scheduledVertices1 =
testingSchedulerOperation.getScheduledVertices().get(0);
assertThat(scheduledVertices1).hasSize(5);
for (ExecutionVertexID vertexId : scheduledVertices1) {
assertThat(region1).contains(vertexId);
}
// Test whether region 2 is scheduled correctly
final List<ExecutionVertexID> scheduledVertices2 =
testingSchedulerOperation.getScheduledVertices().get(1);
assertThat(scheduledVertices2).hasSize(4);
for (ExecutionVertexID vertexId : scheduledVertices2) {
assertThat(region2).contains(vertexId);
}
}
@Test
void testScheduleBlockingDownstreamTaskIndividually() throws Exception {
final JobVertex v1 = createJobVertex("v1", 2);
final JobVertex v2 = createJobVertex("v2", 2);
connectNewDataSetAsInput(
v2, v1, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
final List<JobVertex> ordered = new ArrayList<>(Arrays.asList(v1, v2));
final JobGraph jobGraph =
JobGraphBuilder.newBatchJobGraphBuilder().addJobVertices(ordered).build();
final ExecutionGraph executionGraph =
TestingDefaultExecutionGraphBuilder.newBuilder()
.setJobGraph(jobGraph)
.build(EXECUTOR_RESOURCE.getExecutor());
final SchedulingTopology schedulingTopology = executionGraph.getSchedulingTopology();
final PipelinedRegionSchedulingStrategy schedulingStrategy =
startScheduling(schedulingTopology);
assertThat(testingSchedulerOperation.getScheduledVertices()).hasSize(2);
final ExecutionVertex v11 = executionGraph.getJobVertex(v1.getID()).getTaskVertices()[0];
v11.finishPartitionsIfNeeded();
schedulingStrategy.onExecutionStateChange(v11.getID(), ExecutionState.FINISHED);
assertThat(testingSchedulerOperation.getScheduledVertices()).hasSize(3);
}
@Test
void testFinishHybridPartitionWillNotRescheduleDownstream() throws Exception {
final JobVertex v1 = createJobVertex("v1", 1);
final JobVertex v2 = createJobVertex("v2", 1);
final JobVertex v3 = createJobVertex("v3", 1);
connectNewDataSetAsInput(
v2, v1, DistributionPattern.POINTWISE, ResultPartitionType.HYBRID_FULL);
connectNewDataSetAsInput(
v3, v1, DistributionPattern.POINTWISE, ResultPartitionType.HYBRID_SELECTIVE);
final List<JobVertex> ordered = new ArrayList<>(Arrays.asList(v1, v2, v3));
final JobGraph jobGraph =
JobGraphBuilder.newBatchJobGraphBuilder().addJobVertices(ordered).build();
final ExecutionGraph executionGraph =
TestingDefaultExecutionGraphBuilder.newBuilder()
.setJobGraph(jobGraph)
.build(EXECUTOR_RESOURCE.getExecutor());
final SchedulingTopology schedulingTopology = executionGraph.getSchedulingTopology();
PipelinedRegionSchedulingStrategy schedulingStrategy = startScheduling(schedulingTopology);
// all regions will be scheduled
assertThat(testingSchedulerOperation.getScheduledVertices()).hasSize(3);
final ExecutionVertex v11 = executionGraph.getJobVertex(v1.getID()).getTaskVertices()[0];
schedulingStrategy.onExecutionStateChange(v11.getID(), ExecutionState.FINISHED);
assertThat(testingSchedulerOperation.getScheduledVertices()).hasSize(3);
}
/**
* Source and it's downstream with hybrid edge will be scheduled. When blocking result partition
* finished, it's downstream will be scheduled.
*
* <pre>
* V1 ----> V2 ----> V3 ----> V4
* | | |
* Hybrid Blocking Hybrid
* </pre>
*/
@Test
void testScheduleTopologyWithHybridAndBlockingEdge() throws Exception {
final JobVertex v1 = createJobVertex("v1", 1);
final JobVertex v2 = createJobVertex("v2", 1);
final JobVertex v3 = createJobVertex("v3", 1);
final JobVertex v4 = createJobVertex("v4", 1);
connectNewDataSetAsInput(
v2, v1, DistributionPattern.POINTWISE, ResultPartitionType.HYBRID_FULL);
connectNewDataSetAsInput(
v3, v2, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
connectNewDataSetAsInput(
v4, v3, DistributionPattern.POINTWISE, ResultPartitionType.HYBRID_SELECTIVE);
final List<JobVertex> ordered = new ArrayList<>(Arrays.asList(v1, v2, v3, v4));
final JobGraph jobGraph =
JobGraphBuilder.newBatchJobGraphBuilder().addJobVertices(ordered).build();
final ExecutionGraph executionGraph =
TestingDefaultExecutionGraphBuilder.newBuilder()
.setJobGraph(jobGraph)
.build(EXECUTOR_RESOURCE.getExecutor());
final SchedulingTopology schedulingTopology = executionGraph.getSchedulingTopology();
PipelinedRegionSchedulingStrategy schedulingStrategy = startScheduling(schedulingTopology);
// v1 & v2 will be scheduled as v1 is a source and v1 -> v2 is a hybrid downstream.
assertThat(testingSchedulerOperation.getScheduledVertices()).hasSize(2);
final ExecutionVertex v11 = executionGraph.getJobVertex(v1.getID()).getTaskVertices()[0];
final ExecutionVertex v21 = executionGraph.getJobVertex(v2.getID()).getTaskVertices()[0];
assertThat(testingSchedulerOperation.getScheduledVertices().get(0))
.containsExactly(v11.getID());
assertThat(testingSchedulerOperation.getScheduledVertices().get(1))
.containsExactly(v21.getID());
// finish v2 to trigger new round of scheduling.
v21.finishPartitionsIfNeeded();
schedulingStrategy.onExecutionStateChange(v21.getID(), ExecutionState.FINISHED);
assertThat(testingSchedulerOperation.getScheduledVertices()).hasSize(4);
final ExecutionVertex v31 = executionGraph.getJobVertex(v3.getID()).getTaskVertices()[0];
final ExecutionVertex v41 = executionGraph.getJobVertex(v4.getID()).getTaskVertices()[0];
assertThat(testingSchedulerOperation.getScheduledVertices().get(2))
.containsExactly(v31.getID());
assertThat(testingSchedulerOperation.getScheduledVertices().get(3))
.containsExactly(v41.getID());
}
/** Inner non-pipelined edge will not affect it's region be scheduled. */
@Test
void testSchedulingRegionWithInnerNonPipelinedEdge() throws Exception {
final JobVertex v1 = createJobVertex("v1", 1);
final JobVertex v2 = createJobVertex("v2", 1);
final JobVertex v3 = createJobVertex("v3", 1);
final JobVertex v4 = createJobVertex("v4", 1);
final JobVertex v5 = createJobVertex("v5", 1);
connectNewDataSetAsInput(
v2, v1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
connectNewDataSetAsInput(
v3, v2, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
connectNewDataSetAsInput(
v4, v2, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
connectNewDataSetAsInput(
v5, v2, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
connectNewDataSetAsInput(
v3, v1, DistributionPattern.POINTWISE, ResultPartitionType.HYBRID_FULL);
connectNewDataSetAsInput(
v4, v1, DistributionPattern.POINTWISE, ResultPartitionType.HYBRID_SELECTIVE);
connectNewDataSetAsInput(
v4, v1, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
final List<JobVertex> ordered = new ArrayList<>(Arrays.asList(v1, v2, v3, v4, v5));
final JobGraph jobGraph =
JobGraphBuilder.newBatchJobGraphBuilder().addJobVertices(ordered).build();
final ExecutionGraph executionGraph =
TestingDefaultExecutionGraphBuilder.newBuilder()
.setJobGraph(jobGraph)
.build(EXECUTOR_RESOURCE.getExecutor());
final SchedulingTopology schedulingTopology = executionGraph.getSchedulingTopology();
startScheduling(schedulingTopology);
assertThat(testingSchedulerOperation.getScheduledVertices()).hasSize(1);
List<ExecutionVertexID> executionVertexIds =
testingSchedulerOperation.getScheduledVertices().get(0);
assertThat(executionVertexIds).hasSize(5);
}
/**
* If a region have blocking and non-blocking input edge at the same time, it will be scheduled
* after it's all blocking edge finished, non-blocking edge don't block scheduling.
*/
@Test
void testDownstreamRegionWillBeBlockedByBlockingEdge() throws Exception {
final JobVertex v1 = createJobVertex("v1", 1);
final JobVertex v2 = createJobVertex("v2", 1);
final JobVertex v3 = createJobVertex("v3", 1);
final JobVertex v4 = createJobVertex("v4", 1);
connectNewDataSetAsInput(
v4, v1, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
connectNewDataSetAsInput(
v4, v2, DistributionPattern.POINTWISE, ResultPartitionType.HYBRID_FULL);
connectNewDataSetAsInput(
v4, v3, DistributionPattern.POINTWISE, ResultPartitionType.HYBRID_SELECTIVE);
final List<JobVertex> ordered = new ArrayList<>(Arrays.asList(v1, v2, v3, v4));
final JobGraph jobGraph =
JobGraphBuilder.newBatchJobGraphBuilder().addJobVertices(ordered).build();
final ExecutionGraph executionGraph =
TestingDefaultExecutionGraphBuilder.newBuilder()
.setJobGraph(jobGraph)
.build(EXECUTOR_RESOURCE.getExecutor());
final SchedulingTopology schedulingTopology = executionGraph.getSchedulingTopology();
final PipelinedRegionSchedulingStrategy schedulingStrategy =
startScheduling(schedulingTopology);
assertThat(testingSchedulerOperation.getScheduledVertices()).hasSize(3);
final ExecutionVertex v11 = executionGraph.getJobVertex(v1.getID()).getTaskVertices()[0];
v11.finishPartitionsIfNeeded();
schedulingStrategy.onExecutionStateChange(v11.getID(), ExecutionState.FINISHED);
assertThat(testingSchedulerOperation.getScheduledVertices()).hasSize(4);
}
private static JobVertex createJobVertex(String vertexName, int parallelism) {
JobVertex jobVertex = new JobVertex(vertexName);
jobVertex.setParallelism(parallelism);
jobVertex.setInvokableClass(AbstractInvokable.class);
return jobVertex;
}
private PipelinedRegionSchedulingStrategy startScheduling(
SchedulingTopology schedulingTopology) {
final PipelinedRegionSchedulingStrategy schedulingStrategy =
new PipelinedRegionSchedulingStrategy(
testingSchedulerOperation, schedulingTopology);
schedulingStrategy.startScheduling();
return schedulingStrategy;
}
}
| PipelinedRegionSchedulingStrategyTest |
java | quarkusio__quarkus | extensions/grpc/stubs/src/main/java/io/quarkus/grpc/stubs/ManyToManyObserver.java | {
"start": 388,
"end": 1663
} | class ____<I, O> extends AbstractMulti<O> implements StreamObserver<O> {
private final StreamObserver<I> processor;
private final Multi<I> source;
private final UpstreamSubscriber subscriber = new UpstreamSubscriber();
private final AtomicReference<Flow.Subscription> upstream = new AtomicReference<>();
private volatile MultiSubscriber<? super O> downstream;
public ManyToManyObserver(Multi<I> source, Function<StreamObserver<O>, StreamObserver<I>> function) {
this.processor = function.apply(this);
this.source = source;
}
@Override
public void subscribe(MultiSubscriber<? super O> subscriber) {
this.downstream = subscriber;
source.subscribe(this.subscriber);
}
@Override
public void onNext(O value) {
downstream.onItem(value);
}
@Override
public void onError(Throwable t) {
cancelUpstream();
downstream.onFailure(t);
}
@Override
public void onCompleted() {
cancelUpstream();
downstream.onComplete();
}
private void cancelUpstream() {
var subscription = upstream.getAndSet(Subscriptions.CANCELLED);
if (subscription != null) {
subscription.cancel();
}
}
| ManyToManyObserver |
java | spring-projects__spring-security | config/src/main/java/org/springframework/security/config/annotation/web/configurers/ChannelSecurityConfigurer.java | {
"start": 7773,
"end": 8341
} | class ____ {
protected List<? extends RequestMatcher> requestMatchers;
RequiresChannelUrl(List<? extends RequestMatcher> requestMatchers) {
this.requestMatchers = requestMatchers;
}
public ChannelRequestMatcherRegistry requiresSecure() {
return requires("REQUIRES_SECURE_CHANNEL");
}
public ChannelRequestMatcherRegistry requiresInsecure() {
return requires("REQUIRES_INSECURE_CHANNEL");
}
public ChannelRequestMatcherRegistry requires(String attribute) {
return addAttribute(attribute, this.requestMatchers);
}
}
}
| RequiresChannelUrl |
java | apache__kafka | streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamGlobalKTableJoinTest.java | {
"start": 2384,
"end": 13224
} | class ____ {
private static final KeyValueTimestamp[] EMPTY = new KeyValueTimestamp[0];
private final String streamTopic = "streamTopic";
private final String globalTableTopic = "globalTableTopic";
private TestInputTopic<Integer, String> inputStreamTopic;
private TestInputTopic<String, String> inputTableTopic;
private final int[] expectedKeys = {0, 1, 2, 3};
private TopologyTestDriver driver;
private MockApiProcessor<Integer, String, Void, Void> processor;
private StreamsBuilder builder;
@BeforeEach
public void setUp() {
// use un-versioned store by default
init(Optional.empty());
}
private void initWithVersionedStore(final long historyRetentionMs) {
init(Optional.of(historyRetentionMs));
}
private void init(final Optional<Long> versionedStoreHistoryRetentionMs) {
builder = new StreamsBuilder();
final KStream<Integer, String> stream;
final GlobalKTable<String, String> table; // value of stream optionally contains key of table
final KeyValueMapper<Integer, String, String> keyMapper;
final MockApiProcessorSupplier<Integer, String, Void, Void> supplier = new MockApiProcessorSupplier<>();
final Consumed<Integer, String> streamConsumed = Consumed.with(Serdes.Integer(), Serdes.String());
final Consumed<String, String> tableConsumed = Consumed.with(Serdes.String(), Serdes.String());
stream = builder.stream(streamTopic, streamConsumed);
if (versionedStoreHistoryRetentionMs.isPresent()) {
table = builder.globalTable(globalTableTopic, tableConsumed, Materialized.as(
Stores.persistentVersionedKeyValueStore("table", Duration.ofMillis(versionedStoreHistoryRetentionMs.get()))));
} else {
table = builder.globalTable(globalTableTopic, tableConsumed);
}
keyMapper = (key, value) -> {
final String[] tokens = value.split(",");
// Value is comma-delimited. If second token is present, it's the key to the global ktable.
// If not present, use null to indicate no match
return tokens.length > 1 ? tokens[1] : null;
};
stream.join(table, keyMapper, MockValueJoiner.TOSTRING_JOINER).process(supplier);
final Properties props = StreamsTestUtils.getStreamsConfig(Serdes.Integer(), Serdes.String());
driver = new TopologyTestDriver(builder.build(), props);
processor = supplier.theCapturedProcessor();
// auto-advance stream timestamps by default, but not global table timestamps
inputStreamTopic = driver.createInputTopic(streamTopic, new IntegerSerializer(), new StringSerializer(), Instant.ofEpochMilli(0L), Duration.ofMillis(1L));
inputTableTopic = driver.createInputTopic(globalTableTopic, new StringSerializer(), new StringSerializer());
}
@AfterEach
public void cleanup() {
driver.close();
}
private void pushToStream(final int messageCount, final String valuePrefix, final boolean includeForeignKey, final boolean includeNullKey) {
for (int i = 0; i < messageCount; i++) {
String value = valuePrefix + expectedKeys[i];
if (includeForeignKey) {
value = value + ",FKey" + expectedKeys[i];
}
Integer key = expectedKeys[i];
if (includeNullKey && i == 0) {
key = null;
}
inputStreamTopic.pipeInput(key, value);
}
}
private void pushToGlobalTable(final int messageCount, final String valuePrefix) {
for (int i = 0; i < messageCount; i++) {
inputTableTopic.pipeInput("FKey" + expectedKeys[i], valuePrefix + expectedKeys[i]);
}
}
private void pushNullValueToGlobalTable(final int messageCount) {
for (int i = 0; i < messageCount; i++) {
inputTableTopic.pipeInput("FKey" + expectedKeys[i], (String) null);
}
}
@Test
public void shouldNotRequireCopartitioning() {
final Collection<Set<String>> copartitionGroups =
TopologyWrapper.getInternalTopologyBuilder(builder.build()).copartitionGroups();
assertEquals(0, copartitionGroups.size(), "KStream-GlobalKTable joins do not need to be co-partitioned");
}
@Test
public void shouldNotJoinWithEmptyGlobalTableOnStreamUpdates() {
// push two items to the primary stream. the globalTable is empty
pushToStream(2, "X", true, false);
processor.checkAndClearProcessResult(EMPTY);
}
@Test
public void shouldNotJoinOnGlobalTableUpdates() {
// push two items to the primary stream. the globalTable is empty
pushToStream(2, "X", true, false);
processor.checkAndClearProcessResult(EMPTY);
// push two items to the globalTable. this should not produce any item.
pushToGlobalTable(2, "Y");
processor.checkAndClearProcessResult(EMPTY);
// push all four items to the primary stream. this should produce two items.
pushToStream(4, "X", true, false);
processor.checkAndClearProcessResult(
new KeyValueTimestamp<>(0, "X0,FKey0+Y0", 2),
new KeyValueTimestamp<>(1, "X1,FKey1+Y1", 3)
);
// push all items to the globalTable. this should not produce any item
pushToGlobalTable(4, "YY");
processor.checkAndClearProcessResult(EMPTY);
// push all four items to the primary stream. this should produce four items.
pushToStream(4, "X", true, false);
processor.checkAndClearProcessResult(
new KeyValueTimestamp<>(0, "X0,FKey0+YY0", 6),
new KeyValueTimestamp<>(1, "X1,FKey1+YY1", 7),
new KeyValueTimestamp<>(2, "X2,FKey2+YY2", 8),
new KeyValueTimestamp<>(3, "X3,FKey3+YY3", 9)
);
// push all items to the globalTable. this should not produce any item
pushToGlobalTable(4, "YYY");
processor.checkAndClearProcessResult(EMPTY);
}
@Test
public void shouldJoinOnlyIfMatchFoundOnStreamUpdates() {
// push two items to the globalTable. this should not produce any item.
pushToGlobalTable(2, "Y");
processor.checkAndClearProcessResult(EMPTY);
// push all four items to the primary stream. this should produce two items.
pushToStream(4, "X", true, false);
processor.checkAndClearProcessResult(
new KeyValueTimestamp<>(0, "X0,FKey0+Y0", 0),
new KeyValueTimestamp<>(1, "X1,FKey1+Y1", 1)
);
}
@Test
public void shouldClearGlobalTableEntryOnNullValueUpdates() {
// push all four items to the globalTable. this should not produce any item.
pushToGlobalTable(4, "Y");
processor.checkAndClearProcessResult(EMPTY);
// push all four items to the primary stream. this should produce four items.
pushToStream(4, "X", true, false);
processor.checkAndClearProcessResult(
new KeyValueTimestamp<>(0, "X0,FKey0+Y0", 0),
new KeyValueTimestamp<>(1, "X1,FKey1+Y1", 1),
new KeyValueTimestamp<>(2, "X2,FKey2+Y2", 2),
new KeyValueTimestamp<>(3, "X3,FKey3+Y3", 3)
);
// push two items with null to the globalTable as deletes. this should not produce any item.
pushNullValueToGlobalTable(2);
processor.checkAndClearProcessResult(EMPTY);
// push all four items to the primary stream. this should produce two items.
pushToStream(4, "XX", true, false);
processor.checkAndClearProcessResult(
new KeyValueTimestamp<>(2, "XX2,FKey2+Y2", 6),
new KeyValueTimestamp<>(3, "XX3,FKey3+Y3", 7)
);
}
@Test
public void shouldNotJoinOnNullKeyMapperValues() {
// push all items to the globalTable. this should not produce any item
pushToGlobalTable(4, "Y");
processor.checkAndClearProcessResult(EMPTY);
// push all four items to the primary stream with no foreign key, resulting in null keyMapper values.
// this should not produce any item.
pushToStream(4, "XXX", false, false);
processor.checkAndClearProcessResult(EMPTY);
assertThat(
driver.metrics().get(
new MetricName(
"dropped-records-total",
"stream-task-metrics",
"",
mkMap(
mkEntry("thread-id", Thread.currentThread().getName()),
mkEntry("task-id", "0_0")
)
))
.metricValue(),
is(4.0)
);
}
@Test
public void shouldNotJoinOnNullKeyMapperValuesWithNullKeys() {
// push all items to the globalTable. this should not produce any item
pushToGlobalTable(4, "Y");
processor.checkAndClearProcessResult(EMPTY);
// push all four items to the primary stream with no foreign key, resulting in null keyMapper values.
// this should not produce any item.
pushToStream(4, "XXX", false, true);
processor.checkAndClearProcessResult(EMPTY);
assertThat(
driver.metrics().get(
new MetricName(
"dropped-records-total",
"stream-task-metrics",
"",
mkMap(
mkEntry("thread-id", Thread.currentThread().getName()),
mkEntry("task-id", "0_0")
)
))
.metricValue(),
is(4.0)
);
}
@Test
public void shouldJoinOnNullKey() {
// push two items to the globalTable. this should not produce any item.
pushToGlobalTable(2, "Y");
processor.checkAndClearProcessResult(EMPTY);
// push all four items to the primary stream. this should produce two items.
pushToStream(4, "X", true, true);
processor.checkAndClearProcessResult(
new KeyValueTimestamp<>(null, "X0,FKey0+Y0", 0),
new KeyValueTimestamp<>(1, "X1,FKey1+Y1", 1)
);
assertThat(
driver.metrics().get(
new MetricName(
"dropped-records-total",
"stream-task-metrics",
"",
mkMap(
mkEntry("thread-id", Thread.currentThread().getName()),
mkEntry("task-id", "0_0")
)
))
.metricValue(),
is(0.0)
);
}
}
| KStreamGlobalKTableJoinTest |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/atomic/long_/AtomicLongAssert_isNull_Test.java | {
"start": 859,
"end": 1052
} | class ____ {
@Test
void should_be_able_to_use_isNull_assertion() {
AtomicLong actual = null;
assertThat(actual).isNull();
then(actual).isNull();
}
}
| AtomicLongAssert_isNull_Test |
java | spring-projects__spring-framework | spring-webflux/src/main/java/org/springframework/web/reactive/function/server/RenderingResponse.java | {
"start": 2049,
"end": 5132
} | interface ____ {
/**
* Add the supplied attribute to the model using a
* {@linkplain org.springframework.core.Conventions#getVariableName generated name}.
* <p><em>Note: Empty {@link Collection Collections} are not added to
* the model when using this method because we cannot correctly determine
* the true convention name. View code should check for {@code null} rather
* than for empty collections.</em>
* @param attribute the model attribute value (never {@code null})
*/
Builder modelAttribute(Object attribute);
/**
* Add the supplied attribute value under the supplied name.
* @param name the name of the model attribute (never {@code null})
* @param value the model attribute value (can be {@code null})
*/
Builder modelAttribute(String name, @Nullable Object value);
/**
* Copy all attributes in the supplied array into the model,
* using attribute name generation for each element.
* @see #modelAttribute(Object)
*/
Builder modelAttributes(Object... attributes);
/**
* Copy all attributes in the supplied {@code Collection} into the model,
* using attribute name generation for each element.
* @see #modelAttribute(Object)
*/
Builder modelAttributes(Collection<?> attributes);
/**
* Copy all attributes in the supplied {@code Map} into the model.
* @see #modelAttribute(String, Object)
*/
Builder modelAttributes(Map<String, ?> attributes);
/**
* Add the given header value(s) under the given name.
* @param headerName the header name
* @param headerValues the header value(s)
* @return this builder
* @see HttpHeaders#add(String, String)
*/
Builder header(String headerName, String... headerValues);
/**
* Copy the given headers into the entity's headers map.
* @param headers the existing HttpHeaders to copy from
* @return this builder
* @see HttpHeaders#add(String, String)
*/
Builder headers(HttpHeaders headers);
/**
* Set the HTTP status.
* @param status the response status
* @return this builder
*/
Builder status(HttpStatusCode status);
/**
* Set the HTTP status.
* @param status the response status
* @return this builder
* @since 5.0.3
*/
Builder status(int status);
/**
* Add the given cookie to the response.
* @param cookie the cookie to add
* @return this builder
*/
Builder cookie(ResponseCookie cookie);
/**
* Manipulate this response's cookies with the given consumer. The
* cookies provided to the consumer are "live", so that the consumer can be used to
* {@linkplain MultiValueMap#set(Object, Object) overwrite} existing cookies,
* {@linkplain MultiValueMap#remove(Object) remove} cookies, or use any of the other
* {@link MultiValueMap} methods.
* @param cookiesConsumer a function that consumes the cookies
* @return this builder
*/
Builder cookies(Consumer<MultiValueMap<String, ResponseCookie>> cookiesConsumer);
/**
* Build the response.
* @return the built response
*/
Mono<RenderingResponse> build();
}
}
| Builder |
java | google__guava | guava/src/com/google/common/util/concurrent/ClosingFuture.java | {
"start": 61894,
"end": 62377
} | class ____<V1 extends @Nullable Object, V2 extends @Nullable Object>
extends Combiner {
/**
* A function that returns a value when applied to the values of the two futures passed to
* {@link #whenAllSucceed(ClosingFuture, ClosingFuture)}.
*
* @param <V1> the type returned by the first future
* @param <V2> the type returned by the second future
* @param <U> the type returned by the function
*/
@FunctionalInterface
public | Combiner2 |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/SagaComponentBuilderFactory.java | {
"start": 1381,
"end": 1830
} | interface ____ {
/**
* Saga (camel-saga)
* Execute custom actions within a route using the Saga EIP.
*
* Category: clustering
* Since: 2.21
* Maven coordinates: org.apache.camel:camel-saga
*
* @return the dsl builder
*/
static SagaComponentBuilder saga() {
return new SagaComponentBuilderImpl();
}
/**
* Builder for the Saga component.
*/
| SagaComponentBuilderFactory |
java | spring-projects__spring-framework | spring-web/src/test/java/org/springframework/http/server/reactive/ListenerReadPublisherTests.java | {
"start": 1151,
"end": 2244
} | class ____ {
private final TestListenerReadPublisher publisher = new TestListenerReadPublisher();
private final TestSubscriber subscriber = new TestSubscriber();
@BeforeEach
void setup() {
this.publisher.subscribe(this.subscriber);
}
@Test
void twoReads() {
this.subscriber.getSubscription().request(2);
this.publisher.onDataAvailable();
assertThat(this.publisher.getReadCalls()).isEqualTo(2);
}
@Test // SPR-17410
public void discardDataOnError() {
this.subscriber.getSubscription().request(2);
this.publisher.onDataAvailable();
this.publisher.onError(new IllegalStateException());
assertThat(this.publisher.getReadCalls()).isEqualTo(2);
assertThat(this.publisher.getDiscardCalls()).isEqualTo(1);
}
@Test // SPR-17410
public void discardDataOnCancel() {
this.subscriber.getSubscription().request(2);
this.subscriber.setCancelOnNext(true);
this.publisher.onDataAvailable();
assertThat(this.publisher.getReadCalls()).isEqualTo(1);
assertThat(this.publisher.getDiscardCalls()).isEqualTo(1);
}
private static final | ListenerReadPublisherTests |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/inject/ScopeAnnotationOnInterfaceOrAbstractClassTest.java | {
"start": 2771,
"end": 2822
} | class ____ {}
/** An abstract | TestClass1 |
java | spring-projects__spring-framework | spring-aop/src/main/java/org/springframework/aop/support/DelegatePerTargetObjectIntroductionInterceptor.java | {
"start": 1592,
"end": 1690
} | class ____ which should not be introduced to the
* owning AOP proxy.
*
* <p>An instance of this | but |
java | google__guava | android/guava/src/com/google/common/graph/ArchetypeGraph.java | {
"start": 850,
"end": 7654
} | interface ____<N> extends SuccessorsFunction<N>, PredecessorsFunction<N> {
//
// Graph-level accessors
//
/** Returns all nodes in this graph, in the order specified by {@link #nodeOrder()}. */
Set<N> nodes();
//
// Graph properties
//
/**
* Returns true if the edges in this graph are directed. Directed edges connect a {@link
* EndpointPair#source() source node} to a {@link EndpointPair#target() target node}, while
* undirected edges connect a pair of nodes to each other.
*/
boolean isDirected();
/**
* Returns true if this graph allows self-loops (edges that connect a node to itself). Attempting
* to add a self-loop to a graph that does not allow them will throw an {@link
* IllegalArgumentException}.
*/
boolean allowsSelfLoops();
/** Returns the order of iteration for the elements of {@link #nodes()}. */
ElementOrder<N> nodeOrder();
//
// Element-level accessors
//
/**
* Returns a live view of the nodes which have an incident edge in common with {@code node} in
* this graph.
*
* <p>This is equal to the union of {@link #predecessors(Object)} and {@link #successors(Object)}.
*
* <p>If {@code node} is removed from the graph after this method is called, the {@code Set}
* {@code view} returned by this method will be invalidated, and will throw {@code
* IllegalStateException} if it is accessed in any way, with the following exceptions:
*
* <ul>
* <li>{@code view.equals(view)} evaluates to {@code true} (but any other {@code equals(...)}
* expression involving {@code view} will throw)
* <li>{@code hashCode()} does not throw
* <li>if {@code node} is re-added to the graph after having been removed, {@code view}'s
* behavior is undefined
* </ul>
*
* @throws IllegalArgumentException if {@code node} is not an element of this graph
*/
Set<N> adjacentNodes(N node);
/**
* Returns a live view of all nodes in this graph adjacent to {@code node} which can be reached by
* traversing {@code node}'s incoming edges <i>against</i> the direction (if any) of the edge.
*
* <p>In an undirected graph, this is equivalent to {@link #adjacentNodes(Object)}.
*
* <p>If {@code node} is removed from the graph after this method is called, the {@code Set}
* {@code view} returned by this method will be invalidated, and will throw {@code
* IllegalStateException} if it is accessed in any way, with the following exceptions:
*
* <ul>
* <li>{@code view.equals(view)} evaluates to {@code true} (but any other {@code equals(...)}
* expression involving {@code view} will throw)
* <li>{@code hashCode()} does not throw
* <li>if {@code node} is re-added to the graph after having been removed, {@code view}'s
* behavior is undefined
* </ul>
*
* @throws IllegalArgumentException if {@code node} is not an element of this graph
*/
@Override
Set<N> predecessors(N node);
/**
* Returns a live view of all nodes in this graph adjacent to {@code node} which can be reached by
* traversing {@code node}'s outgoing edges in the direction (if any) of the edge.
*
* <p>In an undirected graph, this is equivalent to {@link #adjacentNodes(Object)}.
*
* <p>This is <i>not</i> the same as "all nodes reachable from {@code node} by following outgoing
* edges". For that functionality, see {@link Graphs#reachableNodes(Graph, Object)}.
*
* <p>If {@code node} is removed from the graph after this method is called, the {@code Set}
* {@code view} returned by this method will be invalidated, and will throw {@code
* IllegalStateException} if it is accessed in any way, with the following exceptions:
*
* <ul>
* <li>{@code view.equals(view)} evaluates to {@code true} (but any other {@code equals(...)}
* expression involving {@code view} will throw)
* <li>{@code hashCode()} does not throw
* <li>if {@code node} is re-added to the graph after having been removed, {@code view}'s
* behavior is undefined
* </ul>
*
* @throws IllegalArgumentException if {@code node} is not an element of this graph
*/
@Override
Set<N> successors(N node);
/**
* Returns the count of {@code node}'s incident edges, counting self-loops twice (equivalently,
* the number of times an edge touches {@code node}).
*
* <p>For directed graphs, this is equal to {@code inDegree(node) + outDegree(node)}.
*
* <p>For undirected graphs, this is equal to {@code incidentEdges(node).size()} + (number of
* self-loops incident to {@code node}).
*
* <p>If the count is greater than {@code Integer.MAX_VALUE}, returns {@code Integer.MAX_VALUE}.
*
* @throws IllegalArgumentException if {@code node} is not an element of this graph
*/
int degree(N node);
/**
* Returns the count of {@code node}'s incoming edges (equal to {@code predecessors(node).size()})
* in a directed graph. In an undirected graph, returns the {@link #degree(Object)}.
*
* <p>If the count is greater than {@code Integer.MAX_VALUE}, returns {@code Integer.MAX_VALUE}.
*
* @throws IllegalArgumentException if {@code node} is not an element of this graph
*/
int inDegree(N node);
/**
* Returns the count of {@code node}'s outgoing edges (equal to {@code successors(node).size()})
* in a directed graph. In an undirected graph, returns the {@link #degree(Object)}.
*
* <p>If the count is greater than {@code Integer.MAX_VALUE}, returns {@code Integer.MAX_VALUE}.
*
* @throws IllegalArgumentException if {@code node} is not an element of this graph
*/
int outDegree(N node);
/**
* Returns true if there is an edge that directly connects {@code nodeU} to {@code nodeV}. This is
* equivalent to {@code nodes().contains(nodeU) && successors(nodeU).contains(nodeV)}.
*
* <p>In an undirected graph, this is equal to {@code hasEdgeConnecting(nodeV, nodeU)}.
*
* @since 23.0
*/
boolean hasEdgeConnecting(N nodeU, N nodeV);
/**
* Returns true if there is an edge that directly connects {@code endpoints} (in the order, if
* any, specified by {@code endpoints}). This is equivalent to {@code
* edges().contains(endpoints)}.
*
* <p>Unlike the other {@code EndpointPair}-accepting methods, this method does not throw if the
* endpoints are unordered; it simply returns false. This is for consistency with the behavior of
* {@link Collection#contains(Object)} (which does not generally throw if the object cannot be
* present in the collection), and the desire to have this method's behavior be compatible with
* {@code edges().contains(endpoints)}.
*
* @since 27.1
*/
boolean hasEdgeConnecting(EndpointPair<N> endpoints);
}
| ArchetypeGraph |
java | hibernate__hibernate-orm | hibernate-vector/src/main/java/org/hibernate/vector/internal/DB2JdbcLiteralFormatterVector.java | {
"start": 507,
"end": 1748
} | class ____<T> extends BasicJdbcLiteralFormatter<T> {
private final JdbcLiteralFormatter<Object> elementFormatter;
private final AbstractDB2VectorJdbcType db2VectorJdbcType;
public DB2JdbcLiteralFormatterVector(JavaType<T> javaType, JdbcLiteralFormatter<?> elementFormatter, AbstractDB2VectorJdbcType db2VectorJdbcType) {
super( javaType );
//noinspection unchecked
this.elementFormatter = (JdbcLiteralFormatter<Object>) elementFormatter;
this.db2VectorJdbcType = db2VectorJdbcType;
}
@Override
public void appendJdbcLiteral(SqlAppender appender, T value, Dialect dialect, WrapperOptions wrapperOptions) {
final Object[] objects = unwrapArray( value, wrapperOptions );
appender.append( "vector('" );
char separator = '[';
for ( Object o : objects ) {
appender.append( separator );
elementFormatter.appendJdbcLiteral( appender, o, dialect, wrapperOptions );
separator = ',';
}
appender.append( "]'," );
appender.append( db2VectorJdbcType.getVectorParameters( new Size().setArrayLength( objects.length ) ) );
appender.append( ')' );
}
private Object[] unwrapArray(Object value, WrapperOptions wrapperOptions) {
return unwrap( value, Object[].class, wrapperOptions );
}
}
| DB2JdbcLiteralFormatterVector |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/query/criteria/ValueHandlingMode.java | {
"start": 187,
"end": 532
} | enum ____ how values passed to JPA Criteria API are handled.
* <ul>
* <li>The {@code BIND} mode (default) will use bind variables for any value.
* <li> The {@code INLINE} mode inlines values as literals.
* </ul>
*
* @see org.hibernate.cfg.AvailableSettings#CRITERIA_VALUE_HANDLING_MODE
*
* @author Christian Beikov
*/
public | defines |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/support/SpringApplicationJsonEnvironmentPostProcessorTests.java | {
"start": 1665,
"end": 10003
} | class ____ {
private final SpringApplicationJsonEnvironmentPostProcessor processor = new SpringApplicationJsonEnvironmentPostProcessor();
private final ConfigurableEnvironment environment = new StandardEnvironment();
@Test
void error() {
assertThat(this.environment.resolvePlaceholders("${foo:}")).isEmpty();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(this.environment, "spring.application.json=foo:bar");
assertThatExceptionOfType(JsonParseException.class)
.isThrownBy(() -> this.processor.postProcessEnvironment(this.environment, getApplication()))
.withMessageContaining("Cannot parse JSON");
}
@Test
void missing() {
assertThat(this.environment.resolvePlaceholders("${foo:}")).isEmpty();
this.processor.postProcessEnvironment(this.environment, getApplication());
assertThat(this.environment.resolvePlaceholders("${foo:}")).isEmpty();
}
@Test
void empty() {
assertThat(this.environment.resolvePlaceholders("${foo:}")).isEmpty();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(this.environment, "spring.application.json={}");
this.processor.postProcessEnvironment(this.environment, getApplication());
assertThat(this.environment.resolvePlaceholders("${foo:}")).isEmpty();
}
@Test
void periodSeparated() {
assertThat(this.environment.resolvePlaceholders("${foo:}")).isEmpty();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(this.environment,
"spring.application.json={\"foo\":\"bar\"}");
this.processor.postProcessEnvironment(this.environment, getApplication());
assertThat(this.environment.resolvePlaceholders("${foo:}")).isEqualTo("bar");
}
@Test
void envVar() {
assertThat(this.environment.resolvePlaceholders("${foo:}")).isEmpty();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(this.environment,
"SPRING_APPLICATION_JSON={\"foo\":\"bar\"}");
this.processor.postProcessEnvironment(this.environment, getApplication());
assertThat(this.environment.resolvePlaceholders("${foo:}")).isEqualTo("bar");
}
@Test
void nested() {
assertThat(this.environment.resolvePlaceholders("${foo:}")).isEmpty();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(this.environment,
"SPRING_APPLICATION_JSON={\"foo\":{\"bar\":\"spam\",\"rab\":\"maps\"}}");
this.processor.postProcessEnvironment(this.environment, getApplication());
assertThat(this.environment.resolvePlaceholders("${foo.bar:}")).isEqualTo("spam");
assertThat(this.environment.resolvePlaceholders("${foo.rab:}")).isEqualTo("maps");
}
@Test
void prefixed() {
assertThat(this.environment.resolvePlaceholders("${foo:}")).isEmpty();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(this.environment,
"SPRING_APPLICATION_JSON={\"foo.bar\":\"spam\"}");
this.processor.postProcessEnvironment(this.environment, getApplication());
assertThat(this.environment.resolvePlaceholders("${foo.bar:}")).isEqualTo("spam");
}
@Test
void list() {
assertThat(this.environment.resolvePlaceholders("${foo[1]:}")).isEmpty();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(this.environment,
"SPRING_APPLICATION_JSON={\"foo\":[\"bar\",\"spam\"]}");
this.processor.postProcessEnvironment(this.environment, getApplication());
assertThat(this.environment.resolvePlaceholders("${foo[1]:}")).isEqualTo("spam");
}
@Test
void listOfObject() {
assertThat(this.environment.resolvePlaceholders("${foo[0].bar:}")).isEmpty();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(this.environment,
"SPRING_APPLICATION_JSON={\"foo\":[{\"bar\":\"spam\"}]}");
this.processor.postProcessEnvironment(this.environment, getApplication());
assertThat(this.environment.resolvePlaceholders("${foo[0].bar:}")).isEqualTo("spam");
}
@Test
void propertySourceShouldTrackOrigin() {
assertThat(this.environment.resolvePlaceholders("${foo:}")).isEmpty();
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(this.environment,
"spring.application.json={\"foo\":\"bar\"}");
this.processor.postProcessEnvironment(this.environment, getApplication());
PropertySource<?> propertySource = this.environment.getPropertySources().get("spring.application.json");
assertThat(propertySource).isNotNull();
PropertySourceOrigin origin = (PropertySourceOrigin) PropertySourceOrigin.get(propertySource, "foo");
assertThat(origin.getPropertySource().getName()).isEqualTo("Inlined Test Properties");
assertThat(origin.getPropertyName()).isEqualTo("spring.application.json");
assertThat(this.environment.resolvePlaceholders("${foo:}")).isEqualTo("bar");
}
@Test
void propertySourceShouldBeOrderedBeforeJndiPropertySource() {
testServletPropertySource(StandardServletEnvironment.JNDI_PROPERTY_SOURCE_NAME);
}
@Test
void propertySourceShouldBeOrderedBeforeServletContextPropertySource() {
testServletPropertySource(StandardServletEnvironment.SERVLET_CONTEXT_PROPERTY_SOURCE_NAME);
}
@Test
void propertySourceShouldBeOrderedBeforeServletConfigPropertySource() {
testServletPropertySource(StandardServletEnvironment.SERVLET_CONFIG_PROPERTY_SOURCE_NAME);
}
@Test
void propertySourceOrderingWhenMultipleServletSpecificPropertySources() {
MapPropertySource jndi = getPropertySource(StandardServletEnvironment.JNDI_PROPERTY_SOURCE_NAME, "jndi");
this.environment.getPropertySources().addFirst(jndi);
MapPropertySource servlet = getPropertySource(StandardServletEnvironment.SERVLET_CONTEXT_PROPERTY_SOURCE_NAME,
"servlet");
this.environment.getPropertySources().addFirst(servlet);
MapPropertySource custom = getPropertySource("custom", "custom");
this.environment.getPropertySources().addFirst(custom);
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(this.environment,
"SPRING_APPLICATION_JSON={\"foo\":\"bar\"}");
this.processor.postProcessEnvironment(this.environment, getApplication());
PropertySource<?> json = this.environment.getPropertySources().get("spring.application.json");
assertThat(this.environment.getProperty("foo")).isEqualTo("custom");
assertThat(this.environment.getPropertySources()).containsSequence(custom, json, servlet, jndi);
}
@Test
void nullValuesShouldBeAddedToPropertySource() {
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(this.environment,
"SPRING_APPLICATION_JSON={\"foo\":null}");
this.processor.postProcessEnvironment(this.environment, getApplication());
assertThat(this.environment.containsProperty("foo")).isTrue();
}
@Test
void emptyValuesForCollectionShouldNotBeIgnored() {
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(this.environment,
"SPRING_APPLICATION_JSON={\"foo\":[]}");
MockPropertySource source = new MockPropertySource();
source.setProperty("foo", "bar");
this.environment.getPropertySources().addLast(source);
assertThat(this.environment.resolvePlaceholders("${foo}")).isEqualTo("bar");
this.environment.getPropertySources().addLast(source);
this.processor.postProcessEnvironment(this.environment, getApplication());
assertThat(this.environment.resolvePlaceholders("${foo}")).isEmpty();
}
@Test
@SuppressWarnings("unchecked")
void emptyMapValuesShouldNotBeIgnored() {
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(this.environment,
"SPRING_APPLICATION_JSON={\"foo\":{}}");
MockPropertySource source = new MockPropertySource();
source.setProperty("foo.baz", "bar");
this.environment.getPropertySources().addLast(source);
this.processor.postProcessEnvironment(this.environment, getApplication());
assertThat(this.environment.getProperty("foo", Map.class)).isEmpty();
}
private SpringApplication getApplication() {
return new SpringApplication();
}
private void testServletPropertySource(String servletPropertySourceName) {
this.environment.getPropertySources().addFirst(getPropertySource(servletPropertySourceName, "servlet"));
TestPropertySourceUtils.addInlinedPropertiesToEnvironment(this.environment,
"SPRING_APPLICATION_JSON={\"foo\":\"bar\"}");
this.processor.postProcessEnvironment(this.environment, getApplication());
assertThat(this.environment.getProperty("foo")).isEqualTo("bar");
}
private MapPropertySource getPropertySource(String name, String value) {
return new MapPropertySource(name, Collections.singletonMap("foo", value));
}
}
| SpringApplicationJsonEnvironmentPostProcessorTests |
java | dropwizard__dropwizard | dropwizard-hibernate/src/test/java/io/dropwizard/hibernate/UnitOfWorkAwareProxyFactoryTest.java | {
"start": 7628,
"end": 8843
} | class ____ {
private final SessionFactory sessionFactory;
public NestedCall(SessionFactory sessionFactory) {
this.sessionFactory = sessionFactory;
}
@UnitOfWork
public void normalCall() {
assertThat(transactionActive())
.withFailMessage("Expected transaction to be active in normal call")
.isTrue();
}
@UnitOfWork
public void nestedCall() {
assertThat(transactionActive())
.withFailMessage("Expected transaction to be active before nested call")
.isTrue();
normalCall();
assertThat(transactionActive())
.withFailMessage("Expected transaction to be active after nested call")
.isTrue();
}
@UnitOfWork(cacheMode = CacheMode.IGNORE)
public void invalidNestedCall() {
normalCall();
}
private boolean transactionActive() {
try {
return sessionFactory.getCurrentSession().getTransaction().isActive();
} catch (HibernateException ex) {
return false;
}
}
}
}
| NestedCall |
java | micronaut-projects__micronaut-core | core/src/main/java/io/micronaut/core/annotation/Introspected.java | {
"start": 6763,
"end": 7524
} | class ____ {
* ...
* }</pre>
*
* <p>With the above in place a reverse lookup on the column can be done using {@link io.micronaut.core.beans.BeanIntrospection#getIndexedProperty(Class, String)}:</p>
*
* <pre class="code">
* BeanProperty property = introspection.getIndexedProperty(Column.class, "foo_bar").orElse(null);
* </pre>
*
*
* @return The indexed annotation types
*/
IndexedAnnotation[] indexed() default {};
/**
* @return The prefix used for copy constructor invoking methods on immutable types. The default is "with".
* @since 2.3.0
*/
String withPrefix() default "with";
/**
* @return The package to write introspections to. By default, uses the | MyBean |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVector.java | {
"start": 6432,
"end": 6758
} | interface ____ extends Vector.Builder permits BooleanVectorBuilder, FixedBuilder {
/**
* Appends a boolean to the current entry.
*/
Builder appendBoolean(boolean value);
@Override
BooleanVector build();
}
/**
* A builder that never grows.
*/
sealed | Builder |
java | apache__camel | components/camel-rest-openapi/src/generated/java/org/apache/camel/component/rest/openapi/RestOpenApiComponentConfigurer.java | {
"start": 739,
"end": 8886
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
RestOpenApiComponent target = (RestOpenApiComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "apicontextpath":
case "apiContextPath": target.setApiContextPath(property(camelContext, java.lang.String.class, value)); return true;
case "autowiredenabled":
case "autowiredEnabled": target.setAutowiredEnabled(property(camelContext, boolean.class, value)); return true;
case "basepath":
case "basePath": target.setBasePath(property(camelContext, java.lang.String.class, value)); return true;
case "bindingpackagescan":
case "bindingPackageScan": target.setBindingPackageScan(property(camelContext, java.lang.String.class, value)); return true;
case "bridgeerrorhandler":
case "bridgeErrorHandler": target.setBridgeErrorHandler(property(camelContext, boolean.class, value)); return true;
case "clientrequestvalidation":
case "clientRequestValidation": target.setClientRequestValidation(property(camelContext, boolean.class, value)); return true;
case "clientresponsevalidation":
case "clientResponseValidation": target.setClientResponseValidation(property(camelContext, boolean.class, value)); return true;
case "componentname":
case "componentName": target.setComponentName(property(camelContext, java.lang.String.class, value)); return true;
case "consumercomponentname":
case "consumerComponentName": target.setConsumerComponentName(property(camelContext, java.lang.String.class, value)); return true;
case "consumes": target.setConsumes(property(camelContext, java.lang.String.class, value)); return true;
case "host": target.setHost(property(camelContext, java.lang.String.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
case "missingoperation":
case "missingOperation": target.setMissingOperation(property(camelContext, java.lang.String.class, value)); return true;
case "mockincludepattern":
case "mockIncludePattern": target.setMockIncludePattern(property(camelContext, java.lang.String.class, value)); return true;
case "produces": target.setProduces(property(camelContext, java.lang.String.class, value)); return true;
case "requestvalidationenabled":
case "requestValidationEnabled": target.setRequestValidationEnabled(property(camelContext, boolean.class, value)); return true;
case "restopenapiprocessorstrategy":
case "restOpenapiProcessorStrategy": target.setRestOpenapiProcessorStrategy(property(camelContext, org.apache.camel.component.rest.openapi.RestOpenapiProcessorStrategy.class, value)); return true;
case "specificationuri":
case "specificationUri": target.setSpecificationUri(property(camelContext, java.lang.String.class, value)); return true;
case "sslcontextparameters":
case "sslContextParameters": target.setSslContextParameters(property(camelContext, org.apache.camel.support.jsse.SSLContextParameters.class, value)); return true;
case "useglobalsslcontextparameters":
case "useGlobalSslContextParameters": target.setUseGlobalSslContextParameters(property(camelContext, boolean.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "apicontextpath":
case "apiContextPath": return java.lang.String.class;
case "autowiredenabled":
case "autowiredEnabled": return boolean.class;
case "basepath":
case "basePath": return java.lang.String.class;
case "bindingpackagescan":
case "bindingPackageScan": return java.lang.String.class;
case "bridgeerrorhandler":
case "bridgeErrorHandler": return boolean.class;
case "clientrequestvalidation":
case "clientRequestValidation": return boolean.class;
case "clientresponsevalidation":
case "clientResponseValidation": return boolean.class;
case "componentname":
case "componentName": return java.lang.String.class;
case "consumercomponentname":
case "consumerComponentName": return java.lang.String.class;
case "consumes": return java.lang.String.class;
case "host": return java.lang.String.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
case "missingoperation":
case "missingOperation": return java.lang.String.class;
case "mockincludepattern":
case "mockIncludePattern": return java.lang.String.class;
case "produces": return java.lang.String.class;
case "requestvalidationenabled":
case "requestValidationEnabled": return boolean.class;
case "restopenapiprocessorstrategy":
case "restOpenapiProcessorStrategy": return org.apache.camel.component.rest.openapi.RestOpenapiProcessorStrategy.class;
case "specificationuri":
case "specificationUri": return java.lang.String.class;
case "sslcontextparameters":
case "sslContextParameters": return org.apache.camel.support.jsse.SSLContextParameters.class;
case "useglobalsslcontextparameters":
case "useGlobalSslContextParameters": return boolean.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
RestOpenApiComponent target = (RestOpenApiComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "apicontextpath":
case "apiContextPath": return target.getApiContextPath();
case "autowiredenabled":
case "autowiredEnabled": return target.isAutowiredEnabled();
case "basepath":
case "basePath": return target.getBasePath();
case "bindingpackagescan":
case "bindingPackageScan": return target.getBindingPackageScan();
case "bridgeerrorhandler":
case "bridgeErrorHandler": return target.isBridgeErrorHandler();
case "clientrequestvalidation":
case "clientRequestValidation": return target.isClientRequestValidation();
case "clientresponsevalidation":
case "clientResponseValidation": return target.isClientResponseValidation();
case "componentname":
case "componentName": return target.getComponentName();
case "consumercomponentname":
case "consumerComponentName": return target.getConsumerComponentName();
case "consumes": return target.getConsumes();
case "host": return target.getHost();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
case "missingoperation":
case "missingOperation": return target.getMissingOperation();
case "mockincludepattern":
case "mockIncludePattern": return target.getMockIncludePattern();
case "produces": return target.getProduces();
case "requestvalidationenabled":
case "requestValidationEnabled": return target.isRequestValidationEnabled();
case "restopenapiprocessorstrategy":
case "restOpenapiProcessorStrategy": return target.getRestOpenapiProcessorStrategy();
case "specificationuri":
case "specificationUri": return target.getSpecificationUri();
case "sslcontextparameters":
case "sslContextParameters": return target.getSslContextParameters();
case "useglobalsslcontextparameters":
case "useGlobalSslContextParameters": return target.isUseGlobalSslContextParameters();
default: return null;
}
}
}
| RestOpenApiComponentConfigurer |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/OpenshiftDeploymentconfigsComponentBuilderFactory.java | {
"start": 6286,
"end": 7516
} | class ____
extends AbstractComponentBuilder<OpenshiftDeploymentConfigsComponent>
implements OpenshiftDeploymentconfigsComponentBuilder {
@Override
protected OpenshiftDeploymentConfigsComponent buildConcreteComponent() {
return new OpenshiftDeploymentConfigsComponent();
}
@Override
protected boolean setPropertyOnComponent(
Component component,
String name,
Object value) {
switch (name) {
case "kubernetesClient": ((OpenshiftDeploymentConfigsComponent) component).setKubernetesClient((io.fabric8.kubernetes.client.KubernetesClient) value); return true;
case "bridgeErrorHandler": ((OpenshiftDeploymentConfigsComponent) component).setBridgeErrorHandler((boolean) value); return true;
case "lazyStartProducer": ((OpenshiftDeploymentConfigsComponent) component).setLazyStartProducer((boolean) value); return true;
case "autowiredEnabled": ((OpenshiftDeploymentConfigsComponent) component).setAutowiredEnabled((boolean) value); return true;
default: return false;
}
}
}
} | OpenshiftDeploymentconfigsComponentBuilderImpl |
java | quarkusio__quarkus | extensions/hibernate-orm/deployment/src/test/java/io/quarkus/hibernate/orm/config/dialect/DbVersionValidPersistenceXmlTest.java | {
"start": 766,
"end": 2592
} | class ____ {
private static final String ACTUAL_H2_VERSION = DialectVersions.Defaults.H2;
private static final String CONFIGURED_DB_VERSION;
static {
// We will set the DB version to something lower than the actual version: this is valid.
CONFIGURED_DB_VERSION = ACTUAL_H2_VERSION.replaceAll("\\.[\\d]+\\.[\\d]+$", ".0.0");
assertThat(ACTUAL_H2_VERSION)
.as("Test setup - we need the required version to be different from the actual one")
.isNotEqualTo(CONFIGURED_DB_VERSION);
}
@RegisterExtension
static QuarkusUnitTest runner = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClass(SmokeTestUtils.class)
.addClass(MyEntity.class)
.addAsManifestResource(new StringAsset(loadResourceAndReplacePlaceholders(
"META-INF/some-persistence-with-h2-version-placeholder.xml",
Map.of("H2_VERSION", CONFIGURED_DB_VERSION))),
"persistence.xml"))
.withConfigurationResource("application-datasource-only.properties");
@Inject
SessionFactory sessionFactory;
@Inject
Session session;
@Test
public void dialectVersion() {
var dialectVersion = sessionFactory.unwrap(SessionFactoryImplementor.class).getJdbcServices().getDialect().getVersion();
assertThat(DialectVersions.toString(dialectVersion)).isEqualTo(CONFIGURED_DB_VERSION);
}
@Test
@Transactional
public void smokeTest() {
SmokeTestUtils.testSimplePersistRetrieveUpdateDelete(session,
MyEntity.class, MyEntity::new,
MyEntity::getId,
MyEntity::setName, MyEntity::getName);
}
}
| DbVersionValidPersistenceXmlTest |
java | spring-projects__spring-boot | module/spring-boot-http-client/src/test/java/org/springframework/boot/http/client/JettyClientHttpRequestFactoryBuilderTests.java | {
"start": 1242,
"end": 3466
} | class ____
extends AbstractClientHttpRequestFactoryBuilderTests<JettyClientHttpRequestFactory> {
JettyClientHttpRequestFactoryBuilderTests() {
super(JettyClientHttpRequestFactory.class, ClientHttpRequestFactoryBuilder.jetty());
}
@Test
void withCustomizers() {
TestCustomizer<HttpClient> httpClientCustomizer1 = new TestCustomizer<>();
TestCustomizer<HttpClient> httpClientCustomizer2 = new TestCustomizer<>();
TestCustomizer<HttpClientTransport> httpClientTransportCustomizer = new TestCustomizer<>();
TestCustomizer<ClientConnector> clientConnectorCustomizerCustomizer = new TestCustomizer<>();
ClientHttpRequestFactoryBuilder.jetty()
.withHttpClientCustomizer(httpClientCustomizer1)
.withHttpClientCustomizer(httpClientCustomizer2)
.withHttpClientTransportCustomizer(httpClientTransportCustomizer)
.withClientConnectorCustomizerCustomizer(clientConnectorCustomizerCustomizer)
.build();
httpClientCustomizer1.assertCalled();
httpClientCustomizer2.assertCalled();
httpClientTransportCustomizer.assertCalled();
clientConnectorCustomizerCustomizer.assertCalled();
}
@Test
void with() {
TestCustomizer<HttpClient> customizer = new TestCustomizer<>();
ClientHttpRequestFactoryBuilder.jetty().with((builder) -> builder.withHttpClientCustomizer(customizer)).build();
customizer.assertCalled();
}
@Test
void withHttpClientTransportFactory() {
JettyClientHttpRequestFactory factory = ClientHttpRequestFactoryBuilder.jetty()
.withHttpClientTransportFactory(TestHttpClientTransport::new)
.build();
assertThat(factory).extracting("httpClient")
.extracting("transport")
.isInstanceOf(TestHttpClientTransport.class);
}
@Override
protected long connectTimeout(JettyClientHttpRequestFactory requestFactory) {
HttpClient httpClient = (HttpClient) ReflectionTestUtils.getField(requestFactory, "httpClient");
assertThat(httpClient).isNotNull();
return httpClient.getConnectTimeout();
}
@Override
protected long readTimeout(JettyClientHttpRequestFactory requestFactory) {
Object field = ReflectionTestUtils.getField(requestFactory, "readTimeout");
assertThat(field).isNotNull();
return (long) field;
}
static | JettyClientHttpRequestFactoryBuilderTests |
java | google__dagger | javatests/dagger/internal/codegen/MissingBindingValidationTest.java | {
"start": 36117,
"end": 37504
} | class ____ {",
" @Provides @IntoSet static String contributesToSet(int i) {",
" return \"\" + i;",
" }",
"}");
CompilerTests.daggerCompiler(parent, parentModule, child, childModule, grandchild)
.withProcessingOptions(compilerMode.processorOptions())
.compile(
subject -> {
subject.hasErrorCount(1);
// TODO(b/243720787): Replace with CompilationResultSubject#hasErrorContainingMatch()
subject.hasErrorContaining(
String.join(
"\n",
"Double cannot be provided without an @Inject constructor or an "
+ "@Provides-annotated method.",
"",
" Double is injected at",
" [Parent] ParentModule.missingDependency(dub)",
" Integer is injected at",
" [Child] ChildModule.contributesToSet(i)",
" Set<String> is injected at",
" [Child] ParentModule.dependsOnSet(strings)",
" Object is requested at",
" [Grandchild] Grandchild.object() [Parent → Child → Grandchild]"))
.onSource(parent)
.onLineContaining(" | ChildModule |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/resource/transaction/SynchronizationRegistryStandardImplTests.java | {
"start": 937,
"end": 4877
} | class ____ {
@Test
public void basicUsageTests() {
final SynchronizationRegistryStandardImpl registry = new SynchronizationRegistryStandardImpl();
try {
registry.registerSynchronization( null );
fail( "Was expecting NullSynchronizationException, but call succeeded" );
}
catch (NullSynchronizationException expected) {
// expected behavior
}
catch (Exception e) {
fail( "Was expecting NullSynchronizationException, but got " + e.getClass().getName() );
}
final SynchronizationCollectorImpl synchronization = new SynchronizationCollectorImpl();
assertEquals( 0, registry.getNumberOfRegisteredSynchronizations() );
registry.registerSynchronization( synchronization );
assertEquals( 1, registry.getNumberOfRegisteredSynchronizations() );
registry.registerSynchronization( synchronization );
assertEquals( 1, registry.getNumberOfRegisteredSynchronizations() );
assertEquals( 0, synchronization.getBeforeCompletionCount() );
assertEquals( 0, synchronization.getSuccessfulCompletionCount() );
assertEquals( 0, synchronization.getFailedCompletionCount() );
{
registry.notifySynchronizationsBeforeTransactionCompletion();
assertEquals( 1, synchronization.getBeforeCompletionCount() );
assertEquals( 0, synchronization.getSuccessfulCompletionCount() );
assertEquals( 0, synchronization.getFailedCompletionCount() );
registry.notifySynchronizationsAfterTransactionCompletion( Status.STATUS_COMMITTED );
assertEquals( 1, synchronization.getBeforeCompletionCount() );
assertEquals( 1, synchronization.getSuccessfulCompletionCount() );
assertEquals( 0, synchronization.getFailedCompletionCount() );
}
// after completion should clear registered synchronizations
assertEquals( 0, registry.getNumberOfRegisteredSynchronizations() );
// reset the sync
synchronization.reset();
assertEquals( 0, synchronization.getBeforeCompletionCount() );
assertEquals( 0, synchronization.getSuccessfulCompletionCount() );
assertEquals( 0, synchronization.getFailedCompletionCount() );
// re-register it
registry.registerSynchronization( synchronization );
assertEquals( 1, registry.getNumberOfRegisteredSynchronizations() );
{
registry.notifySynchronizationsAfterTransactionCompletion( Status.STATUS_ROLLEDBACK );
assertEquals( 0, synchronization.getBeforeCompletionCount() );
assertEquals( 0, synchronization.getSuccessfulCompletionCount() );
assertEquals( 1, synchronization.getFailedCompletionCount() );
// after completion should clear registered synchronizations
assertEquals( 0, registry.getNumberOfRegisteredSynchronizations() );
}
}
@Test
public void testUserSynchronizationExceptions() {
// exception in beforeCompletion
SynchronizationRegistryStandardImpl registry = new SynchronizationRegistryStandardImpl();
Synchronization synchronization = SynchronizationErrorImpl.forBefore();
registry.registerSynchronization( synchronization );
try {
registry.notifySynchronizationsBeforeTransactionCompletion();
fail( "Expecting LocalSynchronizationException, but call succeeded" );
}
catch (LocalSynchronizationException expected) {
// expected
}
catch (Exception e) {
fail( "Was expecting LocalSynchronizationException, but got " + e.getClass().getName() );
}
// exception in beforeCompletion
registry.clearSynchronizations();
registry = new SynchronizationRegistryStandardImpl();
synchronization = SynchronizationErrorImpl.forAfter();
registry.registerSynchronization( synchronization );
try {
registry.notifySynchronizationsAfterTransactionCompletion( Status.STATUS_COMMITTED );
fail( "Expecting LocalSynchronizationException, but call succeeded" );
}
catch (LocalSynchronizationException expected) {
// expected
}
catch (Exception e) {
fail( "Was expecting LocalSynchronizationException, but got " + e.getClass().getName() );
}
}
}
| SynchronizationRegistryStandardImplTests |
java | spring-projects__spring-framework | spring-jdbc/src/main/java/org/springframework/jdbc/core/JdbcTemplate.java | {
"start": 20664,
"end": 21431
} | class ____ implements StatementCallback<Integer>, SqlProvider {
@Override
public Integer doInStatement(Statement stmt) throws SQLException {
int rows = stmt.executeUpdate(sql);
if (logger.isTraceEnabled()) {
logger.trace("SQL update affected " + rows + " rows");
}
return rows;
}
@Override
public String getSql() {
return sql;
}
}
return updateCount(execute(new UpdateStatementCallback(), true));
}
@Override
public int[] batchUpdate(String... sql) throws DataAccessException {
Assert.notEmpty(sql, "SQL array must not be empty");
if (logger.isDebugEnabled()) {
logger.debug("Executing SQL batch update of " + sql.length + " statements");
}
// Callback to execute the batch update.
| UpdateStatementCallback |
java | apache__flink | flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/join/lookup/keyordered/AecRecord.java | {
"start": 1086,
"end": 3051
} | class ____<IN, OUT> {
private StreamRecord<IN> record;
private Epoch<OUT> epoch;
// index where this record is from
// start with 0
private int inputIndex;
public AecRecord() {
this.record = null;
this.epoch = null;
this.inputIndex = -1;
}
public AecRecord(StreamRecord<IN> record, Epoch<OUT> epoch, int inputIndex) {
this.record = record;
this.epoch = epoch;
this.inputIndex = inputIndex;
}
public AecRecord<IN, OUT> reset(StreamRecord<IN> record, Epoch<OUT> epoch, int inputIndex) {
this.record = record;
this.epoch = epoch;
this.inputIndex = inputIndex;
return this;
}
public AecRecord<IN, OUT> setRecord(StreamRecord<IN> record) {
this.record = record;
return this;
}
public AecRecord<IN, OUT> setEpoch(Epoch<OUT> epoch) {
this.epoch = epoch;
return this;
}
public StreamRecord<IN> getRecord() {
return record;
}
public Epoch<OUT> getEpoch() {
return epoch;
}
public int getInputIndex() {
return inputIndex;
}
@Override
public String toString() {
return "AecRecord{"
+ "record="
+ record
+ ", epoch="
+ epoch
+ ", inputIndex="
+ inputIndex
+ '}';
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
AecRecord<?, ?> aecRecord = (AecRecord<?, ?>) o;
return inputIndex == aecRecord.inputIndex
&& Objects.equals(record, aecRecord.record)
&& Objects.equals(epoch, aecRecord.epoch);
}
@Override
public int hashCode() {
return Objects.hash(record, epoch, inputIndex);
}
}
| AecRecord |
java | grpc__grpc-java | core/src/main/java/io/grpc/internal/InstantTimeProvider.java | {
"start": 929,
"end": 1217
} | class ____ implements TimeProvider {
@Override
@IgnoreJRERequirement
public long currentTimeNanos() {
Instant now = Instant.now();
long epochSeconds = now.getEpochSecond();
return saturatedAdd(TimeUnit.SECONDS.toNanos(epochSeconds), now.getNano());
}
}
| InstantTimeProvider |
java | redisson__redisson | redisson/src/main/java/org/redisson/client/handler/BaseConnectionHandler.java | {
"start": 1325,
"end": 9624
} | class ____<C extends RedisConnection> extends ChannelInboundHandlerAdapter {
private static final Logger log = LoggerFactory.getLogger(BaseConnectionHandler.class);
final RedisClient redisClient;
final CompletableFuture<C> connectionPromise = new CompletableFuture<>();
C connection;
public BaseConnectionHandler(RedisClient redisClient) {
super();
this.redisClient = redisClient;
}
@Override
public void channelRegistered(ChannelHandlerContext ctx) throws Exception {
if (connection == null) {
connection = createConnection(ctx);
}
super.channelRegistered(ctx);
}
abstract C createConnection(ChannelHandlerContext ctx);
@Override
public void channelActive(ChannelHandlerContext ctx) {
List<CompletableFuture<?>> futures = new ArrayList<>(5);
CompletableFuture<Void> f = authWithCredential();
futures.add(f);
RedisClientConfig config = redisClient.getConfig();
if (config.getProtocol() == Protocol.RESP3) {
CompletionStage<Object> f1 = connection.async(RedisCommands.HELLO, "3");
futures.add(f1.toCompletableFuture());
}
if (config.getDatabase() != 0) {
CompletionStage<Object> future = connection.async(RedisCommands.SELECT, config.getDatabase());
futures.add(future.toCompletableFuture());
}
if (config.getClientName() != null) {
CompletionStage<Object> future = connection.async(RedisCommands.CLIENT_SETNAME, config.getClientName());
futures.add(future.toCompletableFuture());
}
if (!config.getCapabilities().isEmpty()) {
CompletionStage<Object> future = connection.async(RedisCommands.CLIENT_CAPA, config.getCapabilities().toArray());
futures.add(future.toCompletableFuture());
}
if (config.isReadOnly()) {
CompletionStage<Object> future = connection.async(RedisCommands.READONLY);
futures.add(future.toCompletableFuture());
}
if (config.getPingConnectionInterval() > 0) {
CompletionStage<Object> future = connection.async(RedisCommands.PING);
futures.add(future.toCompletableFuture());
}
CompletableFuture<Void> future = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
future.whenComplete((res, e) -> {
if (e != null) {
if (e instanceof RedisRetryException) {
ctx.executor().schedule(() -> {
channelActive(ctx);
}, 1, TimeUnit.SECONDS);
return;
}
connection.closeAsync();
connectionPromise.completeExceptionally(e);
return;
}
startRenewal(ctx, config);
ctx.fireChannelActive();
connectionPromise.complete(connection);
});
}
private CompletionStage<Void> startRenewal(ChannelHandlerContext ctx, RedisClientConfig config) {
if (isClosed(ctx, connection)) {
return CompletableFuture.completedFuture(null);
}
return config.getCredentialsResolver()
.nextRenewal()
.thenCompose(r -> {
QueueCommand currentCommand = connection.getCurrentCommandData();
if (currentCommand != null && currentCommand.isBlockingCommand()) {
return connection.forceFastReconnectAsync();
}
return authWithCredential();
})
.thenCompose(r -> startRenewal(ctx, config))
.exceptionally(cause -> {
if (isClosed(ctx, connection)) {
return null;
}
if (!(cause instanceof RedisRetryException)) {
log.error("Unable to send AUTH command over channel: {}", ctx.channel(), cause);
log.debug("channel: {} closed due to AUTH error response", ctx.channel());
ctx.channel().close();
return null;
}
log.warn("Renewal cycle failed, retrying", cause);
config.getTimer().newTimeout(t -> {
startRenewal(ctx, config);
}, 1, TimeUnit.SECONDS);
return null;
});
}
private CompletableFuture<Void> authWithCredential() {
RedisClientConfig config = redisClient.getConfig();
InetSocketAddress addr = redisClient.resolveAddr().getNow(null);
CompletionStage<Void> f = config.getCredentialsResolver().resolve(addr)
.thenCompose(credentials -> {
String password = Objects.toString(config.getAddress().getPassword(),
Objects.toString(credentials.getPassword(), config.getPassword()));
if (password != null) {
CompletionStage<Void> future;
String username = Objects.toString(config.getAddress().getUsername(),
Objects.toString(credentials.getUsername(), config.getUsername()));
if (username != null) {
future = connection.async(RedisCommands.AUTH, username, password);
} else {
future = connection.async(RedisCommands.AUTH, password);
}
return future;
}
return CompletableFuture.completedFuture(null);
});
return f.toCompletableFuture();
}
// private void reapplyCredential(ChannelHandlerContext ctx, Duration renewalInterval) {
// if (isClosed(ctx, connection)) {
// return;
// }
//
// CompletableFuture<Void> future;
// QueueCommand currentCommand = connection.getCurrentCommandData();
// if (connection.getUsage() == 0 && (currentCommand == null || !currentCommand.isBlockingCommand())) {
// future = authWithCredential();
// } else {
// future = null;
// }
//
// RedisClientConfig config = redisClient.getConfig();
//
// config.getTimer().newTimeout(timeout -> {
// if (isClosed(ctx, connection)) {
// return;
// }
//
// QueueCommand cd = connection.getCurrentCommandData();
// if (cd != null && cd.isBlockingCommand()) {
// reapplyCredential(ctx, renewalInterval);
// return;
// }
//
// if (connection.getUsage() == 0 && future != null && (future.cancel(false) || cause(future) != null)) {
// Throwable cause = cause(future);
// if (!(cause instanceof RedisRetryException)) {
// if (!future.isCancelled()) {
// log.error("Unable to send AUTH command over channel: {}", ctx.channel(), cause);
// }
//
// log.debug("channel: {} closed due to AUTH response timeout set in {} ms", ctx.channel(), config.getCredentialsReapplyInterval());
// ctx.channel().close();
// } else {
// reapplyCredential(ctx, renewalInterval);
// }
//
// } else {
// reapplyCredential(ctx, renewalInterval);
// }
// }, renewalInterval.toMillis(), TimeUnit.MILLISECONDS);
// }
protected Throwable cause(CompletableFuture<?> future) {
try {
future.toCompletableFuture().getNow(null);
return null;
} catch (CompletionException ex2) {
return ex2.getCause();
} catch (CancellationException ex1) {
return ex1;
}
}
private static boolean isClosed(ChannelHandlerContext ctx, RedisConnection connection) {
return connection.isClosed()
|| !ctx.channel().equals(connection.getChannel())
|| ctx.isRemoved()
|| connection.getRedisClient().isShutdown();
}
}
| BaseConnectionHandler |
java | apache__flink | flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStDBWriteBatchWrapper.java | {
"start": 1445,
"end": 1482
} | class ____ not thread safe.
*/
public | is |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffListingInfo.java | {
"start": 1556,
"end": 1709
} | class ____ the difference between snapshots of a snapshottable
* directory where the difference is limited by dfs.snapshotDiff-report.limit.
*/
| describing |
java | quarkusio__quarkus | extensions/resteasy-classic/resteasy-client/deployment/src/test/java/io/quarkus/restclient/configuration/QuarkusRestClientsTest.java | {
"start": 396,
"end": 1248
} | class ____ extends AbstractRestClientsTest {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(EchoResource.class,
EchoClient.class, EchoClientWithConfigKey.class, ShortNameEchoClient.class))
.withConfigurationResource("quarkus-restclients-test-application.properties");
@RestClient
ShortNameEchoClient shortNameClient;
// tests for configKey and fully qualified clients inherited
@Test
public void shortNameClientShouldConnect() {
Assertions.assertEquals("Hello", shortNameClient.echo("Hello"));
}
@Test
void shortNameClientShouldHaveSingletonScope() {
verifyClientScope(ShortNameEchoClient.class, Singleton.class);
}
}
| QuarkusRestClientsTest |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/common/typeutils/base/InstantComparator.java | {
"start": 1193,
"end": 3819
} | class ____ extends BasicTypeComparator<Instant> {
private static final long serialVersionUID = 1L;
private static final long SECONDS_MIN_VALUE = Instant.MIN.getEpochSecond();
public InstantComparator(boolean ascending) {
super(ascending);
}
@Override
public int compareSerialized(DataInputView firstSource, DataInputView secondSource)
throws IOException {
final long lSeconds = firstSource.readLong();
final long rSeconds = secondSource.readLong();
final int comp;
if (lSeconds == rSeconds) {
final int lNanos = firstSource.readInt();
final int rNanos = secondSource.readInt();
comp = (lNanos < rNanos ? -1 : (lNanos == rNanos ? 0 : 1));
} else {
comp = lSeconds < rSeconds ? -1 : 1;
}
return ascendingComparison ? comp : -comp;
}
@Override
public boolean supportsNormalizedKey() {
return true;
}
@Override
public int getNormalizeKeyLen() {
return InstantSerializer.SECONDS_BYTES + InstantSerializer.NANOS_BYTES;
}
@Override
public boolean isNormalizedKeyPrefixOnly(int keyBytes) {
return keyBytes < getNormalizeKeyLen();
}
@Override
public void putNormalizedKey(Instant record, MemorySegment target, int offset, int numBytes) {
final int secondsBytes = InstantSerializer.SECONDS_BYTES;
final long normalizedSeconds = record.getEpochSecond() - SECONDS_MIN_VALUE;
if (numBytes >= secondsBytes) {
target.putLongBigEndian(offset, normalizedSeconds);
offset += secondsBytes;
numBytes -= secondsBytes;
final int nanosBytes = InstantSerializer.NANOS_BYTES;
if (numBytes >= nanosBytes) {
target.putIntBigEndian(offset, record.getNano());
offset += nanosBytes;
numBytes -= nanosBytes;
for (int i = 0; i < numBytes; i++) {
target.put(offset + i, (byte) 0);
}
} else {
final int nanos = record.getNano();
for (int i = 0; i < numBytes; i++) {
target.put(offset + i, (byte) (nanos >>> ((3 - i) << 3)));
}
}
} else {
for (int i = 0; i < numBytes; i++) {
target.put(offset + i, (byte) (normalizedSeconds >>> ((7 - i) << 3)));
}
}
}
@Override
public TypeComparator<Instant> duplicate() {
return new InstantComparator(ascendingComparison);
}
}
| InstantComparator |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.