language stringclasses 1
value | repo stringclasses 60
values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/views/ViewsWithCreatorTest.java | {
"start": 717,
"end": 1530
} | class ____ {
public String a;
@JsonView(View1.class)
public String b;
@JsonView(View2.class)
public String c;
public ObjWithCreator() { }
@JsonCreator
public ObjWithCreator(@JsonProperty("a") String a, @JsonProperty("b") String b, @JsonProperty("c") String c) {
this.a = a;
this.b = b;
this.c = c;
}
@Override
public String toString() {
return String.format("%s-%s-%s", a, b, c);
}
}
@JsonAutoDetect(fieldVisibility = JsonAutoDetect.Visibility.PUBLIC_ONLY,
isGetterVisibility = JsonAutoDetect.Visibility.NONE,
getterVisibility = JsonAutoDetect.Visibility.NONE)
@JsonIgnoreProperties(ignoreUnknown = true)
static | ObjWithCreator |
java | FasterXML__jackson-core | src/test/java/tools/jackson/core/unittest/jsonptr/JsonPointer1168Test.java | {
"start": 222,
"end": 923
} | class ____ extends JacksonCoreTestBase
{
// [core#1168]
@Test
void appendWithTail()
{
JsonPointer original = JsonPointer.compile("/a1/b/c");
JsonPointer tailPointer = original.tail();
assertEquals("/b/c", tailPointer.toString());
JsonPointer other = JsonPointer.compile("/a2");
assertEquals("/a2", other.toString());
assertEquals("/a2/b/c", other.append(tailPointer).toString());
// And the other way around too
assertEquals("/b/c/a2", tailPointer.append(other).toString());
// And with `appendProperty()`
assertEquals("/b/c/xyz", tailPointer.appendProperty("xyz").toString());
}
}
| JsonPointer1168Test |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/event/EventPublishingTestExecutionListenerTests.java | {
"start": 2162,
"end": 5872
} | interface ____ method
willCallRealMethod().given(testContext).publishEvent(any());
given(testContext.getApplicationContext()).willReturn(applicationContext);
// Only allow events to be published for test methods named "publish*".
given(testContext.hasApplicationContext()).willReturn(testInfo.getTestMethod().get().getName().startsWith("publish"));
}
@Test
void publishBeforeTestClassEvent() {
assertEvent(BeforeTestClassEvent.class, listener::beforeTestClass);
}
@Test
void publishPrepareTestInstanceEvent() {
assertEvent(PrepareTestInstanceEvent.class, listener::prepareTestInstance);
}
@Test
void publishBeforeTestMethodEvent() {
assertEvent(BeforeTestMethodEvent.class, listener::beforeTestMethod);
}
@Test
void publishBeforeTestExecutionEvent() {
assertEvent(BeforeTestExecutionEvent.class, listener::beforeTestExecution);
}
@Test
void publishAfterTestExecutionEvent() {
assertEvent(AfterTestExecutionEvent.class, listener::afterTestExecution);
}
@Test
void publishAfterTestMethodEvent() {
assertEvent(AfterTestMethodEvent.class, listener::afterTestMethod);
}
@Test
void publishAfterTestClassEvent() {
assertEvent(AfterTestClassEvent.class, listener::afterTestClass);
}
@Test
void doesNotPublishBeforeTestClassEventIfApplicationContextHasNotBeenLoaded() {
assertNoEvent(BeforeTestClassEvent.class, listener::beforeTestClass);
}
@Test
void doesNotPublishPrepareTestInstanceEventIfApplicationContextHasNotBeenLoaded() {
assertNoEvent(PrepareTestInstanceEvent.class, listener::prepareTestInstance);
}
@Test
void doesNotPublishBeforeTestMethodEventIfApplicationContextHasNotBeenLoaded() {
assertNoEvent(BeforeTestMethodEvent.class, listener::beforeTestMethod);
}
@Test
void doesNotPublishBeforeTestExecutionEventIfApplicationContextHasNotBeenLoaded() {
assertNoEvent(BeforeTestExecutionEvent.class, listener::beforeTestExecution);
}
@Test
void doesNotPublishAfterTestExecutionEventIfApplicationContextHasNotBeenLoaded() {
assertNoEvent(AfterTestExecutionEvent.class, listener::afterTestExecution);
}
@Test
void doesNotPublishAfterTestMethodEventIfApplicationContextHasNotBeenLoaded() {
assertNoEvent(AfterTestMethodEvent.class, listener::afterTestMethod);
}
@Test
void doesNotPublishAfterTestClassEventIfApplicationContextHasNotBeenLoaded() {
assertNoEvent(AfterTestClassEvent.class, listener::afterTestClass);
}
private void assertEvent(Class<? extends TestContextEvent> eventClass, Consumer<TestContext> callback) {
callback.accept(testContext);
// The listener attempted to publish the event...
verify(testContext, times(1)).publishEvent(eventFactory.capture());
// The listener successfully published the event...
verify(applicationContext, times(1)).publishEvent(any());
// Verify the type of event that was published.
ApplicationEvent event = eventFactory.getValue().apply(testContext);
assertThat(event).isInstanceOf(eventClass);
assertThat(event.getSource()).isEqualTo(testContext);
}
private void assertNoEvent(Class<? extends TestContextEvent> eventClass, Consumer<TestContext> callback) {
callback.accept(testContext);
// The listener attempted to publish the event...
verify(testContext, times(1)).publishEvent(eventFactory.capture());
// But the event was not actually published since the ApplicationContext
// was not available.
verify(applicationContext, never()).publishEvent(any());
// In any case, we can still verify the type of event that would have
// been published.
ApplicationEvent event = eventFactory.getValue().apply(testContext);
assertThat(event).isInstanceOf(eventClass);
assertThat(event.getSource()).isEqualTo(testContext);
}
}
| default |
java | micronaut-projects__micronaut-core | inject-groovy/src/main/groovy/io/micronaut/ast/groovy/visitor/GroovyPackageElement.java | {
"start": 908,
"end": 1028
} | class ____ returning data from a {@link PackageNode}.
*
* @author Graeme Rocher
* @since 2.0
*/
@Internal
public | element |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java | {
"start": 1454,
"end": 1916
} | class ____ manages the initialization and destruction of the
* HttpFSServer server, it is a <code>javax.servlet.ServletContextListener
* </code> implementation that is wired in HttpFSServer's WAR
* <code>WEB-INF/web.xml</code>.
* <p>
* It provides acces to the server context via the singleton {@link #get}.
* <p>
* All the configuration is loaded from configuration properties prefixed
* with <code>httpfs.</code>.
*/
@InterfaceAudience.Private
public | that |
java | apache__camel | components/camel-hazelcast/src/test/java/org/apache/camel/component/hazelcast/HazelcastAtomicnumberProducerTest.java | {
"start": 1574,
"end": 7450
} | class ____ extends HazelcastCamelTestSupport {
@Mock
private IAtomicLong atomicNumber;
@Mock
private CPSubsystem cpSubsystem;
@Override
protected void trainHazelcastInstance(HazelcastInstance hazelcastInstance) {
when(hazelcastInstance.getCPSubsystem()).thenReturn(cpSubsystem);
when(cpSubsystem.getAtomicLong("foo")).thenReturn(atomicNumber);
}
@Override
protected void verifyHazelcastInstance(HazelcastInstance hazelcastInstance) {
verify(hazelcastInstance, times(10)).getCPSubsystem();
verify(cpSubsystem, atLeastOnce()).getAtomicLong("foo");
}
@AfterEach
public void verifyAtomicNumberMock() {
verifyNoMoreInteractions(atomicNumber);
}
@Test
public void testWithInvalidOperationName() {
assertThrows(CamelExecutionException.class,
() -> template.sendBody("direct:setInvalid", 4711));
}
@Test
public void testSet() {
template.sendBody("direct:set", 4711);
verify(atomicNumber).set(4711);
}
@Test
public void testGet() {
when(atomicNumber.get()).thenReturn(1234L);
long body = template.requestBody("direct:get", null, Long.class);
verify(atomicNumber).get();
assertEquals(1234, body);
}
@Test
public void testIncrement() {
when(atomicNumber.incrementAndGet()).thenReturn(11L);
long body = template.requestBody("direct:increment", null, Long.class);
verify(atomicNumber).incrementAndGet();
assertEquals(11, body);
}
@Test
public void testDecrement() {
when(atomicNumber.decrementAndGet()).thenReturn(9L);
long body = template.requestBody("direct:decrement", null, Long.class);
verify(atomicNumber).decrementAndGet();
assertEquals(9, body);
}
@Test
public void testDestroy() throws InterruptedException {
template.sendBody("direct:destroy", null);
verify(atomicNumber).destroy();
}
@Test
public void testSetWithOperationNumber() {
template.sendBody("direct:setWithOperationNumber", 5711);
verify(atomicNumber).set(5711);
}
@Test
public void testSetWithOperationName() {
template.sendBody("direct:setWithOperationName", 5711);
verify(atomicNumber).set(5711);
}
@Test
public void testCompareAndSet() {
Map<String, Object> headersOk = new HashMap();
headersOk.put(HazelcastConstants.EXPECTED_VALUE, 1234L);
when(atomicNumber.compareAndSet(1234L, 1235L)).thenReturn(true);
when(atomicNumber.compareAndSet(1233L, 1235L)).thenReturn(false);
boolean result = template.requestBodyAndHeaders("direct:compareAndSet", 1235L, headersOk, Boolean.class);
verify(atomicNumber).compareAndSet(1234L, 1235L);
assertEquals(true, result);
Map<String, Object> headersKo = new HashMap();
headersKo.put(HazelcastConstants.EXPECTED_VALUE, 1233L);
result = template.requestBodyAndHeaders("direct:compareAndSet", 1235L, headersKo, Boolean.class);
verify(atomicNumber).compareAndSet(1233L, 1235L);
assertEquals(false, result);
}
@Test
public void testGetAndAdd() {
when(atomicNumber.getAndAdd(12L)).thenReturn(13L);
long result = template.requestBody("direct:getAndAdd", 12L, Long.class);
verify(atomicNumber).getAndAdd(12L);
assertEquals(13L, result);
}
@Override
protected RouteBuilder createRouteBuilder() throws Exception {
return new RouteBuilder() {
@Override
public void configure() throws Exception {
from("direct:setInvalid").setHeader(HazelcastConstants.OPERATION, constant("invalid"))
.to(String.format("hazelcast-%sfoo", HazelcastConstants.ATOMICNUMBER_PREFIX));
from("direct:set").setHeader(HazelcastConstants.OPERATION, constant(HazelcastOperation.SET_VALUE))
.to(String.format("hazelcast-%sfoo", HazelcastConstants.ATOMICNUMBER_PREFIX));
from("direct:get").setHeader(HazelcastConstants.OPERATION, constant(HazelcastOperation.GET))
.to(String.format("hazelcast-%sfoo", HazelcastConstants.ATOMICNUMBER_PREFIX));
from("direct:increment").setHeader(HazelcastConstants.OPERATION, constant(HazelcastOperation.INCREMENT)).to(
String.format("hazelcast-%sfoo", HazelcastConstants.ATOMICNUMBER_PREFIX));
from("direct:decrement").setHeader(HazelcastConstants.OPERATION, constant(HazelcastOperation.DECREMENT)).to(
String.format("hazelcast-%sfoo", HazelcastConstants.ATOMICNUMBER_PREFIX));
from("direct:destroy").setHeader(HazelcastConstants.OPERATION, constant(HazelcastOperation.DESTROY)).to(
String.format("hazelcast-%sfoo", HazelcastConstants.ATOMICNUMBER_PREFIX));
from("direct:compareAndSet")
.setHeader(HazelcastConstants.OPERATION, constant(HazelcastOperation.COMPARE_AND_SET)).to(
String.format("hazelcast-%sfoo", HazelcastConstants.ATOMICNUMBER_PREFIX));
from("direct:getAndAdd").setHeader(HazelcastConstants.OPERATION, constant(HazelcastOperation.GET_AND_ADD)).to(
String.format("hazelcast-%sfoo", HazelcastConstants.ATOMICNUMBER_PREFIX));
from("direct:setWithOperationNumber").toF("hazelcast-%sfoo?operation=%s",
HazelcastConstants.ATOMICNUMBER_PREFIX, HazelcastOperation.SET_VALUE);
from("direct:setWithOperationName").toF("hazelcast-%sfoo?operation=setvalue",
HazelcastConstants.ATOMICNUMBER_PREFIX);
}
};
}
}
| HazelcastAtomicnumberProducerTest |
java | elastic__elasticsearch | modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldMapper.java | {
"start": 1713,
"end": 2022
} | class ____ extends FieldMapper {
public static final String NAME = "_feature";
public static final String CONTENT_TYPE = "rank_feature";
private static RankFeatureFieldType ft(FieldMapper in) {
return ((RankFeatureFieldMapper) in).fieldType();
}
public static | RankFeatureFieldMapper |
java | apache__camel | components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/spring/processor/SpringCamelContextLogExhaustedMessageBodyTest.java | {
"start": 1060,
"end": 1403
} | class ____ extends CamelContextLogExhaustedMessageBodyTest {
@Override
protected CamelContext createCamelContext() throws Exception {
return createSpringCamelContext(this,
"org/apache/camel/spring/processor/SpringCamelContextLogExhaustedMessageBodyTest.xml");
}
}
| SpringCamelContextLogExhaustedMessageBodyTest |
java | apache__avro | lang/java/mapred/src/main/java/org/apache/avro/mapreduce/AvroSequenceFileInputFormat.java | {
"start": 2130,
"end": 4759
} | class ____ extends RecordReader<K, V> {
private SequenceFile.Reader mReader;
private long mStart;
private long mEnd;
private boolean mHasMoreData;
private K mCurrentKey;
private V mCurrentValue;
/** {@inheritDoc} */
@Override
public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
FileSplit fileSplit = (FileSplit) split;
Configuration conf = context.getConfiguration();
Path path = fileSplit.getPath();
FileSystem fs = path.getFileSystem(conf);
// Configure the SequenceFile reader.
AvroSequenceFile.Reader.Options options = new AvroSequenceFile.Reader.Options().withFileSystem(fs)
.withInputPath(path).withConfiguration(conf);
Schema keySchema = AvroJob.getInputKeySchema(conf);
if (null != keySchema) {
options.withKeySchema(keySchema);
}
Schema valueSchema = AvroJob.getInputValueSchema(conf);
if (null != valueSchema) {
options.withValueSchema(valueSchema);
}
mReader = new AvroSequenceFile.Reader(options);
mEnd = fileSplit.getStart() + fileSplit.getLength();
if (fileSplit.getStart() > mReader.getPosition()) {
// Sync to the beginning of the input split.
mReader.sync(fileSplit.getStart());
}
mStart = mReader.getPosition();
mHasMoreData = mStart < mEnd;
}
/** {@inheritDoc} */
@Override
@SuppressWarnings("unchecked")
public boolean nextKeyValue() throws IOException, InterruptedException {
if (!mHasMoreData) {
return false;
}
long pos = mReader.getPosition();
mCurrentKey = (K) mReader.next(mCurrentKey);
if (null == mCurrentKey || (pos >= mEnd && mReader.syncSeen())) {
mHasMoreData = false;
mCurrentKey = null;
mCurrentValue = null;
} else {
mCurrentValue = (V) mReader.getCurrentValue(mCurrentValue);
}
return mHasMoreData;
}
/** {@inheritDoc} */
@Override
public K getCurrentKey() {
return mCurrentKey;
}
/** {@inheritDoc} */
@Override
public V getCurrentValue() {
return mCurrentValue;
}
/** {@inheritDoc} */
@Override
public float getProgress() throws IOException {
if (mEnd == mStart) {
return 0.0f;
} else {
return Math.min(1.0f, (mReader.getPosition() - mStart) / (float) (mEnd - mStart));
}
}
/** {@inheritDoc} */
@Override
public synchronized void close() throws IOException {
mReader.close();
}
}
}
| AvroSequenceFileRecordReader |
java | spring-projects__spring-security | test/src/main/java/org/springframework/security/test/web/servlet/request/SecurityMockMvcRequestPostProcessors.java | {
"start": 41549,
"end": 44917
} | class ____ implements RequestPostProcessor {
private Jwt jwt;
private Converter<Jwt, Collection<GrantedAuthority>> authoritiesConverter = new JwtGrantedAuthoritiesConverter();
private JwtRequestPostProcessor() {
this.jwt((jwt) -> {
});
}
/**
* Use the given {@link Jwt.Builder} {@link Consumer} to configure the underlying
* {@link Jwt}
*
* This method first creates a default {@link Jwt.Builder} instance with default
* values for the {@code alg}, {@code sub}, and {@code scope} claims. The
* {@link Consumer} can then modify these or provide additional configuration.
*
* Calling {@link SecurityMockMvcRequestPostProcessors#jwt()} is the equivalent of
* calling {@code SecurityMockMvcRequestPostProcessors.jwt().jwt(() -> {})}.
* @param jwtBuilderConsumer For configuring the underlying {@link Jwt}
* @return the {@link JwtRequestPostProcessor} for additional customization
*/
public JwtRequestPostProcessor jwt(Consumer<Jwt.Builder> jwtBuilderConsumer) {
Jwt.Builder jwtBuilder = Jwt.withTokenValue("token")
.header("alg", "none")
.claim(JwtClaimNames.SUB, "user")
.claim("scope", "read");
jwtBuilderConsumer.accept(jwtBuilder);
this.jwt = jwtBuilder.build();
return this;
}
/**
* Use the given {@link Jwt}
* @param jwt The {@link Jwt} to use
* @return the {@link JwtRequestPostProcessor} for additional customization
*/
public JwtRequestPostProcessor jwt(Jwt jwt) {
this.jwt = jwt;
return this;
}
/**
* Use the provided authorities in the token
* @param authorities the authorities to use
* @return the {@link JwtRequestPostProcessor} for further configuration
*/
public JwtRequestPostProcessor authorities(Collection<GrantedAuthority> authorities) {
Assert.notNull(authorities, "authorities cannot be null");
this.authoritiesConverter = (jwt) -> authorities;
return this;
}
/**
* Use the provided authorities in the token
* @param authorities the authorities to use
* @return the {@link JwtRequestPostProcessor} for further configuration
*/
public JwtRequestPostProcessor authorities(GrantedAuthority... authorities) {
Assert.notNull(authorities, "authorities cannot be null");
this.authoritiesConverter = (jwt) -> Arrays.asList(authorities);
return this;
}
/**
* Provides the configured {@link Jwt} so that custom authorities can be derived
* from it
* @param authoritiesConverter the conversion strategy from {@link Jwt} to a
* {@link Collection} of {@link GrantedAuthority}s
* @return the {@link JwtRequestPostProcessor} for further configuration
*/
public JwtRequestPostProcessor authorities(Converter<Jwt, Collection<GrantedAuthority>> authoritiesConverter) {
Assert.notNull(authoritiesConverter, "authoritiesConverter cannot be null");
this.authoritiesConverter = authoritiesConverter;
return this;
}
@NullUnmarked
@Override
public MockHttpServletRequest postProcessRequest(MockHttpServletRequest request) {
CsrfFilter.skipRequest(request);
JwtAuthenticationToken token = new JwtAuthenticationToken(this.jwt,
this.authoritiesConverter.convert(this.jwt));
return new AuthenticationRequestPostProcessor(token).postProcessRequest(request);
}
}
/**
* @author Josh Cummings
* @since 5.3
*/
public static final | JwtRequestPostProcessor |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java | {
"start": 2264,
"end": 24008
} | class ____ implements ChunkedToXContent, Writeable, ExpectedShardSizeEstimator.ShardSizeProvider {
public static final ClusterInfo EMPTY = new ClusterInfo();
private static final TransportVersion HEAP_USAGE_IN_CLUSTER_INFO = TransportVersion.fromName("heap_usage_in_cluster_info");
private static final TransportVersion NODE_USAGE_STATS_FOR_THREAD_POOLS_IN_CLUSTER_INFO = TransportVersion.fromName(
"node_usage_stats_for_thread_pools_in_cluster_info"
);
private static final TransportVersion SHARD_WRITE_LOAD_IN_CLUSTER_INFO = TransportVersion.fromName("shard_write_load_in_cluster_info");
private static final TransportVersion MAX_HEAP_SIZE_PER_NODE_IN_CLUSTER_INFO = TransportVersion.fromName(
"max_heap_size_per_node_in_cluster_info"
);
private final Map<String, DiskUsage> leastAvailableSpaceUsage;
private final Map<String, DiskUsage> mostAvailableSpaceUsage;
final Map<String, Long> shardSizes;
final Map<ShardId, Long> shardDataSetSizes;
final Map<NodeAndShard, String> dataPath;
final Map<NodeAndPath, ReservedSpace> reservedSpace;
final Map<String, EstimatedHeapUsage> estimatedHeapUsages;
final Map<String, NodeUsageStatsForThreadPools> nodeUsageStatsForThreadPools;
final Map<ShardId, Double> shardWriteLoads;
// max heap size per node ID
final Map<String, ByteSizeValue> maxHeapSizePerNode;
private final Map<ShardId, Set<String>> shardToNodeIds;
protected ClusterInfo() {
this(Map.of(), Map.of(), Map.of(), Map.of(), Map.of(), Map.of(), Map.of(), Map.of(), Map.of(), Map.of());
}
/**
* Creates a new ClusterInfo instance.
*
* @param leastAvailableSpaceUsage a node id to disk usage mapping for the path that has the least available space on the node.
* @param mostAvailableSpaceUsage a node id to disk usage mapping for the path that has the most available space on the node.
* @param shardSizes a shardkey to size in bytes mapping per shard.
* @param shardDataSetSizes a shard id to data set size in bytes mapping per shard
* @param dataPath the shard routing to datapath mapping
* @param reservedSpace reserved space per shard broken down by node and data path
* @param estimatedHeapUsages estimated heap usage broken down by node
* @param nodeUsageStatsForThreadPools node-level usage stats (operational load) broken down by node
* @see #shardIdentifierFromRouting
* @param maxHeapSizePerNode node id to max heap size
*/
public ClusterInfo(
Map<String, DiskUsage> leastAvailableSpaceUsage,
Map<String, DiskUsage> mostAvailableSpaceUsage,
Map<String, Long> shardSizes,
Map<ShardId, Long> shardDataSetSizes,
Map<NodeAndShard, String> dataPath,
Map<NodeAndPath, ReservedSpace> reservedSpace,
Map<String, EstimatedHeapUsage> estimatedHeapUsages,
Map<String, NodeUsageStatsForThreadPools> nodeUsageStatsForThreadPools,
Map<ShardId, Double> shardWriteLoads,
Map<String, ByteSizeValue> maxHeapSizePerNode
) {
this(
leastAvailableSpaceUsage,
mostAvailableSpaceUsage,
shardSizes,
shardDataSetSizes,
dataPath,
reservedSpace,
estimatedHeapUsages,
nodeUsageStatsForThreadPools,
shardWriteLoads,
maxHeapSizePerNode,
computeShardToNodeIds(dataPath)
);
}
private ClusterInfo(
Map<String, DiskUsage> leastAvailableSpaceUsage,
Map<String, DiskUsage> mostAvailableSpaceUsage,
Map<String, Long> shardSizes,
Map<ShardId, Long> shardDataSetSizes,
Map<NodeAndShard, String> dataPath,
Map<NodeAndPath, ReservedSpace> reservedSpace,
Map<String, EstimatedHeapUsage> estimatedHeapUsages,
Map<String, NodeUsageStatsForThreadPools> nodeUsageStatsForThreadPools,
Map<ShardId, Double> shardWriteLoads,
Map<String, ByteSizeValue> maxHeapSizePerNode,
Map<ShardId, Set<String>> shardToNodeIds
) {
this.leastAvailableSpaceUsage = Map.copyOf(leastAvailableSpaceUsage);
this.mostAvailableSpaceUsage = Map.copyOf(mostAvailableSpaceUsage);
this.shardSizes = Map.copyOf(shardSizes);
this.shardDataSetSizes = Map.copyOf(shardDataSetSizes);
this.dataPath = Map.copyOf(dataPath);
this.reservedSpace = Map.copyOf(reservedSpace);
this.estimatedHeapUsages = Map.copyOf(estimatedHeapUsages);
this.nodeUsageStatsForThreadPools = Map.copyOf(nodeUsageStatsForThreadPools);
this.shardWriteLoads = Map.copyOf(shardWriteLoads);
this.maxHeapSizePerNode = Map.copyOf(maxHeapSizePerNode);
this.shardToNodeIds = shardToNodeIds;
}
public ClusterInfo(StreamInput in) throws IOException {
this.leastAvailableSpaceUsage = in.readImmutableMap(DiskUsage::new);
this.mostAvailableSpaceUsage = in.readImmutableMap(DiskUsage::new);
this.shardSizes = in.readImmutableMap(StreamInput::readLong);
this.shardDataSetSizes = in.readImmutableMap(ShardId::new, StreamInput::readLong);
this.dataPath = in.readImmutableMap(NodeAndShard::new, StreamInput::readString);
this.reservedSpace = in.readImmutableMap(NodeAndPath::new, ReservedSpace::new);
if (in.getTransportVersion().supports(HEAP_USAGE_IN_CLUSTER_INFO)) {
this.estimatedHeapUsages = in.readImmutableMap(EstimatedHeapUsage::new);
} else {
this.estimatedHeapUsages = Map.of();
}
if (in.getTransportVersion().supports(NODE_USAGE_STATS_FOR_THREAD_POOLS_IN_CLUSTER_INFO)) {
this.nodeUsageStatsForThreadPools = in.readImmutableMap(NodeUsageStatsForThreadPools::new);
} else {
this.nodeUsageStatsForThreadPools = Map.of();
}
if (in.getTransportVersion().supports(SHARD_WRITE_LOAD_IN_CLUSTER_INFO)) {
this.shardWriteLoads = in.readImmutableMap(ShardId::new, StreamInput::readDouble);
} else {
this.shardWriteLoads = Map.of();
}
if (in.getTransportVersion().supports(MAX_HEAP_SIZE_PER_NODE_IN_CLUSTER_INFO)) {
this.maxHeapSizePerNode = in.readImmutableMap(ByteSizeValue::readFrom);
} else {
this.maxHeapSizePerNode = Map.of();
}
this.shardToNodeIds = computeShardToNodeIds(dataPath);
}
ClusterInfo updateWith(
Map<String, DiskUsage> leastAvailableSpaceUsage,
Map<String, DiskUsage> mostAvailableSpaceUsage,
Map<String, Long> shardSizes,
Map<NodeAndPath, ReservedSpace> reservedSpace,
Map<String, EstimatedHeapUsage> estimatedHeapUsages,
Map<String, NodeUsageStatsForThreadPools> nodeUsageStatsForThreadPools
) {
return new ClusterInfo(
leastAvailableSpaceUsage,
mostAvailableSpaceUsage,
shardSizes,
shardDataSetSizes,
dataPath,
reservedSpace,
estimatedHeapUsages,
nodeUsageStatsForThreadPools,
shardWriteLoads,
maxHeapSizePerNode,
shardToNodeIds
);
}
private static Map<ShardId, Set<String>> computeShardToNodeIds(Map<NodeAndShard, String> dataPath) {
if (dataPath.isEmpty()) {
return Map.of();
}
final var shardToNodeIds = new HashMap<ShardId, Set<String>>();
for (NodeAndShard nodeAndShard : dataPath.keySet()) {
shardToNodeIds.computeIfAbsent(nodeAndShard.shardId, ignore -> new HashSet<>()).add(nodeAndShard.nodeId);
}
return Collections.unmodifiableMap(Maps.transformValues(shardToNodeIds, Collections::unmodifiableSet));
}
public Set<String> getNodeIdsForShard(ShardId shardId) {
assert shardToNodeIds != null : "shardToNodeIds not computed for simulations, make sure this ClusterInfo is from polling";
return shardToNodeIds.getOrDefault(shardId, Set.of());
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeMap(this.leastAvailableSpaceUsage, StreamOutput::writeWriteable);
out.writeMap(this.mostAvailableSpaceUsage, StreamOutput::writeWriteable);
out.writeMap(this.shardSizes, (o, v) -> o.writeLong(v == null ? -1 : v));
out.writeMap(this.shardDataSetSizes, StreamOutput::writeWriteable, StreamOutput::writeLong);
out.writeMap(this.dataPath, StreamOutput::writeWriteable, StreamOutput::writeString);
out.writeMap(this.reservedSpace);
if (out.getTransportVersion().supports(HEAP_USAGE_IN_CLUSTER_INFO)) {
out.writeMap(this.estimatedHeapUsages, StreamOutput::writeWriteable);
}
if (out.getTransportVersion().supports(NODE_USAGE_STATS_FOR_THREAD_POOLS_IN_CLUSTER_INFO)) {
out.writeMap(this.nodeUsageStatsForThreadPools, StreamOutput::writeWriteable);
}
if (out.getTransportVersion().supports(SHARD_WRITE_LOAD_IN_CLUSTER_INFO)) {
out.writeMap(this.shardWriteLoads, StreamOutput::writeWriteable, StreamOutput::writeDouble);
}
if (out.getTransportVersion().supports(MAX_HEAP_SIZE_PER_NODE_IN_CLUSTER_INFO)) {
out.writeMap(this.maxHeapSizePerNode, StreamOutput::writeWriteable);
}
}
/**
* This creates a fake ShardRouting from limited info available in NodeAndShard.
* This will not be the same as real shard, however this is fine as ClusterInfo is only written
* in TransportClusterAllocationExplainAction when handling an allocation explain request with includeDiskInfo during upgrade
* that is later presented to the user and is not used by any code.
*/
private static ShardRouting createFakeShardRoutingFromNodeAndShard(NodeAndShard nodeAndShard) {
return newUnassigned(
nodeAndShard.shardId,
true,
RecoverySource.EmptyStoreRecoverySource.INSTANCE,
new UnassignedInfo(REINITIALIZED, "fake"),
ShardRouting.Role.DEFAULT // ok, this is only used prior to DATA_PATH_NEW_KEY_VERSION which has no other roles
).initialize(nodeAndShard.nodeId, null, 0L).moveToStarted(0L);
}
@Override
public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params) {
return Iterators.concat(startObject("nodes"), Iterators.map(leastAvailableSpaceUsage.entrySet().iterator(), c -> (builder, p) -> {
builder.startObject(c.getKey());
{ // node
builder.field("node_name", c.getValue().nodeName());
builder.startObject("least_available");
{
c.getValue().toShortXContent(builder);
}
builder.endObject(); // end "least_available"
builder.startObject("most_available");
{
DiskUsage most = this.mostAvailableSpaceUsage.get(c.getKey());
if (most != null) {
most.toShortXContent(builder);
}
}
builder.endObject(); // end "most_available"
}
return builder.endObject(); // end $nodename
}),
chunk(
(builder, p) -> builder.endObject() // end "nodes"
.startObject("shard_sizes")
),
Iterators.map(
shardSizes.entrySet().iterator(),
c -> (builder, p) -> builder.humanReadableField(c.getKey() + "_bytes", c.getKey(), ByteSizeValue.ofBytes(c.getValue()))
),
chunk(
(builder, p) -> builder.endObject() // end "shard_sizes"
.startObject("shard_data_set_sizes")
),
Iterators.map(
shardDataSetSizes.entrySet().iterator(),
c -> (builder, p) -> builder.humanReadableField(
c.getKey() + "_bytes",
c.getKey().toString(),
ByteSizeValue.ofBytes(c.getValue())
)
),
chunk(
(builder, p) -> builder.endObject() // end "shard_data_set_sizes"
.startObject("shard_paths")
),
Iterators.map(dataPath.entrySet().iterator(), c -> (builder, p) -> builder.field(c.getKey().toString(), c.getValue())),
chunk(
(builder, p) -> builder.endObject() // end "shard_paths"
.startArray("reserved_sizes")
),
Iterators.map(reservedSpace.entrySet().iterator(), c -> (builder, p) -> {
builder.startObject();
{
builder.field("node_id", c.getKey().nodeId);
builder.field("path", c.getKey().path);
c.getValue().toXContent(builder, params);
}
return builder.endObject(); // NodeAndPath
}),
endArray() // end "reserved_sizes"
// NOTE: We don't serialize estimatedHeapUsages/nodeUsageStatsForThreadPools/shardWriteLoads/maxHeapSizePerNode at this stage,
// to avoid committing to API payloads until the features are settled
);
}
/**
* Returns a node id to estimated heap usage mapping for all nodes that we have such data for.
* Note that these estimates should be considered minimums. They may be used to determine whether
* there IS NOT capacity to do something, but not to determine that there IS capacity to do something.
* Also note that the map may not be complete, it may contain none, or a subset of the nodes in
* the cluster at any time. It may also contain entries for nodes that have since left the cluster.
*/
public Map<String, EstimatedHeapUsage> getEstimatedHeapUsages() {
return estimatedHeapUsages;
}
/**
* Returns a map containing thread pool usage stats for each node, keyed by node ID.
*/
public Map<String, NodeUsageStatsForThreadPools> getNodeUsageStatsForThreadPools() {
return nodeUsageStatsForThreadPools;
}
/**
* Returns a node id to disk usage mapping for the path that has the least available space on the node.
* Note that this does not take account of reserved space: there may be another path with less available _and unreserved_ space.
*/
public Map<String, DiskUsage> getNodeLeastAvailableDiskUsages() {
return this.leastAvailableSpaceUsage;
}
/**
* Returns a node id to disk usage mapping for the path that has the most available space on the node.
* Note that this does not take account of reserved space: there may be another path with more available _and unreserved_ space.
*/
public Map<String, DiskUsage> getNodeMostAvailableDiskUsages() {
return this.mostAvailableSpaceUsage;
}
/**
* Returns a map of shard IDs to the write-loads for use in balancing. The write-loads can be interpreted
* as the average number of threads that ingestion to the shard will consume.
* This information may be partial or missing altogether under some circumstances. The absence of a shard
* write load from the map should be interpreted as "unknown".
*/
public Map<ShardId, Double> getShardWriteLoads() {
return shardWriteLoads;
}
/**
* Returns the shard size for the given shardId or <code>null</code> if that metric is not available.
*/
@Override
public Long getShardSize(ShardId shardId, boolean primary) {
return shardSizes.get(shardIdentifierFromRouting(shardId, primary));
}
/**
* Returns the nodes absolute data-path the given shard is allocated on or <code>null</code> if the information is not available.
*/
public String getDataPath(ShardRouting shardRouting) {
return dataPath.get(NodeAndShard.from(shardRouting));
}
public String getDataPath(NodeAndShard nodeAndShard) {
return dataPath.get(nodeAndShard);
}
public Optional<Long> getShardDataSetSize(ShardId shardId) {
return Optional.ofNullable(shardDataSetSizes.get(shardId));
}
/**
* Returns the reserved space for each shard on the given node/path pair
*/
public ReservedSpace getReservedSpace(String nodeId, String dataPath) {
final ReservedSpace result = reservedSpace.get(new NodeAndPath(nodeId, dataPath));
return result == null ? ReservedSpace.EMPTY : result;
}
public Map<String, ByteSizeValue> getMaxHeapSizePerNode() {
return this.maxHeapSizePerNode;
}
/**
* Return true if the shard has moved since the time ClusterInfo was created.
*/
public boolean hasShardMoved(ShardRouting shardRouting) {
// We use dataPath to find out whether a shard is allocated on a node.
// TODO: DataPath is sent with disk usages but thread pool usage is sent separately so that local shard allocation
// may change between the two calls.
return getDataPath(shardRouting) == null;
}
/**
* Method that incorporates the ShardId for the shard into a string that
* includes a 'p' or 'r' depending on whether the shard is a primary.
*/
public static String shardIdentifierFromRouting(ShardRouting shardRouting) {
return shardIdentifierFromRouting(shardRouting.shardId(), shardRouting.primary());
}
public static String shardIdentifierFromRouting(ShardId shardId, boolean primary) {
return shardId.toString() + "[" + (primary ? "p" : "r") + "]";
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ClusterInfo that = (ClusterInfo) o;
return leastAvailableSpaceUsage.equals(that.leastAvailableSpaceUsage)
&& mostAvailableSpaceUsage.equals(that.mostAvailableSpaceUsage)
&& shardSizes.equals(that.shardSizes)
&& shardDataSetSizes.equals(that.shardDataSetSizes)
&& dataPath.equals(that.dataPath)
&& reservedSpace.equals(that.reservedSpace)
&& estimatedHeapUsages.equals(that.estimatedHeapUsages)
&& nodeUsageStatsForThreadPools.equals(that.nodeUsageStatsForThreadPools)
&& shardWriteLoads.equals(that.shardWriteLoads)
&& maxHeapSizePerNode.equals(that.maxHeapSizePerNode);
}
@Override
public int hashCode() {
return Objects.hash(
leastAvailableSpaceUsage,
mostAvailableSpaceUsage,
shardSizes,
shardDataSetSizes,
dataPath,
reservedSpace,
estimatedHeapUsages,
nodeUsageStatsForThreadPools,
shardWriteLoads,
maxHeapSizePerNode
);
}
@Override
public String toString() {
return Strings.toString(this, true, false);
}
// exposed for tests, computed here rather than exposing all the collections separately
int getChunkCount() {
return leastAvailableSpaceUsage.size() + shardSizes.size() + shardDataSetSizes.size() + dataPath.size() + reservedSpace.size() + 6;
}
public record NodeAndShard(String nodeId, ShardId shardId) implements Writeable {
public NodeAndShard {
Objects.requireNonNull(nodeId);
Objects.requireNonNull(shardId);
}
public NodeAndShard(StreamInput in) throws IOException {
this(in.readString(), new ShardId(in));
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(nodeId);
shardId.writeTo(out);
}
public static NodeAndShard from(ShardRouting shardRouting) {
return new NodeAndShard(shardRouting.currentNodeId(), shardRouting.shardId());
}
}
/**
* Represents a data path on a node
*/
public record NodeAndPath(String nodeId, String path) implements Writeable {
public NodeAndPath {
Objects.requireNonNull(nodeId);
Objects.requireNonNull(path);
}
public NodeAndPath(StreamInput in) throws IOException {
this(in.readString(), in.readString());
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(nodeId);
out.writeString(path);
}
}
/**
* Represents the total amount of "reserved" space on a particular data path, together with the set of shards considered.
*/
public record ReservedSpace(long total, Set<ShardId> shardIds) implements Writeable {
public static final ReservedSpace EMPTY = new ReservedSpace(0, new HashSet<>());
ReservedSpace(StreamInput in) throws IOException {
this(in.readVLong(), in.readCollectionAsSet(ShardId::new));
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVLong(total);
out.writeCollection(shardIds);
}
public boolean containsShardId(ShardId shardId) {
return shardIds.contains(shardId);
}
void toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
builder.field("total", total);
builder.startArray("shards");
{
for (ShardId shardIdCursor : shardIds) {
shardIdCursor.toXContent(builder, params);
}
}
builder.endArray(); // end "shards"
}
public static | ClusterInfo |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/ID.java | {
"start": 1390,
"end": 2416
} | class ____ implements WritableComparable<ID> {
protected static final char SEPARATOR = '_';
protected int id;
/** constructs an ID object from the given int */
public ID(int id) {
this.id = id;
}
protected ID() {
}
/** returns the int which represents the identifier */
public int getId() {
return id;
}
@Override
public String toString() {
return String.valueOf(id);
}
@Override
public int hashCode() {
return id;
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if(o == null)
return false;
if (o.getClass() == this.getClass()) {
ID that = (ID) o;
return this.id == that.id;
}
else
return false;
}
/** Compare IDs by associated numbers */
public int compareTo(ID that) {
return this.id - that.id;
}
public void readFields(DataInput in) throws IOException {
this.id = in.readInt();
}
public void write(DataOutput out) throws IOException {
out.writeInt(id);
}
}
| ID |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/common/settings/PropertyPlaceholder.java | {
"start": 6643,
"end": 7454
} | interface ____ {
/**
* Resolves the supplied placeholder name into the replacement value.
*
* @param placeholderName the name of the placeholder to resolve.
* @return the replacement value or <code>null</code> if no replacement is to be made.
*/
String resolvePlaceholder(String placeholderName);
boolean shouldIgnoreMissing(String placeholderName);
/**
* Allows for special handling for ignored missing placeholders that may be resolved elsewhere
*
* @param placeholderName the name of the placeholder to resolve.
* @return true if the placeholder should be replaced with a empty string
*/
boolean shouldRemoveMissingPlaceholder(String placeholderName);
}
}
| PlaceholderResolver |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/query/SubQuerySelectCaseWhenTest.java | {
"start": 768,
"end": 1724
} | class ____ {
@BeforeEach
public void setUp(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
session.persist( new TestEntity( "A" ) );
session.persist( new TestEntity( "B" ) );
session.persist( new TestEntity( "C" ) );
}
);
}
@Test
public void testSelectCase(SessionFactoryScope scope) {
scope.inTransaction( session -> {
List<Integer> result = session.createQuery( "select "
+ " (select "
+ " case "
+ " when "
+ " t.name = ?1 "
+ " then 0 "
+ " else 1 "
+ " end)"
+ " from TestEntity t order by t.name", Integer.class )
.setParameter( 1, "A" )
.list();
assertThat( result.size() ).isEqualTo( 3 );
assertThat( result.get( 0 ) ).isEqualTo( 0 );
assertThat( result.get( 1 ) ).isEqualTo( 1 );
assertThat( result.get( 2 ) ).isEqualTo( 1 );
} );
}
@Entity(name = "TestEntity")
public | SubQuerySelectCaseWhenTest |
java | apache__camel | core/camel-base-engine/src/main/java/org/apache/camel/impl/engine/DefaultHeadersMapFactory.java | {
"start": 1436,
"end": 1947
} | class ____ implements HeadersMapFactory {
@Override
public Map<String, Object> newMap() {
return new CaseInsensitiveMap();
}
@Override
public Map<String, Object> newMap(Map<String, Object> map) {
return new CaseInsensitiveMap(map);
}
@Override
public boolean isInstanceOf(Map<String, Object> map) {
return map instanceof CaseInsensitiveMap;
}
@Override
public boolean isCaseInsensitive() {
return true;
}
}
| DefaultHeadersMapFactory |
java | google__error-prone | core/src/test/java/com/google/errorprone/fixes/SuggestedFixesTest.java | {
"start": 74187,
"end": 74738
} | class ____ extends BugChecker implements ReturnTreeMatcher {
@Override
public Description matchReturn(ReturnTree tree, VisitorState state) {
return describeMatch(
tree,
SuggestedFix.replace(
tree.getExpression(), SuggestedFixes.castTree(tree.getExpression(), "int", state)));
}
}
@Test
public void castTree() {
BugCheckerRefactoringTestHelper.newInstance(CastTreeToIntChecker.class, getClass())
.addInputLines(
"Test.java",
"""
| CastTreeToIntChecker |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhancement/basic/FinalFieldEnhancementTest.java | {
"start": 5303,
"end": 5959
} | class ____ {
@Id
@GeneratedValue
private Long id;
private final String immutableProperty;
private String name;
// For Hibernate ORM
protected EntityWithFinalField() {
this.immutableProperty = null;
}
private EntityWithFinalField(String id) {
this.immutableProperty = id;
}
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getImmutableProperty() {
return immutableProperty;
}
}
@Entity(name = "embidwithfinal")
public static | EntityWithFinalField |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/engine/transaction/jta/platform/spi/JtaPlatformResolver.java | {
"start": 680,
"end": 830
} | interface ____ extends Service {
JtaPlatform resolveJtaPlatform(Map<?,?> configurationValues, ServiceRegistryImplementor registry);
}
| JtaPlatformResolver |
java | google__guava | android/guava/src/com/google/common/io/PatternFilenameFilter.java | {
"start": 2365,
"end": 2811
} | class ____ begin with but rather something returned from a static factory method whose declared
* return type is plain FilenameFilter. If we made such a change, then the annotation we choose
* here would have no significance to end users, who would be forced to conform to the signature
* used in FilenameFilter.)
*/
@Override
public boolean accept(File dir, String fileName) {
return pattern.matcher(fileName).matches();
}
}
| to |
java | google__dagger | javatests/dagger/internal/codegen/MapRequestRepresentationTest.java | {
"start": 4622,
"end": 5179
} | class ____ {",
" @Multibinds abstract Map<Integer, Inaccessible> ints();",
"}");
JavaFileObject componentFile =
JavaFileObjects.forSourceLines(
"test.TestComponent",
"package test;",
"",
"import dagger.Component;",
"import java.util.Map;",
"import javax.inject.Provider;",
"import other.TestModule;",
"import other.UsesInaccessible;",
"",
"@Component(modules = TestModule.class)",
" | TestModule |
java | grpc__grpc-java | netty/src/test/java/io/grpc/netty/NettyClientTransportTest.java | {
"start": 55034,
"end": 55536
} | class ____ implements Marshaller<String> {
static final StringMarshaller INSTANCE = new StringMarshaller();
@Override
public InputStream stream(String value) {
return new ByteArrayInputStream(value.getBytes(UTF_8));
}
@Override
public String parse(InputStream stream) {
try {
return new String(ByteStreams.toByteArray(stream), UTF_8);
} catch (IOException ex) {
throw new RuntimeException(ex);
}
}
}
private static | StringMarshaller |
java | spring-projects__spring-framework | spring-expression/src/test/java/org/springframework/expression/spel/ExpressionStateTests.java | {
"start": 1695,
"end": 6185
} | class ____ extends AbstractExpressionTests {
private ExpressionState state = new ExpressionState(TestScenarioCreator.getTestEvaluationContext());
@Test
void construction() {
EvaluationContext context = TestScenarioCreator.getTestEvaluationContext();
ExpressionState state = new ExpressionState(context);
assertThat(state.getEvaluationContext()).isEqualTo(context);
}
@Test
void globalVariables() {
TypedValue typedValue = state.lookupVariable("foo");
assertThat(typedValue).isEqualTo(TypedValue.NULL);
state.setVariable("foo",34);
typedValue = state.lookupVariable("foo");
assertThat(typedValue.getValue()).isEqualTo(34);
assertThat(typedValue.getTypeDescriptor().getType()).isEqualTo(Integer.class);
state.setVariable("foo","abc");
typedValue = state.lookupVariable("foo");
assertThat(typedValue.getValue()).isEqualTo("abc");
assertThat(typedValue.getTypeDescriptor().getType()).isEqualTo(String.class);
}
@Test
void rootContextObject() {
assertThat(state.getRootContextObject().getValue().getClass()).isEqualTo(Inventor.class);
// Although the root object is being set on the evaluation context,
// the value in the 'state' remains what it was when constructed.
((StandardEvaluationContext) state.getEvaluationContext()).setRootObject(null);
assertThat(state.getRootContextObject().getValue()).isInstanceOf(Inventor.class);
state = new ExpressionState(new StandardEvaluationContext());
assertThat(state.getRootContextObject()).isEqualTo(TypedValue.NULL);
((StandardEvaluationContext) state.getEvaluationContext()).setRootObject(null);
assertThat(state.getRootContextObject().getValue()).isNull();
}
@Test
void activeContextObject() {
assertThat(state.getActiveContextObject().getValue()).isEqualTo(state.getRootContextObject().getValue());
assertThatIllegalStateException().isThrownBy(state::popActiveContextObject);
state.pushActiveContextObject(new TypedValue(34));
assertThat(state.getActiveContextObject().getValue()).isEqualTo(34);
state.pushActiveContextObject(new TypedValue("hello"));
assertThat(state.getActiveContextObject().getValue()).isEqualTo("hello");
state.popActiveContextObject();
assertThat(state.getActiveContextObject().getValue()).isEqualTo(34);
state.popActiveContextObject();
assertThat(state.getActiveContextObject().getValue()).isEqualTo(state.getRootContextObject().getValue());
state = new ExpressionState(new StandardEvaluationContext());
assertThat(state.getActiveContextObject()).isEqualTo(TypedValue.NULL);
}
@Test
void rootObjectConstructor() {
EvaluationContext ctx = TestScenarioCreator.getTestEvaluationContext();
// TypedValue root = ctx.getRootObject();
// supplied should override root on context
ExpressionState state = new ExpressionState(ctx, new TypedValue("i am a string"));
TypedValue stateRoot = state.getRootContextObject();
assertThat(stateRoot.getTypeDescriptor().getType()).isEqualTo(String.class);
assertThat(stateRoot.getValue()).isEqualTo("i am a string");
}
@Test
void operators() {
assertThatExceptionOfType(SpelEvaluationException.class)
.isThrownBy(() -> state.operate(Operation.ADD,1,2))
.satisfies(ex -> assertThat(ex.getMessageCode()).isEqualTo(SpelMessage.OPERATOR_NOT_SUPPORTED_BETWEEN_TYPES));
assertThatExceptionOfType(SpelEvaluationException.class)
.isThrownBy(() -> state.operate(Operation.ADD,null,null))
.satisfies(ex -> assertThat(ex.getMessageCode()).isEqualTo(SpelMessage.OPERATOR_NOT_SUPPORTED_BETWEEN_TYPES));
}
@Test
void comparator() {
assertThat(state.getTypeComparator()).isEqualTo(state.getEvaluationContext().getTypeComparator());
}
@Test
void typeLocator() {
assertThat(state.getEvaluationContext().getTypeLocator()).isNotNull();
assertThat(state.findType("java.lang.Integer")).isEqualTo(Integer.class);
assertThatExceptionOfType(SpelEvaluationException.class)
.isThrownBy(() -> state.findType("someMadeUpName"))
.satisfies(ex -> assertThat(ex.getMessageCode()).isEqualTo(SpelMessage.TYPE_NOT_FOUND));
}
@Test
void typeConversion() {
String s = (String) state.convertValue(34, TypeDescriptor.valueOf(String.class));
assertThat(s).isEqualTo("34");
s = (String) state.convertValue(new TypedValue(34), TypeDescriptor.valueOf(String.class));
assertThat(s).isEqualTo("34");
}
@Test
void propertyAccessors() {
assertThat(state.getPropertyAccessors()).isEqualTo(state.getEvaluationContext().getPropertyAccessors());
}
}
| ExpressionStateTests |
java | elastic__elasticsearch | x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RTrimTests.java | {
"start": 721,
"end": 1197
} | class ____ extends AbstractTrimTests {
public RTrimTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
this.testCase = testCaseSupplier.get();
}
@ParametersFactory
public static Iterable<Object[]> parameters() {
return parameters("RTrimEvaluator", false, true);
}
@Override
protected Expression build(Source source, List<Expression> args) {
return new RTrim(source, args.get(0));
}
}
| RTrimTests |
java | resilience4j__resilience4j | resilience4j-core/src/main/java/io/github/resilience4j/core/NamingThreadFactory.java | {
"start": 909,
"end": 1938
} | class ____ implements ThreadFactory {
private final ThreadGroup group;
private final AtomicInteger threadNumber = new AtomicInteger(1);
private final String prefix;
public NamingThreadFactory(String name) {
this.group = getThreadGroup();
this.prefix = String.join("-",name, "");
}
private ThreadGroup getThreadGroup() {
SecurityManager security = System.getSecurityManager();
return security != null ? security.getThreadGroup()
: Thread.currentThread().getThreadGroup();
}
@Override
public Thread newThread(Runnable runnable) {
Thread thread = new Thread(group, runnable, createName(), 0);
if (thread.isDaemon()) {
thread.setDaemon(false);
}
if (thread.getPriority() != Thread.NORM_PRIORITY) {
thread.setPriority(Thread.NORM_PRIORITY);
}
return thread;
}
private String createName() {
return prefix + threadNumber.getAndIncrement();
}
}
| NamingThreadFactory |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/SearchHit.java | {
"start": 2905,
"end": 26241
} | class ____ implements Writeable, ToXContentObject, RefCounted {
private static final TransportVersion DOC_FIELDS_AS_LIST = TransportVersion.fromName("doc_fields_as_list");
private final transient int docId;
static final float DEFAULT_SCORE = Float.NaN;
private float score;
static final int NO_RANK = -1;
private int rank;
private final Text id;
private final NestedIdentity nestedIdentity;
private long version;
private long seqNo;
private long primaryTerm;
private BytesReference source;
private final Map<String, DocumentField> documentFields;
private final Map<String, DocumentField> metaFields;
private Map<String, HighlightField> highlightFields;
private SearchSortValues sortValues;
private Map<String, Float> matchedQueries;
private Explanation explanation;
@Nullable
private SearchShardTarget shard;
// These two fields normally get set when setting the shard target, so they hold the same values as the target thus don't get
// serialized over the wire. When parsing hits back from xcontent though, in most of the cases (whenever explanation is disabled)
// we can't rebuild the shard target object so we need to set these manually for users retrieval.
private transient String index;
private transient String clusterAlias;
// For asserting that the method #getSourceAsMap is called just once on the lifetime of this object
private boolean sourceAsMapCalled = false;
private Map<String, SearchHits> innerHits;
private final RefCounted refCounted;
public SearchHit(int docId) {
this(docId, null);
}
public SearchHit(int docId, String id) {
this(docId, id, null);
}
public SearchHit(int nestedTopDocId, String id, NestedIdentity nestedIdentity) {
this(nestedTopDocId, id, nestedIdentity, null);
}
private SearchHit(int nestedTopDocId, String id, NestedIdentity nestedIdentity, @Nullable RefCounted refCounted) {
this(
nestedTopDocId,
DEFAULT_SCORE,
NO_RANK,
id == null ? null : new Text(id),
nestedIdentity,
-1,
SequenceNumbers.UNASSIGNED_SEQ_NO,
SequenceNumbers.UNASSIGNED_PRIMARY_TERM,
null,
null,
SearchSortValues.EMPTY,
Collections.emptyMap(),
null,
null,
null,
null,
null,
new HashMap<>(),
new HashMap<>(),
refCounted
);
}
public SearchHit(
int docId,
float score,
int rank,
Text id,
NestedIdentity nestedIdentity,
long version,
long seqNo,
long primaryTerm,
BytesReference source,
Map<String, HighlightField> highlightFields,
SearchSortValues sortValues,
Map<String, Float> matchedQueries,
Explanation explanation,
SearchShardTarget shard,
String index,
String clusterAlias,
Map<String, SearchHits> innerHits,
Map<String, DocumentField> documentFields,
Map<String, DocumentField> metaFields,
@Nullable RefCounted refCounted
) {
this.docId = docId;
this.score = score;
this.rank = rank;
this.id = id;
this.nestedIdentity = nestedIdentity;
this.version = version;
this.seqNo = seqNo;
this.primaryTerm = primaryTerm;
this.source = source;
this.highlightFields = highlightFields;
this.sortValues = sortValues;
this.matchedQueries = matchedQueries;
this.explanation = explanation;
this.shard = shard;
this.index = index;
this.clusterAlias = clusterAlias;
this.innerHits = innerHits;
this.documentFields = documentFields;
this.metaFields = metaFields;
this.refCounted = refCounted == null ? LeakTracker.wrap(new SimpleRefCounted()) : refCounted;
}
public static SearchHit readFrom(StreamInput in, boolean pooled) throws IOException {
final float score = in.readFloat();
final int rank;
if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
rank = in.readVInt();
} else {
rank = NO_RANK;
}
final Text id = in.readOptionalText();
if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) {
in.readOptionalText();
}
final NestedIdentity nestedIdentity = in.readOptionalWriteable(NestedIdentity::new);
final long version = in.readLong();
final long seqNo = in.readZLong();
final long primaryTerm = in.readVLong();
BytesReference source = pooled ? in.readReleasableBytesReference() : in.readBytesReference();
if (source.length() == 0) {
source = null;
}
Explanation explanation = null;
if (in.readBoolean()) {
explanation = readExplanation(in);
}
final Map<String, DocumentField> documentFields;
final Map<String, DocumentField> metaFields;
if (in.getTransportVersion().supports(DOC_FIELDS_AS_LIST)) {
documentFields = DocumentField.readFieldsFromMapValues(in);
metaFields = DocumentField.readFieldsFromMapValues(in);
} else {
documentFields = in.readMap(DocumentField::new);
metaFields = in.readMap(DocumentField::new);
}
Map<String, HighlightField> highlightFields = in.readMapValues(HighlightField::new, HighlightField::name);
highlightFields = highlightFields.isEmpty() ? null : unmodifiableMap(highlightFields);
final SearchSortValues sortValues = SearchSortValues.readFrom(in);
final Map<String, Float> matchedQueries;
if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
matchedQueries = in.readOrderedMap(StreamInput::readString, StreamInput::readFloat);
} else {
int size = in.readVInt();
matchedQueries = Maps.newLinkedHashMapWithExpectedSize(size);
for (int i = 0; i < size; i++) {
matchedQueries.put(in.readString(), Float.NaN);
}
}
final SearchShardTarget shardTarget = in.readOptionalWriteable(SearchShardTarget::new);
final String index;
final String clusterAlias;
if (shardTarget == null) {
index = null;
clusterAlias = null;
} else {
index = shardTarget.getIndex();
clusterAlias = shardTarget.getClusterAlias();
}
boolean isPooled = pooled && source != null;
final Map<String, SearchHits> innerHits;
int size = in.readVInt();
if (size > 0) {
innerHits = Maps.newMapWithExpectedSize(size);
for (int i = 0; i < size; i++) {
var key = in.readString();
var nestedHits = SearchHits.readFrom(in, pooled);
innerHits.put(key, nestedHits);
isPooled = isPooled || nestedHits.isPooled();
}
} else {
innerHits = null;
}
return new SearchHit(
-1,
score,
rank,
id,
nestedIdentity,
version,
seqNo,
primaryTerm,
source,
highlightFields,
sortValues,
matchedQueries,
explanation,
shardTarget,
index,
clusterAlias,
innerHits,
documentFields,
metaFields,
isPooled ? null : ALWAYS_REFERENCED
);
}
public static SearchHit unpooled(int docId) {
return unpooled(docId, null);
}
public static SearchHit unpooled(int docId, String id) {
return unpooled(docId, id, null);
}
public static SearchHit unpooled(int nestedTopDocId, String id, NestedIdentity nestedIdentity) {
// always referenced search hits do NOT call #deallocate
return new SearchHit(nestedTopDocId, id, nestedIdentity, ALWAYS_REFERENCED);
}
private static final Text SINGLE_MAPPING_TYPE = new Text(MapperService.SINGLE_MAPPING_NAME);
@Override
public void writeTo(StreamOutput out) throws IOException {
assert hasReferences();
out.writeFloat(score);
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
out.writeVInt(rank);
} else if (rank != NO_RANK) {
throw new IllegalArgumentException("cannot serialize [rank] to version [" + out.getTransportVersion().toReleaseVersion() + "]");
}
out.writeOptionalText(id);
if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) {
out.writeOptionalText(SINGLE_MAPPING_TYPE);
}
out.writeOptionalWriteable(nestedIdentity);
out.writeLong(version);
out.writeZLong(seqNo);
out.writeVLong(primaryTerm);
out.writeBytesReference(source);
if (explanation == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
writeExplanation(out, explanation);
}
if (out.getTransportVersion().supports(DOC_FIELDS_AS_LIST)) {
out.writeMapValues(documentFields);
out.writeMapValues(metaFields);
} else {
out.writeMap(documentFields, StreamOutput::writeWriteable);
out.writeMap(metaFields, StreamOutput::writeWriteable);
}
if (highlightFields == null) {
out.writeVInt(0);
} else {
out.writeCollection(highlightFields.values());
}
sortValues.writeTo(out);
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
out.writeMap(matchedQueries, StreamOutput::writeFloat);
} else {
out.writeStringCollection(matchedQueries.keySet());
}
out.writeOptionalWriteable(shard);
if (innerHits == null) {
out.writeVInt(0);
} else {
out.writeMap(innerHits, StreamOutput::writeWriteable);
}
}
public int docId() {
return this.docId;
}
public void score(float score) {
this.score = score;
}
/**
* The score.
*/
public float getScore() {
return this.score;
}
public void setRank(int rank) {
this.rank = rank;
}
public int getRank() {
return this.rank;
}
public void version(long version) {
this.version = version;
}
/**
* The version of the hit.
*/
public long getVersion() {
return this.version;
}
public void setSeqNo(long seqNo) {
this.seqNo = seqNo;
}
public void setPrimaryTerm(long primaryTerm) {
this.primaryTerm = primaryTerm;
}
/**
* returns the sequence number of the last modification to the document, or {@link SequenceNumbers#UNASSIGNED_SEQ_NO}
* if not requested.
**/
public long getSeqNo() {
return this.seqNo;
}
/**
* returns the primary term of the last modification to the document, or {@link SequenceNumbers#UNASSIGNED_PRIMARY_TERM}
* if not requested. */
public long getPrimaryTerm() {
return this.primaryTerm;
}
/**
* The index of the hit.
*/
public String getIndex() {
return this.index;
}
/**
* The id of the document.
*/
public String getId() {
return id != null ? id.string() : null;
}
/**
* If this is a nested hit then nested reference information is returned otherwise <code>null</code> is returned.
*/
public NestedIdentity getNestedIdentity() {
return nestedIdentity;
}
/**
* Returns bytes reference, also uncompress the source if needed.
*/
public BytesReference getSourceRef() {
assert hasReferences();
if (this.source == null) {
return null;
}
try {
this.source = CompressorFactory.uncompressIfNeeded(this.source);
return this.source;
} catch (IOException e) {
throw new ElasticsearchParseException("failed to decompress source", e);
}
}
/**
* Sets representation, might be compressed....
*/
public SearchHit sourceRef(BytesReference source) {
this.source = source;
return this;
}
/**
* Is the source available or not. A source with no fields will return true. This will return false if {@code fields} doesn't contain
* {@code _source} or if source is disabled in the mapping.
*/
public boolean hasSource() {
assert hasReferences();
return source != null;
}
/**
* The source of the document as string (can be {@code null}).
*/
public String getSourceAsString() {
assert hasReferences();
if (source == null) {
return null;
}
try {
return XContentHelper.convertToJson(getSourceRef(), false);
} catch (IOException e) {
throw new ElasticsearchParseException("failed to convert source to a json string");
}
}
/**
* The source of the document as a map (can be {@code null}). This method is expected
* to be called at most once during the lifetime of the object as the generated map
* is expensive to generate and it does not get cache.
*/
public Map<String, Object> getSourceAsMap() {
assert hasReferences();
assert sourceAsMapCalled == false : "getSourceAsMap() called twice";
sourceAsMapCalled = true;
if (source == null) {
return null;
}
return Source.fromBytes(source).source();
}
/**
* The hit field matching the given field name.
*/
public DocumentField field(String fieldName) {
assert hasReferences();
DocumentField result = documentFields.get(fieldName);
if (result != null) {
return result;
} else {
return metaFields.get(fieldName);
}
}
/*
* Adds a new DocumentField to the map in case both parameters are not null.
* */
public void setDocumentField(DocumentField field) {
if (field == null) return;
this.documentFields.put(field.getName(), field);
}
public void addDocumentFields(Map<String, DocumentField> docFields, Map<String, DocumentField> metaFields) {
this.documentFields.putAll(docFields);
this.metaFields.putAll(metaFields);
}
public DocumentField removeDocumentField(String field) {
return documentFields.remove(field);
}
/**
* @return a map of metadata fields for this hit
*/
public Map<String, DocumentField> getMetadataFields() {
assert hasReferences();
return Collections.unmodifiableMap(metaFields);
}
/**
* @return a map of non-metadata fields requested for this hit
*/
public Map<String, DocumentField> getDocumentFields() {
assert hasReferences();
return Collections.unmodifiableMap(documentFields);
}
/**
* A map of hit fields (from field name to hit fields) if additional fields
* were required to be loaded. Includes both document and metadata fields.
*/
public Map<String, DocumentField> getFields() {
assert hasReferences();
if (metaFields.size() > 0 || documentFields.size() > 0) {
final Map<String, DocumentField> fields = new HashMap<>();
fields.putAll(metaFields);
fields.putAll(documentFields);
return fields;
} else {
return emptyMap();
}
}
/**
* Whether this search hit has any lookup fields
*/
public boolean hasLookupFields() {
return getDocumentFields().values().stream().anyMatch(doc -> doc.getLookupFields().isEmpty() == false);
}
/**
* Resolve the lookup fields with the given results and merge them as regular fetch fields.
*/
public void resolveLookupFields(Map<LookupField, List<Object>> lookupResults) {
assert hasReferences();
if (lookupResults.isEmpty()) {
return;
}
for (Iterator<Map.Entry<String, DocumentField>> iterator = documentFields.entrySet().iterator(); iterator.hasNext();) {
Map.Entry<String, DocumentField> entry = iterator.next();
final DocumentField docField = entry.getValue();
if (docField.getLookupFields().isEmpty()) {
continue;
}
final List<Object> newValues = new ArrayList<>(docField.getValues());
for (LookupField lookupField : docField.getLookupFields()) {
final List<Object> resolvedValues = lookupResults.get(lookupField);
if (resolvedValues != null) {
newValues.addAll(resolvedValues);
}
}
if (newValues.isEmpty() && docField.getIgnoredValues().isEmpty()) {
iterator.remove();
} else {
entry.setValue(new DocumentField(docField.getName(), newValues, docField.getIgnoredValues()));
}
}
assert hasLookupFields() == false : "Some lookup fields are not resolved";
}
/**
* A map of highlighted fields.
*/
public Map<String, HighlightField> getHighlightFields() {
assert hasReferences();
return highlightFields == null ? emptyMap() : highlightFields;
}
public void highlightFields(Map<String, HighlightField> highlightFields) {
this.highlightFields = highlightFields;
}
public void sortValues(Object[] sortValues, DocValueFormat[] sortValueFormats) {
sortValues(new SearchSortValues(sortValues, sortValueFormats));
}
public void sortValues(SearchSortValues sortValues) {
this.sortValues = sortValues;
}
/**
* An array of the (formatted) sort values used.
*/
public Object[] getSortValues() {
return sortValues.getFormattedSortValues();
}
/**
* An array of the (raw) sort values used.
*/
public Object[] getRawSortValues() {
return sortValues.getRawSortValues();
}
/**
* If enabled, the explanation of the search hit.
*/
public Explanation getExplanation() {
return explanation;
}
public void explanation(Explanation explanation) {
this.explanation = explanation;
}
/**
* The shard of the search hit.
*/
public SearchShardTarget getShard() {
return shard;
}
public void shard(SearchShardTarget target) {
if (innerHits != null) {
for (SearchHits innerHits : innerHits.values()) {
for (SearchHit innerHit : innerHits) {
innerHit.shard(target);
}
}
}
this.shard = target;
if (target != null) {
this.index = target.getIndex();
this.clusterAlias = target.getClusterAlias();
}
}
/**
* Returns the cluster alias this hit comes from or null if it comes from a local cluster
*/
public String getClusterAlias() {
return clusterAlias;
}
public void matchedQueries(Map<String, Float> matchedQueries) {
this.matchedQueries = matchedQueries;
}
/**
* The set of query and filter names the query matched with. Mainly makes sense for compound filters and queries.
*/
public String[] getMatchedQueries() {
return matchedQueries == null ? new String[0] : matchedQueries.keySet().toArray(new String[0]);
}
/**
* @return The score of the provided named query if it matches, {@code null} otherwise.
*/
public Float getMatchedQueryScore(String name) {
return getMatchedQueriesAndScores().get(name);
}
/**
* @return The map of the named queries that matched and their associated score.
*/
public Map<String, Float> getMatchedQueriesAndScores() {
return matchedQueries == null ? Collections.emptyMap() : matchedQueries;
}
/**
* @return Inner hits or <code>null</code> if there are none
*/
public Map<String, SearchHits> getInnerHits() {
assert hasReferences();
return innerHits;
}
public void setInnerHits(Map<String, SearchHits> innerHits) {
assert innerHits == null || innerHits.values().stream().noneMatch(h -> h.hasReferences() == false);
assert this.innerHits == null;
this.innerHits = innerHits;
}
@Override
public void incRef() {
refCounted.incRef();
}
@Override
public boolean tryIncRef() {
return refCounted.tryIncRef();
}
@Override
public boolean decRef() {
if (refCounted.decRef()) {
deallocate();
return true;
}
return false;
}
private void deallocate() {
if (SearchHit.this.innerHits != null) {
for (SearchHits h : SearchHit.this.innerHits.values()) {
h.decRef();
}
SearchHit.this.innerHits = null;
}
if (SearchHit.this.source instanceof RefCounted r) {
r.decRef();
}
SearchHit.this.source = null;
clearIfMutable(documentFields);
clearIfMutable(metaFields);
this.highlightFields = null;
}
private static void clearIfMutable(Map<String, DocumentField> fields) {
// check that we're dealing with a HashMap, instances read from the wire that are empty be of an immutable type
assert fields instanceof HashMap<?, ?> || fields.isEmpty() : fields;
if (fields instanceof HashMap<?, ?> hm) {
hm.clear();
}
}
@Override
public boolean hasReferences() {
return refCounted.hasReferences();
}
public SearchHit asUnpooled() {
assert hasReferences();
if (isPooled() == false) {
return this;
}
return new SearchHit(
docId,
score,
rank,
id,
nestedIdentity,
version,
seqNo,
primaryTerm,
source instanceof RefCounted ? new BytesArray(source.toBytesRef(), true) : source,
highlightFields,
sortValues,
matchedQueries,
explanation,
shard,
index,
clusterAlias,
innerHits == null
? null
: innerHits.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().asUnpooled())),
cloneIfHashMap(documentFields),
cloneIfHashMap(metaFields),
ALWAYS_REFERENCED
);
}
private Map<String, DocumentField> cloneIfHashMap(Map<String, DocumentField> map) {
return map instanceof HashMap<String, DocumentField> hashMap ? new HashMap<>(hashMap) : map;
}
public boolean isPooled() {
return refCounted != ALWAYS_REFERENCED;
}
public static | SearchHit |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/StompEndpointBuilderFactory.java | {
"start": 17846,
"end": 21648
} | interface ____ extends EndpointProducerBuilder {
default StompEndpointProducerBuilder basic() {
return (StompEndpointProducerBuilder) this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedStompEndpointProducerBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedStompEndpointProducerBuilder lazyStartProducer(String lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* To use a custom HeaderFilterStrategy to filter header to and from
* Camel message.
*
* The option is a:
* <code>org.apache.camel.spi.HeaderFilterStrategy</code> type.
*
* Group: advanced
*
* @param headerFilterStrategy the value to set
* @return the dsl builder
*/
default AdvancedStompEndpointProducerBuilder headerFilterStrategy(org.apache.camel.spi.HeaderFilterStrategy headerFilterStrategy) {
doSetProperty("headerFilterStrategy", headerFilterStrategy);
return this;
}
/**
* To use a custom HeaderFilterStrategy to filter header to and from
* Camel message.
*
* The option will be converted to a
* <code>org.apache.camel.spi.HeaderFilterStrategy</code> type.
*
* Group: advanced
*
* @param headerFilterStrategy the value to set
* @return the dsl builder
*/
default AdvancedStompEndpointProducerBuilder headerFilterStrategy(String headerFilterStrategy) {
doSetProperty("headerFilterStrategy", headerFilterStrategy);
return this;
}
}
/**
* Builder for endpoint for the Stomp component.
*/
public | AdvancedStompEndpointProducerBuilder |
java | google__dagger | javatests/dagger/internal/codegen/LazyClassKeyMapBindingComponentProcessorTest.java | {
"start": 8345,
"end": 8875
} | interface ____ {",
" @Provides @IntoMap @LazyClassKey(test.Foo_Bar.class)",
" static int classKey() { return 1; }",
"}");
Source componentFile =
CompilerTests.javaSource(
"test.TestComponent",
"package test;",
"",
"import dagger.Component;",
"import javax.inject.Provider;",
"import java.util.Map;",
"",
"@Component(modules = MapKeyBindingsModule.class)",
" | MapKeyBindingsModule |
java | apache__kafka | streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapterTest.java | {
"start": 2920,
"end": 3287
} | interface ____ if we upgrade {@link RocksDB}.
* Using reflections, we make sure the {@link RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter} maps all
* methods from {@link DBOptions} and {@link ColumnFamilyOptions} to/from {@link Options} correctly.
*/
@ExtendWith(MockitoExtension.class)
@MockitoSettings(strictness = Strictness.STRICT_STUBS)
public | changes |
java | grpc__grpc-java | xds/src/main/java/io/grpc/xds/WeightedRoundRobinLoadBalancer.java | {
"start": 17837,
"end": 22851
} | class ____ extends SubchannelPicker {
// Parallel lists (column-based storage instead of normal row-based storage of List<Struct>).
// The ith element of children corresponds to the ith element of pickers, listeners, and even
// updateWeight(float[]).
private final List<ChildLbState> children; // May only be accessed from sync context
private final List<SubchannelPicker> pickers;
private final List<OrcaPerRequestReportListener> reportListeners;
private final boolean enableOobLoadReport;
private final float errorUtilizationPenalty;
private final AtomicInteger sequence;
private final int hashCode;
private volatile StaticStrideScheduler scheduler;
WeightedRoundRobinPicker(List<ChildLbState> children, boolean enableOobLoadReport,
float errorUtilizationPenalty, AtomicInteger sequence) {
checkNotNull(children, "children");
Preconditions.checkArgument(!children.isEmpty(), "empty child list");
this.children = children;
List<SubchannelPicker> pickers = new ArrayList<>(children.size());
List<OrcaPerRequestReportListener> reportListeners = new ArrayList<>(children.size());
for (ChildLbState child : children) {
WeightedChildLbState wChild = (WeightedChildLbState) child;
pickers.add(wChild.getCurrentPicker());
reportListeners.add(wChild.getOrCreateOrcaListener(errorUtilizationPenalty));
}
this.pickers = pickers;
this.reportListeners = reportListeners;
this.enableOobLoadReport = enableOobLoadReport;
this.errorUtilizationPenalty = errorUtilizationPenalty;
this.sequence = checkNotNull(sequence, "sequence");
// For equality we treat pickers as a set; use hash code as defined by Set
int sum = 0;
for (SubchannelPicker picker : pickers) {
sum += picker.hashCode();
}
this.hashCode = sum
^ Boolean.hashCode(enableOobLoadReport)
^ Float.hashCode(errorUtilizationPenalty);
}
@Override
public PickResult pickSubchannel(PickSubchannelArgs args) {
int pick = scheduler.pick();
PickResult pickResult = pickers.get(pick).pickSubchannel(args);
Subchannel subchannel = pickResult.getSubchannel();
if (subchannel == null) {
return pickResult;
}
if (!enableOobLoadReport) {
return PickResult.withSubchannel(subchannel,
OrcaPerRequestUtil.getInstance().newOrcaClientStreamTracerFactory(
reportListeners.get(pick)));
} else {
return PickResult.withSubchannel(subchannel);
}
}
/** Returns {@code true} if weights are different than round_robin. */
private boolean updateWeight(float[] newWeights) {
this.scheduler = new StaticStrideScheduler(newWeights, sequence);
return !this.scheduler.usesRoundRobin();
}
@Override
public String toString() {
return MoreObjects.toStringHelper(WeightedRoundRobinPicker.class)
.add("enableOobLoadReport", enableOobLoadReport)
.add("errorUtilizationPenalty", errorUtilizationPenalty)
.add("pickers", pickers)
.toString();
}
@VisibleForTesting
List<ChildLbState> getChildren() {
return children;
}
@Override
public int hashCode() {
return hashCode;
}
@Override
public boolean equals(Object o) {
if (!(o instanceof WeightedRoundRobinPicker)) {
return false;
}
WeightedRoundRobinPicker other = (WeightedRoundRobinPicker) o;
if (other == this) {
return true;
}
// the lists cannot contain duplicate subchannels
return hashCode == other.hashCode
&& sequence == other.sequence
&& enableOobLoadReport == other.enableOobLoadReport
&& Float.compare(errorUtilizationPenalty, other.errorUtilizationPenalty) == 0
&& pickers.size() == other.pickers.size()
&& new HashSet<>(pickers).containsAll(other.pickers);
}
}
/*
* The Static Stride Scheduler is an implementation of an earliest deadline first (EDF) scheduler
* in which each object's deadline is the multiplicative inverse of the object's weight.
* <p>
* The way in which this is implemented is through a static stride scheduler.
* The Static Stride Scheduler works by iterating through the list of subchannel weights
* and using modular arithmetic to proportionally distribute picks, favoring entries
* with higher weights. It is based on the observation that the intended sequence generated
* from an EDF scheduler is a periodic one that can be achieved through modular arithmetic.
* The Static Stride Scheduler is more performant than other implementations of the EDF
* Scheduler, as it removes the need for a priority queue (and thus mutex locks).
* <p>
* go/static-stride-scheduler
* <p>
*
* <ul>
* <li>nextSequence() - O(1)
* <li>pick() - O(n)
*/
@VisibleForTesting
static final | WeightedRoundRobinPicker |
java | spring-projects__spring-framework | spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractBeanFactory.java | {
"start": 61280,
"end": 61402
} | class ____ expression result: " + evaluated);
}
}
if (freshResolve) {
// When resolving against a temporary | name |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/types/logical/utils/LogicalTypeMerging.java | {
"start": 6592,
"end": 8892
} | class ____ {
// mappings for interval generalization
private static final Map<YearMonthResolution, List<YearMonthResolution>>
YEAR_MONTH_RES_TO_BOUNDARIES = new HashMap<>();
private static final Map<List<YearMonthResolution>, YearMonthResolution>
YEAR_MONTH_BOUNDARIES_TO_RES = new HashMap<>();
private static final int MINIMUM_ADJUSTED_SCALE = 6;
static {
addYearMonthMapping(YEAR, YEAR);
addYearMonthMapping(MONTH, MONTH);
addYearMonthMapping(YEAR_TO_MONTH, YEAR, MONTH);
}
private static final Map<DayTimeResolution, List<DayTimeResolution>>
DAY_TIME_RES_TO_BOUNDARIES = new HashMap<>();
private static final Map<List<DayTimeResolution>, DayTimeResolution>
DAY_TIME_BOUNDARIES_TO_RES = new HashMap<>();
static {
addDayTimeMapping(DAY, DAY);
addDayTimeMapping(DAY_TO_HOUR, DAY, HOUR);
addDayTimeMapping(DAY_TO_MINUTE, DAY, MINUTE);
addDayTimeMapping(DAY_TO_SECOND, DAY, SECOND);
addDayTimeMapping(HOUR, HOUR);
addDayTimeMapping(HOUR_TO_MINUTE, HOUR, MINUTE);
addDayTimeMapping(HOUR_TO_SECOND, HOUR, SECOND);
addDayTimeMapping(MINUTE, MINUTE);
addDayTimeMapping(MINUTE_TO_SECOND, MINUTE, SECOND);
addDayTimeMapping(SECOND, SECOND);
}
private static void addYearMonthMapping(
YearMonthResolution to, YearMonthResolution... boundaries) {
final List<YearMonthResolution> boundariesList = Arrays.asList(boundaries);
YEAR_MONTH_RES_TO_BOUNDARIES.put(to, boundariesList);
YEAR_MONTH_BOUNDARIES_TO_RES.put(boundariesList, to);
}
private static void addDayTimeMapping(DayTimeResolution to, DayTimeResolution... boundaries) {
final List<DayTimeResolution> boundariesList = Arrays.asList(boundaries);
DAY_TIME_RES_TO_BOUNDARIES.put(to, boundariesList);
DAY_TIME_BOUNDARIES_TO_RES.put(boundariesList, to);
}
/**
* Returns the most common, more general {@link LogicalType} for a given set of types. If such a
* type exists, all given types can be casted to this more general type.
*
* <p>For example: {@code [INT, BIGINT, DECIMAL(2, 2)]} would lead to {@code DECIMAL(21, 2)}.
*
* <p>This | LogicalTypeMerging |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/bean/override/mockito/MockitoSpyBeanByNameLookupTestMethodScopedExtensionContextIntegrationTests.java | {
"start": 1769,
"end": 2221
} | class ____ {
@MockitoSpyBean("field1")
ExampleService field;
@Test
void fieldHasOverride(ApplicationContext ctx) {
assertThat(ctx.getBean("field1"))
.isInstanceOf(ExampleService.class)
.satisfies(MockitoAssertions::assertIsSpy)
.isSameAs(field);
assertThat(field.greeting()).isEqualTo("bean1");
}
@Nested
@DisplayName("With @MockitoSpyBean in enclosing | MockitoSpyBeanByNameLookupTestMethodScopedExtensionContextIntegrationTests |
java | google__auto | factory/src/it/functional/src/main/java/com/google/auto/factory/QualifiedDependencyImpl.java | {
"start": 660,
"end": 757
} | class ____ implements Dependency {
@Inject
QualifiedDependencyImpl() {}
}
| QualifiedDependencyImpl |
java | apache__camel | components/camel-jcr/src/test/java/org/apache/camel/component/jcr/JcrRouteDifferentWorkspaceTestSupport.java | {
"start": 1389,
"end": 2859
} | class ____ extends CamelTestSupport {
protected static final String CONFIG_FILE = "target/test-classes/repository-simple-security.xml";
protected static final String REPO_PATH = "target/repository-simple-security";
protected static final String CUSTOM_WORKSPACE_NAME = "testWorkspace";
private Repository repository;
@BeforeEach
public void cleanupDirectory() {
deleteDirectory(REPO_PATH);
}
@BeforeEach
protected void setupBeans() throws Exception {
Session session = getRepository().login(new SimpleCredentials("user", "pass".toCharArray()));
Workspace workspace = session.getWorkspace();
workspace.createWorkspace(CUSTOM_WORKSPACE_NAME);
session.save();
session.logout();
}
protected Repository getRepository() {
return repository;
}
protected Session openSession(String workspaceName) throws RepositoryException {
return getRepository().login(new SimpleCredentials("user", "pass".toCharArray()), workspaceName);
}
@Override
protected void bindToRegistry(Registry registry) throws Exception {
File config = new File(CONFIG_FILE);
if (!config.exists()) {
throw new FileNotFoundException("Missing config file: " + config.getPath());
}
repository = new TransientRepository(CONFIG_FILE, REPO_PATH);
registry.bind("repository", repository);
}
}
| JcrRouteDifferentWorkspaceTestSupport |
java | spring-projects__spring-security | oauth2/oauth2-authorization-server/src/main/java/org/springframework/security/oauth2/server/authorization/authentication/JwtClientAssertionDecoderFactory.java | {
"start": 3396,
"end": 10038
} | class ____ implements JwtDecoderFactory<RegisteredClient> {
/**
* The default {@code OAuth2TokenValidator<Jwt>} factory that validates the
* {@link JwtClaimNames#ISS iss}, {@link JwtClaimNames#SUB sub},
* {@link JwtClaimNames#AUD aud}, {@link JwtClaimNames#EXP exp} and
* {@link JwtClaimNames#NBF nbf} claims of the {@link Jwt} for the specified
* {@link RegisteredClient}.
*/
public static final Function<RegisteredClient, OAuth2TokenValidator<Jwt>> DEFAULT_JWT_VALIDATOR_FACTORY = defaultJwtValidatorFactory();
private static final String JWT_CLIENT_AUTHENTICATION_ERROR_URI = "https://datatracker.ietf.org/doc/html/rfc7523#section-3";
private static final Map<JwsAlgorithm, String> JCA_ALGORITHM_MAPPINGS;
static {
Map<JwsAlgorithm, String> mappings = new HashMap<>();
mappings.put(MacAlgorithm.HS256, "HmacSHA256");
mappings.put(MacAlgorithm.HS384, "HmacSHA384");
mappings.put(MacAlgorithm.HS512, "HmacSHA512");
JCA_ALGORITHM_MAPPINGS = Collections.unmodifiableMap(mappings);
}
private static final RestTemplate restTemplate = new RestTemplate();
static {
SimpleClientHttpRequestFactory requestFactory = new SimpleClientHttpRequestFactory();
requestFactory.setConnectTimeout(15_000);
requestFactory.setReadTimeout(15_000);
restTemplate.setRequestFactory(requestFactory);
}
private final Map<String, JwtDecoder> jwtDecoders = new ConcurrentHashMap<>();
private Function<RegisteredClient, OAuth2TokenValidator<Jwt>> jwtValidatorFactory = DEFAULT_JWT_VALIDATOR_FACTORY;
@Override
public JwtDecoder createDecoder(RegisteredClient registeredClient) {
Assert.notNull(registeredClient, "registeredClient cannot be null");
return this.jwtDecoders.computeIfAbsent(registeredClient.getId(), (key) -> {
NimbusJwtDecoder jwtDecoder = buildDecoder(registeredClient);
jwtDecoder.setJwtValidator(this.jwtValidatorFactory.apply(registeredClient));
return jwtDecoder;
});
}
/**
* Sets the factory that provides an {@link OAuth2TokenValidator} for the specified
* {@link RegisteredClient} and is used by the {@link JwtDecoder}. The default
* {@code OAuth2TokenValidator<Jwt>} factory is
* {@link #DEFAULT_JWT_VALIDATOR_FACTORY}.
* @param jwtValidatorFactory the factory that provides an
* {@link OAuth2TokenValidator} for the specified {@link RegisteredClient}
*/
public void setJwtValidatorFactory(Function<RegisteredClient, OAuth2TokenValidator<Jwt>> jwtValidatorFactory) {
Assert.notNull(jwtValidatorFactory, "jwtValidatorFactory cannot be null");
this.jwtValidatorFactory = jwtValidatorFactory;
}
private static NimbusJwtDecoder buildDecoder(RegisteredClient registeredClient) {
JwsAlgorithm jwsAlgorithm = registeredClient.getClientSettings()
.getTokenEndpointAuthenticationSigningAlgorithm();
if (jwsAlgorithm instanceof SignatureAlgorithm) {
String jwkSetUrl = registeredClient.getClientSettings().getJwkSetUrl();
if (!StringUtils.hasText(jwkSetUrl)) {
OAuth2Error oauth2Error = new OAuth2Error(OAuth2ErrorCodes.INVALID_CLIENT,
"Failed to find a Signature Verifier for Client: '" + registeredClient.getId()
+ "'. Check to ensure you have configured the JWK Set URL.",
JWT_CLIENT_AUTHENTICATION_ERROR_URI);
throw new OAuth2AuthenticationException(oauth2Error);
}
return NimbusJwtDecoder.withJwkSetUri(jwkSetUrl)
.jwsAlgorithm((SignatureAlgorithm) jwsAlgorithm)
.restOperations(restTemplate)
.build();
}
if (jwsAlgorithm instanceof MacAlgorithm) {
String clientSecret = registeredClient.getClientSecret();
if (!StringUtils.hasText(clientSecret)) {
OAuth2Error oauth2Error = new OAuth2Error(OAuth2ErrorCodes.INVALID_CLIENT,
"Failed to find a Signature Verifier for Client: '" + registeredClient.getId()
+ "'. Check to ensure you have configured the client secret.",
JWT_CLIENT_AUTHENTICATION_ERROR_URI);
throw new OAuth2AuthenticationException(oauth2Error);
}
SecretKeySpec secretKeySpec = new SecretKeySpec(clientSecret.getBytes(StandardCharsets.UTF_8),
JCA_ALGORITHM_MAPPINGS.get(jwsAlgorithm));
return NimbusJwtDecoder.withSecretKey(secretKeySpec).macAlgorithm((MacAlgorithm) jwsAlgorithm).build();
}
OAuth2Error oauth2Error = new OAuth2Error(OAuth2ErrorCodes.INVALID_CLIENT,
"Failed to find a Signature Verifier for Client: '" + registeredClient.getId()
+ "'. Check to ensure you have configured a valid JWS Algorithm: '" + jwsAlgorithm + "'.",
JWT_CLIENT_AUTHENTICATION_ERROR_URI);
throw new OAuth2AuthenticationException(oauth2Error);
}
private static Function<RegisteredClient, OAuth2TokenValidator<Jwt>> defaultJwtValidatorFactory() {
return (registeredClient) -> {
String clientId = registeredClient.getClientId();
return new DelegatingOAuth2TokenValidator<>(new JwtClaimValidator<>(JwtClaimNames.ISS, clientId::equals),
new JwtClaimValidator<>(JwtClaimNames.SUB, clientId::equals),
new JwtClaimValidator<>(JwtClaimNames.AUD, containsAudience()),
new JwtClaimValidator<>(JwtClaimNames.EXP, Objects::nonNull), new JwtTimestampValidator());
};
}
private static Predicate<List<String>> containsAudience() {
return (audienceClaim) -> {
if (CollectionUtils.isEmpty(audienceClaim)) {
return false;
}
List<String> audienceList = getAudience();
for (String audience : audienceClaim) {
if (audienceList.contains(audience)) {
return true;
}
}
return false;
};
}
private static List<String> getAudience() {
AuthorizationServerContext authorizationServerContext = AuthorizationServerContextHolder.getContext();
if (!StringUtils.hasText(authorizationServerContext.getIssuer())) {
return Collections.emptyList();
}
AuthorizationServerSettings authorizationServerSettings = authorizationServerContext
.getAuthorizationServerSettings();
List<String> audience = new ArrayList<>();
audience.add(authorizationServerContext.getIssuer());
audience.add(asUrl(authorizationServerContext.getIssuer(), authorizationServerSettings.getTokenEndpoint()));
audience.add(asUrl(authorizationServerContext.getIssuer(),
authorizationServerSettings.getTokenIntrospectionEndpoint()));
audience.add(asUrl(authorizationServerContext.getIssuer(),
authorizationServerSettings.getTokenRevocationEndpoint()));
audience.add(asUrl(authorizationServerContext.getIssuer(),
authorizationServerSettings.getPushedAuthorizationRequestEndpoint()));
return audience;
}
private static String asUrl(String issuer, String endpoint) {
return UriComponentsBuilder.fromUriString(issuer).path(endpoint).build().toUriString();
}
}
| JwtClientAssertionDecoderFactory |
java | apache__camel | components/camel-grpc/src/generated/java/org/apache/camel/component/grpc/GrpcComponentConfigurer.java | {
"start": 731,
"end": 2646
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
GrpcComponent target = (GrpcComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": target.setAutowiredEnabled(property(camelContext, boolean.class, value)); return true;
case "bridgeerrorhandler":
case "bridgeErrorHandler": target.setBridgeErrorHandler(property(camelContext, boolean.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": return boolean.class;
case "bridgeerrorhandler":
case "bridgeErrorHandler": return boolean.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
GrpcComponent target = (GrpcComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": return target.isAutowiredEnabled();
case "bridgeerrorhandler":
case "bridgeErrorHandler": return target.isBridgeErrorHandler();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
default: return null;
}
}
}
| GrpcComponentConfigurer |
java | apache__camel | test-infra/camel-test-infra-docling/src/main/java/org/apache/camel/test/infra/docling/common/DoclingProperties.java | {
"start": 870,
"end": 1086
} | class ____ {
public static final String DOCLING_SERVER_URL = "docling.server.url";
public static final String DOCLING_CONTAINER = "docling.container";
private DoclingProperties() {
}
}
| DoclingProperties |
java | elastic__elasticsearch | x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/role/TransportBulkPutRolesAction.java | {
"start": 965,
"end": 1677
} | class ____ extends TransportAction<BulkPutRolesRequest, BulkRolesResponse> {
private final NativeRolesStore rolesStore;
@Inject
public TransportBulkPutRolesAction(ActionFilters actionFilters, NativeRolesStore rolesStore, TransportService transportService) {
super(ActionTypes.BULK_PUT_ROLES.name(), actionFilters, transportService.getTaskManager(), EsExecutors.DIRECT_EXECUTOR_SERVICE);
this.rolesStore = rolesStore;
}
@Override
protected void doExecute(Task task, final BulkPutRolesRequest request, final ActionListener<BulkRolesResponse> listener) {
rolesStore.putRoles(request.getRefreshPolicy(), request.getRoles(), listener);
}
}
| TransportBulkPutRolesAction |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/profile/SearchProfileShardResult.java | {
"start": 1176,
"end": 3767
} | class ____ implements Writeable, ToXContentFragment {
private final SearchProfileQueryPhaseResult queryPhase;
private final ProfileResult fetchPhase;
public SearchProfileShardResult(SearchProfileQueryPhaseResult queryPhase, @Nullable ProfileResult fetch) {
this.queryPhase = queryPhase;
this.fetchPhase = fetch;
}
public SearchProfileShardResult(StreamInput in) throws IOException {
queryPhase = new SearchProfileQueryPhaseResult(in);
fetchPhase = in.readOptionalWriteable(ProfileResult::new);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
queryPhase.writeTo(out);
out.writeOptionalWriteable(fetchPhase);
}
public SearchProfileDfsPhaseResult getSearchProfileDfsPhaseResult() {
return queryPhase.getSearchProfileDfsPhaseResult();
}
public SearchProfileQueryPhaseResult getQueryPhase() {
return queryPhase;
}
public ProfileResult getFetchPhase() {
return fetchPhase;
}
public List<QueryProfileShardResult> getQueryProfileResults() {
return queryPhase.getQueryProfileResults();
}
public AggregationProfileShardResult getAggregationProfileResults() {
return queryPhase.getAggregationProfileResults();
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
if (getSearchProfileDfsPhaseResult() != null) {
builder.field("dfs");
getSearchProfileDfsPhaseResult().toXContent(builder, params);
}
builder.startArray("searches");
for (QueryProfileShardResult result : queryPhase.getQueryProfileResults()) {
result.toXContent(builder, params);
}
builder.endArray();
queryPhase.getAggregationProfileResults().toXContent(builder, params);
if (fetchPhase != null) {
builder.field("fetch");
fetchPhase.toXContent(builder, params);
}
return builder;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
SearchProfileShardResult that = (SearchProfileShardResult) o;
return Objects.equals(queryPhase, that.queryPhase) && Objects.equals(fetchPhase, that.fetchPhase);
}
@Override
public int hashCode() {
return Objects.hash(queryPhase, fetchPhase);
}
@Override
public String toString() {
return Strings.toString(this);
}
}
| SearchProfileShardResult |
java | junit-team__junit5 | junit-platform-engine/src/main/java/org/junit/platform/engine/UniqueId.java | {
"start": 1110,
"end": 9572
} | class ____ implements Cloneable, Serializable {
@Serial
private static final long serialVersionUID = 1L;
private static final String ENGINE_SEGMENT_TYPE = "engine";
/**
* Parse a {@code UniqueId} from the supplied string representation using the
* default format.
*
* @param uniqueId the string representation to parse; never {@code null} or blank
* @return a properly constructed {@code UniqueId}
* @throws JUnitException if the string cannot be parsed
*/
public static UniqueId parse(String uniqueId) throws JUnitException {
Preconditions.notBlank(uniqueId, "Unique ID string must not be null or blank");
return UniqueIdFormat.getDefault().parse(uniqueId);
}
/**
* Create an engine's unique ID from its {@code engineId} using the default
* format.
*
* <p>The engine ID will be stored in a {@link Segment} with
* {@link Segment#getType type} {@code "engine"}.
*
* @param engineId the engine ID; never {@code null} or blank
* @see #root(String, String)
*/
public static UniqueId forEngine(String engineId) {
Preconditions.notBlank(engineId, "engineId must not be null or blank");
return root(ENGINE_SEGMENT_TYPE, engineId);
}
/**
* Create a root unique ID from the supplied {@code segmentType} and
* {@code value} using the default format.
*
* @param segmentType the segment type; never {@code null} or blank
* @param value the value; never {@code null} or blank
* @see #forEngine(String)
*/
public static UniqueId root(String segmentType, String value) {
return new UniqueId(UniqueIdFormat.getDefault(), new Segment(segmentType, value));
}
private final UniqueIdFormat uniqueIdFormat;
@SuppressWarnings({ "serial", "RedundantSuppression" }) // always used with serializable implementation (List.copyOf())
private final List<Segment> segments;
// lazily computed
private transient int hashCode;
// lazily computed
private transient @Nullable SoftReference<String> toString;
private UniqueId(UniqueIdFormat uniqueIdFormat, Segment segment) {
this(uniqueIdFormat, List.of(segment));
}
/**
* Initialize a {@code UniqueId} instance.
*
* @implNote A defensive copy of the segment list is <b>not</b> created by
* this implementation. All callers should immediately drop the reference
* to the list instance that they pass into this constructor.
*/
UniqueId(UniqueIdFormat uniqueIdFormat, List<Segment> segments) {
this.uniqueIdFormat = uniqueIdFormat;
this.segments = List.copyOf(segments);
}
Optional<Segment> getRoot() {
return this.segments.isEmpty() ? Optional.empty() : Optional.of(this.segments.get(0));
}
/**
* Get the engine ID stored in this {@code UniqueId}, if available.
*
* @see #forEngine(String)
*/
public Optional<String> getEngineId() {
return getRoot().filter(segment -> ENGINE_SEGMENT_TYPE.equals(segment.getType())).map(Segment::getValue);
}
/**
* Get the immutable list of {@linkplain Segment segments} that make up this
* {@code UniqueId}.
*/
public List<Segment> getSegments() {
return this.segments;
}
/**
* Construct a new {@code UniqueId} by appending a new {@link Segment}, based
* on the supplied {@code segmentType} and {@code value}, to the end of this
* {@code UniqueId}.
*
* <p>This {@code UniqueId} will not be modified.
*
* <p>Neither the {@code segmentType} nor the {@code value} may contain any
* of the special characters used for constructing the string representation
* of this {@code UniqueId}.
*
* @param segmentType the type of the segment; never {@code null} or blank
* @param value the value of the segment; never {@code null} or blank
*/
public UniqueId append(String segmentType, String value) {
return append(new Segment(segmentType, value));
}
/**
* Construct a new {@code UniqueId} by appending a new {@link Segment} to
* the end of this {@code UniqueId}.
*
* <p>This {@code UniqueId} will not be modified.
*
* @param segment the segment to be appended; never {@code null}
*
* @since 1.1
*/
@API(status = STABLE, since = "1.1")
public UniqueId append(Segment segment) {
Preconditions.notNull(segment, "segment must not be null");
List<Segment> baseSegments = new ArrayList<>(this.segments.size() + 1);
baseSegments.addAll(this.segments);
baseSegments.add(segment);
return new UniqueId(this.uniqueIdFormat, baseSegments);
}
/**
* Construct a new {@code UniqueId} by appending a new {@link Segment}, based
* on the supplied {@code engineId}, to the end of this {@code UniqueId}.
*
* <p>This {@code UniqueId} will not be modified.
*
* <p>The engine ID will be stored in a {@link Segment} with
* {@link Segment#getType type} {@value #ENGINE_SEGMENT_TYPE}.
*
* @param engineId the engine ID; never {@code null} or blank
*
* @since 1.8
*/
@API(status = STABLE, since = "1.10")
public UniqueId appendEngine(String engineId) {
return append(new Segment(ENGINE_SEGMENT_TYPE, engineId));
}
/**
* Determine if the supplied {@code UniqueId} is a prefix for this
* {@code UniqueId}.
*
* @param potentialPrefix the {@code UniqueId} to be checked; never {@code null}
*
* @since 1.1
*/
@API(status = STABLE, since = "1.1")
public boolean hasPrefix(UniqueId potentialPrefix) {
Preconditions.notNull(potentialPrefix, "potentialPrefix must not be null");
int size = this.segments.size();
int prefixSize = potentialPrefix.segments.size();
return size >= prefixSize && this.segments.subList(0, prefixSize).equals(potentialPrefix.segments);
}
/**
* Construct a new {@code UniqueId} and removing the last {@link Segment} of
* this {@code UniqueId}.
*
* <p>This {@code UniqueId} will not be modified.
*
* @return a new {@code UniqueId}; never {@code null}
* @throws org.junit.platform.commons.PreconditionViolationException
* if this {@code UniqueId} contains a single segment
* @since 1.5
*/
@API(status = STABLE, since = "1.5")
public UniqueId removeLastSegment() {
Preconditions.condition(this.segments.size() > 1, "Cannot remove last remaining segment");
return new UniqueId(uniqueIdFormat, List.copyOf(this.segments.subList(0, this.segments.size() - 1)));
}
/**
* Get the last {@link Segment} of this {@code UniqueId}.
*
* @return the last {@code Segment}; never {@code null}
* @since 1.5
*/
@API(status = STABLE, since = "1.5")
public Segment getLastSegment() {
return this.segments.get(this.segments.size() - 1);
}
@Override
protected Object clone() throws CloneNotSupportedException {
return super.clone();
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
UniqueId that = (UniqueId) o;
return this.segments.equals(that.segments);
}
@Override
public int hashCode() {
int value = this.hashCode;
if (value == 0) {
value = this.segments.hashCode();
if (value == 0) {
// handle the edge case of the computed hashCode being 0
value = 1;
}
// this is a benign race like String#hash
// we potentially read and write values from multiple threads
// without a happens-before relationship
// however the JMM guarantees us that we only ever see values
// that were valid at one point, either 0 or the hash code
// so we might end up not seeing a value that a different thread
// has computed or multiple threads writing the same value
this.hashCode = value;
}
return value;
}
/**
* Generate the unique, formatted string representation of this {@code UniqueId}
* using the configured {@link UniqueIdFormat}.
*/
@Override
public String toString() {
SoftReference<String> s = this.toString;
String value = s == null ? null : s.get();
if (value == null) {
value = this.uniqueIdFormat.format(this);
// this is a benign race like String#hash
// we potentially read and write values from multiple threads
// without a happens-before relationship
// however the JMM guarantees us that we only ever see values
// that were valid at one point, either null or the toString value
// so we might end up not seeing a value that a different thread
// has computed or multiple threads writing the same value
this.toString = new SoftReference<>(value);
}
return value;
}
/**
* A segment of a {@link UniqueId} comprises a <em>type</em> and a
* <em>value</em>.
*/
@API(status = STABLE, since = "1.0")
public static final | UniqueId |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/persister/entity/AbstractEntityPersister.java | {
"start": 163578,
"end": 164235
} | class ____ implements CacheEntryHelper {
private final EntityPersister persister;
private final StructuredCacheEntry structure;
private StructuredCacheEntryHelper(EntityPersister persister) {
this.persister = persister;
this.structure = new StructuredCacheEntry( persister );
}
@Override
public CacheEntryStructure getCacheEntryStructure() {
return structure;
}
@Override
public CacheEntry buildCacheEntry(Object entity, Object[] state, Object version, SharedSessionContractImplementor session) {
return new StandardCacheEntryImpl( state, persister, version, session, entity );
}
}
private static | StructuredCacheEntryHelper |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/SerializeWriterTest.java | {
"start": 154,
"end": 1183
} | class ____ extends TestCase {
public void test_0() throws Exception {
SerializeWriter writer = new SerializeWriter();
writer.append('A');
writer.writeInt(156);
Assert.assertEquals("A156", writer.toString());
writer.writeLong(345);
Assert.assertEquals("A156345", writer.toString());
}
public void test_1() throws Exception {
SerializeWriter writer = new SerializeWriter();
writer.writeInt(-1);
Assert.assertEquals("-1", writer.toString());
}
public void test_4() throws Exception {
SerializeWriter writer = new SerializeWriter();
writer.writeInt(-1);
writer.write(',');
Assert.assertEquals("-1,", writer.toString());
}
public void test_5() throws Exception {
SerializeWriter writer = new SerializeWriter();
writer.writeLong(-1L);
Assert.assertEquals("-1", writer.toString());
}
public void test_6() throws Exception {
SerializeWriter writer = new SerializeWriter();
writer.writeLong(-1L);
writer.write(',');
Assert.assertEquals("-1,", writer.toString());
}
}
| SerializeWriterTest |
java | google__guava | guava-testlib/test/com/google/common/testing/NullPointerTesterTest.java | {
"start": 39065,
"end": 39611
} | class ____ extends DefaultValueChecker {
@SuppressWarnings("unused") // called by NullPointerTester
@Keep
public void checkArray(FromTo<String, Integer> f, String s) {
calledWith(f, s);
}
void check() {
runTester();
FromTo<?, ?> defaultFunction = (FromTo<?, ?>) getDefaultParameterValue(0);
assertEquals(0, defaultFunction.apply(null));
}
}
public void testGenericInterfaceDefaultValue() {
new GenericInterfaceDefaultValueChecker().check();
}
private | GenericInterfaceDefaultValueChecker |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/collectionelement/EmbeddableElementCollectionMemberOfTest.java | {
"start": 2164,
"end": 2685
} | class ____ {
public String street;
public int number;
public City city;
public String getStreet() {
return street;
}
public void setStreet(String street) {
this.street = street;
}
@Column(name = "house_number")
public int getNumber() {
return number;
}
public void setNumber(int number) {
this.number = number;
}
@ManyToOne
public City getCity() {
return city;
}
public void setCity(City city) {
this.city = city;
}
}
@Entity(name = "City")
public static | Address |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/format/support/FormattingConversionServiceTests.java | {
"start": 7183,
"end": 7555
} | class ____ implements ConverterFactory<String, Number> {
@Override
@SuppressWarnings("unchecked")
public <T extends Number> Converter<String, T> getConverter(Class<T> targetType) {
if (Integer.class == targetType) {
return (Converter<String, T>) new IntegerConverter();
}
else {
throw new IllegalStateException();
}
}
}
}
| IntegerConverterFactory |
java | elastic__elasticsearch | modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/rest/RestGetDataStreamOptionsAction.java | {
"start": 1328,
"end": 2698
} | class ____ extends BaseRestHandler {
private static final Set<String> CAPABILITIES = Set.of(RestGetDataStreamsAction.FAILURES_LIFECYCLE_API_CAPABILITY);
@Override
public String getName() {
return "get_data_stream_options_action";
}
@Override
public List<Route> routes() {
return List.of(new Route(GET, "/_data_stream/{name}/_options"));
}
@Override
protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) {
GetDataStreamOptionsAction.Request getDataStreamOptionsRequest = new GetDataStreamOptionsAction.Request(
RestUtils.getMasterNodeTimeout(request),
Strings.splitStringByCommaToArray(request.param("name"))
);
getDataStreamOptionsRequest.indicesOptions(IndicesOptions.fromRequest(request, getDataStreamOptionsRequest.indicesOptions()));
return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).execute(
GetDataStreamOptionsAction.INSTANCE,
getDataStreamOptionsRequest,
new RestRefCountedChunkedToXContentListener<>(channel)
);
}
@Override
public boolean allowSystemIndexAccessByDefault() {
return true;
}
@Override
public Set<String> supportedCapabilities() {
return CAPABILITIES;
}
}
| RestGetDataStreamOptionsAction |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/common/util/concurrent/TaskExecutionTimeTrackingEsThreadPoolExecutorTests.java | {
"start": 20064,
"end": 21498
} | class ____ extends TimedRunnable {
private final long executionTimeTakenNanos;
private final boolean testFailedOrRejected;
public SettableTimedRunnable(long executionTimeTakenNanos, boolean failedOrRejected) {
super(() -> {});
this.executionTimeTakenNanos = executionTimeTakenNanos;
this.testFailedOrRejected = failedOrRejected;
}
@Override
public long getTotalExecutionNanos() {
return executionTimeTakenNanos;
}
@Override
public boolean getFailedOrRejected() {
return testFailedOrRejected;
}
}
/**
* This TimedRunnable override provides the following:
* <ul>
* <li> Overrides {@link TimedRunnable#getQueueTimeNanos()} so that arbitrary queue latencies can be set for the thread pool.</li>
* <li> Replaces any submitted Runnable task to the thread pool with a Runnable that only waits on a {@link CyclicBarrier}.</li>
* </ul>
* This allows dynamically manipulating the queue time with {@link #setQueuedTimeTakenNanos}, and provides a means of waiting for a task
* to start by calling {@code safeAwait(barrier)} after submitting a task.
* <p>
* Look at {@link TaskExecutionTimeTrackingEsThreadPoolExecutor#wrapRunnable} for how the ThreadPool uses this as a wrapper around all
* submitted tasks.
*/
public | SettableTimedRunnable |
java | quarkusio__quarkus | test-framework/junit5/src/main/java/io/quarkus/test/junit/MockSupport.java | {
"start": 1304,
"end": 3326
} | interface ____
List<Object> inst = contexts.peek();
if (inst == null) {
throw new IllegalStateException("No test in progress");
}
try {
if (instance instanceof MockedThroughWrapper m) {
m.setMock(mock);
inst.add(instance);
} else {
Method setMethod = instance.getClass().getDeclaredMethod("arc$setMock", Object.class);
setMethod.invoke(instance, mock);
inst.add(instance);
// Disable all observers declared on the mocked bean
mockObservers(instance, true);
}
} catch (Exception e) {
throw new RuntimeException(instance
+ " is not a normal scoped CDI bean, make sure the bean is a normal scope like @ApplicationScoped or @RequestScoped",
e);
}
}
private static <T> void mockObservers(T instance, boolean mock) throws NoSuchMethodException, SecurityException,
ClassNotFoundException, IllegalAccessException, IllegalArgumentException, InvocationTargetException {
// io.quarkus.arc.ClientProxy.arc_bean()
Method getBeanMethod;
try {
getBeanMethod = instance.getClass().getDeclaredMethod("arc_bean");
} catch (NoSuchMethodException e) {
// Not a client proxy
return;
}
Object bean = getBeanMethod.invoke(instance);
// io.quarkus.arc.InjectableBean.getIdentifier()
Method getIdMethod = bean.getClass().getDeclaredMethod("getIdentifier");
String id = getIdMethod.invoke(bean).toString();
// io.quarkus.arc.impl.ArcContainerImpl.mockObservers(String, boolean)
Method mockObserversMethod = instance.getClass().getClassLoader().loadClass("io.quarkus.arc.impl.ArcContainerImpl")
.getDeclaredMethod("mockObservers", String.class, boolean.class);
mockObserversMethod.invoke(null, id, mock);
}
}
| directly |
java | elastic__elasticsearch | x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/SparseFileTracker.java | {
"start": 23852,
"end": 24774
} | class ____ {
/**
* Range in the file corresponding to the current gap
*/
private final Range range;
private Gap(Range range) {
assert range.start < range.end : range.start + "-" + range.end;
this.range = range;
}
public long start() {
return range.start;
}
public long end() {
return range.end;
}
public void onCompletion() {
onGapSuccess(range);
}
public void onProgress(long value) {
assert assertGapRangePending(range);
range.completionListener.onProgress(value);
}
public void onFailure(Exception e) {
onGapFailure(range, e);
}
@Override
public String toString() {
return SparseFileTracker.this.toString() + ' ' + range;
}
}
private static | Gap |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nec/UdevUtil.java | {
"start": 2686,
"end": 2989
} | interface ____ {
Pointer udev_new();
Pointer udev_unref(Pointer udev);
Pointer udev_device_new_from_devnum(Pointer udev,
byte type,
int devnum);
Pointer udev_device_get_syspath(Pointer udev_device);
Pointer udev_device_unref(Pointer udev_device);
}
}
| LibUdevMapping |
java | spring-projects__spring-framework | spring-beans/src/main/java/org/springframework/beans/propertyeditors/ClassEditor.java | {
"start": 1368,
"end": 2345
} | class ____ extends PropertyEditorSupport {
private final @Nullable ClassLoader classLoader;
/**
* Create a default ClassEditor, using the thread context ClassLoader.
*/
public ClassEditor() {
this(null);
}
/**
* Create a default ClassEditor, using the given ClassLoader.
* @param classLoader the ClassLoader to use
* (or {@code null} for the thread context ClassLoader)
*/
public ClassEditor(@Nullable ClassLoader classLoader) {
this.classLoader = (classLoader != null ? classLoader : ClassUtils.getDefaultClassLoader());
}
@Override
public void setAsText(String text) throws IllegalArgumentException {
if (StringUtils.hasText(text)) {
setValue(ClassUtils.resolveClassName(text.trim(), this.classLoader));
}
else {
setValue(null);
}
}
@Override
public String getAsText() {
Class<?> clazz = (Class<?>) getValue();
if (clazz != null) {
return ClassUtils.getQualifiedName(clazz);
}
else {
return "";
}
}
}
| ClassEditor |
java | spring-projects__spring-boot | module/spring-boot-session/src/test/java/org/springframework/boot/session/autoconfigure/SessionAutoConfigurationTests.java | {
"start": 10058,
"end": 10331
} | class ____ extends SessionRepositoryConfiguration {
@Bean
DefaultCookieSerializer myCookieSerializer() {
return new DefaultCookieSerializer();
}
}
@Configuration(proxyBeanMethods = false)
@EnableSpringHttpSession
static | UserProvidedCookieSerializerConfiguration |
java | grpc__grpc-java | core/src/main/java/io/grpc/internal/MessageFramer.java | {
"start": 1452,
"end": 1684
} | class ____ implements Framer {
private static final int NO_MAX_OUTBOUND_MESSAGE_SIZE = -1;
/**
* Sink implemented by the transport layer to receive frames and forward them to their
* destination.
*/
public | MessageFramer |
java | quarkusio__quarkus | extensions/qute/deployment/src/test/java/io/quarkus/qute/deployment/templateroot/TemplateRootsBuildItemTest.java | {
"start": 343,
"end": 1586
} | class ____ {
@Test
public void testIsRoot() {
TemplateRootsBuildItem buildItem = new TemplateRootsBuildItem(Set.of("templates", "public/web"));
assertTrue(buildItem.isRoot(Path.of("/templates")));
assertTrue(buildItem.isRoot(Path.of("public/web")));
assertTrue(buildItem.isRoot(Path.of("/templates/")));
assertTrue(buildItem.isRoot(Path.of("public/web/")));
assertFalse(buildItem.isRoot(Path.of("/foo/templates")));
assertFalse(buildItem.isRoot(Path.of("/web")));
assertFalse(buildItem.isRoot(Path.of("public")));
assertFalse(buildItem.isRoot(Path.of("baz/web")));
assertFalse(buildItem.isRoot(Path.of("baz/template")));
}
@Test
public void testMaybeRoot() {
TemplateRootsBuildItem buildItem = new TemplateRootsBuildItem(Set.of("templates", "public/web"));
assertTrue(buildItem.maybeRoot(Path.of("public")));
assertTrue(buildItem.maybeRoot(Path.of("templates")));
assertTrue(buildItem.maybeRoot(Path.of(File.separatorChar + "public" + File.separatorChar)));
assertFalse(buildItem.maybeRoot(Path.of("template")));
assertFalse(buildItem.maybeRoot(Path.of("foo")));
}
}
| TemplateRootsBuildItemTest |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/router/TestPriorityRouterPolicy.java | {
"start": 1801,
"end": 1942
} | class ____ the {@link PriorityRouterPolicy}. Tests that the
* weights are correctly used for ordering the choice of sub-clusters.
*/
public | for |
java | apache__camel | core/camel-api/src/main/java/org/apache/camel/CamelException.java | {
"start": 932,
"end": 1260
} | class ____ extends Exception {
public CamelException() {
}
public CamelException(String message) {
super(message);
}
public CamelException(String message, Throwable cause) {
super(message, cause);
}
public CamelException(Throwable cause) {
super(cause);
}
}
| CamelException |
java | spring-projects__spring-framework | spring-webflux/src/test/java/org/springframework/web/reactive/result/method/annotation/RequestMappingExceptionHandlingIntegrationTests.java | {
"start": 2214,
"end": 5908
} | class ____ extends AbstractRequestMappingIntegrationTests {
@Override
protected ApplicationContext initApplicationContext() {
AnnotationConfigApplicationContext wac = new AnnotationConfigApplicationContext();
wac.register(WebConfig.class);
wac.refresh();
return wac;
}
@ParameterizedHttpServerTest
void thrownException(HttpServer httpServer) throws Exception {
startServer(httpServer);
doTest("/thrown-exception", "Recovered from error: State");
}
@ParameterizedHttpServerTest
void thrownExceptionWithCause(HttpServer httpServer) throws Exception {
startServer(httpServer);
doTest("/thrown-exception-with-cause", "Recovered from error: State");
}
@ParameterizedHttpServerTest
void thrownExceptionWithCauseToHandle(HttpServer httpServer) throws Exception {
startServer(httpServer);
doTest("/thrown-exception-with-cause-to-handle", "Recovered from error: IO");
}
@ParameterizedHttpServerTest
void errorBeforeFirstItem(HttpServer httpServer) throws Exception {
startServer(httpServer);
doTest("/mono-error", "Recovered from error: Argument");
}
private void doTest(String url, String expected) {
assertThat(performGet(url, new HttpHeaders(), String.class).getBody()).isEqualTo(expected);
}
@ParameterizedHttpServerTest // SPR-16051
void exceptionAfterSeveralItems(HttpServer httpServer) throws Exception {
startServer(httpServer);
assertThatExceptionOfType(Throwable.class)
.isThrownBy(() -> performGet("/SPR-16051", new HttpHeaders(), String.class))
.withMessageStartingWith("Error while extracting response");
}
@ParameterizedHttpServerTest // SPR-16318
void exceptionFromMethodWithProducesCondition(HttpServer httpServer) throws Exception {
startServer(httpServer);
HttpHeaders headers = new HttpHeaders();
headers.add("Accept", "text/plain, application/problem+json");
assertThatExceptionOfType(HttpStatusCodeException.class)
.isThrownBy(() -> performGet("/SPR-16318", headers, String.class))
.satisfies(ex -> {
assertThat(ex.getStatusCode()).isEqualTo(HttpStatus.INTERNAL_SERVER_ERROR);
assertThat(ex.getResponseHeaders().getContentType().toString()).isEqualTo("application/problem+json");
assertThat(ex.getResponseBodyAsString()).isEqualTo("{\"reason\":\"error\"}");
});
}
@Test
void globalExceptionHandlerWithHandlerNotFound() throws Exception {
startServer(new ReactorHttpServer());
assertThatExceptionOfType(HttpStatusCodeException.class)
.isThrownBy(() -> performGet("/no-such-handler", new HttpHeaders(), String.class))
.satisfies(ex -> {
assertThat(ex.getStatusCode()).isEqualTo(HttpStatus.NOT_FOUND);
assertThat(ex.getResponseBodyAsString()).isEqualTo("{" +
"\"instance\":\"/no-such-handler\"," +
"\"status\":404," +
"\"title\":\"Not Found\"}");
});
}
@Test
void globalExceptionHandlerWithMissingRequestParameter() throws Exception {
startServer(new ReactorHttpServer());
assertThatExceptionOfType(HttpStatusCodeException.class)
.isThrownBy(() -> performGet("/missing-request-parameter", new HttpHeaders(), String.class))
.satisfies(ex -> {
assertThat(ex.getStatusCode()).isEqualTo(HttpStatus.BAD_REQUEST);
assertThat(ex.getResponseBodyAsString()).isEqualTo("{" +
"\"detail\":\"Required query parameter 'q' is not present.\"," +
"\"instance\":\"/missing-request-parameter\"," +
"\"status\":400," +
"\"title\":\"Bad Request\"}");
});
}
@Configuration
@EnableWebFlux
@ComponentScan(resourcePattern = "**/RequestMappingExceptionHandlingIntegrationTests$*.class")
@SuppressWarnings({"unused", "WeakerAccess"})
static | RequestMappingExceptionHandlingIntegrationTests |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/context/annotation/ResourceElementResolverFieldTests.java | {
"start": 1445,
"end": 5114
} | class ____ {
private final DefaultListableBeanFactory beanFactory = new DefaultListableBeanFactory();
@Test
void resolveWhenFieldIsMissingThrowsException() {
RegisteredBean registeredBean = registerTestBean(this.beanFactory);
assertThatIllegalArgumentException()
.isThrownBy(() -> ResourceElementResolver.forField("missing").resolve(registeredBean))
.withMessage("No field 'missing' found on " + TestBean.class.getName());
}
@Test
void resolveReturnsValue() {
this.beanFactory.registerSingleton("one", "1");
this.beanFactory.registerSingleton("two", "2");
RegisteredBean registeredBean = registerTestBean(this.beanFactory);
Object resolved = ResourceElementResolver.forField("one")
.resolve(registeredBean);
assertThat(resolved).isEqualTo("1");
}
@Test
void resolveWhenResourceNameAndMatchReturnsValue() {
this.beanFactory.registerSingleton("one", "1");
this.beanFactory.registerSingleton("two", "2");
RegisteredBean registeredBean = registerTestBean(this.beanFactory);
Object resolved = ResourceElementResolver.forField("test", "two").resolve(registeredBean);
assertThat(resolved).isEqualTo("2");
}
@Test
void resolveWheNoMatchFallbackOnType() {
this.beanFactory.registerSingleton("two", "2");
RegisteredBean registeredBean = registerTestBean(this.beanFactory);
Object resolved = ResourceElementResolver.forField("one").resolve(registeredBean);
assertThat(resolved).isEqualTo("2");
}
@Test
void resolveWhenMultipleCandidatesWithNoNameMatchThrowsException() {
this.beanFactory.registerSingleton("one", "1");
this.beanFactory.registerSingleton("two", "2");
RegisteredBean registeredBean = registerTestBean(this.beanFactory);
assertThatThrownBy(() -> ResourceElementResolver.forField("test").resolve(registeredBean))
.isInstanceOf(NoUniqueBeanDefinitionException.class)
.hasMessageContaining(String.class.getName())
.hasMessageContaining("one").hasMessageContaining("two");
}
@Test
void resolveWhenNoCandidateMatchingTypeThrowsException() {
RegisteredBean registeredBean = registerTestBean(this.beanFactory);
assertThatThrownBy(() -> ResourceElementResolver.forField("test").resolve(registeredBean))
.isInstanceOf(NoSuchBeanDefinitionException.class)
.hasMessageContaining(String.class.getName());
}
@Test
void resolveWhenInvalidMatchingTypeThrowsException() {
this.beanFactory.registerSingleton("count", "counter");
RegisteredBean registeredBean = registerTestBean(this.beanFactory);
assertThatThrownBy(() -> ResourceElementResolver.forField("count").resolve(registeredBean))
.isInstanceOf(BeanNotOfRequiredTypeException.class)
.hasMessageContaining(Integer.class.getName())
.hasMessageContaining(String.class.getName());
}
@Test
void resolveAndSetSetsValue() {
this.beanFactory.registerSingleton("one", "1");
RegisteredBean registeredBean = registerTestBean(this.beanFactory);
TestBean testBean = new TestBean();
ResourceElementResolver.forField("one").resolveAndSet(registeredBean, testBean);
assertThat(testBean.one).isEqualTo("1");
}
@Test
void resolveRegistersDependantBeans() {
this.beanFactory.registerSingleton("one", "1");
RegisteredBean registeredBean = registerTestBean(this.beanFactory);
ResourceElementResolver.forField("one").resolve(registeredBean);
assertThat(this.beanFactory.getDependentBeans("one")).containsExactly("testBean");
}
private RegisteredBean registerTestBean(DefaultListableBeanFactory beanFactory) {
beanFactory.registerBeanDefinition("testBean", new RootBeanDefinition(TestBean.class));
return RegisteredBean.of(beanFactory, "testBean");
}
static | ResourceElementResolverFieldTests |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/IgnoredPureGetter.java | {
"start": 5973,
"end": 6069
} | enum ____ {
AUTO_VALUE,
AUTO_VALUE_BUILDER,
AUTO_BUILDER,
PROTO
}
}
| PureGetterKind |
java | apache__flink | flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/FileSourceSplit.java | {
"start": 2220,
"end": 10573
} | class ____ implements SourceSplit, Serializable {
private static final long serialVersionUID = 1L;
private static final String[] NO_HOSTS = StringUtils.EMPTY_STRING_ARRAY;
/** The unique ID of the split. Unique within the scope of this source. */
private final String id;
/** The path of the file referenced by this split. */
private final Path filePath;
/** The position of the first byte in the file to process. */
private final long offset;
/** The number of bytes in the file to process. */
private final long length;
/** The modification time of the file, from {@link FileStatus#getModificationTime()}. */
private final long fileModificationTime;
/** The file size in bytes, from {@link FileStatus#getLen()}. */
private final long fileSize;
/**
* The names of the hosts storing this range of the file. Empty, if no host information is
* available.
*/
private final String[] hostnames;
/** The precise reader position in the split, to resume from. */
@Nullable private final CheckpointedPosition readerPosition;
/**
* The splits are frequently serialized into checkpoints. Caching the byte representation makes
* repeated serialization cheap. This field is used by {@link FileSourceSplitSerializer}.
*/
@Nullable transient byte[] serializedFormCache;
// --------------------------------------------------------------------------------------------
/**
* Constructs a split with host information.
*
* @param id The unique ID of this source split.
* @param filePath The path to the file.
* @param offset The start (inclusive) of the split's rage in the file.
* @param length The number of bytes in the split (starting from the offset)
* @param fileModificationTime The modification time of the file
* @param fileSize The size of the full file
*/
public FileSourceSplit(
String id,
Path filePath,
long offset,
long length,
long fileModificationTime,
long fileSize) {
this(id, filePath, offset, length, fileModificationTime, fileSize, NO_HOSTS);
}
/**
* Constructs a split with host information.
*
* @param filePath The path to the file.
* @param offset The start (inclusive) of the split's rage in the file.
* @param length The number of bytes in the split (starting from the offset)
* @param fileModificationTime The modification time of the file
* @param fileSize The size of the full file
* @param hostnames The hostnames of the nodes storing the split's file range.
*/
public FileSourceSplit(
String id,
Path filePath,
long offset,
long length,
long fileModificationTime,
long fileSize,
String... hostnames) {
this(id, filePath, offset, length, fileModificationTime, fileSize, hostnames, null, null);
}
/**
* Constructs a split with host information.
*
* @param filePath The path to the file.
* @param offset The start (inclusive) of the split's rage in the file.
* @param length The number of bytes in the split (starting from the offset)
* @param fileModificationTime The modification time of the file
* @param fileSize The size of the full file
* @param hostnames The hostnames of the nodes storing the split's file range.
*/
public FileSourceSplit(
String id,
Path filePath,
long offset,
long length,
long fileModificationTime,
long fileSize,
String[] hostnames,
@Nullable CheckpointedPosition readerPosition) {
this(
id,
filePath,
offset,
length,
fileModificationTime,
fileSize,
hostnames,
readerPosition,
null);
}
/**
* Package private constructor, used by the serializers to directly cache the serialized form.
*/
FileSourceSplit(
String id,
Path filePath,
long offset,
long length,
long fileModificationTime,
long fileSize,
String[] hostnames,
@Nullable CheckpointedPosition readerPosition,
@Nullable byte[] serializedForm) {
this.fileModificationTime = fileModificationTime;
this.fileSize = fileSize;
checkArgument(offset >= 0, "offset must be >= 0");
checkArgument(length >= 0, "length must be >= 0");
checkNoNullHosts(hostnames);
this.id = checkNotNull(id);
this.filePath = checkNotNull(filePath);
this.offset = offset;
this.length = length;
this.hostnames = hostnames;
this.readerPosition = readerPosition;
this.serializedFormCache = serializedForm;
}
// ------------------------------------------------------------------------
// split properties
// ------------------------------------------------------------------------
@Override
public String splitId() {
return id;
}
/** Gets the file's path. */
public Path path() {
return filePath;
}
/**
* Returns the start of the file region referenced by this source split. The position is
* inclusive, the value indicates the first byte that is part of the split.
*/
public long offset() {
return offset;
}
/** Returns the number of bytes in the file region described by this source split. */
public long length() {
return length;
}
/** Returns the modification time of the file, from {@link FileStatus#getModificationTime()}. */
public long fileModificationTime() {
return fileModificationTime;
}
/** Returns the full file size in bytes, from {@link FileStatus#getLen()}. */
public long fileSize() {
return fileSize;
}
/**
* Gets the hostnames of the nodes storing the file range described by this split. The returned
* array is empty, if no host information is available.
*
* <p>Host information is typically only available on specific file systems, like HDFS.
*/
public String[] hostnames() {
return hostnames;
}
/**
* Gets the (checkpointed) position of the reader, if set. This value is typically absent for
* splits when assigned from the enumerator to the readers, and present when the splits are
* recovered from a checkpoint.
*/
public Optional<CheckpointedPosition> getReaderPosition() {
return Optional.ofNullable(readerPosition);
}
/**
* Creates a copy of this split where the checkpointed position is replaced by the given new
* position.
*
* <p><b>IMPORTANT:</b> Subclasses that add additional information to the split must override
* this method to return that subclass type. This contract is enforced by checks in the file
* source implementation. We did not try to enforce this contract via generics in this split
* class, because it leads to very ugly and verbose use of generics.
*/
public FileSourceSplit updateWithCheckpointedPosition(@Nullable CheckpointedPosition position) {
return new FileSourceSplit(
id, filePath, offset, length, fileModificationTime, fileSize, hostnames, position);
}
// ------------------------------------------------------------------------
// utils
// ------------------------------------------------------------------------
@Override
public String toString() {
final String hosts =
hostnames.length == 0 ? "(no host info)" : " hosts=" + Arrays.toString(hostnames);
return String.format(
"FileSourceSplit: %s [%d, %d) %s ID=%s position=%s",
filePath, offset, offset + length, hosts, id, readerPosition);
}
private static void checkNoNullHosts(String[] hosts) {
checkNotNull(hosts, "hostnames array must not be null");
for (String host : hosts) {
checkArgument(host != null, "the hostnames must not contain null entries");
}
}
}
| FileSourceSplit |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/sealed/InterceptedSealedTest.java | {
"start": 887,
"end": 1442
} | class ____ {
@RegisterExtension
public ArcTestContainer container = ArcTestContainer.builder()
.beanClasses(DependentSealed.class, MyInterceptorBinding.class, MyInterceptor.class)
.shouldFail()
.build();
@Test
public void trigger() {
Throwable error = container.getFailure();
assertNotNull(error);
assertInstanceOf(DeploymentException.class, error);
assertTrue(error.getMessage().contains("must not be sealed"));
}
@Dependent
static sealed | InterceptedSealedTest |
java | bumptech__glide | library/src/main/java/com/bumptech/glide/load/model/DirectResourceLoader.java | {
"start": 3103,
"end": 3486
} | interface ____<DataT> {
/**
* {@code resources} is expected to come from the given {@code theme}, so {@code theme} does not
* need to be used if it's not required.
*/
DataT open(@Nullable Theme theme, Resources resources, int resourceId);
void close(DataT data) throws IOException;
Class<DataT> getDataClass();
}
private static final | ResourceOpener |
java | google__dagger | javatests/dagger/internal/codegen/ComponentCreatorTest.java | {
"start": 13926,
"end": 14011
} | class ____ {",
" @Component.Builder",
" private | SimpleComponent |
java | quarkusio__quarkus | independent-projects/tools/devtools-common/src/main/java/io/quarkus/maven/utilities/SortedProperties.java | {
"start": 115,
"end": 322
} | class ____ extends Properties {
private static final long serialVersionUID = 1L;
@Override
public Set<Object> keySet() {
return new TreeSet<Object>(super.keySet());
}
}
| SortedProperties |
java | apache__rocketmq | auth/src/main/java/org/apache/rocketmq/auth/authorization/chain/AclAuthorizationHandler.java | {
"start": 2121,
"end": 7185
} | class ____ implements Handler<DefaultAuthorizationContext, CompletableFuture<Void>> {
private final AuthorizationMetadataProvider authorizationMetadataProvider;
public AclAuthorizationHandler(AuthConfig config) {
this.authorizationMetadataProvider = AuthorizationFactory.getMetadataProvider(config);
}
public AclAuthorizationHandler(AuthConfig config, Supplier<?> metadataService) {
this.authorizationMetadataProvider = AuthorizationFactory.getMetadataProvider(config, metadataService);
}
@Override
public CompletableFuture<Void> handle(DefaultAuthorizationContext context,
HandlerChain<DefaultAuthorizationContext, CompletableFuture<Void>> chain) {
if (this.authorizationMetadataProvider == null) {
throw new AuthorizationException("The authorizationMetadataProvider is not configured");
}
return this.authorizationMetadataProvider.getAcl(context.getSubject()).thenAccept(acl -> {
if (acl == null) {
throwException(context, "no matched policies.");
}
// 1. get the defined acl entries which match the request.
PolicyEntry matchedEntry = matchPolicyEntries(context, acl);
// 2. if no matched acl entries, return deny
if (matchedEntry == null) {
throwException(context, "no matched policies.");
}
// 3. judge is the entries has denied decision.
if (matchedEntry.getDecision() == Decision.DENY) {
throwException(context, "the decision is deny.");
}
});
}
private PolicyEntry matchPolicyEntries(DefaultAuthorizationContext context, Acl acl) {
List<PolicyEntry> policyEntries = new ArrayList<>();
Policy policy = acl.getPolicy(PolicyType.CUSTOM);
if (policy != null) {
List<PolicyEntry> entries = matchPolicyEntries(context, policy.getEntries());
if (CollectionUtils.isNotEmpty(entries)) {
policyEntries.addAll(entries);
}
}
if (CollectionUtils.isEmpty(policyEntries)) {
policy = acl.getPolicy(PolicyType.DEFAULT);
if (policy != null) {
List<PolicyEntry> entries = matchPolicyEntries(context, policy.getEntries());
if (CollectionUtils.isNotEmpty(entries)) {
policyEntries.addAll(entries);
}
}
}
if (CollectionUtils.isEmpty(policyEntries)) {
return null;
}
policyEntries.sort(this::comparePolicyEntries);
return policyEntries.get(0);
}
private List<PolicyEntry> matchPolicyEntries(DefaultAuthorizationContext context, List<PolicyEntry> entries) {
if (CollectionUtils.isEmpty(entries)) {
return null;
}
return entries.stream()
.filter(entry -> entry.isMatchResource(context.getResource()))
.filter(entry -> entry.isMatchAction(context.getActions()))
.filter(entry -> entry.isMatchEnvironment(Environment.of(context.getSourceIp())))
.collect(Collectors.toList());
}
private int comparePolicyEntries(PolicyEntry o1, PolicyEntry o2) {
int compare = 0;
Resource r1 = o1.getResource();
Resource r2 = o2.getResource();
if (r1.getResourceType() != r2.getResourceType()) {
if (r1.getResourceType() == ResourceType.ANY) {
compare = 1;
}
if (r2.getResourceType() == ResourceType.ANY) {
compare = -1;
}
} else if (r1.getResourcePattern() == r2.getResourcePattern()) {
if (r1.getResourcePattern() == ResourcePattern.PREFIXED) {
String n1 = r1.getResourceName();
String n2 = r2.getResourceName();
compare = -1 * Integer.compare(n1.length(), n2.length());
}
} else {
if (r1.getResourcePattern() == ResourcePattern.LITERAL) {
compare = -1;
} else if (r2.getResourcePattern() == ResourcePattern.LITERAL) {
compare = 1;
} else if (r1.getResourcePattern() == ResourcePattern.PREFIXED) {
compare = -1;
} else if (r2.getResourcePattern() == ResourcePattern.PREFIXED) {
compare = 1;
}
}
if (compare != 0) {
return compare;
}
// the decision deny has higher priority
Decision d1 = o1.getDecision();
Decision d2 = o2.getDecision();
if (d1 != d2) {
return d1 == Decision.DENY ? -1 : 1;
}
return 0;
}
private static void throwException(DefaultAuthorizationContext context, String detail) {
throw new AuthorizationException("{} has no permission to access {} from {}, " + detail,
context.getSubject().getSubjectKey(), context.getResource().getResourceKey(), context.getSourceIp());
}
}
| AclAuthorizationHandler |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhancement/lazy/LazyProxyOnEnhancedEntityTest.java | {
"start": 2663,
"end": 3026
} | class ____ {
@Id
@GeneratedValue( strategy = GenerationType.AUTO )
Long id;
@OneToOne( fetch = FetchType.LAZY
)
Child child;
public Long getId() {
return id;
}
public Child getChild() {
return child;
}
public void setChild(Child child) {
this.child = child;
}
}
@Entity(name = "Child")
@Table( name = "CHILD" )
static | Parent |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/language/csimple/CSimpleTest.java | {
"start": 980,
"end": 2084
} | class ____ extends ContextTestSupport {
@Test
public void testCSimple() throws Exception {
getMockEndpoint("mock:high").expectedBodiesReceived("24", "20");
getMockEndpoint("mock:med").expectedBodiesReceived("9", "6");
getMockEndpoint("mock:low").expectedBodiesReceived("1", "2");
template.sendBody("direct:start", 9);
template.sendBody("direct:start", 1);
template.sendBody("direct:start", 24);
template.sendBody("direct:start", 2);
template.sendBody("direct:start", 6);
template.sendBody("direct:start", 20);
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start")
.choice()
.when(csimple("${body} > 10")).to("mock:high")
.when(csimple("${body} > 5")).to("mock:med")
.otherwise().to("mock:low");
}
};
}
}
| CSimpleTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/UnnecessaryDefaultInEnumSwitchTest.java | {
"start": 32170,
"end": 32743
} | enum ____ {
FOO,
BAR,
BAZ,
}
public static void main(String[] args) {
var type = Type.valueOf(args[0]);
switch (type) {
case FOO -> {
System.out.println("Hi foo");
}
case BAR, BAZ -> {}
default -> throw new AssertionError(type);
}
}
}
""")
.addOutputLines(
"Test.java",
"""
| Type |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/SpringBootConfigurationTests.java | {
"start": 1057,
"end": 1686
} | class ____ {
@Test
void proxyBeanMethodsIsEnabledByDefault() {
AnnotationAttributes attributes = AnnotatedElementUtils
.getMergedAnnotationAttributes(DefaultSpringBootConfiguration.class, Configuration.class);
assertThat(attributes).containsEntry("proxyBeanMethods", true);
}
@Test
void proxyBeanMethodsCanBeDisabled() {
AnnotationAttributes attributes = AnnotatedElementUtils
.getMergedAnnotationAttributes(NoBeanMethodProxyingSpringBootConfiguration.class, Configuration.class);
assertThat(attributes).containsEntry("proxyBeanMethods", false);
}
@SpringBootConfiguration
static | SpringBootConfigurationTests |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/BugCheckerTest.java | {
"start": 9387,
"end": 10501
} | class ____ extends BugChecker
implements CompilationUnitTreeMatcher {
@Override
public Description matchCompilationUnit(
CompilationUnitTree compilationUnit, VisitorState stateForCompilationUnit) {
new TreePathScanner<Void, Void>() {
@Override
public Void scan(Tree tree, Void unused) {
if (isSuppressed(tree, state())) {
return null;
}
return super.scan(tree, null);
}
@Override
public Void scan(TreePath path, Void unused) {
if (isSuppressed(path.getLeaf(), stateForCompilationUnit.withPath(path))) {
return null;
}
return super.scan(path, null);
}
@Override
public Void visitVariable(VariableTree tree, Void unused) {
state().reportMatch(describeMatch(tree));
return null;
}
private VisitorState state() {
return stateForCompilationUnit.withPath(getCurrentPath());
}
}.scan(stateForCompilationUnit.getPath(), null);
return NO_MATCH;
}
}
}
| ManuallySuppressibleCheck |
java | google__dagger | javatests/dagger/internal/codegen/ModuleValidationTest.java | {
"start": 6049,
"end": 6902
} | interface ____ {",
" Sub build();",
" }",
"}");
CompilerTests.daggerCompiler(module, subcomponent)
.compile(
subject -> {
subject.hasErrorCount(1);
subject.hasErrorContaining(
"test.Sub.Builder is a @ProductionSubcomponent.Builder. "
+ "Did you mean to use test.Sub?")
.onSource(module)
.onLine(5);
});
}
@Test
public void moduleSubcomponents_listsProductionSubcomponentFactory() {
Source module =
CompilerTests.javaSource(
"test.TestModule",
"package test;",
"",
moduleType.importStatement(),
"",
moduleType.annotationWithSubcomponent("Sub.Factory.class"),
" | Builder |
java | apache__dubbo | dubbo-common/src/main/java/org/apache/dubbo/common/convert/StringToFloatConverter.java | {
"start": 1042,
"end": 1320
} | class ____ implements StringConverter<Float> {
@Override
public Float convert(String source) {
return isNotEmpty(source) ? valueOf(source) : null;
}
@Override
public int getPriority() {
return NORMAL_PRIORITY + 4;
}
}
| StringToFloatConverter |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/where/hbm/Product.java | {
"start": 193,
"end": 1905
} | class ____ {
private int id;
private Set<Category> categoriesOneToMany = new HashSet<>();
private Set<Category> categoriesWithDescOneToMany = new HashSet<>();
private Set<Category> categoriesManyToMany = new HashSet<>();
private Set<Category> categoriesWithDescManyToMany = new HashSet<>();
private Set<Category> categoriesWithDescIdLt4ManyToMany = new HashSet<>();
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
public Set<Category> getCategoriesOneToMany() {
return categoriesOneToMany;
}
public void setCategoriesOneToMany(Set<Category> categoriesOneToMany) {
this.categoriesOneToMany = categoriesOneToMany;
}
public Set<Category> getCategoriesWithDescOneToMany() {
return categoriesWithDescOneToMany;
}
public void setCategoriesWithDescOneToMany(Set<Category> categoriesWithDescOneToMany) {
this.categoriesWithDescOneToMany = categoriesWithDescOneToMany;
}
public Set<Category> getCategoriesManyToMany() {
return categoriesManyToMany;
}
public void setCategoriesManyToMany(Set<Category> categoriesManyToMany) {
this.categoriesManyToMany = categoriesManyToMany;
}
public Set<Category> getCategoriesWithDescManyToMany() {
return categoriesWithDescManyToMany;
}
public void setCategoriesWithDescManyToMany(Set<Category> categoriesWithDescManyToMany) {
this.categoriesWithDescManyToMany = categoriesWithDescManyToMany;
}
public Set<Category> getCategoriesWithDescIdLt4ManyToMany() {
return categoriesWithDescIdLt4ManyToMany;
}
public void setCategoriesWithDescIdLt4ManyToMany(Set<Category> categoriesWithDescIdLt4ManyToMany) {
this.categoriesWithDescIdLt4ManyToMany = categoriesWithDescIdLt4ManyToMany;
}
}
| Product |
java | apache__kafka | clients/src/main/java/org/apache/kafka/clients/admin/ListTopicsResult.java | {
"start": 1029,
"end": 1803
} | class ____ {
final KafkaFuture<Map<String, TopicListing>> future;
ListTopicsResult(KafkaFuture<Map<String, TopicListing>> future) {
this.future = future;
}
/**
* Return a future which yields a map of topic names to TopicListing objects.
*/
public KafkaFuture<Map<String, TopicListing>> namesToListings() {
return future;
}
/**
* Return a future which yields a collection of TopicListing objects.
*/
public KafkaFuture<Collection<TopicListing>> listings() {
return future.thenApply(Map::values);
}
/**
* Return a future which yields a collection of topic names.
*/
public KafkaFuture<Set<String>> names() {
return future.thenApply(Map::keySet);
}
}
| ListTopicsResult |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/testkit/CaseInsensitiveStringComparator.java | {
"start": 689,
"end": 1073
} | class ____ implements Comparator<String> {
public static final CaseInsensitiveStringComparator INSTANCE = new CaseInsensitiveStringComparator();
@Override
public int compare(String s1, String s2) {
if (s1 == null && s2 == null) return 0;
if (s1 == null) return -1;
if (s2 == null) return 1;
return s1.compareToIgnoreCase(s2);
}
}
| CaseInsensitiveStringComparator |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/jsontype/SealedTypesWithSubtypesTest.java | {
"start": 2773,
"end": 3027
} | class ____ {
public Base1125 value;
public Issue1125Wrapper() { }
public Issue1125Wrapper(Base1125 v) { value = v; }
}
@JsonTypeInfo(use=JsonTypeInfo.Id.NAME, defaultImpl=Default1125.class)
static sealed | Issue1125Wrapper |
java | micronaut-projects__micronaut-core | management/src/test/java/io/micronaut/management/health/indicator/threads/DeadlockedThreadsHealthIndicatorTest.java | {
"start": 1117,
"end": 4480
} | class ____ {
@Test
void diskSpaceHealthIndicatorViaConfiguration() {
Map<String, Object> configuration = Map.of("endpoints.health.deadlocked-threads.enabled", StringUtils.FALSE);
try (ApplicationContext context = ApplicationContext.run(configuration)) {
assertFalse(context.containsBean(DeadlockedThreadsHealthIndicator.class));
}
// enabled by default
try (ApplicationContext context = ApplicationContext.run()) {
assertTrue(context.containsBean(DeadlockedThreadsHealthIndicator.class));
}
}
@Test
void testDeadlockedThreadsHealthIndicator() {
Map<String, Object> configuration = Map.of(
"spec.name", "DeadlockedThreadsHealthIndicatorTest",
"endpoints.health.details-visible", DetailsVisibility.ANONYMOUS
);
try (EmbeddedServer server = ApplicationContext.run(EmbeddedServer.class, configuration)) {
try (HttpClient httpClient = server.getApplicationContext().createBean(HttpClient.class, server.getURL())) {
BlockingHttpClient client = httpClient.toBlocking();
await().until(() -> isDown(client));
Argument ok = Argument.of(Map.class);
Argument notOk = Argument.of(Map.class);
HttpClientResponseException ex = assertThrows(HttpClientResponseException.class,
() -> client.exchange(HttpRequest.GET("/health"), ok, notOk));
assertEquals(HttpStatus.SERVICE_UNAVAILABLE, ex.getStatus());
Optional<Map> healthStatusOptional = ex.getResponse().getBody(notOk);
assertNotNull(healthStatusOptional);
assertTrue(healthStatusOptional.isPresent());
Map healthStatus = healthStatusOptional.get();
assertNotNull(healthStatus);
assertEquals("DOWN", healthStatus.get("status"));
assertEquals("DOWN", ((Map) ((Map) healthStatus.get("details")).get("deadlockedThreads")).get("status"));
Map details = (Map)((List) ((Map) ((Map) healthStatus.get("details")).get("deadlockedThreads")).get("details")).get(0);
assertTrue(details.containsKey("threadId"));
assertTrue(details.containsKey("threadName"));
assertTrue(details.containsKey("threadState"));
assertTrue(details.containsKey("daemon"));
assertTrue(details.containsKey("priority"));
assertTrue(details.containsKey("suspended"));
assertTrue(details.containsKey("inNative"));
assertTrue(details.containsKey("lockName"));
assertTrue(details.containsKey("lockOwnerName"));
assertTrue(details.containsKey("lockOwnerId"));
assertFalse(details.containsKey("lockedSynchronizers"));
assertTrue(details.containsKey("stackTrace"));
}
}
}
private boolean isDown(BlockingHttpClient client) {
try {
client.exchange("/health");
return false;
} catch (HttpClientResponseException e) {
return true;
}
}
@Requires(property = "spec.name", value = "DeadlockedThreadsHealthIndicatorTest")
@Singleton
static | DeadlockedThreadsHealthIndicatorTest |
java | quarkusio__quarkus | extensions/panache/hibernate-orm-panache/runtime/src/main/java/io/quarkus/hibernate/orm/panache/Panache.java | {
"start": 451,
"end": 1011
} | class ____ {
/**
* Returns the default {@link EntityManager}
*
* @return {@link EntityManager}
*/
public static EntityManager getEntityManager() {
return JpaOperations.INSTANCE.getSession();
}
/**
* Returns the default {@link Session}
*
* @return {@link Session}
*/
public static Session getSession() {
return JpaOperations.INSTANCE.getSession();
}
/**
* Returns the {@link EntityManager} for the given {@link Class<?> entity}
*
* @param clazz the entity | Panache |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/heartbeat/HeartbeatMonitor.java | {
"start": 1355,
"end": 2305
} | interface ____<O> {
/**
* Gets heartbeat target.
*
* @return the heartbeat target
*/
HeartbeatTarget<O> getHeartbeatTarget();
/**
* Gets heartbeat target id.
*
* @return the heartbeat target id
*/
ResourceID getHeartbeatTargetId();
/** Report heartbeat from the monitored target. */
void reportHeartbeat();
/** Cancel this monitor. */
void cancel();
/**
* Gets the last heartbeat.
*
* @return the last heartbeat
*/
long getLastHeartbeat();
/** Reports that the heartbeat rpc could not be sent to the target. */
void reportHeartbeatRpcFailure();
/** Reports that the heartbeat rpc could be sent to the target. */
void reportHeartbeatRpcSuccess();
/**
* This factory provides an indirection way to create {@link HeartbeatMonitor}.
*
* @param <O> Type of the outgoing heartbeat payload
*/
| HeartbeatMonitor |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/bytecode/enhance/internal/bytebuddy/FieldAccessEnhancer.java | {
"start": 4382,
"end": 6062
} | class ____
final String cleanedName = name.replace( '/', '.' );
final TypePool.Resolution resolution = classPool.describe( cleanedName );
if ( !resolution.isResolved() ) {
final String msg = String.format(
"Unable to perform extended enhancement - Unable to locate [%s]",
cleanedName
);
throw new EnhancementException( msg );
}
return resolution.resolve();
}
private AnnotatedFieldDescription findField(TypeDescription declaredOwnedType, String name, String desc) {
TypeDefinition ownerType = declaredOwnedType;
ElementMatcher.Junction<NamedElement.WithDescriptor> fieldFilter = named( name ).and( hasDescriptor( desc ) );
FieldList<?> fields = ownerType.getDeclaredFields().filter( fieldFilter );
// Look in the superclasses if necessary
while ( fields.isEmpty() && ownerType.getSuperClass() != null ) {
ownerType = ownerType.getSuperClass();
fields = ownerType.getDeclaredFields().filter( fieldFilter );
}
if ( fields.size() != 1 ) {
final String msg = String.format(
"Unable to perform extended enhancement - No unique field [%s] defined by [%s]",
name,
declaredOwnedType.getName()
);
throw new EnhancementException( msg );
}
return new AnnotatedFieldDescription( enhancementContext, fields.getOnly() );
}
@Override
public boolean equals(final Object o) {
if ( this == o ) {
return true;
}
if ( o == null || FieldAccessEnhancer.class != o.getClass() ) {
return false;
}
final FieldAccessEnhancer that = (FieldAccessEnhancer) o;
return Objects.equals( managedCtClass, that.managedCtClass );
}
@Override
public int hashCode() {
return managedCtClass.hashCode();
}
}
| name |
java | square__javapoet | src/test/java/com/squareup/javapoet/MethodSpecTest.java | {
"start": 1864,
"end": 3408
} | class ____ {
@Rule public final CompilationRule compilation = new CompilationRule();
private Elements elements;
private Types types;
@Before public void setUp() {
elements = compilation.getElements();
types = compilation.getTypes();
}
private TypeElement getElement(Class<?> clazz) {
return elements.getTypeElement(clazz.getCanonicalName());
}
@Test public void nullAnnotationsAddition() {
try {
MethodSpec.methodBuilder("doSomething").addAnnotations(null);
fail();
} catch (IllegalArgumentException expected) {
assertThat(expected).hasMessageThat().isEqualTo("annotationSpecs == null");
}
}
@Test public void nullTypeVariablesAddition() {
try {
MethodSpec.methodBuilder("doSomething").addTypeVariables(null);
fail();
} catch (IllegalArgumentException expected) {
assertThat(expected).hasMessageThat().isEqualTo("typeVariables == null");
}
}
@Test public void nullParametersAddition() {
try {
MethodSpec.methodBuilder("doSomething").addParameters(null);
fail();
} catch (IllegalArgumentException expected) {
assertThat(expected).hasMessageThat().isEqualTo("parameterSpecs == null");
}
}
@Test public void nullExceptionsAddition() {
try {
MethodSpec.methodBuilder("doSomething").addExceptions(null);
fail();
} catch (IllegalArgumentException expected) {
assertThat(expected).hasMessageThat().isEqualTo("exceptions == null");
}
}
@Target(ElementType.PARAMETER)
@ | MethodSpecTest |
java | google__dagger | javatests/dagger/internal/codegen/MultibindingTest.java | {
"start": 12805,
"end": 13203
} | interface ____");
});
}
// Regression test for b/352142595.
@Test
public void testMultibindingMapWithKotlinSource() {
Source parent =
CompilerTests.kotlinSource(
"test.Parent.kt",
"package test",
"",
"import dagger.Component",
"",
"@Component(modules = [ParentModule::class])",
" | Parent |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/AucRoc.java | {
"start": 2845,
"end": 2879
} | class ____ the rest.
*/
public | versus |
java | apache__kafka | clients/src/test/java/org/apache/kafka/clients/consumer/internals/StreamsGroupHeartbeatRequestManagerTest.java | {
"start": 3961,
"end": 87617
} | class ____ {
private static final LogContext LOG_CONTEXT = new LogContext("test");
private static final long RECEIVED_HEARTBEAT_INTERVAL_MS = 1200;
private static final int DEFAULT_MAX_POLL_INTERVAL_MS = 10000;
private static final String GROUP_ID = "group-id";
private static final String MEMBER_ID = "member-id";
private static final int MEMBER_EPOCH = 1;
private static final String INSTANCE_ID = "instance-id";
private static final UUID PROCESS_ID = UUID.randomUUID();
private static final StreamsRebalanceData.HostInfo ENDPOINT = new StreamsRebalanceData.HostInfo("localhost", 8080);
private static final String SOURCE_TOPIC_1 = "sourceTopic1";
private static final String SOURCE_TOPIC_2 = "sourceTopic2";
private static final Set<String> SOURCE_TOPICS = Set.of(SOURCE_TOPIC_1, SOURCE_TOPIC_2);
private static final String REPARTITION_SINK_TOPIC_1 = "repartitionSinkTopic1";
private static final String REPARTITION_SINK_TOPIC_2 = "repartitionSinkTopic2";
private static final String REPARTITION_SINK_TOPIC_3 = "repartitionSinkTopic3";
private static final Set<String> REPARTITION_SINK_TOPICS = Set.of(
REPARTITION_SINK_TOPIC_1,
REPARTITION_SINK_TOPIC_2,
REPARTITION_SINK_TOPIC_3
);
private static final String REPARTITION_SOURCE_TOPIC_1 = "repartitionSourceTopic1";
private static final String REPARTITION_SOURCE_TOPIC_2 = "repartitionSourceTopic2";
private static final Map<String, StreamsRebalanceData.TopicInfo> REPARTITION_SOURCE_TOPICS = Map.of(
REPARTITION_SOURCE_TOPIC_1, new StreamsRebalanceData.TopicInfo(Optional.of(2), Optional.of((short) 1), Map.of("config3", "value3", "config1", "value1")),
REPARTITION_SOURCE_TOPIC_2, new StreamsRebalanceData.TopicInfo(Optional.of(3), Optional.of((short) 3), Collections.emptyMap())
);
private static final String CHANGELOG_TOPIC_1 = "changelogTopic1";
private static final String CHANGELOG_TOPIC_2 = "changelogTopic2";
private static final String CHANGELOG_TOPIC_3 = "changelogTopic3";
private static final Map<String, StreamsRebalanceData.TopicInfo> CHANGELOG_TOPICS = Map.of(
CHANGELOG_TOPIC_1, new StreamsRebalanceData.TopicInfo(Optional.empty(), Optional.of((short) 1), Map.of()),
CHANGELOG_TOPIC_2, new StreamsRebalanceData.TopicInfo(Optional.empty(), Optional.of((short) 2), Map.of()),
CHANGELOG_TOPIC_3, new StreamsRebalanceData.TopicInfo(Optional.empty(), Optional.of((short) 3), Map.of("config4", "value4", "config2", "value2"))
);
private static final Collection<Set<String>> COPARTITION_GROUP = Set.of(
Set.of(SOURCE_TOPIC_1, REPARTITION_SOURCE_TOPIC_2),
Set.of(SOURCE_TOPIC_2, REPARTITION_SOURCE_TOPIC_1)
);
private static final String SUBTOPOLOGY_NAME_1 = "subtopology1";
private static final StreamsRebalanceData.Subtopology SUBTOPOLOGY_1 = new StreamsRebalanceData.Subtopology(
SOURCE_TOPICS,
REPARTITION_SINK_TOPICS,
REPARTITION_SOURCE_TOPICS,
CHANGELOG_TOPICS,
COPARTITION_GROUP
);
private static final String SUBTOPOLOGY_NAME_2 = "subtopology2";
private static final String SOURCE_TOPIC_3 = "sourceTopic3";
private static final String CHANGELOG_TOPIC_4 = "changelogTopic4";
private static final StreamsRebalanceData.Subtopology SUBTOPOLOGY_2 = new StreamsRebalanceData.Subtopology(
Set.of(SOURCE_TOPIC_3),
Set.of(),
Map.of(),
Map.of(CHANGELOG_TOPIC_4, new StreamsRebalanceData.TopicInfo(Optional.empty(), Optional.of((short) 1), Map.of())),
Collections.emptyList()
);
private static final Map<String, StreamsRebalanceData.Subtopology> SUBTOPOLOGIES =
Map.of(
SUBTOPOLOGY_NAME_1, SUBTOPOLOGY_1,
SUBTOPOLOGY_NAME_2, SUBTOPOLOGY_2
);
private static final String CLIENT_TAG_1 = "client-tag1";
private static final String VALUE_1 = "value1";
private static final Map<String, String> CLIENT_TAGS = Map.of(CLIENT_TAG_1, VALUE_1);
private static final List<StreamsGroupHeartbeatResponseData.EndpointToPartitions> ENDPOINT_TO_PARTITIONS =
List.of(
new StreamsGroupHeartbeatResponseData.EndpointToPartitions()
.setUserEndpoint(new StreamsGroupHeartbeatResponseData.Endpoint().setHost("localhost").setPort(8080))
.setActivePartitions(List.of(
new StreamsGroupHeartbeatResponseData.TopicPartition().setTopic("topic").setPartitions(List.of(0)))
)
);
private final StreamsRebalanceData streamsRebalanceData = new StreamsRebalanceData(
PROCESS_ID,
Optional.of(ENDPOINT),
SUBTOPOLOGIES,
CLIENT_TAGS
);
private final Time time = new MockTime();
private final ConsumerConfig config = config();
@Mock
private CoordinatorRequestManager coordinatorRequestManager;
@Mock
private StreamsMembershipManager membershipManager;
@Mock
private BackgroundEventHandler backgroundEventHandler;
private final Metrics metrics = new Metrics(time);
private final Node coordinatorNode = new Node(1, "localhost", 9092);
@Test
public void testConstructWithNullCoordinatorRequestManager() {
final Exception exception = assertThrows(NullPointerException.class, () -> new StreamsGroupHeartbeatRequestManager(
new LogContext("test"),
time,
config,
null,
membershipManager,
backgroundEventHandler,
metrics,
streamsRebalanceData
));
assertEquals("Coordinator request manager cannot be null", exception.getMessage());
}
@Test
public void testConstructWithNullMembershipManager() {
final Exception exception = assertThrows(NullPointerException.class, () -> new StreamsGroupHeartbeatRequestManager(
new LogContext("test"),
time,
config,
coordinatorRequestManager,
null,
backgroundEventHandler,
metrics,
streamsRebalanceData
));
assertEquals("Streams membership manager cannot be null", exception.getMessage());
}
@Test
public void testConstructWithNullBackgroundEventHandler() {
final Exception exception = assertThrows(NullPointerException.class, () -> new StreamsGroupHeartbeatRequestManager(
new LogContext("test"),
time,
config,
coordinatorRequestManager,
membershipManager,
null,
metrics,
streamsRebalanceData
));
assertEquals("Background event handler cannot be null", exception.getMessage());
}
@Test
public void testConstructWithNullMetrics() {
final Exception exception = assertThrows(NullPointerException.class, () -> new StreamsGroupHeartbeatRequestManager(
new LogContext("test"),
time,
config,
coordinatorRequestManager,
membershipManager,
backgroundEventHandler,
null,
streamsRebalanceData
));
assertEquals("Metrics cannot be null", exception.getMessage());
}
@Test
public void testConstructWithNullStreamsRebalanceData() {
final Exception exception = assertThrows(NullPointerException.class, () -> new StreamsGroupHeartbeatRequestManager(
new LogContext("test"),
time,
config,
coordinatorRequestManager,
membershipManager,
backgroundEventHandler,
metrics,
null
));
assertEquals("Streams rebalance data cannot be null", exception.getMessage());
}
@Test
public void testNoHeartbeatIfCoordinatorUnknown() {
try (final MockedConstruction<Timer> pollTimerMockedConstruction = mockConstruction(Timer.class)) {
final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager();
final Timer pollTimer = pollTimerMockedConstruction.constructed().get(0);
when(coordinatorRequestManager.coordinator()).thenReturn(Optional.empty());
final NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds());
assertEquals(0, result.unsentRequests.size());
verify(membershipManager).onHeartbeatRequestSkipped();
verify(pollTimer, never()).update();
}
}
@Test
public void testNoHeartbeatIfHeartbeatSkipped() {
try (final MockedConstruction<Timer> pollTimerMockedConstruction = mockConstruction(Timer.class)) {
final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager();
final Timer pollTimer = pollTimerMockedConstruction.constructed().get(0);
when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(coordinatorNode));
when(membershipManager.shouldSkipHeartbeat()).thenReturn(true);
final NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds());
assertEquals(0, result.unsentRequests.size());
verify(membershipManager).onHeartbeatRequestSkipped();
verify(pollTimer, never()).update();
}
}
// Verifies that a fatal error reported by the coordinator request manager is forwarded
// to the application thread as an ErrorEvent carrying the exact same throwable.
@Test
public void testPropagateCoordinatorFatalErrorToApplicationThread() {
    final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager();
    when(coordinatorRequestManager.coordinator()).thenReturn(Optional.empty());
    final Throwable fatalError = new RuntimeException("KABOOM");
    when(coordinatorRequestManager.getAndClearFatalError()).thenReturn(Optional.of(fatalError));

    final NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds());

    assertEquals(0, result.unsentRequests.size());
    verify(membershipManager).onHeartbeatRequestSkipped();
    // Use a captor instead of argThat: on failure it reports the actual event that was
    // published, and it matches the captor style used elsewhere in this test class
    // (see testUnsupportedVersionFailureWhileSending).
    final ArgumentCaptor<ErrorEvent> errorEvent = ArgumentCaptor.forClass(ErrorEvent.class);
    verify(backgroundEventHandler).add(errorEvent.capture());
    assertEquals(fatalError, errorEvent.getValue().error());
}
// Verifies that a heartbeat is sent when the member is LEAVING even though the heartbeat
// interval has not elapsed, and regardless of whether another request is already in flight.
@ParameterizedTest
@ValueSource(booleans = {false, true})
public void testSendingHeartbeatIfMemberIsLeaving(final boolean requestInFlight) {
final long heartbeatIntervalMs = 1234;
try (
// Heartbeat request state reports "cannot send" so only the LEAVING state can force the send.
final MockedConstruction<HeartbeatRequestState> heartbeatRequestStateMockedConstruction = mockConstruction(
HeartbeatRequestState.class,
(mock, context) -> {
when(mock.canSendRequest(time.milliseconds())).thenReturn(false);
when(mock.heartbeatIntervalMs()).thenReturn(heartbeatIntervalMs);
when(mock.requestInFlight()).thenReturn(requestInFlight);
});
final MockedConstruction<Timer> pollTimerMockedConstruction = mockConstruction(Timer.class)
) {
final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager();
final Timer pollTimer = pollTimerMockedConstruction.constructed().get(0);
when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(coordinatorNode));
when(membershipManager.state()).thenReturn(MemberState.LEAVING);
final NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds());
// Exactly one leave heartbeat is generated and the next poll is scheduled after the interval.
assertEquals(1, result.unsentRequests.size());
assertEquals(heartbeatIntervalMs, result.timeUntilNextPollMs);
verify(pollTimer).update(time.milliseconds());
}
}
// Verifies that a heartbeat is sent immediately when the member is JOINING or ACKNOWLEDGING,
// even though the heartbeat interval has not elapsed (no request in flight).
@ParameterizedTest
@EnumSource(value = MemberState.class, names = {"JOINING", "ACKNOWLEDGING"})
public void testSendingHeartbeatIfMemberIsJoiningOrAcknowledging(final MemberState memberState) {
final long heartbeatIntervalMs = 1234;
try (
// Request state reports "cannot send" so only the member state can force the send.
final MockedConstruction<HeartbeatRequestState> heartbeatRequestStateMockedConstruction = mockConstruction(
HeartbeatRequestState.class,
(mock, context) -> {
when(mock.canSendRequest(time.milliseconds())).thenReturn(false);
when(mock.heartbeatIntervalMs()).thenReturn(heartbeatIntervalMs);
});
final MockedConstruction<Timer> pollTimerMockedConstruction = mockConstruction(Timer.class)
) {
final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager();
final Timer pollTimer = pollTimerMockedConstruction.constructed().get(0);
when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(coordinatorNode));
when(membershipManager.state()).thenReturn(memberState);
final NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds());
assertEquals(1, result.unsentRequests.size());
assertEquals(heartbeatIntervalMs, result.timeUntilNextPollMs);
verify(pollTimer).update(time.milliseconds());
}
}
// Verifies that no heartbeat is sent while JOINING or ACKNOWLEDGING if a heartbeat request
// is already in flight; the next poll is scheduled at the time of the next heartbeat.
@ParameterizedTest
@EnumSource(value = MemberState.class, names = {"JOINING", "ACKNOWLEDGING"})
public void testNotSendingHeartbeatIfMemberIsJoiningOrAcknowledgingWhenHeartbeatInFlight(final MemberState memberState) {
final long timeToNextHeartbeatMs = 1234;
try (
final MockedConstruction<HeartbeatRequestState> heartbeatRequestStateMockedConstruction = mockConstruction(
HeartbeatRequestState.class,
(mock, context) -> {
when(mock.canSendRequest(time.milliseconds())).thenReturn(false);
when(mock.timeToNextHeartbeatMs(time.milliseconds())).thenReturn(timeToNextHeartbeatMs);
// In-flight request blocks the immediate send that JOINING/ACKNOWLEDGING would otherwise force.
when(mock.requestInFlight()).thenReturn(true);
});
final MockedConstruction<Timer> pollTimerMockedConstruction = mockConstruction(Timer.class)
) {
final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager();
final Timer pollTimer = pollTimerMockedConstruction.constructed().get(0);
when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(coordinatorNode));
when(membershipManager.state()).thenReturn(memberState);
final NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds());
assertEquals(0, result.unsentRequests.size());
assertEquals(timeToNextHeartbeatMs, result.timeUntilNextPollMs);
verify(pollTimer).update(time.milliseconds());
}
}
// Verifies the regular case: a STABLE member sends a heartbeat as soon as the heartbeat
// request state allows sending.
@Test
public void testSendingHeartbeatIfHeartbeatCanBeSent() {
final long heartbeatIntervalMs = 1234;
try (
final MockedConstruction<HeartbeatRequestState> heartbeatRequestStateMockedConstruction = mockConstruction(
HeartbeatRequestState.class,
(mock, context) -> {
when(mock.canSendRequest(time.milliseconds())).thenReturn(true);
when(mock.heartbeatIntervalMs()).thenReturn(heartbeatIntervalMs);
});
final MockedConstruction<Timer> pollTimerMockedConstruction = mockConstruction(Timer.class)
) {
final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager();
final Timer pollTimer = pollTimerMockedConstruction.constructed().get(0);
when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(coordinatorNode));
when(membershipManager.state()).thenReturn(MemberState.STABLE);
final NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds());
assertEquals(1, result.unsentRequests.size());
assertEquals(heartbeatIntervalMs, result.timeUntilNextPollMs);
verify(pollTimer).update(time.milliseconds());
}
}
// Verifies that no heartbeat is sent when the request state forbids sending; the next poll
// is scheduled at the time of the next heartbeat.
@Test
public void testNotSendingHeartbeatIfHeartbeatCannotBeSent() {
final long timeToNextHeartbeatMs = 1234;
try (
final MockedConstruction<HeartbeatRequestState> heartbeatRequestStateMockedConstruction = mockConstruction(
HeartbeatRequestState.class,
(mock, context) -> {
when(mock.canSendRequest(time.milliseconds())).thenReturn(false);
when(mock.timeToNextHeartbeatMs(time.milliseconds())).thenReturn(timeToNextHeartbeatMs);
});
final MockedConstruction<Timer> pollTimerMockedConstruction = mockConstruction(Timer.class)
) {
final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager();
final Timer pollTimer = pollTimerMockedConstruction.constructed().get(0);
when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(coordinatorNode));
final NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds());
assertEquals(0, result.unsentRequests.size());
assertEquals(timeToNextHeartbeatMs, result.timeUntilNextPollMs);
verify(pollTimer).update(time.milliseconds());
}
}
// Verifies that when the poll timer expires the member is notified, the heartbeat state is
// reset, and a leave heartbeat is generated.
@Test
public void testSendingLeaveHeartbeatIfPollTimerExpired() {
final long heartbeatIntervalMs = 1234;
try (
final MockedConstruction<HeartbeatRequestState> heartbeatRequestStateMockedConstruction = mockConstruction(
HeartbeatRequestState.class,
(mock, context) -> when(mock.heartbeatIntervalMs()).thenReturn(heartbeatIntervalMs));
// Poll timer reports expiry, which triggers the leave path.
final MockedConstruction<Timer> pollTimerMockedConstruction = mockConstruction(
Timer.class,
(mock, context) -> when(mock.isExpired()).thenReturn(true));
final MockedConstruction<StreamsGroupHeartbeatRequestManager.HeartbeatState> heartbeatStateMockedConstruction = mockConstruction(
StreamsGroupHeartbeatRequestManager.HeartbeatState.class)
) {
final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager();
final HeartbeatRequestState heartbeatRequestState = heartbeatRequestStateMockedConstruction.constructed().get(0);
final StreamsGroupHeartbeatRequestManager.HeartbeatState heartbeatState = heartbeatStateMockedConstruction.constructed().get(0);
final Timer pollTimer = pollTimerMockedConstruction.constructed().get(0);
when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(coordinatorNode));
final NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds());
assertEquals(1, result.unsentRequests.size());
assertEquals(heartbeatIntervalMs, result.timeUntilNextPollMs);
verify(pollTimer).update(time.milliseconds());
// Expiry must reset both the request state and the heartbeat state.
verify(membershipManager).onPollTimerExpired();
verify(heartbeatRequestState).reset();
verify(heartbeatState).reset();
}
}
// Verifies that an expired poll timer does NOT trigger the leave path again when the member
// is already in the process of leaving the group.
@Test
public void testNotSendingLeaveHeartbeatIfPollTimerExpiredAndMemberIsLeaving() {
final long timeToNextHeartbeatMs = 1234;
try (
final MockedConstruction<HeartbeatRequestState> heartbeatRequestStateMockedConstruction = mockConstruction(
HeartbeatRequestState.class,
(mock, context) -> when(mock.timeToNextHeartbeatMs(time.milliseconds())).thenReturn(timeToNextHeartbeatMs));
final MockedConstruction<Timer> pollTimerMockedConstruction = mockConstruction(
Timer.class,
(mock, context) -> when(mock.isExpired()).thenReturn(true));
final MockedConstruction<StreamsGroupHeartbeatRequestManager.HeartbeatState> heartbeatStateMockedConstruction = mockConstruction(
StreamsGroupHeartbeatRequestManager.HeartbeatState.class)
) {
final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager();
final HeartbeatRequestState heartbeatRequestState = heartbeatRequestStateMockedConstruction.constructed().get(0);
final StreamsGroupHeartbeatRequestManager.HeartbeatState heartbeatState = heartbeatStateMockedConstruction.constructed().get(0);
final Timer pollTimer = pollTimerMockedConstruction.constructed().get(0);
when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(coordinatorNode));
// Member is already leaving, so the expiry handling must be skipped.
when(membershipManager.isLeavingGroup()).thenReturn(true);
when(membershipManager.state()).thenReturn(MemberState.PREPARE_LEAVING);
final NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds());
assertEquals(0, result.unsentRequests.size());
assertEquals(timeToNextHeartbeatMs, result.timeUntilNextPollMs);
verify(pollTimer).update(time.milliseconds());
verify(membershipManager, never()).onPollTimerExpired();
verify(heartbeatRequestState, never()).reset();
verify(heartbeatState, never()).reset();
}
}
@Test
public void testSendingLeaveHeartbeatRequestWhenPollTimerExpired() {
try (
final MockedConstruction<HeartbeatRequestState> heartbeatRequestStateMockedConstruction = mockConstruction(
HeartbeatRequestState.class,
(mock, context) -> when(mock.canSendRequest(time.milliseconds())).thenReturn(true));
final MockedConstruction<Timer> pollTimerMockedConstruction = mockConstruction(
Timer.class,
(mock, context) -> when(mock.isExpired()).thenReturn(true))
) {
final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager();
final HeartbeatRequestState heartbeatRequestState = heartbeatRequestStateMockedConstruction.constructed().get(0);
when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(coordinatorNode));
when(membershipManager.groupId()).thenReturn(GROUP_ID);
when(membershipManager.memberId()).thenReturn(MEMBER_ID);
when(membershipManager.memberEpoch()).thenReturn(LEAVE_GROUP_MEMBER_EPOCH);
when(membershipManager.groupInstanceId()).thenReturn(Optional.of(INSTANCE_ID));
final NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds());
assertEquals(0, result.timeUntilNextPollMs);
assertEquals(1, result.unsentRequests.size());
assertEquals(Optional.of(coordinatorNode), result.unsentRequests.get(0).node());
NetworkClientDelegate.UnsentRequest networkRequest = result.unsentRequests.get(0);
StreamsGroupHeartbeatRequest streamsRequest = (StreamsGroupHeartbeatRequest) networkRequest.requestBuilder().build();
assertEquals(GROUP_ID, streamsRequest.data().groupId());
assertEquals(MEMBER_ID, streamsRequest.data().memberId());
assertEquals(LEAVE_GROUP_MEMBER_EPOCH, streamsRequest.data().memberEpoch());
assertEquals(INSTANCE_ID, streamsRequest.data().instanceId());
verify(heartbeatRequestState).onSendAttempt(time.milliseconds());
verify(membershipManager).onHeartbeatRequestGenerated();
final ClientResponse response = buildClientResponse();
networkRequest.handler().onComplete(response);
verify(heartbeatRequestState, never()).updateHeartbeatIntervalMs(anyLong());
verify(heartbeatRequestState, never()).onSuccessfulAttempt(anyLong());
verify(membershipManager, never()).onHeartbeatSuccess(any());
}
}
@Test
public void testSendingHeartbeatRequest() {
try (
final MockedConstruction<HeartbeatRequestState> heartbeatRequestStateMockedConstruction = mockConstruction(
HeartbeatRequestState.class,
(mock, context) -> when(mock.canSendRequest(time.milliseconds())).thenReturn(true))
) {
final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager();
final HeartbeatRequestState heartbeatRequestState = heartbeatRequestStateMockedConstruction.constructed().get(0);
when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(coordinatorNode));
when(membershipManager.groupId()).thenReturn(GROUP_ID);
when(membershipManager.memberId()).thenReturn(MEMBER_ID);
when(membershipManager.memberEpoch()).thenReturn(MEMBER_EPOCH);
when(membershipManager.groupInstanceId()).thenReturn(Optional.of(INSTANCE_ID));
final NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds());
assertEquals(0, result.timeUntilNextPollMs);
assertEquals(1, result.unsentRequests.size());
assertEquals(Optional.of(coordinatorNode), result.unsentRequests.get(0).node());
NetworkClientDelegate.UnsentRequest networkRequest = result.unsentRequests.get(0);
StreamsGroupHeartbeatRequest streamsRequest = (StreamsGroupHeartbeatRequest) networkRequest.requestBuilder().build();
assertEquals(GROUP_ID, streamsRequest.data().groupId());
assertEquals(MEMBER_ID, streamsRequest.data().memberId());
assertEquals(MEMBER_EPOCH, streamsRequest.data().memberEpoch());
assertEquals(INSTANCE_ID, streamsRequest.data().instanceId());
verify(heartbeatRequestState).onSendAttempt(time.milliseconds());
verify(membershipManager).onHeartbeatRequestGenerated();
time.sleep(2000);
assertEquals(
2.0,
metrics.metric(metrics.metricName("last-heartbeat-seconds-ago", "consumer-coordinator-metrics")).metricValue()
);
final ClientResponse response = buildClientResponse();
networkRequest.handler().onComplete(response);
verify(membershipManager).onHeartbeatSuccess((StreamsGroupHeartbeatResponse) response.responseBody());
verify(heartbeatRequestState).updateHeartbeatIntervalMs(RECEIVED_HEARTBEAT_INTERVAL_MS);
verify(heartbeatRequestState).onSuccessfulAttempt(networkRequest.handler().completionTimeMs());
verify(heartbeatRequestState).resetTimer();
final List<TopicPartition> topicPartitions = streamsRebalanceData.partitionsByHost()
.get(new StreamsRebalanceData.HostInfo(
ENDPOINT_TO_PARTITIONS.get(0).userEndpoint().host(),
ENDPOINT_TO_PARTITIONS.get(0).userEndpoint().port())
).activePartitions();
assertEquals(ENDPOINT_TO_PARTITIONS.get(0).activePartitions().get(0).topic(), topicPartitions.get(0).topic());
assertEquals(ENDPOINT_TO_PARTITIONS.get(0).activePartitions().get(0).partitions().get(0), topicPartitions.get(0).partition());
assertEquals(
1.0,
metrics.metric(metrics.metricName("heartbeat-total", "consumer-coordinator-metrics")).metricValue()
);
}
}
@ParameterizedTest
@ValueSource(booleans = {false, true})
public void testBuildingHeartbeatRequestFieldsThatAreAlwaysSent(final boolean instanceIdPresent) {
when(membershipManager.groupId()).thenReturn(GROUP_ID);
when(membershipManager.memberId()).thenReturn(MEMBER_ID);
when(membershipManager.memberEpoch()).thenReturn(MEMBER_EPOCH);
when(membershipManager.groupInstanceId()).thenReturn(instanceIdPresent ? Optional.of(INSTANCE_ID) : Optional.empty());
final StreamsGroupHeartbeatRequestManager.HeartbeatState heartbeatState =
new StreamsGroupHeartbeatRequestManager.HeartbeatState(
streamsRebalanceData,
membershipManager,
1000
);
StreamsGroupHeartbeatRequestData requestData1 = heartbeatState.buildRequestData();
assertEquals(GROUP_ID, requestData1.groupId());
assertEquals(MEMBER_ID, requestData1.memberId());
assertEquals(MEMBER_EPOCH, requestData1.memberEpoch());
if (instanceIdPresent) {
assertEquals(INSTANCE_ID, requestData1.instanceId());
} else {
assertNull(requestData1.instanceId());
}
StreamsGroupHeartbeatRequestData requestData2 = heartbeatState.buildRequestData();
assertEquals(GROUP_ID, requestData2.groupId());
assertEquals(MEMBER_ID, requestData2.memberId());
assertEquals(MEMBER_EPOCH, requestData2.memberEpoch());
if (instanceIdPresent) {
assertEquals(INSTANCE_ID, requestData2.instanceId());
} else {
assertNull(requestData2.instanceId());
}
}
@ParameterizedTest
@MethodSource("provideNonJoiningStates")
public void testBuildingHeartbeatRequestTopologySentWhenJoining(final MemberState memberState) {
final StreamsGroupHeartbeatRequestManager.HeartbeatState heartbeatState =
new StreamsGroupHeartbeatRequestManager.HeartbeatState(
streamsRebalanceData,
membershipManager,
1000
);
when(membershipManager.state()).thenReturn(MemberState.JOINING);
StreamsGroupHeartbeatRequestData requestData1 = heartbeatState.buildRequestData();
assertEquals(streamsRebalanceData.topologyEpoch(), requestData1.topology().epoch());
final List<StreamsGroupHeartbeatRequestData.Subtopology> subtopologies = requestData1.topology().subtopologies();
assertEquals(2, subtopologies.size());
final StreamsGroupHeartbeatRequestData.Subtopology subtopology1 = subtopologies.get(0);
assertEquals(SUBTOPOLOGY_NAME_1, subtopology1.subtopologyId());
assertEquals(List.of(SOURCE_TOPIC_1, SOURCE_TOPIC_2), subtopology1.sourceTopics());
assertEquals(List.of(REPARTITION_SINK_TOPIC_1, REPARTITION_SINK_TOPIC_2, REPARTITION_SINK_TOPIC_3), subtopology1.repartitionSinkTopics());
assertEquals(REPARTITION_SOURCE_TOPICS.size(), subtopology1.repartitionSourceTopics().size());
subtopology1.repartitionSourceTopics().forEach(topicInfo -> {
final StreamsRebalanceData.TopicInfo repartitionTopic = REPARTITION_SOURCE_TOPICS.get(topicInfo.name());
assertEquals(repartitionTopic.numPartitions().get(), topicInfo.partitions());
assertEquals(repartitionTopic.replicationFactor().get(), topicInfo.replicationFactor());
assertEquals(repartitionTopic.topicConfigs().size(), topicInfo.topicConfigs().size());
assertTrue(isSorted(topicInfo.topicConfigs(), Comparator.comparing(StreamsGroupHeartbeatRequestData.KeyValue::key)));
});
assertEquals(CHANGELOG_TOPICS.size(), subtopology1.stateChangelogTopics().size());
subtopology1.stateChangelogTopics().forEach(topicInfo -> {
assertTrue(CHANGELOG_TOPICS.containsKey(topicInfo.name()));
assertEquals(0, topicInfo.partitions());
final StreamsRebalanceData.TopicInfo changelogTopic = CHANGELOG_TOPICS.get(topicInfo.name());
assertEquals(changelogTopic.replicationFactor().get(), topicInfo.replicationFactor());
assertEquals(changelogTopic.topicConfigs().size(), topicInfo.topicConfigs().size());
assertTrue(isSorted(topicInfo.topicConfigs(), Comparator.comparing(StreamsGroupHeartbeatRequestData.KeyValue::key)));
});
assertEquals(2, subtopology1.copartitionGroups().size());
final StreamsGroupHeartbeatRequestData.CopartitionGroup expectedCopartitionGroupData1 =
new StreamsGroupHeartbeatRequestData.CopartitionGroup()
.setRepartitionSourceTopics(Collections.singletonList((short) 0))
.setSourceTopics(Collections.singletonList((short) 1));
final StreamsGroupHeartbeatRequestData.CopartitionGroup expectedCopartitionGroupData2 =
new StreamsGroupHeartbeatRequestData.CopartitionGroup()
.setRepartitionSourceTopics(Collections.singletonList((short) 1))
.setSourceTopics(Collections.singletonList((short) 0));
assertTrue(subtopology1.copartitionGroups().contains(expectedCopartitionGroupData1));
assertTrue(subtopology1.copartitionGroups().contains(expectedCopartitionGroupData2));
final StreamsGroupHeartbeatRequestData.Subtopology subtopology2 = subtopologies.get(1);
assertEquals(SUBTOPOLOGY_NAME_2, subtopology2.subtopologyId());
assertEquals(List.of(SOURCE_TOPIC_3), subtopology2.sourceTopics());
assertEquals(Collections.emptyList(), subtopology2.repartitionSinkTopics());
assertEquals(Collections.emptyList(), subtopology2.repartitionSourceTopics());
assertEquals(1, subtopology2.stateChangelogTopics().size());
assertEquals(CHANGELOG_TOPIC_4, subtopology2.stateChangelogTopics().get(0).name());
assertEquals(0, subtopology2.stateChangelogTopics().get(0).partitions());
assertEquals(1, subtopology2.stateChangelogTopics().get(0).replicationFactor());
assertEquals(0, subtopology2.stateChangelogTopics().get(0).topicConfigs().size());
when(membershipManager.state()).thenReturn(memberState);
StreamsGroupHeartbeatRequestData nonJoiningRequestData = heartbeatState.buildRequestData();
assertNull(nonJoiningRequestData.topology());
}
private <V> boolean isSorted(List<V> collection, Comparator<V> comparator) {
for (int i = 1; i < collection.size(); i++) {
if (comparator.compare(collection.get(i - 1), collection.get(i)) > 0) {
return false;
}
}
return true;
}
@ParameterizedTest
@MethodSource("provideNonJoiningStates")
public void testBuildingHeartbeatRequestRebalanceTimeoutSentWhenJoining(final MemberState memberState) {
final int rebalanceTimeoutMs = 1234;
final StreamsGroupHeartbeatRequestManager.HeartbeatState heartbeatState =
new StreamsGroupHeartbeatRequestManager.HeartbeatState(
streamsRebalanceData,
membershipManager,
rebalanceTimeoutMs
);
when(membershipManager.state()).thenReturn(MemberState.JOINING);
StreamsGroupHeartbeatRequestData requestData1 = heartbeatState.buildRequestData();
assertEquals(rebalanceTimeoutMs, requestData1.rebalanceTimeoutMs());
when(membershipManager.state()).thenReturn(memberState);
StreamsGroupHeartbeatRequestData nonJoiningRequestData = heartbeatState.buildRequestData();
assertEquals(-1, nonJoiningRequestData.rebalanceTimeoutMs());
}
@ParameterizedTest
@MethodSource("provideNonJoiningStates")
public void testBuildingHeartbeatProcessIdSentWhenJoining(final MemberState memberState) {
final StreamsGroupHeartbeatRequestManager.HeartbeatState heartbeatState =
new StreamsGroupHeartbeatRequestManager.HeartbeatState(
streamsRebalanceData,
membershipManager,
1234
);
when(membershipManager.state()).thenReturn(MemberState.JOINING);
StreamsGroupHeartbeatRequestData requestData1 = heartbeatState.buildRequestData();
assertEquals(PROCESS_ID.toString(), requestData1.processId());
when(membershipManager.state()).thenReturn(memberState);
StreamsGroupHeartbeatRequestData nonJoiningRequestData = heartbeatState.buildRequestData();
assertNull(nonJoiningRequestData.processId());
}
@ParameterizedTest
@MethodSource("provideNonJoiningStates")
public void testBuildingHeartbeatEndpointSentWhenJoining(final MemberState memberState) {
final StreamsGroupHeartbeatRequestManager.HeartbeatState heartbeatState =
new StreamsGroupHeartbeatRequestManager.HeartbeatState(
streamsRebalanceData,
membershipManager,
1234
);
when(membershipManager.state()).thenReturn(MemberState.JOINING);
StreamsGroupHeartbeatRequestData joiningRequestData = heartbeatState.buildRequestData();
assertEquals(ENDPOINT.host(), joiningRequestData.userEndpoint().host());
assertEquals(ENDPOINT.port(), joiningRequestData.userEndpoint().port());
when(membershipManager.state()).thenReturn(memberState);
StreamsGroupHeartbeatRequestData nonJoiningRequestData = heartbeatState.buildRequestData();
assertNull(nonJoiningRequestData.userEndpoint());
}
@ParameterizedTest
@MethodSource("provideNonJoiningStates")
public void testBuildingHeartbeatClientTagsSentWhenJoining(final MemberState memberState) {
final StreamsGroupHeartbeatRequestManager.HeartbeatState heartbeatState =
new StreamsGroupHeartbeatRequestManager.HeartbeatState(
streamsRebalanceData,
membershipManager,
1234
);
when(membershipManager.state()).thenReturn(MemberState.JOINING);
StreamsGroupHeartbeatRequestData joiningRequestData = heartbeatState.buildRequestData();
assertEquals(CLIENT_TAG_1, joiningRequestData.clientTags().get(0).key());
assertEquals(VALUE_1, joiningRequestData.clientTags().get(0).value());
when(membershipManager.state()).thenReturn(memberState);
StreamsGroupHeartbeatRequestData nonJoiningRequestData = heartbeatState.buildRequestData();
assertNull(nonJoiningRequestData.clientTags());
}
@ParameterizedTest
@MethodSource("provideNonJoiningStates")
public void testBuildingHeartbeatAssignmentSentWhenChanged(final MemberState memberState) {
final StreamsGroupHeartbeatRequestManager.HeartbeatState heartbeatState =
new StreamsGroupHeartbeatRequestManager.HeartbeatState(
streamsRebalanceData,
membershipManager,
1234
);
when(membershipManager.state()).thenReturn(MemberState.JOINING);
StreamsGroupHeartbeatRequestData joiningRequestData = heartbeatState.buildRequestData();
assertEquals(List.of(), joiningRequestData.activeTasks());
assertEquals(List.of(), joiningRequestData.standbyTasks());
assertEquals(List.of(), joiningRequestData.warmupTasks());
when(membershipManager.state()).thenReturn(memberState);
streamsRebalanceData.setReconciledAssignment(
new StreamsRebalanceData.Assignment(
Set.of(
new StreamsRebalanceData.TaskId(SUBTOPOLOGY_NAME_1, 0),
new StreamsRebalanceData.TaskId(SUBTOPOLOGY_NAME_1, 1),
new StreamsRebalanceData.TaskId(SUBTOPOLOGY_NAME_2, 2)
),
Set.of(
new StreamsRebalanceData.TaskId(SUBTOPOLOGY_NAME_1, 2)
),
Set.of(
new StreamsRebalanceData.TaskId(SUBTOPOLOGY_NAME_1, 3),
new StreamsRebalanceData.TaskId(SUBTOPOLOGY_NAME_1, 4),
new StreamsRebalanceData.TaskId(SUBTOPOLOGY_NAME_1, 5)
)
)
);
StreamsGroupHeartbeatRequestData firstNonJoiningRequestData = heartbeatState.buildRequestData();
assertTaskIdsEquals(
List.of(
new StreamsGroupHeartbeatRequestData.TaskIds()
.setSubtopologyId(SUBTOPOLOGY_NAME_1)
.setPartitions(List.of(0, 1)),
new StreamsGroupHeartbeatRequestData.TaskIds()
.setSubtopologyId(SUBTOPOLOGY_NAME_2)
.setPartitions(List.of(2))
),
firstNonJoiningRequestData.activeTasks()
);
assertTaskIdsEquals(
List.of(
new StreamsGroupHeartbeatRequestData.TaskIds()
.setSubtopologyId(SUBTOPOLOGY_NAME_1)
.setPartitions(List.of(2))
),
firstNonJoiningRequestData.standbyTasks()
);
assertTaskIdsEquals(
List.of(
new StreamsGroupHeartbeatRequestData.TaskIds()
.setSubtopologyId(SUBTOPOLOGY_NAME_1)
.setPartitions(List.of(3, 4, 5))
),
firstNonJoiningRequestData.warmupTasks()
);
StreamsGroupHeartbeatRequestData nonJoiningRequestDataWithoutChanges = heartbeatState.buildRequestData();
assertNull(nonJoiningRequestDataWithoutChanges.activeTasks());
assertNull(nonJoiningRequestDataWithoutChanges.standbyTasks());
assertNull(nonJoiningRequestDataWithoutChanges.warmupTasks());
streamsRebalanceData.setReconciledAssignment(
new StreamsRebalanceData.Assignment(
Set.of(
new StreamsRebalanceData.TaskId(SUBTOPOLOGY_NAME_1, 0)
),
Set.of(
new StreamsRebalanceData.TaskId(SUBTOPOLOGY_NAME_1, 2)
),
Set.of(
)
)
);
StreamsGroupHeartbeatRequestData nonJoiningRequestDataWithChanges = heartbeatState.buildRequestData();
assertTaskIdsEquals(
List.of(
new StreamsGroupHeartbeatRequestData.TaskIds()
.setSubtopologyId(SUBTOPOLOGY_NAME_1)
.setPartitions(List.of(0))
),
nonJoiningRequestDataWithChanges.activeTasks()
);
assertTaskIdsEquals(
List.of(
new StreamsGroupHeartbeatRequestData.TaskIds()
.setSubtopologyId(SUBTOPOLOGY_NAME_1)
.setPartitions(List.of(2))
),
nonJoiningRequestDataWithChanges.standbyTasks()
);
assertEquals(List.of(), nonJoiningRequestDataWithChanges.warmupTasks());
}
@ParameterizedTest
@MethodSource("provideNonJoiningStates")
public void testResettingHeartbeatState(final MemberState memberState) {
when(membershipManager.groupId()).thenReturn(GROUP_ID);
when(membershipManager.memberId()).thenReturn(MEMBER_ID);
when(membershipManager.memberEpoch()).thenReturn(MEMBER_EPOCH);
when(membershipManager.groupInstanceId()).thenReturn(Optional.of(INSTANCE_ID));
final StreamsGroupHeartbeatRequestManager.HeartbeatState heartbeatState =
new StreamsGroupHeartbeatRequestManager.HeartbeatState(
streamsRebalanceData,
membershipManager,
1234
);
when(membershipManager.state()).thenReturn(memberState);
streamsRebalanceData.setReconciledAssignment(
new StreamsRebalanceData.Assignment(
Set.of(
new StreamsRebalanceData.TaskId(SUBTOPOLOGY_NAME_1, 0),
new StreamsRebalanceData.TaskId(SUBTOPOLOGY_NAME_1, 1),
new StreamsRebalanceData.TaskId(SUBTOPOLOGY_NAME_2, 2)
),
Set.of(
new StreamsRebalanceData.TaskId(SUBTOPOLOGY_NAME_1, 2)
),
Set.of(
new StreamsRebalanceData.TaskId(SUBTOPOLOGY_NAME_1, 3),
new StreamsRebalanceData.TaskId(SUBTOPOLOGY_NAME_1, 4),
new StreamsRebalanceData.TaskId(SUBTOPOLOGY_NAME_1, 5)
)
)
);
StreamsGroupHeartbeatRequestData requestDataBeforeReset = heartbeatState.buildRequestData();
assertEquals(GROUP_ID, requestDataBeforeReset.groupId());
assertEquals(MEMBER_ID, requestDataBeforeReset.memberId());
assertEquals(MEMBER_EPOCH, requestDataBeforeReset.memberEpoch());
assertEquals(INSTANCE_ID, requestDataBeforeReset.instanceId());
assertFalse(requestDataBeforeReset.activeTasks().isEmpty());
assertFalse(requestDataBeforeReset.standbyTasks().isEmpty());
assertFalse(requestDataBeforeReset.warmupTasks().isEmpty());
heartbeatState.reset();
StreamsGroupHeartbeatRequestData requestDataAfterReset = heartbeatState.buildRequestData();
assertEquals(GROUP_ID, requestDataAfterReset.groupId());
assertEquals(MEMBER_ID, requestDataAfterReset.memberId());
assertEquals(MEMBER_EPOCH, requestDataAfterReset.memberEpoch());
assertEquals(INSTANCE_ID, requestDataAfterReset.instanceId());
assertEquals(requestDataBeforeReset.activeTasks(), requestDataAfterReset.activeTasks());
assertEquals(requestDataBeforeReset.standbyTasks(), requestDataAfterReset.standbyTasks());
assertEquals(requestDataBeforeReset.warmupTasks(), requestDataAfterReset.warmupTasks());
}
private static Stream<Arguments> provideNonJoiningStates() {
return Stream.of(
Arguments.of(MemberState.ACKNOWLEDGING),
Arguments.of(MemberState.RECONCILING),
Arguments.of(MemberState.STABLE),
Arguments.of(MemberState.PREPARE_LEAVING),
Arguments.of(MemberState.LEAVING)
);
}
@ParameterizedTest
@EnumSource(
value = MemberState.class,
names = {"JOINING", "ACKNOWLEDGING", "RECONCILING", "STABLE", "PREPARE_LEAVING", "LEAVING"}
)
public void testBuildingHeartbeatShutdownRequested(final MemberState memberState) {
final StreamsGroupHeartbeatRequestManager.HeartbeatState heartbeatState =
new StreamsGroupHeartbeatRequestManager.HeartbeatState(
streamsRebalanceData,
membershipManager,
1234
);
when(membershipManager.state()).thenReturn(memberState);
StreamsGroupHeartbeatRequestData requestDataWithoutShutdownRequest = heartbeatState.buildRequestData();
assertFalse(requestDataWithoutShutdownRequest.shutdownApplication());
streamsRebalanceData.requestShutdown();
StreamsGroupHeartbeatRequestData requestDataWithShutdownRequest = heartbeatState.buildRequestData();
assertTrue(requestDataWithShutdownRequest.shutdownApplication());
}
@Test
public void testCoordinatorDisconnectFailureWhileSending() {
try (
final MockedConstruction<HeartbeatRequestState> heartbeatRequestStateMockedConstruction = mockConstruction(
HeartbeatRequestState.class,
(mock, context) -> when(mock.canSendRequest(time.milliseconds())).thenReturn(true));
final MockedConstruction<StreamsGroupHeartbeatRequestManager.HeartbeatState> heartbeatStateMockedConstruction = mockConstruction(
StreamsGroupHeartbeatRequestManager.HeartbeatState.class)
) {
final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager();
final StreamsGroupHeartbeatRequestManager.HeartbeatState heartbeatState = heartbeatStateMockedConstruction.constructed().get(0);
when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(coordinatorNode));
final NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds());
assertEquals(1, result.unsentRequests.size());
final NetworkClientDelegate.UnsentRequest networkRequest = result.unsentRequests.get(0);
time.sleep(1234);
final long completionTimeMs = time.milliseconds();
final DisconnectException disconnectException = DisconnectException.INSTANCE;
networkRequest.handler().onFailure(completionTimeMs, disconnectException);
final HeartbeatRequestState heartbeatRequestState = heartbeatRequestStateMockedConstruction.constructed().get(0);
verify(heartbeatRequestState).onFailedAttempt(completionTimeMs);
verify(heartbeatState).reset();
verify(coordinatorRequestManager).handleCoordinatorDisconnect(disconnectException, completionTimeMs);
verify(membershipManager).onRetriableHeartbeatFailure();
}
}
// An UnsupportedVersionException on an in-flight heartbeat is fatal: the failure is
// surfaced as an ErrorEvent carrying a descriptive message about the STREAMS protocol
// version range, and the member transitions to the fatal state.
@Test
public void testUnsupportedVersionFailureWhileSending() {
try (
final MockedConstruction<HeartbeatRequestState> heartbeatRequestStateMockedConstruction = mockConstruction(
HeartbeatRequestState.class,
(mock, context) -> when(mock.canSendRequest(time.milliseconds())).thenReturn(true));
final MockedConstruction<StreamsGroupHeartbeatRequestManager.HeartbeatState> heartbeatStateMockedConstruction = mockConstruction(
StreamsGroupHeartbeatRequestManager.HeartbeatState.class)
) {
final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager();
final StreamsGroupHeartbeatRequestManager.HeartbeatState heartbeatState = heartbeatStateMockedConstruction.constructed().get(0);
when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(coordinatorNode));
final NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds());
assertEquals(1, result.unsentRequests.size());
final NetworkClientDelegate.UnsentRequest networkRequest = result.unsentRequests.get(0);
time.sleep(1234);
final long completionTimeMs = time.milliseconds();
final UnsupportedVersionException unsupportedVersionException = new UnsupportedVersionException("message");
networkRequest.handler().onFailure(completionTimeMs, unsupportedVersionException);
final HeartbeatRequestState heartbeatRequestState = heartbeatRequestStateMockedConstruction.constructed().get(0);
verify(heartbeatRequestState).onFailedAttempt(completionTimeMs);
verify(heartbeatState).reset();
verify(membershipManager).onFatalHeartbeatFailure();
ArgumentCaptor<ErrorEvent> errorEvent = ArgumentCaptor.forClass(ErrorEvent.class);
verify(backgroundEventHandler).add(errorEvent.capture());
// The original exception message is replaced with a user-actionable explanation.
assertEquals(
"The cluster does not support the STREAMS group " +
"protocol or does not support the versions of the STREAMS group protocol used by this client " +
"(used versions: " + StreamsGroupHeartbeatRequestData.LOWEST_SUPPORTED_VERSION + " to " +
StreamsGroupHeartbeatRequestData.HIGHEST_SUPPORTED_VERSION + ").",
errorEvent.getValue().error().getMessage()
);
assertInstanceOf(UnsupportedVersionException.class, errorEvent.getValue().error());
verify(membershipManager).transitionToFatal();
}
}
// Any unexpected RuntimeException on an in-flight heartbeat is fatal: the original
// exception is propagated verbatim in an ErrorEvent and the member transitions to fatal.
@Test
public void testFatalFailureWhileSending() {
try (
final MockedConstruction<HeartbeatRequestState> heartbeatRequestStateMockedConstruction = mockConstruction(
HeartbeatRequestState.class,
(mock, context) -> when(mock.canSendRequest(time.milliseconds())).thenReturn(true));
final MockedConstruction<StreamsGroupHeartbeatRequestManager.HeartbeatState> heartbeatStateMockedConstruction = mockConstruction(
StreamsGroupHeartbeatRequestManager.HeartbeatState.class)
) {
final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager();
final StreamsGroupHeartbeatRequestManager.HeartbeatState heartbeatState = heartbeatStateMockedConstruction.constructed().get(0);
when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(coordinatorNode));
final NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds());
assertEquals(1, result.unsentRequests.size());
final NetworkClientDelegate.UnsentRequest networkRequest = result.unsentRequests.get(0);
time.sleep(1234);
final long completionTimeMs = time.milliseconds();
final RuntimeException fatalException = new RuntimeException();
networkRequest.handler().onFailure(completionTimeMs, fatalException);
final HeartbeatRequestState heartbeatRequestState = heartbeatRequestStateMockedConstruction.constructed().get(0);
verify(heartbeatRequestState).onFailedAttempt(completionTimeMs);
verify(heartbeatState).reset();
verify(membershipManager).onFatalHeartbeatFailure();
ArgumentCaptor<ErrorEvent> errorEvent = ArgumentCaptor.forClass(ErrorEvent.class);
verify(backgroundEventHandler).add(errorEvent.capture());
// The exact exception instance is forwarded, not wrapped.
assertEquals(fatalException, errorEvent.getValue().error());
verify(membershipManager).transitionToFatal();
}
}
// NOT_COORDINATOR / COORDINATOR_NOT_AVAILABLE responses mark the coordinator unknown
// (propagating the response's error message) and reset both heartbeat state and the
// request backoff state so a new coordinator can be discovered and heartbeats resumed.
// NOTE(review): coordinator-movement errors are normally retriable, yet this test expects
// onFatalHeartbeatFailure() while the disconnect test above expects
// onRetriableHeartbeatFailure() — confirm against the manager's error handling.
@ParameterizedTest
@EnumSource(
value = Errors.class,
names = {"NOT_COORDINATOR", "COORDINATOR_NOT_AVAILABLE"}
)
public void testNotCoordinatorAndCoordinatorNotAvailableErrorResponse(final Errors error) {
try (
final MockedConstruction<HeartbeatRequestState> heartbeatRequestStateMockedConstruction = mockConstruction(
HeartbeatRequestState.class,
(mock, context) -> when(mock.canSendRequest(time.milliseconds())).thenReturn(true));
final MockedConstruction<StreamsGroupHeartbeatRequestManager.HeartbeatState> heartbeatStateMockedConstruction = mockConstruction(
StreamsGroupHeartbeatRequestManager.HeartbeatState.class)
) {
final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager();
final StreamsGroupHeartbeatRequestManager.HeartbeatState heartbeatState = heartbeatStateMockedConstruction.constructed().get(0);
final HeartbeatRequestState heartbeatRequestState = heartbeatRequestStateMockedConstruction.constructed().get(0);
when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(coordinatorNode));
final NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds());
assertEquals(1, result.unsentRequests.size());
final NetworkClientDelegate.UnsentRequest networkRequest = result.unsentRequests.get(0);
time.sleep(1234);
final long completionTimeMs = time.milliseconds();
final ClientResponse response = buildClientErrorResponse(error, "error message");
networkRequest.handler().onComplete(response);
verify(coordinatorRequestManager).markCoordinatorUnknown(
((StreamsGroupHeartbeatResponse) response.responseBody()).data().errorMessage(),
completionTimeMs
);
verify(heartbeatState).reset();
verify(heartbeatRequestState).reset();
verify(membershipManager).onFatalHeartbeatFailure();
}
}
// COORDINATOR_LOAD_IN_PROGRESS resets the heartbeat state but deliberately does NOT
// reset the request state, so the existing retry backoff is preserved while the
// coordinator finishes loading.
// NOTE(review): this error is normally retriable; the test expects
// onFatalHeartbeatFailure() — confirm against the manager's error handling.
@Test
public void testCoordinatorLoadInProgressErrorResponse() {
try (
final MockedConstruction<HeartbeatRequestState> heartbeatRequestStateMockedConstruction = mockConstruction(
HeartbeatRequestState.class,
(mock, context) -> when(mock.canSendRequest(time.milliseconds())).thenReturn(true));
final MockedConstruction<StreamsGroupHeartbeatRequestManager.HeartbeatState> heartbeatStateMockedConstruction = mockConstruction(
StreamsGroupHeartbeatRequestManager.HeartbeatState.class)
) {
final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager();
final StreamsGroupHeartbeatRequestManager.HeartbeatState heartbeatState = heartbeatStateMockedConstruction.constructed().get(0);
final HeartbeatRequestState heartbeatRequestState = heartbeatRequestStateMockedConstruction.constructed().get(0);
when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(coordinatorNode));
final NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds());
assertEquals(1, result.unsentRequests.size());
final NetworkClientDelegate.UnsentRequest networkRequest = result.unsentRequests.get(0);
final ClientResponse response = buildClientErrorResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS, "message");
networkRequest.handler().onComplete(response);
verify(heartbeatState).reset();
verify(membershipManager).onFatalHeartbeatFailure();
// Backoff must be kept: the request state is not reset for load-in-progress.
verify(heartbeatRequestState, never()).reset();
}
}
// GROUP_AUTHORIZATION_FAILED is fatal: an ERROR log naming the group is emitted, a
// GroupAuthorizationException for the group id is surfaced as an ErrorEvent, and the
// member transitions to the fatal state.
@Test
public void testGroupAuthorizationFailedErrorResponse() {
try (
final MockedConstruction<HeartbeatRequestState> heartbeatRequestStateMockedConstruction = mockConstruction(
HeartbeatRequestState.class,
(mock, context) -> when(mock.canSendRequest(time.milliseconds())).thenReturn(true));
final MockedConstruction<StreamsGroupHeartbeatRequestManager.HeartbeatState> heartbeatStateMockedConstruction = mockConstruction(
StreamsGroupHeartbeatRequestManager.HeartbeatState.class);
final LogCaptureAppender logAppender = LogCaptureAppender.createAndRegister(StreamsGroupHeartbeatRequestManager.class)
) {
final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager();
final StreamsGroupHeartbeatRequestManager.HeartbeatState heartbeatState = heartbeatStateMockedConstruction.constructed().get(0);
when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(coordinatorNode));
when(membershipManager.groupId()).thenReturn(GROUP_ID);
final NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds());
assertEquals(1, result.unsentRequests.size());
final NetworkClientDelegate.UnsentRequest networkRequest = result.unsentRequests.get(0);
final ClientResponse response = buildClientErrorResponse(Errors.GROUP_AUTHORIZATION_FAILED, "message");
networkRequest.handler().onComplete(response);
assertTrue(logAppender.getMessages("ERROR").stream()
.anyMatch(m -> m.contains("StreamsGroupHeartbeatRequest failed due to group authorization failure: " +
"Not authorized to access group: " + GROUP_ID)));
verify(heartbeatState).reset();
ArgumentCaptor<ErrorEvent> errorEvent = ArgumentCaptor.forClass(ErrorEvent.class);
verify(backgroundEventHandler).add(errorEvent.capture());
assertEquals(
GroupAuthorizationException.forGroupId(GROUP_ID).getMessage(),
errorEvent.getValue().error().getMessage()
);
assertInstanceOf(GroupAuthorizationException.class, errorEvent.getValue().error());
verify(membershipManager).transitionToFatal();
verify(membershipManager).onFatalHeartbeatFailure();
}
}
// TOPIC_AUTHORIZATION_FAILED: an ERROR log including member id, member state, and the
// response's error message is emitted, and a TopicAuthorizationException (with the
// generic error-code message) is surfaced as an ErrorEvent. Unlike group authorization
// failures, no transitionToFatal() is verified here.
@Test
public void testTopicAuthorizationFailedErrorResponse() {
try (
final MockedConstruction<HeartbeatRequestState> heartbeatRequestStateMockedConstruction = mockConstruction(
HeartbeatRequestState.class,
(mock, context) -> when(mock.canSendRequest(time.milliseconds())).thenReturn(true));
final MockedConstruction<StreamsGroupHeartbeatRequestManager.HeartbeatState> heartbeatStateMockedConstruction = mockConstruction(
StreamsGroupHeartbeatRequestManager.HeartbeatState.class);
final LogCaptureAppender logAppender = LogCaptureAppender.createAndRegister(StreamsGroupHeartbeatRequestManager.class)
) {
final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager();
final StreamsGroupHeartbeatRequestManager.HeartbeatState heartbeatState = heartbeatStateMockedConstruction.constructed().get(0);
when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(coordinatorNode));
when(membershipManager.state()).thenReturn(MemberState.STABLE);
when(membershipManager.memberId()).thenReturn(MEMBER_ID);
final NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds());
assertEquals(1, result.unsentRequests.size());
final NetworkClientDelegate.UnsentRequest networkRequest = result.unsentRequests.get(0);
final String errorMessage = "message";
final ClientResponse response = buildClientErrorResponse(Errors.TOPIC_AUTHORIZATION_FAILED, errorMessage);
networkRequest.handler().onComplete(response);
assertTrue(logAppender.getMessages("ERROR").stream()
.anyMatch(m -> m.contains("StreamsGroupHeartbeatRequest failed for member " + MEMBER_ID +
" with state " + MemberState.STABLE + " due to " + Errors.TOPIC_AUTHORIZATION_FAILED + ": " +
errorMessage)));
verify(heartbeatState).reset();
ArgumentCaptor<ErrorEvent> errorEvent = ArgumentCaptor.forClass(ErrorEvent.class);
verify(backgroundEventHandler).add(errorEvent.capture());
assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.message(), errorEvent.getValue().error().getMessage());
assertInstanceOf(TopicAuthorizationException.class, errorEvent.getValue().error());
verify(membershipManager).onFatalHeartbeatFailure();
}
}
// Every error listed in the @EnumSource is fatal: an ERROR log is emitted, an ErrorEvent
// of the error's exception type is surfaced, and the member transitions to fatal.
// UNSUPPORTED_VERSION is special-cased: its response message is replaced by the
// descriptive STREAMS protocol version-range explanation; all others propagate the
// response's error message verbatim.
@ParameterizedTest
@EnumSource(
value = Errors.class,
names = {
"INVALID_REQUEST",
"GROUP_MAX_SIZE_REACHED",
"UNSUPPORTED_VERSION",
"STREAMS_INVALID_TOPOLOGY",
"STREAMS_INVALID_TOPOLOGY_EPOCH",
"STREAMS_TOPOLOGY_FENCED"
}
)
public void testKnownFatalErrorResponse(final Errors error) {
try (
final MockedConstruction<HeartbeatRequestState> heartbeatRequestStateMockedConstruction = mockConstruction(
HeartbeatRequestState.class,
(mock, context) -> when(mock.canSendRequest(time.milliseconds())).thenReturn(true));
final MockedConstruction<StreamsGroupHeartbeatRequestManager.HeartbeatState> heartbeatStateMockedConstruction = mockConstruction(
StreamsGroupHeartbeatRequestManager.HeartbeatState.class);
final LogCaptureAppender logAppender = LogCaptureAppender.createAndRegister(StreamsGroupHeartbeatRequestManager.class)
) {
final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager();
final StreamsGroupHeartbeatRequestManager.HeartbeatState heartbeatState = heartbeatStateMockedConstruction.constructed().get(0);
when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(coordinatorNode));
final NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds());
assertEquals(1, result.unsentRequests.size());
final NetworkClientDelegate.UnsentRequest networkRequest = result.unsentRequests.get(0);
final String errorMessageInResponse = "message";
final ClientResponse response = buildClientErrorResponse(error, errorMessageInResponse);
networkRequest.handler().onComplete(response);
verify(heartbeatState).reset();
ArgumentCaptor<ErrorEvent> errorEvent = ArgumentCaptor.forClass(ErrorEvent.class);
verify(backgroundEventHandler).add(errorEvent.capture());
if (error == Errors.UNSUPPORTED_VERSION) {
final String errorMessage = "The cluster does not support the STREAMS group " +
"protocol or does not support the versions of the STREAMS group protocol used by this client " +
"(used versions: " + StreamsGroupHeartbeatRequestData.LOWEST_SUPPORTED_VERSION + " to " +
StreamsGroupHeartbeatRequestData.HIGHEST_SUPPORTED_VERSION + ").";
assertTrue(logAppender.getMessages("ERROR").stream()
.anyMatch(m -> m.contains("StreamsGroupHeartbeatRequest failed due to " +
error + ": " + errorMessage)));
assertEquals(errorMessage, errorEvent.getValue().error().getMessage());
} else {
assertTrue(logAppender.getMessages("ERROR").stream()
.anyMatch(m -> m.contains("StreamsGroupHeartbeatRequest failed due to " +
error + ": " + errorMessageInResponse)));
assertEquals(errorMessageInResponse, errorEvent.getValue().error().getMessage());
}
assertInstanceOf(error.exception().getClass(), errorEvent.getValue().error());
verify(membershipManager).transitionToFatal();
verify(membershipManager).onFatalHeartbeatFailure();
}
}
// FENCED_MEMBER_EPOCH / UNKNOWN_MEMBER_ID: both heartbeat state and request backoff are
// reset and the membership manager is told it was fenced so it can rejoin with a fresh
// epoch. No ErrorEvent is expected on this path.
@ParameterizedTest
@EnumSource(
value = Errors.class,
names = {"FENCED_MEMBER_EPOCH", "UNKNOWN_MEMBER_ID"}
)
public void testFencedMemberOrUnknownMemberIdErrorResponse(final Errors error) {
try (
final MockedConstruction<HeartbeatRequestState> heartbeatRequestStateMockedConstruction = mockConstruction(
HeartbeatRequestState.class,
(mock, context) -> when(mock.canSendRequest(time.milliseconds())).thenReturn(true));
final MockedConstruction<StreamsGroupHeartbeatRequestManager.HeartbeatState> heartbeatStateMockedConstruction = mockConstruction(
StreamsGroupHeartbeatRequestManager.HeartbeatState.class)
) {
final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager();
final StreamsGroupHeartbeatRequestManager.HeartbeatState heartbeatState = heartbeatStateMockedConstruction.constructed().get(0);
final HeartbeatRequestState heartbeatRequestState = heartbeatRequestStateMockedConstruction.constructed().get(0);
when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(coordinatorNode));
final NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds());
assertEquals(1, result.unsentRequests.size());
final NetworkClientDelegate.UnsentRequest networkRequest = result.unsentRequests.get(0);
final String errorMessage = "message";
final ClientResponse response = buildClientErrorResponse(error, errorMessage);
networkRequest.handler().onComplete(response);
verify(heartbeatState).reset();
verify(heartbeatRequestState).reset();
verify(membershipManager).onFenced();
verify(membershipManager).onFatalHeartbeatFailure();
}
}
// Any error code not covered by a dedicated test above (see provideOtherErrors) is
// treated as an unexpected fatal error: an "unexpected error" ERROR log, an ErrorEvent
// of the matching exception type carrying the response message, and a fatal transition.
@ParameterizedTest
@MethodSource("provideOtherErrors")
public void testOtherErrorResponse(final Errors error) {
try (
final MockedConstruction<HeartbeatRequestState> heartbeatRequestStateMockedConstruction = mockConstruction(
HeartbeatRequestState.class,
(mock, context) -> when(mock.canSendRequest(time.milliseconds())).thenReturn(true));
final MockedConstruction<StreamsGroupHeartbeatRequestManager.HeartbeatState> heartbeatStateMockedConstruction = mockConstruction(
StreamsGroupHeartbeatRequestManager.HeartbeatState.class);
final LogCaptureAppender logAppender = LogCaptureAppender.createAndRegister(StreamsGroupHeartbeatRequestManager.class)
) {
final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager();
final StreamsGroupHeartbeatRequestManager.HeartbeatState heartbeatState = heartbeatStateMockedConstruction.constructed().get(0);
when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(coordinatorNode));
final NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds());
assertEquals(1, result.unsentRequests.size());
final NetworkClientDelegate.UnsentRequest networkRequest = result.unsentRequests.get(0);
final String errorMessage = "message";
final ClientResponse response = buildClientErrorResponse(error, errorMessage);
networkRequest.handler().onComplete(response);
assertTrue(logAppender.getMessages("ERROR").stream()
.anyMatch(m -> m.contains("StreamsGroupHeartbeatRequest failed due to unexpected error")));
verify(heartbeatState).reset();
ArgumentCaptor<ErrorEvent> errorEvent = ArgumentCaptor.forClass(ErrorEvent.class);
verify(backgroundEventHandler).add(errorEvent.capture());
assertEquals(errorMessage, errorEvent.getValue().error().getMessage());
assertInstanceOf(error.exception().getClass(), errorEvent.getValue().error());
verify(membershipManager).transitionToFatal();
verify(membershipManager).onFatalHeartbeatFailure();
}
}
/**
 * Supplies every {@link Errors} value that is not already exercised by a dedicated
 * test above, so {@code testOtherErrorResponse} covers the generic fallback path.
 */
private static Stream<Arguments> provideOtherErrors() {
    // Errors with dedicated handling/tests; everything else goes down the generic path.
    final Set<Errors> explicitlyTestedErrors = Set.of(
        Errors.NONE,
        Errors.NOT_COORDINATOR,
        Errors.COORDINATOR_NOT_AVAILABLE,
        Errors.COORDINATOR_LOAD_IN_PROGRESS,
        Errors.GROUP_AUTHORIZATION_FAILED,
        Errors.TOPIC_AUTHORIZATION_FAILED,
        Errors.INVALID_REQUEST,
        Errors.GROUP_MAX_SIZE_REACHED,
        Errors.FENCED_MEMBER_EPOCH,
        Errors.UNKNOWN_MEMBER_ID,
        Errors.UNSUPPORTED_VERSION,
        Errors.STREAMS_INVALID_TOPOLOGY,
        Errors.STREAMS_INVALID_TOPOLOGY_EPOCH,
        Errors.STREAMS_TOPOLOGY_FENCED);
    return Stream.of(Errors.values())
        .filter(candidate -> !explicitlyTestedErrors.contains(candidate))
        .map(Arguments::of);
}
/**
 * When the member is not leaving the group, {@code pollOnClose} must produce no request.
 */
@Test
public void testPollOnCloseWhenIsNotLeaving() {
    final StreamsGroupHeartbeatRequestManager manager = createStreamsGroupHeartbeatRequestManager();
    assertEquals(NetworkClientDelegate.PollResult.EMPTY, manager.pollOnClose(time.milliseconds()));
}
/**
 * When the member is leaving the group, {@code pollOnClose} must emit one final
 * heartbeat carrying the group id, member id, and the leave-group member epoch.
 */
@Test
public void testPollOnCloseWhenIsLeaving() {
    final StreamsGroupHeartbeatRequestManager manager = createStreamsGroupHeartbeatRequestManager();
    when(membershipManager.isLeavingGroup()).thenReturn(true);
    when(membershipManager.groupId()).thenReturn(GROUP_ID);
    when(membershipManager.memberId()).thenReturn(MEMBER_ID);
    when(membershipManager.memberEpoch()).thenReturn(LEAVE_GROUP_MEMBER_EPOCH);

    final NetworkClientDelegate.PollResult pollResult = manager.pollOnClose(time.milliseconds());

    assertEquals(1, pollResult.unsentRequests.size());
    final StreamsGroupHeartbeatRequest leaveRequest =
        (StreamsGroupHeartbeatRequest) pollResult.unsentRequests.get(0).requestBuilder().build();
    assertEquals(GROUP_ID, leaveRequest.data().groupId());
    assertEquals(MEMBER_ID, leaveRequest.data().memberId());
    assertEquals(LEAVE_GROUP_MEMBER_EPOCH, leaveRequest.data().memberEpoch());
}
// When the poll timer has expired and no request is in flight, maximumTimeToWait must be
// 0 so the caller acts immediately; the poll timer is also updated with the current time.
@Test
public void testMaximumTimeToWaitPollTimerExpired() {
try (
final MockedConstruction<Timer> timerMockedConstruction =
mockConstruction(Timer.class, (mock, context) -> when(mock.isExpired()).thenReturn(true));
final MockedConstruction<HeartbeatRequestState> heartbeatRequestStateMockedConstruction = mockConstruction(
HeartbeatRequestState.class,
(mock, context) -> when(mock.requestInFlight()).thenReturn(false))
) {
final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager();
final Timer pollTimer = timerMockedConstruction.constructed().get(0);
time.sleep(1234);
final long maximumTimeToWait = heartbeatRequestManager.maximumTimeToWait(time.milliseconds());
assertEquals(0, maximumTimeToWait);
verify(pollTimer).update(time.milliseconds());
}
}
// When the membership manager signals the heartbeat interval should not be awaited
// (e.g. an urgent state change) and no request is in flight, maximumTimeToWait is 0.
@Test
public void testMaximumTimeToWaitWhenHeartbeatShouldBeSentImmediately() {
try (
final MockedConstruction<Timer> timerMockedConstruction = mockConstruction(Timer.class);
final MockedConstruction<HeartbeatRequestState> heartbeatRequestStateMockedConstruction = mockConstruction(
HeartbeatRequestState.class,
(mock, context) -> when(mock.requestInFlight()).thenReturn(false))
) {
final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager();
final Timer pollTimer = timerMockedConstruction.constructed().get(0);
when(membershipManager.shouldNotWaitForHeartbeatInterval()).thenReturn(true);
time.sleep(1234);
final long maximumTimeToWait = heartbeatRequestManager.maximumTimeToWait(time.milliseconds());
assertEquals(0, maximumTimeToWait);
verify(pollTimer).update(time.milliseconds());
}
}
// When an immediate heartbeat is NOT warranted (a request is already in flight and/or the
// interval must be awaited), maximumTimeToWait is the time to the next heartbeat (6ms here,
// smaller than the 12ms left on the poll timer).
@ParameterizedTest
@CsvSource({"true, false", "false, false", "true, true"})
public void testMaximumTimeToWaitWhenHeartbeatShouldBeNotSentImmediately(final boolean isRequestInFlight,
final boolean shouldNotWaitForHeartbeatInterval) {
final long remainingMs = 12L;
final long timeToNextHeartbeatMs = 6L;
try (
final MockedConstruction<Timer> timerMockedConstruction =
mockConstruction(Timer.class, (mock, context) -> when(mock.remainingMs()).thenReturn(remainingMs));
final MockedConstruction<HeartbeatRequestState> heartbeatRequestStateMockedConstruction = mockConstruction(
HeartbeatRequestState.class,
(mock, context) -> {
when(mock.requestInFlight()).thenReturn(isRequestInFlight);
when(mock.timeToNextHeartbeatMs(anyLong())).thenReturn(timeToNextHeartbeatMs);
})
) {
final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager();
final Timer pollTimer = timerMockedConstruction.constructed().get(0);
when(membershipManager.shouldNotWaitForHeartbeatInterval()).thenReturn(shouldNotWaitForHeartbeatInterval);
time.sleep(1234);
final long maximumTimeToWait = heartbeatRequestManager.maximumTimeToWait(time.milliseconds());
assertEquals(timeToNextHeartbeatMs, maximumTimeToWait);
verify(pollTimer).update(time.milliseconds());
}
}
/**
 * Verifies that {@code maximumTimeToWait} returns the smaller of the poll timer's
 * remaining time and the request state's time to the next heartbeat.
 *
 * <p>Fix: the expected value is derived from the parameters instead of being
 * hard-coded to {@code 5} — a fixed 5 only holds for the first CSV row ("12, 5");
 * for "10, 6" the minimum is 6, so the original assertion could not pass.
 */
@ParameterizedTest
@CsvSource({"12, 5", "10, 6"})
public void testMaximumTimeToWaitSelectingMinimumWaitTime(final long remainingMs,
                                                          final long timeToNextHeartbeatMs) {
    try (
        final MockedConstruction<Timer> timerMockedConstruction =
            mockConstruction(Timer.class, (mock, context) -> when(mock.remainingMs()).thenReturn(remainingMs));
        final MockedConstruction<HeartbeatRequestState> heartbeatRequestStateMockedConstruction = mockConstruction(
            HeartbeatRequestState.class,
            (mock, context) -> when(mock.timeToNextHeartbeatMs(anyLong())).thenReturn(timeToNextHeartbeatMs))
    ) {
        final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager();
        final Timer pollTimer = timerMockedConstruction.constructed().get(0);
        time.sleep(1234);
        final long maximumTimeToWait = heartbeatRequestManager.maximumTimeToWait(time.milliseconds());
        // The manager must pick whichever wait bound is tighter.
        assertEquals(Math.min(remainingMs, timeToNextHeartbeatMs), maximumTimeToWait);
        verify(pollTimer).update(time.milliseconds());
    }
}
// resetPollTimer on a non-expired timer: the timer is updated, checked for expiry, and
// reset to the max poll interval; no stale-member handling is triggered.
@Test
public void testResetPollTimer() {
try (final MockedConstruction<Timer> pollTimerMockedConstruction = mockConstruction(Timer.class)) {
final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager();
// NOTE(review): index 1 implies the manager constructs two Timers and the second is the
// poll timer — confirm against the constructor (other tests here use index 0).
final Timer pollTimer = pollTimerMockedConstruction.constructed().get(1);
heartbeatRequestManager.resetPollTimer(time.milliseconds());
verify(pollTimer).update(time.milliseconds());
verify(pollTimer).isExpired();
verify(pollTimer).reset(DEFAULT_MAX_POLL_INTERVAL_MS);
}
}
// resetPollTimer on an expired timer: in addition to the update/reset, the expiry amount
// is read and the membership manager may rejoin as a stale member before the reset.
@Test
public void testResetPollTimerWhenExpired() {
try (final MockedConstruction<Timer> pollTimerMockedConstruction = mockConstruction(Timer.class)) {
final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager();
final Timer pollTimer = pollTimerMockedConstruction.constructed().get(1);
when(pollTimer.isExpired()).thenReturn(true);
heartbeatRequestManager.resetPollTimer(time.milliseconds());
verify(pollTimer).update(time.milliseconds());
verify(pollTimer).isExpired();
verify(pollTimer).isExpiredBy();
verify(membershipManager).memberId();
verify(membershipManager).maybeRejoinStaleMember();
verify(pollTimer).reset(DEFAULT_MAX_POLL_INTERVAL_MS);
}
}
// A successful heartbeat response must propagate the broker-provided heartbeat interval
// into the shared StreamsRebalanceData (initially -1, i.e. unknown).
@Test
public void testStreamsRebalanceDataHeartbeatIntervalMsUpdatedOnSuccess() {
try (
final MockedConstruction<HeartbeatRequestState> ignored = mockConstruction(
HeartbeatRequestState.class,
(mock, context) -> when(mock.canSendRequest(time.milliseconds())).thenReturn(true))
) {
final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager();
when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(coordinatorNode));
when(membershipManager.groupId()).thenReturn(GROUP_ID);
when(membershipManager.memberId()).thenReturn(MEMBER_ID);
when(membershipManager.memberEpoch()).thenReturn(MEMBER_EPOCH);
when(membershipManager.groupInstanceId()).thenReturn(Optional.of(INSTANCE_ID));
// Initially, heartbeatIntervalMs should be -1
assertEquals(-1, streamsRebalanceData.heartbeatIntervalMs());
final NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds());
assertEquals(1, result.unsentRequests.size());
final NetworkClientDelegate.UnsentRequest networkRequest = result.unsentRequests.get(0);
final ClientResponse response = buildClientResponse();
networkRequest.handler().onComplete(response);
// After successful response, heartbeatIntervalMs should be updated
assertEquals(RECEIVED_HEARTBEAT_INTERVAL_MS, streamsRebalanceData.heartbeatIntervalMs());
}
}
/**
 * Builds the minimal {@link ConsumerConfig} the request manager under test needs:
 * deserializers, bootstrap servers, and the max poll interval used by the poll timer.
 */
private static ConsumerConfig config() {
    final Properties props = new Properties();
    props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.setProperty(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, String.valueOf(DEFAULT_MAX_POLL_INTERVAL_MS));
    // Class-valued entries must go through put(), not setProperty().
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    return new ConsumerConfig(props);
}
// Constructs the request manager under test, wiring in the shared mock collaborators
// (coordinator/membership managers, event handler) and the test clock/config/metrics.
private StreamsGroupHeartbeatRequestManager createStreamsGroupHeartbeatRequestManager() {
return new StreamsGroupHeartbeatRequestManager(
LOG_CONTEXT,
time,
config,
coordinatorRequestManager,
membershipManager,
backgroundEventHandler,
metrics,
streamsRebalanceData
);
}
/**
 * Builds a successful heartbeat response carrying the endpoint-to-partitions mapping
 * and the broker-assigned heartbeat interval.
 */
private ClientResponse buildClientResponse() {
    final StreamsGroupHeartbeatResponseData responseData = new StreamsGroupHeartbeatResponseData()
        .setPartitionsByUserEndpoint(ENDPOINT_TO_PARTITIONS)
        .setHeartbeatIntervalMs((int) RECEIVED_HEARTBEAT_INTERVAL_MS);
    final RequestHeader requestHeader = new RequestHeader(ApiKeys.STREAMS_GROUP_HEARTBEAT, (short) 1, "", 1);
    return new ClientResponse(
        requestHeader,
        null,
        "-1",
        time.milliseconds(),
        time.milliseconds(),
        false,
        null,
        null,
        new StreamsGroupHeartbeatResponse(responseData)
    );
}
/**
 * Builds a heartbeat response carrying the given error code and error message.
 */
private ClientResponse buildClientErrorResponse(final Errors error, final String errorMessage) {
    final StreamsGroupHeartbeatResponseData responseData = new StreamsGroupHeartbeatResponseData()
        .setErrorCode(error.code())
        .setErrorMessage(errorMessage);
    final RequestHeader requestHeader = new RequestHeader(ApiKeys.STREAMS_GROUP_HEARTBEAT, (short) 1, "", 1);
    return new ClientResponse(
        requestHeader,
        null,
        "-1",
        time.milliseconds(),
        time.milliseconds(),
        false,
        null,
        null,
        new StreamsGroupHeartbeatResponse(responseData)
    );
}
/**
 * Asserts that two lists of task IDs are equal irrespective of the ordering of the
 * subtopologies and of the partitions within each subtopology.
 */
private static void assertTaskIdsEquals(final List<StreamsGroupHeartbeatRequestData.TaskIds> expected,
                                        final List<StreamsGroupHeartbeatRequestData.TaskIds> actual) {
    assertEquals(normalizedTaskIds(expected), normalizedTaskIds(actual));
}

// Returns a copy with partitions sorted within each subtopology and subtopologies
// sorted by ID, so order-insensitive comparison reduces to a plain equals().
private static List<StreamsGroupHeartbeatRequestData.TaskIds> normalizedTaskIds(
        final List<StreamsGroupHeartbeatRequestData.TaskIds> taskIdsList) {
    return taskIdsList.stream()
        .map(taskIds -> new StreamsGroupHeartbeatRequestData.TaskIds()
            .setSubtopologyId(taskIds.subtopologyId())
            .setPartitions(taskIds.partitions().stream().sorted().collect(Collectors.toList())))
        .sorted(Comparator.comparing(StreamsGroupHeartbeatRequestData.TaskIds::subtopologyId))
        .collect(Collectors.toList());
}
}
| StreamsGroupHeartbeatRequestManagerTest |
java | google__guava | guava-tests/test/com/google/common/reflect/InvokableTest.java | {
"start": 1722,
"end": 6449
} | class ____ extends TestCase {
// Historically Invokable inherited from java.lang.reflect.AccessibleObject. That's no longer the
// case, but we do check that its API still has the same public methods. We exclude some methods
// that were added in Java 9 and that people probably weren't calling via Invokable, namely
// `boolean canAccess(Object)` and `boolean trySetAccessible()`.
public void testApiCompatibleWithAccessibleObject() {
ImmutableSet<String> invokableMethods =
publicMethodSignatures(Invokable.class, ImmutableSet.<String>of());
ImmutableSet<String> accessibleObjectMethods =
publicMethodSignatures(AccessibleObject.class, ImmutableSet.of("canAccess"));
assertThat(invokableMethods).containsAtLeastElementsIn(accessibleObjectMethods);
ImmutableSet<String> genericDeclarationMethods =
publicMethodSignatures(GenericDeclaration.class, ImmutableSet.<String>of());
assertThat(invokableMethods).containsAtLeastElementsIn(genericDeclarationMethods);
}
private static ImmutableSet<String> publicMethodSignatures(
Class<?> c, ImmutableSet<String> ignore) {
ImmutableSet.Builder<String> methods = ImmutableSet.builder();
for (Method method : c.getMethods()) {
if (Modifier.isStatic(method.getModifiers()) || ignore.contains(method.getName())) {
continue;
}
StringBuilder signature =
new StringBuilder()
.append(typeName(method.getReturnType()))
.append(" ")
.append(method.getName())
.append("(");
String sep = "";
for (Class<?> param : method.getParameterTypes()) {
signature.append(sep).append(typeName(param));
sep = ", ";
}
methods.add(signature.append(")").toString());
}
return methods.build();
}
private static String typeName(Class<?> type) {
return type.isArray() ? typeName(type.getComponentType()) + "[]" : type.getName();
}
public void testConstructor() throws Exception {
Invokable<A, A> invokable = A.constructor();
assertTrue(invokable.isPublic());
assertFalse(invokable.isPackagePrivate());
assertFalse(invokable.isAbstract());
assertFalse(invokable.isStatic());
assertTrue(invokable.isAnnotationPresent(Tested.class));
}
public void testAbstractMethod() throws Exception {
Invokable<?, Object> invokable = A.method("abstractMethod");
assertTrue(invokable.isPackagePrivate());
assertTrue(invokable.isAbstract());
assertFalse(invokable.isFinal());
assertTrue(invokable.isAnnotationPresent(Tested.class));
}
public void testOverridableMethod() throws Exception {
Invokable<?, Object> invokable = A.method("overridableMethod");
assertTrue(invokable.isPackagePrivate());
assertFalse(invokable.isAbstract());
assertFalse(invokable.isFinal());
assertTrue(invokable.isAnnotationPresent(Tested.class));
}
public void testPrivateMethod() throws Exception {
Invokable<?, Object> invokable = A.method("privateMethod");
assertFalse(invokable.isAbstract());
assertTrue(invokable.isPrivate());
assertFalse(invokable.isPackagePrivate());
assertFalse(invokable.isPublic());
assertFalse(invokable.isProtected());
assertTrue(invokable.isAnnotationPresent(Tested.class));
}
public void testProtectedMethod() throws Exception {
Invokable<?, Object> invokable = A.method("protectedMethod");
assertFalse(invokable.isAbstract());
assertFalse(invokable.isPrivate());
assertFalse(invokable.isPackagePrivate());
assertFalse(invokable.isFinal());
assertFalse(invokable.isPublic());
assertTrue(invokable.isProtected());
assertTrue(invokable.isAnnotationPresent(Tested.class));
}
public void testFinalMethod() throws Exception {
Invokable<?, Object> invokable = A.method("publicFinalMethod");
assertFalse(invokable.isAbstract());
assertFalse(invokable.isPrivate());
assertTrue(invokable.isFinal());
assertTrue(invokable.isPublic());
assertTrue(invokable.isAnnotationPresent(Tested.class));
}
public void testNativeMethod() throws Exception {
Invokable<?, Object> invokable = A.method("nativeMethod");
assertTrue(invokable.isNative());
assertTrue(invokable.isPackagePrivate());
}
public void testSynchronizedMethod() throws Exception {
Invokable<?, Object> invokable = A.method("synchronizedMethod");
assertTrue(invokable.isSynchronized());
}
public void testUnannotatedMethod() throws Exception {
Invokable<?, Object> invokable = A.method("notAnnotatedMethod");
assertFalse(invokable.isAnnotationPresent(Tested.class));
}
@Retention(RetentionPolicy.RUNTIME)
@Keep
private @ | InvokableTest |
java | google__guice | extensions/throwingproviders/test/com/google/inject/throwingproviders/ThrowingProviderTest.java | {
"start": 34189,
"end": 34299
} | interface ____<T>
extends ThrowingProvider<String, Exception> {}
private static | WrongThrowingProviderType |
java | apache__flink | flink-core/src/test/java/org/apache/flink/core/fs/SafetyNetCloseableRegistryTest.java | {
"start": 11406,
"end": 12331
} | class ____
extends SafetyNetCloseableRegistry.CloseableReaperThread {
@Override
public synchronized void start() {
throw new java.lang.OutOfMemoryError();
}
private static File newFolder(File root, String... subDirs) throws IOException {
String subFolder = String.join("/", subDirs);
File result = new File(root, subFolder);
if (!result.mkdirs()) {
throw new IOException("Couldn't create folders " + root);
}
return result;
}
}
private static File newFolder(File root, String... subDirs) throws IOException {
String subFolder = String.join("/", subDirs);
File result = new File(root, subFolder);
if (!result.mkdirs()) {
throw new IOException("Couldn't create folders " + root);
}
return result;
}
}
| OutOfMemoryReaperThread |
java | apache__flink | flink-table/flink-sql-gateway/src/main/java/org/apache/flink/table/gateway/rest/SqlGatewayRestEndpoint.java | {
"start": 5125,
"end": 14513
} | class ____ extends RestServerEndpoint implements SqlGatewayEndpoint {
public final SqlGatewayService service;
private final EmbeddedQuartzScheduler quartzScheduler;
public SqlGatewayRestEndpoint(Configuration configuration, SqlGatewayService sqlGatewayService)
throws IOException, ConfigurationException {
super(configuration);
service = sqlGatewayService;
quartzScheduler = new EmbeddedQuartzScheduler();
}
@VisibleForTesting
public EmbeddedQuartzScheduler getQuartzScheduler() {
return quartzScheduler;
}
@Override
protected List<Tuple2<RestHandlerSpecification, ChannelInboundHandler>> initializeHandlers(
CompletableFuture<String> localAddressFuture) {
List<Tuple2<RestHandlerSpecification, ChannelInboundHandler>> handlers = new ArrayList<>();
addSessionRelatedHandlers(handlers);
addOperationRelatedHandlers(handlers);
addUtilRelatedHandlers(handlers);
addStatementRelatedHandlers(handlers);
addEmbeddedSchedulerRelatedHandlers(handlers);
addMaterializedTableRelatedHandlers(handlers);
addDeployScriptRelatedHandlers(handlers);
return handlers;
}
private void addSessionRelatedHandlers(
List<Tuple2<RestHandlerSpecification, ChannelInboundHandler>> handlers) {
// Open a session
OpenSessionHandler openSessionHandler =
new OpenSessionHandler(service, responseHeaders, OpenSessionHeaders.getInstance());
handlers.add(Tuple2.of(OpenSessionHeaders.getInstance(), openSessionHandler));
// Close a session
CloseSessionHandler closeSessionHandler =
new CloseSessionHandler(
service, responseHeaders, CloseSessionHeaders.getInstance());
handlers.add(Tuple2.of(CloseSessionHeaders.getInstance(), closeSessionHandler));
// Configure session
ConfigureSessionHandler configureSessionHandler =
new ConfigureSessionHandler(
service, responseHeaders, ConfigureSessionHeaders.getInstance());
handlers.add(Tuple2.of(ConfigureSessionHeaders.getInstance(), configureSessionHandler));
// Get session configuration
GetSessionConfigHandler getSessionConfigHandler =
new GetSessionConfigHandler(
service, responseHeaders, GetSessionConfigHeaders.getInstance());
handlers.add(Tuple2.of(GetSessionConfigHeaders.getInstance(), getSessionConfigHandler));
// Trigger session heartbeat
TriggerSessionHeartbeatHandler triggerSessionHeartbeatHandler =
new TriggerSessionHeartbeatHandler(
service, responseHeaders, TriggerSessionHeartbeatHeaders.getInstance());
handlers.add(
Tuple2.of(
TriggerSessionHeartbeatHeaders.getInstance(),
triggerSessionHeartbeatHandler));
}
protected void addOperationRelatedHandlers(
List<Tuple2<RestHandlerSpecification, ChannelInboundHandler>> handlers) {
// Get the status of operation
GetOperationStatusHandler getOperationStatusHandler =
new GetOperationStatusHandler(
service, responseHeaders, GetOperationStatusHeaders.getInstance());
handlers.add(Tuple2.of(GetOperationStatusHeaders.getInstance(), getOperationStatusHandler));
// Cancel the operation
CancelOperationHandler cancelOperationHandler =
new CancelOperationHandler(
service, responseHeaders, CancelOperationHeaders.getInstance());
handlers.add(Tuple2.of(CancelOperationHeaders.getInstance(), cancelOperationHandler));
// Close the operation
CloseOperationHandler closeOperationHandler =
new CloseOperationHandler(
service, responseHeaders, CloseOperationHeaders.getInstance());
handlers.add(Tuple2.of(CloseOperationHeaders.getInstance(), closeOperationHandler));
}
protected void addUtilRelatedHandlers(
List<Tuple2<RestHandlerSpecification, ChannelInboundHandler>> handlers) {
// Get info
GetInfoHandler getInfoHandler =
new GetInfoHandler(service, responseHeaders, GetInfoHeaders.getInstance());
handlers.add(Tuple2.of(GetInfoHeaders.getInstance(), getInfoHandler));
// Get version
GetApiVersionHandler getApiVersionHandler =
new GetApiVersionHandler(
service, responseHeaders, GetApiVersionHeaders.getInstance());
handlers.add(Tuple2.of(GetApiVersionHeaders.getInstance(), getApiVersionHandler));
}
private void addStatementRelatedHandlers(
List<Tuple2<RestHandlerSpecification, ChannelInboundHandler>> handlers) {
// Complete a statement
CompleteStatementHandler completeStatementHandler =
new CompleteStatementHandler(
service, responseHeaders, CompleteStatementHeaders.getInstance());
handlers.add(Tuple2.of(CompleteStatementHeaders.getInstance(), completeStatementHandler));
// Execute a statement
ExecuteStatementHandler executeStatementHandler =
new ExecuteStatementHandler(
service, responseHeaders, ExecuteStatementHeaders.getInstance());
handlers.add(Tuple2.of(ExecuteStatementHeaders.getInstance(), executeStatementHandler));
// Fetch results
handlers.add(
Tuple2.of(
FetchResultsHeaders.getDefaultInstance(),
new FetchResultsHandler(
service,
responseHeaders,
FetchResultsHeaders.getDefaultInstance())));
handlers.add(
Tuple2.of(
FetchResultsHeaders.getInstanceV1(),
new FetchResultsHandler(
service, responseHeaders, FetchResultsHeaders.getInstanceV1())));
}
private void addEmbeddedSchedulerRelatedHandlers(
List<Tuple2<RestHandlerSpecification, ChannelInboundHandler>> handlers) {
// create workflow
CreateEmbeddedSchedulerWorkflowHandler createHandler =
new CreateEmbeddedSchedulerWorkflowHandler(
service,
quartzScheduler,
responseHeaders,
CreateEmbeddedSchedulerWorkflowHeaders.getInstance());
handlers.add(
Tuple2.of(CreateEmbeddedSchedulerWorkflowHeaders.getInstance(), createHandler));
// suspend workflow
SuspendEmbeddedSchedulerWorkflowHandler suspendHandler =
new SuspendEmbeddedSchedulerWorkflowHandler(
service,
quartzScheduler,
responseHeaders,
SuspendEmbeddedSchedulerWorkflowHeaders.getInstance());
handlers.add(
Tuple2.of(SuspendEmbeddedSchedulerWorkflowHeaders.getInstance(), suspendHandler));
// resume workflow
ResumeEmbeddedSchedulerWorkflowHandler resumeHandler =
new ResumeEmbeddedSchedulerWorkflowHandler(
service,
quartzScheduler,
responseHeaders,
ResumeEmbeddedSchedulerWorkflowHeaders.getInstance());
handlers.add(
Tuple2.of(ResumeEmbeddedSchedulerWorkflowHeaders.getInstance(), resumeHandler));
// delete workflow
DeleteEmbeddedSchedulerWorkflowHandler deleteHandler =
new DeleteEmbeddedSchedulerWorkflowHandler(
service,
quartzScheduler,
responseHeaders,
DeleteEmbeddedSchedulerWorkflowHeaders.getInstance());
handlers.add(
Tuple2.of(DeleteEmbeddedSchedulerWorkflowHeaders.getInstance(), deleteHandler));
}
private void addMaterializedTableRelatedHandlers(
List<Tuple2<RestHandlerSpecification, ChannelInboundHandler>> handlers) {
// Refresh materialized table
RefreshMaterializedTableHandler refreshMaterializedTableHandler =
new RefreshMaterializedTableHandler(
service, responseHeaders, RefreshMaterializedTableHeaders.getInstance());
handlers.add(
Tuple2.of(
RefreshMaterializedTableHeaders.getInstance(),
refreshMaterializedTableHandler));
}
private void addDeployScriptRelatedHandlers(
List<Tuple2<RestHandlerSpecification, ChannelInboundHandler>> handlers) {
DeployScriptHandler handler =
new DeployScriptHandler(
service, responseHeaders, DeployScriptHeaders.getInstance());
handlers.add(Tuple2.of(DeployScriptHeaders.getInstance(), handler));
}
@Override
protected void startInternal() {
quartzScheduler.start();
}
@Override
public void stop() throws Exception {
super.close();
quartzScheduler.stop();
}
}
| SqlGatewayRestEndpoint |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest/spi-deployment/src/main/java/io/quarkus/resteasy/reactive/server/spi/HandlerConfigurationProviderBuildItem.java | {
"start": 610,
"end": 818
} | class ____ the
* second argument constructor.
*
* Ideally we would have used generic to make things more type safe, but generics cannot be used in build items.
*/
@SuppressWarnings("rawtypes")
public final | as |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/collection/defaultimplementation/DefaultCollectionImplementationTest.java | {
"start": 1125,
"end": 7074
} | class ____ {
@RegisterExtension
final GeneratedSource generatedSource = new GeneratedSource()
.addComparisonToFixtureFor( SourceTargetMapper.class );
@ProcessorTest
@IssueKey("6")
public void shouldUseDefaultImplementationForConcurrentMap() {
ConcurrentMap<String, TargetFoo> target =
SourceTargetMapper.INSTANCE.sourceFooMapToTargetFooConcurrentMap( createSourceFooMap() );
assertResultMap( target );
}
@ProcessorTest
@IssueKey("6")
public void shouldUseDefaultImplementationForConcurrentNavigableMap() {
ConcurrentNavigableMap<String, TargetFoo> target =
SourceTargetMapper.INSTANCE.sourceFooMapToTargetFooConcurrentNavigableMap( createSourceFooMap() );
assertResultMap( target );
}
@ProcessorTest
@IssueKey("6")
public void shouldUseDefaultImplementationForMap() {
Map<String, TargetFoo> target = SourceTargetMapper.INSTANCE.sourceFooMapToTargetFooMap( createSourceFooMap() );
assertResultMap( target );
}
@ProcessorTest
@IssueKey("6")
public void shouldUseDefaultImplementationForNavigableMap() {
NavigableMap<String, TargetFoo> target =
SourceTargetMapper.INSTANCE.sourceFooMapToTargetFooNavigableMap( createSourceFooMap() );
assertResultMap( target );
}
@ProcessorTest
@IssueKey("6")
public void shouldUseDefaultImplementationForSortedMap() {
SortedMap<String, TargetFoo> target =
SourceTargetMapper.INSTANCE.sourceFooMapToTargetFooSortedMap( createSourceFooMap() );
assertResultMap( target );
}
@ProcessorTest
@IssueKey("6")
public void shouldUseDefaultImplementationForNaviableSet() {
NavigableSet<TargetFoo> target =
SourceTargetMapper.INSTANCE.sourceFoosToTargetFooNavigableSet( createSourceFooList() );
assertResultList( target );
}
@ProcessorTest
@IssueKey("6")
public void shouldUseDefaultImplementationForCollection() {
Collection<TargetFoo> target =
SourceTargetMapper.INSTANCE.sourceFoosToTargetFoos( (Collection<SourceFoo>) createSourceFooList() );
assertResultList( target );
}
@ProcessorTest
@IssueKey("6")
public void shouldUseDefaultImplementationForIterable() {
Iterable<TargetFoo> target =
SourceTargetMapper.INSTANCE.sourceFoosToTargetFoos( (Iterable<SourceFoo>) createSourceFooList() );
assertResultList( target );
}
@ProcessorTest
@IssueKey("6")
public void shouldUseDefaultImplementationForList() {
List<TargetFoo> target = SourceTargetMapper.INSTANCE.sourceFoosToTargetFoos( createSourceFooList() );
assertResultList( target );
}
@ProcessorTest
@IssueKey("6")
public void shouldUseDefaultImplementationForSet() {
Set<TargetFoo> target =
SourceTargetMapper.INSTANCE.sourceFoosToTargetFoos( new HashSet<>( createSourceFooList() ) );
assertResultList( target );
}
@ProcessorTest
@IssueKey("6")
public void shouldUseDefaultImplementationForSortedSet() {
SortedSet<TargetFoo> target =
SourceTargetMapper.INSTANCE.sourceFoosToTargetFooSortedSet( createSourceFooList() );
assertResultList( target );
}
@ProcessorTest
@IssueKey("19")
public void shouldUseTargetParameterForMapping() {
List<TargetFoo> target = new ArrayList<>();
SourceTargetMapper.INSTANCE.sourceFoosToTargetFoosUsingTargetParameter(
target,
createSourceFooList()
);
assertResultList( target );
}
@ProcessorTest
@IssueKey("19")
public void shouldUseAndReturnTargetParameterForMapping() {
List<TargetFoo> target = new ArrayList<>();
Iterable<TargetFoo> result =
SourceTargetMapper.INSTANCE
.sourceFoosToTargetFoosUsingTargetParameterAndReturn( createSourceFooList(), target );
assertThat( result ).isSameAs( target );
assertResultList( target );
}
@ProcessorTest
@IssueKey("1752")
public void shouldUseAndReturnTargetParameterForNullMapping() {
List<TargetFoo> target = new ArrayList<>();
target.add( new TargetFoo( "Bob" ) );
target.add( new TargetFoo( "Alice" ) );
Iterable<TargetFoo> result =
SourceTargetMapper.INSTANCE
.sourceFoosToTargetFoosUsingTargetParameterAndReturn( null, target );
assertThat( result ).isSameAs( target );
assertResultList( target );
}
@ProcessorTest
@IssueKey("92")
public void shouldUseDefaultImplementationForListWithoutSetter() {
Source source = new Source();
source.setFooList( createSourceFooList() );
Target target = SourceTargetMapper.INSTANCE.sourceToTarget( source );
assertThat( target ).isNotNull();
assertThat( target.getFooListNoSetter() ).containsExactly( new TargetFoo( "Bob" ), new TargetFoo( "Alice" ) );
}
private void assertResultList(Iterable<TargetFoo> fooIterable) {
assertThat( fooIterable ).isNotNull();
assertThat( fooIterable ).containsOnly( new TargetFoo( "Bob" ), new TargetFoo( "Alice" ) );
}
private void assertResultMap(Map<String, TargetFoo> result) {
assertThat( result ).isNotNull();
assertThat( result ).hasSize( 2 );
assertThat( result ).contains( entry( "1", new TargetFoo( "Bob" ) ), entry( "2", new TargetFoo( "Alice" ) ) );
}
private Map<Long, SourceFoo> createSourceFooMap() {
Map<Long, SourceFoo> map = new HashMap<>();
map.put( 1L, new SourceFoo( "Bob" ) );
map.put( 2L, new SourceFoo( "Alice" ) );
return map;
}
private List<SourceFoo> createSourceFooList() {
return Arrays.asList( new SourceFoo( "Bob" ), new SourceFoo( "Alice" ) );
}
}
| DefaultCollectionImplementationTest |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/AbstractMockChecker.java | {
"start": 4642,
"end": 7611
} | interface ____<T extends Tree> {
/**
* Investigate the provided Tree, and return type information about it if it matches.
*
* @return the Type of the object being mocked, if any; Optional.empty() otherwise
*/
Optional<Type> extract(T tree, VisitorState state);
/**
* Enrich this TypeExtractor with fallback behavior.
*
* @return a TypeExtractor which first tries {@code this.extract(t, s)}, and if that does not
* match, falls back to {@code other.extract(t, s)}.
*/
default TypeExtractor<T> or(TypeExtractor<T> other) {
return (tree, state) ->
TypeExtractor.this
.extract(tree, state)
.map(Optional::of)
.orElseGet(() -> other.extract(tree, state));
}
}
/**
* Produces an extractor which, if the tree matches, extracts the type of that tree, as given by
* {@link ASTHelpers#getType(Tree)}.
*/
public static <T extends Tree> TypeExtractor<T> extractType(Matcher<T> m) {
return (tree, state) -> {
if (m.matches(tree, state)) {
return Optional.ofNullable(ASTHelpers.getType(tree));
}
return Optional.empty();
};
}
/**
* Produces an extractor which, if the tree matches, extracts the type of the first argument to
* the method invocation.
*/
public static TypeExtractor<MethodInvocationTree> extractFirstArg(
Matcher<MethodInvocationTree> m) {
return (tree, state) -> {
if (!m.matches(tree, state)) {
return Optional.empty();
}
if (tree.getArguments().size() >= 1) {
return Optional.ofNullable(ASTHelpers.getType(tree.getArguments().getFirst()));
}
return Optional.ofNullable(TargetType.targetType(state)).map(t -> t.type());
};
}
/**
* Produces an extractor which, if the tree matches, extracts the type of the first argument whose
* type is {@link Class} (preserving its {@code <T>} type parameter, if it has one}.
*
* @param m the matcher to use. It is an error for this matcher to succeed on any Tree that does
* not include at least one argument of type {@link Class}; if such a matcher is provided, the
* behavior of the returned Extractor is undefined.
*/
public static TypeExtractor<MethodInvocationTree> extractClassArg(
Matcher<MethodInvocationTree> m) {
return (tree, state) -> {
if (m.matches(tree, state)) {
for (ExpressionTree argument : tree.getArguments()) {
Type argumentType = ASTHelpers.getType(argument);
if (ASTHelpers.isSameType(argumentType, state.getSymtab().classType, state)) {
return Optional.of(argumentType);
}
}
// It's undefined, so we could fall through - but an exception is less likely to surprise.
throw new IllegalStateException();
}
return Optional.empty();
};
}
/**
* Creates a TypeExtractor that extracts the type of a | TypeExtractor |
java | apache__camel | components/camel-aws/camel-aws-xray/src/main/java/org/apache/camel/component/aws/xray/TraceAnnotatedTracingStrategy.java | {
"start": 1633,
"end": 2872
} | class ____ implements InterceptStrategy {
private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
@Override
public Processor wrapProcessorInInterceptors(
CamelContext camelContext,
NamedNode processorDefinition,
Processor target, Processor nextTarget)
throws Exception {
Class<?> processorClass = processorDefinition.getClass();
String shortName = processorDefinition.getShortName();
if (processorDefinition instanceof BeanDefinition) {
BeanProcessor beanProcessor = (BeanProcessor) target;
if (null != beanProcessor && null != beanProcessor.getBean()) {
processorClass = beanProcessor.getBean().getClass();
}
} else if (processorDefinition instanceof ProcessDefinition) {
DelegateSyncProcessor syncProcessor = (DelegateSyncProcessor) target;
if (null != syncProcessor && null != syncProcessor.getProcessor()) {
processorClass = syncProcessor.getProcessor().getClass();
}
}
if (processorClass == null) {
LOG.trace("Could not identify processor | TraceAnnotatedTracingStrategy |
java | google__dagger | dagger-android/main/java/dagger/android/AndroidInjector.java | {
"start": 1224,
"end": 1487
} | interface ____<T> {
/** Injects the members of {@code instance}. */
void inject(T instance);
/**
* Creates {@link AndroidInjector}s for a concrete subtype of a core Android type.
*
* @param <T> the concrete type to be injected
*/
| AndroidInjector |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/api/sync/RedisCommands.java | {
"start": 1145,
"end": 3121
} | interface ____<K, V> extends BaseRedisCommands<K, V>, RedisAclCommands<K, V>, RedisClusterCommands<K, V>,
RedisFunctionCommands<K, V>, RedisGeoCommands<K, V>, RedisHashCommands<K, V>, RedisHLLCommands<K, V>,
RedisKeyCommands<K, V>, RedisListCommands<K, V>, RedisScriptingCommands<K, V>, RedisServerCommands<K, V>,
RedisSetCommands<K, V>, RedisSortedSetCommands<K, V>, RedisStreamCommands<K, V>, RedisStringCommands<K, V>,
RedisTransactionalCommands<K, V>, RedisJsonCommands<K, V>, RedisVectorSetCommands<K, V>, RediSearchCommands<K, V> {
/**
* Authenticate to the server.
*
* @param password the password
* @return String simple-string-reply
*/
String auth(CharSequence password);
/**
* Authenticate to the server with username and password. Requires Redis 6 or newer.
*
* @param username the username
* @param password the password
* @return String simple-string-reply
* @since 6.0
*/
String auth(String username, CharSequence password);
/**
* Change the selected database for the current Commands.
*
* @param db the database number
* @return String simple-string-reply
*/
String select(int db);
/**
* Swap two Redis databases, so that immediately all the clients connected to a given DB will see the data of the other DB,
* and the other way around
*
* @param db1 the first database number
* @param db2 the second database number
* @return String simple-string-reply
*/
String swapdb(int db1, int db2);
/**
* @return the underlying connection.
* @since 6.2, will be removed with Lettuce 7 to avoid exposing the underlying connection.
*/
@Deprecated
StatefulRedisConnection<K, V> getStatefulConnection();
/**
* @return the currently configured instance of the {@link JsonParser}
* @since 6.5
*/
JsonParser getJsonParser();
}
| RedisCommands |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/batch/LeakingStatementCachingTest.java | {
"start": 2977,
"end": 3734
} | class ____ {
private static final SingleConnectionDataSource SINGLE_CONNECTION_DATASOURCE = new SingleConnectionDataSource();
@AfterAll
public void tearDown(SessionFactoryScope scope) {
try {
scope.getSessionFactory().getSchemaManager().dropMappedObjects( false );
}
finally {
SINGLE_CONNECTION_DATASOURCE.close();
}
}
@Test
void testPersistMultipleEntities(SessionFactoryScope scope) {
scope.inTransaction( session -> {
LongStream.range( 1, 10 )
.mapToObj( BaseEntity::new )
.forEach( session::persist );
assertThat( session.getJdbcCoordinator().getLogicalConnection().getResourceRegistry()
.hasRegisteredResources() ).isFalse();
} );
}
@Entity(name = "BaseEntity")
public static | LeakingStatementCachingTest |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/sql/dialect/oscar/ast/stmt/OscarDropSchemaStatement.java | {
"start": 1038,
"end": 2220
} | class ____ extends SQLStatementImpl implements OscarStatement, SQLDropStatement {
private SQLIdentifierExpr schemaName;
private boolean ifExists;
private boolean cascade;
private boolean restrict;
public SQLIdentifierExpr getSchemaName() {
return schemaName;
}
public void setSchemaName(SQLIdentifierExpr schemaName) {
this.schemaName = schemaName;
}
public boolean isIfExists() {
return ifExists;
}
public void setIfExists(boolean ifExists) {
this.ifExists = ifExists;
}
public boolean isCascade() {
return cascade;
}
public void setCascade(boolean cascade) {
this.cascade = cascade;
}
public boolean isRestrict() {
return restrict;
}
public void setRestrict(boolean restrict) {
this.restrict = restrict;
}
protected void accept0(SQLASTVisitor visitor) {
accept0((PGASTVisitor) visitor);
}
@Override
public void accept0(OscarASTVisitor visitor) {
if (visitor.visit(this)) {
acceptChild(visitor, this.schemaName);
}
visitor.endVisit(this);
}
}
| OscarDropSchemaStatement |
java | apache__spark | common/network-common/src/main/java/org/apache/spark/network/protocol/Encoders.java | {
"start": 1193,
"end": 1866
} | class ____ {
public static int encodedLength(String s) {
return 4 + s.getBytes(StandardCharsets.UTF_8).length;
}
public static void encode(ByteBuf buf, String s) {
byte[] bytes = s.getBytes(StandardCharsets.UTF_8);
buf.writeInt(bytes.length);
buf.writeBytes(bytes);
}
public static String decode(ByteBuf buf) {
int length = buf.readInt();
byte[] bytes = new byte[length];
buf.readBytes(bytes);
return new String(bytes, StandardCharsets.UTF_8);
}
}
/**
* Bitmaps are encoded with their serialization length followed by the serialization bytes.
*
* @since 3.1.0
*/
public static | Strings |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/serializer/SimpleDataFormatSerializerTest.java | {
"start": 377,
"end": 1349
} | class ____ extends TestCase {
private static SerializeConfig mapping = new SerializeConfig();
static {
mapping.put(Date.class, new SimpleDateFormatSerializer("yyyy-MM-dd"));
}
protected void setUp() throws Exception {
JSON.defaultTimeZone = TimeZone.getTimeZone("Asia/Shanghai");
JSON.defaultLocale = Locale.CHINA;
}
public void test_0() throws Exception {
Date date = new Date();
String text = JSON.toJSONString(date, mapping);
SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd", JSON.defaultLocale);
format.setTimeZone(JSON.defaultTimeZone);
SimpleDateFormat format2 = new SimpleDateFormat("yyyy-MM-dd", JSON.defaultLocale);
format2.setTimeZone(JSON.defaultTimeZone);
Assert.assertEquals(JSON.toJSONString(format.format(date)), text);
Assert.assertEquals(JSON.toJSONString(format2.format(date)), text);
}
}
| SimpleDataFormatSerializerTest |
java | elastic__elasticsearch | qa/ccs-unavailable-clusters/src/javaRestTest/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java | {
"start": 2698,
"end": 21358
} | class ____ extends ESRestTestCase {
private final ThreadPool threadPool = new TestThreadPool(getClass().getName());
@ClassRule
public static ElasticsearchCluster cluster = ElasticsearchCluster.local().build();
@Override
protected String getTestRestCluster() {
return cluster.getHttpAddresses();
}
@Override
public void tearDown() throws Exception {
super.tearDown();
ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS);
}
private static MockTransportService startTransport(
final String id,
final List<DiscoveryNode> knownNodes,
final VersionInformation version,
final TransportVersion transportVersion,
final ThreadPool threadPool
) {
boolean success = false;
final Settings s = Settings.builder().put("node.name", id).build();
ClusterName clusterName = ClusterName.CLUSTER_NAME_SETTING.get(s);
MockTransportService newService = MockTransportService.createNewService(s, version, transportVersion, threadPool, null);
try {
newService.registerRequestHandler(
TransportSearchShardsAction.TYPE.name(),
EsExecutors.DIRECT_EXECUTOR_SERVICE,
SearchShardsRequest::new,
(request, channel, task) -> {
var searchShardsResponse = new SearchShardsResponse(List.of(), List.of(), Collections.emptyMap());
try {
channel.sendResponse(searchShardsResponse);
} finally {
searchShardsResponse.decRef();
}
}
);
newService.registerRequestHandler(
TransportSearchAction.TYPE.name(),
EsExecutors.DIRECT_EXECUTOR_SERVICE,
SearchRequest::new,
(request, channel, task) -> {
var searchResponse = SearchResponseUtils.successfulResponse(
SearchHits.empty(Lucene.TOTAL_HITS_EQUAL_TO_ZERO, Float.NaN)
);
try {
channel.sendResponse(searchResponse);
} finally {
searchResponse.decRef();
}
}
);
newService.registerRequestHandler(
ClusterStateAction.NAME,
EsExecutors.DIRECT_EXECUTOR_SERVICE,
RemoteClusterStateRequest::new,
(request, channel, task) -> {
DiscoveryNodes.Builder builder = DiscoveryNodes.builder();
for (DiscoveryNode node : knownNodes) {
builder.add(node);
}
ClusterState build = ClusterState.builder(clusterName).nodes(builder.build()).build();
var clusterStateResponse = new ClusterStateResponse(clusterName, build, false);
try {
channel.sendResponse(clusterStateResponse);
} finally {
clusterStateResponse.decRef();
}
}
);
newService.start();
newService.acceptIncomingRequests();
success = true;
return newService;
} finally {
if (success == false) {
newService.close();
}
}
}
public void testSearchSkipUnavailable() throws IOException {
try (
MockTransportService remoteTransport = startTransport(
"node0",
new CopyOnWriteArrayList<>(),
VersionInformation.CURRENT,
TransportVersion.current(),
threadPool
)
) {
DiscoveryNode remoteNode = remoteTransport.getLocalNode();
updateRemoteClusterSettings(Collections.singletonMap("seeds", remoteNode.getAddress().toString()));
for (int i = 0; i < 10; i++) {
Request request = new Request("POST", "/index/_doc");
request.setJsonEntity("{ \"field\" : \"value\" }");
Response response = client().performRequest(request);
assertEquals(201, response.getStatusLine().getStatusCode());
}
Response refreshResponse = client().performRequest(new Request("POST", "/index/_refresh"));
assertEquals(200, refreshResponse.getStatusLine().getStatusCode());
{
Response response = client().performRequest(new Request("GET", "/index/_search"));
assertEquals(200, response.getStatusLine().getStatusCode());
ObjectPath objectPath = ObjectPath.createFromResponse(response);
assertNull(objectPath.evaluate("_clusters"));
assertThat(objectPath.evaluate("hits.total.value"), equalTo(10));
assertThat(objectPath.evaluateArraySize("hits.hits"), equalTo(10));
}
{
Response response = client().performRequest(new Request("GET", "/index,remote1:index/_search"));
assertEquals(200, response.getStatusLine().getStatusCode());
ObjectPath objectPath = ObjectPath.createFromResponse(response);
assertNotNull(objectPath.evaluate("_clusters"));
assertThat(objectPath.evaluate("_clusters.total"), equalTo(2));
assertThat(objectPath.evaluate("_clusters.successful"), equalTo(2));
assertThat(objectPath.evaluate("_clusters.skipped"), equalTo(0));
assertThat(objectPath.evaluate("_clusters.running"), equalTo(0));
assertThat(objectPath.evaluate("_clusters.partial"), equalTo(0));
assertThat(objectPath.evaluate("_clusters.failed"), equalTo(0));
assertThat(objectPath.evaluate("hits.total.value"), equalTo(10));
assertThat(objectPath.evaluateArraySize("hits.hits"), equalTo(10));
}
{
Response response = client().performRequest(new Request("GET", "/remote1:index/_search"));
assertEquals(200, response.getStatusLine().getStatusCode());
ObjectPath objectPath = ObjectPath.createFromResponse(response);
assertNotNull(objectPath.evaluate("_clusters"));
assertThat(objectPath.evaluate("_clusters.total"), equalTo(1));
assertThat(objectPath.evaluate("_clusters.successful"), equalTo(1));
assertThat(objectPath.evaluate("_clusters.skipped"), equalTo(0));
assertThat(objectPath.evaluate("_clusters.running"), equalTo(0));
assertThat(objectPath.evaluate("_clusters.partial"), equalTo(0));
assertThat(objectPath.evaluate("_clusters.failed"), equalTo(0));
assertThat(objectPath.evaluate("hits.total.value"), equalTo(0));
assertThat(objectPath.evaluateArraySize("hits.hits"), equalTo(0));
}
{
Response response = client().performRequest(new Request("GET", "/index,remote1:index/_search?scroll=1m"));
assertEquals(200, response.getStatusLine().getStatusCode());
ObjectPath objectPath = ObjectPath.createFromResponse(response);
assertNotNull(objectPath.evaluate("_clusters"));
assertThat(objectPath.evaluate("_clusters.total"), equalTo(2));
assertThat(objectPath.evaluate("_clusters.successful"), equalTo(2));
assertThat(objectPath.evaluate("_clusters.skipped"), equalTo(0));
assertThat(objectPath.evaluate("_clusters.running"), equalTo(0));
assertThat(objectPath.evaluate("_clusters.partial"), equalTo(0));
assertThat(objectPath.evaluate("_clusters.failed"), equalTo(0));
assertThat(objectPath.evaluate("hits.total.value"), equalTo(10));
assertThat(objectPath.evaluateArraySize("hits.hits"), equalTo(10));
String scrollId = objectPath.evaluate("_scroll_id");
assertNotNull(scrollId);
Request scrollRequest = new Request("POST", "/_search/scroll");
scrollRequest.setJsonEntity("{ \"scroll_id\" : \"" + scrollId + "\" }");
Response scrollResponse = client().performRequest(scrollRequest);
assertEquals(200, scrollResponse.getStatusLine().getStatusCode());
ObjectPath scrollObjectPath = ObjectPath.createFromResponse(scrollResponse);
assertNull(scrollObjectPath.evaluate("_clusters"));
assertThat(scrollObjectPath.evaluate("hits.total.value"), equalTo(10));
assertThat(scrollObjectPath.evaluateArraySize("hits.hits"), equalTo(0));
}
remoteTransport.close();
updateRemoteClusterSettings(Collections.singletonMap("skip_unavailable", true));
{
Response response = client().performRequest(new Request("GET", "/index,remote1:index/_search"));
assertEquals(200, response.getStatusLine().getStatusCode());
ObjectPath objectPath = ObjectPath.createFromResponse(response);
assertNotNull(objectPath.evaluate("_clusters"));
assertThat(objectPath.evaluate("_clusters.total"), equalTo(2));
assertThat(objectPath.evaluate("_clusters.successful"), equalTo(1));
assertThat(objectPath.evaluate("_clusters.skipped"), equalTo(1));
assertThat(objectPath.evaluate("_clusters.running"), equalTo(0));
assertThat(objectPath.evaluate("_clusters.partial"), equalTo(0));
assertThat(objectPath.evaluate("_clusters.failed"), equalTo(0));
assertThat(objectPath.evaluate("hits.total.value"), equalTo(10));
assertThat(objectPath.evaluateArraySize("hits.hits"), equalTo(10));
}
{
Response response = client().performRequest(new Request("GET", "/remote1:index/_search"));
assertEquals(200, response.getStatusLine().getStatusCode());
ObjectPath objectPath = ObjectPath.createFromResponse(response);
assertNotNull(objectPath.evaluate("_clusters"));
assertThat(objectPath.evaluate("_clusters.total"), equalTo(1));
assertThat(objectPath.evaluate("_clusters.successful"), equalTo(0));
assertThat(objectPath.evaluate("_clusters.skipped"), equalTo(1));
assertThat(objectPath.evaluate("_clusters.running"), equalTo(0));
assertThat(objectPath.evaluate("_clusters.partial"), equalTo(0));
assertThat(objectPath.evaluate("_clusters.failed"), equalTo(0));
assertThat(objectPath.evaluate("hits.total.value"), equalTo(0));
assertThat(objectPath.evaluateArraySize("hits.hits"), equalTo(0));
}
{
Response response = client().performRequest(new Request("GET", "/index,remote1:index/_search?scroll=1m"));
assertEquals(200, response.getStatusLine().getStatusCode());
ObjectPath objectPath = ObjectPath.createFromResponse(response);
assertNotNull(objectPath.evaluate("_clusters"));
assertThat(objectPath.evaluate("_clusters.total"), equalTo(2));
assertThat(objectPath.evaluate("_clusters.successful"), equalTo(1));
assertThat(objectPath.evaluate("_clusters.skipped"), equalTo(1));
assertThat(objectPath.evaluate("_clusters.running"), equalTo(0));
assertThat(objectPath.evaluate("_clusters.partial"), equalTo(0));
assertThat(objectPath.evaluate("_clusters.failed"), equalTo(0));
assertThat(objectPath.evaluate("hits.total.value"), equalTo(10));
assertThat(objectPath.evaluateArraySize("hits.hits"), equalTo(10));
String scrollId = objectPath.evaluate("_scroll_id");
assertNotNull(scrollId);
Request scrollRequest = new Request("POST", "/_search/scroll");
scrollRequest.setJsonEntity("{ \"scroll_id\" : \"" + scrollId + "\" }");
Response scrollResponse = client().performRequest(scrollRequest);
assertEquals(200, scrollResponse.getStatusLine().getStatusCode());
ObjectPath scrollObjectPath = ObjectPath.createFromResponse(scrollResponse);
assertNull(scrollObjectPath.evaluate("_clusters"));
assertThat(scrollObjectPath.evaluate("hits.total.value"), equalTo(10));
assertThat(scrollObjectPath.evaluateArraySize("hits.hits"), equalTo(0));
}
updateRemoteClusterSettings(Collections.singletonMap("skip_unavailable", false));
assertSearchConnectFailure();
Map<String, Object> map = new HashMap<>();
map.put("seeds", null);
map.put("skip_unavailable", null);
updateRemoteClusterSettings(map);
}
}
public void testSkipUnavailableDependsOnSeeds() throws IOException {
try (
MockTransportService remoteTransport = startTransport(
"node0",
new CopyOnWriteArrayList<>(),
VersionInformation.CURRENT,
TransportVersion.current(),
threadPool
)
) {
DiscoveryNode remoteNode = remoteTransport.getLocalNode();
{
// check that skip_unavailable alone cannot be set
Request request = new Request("PUT", "/_cluster/settings");
request.setEntity(buildUpdateSettingsRequestBody(Collections.singletonMap("skip_unavailable", randomBoolean())));
ResponseException responseException = expectThrows(ResponseException.class, () -> client().performRequest(request));
assertEquals(400, responseException.getResponse().getStatusLine().getStatusCode());
assertThat(
responseException.getMessage(),
containsString(
"Cannot configure setting [cluster.remote.remote1.skip_unavailable] if remote cluster is " + "not enabled."
)
);
}
Map<String, Object> settingsMap = new HashMap<>();
settingsMap.put("seeds", remoteNode.getAddress().toString());
settingsMap.put("skip_unavailable", randomBoolean());
updateRemoteClusterSettings(settingsMap);
{
// check that seeds cannot be reset alone if skip_unavailable is set
Request request = new Request("PUT", "/_cluster/settings");
request.setEntity(buildUpdateSettingsRequestBody(Collections.singletonMap("seeds", null)));
ResponseException responseException = expectThrows(ResponseException.class, () -> client().performRequest(request));
assertEquals(400, responseException.getResponse().getStatusLine().getStatusCode());
assertThat(
responseException.getMessage(),
containsString(
"Cannot configure setting " + "[cluster.remote.remote1.skip_unavailable] if remote cluster is not enabled."
)
);
}
if (randomBoolean()) {
updateRemoteClusterSettings(Collections.singletonMap("skip_unavailable", null));
updateRemoteClusterSettings(Collections.singletonMap("seeds", null));
} else {
Map<String, Object> nullMap = new HashMap<>();
nullMap.put("seeds", null);
nullMap.put("skip_unavailable", null);
updateRemoteClusterSettings(nullMap);
}
}
}
private static void assertSearchConnectFailure() {
{
ResponseException exception = expectThrows(
ResponseException.class,
() -> client().performRequest(new Request("POST", "/index,remote1:index/_search"))
);
assertThat(exception.getMessage(), containsString("connect_exception"));
}
{
ResponseException exception = expectThrows(
ResponseException.class,
() -> client().performRequest(new Request("POST", "/remote1:index/_search"))
);
assertThat(exception.getMessage(), containsString("connect_exception"));
}
{
ResponseException exception = expectThrows(
ResponseException.class,
() -> client().performRequest(new Request("POST", "/remote1:index/_search?scroll=1m"))
);
assertThat(exception.getMessage(), containsString("connect_exception"));
}
}
private static void updateRemoteClusterSettings(Map<String, Object> settings) throws IOException {
Request request = new Request("PUT", "/_cluster/settings");
request.setEntity(buildUpdateSettingsRequestBody(settings));
Response response = client().performRequest(request);
assertEquals(200, response.getStatusLine().getStatusCode());
}
private static HttpEntity buildUpdateSettingsRequestBody(Map<String, Object> settings) throws IOException {
String requestBody;
try (XContentBuilder builder = JsonXContent.contentBuilder()) {
builder.startObject();
{
builder.startObject("persistent");
{
builder.startObject("cluster.remote.remote1");
{
for (Map.Entry<String, Object> entry : settings.entrySet()) {
builder.field(entry.getKey(), entry.getValue());
}
}
builder.endObject();
}
builder.endObject();
}
builder.endObject();
requestBody = Strings.toString(builder);
}
return new NStringEntity(requestBody, ContentType.APPLICATION_JSON);
}
@Override
protected Settings restClientSettings() {
String token = basicAuthHeaderValue("admin", new SecureString("admin-password".toCharArray()));
return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build();
}
}
| CrossClusterSearchUnavailableClusterIT |
java | google__dagger | dagger-spi/main/java/dagger/spi/model/Key.java | {
"start": 968,
"end": 3520
} | class ____ {
/**
* A {@link javax.inject.Qualifier} annotation that provides a unique namespace prefix for the
* type of this key.
*/
public abstract Optional<DaggerAnnotation> qualifier();
/** The type represented by this key. */
public abstract DaggerType type();
/**
* Distinguishes keys for multibinding contributions that share a {@link #type()} and {@link
* #qualifier()}.
*
* <p>Each multibound map and set has a synthetic multibinding that depends on the specific
* contributions to that map or set using keys that identify those multibinding contributions.
*
* <p>Absent except for multibinding contributions.
*/
public abstract Optional<MultibindingContributionIdentifier> multibindingContributionIdentifier();
/** Returns a {@link Builder} that inherits the properties of this key. */
abstract Builder toBuilder();
/** Returns a copy of this key with the type replaced with the given type. */
public Key withType(DaggerType newType) {
return toBuilder().type(newType).build();
}
/**
* Returns a copy of this key with the multibinding contribution identifier replaced with the
* given multibinding contribution identifier.
*/
public Key withMultibindingContributionIdentifier(
DaggerTypeElement contributingModule, DaggerExecutableElement bindingMethod) {
return toBuilder()
.multibindingContributionIdentifier(contributingModule, bindingMethod)
.build();
}
/** Returns a copy of this key with the multibinding contribution identifier, if any, removed. */
public Key withoutMultibindingContributionIdentifier() {
return toBuilder().multibindingContributionIdentifier(Optional.empty()).build();
}
// The main hashCode/equality bottleneck is in MoreTypes.equivalence(). It's possible that we can
// avoid this by tuning that method. Perhaps we can also avoid the issue entirely by interning all
// Keys
@Memoized
@Override
public abstract int hashCode();
@Override
public abstract boolean equals(Object o);
@Override
public final String toString() {
return Joiner.on(' ')
.skipNulls()
.join(
qualifier().map(DaggerAnnotation::toString).orElse(null),
type(),
multibindingContributionIdentifier().orElse(null));
}
/** Returns a builder for {@link Key}s. */
public static Builder builder(DaggerType type) {
return new AutoValue_Key.Builder().type(type);
}
/** A builder for {@link Key}s. */
@AutoValue.Builder
public abstract static | Key |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.