language stringclasses 1
value | repo stringclasses 60
values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | playframework__playframework | documentation/manual/working/javaGuide/main/tests/code/javaguide/tests/controllers/HomeController.java | {
"start": 221,
"end": 468
} | class ____ extends Controller {
public Result index() {
return ok(javaguide.tests.html.index.render("Welcome to Play!"));
}
public Result post(Http.Request request) {
return redirect(routes.HomeController.index());
}
}
| HomeController |
java | google__dagger | javatests/dagger/internal/codegen/DaggerSuperficialValidationTest.java | {
"start": 12102,
"end": 12329
} | class ____ {",
" abstract TestClass foo(TestClass x);",
"}"),
CompilerTests.kotlinSource(
"test.TestClass.kt",
"package test",
"",
"abstract | TestClass |
java | apache__camel | core/camel-api/src/main/java/org/apache/camel/spi/DataType.java | {
"start": 1618,
"end": 2181
} | class ____ allows the message to carry the name of the message
* data structure even if it's marshaled.
* <p/>
* The scheme can also be used to associate the same DataType with different Camel components. For example
* `http:cloud-events` and `aws-s3:cloud-events` where the scheme relates to the respective component's scheme. This
* information could be leveraged to detect required {@link Transformer} and {@link Validator} implementations provided
* as part of these components.
*
* @see DataTypeAware
* @see Transformer
* @see Validator
*/
public | name |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/BinaryPartitioner.java | {
"start": 1381,
"end": 1606
} | class ____<V>
extends org.apache.hadoop.mapreduce.lib.partition.BinaryPartitioner<V>
implements Partitioner<BinaryComparable, V> {
public void configure(JobConf job) {
super.setConf(job);
}
}
| BinaryPartitioner |
java | micronaut-projects__micronaut-core | discovery-core/src/main/java/io/micronaut/discovery/cloud/package-info.java | {
"start": 622,
"end": 725
} | interface ____.
*
* @author graemerocher
* @since 1.0
*/
package io.micronaut.discovery.cloud;
| classes |
java | mockito__mockito | mockito-core/src/test/java/org/mockito/internal/hamcrest/MatcherGenericTypeExtractorTest.java | {
"start": 2049,
"end": 2455
} | class ____ extends BaseMatcher<Integer> {
public boolean matches(Object o) {
return true;
}
public void describeMismatch(Object item, Description mismatchDescription) {}
public void describeTo(Description description) {}
}
// non-generic matcher implementing the interface
@SuppressWarnings("rawtypes")
private static | StaticIntMatcherFromInterface |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/core/annotation/MergedAnnotation.java | {
"start": 12815,
"end": 12921
} | enum ____ value from the annotation.
* @param attributeName the attribute name
* @param type the | attribute |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoPointDocValuesAggregatorFunction.java | {
"start": 1207,
"end": 7981
} | class ____ implements AggregatorFunction {
private static final List<IntermediateStateDesc> INTERMEDIATE_STATE_DESC = List.of(
new IntermediateStateDesc("top", ElementType.INT),
new IntermediateStateDesc("bottom", ElementType.INT),
new IntermediateStateDesc("negLeft", ElementType.INT),
new IntermediateStateDesc("negRight", ElementType.INT),
new IntermediateStateDesc("posLeft", ElementType.INT),
new IntermediateStateDesc("posRight", ElementType.INT) );
private final DriverContext driverContext;
private final SpatialExtentStateWrappedLongitudeState state;
private final List<Integer> channels;
public SpatialExtentGeoPointDocValuesAggregatorFunction(DriverContext driverContext,
List<Integer> channels, SpatialExtentStateWrappedLongitudeState state) {
this.driverContext = driverContext;
this.channels = channels;
this.state = state;
}
public static SpatialExtentGeoPointDocValuesAggregatorFunction create(DriverContext driverContext,
List<Integer> channels) {
return new SpatialExtentGeoPointDocValuesAggregatorFunction(driverContext, channels, SpatialExtentGeoPointDocValuesAggregator.initSingle());
}
public static List<IntermediateStateDesc> intermediateStateDesc() {
return INTERMEDIATE_STATE_DESC;
}
@Override
public int intermediateBlockCount() {
return INTERMEDIATE_STATE_DESC.size();
}
@Override
public void addRawInput(Page page, BooleanVector mask) {
if (mask.allFalse()) {
// Entire page masked away
} else if (mask.allTrue()) {
addRawInputNotMasked(page);
} else {
addRawInputMasked(page, mask);
}
}
private void addRawInputMasked(Page page, BooleanVector mask) {
LongBlock encodedBlock = page.getBlock(channels.get(0));
LongVector encodedVector = encodedBlock.asVector();
if (encodedVector == null) {
addRawBlock(encodedBlock, mask);
return;
}
addRawVector(encodedVector, mask);
}
private void addRawInputNotMasked(Page page) {
LongBlock encodedBlock = page.getBlock(channels.get(0));
LongVector encodedVector = encodedBlock.asVector();
if (encodedVector == null) {
addRawBlock(encodedBlock);
return;
}
addRawVector(encodedVector);
}
private void addRawVector(LongVector encodedVector) {
for (int valuesPosition = 0; valuesPosition < encodedVector.getPositionCount(); valuesPosition++) {
long encodedValue = encodedVector.getLong(valuesPosition);
SpatialExtentGeoPointDocValuesAggregator.combine(state, encodedValue);
}
}
private void addRawVector(LongVector encodedVector, BooleanVector mask) {
for (int valuesPosition = 0; valuesPosition < encodedVector.getPositionCount(); valuesPosition++) {
if (mask.getBoolean(valuesPosition) == false) {
continue;
}
long encodedValue = encodedVector.getLong(valuesPosition);
SpatialExtentGeoPointDocValuesAggregator.combine(state, encodedValue);
}
}
private void addRawBlock(LongBlock encodedBlock) {
for (int p = 0; p < encodedBlock.getPositionCount(); p++) {
int encodedValueCount = encodedBlock.getValueCount(p);
if (encodedValueCount == 0) {
continue;
}
int encodedStart = encodedBlock.getFirstValueIndex(p);
int encodedEnd = encodedStart + encodedValueCount;
for (int encodedOffset = encodedStart; encodedOffset < encodedEnd; encodedOffset++) {
long encodedValue = encodedBlock.getLong(encodedOffset);
SpatialExtentGeoPointDocValuesAggregator.combine(state, encodedValue);
}
}
}
private void addRawBlock(LongBlock encodedBlock, BooleanVector mask) {
for (int p = 0; p < encodedBlock.getPositionCount(); p++) {
if (mask.getBoolean(p) == false) {
continue;
}
int encodedValueCount = encodedBlock.getValueCount(p);
if (encodedValueCount == 0) {
continue;
}
int encodedStart = encodedBlock.getFirstValueIndex(p);
int encodedEnd = encodedStart + encodedValueCount;
for (int encodedOffset = encodedStart; encodedOffset < encodedEnd; encodedOffset++) {
long encodedValue = encodedBlock.getLong(encodedOffset);
SpatialExtentGeoPointDocValuesAggregator.combine(state, encodedValue);
}
}
}
@Override
public void addIntermediateInput(Page page) {
assert channels.size() == intermediateBlockCount();
assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size();
Block topUncast = page.getBlock(channels.get(0));
if (topUncast.areAllValuesNull()) {
return;
}
IntVector top = ((IntBlock) topUncast).asVector();
assert top.getPositionCount() == 1;
Block bottomUncast = page.getBlock(channels.get(1));
if (bottomUncast.areAllValuesNull()) {
return;
}
IntVector bottom = ((IntBlock) bottomUncast).asVector();
assert bottom.getPositionCount() == 1;
Block negLeftUncast = page.getBlock(channels.get(2));
if (negLeftUncast.areAllValuesNull()) {
return;
}
IntVector negLeft = ((IntBlock) negLeftUncast).asVector();
assert negLeft.getPositionCount() == 1;
Block negRightUncast = page.getBlock(channels.get(3));
if (negRightUncast.areAllValuesNull()) {
return;
}
IntVector negRight = ((IntBlock) negRightUncast).asVector();
assert negRight.getPositionCount() == 1;
Block posLeftUncast = page.getBlock(channels.get(4));
if (posLeftUncast.areAllValuesNull()) {
return;
}
IntVector posLeft = ((IntBlock) posLeftUncast).asVector();
assert posLeft.getPositionCount() == 1;
Block posRightUncast = page.getBlock(channels.get(5));
if (posRightUncast.areAllValuesNull()) {
return;
}
IntVector posRight = ((IntBlock) posRightUncast).asVector();
assert posRight.getPositionCount() == 1;
SpatialExtentGeoPointDocValuesAggregator.combineIntermediate(state, top.getInt(0), bottom.getInt(0), negLeft.getInt(0), negRight.getInt(0), posLeft.getInt(0), posRight.getInt(0));
}
@Override
public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) {
state.toIntermediate(blocks, offset, driverContext);
}
@Override
public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) {
blocks[offset] = SpatialExtentGeoPointDocValuesAggregator.evaluateFinal(state, driverContext);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(getClass().getSimpleName()).append("[");
sb.append("channels=").append(channels);
sb.append("]");
return sb.toString();
}
@Override
public void close() {
state.close();
}
}
| SpatialExtentGeoPointDocValuesAggregatorFunction |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/common/io/FinalizeOnMaster.java | {
"start": 933,
"end": 1052
} | interface ____ be implemented by {@link OutputFormat}s to have the master finalize them
* globally.
*/
@Public
public | may |
java | quarkusio__quarkus | integration-tests/hibernate-search-orm-elasticsearch/src/main/java/io/quarkus/it/hibernate/search/orm/elasticsearch/search/HibernateSearchTestResource.java | {
"start": 621,
"end": 4466
} | class ____ {
@Inject
EntityManager entityManager;
@PUT
@Path("/init-data")
@Transactional
public void initData() {
createPerson("John Irving", "Burlington");
createPerson("David Lodge", "London");
createPerson("Paul Auster", "New York");
createPerson("John Grisham", "Oxford");
// Add many other entities, so that mass indexing has something to do.
// DO NOT REMOVE, it's important to have many entities to fully test mass indexing.
for (int i = 0; i < 2000; i++) {
createPerson("Other Person #" + i, "Other City #" + i);
}
}
@GET
@Path("/search")
@Produces(MediaType.TEXT_PLAIN)
public String testSearch() {
SearchSession searchSession = Search.session(entityManager);
List<Person> person = searchSession.search(Person.class)
.where(f -> f.match().field("name").matching("john"))
.sort(f -> f.field("name_sort"))
.fetchHits(20);
assertEquals(2, person.size());
assertEquals("John Grisham", person.get(0).getName());
assertEquals("John Irving", person.get(1).getName());
person = searchSession.search(Person.class)
.where(f -> f.match().field("address.city").matching("london"))
.sort(f -> f.field("name_sort"))
.fetchHits(20);
assertEquals(1, person.size());
assertEquals("David Lodge", person.get(0).getName());
assertEquals(4 + 2000, searchSession.search(Person.class)
.where(f -> f.matchAll())
.fetchTotalHitCount());
return "OK";
}
@GET
@Path("/search-projection")
@Produces(MediaType.TEXT_PLAIN)
public String testSearchWithProjection() {
SearchSession searchSession = Search.session(entityManager);
assertThat(searchSession.search(Person.class)
.select(PersonDTO.class)
.where(f -> f.match().field("name").matching("john"))
.sort(f -> f.field("name_sort"))
.fetchHits(20))
.usingRecursiveFieldByFieldElementComparator()
.containsExactly(
new PersonDTO(4, "John Grisham", new AddressDTO("Oxford")),
new PersonDTO(1, "John Irving", new AddressDTO("Burlington")));
return "OK";
}
@PUT
@Path("/purge")
@Produces(MediaType.TEXT_PLAIN)
public String testPurge() {
SearchSession searchSession = Search.session(entityManager);
searchSession.workspace().purge();
return "OK";
}
@PUT
@Path("/refresh")
@Produces(MediaType.TEXT_PLAIN)
public String testRefresh() {
SearchSession searchSession = Search.session(entityManager);
searchSession.workspace().refresh();
return "OK";
}
@GET
@Path("/search-empty")
@Produces(MediaType.TEXT_PLAIN)
public String testSearchEmpty() {
SearchSession searchSession = Search.session(entityManager);
List<Person> person = searchSession.search(Person.class)
.where(f -> f.matchAll())
.fetchHits(20);
assertEquals(0, person.size());
return "OK";
}
@PUT
@Path("/mass-indexer")
@Produces(MediaType.TEXT_PLAIN)
public String testMassIndexer() throws InterruptedException {
SearchSession searchSession = Search.session(entityManager);
searchSession.massIndexer().startAndWait();
return "OK";
}
private void createPerson(String name, String city) {
Address address = new Address(city);
entityManager.persist(address);
Person person = new Person(name, address);
entityManager.persist(person);
}
}
| HibernateSearchTestResource |
java | alibaba__fastjson | src/test/java/com/alibaba/fastjson/deserializer/issues3796/bean/ObjectJ1.java | {
"start": 95,
"end": 1219
} | class ____ {
private int a = 0;
private int b = 0;
private List<ObjectJ1_A> c;
private int d = 0;
private List<CommonObject> e;
private List<Integer> f;
private List<Integer> g;
private List<Integer> h;
private boolean i = false;
public int getA() {
return a;
}
public void setA(int a) {
this.a = a;
}
public int getB() {
return b;
}
public void setB(int b) {
this.b = b;
}
public List<ObjectJ1_A> getC() {
return c;
}
public void setC(List<ObjectJ1_A> c) {
this.c = c;
}
public int getD() {
return d;
}
public void setD(int d) {
this.d = d;
}
public List<CommonObject> getE() {
return e;
}
public void setE(List<CommonObject> e) {
this.e = e;
}
public List<Integer> getF() {
return f;
}
public void setF(List<Integer> f) {
this.f = f;
}
public List<Integer> getG() {
return g;
}
public void setG(List<Integer> g) {
this.g = g;
}
public List<Integer> getH() {
return h;
}
public void setH(List<Integer> h) {
this.h = h;
}
public boolean isI() {
return i;
}
public void setI(boolean i) {
this.i = i;
}
}
| ObjectJ1 |
java | apache__camel | components/camel-stringtemplate/src/generated/java/org/apache/camel/component/stringtemplate/StringTemplateEndpointConfigurer.java | {
"start": 741,
"end": 3704
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
StringTemplateEndpoint target = (StringTemplateEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "allowcontextmapall":
case "allowContextMapAll": target.setAllowContextMapAll(property(camelContext, boolean.class, value)); return true;
case "allowtemplatefromheader":
case "allowTemplateFromHeader": target.setAllowTemplateFromHeader(property(camelContext, boolean.class, value)); return true;
case "contentcache":
case "contentCache": target.setContentCache(property(camelContext, boolean.class, value)); return true;
case "delimiterstart":
case "delimiterStart": target.setDelimiterStart(property(camelContext, char.class, value)); return true;
case "delimiterstop":
case "delimiterStop": target.setDelimiterStop(property(camelContext, char.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "allowcontextmapall":
case "allowContextMapAll": return boolean.class;
case "allowtemplatefromheader":
case "allowTemplateFromHeader": return boolean.class;
case "contentcache":
case "contentCache": return boolean.class;
case "delimiterstart":
case "delimiterStart": return char.class;
case "delimiterstop":
case "delimiterStop": return char.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
StringTemplateEndpoint target = (StringTemplateEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "allowcontextmapall":
case "allowContextMapAll": return target.isAllowContextMapAll();
case "allowtemplatefromheader":
case "allowTemplateFromHeader": return target.isAllowTemplateFromHeader();
case "contentcache":
case "contentCache": return target.isContentCache();
case "delimiterstart":
case "delimiterStart": return target.getDelimiterStart();
case "delimiterstop":
case "delimiterStop": return target.getDelimiterStop();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
default: return null;
}
}
}
| StringTemplateEndpointConfigurer |
java | apache__kafka | streams/src/test/java/org/apache/kafka/streams/processor/internals/assignment/LegacyStickyTaskAssignorTest.java | {
"start": 6890,
"end": 71658
} | class ____ {
private final List<Integer> expectedTopicGroupIds = asList(1, 2);
private final Time time = new MockTime();
private final Map<ProcessId, ClientState> clients = new TreeMap<>();
private boolean enableRackAwareTaskAssignor;
public void setUp(final String rackAwareStrategy) {
enableRackAwareTaskAssignor = !rackAwareStrategy.equals(StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE);
}
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void shouldAssignOneActiveTaskToEachProcessWhenTaskCountSameAsProcessCount(final String rackAwareStrategy) {
setUp(rackAwareStrategy);
createClient(PID_1, 1);
createClient(PID_2, 1);
createClient(PID_3, 1);
final boolean probingRebalanceNeeded = assign(rackAwareStrategy, TASK_0_0, TASK_0_1, TASK_0_2);
assertThat(probingRebalanceNeeded, is(false));
for (final ClientState clientState : clients.values()) {
assertThat(clientState.activeTaskCount(), equalTo(1));
}
}
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void shouldAssignTopicGroupIdEvenlyAcrossClientsWithNoStandByTasks(final String rackAwareStrategy) {
setUp(rackAwareStrategy);
createClient(PID_1, 2);
createClient(PID_2, 2);
createClient(PID_3, 2);
final boolean probingRebalanceNeeded = assign(rackAwareStrategy, TASK_1_0, TASK_1_1, TASK_2_2, TASK_2_0, TASK_2_1, TASK_1_2);
assertThat(probingRebalanceNeeded, is(false));
assertActiveTaskTopicGroupIdsEvenlyDistributed();
}
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void shouldAssignTopicGroupIdEvenlyAcrossClientsWithStandByTasks(final String rackAwareStrategy) {
setUp(rackAwareStrategy);
createClient(PID_1, 2);
createClient(PID_2, 2);
createClient(PID_3, 2);
final boolean probingRebalanceNeeded = assign(1, rackAwareStrategy, TASK_2_0, TASK_1_1, TASK_1_2, TASK_1_0, TASK_2_1, TASK_2_2);
assertThat(probingRebalanceNeeded, is(false));
assertActiveTaskTopicGroupIdsEvenlyDistributed();
}
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void shouldNotMigrateActiveTaskToOtherProcess(final String rackAwareStrategy) {
setUp(rackAwareStrategy);
createClientWithPreviousActiveTasks(PID_1, 1, TASK_0_0);
createClientWithPreviousActiveTasks(PID_2, 1, TASK_0_1);
assertThat(assign(rackAwareStrategy, TASK_0_0, TASK_0_1, TASK_0_2), is(false));
assertThat(clients.get(PID_1).activeTasks(), hasItems(TASK_0_0));
assertThat(clients.get(PID_2).activeTasks(), hasItems(TASK_0_1));
assertThat(allActiveTasks(), equalTo(asList(TASK_0_0, TASK_0_1, TASK_0_2)));
clients.clear();
// flip the previous active tasks assignment around.
createClientWithPreviousActiveTasks(PID_1, 1, TASK_0_1);
createClientWithPreviousActiveTasks(PID_2, 1, TASK_0_2);
assertThat(assign(rackAwareStrategy, TASK_0_0, TASK_0_1, TASK_0_2), is(false));
assertThat(clients.get(PID_1).activeTasks(), hasItems(TASK_0_1));
assertThat(clients.get(PID_2).activeTasks(), hasItems(TASK_0_2));
assertThat(allActiveTasks(), equalTo(asList(TASK_0_0, TASK_0_1, TASK_0_2)));
}
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void shouldMigrateActiveTasksToNewProcessWithoutChangingAllAssignments(final String rackAwareStrategy) {
setUp(rackAwareStrategy);
createClientWithPreviousActiveTasks(PID_1, 1, TASK_0_0, TASK_0_2);
createClientWithPreviousActiveTasks(PID_2, 1, TASK_0_1);
createClient(PID_3, 1);
final boolean probingRebalanceNeeded = assign(rackAwareStrategy, TASK_0_0, TASK_0_1, TASK_0_2);
assertThat(probingRebalanceNeeded, is(false));
assertThat(clients.get(PID_2).activeTasks(), equalTo(singleton(TASK_0_1)));
assertThat(clients.get(PID_1).activeTasks().size(), equalTo(1));
assertThat(clients.get(PID_3).activeTasks().size(), equalTo(1));
assertThat(allActiveTasks(), equalTo(asList(TASK_0_0, TASK_0_1, TASK_0_2)));
}
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void shouldAssignBasedOnCapacity(final String rackAwareStrategy) {
setUp(rackAwareStrategy);
createClient(PID_1, 1);
createClient(PID_2, 2);
final boolean probingRebalanceNeeded = assign(rackAwareStrategy, TASK_0_0, TASK_0_1, TASK_0_2);
assertThat(probingRebalanceNeeded, is(false));
assertThat(clients.get(PID_1).activeTasks().size(), equalTo(1));
assertThat(clients.get(PID_2).activeTasks().size(), equalTo(2));
}
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void shouldAssignTasksEvenlyWithUnequalTopicGroupSizes(final String rackAwareStrategy) {
setUp(rackAwareStrategy);
createClientWithPreviousActiveTasks(PID_1, 1, TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3, TASK_0_4, TASK_0_5, TASK_1_0);
createClient(PID_2, 1);
assertThat(assign(rackAwareStrategy, TASK_1_0, TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3, TASK_0_4, TASK_0_5), is(false));
final Set<TaskId> allTasks = new HashSet<>(asList(TASK_0_0, TASK_0_1, TASK_1_0, TASK_0_5, TASK_0_2, TASK_0_3, TASK_0_4));
final Set<TaskId> client1Tasks = clients.get(PID_1).activeTasks();
final Set<TaskId> client2Tasks = clients.get(PID_2).activeTasks();
// one client should get 3 tasks and the other should have 4
assertThat(
(client1Tasks.size() == 3 && client2Tasks.size() == 4) ||
(client1Tasks.size() == 4 && client2Tasks.size() == 3),
is(true));
allTasks.removeAll(client1Tasks);
// client2 should have all the remaining tasks not assigned to client 1
assertThat(client2Tasks, equalTo(allTasks));
}
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void shouldKeepActiveTaskStickinessWhenMoreClientThanActiveTasks(final String rackAwareStrategy) {
setUp(rackAwareStrategy);
createClientWithPreviousActiveTasks(PID_1, 1, TASK_0_0);
createClientWithPreviousActiveTasks(PID_2, 1, TASK_0_2);
createClientWithPreviousActiveTasks(PID_3, 1, TASK_0_1);
createClient(PID_4, 1);
createClient(PID_5, 1);
assertThat(assign(rackAwareStrategy, TASK_0_0, TASK_0_1, TASK_0_2), is(false));
assertThat(clients.get(PID_1).activeTasks(), equalTo(singleton(TASK_0_0)));
assertThat(clients.get(PID_2).activeTasks(), equalTo(singleton(TASK_0_2)));
assertThat(clients.get(PID_3).activeTasks(), equalTo(singleton(TASK_0_1)));
// change up the assignment and make sure it is still sticky
clients.clear();
createClient(PID_1, 1);
createClientWithPreviousActiveTasks(PID_2, 1, TASK_0_0);
createClient(PID_3, 1);
createClientWithPreviousActiveTasks(PID_4, 1, TASK_0_2);
createClientWithPreviousActiveTasks(PID_5, 1, TASK_0_1);
assertThat(assign(rackAwareStrategy, TASK_0_0, TASK_0_1, TASK_0_2), is(false));
assertThat(clients.get(PID_2).activeTasks(), equalTo(singleton(TASK_0_0)));
assertThat(clients.get(PID_4).activeTasks(), equalTo(singleton(TASK_0_2)));
assertThat(clients.get(PID_5).activeTasks(), equalTo(singleton(TASK_0_1)));
}
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void shouldAssignTasksToClientWithPreviousStandbyTasks(final String rackAwareStrategy) {
setUp(rackAwareStrategy);
final ClientState client1 = createClient(PID_1, 1);
client1.addPreviousStandbyTasks(Set.of(TASK_0_2));
final ClientState client2 = createClient(PID_2, 1);
client2.addPreviousStandbyTasks(Set.of(TASK_0_1));
final ClientState client3 = createClient(PID_3, 1);
client3.addPreviousStandbyTasks(Set.of(TASK_0_0));
final boolean probingRebalanceNeeded = assign(rackAwareStrategy, TASK_0_0, TASK_0_1, TASK_0_2);
assertThat(probingRebalanceNeeded, is(false));
assertThat(clients.get(PID_1).activeTasks(), equalTo(singleton(TASK_0_2)));
assertThat(clients.get(PID_2).activeTasks(), equalTo(singleton(TASK_0_1)));
assertThat(clients.get(PID_3).activeTasks(), equalTo(singleton(TASK_0_0)));
}
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void shouldAssignBasedOnCapacityWhenMultipleClientHaveStandbyTasks(final String rackAwareStrategy) {
setUp(rackAwareStrategy);
final ClientState c1 = createClientWithPreviousActiveTasks(PID_1, 1, TASK_0_0);
c1.addPreviousStandbyTasks(Set.of(TASK_0_1));
final ClientState c2 = createClientWithPreviousActiveTasks(PID_2, 2, TASK_0_2);
c2.addPreviousStandbyTasks(Set.of(TASK_0_1));
final boolean probingRebalanceNeeded = assign(rackAwareStrategy, TASK_0_0, TASK_0_1, TASK_0_2);
assertThat(probingRebalanceNeeded, is(false));
assertThat(clients.get(PID_1).activeTasks(), equalTo(singleton(TASK_0_0)));
assertThat(clients.get(PID_2).activeTasks(), equalTo(Set.of(TASK_0_2, TASK_0_1)));
}
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void shouldAssignStandbyTasksToDifferentClientThanCorrespondingActiveTaskIsAssignedTo(final String rackAwareStrategy) {
setUp(rackAwareStrategy);
createClientWithPreviousActiveTasks(PID_1, 1, TASK_0_0);
createClientWithPreviousActiveTasks(PID_2, 1, TASK_0_1);
createClientWithPreviousActiveTasks(PID_3, 1, TASK_0_2);
createClientWithPreviousActiveTasks(PID_4, 1, TASK_0_3);
final boolean probingRebalanceNeeded = assign(1, rackAwareStrategy, TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3);
assertThat(probingRebalanceNeeded, is(false));
assertThat(clients.get(PID_1).standbyTasks(), not(hasItems(TASK_0_0)));
assertThat(clients.get(PID_1).standbyTasks().size(), lessThanOrEqualTo(2));
assertThat(clients.get(PID_2).standbyTasks(), not(hasItems(TASK_0_1)));
assertThat(clients.get(PID_2).standbyTasks().size(), lessThanOrEqualTo(2));
assertThat(clients.get(PID_3).standbyTasks(), not(hasItems(TASK_0_2)));
assertThat(clients.get(PID_3).standbyTasks().size(), lessThanOrEqualTo(2));
assertThat(clients.get(PID_4).standbyTasks(), not(hasItems(TASK_0_3)));
assertThat(clients.get(PID_4).standbyTasks().size(), lessThanOrEqualTo(2));
int nonEmptyStandbyTaskCount = 0;
for (final ClientState clientState : clients.values()) {
nonEmptyStandbyTaskCount += clientState.standbyTasks().isEmpty() ? 0 : 1;
}
assertThat(nonEmptyStandbyTaskCount, greaterThanOrEqualTo(3));
assertThat(allStandbyTasks(), equalTo(asList(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3)));
}
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void shouldAssignMultipleReplicasOfStandbyTask(final String rackAwareStrategy) {
setUp(rackAwareStrategy);
createClientWithPreviousActiveTasks(PID_1, 1, TASK_0_0);
createClientWithPreviousActiveTasks(PID_2, 1, TASK_0_1);
createClientWithPreviousActiveTasks(PID_3, 1, TASK_0_2);
final boolean probingRebalanceNeeded = assign(2, rackAwareStrategy, TASK_0_0, TASK_0_1, TASK_0_2);
assertThat(probingRebalanceNeeded, is(false));
assertThat(clients.get(PID_1).standbyTasks(), equalTo(Set.of(TASK_0_1, TASK_0_2)));
assertThat(clients.get(PID_2).standbyTasks(), equalTo(Set.of(TASK_0_2, TASK_0_0)));
assertThat(clients.get(PID_3).standbyTasks(), equalTo(Set.of(TASK_0_0, TASK_0_1)));
}
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void shouldNotAssignStandbyTaskReplicasWhenNoClientAvailableWithoutHavingTheTaskAssigned(final String rackAwareStrategy) {
setUp(rackAwareStrategy);
createClient(PID_1, 1);
final boolean probingRebalanceNeeded = assign(1, rackAwareStrategy, TASK_0_0);
assertThat(probingRebalanceNeeded, is(false));
assertThat(clients.get(PID_1).standbyTasks().size(), equalTo(0));
}
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void shouldAssignActiveAndStandbyTasks(final String rackAwareStrategy) {
setUp(rackAwareStrategy);
createClient(PID_1, 1);
createClient(PID_2, 1);
createClient(PID_3, 1);
final boolean probingRebalanceNeeded = assign(1, rackAwareStrategy, TASK_0_0, TASK_0_1, TASK_0_2);
assertThat(probingRebalanceNeeded, is(false));
assertThat(allActiveTasks(), equalTo(asList(TASK_0_0, TASK_0_1, TASK_0_2)));
assertThat(allStandbyTasks(), equalTo(asList(TASK_0_0, TASK_0_1, TASK_0_2)));
}
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void shouldAssignAtLeastOneTaskToEachClientIfPossible(final String rackAwareStrategy) {
setUp(rackAwareStrategy);
createClient(PID_1, 3);
createClient(PID_2, 1);
createClient(PID_3, 1);
final boolean probingRebalanceNeeded = assign(rackAwareStrategy, TASK_0_0, TASK_0_1, TASK_0_2);
assertThat(probingRebalanceNeeded, is(false));
assertThat(clients.get(PID_1).assignedTaskCount(), equalTo(1));
assertThat(clients.get(PID_2).assignedTaskCount(), equalTo(1));
assertThat(clients.get(PID_3).assignedTaskCount(), equalTo(1));
}
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void shouldAssignEachActiveTaskToOneClientWhenMoreClientsThanTasks(final String rackAwareStrategy) {
setUp(rackAwareStrategy);
createClient(PID_1, 1);
createClient(PID_2, 1);
createClient(PID_3, 1);
createClient(PID_4, 1);
createClient(PID_5, 1);
createClient(PID_6, 1);
final boolean probingRebalanceNeeded = assign(rackAwareStrategy, TASK_0_0, TASK_0_1, TASK_0_2);
assertThat(probingRebalanceNeeded, is(false));
assertThat(allActiveTasks(), equalTo(asList(TASK_0_0, TASK_0_1, TASK_0_2)));
}
@ParameterizedTest
@ValueSource(strings = {
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void shouldBalanceActiveAndStandbyTasksAcrossAvailableClients(final String rackAwareStrategy) {
    setUp(rackAwareStrategy);

    for (final ProcessId processId : asList(PID_1, PID_2, PID_3, PID_4, PID_5, PID_6)) {
        createClient(processId, 1);
    }

    // Three actives + three standbys spread over six clients: one task apiece.
    final boolean probingRebalanceNeeded = assign(1, rackAwareStrategy, TASK_0_0, TASK_0_1, TASK_0_2);
    assertThat(probingRebalanceNeeded, is(false));

    clients.values().forEach(
        clientState -> assertThat(clientState.assignedTaskCount(), equalTo(1))
    );
}
@ParameterizedTest
@ValueSource(strings = {
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void shouldAssignMoreTasksToClientWithMoreCapacity(final String rackAwareStrategy) {
    setUp(rackAwareStrategy);

    // PID_2 has double the capacity of PID_1, so with twelve tasks it should
    // receive twice as many (8 vs 4).
    createClient(PID_2, 2);
    createClient(PID_1, 1);

    final TaskId[] allTasks = {
        TASK_0_0, TASK_0_1, TASK_0_2,
        TASK_1_0, TASK_1_1, TASK_1_2,
        TASK_2_0, TASK_2_1, TASK_2_2,
        TASK_3_0, TASK_3_1, TASK_3_2
    };
    final boolean probingRebalanceNeeded = assign(rackAwareStrategy, allTasks);
    assertThat(probingRebalanceNeeded, is(false));

    assertThat(clients.get(PID_2).assignedTaskCount(), equalTo(8));
    assertThat(clients.get(PID_1).assignedTaskCount(), equalTo(4));
}
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
// 16 shuffled tasks (2 subtopologies x 8 partitions) over four capacity-4 clients must be
// distributed round-robin by sorted task id: PID_k receives the tasks at sorted positions
// k-1, k+3, k+7, k+11 (see the expected*Assignment sets below).
public void shouldEvenlyDistributeByTaskIdAndPartition(final String rackAwareStrategy) {
setUp(rackAwareStrategy);
createClient(PID_1, 4);
createClient(PID_2, 4);
createClient(PID_3, 4);
createClient(PID_4, 4);
// Build all 16 task ids and shuffle them so the outcome is independent of input order.
final List<TaskId> taskIds = new ArrayList<>();
final TaskId[] taskIdArray = new TaskId[16];
for (int i = 0; i < 2; i++) {
for (int j = 0; j < 8; j++) {
taskIds.add(new TaskId(i, j));
}
}
Collections.shuffle(taskIds);
taskIds.toArray(taskIdArray);
// Randomized rack topology: the even distribution must hold regardless of rack placement.
final int nodeSize = 5;
final int topicSize = 2;
final int partitionSize = 8;
final int clientSize = 4;
final Cluster cluster = getRandomCluster(nodeSize, topicSize, partitionSize);
final Map<TaskId, Set<TopicPartition>> partitionsForTask = getTaskTopicPartitionMap(topicSize, partitionSize, false);
final Map<TaskId, Set<TopicPartition>> changelogPartitionsForTask = getTaskTopicPartitionMap(topicSize, partitionSize, true);
final Map<ProcessId, Map<String, Optional<String>>> racksForProcessConsumer = getRandomProcessRacks(clientSize, nodeSize);
final InternalTopicManager internalTopicManager = mockInternalTopicManagerForRandomChangelog(nodeSize, topicSize, partitionSize);
// NOTE(review): the two null arguments appear to be the traffic/non-overlap cost
// overrides (defaults used) -- confirm against AssignmentConfigs' constructor.
final AssignmentConfigs configs = new AssignmentConfigs(
0L,
1,
1,
60_000L,
EMPTY_RACK_AWARE_ASSIGNMENT_TAGS,
null,
null,
rackAwareStrategy
);
final RackAwareTaskAssignor rackAwareTaskAssignor = new RackAwareTaskAssignor(
cluster,
partitionsForTask,
changelogPartitionsForTask,
getTasksForTopicGroup(topicSize, partitionSize),
racksForProcessConsumer,
internalTopicManager,
configs,
time
);
final boolean probingRebalanceNeeded = assign(configs, rackAwareTaskAssignor, taskIdArray);
assertThat(probingRebalanceNeeded, is(false));
// Sort the (shuffled) task ids so expectations can be expressed by sorted position.
Collections.sort(taskIds);
final Set<TaskId> expectedClientOneAssignment = getExpectedTaskIdAssignment(taskIds, 0, 4, 8, 12);
final Set<TaskId> expectedClientTwoAssignment = getExpectedTaskIdAssignment(taskIds, 1, 5, 9, 13);
final Set<TaskId> expectedClientThreeAssignment = getExpectedTaskIdAssignment(taskIds, 2, 6, 10, 14);
final Set<TaskId> expectedClientFourAssignment = getExpectedTaskIdAssignment(taskIds, 3, 7, 11, 15);
final Map<ProcessId, Set<TaskId>> sortedAssignments = sortClientAssignments(clients);
assertThat(sortedAssignments.get(PID_1), equalTo(expectedClientOneAssignment));
assertThat(sortedAssignments.get(PID_2), equalTo(expectedClientTwoAssignment));
assertThat(sortedAssignments.get(PID_3), equalTo(expectedClientThreeAssignment));
assertThat(sortedAssignments.get(PID_4), equalTo(expectedClientFourAssignment));
}
@ParameterizedTest
@ValueSource(strings = {
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void shouldNotHaveSameAssignmentOnAnyTwoHosts(final String rackAwareStrategy) {
    setUp(rackAwareStrategy);

    final List<ProcessId> allProcessIds = asList(PID_1, PID_2, PID_3, PID_4);
    for (final ProcessId processId : allProcessIds) {
        createClient(processId, 1);
    }

    final boolean probingRebalanceNeeded = assign(1, rackAwareStrategy, TASK_0_0, TASK_0_2, TASK_0_1, TASK_0_3);
    assertThat(probingRebalanceNeeded, is(false));

    // With four tasks, four clients, and one standby replica, no two clients
    // may end up with an identical task set.
    for (final ProcessId first : allProcessIds) {
        for (final ProcessId second : allProcessIds) {
            if (first.equals(second)) {
                continue;
            }
            assertThat("clients shouldn't have same task assignment",
                clients.get(second).assignedTasks(),
                not(equalTo(clients.get(first).assignedTasks())));
        }
    }
}
@ParameterizedTest
@ValueSource(strings = {
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void shouldNotHaveSameAssignmentOnAnyTwoHostsWhenThereArePreviousActiveTasks(final String rackAwareStrategy) {
    setUp(rackAwareStrategy);

    // Three clients carry previous active tasks; a fourth joins fresh.
    final List<ProcessId> allProcessIds = asList(PID_1, PID_2, PID_3);
    createClientWithPreviousActiveTasks(PID_1, 1, TASK_0_1, TASK_0_2);
    createClientWithPreviousActiveTasks(PID_2, 1, TASK_0_3);
    createClientWithPreviousActiveTasks(PID_3, 1, TASK_0_0);
    createClient(PID_4, 1);

    final boolean probingRebalanceNeeded = assign(1, rackAwareStrategy, TASK_0_0, TASK_0_2, TASK_0_1, TASK_0_3);
    assertThat(probingRebalanceNeeded, is(false));

    // Despite stickiness, no two of the pre-existing clients may share a task set.
    for (final ProcessId first : allProcessIds) {
        for (final ProcessId second : allProcessIds) {
            if (first.equals(second)) {
                continue;
            }
            assertThat("clients shouldn't have same task assignment",
                clients.get(second).assignedTasks(),
                not(equalTo(clients.get(first).assignedTasks())));
        }
    }
}
@ParameterizedTest
@ValueSource(strings = {
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void shouldNotHaveSameAssignmentOnAnyTwoHostsWhenThereArePreviousStandbyTasks(final String rackAwareStrategy) {
    setUp(rackAwareStrategy);

    final List<ProcessId> allProcessIds = asList(PID_1, PID_2, PID_3, PID_4);

    // Two clients whose previous standbys mirror each other's previous actives,
    // plus two fresh clients.
    final ClientState clientOne = createClientWithPreviousActiveTasks(PID_1, 1, TASK_0_1, TASK_0_2);
    clientOne.addPreviousStandbyTasks(Set.of(TASK_0_3, TASK_0_0));
    final ClientState clientTwo = createClientWithPreviousActiveTasks(PID_2, 1, TASK_0_3, TASK_0_0);
    clientTwo.addPreviousStandbyTasks(Set.of(TASK_0_1, TASK_0_2));
    createClient(PID_3, 1);
    createClient(PID_4, 1);

    final boolean probingRebalanceNeeded = assign(1, rackAwareStrategy, TASK_0_0, TASK_0_2, TASK_0_1, TASK_0_3);
    assertThat(probingRebalanceNeeded, is(false));

    // No two clients may end up with an identical task set.
    for (final ProcessId first : allProcessIds) {
        for (final ProcessId second : allProcessIds) {
            if (first.equals(second)) {
                continue;
            }
            assertThat("clients shouldn't have same task assignment",
                clients.get(second).assignedTasks(),
                not(equalTo(clients.get(first).assignedTasks())));
        }
    }
}
@ParameterizedTest
@ValueSource(strings = {
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void shouldReBalanceTasksAcrossAllClientsWhenCapacityAndTaskCountTheSame(final String rackAwareStrategy) {
    setUp(rackAwareStrategy);

    // PID_3 previously owned all four tasks; after rebalancing across the four
    // equal-capacity clients, each should own exactly one.
    createClientWithPreviousActiveTasks(PID_3, 1, TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3);
    createClient(PID_1, 1);
    createClient(PID_2, 1);
    createClient(PID_4, 1);

    final boolean probingRebalanceNeeded = assign(rackAwareStrategy, TASK_0_0, TASK_0_2, TASK_0_1, TASK_0_3);
    assertThat(probingRebalanceNeeded, is(false));

    for (final ProcessId processId : asList(PID_1, PID_2, PID_3, PID_4)) {
        assertThat(clients.get(processId).assignedTaskCount(), equalTo(1));
    }
}
@ParameterizedTest
@ValueSource(strings = {
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void shouldReBalanceTasksAcrossClientsWhenCapacityLessThanTaskCount(final String rackAwareStrategy) {
    setUp(rackAwareStrategy);

    // Four tasks over three single-capacity clients: the previous owner of all
    // four keeps two, the others receive one each.
    final ClientState previousOwner = createClientWithPreviousActiveTasks(PID_3, 1, TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3);
    createClient(PID_1, 1);
    createClient(PID_2, 1);

    final boolean probingRebalanceNeeded = assign(rackAwareStrategy, TASK_0_0, TASK_0_2, TASK_0_1, TASK_0_3);
    assertThat(probingRebalanceNeeded, is(false));

    assertThat(previousOwner.assignedTaskCount(), equalTo(2));
    assertThat(clients.get(PID_1).assignedTaskCount(), equalTo(1));
    assertThat(clients.get(PID_2).assignedTaskCount(), equalTo(1));
}
@ParameterizedTest
@ValueSource(strings = {
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void shouldRebalanceTasksToClientsBasedOnCapacity(final String rackAwareStrategy) {
    setUp(rackAwareStrategy);

    // PID_2 previously owned all three tasks but only has capacity 1; PID_3 has
    // capacity 2, so tasks should be redistributed proportionally.
    final ClientState smallClient = createClientWithPreviousActiveTasks(PID_2, 1, TASK_0_0, TASK_0_3, TASK_0_2);
    final ClientState largeClient = createClient(PID_3, 2);

    final boolean probingRebalanceNeeded = assign(rackAwareStrategy, TASK_0_0, TASK_0_2, TASK_0_3);
    assertThat(probingRebalanceNeeded, is(false));

    assertThat(smallClient.assignedTaskCount(), equalTo(1));
    assertThat(largeClient.assignedTaskCount(), equalTo(2));
}
@ParameterizedTest
@ValueSource(strings = {
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void shouldMoveMinimalNumberOfTasksWhenPreviouslyAboveCapacityAndNewClientAdded(final String rackAwareStrategy) {
    setUp(rackAwareStrategy);

    // Mutable copy for PID_1 so the migrated task can be subtracted below.
    final Set<TaskId> previousTasksOfP1 = new HashSet<>(List.of(TASK_0_0, TASK_0_2));
    final Set<TaskId> previousTasksOfP2 = Set.of(TASK_0_1, TASK_0_3);
    createClientWithPreviousActiveTasks(PID_1, 1, TASK_0_0, TASK_0_2);
    createClientWithPreviousActiveTasks(PID_2, 1, TASK_0_1, TASK_0_3);
    createClientWithPreviousActiveTasks(PID_3, 1);

    final boolean probingRebalanceNeeded = assign(rackAwareStrategy, TASK_0_0, TASK_0_2, TASK_0_1, TASK_0_3);
    assertThat(probingRebalanceNeeded, is(false));

    // Exactly one task migrates to the previously empty client.
    final Set<TaskId> migratedTasks = clients.get(PID_3).activeTasks();
    assertThat(migratedTasks.size(), equalTo(1));

    // Whichever client donated the task, the other's assignment must be untouched.
    if (previousTasksOfP1.removeAll(migratedTasks)) {
        assertThat(clients.get(PID_2).activeTasks(), equalTo(previousTasksOfP2));
    } else {
        assertThat(clients.get(PID_1).activeTasks(), equalTo(previousTasksOfP1));
    }
}
@ParameterizedTest
@ValueSource(strings = {
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void shouldNotMoveAnyTasksWhenNewTasksAdded(final String rackAwareStrategy) {
    setUp(rackAwareStrategy);

    final ClientState clientOne = createClientWithPreviousActiveTasks(PID_1, 1, TASK_0_0, TASK_0_1);
    final ClientState clientTwo = createClientWithPreviousActiveTasks(PID_2, 1, TASK_0_2, TASK_0_3);

    // Two brand-new tasks (0_4 and 0_5) show up; previously owned tasks must stay put.
    final boolean probingRebalanceNeeded =
        assign(rackAwareStrategy, TASK_0_3, TASK_0_1, TASK_0_4, TASK_0_2, TASK_0_0, TASK_0_5);
    assertThat(probingRebalanceNeeded, is(false));

    assertThat(clientOne.activeTasks(), hasItems(TASK_0_0, TASK_0_1));
    assertThat(clientTwo.activeTasks(), hasItems(TASK_0_2, TASK_0_3));
}
@ParameterizedTest
@ValueSource(strings = {
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void shouldAssignNewTasksToNewClientWhenPreviousTasksAssignedToOldClients(final String rackAwareStrategy) {
    setUp(rackAwareStrategy);

    final ClientState clientOne = createClientWithPreviousActiveTasks(PID_1, 1, TASK_0_2, TASK_0_1);
    final ClientState clientTwo = createClientWithPreviousActiveTasks(PID_2, 1, TASK_0_0, TASK_0_3);
    final ClientState newClient = createClient(PID_3, 1);

    // Old clients keep their sticky tasks; the two never-before-seen tasks
    // (0_4, 0_5) land on the fresh client.
    final boolean probingRebalanceNeeded =
        assign(rackAwareStrategy, TASK_0_3, TASK_0_1, TASK_0_4, TASK_0_2, TASK_0_0, TASK_0_5);
    assertThat(probingRebalanceNeeded, is(false));

    assertThat(clientOne.activeTasks(), hasItems(TASK_0_2, TASK_0_1));
    assertThat(clientTwo.activeTasks(), hasItems(TASK_0_0, TASK_0_3));
    assertThat(newClient.activeTasks(), hasItems(TASK_0_4, TASK_0_5));
}
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
// A new client (PID_4) joins holding only previous-standby state for all twelve tasks.
// The tasks that were not previously active on any client should be assigned to it,
// while existing clients keep their previously active tasks where possible. The exact
// expected sets differ by strategy (see the branch below).
public void shouldAssignTasksNotPreviouslyActiveToNewClient(final String rackAwareStrategy) {
setUp(rackAwareStrategy);
// Existing clients: previous actives plus overlapping previous standbys.
final ClientState c1 = createClientWithPreviousActiveTasks(PID_1, 1, TASK_0_1, TASK_1_2, TASK_1_3);
c1.addPreviousStandbyTasks(Set.of(TASK_0_0, TASK_1_1, TASK_2_0, TASK_2_1, TASK_2_3));
final ClientState c2 = createClientWithPreviousActiveTasks(PID_2, 1, TASK_0_0, TASK_1_1, TASK_2_2);
c2.addPreviousStandbyTasks(Set.of(TASK_0_1, TASK_1_0, TASK_0_2, TASK_2_0, TASK_0_3, TASK_1_2, TASK_2_1, TASK_1_3, TASK_2_3));
final ClientState c3 = createClientWithPreviousActiveTasks(PID_3, 1, TASK_2_0, TASK_2_1, TASK_2_3);
c3.addPreviousStandbyTasks(Set.of(TASK_0_2, TASK_1_2));
final ClientState newClient = createClient(PID_4, 1);
newClient.addPreviousStandbyTasks(Set.of(TASK_0_0, TASK_1_0, TASK_0_1, TASK_0_2, TASK_1_1, TASK_2_0, TASK_0_3, TASK_1_2, TASK_2_1, TASK_1_3, TASK_2_2, TASK_2_3));
final boolean probingRebalanceNeeded = assign(rackAwareStrategy, TASK_0_0, TASK_1_0, TASK_0_1, TASK_0_2, TASK_1_1, TASK_2_0, TASK_0_3, TASK_1_2, TASK_2_1, TASK_1_3, TASK_2_2, TASK_2_3);
assertThat(probingRebalanceNeeded, is(false));
if (rackAwareStrategy.equals(StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY)) {
// Balance-subtopology gives each client one task per subtopology (0_x, 1_x, 2_x).
assertThat(c1.activeTasks(), equalTo(Set.of(TASK_0_1, TASK_1_2, TASK_2_3)));
assertThat(c2.activeTasks(), equalTo(Set.of(TASK_0_0, TASK_1_1, TASK_2_2)));
assertThat(c3.activeTasks(), equalTo(Set.of(TASK_0_2, TASK_1_3, TASK_2_1)));
assertThat(newClient.activeTasks(), equalTo(Set.of(TASK_0_3, TASK_1_0, TASK_2_0)));
} else {
// Other strategies keep all previously active tasks on their old owners; the
// remaining three tasks go to the new client.
assertThat(c1.activeTasks(), equalTo(Set.of(TASK_0_1, TASK_1_2, TASK_1_3)));
assertThat(c2.activeTasks(), equalTo(Set.of(TASK_0_0, TASK_1_1, TASK_2_2)));
assertThat(c3.activeTasks(), equalTo(Set.of(TASK_2_0, TASK_2_1, TASK_2_3)));
assertThat(newClient.activeTasks(), equalTo(Set.of(TASK_0_2, TASK_0_3, TASK_1_0)));
}
}
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
// Like shouldAssignTasksNotPreviouslyActiveToNewClient, but the not-previously-active
// tasks are split between two "bounced" clients that carry only previous-standby state.
// Expected sets differ by strategy (see the branch below).
public void shouldAssignTasksNotPreviouslyActiveToMultipleNewClients(final String rackAwareStrategy) {
setUp(rackAwareStrategy);
final ClientState c1 = createClientWithPreviousActiveTasks(PID_1, 1, TASK_0_1, TASK_1_2, TASK_1_3);
c1.addPreviousStandbyTasks(Set.of(TASK_0_0, TASK_1_1, TASK_2_0, TASK_2_1, TASK_2_3));
final ClientState c2 = createClientWithPreviousActiveTasks(PID_2, 1, TASK_0_0, TASK_1_1, TASK_2_2);
c2.addPreviousStandbyTasks(Set.of(TASK_0_1, TASK_1_0, TASK_0_2, TASK_2_0, TASK_0_3, TASK_1_2, TASK_2_1, TASK_1_3, TASK_2_3));
// Bounced clients: no previous actives, disjoint previous-standby sets.
final ClientState bounce1 = createClient(PID_3, 1);
bounce1.addPreviousStandbyTasks(Set.of(TASK_2_0, TASK_2_1, TASK_2_3));
final ClientState bounce2 = createClient(PID_4, 1);
bounce2.addPreviousStandbyTasks(Set.of(TASK_0_2, TASK_0_3, TASK_1_0));
final boolean probingRebalanceNeeded = assign(rackAwareStrategy, TASK_0_0, TASK_1_0, TASK_0_1, TASK_0_2, TASK_1_1, TASK_2_0, TASK_0_3, TASK_1_2, TASK_2_1, TASK_1_3, TASK_2_2, TASK_2_3);
assertThat(probingRebalanceNeeded, is(false));
if (rackAwareStrategy.equals(StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY)) {
// Balance-subtopology gives each client one task per subtopology (0_x, 1_x, 2_x).
assertThat(c1.activeTasks(), equalTo(Set.of(TASK_0_1, TASK_1_2, TASK_2_3)));
assertThat(c2.activeTasks(), equalTo(Set.of(TASK_0_0, TASK_1_1, TASK_2_2)));
assertThat(bounce1.activeTasks(), equalTo(Set.of(TASK_0_2, TASK_1_3, TASK_2_1)));
assertThat(bounce2.activeTasks(), equalTo(Set.of(TASK_0_3, TASK_1_0, TASK_2_0)));
} else {
// Other strategies keep previous actives sticky; each bounced client receives
// exactly the tasks it previously held as standbys.
assertThat(c1.activeTasks(), equalTo(Set.of(TASK_0_1, TASK_1_2, TASK_1_3)));
assertThat(c2.activeTasks(), equalTo(Set.of(TASK_0_0, TASK_1_1, TASK_2_2)));
assertThat(bounce1.activeTasks(), equalTo(Set.of(TASK_2_0, TASK_2_1, TASK_2_3)));
assertThat(bounce2.activeTasks(), equalTo(Set.of(TASK_0_2, TASK_0_3, TASK_1_0)));
}
}
@ParameterizedTest
@ValueSource(strings = {
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void shouldAssignTasksToNewClient(final String rackAwareStrategy) {
    setUp(rackAwareStrategy);

    createClientWithPreviousActiveTasks(PID_1, 1, TASK_0_1, TASK_0_2);
    createClient(PID_2, 1);

    final boolean probingRebalanceNeeded = assign(rackAwareStrategy, TASK_0_1, TASK_0_2);
    assertThat(probingRebalanceNeeded, is(false));

    // The overloaded client hands one of its two tasks to the new client.
    assertThat(clients.get(PID_1).activeTaskCount(), equalTo(1));
}
@ParameterizedTest
@ValueSource(strings = {
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void shouldAssignTasksToNewClientWithoutFlippingAssignmentBetweenExistingClients(final String rackAwareStrategy) {
    setUp(rackAwareStrategy);

    final ClientState c1 = createClientWithPreviousActiveTasks(PID_1, 1, TASK_0_0, TASK_0_1, TASK_0_2);
    final ClientState c2 = createClientWithPreviousActiveTasks(PID_2, 1, TASK_0_3, TASK_0_4, TASK_0_5);
    final ClientState newClient = createClient(PID_3, 1);

    final boolean probingRebalanceNeeded =
        assign(rackAwareStrategy, TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3, TASK_0_4, TASK_0_5);
    assertThat(probingRebalanceNeeded, is(false));

    // Neither existing client may pick up the other's previous tasks; each sheds
    // exactly one task to the new client.
    for (final TaskId task : asList(TASK_0_3, TASK_0_4, TASK_0_5)) {
        assertThat(c1.activeTasks(), not(hasItem(task)));
    }
    assertThat(c1.activeTaskCount(), equalTo(2));

    for (final TaskId task : asList(TASK_0_0, TASK_0_1, TASK_0_2)) {
        assertThat(c2.activeTasks(), not(hasItem(task)));
    }
    assertThat(c2.activeTaskCount(), equalTo(2));

    assertThat(newClient.activeTaskCount(), equalTo(2));
}
@ParameterizedTest
@ValueSource(strings = {
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
    StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void shouldAssignTasksToNewClientWithoutFlippingAssignmentBetweenExistingAndBouncedClients(final String rackAwareStrategy) {
    setUp(rackAwareStrategy);

    final ClientState c1 = createClientWithPreviousActiveTasks(PID_1, 1, TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_6);
    // c2 is a bounced client: no previous actives, only previous standbys.
    final ClientState c2 = createClient(PID_2, 1);
    c2.addPreviousStandbyTasks(Set.of(TASK_0_3, TASK_0_4, TASK_0_5));
    final ClientState newClient = createClient(PID_3, 1);

    final boolean probingRebalanceNeeded =
        assign(rackAwareStrategy, TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3, TASK_0_4, TASK_0_5, TASK_0_6);
    assertThat(probingRebalanceNeeded, is(false));

    // It's possible for either client 1 or 2 to get three tasks since they both
    // had three previously assigned -- but neither may take the other's tasks.
    for (final TaskId task : asList(TASK_0_3, TASK_0_4, TASK_0_5)) {
        assertThat(c1.activeTasks(), not(hasItem(task)));
    }
    assertThat(c1.activeTaskCount(), greaterThanOrEqualTo(2));

    for (final TaskId task : asList(TASK_0_0, TASK_0_1, TASK_0_2)) {
        assertThat(c2.activeTasks(), not(hasItem(task)));
    }
    assertThat(c2.activeTaskCount(), greaterThanOrEqualTo(2));

    assertThat(newClient.activeTaskCount(), equalTo(2));
}
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
// With zero standby replicas and one client that previously owned all three tasks,
// stickiness is allowed to win over balance: c1 keeps every task and c2 stays empty.
public void shouldViolateBalanceToPreserveActiveTaskStickiness(final String rackAwareStrategy) {
setUp(rackAwareStrategy);
final ClientState c1 = createClientWithPreviousActiveTasks(PID_1, 1, TASK_0_0, TASK_0_1, TASK_0_2);
final ClientState c2 = createClient(PID_2, 1);
// Shuffle so the outcome does not depend on input order.
final List<TaskId> taskIds = asList(TASK_0_0, TASK_0_1, TASK_0_2);
Collections.shuffle(taskIds);
// Randomized rack topology: stickiness must win regardless of rack placement.
final int nodeSize = 5;
final int topicSize = 1;
final int partitionSize = 3;
final int clientSize = 2;
final Cluster cluster = getRandomCluster(nodeSize, topicSize, partitionSize);
final Map<TaskId, Set<TopicPartition>> partitionsForTask = getTaskTopicPartitionMap(topicSize, partitionSize, false);
final Map<TaskId, Set<TopicPartition>> changelogPartitionsForTask = getTaskTopicPartitionMap(topicSize, partitionSize, true);
final Map<ProcessId, Map<String, Optional<String>>> racksForProcessConsumer = getRandomProcessRacks(clientSize, nodeSize);
final InternalTopicManager internalTopicManager = mockInternalTopicManagerForRandomChangelog(nodeSize, topicSize, partitionSize);
final AssignmentConfigs configs = new AssignmentConfigs(
0L,
1,
0,
60_000L,
EMPTY_RACK_AWARE_ASSIGNMENT_TAGS,
null,
null,
rackAwareStrategy
);
final RackAwareTaskAssignor rackAwareTaskAssignor = new RackAwareTaskAssignor(
cluster,
partitionsForTask,
changelogPartitionsForTask,
getTasksForTopicGroup(),
racksForProcessConsumer,
internalTopicManager,
configs,
time
);
// NOTE(review): the 'true' constructor argument appears to make the assignor preserve
// the previous active-task assignment (hence the deliberate imbalance below) --
// confirm against LegacyStickyTaskAssignor's constructor.
final boolean probingRebalanceNeeded = new LegacyStickyTaskAssignor(true).assign(
clients,
new HashSet<>(taskIds),
new HashSet<>(taskIds),
rackAwareTaskAssignor,
configs
);
assertThat(probingRebalanceNeeded, is(false));
assertThat(c1.activeTasks(), equalTo(Set.of(TASK_0_0, TASK_0_1, TASK_0_2)));
assertThat(c2.activeTasks(), empty());
}
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
// Mixed workload: eight tasks of which four are stateful. Each strategy yields a
// different exact assignment; the min-traffic branch documents the resulting costs.
public void shouldOptimizeStatefulAndStatelessTaskTraffic(final String rackAwareStrategy) {
setUp(rackAwareStrategy);
final ClientState c1 = createClientWithPreviousActiveTasks(PID_1, 1, TASK_0_0, TASK_0_1, TASK_0_2);
final ClientState c2 = createClientWithPreviousActiveTasks(PID_2, 1, TASK_1_0, TASK_1_1, TASK_0_3, TASK_1_3);
final ClientState c3 = createClientWithPreviousActiveTasks(PID_3, 1, TASK_1_2);
final List<TaskId> taskIds = asList(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3, TASK_1_0, TASK_1_1, TASK_1_2, TASK_1_3);
final List<TaskId> statefulTaskIds = asList(TASK_0_0, TASK_0_1, TASK_1_0, TASK_1_1);
// Shuffle so the outcome does not depend on input order.
Collections.shuffle(taskIds);
// Fixed (non-random) cluster and rack fixtures so exact assignments can be asserted.
final Cluster cluster = getClusterForAllTopics();
final Map<TaskId, Set<TopicPartition>> partitionsForTask = getTaskTopicPartitionMapForAllTasks();
final Map<TaskId, Set<TopicPartition>> changelogPartitionsForTask = getTaskChangelogMapForAllTasks();
final Map<ProcessId, Map<String, Optional<String>>> racksForProcessConsumer = getProcessRacksForAllProcess();
final InternalTopicManager internalTopicManager = mockInternalTopicManagerForChangelog();
// NOTE(review): the 10 and 1 arguments appear to be the traffic and non-overlap cost
// weights -- confirm against AssignmentConfigs' constructor.
final AssignmentConfigs configs = new AssignmentConfigs(
0L,
1,
1,
60_000L,
EMPTY_RACK_AWARE_ASSIGNMENT_TAGS,
10,
1,
rackAwareStrategy
);
final RackAwareTaskAssignor rackAwareTaskAssignor = new RackAwareTaskAssignor(
cluster,
partitionsForTask,
changelogPartitionsForTask,
getTasksForTopicGroup(),
racksForProcessConsumer,
internalTopicManager,
configs,
time
);
final boolean probingRebalanceNeeded = new LegacyStickyTaskAssignor().assign(
clients,
new HashSet<>(taskIds),
new HashSet<>(statefulTaskIds),
rackAwareTaskAssignor,
configs
);
assertThat(probingRebalanceNeeded, is(false));
if (rackAwareStrategy.equals(StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC)) {
// Total cost for active stateful: 3
// Total cost for active stateless: 0
// Total cost for standby: 20
assertThat(c1.activeTasks(), equalTo(Set.of(TASK_0_3, TASK_1_0, TASK_1_2)));
assertThat(c1.standbyTasks(), equalTo(Set.of(TASK_0_0, TASK_0_1)));
assertThat(c2.activeTasks(), equalTo(Set.of(TASK_0_0, TASK_0_2, TASK_1_1)));
assertThat(c2.standbyTasks(), empty());
assertThat(c3.activeTasks(), equalTo(Set.of(TASK_0_1, TASK_1_3)));
assertThat(c3.standbyTasks(), equalTo(Set.of(TASK_1_0, TASK_1_1)));
} else if (rackAwareStrategy.equals(StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY)) {
// Balance-subtopology spreads each subtopology's tasks across the clients.
assertThat(c1.activeTasks(), equalTo(Set.of(TASK_0_0, TASK_0_3, TASK_1_2)));
assertThat(c1.standbyTasks(), equalTo(Set.of(TASK_1_0)));
assertThat(c2.activeTasks(), equalTo(Set.of(TASK_0_1, TASK_0_2, TASK_1_1)));
assertThat(c2.standbyTasks(), equalTo(Set.of(TASK_0_0)));
assertThat(c3.activeTasks(), equalTo(Set.of(TASK_1_0, TASK_1_3)));
assertThat(c3.standbyTasks(), equalTo(Set.of(TASK_0_1, TASK_1_1)));
} else {
// Total cost for active stateful: 30
// Total cost for active stateless: 40
// Total cost for standby: 10
assertThat(c1.activeTasks(), equalTo(Set.of(TASK_0_1, TASK_0_2, TASK_1_3)));
assertThat(c1.standbyTasks(), equalTo(Set.of(TASK_0_0)));
assertThat(c2.activeTasks(), equalTo(Set.of(TASK_0_3, TASK_1_0, TASK_1_1)));
assertThat(c2.standbyTasks(), equalTo(Set.of(TASK_0_1)));
assertThat(c3.activeTasks(), equalTo(Set.of(TASK_0_0, TASK_1_2)));
assertThat(c3.standbyTasks(), equalTo(Set.of(TASK_1_0, TASK_1_1)));
}
}
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
// Property-style test: a fully randomized cluster/rack/client fixture must still produce
// a valid assignment (no probing rebalance, all tasks placed, replica count honored).
public void shouldAssignRandomInput(final String rackAwareStrategy) {
setUp(rackAwareStrategy);
final int nodeSize = 50;
final int tpSize = 60;
final int partitionSize = 3;
final int clientSize = 50;
final int replicaCount = 1;
final int maxCapacity = 3;
final SortedMap<TaskId, Set<TopicPartition>> taskTopicPartitionMap = getTaskTopicPartitionMap(
tpSize, partitionSize, false);
final AssignmentConfigs configs = new AssignmentConfigs(
0L,
1,
replicaCount,
60_000L,
EMPTY_RACK_AWARE_ASSIGNMENT_TAGS,
10,
1,
rackAwareStrategy
);
// Spy so verifyTaskPlacementWithRackAwareAssignor can inspect the assignor's calls.
final RackAwareTaskAssignor rackAwareTaskAssignor = spy(new RackAwareTaskAssignor(
getRandomCluster(nodeSize, tpSize, partitionSize),
taskTopicPartitionMap,
getTaskTopicPartitionMap(tpSize, partitionSize, true),
getTasksForTopicGroup(tpSize, partitionSize),
getRandomProcessRacks(clientSize, nodeSize),
mockInternalTopicManagerForRandomChangelog(nodeSize, tpSize, partitionSize),
configs,
time
));
// Randomly partition the task set into stateful and stateless subsets.
final SortedSet<TaskId> taskIds = (SortedSet<TaskId>) taskTopicPartitionMap.keySet();
final List<Set<TaskId>> statefulAndStatelessTasks = getRandomSubset(taskIds, 2);
final Set<TaskId> statefulTasks = statefulAndStatelessTasks.get(0);
final Set<TaskId> statelessTasks = statefulAndStatelessTasks.get(1);
final SortedMap<ProcessId, ClientState> clientStateMap = getRandomClientState(clientSize,
tpSize, partitionSize, maxCapacity, false, statefulTasks);
final boolean probing = new LegacyStickyTaskAssignor().assign(
clientStateMap,
taskIds,
statefulTasks,
rackAwareTaskAssignor,
configs
);
assertFalse(probing);
assertValidAssignment(
replicaCount,
statefulTasks,
statelessTasks,
clientStateMap,
new StringBuilder()
);
// NOTE(review): enableRackAwareTaskAssignor is a test field, presumably set by setUp
// from the strategy parameter -- confirm against the fixture declaration.
verifyTaskPlacementWithRackAwareAssignor(rackAwareTaskAssignor, taskIds, clientStateMap, true, enableRackAwareTaskAssignor);
if (rackAwareStrategy.equals(StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY)) {
// Balance-subtopology additionally guarantees per-subtopology balance (skew <= 4).
assertBalancedTasks(clientStateMap, 4);
}
}
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void shouldRemainOriginalAssignmentWithoutTrafficCostForMinCostStrategy(final String rackAwareStrategy) {
setUp(rackAwareStrategy);
// This test tests that if the traffic cost is 0, we should have same assignment with or without
// rack aware assignor enabled
final int nodeSize = 50;
final int tpSize = 60;
final int partitionSize = 3;
final int clientSize = 50;
final int replicaCount = 1;
final int maxCapacity = 3;
// Shared randomized fixtures, reused for both assignment runs below so the
// only difference between them is the rack-aware strategy.
final SortedMap<TaskId, Set<TopicPartition>> taskTopicPartitionMap = getTaskTopicPartitionMap(
tpSize, partitionSize, false);
final Cluster cluster = getRandomCluster(nodeSize, tpSize, partitionSize);
final Map<TaskId, Set<TopicPartition>> taskChangelogTopicPartitionMap = getTaskTopicPartitionMap(tpSize, partitionSize, true);
final Map<ProcessId, Map<String, Optional<String>>> processRackMap = getRandomProcessRacks(clientSize, nodeSize);
final InternalTopicManager mockInternalTopicManager = mockInternalTopicManagerForRandomChangelog(nodeSize, tpSize, partitionSize);
AssignmentConfigs configs = new AssignmentConfigs(
0L,
1,
replicaCount,
60_000L,
EMPTY_RACK_AWARE_ASSIGNMENT_TAGS,
0, // Override traffic cost to 0 to maintain original assignment
10,
rackAwareStrategy
);
RackAwareTaskAssignor rackAwareTaskAssignor = spy(new RackAwareTaskAssignor(
cluster,
taskTopicPartitionMap,
taskChangelogTopicPartitionMap,
getTasksForTopicGroup(tpSize, partitionSize),
processRackMap,
mockInternalTopicManager,
configs,
time
));
// Randomly partition the task set into stateful and stateless subsets.
final SortedSet<TaskId> taskIds = (SortedSet<TaskId>) taskTopicPartitionMap.keySet();
final List<Set<TaskId>> statefulAndStatelessTasks = getRandomSubset(taskIds, 2);
final Set<TaskId> statefulTasks = statefulAndStatelessTasks.get(0);
final Set<TaskId> statelessTasks = statefulAndStatelessTasks.get(1);
final SortedMap<ProcessId, ClientState> clientStateMap = getRandomClientState(clientSize,
tpSize, partitionSize, maxCapacity, false, statefulTasks);
// First run: the parameterized strategy with traffic cost 0.
new LegacyStickyTaskAssignor().assign(
clientStateMap,
taskIds,
statefulTasks,
rackAwareTaskAssignor,
configs
);
assertValidAssignment(1, statefulTasks, statelessTasks, clientStateMap, new StringBuilder());
if (rackAwareStrategy.equals(StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE)) {
return;
}
if (rackAwareStrategy.equals(StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY)) {
// Original assignment won't be maintained because we calculate the assignment using max flow first
// in balance subtopology strategy
assertBalancedTasks(clientStateMap, 4);
return;
}
// Second run (min-traffic only): same fixtures but with the rack-aware strategy
// disabled; with traffic cost 0 both runs must produce the same stateful-active
// and standby assignments.
final SortedMap<ProcessId, ClientState> clientStateMapCopy = copyClientStateMap(clientStateMap);
configs = new AssignmentConfigs(
0L,
1,
replicaCount,
60_000L,
EMPTY_RACK_AWARE_ASSIGNMENT_TAGS,
0,
10,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE
);
rackAwareTaskAssignor = spy(new RackAwareTaskAssignor(
cluster,
taskTopicPartitionMap,
taskChangelogTopicPartitionMap,
getTasksForTopicGroup(tpSize, partitionSize),
processRackMap,
mockInternalTopicManager,
configs,
time
));
new LegacyStickyTaskAssignor().assign(
clientStateMapCopy,
taskIds,
statefulTasks,
rackAwareTaskAssignor,
configs
);
for (final Map.Entry<ProcessId, ClientState> entry : clientStateMap.entrySet()) {
assertThat(entry.getValue().statefulActiveTasks(), equalTo(clientStateMapCopy.get(entry.getKey()).statefulActiveTasks()));
assertThat(entry.getValue().standbyTasks(), equalTo(clientStateMapCopy.get(entry.getKey()).standbyTasks()));
}
}
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void shouldReassignTasksWhenNewNodeJoinsWithExistingActiveAndStandbyAssignments(final String rackAwareStrategy) {
setUp(rackAwareStrategy);
// Initial setup: Node 1 has active tasks 0,1 and standby tasks 2,3
// Node 2 has active tasks 2,3 and standby tasks 0,1
final ClientState node1 = createClientWithPreviousActiveTasks(PID_1, 1, TASK_0_0, TASK_0_1);
node1.addPreviousStandbyTasks(Set.of(TASK_0_2, TASK_0_3));
final ClientState node2 = createClientWithPreviousActiveTasks(PID_2, 1, TASK_0_2, TASK_0_3);
node2.addPreviousStandbyTasks(Set.of(TASK_0_0, TASK_0_1));
// Node 3 joins as new client
final ClientState node3 = createClient(PID_3, 1);
final boolean probingRebalanceNeeded = assign(1, rackAwareStrategy, TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3);
assertThat(probingRebalanceNeeded, is(false));
// Verify all active tasks are assigned
final Set<TaskId> allAssignedActiveTasks = new HashSet<>();
allAssignedActiveTasks.addAll(node1.activeTasks());
allAssignedActiveTasks.addAll(node2.activeTasks());
allAssignedActiveTasks.addAll(node3.activeTasks());
assertThat(allAssignedActiveTasks, equalTo(Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3)));
// Verify all standby tasks are assigned
final Set<TaskId> allAssignedStandbyTasks = new HashSet<>();
allAssignedStandbyTasks.addAll(node1.standbyTasks());
allAssignedStandbyTasks.addAll(node2.standbyTasks());
allAssignedStandbyTasks.addAll(node3.standbyTasks());
assertThat(allAssignedStandbyTasks, equalTo(Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3)));
// Verify each client has 1-2 active tasks and at most 3 tasks total
assertThat(node1.activeTasks().size(), greaterThanOrEqualTo(1));
assertThat(node1.activeTasks().size(), lessThanOrEqualTo(2));
assertThat(node1.activeTasks().size() + node1.standbyTasks().size(), lessThanOrEqualTo(3));
assertThat(node2.activeTasks().size(), greaterThanOrEqualTo(1));
assertThat(node2.activeTasks().size(), lessThanOrEqualTo(2));
assertThat(node2.activeTasks().size() + node2.standbyTasks().size(), lessThanOrEqualTo(3));
assertThat(node3.activeTasks().size(), greaterThanOrEqualTo(1));
assertThat(node3.activeTasks().size(), lessThanOrEqualTo(2));
assertThat(node3.activeTasks().size() + node3.standbyTasks().size(), lessThanOrEqualTo(3));
}
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void shouldRangeAssignTasksWhenStartingEmpty(final String rackAwareStrategy) {
setUp(rackAwareStrategy);
// Two clients with capacity 1 each, starting empty (no previous tasks)
createClient(PID_1, 1);
createClient(PID_2, 1);
// Two subtopologies with 2 tasks each (4 tasks total)
final boolean probingRebalanceNeeded = assign(1, rackAwareStrategy, TASK_0_0, TASK_0_1, TASK_1_0, TASK_1_1);
assertThat(probingRebalanceNeeded, is(false));
// Each client should get one active task from each subtopology
final ClientState client1 = clients.get(PID_1);
final ClientState client2 = clients.get(PID_2);
// Check that each client has one active task from subtopology 0
final long client1Subtopology0ActiveCount = client1.activeTasks().stream()
.filter(task -> task.subtopology() == 0)
.count();
final long client2Subtopology0ActiveCount = client2.activeTasks().stream()
.filter(task -> task.subtopology() == 0)
.count();
assertThat(client1Subtopology0ActiveCount, equalTo(1L));
assertThat(client2Subtopology0ActiveCount, equalTo(1L));
// Check that each client has one active task from subtopology 1
final long client1Subtopology1ActiveCount = client1.activeTasks().stream()
.filter(task -> task.subtopology() == 1)
.count();
final long client2Subtopology1ActiveCount = client2.activeTasks().stream()
.filter(task -> task.subtopology() == 1)
.count();
assertThat(client1Subtopology1ActiveCount, equalTo(1L));
assertThat(client2Subtopology1ActiveCount, equalTo(1L));
// Check that each client has one standby task from subtopology 0
final long client1Subtopology0StandbyCount = client1.standbyTasks().stream()
.filter(task -> task.subtopology() == 0)
.count();
final long client2Subtopology0StandbyCount = client2.standbyTasks().stream()
.filter(task -> task.subtopology() == 0)
.count();
assertThat(client1Subtopology0StandbyCount, equalTo(1L));
assertThat(client2Subtopology0StandbyCount, equalTo(1L));
// Check that each client has one standby task from subtopology 1
final long client1Subtopology1StandbyCount = client1.standbyTasks().stream()
.filter(task -> task.subtopology() == 1)
.count();
final long client2Subtopology1StandbyCount = client2.standbyTasks().stream()
.filter(task -> task.subtopology() == 1)
.count();
assertThat(client1Subtopology1StandbyCount, equalTo(1L));
assertThat(client2Subtopology1StandbyCount, equalTo(1L));
}
private boolean assign(final String rackAwareStrategy, final TaskId... tasks) {
return assign(0, rackAwareStrategy, tasks);
}
private boolean assign(final int numStandbys, final String rackAwareStrategy, final TaskId... tasks) {
final List<TaskId> taskIds = asList(tasks);
Collections.shuffle(taskIds);
final AssignmentConfigs configs = new AssignmentConfigs(
0L,
1,
numStandbys,
60_000L,
EMPTY_RACK_AWARE_ASSIGNMENT_TAGS,
null,
null,
rackAwareStrategy
);
return assign(configs, getRackAwareTaskAssignor(configs, getTasksForTopicGroup()), tasks);
}
private boolean assign(final AssignmentConfigs configs, final RackAwareTaskAssignor rackAwareTaskAssignor, final TaskId... tasks) {
final List<TaskId> taskIds = asList(tasks);
Collections.shuffle(taskIds);
return new LegacyStickyTaskAssignor().assign(
clients,
new HashSet<>(taskIds),
new HashSet<>(taskIds),
rackAwareTaskAssignor,
configs
);
}
private List<TaskId> allActiveTasks() {
final List<TaskId> allActive = new ArrayList<>();
for (final ClientState client : clients.values()) {
allActive.addAll(client.activeTasks());
}
Collections.sort(allActive);
return allActive;
}
private List<TaskId> allStandbyTasks() {
final List<TaskId> tasks = new ArrayList<>();
for (final ClientState client : clients.values()) {
tasks.addAll(client.standbyTasks());
}
Collections.sort(tasks);
return tasks;
}
private ClientState createClient(final ProcessId processId, final int capacity) {
return createClientWithPreviousActiveTasks(processId, capacity);
}
private ClientState createClientWithPreviousActiveTasks(final ProcessId processId, final int capacity, final TaskId... taskIds) {
final ClientState clientState = new ClientState(processId, capacity);
clientState.addPreviousActiveTasks(Set.of(taskIds));
clients.put(processId, clientState);
return clientState;
}
private void assertActiveTaskTopicGroupIdsEvenlyDistributed() {
for (final Map.Entry<ProcessId, ClientState> clientStateEntry : clients.entrySet()) {
final List<Integer> topicGroupIds = new ArrayList<>();
final Set<TaskId> activeTasks = clientStateEntry.getValue().activeTasks();
for (final TaskId activeTask : activeTasks) {
topicGroupIds.add(activeTask.subtopology());
}
Collections.sort(topicGroupIds);
assertThat(topicGroupIds, equalTo(expectedTopicGroupIds));
}
}
private static Map<ProcessId, Set<TaskId>> sortClientAssignments(final Map<ProcessId, ClientState> clients) {
final Map<ProcessId, Set<TaskId>> sortedAssignments = new HashMap<>();
for (final Map.Entry<ProcessId, ClientState> entry : clients.entrySet()) {
final Set<TaskId> sorted = new TreeSet<>(entry.getValue().activeTasks());
sortedAssignments.put(entry.getKey(), sorted);
}
return sortedAssignments;
}
private static Set<TaskId> getExpectedTaskIdAssignment(final List<TaskId> tasks, final int... indices) {
final Set<TaskId> sortedAssignment = new TreeSet<>();
for (final int index : indices) {
sortedAssignment.add(tasks.get(index));
}
return sortedAssignment;
}
}
| LegacyStickyTaskAssignorTest |
java | apache__kafka | connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/ConnectAssertions.java | {
"start": 1782,
"end": 10346
} | class ____ {
private static final Logger log = LoggerFactory.getLogger(ConnectAssertions.class);
public static final long WORKER_SETUP_DURATION_MS = TimeUnit.MINUTES.toMillis(5);
public static final long VALIDATION_DURATION_MS = TimeUnit.SECONDS.toMillis(30);
public static final long CONNECTOR_SETUP_DURATION_MS = TimeUnit.MINUTES.toMillis(2);
// Creating a connector requires two rounds of rebalance; destroying one only requires one
// Assume it'll take ~half the time to destroy a connector as it does to create one
public static final long CONNECTOR_SHUTDOWN_DURATION_MS = TimeUnit.MINUTES.toMillis(1);
private static final long CONNECT_INTERNAL_TOPIC_UPDATES_DURATION_MS = TimeUnit.SECONDS.toMillis(60);
private final EmbeddedConnect connect;
ConnectAssertions(EmbeddedConnect connect) {
this.connect = connect;
}
/**
* Assert that at least the requested number of workers are up and running.
*
* @param numWorkers the number of online workers
*/
public void assertAtLeastNumWorkersAreUp(int numWorkers, String detailMessage) throws InterruptedException {
try {
waitForCondition(
() -> checkWorkersUp(numWorkers, (actual, expected) -> actual >= expected).orElse(false),
WORKER_SETUP_DURATION_MS,
"Didn't meet the minimum requested number of online workers: " + numWorkers);
} catch (AssertionError e) {
throw new AssertionError(detailMessage, e);
}
}
/**
* Assert that the exact number of workers are up and running.
*
* @param numWorkers the number of online workers
*/
public void assertExactlyNumWorkersAreUp(int numWorkers, String detailMessage) throws InterruptedException {
try {
waitForCondition(
() -> checkWorkersUp(numWorkers, (actual, expected) -> actual == expected).orElse(false),
WORKER_SETUP_DURATION_MS,
"Didn't meet the exact requested number of online workers: " + numWorkers);
} catch (AssertionError e) {
throw new AssertionError(detailMessage, e);
}
}
/**
* Confirm that the requested number of workers are up and running.
*
* @param numWorkers the number of online workers
* @return true if at least {@code numWorkers} are up; false otherwise
*/
protected Optional<Boolean> checkWorkersUp(int numWorkers, BiFunction<Integer, Integer, Boolean> comp) {
try {
int numUp = connect.healthyWorkers().size();
return Optional.of(comp.apply(numUp, numWorkers));
} catch (Exception e) {
log.error("Could not check active workers.", e);
return Optional.empty();
}
}
/**
* Assert that at least the requested number of workers are up and running.
*
* @param numBrokers the number of online brokers
*/
public void assertExactlyNumBrokersAreUp(int numBrokers, String detailMessage) throws InterruptedException {
try {
waitForCondition(
() -> checkBrokersUp(numBrokers, (actual, expected) -> actual == expected).orElse(false),
WORKER_SETUP_DURATION_MS,
"Didn't meet the exact requested number of online brokers: " + numBrokers);
} catch (AssertionError e) {
throw new AssertionError(detailMessage, e);
}
}
/**
* Confirm that the requested number of brokers are up and running.
*
* @param numBrokers the number of online brokers
* @return true if at least {@code numBrokers} are up; false otherwise
*/
protected Optional<Boolean> checkBrokersUp(int numBrokers, BiFunction<Integer, Integer, Boolean> comp) {
try {
int numRunning = connect.kafka().runningBrokers().size();
return Optional.of(comp.apply(numRunning, numBrokers));
} catch (Exception e) {
log.error("Could not check running brokers.", e);
return Optional.empty();
}
}
/**
* Assert that the topics with the specified names do not exist.
*
* @param topicNames the names of the topics that are expected to not exist
*/
public void assertTopicsDoNotExist(String... topicNames) throws InterruptedException {
Set<String> topicNameSet = Set.of(topicNames);
AtomicReference<Set<String>> existingTopics = new AtomicReference<>(topicNameSet);
waitForCondition(
() -> checkTopicsExist(topicNameSet, (actual, expected) -> {
existingTopics.set(actual);
return actual.isEmpty();
}).orElse(false),
CONNECTOR_SETUP_DURATION_MS,
() -> "Unexpectedly found topics " + existingTopics.get());
}
/**
* Assert that the topics with the specified names do exist.
*
* @param topicNames the names of the topics that are expected to exist
*/
public void assertTopicsExist(String... topicNames) throws InterruptedException {
Set<String> topicNameSet = Set.of(topicNames);
AtomicReference<Set<String>> missingTopics = new AtomicReference<>(topicNameSet);
waitForCondition(
() -> checkTopicsExist(topicNameSet, (actual, expected) -> {
Set<String> missing = new HashSet<>(expected);
missing.removeAll(actual);
missingTopics.set(missing);
return missing.isEmpty();
}).orElse(false),
CONNECTOR_SETUP_DURATION_MS,
() -> "Didn't find the topics " + missingTopics.get());
}
protected Optional<Boolean> checkTopicsExist(Set<String> topicNames, BiFunction<Set<String>, Set<String>, Boolean> comp) {
try {
Map<String, Optional<TopicDescription>> topics = connect.kafka().describeTopics(topicNames);
Set<String> actualExistingTopics = topics.entrySet()
.stream()
.filter(e -> e.getValue().isPresent())
.map(Map.Entry::getKey)
.collect(Collectors.toSet());
return Optional.of(comp.apply(actualExistingTopics, topicNames));
} catch (Exception e) {
log.error("Failed to describe the topic(s): {}.", topicNames, e);
return Optional.empty();
}
}
/**
* Assert that the named topic is configured to have the specified replication factor and
* number of partitions.
*
* @param topicName the name of the topic that is expected to exist
* @param replicas the replication factor
* @param partitions the number of partitions
* @param detailMessage the assertion message
*/
public void assertTopicSettings(String topicName, int replicas, int partitions, String detailMessage)
throws InterruptedException {
try {
waitForCondition(
() -> checkTopicSettings(
topicName,
replicas,
partitions
).orElse(false),
VALIDATION_DURATION_MS,
"Topic " + topicName + " does not exist or does not have exactly "
+ partitions + " partitions or at least "
+ replicas + " per partition");
} catch (AssertionError e) {
throw new AssertionError(detailMessage, e);
}
}
protected Optional<Boolean> checkTopicSettings(String topicName, int replicas, int partitions) {
try {
Map<String, Optional<TopicDescription>> topics = connect.kafka().describeTopics(topicName);
TopicDescription topicDesc = topics.get(topicName).orElse(null);
boolean result = topicDesc != null
&& topicDesc.name().equals(topicName)
&& topicDesc.partitions().size() == partitions
&& topicDesc.partitions().stream().allMatch(p -> p.replicas().size() >= replicas);
return Optional.of(result);
} catch (Exception e) {
log.error("Failed to describe the topic: {}.", topicName, e);
return Optional.empty();
}
}
/**
* Assert that the required number of errors are produced by a connector config validation.
*
* @param connectorClass the | ConnectAssertions |
java | quarkusio__quarkus | devtools/cli-common/src/main/java/io/quarkus/cli/common/BuildToolContext.java | {
"start": 400,
"end": 432
} | class ____ pass along.
*/
public | to |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java | {
"start": 25076,
"end": 27188
} | class ____ extends TransportResponse {
protected String nodeId;
protected int totalShards;
protected List<BroadcastShardOperationFailedException> exceptions;
protected List<ShardOperationResult> results;
NodeResponse(StreamInput in) throws IOException {
nodeId = in.readString();
totalShards = in.readVInt();
results = in.readCollectionAsList((stream) -> stream.readBoolean() ? readShardResult(stream) : null);
if (in.readBoolean()) {
exceptions = in.readCollectionAsList(BroadcastShardOperationFailedException::new);
} else {
exceptions = null;
}
}
// visible for testing
public NodeResponse(
String nodeId,
int totalShards,
List<ShardOperationResult> results,
List<BroadcastShardOperationFailedException> exceptions
) {
this.nodeId = nodeId;
this.totalShards = totalShards;
this.results = results;
this.exceptions = exceptions;
}
String getNodeId() {
return nodeId;
}
int getTotalShards() {
return totalShards;
}
int getSuccessfulShards() {
return results.size();
}
List<ShardOperationResult> getResults() {
return results;
}
List<BroadcastShardOperationFailedException> getExceptions() {
return exceptions;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(nodeId);
out.writeVInt(totalShards);
out.writeCollection(results, StreamOutput::writeOptionalWriteable);
out.writeBoolean(exceptions != null);
if (exceptions != null) {
out.writeCollection(exceptions);
}
}
}
/**
* Can be used for implementations of {@link #shardOperation} for which there is no shard-level return value.
*/
public static final | NodeResponse |
java | micronaut-projects__micronaut-core | inject/src/main/java/io/micronaut/context/conditions/MatchesConditionUtils.java | {
"start": 1184,
"end": 7800
} | class ____ {
private MatchesConditionUtils() {
}
/**
* Create conditions from the annotation value.
*
* @param requirement The requirement
* @param preConditions The pre-conditions collection to fill
* @param postConditions The post-conditions collection to fill
*/
public static void createConditions(AnnotationValue<Requires> requirement,
List<Condition> preConditions,
List<Condition> postConditions) {
if (requirement.contains(RequiresCondition.MEMBER_CLASSES)) {
AnnotationClassValue<?>[] classes = requirement.annotationClassValues(RequiresCondition.MEMBER_CLASSES);
if (classes.length > 0) {
preConditions.add(new MatchesPresenceOfClassesCondition(classes));
}
}
if (requirement.contains(RequiresCondition.MEMBER_MISSING_CLASSES)) {
AnnotationClassValue<?>[] classes = requirement.annotationClassValues(RequiresCondition.MEMBER_MISSING_CLASSES);
if (classes.length > 0) {
preConditions.add(new MatchesAbsenceOfClassesCondition(classes));
}
}
if (requirement.contains(RequiresCondition.MEMBER_ENV)) {
String[] env = requirement.stringValues(RequiresCondition.MEMBER_ENV);
if (env.length > 0) {
preConditions.add(new MatchesEnvironmentCondition(env));
}
}
if (requirement.contains(RequiresCondition.MEMBER_NOT_ENV)) {
String[] env = requirement.stringValues(RequiresCondition.MEMBER_NOT_ENV);
if (env.length > 0) {
preConditions.add(new MatchesNotEnvironmentCondition(env));
}
}
if (requirement.contains(RequiresCondition.MEMBER_ENTITIES)) {
AnnotationClassValue<?>[] classes = requirement.annotationClassValues(RequiresCondition.MEMBER_ENTITIES);
if (classes.length > 0) {
preConditions.add(new MatchesPresenceOfEntitiesCondition(classes));
}
}
if (requirement.contains(RequiresCondition.MEMBER_PROPERTY)) {
String property = requirement.stringValue(RequiresCondition.MEMBER_PROPERTY).orElse(null);
if (StringUtils.isNotEmpty(property)) {
MatchesPropertyCondition.Condition condition = MatchesPropertyCondition.Condition.CONTAINS;
String value = requirement.stringValue().orElse(null);
if (value != null) {
condition = MatchesPropertyCondition.Condition.EQUALS;
}
String defaultValue = requirement.stringValue(RequiresCondition.MEMBER_DEFAULT_VALUE).orElse(null);
if (value == null) {
String notEquals = requirement.stringValue(RequiresCondition.MEMBER_NOT_EQUALS).orElse(null);
if (notEquals != null) {
value = notEquals;
condition = MatchesPropertyCondition.Condition.NOT_EQUALS;
} else {
String pattern = requirement.stringValue(RequiresCondition.MEMBER_PATTERN).orElse(null);
if (pattern != null) {
value = pattern;
condition = MatchesPropertyCondition.Condition.PATTERN;
}
}
}
preConditions.add(new MatchesPropertyCondition(property, value, defaultValue, condition));
}
}
if (requirement.contains(RequiresCondition.MEMBER_MISSING_PROPERTY)) {
String property = requirement.stringValue(RequiresCondition.MEMBER_MISSING_PROPERTY).orElse(null);
if (StringUtils.isNotEmpty(property)) {
preConditions.add(new MatchesMissingPropertyCondition(property));
}
}
if (requirement.contains(RequiresCondition.MEMBER_CONFIGURATION)) {
String configurationName = requirement.stringValue(RequiresCondition.MEMBER_CONFIGURATION).orElse(null);
if (StringUtils.isNotEmpty(configurationName)) {
String minimumVersion = requirement.stringValue(RequiresCondition.MEMBER_VERSION).orElse(null);
preConditions.add(new MatchesConfigurationCondition(configurationName, minimumVersion));
}
}
if (requirement.contains(RequiresCondition.MEMBER_SDK)) {
Requires.Sdk sdk = requirement.enumValue(RequiresCondition.MEMBER_SDK, Requires.Sdk.class).orElse(null);
String version = requirement.stringValue(RequiresCondition.MEMBER_VERSION).orElse(null);
if (sdk != null && StringUtils.isNotEmpty(version)) {
preConditions.add(new MatchesSdkCondition(sdk, version));
}
}
if (requirement.contains(RequiresCondition.MEMBER_RESOURCES)) {
final String[] resourcePaths = requirement.stringValues(RequiresCondition.MEMBER_RESOURCES);
if (ArrayUtils.isNotEmpty(resourcePaths)) {
preConditions.add(new MatchesPresenceOfResourcesCondition(resourcePaths));
}
}
if (requirement.contains(RequiresCondition.MEMBER_OS)) {
final Set<Requires.Family> os = requirement.enumValuesSet(RequiresCondition.MEMBER_OS, Requires.Family.class);
if (!os.isEmpty()) {
preConditions.add(new MatchesCurrentOsCondition(os));
}
}
if (requirement.contains(RequiresCondition.MEMBER_NOT_OS)) {
final Set<Requires.Family> notOs = requirement.enumValuesSet(RequiresCondition.MEMBER_NOT_OS, Requires.Family.class);
if (!notOs.isEmpty()) {
preConditions.add(new MatchesCurrentNotOsCondition(notOs));
}
}
if (requirement.contains(RequiresCondition.MEMBER_BEAN)) {
AnnotationClassValue<?> bean = requirement.annotationClassValue(RequiresCondition.MEMBER_BEAN).orElse(null);
preConditions.add(new MatchesPresenceOfClassesCondition(new AnnotationClassValue[]{bean}));
postConditions.add(new MatchesPresenceOfBeansCondition(new AnnotationClassValue[]{bean}));
}
if (requirement.contains(RequiresCondition.MEMBER_BEANS)) {
AnnotationClassValue<?>[] beans = requirement.annotationClassValues(RequiresCondition.MEMBER_BEANS);
if (beans.length != 0) {
// For presence beans check we add a pre-check for the bean | MatchesConditionUtils |
java | quarkusio__quarkus | extensions/redis-client/runtime/src/main/java/io/quarkus/redis/runtime/client/RedisDataLoader.java | {
"start": 474,
"end": 1063
} | class ____ {
static final Logger LOGGER = Logger.getLogger("RedisDataLoader");
static void load(Vertx vertx, Redis redis, String path) {
LOGGER.infof("Importing Redis data from %s", path);
Buffer buffer = vertx.fileSystem().readFileBlocking(path);
if (buffer == null) {
throw new ConfigurationException("Unable to read the " + path + " file");
}
List<Request> batch = read(buffer.toString().lines().collect(Collectors.toList()));
redis.batch(batch).await().atMost(Duration.ofMinutes(1));
}
private | RedisDataLoader |
java | elastic__elasticsearch | x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexStatsMonitoringDoc.java | {
"start": 1104,
"end": 9797
} | class ____ extends FilteredMonitoringDoc {
public static final String TYPE = "index_stats";
private final IndexStats indexStats;
private final IndexMetadata metadata;
private final IndexRoutingTable routingTable;
IndexStatsMonitoringDoc(
final String cluster,
final long timestamp,
final long intervalMillis,
final MonitoringDoc.Node node,
@Nullable final IndexStats indexStats,
final IndexMetadata metadata,
final IndexRoutingTable routingTable
) {
super(cluster, timestamp, intervalMillis, node, MonitoredSystem.ES, TYPE, null, XCONTENT_FILTERS);
this.indexStats = indexStats;
this.metadata = Objects.requireNonNull(metadata);
this.routingTable = Objects.requireNonNull(routingTable);
}
IndexStats getIndexStats() {
return indexStats;
}
IndexMetadata getIndexMetadata() {
return metadata;
}
IndexRoutingTable getIndexRoutingTable() {
return routingTable;
}
@Override
protected void innerToXContent(XContentBuilder builder, Params params) throws IOException {
final ClusterIndexHealth health = new ClusterIndexHealth(metadata, routingTable);
builder.startObject(TYPE);
{
builder.field("index", metadata.getIndex().getName());
builder.field("uuid", metadata.getIndexUUID());
builder.field("created", metadata.getCreationDate());
builder.field("status", health.getStatus().name().toLowerCase(Locale.ROOT));
builder.startObject("shards");
{
final int total = metadata.getTotalNumberOfShards();
final int primaries = metadata.getNumberOfShards();
final int activeTotal = health.getActiveShards();
final int activePrimaries = health.getActivePrimaryShards();
final int unassignedTotal = health.getUnassignedShards() + health.getInitializingShards();
final int unassignedPrimaries = primaries - health.getActivePrimaryShards();
builder.field("total", total);
builder.field("primaries", primaries);
builder.field("replicas", metadata.getNumberOfReplicas());
builder.field("active_total", activeTotal);
builder.field("active_primaries", activePrimaries);
builder.field("active_replicas", activeTotal - activePrimaries);
builder.field("unassigned_total", unassignedTotal);
builder.field("unassigned_primaries", unassignedPrimaries);
builder.field("unassigned_replicas", unassignedTotal - unassignedPrimaries);
builder.field("initializing", health.getInitializingShards());
builder.field("relocating", health.getRelocatingShards());
}
builder.endObject();
// when an index is completely red, then we don't get stats for it
if (indexStats != null) {
final CommonStats totalStats = indexStats.getTotal();
if (totalStats != null) {
builder.startObject("total");
{
totalStats.toXContent(builder, params);
}
builder.endObject();
}
final CommonStats primariesStats = indexStats.getPrimaries();
if (primariesStats != null) {
builder.startObject("primaries");
{
primariesStats.toXContent(builder, params);
}
builder.endObject();
}
}
}
builder.endObject();
}
public static final Set<String> XCONTENT_FILTERS = Set.of(
"index_stats.index",
"index_stats.uuid",
"index_stats.created",
"index_stats.status",
"index_stats.shards.total",
"index_stats.shards.primaries",
"index_stats.shards.replicas",
"index_stats.shards.active_total",
"index_stats.shards.active_primaries",
"index_stats.shards.active_replicas",
"index_stats.shards.unassigned_total",
"index_stats.shards.unassigned_primaries",
"index_stats.shards.unassigned_replicas",
"index_stats.shards.initializing",
"index_stats.shards.relocating",
"index_stats.primaries.docs.count",
"index_stats.primaries.fielddata.memory_size_in_bytes",
"index_stats.primaries.fielddata.evictions",
"index_stats.primaries.indexing.index_total",
"index_stats.primaries.indexing.index_time_in_millis",
"index_stats.primaries.indexing.throttle_time_in_millis",
"index_stats.primaries.merges.total_size_in_bytes",
"index_stats.primaries.query_cache.memory_size_in_bytes",
"index_stats.primaries.query_cache.evictions",
"index_stats.primaries.query_cache.hit_count",
"index_stats.primaries.query_cache.miss_count",
"index_stats.primaries.request_cache.memory_size_in_bytes",
"index_stats.primaries.request_cache.evictions",
"index_stats.primaries.request_cache.hit_count",
"index_stats.primaries.request_cache.miss_count",
"index_stats.primaries.search.query_total",
"index_stats.primaries.search.query_time_in_millis",
"index_stats.primaries.segments.count",
"index_stats.primaries.segments.memory_in_bytes",
"index_stats.primaries.segments.terms_memory_in_bytes",
"index_stats.primaries.segments.stored_fields_memory_in_bytes",
"index_stats.primaries.segments.term_vectors_memory_in_bytes",
"index_stats.primaries.segments.norms_memory_in_bytes",
"index_stats.primaries.segments.points_memory_in_bytes",
"index_stats.primaries.segments.doc_values_memory_in_bytes",
"index_stats.primaries.segments.index_writer_memory_in_bytes",
"index_stats.primaries.segments.version_map_memory_in_bytes",
"index_stats.primaries.segments.fixed_bit_set_memory_in_bytes",
"index_stats.primaries.store.size_in_bytes",
"index_stats.primaries.refresh.total_time_in_millis",
"index_stats.primaries.refresh.external_total_time_in_millis",
"index_stats.primaries.bulk.total_operations",
"index_stats.primaries.bulk.total_time_in_millis",
"index_stats.primaries.bulk.total_size_in_bytes",
"index_stats.primaries.bulk.avg_time_in_millis",
"index_stats.primaries.bulk.avg_size_in_bytes",
"index_stats.total.docs.count",
"index_stats.total.fielddata.memory_size_in_bytes",
"index_stats.total.fielddata.evictions",
"index_stats.total.indexing.index_total",
"index_stats.total.indexing.index_time_in_millis",
"index_stats.total.indexing.throttle_time_in_millis",
"index_stats.total.merges.total_size_in_bytes",
"index_stats.total.query_cache.memory_size_in_bytes",
"index_stats.total.query_cache.evictions",
"index_stats.total.query_cache.hit_count",
"index_stats.total.query_cache.miss_count",
"index_stats.total.request_cache.memory_size_in_bytes",
"index_stats.total.request_cache.evictions",
"index_stats.total.request_cache.hit_count",
"index_stats.total.request_cache.miss_count",
"index_stats.total.search.query_total",
"index_stats.total.search.query_time_in_millis",
"index_stats.total.segments.count",
"index_stats.total.segments.memory_in_bytes",
"index_stats.total.segments.terms_memory_in_bytes",
"index_stats.total.segments.stored_fields_memory_in_bytes",
"index_stats.total.segments.term_vectors_memory_in_bytes",
"index_stats.total.segments.norms_memory_in_bytes",
"index_stats.total.segments.points_memory_in_bytes",
"index_stats.total.segments.doc_values_memory_in_bytes",
"index_stats.total.segments.index_writer_memory_in_bytes",
"index_stats.total.segments.version_map_memory_in_bytes",
"index_stats.total.segments.fixed_bit_set_memory_in_bytes",
"index_stats.total.store.size_in_bytes",
"index_stats.total.refresh.total_time_in_millis",
"index_stats.total.refresh.external_total_time_in_millis",
"index_stats.total.bulk.total_operations",
"index_stats.total.bulk.total_time_in_millis",
"index_stats.total.bulk.total_size_in_bytes",
"index_stats.total.bulk.avg_time_in_millis",
"index_stats.total.bulk.avg_size_in_bytes"
);
}
| IndexStatsMonitoringDoc |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/internal/operators/single/SingleMergeTest.java | {
"start": 952,
"end": 3944
} | class ____ extends RxJavaTest {
@Test
public void mergeSingleSingle() {
Single.merge(Single.just(Single.just(1)))
.test()
.assertResult(1);
}
@Test
public void merge2() {
Single.merge(Single.just(1), Single.just(2))
.test()
.assertResult(1, 2);
}
@Test
public void merge3() {
Single.merge(Single.just(1), Single.just(2), Single.just(3))
.test()
.assertResult(1, 2, 3);
}
@Test
public void merge4() {
Single.merge(Single.just(1), Single.just(2), Single.just(3), Single.just(4))
.test()
.assertResult(1, 2, 3, 4);
}
@Test
public void mergeErrors() {
List<Throwable> errors = TestHelper.trackPluginErrors();
try {
Single<Integer> source1 = Single.error(new TestException("First"));
Single<Integer> source2 = Single.error(new TestException("Second"));
Single.merge(source1, source2)
.to(TestHelper.<Integer>testConsumer())
.assertFailureAndMessage(TestException.class, "First");
assertTrue(errors.toString(), errors.isEmpty());
} finally {
RxJavaPlugins.reset();
}
}
@Test
public void mergeDelayErrorIterable() {
Single.mergeDelayError(Arrays.asList(
Single.just(1),
Single.<Integer>error(new TestException()),
Single.just(2))
)
.test()
.assertFailure(TestException.class, 1, 2);
}
@Test
public void mergeDelayErrorPublisher() {
Single.mergeDelayError(Flowable.just(
Single.just(1),
Single.<Integer>error(new TestException()),
Single.just(2))
)
.test()
.assertFailure(TestException.class, 1, 2);
}
@Test
public void mergeDelayError2() {
Single.mergeDelayError(
Single.just(1),
Single.<Integer>error(new TestException())
)
.test()
.assertFailure(TestException.class, 1);
}
@Test
public void mergeDelayError2ErrorFirst() {
Single.mergeDelayError(
Single.<Integer>error(new TestException()),
Single.just(1)
)
.test()
.assertFailure(TestException.class, 1);
}
@Test
public void mergeDelayError3() {
Single.mergeDelayError(
Single.just(1),
Single.<Integer>error(new TestException()),
Single.just(2)
)
.test()
.assertFailure(TestException.class, 1, 2);
}
@Test
public void mergeDelayError4() {
Single.mergeDelayError(
Single.just(1),
Single.<Integer>error(new TestException()),
Single.just(2),
Single.just(3)
)
.test()
.assertFailure(TestException.class, 1, 2, 3);
}
}
| SingleMergeTest |
java | redisson__redisson | redisson-spring-data/redisson-spring-data-17/src/main/java/org/redisson/spring/data/connection/ScoredSortedListReplayDecoder.java | {
"start": 1126,
"end": 1826
} | class ____ implements MultiDecoder<List<Tuple>> {
@Override
public Decoder<Object> getDecoder(Codec codec, int paramNum, State state, long size) {
if (paramNum % 2 != 0) {
return DoubleCodec.INSTANCE.getValueDecoder();
}
return MultiDecoder.super.getDecoder(codec, paramNum, state, size);
}
@Override
public List<Tuple> decode(List<Object> parts, State state) {
List<Tuple> result = new ArrayList<Tuple>();
for (int i = 0; i < parts.size(); i += 2) {
result.add(new DefaultTuple((byte[])parts.get(i), ((Number)parts.get(i+1)).doubleValue()));
}
return result;
}
}
| ScoredSortedListReplayDecoder |
java | apache__camel | components/camel-ai/camel-langchain4j-agent-api/src/main/java/org/apache/camel/component/langchain4j/agent/api/AiAgentWithMemoryService.java | {
"start": 1135,
"end": 1209
} | interface ____ to
* limitations in @MemoryId annotation usage.
*/
public | due |
java | redisson__redisson | redisson/src/main/java/org/redisson/RedissonReadLock.java | {
"start": 1156,
"end": 8756
} | class ____ extends RedissonLock implements RLock {
protected RedissonReadLock(CommandAsyncExecutor commandExecutor, String name) {
super(commandExecutor, name);
}
@Override
String getChannelName() {
return prefixName("redisson_rwlock", getRawName());
}
String getWriteLockName(long threadId) {
return super.getLockName(threadId) + ":write";
}
String getReadWriteTimeoutNamePrefix(long threadId) {
return suffixName(getRawName(), getLockName(threadId)) + ":rwlock_timeout";
}
@Override
<T> RFuture<T> tryLockInnerAsync(long waitTime, long leaseTime, TimeUnit unit, long threadId, RedisStrictCommand<T> command) {
return commandExecutor.syncedEvalNoRetry(getRawName(), LongCodec.INSTANCE, command,
"local mode = redis.call('hget', KEYS[1], 'mode'); " +
"if (mode == false) then " +
"redis.call('hset', KEYS[1], 'mode', 'read'); " +
"redis.call('hset', KEYS[1], ARGV[2], 1); " +
"redis.call('set', KEYS[2] .. ':1', 1); " +
"redis.call('pexpire', KEYS[2] .. ':1', ARGV[1]); " +
"redis.call('pexpire', KEYS[1], ARGV[1]); " +
"return nil; " +
"end; " +
"if (mode == 'read') or (mode == 'write' and redis.call('hexists', KEYS[1], ARGV[3]) == 1) then " +
"local ind = redis.call('hincrby', KEYS[1], ARGV[2], 1); " +
"local key = KEYS[2] .. ':' .. ind;" +
"redis.call('set', key, 1); " +
"redis.call('pexpire', key, ARGV[1]); " +
"local remainTime = redis.call('pttl', KEYS[1]); " +
"redis.call('pexpire', KEYS[1], math.max(remainTime, ARGV[1])); " +
"return nil; " +
"end;" +
"return redis.call('pttl', KEYS[1]);",
Arrays.<Object>asList(getRawName(), getReadWriteTimeoutNamePrefix(threadId)),
unit.toMillis(leaseTime), getLockName(threadId), getWriteLockName(threadId));
}
@Override
protected RFuture<Boolean> unlockInnerAsync(long threadId, String requestId, int timeout) {
String timeoutPrefix = getReadWriteTimeoutNamePrefix(threadId);
String keyPrefix = getKeyPrefix(threadId, timeoutPrefix);
return evalWriteSyncedNoRetryAsync(getRawName(), LongCodec.INSTANCE, RedisCommands.EVAL_BOOLEAN,
"local val = redis.call('get', KEYS[5]); " +
"if val ~= false then " +
"return tonumber(val);" +
"end; " +
"local mode = redis.call('hget', KEYS[1], 'mode'); " +
"if (mode == false) then " +
"redis.call(ARGV[3], KEYS[2], ARGV[1]); " +
"redis.call('set', KEYS[5], 1, 'px', ARGV[4]); " +
"return nil; " +
"end; " +
"local lockExists = redis.call('hexists', KEYS[1], ARGV[2]); " +
"if (lockExists == 0) then " +
"return nil;" +
"end; " +
"local counter = redis.call('hincrby', KEYS[1], ARGV[2], -1); " +
"if (counter == 0) then " +
"redis.call('hdel', KEYS[1], ARGV[2]); " +
"end;" +
"redis.call('del', KEYS[3] .. ':' .. (counter+1)); " +
"if (redis.call('hlen', KEYS[1]) > 1) then " +
"local maxRemainTime = -3; " +
"local keys = redis.call('hkeys', KEYS[1]); " +
"for n, key in ipairs(keys) do " +
"counter = tonumber(redis.call('hget', KEYS[1], key)); " +
"if type(counter) == 'number' then " +
"for i=counter, 1, -1 do " +
"local remainTime = redis.call('pttl', KEYS[4] .. ':' .. key .. ':rwlock_timeout:' .. i); " +
"maxRemainTime = math.max(remainTime, maxRemainTime);" +
"end; " +
"end; " +
"end; " +
"if maxRemainTime > 0 then " +
"redis.call('pexpire', KEYS[1], maxRemainTime); " +
"redis.call('set', KEYS[5], 0, 'px', ARGV[4]); " +
"return 0; " +
"end;" +
"if mode == 'write' then " +
"redis.call('set', KEYS[5], 0, 'px', ARGV[4]); " +
"return 0;" +
"end; " +
"end; " +
"redis.call('del', KEYS[1]); " +
"redis.call(ARGV[3], KEYS[2], ARGV[1]); " +
"redis.call('set', KEYS[5], 1, 'px', ARGV[4]); " +
"return 1; ",
Arrays.<Object>asList(getRawName(), getChannelName(), timeoutPrefix, keyPrefix, getUnlockLatchName(requestId)),
LockPubSub.UNLOCK_MESSAGE, getLockName(threadId), getSubscribeService().getPublishCommand(), timeout);
}
protected String getKeyPrefix(long threadId, String timeoutPrefix) {
return timeoutPrefix.split(":" + getLockName(threadId))[0];
}
@Override
protected void scheduleExpirationRenewal(long threadId) {
String timeoutPrefix = getReadWriteTimeoutNamePrefix(threadId);
String keyPrefix = getKeyPrefix(threadId, timeoutPrefix);
renewalScheduler.renewReadLock(getRawName(), threadId, getLockName(threadId), keyPrefix);
}
@Override
protected void cancelExpirationRenewal(Long threadId, Boolean unlockResult) {
super.cancelExpirationRenewal(threadId, unlockResult);
renewalScheduler.cancelReadLockRenewal(getRawName(), threadId);
}
@Override
public Condition newCondition() {
throw new UnsupportedOperationException();
}
@Override
public RFuture<Boolean> forceUnlockAsync() {
cancelExpirationRenewal(null, null);
return commandExecutor.syncedEvalWithRetry(getRawName(), LongCodec.INSTANCE, RedisCommands.EVAL_BOOLEAN,
"if (redis.call('hget', KEYS[1], 'mode') == 'read') then " +
"redis.call('del', KEYS[1]); " +
"redis.call(ARGV[2], KEYS[2], ARGV[1]); " +
"return 1; " +
"end; " +
"return 0; ",
Arrays.asList(getRawName(), getChannelName()),
LockPubSub.UNLOCK_MESSAGE, getSubscribeService().getPublishCommand());
}
@Override
public boolean isLocked() {
RFuture<Boolean> future = commandExecutor.evalWriteAsync(getRawName(), LongCodec.INSTANCE, RedisCommands.EVAL_BOOLEAN,
"local mode = redis.call('hget', KEYS[1], 'mode'); " +
"if (mode == 'read') or (mode == 'write' and redis.call('hlen', KEYS[1]) > 2) then " +
"return 1; " +
"end; " +
"return 0; ",
Arrays.asList(getRawName()));
return get(future);
}
}
| RedissonReadLock |
java | spring-projects__spring-security | core/src/main/java/org/springframework/security/jackson2/SecurityJackson2Modules.java | {
"start": 7066,
"end": 8075
} | class ____ instantiate
*/
private static void addToModulesList(ClassLoader loader, List<Module> modules, String className) {
Module module = loadAndGetInstance(className, loader);
if (module != null) {
modules.add(module);
}
}
/**
* Creates a TypeResolverBuilder that restricts allowed types.
* @return a TypeResolverBuilder that restricts allowed types.
*/
private static TypeResolverBuilder<? extends TypeResolverBuilder> createAllowlistedDefaultTyping() {
TypeResolverBuilder<? extends TypeResolverBuilder> result = new AllowlistTypeResolverBuilder(
ObjectMapper.DefaultTyping.NON_FINAL);
result = result.init(JsonTypeInfo.Id.CLASS, null);
result = result.inclusion(JsonTypeInfo.As.PROPERTY);
return result;
}
/**
* An implementation of {@link ObjectMapper.DefaultTypeResolverBuilder} that inserts
* an {@code allow all} {@link PolymorphicTypeValidator} and overrides the
* {@code TypeIdResolver}
*
* @author Rob Winch
*/
@SuppressWarnings("serial")
static | to |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/cglib/core/ReflectUtils.java | {
"start": 17415,
"end": 18002
} | class ____ that target ClassLoader instead.";
}
};
}
}
catch (Throwable ex) {
throw new CodeGenerationException(ex);
}
}
// No defineClass variant available at all?
if (c == null) {
throw new CodeGenerationException(t) {
@Override
public String getMessage() {
return "No compatible defineClass mechanism detected: " +
"JVM should be started with --add-opens=java.base/java.lang=ALL-UNNAMED " +
"for ClassLoader.defineClass to be accessible. On the module path, " +
"you may not be able to define this CGLIB-generated | in |
java | spring-projects__spring-framework | spring-aop/src/main/java/org/springframework/aop/framework/autoproxy/AbstractAutoProxyCreator.java | {
"start": 17253,
"end": 19845
} | class ____ the bean
* @param beanName the name of the bean
* @param specificInterceptors the set of interceptors that is
* specific to this bean (may be empty, but not null)
* @param targetSource the TargetSource for the proxy,
* already pre-configured to access the bean
* @return the AOP proxy for the bean
* @see #buildAdvisors
*/
protected Object createProxy(Class<?> beanClass, @Nullable String beanName,
Object @Nullable [] specificInterceptors, TargetSource targetSource) {
return buildProxy(beanClass, beanName, specificInterceptors, targetSource, false);
}
private Class<?> createProxyClass(Class<?> beanClass, @Nullable String beanName,
Object @Nullable [] specificInterceptors, TargetSource targetSource) {
return (Class<?>) buildProxy(beanClass, beanName, specificInterceptors, targetSource, true);
}
private Object buildProxy(Class<?> beanClass, @Nullable String beanName,
Object @Nullable [] specificInterceptors, TargetSource targetSource, boolean classOnly) {
if (this.beanFactory instanceof ConfigurableListableBeanFactory clbf) {
AutoProxyUtils.exposeTargetClass(clbf, beanName, beanClass);
}
ProxyFactory proxyFactory = new ProxyFactory();
proxyFactory.copyFrom(this);
proxyFactory.setFrozen(false);
if (shouldProxyTargetClass(beanClass, beanName)) {
proxyFactory.setProxyTargetClass(true);
}
else {
Class<?>[] ifcs = (this.beanFactory instanceof ConfigurableListableBeanFactory clbf ?
AutoProxyUtils.determineExposedInterfaces(clbf, beanName) : null);
if (ifcs != null) {
proxyFactory.setProxyTargetClass(false);
for (Class<?> ifc : ifcs) {
proxyFactory.addInterface(ifc);
}
}
if (ifcs != null ? ifcs.length == 0 : !proxyFactory.isProxyTargetClass()) {
evaluateProxyInterfaces(beanClass, proxyFactory);
}
}
if (proxyFactory.isProxyTargetClass()) {
// Explicit handling of JDK proxy targets and lambdas (for introduction advice scenarios)
if (Proxy.isProxyClass(beanClass) || ClassUtils.isLambdaClass(beanClass)) {
// Must allow for introductions; can't just set interfaces to the proxy's interfaces only.
for (Class<?> ifc : beanClass.getInterfaces()) {
proxyFactory.addInterface(ifc);
}
}
}
Advisor[] advisors = buildAdvisors(beanName, specificInterceptors);
proxyFactory.addAdvisors(advisors);
proxyFactory.setTargetSource(targetSource);
customizeProxyFactory(proxyFactory);
proxyFactory.setFrozen(isFrozen());
if (advisorsPreFiltered()) {
proxyFactory.setPreFiltered(true);
}
// Use original ClassLoader if bean | of |
java | apache__camel | components/camel-sjms/src/test/java/org/apache/camel/component/sjms/tx/TransactedConcurrentConsumersTest.java | {
"start": 1165,
"end": 2161
} | class ____ extends TransactedConsumerSupport {
@RegisterExtension
protected static ArtemisService service = ArtemisServiceFactory.createVMService();
/**
* We want to verify that when consuming from a single destination with multiple routes that we are thread safe and
* behave accordingly.
*/
@Test
public void testRoute() throws Exception {
final String destinationName = "sjms:queue:one.consumer.one.route.test.queue.TransactedAsyncExceptionTest";
int routeCount = 1;
int concurrentConsumers = 2;
int messageCount = 20;
int maxAttemptsCount = 10;
int totalRedeliverdFalse = 20;
int totalRedeliveredTrue = 1;
runTest(destinationName, routeCount, messageCount, totalRedeliverdFalse, totalRedeliveredTrue,
concurrentConsumers, maxAttemptsCount);
}
@Override
public String getBrokerUri() {
return service.serviceAddress();
}
}
| TransactedConcurrentConsumersTest |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/aggregations/metrics/NonCollectingMultiMetricAggregator.java | {
"start": 1200,
"end": 2712
} | class ____ extends NumericMetricsAggregator.MultiValue {
private final InternalNumericMetricsAggregation.MultiValue emptyAggregation;
private final Predicate<String> hasMetric;
/**
* Build a {@linkplain NonCollectingMultiMetricAggregator} for {@link SingleValue} aggregators.
*/
public NonCollectingMultiMetricAggregator(
String name,
AggregationContext context,
Aggregator parent,
InternalNumericMetricsAggregation.MultiValue emptyAggregation,
Predicate<String> hasMetric,
Map<String, Object> metadata
) throws IOException {
super(name, context, parent, metadata);
this.emptyAggregation = emptyAggregation;
this.hasMetric = hasMetric;
}
@Override
public LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx, LeafBucketCollector sub) {
// the framework will automatically eliminate it
return LeafBucketCollector.NO_OP_COLLECTOR;
}
@Override
public InternalAggregation buildAggregation(long owningBucketOrd) throws IOException {
return buildEmptyAggregation();
}
@Override
public InternalAggregation buildEmptyAggregation() {
return emptyAggregation;
}
@Override
public boolean hasMetric(String name) {
return hasMetric.test(name);
}
@Override
public double metric(String name, long owningBucketOrd) {
return emptyAggregation.value(name);
}
}
| NonCollectingMultiMetricAggregator |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_1300/Issue1399.java | {
"start": 162,
"end": 1157
} | class ____ extends TestCase {
public void test_for_issue() throws Exception {
JSON.parseObject("false", boolean.class);
JSON.parseObject("false", Boolean.class);
JSON.parseObject("\"false\"", boolean.class);
JSON.parseObject("\"false\"", Boolean.class);
// JSON.parseObject("FALSE", boolean.class);
// JSON.parseObject("FALSE", Boolean.class);
JSON.parseObject("\"FALSE\"", boolean.class);
JSON.parseObject("\"FALSE\"", Boolean.class);
}
public void test_for_issue_true() throws Exception {
JSON.parseObject("true", boolean.class);
JSON.parseObject("true", Boolean.class);
JSON.parseObject("\"true\"", boolean.class);
JSON.parseObject("\"true\"", Boolean.class);
// JSON.parseObject("FALSE", boolean.class);
// JSON.parseObject("FALSE", Boolean.class);
JSON.parseObject("\"TRUE\"", boolean.class);
JSON.parseObject("\"TRUE\"", Boolean.class);
}
}
| Issue1399 |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/ser/TestJsonSerializeAs.java | {
"start": 1650,
"end": 1754
} | class ____ extends Bean1178Abstract {
public int getC() { return 3; }
}
static | Bean1178Impl |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java | {
"start": 5742,
"end": 38796
} | class ____ implements
Answer<GetReplicaVisibleLengthResponseProto> {
final BlockTokenSecretManager sm;
final BlockTokenIdentifier ident;
public GetLengthAnswer(BlockTokenSecretManager sm,
BlockTokenIdentifier ident) {
this.sm = sm;
this.ident = ident;
}
@Override
public GetReplicaVisibleLengthResponseProto answer(
InvocationOnMock invocation) throws IOException {
Object args[] = invocation.getArguments();
assertEquals(2, args.length);
GetReplicaVisibleLengthRequestProto req =
(GetReplicaVisibleLengthRequestProto) args[1];
Set<TokenIdentifier> tokenIds = UserGroupInformation.getCurrentUser()
.getTokenIdentifiers();
assertEquals(1, tokenIds.size(), "Only one BlockTokenIdentifier expected");
long result = 0;
for (TokenIdentifier tokenId : tokenIds) {
BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
LOG.info("Got: " + id.toString());
assertTrue(ident.equals(id), "Received BlockTokenIdentifier is wrong");
sm.checkAccess(id, null, PBHelperClient.convert(req.getBlock()),
BlockTokenIdentifier.AccessMode.WRITE,
new StorageType[]{StorageType.DEFAULT}, null);
result = id.getBlockId();
}
return GetReplicaVisibleLengthResponseProto.newBuilder()
.setLength(result).build();
}
}
private BlockTokenIdentifier generateTokenId(BlockTokenSecretManager sm,
ExtendedBlock block, EnumSet<BlockTokenIdentifier.AccessMode> accessModes,
StorageType[] storageTypes, String[] storageIds)
throws IOException {
Token<BlockTokenIdentifier> token = sm.generateToken(block, accessModes,
storageTypes, storageIds);
BlockTokenIdentifier id = sm.createIdentifier();
id.readFields(new DataInputStream(new ByteArrayInputStream(token
.getIdentifier())));
return id;
}
private void testWritable(boolean enableProtobuf) throws Exception {
TestWritable.testWritable(new BlockTokenIdentifier());
BlockTokenSecretManager sm = new BlockTokenSecretManager(
blockKeyUpdateInterval, blockTokenLifetime, 0, 1, "fake-pool", null,
enableProtobuf);
TestWritable.testWritable(generateTokenId(sm, block3,
EnumSet.noneOf(BlockTokenIdentifier.AccessMode.class),
new StorageType[]{StorageType.DEFAULT}, null));
TestWritable.testWritable(generateTokenId(sm, block3,
EnumSet.of(BlockTokenIdentifier.AccessMode.WRITE),
new StorageType[]{StorageType.DEFAULT}, null));
TestWritable.testWritable(generateTokenId(sm, block3,
EnumSet.allOf(BlockTokenIdentifier.AccessMode.class),
new StorageType[]{StorageType.DEFAULT}, null));
TestWritable.testWritable(generateTokenId(sm, block1,
EnumSet.allOf(BlockTokenIdentifier.AccessMode.class),
new StorageType[]{StorageType.DEFAULT}, null));
TestWritable.testWritable(generateTokenId(sm, block2,
EnumSet.of(BlockTokenIdentifier.AccessMode.WRITE),
new StorageType[]{StorageType.DEFAULT}, null));
TestWritable.testWritable(generateTokenId(sm, block3,
EnumSet.noneOf(BlockTokenIdentifier.AccessMode.class),
new StorageType[]{StorageType.DEFAULT}, null));
// We must be backwards compatible when adding storageType
TestWritable.testWritable(generateTokenId(sm, block3,
EnumSet.noneOf(BlockTokenIdentifier.AccessMode.class), null, null));
TestWritable.testWritable(generateTokenId(sm, block3,
EnumSet.noneOf(BlockTokenIdentifier.AccessMode.class),
StorageType.EMPTY_ARRAY, null));
}
@Test
public void testWritableLegacy() throws Exception {
testWritable(false);
}
@Test
public void testWritableProtobuf() throws Exception {
testWritable(true);
}
private static void checkAccess(BlockTokenSecretManager m,
Token<BlockTokenIdentifier> t, ExtendedBlock blk,
BlockTokenIdentifier.AccessMode mode, StorageType[] storageTypes,
String[] storageIds) throws IOException {
if (storageIds == null) {
// Test overloaded checkAccess method.
m.checkAccess(t.decodeIdentifier(), null, blk, mode, storageTypes);
if (storageTypes == null) {
// Test overloaded checkAccess method.
m.checkAccess(t, null, blk, mode);
}
}
m.checkAccess(t, null, blk, mode, storageTypes, storageIds);
}
private void tokenGenerationAndVerification(BlockTokenSecretManager master,
BlockTokenSecretManager slave, StorageType[] storageTypes,
String[] storageIds) throws Exception {
// single-mode tokens
for (BlockTokenIdentifier.AccessMode mode : BlockTokenIdentifier.AccessMode
.values()) {
// generated by master
Token<BlockTokenIdentifier> token1 = master.generateToken(block1,
EnumSet.of(mode), storageTypes, storageIds);
checkAccess(master, token1, block1, mode, storageTypes, storageIds);
checkAccess(slave, token1, block1, mode, storageTypes, storageIds);
// generated by slave
Token<BlockTokenIdentifier> token2 = slave.generateToken(block2,
EnumSet.of(mode), storageTypes, storageIds);
checkAccess(master, token2, block2, mode, storageTypes, storageIds);
checkAccess(slave, token2, block2, mode, storageTypes, storageIds);
}
// multi-mode tokens
Token<BlockTokenIdentifier> mtoken = master.generateToken(block3,
EnumSet.allOf(BlockTokenIdentifier.AccessMode.class),
storageTypes, storageIds);
for (BlockTokenIdentifier.AccessMode mode : BlockTokenIdentifier.AccessMode
.values()) {
checkAccess(master, mtoken, block3, mode, storageTypes, storageIds);
checkAccess(slave, mtoken, block3, mode, storageTypes, storageIds);
}
}
/** test block key and token handling */
private void testBlockTokenSecretManager(boolean enableProtobuf)
throws Exception {
BlockTokenSecretManager masterHandler = new BlockTokenSecretManager(
blockKeyUpdateInterval, blockTokenLifetime, 0, 1, "fake-pool", null,
enableProtobuf);
BlockTokenSecretManager slaveHandler = new BlockTokenSecretManager(
blockKeyUpdateInterval, blockTokenLifetime, "fake-pool", null,
enableProtobuf);
ExportedBlockKeys keys = masterHandler.exportKeys();
slaveHandler.addKeys(keys);
tokenGenerationAndVerification(masterHandler, slaveHandler,
new StorageType[]{StorageType.DEFAULT}, null);
tokenGenerationAndVerification(masterHandler, slaveHandler, null, null);
// key updating
masterHandler.updateKeys();
tokenGenerationAndVerification(masterHandler, slaveHandler,
new StorageType[]{StorageType.DEFAULT}, null);
tokenGenerationAndVerification(masterHandler, slaveHandler, null, null);
keys = masterHandler.exportKeys();
slaveHandler.addKeys(keys);
tokenGenerationAndVerification(masterHandler, slaveHandler,
new StorageType[]{StorageType.DEFAULT}, null);
tokenGenerationAndVerification(masterHandler, slaveHandler, null, null);
}
@Test
public void testBlockTokenSecretManagerLegacy() throws Exception {
testBlockTokenSecretManager(false);
}
@Test
public void testBlockTokenSecretManagerProtobuf() throws Exception {
testBlockTokenSecretManager(true);
}
private static Server createMockDatanode(BlockTokenSecretManager sm,
Token<BlockTokenIdentifier> token, Configuration conf)
throws IOException, ServiceException {
ClientDatanodeProtocolPB mockDN = mock(ClientDatanodeProtocolPB.class);
BlockTokenIdentifier id = sm.createIdentifier();
id.readFields(new DataInputStream(new ByteArrayInputStream(token
.getIdentifier())));
doAnswer(new GetLengthAnswer(sm, id)).when(mockDN)
.getReplicaVisibleLength(any(), any());
RPC.setProtocolEngine(conf, ClientDatanodeProtocolPB.class,
ProtobufRpcEngine2.class);
BlockingService service = ClientDatanodeProtocolService
.newReflectiveBlockingService(mockDN);
return new RPC.Builder(conf).setProtocol(ClientDatanodeProtocolPB.class)
.setInstance(service).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(5).setVerbose(true).setSecretManager(sm).build();
}
private void testBlockTokenRpc(boolean enableProtobuf) throws Exception {
Configuration conf = new Configuration();
conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
UserGroupInformation.setConfiguration(conf);
BlockTokenSecretManager sm = new BlockTokenSecretManager(
blockKeyUpdateInterval, blockTokenLifetime, 0, 1, "fake-pool", null,
enableProtobuf);
Token<BlockTokenIdentifier> token = sm.generateToken(block3,
EnumSet.allOf(BlockTokenIdentifier.AccessMode.class),
new StorageType[]{StorageType.DEFAULT}, new String[0]);
final Server server = createMockDatanode(sm, token, conf);
server.start();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
final UserGroupInformation ticket = UserGroupInformation
.createRemoteUser(block3.toString());
ticket.addToken(token);
ClientDatanodeProtocol proxy = null;
try {
proxy = DFSUtilClient.createClientDatanodeProtocolProxy(addr, ticket, conf,
NetUtils.getDefaultSocketFactory(conf));
assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
} finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
@Test
public void testBlockTokenRpcLegacy() throws Exception {
testBlockTokenRpc(false);
}
@Test
public void testBlockTokenRpcProtobuf() throws Exception {
testBlockTokenRpc(true);
}
/**
* Test that fast repeated invocations of createClientDatanodeProtocolProxy
* will not end up using up thousands of sockets. This is a regression test
* for HDFS-1965.
*/
private void testBlockTokenRpcLeak(boolean enableProtobuf) throws Exception {
Configuration conf = new Configuration();
conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
UserGroupInformation.setConfiguration(conf);
assumeTrue(FD_DIR.exists());
BlockTokenSecretManager sm = new BlockTokenSecretManager(
blockKeyUpdateInterval, blockTokenLifetime, 0, 1, "fake-pool", null,
enableProtobuf);
Token<BlockTokenIdentifier> token = sm.generateToken(block3,
EnumSet.allOf(BlockTokenIdentifier.AccessMode.class),
new StorageType[]{StorageType.DEFAULT}, new String[0]);
final Server server = createMockDatanode(sm, token, conf);
server.start();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
LocatedBlock fakeBlock = new LocatedBlock(b, DatanodeInfo.EMPTY_ARRAY);
fakeBlock.setBlockToken(token);
// Create another RPC proxy with the same configuration - this will never
// attempt to connect anywhere -- but it causes the refcount on the
// RPC "Client" object to stay above 0 such that RPC.stopProxy doesn't
// actually close the TCP connections to the real target DN.
ClientDatanodeProtocol proxyToNoWhere = RPC.getProxy(
ClientDatanodeProtocol.class, ClientDatanodeProtocol.versionID,
new InetSocketAddress("1.1.1.1", 1),
UserGroupInformation.createRemoteUser("junk"), conf,
NetUtils.getDefaultSocketFactory(conf));
ClientDatanodeProtocol proxy = null;
int fdsAtStart = countOpenFileDescriptors();
try {
long endTime = Time.now() + 3000;
while (Time.now() < endTime) {
proxy = DFSUtilClient.createClientDatanodeProtocolProxy(fakeDnId, conf, 1000,
false, fakeBlock);
assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
if (proxy != null) {
RPC.stopProxy(proxy);
}
LOG.info("Num open fds:" + countOpenFileDescriptors());
}
int fdsAtEnd = countOpenFileDescriptors();
if (fdsAtEnd - fdsAtStart > 50) {
fail("Leaked " + (fdsAtEnd - fdsAtStart) + " fds!");
}
} finally {
server.stop();
}
RPC.stopProxy(proxyToNoWhere);
}
@Test
public void testBlockTokenRpcLeakLegacy() throws Exception {
testBlockTokenRpcLeak(false);
}
@Test
public void testBlockTokenRpcLeakProtobuf() throws Exception {
testBlockTokenRpcLeak(true);
}
/**
* @return the current number of file descriptors open by this process.
*/
private static int countOpenFileDescriptors() {
return FD_DIR.list().length;
}
/**
* Test {@link BlockPoolTokenSecretManager}
*/
private void testBlockPoolTokenSecretManager(boolean enableProtobuf)
throws Exception {
BlockPoolTokenSecretManager bpMgr = new BlockPoolTokenSecretManager();
// Test BlockPoolSecretManager with upto 10 block pools
for (int i = 0; i < 10; i++) {
String bpid = Integer.toString(i);
BlockTokenSecretManager masterHandler = new BlockTokenSecretManager(
blockKeyUpdateInterval, blockTokenLifetime, 0, 1, "fake-pool", null,
enableProtobuf);
BlockTokenSecretManager slaveHandler = new BlockTokenSecretManager(
blockKeyUpdateInterval, blockTokenLifetime, "fake-pool", null,
enableProtobuf);
bpMgr.addBlockPool(bpid, slaveHandler);
ExportedBlockKeys keys = masterHandler.exportKeys();
bpMgr.addKeys(bpid, keys, true);
String[] storageIds = new String[] {"DS-9001"};
tokenGenerationAndVerification(masterHandler, bpMgr.get(bpid),
new StorageType[]{StorageType.DEFAULT}, storageIds);
tokenGenerationAndVerification(masterHandler, bpMgr.get(bpid), null,
null);
// Test key updating
masterHandler.updateKeys();
tokenGenerationAndVerification(masterHandler, bpMgr.get(bpid),
new StorageType[]{StorageType.DEFAULT}, storageIds);
tokenGenerationAndVerification(masterHandler, bpMgr.get(bpid), null,
null);
keys = masterHandler.exportKeys();
bpMgr.addKeys(bpid, keys, true);
tokenGenerationAndVerification(masterHandler, bpMgr.get(bpid),
new StorageType[]{StorageType.DEFAULT}, new String[]{"DS-9001"});
tokenGenerationAndVerification(masterHandler, bpMgr.get(bpid), null,
null);
}
}
@Test
public void testBlockPoolTokenSecretManagerLegacy() throws Exception {
testBlockPoolTokenSecretManager(false);
}
@Test
public void testBlockPoolTokenSecretManagerProtobuf() throws Exception {
testBlockPoolTokenSecretManager(true);
}
/**
* This test writes a file and gets the block locations without closing the
* file, and tests the block token in the last block. Block token is verified
* by ensuring it is of correct kind.
*
* @throws IOException
* @throws InterruptedException
*/
private void testBlockTokenInLastLocatedBlock(boolean enableProtobuf)
throws IOException, InterruptedException {
Configuration conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512);
conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_PROTOBUF_ENABLE,
enableProtobuf);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(1).build();
cluster.waitActive();
try {
FileSystem fs = cluster.getFileSystem();
String fileName = "/testBlockTokenInLastLocatedBlock";
Path filePath = new Path(fileName);
FSDataOutputStream out = fs.create(filePath, (short) 1);
out.write(new byte[1000]);
// ensure that the first block is written out (see FSOutputSummer#flush)
out.flush();
LocatedBlocks locatedBlocks = cluster.getNameNodeRpc().getBlockLocations(
fileName, 0, 1000);
while (locatedBlocks.getLastLocatedBlock() == null) {
Thread.sleep(100);
locatedBlocks = cluster.getNameNodeRpc().getBlockLocations(fileName, 0,
1000);
}
Token<BlockTokenIdentifier> token = locatedBlocks.getLastLocatedBlock()
.getBlockToken();
assertEquals(BlockTokenIdentifier.KIND_NAME, token.getKind());
out.close();
} finally {
cluster.shutdown();
}
}
@Test
public void testBlockTokenInLastLocatedBlockLegacy() throws IOException,
InterruptedException {
testBlockTokenInLastLocatedBlock(false);
}
@Test
public void testBlockTokenInLastLocatedBlockProtobuf() throws IOException,
InterruptedException {
testBlockTokenInLastLocatedBlock(true);
}
@Test
public void testLegacyBlockTokenBytesIsLegacy() throws IOException {
final boolean useProto = false;
BlockTokenSecretManager sm = new BlockTokenSecretManager(
blockKeyUpdateInterval, blockTokenLifetime, 0, 1, "fake-pool", null,
useProto);
Token<BlockTokenIdentifier> token = sm.generateToken(block1,
EnumSet.noneOf(BlockTokenIdentifier.AccessMode.class),
new StorageType[]{StorageType.DEFAULT}, new String[0]);
final byte[] tokenBytes = token.getIdentifier();
BlockTokenIdentifier legacyToken = new BlockTokenIdentifier();
BlockTokenIdentifier protobufToken = new BlockTokenIdentifier();
BlockTokenIdentifier readToken = new BlockTokenIdentifier();
DataInputBuffer dib = new DataInputBuffer();
dib.reset(tokenBytes, tokenBytes.length);
legacyToken.readFieldsLegacy(dib);
boolean invalidProtobufMessage = false;
try {
dib.reset(tokenBytes, tokenBytes.length);
protobufToken.readFieldsProtobuf(dib);
} catch (IOException e) {
invalidProtobufMessage = true;
}
assertTrue(invalidProtobufMessage);
dib.reset(tokenBytes, tokenBytes.length);
readToken.readFields(dib);
// Using legacy, the token parses as a legacy block token and not a protobuf
assertEquals(legacyToken, readToken);
assertNotEquals(protobufToken, readToken);
}
@Test
public void testEmptyLegacyBlockTokenBytesIsLegacy() throws IOException {
BlockTokenIdentifier emptyIdent = new BlockTokenIdentifier();
DataOutputBuffer dob = new DataOutputBuffer(4096);
DataInputBuffer dib = new DataInputBuffer();
emptyIdent.writeLegacy(dob);
byte[] emptyIdentBytes = Arrays.copyOf(dob.getData(), dob.getLength());
BlockTokenIdentifier legacyToken = new BlockTokenIdentifier();
BlockTokenIdentifier protobufToken = new BlockTokenIdentifier();
BlockTokenIdentifier readToken = new BlockTokenIdentifier();
dib.reset(emptyIdentBytes, emptyIdentBytes.length);
legacyToken.readFieldsLegacy(dib);
boolean invalidProtobufMessage = false;
try {
dib.reset(emptyIdentBytes, emptyIdentBytes.length);
protobufToken.readFieldsProtobuf(dib);
} catch (IOException e) {
invalidProtobufMessage = true;
}
assertTrue(invalidProtobufMessage);
dib.reset(emptyIdentBytes, emptyIdentBytes.length);
readToken.readFields(dib);
}
/**
* If the NameNode predates HDFS-6708 and HDFS-9807, then the LocatedBlocks
* that it returns to the client will have block tokens that don't include
* the storage types or storage IDs. Simulate this by setting the storage
* type and storage ID to null to test backwards compatibility.
*/
@Test
public void testLegacyBlockTokenWithoutStorages() throws IOException,
IllegalAccessException {
BlockTokenIdentifier identifier = new BlockTokenIdentifier("user",
"blockpool", 123,
EnumSet.allOf(BlockTokenIdentifier.AccessMode.class), null, null,
false);
FieldUtils.writeField(identifier, "storageTypes", null, true);
FieldUtils.writeField(identifier, "storageIds", null, true);
testCraftedBlockTokenIdentifier(identifier, false, false, false);
}
@Test
public void testProtobufBlockTokenBytesIsProtobuf() throws IOException {
final boolean useProto = true;
BlockTokenSecretManager sm = new BlockTokenSecretManager(
blockKeyUpdateInterval, blockTokenLifetime, 0, 1, "fake-pool", null,
useProto);
Token<BlockTokenIdentifier> token = sm.generateToken(block1,
EnumSet.noneOf(BlockTokenIdentifier.AccessMode.class),
StorageType.EMPTY_ARRAY, new String[0]);
final byte[] tokenBytes = token.getIdentifier();
BlockTokenIdentifier legacyToken = new BlockTokenIdentifier();
BlockTokenIdentifier protobufToken = new BlockTokenIdentifier();
BlockTokenIdentifier readToken = new BlockTokenIdentifier();
DataInputBuffer dib = new DataInputBuffer();
/* We receive NegativeArraySizeException because we didn't call
* readFields and instead try to parse this directly as a legacy
* BlockTokenIdentifier.
*
* Note: because the parsing depends on the expiryDate which is based on
* `Time.now()` it can sometimes fail with IOException and sometimes with
* NegativeArraySizeException.
*/
boolean invalidLegacyMessage = false;
try {
dib.reset(tokenBytes, tokenBytes.length);
legacyToken.readFieldsLegacy(dib);
} catch (IOException | NegativeArraySizeException e) {
invalidLegacyMessage = true;
}
assertTrue(invalidLegacyMessage);
dib.reset(tokenBytes, tokenBytes.length);
protobufToken.readFieldsProtobuf(dib);
dib.reset(tokenBytes, tokenBytes.length);
readToken.readFields(dib);
// Using protobuf, the token parses as a protobuf and not a legacy block
// token
assertNotEquals(legacyToken, readToken);
assertEquals(protobufToken, readToken);
}
private void testCraftedBlockTokenIdentifier(
BlockTokenIdentifier identifier, boolean expectIOE,
boolean expectRTE, boolean isProtobuf) throws IOException {
DataOutputBuffer dob = new DataOutputBuffer(4096);
DataInputBuffer dib = new DataInputBuffer();
if (isProtobuf) {
identifier.writeProtobuf(dob);
} else {
identifier.writeLegacy(dob);
}
byte[] identBytes = Arrays.copyOf(dob.getData(), dob.getLength());
BlockTokenIdentifier legacyToken = new BlockTokenIdentifier();
BlockTokenIdentifier protobufToken = new BlockTokenIdentifier();
BlockTokenIdentifier readToken = new BlockTokenIdentifier();
boolean invalidLegacyMessage = false;
try {
dib.reset(identBytes, identBytes.length);
legacyToken.readFieldsLegacy(dib);
} catch (IOException e) {
if (!expectIOE) {
fail("Received IOException but it was not expected.");
}
invalidLegacyMessage = true;
} catch (RuntimeException e) {
if (!expectRTE) {
fail("Received RuntimeException but it was not expected.");
}
invalidLegacyMessage = true;
}
if (isProtobuf) {
assertTrue(invalidLegacyMessage);
dib.reset(identBytes, identBytes.length);
protobufToken.readFieldsProtobuf(dib);
dib.reset(identBytes, identBytes.length);
readToken.readFields(dib);
assertEquals(identifier, readToken);
assertEquals(protobufToken, readToken);
}
}
@Test
public void testEmptyProtobufBlockTokenBytesIsProtobuf() throws IOException {
// Empty BlockTokenIdentifiers throw IOException
BlockTokenIdentifier identifier = new BlockTokenIdentifier();
testCraftedBlockTokenIdentifier(identifier, true, false, true);
}
@Test
public void testCraftedProtobufBlockTokenBytesIsProtobuf() throws
IOException {
/* Parsing BlockTokenIdentifier with expiryDate
* 2017-02-09 00:12:35,072+0100 will throw IOException.
* However, expiryDate of
* 2017-02-09 00:12:35,071+0100 will throw NegativeArraySizeException.
*/
BlockTokenIdentifier identifier = new BlockTokenIdentifier("user",
"blockpool", 123, EnumSet.allOf(BlockTokenIdentifier.AccessMode.class),
new StorageType[]{StorageType.DISK, StorageType.ARCHIVE},
new String[] {"fake-storage-id"}, true);
Calendar cal = new GregorianCalendar();
cal.set(2017, 1, 9, 0, 12, 35);
long datetime = cal.getTimeInMillis();
datetime = ((datetime / 1000) * 1000); // strip milliseconds.
datetime = datetime + 71; // 2017-02-09 00:12:35,071+0100
identifier.setExpiryDate(datetime);
testCraftedBlockTokenIdentifier(identifier, false, true, true);
datetime += 1; // 2017-02-09 00:12:35,072+0100
identifier.setExpiryDate(datetime);
testCraftedBlockTokenIdentifier(identifier, true, false, true);
}
private BlockTokenIdentifier writeAndReadBlockToken(
BlockTokenIdentifier identifier) throws IOException {
DataOutputBuffer dob = new DataOutputBuffer(4096);
DataInputBuffer dib = new DataInputBuffer();
identifier.write(dob);
byte[] identBytes = Arrays.copyOf(dob.getData(), dob.getLength());
BlockTokenIdentifier readToken = new BlockTokenIdentifier();
dib.reset(identBytes, identBytes.length);
readToken.readFields(dib);
assertEquals(identifier, readToken);
return readToken;
}
@Test
public void testEmptyBlockTokenSerialization() throws IOException {
BlockTokenIdentifier ident = new BlockTokenIdentifier();
BlockTokenIdentifier ret = writeAndReadBlockToken(ident);
assertEquals(ret.getExpiryDate(), 0);
assertEquals(ret.getKeyId(), 0);
assertEquals(ret.getUserId(), null);
assertEquals(ret.getBlockPoolId(), null);
assertEquals(ret.getBlockId(), 0);
assertEquals(ret.getAccessModes(),
EnumSet.noneOf(BlockTokenIdentifier.AccessMode.class));
assertArrayEquals(ret.getStorageTypes(), StorageType.EMPTY_ARRAY);
}
private void testBlockTokenSerialization(boolean useProto) throws
IOException {
EnumSet<BlockTokenIdentifier.AccessMode> accessModes =
EnumSet.allOf(BlockTokenIdentifier.AccessMode.class);
StorageType[] storageTypes =
new StorageType[]{StorageType.RAM_DISK, StorageType.SSD,
StorageType.DISK, StorageType.ARCHIVE, StorageType.NVDIMM};
BlockTokenIdentifier ident = new BlockTokenIdentifier("user", "bpool",
123, accessModes, storageTypes, new String[] {"fake-storage-id"},
useProto);
ident.setExpiryDate(1487080345L);
BlockTokenIdentifier ret = writeAndReadBlockToken(ident);
assertEquals(ret.getExpiryDate(), 1487080345L);
assertEquals(ret.getKeyId(), 0);
assertEquals(ret.getUserId(), "user");
assertEquals(ret.getBlockPoolId(), "bpool");
assertEquals(ret.getBlockId(), 123);
assertEquals(ret.getAccessModes(),
EnumSet.allOf(BlockTokenIdentifier.AccessMode.class));
assertArrayEquals(ret.getStorageTypes(), storageTypes);
assertArrayEquals(ret.getStorageIds(), new String[] {"fake-storage-id"});
}
@Test
public void testBlockTokenSerialization() throws IOException {
testBlockTokenSerialization(false);
testBlockTokenSerialization(true);
}
private void testBadStorageIDCheckAccess(boolean enableProtobuf)
throws IOException {
BlockTokenSecretManager sm = new BlockTokenSecretManager(
blockKeyUpdateInterval, blockTokenLifetime, 0, 1, "fake-pool", null,
enableProtobuf);
StorageType[] storageTypes = new StorageType[] {StorageType.DISK};
String[] storageIds = new String[] {"fake-storage-id"};
String[] badStorageIds = new String[] {"BAD-STORAGE-ID"};
String[] emptyStorageIds = new String[] {};
BlockTokenIdentifier.AccessMode mode = BlockTokenIdentifier.AccessMode.READ;
BlockTokenIdentifier id = generateTokenId(sm, block3,
EnumSet.of(mode), storageTypes, storageIds);
sm.checkAccess(id, null, block3, mode, storageTypes, storageIds);
try {
sm.checkAccess(id, null, block3, mode, storageTypes, badStorageIds);
fail("Expected strict BlockTokenSecretManager to fail");
} catch(SecretManager.InvalidToken e) {
}
// We allow empty storageId tokens for backwards compatibility. i.e. old
// clients may not have known to pass the storageId parameter to the
// writeBlock api.
sm.checkAccess(id, null, block3, mode, storageTypes,
emptyStorageIds);
sm.checkAccess(id, null, block3, mode, storageTypes,
null);
sm.checkAccess(id, null, block3, mode, storageTypes);
sm.checkAccess(id, null, block3, mode);
}
@Test
public void testBadStorageIDCheckAccess() throws IOException {
testBadStorageIDCheckAccess(false);
testBadStorageIDCheckAccess(true);
}
/**
* Verify that block token serialNo is always within the range designated to
* to the NameNode.
*/
@Test
public void testBlockTokenRanges() throws IOException {
final int interval = 1024;
final int numNNs = Integer.MAX_VALUE / interval;
for(int nnIdx = 0; nnIdx < 64; nnIdx++) {
BlockTokenSecretManager sm = new BlockTokenSecretManager(
blockKeyUpdateInterval, blockTokenLifetime, nnIdx, numNNs,
"fake-pool", null, false);
int rangeStart = nnIdx * interval;
for(int i = 0; i < interval * 3; i++) {
int serialNo = sm.getSerialNoForTesting();
assertTrue(serialNo >= rangeStart && serialNo < (rangeStart + interval),
"serialNo " + serialNo + " is not in the designated range: [" + rangeStart
+ ", " + (rangeStart + interval) + ")");
sm.updateKeys();
}
}
}
@Test
public void testRetrievePasswordWithUnknownFields() throws IOException {
BlockTokenIdentifier id = new BlockTokenIdentifier();
BlockTokenIdentifier spyId = Mockito.spy(id);
Mockito.doAnswer(new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
DataOutput out = (DataOutput) invocation.getArguments()[0];
invocation.callRealMethod();
// write something at the end that BlockTokenIdentifier#readFields()
// will ignore, but which is still a part of the password
out.write(7);
return null;
}
}).when(spyId).write(Mockito.any());
BlockTokenSecretManager sm =
new BlockTokenSecretManager(blockKeyUpdateInterval, blockTokenLifetime,
0, 1, "fake-pool", null, false);
// master create password
byte[] password = sm.createPassword(spyId);
BlockTokenIdentifier slaveId = new BlockTokenIdentifier();
slaveId.readFields(
new DataInputStream(new ByteArrayInputStream(spyId.getBytes())));
// slave retrieve password
assertArrayEquals(password, sm.retrievePassword(slaveId));
}
@Test
public void testRetrievePasswordWithRecognizableFieldsOnly()
throws IOException {
BlockTokenSecretManager sm =
new BlockTokenSecretManager(blockKeyUpdateInterval, blockTokenLifetime,
0, 1, "fake-pool", null, false);
// master create password
BlockTokenIdentifier masterId = new BlockTokenIdentifier();
byte[] password = sm.createPassword(masterId);
// set cache to null, so that master getBytes() were only recognizable bytes
masterId.setExpiryDate(masterId.getExpiryDate());
BlockTokenIdentifier slaveId = new BlockTokenIdentifier();
slaveId.readFields(
new DataInputStream(new ByteArrayInputStream(masterId.getBytes())));
assertArrayEquals(password, sm.retrievePassword(slaveId));
}
/** Test for last in-progress block token expiry.
* 1. Write file with one block which is in-progress.
* 2. Open input stream and close the output stream.
* 3. Wait for block token expiration and read the data.
* 4. Read should be success.
*/
@Test
public void testLastLocatedBlockTokenExpiry()
throws IOException, InterruptedException {
Configuration conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(1).build()) {
cluster.waitClusterUp();
final NameNode nn = cluster.getNameNode();
final BlockManager bm = nn.getNamesystem().getBlockManager();
final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
// set a short token lifetime (1 second)
SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
DistributedFileSystem fs = cluster.getFileSystem();
Path p = new Path("/tmp/abc.log");
FSDataOutputStream out = fs.create(p);
byte[] data = "hello\n".getBytes(StandardCharsets.UTF_8);
out.write(data);
out.hflush();
FSDataInputStream in = fs.open(p);
out.close();
// wait for last block token to expire
Thread.sleep(2000L);
byte[] readData = new byte[data.length];
long startTime = System.currentTimeMillis();
in.read(readData);
// DFSInputStream#refetchLocations() minimum wait for 1sec to refetch
// complete located blocks.
assertTrue(1000L > (System.currentTimeMillis() - startTime),
"Should not wait for refetch complete located blocks");
}
}
}
| GetLengthAnswer |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/scheduling/annotation/AsyncAnnotationBeanPostProcessorTests.java | {
"start": 11581,
"end": 11732
} | class ____ implements Executor {
@Override
public void execute(Runnable r) {
r.run();
}
}
@Configuration
@EnableAsync
static | DirectExecutor |
java | micronaut-projects__micronaut-core | json-core/src/main/java/io/micronaut/json/JsonMapperSupplier.java | {
"start": 683,
"end": 783
} | interface ____ resolving a {@link JsonMapper}.
*
* @author graemerocher
* @since 4.0.0
*/
public | for |
java | quarkusio__quarkus | extensions/oidc-db-token-state-manager/deployment/src/test/java/io/quarkus/oidc/db/token/state/manager/GreetingEntity.java | {
"start": 234,
"end": 324
} | class ____ {
@Id
@GeneratedValue
Long id;
String greeting;
}
| GreetingEntity |
java | spring-projects__spring-framework | spring-beans/src/main/java/org/springframework/beans/factory/config/AutowireCapableBeanFactory.java | {
"start": 17008,
"end": 20059
} | interface ____ superclass
* @return the bean name plus bean instance
* @throws NoSuchBeanDefinitionException if no matching bean was found
* @throws NoUniqueBeanDefinitionException if more than one matching bean was found
* @throws BeansException if the bean could not be created
* @since 4.3.3
* @see #getBean(Class)
*/
<T> NamedBeanHolder<T> resolveNamedBean(Class<T> requiredType) throws BeansException;
/**
* Resolve a bean instance for the given bean name, providing a dependency descriptor
* for exposure to target factory methods.
* <p>This is effectively a variant of {@link #getBean(String, Class)} which supports
* factory methods with an {@link org.springframework.beans.factory.InjectionPoint}
* argument.
* @param name the name of the bean to look up
* @param descriptor the dependency descriptor for the requesting injection point
* @return the corresponding bean instance
* @throws NoSuchBeanDefinitionException if there is no bean with the specified name
* @throws BeansException if the bean could not be created
* @since 5.1.5
* @see #getBean(String, Class)
*/
Object resolveBeanByName(String name, DependencyDescriptor descriptor) throws BeansException;
/**
* Resolve the specified dependency against the beans defined in this factory.
* @param descriptor the descriptor for the dependency (field/method/constructor)
* @param requestingBeanName the name of the bean which declares the given dependency
* @return the resolved object, or {@code null} if none found
* @throws NoSuchBeanDefinitionException if no matching bean was found
* @throws NoUniqueBeanDefinitionException if more than one matching bean was found
* @throws BeansException if dependency resolution failed for any other reason
* @since 2.5
* @see #resolveDependency(DependencyDescriptor, String, Set, TypeConverter)
*/
@Nullable Object resolveDependency(DependencyDescriptor descriptor, @Nullable String requestingBeanName) throws BeansException;
/**
* Resolve the specified dependency against the beans defined in this factory.
* @param descriptor the descriptor for the dependency (field/method/constructor)
* @param requestingBeanName the name of the bean which declares the given dependency
* @param autowiredBeanNames a Set that all names of autowired beans (used for
* resolving the given dependency) are supposed to be added to
* @param typeConverter the TypeConverter to use for populating arrays and collections
* @return the resolved object, or {@code null} if none found
* @throws NoSuchBeanDefinitionException if no matching bean was found
* @throws NoUniqueBeanDefinitionException if more than one matching bean was found
* @throws BeansException if dependency resolution failed for any other reason
* @since 2.5
* @see DependencyDescriptor
*/
@Nullable Object resolveDependency(DependencyDescriptor descriptor, @Nullable String requestingBeanName,
@Nullable Set<String> autowiredBeanNames, @Nullable TypeConverter typeConverter) throws BeansException;
}
| or |
java | apache__maven | impl/maven-core/src/main/java/org/apache/maven/eventspy/EventSpy.java | {
"start": 1139,
"end": 1368
} | interface ____ calls their {@link #init(Context)} method. <em>Note:</em>
* Implementors are strongly advised to inherit from {@link AbstractEventSpy} instead of directly implementing this
* interface.
* @since 3.0.2
*/
public | and |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/results/RankedDocsResultsTests.java | {
"start": 677,
"end": 2987
} | class ____ extends AbstractChunkedBWCSerializationTestCase<RankedDocsResults> {
@Override
protected Writeable.Reader<RankedDocsResults> instanceReader() {
return RankedDocsResults::new;
}
@Override
protected RankedDocsResults createTestInstance() {
return createRandom();
}
public static RankedDocsResults createRandom() {
return new RankedDocsResults(randomList(0, 10, RankedDocsResultsTests::createRandomDoc));
}
public static RankedDocsResults.RankedDoc createRandomDoc() {
return new RankedDocsResults.RankedDoc(randomIntBetween(0, 100), randomFloat(), randomBoolean() ? null : randomAlphaOfLength(10));
}
public void test_asMap() {
var index = randomIntBetween(0, 100);
var score = randomFloat();
var mapNullText = new RankedDocsResults.RankedDoc(index, score, null).asMap();
assertThat(mapNullText, Matchers.is(Map.of("ranked_doc", Map.of("index", index, "relevance_score", score))));
var mapWithText = new RankedDocsResults.RankedDoc(index, score, "Sample text").asMap();
assertThat(mapWithText, Matchers.is(Map.of("ranked_doc", Map.of("index", index, "relevance_score", score, "text", "Sample text"))));
}
@Override
protected RankedDocsResults mutateInstance(RankedDocsResults instance) throws IOException {
List<RankedDocsResults.RankedDoc> copy = new ArrayList<>(List.copyOf(instance.getRankedDocs()));
copy.add(createRandomDoc());
return new RankedDocsResults(copy);
}
@Override
protected RankedDocsResults mutateInstanceForVersion(RankedDocsResults instance, TransportVersion fromVersion) {
return instance;
}
@Override
protected RankedDocsResults doParseInstance(XContentParser parser) throws IOException {
return RankedDocsResults.createParser(true).apply(parser, null);
}
public record RerankExpectation(Map<String, Object> rankedDocFields) {}
public static Map<String, Object> buildExpectationRerank(List<RerankExpectation> rerank) {
return Map.of(
RankedDocsResults.RERANK,
rerank.stream().map(rerankExpectation -> Map.of(RankedDocsResults.RankedDoc.NAME, rerankExpectation.rankedDocFields)).toList()
);
}
}
| RankedDocsResultsTests |
java | apache__camel | components/camel-vertx/camel-vertx-http/src/test/java/org/apache/camel/component/vertx/http/VertxHttpProducerTest.java | {
"start": 1273,
"end": 4144
} | class ____ extends VertxHttpTestSupport {
@Test
public void testVertxHttpProducer() {
String expectedBody = "Hello World";
Exchange exchange = template.request(getProducerUri(), null);
Message message = exchange.getMessage();
Map<String, Object> headers = message.getHeaders();
assertTrue(headers.containsKey("Connection"));
assertTrue(headers.containsKey("Content-Length"));
assertTrue(headers.containsKey("user-agent"));
assertEquals(String.valueOf(expectedBody.length()), headers.get(Exchange.CONTENT_LENGTH));
assertEquals(200, headers.get(Exchange.HTTP_RESPONSE_CODE));
assertEquals("OK", headers.get(Exchange.HTTP_RESPONSE_TEXT));
assertEquals(expectedBody, message.getBody(String.class));
}
@Test
public void testVertxHttpProducerWithContentType() {
String expectedBody = "{\"foo\": \"bar\"}";
Exchange exchange = template.request(getProducerUri() + "/content/type", new Processor() {
@Override
public void process(Exchange exchange) {
exchange.getMessage().setHeader(Exchange.CONTENT_TYPE, "application/json; charset=iso-8859-4");
}
});
Message message = exchange.getMessage();
Map<String, Object> headers = message.getHeaders();
assertTrue(headers.containsKey("Connection"));
assertTrue(headers.containsKey("Content-Length"));
assertTrue(headers.containsKey("user-agent"));
assertEquals(String.valueOf(expectedBody.length()), headers.get(Exchange.CONTENT_LENGTH));
assertEquals(200, headers.get(Exchange.HTTP_RESPONSE_CODE));
assertEquals("OK", headers.get(Exchange.HTTP_RESPONSE_TEXT));
assertEquals(expectedBody, message.getBody(String.class));
}
@Override
protected RoutesBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from(getTestServerUri())
.setBody(constant("Hello World"));
from(getTestServerUri() + "/content/type")
.process(new Processor() {
@Override
public void process(Exchange exchange) {
String contentType = ExchangeHelper.getContentType(exchange);
if (!contentType.startsWith("application/json")) {
throw new IllegalStateException("Unexpected Content-Type header");
}
Message message = exchange.getMessage();
message.setBody("{\"foo\": \"bar\"}");
}
});
}
};
}
}
| VertxHttpProducerTest |
java | spring-projects__spring-boot | buildpack/spring-boot-buildpack-platform/src/main/java/org/springframework/boot/buildpack/platform/build/PrintStreamBuildLog.java | {
"start": 1054,
"end": 1440
} | class ____ extends AbstractBuildLog {
private final PrintStream out;
PrintStreamBuildLog(PrintStream out) {
this.out = out;
}
@Override
protected void log(String message) {
this.out.println(message);
}
@Override
protected Consumer<TotalProgressEvent> getProgressConsumer(String prefix) {
return new TotalProgressBar(prefix, '.', false, this.out);
}
}
| PrintStreamBuildLog |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/updatemethods/ErroneousOrganizationMapper2.java | {
"start": 1136,
"end": 1306
} | class ____ extends org.mapstruct.ap.test.updatemethods.DepartmentEntity {
private DepartmentEntity() {
super( null );
}
}
}
| DepartmentEntity |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/junit4/rules/FailingBeforeAndAfterMethodsSpringRuleTests.java | {
"start": 1423,
"end": 1676
} | class ____ an extension of {@link FailingBeforeAndAfterMethodsSpringRunnerTests}
* that has been modified to use {@link SpringClassRule} and
* {@link SpringMethodRule}.
*
* @author Sam Brannen
* @since 4.2
*/
@SuppressWarnings("deprecation")
public | is |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/engine/extension/TestInstanceFactoryTests.java | {
"start": 23052,
"end": 23209
} | class ____ {
@Test
void outerTest() {
}
@Nested
@ExtendWith(BarInstanceFactory.class)
| MultipleFactoriesRegisteredWithinNestedClassStructureTestCase |
java | apache__hadoop | hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/KeyProvider.java | {
"start": 1067,
"end": 1716
} | interface ____ {
/**
* Key providers must implement this method. Given a list of configuration
* parameters for the specified Azure storage account, retrieve the plaintext
* storage account key.
*
* @param accountName
* the storage account name
* @param conf
* Hadoop configuration parameters
* @return the plaintext storage account key
* @throws KeyProviderException Thrown if there is a problem instantiating a
* KeyProvider or retrieving a key using a KeyProvider object.
*/
String getStorageAccountKey(String accountName, Configuration conf)
throws KeyProviderException;
}
| KeyProvider |
java | apache__rocketmq | store/src/main/java/org/apache/rocketmq/store/DefaultMessageFilter.java | {
"start": 969,
"end": 1801
} | class ____ implements MessageFilter {
private SubscriptionData subscriptionData;
public DefaultMessageFilter(final SubscriptionData subscriptionData) {
this.subscriptionData = subscriptionData;
}
@Override
public boolean isMatchedByConsumeQueue(Long tagsCode, ConsumeQueueExt.CqExtUnit cqExtUnit) {
if (null == tagsCode || null == subscriptionData) {
return true;
}
if (subscriptionData.isClassFilterMode()) {
return true;
}
return subscriptionData.getSubString().equals(SubscriptionData.SUB_ALL)
|| subscriptionData.getCodeSet().contains(tagsCode.intValue());
}
@Override
public boolean isMatchedByCommitLog(ByteBuffer msgBuffer, Map<String, String> properties) {
return true;
}
}
| DefaultMessageFilter |
java | apache__spark | sql/core/src/test/java/test/org/apache/spark/sql/JavaBeanDeserializationSuite.java | {
"start": 9744,
"end": 10716
} | class ____ {
private int id;
private Map<String, Interval> intervals;
public MapRecord() { }
MapRecord(int id, Map<String, Interval> intervals) {
this.id = id;
this.intervals = intervals;
}
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
public Map<String, Interval> getIntervals() {
return intervals;
}
public void setIntervals(Map<String, Interval> intervals) {
this.intervals = intervals;
}
@Override
public int hashCode() {
return id ^ Objects.hashCode(intervals);
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof MapRecord other)) return false;
return (other.id == this.id) && Objects.equals(other.intervals, this.intervals);
}
@Override
public String toString() {
return String.format("{ id: %d, intervals: %s }", id, intervals);
}
}
public static | MapRecord |
java | apache__spark | common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/protocol/StreamHandle.java | {
"start": 1236,
"end": 2333
} | class ____ extends BlockTransferMessage {
public final long streamId;
public final int numChunks;
public StreamHandle(long streamId, int numChunks) {
this.streamId = streamId;
this.numChunks = numChunks;
}
@Override
protected Type type() { return Type.STREAM_HANDLE; }
@Override
public int hashCode() {
return Objects.hash(streamId, numChunks);
}
@Override
public String toString() {
return "StreamHandle[streamId=" + streamId + ",numChunks=" + numChunks + "]";
}
@Override
public boolean equals(Object other) {
if (other instanceof StreamHandle o) {
return Objects.equals(streamId, o.streamId)
&& Objects.equals(numChunks, o.numChunks);
}
return false;
}
@Override
public int encodedLength() {
return 8 + 4;
}
@Override
public void encode(ByteBuf buf) {
buf.writeLong(streamId);
buf.writeInt(numChunks);
}
public static StreamHandle decode(ByteBuf buf) {
long streamId = buf.readLong();
int numChunks = buf.readInt();
return new StreamHandle(streamId, numChunks);
}
}
| StreamHandle |
java | elastic__elasticsearch | x-pack/plugin/transform/qa/multi-cluster-tests-with-security/src/test/java/org/elasticsearch/multi_cluster/MultiClusterYamlTestSuiteIT.java | {
"start": 934,
"end": 1856
} | class ____ extends ESClientYamlSuiteTestCase {
private static final String USER = "test_user";
private static final String PASS = "x-pack-test-password";
@Override
protected boolean resetFeatureStates() {
return false;
}
@Override
protected boolean preserveIndicesUponCompletion() {
return true;
}
public MultiClusterYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
super(testCandidate);
}
@ParametersFactory
public static Iterable<Object[]> parameters() throws Exception {
return createParameters();
}
@Override
protected Settings restClientSettings() {
String token = basicAuthHeaderValue(USER, new SecureString(PASS.toCharArray()));
return Settings.builder().put(super.restClientSettings()).put(ThreadContext.PREFIX + ".Authorization", token).build();
}
}
| MultiClusterYamlTestSuiteIT |
java | apache__kafka | storage/src/test/java/org/apache/kafka/tiered/storage/integration/AlterLogDirTest.java | {
"start": 1197,
"end": 3178
} | class ____ extends TieredStorageTestHarness {
@Override
public int brokerCount() {
return 2;
}
@Override
protected void writeTestSpecifications(TieredStorageTestBuilder builder) {
final String topicB = "topicB";
final int p0 = 0;
final int partitionCount = 1;
final int replicationFactor = 2;
final int maxBatchCountPerSegment = 1;
final boolean enableRemoteLogStorage = true;
final int broker0 = 0;
final int broker1 = 1;
builder
// create topicB with 1 partition and 1 RF
.createTopic(topicB, partitionCount, replicationFactor, maxBatchCountPerSegment,
mkMap(mkEntry(p0, List.of(broker1, broker0))), enableRemoteLogStorage)
// send records to partition 0
.expectSegmentToBeOffloaded(broker1, topicB, p0, 0, new KeyValueSpec("k0", "v0"))
.expectSegmentToBeOffloaded(broker1, topicB, p0, 1, new KeyValueSpec("k1", "v1"))
.expectEarliestLocalOffsetInLogDirectory(topicB, p0, 2L)
.produce(topicB, p0, new KeyValueSpec("k0", "v0"), new KeyValueSpec("k1", "v1"),
new KeyValueSpec("k2", "v2"))
// alter dir within the replica, we only expect one replicaId
.alterLogDir(topicB, p0, broker0)
// make sure the altered replica can still be elected as the leader
.expectLeader(topicB, p0, broker0, true)
// produce some more events and verify the earliest local offset
.expectEarliestLocalOffsetInLogDirectory(topicB, p0, 3L)
.produce(topicB, p0, new KeyValueSpec("k3", "v3"))
// consume from the beginning of the topic to read data from local and remote storage
.expectFetchFromTieredStorage(broker0, topicB, p0, 3)
.consume(topicB, p0, 0L, 4, 3);
}
}
| AlterLogDirTest |
java | micronaut-projects__micronaut-core | core/src/main/java/io/micronaut/core/reflect/ReflectionUtils.java | {
"start": 3265,
"end": 3464
} | class ____ hierarchy is to be retrieved. Must not be null.
* @return A set of classes and interfaces representing the entire hierarchy of the given class,
* including the provided | whose |
java | spring-projects__spring-boot | loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/launch/LaunchedClassLoader.java | {
"start": 2223,
"end": 5903
} | class ____ for delegation
*/
public LaunchedClassLoader(boolean exploded, Archive rootArchive, URL[] urls, ClassLoader parent) {
super(urls, parent);
this.exploded = exploded;
this.rootArchive = rootArchive;
}
@Override
protected Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
if (name.startsWith(JAR_MODE_PACKAGE_PREFIX) || name.equals(JAR_MODE_RUNNER_CLASS_NAME)) {
try {
Class<?> result = loadClassInLaunchedClassLoader(name);
if (resolve) {
resolveClass(result);
}
return result;
}
catch (ClassNotFoundException ex) {
// Ignore
}
}
return super.loadClass(name, resolve);
}
private Class<?> loadClassInLaunchedClassLoader(String name) throws ClassNotFoundException {
try {
String internalName = name.replace('.', '/') + ".class";
try (InputStream inputStream = getParent().getResourceAsStream(internalName);
ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) {
if (inputStream == null) {
throw new ClassNotFoundException(name);
}
inputStream.transferTo(outputStream);
byte[] bytes = outputStream.toByteArray();
Class<?> definedClass = defineClass(name, bytes, 0, bytes.length);
definePackageIfNecessary(name);
return definedClass;
}
}
catch (IOException ex) {
throw new ClassNotFoundException("Cannot load resource for class [" + name + "]", ex);
}
}
@Override
protected Package definePackage(String name, Manifest man, URL url) throws IllegalArgumentException {
return (!this.exploded) ? super.definePackage(name, man, url) : definePackageForExploded(name, man, url);
}
private Package definePackageForExploded(String name, Manifest man, URL url) {
synchronized (this.definePackageLock) {
return definePackage(DefinePackageCallType.MANIFEST, () -> super.definePackage(name, man, url));
}
}
@Override
protected Package definePackage(String name, String specTitle, String specVersion, String specVendor,
String implTitle, String implVersion, String implVendor, URL sealBase) throws IllegalArgumentException {
if (!this.exploded) {
return super.definePackage(name, specTitle, specVersion, specVendor, implTitle, implVersion, implVendor,
sealBase);
}
return definePackageForExploded(name, sealBase, () -> super.definePackage(name, specTitle, specVersion,
specVendor, implTitle, implVersion, implVendor, sealBase));
}
private Package definePackageForExploded(String name, URL sealBase, Supplier<Package> call) {
synchronized (this.definePackageLock) {
if (this.definePackageCallType == null) {
// We're not part of a call chain which means that the URLClassLoader
// is trying to define a package for our exploded JAR. We use the
// manifest version to ensure package attributes are set
Manifest manifest = getManifest(this.rootArchive);
if (manifest != null) {
return definePackage(name, manifest, sealBase);
}
}
return definePackage(DefinePackageCallType.ATTRIBUTES, call);
}
}
private <T> T definePackage(DefinePackageCallType type, Supplier<T> call) {
DefinePackageCallType existingType = this.definePackageCallType;
try {
this.definePackageCallType = type;
return call.get();
}
finally {
this.definePackageCallType = existingType;
}
}
private Manifest getManifest(Archive archive) {
try {
return (archive != null) ? archive.getManifest() : null;
}
catch (IOException ex) {
return null;
}
}
/**
* The different types of call made to define a package. We track these for exploded
* jars so that we can detect packages that should have manifest attributes applied.
*/
private | loader |
java | alibaba__nacos | plugin/auth/src/main/java/com/alibaba/nacos/plugin/auth/api/IdentityContext.java | {
"start": 767,
"end": 1304
} | class ____ {
/**
* get context from request.
*/
private final Map<String, Object> param = new HashMap<>();
/**
* get key from context.
*
* @param key key of request
* @return value of param key
*/
public Object getParameter(String key) {
return param.get(key);
}
/**
* Get identity by key.
*
* @param key identity name
* @param defaultValue default value when the value is {@code null} or the value is not expected | IdentityContext |
java | apache__avro | lang/java/ipc/src/test/java/org/apache/avro/io/Perf.java | {
"start": 49553,
"end": 49802
} | class ____ extends ReflectTest<Rec> {
ReflectRecordTest() throws IOException {
super("ReflectRecord", new Rec(), 12);
}
@Override
protected Rec createDatum(Random r) {
return new Rec(r);
}
}
static | ReflectRecordTest |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/ser/jdk/AtomicTypeSerializationTest.java | {
"start": 2739,
"end": 6078
} | class ____ {
@JsonUnwrapped
public AtomicReference<String> maybeText = new AtomicReference<>("value");
}
/*
/**********************************************************
/* Test methods
/**********************************************************
*/
private final ObjectMapper MAPPER = newJsonMapper();
@Test
public void testAtomicBoolean() throws Exception
{
assertEquals("true", MAPPER.writeValueAsString(new AtomicBoolean(true)));
assertEquals("false", MAPPER.writeValueAsString(new AtomicBoolean(false)));
}
@Test
public void testAtomicInteger() throws Exception
{
assertEquals("1", MAPPER.writeValueAsString(new AtomicInteger(1)));
assertEquals("-9", MAPPER.writeValueAsString(new AtomicInteger(-9)));
}
@Test
public void testAtomicLong() throws Exception
{
assertEquals("0", MAPPER.writeValueAsString(new AtomicLong(0)));
}
@Test
public void testAtomicReference() throws Exception
{
String[] strs = new String[] { "abc" };
assertEquals("[\"abc\"]", MAPPER.writeValueAsString(new AtomicReference<String[]>(strs)));
}
@Test
public void testCustomSerializer() throws Exception
{
final String VALUE = "fooBAR";
String json = MAPPER.writeValueAsString(new UCStringWrapper(VALUE));
assertEquals(json, a2q("{'value':'FOOBAR'}"));
}
@Test
public void testContextualAtomicReference() throws Exception
{
SimpleDateFormat df = new SimpleDateFormat("yyyy/MM/dd");
df.setTimeZone(TimeZone.getTimeZone("UTC"));
ObjectMapper mapper = jsonMapperBuilder()
.disable(JsonWriteFeature.ESCAPE_FORWARD_SLASHES)
.defaultDateFormat(df)
.build();
ContextualOptionals input = new ContextualOptionals();
input.date = new AtomicReference<>(new Date(0L));
input.date1 = new AtomicReference<>(new Date(0L));
input.date2 = new AtomicReference<>(new Date(0L));
final String json = mapper.writeValueAsString(input);
assertEquals(a2q(
"{'date1':'1970+01+01','date2':'1970*01*01','date':'1970/01/01'}"),
json);
}
// [databind#1673]
@Test
public void testPolymorphicReferenceSimple() throws Exception
{
final String EXPECTED = "{\"type\":\"Foo\",\"foo\":42}";
String json = MAPPER.writeValueAsString(new ContainerA());
assertEquals("{\"strategy\":" + EXPECTED + "}", json);
}
// [databind#1673]
@Test
public void testPolymorphicReferenceListOf() throws Exception
{
final String EXPECTED = "{\"type\":\"Foo\",\"foo\":42}";
// Reproduction of issue seen with scala.Option and java8 Optional types:
// https://github.com/FasterXML/jackson-module-scala/issues/346#issuecomment-336483326
String json = MAPPER.writeValueAsString(new ContainerB());
assertEquals("{\"strategy\":[" + EXPECTED + "]}", json);
}
// [databind#2565]: problems with JsonUnwrapped, non-unwrappable type
@Test
public void testWithUnwrappableUnwrapped() throws Exception
{
assertEquals(a2q("{'maybeText':'value'}"),
MAPPER.writeValueAsString(new MyBean2565()));
}
}
| MyBean2565 |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistance.java | {
"start": 3058,
"end": 3623
} | class ____ extends DistanceCalculator {
protected GeoDistanceCalculator() {
super(SpatialCoordinateTypes.GEO, CoordinateEncoder.GEO);
}
@Override
protected double distance(Point left, Point right) {
return SloppyMath.haversinMeters(
GeoUtils.quantizeLat(left.getY()),
GeoUtils.quantizeLon(left.getX()),
GeoUtils.quantizeLat(right.getY()),
GeoUtils.quantizeLon(right.getX())
);
}
}
protected static | GeoDistanceCalculator |
java | hibernate__hibernate-orm | hibernate-testing/src/main/java/org/hibernate/testing/orm/junit/DialectFeatureChecks.java | {
"start": 24057,
"end": 24246
} | class ____ implements DialectFeatureCheck {
public boolean apply(Dialect dialect) {
return !( dialect instanceof InformixDialect );
}
}
public static | SupportsExtractDayOfWeekYearMonth |
java | apache__camel | components/camel-microprofile/camel-microprofile-fault-tolerance/src/main/java/org/apache/camel/component/microprofile/faulttolerance/FaultToleranceProcessor.java | {
"start": 14606,
"end": 18020
} | class ____ implements PooledExchangeTask, Callable<Exchange> {
private Exchange exchange;
@Override
public void prepare(Exchange exchange, AsyncCallback callback) {
this.exchange = exchange;
// callback not in use
}
@Override
public void reset() {
this.exchange = null;
}
@Override
public void run() {
// not in use
}
@Override
public Exchange call() throws Exception {
Exchange copy = null;
UnitOfWork uow = null;
Throwable cause;
// turn of interruption to allow fault tolerance to process the exchange under its handling
exchange.getExchangeExtension().setInterruptable(false);
try {
LOG.debug("Running processor: {} with exchange: {}", processor, exchange);
// prepare a copy of exchange so downstream processors don't
// cause side-effects if they mutate the exchange
// in case timeout processing and continue with the fallback etc
copy = processorExchangeFactory.createCorrelatedCopy(exchange, false);
if (copy.getUnitOfWork() != null) {
uow = copy.getUnitOfWork();
} else {
// prepare uow on copy
uow = PluginHelper.getUnitOfWorkFactory(copy.getContext()).createUnitOfWork(copy);
copy.getExchangeExtension().setUnitOfWork(uow);
// the copy must be starting from the route where its copied from
Route route = ExchangeHelper.getRoute(exchange);
if (route != null) {
uow.pushRoute(route);
}
}
// process the processor until its fully done
processor.process(copy);
// handle the processing result
if (copy.getException() != null) {
exchange.setException(copy.getException());
} else {
// copy the result as it's regarded as success
ExchangeHelper.copyResults(exchange, copy);
exchange.setProperty(ExchangePropertyKey.CIRCUIT_BREAKER_RESPONSE_SUCCESSFUL_EXECUTION, true);
exchange.setProperty(ExchangePropertyKey.CIRCUIT_BREAKER_RESPONSE_FROM_FALLBACK, false);
String state = getCircuitBreakerState();
if (state != null) {
exchange.setProperty(ExchangePropertyKey.CIRCUIT_BREAKER_RESPONSE_STATE, state);
}
}
} catch (Exception e) {
exchange.setException(e);
} finally {
// must done uow
UnitOfWorkHelper.doneUow(uow, copy);
// remember any thrown exception
cause = exchange.getException();
}
// and release exchange back in pool
processorExchangeFactory.release(exchange);
if (cause != null) {
// throw exception so fault tolerance knows it was a failure
throw RuntimeExchangeException.wrapRuntimeException(cause);
}
return exchange;
}
}
private final | CircuitBreakerTask |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromBooleanEvaluator.java | {
"start": 3960,
"end": 4569
} | class ____ implements EvalOperator.ExpressionEvaluator.Factory {
private final Source source;
private final EvalOperator.ExpressionEvaluator.Factory bool;
public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory bool) {
this.source = source;
this.bool = bool;
}
@Override
public ToStringFromBooleanEvaluator get(DriverContext context) {
return new ToStringFromBooleanEvaluator(source, bool.get(context), context);
}
@Override
public String toString() {
return "ToStringFromBooleanEvaluator[" + "bool=" + bool + "]";
}
}
}
| Factory |
java | quarkusio__quarkus | extensions/oidc/runtime/src/main/java/io/quarkus/oidc/runtime/OidcTenantConfig.java | {
"start": 21998,
"end": 22119
} | interface ____ {
/**
* SameSite attribute values for the session cookie.
*/
| Authentication |
java | quarkusio__quarkus | extensions/arc/deployment/src/test/java/io/quarkus/arc/test/exclude/ExcludeTypesTest.java | {
"start": 2044,
"end": 2186
} | class ____ implements Pong {
public String ping() {
return "alpga";
}
}
@ApplicationScoped
static | Alpha |
java | processing__processing4 | app/src/processing/app/ui/ExportPrompt.java | {
"start": 12904,
"end": 13978
} | class ____ extends JPanel implements ActionListener {
ColorChooser chooser;
String prefName;
public ColorPreference(String pref) {
prefName = pref;
//setBorder(BorderFactory.createBevelBorder(BevelBorder.LOWERED));
setPreferredSize(new Dimension(30, 20));
setMaximumSize(new Dimension(30, 20));
addMouseListener(new MouseAdapter() {
public void mouseReleased(MouseEvent e) {
Color color = Preferences.getColor(prefName);
chooser = new ColorChooser(editor, true, color, Language.text("color_chooser.select"), ColorPreference.this);
chooser.show();
}
});
}
public void paintComponent(Graphics g) {
g.setColor(Preferences.getColor(prefName));
Dimension size = getSize();
g.fillRect(0, 0, size.width, size.height);
}
public void actionPerformed(ActionEvent e) {
Color color = chooser.getColor();
Preferences.setColor(prefName, color);
//presentColorPanel.repaint();
repaint();
chooser.hide();
}
}
} | ColorPreference |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/server/runtime/src/main/java/org/jboss/resteasy/reactive/server/handlers/ResponseHandler.java | {
"start": 11904,
"end": 13148
} | class ____ implements ResponseBuilderCustomizer {
private Map<String, List<String>> headers;
public AddHeadersCustomizer(Map<String, List<String>> headers) {
this.headers = headers;
}
public AddHeadersCustomizer() {
}
public Map<String, List<String>> getHeaders() {
return headers;
}
public void setHeaders(Map<String, List<String>> headers) {
this.headers = headers;
}
@Override
public void customize(Response.ResponseBuilder responseBuilder) {
for (Map.Entry<String, List<String>> header : headers.entrySet()) {
List<String> values = header.getValue();
String headerName = header.getKey();
if (values.size() == 1) {
responseBuilder.header(headerName, values.get(0));
} else {
for (int i = 0; i < values.size(); i++) {
responseBuilder.header(headerName, values.get(i));
}
}
}
}
}
}
}
| AddHeadersCustomizer |
java | netty__netty | transport-classes-kqueue/src/main/java/io/netty/channel/kqueue/AbstractKQueueServerChannel.java | {
"start": 994,
"end": 2242
} | class ____ extends AbstractKQueueChannel implements ServerChannel {
private static final ChannelMetadata METADATA = new ChannelMetadata(false, 16);
AbstractKQueueServerChannel(BsdSocket fd) {
this(fd, isSoErrorZero(fd));
}
AbstractKQueueServerChannel(BsdSocket fd, boolean active) {
super(null, fd, active);
}
@Override
public ChannelMetadata metadata() {
return METADATA;
}
@Override
protected InetSocketAddress remoteAddress0() {
return null;
}
@Override
protected AbstractKQueueUnsafe newUnsafe() {
return new KQueueServerSocketUnsafe();
}
@Override
protected void doWrite(ChannelOutboundBuffer in) throws Exception {
throw new UnsupportedOperationException();
}
@Override
protected Object filterOutboundMessage(Object msg) throws Exception {
throw new UnsupportedOperationException();
}
abstract Channel newChildChannel(int fd, byte[] remote, int offset, int len) throws Exception;
@Override
protected boolean doConnect(SocketAddress remoteAddress, SocketAddress localAddress) throws Exception {
throw new UnsupportedOperationException();
}
final | AbstractKQueueServerChannel |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/StringFieldTest_special_1.java | {
"start": 191,
"end": 1862
} | class ____ extends TestCase {
public void test_special() throws Exception {
Model model = new Model();
StringBuilder buf = new StringBuilder();
for (int i = Character.MIN_VALUE; i < Character.MAX_VALUE; ++i) {
buf.append((char) i);
}
model.name = buf.toString();
String text = JSON.toJSONString(model);
Model model2 = JSON.parseObject(text, Model.class);
Assert.assertEquals(model.name, model2.name);
}
public void test_special_browsecue() throws Exception {
Model model = new Model();
StringBuilder buf = new StringBuilder();
for (int i = Character.MIN_VALUE; i < Character.MAX_VALUE; ++i) {
buf.append((char) i);
}
model.name = buf.toString();
String text = JSON.toJSONString(model, SerializerFeature.BrowserSecure);
text = text.replaceAll("<", "<");
text = text.replaceAll(">", ">");
Model model2 = JSON.parseObject(text, Model.class);
Assert.assertEquals(model.name, model2.name);
}
public void test_special_browsecompatible() throws Exception {
Model model = new Model();
StringBuilder buf = new StringBuilder();
for (int i = Character.MIN_VALUE; i < Character.MAX_VALUE; ++i) {
buf.append((char) i);
}
model.name = buf.toString();
String text = JSON.toJSONString(model, SerializerFeature.BrowserCompatible);
Model model2 = JSON.parseObject(text, Model.class);
Assert.assertEquals(model.name, model2.name);
}
public static | StringFieldTest_special_1 |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/rest/client/reactive/converter/GenericsConverterTest.java | {
"start": 3337,
"end": 3638
} | interface ____ {
@GET
@Path("/single")
String wrapper(@QueryParam("wrapper") final WrapperClass<StatusEnum> wrapper);
@GET
@Path("/list")
String wrapperList(@QueryParam("wrapperList") final List<WrapperClass<StatusEnum>> wrapperList);
}
}
| TestClient |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableFilter.java | {
"start": 1030,
"end": 1667
} | class ____<T> extends AbstractFlowableWithUpstream<T, T> {
final Predicate<? super T> predicate;
public FlowableFilter(Flowable<T> source, Predicate<? super T> predicate) {
super(source);
this.predicate = predicate;
}
@Override
protected void subscribeActual(Subscriber<? super T> s) {
if (s instanceof ConditionalSubscriber) {
source.subscribe(new FilterConditionalSubscriber<>(
(ConditionalSubscriber<? super T>) s, predicate));
} else {
source.subscribe(new FilterSubscriber<>(s, predicate));
}
}
static final | FlowableFilter |
java | elastic__elasticsearch | x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/predicate/regex/RegexProcessor.java | {
"start": 656,
"end": 2719
} | class ____ {
public static Boolean match(Object value, Pattern pattern) {
if (pattern == null) {
return Boolean.TRUE;
}
if (value == null) {
return null;
}
return pattern.matcher(value.toString()).matches();
}
public static Boolean match(Object value, String pattern) {
return match(value, pattern, Boolean.FALSE);
}
public static Boolean match(Object value, String pattern, Boolean caseInsensitive) {
if (pattern == null) {
return Boolean.TRUE;
}
if (value == null) {
return null;
}
int flags = 0;
if (Boolean.TRUE.equals(caseInsensitive)) {
flags |= Pattern.CASE_INSENSITIVE;
}
return Pattern.compile(pattern, flags).matcher(value.toString()).matches();
}
}
public static final String NAME = "rgx";
private Pattern pattern;
public RegexProcessor(String pattern) {
this.pattern = pattern != null ? Pattern.compile(pattern) : null;
}
@Override
public String getWriteableName() {
return NAME;
}
public RegexProcessor(StreamInput in) throws IOException {
this(in.readOptionalString());
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalString(pattern != null ? pattern.toString() : null);
}
@Override
public Object process(Object input) {
return RegexOperation.match(input, pattern);
}
@Override
public int hashCode() {
return Objects.hash(pattern);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
RegexProcessor other = (RegexProcessor) obj;
return Objects.equals(pattern, other.pattern);
}
}
| RegexOperation |
java | mapstruct__mapstruct | integrationtest/src/test/resources/faultyAstModifyingAnnotationProcessorTest/usage/src/main/java/org/mapstruct/itest/faultyAstModifyingProcessor/usage/Order.java | {
"start": 253,
"end": 425
} | class ____ {
private String item;
public String getItem() {
return item;
}
public void setItem(String item) {
this.item = item;
}
}
| Order |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/configurers/oauth2/client/OAuth2ClientConfigurerTests.java | {
"start": 19938,
"end": 20168
} | class ____ {
@Bean
OAuth2AuthorizationRequestResolver authorizationRequestResolver() {
return authorizationRequestResolver;
}
}
@Configuration
@EnableWebSecurity
@EnableWebMvc
static | AuthorizationRequestResolverConfig |
java | grpc__grpc-java | xds/src/main/java/io/grpc/xds/client/Bootstrapper.java | {
"start": 4692,
"end": 4799
} | class ____ the results of reading bootstrap.
*/
@AutoValue
@Internal
public abstract static | containing |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/kstream/Produced.java | {
"start": 1407,
"end": 8296
} | class ____<K, V> implements NamedOperation<Produced<K, V>> {
protected Serde<K> keySerde;
protected Serde<V> valueSerde;
protected StreamPartitioner<? super K, ? super V> partitioner;
protected String processorName;
private Produced(final Serde<K> keySerde,
final Serde<V> valueSerde,
final StreamPartitioner<? super K, ? super V> partitioner,
final String processorName) {
this.keySerde = keySerde;
this.valueSerde = valueSerde;
this.partitioner = partitioner;
this.processorName = processorName;
}
protected Produced(final Produced<K, V> produced) {
this.keySerde = produced.keySerde;
this.valueSerde = produced.valueSerde;
this.partitioner = produced.partitioner;
this.processorName = produced.processorName;
}
/**
* Create a Produced instance with provided keySerde and valueSerde.
* @param keySerde Serde to use for serializing the key
* @param valueSerde Serde to use for serializing the value
* @param <K> key type
* @param <V> value type
* @return A new {@link Produced} instance configured with keySerde and valueSerde
* @see KStream#to(String, Produced)
*/
public static <K, V> Produced<K, V> with(final Serde<K> keySerde,
final Serde<V> valueSerde) {
return new Produced<>(keySerde, valueSerde, null, null);
}
/**
* Create a Produced instance with provided keySerde, valueSerde, and partitioner.
* @param keySerde Serde to use for serializing the key
* @param valueSerde Serde to use for serializing the value
* @param partitioner the function used to determine how records are distributed among partitions of the topic,
* if not specified and {@code keySerde} provides a {@link WindowedSerializer} for the key
* {@link WindowedStreamPartitioner} will be used—otherwise {@link DefaultStreamPartitioner}
* will be used
* @param <K> key type
* @param <V> value type
* @return A new {@link Produced} instance configured with keySerde, valueSerde, and partitioner
* @see KStream#to(String, Produced)
*/
public static <K, V> Produced<K, V> with(final Serde<K> keySerde,
final Serde<V> valueSerde,
final StreamPartitioner<? super K, ? super V> partitioner) {
return new Produced<>(keySerde, valueSerde, partitioner, null);
}
/**
* Create an instance of {@link Produced} with provided processor name.
*
* @param processorName the processor name to be used. If {@code null} a default processor name will be generated
* @param <K> key type
* @param <V> value type
* @return a new instance of {@link Produced}
*/
public static <K, V> Produced<K, V> as(final String processorName) {
return new Produced<>(null, null, null, processorName);
}
/**
* Create a Produced instance with provided keySerde.
* @param keySerde Serde to use for serializing the key
* @param <K> key type
* @param <V> value type
* @return A new {@link Produced} instance configured with keySerde
* @see KStream#to(String, Produced)
*/
public static <K, V> Produced<K, V> keySerde(final Serde<K> keySerde) {
return new Produced<>(keySerde, null, null, null);
}
/**
* Create a Produced instance with provided valueSerde.
* @param valueSerde Serde to use for serializing the key
* @param <K> key type
* @param <V> value type
* @return A new {@link Produced} instance configured with valueSerde
* @see KStream#to(String, Produced)
*/
public static <K, V> Produced<K, V> valueSerde(final Serde<V> valueSerde) {
return new Produced<>(null, valueSerde, null, null);
}
/**
* Create a Produced instance with provided partitioner.
* @param partitioner the function used to determine how records are distributed among partitions of the topic,
* if not specified and the key serde provides a {@link WindowedSerializer} for the key
* {@link WindowedStreamPartitioner} will be used—otherwise {@link DefaultStreamPartitioner} will be used
* @param <K> key type
* @param <V> value type
* @return A new {@link Produced} instance configured with partitioner
* @see KStream#to(String, Produced)
*/
public static <K, V> Produced<K, V> streamPartitioner(final StreamPartitioner<? super K, ? super V> partitioner) {
return new Produced<>(null, null, partitioner, null);
}
/**
* Produce records using the provided partitioner.
* @param partitioner the function used to determine how records are distributed among partitions of the topic,
* if not specified and the key serde provides a {@link WindowedSerializer} for the key
* {@link WindowedStreamPartitioner} will be used—otherwise {@link DefaultStreamPartitioner} will be used
* @return this
*/
public Produced<K, V> withStreamPartitioner(final StreamPartitioner<? super K, ? super V> partitioner) {
this.partitioner = partitioner;
return this;
}
/**
* Produce records using the provided valueSerde.
* @param valueSerde Serde to use for serializing the value
* @return this
*/
public Produced<K, V> withValueSerde(final Serde<V> valueSerde) {
this.valueSerde = valueSerde;
return this;
}
/**
* Produce records using the provided keySerde.
* @param keySerde Serde to use for serializing the key
* @return this
*/
public Produced<K, V> withKeySerde(final Serde<K> keySerde) {
this.keySerde = keySerde;
return this;
}
@Override
public boolean equals(final Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
final Produced<?, ?> produced = (Produced<?, ?>) o;
return Objects.equals(keySerde, produced.keySerde) &&
Objects.equals(valueSerde, produced.valueSerde) &&
Objects.equals(partitioner, produced.partitioner);
}
@Override
public int hashCode() {
return Objects.hash(keySerde, valueSerde, partitioner);
}
@Override
public Produced<K, V> withName(final String name) {
this.processorName = name;
return this;
}
}
| Produced |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/bug/Bug_for_yuanmomo_Issue_505_1.java | {
"start": 135,
"end": 863
} | class ____ extends TestCase {
public void test_for_issue() throws Exception {
String userStr1 = "{\"id\":\"qfHdV0ez0N10\", \"ext\":{\"model\": \"10000\"} }";
User user = JSON.parseObject(userStr1, User.class);
System.out.println(user);
}
public void test_for_issue_1() throws Exception {
String text = "{\"model\":\"10002\" }";
UserExt ext = JSON.parseObject(text, UserExt.class);
}
public void test_for_issue_2() throws Exception {
String userStr2 = "{\"id\":\"qfHdV0ez0N10\", \"ext\":{\"model\":\"10000\" } }";
User user = JSON.parseObject(userStr2, User.class);
System.out.println(user);
}
public static | Bug_for_yuanmomo_Issue_505_1 |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/jsontype/impl/AsWrapperTypeSerializer.java | {
"start": 720,
"end": 1887
} | class ____ extends TypeSerializerBase
{
public AsWrapperTypeSerializer(TypeIdResolver idRes, BeanProperty property) {
super(idRes, property);
}
@Override
public AsWrapperTypeSerializer forProperty(SerializationContext ctxt, BeanProperty prop)
{
return (_property == prop) ? this : new AsWrapperTypeSerializer(_idResolver, prop);
}
@Override
public As getTypeInclusion() { return As.WRAPPER_OBJECT; }
/*
/**********************************************************************
/* Internal helper methods
/**********************************************************************
*/
/**
* Helper method used to ensure that intended type id is output as something that is valid:
* currently only used to ensure that `null` output is converted to an empty String.
*/
protected String _validTypeId(String typeId) {
return ClassUtil.nonNullString(typeId);
}
protected final void _writeTypeId(JsonGenerator g, String typeId)
throws JacksonException
{
if (typeId != null) {
g.writeTypeId(typeId);
}
}
}
| AsWrapperTypeSerializer |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletSpec.java | {
"start": 2483,
"end": 2637
} | enum ____ {
/**
* left to right
*/
ltr,
/**
* right to left
*/
rtl
};
/** %MediaDesc (case-sensitive) */
public | Dir |
java | elastic__elasticsearch | server/src/internalClusterTest/java/org/elasticsearch/health/GetHealthCancellationIT.java | {
"start": 2536,
"end": 7829
} | class ____ extends ESIntegTestCase {
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return Arrays.asList(getTestTransportPlugin(), MockTransportService.TestPlugin.class);
}
@Override
protected Settings nodeSettings(int ordinal, Settings otherSettings) {
return Settings.builder()
.put(super.nodeSettings(ordinal, otherSettings))
.put(NetworkModule.HTTP_DEFAULT_TYPE_SETTING.getKey(), Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME)
.build();
}
@Override
protected boolean addMockHttpTransport() {
return false; // enable http
}
public void testCancellation() throws Exception {
internalCluster().startMasterOnlyNode(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "master_node").build());
internalCluster().startDataOnlyNode(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "data_node").build());
final CountDownLatch tasksBlockedLatch = new CountDownLatch(1);
final SubscribableListener<Void> fetchHealthInfoRequestReleaseListener = new SubscribableListener<>();
for (TransportService transportService : internalCluster().getInstances(TransportService.class)) {
((MockTransportService) transportService).addRequestHandlingBehavior(
FetchHealthInfoCacheAction.NAME,
(handler, request, channel, task) -> {
tasksBlockedLatch.countDown();
fetchHealthInfoRequestReleaseListener.addListener(
ActionListener.wrap(ignored -> handler.messageReceived(request, channel, task), e -> {
throw new AssertionError("unexpected", e);
})
);
}
);
}
final ClusterService clusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class);
final PlainActionFuture<DiscoveryNode> findHealthNodeFuture = new PlainActionFuture<>();
// the health node might take a bit of time to be assigned by the persistent task framework so we wait until we have a health
// node in the cluster before proceeding with the test
// proceeding with the execution before the health node assignment would yield a non-deterministic behaviour as we
// wouldn't call the transport service anymore (there wouldn't be a node to fetch the health information from)
final ClusterStateListener clusterStateListener = event -> getHealthNodeIfPresent(event.state(), findHealthNodeFuture);
clusterService.addListener(clusterStateListener);
// look up the node in case the health node was assigned before we registered the listener
getHealthNodeIfPresent(clusterService.state(), findHealthNodeFuture);
DiscoveryNode healthNode = findHealthNodeFuture.get(10, TimeUnit.SECONDS);
assert healthNode != null : "the health node must be assigned";
clusterService.removeListener(clusterStateListener);
NodesInfoResponse nodesInfoResponse = clusterAdmin().prepareNodesInfo().get();
for (NodeInfo node : nodesInfoResponse.getNodes()) {
if (node.getInfo(HttpInfo.class) != null
&& Node.NODE_NAME_SETTING.get(node.getSettings()).equals(healthNode.getName()) == false) {
// we don't want the request to hit the health node as it will execute it locally (without going through our stub
// transport service)
TransportAddress publishAddress = node.getInfo(HttpInfo.class).address().publishAddress();
InetSocketAddress address = publishAddress.address();
getRestClient().setNodes(
List.of(
new org.elasticsearch.client.Node(
new HttpHost(NetworkAddress.format(address.getAddress()), address.getPort(), "http")
)
)
);
break;
}
}
final Request request = new Request(HttpGet.METHOD_NAME, "/_health_report");
final PlainActionFuture<Response> future = new PlainActionFuture<>();
final Cancellable cancellable = getRestClient().performRequestAsync(request, wrapAsRestResponseListener(future));
assertFalse(future.isDone());
safeAwait(tasksBlockedLatch); // must wait for the fetch health info request to start to avoid cancelling being handled earlier
cancellable.cancel();
assertAllCancellableTasksAreCancelled(FetchHealthInfoCacheAction.NAME);
assertAllCancellableTasksAreCancelled(GetHealthAction.NAME);
fetchHealthInfoRequestReleaseListener.onResponse(null);
expectThrows(CancellationException.class, future::actionGet);
assertAllTasksHaveFinished(FetchHealthInfoCacheAction.NAME);
assertAllTasksHaveFinished(GetHealthAction.NAME);
}
private static void getHealthNodeIfPresent(ClusterState event, ActionListener<DiscoveryNode> healthNodeReference) {
DiscoveryNode healthNode = HealthNode.findHealthNode(event);
if (healthNode != null) {
healthNodeReference.onResponse(healthNode);
}
}
}
| GetHealthCancellationIT |
java | google__guava | android/guava/src/com/google/common/hash/Murmur3_32HashFunction.java | {
"start": 7812,
"end": 12075
} | class ____ extends AbstractHasher {
private int h1;
private long buffer;
private int shift;
private int length;
private boolean isDone;
Murmur3_32Hasher(int seed) {
this.h1 = seed;
this.length = 0;
isDone = false;
}
private void update(int nBytes, long update) {
// 1 <= nBytes <= 4
buffer |= (update & 0xFFFFFFFFL) << shift;
shift += nBytes * 8;
length += nBytes;
if (shift >= 32) {
h1 = mixH1(h1, mixK1((int) buffer));
buffer >>>= 32;
shift -= 32;
}
}
@CanIgnoreReturnValue
@Override
public Hasher putByte(byte b) {
update(1, b & 0xFF);
return this;
}
@CanIgnoreReturnValue
@Override
public Hasher putBytes(byte[] bytes, int off, int len) {
checkPositionIndexes(off, off + len, bytes.length);
int i;
for (i = 0; i + 4 <= len; i += 4) {
update(4, getIntLittleEndian(bytes, off + i));
}
for (; i < len; i++) {
putByte(bytes[off + i]);
}
return this;
}
@CanIgnoreReturnValue
@Override
public Hasher putBytes(ByteBuffer buffer) {
ByteOrder bo = buffer.order();
buffer.order(ByteOrder.LITTLE_ENDIAN);
while (buffer.remaining() >= 4) {
putInt(buffer.getInt());
}
while (buffer.hasRemaining()) {
putByte(buffer.get());
}
buffer.order(bo);
return this;
}
@CanIgnoreReturnValue
@Override
public Hasher putInt(int i) {
update(4, i);
return this;
}
@CanIgnoreReturnValue
@Override
public Hasher putLong(long l) {
update(4, (int) l);
update(4, l >>> 32);
return this;
}
@CanIgnoreReturnValue
@Override
public Hasher putChar(char c) {
update(2, c);
return this;
}
@CanIgnoreReturnValue
@Override
public Hasher putString(CharSequence input, Charset charset) {
if (charset.equals(UTF_8)) {
int utf16Length = input.length();
int i = 0;
// This loop optimizes for pure ASCII.
while (i + 4 <= utf16Length) {
char c0 = input.charAt(i);
char c1 = input.charAt(i + 1);
char c2 = input.charAt(i + 2);
char c3 = input.charAt(i + 3);
if (c0 < 0x80 && c1 < 0x80 && c2 < 0x80 && c3 < 0x80) {
update(4, c0 | (c1 << 8) | (c2 << 16) | (c3 << 24));
i += 4;
} else {
break;
}
}
for (; i < utf16Length; i++) {
char c = input.charAt(i);
if (c < 0x80) {
update(1, c);
} else if (c < 0x800) {
update(2, charToTwoUtf8Bytes(c));
} else if (c < Character.MIN_SURROGATE || c > Character.MAX_SURROGATE) {
update(3, charToThreeUtf8Bytes(c));
} else {
int codePoint = Character.codePointAt(input, i);
if (codePoint == c) {
// fall back to JDK getBytes instead of trying to handle invalid surrogates ourselves
putBytes(input.subSequence(i, utf16Length).toString().getBytes(charset));
return this;
}
i++;
update(4, codePointToFourUtf8Bytes(codePoint));
}
}
return this;
} else {
return super.putString(input, charset);
}
}
@Override
public HashCode hash() {
checkState(!isDone);
isDone = true;
h1 ^= mixK1((int) buffer);
return fmix(h1, length);
}
}
private static long codePointToFourUtf8Bytes(int codePoint) {
// codePoint has at most 21 bits
return ((0xFL << 4) | (codePoint >>> 18))
| ((0x80L | (0x3F & (codePoint >>> 12))) << 8)
| ((0x80L | (0x3F & (codePoint >>> 6))) << 16)
| ((0x80L | (0x3F & codePoint)) << 24);
}
private static long charToThreeUtf8Bytes(char c) {
return ((0x7L << 5) | (c >>> 12))
| ((0x80 | (0x3F & (c >>> 6))) << 8)
| ((0x80 | (0x3F & c)) << 16);
}
private static long charToTwoUtf8Bytes(char c) {
// c has at most 11 bits
return ((0x3L << 6) | (c >>> 6)) | ((0x80 | (0x3F & c)) << 8);
}
private static final long serialVersionUID = 0L;
}
| Murmur3_32Hasher |
java | google__guice | core/src/com/google/inject/spi/ConstructorBinding.java | {
"start": 1032,
"end": 1841
} | interface ____<T> extends Binding<T>, HasDependencies {
/** Gets the constructor this binding injects. */
InjectionPoint getConstructor();
/**
* Returns all instance method and field injection points on {@code type}.
*
* @return a possibly empty set of injection points. The set has a specified iteration order. All
* fields are returned and then all methods. Within the fields, supertype fields are returned
* before subtype fields. Similarly, supertype methods are returned before subtype methods.
*/
Set<InjectionPoint> getInjectableMembers();
/**
* Returns the interceptors applied to each method, in the order that they will be applied.
*
* @return a possibly empty map
*/
Map<Method, List<MethodInterceptor>> getMethodInterceptors();
}
| ConstructorBinding |
java | elastic__elasticsearch | x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorConfigurationTests.java | {
"start": 1475,
"end": 13959
} | class ____ extends ESTestCase {
private NamedWriteableRegistry namedWriteableRegistry;
@Before
public void registerNamedObjects() {
SearchModule searchModule = new SearchModule(Settings.EMPTY, emptyList());
List<NamedWriteableRegistry.Entry> namedWriteables = searchModule.getNamedWriteables();
namedWriteableRegistry = new NamedWriteableRegistry(namedWriteables);
}
public final void testRandomSerialization() throws IOException {
for (int runs = 0; runs < 10; runs++) {
ConnectorConfiguration testInstance = ConnectorTestUtils.getRandomConnectorConfigurationField();
assertTransportSerialization(testInstance);
}
}
public void testToXContent() throws IOException {
String content = XContentHelper.stripWhitespace("""
{
"default_value": null,
"depends_on": [
{
"field": "some_field",
"value": true
}
],
"display": "textbox",
"label": "Very important field",
"options": [],
"order": 4,
"required": true,
"sensitive": false,
"tooltip": "Wow, this tooltip is useful.",
"type": "str",
"ui_restrictions": [],
"validations": [
{
"constraint": 0,
"type": "greater_than"
}
],
"value": ""
}
""");
ConnectorConfiguration configuration = ConnectorConfiguration.fromXContentBytes(new BytesArray(content), XContentType.JSON);
boolean humanReadable = true;
BytesReference originalBytes = toShuffledXContent(configuration, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable);
ConnectorConfiguration parsed;
try (XContentParser parser = createParser(XContentType.JSON.xContent(), originalBytes)) {
parsed = ConnectorConfiguration.fromXContent(parser);
}
assertToXContentEquivalent(originalBytes, toXContent(parsed, XContentType.JSON, humanReadable), XContentType.JSON);
}
public void testToXContent_WithNumericSelectOptions() throws IOException {
String content = XContentHelper.stripWhitespace("""
{
"default_value": null,
"depends_on": [
{
"field": "some_field",
"value": true
}
],
"display": "textbox",
"label": "Very important field",
"options": [
{
"label": "five",
"value": 5
},
{
"label": "ten",
"value": 10
}
],
"order": 4,
"required": true,
"sensitive": false,
"tooltip": "Wow, this tooltip is useful.",
"type": "str",
"ui_restrictions": [],
"validations": [
{
"constraint": 0,
"type": "greater_than"
}
],
"value": ""
}
""");
ConnectorConfiguration configuration = ConnectorConfiguration.fromXContentBytes(new BytesArray(content), XContentType.JSON);
boolean humanReadable = true;
BytesReference originalBytes = toShuffledXContent(configuration, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable);
ConnectorConfiguration parsed;
try (XContentParser parser = createParser(XContentType.JSON.xContent(), originalBytes)) {
parsed = ConnectorConfiguration.fromXContent(parser);
}
assertToXContentEquivalent(originalBytes, toXContent(parsed, XContentType.JSON, humanReadable), XContentType.JSON);
}
public void testToXContentCrawlerConfig_WithNullValue() throws IOException {
String content = XContentHelper.stripWhitespace("""
{
"label": "nextSyncConfig",
"value": null
}
""");
ConnectorConfiguration configuration = ConnectorConfiguration.fromXContentBytes(new BytesArray(content), XContentType.JSON);
boolean humanReadable = true;
BytesReference originalBytes = toShuffledXContent(configuration, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable);
ConnectorConfiguration parsed;
try (XContentParser parser = createParser(XContentType.JSON.xContent(), originalBytes)) {
parsed = ConnectorConfiguration.fromXContent(parser);
}
assertToXContentEquivalent(originalBytes, toXContent(parsed, XContentType.JSON, humanReadable), XContentType.JSON);
}
public void testToXContentCrawlerConfig_WithCrawlerConfigurationOverrides() throws IOException {
String content = XContentHelper.stripWhitespace("""
{
"label": "nextSyncConfig",
"value": {
"max_crawl_depth": 3,
"sitemap_discovery_disabled": false,
"seed_urls": ["https://elastic.co/"]
}
}
""");
ConnectorConfiguration configuration = ConnectorConfiguration.fromXContentBytes(new BytesArray(content), XContentType.JSON);
boolean humanReadable = true;
BytesReference originalBytes = toShuffledXContent(configuration, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable);
ConnectorConfiguration parsed;
try (XContentParser parser = createParser(XContentType.JSON.xContent(), originalBytes)) {
parsed = ConnectorConfiguration.fromXContent(parser);
}
assertToXContentEquivalent(originalBytes, toXContent(parsed, XContentType.JSON, humanReadable), XContentType.JSON);
}
public void testToXContentWithMultipleConstraintTypes() throws IOException {
String content = XContentHelper.stripWhitespace("""
{
"default_value": null,
"depends_on": [
{
"field": "some_field",
"value": true
}
],
"display": "textbox",
"label": "Very important field",
"options": [],
"order": 4,
"required": true,
"sensitive": false,
"tooltip": "Wow, this tooltip is useful.",
"type": "str",
"ui_restrictions": [],
"validations": [
{
"constraint": 32,
"type": "less_than"
},
{
"constraint": "^\\\\\\\\d{4}-\\\\\\\\d{2}-\\\\\\\\d{2}$",
"type": "regex"
},
{
"constraint": "int",
"type": "list_type"
},
{
"constraint": [
1,
2,
3
],
"type": "included_in"
},
{
"constraint": [
"string_1",
"string_2",
"string_3"
],
"type": "included_in"
}
],
"value": ""
}
""");
ConnectorConfiguration configuration = ConnectorConfiguration.fromXContentBytes(new BytesArray(content), XContentType.JSON);
boolean humanReadable = true;
BytesReference originalBytes = toShuffledXContent(configuration, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable);
ConnectorConfiguration parsed;
try (XContentParser parser = createParser(XContentType.JSON.xContent(), originalBytes)) {
parsed = ConnectorConfiguration.fromXContent(parser);
}
assertToXContentEquivalent(originalBytes, toXContent(parsed, XContentType.JSON, humanReadable), XContentType.JSON);
}
public void testToMap() {
ConnectorConfiguration configField = ConnectorTestUtils.getRandomConnectorConfigurationField();
Map<String, Object> configFieldAsMap = configField.toMap();
if (configField.getCategory() != null) {
assertThat(configFieldAsMap.get("category"), equalTo(configField.getCategory()));
} else {
assertFalse(configFieldAsMap.containsKey("category"));
}
assertThat(configFieldAsMap.get("default_value"), equalTo(configField.getDefaultValue()));
if (configField.getDependsOn() != null) {
List<Map<String, Object>> dependsOnAsList = configField.getDependsOn().stream().map(ConfigurationDependency::toMap).toList();
assertThat(configFieldAsMap.get("depends_on"), equalTo(dependsOnAsList));
} else {
assertFalse(configFieldAsMap.containsKey("depends_on"));
}
if (configField.getDisplay() != null) {
assertThat(configFieldAsMap.get("display"), equalTo(configField.getDisplay().toString()));
} else {
assertFalse(configFieldAsMap.containsKey("display"));
}
assertThat(configFieldAsMap.get("label"), equalTo(configField.getLabel()));
if (configField.getOptions() != null) {
List<Map<String, Object>> optionsAsList = configField.getOptions().stream().map(ConfigurationSelectOption::toMap).toList();
assertThat(configFieldAsMap.get("options"), equalTo(optionsAsList));
} else {
assertFalse(configFieldAsMap.containsKey("options"));
}
if (configField.getOrder() != null) {
assertThat(configFieldAsMap.get("order"), equalTo(configField.getOrder()));
} else {
assertFalse(configFieldAsMap.containsKey("order"));
}
if (configField.getPlaceholder() != null) {
assertThat(configFieldAsMap.get("placeholder"), equalTo(configField.getPlaceholder()));
} else {
assertFalse(configFieldAsMap.containsKey("placeholder"));
}
assertThat(configFieldAsMap.get("required"), equalTo(configField.isRequired()));
assertThat(configFieldAsMap.get("sensitive"), equalTo(configField.isSensitive()));
if (configField.getTooltip() != null) {
assertThat(configFieldAsMap.get("tooltip"), equalTo(configField.getTooltip()));
} else {
assertFalse(configFieldAsMap.containsKey("tooltip"));
}
if (configField.getType() != null) {
assertThat(configFieldAsMap.get("type"), equalTo(configField.getType().toString()));
} else {
assertFalse(configFieldAsMap.containsKey("type"));
}
if (configField.getUiRestrictions() != null) {
assertThat(configFieldAsMap.get("ui_restrictions"), equalTo(configField.getUiRestrictions()));
} else {
assertFalse(configFieldAsMap.containsKey("ui_restrictions"));
}
if (configField.getValidations() != null) {
List<Map<String, Object>> validationsAsList = configField.getValidations()
.stream()
.map(ConfigurationValidation::toMap)
.toList();
assertThat(configFieldAsMap.get("validations"), equalTo(validationsAsList));
} else {
assertFalse(configFieldAsMap.containsKey("validations"));
}
assertThat(configFieldAsMap.get("value"), equalTo(configField.getValue()));
}
private void assertTransportSerialization(ConnectorConfiguration testInstance) throws IOException {
ConnectorConfiguration deserializedInstance = copyInstance(testInstance);
assertNotSame(testInstance, deserializedInstance);
assertThat(testInstance, equalTo(deserializedInstance));
}
private ConnectorConfiguration copyInstance(ConnectorConfiguration instance) throws IOException {
return copyWriteable(instance, namedWriteableRegistry, ConnectorConfiguration::new);
}
}
| ConnectorConfigurationTests |
java | bumptech__glide | library/src/main/java/com/bumptech/glide/load/engine/DecodeJob.java | {
"start": 26616,
"end": 26732
} | interface ____ {
DiskCache getDiskCache();
}
/** Why we're being executed again. */
private | DiskCacheProvider |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java | {
"start": 1445,
"end": 6150
} | class ____ {
private final static Logger LOG =
LoggerFactory.getLogger(JobSubmissionFiles.class);
// job submission directory is private!
final public static FsPermission JOB_DIR_PERMISSION =
FsPermission.createImmutable((short) 0700); // rwx------
//job files are world-wide readable and owner writable
final public static FsPermission JOB_FILE_PERMISSION =
FsPermission.createImmutable((short) 0644); // rw-r--r--
public static Path getJobSplitFile(Path jobSubmissionDir) {
return new Path(jobSubmissionDir, "job.split");
}
public static Path getJobSplitMetaFile(Path jobSubmissionDir) {
return new Path(jobSubmissionDir, "job.splitmetainfo");
}
/**
* Get the job conf path.
*/
public static Path getJobConfPath(Path jobSubmitDir) {
return new Path(jobSubmitDir, "job.xml");
}
/**
* Get the job jar path.
*/
public static Path getJobJar(Path jobSubmitDir) {
return new Path(jobSubmitDir, "job.jar");
}
/**
* Get the job distributed cache files path.
* @param jobSubmitDir
*/
public static Path getJobDistCacheFiles(Path jobSubmitDir) {
return new Path(jobSubmitDir, "files");
}
/**
* Get the job distributed cache path for log4j properties.
* @param jobSubmitDir
*/
public static Path getJobLog4jFile(Path jobSubmitDir) {
return new Path(jobSubmitDir, "log4j");
}
/**
* Get the job distributed cache archives path.
* @param jobSubmitDir
*/
public static Path getJobDistCacheArchives(Path jobSubmitDir) {
return new Path(jobSubmitDir, "archives");
}
/**
* Get the job distributed cache libjars path.
* @param jobSubmitDir
*/
public static Path getJobDistCacheLibjars(Path jobSubmitDir) {
return new Path(jobSubmitDir, "libjars");
}
/**
* Initializes the staging directory and returns the path. It also
* keeps track of all necessary ownership and permissions
* @param cluster
* @param conf
*/
public static Path getStagingDir(Cluster cluster, Configuration conf)
throws IOException, InterruptedException {
UserGroupInformation user = UserGroupInformation.getLoginUser();
return getStagingDir(cluster, conf, user);
}
/**
* Initializes the staging directory and returns the path. It also
* keeps track of all necessary ownership and permissions.
* It is kept for unit testing.
*
* @param cluster Information about the map/reduce cluster
* @param conf Configuration object
* @param realUser UserGroupInformation of login user
* @return staging dir path object
* @throws IOException when ownership of staging area directory does
* not match the login user or current user.
* @throws InterruptedException when getting the staging area directory path
*/
@VisibleForTesting
public static Path getStagingDir(Cluster cluster, Configuration conf,
UserGroupInformation realUser) throws IOException, InterruptedException {
Path stagingArea = cluster.getStagingAreaDir();
FileSystem fs = stagingArea.getFileSystem(conf);
UserGroupInformation currentUser = realUser.getCurrentUser();
try {
FileStatus fsStatus = fs.getFileStatus(stagingArea);
String fileOwner = fsStatus.getOwner();
if (!(fileOwner.equals(currentUser.getShortUserName()) || fileOwner
.equalsIgnoreCase(currentUser.getUserName()) || fileOwner
.equals(realUser.getShortUserName()) || fileOwner
.equalsIgnoreCase(realUser.getUserName()))) {
String errorMessage = "The ownership on the staging directory " +
stagingArea + " is not as expected. " +
"It is owned by " + fileOwner + ". The directory must " +
"be owned by the submitter " + currentUser.getShortUserName()
+ " or " + currentUser.getUserName();
if (!realUser.getUserName().equals(currentUser.getUserName())) {
throw new IOException(
errorMessage + " or " + realUser.getShortUserName() + " or "
+ realUser.getUserName());
} else {
throw new IOException(errorMessage);
}
}
if (!fsStatus.getPermission().equals(JOB_DIR_PERMISSION)) {
LOG.info("Permissions on staging directory " + stagingArea + " are " +
"incorrect: " + fsStatus.getPermission() + ". Fixing permissions " +
"to correct value " + JOB_DIR_PERMISSION);
fs.setPermission(stagingArea, JOB_DIR_PERMISSION);
}
} catch (FileNotFoundException e) {
FileSystem.mkdirs(fs, stagingArea, new FsPermission(JOB_DIR_PERMISSION));
}
return stagingArea;
}
}
| JobSubmissionFiles |
java | spring-projects__spring-framework | spring-oxm/src/main/java/org/springframework/oxm/xstream/XStreamMarshaller.java | {
"start": 4152,
"end": 5591
} | class ____ the classpath. As such, it is <b>not recommended to use the
* {@code XStreamMarshaller} to unmarshal XML from external sources</b> (i.e. the Web),
* as this can result in <b>security vulnerabilities</b>. If you do use the
* {@code XStreamMarshaller} to unmarshal external XML, set the
* {@link #setSupportedClasses(Class[]) supportedClasses} and
* {@link #setConverters(ConverterMatcher[]) converters} properties (possibly using
* a {@link CatchAllConverter}) or override the {@link #customizeXStream(XStream)}
* method to make sure it only accepts the classes you want it to support.
*
* <p>Due to XStream's API, it is required to set the encoding used for writing to
* OutputStreams. It defaults to {@code UTF-8}.
*
* <p><b>NOTE:</b> XStream is an XML serialization library, not a data binding library.
* Therefore, it has limited namespace support. As such, it is rather unsuitable for
* usage within Web Services.
*
* <p>This marshaller requires XStream 1.4.7 or higher.
*
* <p>As of Spring Framework 6.0, the default {@link HierarchicalStreamDriver} is
* a {@link DomDriver} that uses the configured {@linkplain #setEncoding(String)
* encoding} and {@link #setNameCoder(NameCoder) NameCoder}. The driver can be
* changed via {@link #setStreamDriver(HierarchicalStreamDriver)}.
*
* @author Peter Meijer
* @author Arjen Poutsma
* @author Juergen Hoeller
* @author Sam Brannen
* @since 3.0
*/
public | on |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/event/internal/ProxyVisitor.java | {
"start": 521,
"end": 2446
} | class ____ extends AbstractVisitor {
public ProxyVisitor(EventSource session) {
super(session);
}
Object processEntity(Object value, EntityType entityType) {
if ( value != null ) {
getSession().getPersistenceContext().reassociateIfUninitializedProxy( value );
// if it is an initialized proxy, let cascade
// handle it later on
}
return null;
}
/**
* Has the owner of the collection changed since the collection
* was snapshotted and detached?
*/
protected static boolean isOwnerUnchanged(
CollectionPersister persister, Object id, PersistentCollection<?> snapshot) {
return isCollectionSnapshotValid( snapshot )
&& persister.getRole().equals( snapshot.getRole() )
&& persister.getKeyType().isEqual( id, snapshot.getKey() );
}
private static boolean isCollectionSnapshotValid(PersistentCollection<?> snapshot) {
return snapshot != null
&& snapshot.getRole() != null
&& snapshot.getKey() != null;
}
/**
* Reattach a detached (disassociated) initialized or uninitialized
* collection wrapper, using a snapshot carried with the collection
* wrapper
*/
protected void reattachCollection(PersistentCollection<?> collection, CollectionType type)
throws HibernateException {
final var session = getSession();
final var metamodel = session.getFactory().getMappingMetamodel();
final var context = session.getPersistenceContext();
if ( collection.wasInitialized() ) {
final var persister = metamodel.getCollectionDescriptor( type.getRole() );
context.addInitializedDetachedCollection( persister, collection );
}
else {
if ( !isCollectionSnapshotValid( collection ) ) {
throw new HibernateException( "could not re-associate uninitialized transient collection" );
}
final var persister = metamodel.getCollectionDescriptor( collection.getRole() );
context.addUninitializedDetachedCollection( persister, collection );
}
}
}
| ProxyVisitor |
java | apache__spark | mllib/src/test/java/org/apache/spark/ml/regression/JavaGBTRegressorSuite.java | {
"start": 1257,
"end": 2839
} | class ____ extends SharedSparkSession {
@Test
public void runDT() {
int nPoints = 20;
double A = 2.0;
double B = -1.5;
JavaRDD<LabeledPoint> data = jsc.parallelize(
LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache();
Map<Integer, Integer> categoricalFeatures = new HashMap<>();
Dataset<Row> dataFrame = TreeTests.setMetadata(data, categoricalFeatures, 0);
GBTRegressor rf = new GBTRegressor()
.setMaxDepth(2)
.setMaxBins(10)
.setMinInstancesPerNode(5)
.setMinInfoGain(0.0)
.setMaxMemoryInMB(256)
.setCacheNodeIds(false)
.setCheckpointInterval(10)
.setSubsamplingRate(1.0)
.setSeed(1234)
.setMaxIter(3)
.setStepSize(0.1)
.setMaxDepth(2); // duplicate setMaxDepth to check builder pattern
for (String lossType : GBTRegressor.supportedLossTypes()) {
rf.setLossType(lossType);
}
GBTRegressionModel model = rf.fit(dataFrame);
model.transform(dataFrame);
model.totalNumNodes();
model.toDebugString();
model.trees();
model.treeWeights();
/*
// TODO: Add test once save/load are implemented. SPARK-6725
File tempDir = Utils.createTempDir(System.getProperty("java.io.tmpdir"), "spark");
String path = tempDir.toURI().toString();
try {
model2.save(sc.sc(), path);
GBTRegressionModel sameModel = GBTRegressionModel.load(sc.sc(), path);
TreeTests.checkEqual(model2, sameModel);
} finally {
Utils.deleteRecursively(tempDir);
}
*/
}
}
| JavaGBTRegressorSuite |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java | {
"start": 61244,
"end": 66438
} | class ____ extends AppenderSkeleton {
private final List<LoggingEvent> log = new ArrayList<>();
@Override
public boolean requiresLayout() {
return false;
}
@Override
protected void append(final LoggingEvent loggingEvent) {
log.add(loggingEvent);
}
@Override
public void close() {
}
public List<LoggingEvent> getLog() {
return new ArrayList<>(log);
}
}
/**
*
* @throws Exception
*/
@Test
public void testReadActivelyUpdatedLog() throws Exception {
final TestAppender appender = new TestAppender();
LogManager.getRootLogger().addAppender(appender);
Configuration conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
// Set single handler thread, so all transactions hit same thread-local ops.
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_KEY, 1);
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
FSImage fsimage = cluster.getNamesystem().getFSImage();
StorageDirectory sd = fsimage.getStorage().getStorageDir(0);
final DistributedFileSystem fileSys = cluster.getFileSystem();
DFSInotifyEventInputStream events = fileSys.getInotifyEventStream();
fileSys.mkdirs(new Path("/test"));
fileSys.mkdirs(new Path("/test/dir1"));
fileSys.delete(new Path("/test/dir1"), true);
fsimage.getEditLog().logSync();
fileSys.mkdirs(new Path("/test/dir2"));
final File inProgressEdit = NNStorage.getInProgressEditsFile(sd, 1);
assertTrue(inProgressEdit.exists());
EditLogFileInputStream elis = new EditLogFileInputStream(inProgressEdit);
FSEditLogOp op;
long pos = 0;
while (true) {
op = elis.readOp();
if (op != null && op.opCode != FSEditLogOpCodes.OP_INVALID) {
pos = elis.getPosition();
} else {
break;
}
}
elis.close();
assertTrue(pos > 0);
RandomAccessFile rwf = new RandomAccessFile(inProgressEdit, "rw");
rwf.seek(pos);
assertEquals(rwf.readByte(), (byte) -1);
rwf.seek(pos + 1);
rwf.writeByte(2);
rwf.close();
events.poll();
String pattern = "Caught exception after reading (.*) ops";
Pattern r = Pattern.compile(pattern);
final List<LoggingEvent> log = appender.getLog();
for (LoggingEvent event : log) {
Matcher m = r.matcher(event.getRenderedMessage());
if (m.find()) {
fail("Should not try to read past latest syned edit log op");
}
}
} finally {
if (cluster != null) {
cluster.shutdown();
}
LogManager.getRootLogger().removeAppender(appender);
}
}
/**
* Test edits can be writen and read without ErasureCoding supported.
*/
@Test
public void testEditLogWithoutErasureCodingSupported()
throws IOException {
Configuration conf = getConf();
MiniDFSCluster cluster = null;
// ERASURECODING not supported
int logVersion = -61;
assertFalse(NameNodeLayoutVersion.supports(
NameNodeLayoutVersion.Feature.ERASURE_CODING, logVersion));
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
final FSNamesystem namesystem = cluster.getNamesystem();
FSImage fsimage = namesystem.getFSImage();
FileSystem fileSys = cluster.getFileSystem();
final FSEditLog editLog = fsimage.getEditLog();
editLog.editLogStream.setCurrentLogVersion(logVersion);
// Write new version edit log
long txid = editLog.rollEditLog(logVersion);
String testDir = "/test";
String testFile = "testfile_001";
String testFilePath = testDir + "/" + testFile;
fileSys.mkdirs(new Path(testDir), new FsPermission("755"));
// Create a file
Path p = new Path(testFilePath);
DFSTestUtil.createFile(fileSys, p, 0, (short) 1, 1);
long blkId = 1;
long blkNumBytes = 1024;
long timestamp = 1426222918;
// Add a block to the file
BlockInfoContiguous blockInfo =
new BlockInfoContiguous(
new Block(blkId, blkNumBytes, timestamp),
(short)1);
INodeFile file
= (INodeFile)namesystem.getFSDirectory().getINode(testFilePath);
file.addBlock(blockInfo);
file.toUnderConstruction("testClient", "testMachine");
// Write edit log
editLog.logAddBlock(testFilePath, file);
editLog.rollEditLog(logVersion);
// Read edit log
Collection<EditLogInputStream> editStreams
= editLog.selectInputStreams(txid, txid + 1);
EditLogInputStream inputStream = null;
for (EditLogInputStream s : editStreams) {
if (s.getFirstTxId() == txid) {
inputStream = s;
break;
}
}
assertNotNull(inputStream);
int readLogVersion = inputStream.getVersion(false);
assertEquals(logVersion, readLogVersion);
FSEditLogLoader loader = new FSEditLogLoader(namesystem, 0);
long records = loader.loadFSEdits(inputStream, txid);
assertTrue(records > 0);
editLog.close();
cluster.shutdown();
}
}
| TestAppender |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/pool/DruidDataSourceTest_autocommit2.java | {
"start": 437,
"end": 1414
} | class ____ extends TestCase {
private DruidDataSource dataSource;
protected void setUp() throws Exception {
dataSource = new DruidDataSource();
dataSource.setUrl("jdbc:mock:xxx");
dataSource.setTestOnBorrow(false);
dataSource.setDefaultAutoCommit(false);
dataSource.setInitialSize(1);
dataSource.getProxyFilters().add(new FilterAdapter() {
public ConnectionProxy connection_connect(FilterChain chain, Properties info) throws SQLException {
ConnectionProxy conn = chain.connection_connect(info);
conn.setAutoCommit(true);
return conn;
}
});
}
protected void tearDown() throws Exception {
dataSource.close();
}
public void test_autoCommit() throws Exception {
Connection conn = dataSource.getConnection();
assertFalse(conn.getAutoCommit());
conn.close();
}
}
| DruidDataSourceTest_autocommit2 |
java | spring-projects__spring-boot | module/spring-boot-micrometer-observation/src/test/java/org/springframework/boot/micrometer/observation/autoconfigure/ObservationAutoConfigurationTests.java | {
"start": 2267,
"end": 8622
} | class ____ {
private final ApplicationContextRunner contextRunner = new ApplicationContextRunner()
.withPropertyValues("management.observations.annotations.enabled=true")
.withConfiguration(AutoConfigurations.of(ObservationAutoConfiguration.class))
.withUserConfiguration(ObservationHandlers.class);
@Test
void beansShouldNotBeSuppliedWhenMicrometerObservationIsNotOnClassPath() {
this.contextRunner.withClassLoader(new FilteredClassLoader("io.micrometer.observation")).run((context) -> {
assertThat(context).doesNotHaveBean(ObservationRegistry.class);
assertThat(context).doesNotHaveBean(ObservedAspect.class);
assertThat(context).doesNotHaveBean(ObservationKeyValueAnnotationHandler.class);
});
}
@Test
void supplyObservationRegistryAndAspect() {
this.contextRunner.run((context) -> {
ObservationRegistry observationRegistry = context.getBean(ObservationRegistry.class);
Observation.start("test-observation", observationRegistry).stop();
assertThat(context).hasSingleBean(ObservedAspect.class);
assertThat(context).hasSingleBean(ObservationKeyValueAnnotationHandler.class);
assertThat(context.getBean(ObservedAspect.class)).extracting("observationKeyValueAnnotationHandler")
.isSameAs(context.getBean(ObservationKeyValueAnnotationHandler.class));
});
}
@Test
void allowsObservedAspectToBeDisabled() {
this.contextRunner.withClassLoader(new FilteredClassLoader(Advice.class)).run((context) -> {
assertThat(context).doesNotHaveBean(ObservedAspect.class);
assertThat(context).doesNotHaveBean(ObservationKeyValueAnnotationHandler.class);
});
}
@Test
void allowsObservedAspectToBeDisabledWithProperty() {
this.contextRunner.withPropertyValues("management.observations.annotations.enabled=false").run((context) -> {
assertThat(context).doesNotHaveBean(ObservedAspect.class);
assertThat(context).doesNotHaveBean(ObservationKeyValueAnnotationHandler.class);
});
}
@Test
void allowsObservedAspectToBeCustomized() {
this.contextRunner.withUserConfiguration(CustomObservedAspectConfiguration.class).run((context) -> {
assertThat(context).hasSingleBean(ObservedAspect.class)
.getBean(ObservedAspect.class)
.isSameAs(context.getBean("customObservedAspect"));
assertThat(context).hasSingleBean(ObservationKeyValueAnnotationHandler.class)
.getBean(ObservationKeyValueAnnotationHandler.class)
.isSameAs(context.getBean("customObservationKeyValueAnnotationHandler"));
});
}
@Test
void autoConfiguresObservationPredicates() {
this.contextRunner.withUserConfiguration(ObservationPredicates.class).run((context) -> {
ObservationRegistry observationRegistry = context.getBean(ObservationRegistry.class);
// This is allowed by ObservationPredicates.customPredicate
Observation observation = Observation.start("observation1", observationRegistry);
assertThat(observation.isNoop()).isFalse();
observation.stop();
// This isn't allowed by ObservationPredicates.customPredicate
observation = Observation.start("observation2", observationRegistry);
assertThat(observation.isNoop()).isTrue();
observation.stop();
});
}
@Test
void autoConfiguresObservationFilters() {
this.contextRunner.withUserConfiguration(ObservationFilters.class).run((context) -> {
ObservationRegistry observationRegistry = context.getBean(ObservationRegistry.class);
Observation observation = Observation.start("filtered", observationRegistry);
observation.stop();
observation.getContext().getLowCardinalityKeyValues().forEach((kv) -> System.out.println(kv.getKey()));
KeyValue filter = observation.getContext().getLowCardinalityKeyValue("filter");
assertThat(filter).isNotNull();
assertThat(filter.getValue()).isEqualTo("one");
});
}
@Test
void shouldSupplyPropertiesObservationFilterBean() {
this.contextRunner
.run((context) -> assertThat(context).hasSingleBean(PropertiesObservationFilterPredicate.class));
}
@Test
void shouldApplyCommonKeyValuesToObservations() {
this.contextRunner.withPropertyValues("management.observations.key-values.a=alpha").run((context) -> {
ObservationRegistry observationRegistry = context.getBean(ObservationRegistry.class);
Observation observation = Observation.start("keyvalues", observationRegistry);
observation.stop();
KeyValue a = observation.getContext().getLowCardinalityKeyValue("a");
assertThat(a).isNotNull();
assertThat(a.getValue()).isEqualTo("alpha");
});
}
@Test
void autoConfiguresGlobalObservationConventions() {
this.contextRunner.withUserConfiguration(CustomGlobalObservationConvention.class).run((context) -> {
ObservationRegistry observationRegistry = context.getBean(ObservationRegistry.class);
Context micrometerContext = new Context();
Observation.start("test-observation", () -> micrometerContext, observationRegistry).stop();
assertThat(micrometerContext.getAllKeyValues()).containsExactly(KeyValue.of("key1", "value1"));
});
}
@Test
void shouldNotDisableSpringSecurityObservationsByDefault() {
this.contextRunner.run((context) -> {
ObservationRegistry observationRegistry = context.getBean(ObservationRegistry.class);
Observation observation = Observation.start("spring.security.filterchains", observationRegistry);
assertThat(observation.isNoop()).isFalse();
observation.stop();
});
}
@Test
void shouldDisableSpringSecurityObservationsIfPropertyIsSet() {
this.contextRunner.withPropertyValues("management.observations.enable.spring.security=false").run((context) -> {
ObservationRegistry observationRegistry = context.getBean(ObservationRegistry.class);
Observation observation = Observation.start("spring.security.filterchains", observationRegistry);
assertThat(observation.isNoop()).isTrue();
observation.stop();
});
}
@Test
void autoConfiguresValueExpressionResolver() {
this.contextRunner.run((context) -> assertThat(context).hasSingleBean(SpelValueExpressionResolver.class));
}
@Test
void allowsUserDefinedValueExpressionResolver() {
this.contextRunner.withBean(ValueExpressionResolver.class, () -> mock(ValueExpressionResolver.class))
.run((context) -> assertThat(context).hasSingleBean(ValueExpressionResolver.class)
.doesNotHaveBean(SpelValueExpressionResolver.class));
}
@Configuration(proxyBeanMethods = false)
static | ObservationAutoConfigurationTests |
java | google__dagger | javatests/dagger/internal/codegen/InjectConstructorFactoryGeneratorTest.java | {
"start": 46219,
"end": 46836
} | class ____ {",
" @Inject void inject(Produced<String> str) {}",
"}");
daggerCompiler(file)
.compile(
subject -> {
subject.hasErrorCount(1);
subject.hasErrorContaining("Produced may only be injected in @Produces methods");
});
}
@Test public void injectMethodDependsOnProducer() {
Source file =
CompilerTests.javaSource(
"test.A",
"package test;",
"",
"import dagger.producers.Producer;",
"import javax.inject.Inject;",
"",
"final | A |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/InfiniteRecursionTest.java | {
"start": 4151,
"end": 4526
} | class ____ {
Test f() {
new Test();
// BUG: Diagnostic contains:
f();
return this;
}
}
""")
.doTest();
}
@Test
public void negativeDelegate() {
compilationHelper
.addSourceLines(
"Test.java",
"""
| Test |
java | spring-projects__spring-framework | spring-jdbc/src/main/java/org/springframework/jdbc/core/SimplePropertyRowMapper.java | {
"start": 2910,
"end": 3408
} | class ____ similar to
* {@link org.springframework.jdbc.core.namedparam.SimplePropertySqlParameterSource}
* and is similarly used for {@link org.springframework.jdbc.core.simple.JdbcClient}.
*
* @author Juergen Hoeller
* @since 6.1
* @param <T> the result type
* @see DataClassRowMapper
* @see BeanPropertyRowMapper
* @see org.springframework.jdbc.core.simple.JdbcClient.StatementSpec#query(Class)
* @see org.springframework.jdbc.core.namedparam.SimplePropertySqlParameterSource
*/
public | is |
java | apache__camel | components/camel-aws/camel-aws2-s3/src/main/java/org/apache/camel/component/aws2/s3/client/impl/AWS2S3ClientStandardImpl.java | {
"start": 1858,
"end": 5181
} | class ____ implements AWS2CamelS3InternalClient {
private static final Logger LOG = LoggerFactory.getLogger(AWS2S3ClientStandardImpl.class);
private AWS2S3Configuration configuration;
/**
* Constructor that uses the config file.
*/
public AWS2S3ClientStandardImpl(AWS2S3Configuration configuration) {
LOG.trace("Creating an AWS S3 manager using static credentials.");
this.configuration = configuration;
}
/**
* Getting the s3 aws client that is used.
*
* @return Amazon S3 Client.
*/
@Override
public S3Client getS3Client() {
S3Client client = null;
S3ClientBuilder clientBuilder = S3Client.builder();
ProxyConfiguration.Builder proxyConfig = null;
ApacheHttpClient.Builder httpClientBuilder = null;
boolean isClientConfigFound = false;
if (ObjectHelper.isNotEmpty(configuration.getProxyHost()) && ObjectHelper.isNotEmpty(configuration.getProxyPort())) {
proxyConfig = ProxyConfiguration.builder();
URI proxyEndpoint = URI.create(configuration.getProxyProtocol() + "://" + configuration.getProxyHost() + ":"
+ configuration.getProxyPort());
proxyConfig.endpoint(proxyEndpoint);
httpClientBuilder = ApacheHttpClient.builder().proxyConfiguration(proxyConfig.build());
isClientConfigFound = true;
}
if (configuration.getAccessKey() != null && configuration.getSecretKey() != null) {
AwsBasicCredentials cred = AwsBasicCredentials.create(configuration.getAccessKey(), configuration.getSecretKey());
if (isClientConfigFound) {
clientBuilder = clientBuilder.httpClientBuilder(httpClientBuilder)
.credentialsProvider(StaticCredentialsProvider.create(cred));
} else {
clientBuilder = clientBuilder.credentialsProvider(StaticCredentialsProvider.create(cred));
}
} else {
if (!isClientConfigFound) {
clientBuilder = clientBuilder.httpClientBuilder(httpClientBuilder);
}
}
if (ObjectHelper.isNotEmpty(configuration.getRegion())) {
clientBuilder = clientBuilder.region(Region.of(configuration.getRegion()));
}
if (configuration.isOverrideEndpoint()) {
clientBuilder.endpointOverride(URI.create(configuration.getUriEndpointOverride()));
}
if (configuration.isForcePathStyle()) {
clientBuilder.forcePathStyle(true);
}
if (configuration.isTrustAllCertificates()) {
if (httpClientBuilder == null) {
httpClientBuilder = ApacheHttpClient.builder();
}
SdkHttpClient ahc = httpClientBuilder.buildWithDefaults(AttributeMap
.builder()
.put(
SdkHttpConfigurationOption.TRUST_ALL_CERTIFICATES,
Boolean.TRUE)
.build());
// set created http client to use instead of builder
clientBuilder.httpClient(ahc);
clientBuilder.httpClientBuilder(null);
}
client = clientBuilder.build();
return client;
}
}
| AWS2S3ClientStandardImpl |
java | reactor__reactor-core | reactor-core/src/test/java/reactor/core/publisher/FluxReplayTest.java | {
"start": 27825,
"end": 28347
} | class ____ extends BaseSubscriber<Integer> {
final long firstRequest;
final long secondRequest;
private TwoRequestsSubscriber(long firstRequest, long secondRequest) {
this.firstRequest = firstRequest;
this.secondRequest = secondRequest;
}
@Override
protected void hookOnSubscribe(Subscription subscription) {
request(firstRequest);
}
@Override
protected void hookOnNext(Integer value) {
if (value.longValue() == firstRequest) {
request(secondRequest);
}
}
}
}
| TwoRequestsSubscriber |
java | micronaut-projects__micronaut-core | http-server-netty/src/main/java/io/micronaut/http/server/netty/HttpContentProcessorAsReactiveProcessor.java | {
"start": 1170,
"end": 1358
} | class ____ archived in
* <a href="https://github.com/micronaut-projects/micronaut-core/pull/8463">the original PR</a>.
*
* @since 4.0.0
* @author Jonas Konrad
*/
@Internal
public final | is |
java | apache__camel | components/camel-hl7/src/test/java/org/apache/camel/component/hl7/HL7MLLPCodecStandAndEndBytesTest.java | {
"start": 1326,
"end": 4182
} | class ____ extends HL7TestSupport {
@BindToRegistry("hl7codec")
public HL7MLLPCodec addCodec() {
HL7MLLPCodec codec = new HL7MLLPCodec();
codec.setCharset("iso-8859-1");
// to test with different start and end bytes.
codec.setStartByte('*');
codec.setEndByte1('#');
codec.setEndByte2('*');
codec.setConvertLFtoCR(false);
return codec;
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from("mina:tcp://127.0.0.1:" + getPort() + "?sync=true&codec=#hl7codec").process(exchange -> {
Message input = exchange.getIn().getBody(Message.class);
assertEquals("2.4", input.getVersion());
QRD qrd = (QRD) input.get("QRD");
assertEquals("0101701234", qrd.getWhoSubjectFilter(0).getIDNumber().getValue());
Message response = createHL7AsMessage();
exchange.getMessage().setBody(response);
}).to("mock:result");
}
};
}
@Test
public void testSendHL7Message() {
String line1 = "MSH|^~\\&|MYSENDER|MYRECEIVER|MYAPPLICATION||200612211200||QRY^A19|1234|P|2.4";
String line2 = "QRD|200612211200|R|I|GetPatient|||1^RD|0101701234|DEM||";
StringBuilder in = new StringBuilder();
in.append(line1);
in.append("\r");
in.append(line2);
String out = template.requestBody("mina:tcp://127.0.0.1:" + getPort() + "?sync=true&codec=#hl7codec", in.toString(),
String.class);
String[] lines = out.split("\r");
assertEquals("MSH|^~\\&|MYSENDER||||200701011539||ADR^A19||||123", lines[0]);
assertEquals("MSA|AA|123", lines[1]);
}
private static Message createHL7AsMessage() throws Exception {
ADR_A19 adr = new ADR_A19();
// Populate the MSH Segment
MSH mshSegment = adr.getMSH();
mshSegment.getFieldSeparator().setValue("|");
mshSegment.getEncodingCharacters().setValue("^~\\&");
mshSegment.getDateTimeOfMessage().getTimeOfAnEvent().setValue("200701011539");
mshSegment.getSendingApplication().getNamespaceID().setValue("MYSENDER");
mshSegment.getSequenceNumber().setValue("123");
mshSegment.getMessageType().getMessageType().setValue("ADR");
mshSegment.getMessageType().getTriggerEvent().setValue("A19");
// Populate the PID Segment
MSA msa = adr.getMSA();
msa.getAcknowledgementCode().setValue("AA");
msa.getMessageControlID().setValue("123");
QRD qrd = adr.getQRD();
qrd.getQueryDateTime().getTimeOfAnEvent().setValue("20080805120000");
return adr;
}
}
| HL7MLLPCodecStandAndEndBytesTest |
java | apache__camel | components/camel-aws/camel-aws2-ec2/src/test/java/org/apache/camel/component/aws2/ec2/AWS2EC2ClientFactoryTest.java | {
"start": 1351,
"end": 2785
} | class ____ {
@Test
public void getStandardEC2ClientDefault() {
AWS2EC2Configuration ec2Configuration = new AWS2EC2Configuration();
AWS2EC2InternalClient ec2Client = AWS2EC2ClientFactory.getEc2Client(ec2Configuration);
assertTrue(ec2Client instanceof AWS2EC2ClientStandardImpl);
}
@Test
public void getStandardEC2Client() {
AWS2EC2Configuration ec2Configuration = new AWS2EC2Configuration();
ec2Configuration.setUseDefaultCredentialsProvider(false);
AWS2EC2InternalClient ec2Client = AWS2EC2ClientFactory.getEc2Client(ec2Configuration);
assertTrue(ec2Client instanceof AWS2EC2ClientStandardImpl);
}
@Test
public void getIAMOptimizedEC2Client() {
AWS2EC2Configuration ec2Configuration = new AWS2EC2Configuration();
ec2Configuration.setUseDefaultCredentialsProvider(true);
AWS2EC2InternalClient ec2Client = AWS2EC2ClientFactory.getEc2Client(ec2Configuration);
assertTrue(ec2Client instanceof AWS2EC2ClientIAMOptimizedImpl);
}
@Test
public void getSessionTokenEC2Client() {
AWS2EC2Configuration ec2Configuration = new AWS2EC2Configuration();
ec2Configuration.setUseSessionCredentials(true);
AWS2EC2InternalClient ec2Client = AWS2EC2ClientFactory.getEc2Client(ec2Configuration);
assertTrue(ec2Client instanceof AWS2EC2ClientSessionTokenImpl);
}
}
| AWS2EC2ClientFactoryTest |
java | spring-projects__spring-framework | spring-tx/src/main/java/org/springframework/transaction/support/SimpleTransactionStatus.java | {
"start": 1417,
"end": 2002
} | class ____ extends AbstractTransactionStatus {
private final boolean newTransaction;
/**
* Create a new {@code SimpleTransactionStatus} instance,
* indicating a new transaction.
*/
public SimpleTransactionStatus() {
this(true);
}
/**
* Create a new {@code SimpleTransactionStatus} instance.
* @param newTransaction whether to indicate a new transaction
*/
public SimpleTransactionStatus(boolean newTransaction) {
this.newTransaction = newTransaction;
}
@Override
public boolean isNewTransaction() {
return this.newTransaction;
}
}
| SimpleTransactionStatus |
java | elastic__elasticsearch | x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceTests.java | {
"start": 6033,
"end": 55466
} | class ____ extends ESSingleNodeTestCase {
private static final TimeValue TIMEOUT = new TimeValue(30, TimeUnit.SECONDS);
private final MockWebServer webServer = new MockWebServer();
private ThreadPool threadPool;
private HttpClientManager clientManager;
@Override
protected Collection<Class<? extends Plugin>> getPlugins() {
return List.of(LocalStateInferencePlugin.class);
}
@Before
public void init() throws Exception {
webServer.start();
threadPool = createThreadPool(inferenceUtilityExecutors());
clientManager = HttpClientManager.create(Settings.EMPTY, threadPool, mockClusterServiceEmpty(), mock(ThrottlerManager.class));
}
@After
public void shutdown() throws IOException {
clientManager.close();
terminate(threadPool);
webServer.close();
}
public void testParseRequestConfig_CreatesASparseEmbeddingsModel() throws IOException {
try (var service = createServiceWithMockSender()) {
ActionListener<Model> modelListener = ActionListener.wrap(model -> {
assertThat(model, instanceOf(ElasticInferenceServiceSparseEmbeddingsModel.class));
var completionModel = (ElasticInferenceServiceSparseEmbeddingsModel) model;
assertThat(completionModel.getServiceSettings().modelId(), is(ElserModels.ELSER_V2_MODEL));
}, e -> fail("Model parsing should have succeeded, but failed: " + e.getMessage()));
service.parseRequestConfig(
"id",
TaskType.SPARSE_EMBEDDING,
getRequestConfigMap(Map.of(ServiceFields.MODEL_ID, ElserModels.ELSER_V2_MODEL), Map.of(), Map.of()),
modelListener
);
}
}
public void testParseRequestConfig_CreatesARerankModel() throws IOException {
try (var service = createServiceWithMockSender()) {
ActionListener<Model> modelListener = ActionListener.wrap(model -> {
assertThat(model, instanceOf(ElasticInferenceServiceRerankModel.class));
ElasticInferenceServiceRerankModel rerankModel = (ElasticInferenceServiceRerankModel) model;
assertThat(rerankModel.getServiceSettings().modelId(), is("my-rerank-model-id"));
}, e -> fail("Model parsing should have succeeded, but failed: " + e.getMessage()));
service.parseRequestConfig(
"id",
TaskType.RERANK,
getRequestConfigMap(Map.of(ServiceFields.MODEL_ID, "my-rerank-model-id"), Map.of(), Map.of()),
modelListener
);
}
}
public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInConfig() throws IOException {
try (var service = createServiceWithMockSender()) {
var config = getRequestConfigMap(Map.of(ServiceFields.MODEL_ID, ElserModels.ELSER_V2_MODEL), Map.of(), Map.of());
config.put("extra_key", "value");
var failureListener = getModelListenerForException(
ElasticsearchStatusException.class,
"Configuration contains settings [{extra_key=value}] unknown to the [elastic] service"
);
service.parseRequestConfig("id", TaskType.SPARSE_EMBEDDING, config, failureListener);
}
}
public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInServiceSettingsMap() throws IOException {
try (var service = createServiceWithMockSender()) {
Map<String, Object> serviceSettings = new HashMap<>(Map.of(ServiceFields.MODEL_ID, ElserModels.ELSER_V2_MODEL));
serviceSettings.put("extra_key", "value");
var config = getRequestConfigMap(serviceSettings, Map.of(), Map.of());
var failureListener = getModelListenerForException(
ElasticsearchStatusException.class,
"Configuration contains settings [{extra_key=value}] unknown to the [elastic] service"
);
service.parseRequestConfig("id", TaskType.SPARSE_EMBEDDING, config, failureListener);
}
}
public void testParseRequestConfig_ThrowsWhenRateLimitFieldExistsInServiceSettingsMap() throws IOException {
try (var service = createServiceWithMockSender()) {
Map<String, Object> serviceSettings = new HashMap<>(
Map.of(
ServiceFields.MODEL_ID,
ElserModels.ELSER_V2_MODEL,
RateLimitSettings.FIELD_NAME,
new HashMap<>(Map.of(RateLimitSettings.REQUESTS_PER_MINUTE_FIELD, 100))
)
);
var config = getRequestConfigMap(serviceSettings, Map.of(), Map.of());
var failureListener = getModelListenerForException(
ValidationException.class,
"Validation Failed: 1: [service_settings] rate limit settings are not permitted for "
+ "service [elastic] and task type [sparse_embedding];"
);
service.parseRequestConfig("id", TaskType.SPARSE_EMBEDDING, config, failureListener);
}
}
public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInTaskSettingsMap() throws IOException {
try (var service = createServiceWithMockSender()) {
var taskSettings = Map.of("extra_key", (Object) "value");
var config = getRequestConfigMap(Map.of(ServiceFields.MODEL_ID, ElserModels.ELSER_V2_MODEL), taskSettings, Map.of());
var failureListener = getModelListenerForException(
ElasticsearchStatusException.class,
"Configuration contains settings [{extra_key=value}] unknown to the [elastic] service"
);
service.parseRequestConfig("id", TaskType.SPARSE_EMBEDDING, config, failureListener);
}
}
public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInSecretSettingsMap() throws IOException {
try (var service = createServiceWithMockSender()) {
var secretSettings = Map.of("extra_key", (Object) "value");
var config = getRequestConfigMap(Map.of(ServiceFields.MODEL_ID, ElserModels.ELSER_V2_MODEL), Map.of(), secretSettings);
var failureListener = getModelListenerForException(
ElasticsearchStatusException.class,
"Configuration contains settings [{extra_key=value}] unknown to the [elastic] service"
);
service.parseRequestConfig("id", TaskType.SPARSE_EMBEDDING, config, failureListener);
}
}
public void testParsePersistedConfigWithSecrets_CreatesASparseEmbeddingModel() throws IOException {
try (var service = createServiceWithMockSender()) {
var persistedConfig = getPersistedConfigMap(
new HashMap<>(Map.of(ServiceFields.MODEL_ID, ElserModels.ELSER_V2_MODEL)),
Map.of(),
Map.of()
);
var model = service.parsePersistedConfigWithSecrets(
"id",
TaskType.SPARSE_EMBEDDING,
persistedConfig.config(),
persistedConfig.secrets()
);
assertThat(model, instanceOf(ElasticInferenceServiceSparseEmbeddingsModel.class));
var sparseEmbeddingsModel = (ElasticInferenceServiceSparseEmbeddingsModel) model;
assertThat(sparseEmbeddingsModel.getServiceSettings().modelId(), is(ElserModels.ELSER_V2_MODEL));
assertThat(sparseEmbeddingsModel.getTaskSettings(), is(EmptyTaskSettings.INSTANCE));
assertThat(sparseEmbeddingsModel.getSecretSettings(), is(EmptySecretSettings.INSTANCE));
}
}
public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExistsInConfig() throws IOException {
try (var service = createServiceWithMockSender()) {
var persistedConfig = getPersistedConfigMap(
new HashMap<>(Map.of(ServiceFields.MODEL_ID, ElserModels.ELSER_V2_MODEL)),
Map.of(),
Map.of()
);
persistedConfig.config().put("extra_key", "value");
var model = service.parsePersistedConfigWithSecrets(
"id",
TaskType.SPARSE_EMBEDDING,
persistedConfig.config(),
persistedConfig.secrets()
);
assertThat(model, instanceOf(ElasticInferenceServiceSparseEmbeddingsModel.class));
var completionModel = (ElasticInferenceServiceSparseEmbeddingsModel) model;
assertThat(completionModel.getServiceSettings().modelId(), is(ElserModels.ELSER_V2_MODEL));
assertThat(completionModel.getTaskSettings(), is(EmptyTaskSettings.INSTANCE));
assertThat(completionModel.getSecretSettings(), is(EmptySecretSettings.INSTANCE));
}
}
public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExistsInServiceSettings() throws IOException {
try (var service = createServiceWithMockSender()) {
Map<String, Object> serviceSettingsMap = new HashMap<>(Map.of(ServiceFields.MODEL_ID, ElserModels.ELSER_V2_MODEL));
serviceSettingsMap.put("extra_key", "value");
var persistedConfig = getPersistedConfigMap(serviceSettingsMap, Map.of(), Map.of());
var model = service.parsePersistedConfigWithSecrets(
"id",
TaskType.SPARSE_EMBEDDING,
persistedConfig.config(),
persistedConfig.secrets()
);
assertThat(model, instanceOf(ElasticInferenceServiceSparseEmbeddingsModel.class));
var completionModel = (ElasticInferenceServiceSparseEmbeddingsModel) model;
assertThat(completionModel.getServiceSettings().modelId(), is(ElserModels.ELSER_V2_MODEL));
assertThat(completionModel.getTaskSettings(), is(EmptyTaskSettings.INSTANCE));
assertThat(completionModel.getSecretSettings(), is(EmptySecretSettings.INSTANCE));
}
}
public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenRateLimitFieldExistsInServiceSettings() throws IOException {
try (var service = createServiceWithMockSender()) {
Map<String, Object> serviceSettingsMap = new HashMap<>(
Map.of(
ServiceFields.MODEL_ID,
ElserModels.ELSER_V2_MODEL,
RateLimitSettings.FIELD_NAME,
new HashMap<>(Map.of(RateLimitSettings.REQUESTS_PER_MINUTE_FIELD, 100))
)
);
var persistedConfig = getPersistedConfigMap(serviceSettingsMap, Map.of(), Map.of());
var model = service.parsePersistedConfigWithSecrets(
"id",
TaskType.SPARSE_EMBEDDING,
persistedConfig.config(),
persistedConfig.secrets()
);
assertThat(model, instanceOf(ElasticInferenceServiceSparseEmbeddingsModel.class));
var parsedModel = (ElasticInferenceServiceSparseEmbeddingsModel) model;
assertThat(parsedModel.getServiceSettings().modelId(), is(ElserModels.ELSER_V2_MODEL));
assertThat(parsedModel.getTaskSettings(), is(EmptyTaskSettings.INSTANCE));
assertThat(parsedModel.getSecretSettings(), is(EmptySecretSettings.INSTANCE));
assertThat(
serviceSettingsMap,
is(Map.of(RateLimitSettings.FIELD_NAME, Map.of(RateLimitSettings.REQUESTS_PER_MINUTE_FIELD, 100)))
);
}
}
public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExistsInTaskSettings() throws IOException {
try (var service = createServiceWithMockSender()) {
var taskSettings = Map.of("extra_key", (Object) "value");
var persistedConfig = getPersistedConfigMap(
new HashMap<>(Map.of(ServiceFields.MODEL_ID, ElserModels.ELSER_V2_MODEL)),
taskSettings,
Map.of()
);
var model = service.parsePersistedConfigWithSecrets(
"id",
TaskType.SPARSE_EMBEDDING,
persistedConfig.config(),
persistedConfig.secrets()
);
assertThat(model, instanceOf(ElasticInferenceServiceSparseEmbeddingsModel.class));
var completionModel = (ElasticInferenceServiceSparseEmbeddingsModel) model;
assertThat(completionModel.getServiceSettings().modelId(), is(ElserModels.ELSER_V2_MODEL));
assertThat(completionModel.getTaskSettings(), is(EmptyTaskSettings.INSTANCE));
assertThat(completionModel.getSecretSettings(), is(EmptySecretSettings.INSTANCE));
}
}
public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExistsInSecretsSettings() throws IOException {
try (var service = createServiceWithMockSender()) {
var secretSettingsMap = Map.of("extra_key", (Object) "value");
var persistedConfig = getPersistedConfigMap(
new HashMap<>(Map.of(ServiceFields.MODEL_ID, ElserModels.ELSER_V2_MODEL)),
Map.of(),
secretSettingsMap
);
var model = service.parsePersistedConfigWithSecrets(
"id",
TaskType.SPARSE_EMBEDDING,
persistedConfig.config(),
persistedConfig.secrets()
);
assertThat(model, instanceOf(ElasticInferenceServiceSparseEmbeddingsModel.class));
var completionModel = (ElasticInferenceServiceSparseEmbeddingsModel) model;
assertThat(completionModel.getServiceSettings().modelId(), is(ElserModels.ELSER_V2_MODEL));
assertThat(completionModel.getTaskSettings(), is(EmptyTaskSettings.INSTANCE));
assertThat(completionModel.getSecretSettings(), is(EmptySecretSettings.INSTANCE));
}
}
public void testInfer_ThrowsErrorWhenModelIsNotAValidModel() throws IOException {
var sender = createMockSender();
var factory = mock(HttpRequestSender.Factory.class);
when(factory.createSender()).thenReturn(sender);
var mockModel = getInvalidModel("model_id", "service_name", TaskType.SPARSE_EMBEDDING);
try (var service = createService(factory)) {
PlainActionFuture<InferenceServiceResults> listener = new PlainActionFuture<>();
service.infer(
mockModel,
null,
null,
null,
List.of(""),
false,
new HashMap<>(),
InputType.INGEST,
InferenceAction.Request.DEFAULT_TIMEOUT,
listener
);
var thrownException = expectThrows(ElasticsearchStatusException.class, () -> listener.actionGet(TIMEOUT));
MatcherAssert.assertThat(
thrownException.getMessage(),
is("The internal model was invalid, please delete the service [service_name] with id [model_id] and add it again.")
);
verify(factory, times(1)).createSender();
verify(sender, times(1)).startAsynchronously(any());
}
verify(sender, times(1)).close();
verifyNoMoreInteractions(factory);
verifyNoMoreInteractions(sender);
}
public void testInfer_ThrowsValidationErrorForInvalidRerankParams() throws IOException {
try (var service = createServiceWithMockSender()) {
var model = ElasticInferenceServiceRerankModelTests.createModel(getUrl(webServer), "my-rerank-model-id");
PlainActionFuture<InferenceServiceResults> listener = new PlainActionFuture<>();
service.infer(
model,
"search query",
Boolean.TRUE,
10,
List.of("doc1", "doc2", "doc3"),
false,
new HashMap<>(),
InputType.SEARCH,
InferenceAction.Request.DEFAULT_TIMEOUT,
listener
);
var thrownException = expectThrows(ValidationException.class, () -> listener.actionGet(TIMEOUT));
assertThat(
thrownException.getMessage(),
is("Validation Failed: 1: Invalid return_documents [true]. The return_documents option is not supported by this service;")
);
}
}
public void testInfer_ThrowsErrorWhenTaskTypeIsNotValid_ChatCompletion() throws IOException {
var sender = createMockSender();
var factory = mock(HttpRequestSender.Factory.class);
when(factory.createSender()).thenReturn(sender);
var mockModel = getInvalidModel("model_id", "service_name", TaskType.CHAT_COMPLETION);
try (var service = createService(factory)) {
PlainActionFuture<InferenceServiceResults> listener = new PlainActionFuture<>();
service.infer(
mockModel,
null,
null,
null,
List.of(""),
false,
new HashMap<>(),
InputType.INGEST,
InferenceAction.Request.DEFAULT_TIMEOUT,
listener
);
var thrownException = expectThrows(ElasticsearchStatusException.class, () -> listener.actionGet(TIMEOUT));
MatcherAssert.assertThat(
thrownException.getMessage(),
is(
"Inference entity [model_id] does not support task type [chat_completion] "
+ "for inference, the task type must be one of [text_embedding, sparse_embedding, rerank, completion]. "
+ "The task type for the inference entity is chat_completion, "
+ "please use the _inference/chat_completion/model_id/_stream URL."
)
);
verify(factory, times(1)).createSender();
verify(sender, times(1)).startAsynchronously(any());
}
verify(sender, times(1)).close();
verifyNoMoreInteractions(factory);
verifyNoMoreInteractions(sender);
}
public void testInfer_SendsEmbeddingsRequest() throws IOException {
var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager);
var elasticInferenceServiceURL = getUrl(webServer);
try (var service = createService(senderFactory, elasticInferenceServiceURL)) {
String responseJson = """
{
"data": [
{
"hello": 2.1259406,
"greet": 1.7073475
}
]
}
""";
webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson));
var model = ElasticInferenceServiceSparseEmbeddingsModelTests.createModel(elasticInferenceServiceURL, "my-model-id");
PlainActionFuture<InferenceServiceResults> listener = new PlainActionFuture<>();
service.infer(
model,
null,
null,
null,
List.of("input text"),
false,
new HashMap<>(),
InputType.SEARCH,
InferenceAction.Request.DEFAULT_TIMEOUT,
listener
);
var result = listener.actionGet(TIMEOUT);
assertThat(
result.asMap(),
Matchers.is(
SparseEmbeddingResultsTests.buildExpectationSparseEmbeddings(
List.of(
new SparseEmbeddingResultsTests.EmbeddingExpectation(Map.of("hello", 2.1259406f, "greet", 1.7073475f), false)
)
)
)
);
var request = webServer.requests().getFirst();
assertNull(request.getUri().getQuery());
assertThat(request.getHeader(HttpHeaders.CONTENT_TYPE), Matchers.equalTo(XContentType.JSON.mediaType()));
var requestMap = entityAsMap(request.getBody());
assertThat(requestMap, is(Map.of("input", List.of("input text"), "model", "my-model-id", "usage_context", "search")));
}
}
@SuppressWarnings("unchecked")
public void testRerank_SendsRerankRequest() throws IOException {
var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager);
var elasticInferenceServiceURL = getUrl(webServer);
try (var service = createService(senderFactory, elasticInferenceServiceURL)) {
var modelId = "my-model-id";
var topN = 2;
String responseJson = """
{
"results": [
{"index": 0, "relevance_score": 0.95},
{"index": 1, "relevance_score": 0.85},
{"index": 2, "relevance_score": 0.75}
]
}
""";
webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson));
var model = ElasticInferenceServiceRerankModelTests.createModel(elasticInferenceServiceURL, modelId);
PlainActionFuture<InferenceServiceResults> listener = new PlainActionFuture<>();
service.infer(
model,
"search query",
null,
topN,
List.of("doc1", "doc2", "doc3"),
false,
new HashMap<>(),
InputType.SEARCH,
InferenceAction.Request.DEFAULT_TIMEOUT,
listener
);
var result = listener.actionGet(TIMEOUT);
var resultMap = result.asMap();
var rerankResults = (List<Map<String, Object>>) resultMap.get("rerank");
assertThat(rerankResults.size(), Matchers.is(3));
Map<String, Object> rankedDocOne = (Map<String, Object>) rerankResults.get(0).get("ranked_doc");
Map<String, Object> rankedDocTwo = (Map<String, Object>) rerankResults.get(1).get("ranked_doc");
Map<String, Object> rankedDocThree = (Map<String, Object>) rerankResults.get(2).get("ranked_doc");
assertThat(rankedDocOne.get("index"), equalTo(0));
assertThat(rankedDocTwo.get("index"), equalTo(1));
assertThat(rankedDocThree.get("index"), equalTo(2));
// Verify the outgoing HTTP request
var request = webServer.requests().getFirst();
assertNull(request.getUri().getQuery());
assertThat(request.getHeader(HttpHeaders.CONTENT_TYPE), Matchers.equalTo(XContentType.JSON.mediaType()));
// Verify the outgoing request body
Map<String, Object> requestMap = entityAsMap(request.getBody());
Map<String, Object> expectedRequestMap = Map.of(
"query",
"search query",
"model",
modelId,
"top_n",
topN,
"documents",
List.of("doc1", "doc2", "doc3")
);
assertThat(requestMap, is(expectedRequestMap));
}
}
public void testInfer_PropagatesProductUseCaseHeader() throws IOException {
var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager);
var elasticInferenceServiceURL = getUrl(webServer);
try (var service = createService(senderFactory, elasticInferenceServiceURL)) {
String responseJson = """
{
"data": [
{
"hello": 2.1259406,
"greet": 1.7073475
}
]
}
""";
webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson));
// Set up the product use case in the thread context
String productUseCase = "test-product-use-case";
threadPool.getThreadContext().putHeader(InferencePlugin.X_ELASTIC_PRODUCT_USE_CASE_HTTP_HEADER, productUseCase);
var model = ElasticInferenceServiceSparseEmbeddingsModelTests.createModel(elasticInferenceServiceURL, "my-model-id");
PlainActionFuture<InferenceServiceResults> listener = new PlainActionFuture<>();
try {
service.infer(
model,
null,
null,
null,
List.of("input text"),
false,
new HashMap<>(),
InputType.SEARCH,
InferenceAction.Request.DEFAULT_TIMEOUT,
listener
);
var result = listener.actionGet(TIMEOUT);
// Verify the response was processed correctly
assertThat(
result.asMap(),
Matchers.is(
SparseEmbeddingResultsTests.buildExpectationSparseEmbeddings(
List.of(
new SparseEmbeddingResultsTests.EmbeddingExpectation(
Map.of("hello", 2.1259406f, "greet", 1.7073475f),
false
)
)
)
)
);
// Verify the header was sent in the request
var request = webServer.requests().getFirst();
assertNull(request.getUri().getQuery());
assertThat(request.getHeader(HttpHeaders.CONTENT_TYPE), Matchers.equalTo(XContentType.JSON.mediaType()));
// Check that the product use case header was set correctly
var productUseCaseHeaders = request.getHeaders().get(InferencePlugin.X_ELASTIC_PRODUCT_USE_CASE_HTTP_HEADER);
assertThat(productUseCaseHeaders, contains("internal_search", productUseCase));
// Verify request body
var requestMap = entityAsMap(request.getBody());
assertThat(requestMap, is(Map.of("input", List.of("input text"), "model", "my-model-id", "usage_context", "search")));
} finally {
// Clean up the thread context
threadPool.getThreadContext().stashContext();
}
}
}
public void testUnifiedCompletionInfer_PropagatesProductUseCaseHeader() throws IOException {
var elasticInferenceServiceURL = getUrl(webServer);
var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager);
try (var service = createService(senderFactory, elasticInferenceServiceURL)) {
// Mock a successful streaming response
String responseJson = """
data: {"id":"1","object":"completion","created":1677858242,"model":"my-model-id",
"choices":[{"finish_reason":null,"index":0,"delta":{"role":"assistant","content":"Hello"}}]}
data: {"id":"2","object":"completion","created":1677858242,"model":"my-model-id",
"choices":[{"finish_reason":"stop","index":0,"delta":{"content":" world!"}}]}
data: [DONE]
""";
webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson));
String productUseCase = "test-product-use-case";
threadPool.getThreadContext().putHeader(InferencePlugin.X_ELASTIC_PRODUCT_USE_CASE_HTTP_HEADER, productUseCase);
// Create completion model
var model = new ElasticInferenceServiceCompletionModel(
"id",
TaskType.CHAT_COMPLETION,
"elastic",
new ElasticInferenceServiceCompletionServiceSettings("my-model-id"),
EmptyTaskSettings.INSTANCE,
EmptySecretSettings.INSTANCE,
ElasticInferenceServiceComponents.of(elasticInferenceServiceURL)
);
var request = UnifiedCompletionRequest.of(
List.of(new UnifiedCompletionRequest.Message(new UnifiedCompletionRequest.ContentString("Hello"), "user", null, null))
);
PlainActionFuture<InferenceServiceResults> listener = new PlainActionFuture<>();
try {
service.unifiedCompletionInfer(model, request, InferenceAction.Request.DEFAULT_TIMEOUT, listener);
// We don't need to check the actual response as we're only testing header propagation
listener.actionGet(TIMEOUT);
// Verify the request was sent
assertThat(webServer.requests(), hasSize(1));
var httpRequest = webServer.requests().getFirst();
// Check that the product use case header was set correctly
assertThat(httpRequest.getHeader(InferencePlugin.X_ELASTIC_PRODUCT_USE_CASE_HTTP_HEADER), is(productUseCase));
} finally {
// Clean up the thread context
threadPool.getThreadContext().stashContext();
}
}
}
public void testChunkedInfer_PropagatesProductUseCaseHeader() throws IOException {
var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager);
try (var service = createService(senderFactory, getUrl(webServer))) {
// Batching will call the service with 2 inputs
String responseJson = """
{
"data": [
[
0.123,
-0.456,
0.789
],
[
0.987,
-0.654,
0.321
]
],
"meta": {
"usage": {
"total_tokens": 10
}
}
}
""";
webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson));
var model = ElasticInferenceServiceDenseTextEmbeddingsModelTests.createModel(getUrl(webServer), "my-dense-model-id");
String productUseCase = "test-product-use-case";
threadPool.getThreadContext().putHeader(InferencePlugin.X_ELASTIC_PRODUCT_USE_CASE_HTTP_HEADER, productUseCase);
PlainActionFuture<List<ChunkedInference>> listener = new PlainActionFuture<>();
// 2 inputs
service.chunkedInfer(
model,
null,
List.of(new ChunkInferenceInput("hello world"), new ChunkInferenceInput("dense embedding")),
new HashMap<>(),
InputType.INGEST,
InferenceAction.Request.DEFAULT_TIMEOUT,
listener
);
var results = listener.actionGet(TIMEOUT);
assertThat(results, hasSize(2));
// Verify the response was processed correctly
ChunkedInference inferenceResult = results.getFirst();
assertThat(inferenceResult, instanceOf(ChunkedInferenceEmbedding.class));
// Verify the request was sent and contains expected headers
assertThat(webServer.requests(), hasSize(1));
var request = webServer.requests().getFirst();
assertNull(request.getUri().getQuery());
assertThat(request.getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType()));
// Check that the product use case header was set correctly
var productUseCaseHeaders = request.getHeaders().get(InferencePlugin.X_ELASTIC_PRODUCT_USE_CASE_HTTP_HEADER);
assertThat(productUseCaseHeaders, contains("internal_ingest", productUseCase));
} finally {
// Clean up the thread context
threadPool.getThreadContext().stashContext();
}
}
public void testChunkedInfer_BatchesCallsChunkingSettingsSet() throws IOException {
var model = ElasticInferenceServiceDenseTextEmbeddingsModelTests.createModel(getUrl(webServer), "my-dense-model-id");
var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager);
try (var service = createService(senderFactory, getUrl(webServer))) {
// Batching will call the service with 2 inputs
String responseJson = """
{
"data": [
[
0.123,
-0.456,
0.789
],
[
0.987,
-0.654,
0.321
]
],
"meta": {
"usage": {
"total_tokens": 10
}
}
}
""";
webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson));
PlainActionFuture<List<ChunkedInference>> listener = new PlainActionFuture<>();
// 2 inputs
service.chunkedInfer(
model,
null,
List.of(new ChunkInferenceInput("hello world"), new ChunkInferenceInput("dense embedding")),
new HashMap<>(),
InputType.INGEST,
InferenceAction.Request.DEFAULT_TIMEOUT,
listener
);
var results = listener.actionGet(TIMEOUT);
assertThat(results, hasSize(2));
// First result
{
assertThat(results.getFirst(), instanceOf(ChunkedInferenceEmbedding.class));
var denseResult = (ChunkedInferenceEmbedding) results.getFirst();
assertThat(denseResult.chunks(), hasSize(1));
assertEquals(new ChunkedInference.TextOffset(0, "hello world".length()), denseResult.chunks().getFirst().offset());
assertThat(denseResult.chunks().get(0).embedding(), instanceOf(DenseEmbeddingFloatResults.Embedding.class));
var embedding = (DenseEmbeddingFloatResults.Embedding) denseResult.chunks().get(0).embedding();
assertArrayEquals(new float[] { 0.123f, -0.456f, 0.789f }, embedding.values(), 0.0f);
}
// Second result
{
assertThat(results.get(1), instanceOf(ChunkedInferenceEmbedding.class));
var denseResult = (ChunkedInferenceEmbedding) results.get(1);
assertThat(denseResult.chunks(), hasSize(1));
assertEquals(new ChunkedInference.TextOffset(0, "dense embedding".length()), denseResult.chunks().getFirst().offset());
assertThat(denseResult.chunks().getFirst().embedding(), instanceOf(DenseEmbeddingFloatResults.Embedding.class));
var embedding = (DenseEmbeddingFloatResults.Embedding) denseResult.chunks().get(0).embedding();
assertArrayEquals(new float[] { 0.987f, -0.654f, 0.321f }, embedding.values(), 0.0f);
}
assertThat(webServer.requests(), hasSize(1));
assertNull(webServer.requests().getFirst().getUri().getQuery());
assertThat(webServer.requests().getFirst().getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType()));
var requestMap = entityAsMap(webServer.requests().getFirst().getBody());
MatcherAssert.assertThat(
requestMap,
is(Map.of("input", List.of("hello world", "dense embedding"), "model", "my-dense-model-id", "usage_context", "ingest"))
);
}
}
public void testHideFromConfigurationApi_ThrowsUnsupported_WithNoAvailableModels() throws Exception {
try (var service = createServiceWithMockSender(ElasticInferenceServiceAuthorizationModel.newDisabledService())) {
expectThrows(UnsupportedOperationException.class, service::hideFromConfigurationApi);
}
}
public void testHideFromConfigurationApi_ThrowsUnsupported_WithAvailableModels() throws Exception {
try (
var service = createServiceWithMockSender(
ElasticInferenceServiceAuthorizationModel.of(
new ElasticInferenceServiceAuthorizationResponseEntity(
List.of(
new ElasticInferenceServiceAuthorizationResponseEntity.AuthorizedModel(
"model-1",
EnumSet.of(TaskType.CHAT_COMPLETION)
)
)
)
)
)
) {
expectThrows(UnsupportedOperationException.class, service::hideFromConfigurationApi);
}
}
public void testCreateConfiguration() throws Exception {
String content = XContentHelper.stripWhitespace("""
{
"service": "elastic",
"name": "Elastic",
"task_types": ["sparse_embedding", "chat_completion", "text_embedding"],
"configurations": {
"model_id": {
"description": "The name of the model to use for the inference task.",
"label": "Model ID",
"required": true,
"sensitive": false,
"updatable": false,
"type": "str",
"supported_task_types": ["text_embedding", "sparse_embedding" , "rerank", "chat_completion"]
},
"max_input_tokens": {
"description": "Allows you to specify the maximum number of tokens per input.",
"label": "Maximum Input Tokens",
"required": false,
"sensitive": false,
"updatable": false,
"type": "int",
"supported_task_types": ["text_embedding", "sparse_embedding"]
}
}
}
""");
InferenceServiceConfiguration configuration = InferenceServiceConfiguration.fromXContentBytes(
new BytesArray(content),
XContentType.JSON
);
boolean humanReadable = true;
BytesReference originalBytes = toShuffledXContent(configuration, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable);
InferenceServiceConfiguration serviceConfiguration = ElasticInferenceService.createConfiguration(
EnumSet.of(TaskType.SPARSE_EMBEDDING, TaskType.CHAT_COMPLETION, TaskType.TEXT_EMBEDDING)
);
assertToXContentEquivalent(originalBytes, toXContent(serviceConfiguration, XContentType.JSON, humanReadable), XContentType.JSON);
}
public void testGetConfiguration_WithoutSupportedTaskTypes() throws Exception {
String content = XContentHelper.stripWhitespace("""
{
"service": "elastic",
"name": "Elastic",
"task_types": [],
"configurations": {
"model_id": {
"description": "The name of the model to use for the inference task.",
"label": "Model ID",
"required": true,
"sensitive": false,
"updatable": false,
"type": "str",
"supported_task_types": ["text_embedding", "sparse_embedding" , "rerank", "chat_completion"]
},
"max_input_tokens": {
"description": "Allows you to specify the maximum number of tokens per input.",
"label": "Maximum Input Tokens",
"required": false,
"sensitive": false,
"updatable": false,
"type": "int",
"supported_task_types": ["text_embedding", "sparse_embedding"]
}
}
}
""");
InferenceServiceConfiguration configuration = InferenceServiceConfiguration.fromXContentBytes(
new BytesArray(content),
XContentType.JSON
);
var humanReadable = true;
BytesReference originalBytes = toShuffledXContent(configuration, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable);
InferenceServiceConfiguration serviceConfiguration = ElasticInferenceService.createConfiguration(EnumSet.noneOf(TaskType.class));
assertToXContentEquivalent(originalBytes, toXContent(serviceConfiguration, XContentType.JSON, humanReadable), XContentType.JSON);
}
public void testGetConfiguration_ThrowsUnsupported() throws Exception {
try (
var service = createServiceWithMockSender(
// this service doesn't yet support text embedding so we should still have no task types
ElasticInferenceServiceAuthorizationModel.of(
new ElasticInferenceServiceAuthorizationResponseEntity(
List.of(
new ElasticInferenceServiceAuthorizationResponseEntity.AuthorizedModel(
"model-1",
EnumSet.of(TaskType.TEXT_EMBEDDING)
)
)
)
)
)
) {
expectThrows(UnsupportedOperationException.class, service::getConfiguration);
}
}
public void testSupportedStreamingTasks_ReturnsChatCompletion() throws Exception {
var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager);
try (var service = createService(senderFactory)) {
assertThat(service.supportedStreamingTasks(), is(EnumSet.of(TaskType.CHAT_COMPLETION)));
assertFalse(service.canStream(TaskType.ANY));
assertTrue(service.defaultConfigIds().isEmpty());
PlainActionFuture<List<Model>> listener = new PlainActionFuture<>();
service.defaultConfigs(listener);
assertTrue(listener.actionGet(TIMEOUT).isEmpty());
}
}
public void testDefaultConfigs_ReturnsEmptyLists() throws Exception {
var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager);
try (var service = createService(senderFactory)) {
assertTrue(service.defaultConfigIds().isEmpty());
PlainActionFuture<List<Model>> listener = new PlainActionFuture<>();
service.defaultConfigs(listener);
assertTrue(listener.actionGet(TIMEOUT).isEmpty());
}
}
public void testSupportedTaskTypes_Returns_Unsupported() throws Exception {
var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager);
try (var service = createService(senderFactory)) {
expectThrows(UnsupportedOperationException.class, service::supportedTaskTypes);
}
}
public void testUnifiedCompletionError() {
var e = assertThrows(UnifiedChatCompletionException.class, () -> testUnifiedStream(404, """
{
"error": "The model `rainbow-sprinkles` does not exist or you do not have access to it."
}"""));
assertThat(
e.getMessage(),
equalTo(
"Received an unsuccessful status code for request from inference entity id [id] status "
+ "[404]. Error message: [The model `rainbow-sprinkles` does not exist or you do not have access to it.]"
)
);
}
public void testUnifiedCompletionErrorMidStream() throws Exception {
testUnifiedStreamError(200, """
data: { "error": "some error" }
""", """
{\
"error":{\
"code":"stream_error",\
"message":"Received an error response for request from inference entity id [id]. Error message: [some error]",\
"type":"error"\
}}""");
}
public void testUnifiedCompletionMalformedError() throws Exception {
testUnifiedStreamError(200, """
data: { i am not json }
""", """
{\
"error":{\
"code":"bad_request",\
"message":"[1:3] Unexpected character ('i' (code 105)): was expecting double-quote to start field name\\n\
at [Source: (String)\\"{ i am not json }\\"; line: 1, column: 3]",\
"type":"x_content_parse_exception"\
}}""");
}
private void testUnifiedStreamError(int responseCode, String responseJson, String expectedJson) throws Exception {
testUnifiedStream(responseCode, responseJson).hasNoEvents().hasErrorMatching(e -> {
e = unwrapCause(e);
assertThat(e, isA(UnifiedChatCompletionException.class));
try (var builder = XContentFactory.jsonBuilder()) {
((UnifiedChatCompletionException) e).toXContentChunked(EMPTY_PARAMS).forEachRemaining(xContent -> {
try {
xContent.toXContent(builder, EMPTY_PARAMS);
} catch (IOException ex) {
throw new RuntimeException(ex);
}
});
var json = XContentHelper.convertToJson(BytesReference.bytes(builder), false, builder.contentType());
assertThat(json, is(expectedJson));
}
});
}
private InferenceEventsAssertion testUnifiedStream(int responseCode, String responseJson) throws Exception {
var elasticInferenceServiceURL = getUrl(webServer);
var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager);
try (var service = createService(senderFactory, elasticInferenceServiceURL)) {
webServer.enqueue(new MockResponse().setResponseCode(responseCode).setBody(responseJson));
var model = new ElasticInferenceServiceCompletionModel(
"id",
TaskType.CHAT_COMPLETION,
"elastic",
new ElasticInferenceServiceCompletionServiceSettings("model_id"),
EmptyTaskSettings.INSTANCE,
EmptySecretSettings.INSTANCE,
ElasticInferenceServiceComponents.of(elasticInferenceServiceURL)
);
PlainActionFuture<InferenceServiceResults> listener = new PlainActionFuture<>();
service.unifiedCompletionInfer(
model,
UnifiedCompletionRequest.of(
List.of(new UnifiedCompletionRequest.Message(new UnifiedCompletionRequest.ContentString("hello"), "user", null, null))
),
InferenceAction.Request.DEFAULT_TIMEOUT,
listener
);
return InferenceEventsAssertion.assertThat(listener.actionGet(TIMEOUT)).hasFinishedStream();
}
}
private ElasticInferenceService createServiceWithMockSender() {
return createServiceWithMockSender(ElasticInferenceServiceAuthorizationModelTests.createEnabledAuth());
}
private ElasticInferenceService createServiceWithMockSender(ElasticInferenceServiceAuthorizationModel auth) {
var sender = createMockSender();
var factory = mock(HttpRequestSender.Factory.class);
when(factory.createSender()).thenReturn(sender);
var service = new ElasticInferenceService(
factory,
createWithEmptySettings(threadPool),
new ElasticInferenceServiceSettings(Settings.EMPTY),
mockClusterServiceEmpty(),
createNoopApplierFactory()
);
service.init();
return service;
}
private ElasticInferenceService createService(HttpRequestSender.Factory senderFactory) {
return createService(senderFactory, null);
}
private ElasticInferenceService createService(HttpRequestSender.Factory senderFactory, String elasticInferenceServiceURL) {
var service = new ElasticInferenceService(
senderFactory,
createWithEmptySettings(threadPool),
ElasticInferenceServiceSettingsTests.create(elasticInferenceServiceURL),
mockClusterServiceEmpty(),
createNoopApplierFactory()
);
service.init();
return service;
}
}
| ElasticInferenceServiceTests |
java | bumptech__glide | library/src/main/java/com/bumptech/glide/util/pool/FactoryPools.java | {
"start": 561,
"end": 4670
} | class ____ {
private static final String TAG = "FactoryPools";
private static final int DEFAULT_POOL_SIZE = 20;
private static final Resetter<Object> EMPTY_RESETTER =
new Resetter<Object>() {
@Override
public void reset(@NonNull Object object) {
// Do nothing.
}
};
private FactoryPools() {}
/**
* Returns a non-thread safe {@link Pool} that never returns {@code null} from {@link
* Pool#acquire()} and that contains objects of the type created by the given {@link Factory} with
* the given maximum size.
*
* <p>If the pool is empty when {@link Pool#acquire()} is called, the given {@link Factory} will
* be used to create a new instance.
*
* @param <T> The type of object the pool will contains.
*/
@NonNull
public static <T extends Poolable> Pool<T> simple(int size, @NonNull Factory<T> factory) {
return build(new SimplePool<T>(size), factory);
}
/**
* Identical to {@link #threadSafe(int, Factory, Resetter)} except no action is taken when an
* instance is returned to the pool.
*/
@NonNull
public static <T extends Poolable> Pool<T> threadSafe(int size, @NonNull Factory<T> factory) {
return build(new SynchronizedPool<T>(size), factory);
}
/**
* Returns a new thread safe {@link Pool} that never returns {@code null} from {@link
* Pool#acquire()} and that contains objects of the type created by the given {@link Factory} with
* the given maximum size.
*
* <p>If the pool is empty when {@link Pool#acquire()} is called, the given {@link Factory} will
* be used to create a new instance.
*
* <p>Each time an instance is returned to the pool {@code resetter} will be called with the given
* instance.
*
* @param <T> The type of object the pool will contains.
*/
@NonNull
public static <T extends Poolable> Pool<T> threadSafe(
int size, @NonNull Factory<T> factory, @NonNull Resetter<T> resetter) {
return build(new SynchronizedPool<T>(size), factory, resetter);
}
/**
* Returns a new {@link Pool} that never returns {@code null} and that contains {@link List Lists}
* of a specific generic type with a standard maximum size of 20.
*
* <p>If the pool is empty when {@link Pool#acquire()} is called, a new {@link List} will be
* created.
*
* @param <T> The type of object that the {@link List Lists} will contain.
*/
@NonNull
public static <T> Pool<List<T>> threadSafeList() {
return threadSafeList(DEFAULT_POOL_SIZE);
}
/**
* Returns a new thread safe {@link Pool} that never returns {@code null} and that contains {@link
* List Lists} of a specific generic type with the given maximum size.
*
* <p>If the pool is empty when {@link Pool#acquire()} is called, a new {@link List} will be
* created.
*
* @param <T> The type of object that the {@link List Lists} will contain.
*/
// Public API.
@SuppressWarnings("WeakerAccess")
@NonNull
public static <T> Pool<List<T>> threadSafeList(int size) {
return build(
new SynchronizedPool<List<T>>(size),
new Factory<List<T>>() {
@NonNull
@Override
public List<T> create() {
return new ArrayList<>();
}
},
new Resetter<List<T>>() {
@Override
public void reset(@NonNull List<T> object) {
object.clear();
}
});
}
@NonNull
private static <T extends Poolable> Pool<T> build(
@NonNull Pool<T> pool, @NonNull Factory<T> factory) {
return build(pool, factory, FactoryPools.<T>emptyResetter());
}
@NonNull
private static <T> Pool<T> build(
@NonNull Pool<T> pool, @NonNull Factory<T> factory, @NonNull Resetter<T> resetter) {
return new FactoryPool<>(pool, factory, resetter);
}
@NonNull
@SuppressWarnings("unchecked")
private static <T> Resetter<T> emptyResetter() {
return (Resetter<T>) EMPTY_RESETTER;
}
/**
* Creates new instances of the given type.
*
* @param <T> The type of Object that will be created.
*/
public | FactoryPools |
java | apache__hadoop | hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java | {
"start": 2510,
"end": 6557
} | class ____ raise an exception.
* @param fsURI FileSystem URI
* @param conf configuration
* @return true if an option was set but ignored
* @throws PathIOException if an unsupported metastore was found.
*/
public static boolean checkNoS3Guard(URI fsURI, Configuration conf) throws PathIOException {
final String classname = conf.getTrimmed(S3_METADATA_STORE_IMPL, "");
if (classname.isEmpty()) {
// all good. declare nothing was found.
return false;
}
// there is a s3guard configuration option
// ignore if harmless; reject if DDB or unknown
final String[] sources = conf.getPropertySources(S3_METADATA_STORE_IMPL);
final String origin = sources == null
? "unknown"
: sources[0];
final String fsPath = fsURI.toString();
switch (classname) {
case NULL_METADATA_STORE:
// harmless
LOG.debug("Ignoring S3Guard store option of {} -no longer needed " +
"Origin {}",
NULL_METADATA_STORE, origin);
break;
case S3GUARD_METASTORE_LOCAL:
// used in some libraries (e.g. hboss) to force a consistent s3 in a test
// run.
// print a message and continue
LOG.warn("Ignoring S3Guard store option of {} -no longer needed or supported. "
+ "Origin {}",
S3GUARD_METASTORE_LOCAL, origin);
break;
case S3GUARD_METASTORE_DYNAMO:
// this is the dangerous one, as it is a sign that a config is in use where
// older releases will use DDB for listing metadata, yet this
// client will not update it.
final String message = String.format("S3Guard is no longer needed/supported,"
+ " yet %s is configured to use DynamoDB as the S3Guard metadata store."
+ " This is no longer needed or supported. " +
"Origin of setting is %s",
fsPath, origin);
LOG.error(message);
throw new PathIOException(fsPath, message);
default:
// an unknown store entirely.
throw new PathIOException(fsPath,
"Filesystem is configured to use unknown S3Guard store " + classname
+ " origin " + origin);
}
// an option was set, but it was harmless
return true;
}
/**
* Get the authoritative paths of a filesystem.
*
* @param uri FS URI
* @param conf configuration
* @param qualifyToDir a qualification operation
* @return list of URIs valid for this FS.
*/
@VisibleForTesting
static Collection<String> getAuthoritativePaths(
final URI uri,
final Configuration conf,
final Function<Path, String> qualifyToDir) {
String[] rawAuthoritativePaths =
conf.getTrimmedStrings(AUTHORITATIVE_PATH, DEFAULT_AUTHORITATIVE_PATH);
Collection<String> authoritativePaths = new ArrayList<>();
if (rawAuthoritativePaths.length > 0) {
for (int i = 0; i < rawAuthoritativePaths.length; i++) {
Path path = new Path(rawAuthoritativePaths[i]);
URI pathURI = path.toUri();
if (pathURI.getAuthority() != null &&
!pathURI.getAuthority().equals(uri.getAuthority())) {
// skip on auth
continue;
}
if (pathURI.getScheme() != null &&
!pathURI.getScheme().equals(uri.getScheme())) {
// skip on auth
continue;
}
authoritativePaths.add(qualifyToDir.apply(path));
}
}
return authoritativePaths;
}
/**
* Is the path for the given FS instance authoritative?
* @param p path
* @param fs filesystem
* @param authPaths possibly empty list of authoritative paths
* @return true iff the path is authoritative
*/
public static boolean allowAuthoritative(Path p, S3AFileSystem fs,
Collection<String> authPaths) {
String haystack = fs.maybeAddTrailingSlash(fs.qualify(p).toString());
if (!authPaths.isEmpty()) {
for (String needle : authPaths) {
if (haystack.startsWith(needle)) {
return true;
}
}
}
return false;
}
}
| will |
java | apache__kafka | metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTestEnv.java | {
"start": 1966,
"end": 8740
} | class ____ {
private final MockRaftClientTestEnv clientEnv;
private Consumer<QuorumController.Builder> controllerBuilderInitializer = __ -> { };
private OptionalLong sessionTimeoutMillis = OptionalLong.empty();
private OptionalLong leaderImbalanceCheckIntervalNs = OptionalLong.empty();
private BootstrapMetadata bootstrapMetadata = BootstrapMetadata.
fromVersion(MetadataVersion.latestTesting(), "test-provided version");
public Builder(MockRaftClientTestEnv clientEnv) {
this.clientEnv = clientEnv;
}
public Builder setControllerBuilderInitializer(Consumer<QuorumController.Builder> controllerBuilderInitializer) {
this.controllerBuilderInitializer = controllerBuilderInitializer;
return this;
}
public Builder setSessionTimeoutMillis(OptionalLong sessionTimeoutMillis) {
this.sessionTimeoutMillis = sessionTimeoutMillis;
return this;
}
public Builder setLeaderImbalanceCheckIntervalNs(OptionalLong leaderImbalanceCheckIntervalNs) {
this.leaderImbalanceCheckIntervalNs = leaderImbalanceCheckIntervalNs;
return this;
}
public Builder setBootstrapMetadata(BootstrapMetadata bootstrapMetadata) {
this.bootstrapMetadata = bootstrapMetadata;
return this;
}
public QuorumControllerTestEnv build() throws Exception {
return new QuorumControllerTestEnv(
clientEnv,
controllerBuilderInitializer,
sessionTimeoutMillis,
leaderImbalanceCheckIntervalNs,
bootstrapMetadata.metadataVersion().isElrSupported(),
bootstrapMetadata);
}
}
private QuorumControllerTestEnv(
MockRaftClientTestEnv clientEnv,
Consumer<QuorumController.Builder> controllerBuilderInitializer,
OptionalLong sessionTimeoutMillis,
OptionalLong leaderImbalanceCheckIntervalNs,
boolean eligibleLeaderReplicasEnabled,
BootstrapMetadata bootstrapMetadata
) throws Exception {
this.clientEnv = clientEnv;
int numControllers = clientEnv.raftClients().size();
this.controllers = new ArrayList<>(numControllers);
try {
List<Integer> nodeIds = IntStream.range(0, numControllers).boxed().toList();
for (int nodeId = 0; nodeId < numControllers; nodeId++) {
QuorumController.Builder builder = new QuorumController.Builder(nodeId, clientEnv.clusterId());
builder.setRaftClient(clientEnv.raftClients().get(nodeId));
if (eligibleLeaderReplicasEnabled) {
bootstrapMetadata = bootstrapMetadata.copyWithFeatureRecord(
EligibleLeaderReplicasVersion.FEATURE_NAME,
EligibleLeaderReplicasVersion.ELRV_1.featureLevel()
);
}
builder.setBootstrapMetadata(bootstrapMetadata);
builder.setLeaderImbalanceCheckIntervalNs(leaderImbalanceCheckIntervalNs);
builder.setQuorumFeatures(new QuorumFeatures(nodeId, QuorumFeatures.defaultSupportedFeatureMap(true), nodeIds));
sessionTimeoutMillis.ifPresent(timeout ->
builder.setSessionTimeoutNs(NANOSECONDS.convert(timeout, TimeUnit.MILLISECONDS))
);
MockFaultHandler fatalFaultHandler = new MockFaultHandler("fatalFaultHandler");
builder.setFatalFaultHandler(fatalFaultHandler);
fatalFaultHandlers.put(nodeId, fatalFaultHandler);
MockFaultHandler nonFatalFaultHandler = new MockFaultHandler("nonFatalFaultHandler");
builder.setNonFatalFaultHandler(nonFatalFaultHandler);
builder.setConfigSchema(FakeKafkaConfigSchema.INSTANCE);
nonFatalFaultHandlers.put(nodeId, fatalFaultHandler);
controllerBuilderInitializer.accept(builder);
QuorumController controller = builder.build();
this.controllers.add(controller);
}
} catch (Exception e) {
close();
throw e;
}
}
QuorumController activeController() throws InterruptedException {
return activeController(false);
}
QuorumController activeController(boolean waitForActivation) throws InterruptedException {
AtomicReference<QuorumController> value = new AtomicReference<>(null);
TestUtils.retryOnExceptionWithTimeout(20000, 3, () -> {
LeaderAndEpoch leader = clientEnv.leaderAndEpoch();
for (QuorumController controller : controllers) {
if (OptionalInt.of(controller.nodeId()).equals(leader.leaderId()) &&
controller.curClaimEpoch() == leader.epoch()) {
value.set(controller);
break;
}
}
if (value.get() == null) {
throw new RuntimeException(String.format("Expected to see %s as leader", leader));
}
});
if (waitForActivation) {
try {
// ControllerActivation happens after curClaimEpoch is set, so we need to put something on
// the end of the queue and wait for it to complete before returning the active controller.
value.get()
.appendReadEvent("wait for activation", OptionalLong.empty(), () -> null)
.get(20000, TimeUnit.MILLISECONDS);
} catch (Throwable t) {
throw new RuntimeException("Failed while waiting for controller activation", t);
}
}
return value.get();
}
public List<QuorumController> controllers() {
return controllers;
}
public MockFaultHandler fatalFaultHandler(Integer nodeId) {
return fatalFaultHandlers.get(nodeId);
}
public void ignoreFatalFaults() {
for (MockFaultHandler faultHandler : fatalFaultHandlers.values()) {
faultHandler.setIgnore(true);
}
}
@Override
public void close() throws InterruptedException {
for (QuorumController controller : controllers) {
controller.beginShutdown();
}
for (QuorumController controller : controllers) {
controller.close();
}
for (MockFaultHandler faultHandler : fatalFaultHandlers.values()) {
faultHandler.maybeRethrowFirstException();
}
for (MockFaultHandler faultHandler : nonFatalFaultHandlers.values()) {
faultHandler.maybeRethrowFirstException();
}
}
}
| Builder |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/api/operators/StreamSourceContexts.java | {
"start": 3873,
"end": 5059
} | class ____<T> implements SourceFunction.SourceContext<T> {
private final Object checkpointLock;
private ClosedContext(Object checkpointLock) {
this.checkpointLock = checkpointLock;
}
@Override
public void collect(T element) {
throwException();
}
@Override
public void collectWithTimestamp(T element, long timestamp) {
throwException();
}
@Override
public void emitWatermark(Watermark mark) {
throwException();
}
@Override
public void markAsTemporarilyIdle() {
throwException();
}
@Override
public Object getCheckpointLock() {
return checkpointLock;
}
@Override
public void close() {
// nothing to be done
}
private void throwException() {
throw new FlinkRuntimeException("The Source Context has been closed already.");
}
}
/**
* A source context that attached {@code -1} as a timestamp to all records, and that does not
* forward watermarks.
*/
private static | ClosedContext |
java | google__guice | core/test/com/google/inject/errors/BindingAlreadySetErrorTest.java | {
"start": 3265,
"end": 3426
} | class ____ extends AbstractModule {
@Override
protected void configure() {
install(new BindWithProviderMethodModule());
}
}
static | TestModule1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.