language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | spring-projects__spring-framework | spring-webmvc/src/test/java/org/springframework/web/servlet/mvc/method/annotation/ServletAnnotationControllerHandlerMethodTests.java | {
"start": 128167,
"end": 128291
} | class ____ {
@RequestMapping("/myPath.do")
public void myHandle() {
}
}
@Controller
static | CustomAnnotationController |
java | dropwizard__dropwizard | dropwizard-testing/src/test/java/io/dropwizard/testing/junit5/PersonResourceTest.java | {
"start": 991,
"end": 4677
} | class ____ implements ExceptionMapper<WebApplicationException> {
@Override
public Response toResponse(WebApplicationException e) {
throw new UnsupportedOperationException();
}
}
private static final ObjectMapper OBJECT_MAPPER = Jackson.newObjectMapper()
.registerModule(new GuavaModule());
private final PeopleStore peopleStore = mock(PeopleStore.class);
private final ResourceExtension resources = ResourceExtension.builder()
.addResource(new PersonResource(peopleStore))
.setMapper(OBJECT_MAPPER)
.setClientConfigurator(clientConfig -> clientConfig.register(DummyExceptionMapper.class))
.build();
private final Person person = new Person("blah", "blah@example.com");
@BeforeEach
void setup() {
when(peopleStore.fetchPerson("blah")).thenReturn(person);
}
@Test
void testGetPerson() {
assertThat(resources.target("/person/blah").request()
.get(Person.class))
.isEqualTo(person);
verify(peopleStore).fetchPerson("blah");
}
@Test
void testGetImmutableListOfPersons() {
assertThat(resources.target("/person/blah/list").request().get(new GenericType<List<Person>>() {
})).containsOnly(person);
}
@Test
void testGetPersonWithQueryParam() {
// Test to ensure that the dropwizard validator is registered so that
// it can validate the "ind" IntParam.
assertThat(resources.target("/person/blah/index")
.queryParam("ind", 0).request()
.get(Person.class))
.isEqualTo(person);
verify(peopleStore).fetchPerson("blah");
}
@Test
void testDefaultConstraintViolation() {
assertThat(resources.target("/person/blah/index")
.queryParam("ind", -1).request()
.get().readEntity(String.class))
.isEqualTo("{\"errors\":[\"query param ind must be greater than or equal to 0\"]}");
}
@Test
void testDefaultJsonProcessingMapper() {
assertThat(resources.target("/person/blah/runtime-exception")
.request()
.post(Entity.json("{ \"he: \"ho\"}"))
.readEntity(String.class))
.isEqualTo("{\"code\":400,\"message\":\"Unable to process JSON\"}");
}
@Test
void testDefaultExceptionMapper() {
assertThat(resources.target("/person/blah/runtime-exception")
.request()
.post(Entity.json("{}"))
.readEntity(String.class))
.startsWith("{\"code\":500,\"message\":\"There was an error processing your request. It has been logged");
}
@Test
void testDefaultEofExceptionMapper() {
assertThat(resources.target("/person/blah/eof-exception")
.request()
.get().getStatus())
.isEqualTo(Response.Status.BAD_REQUEST.getStatusCode());
}
@Test
void testValidationGroupsException() {
assertThat(resources.target("/person/blah/validation-groups-exception")
.request()
.post(Entity.json("{}")))
.satisfies(response -> assertThat(response.getStatus()).isEqualTo(Response.Status.INTERNAL_SERVER_ERROR.getStatusCode()))
.satisfies(response -> assertThat(response.readEntity(String.class))
.isEqualTo("{\"code\":500,\"message\":\"Parameters must have the same" +
" validation groups in validationGroupsException\"}"));
}
@Test
void testCustomClientConfiguration() {
assertThat(resources.client().getConfiguration().isRegistered(DummyExceptionMapper.class)).isTrue();
}
}
| DummyExceptionMapper |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Validation.java | {
"start": 6170,
"end": 7409
} | class ____ {
public static final int MAX_DESCRIPTION_LENGTH = 1000;
public static Error validateRoleName(String roleName, boolean allowReserved) {
return validateRoleName(roleName, allowReserved, MAX_NAME_LENGTH);
}
public static Error validateRoleDescription(String description) {
if (description != null && description.length() > MAX_DESCRIPTION_LENGTH) {
return new Error(Strings.format("Role description must be less than %s characters.", MAX_DESCRIPTION_LENGTH));
}
return null;
}
static Error validateRoleName(String roleName, boolean allowReserved, int maxLength) {
if (roleName == null) {
return new Error("role name is missing");
}
if (isValidUserOrRoleName(roleName, maxLength) == false) {
return new Error(String.format(Locale.ROOT, INVALID_NAME_MESSAGE, "Role", maxLength));
}
if (allowReserved == false && ReservedRolesStore.isReserved(roleName)) {
return new Error("Role [" + roleName + "] is reserved and may not be used.");
}
return null;
}
}
public static | Roles |
java | apache__hadoop | hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/object/ObjectUtils.java | {
"start": 1118,
"end": 3119
} | class ____ {
public static final String SLASH = "/";
private ObjectUtils() {
}
public static Path keyToPath(String key) {
return new Path(SLASH + key);
}
public static String path(String key) {
return key.startsWith(SLASH) ? key : SLASH + key;
}
public static String pathToKey(Path p) {
return pathToKey(p, false);
}
public static String pathToKey(Path p, Boolean isDir) {
Preconditions.checkArgument(p != null, "Null path");
if (p.toUri().getScheme() != null && p.toUri().getPath().isEmpty()) {
return "";
}
String key = p.toUri().getPath().substring(1);
if (isDir && !key.isEmpty()) {
return key.endsWith(SLASH) ? key : key + SLASH;
}
return key;
}
public static void deleteAllObjects(ObjectStorage storage, Iterable<ObjectInfo> objects,
int batchSize) {
List<String> keysToDelete = Lists.newArrayList();
for (ObjectInfo obj : objects) {
keysToDelete.add(obj.key());
if (keysToDelete.size() == batchSize) {
batchDelete(storage, keysToDelete);
keysToDelete.clear();
}
}
if (!keysToDelete.isEmpty()) {
batchDelete(storage, keysToDelete);
}
}
private static void batchDelete(ObjectStorage storage, List<String> keys) {
List<String> failedKeys = storage.batchDelete(keys);
if (!failedKeys.isEmpty()) {
throw new RuntimeException(String.format("Failed to delete %s objects, detail: %s",
failedKeys.size(), Joiner.on(",").join(failedKeys)));
}
}
public static Range calculateRange(final long offset, final long limit, final long objSize) {
Preconditions.checkArgument(offset >= 0,
String.format("offset is a negative number: %s", offset));
Preconditions.checkArgument(offset <= objSize,
String.format("offset: %s is bigger than object size: %s", offset, objSize));
long len = limit < 0 ? objSize - offset : Math.min(objSize - offset, limit);
return Range.of(offset, len);
}
}
| ObjectUtils |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/SlotCountExceedingParallelismTest.java | {
"start": 2406,
"end": 6022
} | class ____ extends TestLogger {
// Test configuration
private static final int NUMBER_OF_TMS = 2;
private static final int NUMBER_OF_SLOTS_PER_TM = 2;
private static final int PARALLELISM = NUMBER_OF_TMS * NUMBER_OF_SLOTS_PER_TM;
public static final String JOB_NAME =
"SlotCountExceedingParallelismTest (no slot sharing, blocking results)";
@ClassRule
public static final MiniClusterResource MINI_CLUSTER_RESOURCE =
new MiniClusterResource(
new MiniClusterResourceConfiguration.Builder()
.setConfiguration(getFlinkConfiguration())
.setNumberTaskManagers(NUMBER_OF_TMS)
.setNumberSlotsPerTaskManager(NUMBER_OF_SLOTS_PER_TM)
.build());
private static Configuration getFlinkConfiguration() {
final Configuration config = new Configuration();
config.set(RpcOptions.ASK_TIMEOUT_DURATION, TestingUtils.DEFAULT_ASK_TIMEOUT);
return config;
}
@Test
public void testNoSlotSharingAndBlockingResultSender() throws Exception {
// Sender with higher parallelism than available slots
JobGraph jobGraph = createTestJobGraph(JOB_NAME, PARALLELISM * 2, PARALLELISM);
submitJobGraphAndWait(jobGraph);
}
@Test
public void testNoSlotSharingAndBlockingResultReceiver() throws Exception {
// Receiver with higher parallelism than available slots
JobGraph jobGraph = createTestJobGraph(JOB_NAME, PARALLELISM, PARALLELISM * 2);
submitJobGraphAndWait(jobGraph);
}
@Test
public void testNoSlotSharingAndBlockingResultBoth() throws Exception {
// Both sender and receiver with higher parallelism than available slots
JobGraph jobGraph = createTestJobGraph(JOB_NAME, PARALLELISM * 2, PARALLELISM * 2);
submitJobGraphAndWait(jobGraph);
}
// ---------------------------------------------------------------------------------------------
private void submitJobGraphAndWait(final JobGraph jobGraph)
throws JobExecutionException, InterruptedException {
MINI_CLUSTER_RESOURCE.getMiniCluster().executeJobBlocking(jobGraph);
}
private JobGraph createTestJobGraph(
String jobName, int senderParallelism, int receiverParallelism) {
// The sender and receiver invokable logic ensure that each subtask gets the expected data
final JobVertex sender = new JobVertex("Sender");
sender.setInvokableClass(RoundRobinSubtaskIndexSender.class);
sender.getConfiguration()
.get(
getIntConfigOption(RoundRobinSubtaskIndexSender.CONFIG_KEY),
receiverParallelism);
sender.setParallelism(senderParallelism);
final JobVertex receiver = new JobVertex("Receiver");
receiver.setInvokableClass(SubtaskIndexReceiver.class);
receiver.getConfiguration()
.get(getIntConfigOption(SubtaskIndexReceiver.CONFIG_KEY), senderParallelism);
receiver.setParallelism(receiverParallelism);
connectNewDataSetAsInput(
receiver, sender, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);
return JobGraphBuilder.newBatchJobGraphBuilder()
.setJobName(jobName)
.addJobVertices(Arrays.asList(sender, receiver))
.build();
}
/** Sends the subtask index a configurable number of times in a round-robin fashion. */
public static | SlotCountExceedingParallelismTest |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/querydsl/query/SingleValueMatchQuery.java | {
"start": 14752,
"end": 14842
} | interface ____ {
boolean test(int doc) throws IOException;
}
}
| CheckedIntPredicate |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/appender/rolling/action/ZipCompressAction.java | {
"start": 1142,
"end": 4753
} | class ____ extends AbstractAction {
private static final int BUF_SIZE = 8192;
/**
* Source file.
*/
private final File source;
/**
* Destination file.
*/
private final File destination;
/**
* If true, attempts to delete file on completion.
*/
private final boolean deleteSource;
/**
* Compression level.
*/
private final int level;
/**
* Creates new instance of GzCompressAction.
*
* @param source file to compress, may not be null.
* @param destination compressed file, may not be null.
* @param deleteSource if true, attempt to delete file on completion. Failure to delete does not cause an exception
* to be thrown or affect return value.
* @param level TODO
*/
public ZipCompressAction(final File source, final File destination, final boolean deleteSource, final int level) {
Objects.requireNonNull(source, "source");
Objects.requireNonNull(destination, "destination");
this.source = source;
this.destination = destination;
this.deleteSource = deleteSource;
this.level = level;
}
/**
* Compresses.
*
* @return true if successfully compressed.
* @throws IOException on IO exception.
*/
@Override
public boolean execute() throws IOException {
return execute(source, destination, deleteSource, level);
}
/**
* Compresses a file.
*
* @param source file to compress, may not be null.
* @param destination compressed file, may not be null.
* @param deleteSource if true, attempt to delete file on completion. Failure to delete does not cause an exception
* to be thrown or affect return value.
* @param level the compression level
* @return true if source file compressed.
* @throws IOException on IO exception.
*/
public static boolean execute(
final File source, final File destination, final boolean deleteSource, final int level) throws IOException {
if (source.exists()) {
try (final FileInputStream fis = new FileInputStream(source);
final ZipOutputStream zos = new ZipOutputStream(new FileOutputStream(destination))) {
zos.setLevel(level);
final ZipEntry zipEntry = new ZipEntry(source.getName());
zos.putNextEntry(zipEntry);
final byte[] inbuf = new byte[BUF_SIZE];
int n;
while ((n = fis.read(inbuf)) != -1) {
zos.write(inbuf, 0, n);
}
}
if (deleteSource && !source.delete()) {
LOGGER.warn("Unable to delete " + source.toString() + '.');
}
return true;
}
return false;
}
/**
* Captures exception.
*
* @param ex exception.
*/
@Override
protected void reportException(final Exception ex) {
LOGGER.warn("Exception during compression of '" + source.toString() + "'.", ex);
}
@Override
public String toString() {
return ZipCompressAction.class.getSimpleName() + '[' + source + " to " + destination + ", level=" + level
+ ", deleteSource=" + deleteSource + ']';
}
public File getSource() {
return source;
}
public File getDestination() {
return destination;
}
public boolean isDeleteSource() {
return deleteSource;
}
public int getLevel() {
return level;
}
}
| ZipCompressAction |
java | google__guava | android/guava-tests/benchmark/com/google/common/primitives/UnsignedLongsBenchmark.java | {
"start": 912,
"end": 4462
} | class ____ {
private static final int ARRAY_SIZE = 0x10000;
private static final int ARRAY_MASK = 0x0ffff;
private static final Random randomSource = new Random(314159265358979L);
private static final long[] longs = new long[ARRAY_SIZE];
private static final long[] divisors = new long[ARRAY_SIZE];
private static final String[] decimalStrings = new String[ARRAY_SIZE];
private static final String[] binaryStrings = new String[ARRAY_SIZE];
private static final String[] hexStrings = new String[ARRAY_SIZE];
private static final String[] prefixedHexStrings = new String[ARRAY_SIZE];
@BeforeExperiment
void setUp() {
for (int i = 0; i < ARRAY_SIZE; i++) {
longs[i] = random();
divisors[i] = randomDivisor(longs[i]);
decimalStrings[i] = UnsignedLongs.toString(longs[i]);
binaryStrings[i] = UnsignedLongs.toString(longs[i], 2);
hexStrings[i] = UnsignedLongs.toString(longs[i], 16);
prefixedHexStrings[i] = "0x" + hexStrings[i];
}
}
@Benchmark
long divide(int reps) {
long tmp = 0;
for (int i = 0; i < reps; i++) {
int j = i & ARRAY_MASK;
tmp += UnsignedLongs.divide(longs[j], divisors[j]);
}
return tmp;
}
@Benchmark
long remainder(int reps) {
long tmp = 0;
for (int i = 0; i < reps; i++) {
int j = i & ARRAY_MASK;
tmp += UnsignedLongs.remainder(longs[j], divisors[j]);
}
return tmp;
}
@Benchmark
long parseUnsignedLong(int reps) {
long tmp = 0;
// Given that we make three calls per pass, we scale reps down in order
// to do a comparable amount of work to other measurements.
int scaledReps = reps / 3 + 1;
for (int i = 0; i < scaledReps; i++) {
int j = i & ARRAY_MASK;
tmp += UnsignedLongs.parseUnsignedLong(decimalStrings[j]);
tmp += UnsignedLongs.parseUnsignedLong(hexStrings[j], 16);
tmp += UnsignedLongs.parseUnsignedLong(binaryStrings[j], 2);
}
return tmp;
}
@Benchmark
long parseDecode10(int reps) {
long tmp = 0;
for (int i = 0; i < reps; i++) {
int j = i & ARRAY_MASK;
tmp += UnsignedLongs.decode(decimalStrings[j]);
}
return tmp;
}
@Benchmark
long parseDecode16(int reps) {
long tmp = 0;
for (int i = 0; i < reps; i++) {
int j = i & ARRAY_MASK;
tmp += UnsignedLongs.decode(prefixedHexStrings[j]);
}
return tmp;
}
@Benchmark
int toString(int reps) {
int tmp = 0;
// Given that we make three calls per pass, we scale reps down in order
// to do a comparable amount of work to other measurements.
int scaledReps = reps / 3 + 1;
for (int i = 0; i < scaledReps; i++) {
int j = i & ARRAY_MASK;
long x = longs[j];
tmp += UnsignedLongs.toString(x).length();
tmp += UnsignedLongs.toString(x, 16).length();
tmp += UnsignedLongs.toString(x, 2).length();
}
return tmp;
}
private static long random() {
return randomSource.nextLong();
}
// A random value that cannot be 0 and that is unsigned-less-than or equal
// to the given dividend, so that we don't have half of our divisions being
// trivial because the divisor is bigger than the dividend.
// Using remainder here does not give us a uniform distribution but it should
// not have a big impact on the measurement.
private static long randomDivisor(long dividend) {
long r = randomSource.nextLong();
if (dividend == -1) {
return r;
} else {
return UnsignedLongs.remainder(r, dividend + 1);
}
}
}
| UnsignedLongsBenchmark |
java | quarkusio__quarkus | independent-projects/arc/processor/src/test/java/io/quarkus/arc/processor/types/Foo.java | {
"start": 273,
"end": 518
} | class ____ extends AbstractList<String> {
@PreDestroy
void superCoolDestroyCallback() {
}
@Override
public String get(int index) {
return null;
}
@Override
public int size() {
return 0;
}
}
| Foo |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/serializer/filters/AfterFilterTest3.java | {
"start": 204,
"end": 1050
} | class ____ extends TestCase {
public void test_afterFilter() throws Exception {
AfterFilter filter = new AfterFilter() {
@Override
public void writeAfter(Object object) {
this.writeKeyValue("id", 123);
}
};
Assert.assertEquals(JSON.toJSONString(new VO(), filter), "{\"value\":1001,\"id\":123}");
}
public void test_afterFilter2() throws Exception {
AfterFilter filter = new AfterFilter() {
@Override
public void writeAfter(Object object) {
this.writeKeyValue("id", 123);
this.writeKeyValue("name", "wenshao");
}
};
Assert.assertEquals(JSON.toJSONString(new VO(), filter), "{\"value\":1001,\"id\":123,\"name\":\"wenshao\"}");
}
public static | AfterFilterTest3 |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-client-jackson/deployment/src/test/java/io/quarkus/rest/client/reactive/jackson/test/MultiSseTest.java | {
"start": 10039,
"end": 10434
} | class ____ implements Predicate<SseEvent<String>> {
@Override
public boolean test(SseEvent<String> event) {
if ("heartbeat".equals(event.id())) {
return false;
}
if ("END".equals(event.data())) {
return false;
}
return true;
}
}
@Path("/sse")
public static | CustomFilter |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/web/bind/MissingMatrixVariableException.java | {
"start": 1071,
"end": 2766
} | class ____ extends MissingRequestValueException {
private final String variableName;
private final MethodParameter parameter;
/**
* Constructor for MissingMatrixVariableException.
* @param variableName the name of the missing matrix variable
* @param parameter the method parameter
*/
public MissingMatrixVariableException(String variableName, MethodParameter parameter) {
this(variableName, parameter, false);
}
/**
* Constructor for use when a value was present but converted to {@code null}.
* @param variableName the name of the missing matrix variable
* @param parameter the method parameter
* @param missingAfterConversion whether the value became null after conversion
* @since 5.3.6
*/
public MissingMatrixVariableException(
String variableName, MethodParameter parameter, boolean missingAfterConversion) {
super("", missingAfterConversion, null, new Object[] {variableName});
this.variableName = variableName;
this.parameter = parameter;
getBody().setDetail("Required path parameter '" + this.variableName + "' is not present.");
}
@Override
public String getMessage() {
return "Required matrix variable '" + this.variableName + "' for method parameter type " +
this.parameter.getNestedParameterType().getSimpleName() + " is " +
(isMissingAfterConversion() ? "present but converted to null" : "not present");
}
/**
* Return the expected name of the matrix variable.
*/
public final String getVariableName() {
return this.variableName;
}
/**
* Return the method parameter bound to the matrix variable.
*/
public final MethodParameter getParameter() {
return this.parameter;
}
}
| MissingMatrixVariableException |
java | apache__kafka | storage/src/test/java/org/apache/kafka/admin/RemoteTopicCrudTest.java | {
"start": 3964,
"end": 32532
} | class ____ {
private final ClusterInstance cluster;
private final int numPartitions = 2;
private final short numReplicationFactor = 2;
private String testTopicName;
public RemoteTopicCrudTest(ClusterInstance cluster) {
this.cluster = cluster;
}
@BeforeEach
void setUp(TestInfo info) {
var methodName = info.getTestMethod().orElseThrow().getName();
testTopicName = methodName + "-" + TestUtils.randomString(3);
}
@ClusterTest
void testCreateRemoteTopicWithValidRetentionTime() {
try (var admin = cluster.admin()) {
var topicConfig = Map.of(
TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true",
TopicConfig.RETENTION_MS_CONFIG, "60000",
TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, "30000"
);
admin.createTopics(List.of(new NewTopic(testTopicName, numPartitions, numReplicationFactor).configs(topicConfig)));
}
}
@ClusterTest
void testCreateRemoteTopicWithValidRetentionSize() throws Exception {
var topicConfig = Map.of(
TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true",
TopicConfig.RETENTION_BYTES_CONFIG, "512",
TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG, "256"
);
try (var admin = cluster.admin()) {
admin.createTopics(List.of(new NewTopic(testTopicName, numPartitions, numReplicationFactor).configs(topicConfig)));
}
verifyRemoteLogTopicConfigs(topicConfig);
}
@ClusterTest
void testCreateRemoteTopicWithInheritedLocalRetentionTime() throws Exception {
// inherited local retention ms is 1000
var topicConfig = Map.of(
TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true",
TopicConfig.RETENTION_MS_CONFIG, "1001"
);
try (var admin = cluster.admin()) {
admin.createTopics(List.of(new NewTopic(testTopicName, numPartitions, numReplicationFactor).configs(topicConfig)));
}
verifyRemoteLogTopicConfigs(topicConfig);
}
@ClusterTest
void testCreateRemoteTopicWithInheritedLocalRetentionSize() throws Exception {
// inherited local retention bytes is 1024
var topicConfig = Map.of(
TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true",
TopicConfig.RETENTION_BYTES_CONFIG, "1025"
);
try (var admin = cluster.admin()) {
admin.createTopics(List.of(new NewTopic(testTopicName, numPartitions, numReplicationFactor).configs(topicConfig)));
}
verifyRemoteLogTopicConfigs(topicConfig);
}
@ClusterTest
void testCreateRemoteTopicWithInvalidRetentionTime() {
// inherited local retention ms is 1000
var topicConfig = Map.of(
TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true",
TopicConfig.RETENTION_MS_CONFIG, "200"
);
try (var admin = cluster.admin()) {
assertFutureThrows(InvalidConfigurationException.class, admin.createTopics(List.of(new NewTopic(testTopicName, numPartitions, numReplicationFactor).configs(topicConfig))).all());
}
}
@ClusterTest
void testCreateRemoteTopicWithInvalidRetentionSize() {
// inherited local retention bytes is 1024
var topicConfig = Map.of(
TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true",
TopicConfig.RETENTION_BYTES_CONFIG, "512"
);
try (var admin = cluster.admin()) {
assertFutureThrows(InvalidConfigurationException.class, admin.createTopics(List.of(new NewTopic(testTopicName, numPartitions, numReplicationFactor).configs(topicConfig))).all());
}
}
@ClusterTest
void testCreateCompactedRemoteStorage() {
var topicConfig = Map.of(
TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true",
TopicConfig.CLEANUP_POLICY_CONFIG, "compact"
);
try (var admin = cluster.admin()) {
assertFutureThrows(InvalidConfigurationException.class, admin.createTopics(List.of(new NewTopic(testTopicName, numPartitions, numReplicationFactor).configs(topicConfig))).all());
}
}
@ClusterTests({
@ClusterTest(serverProperties = {
@ClusterConfigProperty(key = TopicConfig.REMOTE_LOG_COPY_DISABLE_CONFIG, value = "true"),
@ClusterConfigProperty(key = TopicConfig.REMOTE_LOG_DELETE_ON_DISABLE_CONFIG, value = "true")
}),
@ClusterTest(serverProperties = {
@ClusterConfigProperty(key = TopicConfig.REMOTE_LOG_COPY_DISABLE_CONFIG, value = "true"),
@ClusterConfigProperty(key = TopicConfig.REMOTE_LOG_DELETE_ON_DISABLE_CONFIG, value = "false")
}),
@ClusterTest(serverProperties = {
@ClusterConfigProperty(key = TopicConfig.REMOTE_LOG_COPY_DISABLE_CONFIG, value = "false"),
@ClusterConfigProperty(key = TopicConfig.REMOTE_LOG_DELETE_ON_DISABLE_CONFIG, value = "true")
}),
@ClusterTest(serverProperties = {
@ClusterConfigProperty(key = TopicConfig.REMOTE_LOG_COPY_DISABLE_CONFIG, value = "false"),
@ClusterConfigProperty(key = TopicConfig.REMOTE_LOG_DELETE_ON_DISABLE_CONFIG, value = "false")
})
})
void testCreateRemoteTopicWithCopyDisabledAndDeleteOnDisable() throws Exception {
var topicConfig = Map.of(
TopicConfig.REMOTE_LOG_COPY_DISABLE_CONFIG, cluster.config().serverProperties().get(TopicConfig.REMOTE_LOG_COPY_DISABLE_CONFIG),
TopicConfig.REMOTE_LOG_DELETE_ON_DISABLE_CONFIG, cluster.config().serverProperties().get(TopicConfig.REMOTE_LOG_DELETE_ON_DISABLE_CONFIG)
);
try (var admin = cluster.admin()) {
var result = admin.createTopics(List.of(new NewTopic(testTopicName, numPartitions, numReplicationFactor).configs(topicConfig)));
assertDoesNotThrow(() -> result.all().get(30, TimeUnit.SECONDS));
}
verifyRemoteLogTopicConfigs(topicConfig);
}
@ClusterTest
void testCreateTopicRetentionMsValidationWithRemoteCopyDisabled() throws Exception {
var testTopicName2 = testTopicName + "2";
var testTopicName3 = testTopicName + "3";
var errorMsgMs = "When `remote.log.copy.disable` is set to true, the `local.retention.ms` and `retention.ms` " +
"must be set to the identical value because there will be no more logs copied to the remote storage.";
// 1. create a topic with `remote.log.copy.disable=true` and have different local.retention.ms and retention.ms value,
// it should fail to create the topic
Map<String, String> topicConfig = new HashMap<>();
topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true");
topicConfig.put(TopicConfig.REMOTE_LOG_COPY_DISABLE_CONFIG, "true");
topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, "100");
topicConfig.put(TopicConfig.RETENTION_MS_CONFIG, "1000");
topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG, "-2");
try (var admin = cluster.admin()) {
// Test that creating topic with invalid config fails with appropriate error message
var err = assertFutureThrows(InvalidConfigurationException.class, admin.createTopics(List.of(new NewTopic(testTopicName, numPartitions, numReplicationFactor).configs(topicConfig))).all());
assertEquals(errorMsgMs, Objects.requireNonNull(err).getMessage());
// 2. change the local.retention.ms value to the same value as retention.ms should successfully create the topic
topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, "1000");
admin.createTopics(List.of(new NewTopic(testTopicName, numPartitions, numReplicationFactor).configs(topicConfig))).all().get();
// 3. change the local.retention.ms value to "-2" should also successfully create the topic
topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, "-2");
admin.createTopics(List.of(new NewTopic(testTopicName2, numPartitions, numReplicationFactor).configs(topicConfig))).values().get(testTopicName2).get();
// 4. create a topic with `remote.log.copy.disable=false` and have different local.retention.ms and retention.ms value,
// it should successfully creates the topic.
topicConfig.clear();
topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true");
topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, "100");
topicConfig.put(TopicConfig.RETENTION_MS_CONFIG, "1000");
topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG, "-2");
admin.createTopics(List.of(new NewTopic(testTopicName3, numPartitions, numReplicationFactor).configs(topicConfig))).values().get(testTopicName3).get();
// 5. alter the config to `remote.log.copy.disable=true`, it should fail the config change
var configs = new HashMap<ConfigResource, Collection<AlterConfigOp>>();
configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName3),
List.of(
new AlterConfigOp(new ConfigEntry(TopicConfig.REMOTE_LOG_COPY_DISABLE_CONFIG, "true"),
AlterConfigOp.OpType.SET)
));
var err2 = assertFutureThrows(InvalidConfigurationException.class, admin.incrementalAlterConfigs(configs).all());
assertEquals(errorMsgMs, Objects.requireNonNull(err2).getMessage());
// 6. alter the config to `remote.log.copy.disable=true` and local.retention.ms == retention.ms, it should work without error
configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName3),
List.of(
new AlterConfigOp(new ConfigEntry(TopicConfig.REMOTE_LOG_COPY_DISABLE_CONFIG, "true"),
AlterConfigOp.OpType.SET),
new AlterConfigOp(new ConfigEntry(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, "1000"),
AlterConfigOp.OpType.SET)
));
admin.incrementalAlterConfigs(configs).all().get();
}
}
@ClusterTest
void testCreateTopicRetentionBytesValidationWithRemoteCopyDisabled() throws Exception {
var testTopicName2 = testTopicName + "2";
var testTopicName3 = testTopicName + "3";
var errorMsgBytes = "When `remote.log.copy.disable` is set to true, the `local.retention.bytes` and `retention.bytes` " +
"must be set to the identical value because there will be no more logs copied to the remote storage.";
// 1. create a topic with `remote.log.copy.disable=true` and have different local.retention.bytes and retention.bytes value,
// it should fail to create the topic
Map<String, String> topicConfig = new HashMap<>();
topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true");
topicConfig.put(TopicConfig.REMOTE_LOG_COPY_DISABLE_CONFIG, "true");
topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG, "100");
topicConfig.put(TopicConfig.RETENTION_BYTES_CONFIG, "1000");
topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, "-2");
try (var admin = cluster.admin()) {
var err = assertFutureThrows(InvalidConfigurationException.class, admin.createTopics(List.of(new NewTopic(testTopicName, numPartitions, numReplicationFactor).configs(topicConfig))).all());
assertEquals(errorMsgBytes, Objects.requireNonNull(err).getMessage());
// 2. change the local.retention.bytes value to the same value as retention.bytes should successfully create the topic
topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG, "1000");
admin.createTopics(List.of(new NewTopic(testTopicName, numPartitions, numReplicationFactor).configs(topicConfig))).all().get();
// 3. change the local.retention.bytes value to "-2" should also successfully create the topic
topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG, "-2");
admin.createTopics(List.of(new NewTopic(testTopicName2, numPartitions, numReplicationFactor).configs(topicConfig))).values().get(testTopicName2).get();
// 4. create a topic with `remote.log.copy.disable=false` and have different local.retention.bytes and retention.bytes value,
// it should successfully creates the topic.
topicConfig.clear();
topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true");
topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG, "100");
topicConfig.put(TopicConfig.RETENTION_BYTES_CONFIG, "1000");
topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, "-2");
admin.createTopics(List.of(new NewTopic(testTopicName3, numPartitions, numReplicationFactor).configs(topicConfig))).values().get(testTopicName3).get();
// 5. alter the config to `remote.log.copy.disable=true`, it should fail the config change
var configs = new HashMap<ConfigResource, Collection<AlterConfigOp>>();
configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName3),
List.of(
new AlterConfigOp(new ConfigEntry(TopicConfig.REMOTE_LOG_COPY_DISABLE_CONFIG, "true"),
AlterConfigOp.OpType.SET)
));
var err2 = assertFutureThrows(InvalidConfigurationException.class, admin.incrementalAlterConfigs(configs).all());
assertEquals(errorMsgBytes, Objects.requireNonNull(err2).getMessage());
// 6. alter the config to `remote.log.copy.disable=true` and local.retention.bytes == retention.bytes, it should work without error
configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName3),
List.of(
new AlterConfigOp(new ConfigEntry(TopicConfig.REMOTE_LOG_COPY_DISABLE_CONFIG, "true"),
AlterConfigOp.OpType.SET),
new AlterConfigOp(new ConfigEntry(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG, "1000"),
AlterConfigOp.OpType.SET)
));
admin.incrementalAlterConfigs(configs).all().get();
}
}
@ClusterTest
void testEnableRemoteLogOnExistingTopic() throws Exception {
try (var admin = cluster.admin()) {
admin.createTopics(List.of(new NewTopic(testTopicName, numPartitions, numReplicationFactor).configs(Map.of()))).all().get();
var configs = new HashMap<ConfigResource, Collection<AlterConfigOp>>();
configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName),
Set.of(new AlterConfigOp(new ConfigEntry(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true"), AlterConfigOp.OpType.SET))
);
admin.incrementalAlterConfigs(configs).all().get();
verifyRemoteLogTopicConfigs(Map.of());
}
}
@ClusterTest(serverProperties = {
@ClusterConfigProperty(key = RemoteLogManagerConfig.REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP, value = "false")
})
void testEnableRemoteLogWhenSystemRemoteStorageIsDisabled() throws ExecutionException, InterruptedException {
try (var admin = cluster.admin()) {
var topicConfig = Map.of(
TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true"
);
var error = assertFutureThrows(InvalidConfigurationException.class, admin.createTopics(List.of(new NewTopic(testTopicName, numPartitions, numReplicationFactor).configs(topicConfig))).all());
assertTrue(Objects.requireNonNull(error).getMessage().contains("Tiered Storage functionality is disabled in the broker"));
admin.createTopics(List.of(new NewTopic(testTopicName, numPartitions, numReplicationFactor))).all().get();
var configs = new HashMap<ConfigResource, Collection<AlterConfigOp>>();
configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName),
Set.of(
new AlterConfigOp(new ConfigEntry(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true"),
AlterConfigOp.OpType.SET))
);
var error2 = assertFutureThrows(InvalidConfigurationException.class, admin.incrementalAlterConfigs(configs).all());
assertTrue(Objects.requireNonNull(error2).getMessage().contains("Tiered Storage functionality is disabled in the broker"));
}
}
@ClusterTest
void testUpdateTopicConfigWithValidRetentionTime() throws Exception {
try (var admin = cluster.admin()) {
var topicConfig = Map.of(
TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true"
);
admin.createTopics(List.of(new NewTopic(testTopicName, numPartitions, numReplicationFactor)
.configs(topicConfig))).all().get();
var configs = new HashMap<ConfigResource, Collection<AlterConfigOp>>();
configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName),
List.of(
new AlterConfigOp(new ConfigEntry(TopicConfig.RETENTION_MS_CONFIG, "200"),
AlterConfigOp.OpType.SET),
new AlterConfigOp(new ConfigEntry(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, "100"),
AlterConfigOp.OpType.SET)
));
admin.incrementalAlterConfigs(configs).all().get();
verifyRemoteLogTopicConfigs(topicConfig);
}
}
@ClusterTest
void testUpdateTopicConfigWithValidRetentionSize() throws Exception {
try (var admin = cluster.admin()) {
var topicConfig = Map.of(
TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true"
);
admin.createTopics(List.of(new NewTopic(testTopicName, numPartitions, numReplicationFactor)
.configs(topicConfig))).all().get();
var configs = new HashMap<ConfigResource, Collection<AlterConfigOp>>();
configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName),
List.of(
new AlterConfigOp(new ConfigEntry(TopicConfig.RETENTION_BYTES_CONFIG, "200"),
AlterConfigOp.OpType.SET),
new AlterConfigOp(new ConfigEntry(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG, "100"),
AlterConfigOp.OpType.SET)
));
admin.incrementalAlterConfigs(configs).all().get();
verifyRemoteLogTopicConfigs(topicConfig);
}
}
@ClusterTest
void testUpdateTopicConfigWithInheritedLocalRetentionTime() throws Exception {
try (var admin = cluster.admin()) {
var topicConfig = Map.of(
TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true"
);
admin.createTopics(List.of(new NewTopic(testTopicName, numPartitions, numReplicationFactor)
.configs(topicConfig))).all().get();
// inherited local retention ms is 1000
var configs = new HashMap<ConfigResource, Collection<AlterConfigOp>>();
configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName),
List.of(
new AlterConfigOp(new ConfigEntry(TopicConfig.RETENTION_MS_CONFIG, "200"),
AlterConfigOp.OpType.SET)
));
assertFutureThrows(InvalidConfigurationException.class, admin.incrementalAlterConfigs(configs).all());
}
}
@ClusterTest
void testUpdateTopicConfigWithInheritedLocalRetentionSize() throws Exception {
try (var admin = cluster.admin()) {
var topicConfig = Map.of(
TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true"
);
admin.createTopics(List.of(new NewTopic(testTopicName, numPartitions, numReplicationFactor)
.configs(topicConfig))).all().get();
// inherited local retention bytes is 1024
var configs = new HashMap<ConfigResource, Collection<AlterConfigOp>>();
configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName),
List.of(
new AlterConfigOp(new ConfigEntry(TopicConfig.RETENTION_BYTES_CONFIG, "512"),
AlterConfigOp.OpType.SET)
));
assertFutureThrows(InvalidConfigurationException.class, admin.incrementalAlterConfigs(configs).all(), "Invalid value 1024 for configuration local.retention.bytes: Value must not be more than retention.bytes property value: 512");
}
}
@ClusterTest
void testUpdateTopicConfigWithDisablingRemoteStorage() throws Exception {
try (var admin = cluster.admin()) {
var topicConfig = Map.of(
TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true"
);
admin.createTopics(List.of(new NewTopic(testTopicName, numPartitions, numReplicationFactor)
.configs(topicConfig))).all().get();
var configs = new HashMap<ConfigResource, Collection<AlterConfigOp>>();
configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName),
List.of(
new AlterConfigOp(new ConfigEntry(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "false"),
AlterConfigOp.OpType.SET)
));
assertFutureThrows(InvalidConfigurationException.class,
admin.incrementalAlterConfigs(configs).all(),
"It is invalid to disable remote storage without deleting remote data. " +
"If you want to keep the remote data and turn to read only, please set `remote.storage.enable=true,remote.log.copy.disable=true`. " +
"If you want to disable remote storage and delete all remote data, please set `remote.storage.enable=false,remote.log.delete.on.disable=true`."
);
}
}
@ClusterTest
void testUpdateTopicConfigWithDisablingRemoteStorageWithDeleteOnDisable() throws Exception {
try (var admin = cluster.admin()) {
var topicConfig = Map.of(
TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true"
);
admin.createTopics(List.of(new NewTopic(testTopicName, numPartitions, numReplicationFactor)
.configs(topicConfig))).all().get();
var configs = new HashMap<ConfigResource, Collection<AlterConfigOp>>();
configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName),
List.of(
new AlterConfigOp(new ConfigEntry(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "false"),
AlterConfigOp.OpType.SET),
new AlterConfigOp(new ConfigEntry(TopicConfig.REMOTE_LOG_DELETE_ON_DISABLE_CONFIG, "true"),
AlterConfigOp.OpType.SET)
));
admin.incrementalAlterConfigs(configs).all().get();
var newProps = new HashMap<String, String>();
for (AlterConfigOp op : configs.get(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName))) {
newProps.put(op.configEntry().name(), op.configEntry().value());
}
verifyRemoteLogTopicConfigs(newProps);
}
}
@ClusterTest(
serverProperties = {
@ClusterConfigProperty(key = RemoteLogManagerConfig.REMOTE_STORAGE_MANAGER_CLASS_NAME_PROP, value = "org.apache.kafka.admin.RemoteTopicCrudTest$MyRemoteStorageManager"),
@ClusterConfigProperty(key = RemoteLogManagerConfig.REMOTE_LOG_METADATA_MANAGER_CLASS_NAME_PROP, value = "org.apache.kafka.admin.RemoteTopicCrudTest$MyRemoteLogMetadataManager")
}
)
void testTopicDeletion() throws Exception {
try (var admin = cluster.admin()) {
MyRemoteStorageManager.DELETE_SEGMENT_EVENT_COUNTER.set(0);
var topicConfig = Map.of(
TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true",
TopicConfig.RETENTION_MS_CONFIG, "200",
TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, "100"
);
admin.createTopics(List.of(new NewTopic(testTopicName, numPartitions, numReplicationFactor)
.configs(topicConfig))).all().get();
admin.deleteTopics(List.of(testTopicName)).all().get();
TestUtils.waitForCondition(() -> {
assertFutureThrows(UnknownTopicOrPartitionException.class, admin.describeTopics(List.of(testTopicName)).allTopicNames());
return true;
}, "Topic should be deleted");
TestUtils.waitForCondition(() ->
numPartitions * MyRemoteLogMetadataManager.SEGMENT_COUNT_PER_PARTITION == MyRemoteStorageManager.DELETE_SEGMENT_EVENT_COUNTER.get(),
"Remote log segments should be deleted only once by the leader");
}
}
private void verifyRemoteLogTopicConfigs(Map<String, String> topicConfig) throws Exception {
TestCondition condition = () -> {
var logBuffer = cluster.brokers().values()
.stream()
.map(broker -> broker.logManager().getLog(new TopicPartition(testTopicName, 0), false))
.map(OptionConverters::toJava)
.flatMap(Optional::stream)
.toList();
var result = !logBuffer.isEmpty();
if (result) {
if (topicConfig.containsKey(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG)) {
result = Boolean.parseBoolean(
topicConfig.get(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG)) == logBuffer.get(0).config().remoteStorageEnable();
}
if (topicConfig.containsKey(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG)) {
result = result
&& Long.parseLong(
topicConfig.get(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG))
== logBuffer.get(0).config().localRetentionBytes();
}
if (topicConfig.containsKey(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG)) {
result = result
&& Long.parseLong(
topicConfig.get(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG))
== logBuffer.get(0).config().localRetentionMs();
}
if (topicConfig.containsKey(TopicConfig.RETENTION_MS_CONFIG)) {
result = result
&& Long.parseLong(
topicConfig.get(TopicConfig.RETENTION_MS_CONFIG))
== logBuffer.get(0).config().retentionMs;
}
if (topicConfig.containsKey(TopicConfig.RETENTION_BYTES_CONFIG)) {
result = result
&& Long.parseLong(
topicConfig.get(TopicConfig.RETENTION_BYTES_CONFIG))
== logBuffer.get(0).config().retentionSize;
}
if (topicConfig.containsKey(TopicConfig.REMOTE_LOG_COPY_DISABLE_CONFIG)) {
result = result
&& Boolean.parseBoolean(
topicConfig.get(TopicConfig.REMOTE_LOG_COPY_DISABLE_CONFIG))
== logBuffer.get(0).config().remoteLogCopyDisable();
}
if (topicConfig.containsKey(TopicConfig.REMOTE_LOG_DELETE_ON_DISABLE_CONFIG)) {
result = result
&& Boolean.parseBoolean(
topicConfig.get(TopicConfig.REMOTE_LOG_DELETE_ON_DISABLE_CONFIG))
== logBuffer.get(0).config().remoteLogDeleteOnDisable();
}
}
return result;
};
TestUtils.waitForCondition(condition, "Failed to update topic config $topicConfig" + topicConfig);
}
public static | RemoteTopicCrudTest |
java | apache__spark | common/network-common/src/main/java/org/apache/spark/network/util/LevelDBProvider.java | {
"start": 3947,
"end": 5205
} | class ____ implements org.iq80.leveldb.Logger {
private static final SparkLogger LOG = SparkLoggerFactory.getLogger(LevelDBLogger.class);
@Override
public void log(String message) {
LOG.info(message);
}
}
/**
* Simple major.minor versioning scheme. Any incompatible changes should be across major
* versions. Minor version differences are allowed -- meaning we should be able to read
* dbs that are either earlier *or* later on the minor version.
*/
public static void checkVersion(DB db, StoreVersion newversion, ObjectMapper mapper) throws
IOException {
byte[] bytes = db.get(StoreVersion.KEY);
if (bytes == null) {
storeVersion(db, newversion, mapper);
} else {
StoreVersion version = mapper.readValue(bytes, StoreVersion.class);
if (version.major != newversion.major) {
throw new IOException("cannot read state DB with version " + version + ", incompatible " +
"with current version " + newversion);
}
storeVersion(db, newversion, mapper);
}
}
public static void storeVersion(DB db, StoreVersion version, ObjectMapper mapper)
throws IOException {
db.put(StoreVersion.KEY, mapper.writeValueAsBytes(version));
}
}
| LevelDBLogger |
java | apache__kafka | streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/KafkaStreamsCloseOptionsIntegrationTest.java | {
"start": 2788,
"end": 8537
} | class ____ {
private static MockTime mockTime;
protected static final String INPUT_TOPIC = "inputTopic";
protected static final String OUTPUT_TOPIC = "outputTopic";
protected Properties streamsConfig;
protected static KafkaStreams streams;
protected static Admin adminClient;
protected Properties commonClientConfig;
private Properties producerConfig;
protected Properties resultConsumerConfig;
private final File testFolder = TestUtils.tempDirectory();
public static final EmbeddedKafkaCluster CLUSTER;
static {
final Properties brokerProps = new Properties();
brokerProps.setProperty(GroupCoordinatorConfig.GROUP_MAX_SESSION_TIMEOUT_MS_CONFIG, Integer.toString(Integer.MAX_VALUE));
CLUSTER = new EmbeddedKafkaCluster(1, brokerProps);
}
@BeforeAll
public static void startCluster() throws IOException {
CLUSTER.start();
}
@AfterAll
public static void closeCluster() {
Utils.closeQuietly(adminClient, "admin");
CLUSTER.stop();
}
@BeforeEach
public void before(final TestInfo testName) throws Exception {
mockTime = CLUSTER.time;
final String appID = safeUniqueTestName(testName);
commonClientConfig = new Properties();
commonClientConfig.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
streamsConfig = new Properties();
streamsConfig.put(StreamsConfig.APPLICATION_ID_CONFIG, appID);
streamsConfig.put(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, "someGroupInstance");
streamsConfig.put(StreamsConfig.STATE_DIR_CONFIG, testFolder.getPath());
streamsConfig.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.Long().getClass());
streamsConfig.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
streamsConfig.put(StreamsConfig.STATESTORE_CACHE_MAX_BYTES_CONFIG, 0);
streamsConfig.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100L);
streamsConfig.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 100);
streamsConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
// In this test, we set the SESSION_TIMEOUT_MS_CONFIG high in order to show that the call to
// `close(CloseOptions)` can remove the application from the Consumder Groups successfully.
streamsConfig.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, Integer.MAX_VALUE);
streamsConfig.putAll(commonClientConfig);
producerConfig = new Properties();
producerConfig.put(ProducerConfig.ACKS_CONFIG, "all");
producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, LongSerializer.class);
producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
producerConfig.putAll(commonClientConfig);
resultConsumerConfig = new Properties();
resultConsumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, appID + "-result-consumer");
resultConsumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
resultConsumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class);
resultConsumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
resultConsumerConfig.putAll(commonClientConfig);
if (adminClient == null) {
adminClient = Admin.create(commonClientConfig);
}
CLUSTER.deleteAllTopics();
CLUSTER.createTopic(INPUT_TOPIC, 2, 1);
CLUSTER.createTopic(OUTPUT_TOPIC, 2, 1);
add10InputElements();
}
@AfterEach
public void after() throws Exception {
if (streams != null) {
streams.close(Duration.ofSeconds(30));
}
Utils.delete(testFolder);
}
@Test
public void testCloseOptions() throws Exception {
// Test with two threads to show that each of the threads is being called to remove clients from the CG.
streamsConfig.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, 2);
streams = new KafkaStreams(setupTopologyWithoutIntermediateUserTopic(), streamsConfig);
IntegrationTestUtils.startApplicationAndWaitUntilRunning(streams);
IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(resultConsumerConfig, OUTPUT_TOPIC, 10);
streams.close(CloseOptions.groupMembershipOperation(CloseOptions.GroupMembershipOperation.LEAVE_GROUP).withTimeout(Duration.ofSeconds(30)));
waitForEmptyConsumerGroup(adminClient, streamsConfig.getProperty(StreamsConfig.APPLICATION_ID_CONFIG), 0);
}
protected Topology setupTopologyWithoutIntermediateUserTopic() {
final StreamsBuilder builder = new StreamsBuilder();
final KStream<Long, String> input = builder.stream(INPUT_TOPIC);
input.to(OUTPUT_TOPIC, Produced.with(Serdes.Long(), Serdes.String()));
return builder.build();
}
private void add10InputElements() {
final List<KeyValue<Long, String>> records = Arrays.asList(KeyValue.pair(0L, "aaa"),
KeyValue.pair(1L, "bbb"),
KeyValue.pair(0L, "ccc"),
KeyValue.pair(1L, "ddd"),
KeyValue.pair(0L, "eee"),
KeyValue.pair(1L, "fff"),
KeyValue.pair(0L, "ggg"),
KeyValue.pair(1L, "hhh"),
KeyValue.pair(0L, "iii"),
KeyValue.pair(1L, "jjj"));
for (final KeyValue<Long, String> record : records) {
mockTime.sleep(10);
IntegrationTestUtils.produceKeyValuesSynchronouslyWithTimestamp(INPUT_TOPIC, Collections.singleton(record), producerConfig, mockTime.milliseconds());
}
}
}
| KafkaStreamsCloseOptionsIntegrationTest |
java | spring-projects__spring-boot | build-plugin/spring-boot-gradle-plugin/src/dockerTest/java/org/springframework/boot/gradle/tasks/bundling/BootBuildImageIntegrationTests.java | {
"start": 26963,
"end": 32294
} | class ____ {");
writer.println();
writer.println(" public static void main(String[] args) throws Exception {");
writer.println(" System.out.println(\"Launched\");");
writer.println(" synchronized(args) {");
writer.println(" args.wait(); // Prevent exit");
writer.println(" }");
writer.println(" }");
writer.println();
writer.println("}");
}
}
private void writeLongNameResource() throws IOException {
StringBuilder name = new StringBuilder();
new Random().ints('a', 'z' + 1).limit(128).forEach((i) -> name.append((char) i));
Path path = this.gradleBuild.getProjectDir()
.toPath()
.resolve(Paths.get("src", "main", "resources", name.toString()));
Files.createDirectories(path.getParent());
Files.createFile(path);
}
private void writeBuildpackContent() throws IOException {
FileAttribute<Set<PosixFilePermission>> dirAttribute = PosixFilePermissions
.asFileAttribute(PosixFilePermissions.fromString("rwxr-xr-x"));
FileAttribute<Set<PosixFilePermission>> execFileAttribute = PosixFilePermissions
.asFileAttribute(PosixFilePermissions.fromString("rwxrwxrwx"));
File buildpackDir = new File(this.gradleBuild.getProjectDir(), "buildpack/hello-world");
Files.createDirectories(buildpackDir.toPath(), dirAttribute);
File binDir = new File(buildpackDir, "bin");
Files.createDirectories(binDir.toPath(), dirAttribute);
File descriptor = new File(buildpackDir, "buildpack.toml");
try (PrintWriter writer = new PrintWriter(new FileWriter(descriptor))) {
writer.println("api = \"0.10\"");
writer.println("[buildpack]");
writer.println("id = \"example/hello-world\"");
writer.println("version = \"0.0.1\"");
writer.println("name = \"Hello World Buildpack\"");
writer.println("homepage = \"https://github.com/buildpacks/samples/tree/main/buildpacks/hello-world\"");
writer.println("[[stacks]]\n");
writer.println("id = \"*\"");
}
File detect = Files.createFile(Paths.get(binDir.getAbsolutePath(), "detect"), execFileAttribute).toFile();
try (PrintWriter writer = new PrintWriter(new FileWriter(detect))) {
writer.println("#!/usr/bin/env bash");
writer.println("set -eo pipefail");
writer.println("exit 0");
}
File build = Files.createFile(Paths.get(binDir.getAbsolutePath(), "build"), execFileAttribute).toFile();
try (PrintWriter writer = new PrintWriter(new FileWriter(build))) {
writer.println("#!/usr/bin/env bash");
writer.println("set -eo pipefail");
writer.println("echo \"---> Hello World buildpack\"");
writer.println("echo \"---> done\"");
writer.println("exit 0");
}
}
private void tarGzipBuildpackContent() throws IOException {
Path tarGzipPath = Paths.get(this.gradleBuild.getProjectDir().getAbsolutePath(), "hello-world.tgz");
try (TarArchiveOutputStream tar = new TarArchiveOutputStream(
new GzipCompressorOutputStream(Files.newOutputStream(Files.createFile(tarGzipPath))))) {
File buildpackDir = new File(this.gradleBuild.getProjectDir(), "buildpack/hello-world");
writeDirectoryToTar(tar, buildpackDir, buildpackDir.getAbsolutePath());
}
}
private void writeDirectoryToTar(TarArchiveOutputStream tar, File dir, String baseDirPath) throws IOException {
for (File file : dir.listFiles()) {
String name = file.getAbsolutePath().replace(baseDirPath, "");
int mode = FilePermissions.umaskForPath(file.toPath());
if (file.isDirectory()) {
writeTarEntry(tar, name + "/", mode);
writeDirectoryToTar(tar, file, baseDirPath);
}
else {
writeTarEntry(tar, file, name, mode);
}
}
}
private void writeTarEntry(TarArchiveOutputStream tar, String name, int mode) throws IOException {
TarArchiveEntry entry = new TarArchiveEntry(name);
entry.setMode(mode);
tar.putArchiveEntry(entry);
tar.closeArchiveEntry();
}
private void writeTarEntry(TarArchiveOutputStream tar, File file, String name, int mode) throws IOException {
TarArchiveEntry entry = new TarArchiveEntry(file, name);
entry.setMode(mode);
tar.putArchiveEntry(entry);
StreamUtils.copy(Files.newInputStream(file.toPath()), tar);
tar.closeArchiveEntry();
}
private void writeCertificateBindingFiles() throws IOException {
File bindingDir = new File(this.gradleBuild.getProjectDir(), "bindings/ca-certificates");
bindingDir.mkdirs();
File type = new File(bindingDir, "type");
try (PrintWriter writer = new PrintWriter(new FileWriter(type))) {
writer.print("ca-certificates");
}
File cert1 = new File(bindingDir, "test1.crt");
try (PrintWriter writer = new PrintWriter(new FileWriter(cert1))) {
writer.println("---certificate one---");
}
File cert2 = new File(bindingDir, "test2.crt");
try (PrintWriter writer = new PrintWriter(new FileWriter(cert2))) {
writer.println("---certificate two---");
}
}
private void removeImages(String... names) throws IOException {
ImageApi imageApi = new DockerApi().image();
for (String name : names) {
try {
imageApi.remove(ImageReference.of(name), false);
}
catch (DockerEngineException ex) {
// ignore image remove failures
}
}
}
private void deleteVolumes(String... names) throws IOException {
VolumeApi volumeApi = new DockerApi().volume();
for (String name : names) {
volumeApi.delete(VolumeName.of(name), false);
}
}
}
| Main |
java | elastic__elasticsearch | server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java | {
"start": 7711,
"end": 8889
} | class ____ extends MockEngineFactoryPlugin {
public static final Setting<Double> EXCEPTION_TOP_LEVEL_RATIO_SETTING = Setting.doubleSetting(
EXCEPTION_TOP_LEVEL_RATIO_KEY,
0.1d,
0.0d,
Property.IndexScope
);
public static final Setting<Double> EXCEPTION_LOW_LEVEL_RATIO_SETTING = Setting.doubleSetting(
EXCEPTION_LOW_LEVEL_RATIO_KEY,
0.1d,
0.0d,
Property.IndexScope
);
@Override
public List<Setting<?>> getSettings() {
List<Setting<?>> settings = new ArrayList<>();
settings.addAll(super.getSettings());
settings.add(EXCEPTION_TOP_LEVEL_RATIO_SETTING);
settings.add(EXCEPTION_LOW_LEVEL_RATIO_SETTING);
return settings;
}
@Override
protected Class<? extends FilterDirectoryReader> getReaderWrapperClass() {
return RandomExceptionDirectoryReaderWrapper.class;
}
}
private final Settings settings;
static | TestPlugin |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/runtime/translators/BroadcastStateTransformationTranslator.java | {
"start": 1851,
"end": 3871
} | class ____<IN1, IN2, OUT>
extends AbstractTwoInputTransformationTranslator<
IN1, IN2, OUT, BroadcastStateTransformation<IN1, IN2, OUT>> {
@Override
protected Collection<Integer> translateForBatchInternal(
final BroadcastStateTransformation<IN1, IN2, OUT> transformation,
final Context context) {
checkNotNull(transformation);
checkNotNull(context);
BatchCoBroadcastWithNonKeyedOperator<IN1, IN2, OUT> operator =
new BatchCoBroadcastWithNonKeyedOperator<>(
transformation.getUserFunction(),
transformation.getBroadcastStateDescriptors());
return translateInternal(
transformation,
transformation.getRegularInput(),
transformation.getBroadcastInput(),
SimpleOperatorFactory.of(operator),
null /* no key type*/,
null /* no first key selector */,
null /* no second */,
context);
}
@Override
protected Collection<Integer> translateForStreamingInternal(
final BroadcastStateTransformation<IN1, IN2, OUT> transformation,
final Context context) {
checkNotNull(transformation);
checkNotNull(context);
CoBroadcastWithNonKeyedOperator<IN1, IN2, OUT> operator =
new CoBroadcastWithNonKeyedOperator<>(
transformation.getUserFunction(),
transformation.getBroadcastStateDescriptors());
return translateInternal(
transformation,
transformation.getRegularInput(),
transformation.getBroadcastInput(),
SimpleOperatorFactory.of(operator),
null /* no key type*/,
null /* no first key selector */,
null /* no key selector on broadcast input*/,
context);
}
}
| BroadcastStateTransformationTranslator |
java | grpc__grpc-java | api/src/main/java/io/grpc/CallOptions.java | {
"start": 2874,
"end": 10939
} | class ____ {
Deadline deadline;
Executor executor;
String authority;
CallCredentials credentials;
String compressorName;
Object[][] customOptions;
// Unmodifiable list
List<ClientStreamTracer.Factory> streamTracerFactories;
Boolean waitForReady;
Integer maxInboundMessageSize;
Integer maxOutboundMessageSize;
Integer onReadyThreshold;
private CallOptions build() {
return new CallOptions(this);
}
}
/**
* Override the HTTP/2 authority the channel claims to be connecting to. <em>This is not
* generally safe.</em> Overriding allows advanced users to re-use a single Channel for multiple
* services, even if those services are hosted on different domain names. That assumes the
* server is virtually hosting multiple domains and is guaranteed to continue doing so. It is
* rare for a service provider to make such a guarantee. <em>At this time, there is no security
* verification of the overridden value, such as making sure the authority matches the server's
* TLS certificate.</em>
*/
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/1767")
public CallOptions withAuthority(@Nullable String authority) {
Builder builder = toBuilder(this);
builder.authority = authority;
return builder.build();
}
/**
* Returns a new {@code CallOptions} with the given call credentials.
*/
public CallOptions withCallCredentials(@Nullable CallCredentials credentials) {
Builder builder = toBuilder(this);
builder.credentials = credentials;
return builder.build();
}
/**
* Sets the compression to use for the call. The compressor must be a valid name known in the
* {@link CompressorRegistry}. By default, the "gzip" compressor will be available.
*
* <p>It is only safe to call this if the server supports the compression format chosen. There is
* no negotiation performed; if the server does not support the compression chosen, the call will
* fail.
*/
public CallOptions withCompression(@Nullable String compressorName) {
Builder builder = toBuilder(this);
builder.compressorName = compressorName;
return builder.build();
}
/**
* Returns a new {@code CallOptions} with the given absolute deadline.
*
* <p>This is mostly used for propagating an existing deadline. {@link #withDeadlineAfter} is the
* recommended way of setting a new deadline,
*
* @param deadline the deadline or {@code null} for unsetting the deadline.
*/
public CallOptions withDeadline(@Nullable Deadline deadline) {
Builder builder = toBuilder(this);
builder.deadline = deadline;
return builder.build();
}
/**
* Returns a new {@code CallOptions} with a deadline that is after the given {@code duration} from
* now.
*/
public CallOptions withDeadlineAfter(long duration, TimeUnit unit) {
return withDeadline(Deadline.after(duration, unit));
}
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/11657")
public CallOptions withDeadlineAfter(Duration duration) {
return withDeadlineAfter(convertToNanos(duration), TimeUnit.NANOSECONDS);
}
/**
* Returns the deadline or {@code null} if the deadline is not set.
*/
@Nullable
public Deadline getDeadline() {
return deadline;
}
/**
* Enables <a href="https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md">
* 'wait for ready'</a> for the call. Wait-for-ready queues the RPC until a connection is
* available. This may dramatically increase the latency of the RPC, but avoids failing
* "unnecessarily." The default queues the RPC until an attempt to connect has completed, but
* fails RPCs without sending them if unable to connect.
*/
public CallOptions withWaitForReady() {
Builder builder = toBuilder(this);
builder.waitForReady = Boolean.TRUE;
return builder.build();
}
/**
* Disables 'wait for ready' feature for the call.
* This method should be rarely used because the default is without 'wait for ready'.
*/
public CallOptions withoutWaitForReady() {
Builder builder = toBuilder(this);
builder.waitForReady = Boolean.FALSE;
return builder.build();
}
/**
* Specifies how many bytes must be queued before the call is
* considered not ready to send more messages.
*
* @param numBytes The number of bytes that must be queued. Must be a
* positive integer.
*/
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/11021")
public CallOptions withOnReadyThreshold(int numBytes) {
checkArgument(numBytes > 0, "numBytes must be positive: %s", numBytes);
Builder builder = toBuilder(this);
builder.onReadyThreshold = numBytes;
return builder.build();
}
/**
* Resets to the default number of bytes that must be queued before the
* call will leave the <a href="https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md">
* 'wait for ready'</a> state.
*/
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/11021")
public CallOptions clearOnReadyThreshold() {
Builder builder = toBuilder(this);
builder.onReadyThreshold = null;
return builder.build();
}
/**
* Returns to the default number of bytes that must be queued before the
* call will leave the <a href="https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md">
* 'wait for ready'</a> state.
*
* @return null if the default threshold is used.
*/
@Nullable
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/11021")
public Integer getOnReadyThreshold() {
return onReadyThreshold;
}
/**
* Returns the compressor's name.
*/
@Nullable
public String getCompressor() {
return compressorName;
}
/**
* Override the HTTP/2 authority the channel claims to be connecting to. <em>This is not
* generally safe.</em> Overriding allows advanced users to re-use a single Channel for multiple
* services, even if those services are hosted on different domain names. That assumes the
* server is virtually hosting multiple domains and is guaranteed to continue doing so. It is
* rare for a service provider to make such a guarantee. <em>At this time, there is no security
* verification of the overridden value, such as making sure the authority matches the server's
* TLS certificate.</em>
*/
@Nullable
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/1767")
public String getAuthority() {
return authority;
}
/**
* Returns the call credentials.
*/
@Nullable
public CallCredentials getCredentials() {
return credentials;
}
/**
* Returns a new {@code CallOptions} with {@code executor} to be used instead of the default
* executor specified with {@link ManagedChannelBuilder#executor}.
*/
public CallOptions withExecutor(@Nullable Executor executor) {
Builder builder = toBuilder(this);
builder.executor = executor;
return builder.build();
}
/**
* Returns a new {@code CallOptions} with a {@code ClientStreamTracerFactory} in addition to
* the existing factories.
*
* <p>This method doesn't replace existing factories, or try to de-duplicate factories.
*/
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/2861")
public CallOptions withStreamTracerFactory(ClientStreamTracer.Factory factory) {
ArrayList<ClientStreamTracer.Factory> newList =
new ArrayList<>(streamTracerFactories.size() + 1);
newList.addAll(streamTracerFactories);
newList.add(factory);
Builder builder = toBuilder(this);
builder.streamTracerFactories = Collections.unmodifiableList(newList);
return builder.build();
}
/**
* Returns an immutable list of {@code ClientStreamTracerFactory}s.
*/
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/2861")
public List<ClientStreamTracer.Factory> getStreamTracerFactories() {
return streamTracerFactories;
}
/**
* Key for a key-value pair. Uses reference equality.
*/
public static final | Builder |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/cluster/metadata/ManifestTests.java | {
"start": 1094,
"end": 4497
} | class ____ extends ESTestCase {
private Manifest copyState(Manifest state, boolean introduceErrors) {
long currentTerm = state.currentTerm();
long clusterStateVersion = state.clusterStateVersion();
long generation = state.globalGeneration();
Map<Index, Long> indices = new HashMap<>(state.indexGenerations());
if (introduceErrors) {
switch (randomInt(3)) {
case 0 -> {
currentTerm = randomValueOtherThan(currentTerm, () -> randomNonNegativeLong());
}
case 1 -> {
clusterStateVersion = randomValueOtherThan(clusterStateVersion, () -> randomNonNegativeLong());
}
case 2 -> {
generation = randomValueOtherThan(generation, () -> randomNonNegativeLong());
}
case 3 -> {
switch (randomInt(2)) {
case 0 -> {
indices.remove(randomFrom(indices.keySet()));
}
case 1 -> {
Tuple<Index, Long> indexEntry = randomIndexEntry();
indices.put(indexEntry.v1(), indexEntry.v2());
}
case 2 -> {
Index index = randomFrom(indices.keySet());
indices.compute(index, (i, g) -> randomValueOtherThan(g, () -> randomNonNegativeLong()));
}
}
}
}
}
return new Manifest(currentTerm, clusterStateVersion, generation, indices);
}
private Tuple<Index, Long> randomIndexEntry() {
final String name = randomAlphaOfLengthBetween(4, 15);
final String uuid = UUIDs.randomBase64UUID();
final Index index = new Index(name, uuid);
final long indexGeneration = randomNonNegativeLong();
return Tuple.tuple(index, indexGeneration);
}
private Manifest randomManifest() {
long currentTerm = randomNonNegativeLong();
long clusterStateVersion = randomNonNegativeLong();
long generation = randomNonNegativeLong();
Map<Index, Long> indices = new HashMap<>();
for (int i = 0; i < randomIntBetween(1, 5); i++) {
Tuple<Index, Long> indexEntry = randomIndexEntry();
indices.put(indexEntry.v1(), indexEntry.v2());
}
return new Manifest(currentTerm, clusterStateVersion, generation, indices);
}
public void testEqualsAndHashCode() {
checkEqualsAndHashCode(randomManifest(), org -> copyState(org, false), org -> copyState(org, true));
}
public void testXContent() throws IOException {
Manifest state = randomManifest();
final XContentBuilder builder = JsonXContent.contentBuilder();
builder.startObject();
Manifest.FORMAT.toXContent(builder, state);
builder.endObject();
BytesReference bytes = BytesReference.bytes(builder);
try (XContentParser parser = createParser(JsonXContent.jsonXContent, bytes)) {
assertThat(Manifest.fromXContent(parser), equalTo(state));
}
}
public void testEmptyManifest() {
assertTrue(Manifest.empty().isEmpty());
assertFalse(randomManifest().isEmpty());
}
}
| ManifestTests |
java | spring-projects__spring-boot | core/spring-boot/src/main/java/org/springframework/boot/logging/log4j2/CorrelationIdConverter.java | {
"start": 1732,
"end": 2665
} | class ____ extends LogEventPatternConverter {
private final CorrelationIdFormatter formatter;
private CorrelationIdConverter(CorrelationIdFormatter formatter) {
super("correlationId{%s}".formatted(formatter), "mdc");
this.formatter = formatter;
}
@Override
public void format(LogEvent event, StringBuilder toAppendTo) {
ReadOnlyStringMap contextData = event.getContextData();
this.formatter.formatTo(contextData::getValue, toAppendTo);
}
/**
* Factory method to create a new {@link CorrelationIdConverter}.
* @param options options, may be null or first element contains name of property to
* format.
* @return instance of PropertiesPatternConverter.
*/
public static CorrelationIdConverter newInstance(String @Nullable [] options) {
String pattern = (!ObjectUtils.isEmpty(options)) ? options[0] : null;
return new CorrelationIdConverter(CorrelationIdFormatter.of(pattern));
}
}
| CorrelationIdConverter |
java | grpc__grpc-java | core/src/main/java/io/grpc/internal/BackoffPolicyRetryScheduler.java | {
"start": 1066,
"end": 3006
} | class ____ implements RetryScheduler {
private final ScheduledExecutorService scheduledExecutorService;
private final SynchronizationContext syncContext;
private final BackoffPolicy.Provider policyProvider;
private BackoffPolicy policy;
private ScheduledHandle scheduledHandle;
private static final Logger logger = Logger.getLogger(
BackoffPolicyRetryScheduler.class.getName());
BackoffPolicyRetryScheduler(BackoffPolicy.Provider policyProvider,
ScheduledExecutorService scheduledExecutorService,
SynchronizationContext syncContext) {
this.policyProvider = policyProvider;
this.scheduledExecutorService = scheduledExecutorService;
this.syncContext = syncContext;
}
/**
* Schedules a future retry operation. Only allows one retry to be scheduled at any given time.
*/
@Override
public void schedule(Runnable retryOperation) {
syncContext.throwIfNotInThisSynchronizationContext();
if (policy == null) {
policy = policyProvider.get();
}
// If a retry is already scheduled, take no further action.
if (scheduledHandle != null && scheduledHandle.isPending()) {
return;
}
long delayNanos = policy.nextBackoffNanos();
scheduledHandle = syncContext.schedule(retryOperation, delayNanos, TimeUnit.NANOSECONDS,
scheduledExecutorService);
logger.log(Level.FINE, "Scheduling DNS resolution backoff for {0}ns", delayNanos);
}
/**
* Resets the {@link BackoffPolicyRetryScheduler} and cancels any pending retry task. The policy
* will be cleared thus also resetting any state associated with it (e.g. a backoff multiplier).
*/
@Override
public void reset() {
syncContext.throwIfNotInThisSynchronizationContext();
syncContext.execute(() -> {
if (scheduledHandle != null && scheduledHandle.isPending()) {
scheduledHandle.cancel();
}
policy = null;
});
}
}
| BackoffPolicyRetryScheduler |
java | elastic__elasticsearch | build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/dependencies/patches/MethodReplacement.java | {
"start": 616,
"end": 1200
} | class ____ extends MethodVisitor {
private final MethodVisitor delegate;
private final Runnable bodyWriter;
public MethodReplacement(MethodVisitor delegate, Runnable bodyWriter) {
super(Opcodes.ASM9);
this.delegate = delegate;
this.bodyWriter = bodyWriter;
}
@Override
public void visitCode() {
// delegate.visitCode();
bodyWriter.run();
// delegate.visitEnd();
}
@Override
public void visitMaxs(int maxStack, int maxLocals) {
delegate.visitMaxs(maxStack, maxLocals);
}
}
| MethodReplacement |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/compliance/tck2_2/NonSelectQueryLockMode.java | {
"start": 366,
"end": 899
} | class ____ extends AbstractJPATest {
@Test
public void testNonSelectQueryGetLockMode() {
Assertions.assertThrows(
IllegalStateException.class,
() -> inTransaction(
session -> session.createQuery( "delete Item" ).getLockMode()
)
);
}
@Test
public void testNonSelectQuerySetLockMode() {
Assertions.assertThrows(
IllegalStateException.class,
() -> inTransaction(
session -> session.createQuery( "delete Item" ).setLockMode( LockModeType.PESSIMISTIC_WRITE )
)
);
}
}
| NonSelectQueryLockMode |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/DistinctVarargsCheckerTest.java | {
"start": 9230,
"end": 9435
} | class ____ {",
" void testFunction() {",
" // BUG: Diagnostic contains: DistinctVarargsChecker",
large,
" }",
"}")
.doTest();
}
}
| Test |
java | spring-projects__spring-boot | module/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/endpoint/OperationResponseBodyMap.java | {
"start": 909,
"end": 1093
} | class ____<K, V> extends LinkedHashMap<K, V> implements OperationResponseBody {
OperationResponseBodyMap(Map<? extends K, ? extends V> map) {
super(map);
}
}
| OperationResponseBodyMap |
java | apache__dubbo | dubbo-remoting/dubbo-remoting-http12/src/main/java/org/apache/dubbo/remoting/http12/netty4/h1/NettyHttp1Codec.java | {
"start": 1992,
"end": 5092
} | class ____ extends ChannelDuplexHandler {
private boolean keepAlive;
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
// decode FullHttpRequest
if (msg instanceof FullHttpRequest) {
FullHttpRequest request = (FullHttpRequest) msg;
keepAlive = HttpUtil.isKeepAlive(request);
super.channelRead(
ctx,
new DefaultHttp1Request(
new Http1RequestMetadata(
new NettyHttp1HttpHeaders(request.headers()),
request.method().name(),
request.uri()),
new Http1InputMessage(new ByteBufInputStream(request.content(), true))));
return;
}
super.channelRead(ctx, msg);
}
@Override
public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
if (msg instanceof HttpMetadata) {
doWriteHeader(ctx, ((HttpMetadata) msg), promise);
return;
}
if (msg instanceof HttpOutputMessage) {
doWriteMessage(ctx, ((HttpOutputMessage) msg), promise);
return;
}
super.write(ctx, msg, promise);
}
private void doWriteHeader(ChannelHandlerContext ctx, HttpMetadata msg, ChannelPromise promise) {
// process status
NettyHttp1HttpHeaders headers = (NettyHttp1HttpHeaders) msg.headers();
List<String> statusHeaders = headers.remove(HttpHeaderNames.STATUS.getKey());
HttpResponseStatus status = HttpResponseStatus.OK;
if (CollectionUtils.isNotEmpty(statusHeaders)) {
status = HttpResponseStatus.valueOf(Integer.parseInt(statusHeaders.get(0)));
}
if (keepAlive) {
headers.add(HttpHeaderNames.CONNECTION.getKey(), String.valueOf(HttpHeaderValues.KEEP_ALIVE));
} else {
headers.add(HttpHeaderNames.CONNECTION.getKey(), String.valueOf(HttpHeaderValues.CLOSE));
}
// process normal headers
ctx.writeAndFlush(new DefaultHttpResponse(HttpVersion.HTTP_1_1, status, headers.getHeaders()), promise);
}
private void doWriteMessage(ChannelHandlerContext ctx, HttpOutputMessage msg, ChannelPromise promise) {
if (HttpOutputMessage.EMPTY_MESSAGE == msg) {
if (keepAlive) {
ctx.writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT, promise);
} else {
ctx.writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT, promise).addListener(ChannelFutureListener.CLOSE);
}
return;
}
OutputStream body = msg.getBody();
if (body instanceof ByteBufOutputStream) {
ByteBuf buffer = ((ByteBufOutputStream) body).buffer();
ctx.writeAndFlush(buffer, promise);
return;
}
throw new IllegalArgumentException("HttpOutputMessage body must be 'io.netty.buffer.ByteBufOutputStream'");
}
}
| NettyHttp1Codec |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestTrafficControlBandwidthHandlerImpl.java | {
"start": 2363,
"end": 6186
} | class ____ {
private static final Logger LOG =
LoggerFactory.getLogger(TestTrafficControlBandwidthHandlerImpl.class);
private static final int ROOT_BANDWIDTH_MBIT = 100;
private static final int YARN_BANDWIDTH_MBIT = 70;
private static final int TEST_CLASSID = 100;
private static final String TEST_CLASSID_STR = "42:100";
private static final String TEST_CONTAINER_ID_STR = "container_01";
private static final String TEST_TASKS_FILE = "testTasksFile";
private PrivilegedOperationExecutor privilegedOperationExecutorMock;
private CGroupsHandler cGroupsHandlerMock;
private TrafficController trafficControllerMock;
private Configuration conf;
private String tmpPath;
private String device;
ContainerId containerIdMock;
Container containerMock;
@BeforeEach
public void setup() {
privilegedOperationExecutorMock = mock(PrivilegedOperationExecutor.class);
cGroupsHandlerMock = mock(CGroupsHandler.class);
trafficControllerMock = mock(TrafficController.class);
conf = new YarnConfiguration();
tmpPath = new StringBuilder(System.getProperty("test.build.data")).append
('/').append("hadoop.tmp.dir").toString();
device = YarnConfiguration.DEFAULT_NM_NETWORK_RESOURCE_INTERFACE;
containerIdMock = mock(ContainerId.class);
containerMock = mock(Container.class);
when(containerIdMock.toString()).thenReturn(TEST_CONTAINER_ID_STR);
//mock returning a mock - an angel died somewhere.
when(containerMock.getContainerId()).thenReturn(containerIdMock);
conf.setInt(YarnConfiguration
.NM_NETWORK_RESOURCE_OUTBOUND_BANDWIDTH_MBIT, ROOT_BANDWIDTH_MBIT);
conf.setInt(YarnConfiguration
.NM_NETWORK_RESOURCE_OUTBOUND_BANDWIDTH_YARN_MBIT, YARN_BANDWIDTH_MBIT);
conf.set("hadoop.tmp.dir", tmpPath);
//In these tests, we'll only use TrafficController with recovery disabled
conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, false);
}
@Test
public void testBootstrap() {
TrafficControlBandwidthHandlerImpl handlerImpl = new
TrafficControlBandwidthHandlerImpl(privilegedOperationExecutorMock,
cGroupsHandlerMock, trafficControllerMock);
try {
handlerImpl.bootstrap(conf);
verify(cGroupsHandlerMock).initializeCGroupController(
eq(CGroupsHandler.CGroupController.NET_CLS));
verifyNoMoreInteractions(cGroupsHandlerMock);
verify(trafficControllerMock).bootstrap(eq(device),
eq(ROOT_BANDWIDTH_MBIT),
eq(YARN_BANDWIDTH_MBIT));
verifyNoMoreInteractions(trafficControllerMock);
} catch (ResourceHandlerException e) {
LOG.error("Unexpected exception: " + e);
fail("Caught unexpected ResourceHandlerException!");
}
}
@Test
public void testLifeCycle() {
TrafficController trafficControllerSpy = spy(new TrafficController(conf,
privilegedOperationExecutorMock));
TrafficControlBandwidthHandlerImpl handlerImpl = new
TrafficControlBandwidthHandlerImpl(privilegedOperationExecutorMock,
cGroupsHandlerMock, trafficControllerSpy);
try {
handlerImpl.bootstrap(conf);
testPreStart(trafficControllerSpy, handlerImpl);
testPostComplete(trafficControllerSpy, handlerImpl);
} catch (ResourceHandlerException e) {
LOG.error("Unexpected exception: " + e);
fail("Caught unexpected ResourceHandlerException!");
}
}
private void testPreStart(TrafficController trafficControllerSpy,
TrafficControlBandwidthHandlerImpl handlerImpl) throws
ResourceHandlerException {
//This is not the cleanest of solutions - but since we are testing the
//preStart/postComplete lifecycle, we don't have a different way of
//handling this - we don't keep track of the number of invocations by
//a | TestTrafficControlBandwidthHandlerImpl |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java | {
"start": 823,
"end": 1956
} | class ____ extends MasterNodeReadOperationRequestBuilder<
GetRepositoriesRequest,
GetRepositoriesResponse,
GetRepositoriesRequestBuilder> {
/**
* Creates new get repository request builder
*/
public GetRepositoriesRequestBuilder(ElasticsearchClient client, TimeValue masterNodeTimeout, String... repositories) {
super(client, GetRepositoriesAction.INSTANCE, new GetRepositoriesRequest(masterNodeTimeout, repositories));
}
/**
* Sets list of repositories to get
*
* @param repositories list of repositories
* @return builder
*/
public GetRepositoriesRequestBuilder setRepositories(String... repositories) {
request.repositories(repositories);
return this;
}
/**
* Adds repositories to the list of repositories to get
*
* @param repositories list of repositories
* @return builder
*/
public GetRepositoriesRequestBuilder addRepositories(String... repositories) {
request.repositories(ArrayUtils.concat(request.repositories(), repositories));
return this;
}
}
| GetRepositoriesRequestBuilder |
java | spring-projects__spring-boot | configuration-metadata/spring-boot-configuration-processor/src/test/java/org/springframework/boot/configurationsample/source/ConcreteSourceAnnotated.java | {
"start": 902,
"end": 1096
} | class ____ {
@TestNestedConfigurationProperty
private final ConcreteSource nested = new ConcreteSource();
public ConcreteSource getNested() {
return this.nested;
}
}
| ConcreteSourceAnnotated |
java | elastic__elasticsearch | x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvSpecReader.java | {
"start": 618,
"end": 4513
} | class ____ implements SpecReader.Parser {
private final StringBuilder query = new StringBuilder();
private final StringBuilder data = new StringBuilder();
private final List<String> requiredCapabilities = new ArrayList<>();
private CsvTestCase testCase;
private CsvSpecParser() {}
@Override
public Object parse(String line) {
// read the query
if (testCase == null) {
if (line.toLowerCase(Locale.ROOT).startsWith("required_capability:")) {
requiredCapabilities.add(line.substring("required_capability:".length()).trim());
} else {
if (line.endsWith("\\;")) {
// SET statement with escaped ";"
var updatedLine = line.substring(0, line.length() - 2);
query.append(updatedLine);
query.append(";");
query.append("\r\n");
} else if (line.endsWith(";")) {
// pick up the query
testCase = new CsvTestCase();
query.append(line.substring(0, line.length() - 1).trim());
testCase.query = query.toString();
testCase.requiredCapabilities = List.copyOf(requiredCapabilities);
requiredCapabilities.clear();
query.setLength(0);
}
// keep reading the query
else {
query.append(line);
query.append("\r\n");
}
}
}
// read the results
else {
// read data
String lower = line.toLowerCase(Locale.ROOT);
if (lower.startsWith("warning:")) {
if (testCase.expectedWarningsRegex.isEmpty() == false) {
throw new IllegalArgumentException("Cannot mix warnings and regex warnings in CSV SPEC files: [" + line + "]");
}
testCase.expectedWarnings.add(line.substring("warning:".length()).trim());
} else if (lower.startsWith("warningregex:")) {
if (testCase.expectedWarnings.isEmpty() == false) {
throw new IllegalArgumentException("Cannot mix warnings and regex warnings in CSV SPEC files: [" + line + "]");
}
String regex = line.substring("warningregex:".length()).trim();
testCase.expectedWarningsRegexString.add(regex);
testCase.expectedWarningsRegex.add(warningRegexToPattern(regex));
} else if (lower.startsWith("ignoreorder:")) {
String value = lower.substring("ignoreOrder:".length()).trim();
if ("true".equals(value)) {
testCase.ignoreOrder = true;
} else if ("false".equals(value) == false) {
throw new IllegalArgumentException("Invalid value for ignoreOrder: [" + value + "], it can only be true or false");
}
} else if (line.startsWith(";")) {
testCase.expectedResults = data.toString();
// clean-up and emit
CsvTestCase result = testCase;
testCase = null;
data.setLength(0);
return result;
} else {
data.append(line);
data.append("\r\n");
}
}
return null;
}
}
private static Pattern warningRegexToPattern(String regex) {
return Pattern.compile(".*" + regex + ".*");
}
public static | CsvSpecParser |
java | spring-projects__spring-boot | build-plugin/spring-boot-gradle-plugin/src/test/java/org/springframework/boot/gradle/tasks/bundling/AbstractBootArchiveIntegrationTests.java | {
"start": 34081,
"end": 37235
} | class ____ {");
writer.println();
writer.println(" public static void main(String[] args) {");
writer.println(" }");
writer.println();
writer.println("}");
}
catch (IOException ex) {
throw new RuntimeException(ex);
}
}
private void writeResource() {
try {
Path path = this.gradleBuild.getProjectDir()
.toPath()
.resolve(Paths.get("src", "main", "resources", "static", "file.txt"));
Files.createDirectories(path.getParent());
Files.createFile(path);
}
catch (IOException ex) {
throw new RuntimeException(ex);
}
}
private Map<String, List<String>> readLayerIndex(JarFile jarFile) throws IOException {
Map<String, List<String>> index = new LinkedHashMap<>();
ZipEntry indexEntry = jarFile.getEntry(this.indexPath + "layers.idx");
try (BufferedReader reader = new BufferedReader(new InputStreamReader(jarFile.getInputStream(indexEntry)))) {
String line = reader.readLine();
String layer = null;
while (line != null) {
if (line.startsWith("- ")) {
layer = line.substring(3, line.length() - 2);
}
else if (line.startsWith(" - ")) {
index.computeIfAbsent(layer, (key) -> new ArrayList<>()).add(line.substring(5, line.length() - 1));
}
line = reader.readLine();
}
return index;
}
}
private Map<String, List<String>> readExtractedLayers(File root, List<String> layerNames) throws IOException {
Map<String, List<String>> extractedLayers = new LinkedHashMap<>();
for (String layerName : layerNames) {
File layer = new File(root, layerName);
assertThat(layer).isDirectory();
List<String> files;
try (Stream<Path> pathStream = Files.walk(layer.toPath())) {
files = pathStream.filter((path) -> path.toFile().isFile())
.map(layer.toPath()::relativize)
.map(Path::toString)
.map(StringUtils::cleanPath)
.toList();
}
extractedLayers.put(layerName, files);
}
return extractedLayers;
}
private void assertExtractedLayers(List<String> layerNames, Map<String, List<String>> indexedLayers)
throws IOException {
Map<String, List<String>> extractedLayers = readExtractedLayers(this.gradleBuild.getProjectDir(), layerNames);
assertThat(extractedLayers.keySet()).isEqualTo(indexedLayers.keySet());
extractedLayers.forEach((name, contents) -> {
List<String> index = indexedLayers.get(name);
assertThat(index).isNotNull();
List<String> unexpected = new ArrayList<>();
for (String file : contents) {
if (!isInIndex(index, file)) {
unexpected.add(name);
}
}
assertThat(unexpected).isEmpty();
});
}
private boolean isInIndex(List<String> index, String file) {
for (String candidate : index) {
if (file.equals(candidate) || candidate.endsWith("/") && file.startsWith(candidate)) {
return true;
}
}
return false;
}
private static void assertEntryMode(ZipArchiveEntry entry, int expectedMode) {
assertThat(entry.getUnixMode())
.withFailMessage(() -> "Expected mode " + Integer.toOctalString(expectedMode) + " for entry "
+ entry.getName() + " but actual is " + Integer.toOctalString(entry.getUnixMode()))
.isEqualTo(expectedMode);
}
}
| Main |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/batch/BatchingInheritanceDeleteTest.java | {
"start": 2839,
"end": 3590
} | class ____ {
@Id
@GeneratedValue(strategy = GenerationType.AUTO)
private long id;
@Column(nullable = false)
private String name;
public Baz() {
super();
}
public Baz(final String name) {
super();
this.name = name;
}
public long getId() {
return id;
}
public void setId(final long id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(final String name) {
this.name = name;
}
@Override
public String toString() {
final StringBuilder builder = new StringBuilder();
builder.append( "Bar [name=" ).append( name ).append( "]" );
return builder.toString();
}
}
@Entity(name = "Foo")
@Inheritance(strategy = InheritanceType.JOINED)
public static | Baz |
java | elastic__elasticsearch | x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/analyzer/PreAnalyzer.java | {
"start": 774,
"end": 1615
} | class ____ {
public static final PreAnalysis EMPTY = new PreAnalysis(emptyList());
public final List<TableInfo> indices;
public PreAnalysis(List<TableInfo> indices) {
this.indices = indices;
}
}
public PreAnalysis preAnalyze(LogicalPlan plan) {
if (plan.analyzed()) {
return PreAnalysis.EMPTY;
}
return doPreAnalyze(plan);
}
private static PreAnalysis doPreAnalyze(LogicalPlan plan) {
List<TableInfo> indices = new ArrayList<>();
plan.forEachUp(UnresolvedRelation.class, p -> indices.add(new TableInfo(p.table(), p.frozen())));
// mark plan as preAnalyzed (if it were marked, there would be no analysis)
plan.forEachUp(LogicalPlan::setPreAnalyzed);
return new PreAnalysis(indices);
}
}
| PreAnalysis |
java | elastic__elasticsearch | modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EExplicit.java | {
"start": 687,
"end": 1579
} | class ____ extends AExpression {
private final String canonicalTypeName;
private final AExpression childNode;
public EExplicit(int identifier, Location location, String canonicalTypeName, AExpression childNode) {
super(identifier, location);
this.canonicalTypeName = Objects.requireNonNull(canonicalTypeName);
this.childNode = Objects.requireNonNull(childNode);
}
public String getCanonicalTypeName() {
return canonicalTypeName;
}
public AExpression getChildNode() {
return childNode;
}
@Override
public <Scope> void visit(UserTreeVisitor<Scope> userTreeVisitor, Scope scope) {
userTreeVisitor.visitExplicit(this, scope);
}
@Override
public <Scope> void visitChildren(UserTreeVisitor<Scope> userTreeVisitor, Scope scope) {
childNode.visit(userTreeVisitor, scope);
}
}
| EExplicit |
java | elastic__elasticsearch | x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformTask.java | {
"start": 3121,
"end": 30452
} | class ____ extends AllocatedPersistentTask implements TransformScheduler.Listener, TransformContext.Listener {
// Default interval the scheduler sends an event if the config does not specify a frequency
private static final Logger logger = LogManager.getLogger(TransformTask.class);
private static final IndexerState[] RUNNING_STATES = new IndexerState[] { IndexerState.STARTED, IndexerState.INDEXING };
private final TransformTaskParams transform;
private final TransformScheduler transformScheduler;
private final ThreadPool threadPool;
private final TransformAuditor auditor;
private final TransformIndexerPosition initialPosition;
private final IndexerState initialIndexerState;
private final TransformContext context;
private final TransformNode transformNode;
private final SetOnce<ClientTransformIndexer> indexer = new SetOnce<>();
@SuppressWarnings("this-escape")
TransformTask(
long id,
String type,
String action,
TaskId parentTask,
TransformTaskParams transform,
TransformState state,
TransformScheduler transformScheduler,
TransformAuditor auditor,
ThreadPool threadPool,
Map<String, String> headers,
TransformNode transformNode
) {
super(id, type, action, TransformField.PERSISTENT_TASK_DESCRIPTION_PREFIX + transform.getId(), parentTask, headers);
this.transform = transform;
this.transformScheduler = transformScheduler;
this.threadPool = threadPool;
this.auditor = auditor;
IndexerState initialState = IndexerState.STOPPED;
TransformTaskState initialTaskState = TransformTaskState.STOPPED;
String initialReason = null;
long initialCheckpoint = 0;
TransformIndexerPosition initialPosition = null;
if (state != null) {
initialTaskState = state.getTaskState();
initialReason = state.getReason();
final IndexerState existingState = state.getIndexerState();
if (existingState.equals(IndexerState.INDEXING)) {
// reset to started as no indexer is running
initialState = IndexerState.STARTED;
} else if (existingState.equals(IndexerState.ABORTING) || existingState.equals(IndexerState.STOPPING)) {
// reset to stopped as something bad happened
initialState = IndexerState.STOPPED;
} else {
initialState = existingState;
}
initialPosition = state.getPosition();
initialCheckpoint = state.getCheckpoint();
}
this.initialIndexerState = initialState;
this.initialPosition = initialPosition;
this.context = new TransformContext(initialTaskState, initialReason, initialCheckpoint, transform.from(), this);
if (state != null) {
this.context.setAuthState(state.getAuthState());
}
this.transformNode = transformNode;
}
public String getTransformId() {
return transform.getId();
}
/**
* Enable Task API to return detailed status information
*/
@Override
public Status getStatus() {
return getState();
}
private ClientTransformIndexer getIndexer() {
return indexer.get();
}
public TransformState getState() {
if (getIndexer() == null) {
return new TransformState(
context.getTaskState(),
initialIndexerState,
initialPosition,
context.getCheckpoint(),
context.getStateReason(),
null,
null,
false,
context.getAuthState()
);
} else {
return new TransformState(
context.getTaskState(),
indexer.get().getState(),
indexer.get().getPosition(),
context.getCheckpoint(),
context.getStateReason(),
getIndexer().getProgress(),
null,
context.shouldStopAtCheckpoint(),
context.getAuthState()
);
}
}
public TransformIndexerStats getStats() {
if (getIndexer() == null) {
return new TransformIndexerStats();
} else {
return getIndexer().getStats();
}
}
public void getCheckpointingInfo(
TransformCheckpointService transformsCheckpointService,
ParentTaskAssigningClient parentTaskClient,
ActionListener<TransformCheckpointingInfo> listener,
TimeValue timeout
) {
ActionListener<TransformCheckpointingInfoBuilder> checkPointInfoListener = ActionListener.wrap(infoBuilder -> {
if (context.getChangesLastDetectedAt() != null) {
infoBuilder.setChangesLastDetectedAt(context.getChangesLastDetectedAt());
}
if (context.getLastSearchTime() != null) {
infoBuilder.setLastSearchTime(context.getLastSearchTime());
}
listener.onResponse(infoBuilder.build());
}, listener::onFailure);
ClientTransformIndexer transformIndexer = getIndexer();
if (transformIndexer == null) {
transformsCheckpointService.getCheckpointingInfo(
parentTaskClient,
timeout,
transform.getId(),
context.getCheckpoint(),
initialPosition,
null,
checkPointInfoListener
);
return;
}
transformsCheckpointService.getCheckpointProvider(parentTaskClient, transformIndexer.getConfig())
.getCheckpointingInfo(
transformIndexer.getLastCheckpoint(),
transformIndexer.getNextCheckpoint(),
transformIndexer.getPosition(),
transformIndexer.getProgress(),
timeout,
checkPointInfoListener
);
}
/**
* Derives basic checkpointing stats. This does not make a call to obtain any additional information.
* This will only read checkpointing information from this TransformTask.
*
* @return basic checkpointing info, including id, position, and progress of the Next Checkpoint and the id of the Last Checkpoint.
*/
public TransformCheckpointingInfo deriveBasicCheckpointingInfo() {
var transformIndexer = getIndexer();
if (transformIndexer == null) {
return TransformCheckpointingInfo.EMPTY;
}
return new TransformCheckpointingInfo.TransformCheckpointingInfoBuilder().setLastCheckpoint(transformIndexer.getLastCheckpoint())
.setNextCheckpoint(transformIndexer.getNextCheckpoint())
.setNextCheckpointPosition(transformIndexer.getPosition())
.setNextCheckpointProgress(transformIndexer.getProgress())
.build();
}
/**
* Starts the transform and schedules it to be triggered in the future.
*
* @param startingCheckpoint The starting checkpoint, could null. Null indicates that there is no starting checkpoint
* @param listener The listener to alert once started
*/
void start(Long startingCheckpoint, ActionListener<StartTransformAction.Response> listener) {
logger.debug("[{}] start called with state [{}].", getTransformId(), getState());
if (context.getTaskState() == TransformTaskState.FAILED) {
listener.onFailure(
new CannotStartFailedTransformException(
TransformMessages.getMessage(CANNOT_START_FAILED_TRANSFORM, getTransformId(), context.getStateReason())
)
);
return;
}
synchronized (context) {
if (getIndexer() == null) {
// If our state is failed AND the indexer is null, the user needs to _stop?force=true so that the indexer gets
// fully initialized.
// If we are NOT failed, then we can assume that `start` was just called early in the process.
String msg = context.getTaskState() == TransformTaskState.FAILED
? "It failed during the initialization process; force stop to allow reinitialization."
: "Try again later.";
listener.onFailure(
new ElasticsearchStatusException(
"Task for transform [{}] not fully initialized. {}",
RestStatus.CONFLICT,
getTransformId(),
msg
)
);
return;
}
final IndexerState newState = getIndexer().start();
if (Arrays.stream(RUNNING_STATES).noneMatch(newState::equals)) {
listener.onFailure(
new ElasticsearchException("Cannot start task for transform [{}], because state was [{}]", transform.getId(), newState)
);
return;
}
context.resetTaskState();
if (startingCheckpoint != null) {
context.setCheckpoint(startingCheckpoint);
}
final TransformState state = new TransformState(
TransformTaskState.STARTED,
IndexerState.STOPPED,
getIndexer().getPosition(),
context.getCheckpoint(),
null,
getIndexer().getProgress(),
null,
context.shouldStopAtCheckpoint(),
context.getAuthState()
);
logger.info("[{}] updating state for transform to [{}].", transform.getId(), state.toString());
// Even though the indexer information is persisted to an index, we still need TransformTaskState in the clusterstate
// This keeps track of STARTED, FAILED, STOPPED
// This is because a FAILED state can occur because we cannot read the config from the internal index, which would imply that
// we could not read the previous state information from said index.
persistStateToClusterState(state, ActionListener.wrap(task -> {
auditor.info(transform.getId(), "Updated transform state to [" + state.getTaskState() + "].");
listener.onResponse(new StartTransformAction.Response(true));
}, exc -> {
logger.error(() -> format("[%s] failed updating state to [%s].", getTransformId(), state), exc);
getIndexer().stop();
listener.onFailure(
new ElasticsearchException(
"Error while updating state for transform [" + transform.getId() + "] to [" + TransformTaskState.STARTED + "].",
exc
)
);
}));
}
}
/**
* This sets the flag for the task to stop at the next checkpoint.
*
* @param shouldStopAtCheckpoint whether or not we should stop at the next checkpoint or not
* @param shouldStopAtCheckpointListener the listener to return to when we have persisted the updated value to the state index.
*/
public void setShouldStopAtCheckpoint(boolean shouldStopAtCheckpoint, ActionListener<Void> shouldStopAtCheckpointListener) {
// this should be called from the generic threadpool
assert ThreadPool.assertCurrentThreadPool(ThreadPool.Names.GENERIC);
logger.debug(
"[{}] attempted to set task to stop at checkpoint [{}] with state [{}]",
getTransformId(),
shouldStopAtCheckpoint,
getState()
);
synchronized (context) {
if (context.getTaskState() != TransformTaskState.STARTED || getIndexer() == null) {
shouldStopAtCheckpointListener.onResponse(null);
return;
}
if (context.shouldStopAtCheckpoint() == shouldStopAtCheckpoint) {
shouldStopAtCheckpointListener.onResponse(null);
return;
}
getIndexer().setStopAtCheckpoint(shouldStopAtCheckpoint, shouldStopAtCheckpointListener);
}
}
public void stop(boolean force, boolean shouldStopAtCheckpoint) {
logger.debug(
"[{}] stop called with force [{}], shouldStopAtCheckpoint [{}], state [{}], indexerstate[{}]",
getTransformId(),
force,
shouldStopAtCheckpoint,
getState(),
getIndexer() != null ? getIndexer().getState() : null
);
synchronized (context) {
if (context.getTaskState() == TransformTaskState.FAILED && force == false) {
throw new ElasticsearchStatusException(
TransformMessages.getMessage(CANNOT_STOP_SINGLE_FAILED_TRANSFORM, getTransformId(), context.getStateReason()),
RestStatus.CONFLICT
);
}
// cleanup potentially failed state.
boolean wasFailed = context.setTaskState(TransformTaskState.FAILED, TransformTaskState.STARTED);
context.resetReasonAndFailureCounter();
if (getIndexer() == null) {
// If there is no indexer the task has not been triggered
// but it still needs to be stopped and removed
shutdown();
return;
}
// If state was in a failed state, we should stop immediately
if (wasFailed) {
getIndexer().stopAndMaybeSaveState();
return;
}
IndexerState indexerState = getIndexer().getState();
if (indexerState == IndexerState.STOPPED || indexerState == IndexerState.STOPPING) {
return;
}
// shouldStopAtCheckpoint only comes into play when onFinish is called (or doSaveState right after).
// if it is false, stop immediately
if (shouldStopAtCheckpoint == false ||
// If the indexerState is STARTED and it is on an initialRun, that means that the indexer has previously finished a checkpoint,
// or has yet to even start one.
// Either way, this means that we won't get to have onFinish called down stream (or at least won't for some time).
(indexerState == IndexerState.STARTED && getIndexer().initialRun())) {
getIndexer().stopAndMaybeSaveState();
}
}
}
public void applyNewSettings(SettingsConfig newSettings) {
synchronized (context) {
getIndexer().applyNewSettings(newSettings);
}
}
public void applyNewAuthState(AuthorizationState authState) {
synchronized (context) {
context.setAuthState(authState);
}
}
public void checkAndResetDestinationIndexBlock(TransformConfig config) {
if (context.isWaitingForIndexToUnblock()) {
var currentIndex = getIndexer() == null ? null : getIndexer().getConfig().getDestination().getIndex();
var updatedIndex = config.getDestination().getIndex();
if (updatedIndex.equals(currentIndex) == false) {
context.setIsWaitingForIndexToUnblock(false);
}
}
}
public void applyNewFrequency(TransformConfig config) {
var frequency = config != null ? config.getFrequency() : null;
if (frequency != null) {
transformScheduler.updateFrequency(config.getId(), frequency);
}
}
@Override
protected void init(
PersistentTasksService persistentTasksService,
TaskManager taskManager,
String persistentTaskId,
long allocationId
) {
super.init(persistentTasksService, taskManager, persistentTaskId, allocationId);
}
@Override
public void triggered(TransformScheduler.Event event) {
logger.trace(() -> format("[%s] triggered(event=%s) ", getTransformId(), event));
// Ignore if event is not for this job
if (event.transformId().equals(getTransformId()) == false) {
return;
}
synchronized (context) {
if (getIndexer() == null) {
logger.warn("[{}] transform task triggered with an unintialized indexer.", getTransformId());
return;
}
if (context.getTaskState() == TransformTaskState.FAILED || context.getTaskState() == TransformTaskState.STOPPED) {
logger.debug(
"[{}] schedule was triggered for transform but task is [{}]. Ignoring trigger.",
getTransformId(),
context.getTaskState()
);
return;
}
// ignore trigger if indexer is running, stopping, stopped or aborting
IndexerState indexerState = getIndexer().getState();
if (IndexerState.INDEXING.equals(indexerState)
|| IndexerState.STOPPING.equals(indexerState)
|| IndexerState.STOPPED.equals(indexerState)
|| IndexerState.ABORTING.equals(indexerState)) {
logger.debug("[{}] indexer for transform has state [{}]. Ignoring trigger.", getTransformId(), indexerState);
return;
}
logger.debug("[{}] transform indexer schedule has triggered, state: [{}].", getTransformId(), indexerState);
// if it runs for the 1st time we just do it, if not we check for changes
if (context.getCheckpoint() == 0) {
logger.debug("[{}] trigger initial run.", getTransformId());
getIndexer().maybeTriggerAsyncJob(System.currentTimeMillis());
} else if (getIndexer().isContinuous()) {
getIndexer().maybeTriggerAsyncJob(System.currentTimeMillis());
}
}
}
@Override
public boolean shouldCancelChildrenOnCancellation() {
// shutdown implements graceful shutdown of children
return false;
}
/**
* Attempt to gracefully cleanup the transform so it can be terminated.
* This tries to remove the job from the scheduler and completes the persistent task
*/
@Override
public void shutdown() {
logger.debug("[{}] shutdown of transform requested", transform.getId());
transformScheduler.deregisterTransform(getTransformId());
markAsCompleted();
}
void persistStateToClusterState(TransformState state, ActionListener<PersistentTask<?>> listener) {
updatePersistentTaskState(state, ActionListener.wrap(success -> {
logger.debug("[{}] successfully updated state for transform to [{}].", transform.getId(), state.toString());
listener.onResponse(success);
}, failure -> {
logger.error(() -> "[" + transform.getId() + "] failed to update cluster state for transform.", failure);
listener.onFailure(failure);
}));
}
@Override
public void failureCountChanged() {
transformScheduler.handleTransformFailureCountChanged(transform.getId(), context.getFailureCount());
}
@Override
public void fail(Throwable exception, String reason, ActionListener<Void> listener) {
synchronized (context) {
// If we are already flagged as failed, this probably means that a second trigger started firing while we were attempting to
// flag the previously triggered indexer as failed. Exit early as we are already flagged as failed.
if (context.getTaskState() == TransformTaskState.FAILED) {
logger.warn("[{}] is already failed but encountered new failure; reason [{}].", getTransformId(), reason);
listener.onResponse(null);
return;
}
// If the indexer is `STOPPING` this means that `TransformTask#stop` was called previously, but something caused
// the indexer to fail. Since `ClientTransformIndexer#doSaveState` will persist the state to the index once the indexer stops,
// it is probably best to NOT change the internal state of the task and allow the normal stopping logic to continue.
if (getIndexer() != null && getIndexer().getState() == IndexerState.STOPPING) {
logger.info("[{}] attempt to fail transform with reason [{}] while it was stopping.", getTransformId(), reason);
listener.onResponse(null);
return;
}
// If we are stopped, this means that between the failure occurring and being handled, somebody called stop
// We should just allow that stop to continue
if (getIndexer() != null && getIndexer().getState() == IndexerState.STOPPED) {
logger.info("[{}] encountered a failure but indexer is STOPPED; reason [{}].", getTransformId(), reason);
listener.onResponse(null);
return;
}
// We should not keep retrying. Either the task will be stopped, or started
// If it is started again, it is registered again.
transformScheduler.deregisterTransform(getTransformId());
if (transformNode.isShuttingDown().orElse(false)) {
logger.atDebug()
.withThrowable(exception)
.log(
"Aborting transform [{}]. Transform has failed while node [{}] is shutting down. Reason: [{}]",
transform.getId(),
transformNode.nodeId(),
reason
);
markAsLocallyAborted("Node is shutting down.");
listener.onResponse(null);
return;
}
logger.atError().withThrowable(exception).log("[{}] transform has failed; experienced: [{}].", transform.getId(), reason);
auditor.error(transform.getId(), reason);
// The idea of stopping at the next checkpoint is no longer valid. Since a failed task could potentially START again,
// we should set this flag to false.
context.setShouldStopAtCheckpoint(false);
// The end user should see that the task is in a failed state, and attempt to stop it again but with force=true
context.setTaskStateToFailed(reason);
TransformState newState = getState();
// Even though the indexer information is persisted to an index, we still need TransformTaskState in the cluster state
// This keeps track of STARTED, FAILED, STOPPED
// This is because a FAILED state could occur because we failed to read the config from the internal index, which would imply
// that
// we could not read the previous state information from said index.
persistStateToClusterState(newState, ActionListener.wrap(r -> listener.onResponse(null), e -> {
String msg = "Failed to persist to cluster state while marking task as failed with reason [" + reason + "].";
auditor.warning(transform.getId(), msg + " Failure: " + e.getMessage());
logger.error(() -> format("[%s] %s", getTransformId(), msg), e);
listener.onFailure(e);
}));
}
}
/**
* This is called when the persistent task signals that the allocated task should be terminated.
* Termination in the task framework is essentially voluntary, as the allocated task can only be
* shut down from the inside.
*/
@Override
public void onCancelled() {
logger.info("[{}] received cancellation request for transform, state: [{}].", getTransformId(), context.getTaskState());
ClientTransformIndexer theIndexer = getIndexer();
if (theIndexer != null && theIndexer.abort()) {
// there is no background transform running, we can shutdown safely
shutdown();
}
}
public boolean isRetryingStartup() {
return getContext().getStartUpFailureCount() > 0;
}
TransformTask setNumFailureRetries(int numFailureRetries) {
context.setNumFailureRetries(numFailureRetries);
return this;
}
TransformTask setAuthState(AuthorizationState authState) {
context.setAuthState(authState);
return this;
}
void initializeIndexer(ClientTransformIndexerBuilder indexerBuilder) {
initializeIndexer(indexerBuilder.build(getThreadPool(), context));
}
/** Visible for testing. */
void initializeIndexer(ClientTransformIndexer indexer) {
this.indexer.set(indexer);
}
ThreadPool getThreadPool() {
return threadPool;
}
public static PersistentTask<?> getTransformTask(String transformId, ClusterState clusterState) {
Collection<PersistentTask<?>> transformTasks = findTransformTasks(t -> t.getId().equals(transformId), clusterState);
if (transformTasks.isEmpty()) {
return null;
}
// Task ids are unique
assert (transformTasks.size() == 1) : "There were 2 or more transform tasks with the same id";
PersistentTask<?> pTask = transformTasks.iterator().next();
if (pTask.getParams() instanceof TransformTaskParams) {
return pTask;
}
throw new ElasticsearchStatusException(
"Found transform persistent task [{}] with incorrect params",
RestStatus.INTERNAL_SERVER_ERROR,
transformId
);
}
public static Collection<PersistentTask<?>> findAllTransformTasks(ClusterState clusterState) {
return findTransformTasks(Predicates.always(), clusterState);
}
public static Collection<PersistentTask<?>> findTransformTasks(Set<String> transformIds, ClusterState clusterState) {
return findTransformTasks(task -> transformIds.contains(task.getId()), clusterState);
}
public static Collection<PersistentTask<?>> findTransformTasks(String transformIdPattern, ClusterState clusterState) {
Predicate<PersistentTasksCustomMetadata.PersistentTask<?>> taskMatcher = transformIdPattern == null
|| Strings.isAllOrWildcard(transformIdPattern) ? Predicates.always() : t -> {
TransformTaskParams transformParams = (TransformTaskParams) t.getParams();
return Regex.simpleMatch(transformIdPattern, transformParams.getId());
};
return findTransformTasks(taskMatcher, clusterState);
}
// used for {@link TransformHealthChecker}
public TransformContext getContext() {
return context;
}
private static Collection<PersistentTask<?>> findTransformTasks(Predicate<PersistentTask<?>> predicate, ClusterState clusterState) {
final var project = clusterState.metadata().getDefaultProject();
PersistentTasksCustomMetadata pTasksMeta = PersistentTasksCustomMetadata.get(project);
if (pTasksMeta == null) {
return Collections.emptyList();
}
return pTasksMeta.findTasks(TransformTaskParams.NAME, predicate);
}
}
| TransformTask |
java | quarkusio__quarkus | extensions/jdbc/jdbc-postgresql/deployment/src/test/java/io/quarkus/jdbc/postgresql/deployment/DevServicesPostgresqlDatasourceDevModeTestCase.java | {
"start": 412,
"end": 1896
} | class ____ {
@RegisterExtension
static QuarkusDevModeTest test = new QuarkusDevModeTest()
.withApplicationRoot((jar) -> jar
.addClass(PgResource.class)
.addAsResource(new StringAsset(""), "application.properties"))
// Expect no warnings (in particular from Agroal)
.setLogRecordPredicate(record -> record.getLevel().intValue() >= Level.WARNING.intValue()
// There are other warnings: JDK8, TestContainers, drivers, ...
// Ignore them: we're only interested in Agroal here.
&& record.getMessage().contains("Agroal"));
@Inject
AgroalDataSource dataSource;
@Test
public void testDatasource() throws Exception {
RestAssured.get("/pg/save?name=foo&value=bar")
.then().statusCode(204);
RestAssured.get("/pg/get?name=foo")
.then().statusCode(200)
.body(Matchers.equalTo("bar"));
test.modifyResourceFile("application.properties", s -> "quarkus.datasource.devservices.properties.log=TRACE");
RestAssured.get("/pg/get?name=foo")
.then().statusCode(404);
RestAssured.get("/pg/save?name=foo&value=bar")
.then().statusCode(204);
RestAssured.get("/pg/get?name=foo")
.then().statusCode(200)
.body(Matchers.equalTo("bar"));
}
}
| DevServicesPostgresqlDatasourceDevModeTestCase |
java | netty__netty | transport-classes-io_uring/src/main/java/io/netty/channel/uring/CmsgHdr.java | {
"start": 977,
"end": 2624
} | class ____ {
private CmsgHdr() { }
static void write(ByteBuffer cmsghdr, int cmsgHdrDataOffset,
int cmsgLen, int cmsgLevel, int cmsgType, short segmentSize) {
int cmsghdrPosition = cmsghdr.position();
if (Native.SIZEOF_SIZE_T == 4) {
cmsghdr.putInt(cmsghdrPosition + Native.CMSG_OFFSETOF_CMSG_LEN, cmsgLen);
} else {
assert Native.SIZEOF_SIZE_T == 8;
cmsghdr.putLong(cmsghdrPosition + Native.CMSG_OFFSETOF_CMSG_LEN, cmsgLen);
}
cmsghdr.putInt(cmsghdrPosition + Native.CMSG_OFFSETOF_CMSG_LEVEL, cmsgLevel);
cmsghdr.putInt(cmsghdrPosition + Native.CMSG_OFFSETOF_CMSG_TYPE, cmsgType);
cmsghdr.putShort(cmsghdrPosition + cmsgHdrDataOffset, segmentSize);
}
static void writeScmRights(ByteBuffer cmsghdr, int cmsgHdrDataOffset, int fd) {
int cmsghdrPosition = cmsghdr.position();
if (Native.SIZEOF_SIZE_T == 4) {
cmsghdr.putInt(cmsghdrPosition + Native.CMSG_OFFSETOF_CMSG_LEN, Native.CMSG_LEN_FOR_FD);
} else {
assert Native.SIZEOF_SIZE_T == 8;
cmsghdr.putLong(cmsghdrPosition + Native.CMSG_OFFSETOF_CMSG_LEN, Native.CMSG_LEN_FOR_FD);
}
cmsghdr.putInt(cmsghdrPosition + Native.CMSG_OFFSETOF_CMSG_LEVEL, Native.SOL_SOCKET);
cmsghdr.putInt(cmsghdrPosition + Native.CMSG_OFFSETOF_CMSG_TYPE, Native.SCM_RIGHTS);
cmsghdr.putInt(cmsghdrPosition + cmsgHdrDataOffset, fd);
}
static int readScmRights(ByteBuffer cmsghdr, int cmsgHdrDataOffset) {
return cmsghdr.getInt(cmsghdr.position() + cmsgHdrDataOffset);
}
}
| CmsgHdr |
java | spring-projects__spring-boot | module/spring-boot-mongodb/src/dockerTest/java/org/springframework/boot/mongodb/testcontainers/DeprecatedMongoDbContainerConnectionDetailsFactoryTests.java | {
"start": 1733,
"end": 2279
} | class ____ {
@Container
@ServiceConnection
static final MongoDBContainer mongoDb = TestImage.container(MongoDBContainer.class);
@Autowired(required = false)
private MongoConnectionDetails connectionDetails;
@Test
void connectionCanBeMadeToContainer() {
assertThat(this.connectionDetails).isNotNull();
MongoClient client = MongoClients.create(this.connectionDetails.getConnectionString());
assertThat(client.listDatabaseNames()).containsExactly("admin", "config", "local");
}
}
| DeprecatedMongoDbContainerConnectionDetailsFactoryTests |
java | spring-projects__spring-boot | module/spring-boot-graphql-test/src/test/java/org/springframework/boot/graphql/test/autoconfigure/Book.java | {
"start": 695,
"end": 1572
} | class ____ {
@SuppressWarnings("NullAway.Init")
String id;
@SuppressWarnings("NullAway.Init")
String name;
@SuppressWarnings("NullAway.Init")
int pageCount;
@SuppressWarnings("NullAway.Init")
String author;
protected Book() {
}
public Book(String id, String name, int pageCount, String author) {
this.id = id;
this.name = name;
this.pageCount = pageCount;
this.author = author;
}
public String getId() {
return this.id;
}
public void setId(String id) {
this.id = id;
}
public String getName() {
return this.name;
}
public void setName(String name) {
this.name = name;
}
public int getPageCount() {
return this.pageCount;
}
public void setPageCount(int pageCount) {
this.pageCount = pageCount;
}
public String getAuthor() {
return this.author;
}
public void setAuthor(String author) {
this.author = author;
}
}
| Book |
java | elastic__elasticsearch | test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruption.java | {
"start": 1630,
"end": 7067
} | class ____ implements ServiceDisruptionScheme {
private static final Logger logger = LogManager.getLogger(NetworkDisruption.class);
private final DisruptedLinks disruptedLinks;
private final NetworkLinkDisruptionType networkLinkDisruptionType;
protected volatile InternalTestCluster cluster;
protected volatile boolean activeDisruption = false;
public NetworkDisruption(DisruptedLinks disruptedLinks, NetworkLinkDisruptionType networkLinkDisruptionType) {
this.disruptedLinks = disruptedLinks;
this.networkLinkDisruptionType = networkLinkDisruptionType;
}
public DisruptedLinks getDisruptedLinks() {
return disruptedLinks;
}
public NetworkLinkDisruptionType getNetworkLinkDisruptionType() {
return networkLinkDisruptionType;
}
@Override
public void applyToCluster(InternalTestCluster testCluster) {
this.cluster = testCluster;
}
@Override
public void removeFromCluster(InternalTestCluster testCluster) {
stopDisrupting();
}
@Override
public void removeAndEnsureHealthy(InternalTestCluster testCluster) {
removeFromCluster(testCluster);
ensureHealthy(testCluster);
}
/**
* ensures the cluster is healthy after the disruption
*/
public void ensureHealthy(InternalTestCluster testCluster) {
assert activeDisruption == false;
ensureNodeCount(testCluster);
ensureFullyConnectedCluster(testCluster);
}
/**
* Ensures that all nodes in the cluster are connected to each other.
*
* Some network disruptions may leave nodes that are not the master disconnected from each other.
* {@link org.elasticsearch.cluster.NodeConnectionsService} will eventually reconnect but it's
* handy to be able to ensure this happens faster
*/
public static void ensureFullyConnectedCluster(InternalTestCluster cluster) {
final String[] nodeNames = cluster.getNodeNames();
final CountDownLatch countDownLatch = new CountDownLatch(nodeNames.length);
for (String node : nodeNames) {
ClusterState stateOnNode = cluster.getInstance(ClusterService.class, node).state();
cluster.getInstance(NodeConnectionsService.class, node).reconnectToNodes(stateOnNode.nodes(), countDownLatch::countDown);
}
try {
countDownLatch.await();
} catch (InterruptedException e) {
throw new AssertionError(e);
}
}
protected void ensureNodeCount(InternalTestCluster testCluster) {
testCluster.validateClusterFormed();
}
@Override
public synchronized void applyToNode(String node, InternalTestCluster testCluster) {
}
@Override
public synchronized void removeFromNode(String node1, InternalTestCluster testCluster) {
logger.info("stop disrupting node (disruption type: {}, disrupted links: {})", networkLinkDisruptionType, disruptedLinks);
applyToNodes(new String[] { node1 }, testCluster.getNodeNames(), networkLinkDisruptionType::removeDisruption);
applyToNodes(testCluster.getNodeNames(), new String[] { node1 }, networkLinkDisruptionType::removeDisruption);
}
@Override
public synchronized void testClusterClosed() {
}
@Override
public synchronized void startDisrupting() {
logger.info("start disrupting (disruption type: {}, disrupted links: {})", networkLinkDisruptionType, disruptedLinks);
applyToNodes(cluster.getNodeNames(), cluster.getNodeNames(), networkLinkDisruptionType::applyDisruption);
activeDisruption = true;
}
@Override
public synchronized void stopDisrupting() {
if (activeDisruption == false) {
return;
}
logger.info("stop disrupting (disruption scheme: {}, disrupted links: {})", networkLinkDisruptionType, disruptedLinks);
applyToNodes(cluster.getNodeNames(), cluster.getNodeNames(), networkLinkDisruptionType::removeDisruption);
activeDisruption = false;
}
/**
* Applies action to all disrupted links between two sets of nodes.
*/
private void applyToNodes(String[] nodes1, String[] nodes2, BiConsumer<MockTransportService, MockTransportService> consumer) {
for (String node1 : nodes1) {
if (disruptedLinks.nodes().contains(node1)) {
for (String node2 : nodes2) {
if (disruptedLinks.nodes().contains(node2)) {
if (node1.equals(node2) == false) {
if (disruptedLinks.disrupt(node1, node2)) {
consumer.accept(transport(node1), transport(node2));
}
}
}
}
}
}
}
@Override
public TimeValue expectedTimeToHeal() {
return networkLinkDisruptionType.expectedTimeToHeal();
}
private MockTransportService transport(String node) {
return (MockTransportService) cluster.getInstance(TransportService.class, node);
}
@Override
public String toString() {
return "network disruption (disruption type: " + networkLinkDisruptionType + ", disrupted links: " + disruptedLinks + ")";
}
/**
* Represents a set of nodes with connections between nodes that are to be disrupted
*/
public abstract static | NetworkDisruption |
java | spring-projects__spring-framework | spring-webmvc/src/test/java/org/springframework/web/servlet/DispatcherServletTests.java | {
"start": 44709,
"end": 44977
} | class ____ implements Controller {
@Override
public ModelAndView handleRequest(HttpServletRequest request, HttpServletResponse response) throws Exception {
return new ModelAndView(ControllerFromParent.class.getName());
}
}
private static | ControllerFromParent |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/test/TestMatchers.java | {
"start": 730,
"end": 4103
} | class ____ extends Matchers {
/**
* @deprecated Use {@link FileMatchers#pathExists}
*/
@Deprecated
public static Matcher<Path> pathExists(Path path, LinkOption... options) {
return new CustomMatcher<Path>("Path " + path + " exists") {
@Override
public boolean matches(Object item) {
return Files.exists(path, options);
}
};
}
public static Matcher<Throwable> throwableWithMessage(String message) {
return throwableWithMessage(CoreMatchers.equalTo(message));
}
public static Matcher<Throwable> throwableWithMessage(Matcher<String> messageMatcher) {
return new BaseMatcher<>() {
@Override
public void describeTo(Description description) {
description.appendText("a throwable with message of ").appendDescriptionOf(messageMatcher);
}
@Override
public boolean matches(Object actual) {
if (actual instanceof final Throwable throwable) {
return messageMatcher.matches(throwable.getMessage());
} else {
return false;
}
}
@Override
public void describeMismatch(Object item, Description description) {
super.describeMismatch(item, description);
if (item instanceof Throwable e) {
final StackTraceElement at = e.getStackTrace()[0];
description.appendText(" at ").appendText(at.toString());
}
}
};
}
@SuppressWarnings("unchecked")
public static <T> Matcher<Predicate<T>> predicateMatches(T value) {
return new CustomMatcher<Predicate<T>>("Matches " + value) {
@Override
public boolean matches(Object item) {
if (Predicate.class.isInstance(item)) {
return ((Predicate<T>) item).test(value);
} else {
return false;
}
}
};
}
public static Matcher<String> matchesPattern(String regex) {
return matchesPattern(Pattern.compile(regex));
}
public static Matcher<String> matchesPattern(Pattern pattern) {
return predicate("Matches " + pattern.pattern(), String.class, pattern.asPredicate());
}
public static Matcher<Response> hasStatusCode(RestStatus expected) {
return new CustomMatcher<>("Response with status " + expected.getStatus() + " (" + expected.name() + ")") {
@Override
public boolean matches(Object item) {
if (item instanceof Response response) {
return response.getStatusLine().getStatusCode() == expected.getStatus();
} else {
return false;
}
}
};
}
private static <T> Matcher<T> predicate(String description, Class<T> type, Predicate<T> predicate) {
return new CustomMatcher<T>(description) {
@Override
public boolean matches(Object item) {
if (type.isInstance(item)) {
return predicate.test(type.cast(item));
} else {
return false;
}
}
};
}
}
| TestMatchers |
java | quarkusio__quarkus | independent-projects/arc/runtime/src/main/java/io/quarkus/arc/impl/CovariantTypes.java | {
"start": 672,
"end": 1095
} | class ____ to understand and maintain, there is a separate isAssignableFrom method for each combination
* of possible types. Each of these methods compares two type instances and determines whether the first one is assignable from
* the other.
*
* TypeVariables are considered a specific unknown type restricted by the upper bound. No inference of type variables is
* performed.
*
* @author Jozef Hartinger
*
*/
| easier |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregationBuilderTests.java | {
"start": 703,
"end": 1555
} | class ____ extends BaseAggregationTestCase<DiversifiedAggregationBuilder> {
@Override
protected final DiversifiedAggregationBuilder createTestAggregatorBuilder() {
DiversifiedAggregationBuilder factory = new DiversifiedAggregationBuilder(randomAlphaOfLengthBetween(3, 10));
String field = randomNumericField();
randomFieldOrScript(factory, field);
if (randomBoolean()) {
factory.missing("MISSING");
}
if (randomBoolean()) {
factory.maxDocsPerValue(randomIntBetween(1, 1000));
}
if (randomBoolean()) {
factory.shardSize(randomIntBetween(1, 1000));
}
if (randomBoolean()) {
factory.executionHint(randomFrom(ExecutionMode.values()).toString());
}
return factory;
}
}
| DiversifiedAggregationBuilderTests |
java | hibernate__hibernate-orm | hibernate-envers/src/main/java/org/hibernate/envers/boot/internal/EnversIntegrator.java | {
"start": 1519,
"end": 4916
} | class ____ implements Integrator {
private static final Logger log = Logger.getLogger( EnversIntegrator.class );
public static final String AUTO_REGISTER = "hibernate.envers.autoRegisterListeners";
public void integrate(
Metadata metadata,
BootstrapContext bootstrapContext,
SessionFactoryImplementor sessionFactory) {
final ServiceRegistry serviceRegistry = sessionFactory.getServiceRegistry();
final EnversService enversService = serviceRegistry.getService( EnversService.class );
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Opt-out of registration if EnversService is disabled
if ( !enversService.isEnabled() ) {
log.debug( "Skipping Envers listener registrations : EnversService disabled" );
return;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Opt-out of registration if asked to not register
final boolean autoRegister = serviceRegistry.getService( ConfigurationService.class ).getSetting(
AUTO_REGISTER,
StandardConverters.BOOLEAN,
true
);
if ( !autoRegister ) {
log.debug( "Skipping Envers listener registrations : Listener auto-registration disabled" );
return;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Verify that the EnversService is fully initialized and ready to go.
if ( !enversService.isInitialized() ) {
throw new HibernateException(
"Expecting EnversService to have been initialized prior to call to EnversIntegrator#integrate"
);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Opt-out of registration if no audited entities found
if ( !enversService.getEntitiesConfigurations().hasAuditedEntities() ) {
log.debug( "Skipping Envers listener registrations : No audited entities found" );
return;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Do the registrations
final EventListenerRegistry listenerRegistry = sessionFactory.getEventListenerRegistry();
listenerRegistry.addDuplicationStrategy( EnversListenerDuplicationStrategy.INSTANCE );
if ( enversService.getEntitiesConfigurations().hasAuditedEntities() ) {
listenerRegistry.appendListeners(
EventType.POST_DELETE,
new EnversPostDeleteEventListenerImpl( enversService )
);
listenerRegistry.appendListeners(
EventType.POST_INSERT,
new EnversPostInsertEventListenerImpl( enversService )
);
listenerRegistry.appendListeners(
EventType.PRE_UPDATE,
new EnversPreUpdateEventListenerImpl( enversService )
);
listenerRegistry.appendListeners(
EventType.POST_UPDATE,
new EnversPostUpdateEventListenerImpl( enversService )
);
listenerRegistry.appendListeners(
EventType.POST_COLLECTION_RECREATE,
new EnversPostCollectionRecreateEventListenerImpl( enversService )
);
listenerRegistry.appendListeners(
EventType.PRE_COLLECTION_REMOVE,
new EnversPreCollectionRemoveEventListenerImpl( enversService )
);
listenerRegistry.appendListeners(
EventType.PRE_COLLECTION_UPDATE,
new EnversPreCollectionUpdateEventListenerImpl( enversService )
);
}
}
@Override
public void disintegrate(SessionFactoryImplementor sessionFactory, SessionFactoryServiceRegistry serviceRegistry) {
ReflectionTools.reset();
}
}
| EnversIntegrator |
java | resilience4j__resilience4j | resilience4j-rxjava2/src/main/java/io/github/resilience4j/circuitbreaker/operator/SingleCircuitBreaker.java | {
"start": 1713,
"end": 2543
} | class ____ extends AbstractSingleObserver<T> {
private final long start;
CircuitBreakerSingleObserver(SingleObserver<? super T> downstreamObserver) {
super(downstreamObserver);
this.start = circuitBreaker.getCurrentTimestamp();
}
@Override
protected void hookOnError(Throwable e) {
circuitBreaker.onError(circuitBreaker.getCurrentTimestamp() - start, circuitBreaker.getTimestampUnit(), e);
}
@Override
protected void hookOnSuccess(T value) {
circuitBreaker.onResult(circuitBreaker.getCurrentTimestamp() - start, circuitBreaker.getTimestampUnit(), value);
}
@Override
protected void hookOnCancel() {
circuitBreaker.releasePermission();
}
}
}
| CircuitBreakerSingleObserver |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/consumer/BufferManager.java | {
"start": 2124,
"end": 15688
} | class ____ implements BufferListener, BufferRecycler {
/** The available buffer queue wraps both exclusive and requested floating buffers. */
private final AvailableBufferQueue bufferQueue = new AvailableBufferQueue();
/** The buffer provider for requesting exclusive buffers. */
private final MemorySegmentProvider globalPool;
/** The input channel to own this buffer manager. */
private final InputChannel inputChannel;
/**
* The tag indicates whether it is waiting for additional floating buffers from the buffer pool.
*/
@GuardedBy("bufferQueue")
private boolean isWaitingForFloatingBuffers;
/** The total number of required buffers for the respective input channel. */
@GuardedBy("bufferQueue")
private int numRequiredBuffers;
public BufferManager(
MemorySegmentProvider globalPool, InputChannel inputChannel, int numRequiredBuffers) {
this.globalPool = checkNotNull(globalPool);
this.inputChannel = checkNotNull(inputChannel);
checkArgument(numRequiredBuffers >= 0);
this.numRequiredBuffers = numRequiredBuffers;
}
// ------------------------------------------------------------------------
// Buffer request
// ------------------------------------------------------------------------
@Nullable
Buffer requestBuffer() {
synchronized (bufferQueue) {
// decrease the number of buffers require to avoid the possibility of
// allocating more than required buffers after the buffer is taken
--numRequiredBuffers;
return bufferQueue.takeBuffer();
}
}
Buffer requestBufferBlocking() throws InterruptedException {
synchronized (bufferQueue) {
Buffer buffer;
while ((buffer = bufferQueue.takeBuffer()) == null) {
if (inputChannel.isReleased()) {
throw new CancelTaskException(
"Input channel ["
+ inputChannel.channelInfo
+ "] has already been released.");
}
if (!isWaitingForFloatingBuffers) {
BufferPool bufferPool = inputChannel.inputGate.getBufferPool();
buffer = bufferPool.requestBuffer();
if (buffer == null && shouldContinueRequest(bufferPool)) {
continue;
}
}
if (buffer != null) {
return buffer;
}
bufferQueue.wait();
}
return buffer;
}
}
private boolean shouldContinueRequest(BufferPool bufferPool) {
if (bufferPool.addBufferListener(this)) {
isWaitingForFloatingBuffers = true;
numRequiredBuffers = 1;
return false;
} else if (bufferPool.isDestroyed()) {
throw new CancelTaskException("Local buffer pool has already been released.");
} else {
return true;
}
}
/** Requests exclusive buffers from the provider. */
void requestExclusiveBuffers(int numExclusiveBuffers) throws IOException {
checkArgument(numExclusiveBuffers >= 0, "Num exclusive buffers must be non-negative.");
if (numExclusiveBuffers == 0) {
return;
}
Collection<MemorySegment> segments =
globalPool.requestUnpooledMemorySegments(numExclusiveBuffers);
synchronized (bufferQueue) {
// AvailableBufferQueue::addExclusiveBuffer may release the previously allocated
// floating buffer, which requires the caller to recycle these released floating
// buffers. There should be no floating buffers that have been allocated before the
// exclusive buffers are initialized, so here only a simple assertion is required
checkState(
unsynchronizedGetFloatingBuffersAvailable() == 0,
"Bug in buffer allocation logic: floating buffer is allocated before exclusive buffers are initialized.");
for (MemorySegment segment : segments) {
bufferQueue.addExclusiveBuffer(
new NetworkBuffer(segment, this), numRequiredBuffers);
}
}
}
/**
* Requests floating buffers from the buffer pool based on the given required amount, and
* returns the actual requested amount. If the required amount is not fully satisfied, it will
* register as a listener.
*/
int requestFloatingBuffers(int numRequired) {
int numRequestedBuffers = 0;
synchronized (bufferQueue) {
// Similar to notifyBufferAvailable(), make sure that we never add a buffer after
// channel
// released all buffers via releaseAllResources().
if (inputChannel.isReleased()) {
return numRequestedBuffers;
}
numRequiredBuffers = numRequired;
numRequestedBuffers = tryRequestBuffers();
}
return numRequestedBuffers;
}
private int tryRequestBuffers() {
assert Thread.holdsLock(bufferQueue);
int numRequestedBuffers = 0;
while (bufferQueue.getAvailableBufferSize() < numRequiredBuffers
&& !isWaitingForFloatingBuffers) {
BufferPool bufferPool = inputChannel.inputGate.getBufferPool();
Buffer buffer = bufferPool.requestBuffer();
if (buffer != null) {
bufferQueue.addFloatingBuffer(buffer);
numRequestedBuffers++;
} else if (bufferPool.addBufferListener(this)) {
isWaitingForFloatingBuffers = true;
break;
}
}
return numRequestedBuffers;
}
// ------------------------------------------------------------------------
// Buffer recycle
// ------------------------------------------------------------------------
/**
* Exclusive buffer is recycled to this channel manager directly and it may trigger return extra
* floating buffer based on <tt>numRequiredBuffers</tt>.
*
* @param segment The exclusive segment of this channel.
*/
@Override
public void recycle(MemorySegment segment) {
@Nullable Buffer releasedFloatingBuffer = null;
synchronized (bufferQueue) {
try {
// Similar to notifyBufferAvailable(), make sure that we never add a buffer
// after channel released all buffers via releaseAllResources().
if (inputChannel.isReleased()) {
globalPool.recycleUnpooledMemorySegments(Collections.singletonList(segment));
return;
} else {
releasedFloatingBuffer =
bufferQueue.addExclusiveBuffer(
new NetworkBuffer(segment, this), numRequiredBuffers);
}
} catch (Throwable t) {
ExceptionUtils.rethrow(t);
} finally {
bufferQueue.notifyAll();
}
}
if (releasedFloatingBuffer != null) {
releasedFloatingBuffer.recycleBuffer();
} else {
try {
inputChannel.notifyBufferAvailable(1);
} catch (Throwable t) {
ExceptionUtils.rethrow(t);
}
}
}
void releaseFloatingBuffers() {
Queue<Buffer> buffers;
synchronized (bufferQueue) {
numRequiredBuffers = 0;
buffers = bufferQueue.clearFloatingBuffers();
}
// recycle all buffers out of the synchronization block to avoid dead lock
while (!buffers.isEmpty()) {
buffers.poll().recycleBuffer();
}
}
/** Recycles all the exclusive and floating buffers from the given buffer queue. */
void releaseAllBuffers(ArrayDeque<Buffer> buffers) throws IOException {
// Gather all exclusive buffers and recycle them to global pool in batch, because
// we do not want to trigger redistribution of buffers after each recycle.
final List<MemorySegment> exclusiveRecyclingSegments = new ArrayList<>();
Exception err = null;
Buffer buffer;
while ((buffer = buffers.poll()) != null) {
try {
if (buffer.getRecycler() == BufferManager.this) {
exclusiveRecyclingSegments.add(buffer.getMemorySegment());
} else {
buffer.recycleBuffer();
}
} catch (Exception e) {
err = firstOrSuppressed(e, err);
}
}
try {
synchronized (bufferQueue) {
bufferQueue.releaseAll(exclusiveRecyclingSegments);
bufferQueue.notifyAll();
}
} catch (Exception e) {
err = firstOrSuppressed(e, err);
}
try {
if (exclusiveRecyclingSegments.size() > 0) {
globalPool.recycleUnpooledMemorySegments(exclusiveRecyclingSegments);
}
} catch (Exception e) {
err = firstOrSuppressed(e, err);
}
if (err != null) {
throw err instanceof IOException ? (IOException) err : new IOException(err);
}
}
// ------------------------------------------------------------------------
// Buffer listener notification
// ------------------------------------------------------------------------
/**
* The buffer pool notifies this listener of an available floating buffer. If the listener is
* released or currently does not need extra buffers, the buffer should be returned to the
* buffer pool. Otherwise, the buffer will be added into the <tt>bufferQueue</tt>.
*
* @param buffer Buffer that becomes available in buffer pool.
* @return true if the buffer is accepted by this listener.
*/
@Override
public boolean notifyBufferAvailable(Buffer buffer) {
// Assuming two remote channels with respective buffer managers as listeners inside
// LocalBufferPool.
// While canceler thread calling ch1#releaseAllResources, it might trigger
// bm2#notifyBufferAvaialble.
// Concurrently if task thread is recycling exclusive buffer, it might trigger
// bm1#notifyBufferAvailable.
// Then these two threads will both occupy the respective bufferQueue lock and wait for
// other side's
// bufferQueue lock to cause deadlock. So we check the isReleased state out of synchronized
// to resolve it.
if (inputChannel.isReleased()) {
return false;
}
int numBuffers = 0;
boolean isBufferUsed = false;
try {
synchronized (bufferQueue) {
checkState(
isWaitingForFloatingBuffers,
"This channel should be waiting for floating buffers.");
isWaitingForFloatingBuffers = false;
// Important: make sure that we never add a buffer after releaseAllResources()
// released all buffers. Following scenarios exist:
// 1) releaseAllBuffers() already released buffers inside bufferQueue
// -> while isReleased is set correctly in InputChannel
// 2) releaseAllBuffers() did not yet release buffers from bufferQueue
// -> we may or may not have set isReleased yet but will always wait for the
// lock on bufferQueue to release buffers
if (inputChannel.isReleased()
|| bufferQueue.getAvailableBufferSize() >= numRequiredBuffers) {
return false;
}
bufferQueue.addFloatingBuffer(buffer);
isBufferUsed = true;
numBuffers += 1 + tryRequestBuffers();
bufferQueue.notifyAll();
}
inputChannel.notifyBufferAvailable(numBuffers);
} catch (Throwable t) {
inputChannel.setError(t);
}
return isBufferUsed;
}
@Override
public void notifyBufferDestroyed() {
// Nothing to do actually.
}
// ------------------------------------------------------------------------
// Getter properties
// ------------------------------------------------------------------------
@VisibleForTesting
int unsynchronizedGetNumberOfRequiredBuffers() {
return numRequiredBuffers;
}
int getNumberOfRequiredBuffers() {
synchronized (bufferQueue) {
return numRequiredBuffers;
}
}
@VisibleForTesting
boolean unsynchronizedIsWaitingForFloatingBuffers() {
return isWaitingForFloatingBuffers;
}
@VisibleForTesting
int getNumberOfAvailableBuffers() {
synchronized (bufferQueue) {
return bufferQueue.getAvailableBufferSize();
}
}
int unsynchronizedGetAvailableExclusiveBuffers() {
return bufferQueue.exclusiveBuffers.size();
}
int unsynchronizedGetFloatingBuffersAvailable() {
return bufferQueue.floatingBuffers.size();
}
/**
* Manages the exclusive and floating buffers of this channel, and handles the internal buffer
* related logic.
*/
static final | BufferManager |
java | spring-projects__spring-boot | core/spring-boot-test/src/main/java/org/springframework/boot/test/json/BasicJsonTester.java | {
"start": 1053,
"end": 1519
} | class ____ {
*
* private BasicJsonTester json = new BasicJsonTester(getClass());
*
* @Test
* public void testWriteJson() throws IOException {
* assertThat(json.from("example.json")).extractingJsonPathStringValue("@.name")
.isEqualTo("Spring");
* }
*
* }
* </pre>
*
* See {@link AbstractJsonMarshalTester} for more details.
*
* @author Phillip Webb
* @author Andy Wilkinson
* @since 1.4.0
*/
public | ExampleObjectJsonTests |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/views/ViewsWithSchemaTest.java | {
"start": 509,
"end": 730
} | class ____ {
@JsonView({ ViewAB.class })
public int a;
@JsonView({ ViewAB.class, ViewBC.class })
public int b;
@JsonView({ ViewBC.class })
public int c;
}
static | POJO |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_1100/Issue1121.java | {
"start": 202,
"end": 810
} | class ____ extends TestCase {
public void test_for_issue() throws Exception {
JSONObject userObject = new JSONObject();
userObject.put("name","jack");
userObject.put("age",20);
JSONObject result = new JSONObject();
result.put("host","127.0.0.1");
result.put("port",3306);
result.put("user",userObject);
result.put("admin",userObject);
String json = JSON.toJSONString(result, true);
System.out.println(json);
JSONObject jsonObject2 = JSON.parseObject(json);
assertEquals(result, jsonObject2);
}
}
| Issue1121 |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/conversion/uuid/UUIDTarget.java | {
"start": 235,
"end": 644
} | class ____ {
private String uuidA;
private String invalidUUID;
public String getUUIDA() {
return this.uuidA;
}
public void setUUIDA(final String uuidA) {
this.uuidA = uuidA;
}
public String getInvalidUUID() {
return this.invalidUUID;
}
public void setInvalidUUID(final String invalidUUID) {
this.invalidUUID = invalidUUID;
}
}
| UUIDTarget |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/common/JournalingBatchObserver.java | {
"start": 234,
"end": 780
} | class ____ implements BatchObserver {
private int implicitExecutionCount;
private int explicitExecutionCount;
@Override
public void batchExplicitlyExecuted() {
explicitExecutionCount++;
}
@Override
public void batchImplicitlyExecuted() {
implicitExecutionCount++;
}
public int getImplicitExecutionCount() {
return implicitExecutionCount;
}
public int getExplicitExecutionCount() {
return explicitExecutionCount;
}
public void reset() {
explicitExecutionCount = 0;
implicitExecutionCount = 0;
}
}
| JournalingBatchObserver |
java | spring-cloud__spring-cloud-gateway | spring-cloud-gateway-server-webmvc/src/main/java/org/springframework/cloud/gateway/server/mvc/predicate/MvcPredicateSupplier.java | {
"start": 854,
"end": 1027
} | class ____ implements PredicateSupplier {
@Override
public Collection<Method> get() {
return Arrays.asList(RequestPredicates.class.getMethods());
}
}
| MvcPredicateSupplier |
java | spring-projects__spring-boot | build-plugin/spring-boot-gradle-plugin/src/test/java/org/springframework/boot/gradle/tasks/run/BootRunIntegrationTests.java | {
"start": 1541,
"end": 4034
} | class ____ {
@SuppressWarnings("NullAway.Init")
GradleBuild gradleBuild;
@TestTemplate
void basicExecution() throws IOException {
copyClasspathApplication();
new File(this.gradleBuild.getProjectDir(), "src/main/resources").mkdirs();
BuildResult result = this.gradleBuild.build("bootRun");
BuildTask task = result.task(":bootRun");
assertThat(task).isNotNull();
assertThat(task.getOutcome()).isEqualTo(TaskOutcome.SUCCESS);
assertThat(result.getOutput()).contains("1. " + canonicalPathOf("build/classes/java/main"));
assertThat(result.getOutput()).contains("2. " + canonicalPathOf("build/resources/main"));
assertThat(result.getOutput()).doesNotContain(canonicalPathOf("src/main/resources"));
}
@TestTemplate
void sourceResourcesCanBeUsed() throws IOException {
copyClasspathApplication();
BuildResult result = this.gradleBuild.build("bootRun");
BuildTask task = result.task(":bootRun");
assertThat(task).isNotNull();
assertThat(task.getOutcome()).isEqualTo(TaskOutcome.SUCCESS);
assertThat(result.getOutput()).contains("1. " + canonicalPathOf("src/main/resources"));
assertThat(result.getOutput()).contains("2. " + canonicalPathOf("build/classes/java/main"));
assertThat(result.getOutput()).doesNotContain(canonicalPathOf("build/resources/main"));
}
@TestTemplate
void springBootExtensionMainClassNameIsUsed() throws IOException {
copyMainClassApplication();
BuildResult result = this.gradleBuild.build("bootRun");
BuildTask task = result.task(":bootRun");
assertThat(task).isNotNull();
assertThat(task.getOutcome()).isEqualTo(TaskOutcome.SUCCESS);
assertThat(result.getOutput()).contains("com.example.bootrun.main.CustomMainClass");
}
@TestTemplate
void applicationPluginMainClassNameIsUsed() throws IOException {
copyMainClassApplication();
BuildResult result = this.gradleBuild.build("bootRun");
BuildTask task = result.task(":bootRun");
assertThat(task).isNotNull();
assertThat(task.getOutcome()).isEqualTo(TaskOutcome.SUCCESS);
assertThat(result.getOutput()).contains("com.example.bootrun.main.CustomMainClass");
}
@TestTemplate
void applicationPluginMainClassNameIsNotUsedWhenItIsNull() throws IOException {
copyClasspathApplication();
BuildResult result = this.gradleBuild.build("bootRun");
BuildTask task = result.task(":bootRun");
assertThat(task).isNotNull();
assertThat(task.getOutcome()).isEqualTo(TaskOutcome.SUCCESS);
assertThat(result.getOutput())
.contains("Main | BootRunIntegrationTests |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/table/RowInTableLookupRandomizedTests.java | {
"start": 2288,
"end": 3437
} | class ____ extends ESTestCase {
private static final int TRIES = 100;
private static final int ROW_COUNT = 1000;
@ParametersFactory
public static List<Object[]> params() {
List<Object[]> params = new ArrayList<>();
for (int keysPerPosition : new int[] { 1, 2 }) {
for (int groups : new int[] { 1, 2, 5, 10 }) {
params.add(
new Object[] {
groups,
MultivalueDedupeTests.supportedTypes(),
IntStream.range(0, groups).mapToObj(i -> RANDOM_KEY_ELEMENT).toList(),
keysPerPosition,
1000,
any(RowInTableLookup.class) }
);
}
params.add(
new Object[] {
1,
List.of(ElementType.INT),
List.of(ASCENDING),
keysPerPosition,
1000,
any(AscendingSequenceRowInTableLookup.class) }
);
}
return params;
}
| RowInTableLookupRandomizedTests |
java | eclipse-vertx__vert.x | vertx-core/src/main/java/io/vertx/core/net/TrustManagerFactoryWrapper.java | {
"start": 1286,
"end": 1784
} | class ____ extends TrustManagerFactory {
private static final Logger LOGGER = LoggerFactory.getLogger(TrustManagerFactoryWrapper.class);
private static final String KEY_MANAGER_FACTORY_ALGORITHM = "no-algorithm";
private static final Provider PROVIDER = new Provider("", 1.0, "") {
};
TrustManagerFactoryWrapper(TrustManager trustManager) {
super(new TrustManagerFactorySpiWrapper(trustManager), PROVIDER, KEY_MANAGER_FACTORY_ALGORITHM);
}
private static | TrustManagerFactoryWrapper |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/mockito/MockitoAssertions.java | {
"start": 922,
"end": 2402
} | class ____ {
public static void assertIsMock(Object obj) {
assertThat(isMock(obj)).as("is a Mockito mock").isTrue();
assertIsNotSpy(obj);
}
public static void assertIsMock(Object obj, String message) {
assertThat(isMock(obj)).as("%s is a Mockito mock", message).isTrue();
assertIsNotSpy(obj, message);
}
public static void assertIsNotMock(Object obj) {
assertThat(isMock(obj)).as("is a Mockito mock").isFalse();
}
public static void assertIsNotMock(Object obj, String message) {
assertThat(isMock(obj)).as("%s is a Mockito mock", message).isFalse();
}
public static void assertIsSpy(Object obj) {
assertThat(isSpy(obj)).as("is a Mockito spy").isTrue();
}
public static void assertIsSpy(Object obj, String message) {
assertThat(isSpy(obj)).as("%s is a Mockito spy", message).isTrue();
}
public static void assertIsNotSpy(Object obj) {
assertThat(isSpy(obj)).as("is a Mockito spy").isFalse();
}
public static void assertIsNotSpy(Object obj, String message) {
assertThat(isSpy(obj)).as("%s is a Mockito spy", message).isFalse();
}
public static void assertMockName(Object mock, String name) {
MockName mockName = mockingDetails(mock).getMockCreationSettings().getMockName();
assertThat(mockName.toString()).as("mock name").isEqualTo(name);
}
private static boolean isMock(Object obj) {
return mockingDetails(obj).isMock();
}
private static boolean isSpy(Object obj) {
return mockingDetails(obj).isSpy();
}
}
| MockitoAssertions |
java | apache__logging-log4j2 | log4j-api-test/src/main/java/org/apache/logging/log4j/test/junit/SetTestProperty.java | {
"start": 1967,
"end": 2041
} | interface ____ {
SetTestProperty[] value();
}
}
| SetTestProperties |
java | apache__flink | flink-streaming-java/src/test/java/org/apache/flink/streaming/api/functions/PrintSinkTest.java | {
"start": 6182,
"end": 8053
} | class ____
implements WriterInitContext, SerializationSchema.InitializationContext {
private final JobInfo jobInfo;
private final TaskInfo taskInfo;
private MockInitContext(int numSubtasks) {
this.jobInfo = new JobInfoImpl(new JobID(), "MockJob");
this.taskInfo = new TaskInfoImpl("MockTask", numSubtasks + 1, 0, numSubtasks, 0);
}
@Override
public UserCodeClassLoader getUserCodeClassLoader() {
return SimpleUserCodeClassLoader.create(PrintSinkTest.class.getClassLoader());
}
@Override
public MailboxExecutor getMailboxExecutor() {
return new DummyMailboxExecutor();
}
@Override
public ProcessingTimeService getProcessingTimeService() {
return new TestProcessingTimeService();
}
@Override
public SinkWriterMetricGroup metricGroup() {
return MetricsGroupTestUtils.mockWriterMetricGroup();
}
@Override
public MetricGroup getMetricGroup() {
return metricGroup();
}
@Override
public OptionalLong getRestoredCheckpointId() {
return OptionalLong.empty();
}
@Override
public SerializationSchema.InitializationContext
asSerializationSchemaInitializationContext() {
return this;
}
@Override
public boolean isObjectReuseEnabled() {
return false;
}
@Override
public <IN> TypeSerializer<IN> createInputSerializer() {
return null;
}
@Override
public JobInfo getJobInfo() {
return jobInfo;
}
@Override
public TaskInfo getTaskInfo() {
return taskInfo;
}
}
private static | MockInitContext |
java | apache__flink | flink-yarn/src/main/java/org/apache/flink/yarn/YarnClusterDescriptor.java | {
"start": 77105,
"end": 77472
} | class ____ extends RuntimeException {
private static final long serialVersionUID = -812040641215388943L;
public YarnDeploymentException(String message) {
super(message);
}
public YarnDeploymentException(String message, Throwable cause) {
super(message, cause);
}
}
private | YarnDeploymentException |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/issues/AdviceWithWeaveByToUriCBRTest.java | {
"start": 1117,
"end": 2123
} | class ____ extends ContextTestSupport {
@Test
public void testAdviceCBR() throws Exception {
RouteDefinition route = context.getRouteDefinitions().get(0);
AdviceWith.adviceWith(route, context, new AdviceWithRouteBuilder() {
@Override
public void configure() throws Exception {
weaveByToUri("direct:branch*").replace().to("mock:foo");
mockEndpointsAndSkip("direct:branch*");
}
});
getMockEndpoint("mock:foo").expectedBodiesReceived("Hello World");
template.sendBody("direct:start", "Hello World");
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start").choice().when(header("foo")).to("direct:branch-1").otherwise().to("direct:branch-2");
}
};
}
}
| AdviceWithWeaveByToUriCBRTest |
java | spring-projects__spring-security | core/src/main/java/org/springframework/security/core/annotation/UniqueSecurityAnnotationScanner.java | {
"start": 2376,
"end": 2603
} | class ____ method; in case of a method
* parameter, it will only consider annotations on the parameter. In all cases, it will
* consider meta-annotations in its traversal.
*
* <p>
* When traversing the type hierarchy, this | and |
java | google__guava | android/guava-testlib/test/com/google/common/testing/EqualsTesterTest.java | {
"start": 10574,
"end": 11211
} | class ____ {
private final int aspect1;
private final int aspect2;
InvalidHashCodeObject(int aspect1, int aspect2) {
this.aspect1 = aspect1;
this.aspect2 = aspect2;
}
@SuppressWarnings("EqualsHashCode")
@Override
public boolean equals(@Nullable Object o) {
if (!(o instanceof InvalidHashCodeObject)) {
return false;
}
InvalidHashCodeObject other = (InvalidHashCodeObject) o;
if (aspect1 != other.aspect1) {
return false;
}
if (aspect2 != other.aspect2) {
return false;
}
return true;
}
}
/** Test | InvalidHashCodeObject |
java | elastic__elasticsearch | x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/action/GetAnalyticsCollectionRequestBWCSerializingTests.java | {
"start": 847,
"end": 2726
} | class ____ extends AbstractBWCSerializationTestCase<
GetAnalyticsCollectionAction.Request> {
@Override
protected Writeable.Reader<GetAnalyticsCollectionAction.Request> instanceReader() {
return GetAnalyticsCollectionAction.Request::new;
}
@Override
protected GetAnalyticsCollectionAction.Request createTestInstance() {
return new GetAnalyticsCollectionAction.Request(
TEST_REQUEST_TIMEOUT,
randomArray(10, String[]::new, () -> randomAlphaOfLengthBetween(1, 10))
);
}
@Override
protected GetAnalyticsCollectionAction.Request mutateInstance(GetAnalyticsCollectionAction.Request instance) throws IOException {
return new GetAnalyticsCollectionAction.Request(
TEST_REQUEST_TIMEOUT,
randomArrayOtherThan(instance.getNames(), () -> randomArray(10, String[]::new, () -> randomAlphaOfLengthBetween(1, 10)))
);
}
@Override
protected GetAnalyticsCollectionAction.Request doParseInstance(XContentParser parser) throws IOException {
return PARSER.apply(parser, null);
}
@Override
protected GetAnalyticsCollectionAction.Request mutateInstanceForVersion(
GetAnalyticsCollectionAction.Request instance,
TransportVersion version
) {
return new GetAnalyticsCollectionAction.Request(TEST_REQUEST_TIMEOUT, instance.getNames());
}
@SuppressWarnings("unchecked")
private static final ConstructingObjectParser<GetAnalyticsCollectionAction.Request, Void> PARSER = new ConstructingObjectParser<>(
"get_analytics_collection_request",
p -> new GetAnalyticsCollectionAction.Request(TEST_REQUEST_TIMEOUT, ((List<String>) p[0]).toArray(String[]::new))
);
static {
PARSER.declareStringArray(constructorArg(), NAMES_FIELD);
}
}
| GetAnalyticsCollectionRequestBWCSerializingTests |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/BlockLoaderWarnings.java | {
"start": 793,
"end": 1716
} | class ____ implements org.elasticsearch.index.mapper.blockloader.Warnings {
private final DriverContext.WarningsMode warningsMode;
private final Source source;
private Warnings delegate;
public BlockLoaderWarnings(DriverContext.WarningsMode warningsMode, Source source) {
this.warningsMode = warningsMode;
this.source = source;
}
@Override
public void registerException(Class<? extends Exception> exceptionClass, String message) {
if (delegate == null) {
delegate = Warnings.createOnlyWarnings(
warningsMode,
source.source().getLineNumber(),
source.source().getColumnNumber(),
source.text()
);
}
delegate.registerException(exceptionClass, message);
}
@Override
public String toString() {
return "warnings for " + source;
}
}
| BlockLoaderWarnings |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/XsltSaxonComponentBuilderFactory.java | {
"start": 11021,
"end": 13304
} | class ____
extends AbstractComponentBuilder<XsltSaxonComponent>
implements XsltSaxonComponentBuilder {
@Override
protected XsltSaxonComponent buildConcreteComponent() {
return new XsltSaxonComponent();
}
@Override
protected boolean setPropertyOnComponent(
Component component,
String name,
Object value) {
switch (name) {
case "allowTemplateFromHeader": ((XsltSaxonComponent) component).setAllowTemplateFromHeader((boolean) value); return true;
case "contentCache": ((XsltSaxonComponent) component).setContentCache((boolean) value); return true;
case "lazyStartProducer": ((XsltSaxonComponent) component).setLazyStartProducer((boolean) value); return true;
case "autowiredEnabled": ((XsltSaxonComponent) component).setAutowiredEnabled((boolean) value); return true;
case "saxonConfiguration": ((XsltSaxonComponent) component).setSaxonConfiguration((net.sf.saxon.Configuration) value); return true;
case "saxonConfigurationProperties": ((XsltSaxonComponent) component).setSaxonConfigurationProperties((java.util.Map) value); return true;
case "saxonExtensionFunctions": ((XsltSaxonComponent) component).setSaxonExtensionFunctions((java.lang.String) value); return true;
case "secureProcessing": ((XsltSaxonComponent) component).setSecureProcessing((boolean) value); return true;
case "transformerFactoryClass": ((XsltSaxonComponent) component).setTransformerFactoryClass((java.lang.String) value); return true;
case "transformerFactoryConfigurationStrategy": ((XsltSaxonComponent) component).setTransformerFactoryConfigurationStrategy((org.apache.camel.component.xslt.TransformerFactoryConfigurationStrategy) value); return true;
case "uriResolver": ((XsltSaxonComponent) component).setUriResolver((javax.xml.transform.URIResolver) value); return true;
case "uriResolverFactory": ((XsltSaxonComponent) component).setUriResolverFactory((org.apache.camel.component.xslt.XsltUriResolverFactory) value); return true;
default: return false;
}
}
}
} | XsltSaxonComponentBuilderImpl |
java | google__guava | android/guava/src/com/google/common/util/concurrent/Futures.java | {
"start": 3601,
"end": 31226
} | class ____ extends GwtFuturesCatchingSpecialization {
// A note on memory visibility.
// Many of the utilities in this class (transform, withFallback, withTimeout, asList, combine)
// have two requirements that significantly complicate their design.
// 1. Cancellation should propagate from the returned future to the input future(s).
// 2. The returned futures shouldn't unnecessarily 'pin' their inputs after completion.
//
// A consequence of these requirements is that the delegate futures cannot be stored in
// final fields.
//
// For simplicity the rest of this description will discuss Futures.catching since it is the
// simplest instance, though very similar descriptions apply to many other classes in this file.
//
// In the constructor of AbstractCatchingFuture, the delegate future is assigned to a field
// 'inputFuture'. That field is non-final and non-volatile. There are 2 places where the
// 'inputFuture' field is read and where we will have to consider visibility of the write
// operation in the constructor.
//
// 1. In the listener that performs the callback. In this case it is fine since inputFuture is
// assigned prior to calling addListener, and addListener happens-before any invocation of the
// listener. Notably, this means that 'volatile' is unnecessary to make 'inputFuture' visible
// to the listener.
//
// 2. In done() where we may propagate cancellation to the input. In this case it is _not_ fine.
// There is currently nothing that enforces that the write to inputFuture in the constructor is
// visible to done(). This is because there is no happens before edge between the write and a
// (hypothetical) unsafe read by our caller. Note: adding 'volatile' does not fix this issue,
// it would just add an edge such that if done() observed non-null, then it would also
// definitely observe all earlier writes, but we still have no guarantee that done() would see
// the initial write (just stronger guarantees if it does).
//
// See: http://cs.oswego.edu/pipermail/concurrency-interest/2015-January/013800.html
// For a (long) discussion about this specific issue and the general futility of life.
//
// For the time being we are OK with the problem discussed above since it requires a caller to
// introduce a very specific kind of data-race. And given the other operations performed by these
// methods that involve volatile read/write operations, in practice there is no issue. Also, the
// way in which such a visibility issue would surface is most likely as a failure of cancel() to
// propagate to the input. Cancellation propagation is fundamentally racy so this is fine.
//
// Future versions of the JMM may revise safe construction semantics in such a way that we can
// safely publish these objects and we won't need this whole discussion.
// TODO(user,lukes): consider adding volatile to all these fields since in current known JVMs
// that should resolve the issue. This comes at the cost of adding more write barriers to the
// implementations.
  // Static utility class; private constructor prevents instantiation.
  private Futures() {}
  /**
   * Creates a {@code ListenableFuture} which has its value set immediately upon construction. The
   * getters just return the value. This {@code Future} can't be canceled or timed out and its
   * {@code isDone()} method always returns {@code true}.
   */
  public static <V extends @Nullable Object> ListenableFuture<V> immediateFuture(
      @ParametricNullness V value) {
    if (value == null) {
      // Reuse the shared NULL singleton rather than allocating a new future per null value.
      // This cast is safe because null is assignable to V for all V (i.e. it is bivariant)
      @SuppressWarnings("unchecked")
      ListenableFuture<V> typedNull = (ListenableFuture<V>) ImmediateFuture.NULL;
      return typedNull;
    }
    return new ImmediateFuture<>(value);
  }
  /**
   * Returns a successful {@code ListenableFuture<Void>}. This method is equivalent to {@code
   * immediateFuture(null)} except that it is restricted to produce futures of type {@code Void}.
   *
   * @since 29.0
   */
  @SuppressWarnings("unchecked")
  public static ListenableFuture<@Nullable Void> immediateVoidFuture() {
    // Safe: the shared NULL singleton completes with null, which is a valid @Nullable Void.
    return (ListenableFuture<@Nullable Void>) ImmediateFuture.NULL;
  }
/**
* Returns a {@code ListenableFuture} which has an exception set immediately upon construction.
*
* <p>The returned {@code Future} can't be cancelled, and its {@code isDone()} method always
* returns {@code true}. Calling {@code get()} will immediately throw the provided {@code
* Throwable} wrapped in an {@code ExecutionException}.
*/
public static <V extends @Nullable Object> ListenableFuture<V> immediateFailedFuture(
Throwable throwable) {
checkNotNull(throwable);
return new ImmediateFailedFuture<>(throwable);
}
  /**
   * Creates a {@code ListenableFuture} which is cancelled immediately upon construction, so that
   * {@code isCancelled()} always returns {@code true}.
   *
   * @since 14.0
   */
  @SuppressWarnings("unchecked") // ImmediateCancelledFuture can work with any type
  public static <V extends @Nullable Object> ListenableFuture<V> immediateCancelledFuture() {
    ListenableFuture<Object> instance = ImmediateCancelledFuture.INSTANCE;
    // INSTANCE may be null in some configurations (see ImmediateCancelledFuture); when it is
    // present, reuse the shared singleton instead of allocating.
    if (instance != null) {
      return (ListenableFuture<V>) instance;
    }
    // No shared singleton available: each call allocates a fresh cancelled future.
    return new ImmediateCancelledFuture<>();
  }
/**
* Executes {@code callable} on the specified {@code executor}, returning a {@code Future}.
*
* @throws RejectedExecutionException if the task cannot be scheduled for execution
* @since 28.2
*/
public static <O extends @Nullable Object> ListenableFuture<O> submit(
Callable<O> callable, Executor executor) {
TrustedListenableFutureTask<O> task = TrustedListenableFutureTask.create(callable);
executor.execute(task);
return task;
}
/**
* Executes {@code runnable} on the specified {@code executor}, returning a {@code Future} that
* will complete after execution.
*
* @throws RejectedExecutionException if the task cannot be scheduled for execution
* @since 28.2
*/
public static ListenableFuture<@Nullable Void> submit(Runnable runnable, Executor executor) {
TrustedListenableFutureTask<@Nullable Void> task =
TrustedListenableFutureTask.create(runnable, null);
executor.execute(task);
return task;
}
  /**
   * Executes {@code callable} on the specified {@code executor}, returning a {@code Future}.
   *
   * @throws RejectedExecutionException if the task cannot be scheduled for execution
   * @since 23.0
   */
  public static <O extends @Nullable Object> ListenableFuture<O> submitAsync(
      AsyncCallable<O> callable, Executor executor) {
    // The task completes with the future returned by the AsyncCallable, not a direct value.
    TrustedListenableFutureTask<O> task = TrustedListenableFutureTask.create(callable);
    executor.execute(task);
    return task;
  }
  /**
   * Schedules {@code callable} on the specified {@code executor}, returning a {@code Future}.
   *
   * @throws RejectedExecutionException if the task cannot be scheduled for execution
   * @since 33.4.0 (but since 28.0 in the JRE flavor)
   */
  @J2ktIncompatible
  @GwtIncompatible // java.util.concurrent.ScheduledExecutorService
  @IgnoreJRERequirement // Users will use this only if they're already using Duration.
  // TODO(cpovirk): Return ListenableScheduledFuture?
  public static <O extends @Nullable Object> ListenableFuture<O> scheduleAsync(
      AsyncCallable<O> callable, Duration delay, ScheduledExecutorService executorService) {
    // Convert the Duration to saturated nanos and delegate to the long/TimeUnit overload.
    return scheduleAsync(callable, toNanosSaturated(delay), TimeUnit.NANOSECONDS, executorService);
  }
  /**
   * Schedules {@code callable} on the specified {@code executor}, returning a {@code Future}.
   *
   * @throws RejectedExecutionException if the task cannot be scheduled for execution
   * @since 23.0
   */
  @J2ktIncompatible
  @GwtIncompatible // java.util.concurrent.ScheduledExecutorService
  @SuppressWarnings("GoodTime") // should accept a java.time.Duration
  // TODO(cpovirk): Return ListenableScheduledFuture?
  public static <O extends @Nullable Object> ListenableFuture<O> scheduleAsync(
      AsyncCallable<O> callable,
      long delay,
      TimeUnit timeUnit,
      ScheduledExecutorService executorService) {
    TrustedListenableFutureTask<O> task = TrustedListenableFutureTask.create(callable);
    Future<?> scheduled = executorService.schedule(task, delay, timeUnit);
    /*
     * Even when the user interrupts the task, we pass `false` to `cancel` so that we don't
     * interrupt a second time after the interruption performed by TrustedListenableFutureTask.
     */
    // When the task finishes (or is cancelled), cancel the pending schedule so the executor does
    // not retain it needlessly.
    task.addListener(() -> scheduled.cancel(false), directExecutor());
    return task;
  }
  /**
   * Returns a {@code Future} whose result is taken from the given primary {@code input} or, if the
   * primary input fails with the given {@code exceptionType}, from the result provided by the
   * {@code fallback}. {@link Function#apply} is not invoked until the primary input has failed, so
   * if the primary input succeeds, it is never invoked. If, during the invocation of {@code
   * fallback}, an exception is thrown, this exception is used as the result of the output {@code
   * Future}.
   *
   * <p>Usage example:
   *
   * {@snippet :
   * ListenableFuture<Integer> fetchCounterFuture = ...;
   *
   * // Falling back to a zero counter in case an exception happens when
   * // processing the RPC to fetch counters.
   * ListenableFuture<Integer> faultTolerantFuture = Futures.catching(
   *     fetchCounterFuture, FetchException.class, x -> 0, directExecutor());
   * }
   *
   * <p>When selecting an executor, note that {@code directExecutor} is dangerous in some cases. See
   * the warnings in the {@link MoreExecutors#directExecutor} documentation.
   *
   * @param input the primary input {@code Future}
   * @param exceptionType the exception type that triggers use of {@code fallback}. The exception
   *     type is matched against the input's exception. "The input's exception" means the cause of
   *     the {@link ExecutionException} thrown by {@code input.get()} or, if {@code get()} throws a
   *     different kind of exception, that exception itself. To avoid hiding bugs and other
   *     unrecoverable errors, callers should prefer more specific types, avoiding {@code
   *     Throwable.class} in particular.
   * @param fallback the {@link Function} to be called if {@code input} fails with the expected
   *     exception type. The function's argument is the input's exception. "The input's exception"
   *     means the cause of the {@link ExecutionException} thrown by {@code input.get()} or, if
   *     {@code get()} throws a different kind of exception, that exception itself.
   * @param executor the executor that runs {@code fallback} if {@code input} fails
   * @since 19.0
   */
  @J2ktIncompatible
  @Partially.GwtIncompatible("AVAILABLE but requires exceptionType to be Throwable.class")
  public static <V extends @Nullable Object, X extends Throwable> ListenableFuture<V> catching(
      ListenableFuture<? extends V> input,
      Class<X> exceptionType,
      Function<? super X, ? extends V> fallback,
      Executor executor) {
    // Exception matching, fallback invocation, and cancellation wiring live in
    // AbstractCatchingFuture.
    return AbstractCatchingFuture.create(input, exceptionType, fallback, executor);
  }
  /**
   * Returns a {@code Future} whose result is taken from the given primary {@code input} or, if the
   * primary input fails with the given {@code exceptionType}, from the result provided by the
   * {@code fallback}. {@link AsyncFunction#apply} is not invoked until the primary input has
   * failed, so if the primary input succeeds, it is never invoked. If, during the invocation of
   * {@code fallback}, an exception is thrown, this exception is used as the result of the output
   * {@code Future}.
   *
   * <p>Usage examples:
   *
   * {@snippet :
   * ListenableFuture<Integer> fetchCounterFuture = ...;
   *
   * // Falling back to a zero counter in case an exception happens when
   * // processing the RPC to fetch counters.
   * ListenableFuture<Integer> faultTolerantFuture = Futures.catchingAsync(
   *     fetchCounterFuture, FetchException.class, x -> immediateFuture(0), directExecutor());
   * }
   *
   * <p>The fallback can also choose to propagate the original exception when desired:
   *
   * {@snippet :
   * ListenableFuture<Integer> fetchCounterFuture = ...;
   *
   * // Falling back to a zero counter only in case the exception was a
   * // TimeoutException.
   * ListenableFuture<Integer> faultTolerantFuture = Futures.catchingAsync(
   *     fetchCounterFuture,
   *     FetchException.class,
   *     e -> {
   *       if (omitDataOnFetchFailure) {
   *         return immediateFuture(0);
   *       }
   *       throw e;
   *     },
   *     directExecutor());
   * }
   *
   * <p>When selecting an executor, note that {@code directExecutor} is dangerous in some cases. See
   * the warnings in the {@link MoreExecutors#directExecutor} documentation.
   *
   * @param input the primary input {@code Future}
   * @param exceptionType the exception type that triggers use of {@code fallback}. The exception
   *     type is matched against the input's exception. "The input's exception" means the cause of
   *     the {@link ExecutionException} thrown by {@code input.get()} or, if {@code get()} throws a
   *     different kind of exception, that exception itself. To avoid hiding bugs and other
   *     unrecoverable errors, callers should prefer more specific types, avoiding {@code
   *     Throwable.class} in particular.
   * @param fallback the {@link AsyncFunction} to be called if {@code input} fails with the expected
   *     exception type. The function's argument is the input's exception. "The input's exception"
   *     means the cause of the {@link ExecutionException} thrown by {@code input.get()} or, if
   *     {@code get()} throws a different kind of exception, that exception itself.
   * @param executor the executor that runs {@code fallback} if {@code input} fails
   * @since 19.0 (similar functionality in 14.0 as {@code withFallback})
   */
  @J2ktIncompatible
  @Partially.GwtIncompatible("AVAILABLE but requires exceptionType to be Throwable.class")
  public static <V extends @Nullable Object, X extends Throwable> ListenableFuture<V> catchingAsync(
      ListenableFuture<? extends V> input,
      Class<X> exceptionType,
      AsyncFunction<? super X, ? extends V> fallback,
      Executor executor) {
    // Async variant: the output completes with the future returned by the fallback rather than a
    // direct value. All wiring lives in AbstractCatchingFuture.
    return AbstractCatchingFuture.createAsync(input, exceptionType, fallback, executor);
  }
  /**
   * Returns a future that delegates to another but will finish early (via a {@link
   * TimeoutException} wrapped in an {@link ExecutionException}) if the specified duration expires.
   *
   * <p>The delegate future is interrupted and cancelled if it times out.
   *
   * @param delegate The future to delegate to.
   * @param time when to time out the future
   * @param scheduledExecutor The executor service to enforce the timeout.
   * @since 33.4.0 (but since 28.0 in the JRE flavor)
   */
  @J2ktIncompatible
  @GwtIncompatible // java.util.concurrent.ScheduledExecutorService
  @IgnoreJRERequirement // Users will use this only if they're already using Duration.
  public static <V extends @Nullable Object> ListenableFuture<V> withTimeout(
      ListenableFuture<V> delegate, Duration time, ScheduledExecutorService scheduledExecutor) {
    // Convert the Duration to saturated nanos and delegate to the long/TimeUnit overload.
    return withTimeout(delegate, toNanosSaturated(time), TimeUnit.NANOSECONDS, scheduledExecutor);
  }
/**
* Returns a future that delegates to another but will finish early (via a {@link
* TimeoutException} wrapped in an {@link ExecutionException}) if the specified duration expires.
*
* <p>The delegate future is interrupted and cancelled if it times out.
*
* @param delegate The future to delegate to.
* @param time when to time out the future
* @param unit the time unit of the time parameter
* @param scheduledExecutor The executor service to enforce the timeout.
* @since 19.0
*/
@J2ktIncompatible
@GwtIncompatible // java.util.concurrent.ScheduledExecutorService
@SuppressWarnings("GoodTime") // should accept a java.time.Duration
public static <V extends @Nullable Object> ListenableFuture<V> withTimeout(
ListenableFuture<V> delegate,
long time,
TimeUnit unit,
ScheduledExecutorService scheduledExecutor) {
if (delegate.isDone()) {
return delegate;
}
return TimeoutFuture.create(delegate, time, unit, scheduledExecutor);
}
  /**
   * Returns a new {@code Future} whose result is asynchronously derived from the result of the
   * given {@code Future}. If the given {@code Future} fails, the returned {@code Future} fails with
   * the same exception (and the function is not invoked).
   *
   * <p>More precisely, the returned {@code Future} takes its result from a {@code Future} produced
   * by applying the given {@code AsyncFunction} to the result of the original {@code Future}.
   * Example usage:
   *
   * {@snippet :
   * ListenableFuture<RowKey> rowKeyFuture = indexService.lookUp(query);
   * ListenableFuture<QueryResult> queryFuture =
   *     transformAsync(rowKeyFuture, dataService::readFuture, executor);
   * }
   *
   * <p>When selecting an executor, note that {@code directExecutor} is dangerous in some cases. See
   * the warnings in the {@link MoreExecutors#directExecutor} documentation.
   *
   * <p>The returned {@code Future} attempts to keep its cancellation state in sync with that of the
   * input future and that of the future returned by the chain function. That is, if the returned
   * {@code Future} is cancelled, it will attempt to cancel the other two, and if either of the
   * other two is cancelled, the returned {@code Future} will receive a callback in which it will
   * attempt to cancel itself.
   *
   * @param input The future to transform
   * @param function A function to transform the result of the input future to the result of the
   *     output future
   * @param executor Executor to run the function in.
   * @return A future that holds result of the function (if the input succeeded) or the original
   *     input's failure (if not)
   * @since 19.0 (in 11.0 as {@code transform})
   */
  public static <I extends @Nullable Object, O extends @Nullable Object>
      ListenableFuture<O> transformAsync(
          ListenableFuture<I> input,
          AsyncFunction<? super I, ? extends O> function,
          Executor executor) {
    // Chaining, error propagation, and two-way cancellation live in AbstractTransformFuture.
    return AbstractTransformFuture.createAsync(input, function, executor);
  }
  /**
   * Returns a new {@code Future} whose result is derived from the result of the given {@code
   * Future}. If {@code input} fails, the returned {@code Future} fails with the same exception (and
   * the function is not invoked). Example usage:
   *
   * {@snippet :
   * ListenableFuture<QueryResult> queryFuture = ...;
   * ListenableFuture<List<Row>> rowsFuture =
   *     transform(queryFuture, QueryResult::getRows, executor);
   * }
   *
   * <p>When selecting an executor, note that {@code directExecutor} is dangerous in some cases. See
   * the warnings in the {@link MoreExecutors#directExecutor} documentation.
   *
   * <p>The returned {@code Future} attempts to keep its cancellation state in sync with that of the
   * input future. That is, if the returned {@code Future} is cancelled, it will attempt to cancel
   * the input, and if the input is cancelled, the returned {@code Future} will receive a callback
   * in which it will attempt to cancel itself.
   *
   * <p>An example use of this method is to convert a serializable object returned from an RPC into
   * a POJO.
   *
   * @param input The future to transform
   * @param function A Function to transform the results of the provided future to the results of
   *     the returned future.
   * @param executor Executor to run the function in.
   * @return A future that holds result of the transformation.
   * @since 9.0 (in 2.0 as {@code compose})
   */
  public static <I extends @Nullable Object, O extends @Nullable Object>
      ListenableFuture<O> transform(
          ListenableFuture<I> input, Function<? super I, ? extends O> function, Executor executor) {
    // Synchronous variant: the function returns a value directly; AbstractTransformFuture handles
    // propagation and cancellation.
    return AbstractTransformFuture.create(input, function, executor);
  }
/**
* Like {@link #transform(ListenableFuture, Function, Executor)} except that the transformation
* {@code function} is invoked on each call to {@link Future#get() get()} on the returned future.
*
* <p>The returned {@code Future} reflects the input's cancellation state directly, and any
* attempt to cancel the returned Future is likewise passed through to the input Future.
*
* <p>Note that calls to {@linkplain Future#get(long, TimeUnit) timed get} only apply the timeout
* to the execution of the underlying {@code Future}, <em>not</em> to the execution of the
* transformation function.
*
* <p>The primary audience of this method is callers of {@code transform} who don't have a {@code
* ListenableFuture} available and do not mind repeated, lazy function evaluation.
*
* @param input The future to transform
* @param function A Function to transform the results of the provided future to the results of
* the returned future.
* @return A future that returns the result of the transformation.
* @since 10.0
*/
@J2ktIncompatible
@GwtIncompatible // TODO
public static <I extends @Nullable Object, O extends @Nullable Object> Future<O> lazyTransform(
Future<I> input, Function<? super I, ? extends O> function) {
checkNotNull(input);
checkNotNull(function);
return new Future<O>() {
@Override
public boolean cancel(boolean mayInterruptIfRunning) {
return input.cancel(mayInterruptIfRunning);
}
@Override
public boolean isCancelled() {
return input.isCancelled();
}
@Override
public boolean isDone() {
return input.isDone();
}
@Override
public O get() throws InterruptedException, ExecutionException {
return applyTransformation(input.get());
}
@Override
public O get(long timeout, TimeUnit unit)
throws InterruptedException, ExecutionException, TimeoutException {
return applyTransformation(input.get(timeout, unit));
}
private O applyTransformation(I input) throws ExecutionException {
try {
return function.apply(input);
} catch (Throwable t) {
// Any Exception is either a RuntimeException or sneaky checked exception.
throw new ExecutionException(t);
}
}
};
}
  /**
   * Creates a new {@code ListenableFuture} whose value is a list containing the values of all its
   * input futures, if all succeed.
   *
   * <p>The list of results is in the same order as the input list.
   *
   * <p>This differs from {@link #successfulAsList(ListenableFuture[])} in that it will return a
   * failed future if any of the items fails.
   *
   * <p>Canceling this future will attempt to cancel all the component futures, and if any of the
   * provided futures fails or is canceled, this one is, too.
   *
   * @param futures futures to combine
   * @return a future that provides a list of the results of the component futures
   * @since 10.0
   */
  @SafeVarargs
  public static <V extends @Nullable Object> ListenableFuture<List<V>> allAsList(
      ListenableFuture<? extends V>... futures) {
    // 'true' selects all-must-succeed semantics in ListFuture.
    ListenableFuture<List<@Nullable V>> nullable =
        new ListFuture<V>(ImmutableList.copyOf(futures), true);
    // allAsList ensures that it fills the output list with V instances.
    @SuppressWarnings("nullness")
    ListenableFuture<List<V>> nonNull = nullable;
    return nonNull;
  }
  /**
   * Creates a new {@code ListenableFuture} whose value is a list containing the values of all its
   * input futures, if all succeed.
   *
   * <p>The list of results is in the same order as the input list.
   *
   * <p>This differs from {@link #successfulAsList(Iterable)} in that it will return a failed future
   * if any of the items fails.
   *
   * <p>Canceling this future will attempt to cancel all the component futures, and if any of the
   * provided futures fails or is canceled, this one is, too.
   *
   * @param futures futures to combine
   * @return a future that provides a list of the results of the component futures
   * @since 10.0
   */
  public static <V extends @Nullable Object> ListenableFuture<List<V>> allAsList(
      Iterable<? extends ListenableFuture<? extends V>> futures) {
    // 'true' selects all-must-succeed semantics in ListFuture.
    ListenableFuture<List<@Nullable V>> nullable =
        new ListFuture<V>(ImmutableList.copyOf(futures), true);
    // allAsList ensures that it fills the output list with V instances.
    @SuppressWarnings("nullness")
    ListenableFuture<List<V>> nonNull = nullable;
    return nonNull;
  }
/**
* Creates a {@link FutureCombiner} that processes the completed futures whether or not they're
* successful.
*
* <p>Any failures from the input futures will not be propagated to the returned future.
*
* @since 20.0
*/
@SafeVarargs
public static <V extends @Nullable Object> FutureCombiner<V> whenAllComplete(
ListenableFuture<? extends V>... futures) {
return new FutureCombiner<>(false, ImmutableList.copyOf(futures));
}
  /**
   * Creates a {@link FutureCombiner} that processes the completed futures whether or not they're
   * successful.
   *
   * <p>Any failures from the input futures will not be propagated to the returned future.
   *
   * @since 20.0
   */
  public static <V extends @Nullable Object> FutureCombiner<V> whenAllComplete(
      Iterable<? extends ListenableFuture<? extends V>> futures) {
    // 'false' selects all-complete (not all-must-succeed) semantics.
    return new FutureCombiner<>(false, ImmutableList.copyOf(futures));
  }
/**
* Creates a {@link FutureCombiner} requiring that all passed in futures are successful.
*
* <p>If any input fails, the returned future fails immediately.
*
* @since 20.0
*/
@SafeVarargs
public static <V extends @Nullable Object> FutureCombiner<V> whenAllSucceed(
ListenableFuture<? extends V>... futures) {
return new FutureCombiner<>(true, ImmutableList.copyOf(futures));
}
  /**
   * Creates a {@link FutureCombiner} requiring that all passed in futures are successful.
   *
   * <p>If any input fails, the returned future fails immediately.
   *
   * @since 20.0
   */
  public static <V extends @Nullable Object> FutureCombiner<V> whenAllSucceed(
      Iterable<? extends ListenableFuture<? extends V>> futures) {
    // 'true' selects fail-fast all-must-succeed semantics.
    return new FutureCombiner<>(true, ImmutableList.copyOf(futures));
  }
/**
* A helper to create a new {@code ListenableFuture} whose result is generated from a combination
* of input futures.
*
* <p>See {@link #whenAllComplete} and {@link #whenAllSucceed} for how to instantiate this class.
*
* <p>Example:
*
* {@snippet :
* final ListenableFuture<Instant> loginDateFuture =
* loginService.findLastLoginDate(username);
* final ListenableFuture<List<String>> recentCommandsFuture =
* recentCommandsService.findRecentCommands(username);
* ListenableFuture<UsageHistory> usageFuture =
* Futures.whenAllSucceed(loginDateFuture, recentCommandsFuture)
* .call(
* () ->
* new UsageHistory(
* username,
* Futures.getDone(loginDateFuture),
* Futures.getDone(recentCommandsFuture)),
* executor);
* }
*
* @since 20.0
*/
@GwtCompatible
public static final | Futures |
java | google__auto | value/src/test/java/com/google/auto/value/processor/AutoValueCompilationTest.java | {
"start": 76092,
"end": 76442
} | enum ____");
}
@Test
public void autoValueBuilderDuplicate() {
JavaFileObject javaFileObject =
JavaFileObjects.forSourceLines(
"foo.bar.Baz",
"package foo.bar;",
"",
"import com.google.auto.value.AutoValue;",
"",
"@AutoValue",
"public abstract | Builder |
java | elastic__elasticsearch | x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/UnusedStateRemover.java | {
"start": 2110,
"end": 2193
} | class ____ any unused documents stored
* in the .ml-state* indices.
*/
public | deletes |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesAction.java | {
"start": 611,
"end": 1092
} | class ____ extends ActionType<HasPrivilegesResponse> {
public static final HasPrivilegesAction INSTANCE = new HasPrivilegesAction();
public static final String NAME = "cluster:admin/xpack/security/user/has_privileges";
public static final RemoteClusterActionType<HasPrivilegesResponse> REMOTE_TYPE = new RemoteClusterActionType<>(
NAME,
HasPrivilegesResponse::new
);
private HasPrivilegesAction() {
super(NAME);
}
}
| HasPrivilegesAction |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFS.java | {
"start": 1581,
"end": 8118
} | class ____ extends SymlinkBaseTest {
// Workaround for HADOOP-9652
static {
RawLocalFileSystem.useStatIfAvailable();
}
@Override
protected String getScheme() {
return "file";
}
@Override
protected String testBaseDir1() throws IOException {
return wrapper.getAbsoluteTestRootDir()+"/test1";
}
@Override
protected String testBaseDir2() throws IOException {
return wrapper.getAbsoluteTestRootDir()+"/test2";
}
@Override
protected URI testURI() {
try {
return new URI("file:///");
} catch (URISyntaxException e) {
return null;
}
}
@Override
public void testCreateDanglingLink() throws IOException {
// Dangling symlinks are not supported on Windows local file system.
assumeNotWindows();
super.testCreateDanglingLink();
}
@Override
public void testCreateFileViaDanglingLinkParent() throws IOException {
assumeNotWindows();
super.testCreateFileViaDanglingLinkParent();
}
@Override
public void testOpenResolvesLinks() throws IOException {
assumeNotWindows();
super.testOpenResolvesLinks();
}
@Override
public void testRecursiveLinks() throws IOException {
assumeNotWindows();
super.testRecursiveLinks();
}
@Override
public void testRenameDirToDanglingSymlink() throws IOException {
assumeNotWindows();
super.testRenameDirToDanglingSymlink();
}
@Override
public void testStatDanglingLink() throws IOException {
assumeNotWindows();
super.testStatDanglingLink();
}
@Test
@Timeout(value = 10)
/** lstat a non-existant file using a partially qualified path */
public void testDanglingLinkFilePartQual() throws IOException {
Path filePartQual = new Path(getScheme()+":///doesNotExist");
try {
wrapper.getFileLinkStatus(filePartQual);
fail("Got FileStatus for non-existant file");
} catch (FileNotFoundException f) {
// Expected
}
try {
wrapper.getLinkTarget(filePartQual);
fail("Got link target for non-existant file");
} catch (FileNotFoundException f) {
// Expected
}
}
@Test
@Timeout(value = 10)
/** Stat and lstat a dangling link */
public void testDanglingLink() throws IOException {
assumeNotWindows();
Path fileAbs = new Path(testBaseDir1()+"/file");
Path fileQual = new Path(testURI().toString(), fileAbs);
Path link = new Path(testBaseDir1()+"/linkToFile");
Path linkQual = new Path(testURI().toString(), link.toString());
wrapper.createSymlink(fileAbs, link, false);
// Deleting the link using FileContext currently fails because
// resolve looks up LocalFs rather than RawLocalFs for the path
// so we call ChecksumFs delete (which doesn't delete dangling
// links) instead of delegating to delete in RawLocalFileSystem
// which deletes via fullyDelete. testDeleteLink above works
// because the link is not dangling.
//assertTrue(fc.delete(link, false));
FileUtil.fullyDelete(new File(link.toUri().getPath()));
wrapper.createSymlink(fileAbs, link, false);
try {
wrapper.getFileStatus(link);
fail("Got FileStatus for dangling link");
} catch (FileNotFoundException f) {
// Expected. File's exists method returns false for dangling links
}
// We can stat a dangling link
UserGroupInformation user = UserGroupInformation.getCurrentUser();
FileStatus fsd = wrapper.getFileLinkStatus(link);
assertEquals(fileQual, fsd.getSymlink());
assertTrue(fsd.isSymlink());
assertFalse(fsd.isDirectory());
assertEquals(user.getUserName(), fsd.getOwner());
// Compare against user's primary group
assertEquals(user.getGroupNames()[0], fsd.getGroup());
assertEquals(linkQual, fsd.getPath());
// Accessing the link
try {
readFile(link);
fail("Got FileStatus for dangling link");
} catch (FileNotFoundException f) {
// Ditto.
}
// Creating the file makes the link work
createAndWriteFile(fileAbs);
wrapper.getFileStatus(link);
}
@Test
@Timeout(value = 10)
/**
* Test getLinkTarget with a partially qualified target.
* NB: Hadoop does not support fully qualified URIs for the
* file scheme (eg file://host/tmp/test).
*/
public void testGetLinkStatusPartQualTarget() throws IOException {
Path fileAbs = new Path(testBaseDir1()+"/file");
Path fileQual = new Path(testURI().toString(), fileAbs);
Path dir = new Path(testBaseDir1());
Path link = new Path(testBaseDir1()+"/linkToFile");
Path dirNew = new Path(testBaseDir2());
Path linkNew = new Path(testBaseDir2()+"/linkToFile");
wrapper.delete(dirNew, true);
createAndWriteFile(fileQual);
wrapper.setWorkingDirectory(dir);
// Link target is partially qualified, we get the same back.
wrapper.createSymlink(fileQual, link, false);
assertEquals(fileQual, wrapper.getFileLinkStatus(link).getSymlink());
// Because the target was specified with an absolute path the
// link fails to resolve after moving the parent directory.
wrapper.rename(dir, dirNew);
// The target is still the old path
assertEquals(fileQual, wrapper.getFileLinkStatus(linkNew).getSymlink());
try {
readFile(linkNew);
fail("The link should be dangling now.");
} catch (FileNotFoundException x) {
// Expected.
}
// RawLocalFs only maintains the path part, not the URI, and
// therefore does not support links to other file systems.
Path anotherFs = new Path("hdfs://host:1000/dir/file");
FileUtil.fullyDelete(new File(linkNew.toString()));
try {
wrapper.createSymlink(anotherFs, linkNew, false);
fail("Created a local fs link to a non-local fs");
} catch (IOException x) {
// Excpected.
}
}
/** Test create symlink to . */
@Override
public void testCreateLinkToDot() throws IOException {
try {
super.testCreateLinkToDot();
} catch (IllegalArgumentException iae) {
// Expected.
}
}
@Override
public void testSetTimesSymlinkToFile() throws IOException {
assumeTrue(!Shell.WINDOWS && !Shell.SOLARIS);
super.testSetTimesSymlinkToFile();
}
@Override
public void testSetTimesSymlinkToDir() throws IOException {
assumeTrue(!Path.WINDOWS && !Shell.SOLARIS);
super.testSetTimesSymlinkToDir();
}
@Override
public void testSetTimesDanglingLink() throws IOException {
assumeNotWindows();
super.testSetTimesDanglingLink();
}
}
| TestSymlinkLocalFS |
java | playframework__playframework | core/play-guice/src/main/java/play/libs/pekko/PekkoGuiceSupport.java | {
"start": 1638,
"end": 2666
} | class ____ implements the actor.
* @param name The name of the actor.
* @param props A function to provide props for the actor. The props passed in will just describe
* how to create the actor, this function can be used to provide additional configuration such
* as router and dispatcher configuration.
*/
default <T extends Actor> void bindActor(
Class<T> actorClass, String name, Function<Props, Props> props) {
BinderAccessor.binder(this)
.bind(ActorRef.class)
.annotatedWith(Names.named(name))
.toProvider(Providers.guicify(Pekko.providerOf(actorClass, name, props)))
.asEagerSingleton();
}
/**
* Bind an actor.
*
* <p>This will cause the actor to be instantiated by Guice, allowing it to be dependency injected
* itself. It will bind the returned ActorRef for the actor will be bound, qualified with the
* passed in name, so that it can be injected into other components.
*
* @param <T> the actor type.
* @param actorClass The | that |
java | apache__flink | flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/catalog/listener/AlterTableEvent.java | {
"start": 1135,
"end": 2359
} | interface ____ extends TableModificationEvent {
CatalogBaseTable newTable();
boolean ignoreIfNotExists();
static AlterTableEvent createEvent(
final CatalogContext context,
final ObjectIdentifier identifier,
final CatalogBaseTable newTable,
final boolean ignoreIfNotExists) {
return new AlterTableEvent() {
@Override
public CatalogBaseTable newTable() {
return newTable;
}
@Override
public boolean ignoreIfNotExists() {
return ignoreIfNotExists;
}
@Override
public ObjectIdentifier identifier() {
return identifier;
}
@Override
public CatalogBaseTable table() {
throw new IllegalStateException(
"There is no table in AlterTableEvent, use identifier() instead.");
}
@Override
public boolean isTemporary() {
return false;
}
@Override
public CatalogContext context() {
return context;
}
};
}
}
| AlterTableEvent |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/OnCompletionBridgeErrorHandlerTest.java | {
"start": 1524,
"end": 2696
} | class ____ extends ContextTestSupport {
protected final CountDownLatch latch = new CountDownLatch(1);
@Test
public void test() throws Exception {
getMockEndpoint("mock:result").expectedMessageCount(0);
getMockEndpoint("mock:dead").expectedMessageCount(1);
latch.countDown();
assertMockEndpointsSatisfied();
Exception cause = getMockEndpoint("mock:dead").getReceivedExchanges().get(0).getProperty(Exchange.EXCEPTION_CAUGHT,
Exception.class);
assertNotNull(cause);
assertEquals("Simulated", cause.getMessage());
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
getContext().addComponent("my", new MyComponent());
errorHandler(noErrorHandler());
from("my:foo?bridgeErrorHandler=true")
.onCompletion()
.to("mock:dead")
.end()
.to("mock:result");
}
};
}
public static | OnCompletionBridgeErrorHandlerTest |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/source/ignore/IgnoreUnmappedSourceMapper.java | {
"start": 448,
"end": 643
} | interface ____ {
@BeanMapping(
ignoreUnmappedSourceProperties = {
"name",
"surname"
}
)
PersonDto map(Person person);
}
| IgnoreUnmappedSourceMapper |
java | hibernate__hibernate-orm | tooling/metamodel-generator/src/jakartaData/java/org/hibernate/processor/test/data/generic/DataRepositoryGenericParametersTest.java | {
"start": 13949,
"end": 14055
} | class ____ {
@Id
@GeneratedValue
UUID id;
String name;
Integer age;
}
@Repository
public | Cat |
java | netty__netty | handler/src/test/java/io/netty/handler/ssl/OpenSslKeyMaterialProviderTest.java | {
"start": 3679,
"end": 7322
} | class ____ implements X509KeyManager {
private final String keyAlias;
private final PrivateKey pk;
private final X509Certificate[] certChain;
SingleKeyManager(String keyAlias, PrivateKey pk, X509Certificate[] certChain) {
this.keyAlias = keyAlias;
this.pk = pk;
this.certChain = certChain;
}
@Override
public String[] getClientAliases(String keyType, Principal[] issuers) {
return new String[]{keyAlias};
}
@Override
public String chooseClientAlias(String[] keyType, Principal[] issuers, Socket socket) {
return keyAlias;
}
@Override
public String[] getServerAliases(String keyType, Principal[] issuers) {
return new String[]{keyAlias};
}
@Override
public String chooseServerAlias(String keyType, Principal[] issuers, Socket socket) {
return keyAlias;
}
@Override
public X509Certificate[] getCertificateChain(String alias) {
return certChain;
}
@Override
public PrivateKey getPrivateKey(String alias) {
return pk;
}
}
@Test
public void testChooseOpenSslPrivateKeyMaterial() throws Exception {
PrivateKey privateKey;
try (InputStream keyInputStream = getClass().getResourceAsStream("localhost_server.key")) {
privateKey = SslContext.toPrivateKey(
keyInputStream,
null);
}
assertNotNull(privateKey);
assertEquals("PKCS#8", privateKey.getFormat());
try (InputStream resourceAsStream = getClass().getResourceAsStream("localhost_server.pem")) {
final X509Certificate[] certChain = SslContext.toX509Certificates(resourceAsStream);
assertNotNull(certChain);
PemEncoded pemKey = null;
long pkeyBio = 0L;
OpenSslPrivateKey sslPrivateKey;
try {
pemKey = PemPrivateKey.toPEM(ByteBufAllocator.DEFAULT, true, privateKey);
pkeyBio = ReferenceCountedOpenSslContext.toBIO(ByteBufAllocator.DEFAULT, pemKey.retain());
sslPrivateKey = new OpenSslPrivateKey(SSL.parsePrivateKey(pkeyBio, null));
} finally {
ReferenceCountUtil.safeRelease(pemKey);
if (pkeyBio != 0L) {
SSL.freeBIO(pkeyBio);
}
}
final String keyAlias = "key";
OpenSslKeyMaterialProvider provider = new OpenSslKeyMaterialProvider(
new SingleKeyManager(keyAlias, sslPrivateKey, certChain),
null);
OpenSslKeyMaterial material = provider.chooseKeyMaterial(ByteBufAllocator.DEFAULT, keyAlias);
assertNotNull(material);
assertEquals(2, sslPrivateKey.refCnt());
assertEquals(1, material.refCnt());
assertTrue(material.release());
assertEquals(1, sslPrivateKey.refCnt());
// Can get material multiple times from the same key
material = provider.chooseKeyMaterial(ByteBufAllocator.DEFAULT, keyAlias);
assertNotNull(material);
assertEquals(2, sslPrivateKey.refCnt());
assertTrue(material.release());
assertTrue(sslPrivateKey.release());
assertEquals(0, sslPrivateKey.refCnt());
assertEquals(0, material.refCnt());
assertEquals(0, ((OpenSslPrivateKey.OpenSslPrivateKeyMaterial) material).certificateChain);
}
}
}
| SingleKeyManager |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/dynamic/output/CommandOutputResolverSupport.java | {
"start": 436,
"end": 1217
} | class ____ {
/**
* Overridable hook to check whether {@code selector} can be assigned from the provider type {@code provider}.
* <p>
* This method descends the component type hierarchy and considers primitive/wrapper type conversion.
*
* @param selector must not be {@code null}.
* @param provider must not be {@code null}.
* @return {@code true} if selector can be assigned from its provider type.
*/
protected boolean isAssignableFrom(OutputSelector selector, OutputType provider) {
ResolvableType selectorType = selector.getOutputType();
ResolvableType resolvableType = provider.withCodec(selector.getRedisCodec());
return selectorType.isAssignableFrom(resolvableType);
}
}
| CommandOutputResolverSupport |
java | apache__camel | components/camel-aws/camel-aws-config/src/main/java/org/apache/camel/component/aws/config/AWSConfigConfiguration.java | {
"start": 1174,
"end": 8749
} | class ____ implements Cloneable {
@UriPath(description = "Logical name")
@Metadata(required = true)
private String label;
@UriParam
@Metadata(label = "advanced", autowired = true)
private ConfigClient configClient;
@UriParam(label = "security", secret = true)
private String accessKey;
@UriParam(label = "security", secret = true)
private String secretKey;
@UriParam(label = "security", secret = true)
private String sessionToken;
@UriParam
@Metadata(required = true)
private AWSConfigOperations operation;
@UriParam(label = "proxy", enums = "HTTP,HTTPS", defaultValue = "HTTPS")
private Protocol proxyProtocol = Protocol.HTTPS;
@UriParam(label = "proxy")
private String proxyHost;
@UriParam(label = "proxy")
private Integer proxyPort;
@UriParam(enums = "ap-south-2,ap-south-1,eu-south-1,eu-south-2,us-gov-east-1,me-central-1,il-central-1,ca-central-1,eu-central-1,us-iso-west-1,eu-central-2,eu-isoe-west-1,us-west-1,us-west-2,af-south-1,eu-north-1,eu-west-3,eu-west-2,eu-west-1,ap-northeast-3,ap-northeast-2,ap-northeast-1,me-south-1,sa-east-1,ap-east-1,cn-north-1,ca-west-1,us-gov-west-1,ap-southeast-1,ap-southeast-2,us-iso-east-1,ap-southeast-3,ap-southeast-4,us-east-1,us-east-2,cn-northwest-1,us-isob-east-1,aws-global,aws-cn-global,aws-us-gov-global,aws-iso-global,aws-iso-b-global")
private String region;
@UriParam
private boolean pojoRequest;
@UriParam(label = "security")
private boolean trustAllCertificates;
@UriParam
private boolean overrideEndpoint;
@UriParam
private String uriEndpointOverride;
@UriParam(label = "security")
private boolean useDefaultCredentialsProvider;
@UriParam(label = "security")
private boolean useProfileCredentialsProvider;
@UriParam(label = "security")
private boolean useSessionCredentials;
@UriParam(label = "security")
private String profileCredentialsName;
public ConfigClient getConfigClient() {
return configClient;
}
/**
* Amazon AWS Config Client instance
*/
public void setConfigClient(ConfigClient configClient) {
this.configClient = configClient;
}
public String getAccessKey() {
return accessKey;
}
/**
* Amazon AWS Access Key
*/
public void setAccessKey(String accessKey) {
this.accessKey = accessKey;
}
public String getSecretKey() {
return secretKey;
}
/**
* Amazon AWS Secret Key
*/
public void setSecretKey(String secretKey) {
this.secretKey = secretKey;
}
public String getSessionToken() {
return sessionToken;
}
/**
* Amazon AWS Session Token used when the user needs to assume an IAM role
*/
public void setSessionToken(String sessionToken) {
this.sessionToken = sessionToken;
}
public AWSConfigOperations getOperation() {
return operation;
}
/**
* The operation to perform
*/
public void setOperation(AWSConfigOperations operation) {
this.operation = operation;
}
public Protocol getProxyProtocol() {
return proxyProtocol;
}
/**
* To define a proxy protocol when instantiating the Config client
*/
public void setProxyProtocol(Protocol proxyProtocol) {
this.proxyProtocol = proxyProtocol;
}
public String getProxyHost() {
return proxyHost;
}
/**
* To define a proxy host when instantiating the Config client
*/
public void setProxyHost(String proxyHost) {
this.proxyHost = proxyHost;
}
public Integer getProxyPort() {
return proxyPort;
}
/**
* To define a proxy port when instantiating the Config client
*/
public void setProxyPort(Integer proxyPort) {
this.proxyPort = proxyPort;
}
public String getRegion() {
return region;
}
/**
* The region in which the Config client needs to work. When using this parameter, the configuration will expect the
* lowercase name of the region (for example, ap-east-1) You'll need to use the name Region.EU_WEST_1.id()
*/
public void setRegion(String region) {
this.region = region;
}
public boolean isPojoRequest() {
return pojoRequest;
}
/**
* If we want to use a POJO request as body or not
*/
public void setPojoRequest(boolean pojoRequest) {
this.pojoRequest = pojoRequest;
}
public boolean isTrustAllCertificates() {
return trustAllCertificates;
}
/**
* If we want to trust all certificates in case of overriding the endpoint
*/
public void setTrustAllCertificates(boolean trustAllCertificates) {
this.trustAllCertificates = trustAllCertificates;
}
public boolean isOverrideEndpoint() {
return overrideEndpoint;
}
/**
* Set the need for overriding the endpoint. This option needs to be used in combination with the
* uriEndpointOverride option
*/
public void setOverrideEndpoint(boolean overrideEndpoint) {
this.overrideEndpoint = overrideEndpoint;
}
public String getUriEndpointOverride() {
return uriEndpointOverride;
}
/**
* Set the overriding uri endpoint. This option needs to be used in combination with overrideEndpoint option
*/
public void setUriEndpointOverride(String uriEndpointOverride) {
this.uriEndpointOverride = uriEndpointOverride;
}
/**
* Set whether the Config client should expect to load credentials through a default credentials provider or to
* expect static credentials to be passed in.
*/
public void setUseDefaultCredentialsProvider(Boolean useDefaultCredentialsProvider) {
this.useDefaultCredentialsProvider = useDefaultCredentialsProvider;
}
public Boolean isUseDefaultCredentialsProvider() {
return useDefaultCredentialsProvider;
}
public boolean isUseProfileCredentialsProvider() {
return useProfileCredentialsProvider;
}
/**
* Set whether the Config client should expect to load credentials through a profile credentials provider.
*/
public void setUseProfileCredentialsProvider(boolean useProfileCredentialsProvider) {
this.useProfileCredentialsProvider = useProfileCredentialsProvider;
}
public boolean isUseSessionCredentials() {
return useSessionCredentials;
}
/**
* Set whether the Config client should expect to use Session Credentials. This is useful in a situation in which
* the user needs to assume an IAM role for doing operations in Config.
*/
public void setUseSessionCredentials(boolean useSessionCredentials) {
this.useSessionCredentials = useSessionCredentials;
}
public String getProfileCredentialsName() {
return profileCredentialsName;
}
/**
* If using a profile credentials provider, this parameter will set the profile name
*/
public void setProfileCredentialsName(String profileCredentialsName) {
this.profileCredentialsName = profileCredentialsName;
}
// *************************************************
//
// *************************************************
public AWSConfigConfiguration copy() {
try {
return (AWSConfigConfiguration) super.clone();
} catch (CloneNotSupportedException e) {
throw new RuntimeCamelException(e);
}
}
}
| AWSConfigConfiguration |
java | quarkusio__quarkus | extensions/hibernate-orm/deployment/src/main/java/io/quarkus/hibernate/orm/deployment/AdditionalJpaModelBuildItem.java | {
"start": 717,
"end": 1028
} | class ____ extends MultiBuildItem {
private final String className;
public AdditionalJpaModelBuildItem(String className) {
Objects.requireNonNull(className);
this.className = className;
}
public String getClassName() {
return className;
}
}
| AdditionalJpaModelBuildItem |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/layout/AbstractJacksonLayout.java | {
"start": 14840,
"end": 15346
} | class ____ {
/**
* The empty array.
*/
static final ResolvableKeyValuePair[] EMPTY_ARRAY = {};
final String key;
final String value;
final boolean valueNeedsLookup;
ResolvableKeyValuePair(final KeyValuePair pair) {
this.key = pair.getKey();
this.value = pair.getValue();
this.valueNeedsLookup = AbstractJacksonLayout.valueNeedsLookup(this.value);
}
}
private static | ResolvableKeyValuePair |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStreamTests.java | {
"start": 13496,
"end": 13840
} | class ____ extends FilterInputStream {
public boolean closed = false;
CheckClosedInputStream(InputStream in) {
super(in);
}
@Override
public void close() throws IOException {
closed = true;
super.close();
}
}
private static final | CheckClosedInputStream |
java | elastic__elasticsearch | modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessExplainError.java | {
"start": 1060,
"end": 2455
} | class ____ extends Error {
private final Object objectToExplain;
public PainlessExplainError(Object objectToExplain) {
this.objectToExplain = objectToExplain;
}
Object getObjectToExplain() {
return objectToExplain;
}
/**
* Headers to be added to the {@link ScriptException} for structured rendering.
*/
public Map<String, List<String>> getHeaders(PainlessLookup painlessLookup) {
Map<String, List<String>> headers = new TreeMap<>();
String toString = "null";
String javaClassName = null;
String painlessClassName = null;
if (objectToExplain != null) {
toString = objectToExplain.toString();
javaClassName = objectToExplain.getClass().getName();
PainlessClass struct = painlessLookup.lookupPainlessClass(objectToExplain.getClass());
if (struct != null) {
painlessClassName = PainlessLookupUtility.typeToCanonicalTypeName(objectToExplain.getClass());
}
}
headers.put("es.to_string", singletonList(toString));
if (painlessClassName != null) {
headers.put("es.painless_class", singletonList(painlessClassName));
}
if (javaClassName != null) {
headers.put("es.java_class", singletonList(javaClassName));
}
return headers;
}
}
| PainlessExplainError |
java | spring-projects__spring-boot | module/spring-boot-restdocs/src/main/java/org/springframework/boot/restdocs/test/autoconfigure/RestDocumentationContextProviderRegistrar.java | {
"start": 1354,
"end": 2218
} | class ____ implements ImportBeanDefinitionRegistrar {
@Override
public void registerBeanDefinitions(AnnotationMetadata importingClassMetadata, BeanDefinitionRegistry registry) {
Map<String, @Nullable Object> annotationAttributes = importingClassMetadata
.getAnnotationAttributes(AutoConfigureRestDocs.class.getName());
BeanDefinitionBuilder definitionBuilder = BeanDefinitionBuilder
.rootBeanDefinition(ManualRestDocumentation.class);
Assert.state(annotationAttributes != null, "'annotationAttributes' must not be null");
String outputDir = (String) annotationAttributes.get("outputDir");
if (StringUtils.hasText(outputDir)) {
definitionBuilder.addConstructorArgValue(outputDir);
}
registry.registerBeanDefinition(ManualRestDocumentation.class.getName(), definitionBuilder.getBeanDefinition());
}
}
| RestDocumentationContextProviderRegistrar |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/selection/wildcards/WildCardTest.java | {
"start": 427,
"end": 1705
} | class ____ {
@ProcessorTest
@WithClasses( SourceWildCardExtendsMapper.class )
public void testWildCardAsSourceType() {
// prepare source
SourceWildCardExtendsMapper.Wrapper<BigInteger> wrapper =
new SourceWildCardExtendsMapper.Wrapper<>( new BigInteger( "5" ) );
SourceWildCardExtendsMapper.Source source = new SourceWildCardExtendsMapper.Source( wrapper );
// action
SourceWildCardExtendsMapper.Target target = SourceWildCardExtendsMapper.INSTANCE.map( source );
// verify target
assertThat( target ).isNotNull();
assertThat( target.getProp() ).isEqualTo( "5" );
}
@ProcessorTest
@WithClasses( ReturnTypeWildCardExtendsMapper.class )
public void testWildCardAsReturnType() {
// prepare source
ReturnTypeWildCardExtendsMapper.Source source = new ReturnTypeWildCardExtendsMapper.Source( "5" );
// action
ReturnTypeWildCardExtendsMapper.Target target = ReturnTypeWildCardExtendsMapper.INSTANCE.map( source );
// verify target
assertThat( target ).isNotNull();
assertThat( target.getProp() ).isNotNull();
assertThat( target.getProp().getWrapped() ).isEqualTo( BigInteger.valueOf( 5 ) );
}
}
| WildCardTest |
java | apache__hadoop | hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/EncryptionTestUtils.java | {
"start": 1565,
"end": 5899
} | class ____ {
/** Private constructor */
private EncryptionTestUtils() {
}
public static final String AWS_KMS_SSE_ALGORITHM = "aws:kms";
public static final String AWS_KMS_DSSE_ALGORITHM = "aws:kms:dsse";
public static final String SSE_C_ALGORITHM = "AES256";
/**
* Decodes the SERVER_SIDE_ENCRYPTION_KEY from base64 into an AES key, then
* gets the md5 of it, then encodes it in base64 so it will match the version
* that AWS returns to us.
*
* @return md5'd base64 encoded representation of the server side encryption
* key
*/
public static String convertKeyToMd5(FileSystem fs) {
String base64Key = fs.getConf().getTrimmed(
S3_ENCRYPTION_KEY
);
byte[] key = Base64.decodeBase64(base64Key);
byte[] md5 = DigestUtils.md5(key);
return Base64.encodeBase64String(md5).trim();
}
/**
* Assert that a path is encrypted with right encryption settings.
* @param path file path.
* @param algorithm encryption algorithm.
* @param kmsKeyArn full kms key.
*/
public static void assertEncrypted(S3AFileSystem fs,
final Path path,
final S3AEncryptionMethods algorithm,
final String kmsKeyArn)
throws IOException {
HeadObjectResponse md = fs.getS3AInternals().getObjectMetadata(path);
String details = String.format(
"file %s with encryption algorithm %s and key %s",
path,
md.serverSideEncryptionAsString(),
md.ssekmsKeyId());
switch(algorithm) {
case SSE_C:
assertThat(md.serverSideEncryptionAsString())
.describedAs("Details of the server-side encryption algorithm used: %s", details)
.isNull();
assertThat(md.sseCustomerAlgorithm())
.describedAs("Details of SSE-C algorithm: %s", details)
.isEqualTo(SSE_C_ALGORITHM);
String md5Key = convertKeyToMd5(fs);
assertThat(md.sseCustomerKeyMD5())
.describedAs("Details of the customer provided encryption key: %s", details)
.isEqualTo(md5Key);
break;
case SSE_KMS:
assertThat(md.serverSideEncryptionAsString())
.describedAs("Details of the server-side encryption algorithm used: %s", details)
.isEqualTo(AWS_KMS_SSE_ALGORITHM);
assertThat(md.ssekmsKeyId())
.describedAs("Details of the KMS key: %s", details)
.isEqualTo(kmsKeyArn);
break;
case DSSE_KMS:
assertThat(md.serverSideEncryptionAsString())
.describedAs("Details of the server-side encryption algorithm used: %s", details)
.isEqualTo(AWS_KMS_DSSE_ALGORITHM);
assertThat(md.ssekmsKeyId())
.describedAs("Details of the KMS key: %s", details)
.isEqualTo(kmsKeyArn);
break;
default:
assertThat(md.serverSideEncryptionAsString())
.isEqualTo("AES256");
}
}
/**
* Assert that a path is encrypted with right encryption settings.
* @param fs filesystem.
* @param path path
* @param algorithm encryption algorithm.
* @param kmsKey full kms key if present.
* @throws IOException any IOE.
*/
public static void validateEncryptionFileAttributes(S3AFileSystem fs,
Path path,
String algorithm,
Optional<String> kmsKey) throws IOException {
Map<String, byte[]> xAttrs = fs.getXAttrs(path);
Assertions.assertThat(xAttrs.get(XA_SERVER_SIDE_ENCRYPTION))
.describedAs("Server side encryption must not be null")
.isNotNull();
Assertions.assertThat(HeaderProcessing.decodeBytes(xAttrs.get(XA_SERVER_SIDE_ENCRYPTION)))
.describedAs("Server side encryption algorithm must match")
.isEqualTo(algorithm);
Assertions.assertThat(xAttrs)
.describedAs("Encryption key id should be present")
.containsKey(XA_ENCRYPTION_KEY_ID);
kmsKey.ifPresent(s -> Assertions
.assertThat(HeaderProcessing.decodeBytes(xAttrs.get(XA_ENCRYPTION_KEY_ID)))
.describedAs("Encryption key id should match with the kms key")
.isEqualTo(s));
}
}
| EncryptionTestUtils |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestCompositeService.java | {
"start": 31157,
"end": 31540
} | class ____ extends CompositeService{
Service child;
public CompositeServiceAddingAChild(Service child) {
super("CompositeServiceAddingAChild");
this.child = child;
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
addService(child);
super.serviceInit(conf);
}
}
public static | CompositeServiceAddingAChild |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/oracle/pl/Oracle_pl_for_2.java | {
"start": 923,
"end": 4834
} | class ____ extends OracleTest {
public void test_0() throws Exception {
String sql = "DROP TABLE employees_temp;\n" +
"CREATE TABLE employees_temp AS SELECT * FROM employees;\n" +
"\n" +
"DECLARE\n" +
" TYPE NumList IS VARRAY(20) OF NUMBER;\n" +
" depts NumList := NumList(10, 30, 70); -- department numbers\n" +
"BEGIN\n" +
" FOR i IN depts.FIRST..depts.LAST LOOP\n" +
" DELETE FROM employees_temp\n" +
" WHERE department_id = depts(i);\n" +
" END LOOP;\n" +
"END;";
List<SQLStatement> statementList = SQLUtils.parseStatements(sql, JdbcConstants.ORACLE);
assertEquals(3, statementList.size());
SQLStatement stmt = statementList.get(0);
SchemaStatVisitor visitor = SQLUtils.createSchemaStatVisitor(JdbcConstants.ORACLE);
for (SQLStatement statement : statementList) {
statement.accept(visitor);
}
// System.out.println("Tables : " + visitor.getTables());
// System.out.println("fields : " + visitor.getColumns());
// System.out.println("coditions : " + visitor.getConditions());
// System.out.println("relationships : " + visitor.getRelationships());
// System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(2, visitor.getTables().size());
// assertTrue(visitor.getTables().containsKey(new TableStat.Name("employees")));
// assertTrue(visitor.getTables().containsKey(new TableStat.Name("emp_name")));
// assertEquals(7, visitor.getColumns().size());
// assertEquals(3, visitor.getConditions().size());
// assertEquals(1, visitor.getRelationships().size());
// assertTrue(visitor.getColumns().contains(new TableStat.Column("employees", "salary")));
{
String output = SQLUtils.toSQLString(statementList, JdbcConstants.ORACLE);
System.out.println(output);
assertEquals("DROP TABLE employees_temp;\n" +
"CREATE TABLE employees_temp\n" +
"AS\n" +
"SELECT *\n" +
"FROM employees;\n" +
"DECLARE\n" +
"\tTYPE NumList IS VARRAY(20) OF NUMBER;\n" +
"\tdepts NumList := NumList(10, 30, 70);\n" +
"BEGIN\n" +
"\tFOR i IN depts.FIRST..depts.LAST\n" +
"\tLOOP\n" +
"\t\tDELETE FROM employees_temp\n" +
"\t\tWHERE department_id = depts(i);\n" +
"\tEND LOOP;\n" +
"END;", //
output);
}
{
String output = SQLUtils.toSQLString(statementList, JdbcConstants.ORACLE, SQLUtils.DEFAULT_LCASE_FORMAT_OPTION);
assertEquals("drop table employees_temp;\n" +
"create table employees_temp\n" +
"as\n" +
"select *\n" +
"from employees;\n" +
"declare\n" +
"\ttype NumList is VARRAY(20) OF NUMBER;\n" +
"\tdepts NumList := NumList(10, 30, 70);\n" +
"begin\n" +
"\tfor i in depts.FIRST..depts.LAST\n" +
"\tloop\n" +
"\t\tdelete from employees_temp\n" +
"\t\twhere department_id = depts(i);\n" +
"\tend loop;\n" +
"end;",
output);
}
}
}
| Oracle_pl_for_2 |
java | micronaut-projects__micronaut-core | context/src/main/java/io/micronaut/runtime/beans/MapperIntroduction.java | {
"start": 32437,
"end": 34857
} | class ____<B> implements MappingBuilder<B> {
private final BeanIntrospection.Builder<B> builder;
private final MergeStrategy mergeStrategy;
private final Argument<?>[] arguments;
private final Object[] params;
private int argIndex = 0;
public MergeMappingBuilder(
BeanIntrospection.Builder<B> builder,
MergeStrategy mergeStrategy
) {
this.builder = builder;
this.arguments = builder.getBuilderArguments();
this.params = new Object[arguments.length];
this.mergeStrategy = mergeStrategy;
}
public MergeMappingBuilder<B> setArgIndex(int argIndex) {
this.argIndex = argIndex;
return this;
}
@Override
public <A> MappingBuilder<B> with(int index, Argument<A> argument, A value, String mappedPropertyName, Object owner) {
if (argIndex == 0) {
params[index] = value;
} else {
params[index] = mergeStrategy.merge(params[index], value, owner, argument.getName(), mappedPropertyName);
}
return this;
}
@Override
public @NonNull Argument<?>[] getBuilderArguments() {
return arguments;
}
@Override
public int indexOf(String name) {
return builder.indexOf(name);
}
@Override
public B build(Object... builderParams) {
for (int i = 0; i < params.length; i++) {
if (params[i] == null) {
continue;
}
builder.with(i, (Argument<Object>) arguments[i], params[i]);
}
return builder.build(builderParams);
}
@Override
public <A> MappingBuilder<B> convert(int index, ArgumentConversionContext<A> conversionContext, A value, ConversionService conversionService, String mappedPropertyName, Object owner) {
Argument<A> argument = conversionContext.getArgument();
if (value != null) {
if (!argument.isInstance(value)) {
value = conversionService.convertRequired(value, conversionContext);
}
with(index, argument, value, mappedPropertyName, owner);
}
return this;
}
}
private static final | MergeMappingBuilder |
java | elastic__elasticsearch | plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/management/AzureComputeService.java | {
"start": 1098,
"end": 2786
} | class ____ {
public static final Setting<String> SUBSCRIPTION_ID_SETTING = Setting.simpleString(
"cloud.azure.management.subscription.id",
Property.NodeScope,
Property.Filtered
);
public static final Setting<String> SERVICE_NAME_SETTING = Setting.simpleString(
"cloud.azure.management.cloud.service.name",
Property.NodeScope
);
// Keystore settings
public static final Setting<String> KEYSTORE_PATH_SETTING = Setting.simpleString(
"cloud.azure.management.keystore.path",
Property.NodeScope,
Property.Filtered
);
public static final Setting<String> KEYSTORE_PASSWORD_SETTING = Setting.simpleString(
"cloud.azure.management.keystore.password",
Property.NodeScope,
Property.Filtered
);
public static final Setting<KeyStoreType> KEYSTORE_TYPE_SETTING = new Setting<>(
"cloud.azure.management.keystore.type",
KeyStoreType.pkcs12.name(),
KeyStoreType::fromString,
Property.NodeScope,
Property.Filtered
);
// so that it can overridden for tests
public static final Setting<URI> ENDPOINT_SETTING = new Setting<>(
"cloud.azure.management.endpoint",
"https://management.core.windows.net/",
s -> {
try {
return new URI(s);
} catch (URISyntaxException e) {
throw new IllegalArgumentException(e);
}
},
Property.NodeScope
);
}
final | Management |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/ClassificationFeatureImportance.java | {
"start": 1221,
"end": 3121
} | class ____ extends AbstractFeatureImportance {
private final List<ClassImportance> classImportance;
private final String featureName;
static final String FEATURE_NAME = "feature_name";
static final String CLASSES = "classes";
@SuppressWarnings("unchecked")
private static final ConstructingObjectParser<ClassificationFeatureImportance, Void> PARSER = new ConstructingObjectParser<>(
"classification_feature_importance",
a -> new ClassificationFeatureImportance((String) a[0], (List<ClassImportance>) a[1])
);
static {
PARSER.declareString(constructorArg(), new ParseField(ClassificationFeatureImportance.FEATURE_NAME));
PARSER.declareObjectArray(
optionalConstructorArg(),
(p, c) -> ClassImportance.fromXContent(p),
new ParseField(ClassificationFeatureImportance.CLASSES)
);
}
public static ClassificationFeatureImportance fromXContent(XContentParser parser) {
return PARSER.apply(parser, null);
}
public ClassificationFeatureImportance(String featureName, List<ClassImportance> classImportance) {
this.featureName = Objects.requireNonNull(featureName);
this.classImportance = classImportance == null ? Collections.emptyList() : Collections.unmodifiableList(classImportance);
}
public ClassificationFeatureImportance(StreamInput in) throws IOException {
this.featureName = in.readString();
this.classImportance = in.readCollectionAsList(ClassImportance::new);
}
public List<ClassImportance> getClassImportance() {
return classImportance;
}
@Override
public String getFeatureName() {
return featureName;
}
public double getTotalImportance() {
if (classImportance.size() == 2) {
// Binary classification. We can return the first | ClassificationFeatureImportance |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/entities/onetomany/detached/SetJoinColumnRefCollEntity.java | {
"start": 695,
"end": 2225
} | class ____ {
@Id
private Integer id;
@Audited
private String data;
@Audited
@OneToMany
@JoinColumn(name = "SJCR_ID")
@AuditJoinTable(name = "SetJoinColRefColl_StrTest_AUD")
private Set<StrTestEntity> collection;
public SetJoinColumnRefCollEntity() {
}
public SetJoinColumnRefCollEntity(Integer id, String data) {
this.id = id;
this.data = data;
}
public SetJoinColumnRefCollEntity(String data) {
this.data = data;
}
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getData() {
return data;
}
public void setData(String data) {
this.data = data;
}
public Set<StrTestEntity> getCollection() {
return collection;
}
public void setCollection(Set<StrTestEntity> collection) {
this.collection = collection;
}
public boolean equals(Object o) {
if ( this == o ) {
return true;
}
if ( !(o instanceof SetJoinColumnRefCollEntity) ) {
return false;
}
SetJoinColumnRefCollEntity that = (SetJoinColumnRefCollEntity) o;
if ( data != null ? !data.equals( that.data ) : that.data != null ) {
return false;
}
if ( id != null ? !id.equals( that.id ) : that.id != null ) {
return false;
}
return true;
}
public int hashCode() {
int result;
result = (id != null ? id.hashCode() : 0);
result = 31 * result + (data != null ? data.hashCode() : 0);
return result;
}
public String toString() {
return "SetJoinColumnRefCollEntity(id = " + id + ", data = " + data + ")";
}
}
| SetJoinColumnRefCollEntity |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/context/logging/LoggingApplicationListenerTests.java | {
"start": 30454,
"end": 31242
} | class ____ extends LoggingSystem {
BrokenInitializationLoggingSystem(ClassLoader classLoader) {
}
@Override
public void beforeInitialize() {
}
@Override
public void initialize(LoggingInitializationContext initializationContext, @Nullable String configLocation,
@Nullable LogFile logFile) {
throw new Error("Deliberately broken");
}
@Override
public void setLogLevel(@Nullable String loggerName, @Nullable LogLevel level) {
}
@Override
public List<LoggerConfiguration> getLoggerConfigurations() {
return Collections.emptyList();
}
@Override
public @Nullable LoggerConfiguration getLoggerConfiguration(String loggerName) {
return null;
}
}
@Configuration
@Import(WebServerStyleLifecycle.class)
static | BrokenInitializationLoggingSystem |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/rest/StreamingXContentResponse.java | {
"start": 11698,
"end": 11924
} | class ____ any time, in the sense that one such instance becoming inactive
* <i>happens-before</i> the creation of the next instance. One of these parts may send chunks for more than one fragment.
*/
private final | at |
java | elastic__elasticsearch | x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/request/IbmWatsonxChatCompletionRequestEntityTests.java | {
"start": 1110,
"end": 2695
} | class ____ extends ESTestCase {
private static final String ROLE = "user";
public void testModelUserFieldsSerialization() throws IOException, URISyntaxException {
UnifiedCompletionRequest.Message message = new UnifiedCompletionRequest.Message(
new UnifiedCompletionRequest.ContentString("test content"),
ROLE,
null,
null
);
var messageList = new ArrayList<UnifiedCompletionRequest.Message>();
messageList.add(message);
var unifiedRequest = UnifiedCompletionRequest.of(messageList);
UnifiedChatInput unifiedChatInput = new UnifiedChatInput(unifiedRequest, true);
IbmWatsonxChatCompletionModel model = createModel(new URI("abc.com"), "apiVersion", "modelId", "projectId", "apiKey");
IbmWatsonxChatCompletionRequestEntity entity = new IbmWatsonxChatCompletionRequestEntity(unifiedChatInput, model);
XContentBuilder builder = JsonXContent.contentBuilder();
entity.toXContent(builder, ToXContent.EMPTY_PARAMS);
String expectedJson = """
{
"project_id": "projectId",
"messages": [
{
"content": "test content",
"role": "user"
}
],
"model": "modelId",
"n": 1,
"stream": true
}
""";
assertEquals(XContentHelper.stripWhitespace(expectedJson), Strings.toString(builder));
}
}
| IbmWatsonxChatCompletionRequestEntityTests |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/context/annotation/configuration/ImportTests.java | {
"start": 8230,
"end": 8364
} | class ____ {
@Bean
ITestBean two() {
return new TestBean();
}
}
@Configuration
static | OtherConfigurationWithImportAnnotation |
java | elastic__elasticsearch | x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java | {
"start": 2717,
"end": 51175
} | class ____ extends ESTestCase {
private static final Settings SETTINGS = Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test").build();
private ThreadPool pool;
@Before
public void createSuiteThreadPool() {
pool = new TestThreadPool("test");
}
@After
public void stopThreadPool() {
assertThat(ThreadPool.terminate(pool, 10L, TimeUnit.SECONDS), equalTo(true));
}
public void testInitialStatusStopped() {
RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap());
RollupJobStatus status = new RollupJobStatus(IndexerState.STOPPED, Collections.singletonMap("foo", "bar"));
Client client = mock(Client.class);
when(client.settings()).thenReturn(Settings.EMPTY);
SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC());
TaskId taskId = new TaskId("node", 123);
RollupJobTask task = new RollupJobTask(
1,
"type",
"action",
taskId,
job,
status,
client,
schedulerEngine,
pool,
Collections.emptyMap()
);
task.init(null, mock(TaskManager.class), taskId.toString(), 123);
assertThat(((RollupJobStatus) task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED));
assertThat(((RollupJobStatus) task.getStatus()).getPosition().size(), equalTo(1));
assertTrue(((RollupJobStatus) task.getStatus()).getPosition().containsKey("foo"));
}
public void testInitialStatusAborting() {
RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap());
RollupJobStatus status = new RollupJobStatus(IndexerState.ABORTING, Collections.singletonMap("foo", "bar"));
Client client = mock(Client.class);
when(client.settings()).thenReturn(Settings.EMPTY);
SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC());
TaskId taskId = new TaskId("node", 123);
RollupJobTask task = new RollupJobTask(
1,
"type",
"action",
taskId,
job,
status,
client,
schedulerEngine,
pool,
Collections.emptyMap()
);
task.init(null, mock(TaskManager.class), taskId.toString(), 123);
assertThat(((RollupJobStatus) task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED));
assertThat(((RollupJobStatus) task.getStatus()).getPosition().size(), equalTo(1));
assertTrue(((RollupJobStatus) task.getStatus()).getPosition().containsKey("foo"));
}
public void testInitialStatusStopping() {
RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap());
RollupJobStatus status = new RollupJobStatus(IndexerState.STOPPING, Collections.singletonMap("foo", "bar"));
Client client = mock(Client.class);
when(client.settings()).thenReturn(Settings.EMPTY);
SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC());
TaskId taskId = new TaskId("node", 123);
RollupJobTask task = new RollupJobTask(
1,
"type",
"action",
taskId,
job,
status,
client,
schedulerEngine,
pool,
Collections.emptyMap()
);
task.init(null, mock(TaskManager.class), taskId.toString(), 123);
assertThat(((RollupJobStatus) task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED));
assertThat(((RollupJobStatus) task.getStatus()).getPosition().size(), equalTo(1));
assertTrue(((RollupJobStatus) task.getStatus()).getPosition().containsKey("foo"));
}
public void testInitialStatusStarted() {
RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap());
RollupJobStatus status = new RollupJobStatus(IndexerState.STARTED, Collections.singletonMap("foo", "bar"));
Client client = mock(Client.class);
when(client.settings()).thenReturn(Settings.EMPTY);
SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC());
TaskId taskId = new TaskId("node", 123);
RollupJobTask task = new RollupJobTask(
1,
"type",
"action",
taskId,
job,
status,
client,
schedulerEngine,
pool,
Collections.emptyMap()
);
task.init(null, mock(TaskManager.class), taskId.toString(), 123);
assertThat(((RollupJobStatus) task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED));
assertThat(((RollupJobStatus) task.getStatus()).getPosition().size(), equalTo(1));
assertTrue(((RollupJobStatus) task.getStatus()).getPosition().containsKey("foo"));
}
public void testInitialStatusIndexingOldID() {
RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap());
RollupJobStatus status = new RollupJobStatus(IndexerState.INDEXING, Collections.singletonMap("foo", "bar"));
Client client = mock(Client.class);
when(client.settings()).thenReturn(Settings.EMPTY);
SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC());
TaskId taskId = new TaskId("node", 123);
RollupJobTask task = new RollupJobTask(
1,
"type",
"action",
taskId,
job,
status,
client,
schedulerEngine,
pool,
Collections.emptyMap()
);
task.init(null, mock(TaskManager.class), taskId.toString(), 123);
assertThat(((RollupJobStatus) task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED));
assertThat(((RollupJobStatus) task.getStatus()).getPosition().size(), equalTo(1));
assertTrue(((RollupJobStatus) task.getStatus()).getPosition().containsKey("foo"));
}
public void testInitialStatusIndexingNewID() {
RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap());
RollupJobStatus status = new RollupJobStatus(IndexerState.INDEXING, Collections.singletonMap("foo", "bar"));
Client client = mock(Client.class);
when(client.settings()).thenReturn(Settings.EMPTY);
SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC());
TaskId taskId = new TaskId("node", 123);
RollupJobTask task = new RollupJobTask(
1,
"type",
"action",
taskId,
job,
status,
client,
schedulerEngine,
pool,
Collections.emptyMap()
);
task.init(null, mock(TaskManager.class), taskId.toString(), 123);
assertThat(((RollupJobStatus) task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED));
assertThat(((RollupJobStatus) task.getStatus()).getPosition().size(), equalTo(1));
assertTrue(((RollupJobStatus) task.getStatus()).getPosition().containsKey("foo"));
}
public void testNoInitialStatus() {
RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap());
Client client = mock(Client.class);
when(client.settings()).thenReturn(Settings.EMPTY);
SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC());
TaskId taskId = new TaskId("node", 123);
RollupJobTask task = new RollupJobTask(
1,
"type",
"action",
taskId,
job,
null,
client,
schedulerEngine,
pool,
Collections.emptyMap()
);
task.init(null, mock(TaskManager.class), taskId.toString(), 123);
assertThat(((RollupJobStatus) task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED));
assertNull(((RollupJobStatus) task.getStatus()).getPosition());
}
public void testStartWhenStarted() throws InterruptedException {
RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap());
RollupJobStatus status = new RollupJobStatus(IndexerState.STARTED, Collections.singletonMap("foo", "bar"));
Client client = mock(Client.class);
when(client.settings()).thenReturn(Settings.EMPTY);
SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC());
TaskId taskId = new TaskId("node", 123);
RollupJobTask task = new RollupJobTask(
1,
"type",
"action",
taskId,
job,
status,
client,
schedulerEngine,
pool,
Collections.emptyMap()
);
task.init(null, mock(TaskManager.class), taskId.toString(), 123);
assertThat(((RollupJobStatus) task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED));
assertThat(((RollupJobStatus) task.getStatus()).getPosition().size(), equalTo(1));
assertTrue(((RollupJobStatus) task.getStatus()).getPosition().containsKey("foo"));
CountDownLatch latch = new CountDownLatch(1);
task.start(new ActionListener<>() {
@Override
public void onResponse(StartRollupJobAction.Response response) {
assertTrue(response.isStarted());
latch.countDown();
}
@Override
public void onFailure(Exception e) {
fail("Should not have throw exception: " + e.getMessage());
}
});
latch.await(3, TimeUnit.SECONDS);
}
public void testStartWhenStopping() throws InterruptedException {
RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap());
final CountDownLatch block = new CountDownLatch(1);
final CountDownLatch unblock = new CountDownLatch(1);
try (var threadPool = createThreadPool()) {
final var client = getEmptySearchResponseClient(threadPool, block, unblock);
SchedulerEngine schedulerEngine = mock(SchedulerEngine.class);
AtomicInteger counter = new AtomicInteger(0);
TaskId taskId = new TaskId("node", 123);
RollupJobTask task = new RollupJobTask(
1,
"type",
"action",
taskId,
job,
null,
client,
schedulerEngine,
pool,
Collections.emptyMap()
) {
@Override
public void updatePersistentTaskState(
PersistentTaskState taskState,
ActionListener<PersistentTasksCustomMetadata.PersistentTask<?>> listener
) {
assertThat(taskState, instanceOf(RollupJobStatus.class));
int c = counter.get();
if (c == 0) {
assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STARTED));
} else if (c == 1) {
assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STOPPED));
} else if (c == 2) {
assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STOPPED));
} else {
fail("Should not have updated persistent statuses > 3 times");
}
listener.onResponse(
new PersistentTasksCustomMetadata.PersistentTask<>(
"foo",
RollupField.TASK_NAME,
job,
1,
new PersistentTasksCustomMetadata.Assignment("foo", "foo")
)
);
counter.incrementAndGet();
}
};
task.init(null, mock(TaskManager.class), taskId.toString(), 123);
assertThat(((RollupJobStatus) task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED));
assertNull(((RollupJobStatus) task.getStatus()).getPosition());
CountDownLatch latch = new CountDownLatch(1);
task.start(new ActionListener<StartRollupJobAction.Response>() {
@Override
public void onResponse(StartRollupJobAction.Response response) {
assertTrue(response.isStarted());
assertThat(((RollupJobStatus) task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED));
latch.countDown();
}
@Override
public void onFailure(Exception e) {
fail("Should not have entered onFailure");
}
});
assertUnblockIn10s(latch);
task.triggered(new SchedulerEngine.Event(RollupJobTask.SCHEDULE_NAME + "_" + job.getConfig().getId(), 123, 123));
assertThat(((RollupJobStatus) task.getStatus()).getIndexerState(), equalTo(IndexerState.INDEXING));
assertThat(task.getStats().getNumInvocations(), equalTo(1L));
// wait until the search request is send, this is unblocked in the client
assertUnblockIn10s(block);
task.stop(new ActionListener<StopRollupJobAction.Response>() {
@Override
public void onResponse(StopRollupJobAction.Response response) {
assertTrue(response.isStopped());
}
@Override
public void onFailure(Exception e) {
fail("should not have entered onFailure");
}
});
// we issued stop but the indexer is waiting for the search response, therefore we should be in stopping state
assertThat(((RollupJobStatus) task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPING));
CountDownLatch latch2 = new CountDownLatch(1);
task.start(new ActionListener<StartRollupJobAction.Response>() {
@Override
public void onResponse(StartRollupJobAction.Response response) {
fail("should not have entered onResponse");
}
@Override
public void onFailure(Exception e) {
assertThat(
e.getMessage(),
equalTo("Cannot start task for Rollup Job [" + job.getConfig().getId() + "] because state was [STOPPING]")
);
latch2.countDown();
}
});
assertUnblockIn10s(latch2);
// the client answer
unblock.countDown();
}
}
public void testStartWhenStopped() throws InterruptedException {
RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap());
RollupJobStatus status = new RollupJobStatus(IndexerState.STOPPED, Collections.singletonMap("foo", "bar"));
Client client = mock(Client.class);
when(client.settings()).thenReturn(Settings.EMPTY);
SchedulerEngine schedulerEngine = mock(SchedulerEngine.class);
TaskId taskId = new TaskId("node", 123);
RollupJobTask task = new RollupJobTask(
1,
"type",
"action",
taskId,
job,
status,
client,
schedulerEngine,
pool,
Collections.emptyMap()
) {
@Override
public void updatePersistentTaskState(
PersistentTaskState taskState,
ActionListener<PersistentTasksCustomMetadata.PersistentTask<?>> listener
) {
assertThat(taskState, instanceOf(RollupJobStatus.class));
assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STARTED));
listener.onResponse(
new PersistentTasksCustomMetadata.PersistentTask<>(
"foo",
RollupField.TASK_NAME,
job,
1,
new PersistentTasksCustomMetadata.Assignment("foo", "foo")
)
);
}
};
task.init(null, mock(TaskManager.class), taskId.toString(), 123);
assertThat(((RollupJobStatus) task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED));
assertThat(((RollupJobStatus) task.getStatus()).getPosition().size(), equalTo(1));
assertTrue(((RollupJobStatus) task.getStatus()).getPosition().containsKey("foo"));
CountDownLatch latch = new CountDownLatch(1);
task.start(new ActionListener<StartRollupJobAction.Response>() {
@Override
public void onResponse(StartRollupJobAction.Response response) {
assertTrue(response.isStarted());
assertThat(((RollupJobStatus) task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED));
latch.countDown();
}
@Override
public void onFailure(Exception e) {
fail("Should not have entered onFailure");
}
});
latch.await(3, TimeUnit.SECONDS);
}
public void testTriggerUnrelated() throws InterruptedException {
RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap());
RollupJobStatus status = new RollupJobStatus(IndexerState.STOPPED, Collections.singletonMap("foo", "bar"));
Client client = mock(Client.class);
when(client.settings()).thenReturn(Settings.EMPTY);
SchedulerEngine schedulerEngine = mock(SchedulerEngine.class);
TaskId taskId = new TaskId("node", 123);
RollupJobTask task = new RollupJobTask(
1,
"type",
"action",
taskId,
job,
status,
client,
schedulerEngine,
pool,
Collections.emptyMap()
) {
@Override
public void updatePersistentTaskState(
PersistentTaskState taskState,
ActionListener<PersistentTasksCustomMetadata.PersistentTask<?>> listener
) {
assertThat(taskState, instanceOf(RollupJobStatus.class));
assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STARTED));
listener.onResponse(
new PersistentTasksCustomMetadata.PersistentTask<>(
"foo",
RollupField.TASK_NAME,
job,
1,
new PersistentTasksCustomMetadata.Assignment("foo", "foo")
)
);
}
};
task.init(null, mock(TaskManager.class), taskId.toString(), 123);
assertThat(((RollupJobStatus) task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED));
assertThat(((RollupJobStatus) task.getStatus()).getPosition().size(), equalTo(1));
assertTrue(((RollupJobStatus) task.getStatus()).getPosition().containsKey("foo"));
CountDownLatch latch = new CountDownLatch(1);
task.start(new ActionListener<StartRollupJobAction.Response>() {
@Override
public void onResponse(StartRollupJobAction.Response response) {
assertTrue(response.isStarted());
assertThat(((RollupJobStatus) task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED));
latch.countDown();
}
@Override
public void onFailure(Exception e) {
fail("Should not have entered onFailure");
}
});
latch.await(3, TimeUnit.SECONDS);
task.triggered(new SchedulerEngine.Event("unrelated", 123, 123));
assertThat(((RollupJobStatus) task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED));
}
public void testTrigger() throws InterruptedException {
RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap());
Client client = mock(Client.class);
when(client.settings()).thenReturn(Settings.EMPTY);
when(client.threadPool()).thenReturn(pool);
SchedulerEngine schedulerEngine = mock(SchedulerEngine.class);
TaskId taskId = new TaskId("node", 123);
RollupJobTask task = new RollupJobTask(
1,
"type",
"action",
taskId,
job,
null,
client,
schedulerEngine,
pool,
Collections.emptyMap()
) {
@Override
public void updatePersistentTaskState(
PersistentTaskState taskState,
ActionListener<PersistentTasksCustomMetadata.PersistentTask<?>> listener
) {
assertThat(taskState, instanceOf(RollupJobStatus.class));
assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STARTED));
listener.onResponse(
new PersistentTasksCustomMetadata.PersistentTask<>(
"foo",
RollupField.TASK_NAME,
job,
1,
new PersistentTasksCustomMetadata.Assignment("foo", "foo")
)
);
}
};
task.init(null, mock(TaskManager.class), taskId.toString(), 123);
assertThat(((RollupJobStatus) task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED));
assertNull(((RollupJobStatus) task.getStatus()).getPosition());
CountDownLatch latch = new CountDownLatch(1);
task.start(new ActionListener<StartRollupJobAction.Response>() {
@Override
public void onResponse(StartRollupJobAction.Response response) {
assertTrue(response.isStarted());
assertThat(((RollupJobStatus) task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED));
latch.countDown();
}
@Override
public void onFailure(Exception e) {
fail("Should not have entered onFailure");
}
});
latch.await(3, TimeUnit.SECONDS);
task.triggered(new SchedulerEngine.Event(RollupJobTask.SCHEDULE_NAME + "_" + job.getConfig().getId(), 123, 123));
assertThat(((RollupJobStatus) task.getStatus()).getIndexerState(), equalTo(IndexerState.INDEXING));
assertThat(task.getStats().getNumInvocations(), equalTo(1L));
}
@SuppressWarnings("unchecked")
public void testTriggerWithoutHeaders() throws Exception {
final ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap());
Client client = mock(Client.class);
doAnswer(invocationOnMock -> {
BroadcastResponse r = new BroadcastResponse(2, 2, 0, Collections.emptyList());
((ActionListener) invocationOnMock.getArguments()[2]).onResponse(r);
return null;
}).when(client).execute(eq(RefreshAction.INSTANCE), any(), any());
when(client.settings()).thenReturn(Settings.EMPTY);
AtomicBoolean started = new AtomicBoolean(false);
AtomicBoolean finished = new AtomicBoolean(false);
AtomicInteger counter = new AtomicInteger(0);
CountDownLatch latch = new CountDownLatch(1);
final ThreadPool threadPool = mock(ThreadPool.class);
when(client.threadPool()).thenReturn(threadPool);
when(threadPool.getThreadContext()).thenReturn(threadContext);
doAnswer(invocationOnMock -> {
assertTrue(threadContext.getHeaders().isEmpty());
SearchResponse r = mock(SearchResponse.class);
when(r.getShardFailures()).thenReturn(ShardSearchFailure.EMPTY_ARRAY);
InternalComposite compositeAgg = mock(InternalComposite.class);
when(compositeAgg.getBuckets()).thenReturn(Collections.emptyList());
when(compositeAgg.getName()).thenReturn(RollupField.NAME);
InternalAggregations aggs = InternalAggregations.from(Collections.singletonList(compositeAgg));
when(r.getAggregations()).thenReturn(aggs);
// Wait before progressing
latch.await();
((ActionListener) invocationOnMock.getArguments()[2]).onResponse(r);
return null;
}).when(client).execute(eq(TransportSearchAction.TYPE), any(), any());
SchedulerEngine schedulerEngine = mock(SchedulerEngine.class);
TaskId taskId = new TaskId("node", 123);
RollupJobTask task = new RollupJobTask(
1,
"type",
"action",
taskId,
job,
null,
client,
schedulerEngine,
pool,
Collections.emptyMap()
) {
@Override
public void updatePersistentTaskState(
PersistentTaskState taskState,
ActionListener<PersistentTasksCustomMetadata.PersistentTask<?>> listener
) {
Integer counterValue = counter.getAndIncrement();
if (counterValue == 0) {
assertThat(taskState, instanceOf(RollupJobStatus.class));
assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STARTED));
listener.onResponse(
new PersistentTasksCustomMetadata.PersistentTask<>(
"foo",
RollupField.TASK_NAME,
job,
1,
new PersistentTasksCustomMetadata.Assignment("foo", "foo")
)
);
} else if (counterValue == 1) {
finished.set(true);
}
}
};
task.init(null, mock(TaskManager.class), taskId.toString(), 123);
assertThat(((RollupJobStatus) task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED));
assertNull(((RollupJobStatus) task.getStatus()).getPosition());
task.start(new ActionListener<>() {
@Override
public void onResponse(StartRollupJobAction.Response response) {
assertTrue(response.isStarted());
assertThat(((RollupJobStatus) task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED));
started.set(true);
}
@Override
public void onFailure(Exception e) {
fail("Should not have entered onFailure");
}
});
assertBusy(() -> assertTrue(started.get()));
task.triggered(new SchedulerEngine.Event(RollupJobTask.SCHEDULE_NAME + "_" + job.getConfig().getId(), 123, 123));
assertThat(((RollupJobStatus) task.getStatus()).getIndexerState(), equalTo(IndexerState.INDEXING));
assertThat(task.getStats().getNumInvocations(), equalTo(1L));
// Allow search response to return now
latch.countDown();
// Wait for the final persistent status to finish
assertBusy(() -> assertTrue(finished.get()));
}
@SuppressWarnings("unchecked")
public void testTriggerWithHeaders() throws Exception {
final ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
Map<String, String> headers = Maps.newMapWithExpectedSize(1);
headers.put("es-security-runas-user", "foo");
headers.put("_xpack_security_authentication", "bar");
RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), headers);
Client client = mock(Client.class);
doAnswer(invocationOnMock -> {
BroadcastResponse r = new BroadcastResponse(2, 2, 0, Collections.emptyList());
((ActionListener) invocationOnMock.getArguments()[2]).onResponse(r);
return null;
}).when(client).execute(eq(RefreshAction.INSTANCE), any(), any());
AtomicBoolean started = new AtomicBoolean(false);
AtomicBoolean finished = new AtomicBoolean(false);
AtomicInteger counter = new AtomicInteger(0);
CountDownLatch latch = new CountDownLatch(1);
final ThreadPool threadPool = mock(ThreadPool.class);
when(client.threadPool()).thenReturn(threadPool);
when(threadPool.getThreadContext()).thenReturn(threadContext);
doAnswer(invocationOnMock -> {
assertFalse(threadContext.getHeaders().isEmpty());
assertThat(threadContext.getHeaders().get("es-security-runas-user"), equalTo("foo"));
assertThat(threadContext.getHeaders().get("_xpack_security_authentication"), equalTo("bar"));
SearchResponse r = mock(SearchResponse.class);
when(r.getShardFailures()).thenReturn(ShardSearchFailure.EMPTY_ARRAY);
InternalComposite compositeAgg = mock(InternalComposite.class);
when(compositeAgg.getBuckets()).thenReturn(Collections.emptyList());
when(compositeAgg.getName()).thenReturn(RollupField.NAME);
InternalAggregations aggs = InternalAggregations.from(Collections.singletonList(compositeAgg));
when(r.getAggregations()).thenReturn(aggs);
// Wait before progressing
latch.await();
((ActionListener) invocationOnMock.getArguments()[2]).onResponse(r);
return null;
}).when(client).execute(eq(TransportSearchAction.TYPE), any(), any());
SchedulerEngine schedulerEngine = mock(SchedulerEngine.class);
TaskId taskId = new TaskId("node", 123);
RollupJobTask task = new RollupJobTask(
1,
"type",
"action",
taskId,
job,
null,
client,
schedulerEngine,
pool,
Collections.emptyMap()
) {
@Override
public void updatePersistentTaskState(
PersistentTaskState taskState,
ActionListener<PersistentTasksCustomMetadata.PersistentTask<?>> listener
) {
Integer counterValue = counter.getAndIncrement();
if (counterValue == 0) {
assertThat(taskState, instanceOf(RollupJobStatus.class));
assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STARTED));
listener.onResponse(
new PersistentTasksCustomMetadata.PersistentTask<>(
"foo",
RollupField.TASK_NAME,
job,
1,
new PersistentTasksCustomMetadata.Assignment("foo", "foo")
)
);
} else if (counterValue == 1) {
finished.set(true);
}
}
};
task.init(null, mock(TaskManager.class), taskId.toString(), 123);
assertThat(((RollupJobStatus) task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED));
assertNull(((RollupJobStatus) task.getStatus()).getPosition());
task.start(new ActionListener<>() {
@Override
public void onResponse(StartRollupJobAction.Response response) {
assertTrue(response.isStarted());
assertThat(((RollupJobStatus) task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED));
started.set(true);
}
@Override
public void onFailure(Exception e) {
fail("Should not have entered onFailure");
}
});
assertBusy(() -> assertTrue(started.get()));
task.triggered(new SchedulerEngine.Event(RollupJobTask.SCHEDULE_NAME + "_" + job.getConfig().getId(), 123, 123));
assertThat(((RollupJobStatus) task.getStatus()).getIndexerState(), equalTo(IndexerState.INDEXING));
assertThat(task.getStats().getNumInvocations(), equalTo(1L));
// Allow search response to return now
latch.countDown();
// Wait for the final persistent status to finish
assertBusy(() -> assertTrue(finished.get()), 30, TimeUnit.SECONDS);
}
@SuppressWarnings("unchecked")
public void testSaveStateChangesIDScheme() throws Exception {
final ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
Map<String, String> headers = Maps.newMapWithExpectedSize(1);
headers.put("es-security-runas-user", "foo");
headers.put("_xpack_security_authentication", "bar");
RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), headers);
Client client = mock(Client.class);
doAnswer(invocationOnMock -> {
BroadcastResponse r = new BroadcastResponse(2, 2, 0, Collections.emptyList());
((ActionListener) invocationOnMock.getArguments()[2]).onResponse(r);
return null;
}).when(client).execute(eq(RefreshAction.INSTANCE), any(), any());
when(client.settings()).thenReturn(Settings.EMPTY);
AtomicBoolean started = new AtomicBoolean(false);
AtomicBoolean finished = new AtomicBoolean(false);
AtomicInteger counter = new AtomicInteger(0);
CountDownLatch latch = new CountDownLatch(1);
final ThreadPool threadPool = mock(ThreadPool.class);
when(client.threadPool()).thenReturn(threadPool);
when(threadPool.getThreadContext()).thenReturn(threadContext);
doAnswer(invocationOnMock -> {
assertFalse(threadContext.getHeaders().isEmpty());
assertThat(threadContext.getHeaders().get("es-security-runas-user"), equalTo("foo"));
assertThat(threadContext.getHeaders().get("_xpack_security_authentication"), equalTo("bar"));
SearchResponse r = mock(SearchResponse.class);
when(r.getShardFailures()).thenReturn(ShardSearchFailure.EMPTY_ARRAY);
InternalComposite compositeAgg = mock(InternalComposite.class);
when(compositeAgg.getBuckets()).thenReturn(Collections.emptyList());
when(compositeAgg.getName()).thenReturn(RollupField.NAME);
InternalAggregations aggs = InternalAggregations.from(Collections.singletonList(compositeAgg));
when(r.getAggregations()).thenReturn(aggs);
// Wait before progressing
latch.await();
((ActionListener) invocationOnMock.getArguments()[2]).onResponse(r);
return null;
}).when(client).execute(eq(TransportSearchAction.TYPE), any(), any());
SchedulerEngine schedulerEngine = mock(SchedulerEngine.class);
RollupJobStatus status = new RollupJobStatus(IndexerState.STOPPED, null);
TaskId taskId = new TaskId("node", 123);
RollupJobTask task = new RollupJobTask(
1,
"type",
"action",
taskId,
job,
status,
client,
schedulerEngine,
pool,
Collections.emptyMap()
) {
@Override
public void updatePersistentTaskState(
PersistentTaskState taskState,
ActionListener<PersistentTasksCustomMetadata.PersistentTask<?>> listener
) {
Integer counterValue = counter.getAndIncrement();
if (counterValue == 0) {
assertThat(taskState, instanceOf(RollupJobStatus.class));
assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STARTED));
listener.onResponse(
new PersistentTasksCustomMetadata.PersistentTask<>(
"foo",
RollupField.TASK_NAME,
job,
1,
new PersistentTasksCustomMetadata.Assignment("foo", "foo")
)
);
} else if (counterValue == 1) {
finished.set(true);
}
}
};
task.init(null, mock(TaskManager.class), taskId.toString(), 123);
assertThat(((RollupJobStatus) task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED));
assertNull(((RollupJobStatus) task.getStatus()).getPosition());
task.start(new ActionListener<>() {
@Override
public void onResponse(StartRollupJobAction.Response response) {
assertTrue(response.isStarted());
assertThat(((RollupJobStatus) task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED));
started.set(true);
}
@Override
public void onFailure(Exception e) {
fail("Should not have entered onFailure");
}
});
assertBusy(() -> assertTrue(started.get()));
task.triggered(new SchedulerEngine.Event(RollupJobTask.SCHEDULE_NAME + "_" + job.getConfig().getId(), 123, 123));
assertThat(((RollupJobStatus) task.getStatus()).getIndexerState(), equalTo(IndexerState.INDEXING));
assertThat(task.getStats().getNumInvocations(), equalTo(1L));
// Allow search response to return now
latch.countDown();
// Wait for the final persistent status to finish
assertBusy(() -> assertTrue(finished.get()));
}
/**
 * Stopping a task whose indexer is already STOPPED must still invoke the listener
 * with {@code isStopped() == true} (stop is idempotent from the caller's view).
 */
public void testStopWhenStopped() throws InterruptedException {
    RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap());
    RollupJobStatus status = new RollupJobStatus(IndexerState.STOPPED, null);
    Client client = mock(Client.class);
    when(client.settings()).thenReturn(Settings.EMPTY);
    SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC());
    TaskId taskId = new TaskId("node", 123);
    RollupJobTask task = new RollupJobTask(
        1,
        "type",
        "action",
        taskId,
        job,
        status,
        client,
        schedulerEngine,
        pool,
        Collections.emptyMap()
    );
    task.init(null, mock(TaskManager.class), taskId.toString(), 123);
    assertThat(((RollupJobStatus) task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED));
    CountDownLatch latch = new CountDownLatch(1);
    task.stop(new ActionListener<StopRollupJobAction.Response>() {
        @Override
        public void onResponse(StopRollupJobAction.Response response) {
            assertTrue(response.isStopped());
            latch.countDown();
        }

        @Override
        public void onFailure(Exception e) {
            fail("Should not have entered onFailure");
        }
    });
    // Previously the await result was discarded, so a listener that never fired
    // would let the test pass silently after the timeout. Assert it completed.
    assertTrue(latch.await(3, TimeUnit.SECONDS));
}
// Exercises the STOPPING state: stop() is issued while the indexer is blocked on an
// in-flight search, and a second stop() while already STOPPING must also succeed.
public void testStopWhenStopping() throws InterruptedException {
    RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap());
    // 'block' is counted down by the mock client once the search request has been issued;
    // 'unblock' gates the mocked search response until the test releases it at the end.
    final CountDownLatch block = new CountDownLatch(1);
    final CountDownLatch unblock = new CountDownLatch(1);
    try (var threadPool = createThreadPool()) {
        final var client = getEmptySearchResponseClient(threadPool, block, unblock);
        SchedulerEngine schedulerEngine = mock(SchedulerEngine.class);
        // Counts persistent-state updates so each one can be asserted in order below.
        AtomicInteger counter = new AtomicInteger(0);
        TaskId taskId = new TaskId("node", 123);
        RollupJobTask task = new RollupJobTask(
            1,
            "type",
            "action",
            taskId,
            job,
            null,
            client,
            schedulerEngine,
            pool,
            Collections.emptyMap()
        ) {
            @Override
            public void updatePersistentTaskState(
                PersistentTaskState taskState,
                ActionListener<PersistentTasksCustomMetadata.PersistentTask<?>> listener
            ) {
                assertThat(taskState, instanceOf(RollupJobStatus.class));
                int c = counter.get();
                // Update 0 comes from start() (STARTED); updates 1-3 come from the stop()
                // calls and the indexer shutdown, all of which persist STOPPED.
                if (c == 0) {
                    assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STARTED));
                } else if (c == 1) {
                    assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STOPPED));
                } else if (c == 2) {
                    assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STOPPED));
                } else if (c == 3) {
                    assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STOPPED));
                } else {
                    fail("Should not have updated persistent statuses > 4 times");
                }
                listener.onResponse(
                    new PersistentTasksCustomMetadata.PersistentTask<>(
                        "foo",
                        RollupField.TASK_NAME,
                        job,
                        1,
                        new PersistentTasksCustomMetadata.Assignment("foo", "foo")
                    )
                );
                counter.incrementAndGet();
            }
        };
        task.init(null, mock(TaskManager.class), taskId.toString(), 123);
        // A null initial status means the task starts out STOPPED with no stored position.
        assertThat(((RollupJobStatus) task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED));
        assertNull(((RollupJobStatus) task.getStatus()).getPosition());
        CountDownLatch latch = new CountDownLatch(1);
        task.start(new ActionListener<StartRollupJobAction.Response>() {
            @Override
            public void onResponse(StartRollupJobAction.Response response) {
                assertTrue(response.isStarted());
                assertThat(((RollupJobStatus) task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED));
                latch.countDown();
            }

            @Override
            public void onFailure(Exception e) {
                fail("Should not have entered onFailure");
            }
        });
        assertUnblockIn10s(latch);
        // Fire the scheduler event: the indexer moves to INDEXING and issues a search.
        task.triggered(new SchedulerEngine.Event(RollupJobTask.SCHEDULE_NAME + "_" + job.getConfig().getId(), 123, 123));
        assertThat(((RollupJobStatus) task.getStatus()).getIndexerState(), equalTo(IndexerState.INDEXING));
        assertThat(task.getStats().getNumInvocations(), equalTo(1L));
        // wait until the search request has been sent; the client counts 'block' down
        assertUnblockIn10s(block);
        task.stop(new ActionListener<StopRollupJobAction.Response>() {
            @Override
            public void onResponse(StopRollupJobAction.Response response) {
                assertTrue(response.isStopped());
            }

            @Override
            public void onFailure(Exception e) {
                fail("should not have entered onFailure");
            }
        });
        // we issued stop but the indexer is waiting for the search response, therefore we should be in stopping state
        assertThat(((RollupJobStatus) task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPING));
        // A second stop() while already STOPPING must still report stopped.
        CountDownLatch latch2 = new CountDownLatch(1);
        task.stop(new ActionListener<StopRollupJobAction.Response>() {
            @Override
            public void onResponse(StopRollupJobAction.Response response) {
                assertTrue(response.isStopped());
                latch2.countDown();
            }

            @Override
            public void onFailure(Exception e) {
                fail("Should not have entered onFailure");
            }
        });
        assertUnblockIn10s(latch2);
        // Release the mocked search response so the indexer can finish shutting down.
        unblock.countDown();
    }
}
/**
 * Stopping a task that has been cancelled (ABORTING) must fail: the abort path wins
 * and stop() reports an error naming the ABORTING state instead of stopping.
 */
public void testStopWhenAborting() throws InterruptedException {
    RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap());
    RollupJobStatus status = new RollupJobStatus(IndexerState.STOPPED, null);
    Client client = mock(Client.class);
    when(client.settings()).thenReturn(Settings.EMPTY);
    SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC());
    // Two counts: one for markAsCompleted() (triggered by onCancelled) and one for
    // the stop listener's onFailure.
    CountDownLatch latch = new CountDownLatch(2);
    // This isn't really realistic, since start/stop/cancelled are all synchronized...
    // the task would end before stop could be called. But to help test out all pathways,
    // just in case, we can override markAsCompleted so it's a no-op and test how stop
    // handles the situation
    TaskId taskId = new TaskId("node", 123);
    RollupJobTask task = new RollupJobTask(
        1,
        "type",
        "action",
        taskId,
        job,
        status,
        client,
        schedulerEngine,
        pool,
        Collections.emptyMap()
    ) {
        @Override
        public void markAsCompleted() {
            latch.countDown();
        }
    };
    task.init(null, mock(TaskManager.class), taskId.toString(), 123);
    assertThat(((RollupJobStatus) task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED));
    task.onCancelled();
    task.stop(new ActionListener<StopRollupJobAction.Response>() {
        @Override
        public void onResponse(StopRollupJobAction.Response response) {
            // Fixed copy-paste: this branch previously failed with "...onFailure".
            fail("Should not have entered onResponse");
        }

        @Override
        public void onFailure(Exception e) {
            assertThat(
                e.getMessage(),
                equalTo("Cannot stop task for Rollup Job [" + job.getConfig().getId() + "] because state was [ABORTING]")
            );
            latch.countDown();
        }
    });
    // Previously the await result was discarded, so the test could pass silently
    // after the timeout even if neither callback fired. Assert it completed.
    assertTrue(latch.await(3, TimeUnit.SECONDS));
}
/**
 * Asserts that {@code latch} reaches zero within 10 seconds. An interrupt while
 * waiting is converted into an {@link AssertionError} rather than propagated.
 */
private static void assertUnblockIn10s(CountDownLatch latch) {
    final boolean released;
    try {
        released = latch.await(10, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        throw new AssertionError("Should not have been interrupted", e);
    }
    assertThat(released, equalTo(true));
}
/**
 * Returns a client whose request execution first counts {@code requestSent} down
 * (signalling the test that the indexer has issued its search) and then blocks until
 * {@code releaseResponse} is counted down, before answering with an empty mocked
 * {@link SearchResponse}.
 *
 * The parameters were previously named "unblock"/"block", which was swapped relative
 * to the call site's latch names and misleading; callers pass positionally, so the
 * rename is behavior-preserving.
 */
private NoOpClient getEmptySearchResponseClient(ThreadPool threadPool, CountDownLatch requestSent, CountDownLatch releaseResponse) {
    return new NoOpClient(threadPool) {
        @SuppressWarnings("unchecked")
        @Override
        protected <Request extends ActionRequest, Response extends ActionResponse> void doExecute(
            ActionType<Response> action,
            Request request,
            ActionListener<Response> listener
        ) {
            // Tell the test the search request has been sent...
            requestSent.countDown();
            // ...then hold the response hostage until the test releases it.
            assertUnblockIn10s(releaseResponse);
            listener.onResponse((Response) mock(SearchResponse.class));
        }
    };
}
}
| RollupJobTaskTests |
java | google__guice | extensions/assistedinject/test/com/google/inject/assistedinject/ManyConstructorsTest.java | {
"start": 6757,
"end": 6843
} | interface ____ {
Foo create(String name, int idx);
}
public static | SimpleFactory2 |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Utils.java | {
"start": 1652,
"end": 1920
} | class ____ extends OutputLogFilter {
public boolean accept(Path path) {
return super.accept(path)
&& !FileOutputCommitter.SUCCEEDED_FILE_NAME
.equals(path.getName());
}
}
/**
* This | OutputFilesFilter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.