language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | elastic__elasticsearch | x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/InferenceInputsTests.java | {
"start": 582,
"end": 1816
} | class ____ extends ESTestCase {
public void testCastToSucceeds() {
InferenceInputs inputs = new EmbeddingsInput(List.of(), InputTypeTests.randomWithNull());
assertThat(inputs.castTo(EmbeddingsInput.class), Matchers.instanceOf(EmbeddingsInput.class));
var emptyRequest = new UnifiedCompletionRequest(List.of(), null, null, null, null, null, null, null);
assertThat(new UnifiedChatInput(emptyRequest, false).castTo(UnifiedChatInput.class), Matchers.instanceOf(UnifiedChatInput.class));
assertThat(
new QueryAndDocsInputs("hello", List.of(), Boolean.TRUE, 33, false).castTo(QueryAndDocsInputs.class),
Matchers.instanceOf(QueryAndDocsInputs.class)
);
}
public void testCastToFails() {
InferenceInputs inputs = new EmbeddingsInput(List.of(), null);
var exception = expectThrows(IllegalArgumentException.class, () -> inputs.castTo(QueryAndDocsInputs.class));
assertThat(
exception.getMessage(),
Matchers.containsString(
Strings.format("Unable to convert inference inputs type: [%s] to [%s]", EmbeddingsInput.class, QueryAndDocsInputs.class)
)
);
}
}
| InferenceInputsTests |
java | apache__kafka | group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/TasksTupleWithEpochsTest.java | {
"start": 1545,
"end": 10437
} | class ____ {
private static final String SUBTOPOLOGY_1 = "1";
private static final String SUBTOPOLOGY_2 = "2";
private static final String SUBTOPOLOGY_3 = "3";
@Test
public void testTasksCannotBeNull() {
assertThrows(NullPointerException.class, () -> new TasksTupleWithEpochs(null, Map.of(), Map.of()));
assertThrows(NullPointerException.class, () -> new TasksTupleWithEpochs(Map.of(), null, Map.of()));
assertThrows(NullPointerException.class, () -> new TasksTupleWithEpochs(Map.of(), Map.of(), null));
}
@Test
public void testReturnUnmodifiableTaskAssignments() {
Map<String, Map<Integer, Integer>> activeTasks = Map.of(
SUBTOPOLOGY_1, Map.of(1, 10, 2, 11, 3, 12)
);
Map<String, Set<Integer>> standbyTasks = mkTasksPerSubtopology(
mkTasks(SUBTOPOLOGY_2, 9, 8, 7)
);
Map<String, Set<Integer>> warmupTasks = mkTasksPerSubtopology(
mkTasks(SUBTOPOLOGY_3, 4, 5, 6)
);
TasksTupleWithEpochs tuple = new TasksTupleWithEpochs(activeTasks, standbyTasks, warmupTasks);
assertEquals(activeTasks, tuple.activeTasksWithEpochs());
assertThrows(UnsupportedOperationException.class, () -> tuple.activeTasksWithEpochs().put("not allowed", Map.of()));
assertEquals(standbyTasks, tuple.standbyTasks());
assertThrows(UnsupportedOperationException.class, () -> tuple.standbyTasks().put("not allowed", Set.of()));
assertEquals(warmupTasks, tuple.warmupTasks());
assertThrows(UnsupportedOperationException.class, () -> tuple.warmupTasks().put("not allowed", Set.of()));
}
@Test
public void testFromCurrentAssignmentRecord() {
List<StreamsGroupCurrentMemberAssignmentValue.TaskIds> activeTasks = new ArrayList<>();
activeTasks.add(new StreamsGroupCurrentMemberAssignmentValue.TaskIds()
.setSubtopologyId(SUBTOPOLOGY_1)
.setPartitions(Arrays.asList(1, 2, 3))
.setAssignmentEpochs(Arrays.asList(10, 11, 12)));
activeTasks.add(new StreamsGroupCurrentMemberAssignmentValue.TaskIds()
.setSubtopologyId(SUBTOPOLOGY_2)
.setPartitions(Arrays.asList(4, 5, 6))
.setAssignmentEpochs(Arrays.asList(20, 21, 22)));
List<StreamsGroupCurrentMemberAssignmentValue.TaskIds> standbyTasks = new ArrayList<>();
standbyTasks.add(new StreamsGroupCurrentMemberAssignmentValue.TaskIds()
.setSubtopologyId(SUBTOPOLOGY_1)
.setPartitions(Arrays.asList(7, 8, 9)));
standbyTasks.add(new StreamsGroupCurrentMemberAssignmentValue.TaskIds()
.setSubtopologyId(SUBTOPOLOGY_2)
.setPartitions(Arrays.asList(1, 2, 3)));
List<StreamsGroupCurrentMemberAssignmentValue.TaskIds> warmupTasks = new ArrayList<>();
warmupTasks.add(new StreamsGroupCurrentMemberAssignmentValue.TaskIds()
.setSubtopologyId(SUBTOPOLOGY_1)
.setPartitions(Arrays.asList(4, 5, 6)));
warmupTasks.add(new StreamsGroupCurrentMemberAssignmentValue.TaskIds()
.setSubtopologyId(SUBTOPOLOGY_2)
.setPartitions(Arrays.asList(7, 8, 9)));
TasksTupleWithEpochs tuple = TasksTupleWithEpochs.fromCurrentAssignmentRecord(
activeTasks, standbyTasks, warmupTasks, 100
);
assertEquals(
Map.of(
SUBTOPOLOGY_1, Map.of(1, 10, 2, 11, 3, 12),
SUBTOPOLOGY_2, Map.of(4, 20, 5, 21, 6, 22)
),
tuple.activeTasksWithEpochs()
);
assertEquals(
mkTasksPerSubtopology(
mkTasks(SUBTOPOLOGY_1, 7, 8, 9),
mkTasks(SUBTOPOLOGY_2, 1, 2, 3)
),
tuple.standbyTasks()
);
assertEquals(
mkTasksPerSubtopology(
mkTasks(SUBTOPOLOGY_1, 4, 5, 6),
mkTasks(SUBTOPOLOGY_2, 7, 8, 9)
),
tuple.warmupTasks()
);
}
@Test
public void testFromCurrentAssignmentRecordWithoutEpochs() {
// Test legacy format where epochs are not present
List<StreamsGroupCurrentMemberAssignmentValue.TaskIds> activeTasks = new ArrayList<>();
activeTasks.add(new StreamsGroupCurrentMemberAssignmentValue.TaskIds()
.setSubtopologyId(SUBTOPOLOGY_1)
.setPartitions(Arrays.asList(1, 2, 3)));
int memberEpoch = 100;
TasksTupleWithEpochs tuple = TasksTupleWithEpochs.fromCurrentAssignmentRecord(
activeTasks, List.of(), List.of(), memberEpoch
);
// Should use member epoch as default
assertEquals(
Map.of(SUBTOPOLOGY_1, Map.of(1, memberEpoch, 2, memberEpoch, 3, memberEpoch)),
tuple.activeTasksWithEpochs()
);
}
@Test
public void testFromCurrentAssignmentRecordWithMismatchedEpochs() {
// Test error case where number of epochs doesn't match number of partitions
List<StreamsGroupCurrentMemberAssignmentValue.TaskIds> activeTasks = new ArrayList<>();
activeTasks.add(new StreamsGroupCurrentMemberAssignmentValue.TaskIds()
.setSubtopologyId(SUBTOPOLOGY_1)
.setPartitions(Arrays.asList(1, 2, 3))
.setAssignmentEpochs(Arrays.asList(10, 11))); // Only 2 epochs for 3 partitions
assertThrows(IllegalStateException.class, () ->
TasksTupleWithEpochs.fromCurrentAssignmentRecord(activeTasks, List.of(), List.of(), 100)
);
}
@Test
public void testIsEmpty() {
TasksTupleWithEpochs emptyTuple = new TasksTupleWithEpochs(Map.of(), Map.of(), Map.of());
assertTrue(emptyTuple.isEmpty());
assertEquals(TasksTupleWithEpochs.EMPTY, emptyTuple);
TasksTupleWithEpochs nonEmptyTuple = new TasksTupleWithEpochs(
Map.of(SUBTOPOLOGY_1, Map.of(1, 10)),
Map.of(),
Map.of()
);
assertFalse(nonEmptyTuple.isEmpty());
}
@Test
public void testMerge() {
TasksTupleWithEpochs tuple1 = new TasksTupleWithEpochs(
Map.of(SUBTOPOLOGY_1, Map.of(1, 10, 2, 11)),
Map.of(SUBTOPOLOGY_2, Set.of(4, 5)),
Map.of(SUBTOPOLOGY_3, Set.of(7, 8))
);
TasksTupleWithEpochs tuple2 = new TasksTupleWithEpochs(
Map.of(
SUBTOPOLOGY_1, Map.of(3, 13), // Different partition in same subtopology
SUBTOPOLOGY_2, Map.of(6, 26) // Different subtopology
),
Map.of(SUBTOPOLOGY_2, Set.of(9, 10)),
Map.of(SUBTOPOLOGY_3, Set.of(11, 12))
);
TasksTupleWithEpochs merged = tuple1.merge(tuple2);
assertEquals(
Map.of(
SUBTOPOLOGY_1, Map.of(1, 10, 2, 11, 3, 13),
SUBTOPOLOGY_2, Map.of(6, 26)
),
merged.activeTasksWithEpochs()
);
assertEquals(
mkTasksPerSubtopology(
mkTasks(SUBTOPOLOGY_2, 4, 5, 9, 10)
),
merged.standbyTasks()
);
assertEquals(
mkTasksPerSubtopology(
mkTasks(SUBTOPOLOGY_3, 7, 8, 11, 12)
),
merged.warmupTasks()
);
}
@Test
public void testMergeWithOverlappingActiveTasks() {
// When merging overlapping active tasks, epochs from the second tuple take precedence
TasksTupleWithEpochs tuple1 = new TasksTupleWithEpochs(
Map.of(SUBTOPOLOGY_1, Map.of(1, 10, 2, 11)),
Map.of(),
Map.of()
);
TasksTupleWithEpochs tuple2 = new TasksTupleWithEpochs(
Map.of(SUBTOPOLOGY_1, Map.of(1, 99, 3, 13)), // partition 1 overlaps with different epoch
Map.of(),
Map.of()
);
TasksTupleWithEpochs merged = tuple1.merge(tuple2);
// Epoch for partition 1 should be from tuple2 (99, not 10) since the second tuple takes precedence
assertEquals(99, merged.activeTasksWithEpochs().get(SUBTOPOLOGY_1).get(1));
assertEquals(11, merged.activeTasksWithEpochs().get(SUBTOPOLOGY_1).get(2));
assertEquals(13, merged.activeTasksWithEpochs().get(SUBTOPOLOGY_1).get(3));
}
@Test
public void testToString() {
TasksTupleWithEpochs tuple = new TasksTupleWithEpochs(
Map.of(
SUBTOPOLOGY_1, Map.of(1, 10, 2, 11),
SUBTOPOLOGY_2, Map.of(3, 20)
),
Map.of(SUBTOPOLOGY_2, Set.of(4, 5)),
Map.of(SUBTOPOLOGY_3, Set.of(6))
);
String result = tuple.toString();
// Verify the exact toString format
assertEquals(
"(active=[1-1@10, 1-2@11, 2-3@20], " +
"standby=[2-4, 2-5], " +
"warmup=[3-6])",
result
);
}
}
| TasksTupleWithEpochsTest |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/util/LambdaSafeTests.java | {
"start": 17134,
"end": 17213
} | interface ____ extends GenericCallback<StringBuilder> {
}
| StringBuilderCallback |
java | apache__spark | core/src/main/java/org/apache/spark/shuffle/sort/io/LocalDiskShuffleDataIO.java | {
"start": 1266,
"end": 1693
} | class ____ implements ShuffleDataIO {
private final SparkConf sparkConf;
public LocalDiskShuffleDataIO(SparkConf sparkConf) {
this.sparkConf = sparkConf;
}
@Override
public ShuffleExecutorComponents executor() {
return new LocalDiskShuffleExecutorComponents(sparkConf);
}
@Override
public ShuffleDriverComponents driver() {
return new LocalDiskShuffleDriverComponents();
}
}
| LocalDiskShuffleDataIO |
java | spring-projects__spring-boot | module/spring-boot-graphql/src/main/java/org/springframework/boot/graphql/autoconfigure/rsocket/GraphQlRSocketAutoConfiguration.java | {
"start": 4834,
"end": 4973
} | class ____ {
}
@ConditionalOnProperty(name = "spring.graphql.rsocket.preferred-json-mapper", havingValue = "jackson2")
static | NoJackson |
java | apache__maven | impl/maven-impl/src/main/java/org/apache/maven/api/services/model/ProfileInjector.java | {
"start": 1144,
"end": 2931
} | interface ____ {
/**
* Merges values from the specified profile into the given model. Implementations are expected to keep the profile
* and model completely decoupled by injecting deep copies rather than the original objects from the profile.
*
* @param model The model into which to merge the values defined by the profile, must not be <code>null</code>.
* @param profile The (read-only) profile whose values should be injected, may be <code>null</code>.
* @param request The model building request that holds further settings, must not be {@code null}.
* @param problems The container used to collect problems that were encountered, must not be {@code null}.
*/
default Model injectProfile(
Model model, Profile profile, ModelBuilderRequest request, ModelProblemCollector problems) {
return injectProfiles(model, List.of(profile), request, problems);
}
/**
* Merges values from the specified profile into the given model. Implementations are expected to keep the profile
* and model completely decoupled by injecting deep copies rather than the original objects from the profile.
*
* @param model The model into which to merge the values defined by the profile, must not be <code>null</code>.
* @param profiles The (read-only) list of profiles whose values should be injected, must not be <code>null</code>.
* @param request The model building request that holds further settings, must not be {@code null}.
* @param problems The container used to collect problems that were encountered, must not be {@code null}.
*/
Model injectProfiles(
Model model, List<Profile> profiles, ModelBuilderRequest request, ModelProblemCollector problems);
}
| ProfileInjector |
java | quarkusio__quarkus | extensions/smallrye-reactive-messaging-pulsar/deployment/src/test/java/io/quarkus/smallrye/reactivemessaging/pulsar/deployment/DefaultSchemaConfigTest.java | {
"start": 78804,
"end": 79311
} | class ____ {
@Incoming("channel1")
void method1(CustomDto msg) {
}
@Incoming("channel2")
void method2(String msg) {
}
}
@Test
void targetedOutgoings() {
Tuple[] expectations = {
tuple("mp.messaging.incoming.channel1.schema", "STRING"),
tuple("mp.messaging.incoming.channel2.schema", "STRING"),
};
doTest(expectations, TargetedOutgoings.class);
}
private static | ObjectMapperSchema |
java | spring-projects__spring-security | itest/context/src/main/java/org/springframework/security/integration/multiannotation/PreAuthorizeServiceImpl.java | {
"start": 731,
"end": 849
} | class ____ implements PreAuthorizeService {
@Override
public void preAuthorizedMethod() {
}
}
| PreAuthorizeServiceImpl |
java | apache__hadoop | hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/test/PublicDatasetTestUtils.java | {
"start": 1926,
"end": 6798
} | class ____ {
/**
* Private constructor for utility class.
*/
private PublicDatasetTestUtils() {}
/**
* Default path for an object inside a requester pays bucket: {@value}.
*/
private static final String DEFAULT_REQUESTER_PAYS_FILE
= "s3a://usgs-landsat/collection02/catalog.json";
/**
* Default bucket name for the requester pays bucket.
* Value = {@value}.
*/
public static final String DEFAULT_REQUESTER_PAYS_BUCKET_NAME =
"usgs-landsat";
/**
* Default bucket for an S3A file system with many objects: {@value}.
*
* We use a subdirectory to ensure we have permissions on all objects
* contained within as well as permission to inspect the directory itself.
*/
private static final String DEFAULT_BUCKET_WITH_MANY_OBJECTS
= "s3a://usgs-landsat/collection02/level-1/";
/**
* ORC dataset: {@value}.
*/
private static final Path ORC_DATA = new Path("s3a://osm-pds/planet/planet-latest.orc");
/**
* Provide a Path for some ORC data.
*
* @param conf Hadoop configuration
* @return S3A FS URI
*/
public static Path getOrcData(Configuration conf) {
return ORC_DATA;
}
/**
* Default path for the external test file: {@value}.
* This must be: gzipped, large enough for the performance
* tests and in a read-only bucket with anonymous access.
* */
public static final String DEFAULT_EXTERNAL_FILE =
"s3a://noaa-cors-pds/raw/2023/017/ohfh/OHFH017d.23_.gz";
/**
* Get the external test file.
* <p>
* This must be: gzipped, large enough for the performance
* tests and in a read-only bucket with anon
* @param conf configuration
* @return a dataset which meets the requirements.
*/
public static Path getExternalData(Configuration conf) {
return new Path(fetchFromConfig(conf,
S3ATestConstants.KEY_CSVTEST_FILE, DEFAULT_EXTERNAL_FILE));
}
/**
* Get the anonymous dataset..
* @param conf configuration
* @return a dataset which supports anonymous access.
*/
public static Path requireAnonymousDataPath(Configuration conf) {
return requireDefaultExternalData(conf);
}
/**
* Get the external test file; assume() that it is not modified (i.e. we haven't
* switched to a new storage infrastructure where the bucket is no longer
* read only).
* @return test file.
* @param conf test configuration
*/
public static String requireDefaultExternalDataFile(Configuration conf) {
String filename = getExternalData(conf).toUri().toString();
assumeThat(filename)
.as("External test file is not the default")
.isEqualTo(DEFAULT_EXTERNAL_FILE);
return filename;
}
/**
* To determine whether {@value S3ATestConstants#KEY_CSVTEST_FILE} is configured to be
* different from the default external file.
*
* @param conf Configuration object.
* @return True if the default external data file is being used.
*/
public static boolean isUsingDefaultExternalDataFile(final Configuration conf) {
final String filename = getExternalData(conf).toUri().toString();
return DEFAULT_EXTERNAL_FILE.equals(filename);
}
/**
* Get the test external file; assume() that it is not modified (i.e. we haven't
* switched to a new storage infrastructure where the bucket is no longer
* read only).
* @param conf test configuration
* @return test file as a path.
*/
public static Path requireDefaultExternalData(Configuration conf) {
return new Path(requireDefaultExternalDataFile(conf));
}
/**
* Provide a URI for a directory containing many objects.
*
* Unless otherwise configured,
* this will be {@value DEFAULT_BUCKET_WITH_MANY_OBJECTS}.
*
* @param conf Hadoop configuration
* @return S3A FS URI
*/
public static String getBucketPrefixWithManyObjects(Configuration conf) {
return fetchFromConfig(conf,
KEY_BUCKET_WITH_MANY_OBJECTS, DEFAULT_BUCKET_WITH_MANY_OBJECTS);
}
/**
* Provide a URI to an object within a requester pays enabled bucket.
*
* Unless otherwise configured,
* this will be {@value DEFAULT_REQUESTER_PAYS_FILE}.
*
* @param conf Hadoop configuration
* @return S3A FS URI
*/
public static String getRequesterPaysObject(Configuration conf) {
return fetchFromConfig(conf,
KEY_REQUESTER_PAYS_FILE, DEFAULT_REQUESTER_PAYS_FILE);
}
/**
* Fetch a trimmed configuration value, require it to to be non-empty.
* @param conf configuration file
* @param key key
* @param defaultValue default value.
* @return the resolved value.
*/
private static String fetchFromConfig(Configuration conf, String key, String defaultValue) {
String value = conf.getTrimmed(key, defaultValue);
S3ATestUtils.assume("Empty test property: " + key, !value.isEmpty());
return value;
}
}
| PublicDatasetTestUtils |
java | elastic__elasticsearch | x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/stats/ProgressTrackerTests.java | {
"start": 718,
"end": 10550
} | class ____ extends ESTestCase {
public void testCtor() {
List<PhaseProgress> phases = Collections.unmodifiableList(
Arrays.asList(
new PhaseProgress("reindexing", 10),
new PhaseProgress("loading_data", 20),
new PhaseProgress("a", 30),
new PhaseProgress("b", 40),
new PhaseProgress("writing_results", 50)
)
);
ProgressTracker progressTracker = new ProgressTracker(phases);
assertThat(progressTracker.report(), equalTo(phases));
}
public void testFromZeroes() {
ProgressTracker progressTracker = ProgressTracker.fromZeroes(Arrays.asList("a", "b", "c"), false);
List<PhaseProgress> phases = progressTracker.report();
assertThat(phases.size(), equalTo(6));
assertThat(
phases.stream().map(PhaseProgress::getPhase).collect(Collectors.toList()),
contains("reindexing", "loading_data", "a", "b", "c", "writing_results")
);
assertThat(phases.stream().map(PhaseProgress::getProgressPercent).allMatch(p -> p == 0), is(true));
}
public void testFromZeroes_GivenAnalysisWithoutInference() {
ProgressTracker progressTracker = ProgressTracker.fromZeroes(Arrays.asList("a", "b"), false);
List<PhaseProgress> phaseProgresses = progressTracker.report();
assertThat(phaseProgresses.size(), equalTo(5));
assertThat(
phaseProgresses.stream().map(PhaseProgress::getPhase).collect(Collectors.toList()),
contains("reindexing", "loading_data", "a", "b", "writing_results")
);
}
public void testFromZeroes_GivenAnalysisWithInference() {
ProgressTracker progressTracker = ProgressTracker.fromZeroes(Arrays.asList("a", "b"), true);
List<PhaseProgress> phaseProgresses = progressTracker.report();
assertThat(phaseProgresses.size(), equalTo(6));
assertThat(
phaseProgresses.stream().map(PhaseProgress::getPhase).collect(Collectors.toList()),
contains("reindexing", "loading_data", "a", "b", "writing_results", "inference")
);
}
public void testUpdates() {
ProgressTracker progressTracker = ProgressTracker.fromZeroes(Collections.singletonList("foo"), false);
progressTracker.updateReindexingProgress(1);
progressTracker.updateLoadingDataProgress(2);
progressTracker.updatePhase(new PhaseProgress("foo", 3));
progressTracker.updateWritingResultsProgress(4);
assertThat(progressTracker.getReindexingProgressPercent(), equalTo(1));
assertThat(progressTracker.getWritingResultsProgressPercent(), equalTo(4));
List<PhaseProgress> phases = progressTracker.report();
assertThat(phases.size(), equalTo(4));
assertThat(
phases.stream().map(PhaseProgress::getPhase).collect(Collectors.toList()),
contains("reindexing", "loading_data", "foo", "writing_results")
);
assertThat(phases.get(0).getProgressPercent(), equalTo(1));
assertThat(phases.get(1).getProgressPercent(), equalTo(2));
assertThat(phases.get(2).getProgressPercent(), equalTo(3));
assertThat(phases.get(3).getProgressPercent(), equalTo(4));
}
public void testUpdatePhase_GivenUnknownPhase() {
ProgressTracker progressTracker = ProgressTracker.fromZeroes(Collections.singletonList("foo"), false);
progressTracker.updatePhase(new PhaseProgress("unknown", 42));
List<PhaseProgress> phases = progressTracker.report();
assertThat(phases.size(), equalTo(4));
assertThat(
phases.stream().map(PhaseProgress::getPhase).collect(Collectors.toList()),
contains("reindexing", "loading_data", "foo", "writing_results")
);
}
public void testUpdateReindexingProgress_GivenLowerValueThanCurrentProgress() {
ProgressTracker progressTracker = ProgressTracker.fromZeroes(Collections.singletonList("foo"), false);
progressTracker.updateReindexingProgress(10);
progressTracker.updateReindexingProgress(11);
assertThat(progressTracker.getReindexingProgressPercent(), equalTo(11));
progressTracker.updateReindexingProgress(10);
assertThat(progressTracker.getReindexingProgressPercent(), equalTo(11));
}
public void testUpdateLoadingDataProgress_GivenLowerValueThanCurrentProgress() {
ProgressTracker progressTracker = ProgressTracker.fromZeroes(Collections.singletonList("foo"), false);
progressTracker.updateLoadingDataProgress(20);
progressTracker.updateLoadingDataProgress(21);
assertThat(progressTracker.getLoadingDataProgressPercent(), equalTo(21));
progressTracker.updateLoadingDataProgress(20);
assertThat(progressTracker.getLoadingDataProgressPercent(), equalTo(21));
}
public void testUpdateWritingResultsProgress_GivenLowerValueThanCurrentProgress() {
ProgressTracker progressTracker = ProgressTracker.fromZeroes(Collections.singletonList("foo"), false);
progressTracker.updateWritingResultsProgress(30);
progressTracker.updateWritingResultsProgress(31);
assertThat(progressTracker.getWritingResultsProgressPercent(), equalTo(31));
progressTracker.updateWritingResultsProgress(30);
assertThat(progressTracker.getWritingResultsProgressPercent(), equalTo(31));
}
public void testUpdatePhase_GivenLowerValueThanCurrentProgress() {
ProgressTracker progressTracker = ProgressTracker.fromZeroes(Collections.singletonList("foo"), false);
progressTracker.updatePhase(new PhaseProgress("foo", 40));
progressTracker.updatePhase(new PhaseProgress("foo", 41));
assertThat(getProgressForPhase(progressTracker, "foo"), equalTo(41));
progressTracker.updatePhase(new PhaseProgress("foo", 40));
assertThat(getProgressForPhase(progressTracker, "foo"), equalTo(41));
}
public void testResetForInference_GivenInference() {
ProgressTracker progressTracker = ProgressTracker.fromZeroes(Arrays.asList("a", "b"), true);
progressTracker.updateReindexingProgress(10);
progressTracker.updateLoadingDataProgress(20);
progressTracker.updatePhase(new PhaseProgress("a", 30));
progressTracker.updatePhase(new PhaseProgress("b", 40));
progressTracker.updateWritingResultsProgress(50);
progressTracker.updateInferenceProgress(60);
progressTracker.resetForInference();
List<PhaseProgress> progress = progressTracker.report();
assertThat(
progress,
contains(
new PhaseProgress(ProgressTracker.REINDEXING, 100),
new PhaseProgress(ProgressTracker.LOADING_DATA, 100),
new PhaseProgress("a", 100),
new PhaseProgress("b", 100),
new PhaseProgress(ProgressTracker.WRITING_RESULTS, 100),
new PhaseProgress(ProgressTracker.INFERENCE, 0)
)
);
}
public void testResetForInference_GivenNoInference() {
ProgressTracker progressTracker = ProgressTracker.fromZeroes(Arrays.asList("a", "b"), false);
progressTracker.updateReindexingProgress(10);
progressTracker.updateLoadingDataProgress(20);
progressTracker.updatePhase(new PhaseProgress("a", 30));
progressTracker.updatePhase(new PhaseProgress("b", 40));
progressTracker.updateWritingResultsProgress(50);
progressTracker.resetForInference();
List<PhaseProgress> progress = progressTracker.report();
assertThat(
progress,
contains(
new PhaseProgress(ProgressTracker.REINDEXING, 100),
new PhaseProgress(ProgressTracker.LOADING_DATA, 100),
new PhaseProgress("a", 100),
new PhaseProgress("b", 100),
new PhaseProgress(ProgressTracker.WRITING_RESULTS, 100)
)
);
}
public void testAreAllPhasesExceptInferenceComplete_GivenComplete() {
ProgressTracker progressTracker = ProgressTracker.fromZeroes(Collections.singletonList("a"), true);
progressTracker.updateReindexingProgress(100);
progressTracker.updateLoadingDataProgress(100);
progressTracker.updatePhase(new PhaseProgress("a", 100));
progressTracker.updateWritingResultsProgress(100);
progressTracker.updateInferenceProgress(50);
assertThat(progressTracker.areAllPhasesExceptInferenceComplete(), is(true));
}
public void testAreAllPhasesExceptInferenceComplete_GivenNotComplete() {
Map<String, Integer> phasePerProgress = new LinkedHashMap<>();
phasePerProgress.put(ProgressTracker.REINDEXING, 100);
phasePerProgress.put(ProgressTracker.LOADING_DATA, 100);
phasePerProgress.put("a", 100);
phasePerProgress.put(ProgressTracker.WRITING_RESULTS, 100);
String nonCompletePhase = randomFrom(phasePerProgress.keySet());
phasePerProgress.put(ProgressTracker.INFERENCE, 50);
phasePerProgress.put(nonCompletePhase, randomIntBetween(0, 99));
ProgressTracker progressTracker = new ProgressTracker(
phasePerProgress.entrySet()
.stream()
.map(entry -> new PhaseProgress(entry.getKey(), entry.getValue()))
.collect(Collectors.toList())
);
assertThat(progressTracker.areAllPhasesExceptInferenceComplete(), is(false));
}
private static int getProgressForPhase(ProgressTracker progressTracker, String phase) {
return progressTracker.report().stream().filter(p -> p.getPhase().equals(phase)).findFirst().get().getProgressPercent();
}
}
| ProgressTrackerTests |
java | bumptech__glide | library/src/main/java/com/bumptech/glide/load/model/MultiModelLoaderFactory.java | {
"start": 746,
"end": 7153
} | class ____ {
private static final Factory DEFAULT_FACTORY = new Factory();
private static final ModelLoader<Object, Object> EMPTY_MODEL_LOADER = new EmptyModelLoader();
private final List<Entry<?, ?>> entries = new ArrayList<>();
private final Factory factory;
private final Set<Entry<?, ?>> alreadyUsedEntries = new HashSet<>();
private final Pool<List<Throwable>> throwableListPool;
public MultiModelLoaderFactory(@NonNull Pool<List<Throwable>> throwableListPool) {
this(throwableListPool, DEFAULT_FACTORY);
}
@VisibleForTesting
MultiModelLoaderFactory(
@NonNull Pool<List<Throwable>> throwableListPool, @NonNull Factory factory) {
this.throwableListPool = throwableListPool;
this.factory = factory;
}
synchronized <Model, Data> void append(
@NonNull Class<Model> modelClass,
@NonNull Class<Data> dataClass,
@NonNull ModelLoaderFactory<? extends Model, ? extends Data> factory) {
add(modelClass, dataClass, factory, /* append= */ true);
}
synchronized <Model, Data> void prepend(
@NonNull Class<Model> modelClass,
@NonNull Class<Data> dataClass,
@NonNull ModelLoaderFactory<? extends Model, ? extends Data> factory) {
add(modelClass, dataClass, factory, /* append= */ false);
}
private <Model, Data> void add(
@NonNull Class<Model> modelClass,
@NonNull Class<Data> dataClass,
@NonNull ModelLoaderFactory<? extends Model, ? extends Data> factory,
boolean append) {
Entry<Model, Data> entry = new Entry<>(modelClass, dataClass, factory);
entries.add(append ? entries.size() : 0, entry);
}
@NonNull
synchronized <Model, Data> List<ModelLoaderFactory<? extends Model, ? extends Data>> replace(
@NonNull Class<Model> modelClass,
@NonNull Class<Data> dataClass,
@NonNull ModelLoaderFactory<? extends Model, ? extends Data> factory) {
List<ModelLoaderFactory<? extends Model, ? extends Data>> removed =
remove(modelClass, dataClass);
append(modelClass, dataClass, factory);
return removed;
}
@NonNull
synchronized <Model, Data> List<ModelLoaderFactory<? extends Model, ? extends Data>> remove(
@NonNull Class<Model> modelClass, @NonNull Class<Data> dataClass) {
List<ModelLoaderFactory<? extends Model, ? extends Data>> factories = new ArrayList<>();
for (Iterator<Entry<?, ?>> iterator = entries.iterator(); iterator.hasNext(); ) {
Entry<?, ?> entry = iterator.next();
if (entry.handles(modelClass, dataClass)) {
iterator.remove();
factories.add(this.<Model, Data>getFactory(entry));
}
}
return factories;
}
@NonNull
synchronized <Model> List<ModelLoader<Model, ?>> build(@NonNull Class<Model> modelClass) {
try {
List<ModelLoader<Model, ?>> loaders = new ArrayList<>();
for (Entry<?, ?> entry : entries) {
// Avoid stack overflow recursively creating model loaders by only creating loaders in
// recursive requests if they haven't been created earlier in the chain. For example:
// A Uri loader may translate to another model, which in turn may translate back to a Uri.
// The original Uri loader won't be provided to the intermediate model loader, although
// other Uri loaders will be.
if (alreadyUsedEntries.contains(entry)) {
continue;
}
if (entry.handles(modelClass)) {
alreadyUsedEntries.add(entry);
loaders.add(this.<Model, Object>build(entry));
alreadyUsedEntries.remove(entry);
}
}
return loaders;
} catch (Throwable t) {
alreadyUsedEntries.clear();
throw t;
}
}
@NonNull
synchronized List<Class<?>> getDataClasses(@NonNull Class<?> modelClass) {
List<Class<?>> result = new ArrayList<>();
for (Entry<?, ?> entry : entries) {
if (!result.contains(entry.dataClass) && entry.handles(modelClass)) {
result.add(entry.dataClass);
}
}
return result;
}
@NonNull
public synchronized <Model, Data> ModelLoader<Model, Data> build(
@NonNull Class<Model> modelClass, @NonNull Class<Data> dataClass) {
try {
List<ModelLoader<Model, Data>> loaders = new ArrayList<>();
boolean ignoredAnyEntries = false;
for (Entry<?, ?> entry : entries) {
// Avoid stack overflow recursively creating model loaders by only creating loaders in
// recursive requests if they haven't been created earlier in the chain. For example:
// A Uri loader may translate to another model, which in turn may translate back to a Uri.
// The original Uri loader won't be provided to the intermediate model loader, although
// other Uri loaders will be.
if (alreadyUsedEntries.contains(entry)) {
ignoredAnyEntries = true;
continue;
}
if (entry.handles(modelClass, dataClass)) {
alreadyUsedEntries.add(entry);
loaders.add(this.<Model, Data>build(entry));
alreadyUsedEntries.remove(entry);
}
}
if (loaders.size() > 1) {
return factory.build(loaders, throwableListPool);
} else if (loaders.size() == 1) {
return loaders.get(0);
} else {
// Avoid crashing if recursion results in no loaders available. The assertion is supposed to
// catch completely unhandled types, recursion may mean a subtype isn't handled somewhere
// down the stack, which is often ok.
if (ignoredAnyEntries) {
return emptyModelLoader();
} else {
throw new NoModelLoaderAvailableException(modelClass, dataClass);
}
}
} catch (Throwable t) {
alreadyUsedEntries.clear();
throw t;
}
}
@NonNull
@SuppressWarnings("unchecked")
private <Model, Data> ModelLoaderFactory<Model, Data> getFactory(@NonNull Entry<?, ?> entry) {
return (ModelLoaderFactory<Model, Data>) entry.factory;
}
@NonNull
@SuppressWarnings("unchecked")
private <Model, Data> ModelLoader<Model, Data> build(@NonNull Entry<?, ?> entry) {
return (ModelLoader<Model, Data>) Preconditions.checkNotNull(entry.factory.build(this));
}
@NonNull
@SuppressWarnings("unchecked")
private static <Model, Data> ModelLoader<Model, Data> emptyModelLoader() {
return (ModelLoader<Model, Data>) EMPTY_MODEL_LOADER;
}
private static | MultiModelLoaderFactory |
java | spring-projects__spring-framework | spring-test/src/main/java/org/springframework/test/web/client/ResponseCreator.java | {
"start": 1146,
"end": 1356
} | interface ____ {
/**
* Create a response for the given request.
* @param request the request
*/
ClientHttpResponse createResponse(@Nullable ClientHttpRequest request) throws IOException;
}
| ResponseCreator |
java | apache__hadoop | hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java | {
"start": 5842,
"end": 24383
} | class ____ implements Closeable, MetricsSource,
CountersAndGauges, IOStatisticsSource {
private static final Logger LOG = LoggerFactory.getLogger(
S3AInstrumentation.class);
private static final String METRICS_SOURCE_BASENAME = "S3AMetrics";
/**
* {@value} The name of the s3a-specific metrics
* system instance used for s3a metrics.
*/
public static final String METRICS_SYSTEM_NAME = "s3a-file-system";
/**
* {@value} Currently all s3a metrics are placed in a single
* "context". Distinct contexts may be used in the future.
*/
public static final String CONTEXT = "s3aFileSystem";
/**
* {@value} The name of a field added to metrics
* records that uniquely identifies a specific FileSystem instance.
*/
public static final String METRIC_TAG_FILESYSTEM_ID = "s3aFileSystemId";
/**
* {@value} The name of a field added to metrics records
* that indicates the hostname portion of the FS URL.
*/
public static final String METRIC_TAG_BUCKET = "bucket";
// metricsSystemLock must be used to synchronize modifications to
// metricsSystem and the following counters.
private static final Object METRICS_SYSTEM_LOCK = new Object();
private static MetricsSystem metricsSystem = null;
private static int metricsSourceNameCounter = 0;
private static int metricsSourceActiveCounter = 0;
private final DurationTrackerFactory durationTrackerFactory;
/**
* Weak reference so there's no back reference to the instrumentation.
*/
private WeakRefMetricsSource metricsSourceReference;
private final MetricsRegistry registry =
new MetricsRegistry("s3aFileSystem").setContext(CONTEXT);
private final MutableQuantiles throttleRateQuantile;
/**
* This is the IOStatistics store for the S3AFileSystem
* instance.
* It is not kept in sync with the rest of the S3A instrumentation.
* Most inner statistics implementation classes only update this
* store when it is pushed back, such as as in close().
*/
private final IOStatisticsStore instanceIOStatistics;
/**
* Construct the instrumentation for a filesystem.
* @param name URI of filesystem.
*/
public S3AInstrumentation(URI name) {
UUID fileSystemInstanceId = UUID.randomUUID();
registry.tag(METRIC_TAG_FILESYSTEM_ID,
"A unique identifier for the instance",
fileSystemInstanceId.toString());
registry.tag(METRIC_TAG_BUCKET, "Hostname from the FS URL", name.getHost());
// now set up the instance IOStatistics.
// create the builder
IOStatisticsStoreBuilder storeBuilder = iostatisticsStore();
// declare all counter statistics
EnumSet.allOf(Statistic.class).stream()
.filter(statistic ->
statistic.getType() == StatisticTypeEnum.TYPE_COUNTER)
.forEach(stat -> {
counter(stat);
storeBuilder.withCounters(stat.getSymbol());
});
// declare all gauge statistics
EnumSet.allOf(Statistic.class).stream()
.filter(statistic ->
statistic.getType() == StatisticTypeEnum.TYPE_GAUGE)
.forEach(stat -> {
gauge(stat);
storeBuilder.withGauges(stat.getSymbol());
});
// and durations
EnumSet.allOf(Statistic.class).stream()
.filter(statistic ->
statistic.getType() == StatisticTypeEnum.TYPE_DURATION)
.forEach(stat -> {
duration(stat);
storeBuilder.withDurationTracking(stat.getSymbol());
});
//todo need a config for the quantiles interval?
int interval = 1;
throttleRateQuantile = quantiles(STORE_IO_THROTTLE_RATE,
"events", "frequency (Hz)", interval);
// register with Hadoop metrics
registerAsMetricsSource(name);
// and build the IO Statistics
instanceIOStatistics = storeBuilder.build();
// duration track metrics (Success/failure) and IOStatistics.
durationTrackerFactory = IOStatisticsBinding.pairedTrackerFactory(
instanceIOStatistics,
new MetricDurationTrackerFactory());
}
/**
* Get the current metrics system; demand creating.
* @return a metric system, creating if need be.
*/
@VisibleForTesting
static MetricsSystem getMetricsSystem() {
synchronized (METRICS_SYSTEM_LOCK) {
if (metricsSystem == null) {
metricsSystem = new MetricsSystemImpl();
metricsSystem.init(METRICS_SYSTEM_NAME);
LOG.debug("Metrics system inited {}", metricsSystem);
}
}
return metricsSystem;
}
/**
* Does the instrumentation have a metrics system?
* @return true if the metrics system is present.
*/
@VisibleForTesting
static boolean hasMetricSystem() {
return metricsSystem != null;
}
/**
* Register this instance as a metrics source via a weak reference.
* @param name s3a:// URI for the associated FileSystem instance
*/
private void registerAsMetricsSource(URI name) {
int number;
synchronized(METRICS_SYSTEM_LOCK) {
getMetricsSystem();
metricsSourceActiveCounter++;
number = ++metricsSourceNameCounter;
}
String msName = METRICS_SOURCE_BASENAME + number;
String metricsSourceName = msName + "-" + name.getHost();
metricsSourceReference = new WeakRefMetricsSource(metricsSourceName, this);
metricsSystem.register(metricsSourceName, "", metricsSourceReference);
}
/**
* Create a counter in the registry.
* @param name counter name
* @param desc counter description
* @return a new counter
*/
protected final MutableCounterLong counter(String name, String desc) {
return registry.newCounter(name, desc, 0L);
}
/**
* Create a counter in the registry.
* @param op statistic to count
* @return a new counter
*/
protected final MutableCounterLong counter(Statistic op) {
return counter(op.getSymbol(), op.getDescription());
}
/**
* Registering a duration adds the success and failure counters.
* @param op statistic to track
*/
protected final void duration(Statistic op) {
counter(op.getSymbol(), op.getDescription());
counter(op.getSymbol() + SUFFIX_FAILURES, op.getDescription());
}
/**
* Create a gauge in the registry.
* @param name name gauge name
* @param desc description
* @return the gauge
*/
protected final MutableGaugeLong gauge(String name, String desc) {
return registry.newGauge(name, desc, 0L);
}
/**
* Create a gauge in the registry.
* @param op statistic to count
* @return the gauge
*/
protected final MutableGaugeLong gauge(Statistic op) {
return gauge(op.getSymbol(), op.getDescription());
}
/**
* Create a quantiles in the registry.
* @param op statistic to collect
* @param sampleName sample name of the quantiles
* @param valueName value name of the quantiles
* @param interval interval of the quantiles in seconds
* @return the created quantiles metric
*/
protected final MutableQuantiles quantiles(Statistic op,
String sampleName,
String valueName,
int interval) {
return registry.newQuantiles(op.getSymbol(), op.getDescription(),
sampleName, valueName, interval);
}
/**
* Get the metrics registry.
* @return the registry
*/
public MetricsRegistry getRegistry() {
return registry;
}
/**
* Dump all the metrics to a string.
* @param prefix prefix before every entry
* @param separator separator between name and value
* @param suffix suffix
* @param all get all the metrics even if the values are not changed.
* @return a string dump of the metrics
*/
public String dump(String prefix,
String separator,
String suffix,
boolean all) {
MetricStringBuilder metricBuilder = new MetricStringBuilder(null,
prefix,
separator, suffix);
registry.snapshot(metricBuilder, all);
return metricBuilder.toString();
}
/**
* Get the value of a counter.
* @param statistic the operation
* @return its value, or 0 if not found.
*/
public long getCounterValue(Statistic statistic) {
return getCounterValue(statistic.getSymbol());
}
/**
* Get the value of a counter.
* If the counter is null, return 0.
* @param name the name of the counter
* @return its value.
*/
public long getCounterValue(String name) {
MutableCounterLong counter = lookupCounter(name);
return counter == null ? 0 : counter.value();
}
/**
* Lookup a counter by name. Return null if it is not known.
* @param name counter name
* @return the counter
* @throws IllegalStateException if the metric is not a counter
*/
private MutableCounterLong lookupCounter(String name) {
MutableMetric metric = lookupMetric(name);
if (metric == null) {
return null;
}
if (!(metric instanceof MutableCounterLong)) {
throw new IllegalStateException("Metric " + name
+ " is not a MutableCounterLong: " + metric
+ " (type: " + metric.getClass() +")");
}
return (MutableCounterLong) metric;
}
/**
* Look up a gauge.
* @param name gauge name
* @return the gauge or null
* @throws ClassCastException if the metric is not a Gauge.
*/
public MutableGaugeLong lookupGauge(String name) {
MutableMetric metric = lookupMetric(name);
if (metric == null) {
LOG.debug("No gauge {}", name);
}
return (MutableGaugeLong) metric;
}
/**
* Look up a quantiles.
* @param name quantiles name
* @return the quantiles or null
* @throws ClassCastException if the metric is not a Quantiles.
*/
public MutableQuantiles lookupQuantiles(String name) {
MutableMetric metric = lookupMetric(name);
if (metric == null) {
LOG.debug("No quantiles {}", name);
}
return (MutableQuantiles) metric;
}
/**
* Look up a metric from both the registered set and the lighter weight
* stream entries.
* @param name metric name
* @return the metric or null
*/
public MutableMetric lookupMetric(String name) {
MutableMetric metric = getRegistry().get(name);
return metric;
}
/**
* Get the instance IO Statistics.
* @return statistics.
*/
@Override
public IOStatisticsStore getIOStatistics() {
return instanceIOStatistics;
}
/**
* Get the duration tracker factory.
* @return duration tracking for the instrumentation.
*/
public DurationTrackerFactory getDurationTrackerFactory() {
return durationTrackerFactory;
}
/**
* The duration tracker updates the metrics with the count
* and IOStatistics will full duration information.
* @param key statistic key prefix
* @param count #of times to increment the matching counter in this
* operation.
* @return a duration tracker.
*/
@Override
public DurationTracker trackDuration(final String key, final long count) {
return durationTrackerFactory.trackDuration(key, count);
}
/**
* Create an IOStatistics store which updates FS metrics
* as well as IOStatistics.
* @return instance of the store.
*/
public IOStatisticsStore createMetricsUpdatingStore() {
return new MetricsUpdatingIOStatisticsStore();
}
/**
* String representation. Includes the IOStatistics
* when logging is at DEBUG.
* @return a string form.
*/
@Override
public String toString() {
final StringBuilder sb = new StringBuilder(
"S3AInstrumentation{");
if (LOG.isDebugEnabled()) {
sb.append("instanceIOStatistics=").append(instanceIOStatistics);
}
sb.append('}');
return sb.toString();
}
/**
* Indicate that S3A created a file.
*/
public void fileCreated() {
incrementCounter(FILES_CREATED, 1);
}
/**
* Indicate that S3A deleted one or more files.
* @param count number of files.
*/
public void fileDeleted(int count) {
incrementCounter(FILES_DELETED, count);
}
/**
* Indicate that fake directory request was made.
* @param count number of directory entries included in the delete request.
*/
public void fakeDirsDeleted(int count) {
incrementCounter(FAKE_DIRECTORIES_DELETED, count);
}
/**
* Indicate that S3A created a directory.
*/
public void directoryCreated() {
incrementCounter(DIRECTORIES_CREATED, 1);
}
/**
* Indicate that S3A just deleted a directory.
*/
public void directoryDeleted() {
incrementCounter(DIRECTORIES_DELETED, 1);
}
/**
* Indicate that S3A copied some files within the store.
*
* @param files number of files
* @param size total size in bytes
*/
public void filesCopied(int files, long size) {
incrementCounter(FILES_COPIED, files);
incrementCounter(FILES_COPIED_BYTES, size);
}
/**
* Note that an error was ignored.
*/
public void errorIgnored() {
incrementCounter(IGNORED_ERRORS, 1);
}
/**
* Increments a mutable counter and the matching
* instance IOStatistics counter.
* No-op if the counter is not defined, or the count == 0.
* @param op operation
* @param count increment value
*/
public void incrementCounter(Statistic op, long count) {
incrementNamedCounter(op.getSymbol(), count);
}
/**
* Increments a mutable counter and the matching
* instance IOStatistics counter.
* No-op if the counter is not defined, or the count == 0.
* @param name counter name
* @param count increment value
* @return the updated value or, if the counter is unknown: 0
*/
private long incrementNamedCounter(final String name,
final long count) {
if (count != 0) {
incrementMutableCounter(name, count);
return instanceIOStatistics.incrementCounter(name, count);
} else {
return 0;
}
}
/**
* Increments a Mutable counter.
* No-op if not a positive integer.
* @param name counter name.
* @param count increment value
*/
private void incrementMutableCounter(final String name, final long count) {
if (count > 0) {
MutableCounterLong counter = lookupCounter(name);
if (counter != null) {
counter.incr(count);
}
}
}
/**
* Add a value to a quantiles statistic. No-op if the quantile
* isn't found.
* @param op operation to look up.
* @param value value to add.
* @throws ClassCastException if the metric is not a Quantiles.
*/
public void addValueToQuantiles(Statistic op, long value) {
MutableQuantiles quantiles = lookupQuantiles(op.getSymbol());
if (quantiles != null) {
quantiles.add(value);
}
}
/**
* Increments a mutable counter and the matching
* instance IOStatistics counter with the value of
* the atomic long.
* No-op if the counter is not defined, or the count == 0.
* @param op operation
* @param count atomic long containing value
*/
public void incrementCounter(Statistic op, AtomicLong count) {
incrementCounter(op, count.get());
}
/**
* Increment a specific gauge.
* No-op if not defined.
* @param op operation
* @param count increment value
* @throws ClassCastException if the metric is of the wrong type
*/
public void incrementGauge(Statistic op, long count) {
MutableGaugeLong gauge = lookupGauge(op.getSymbol());
if (gauge != null) {
gauge.incr(count);
} else {
LOG.debug("No Gauge: "+ op);
}
}
/**
* Decrement a specific gauge.
* No-op if not defined.
* @param op operation
* @param count increment value
* @throws ClassCastException if the metric is of the wrong type
*/
public void decrementGauge(Statistic op, long count) {
MutableGaugeLong gauge = lookupGauge(op.getSymbol());
if (gauge != null) {
gauge.decr(count);
} else {
LOG.debug("No Gauge: {}", op);
}
}
/**
* Add the duration as a timed statistic, deriving
* statistic name from the operation symbol and the outcome.
* @param op operation
* @param success was the operation a success?
* @param duration how long did it take
*/
@Override
public void recordDuration(final Statistic op,
final boolean success,
final Duration duration) {
String name = op.getSymbol()
+ (success ? "" : SUFFIX_FAILURES);
instanceIOStatistics.addTimedOperation(name, duration);
}
/**
* Create a stream input statistics instance.
* @return the new instance
* @param filesystemStatistics FS Statistics to update in close().
*/
public S3AInputStreamStatistics newInputStreamStatistics(
@Nullable final FileSystem.Statistics filesystemStatistics) {
return new InputStreamStatistics(filesystemStatistics);
}
/**
* Create a new instance of the committer statistics.
* @return a new committer statistics instance
*/
public CommitterStatistics newCommitterStatistics() {
return new CommitterStatisticsImpl();
}
@Override
public void getMetrics(MetricsCollector collector, boolean all) {
registry.snapshot(collector.addRecord(registry.info().name()), true);
}
/**
* if registered with the metrics, return the
* name of the source.
* @return the name of the metrics, or null if this instance is not bonded.
*/
public String getMetricSourceName() {
return metricsSourceReference != null
? metricsSourceReference.getName()
: null;
}
public void close() {
if (metricsSourceReference != null) {
// get the name
String name = metricsSourceReference.getName();
LOG.debug("Unregistering metrics for {}", name);
// then set to null so a second close() is a noop here.
metricsSourceReference = null;
synchronized (METRICS_SYSTEM_LOCK) {
// it is critical to close each quantile, as they start a scheduled
// task in a shared thread pool.
if (metricsSystem == null) {
LOG.debug("there is no metric system to unregister {} from", name);
return;
}
throttleRateQuantile.stop();
metricsSystem.unregisterSource(name);
metricsSourceActiveCounter--;
int activeSources = metricsSourceActiveCounter;
if (activeSources == 0) {
LOG.debug("Shutting down metrics publisher");
metricsSystem.publishMetricsNow();
metricsSystem.shutdown();
metricsSystem = null;
}
}
}
}
/**
* A duration tracker which updates a mutable counter with a metric.
* The metric is updated with the count on start; after a failure
* the failures count is incremented by one.
*/
private final | S3AInstrumentation |
java | greenrobot__EventBus | eventbus-android/src/main/java/org/greenrobot/eventbus/HandlerPoster.java | {
"start": 799,
"end": 2973
} | class ____ extends Handler implements Poster {
private final PendingPostQueue queue;
private final int maxMillisInsideHandleMessage;
private final EventBus eventBus;
private boolean handlerActive;
public HandlerPoster(EventBus eventBus, Looper looper, int maxMillisInsideHandleMessage) {
super(looper);
this.eventBus = eventBus;
this.maxMillisInsideHandleMessage = maxMillisInsideHandleMessage;
queue = new PendingPostQueue();
}
public void enqueue(Subscription subscription, Object event) {
PendingPost pendingPost = PendingPost.obtainPendingPost(subscription, event);
synchronized (this) {
queue.enqueue(pendingPost);
if (!handlerActive) {
handlerActive = true;
if (!sendMessage(obtainMessage())) {
throw new EventBusException("Could not send handler message");
}
}
}
}
@Override
public void handleMessage(Message msg) {
boolean rescheduled = false;
try {
long started = SystemClock.uptimeMillis();
while (true) {
PendingPost pendingPost = queue.poll();
if (pendingPost == null) {
synchronized (this) {
// Check again, this time in synchronized
pendingPost = queue.poll();
if (pendingPost == null) {
handlerActive = false;
return;
}
}
}
eventBus.invokeSubscriber(pendingPost);
long timeInMethod = SystemClock.uptimeMillis() - started;
if (timeInMethod >= maxMillisInsideHandleMessage) {
if (!sendMessage(obtainMessage())) {
throw new EventBusException("Could not send handler message");
}
rescheduled = true;
return;
}
}
} finally {
handlerActive = rescheduled;
}
}
} | HandlerPoster |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/appender/rolling/RollingFileManager.java | {
"start": 2427,
"end": 25369
} | class ____ extends FileManager {
private static final int MAX_TRIES = 3;
private static final int MIN_DURATION = 100;
private static final FileTime EPOCH = FileTime.fromMillis(0);
protected long size;
private long initialTime;
private volatile PatternProcessor patternProcessor;
private final Semaphore semaphore = new Semaphore(1);
private final Log4jThreadFactory threadFactory = Log4jThreadFactory.createThreadFactory("RollingFileManager");
private volatile TriggeringPolicy triggeringPolicy;
private volatile RolloverStrategy rolloverStrategy;
private volatile boolean renameEmptyFiles;
private volatile boolean initialized;
private volatile String fileName;
private final boolean directWrite;
private final CopyOnWriteArrayList<RolloverListener> rolloverListeners = new CopyOnWriteArrayList<>();
/* This executor pool will create a new Thread for every work async action to be performed. Using it allows
us to make sure all the Threads are completed when the Manager is stopped. */
private final ExecutorService asyncExecutor =
new ThreadPoolExecutor(0, Integer.MAX_VALUE, 0, TimeUnit.MILLISECONDS, new EmptyQueue(), threadFactory);
private static final AtomicReferenceFieldUpdater<RollingFileManager, TriggeringPolicy> triggeringPolicyUpdater =
AtomicReferenceFieldUpdater.newUpdater(
RollingFileManager.class, TriggeringPolicy.class, "triggeringPolicy");
private static final AtomicReferenceFieldUpdater<RollingFileManager, RolloverStrategy> rolloverStrategyUpdater =
AtomicReferenceFieldUpdater.newUpdater(
RollingFileManager.class, RolloverStrategy.class, "rolloverStrategy");
private static final AtomicReferenceFieldUpdater<RollingFileManager, PatternProcessor> patternProcessorUpdater =
AtomicReferenceFieldUpdater.newUpdater(
RollingFileManager.class, PatternProcessor.class, "patternProcessor");
@Deprecated
protected RollingFileManager(
final String fileName,
final String pattern,
final OutputStream os,
final boolean append,
final long size,
final long initialTime,
final TriggeringPolicy triggeringPolicy,
final RolloverStrategy rolloverStrategy,
final String advertiseURI,
final Layout<? extends Serializable> layout,
final int bufferSize,
final boolean writeHeader) {
this(
fileName,
pattern,
os,
append,
size,
initialTime,
triggeringPolicy,
rolloverStrategy,
advertiseURI,
layout,
writeHeader,
ByteBuffer.wrap(new byte[Constants.ENCODER_BYTE_BUFFER_SIZE]));
}
@Deprecated
protected RollingFileManager(
final String fileName,
final String pattern,
final OutputStream os,
final boolean append,
final long size,
final long initialTime,
final TriggeringPolicy triggeringPolicy,
final RolloverStrategy rolloverStrategy,
final String advertiseURI,
final Layout<? extends Serializable> layout,
final boolean writeHeader,
final ByteBuffer buffer) {
super(fileName != null ? fileName : pattern, os, append, false, advertiseURI, layout, writeHeader, buffer);
this.size = size;
this.initialTime = initialTime;
this.triggeringPolicy = triggeringPolicy;
this.rolloverStrategy = rolloverStrategy;
this.patternProcessor = new PatternProcessor(pattern);
this.patternProcessor.setPrevFileTime(initialTime);
this.fileName = fileName;
this.directWrite = rolloverStrategy instanceof DirectWriteRolloverStrategy;
}
@Deprecated
protected RollingFileManager(
final LoggerContext loggerContext,
final String fileName,
final String pattern,
final OutputStream os,
final boolean append,
final boolean createOnDemand,
final long size,
final long initialTime,
final TriggeringPolicy triggeringPolicy,
final RolloverStrategy rolloverStrategy,
final String advertiseURI,
final Layout<? extends Serializable> layout,
final boolean writeHeader,
final ByteBuffer buffer) {
super(
loggerContext,
fileName != null ? fileName : pattern,
os,
append,
false,
createOnDemand,
advertiseURI,
layout,
writeHeader,
buffer);
this.size = size;
this.initialTime = initialTime;
this.triggeringPolicy = triggeringPolicy;
this.rolloverStrategy = rolloverStrategy;
this.patternProcessor = new PatternProcessor(pattern);
this.patternProcessor.setPrevFileTime(initialTime);
this.fileName = fileName;
this.directWrite = rolloverStrategy instanceof DirectWriteRolloverStrategy;
}
/**
* @since 2.9
*/
protected RollingFileManager(
final LoggerContext loggerContext,
final String fileName,
final String pattern,
final OutputStream os,
final boolean append,
final boolean createOnDemand,
final long size,
final long initialTime,
final TriggeringPolicy triggeringPolicy,
final RolloverStrategy rolloverStrategy,
final String advertiseURI,
final Layout<? extends Serializable> layout,
final String filePermissions,
final String fileOwner,
final String fileGroup,
final boolean writeHeader,
final ByteBuffer buffer) {
super(
loggerContext,
fileName != null ? fileName : pattern,
os,
append,
false,
createOnDemand,
advertiseURI,
layout,
filePermissions,
fileOwner,
fileGroup,
writeHeader,
buffer);
this.size = size;
this.initialTime = initialTime;
this.patternProcessor = new PatternProcessor(pattern);
this.patternProcessor.setPrevFileTime(initialTime);
this.triggeringPolicy = triggeringPolicy;
this.rolloverStrategy = rolloverStrategy;
this.fileName = fileName;
this.directWrite = rolloverStrategy instanceof DirectFileRolloverStrategy;
}
@SuppressFBWarnings(
value = "PATH_TRAVERSAL_IN",
justification = "The name of the accessed files is based on a configuration value.")
public void initialize() {
if (!initialized) {
LOGGER.debug("Initializing triggering policy {}", triggeringPolicy);
initialized = true;
// LOG4J2-2981 - set the file size before initializing the triggering policy.
if (directWrite) {
// LOG4J2-2485: Initialize size from the most recently written file.
final File file = new File(getFileName());
if (file.exists()) {
size = file.length();
} else {
((DirectFileRolloverStrategy) rolloverStrategy).clearCurrentFileName();
}
}
triggeringPolicy.initialize(this);
if (triggeringPolicy instanceof LifeCycle) {
((LifeCycle) triggeringPolicy).start();
}
if (directWrite) {
// LOG4J2-2485: Initialize size from the most recently written file.
final File file = new File(getFileName());
if (file.exists()) {
size = file.length();
} else {
((DirectFileRolloverStrategy) rolloverStrategy).clearCurrentFileName();
}
}
}
}
/**
* Returns a RollingFileManager.
* @param fileName The file name.
* @param pattern The pattern for rolling file.
* @param append true if the file should be appended to.
* @param bufferedIO true if data should be buffered.
* @param policy The TriggeringPolicy.
* @param strategy The RolloverStrategy.
* @param advertiseURI the URI to use when advertising the file
* @param layout The Layout.
* @param bufferSize buffer size to use if bufferedIO is true
* @param immediateFlush flush on every write or not
* @param createOnDemand true if you want to lazy-create the file (a.k.a. on-demand.)
* @param filePermissions File permissions
* @param fileOwner File owner
* @param fileGroup File group
* @param configuration The configuration.
* @return A RollingFileManager.
*/
public static RollingFileManager getFileManager(
final String fileName,
final String pattern,
final boolean append,
final boolean bufferedIO,
final TriggeringPolicy policy,
final RolloverStrategy strategy,
final String advertiseURI,
final Layout<? extends Serializable> layout,
final int bufferSize,
final boolean immediateFlush,
final boolean createOnDemand,
final String filePermissions,
final String fileOwner,
final String fileGroup,
final Configuration configuration) {
if (strategy instanceof DirectWriteRolloverStrategy && fileName != null) {
LOGGER.error("The fileName attribute must not be specified with the DirectWriteRolloverStrategy");
return null;
}
String actualName = fileName == null ? pattern : fileName;
int actualBufferSize = bufferedIO ? bufferSize : Constants.ENCODER_BYTE_BUFFER_SIZE;
return narrow(
RollingFileManager.class,
getManager(
actualName,
(name, data) -> {
long size = 0;
File file = null;
if (fileName != null) {
file = new File(fileName);
try {
FileUtils.makeParentDirs(file);
final boolean created = createOnDemand ? false : file.createNewFile();
LOGGER.trace("New file '{}' created = {}", name, created);
} catch (final IOException ioe) {
LOGGER.error("Unable to create file {}", name, ioe);
return null;
}
size = append ? file.length() : 0;
}
try {
final ByteBuffer buffer = ByteBuffer.allocate(actualBufferSize);
final OutputStream os = createOnDemand || fileName == null
? null
: new FileOutputStream(fileName, append);
// LOG4J2-531 create file first so time has valid value.
final long initialTime = file == null || !file.exists() ? 0 : initialFileTime(file);
final boolean writeHeader = file != null && file.exists() && file.length() == 0;
final RollingFileManager rm = new RollingFileManager(
data.getLoggerContext(),
fileName,
data.getPattern(),
os,
append,
createOnDemand,
size,
initialTime,
data.getTriggeringPolicy(),
data.getRolloverStrategy(),
advertiseURI,
layout,
filePermissions,
fileOwner,
fileGroup,
writeHeader,
buffer);
if (os != null && rm.isAttributeViewEnabled()) {
rm.defineAttributeView(file.toPath());
}
return rm;
} catch (final IOException ex) {
LOGGER.error("RollingFileManager ({}): {}", name, ex.getMessage(), ex);
}
return null;
},
new FactoryData(pattern, policy, strategy, configuration)));
}
/**
* Add a RolloverListener.
* @param listener The RolloverListener.
*/
public void addRolloverListener(final RolloverListener listener) {
rolloverListeners.add(listener);
}
/**
* Remove a RolloverListener.
* @param listener The RolloverListener.
*/
public void removeRolloverListener(final RolloverListener listener) {
rolloverListeners.remove(listener);
}
/**
* Returns the name of the File being managed.
* @return The name of the File being managed.
*/
@Override
public String getFileName() {
if (directWrite) {
fileName = ((DirectFileRolloverStrategy) rolloverStrategy).getCurrentFileName(this);
}
return fileName;
}
@Override
protected void createParentDir(File file) {
if (directWrite) {
final File parent = file.getParentFile();
// If the parent is null the file is in the current working directory.
if (parent != null) {
parent.mkdirs();
}
}
}
public boolean isDirectWrite() {
return directWrite;
}
public FileExtension getFileExtension() {
return patternProcessor.getFileExtension();
}
// override to make visible for unit tests
@Override
protected synchronized void write(
final byte[] bytes, final int offset, final int length, final boolean immediateFlush) {
super.write(bytes, offset, length, immediateFlush);
}
@Override
protected synchronized void writeToDestination(final byte[] bytes, final int offset, final int length) {
size += length;
super.writeToDestination(bytes, offset, length);
}
public boolean isRenameEmptyFiles() {
return renameEmptyFiles;
}
public void setRenameEmptyFiles(final boolean renameEmptyFiles) {
this.renameEmptyFiles = renameEmptyFiles;
}
/**
* Returns the current size of the file.
* @return The size of the file in bytes.
*/
public long getFileSize() {
return size + byteBuffer.position();
}
/**
* Returns the time the file was created.
* @return The time the file was created.
*/
public long getFileTime() {
return initialTime;
}
/**
* Determines if a rollover should occur.
* @param event The LogEvent.
*/
public synchronized void checkRollover(final LogEvent event) {
if (triggeringPolicy.isTriggeringEvent(event)) {
rollover();
}
}
@Override
public boolean releaseSub(final long timeout, final TimeUnit timeUnit) {
LOGGER.debug("Shutting down RollingFileManager {}", getName());
boolean stopped = true;
if (triggeringPolicy instanceof LifeCycle2) {
stopped &= ((LifeCycle2) triggeringPolicy).stop(timeout, timeUnit);
} else if (triggeringPolicy instanceof LifeCycle) {
((LifeCycle) triggeringPolicy).stop();
stopped &= true;
}
final boolean status = super.releaseSub(timeout, timeUnit) && stopped;
asyncExecutor.shutdown();
try {
// Allow at least the minimum interval to pass so async actions can complete.
final long millis = timeUnit.toMillis(timeout);
final long waitInterval = MIN_DURATION < millis ? millis : MIN_DURATION;
for (int count = 1; count <= MAX_TRIES && !asyncExecutor.isTerminated(); ++count) {
asyncExecutor.awaitTermination(waitInterval * count, TimeUnit.MILLISECONDS);
}
if (asyncExecutor.isTerminated()) {
LOGGER.debug("All asynchronous threads have terminated");
} else {
asyncExecutor.shutdownNow();
try {
asyncExecutor.awaitTermination(timeout, timeUnit);
if (asyncExecutor.isTerminated()) {
LOGGER.debug("All asynchronous threads have terminated");
} else {
LOGGER.debug(
"RollingFileManager shutting down but some asynchronous services may not have completed");
}
} catch (final InterruptedException inner) {
LOGGER.warn("RollingFileManager stopped but some asynchronous services may not have completed.");
}
}
} catch (final InterruptedException ie) {
asyncExecutor.shutdownNow();
try {
asyncExecutor.awaitTermination(timeout, timeUnit);
if (asyncExecutor.isTerminated()) {
LOGGER.debug("All asynchronous threads have terminated");
}
} catch (final InterruptedException inner) {
LOGGER.warn("RollingFileManager stopped but some asynchronous services may not have completed.");
}
// Preserve interrupt status
Thread.currentThread().interrupt();
}
LOGGER.debug("RollingFileManager shutdown completed with status {}", status);
return status;
}
public synchronized void rollover(final Date prevFileTime, final Date prevRollTime) {
LOGGER.debug("Rollover PrevFileTime: {}, PrevRollTime: {}", prevFileTime.getTime(), prevRollTime.getTime());
getPatternProcessor().setPrevFileTime(prevFileTime.getTime());
getPatternProcessor().setCurrentFileTime(prevRollTime.getTime());
rollover();
}
public synchronized void rollover() {
if (!hasOutputStream() && !isCreateOnDemand() && !isDirectWrite()) {
return;
}
final String currentFileName = fileName;
if (rolloverListeners.size() > 0) {
for (RolloverListener listener : rolloverListeners) {
try {
listener.rolloverTriggered(currentFileName);
} catch (Exception ex) {
LOGGER.warn(
"Rollover Listener {} failed with {}: {}",
listener.getClass().getSimpleName(),
ex.getClass().getName(),
ex.getMessage());
}
}
}
final boolean interrupted = Thread.interrupted(); // clear interrupted state
try {
if (interrupted) {
LOGGER.warn("RollingFileManager cleared thread interrupted state, continue to rollover");
}
if (rollover(rolloverStrategy)) {
try {
size = 0;
initialTime = System.currentTimeMillis();
createFileAfterRollover();
} catch (final IOException e) {
logError("Failed to create file after rollover", e);
}
}
} finally {
if (interrupted) { // restore interrupted state
Thread.currentThread().interrupt();
}
}
if (rolloverListeners.size() > 0) {
for (RolloverListener listener : rolloverListeners) {
try {
listener.rolloverComplete(currentFileName);
} catch (Exception ex) {
LOGGER.warn(
"Rollover Listener {} failed with {}: {}",
listener.getClass().getSimpleName(),
ex.getClass().getName(),
ex.getMessage());
}
}
}
}
protected void createFileAfterRollover() throws IOException {
setOutputStream(createOutputStream());
}
/**
* Returns the pattern processor.
* @return The PatternProcessor.
*/
public PatternProcessor getPatternProcessor() {
return patternProcessor;
}
public void setTriggeringPolicy(final TriggeringPolicy triggeringPolicy) {
triggeringPolicy.initialize(this);
final TriggeringPolicy policy = this.triggeringPolicy;
int count = 0;
boolean policyUpdated = false;
do {
++count;
} while (!(policyUpdated = triggeringPolicyUpdater.compareAndSet(this, this.triggeringPolicy, triggeringPolicy))
&& count < MAX_TRIES);
if (policyUpdated) {
if (triggeringPolicy instanceof LifeCycle) {
((LifeCycle) triggeringPolicy).start();
}
if (policy instanceof LifeCycle) {
((LifeCycle) policy).stop();
}
} else if (triggeringPolicy instanceof LifeCycle) {
((LifeCycle) triggeringPolicy).stop();
}
}
public void setRolloverStrategy(final RolloverStrategy rolloverStrategy) {
rolloverStrategyUpdater.compareAndSet(this, this.rolloverStrategy, rolloverStrategy);
}
public void setPatternProcessor(final PatternProcessor patternProcessor) {
patternProcessorUpdater.compareAndSet(this, this.patternProcessor, patternProcessor);
}
/**
* Returns the triggering policy.
* @param <T> TriggeringPolicy type
* @return The TriggeringPolicy
*/
@SuppressWarnings("unchecked")
public <T extends TriggeringPolicy> T getTriggeringPolicy() {
// TODO We could parameterize this | RollingFileManager |
java | spring-projects__spring-boot | module/spring-boot-jackson2/src/test/java/org/springframework/boot/jackson2/autoconfigure/Jackson2AutoConfigurationTests.java | {
"start": 26228,
"end": 26540
} | class ____ extends SimpleModule {
private final Set<ObjectCodec> owners = new HashSet<>();
@Override
public void setupModule(SetupContext context) {
this.owners.add(context.getOwner());
}
Set<ObjectCodec> getOwners() {
return this.owners;
}
}
@SuppressWarnings("unused")
static | CustomModule |
java | apache__flink | flink-metrics/flink-metrics-jmx/src/test/java/org/apache/flink/metrics/jmx/JMXReporterFactoryTest.java | {
"start": 1197,
"end": 2472
} | class ____ {
@AfterEach
void shutdownService() throws IOException {
JMXService.stopInstance();
}
@Test
void testPortRangeArgument() {
Properties properties = new Properties();
properties.setProperty(JMXReporterFactory.ARG_PORT, "9000-9010");
JMXReporter metricReporter = new JMXReporterFactory().createMetricReporter(properties);
try {
assertThat(metricReporter.getPort())
.hasValueSatisfying(
port ->
assertThat(port)
.isGreaterThanOrEqualTo(9000)
.isLessThanOrEqualTo(9010));
} finally {
metricReporter.close();
}
}
@Test
void testWithoutArgument() {
JMXReporter metricReporter =
new JMXReporterFactory().createMetricReporter(new Properties());
try {
assertThat(metricReporter.getPort()).isEmpty();
} finally {
metricReporter.close();
}
}
@Test
void testMetricReporterSetupViaSPI() {
MetricReporterTestUtils.testMetricReporterSetupViaSPI(JMXReporterFactory.class);
}
}
| JMXReporterFactoryTest |
java | apache__camel | components/camel-servicenow/camel-servicenow-component/src/main/java/org/apache/camel/component/servicenow/releases/helsinki/HelsinkiServiceNowImportSetProcessor.java | {
"start": 1709,
"end": 6271
} | class ____ extends AbstractServiceNowProcessor {
HelsinkiServiceNowImportSetProcessor(ServiceNowEndpoint endpoint) throws Exception {
super(endpoint);
addDispatcher(ACTION_RETRIEVE, this::retrieveRecord);
addDispatcher(ACTION_CREATE, this::createRecord);
}
/*
* GET
* https://instance.service-now.com/api/now/import/{tableName}/{sys_id}
*/
private void retrieveRecord(Exchange exchange) throws Exception {
final Message in = exchange.getIn();
final String tableName = getTableName(in);
final String apiVersion = getApiVersion(in);
final Class<?> responseModel = getResponseModel(in, tableName);
final String sysId = getSysID(in);
Response response = client.reset()
.types(MediaType.APPLICATION_JSON_TYPE)
.path("now")
.path(apiVersion)
.path("import")
.path(ObjectHelper.notNull(tableName, "tableName"))
.path(ObjectHelper.notNull(sysId, "sysId"))
.query(responseModel)
.invoke(HttpMethod.GET);
setBodyAndHeaders(in, responseModel, response);
}
/*
* POST
* https://instance.service-now.com/api/now/import/{tableName}
*/
private void createRecord(Exchange exchange) throws Exception {
final Message in = exchange.getIn();
final String tableName = getTableName(in);
final String apiVersion = getApiVersion(in);
final Class<?> requestModel = getRequestModel(in, tableName);
final boolean retrieve = in.getHeader(ServiceNowConstants.RETRIEVE_TARGET_RECORD,
config::getRetrieveTargetRecordOnImport, Boolean.class);
Class<?> responseModel = getResponseModel(in, tableName);
Response response;
validateBody(in, requestModel);
if (retrieve) {
// If the endpoint is configured to retrieve the target record, the
// import response model is ignored and the response is ImportSetResponse
response = client.reset()
.types(MediaType.APPLICATION_JSON_TYPE)
.path("now")
.path(apiVersion)
.path("import")
.path(tableName)
.invoke(HttpMethod.POST, in.getMandatoryBody());
if (ObjectHelper.isNotEmpty(response.getHeaderString(HttpHeaders.CONTENT_TYPE))) {
for (ImportSetResult result : response.readEntity(ImportSetResponse.class).getResults()) {
final String status = result.getStatus();
final String table = result.getTable();
final String sysId = result.getSysId();
if (ObjectHelper.equalIgnoreCase("inserted", status)) {
// If the endpoint is configured to retrieve the target
// record, the response model is related to the target
// table
responseModel = getResponseModel(in, table);
// Do get the record
response = client.reset()
.types(MediaType.APPLICATION_JSON_TYPE)
.path("now")
.path(apiVersion)
.path("table")
.path(ObjectHelper.notNull(table, "table"))
.path(ObjectHelper.notNull(sysId, "sys_id"))
.query(ServiceNowParams.SYSPARM_DISPLAY_VALUE, in)
.query(ServiceNowParams.SYSPARM_EXCLUDE_REFERENCE_LINK, in)
.query(ServiceNowParams.SYSPARM_FIELDS, in)
.query(ServiceNowParams.SYSPARM_VIEW, in)
.query(responseModel)
.invoke(HttpMethod.GET);
break;
}
}
}
} else {
response = client.reset()
.types(MediaType.APPLICATION_JSON_TYPE)
.path("now")
.path(apiVersion)
.path("import")
.path(tableName)
.query(responseModel)
.invoke(HttpMethod.POST, in.getMandatoryBody());
}
setBodyAndHeaders(in, responseModel, response);
}
}
| HelsinkiServiceNowImportSetProcessor |
java | apache__camel | core/camel-api/src/main/java/org/apache/camel/spi/RestProducerFactory.java | {
"start": 1114,
"end": 2365
} | interface ____ {
/**
* Creates a new REST producer.
*
* @param camelContext the camel context
* @param host host in the syntax scheme:hostname:port, such as http:myserver:8080
* @param verb HTTP verb such as GET, POST
* @param basePath base path
* @param uriTemplate uri template
* @param queryParameters uri query parameters
* @param consumes media-types for what the REST service consume as input (accept-type), is <tt>null</tt> or
* <tt>*/*</tt> for anything
* @param produces media-types for what the REST service produces as output, can be <tt>null</tt>
* @param configuration REST configuration
* @param parameters additional parameters
* @return a newly created REST producer
* @throws Exception can be thrown
*/
Producer createProducer(
CamelContext camelContext, String host,
String verb, String basePath, String uriTemplate, String queryParameters,
String consumes, String produces, RestConfiguration configuration, Map<String, Object> parameters)
throws Exception;
}
| RestProducerFactory |
java | apache__kafka | clients/src/test/java/org/apache/kafka/clients/consumer/MockShareConsumerTest.java | {
"start": 1237,
"end": 2312
} | class ____ {
private final MockShareConsumer<String, String> consumer = new MockShareConsumer<>();
@Test
public void testSimpleMock() {
consumer.subscribe(Collections.singleton("test"));
assertEquals(0, consumer.poll(Duration.ZERO).count());
ConsumerRecord<String, String> rec1 = new ConsumerRecord<>("test", 0, 0, 0L, TimestampType.CREATE_TIME,
0, 0, "key1", "value1", new RecordHeaders(), Optional.empty());
ConsumerRecord<String, String> rec2 = new ConsumerRecord<>("test", 0, 1, 0L, TimestampType.CREATE_TIME,
0, 0, "key2", "value2", new RecordHeaders(), Optional.empty());
consumer.addRecord(rec1);
consumer.addRecord(rec2);
ConsumerRecords<String, String> recs = consumer.poll(Duration.ofMillis(1));
Iterator<ConsumerRecord<String, String>> iter = recs.iterator();
assertEquals(rec1, iter.next());
assertEquals(rec2, iter.next());
assertFalse(iter.hasNext());
assertEquals(0, recs.nextOffsets().size());
}
} | MockShareConsumerTest |
java | square__okhttp | samples/guide/src/main/java/okhttp3/recipes/LoggingInterceptors.java | {
"start": 808,
"end": 1328
} | class ____ {
private static final Logger logger = Logger.getLogger(LoggingInterceptors.class.getName());
private final OkHttpClient client = new OkHttpClient.Builder()
.addInterceptor(new LoggingInterceptor())
.build();
public void run() throws Exception {
Request request = new Request.Builder()
.url("https://publicobject.com/helloworld.txt")
.build();
Response response = client.newCall(request).execute();
response.body().close();
}
private static | LoggingInterceptors |
java | apache__kafka | clients/src/test/java/org/apache/kafka/clients/admin/internals/AllBrokersStrategyIntegrationTest.java | {
"start": 2019,
"end": 10554
} | class ____ {
private static final long TIMEOUT_MS = 5000;
private static final long RETRY_BACKOFF_MS = 100;
private final LogContext logContext = new LogContext();
private final MockTime time = new MockTime();
private AdminApiDriver<AllBrokersStrategy.BrokerKey, Integer> buildDriver(
AllBrokersStrategy.AllBrokersFuture<Integer> result
) {
return new AdminApiDriver<>(
new MockApiHandler(),
result,
time.milliseconds() + TIMEOUT_MS,
RETRY_BACKOFF_MS,
RETRY_BACKOFF_MS,
logContext
);
}
@Test
public void testFatalLookupError() {
AllBrokersStrategy.AllBrokersFuture<Integer> result = new AllBrokersStrategy.AllBrokersFuture<>();
AdminApiDriver<AllBrokersStrategy.BrokerKey, Integer> driver = buildDriver(result);
List<AdminApiDriver.RequestSpec<AllBrokersStrategy.BrokerKey>> requestSpecs = driver.poll();
assertEquals(1, requestSpecs.size());
AdminApiDriver.RequestSpec<AllBrokersStrategy.BrokerKey> spec = requestSpecs.get(0);
assertEquals(AllBrokersStrategy.LOOKUP_KEYS, spec.keys);
driver.onFailure(time.milliseconds(), spec, new UnknownServerException());
assertTrue(result.all().isDone());
TestUtils.assertFutureThrows(UnknownServerException.class, result.all());
assertEquals(Collections.emptyList(), driver.poll());
}
@Test
public void testRetryLookupAfterDisconnect() {
AllBrokersStrategy.AllBrokersFuture<Integer> result = new AllBrokersStrategy.AllBrokersFuture<>();
AdminApiDriver<AllBrokersStrategy.BrokerKey, Integer> driver = buildDriver(result);
List<AdminApiDriver.RequestSpec<AllBrokersStrategy.BrokerKey>> requestSpecs = driver.poll();
assertEquals(1, requestSpecs.size());
AdminApiDriver.RequestSpec<AllBrokersStrategy.BrokerKey> spec = requestSpecs.get(0);
assertEquals(AllBrokersStrategy.LOOKUP_KEYS, spec.keys);
driver.onFailure(time.milliseconds(), spec, new DisconnectException());
List<AdminApiDriver.RequestSpec<AllBrokersStrategy.BrokerKey>> retrySpecs = driver.poll();
assertEquals(1, retrySpecs.size());
AdminApiDriver.RequestSpec<AllBrokersStrategy.BrokerKey> retrySpec = retrySpecs.get(0);
assertEquals(AllBrokersStrategy.LOOKUP_KEYS, retrySpec.keys);
assertEquals(time.milliseconds(), retrySpec.nextAllowedTryMs);
assertEquals(Collections.emptyList(), driver.poll());
}
@Test
public void testMultiBrokerCompletion() throws Exception {
AllBrokersStrategy.AllBrokersFuture<Integer> result = new AllBrokersStrategy.AllBrokersFuture<>();
AdminApiDriver<AllBrokersStrategy.BrokerKey, Integer> driver = buildDriver(result);
List<AdminApiDriver.RequestSpec<AllBrokersStrategy.BrokerKey>> lookupSpecs = driver.poll();
assertEquals(1, lookupSpecs.size());
AdminApiDriver.RequestSpec<AllBrokersStrategy.BrokerKey> lookupSpec = lookupSpecs.get(0);
Set<Integer> brokerIds = Set.of(1, 2);
driver.onResponse(time.milliseconds(), lookupSpec, responseWithBrokers(brokerIds), Node.noNode());
assertTrue(result.all().isDone());
Map<Integer, KafkaFutureImpl<Integer>> brokerFutures = result.all().get();
List<AdminApiDriver.RequestSpec<AllBrokersStrategy.BrokerKey>> requestSpecs = driver.poll();
assertEquals(2, requestSpecs.size());
AdminApiDriver.RequestSpec<AllBrokersStrategy.BrokerKey> requestSpec1 = requestSpecs.get(0);
assertTrue(requestSpec1.scope.destinationBrokerId().isPresent());
int brokerId1 = requestSpec1.scope.destinationBrokerId().getAsInt();
assertTrue(brokerIds.contains(brokerId1));
driver.onResponse(time.milliseconds(), requestSpec1, null, Node.noNode());
KafkaFutureImpl<Integer> future1 = brokerFutures.get(brokerId1);
assertTrue(future1.isDone());
AdminApiDriver.RequestSpec<AllBrokersStrategy.BrokerKey> requestSpec2 = requestSpecs.get(1);
assertTrue(requestSpec2.scope.destinationBrokerId().isPresent());
int brokerId2 = requestSpec2.scope.destinationBrokerId().getAsInt();
assertNotEquals(brokerId1, brokerId2);
assertTrue(brokerIds.contains(brokerId2));
driver.onResponse(time.milliseconds(), requestSpec2, null, Node.noNode());
KafkaFutureImpl<Integer> future2 = brokerFutures.get(brokerId2);
assertTrue(future2.isDone());
assertEquals(Collections.emptyList(), driver.poll());
}
@Test
public void testRetryFulfillmentAfterDisconnect() throws Exception {
AllBrokersStrategy.AllBrokersFuture<Integer> result = new AllBrokersStrategy.AllBrokersFuture<>();
AdminApiDriver<AllBrokersStrategy.BrokerKey, Integer> driver = buildDriver(result);
List<AdminApiDriver.RequestSpec<AllBrokersStrategy.BrokerKey>> lookupSpecs = driver.poll();
assertEquals(1, lookupSpecs.size());
AdminApiDriver.RequestSpec<AllBrokersStrategy.BrokerKey> lookupSpec = lookupSpecs.get(0);
int brokerId = 1;
driver.onResponse(time.milliseconds(), lookupSpec, responseWithBrokers(Collections.singleton(brokerId)), Node.noNode());
assertTrue(result.all().isDone());
Map<Integer, KafkaFutureImpl<Integer>> brokerFutures = result.all().get();
KafkaFutureImpl<Integer> future = brokerFutures.get(brokerId);
assertFalse(future.isDone());
List<AdminApiDriver.RequestSpec<AllBrokersStrategy.BrokerKey>> requestSpecs = driver.poll();
assertEquals(1, requestSpecs.size());
AdminApiDriver.RequestSpec<AllBrokersStrategy.BrokerKey> requestSpec = requestSpecs.get(0);
driver.onFailure(time.milliseconds(), requestSpec, new DisconnectException());
assertFalse(future.isDone());
List<AdminApiDriver.RequestSpec<AllBrokersStrategy.BrokerKey>> retrySpecs = driver.poll();
assertEquals(1, retrySpecs.size());
AdminApiDriver.RequestSpec<AllBrokersStrategy.BrokerKey> retrySpec = retrySpecs.get(0);
assertEquals(time.milliseconds() + RETRY_BACKOFF_MS, retrySpec.nextAllowedTryMs);
assertEquals(OptionalInt.of(brokerId), retrySpec.scope.destinationBrokerId());
driver.onResponse(time.milliseconds(), retrySpec, null, new Node(brokerId, "host", 1234));
assertTrue(future.isDone());
assertEquals(brokerId, future.get());
assertEquals(Collections.emptyList(), driver.poll());
}
@Test
public void testFatalFulfillmentError() throws Exception {
AllBrokersStrategy.AllBrokersFuture<Integer> result = new AllBrokersStrategy.AllBrokersFuture<>();
AdminApiDriver<AllBrokersStrategy.BrokerKey, Integer> driver = buildDriver(result);
List<AdminApiDriver.RequestSpec<AllBrokersStrategy.BrokerKey>> lookupSpecs = driver.poll();
assertEquals(1, lookupSpecs.size());
AdminApiDriver.RequestSpec<AllBrokersStrategy.BrokerKey> lookupSpec = lookupSpecs.get(0);
int brokerId = 1;
driver.onResponse(time.milliseconds(), lookupSpec, responseWithBrokers(Collections.singleton(brokerId)), Node.noNode());
assertTrue(result.all().isDone());
Map<Integer, KafkaFutureImpl<Integer>> brokerFutures = result.all().get();
KafkaFutureImpl<Integer> future = brokerFutures.get(brokerId);
assertFalse(future.isDone());
List<AdminApiDriver.RequestSpec<AllBrokersStrategy.BrokerKey>> requestSpecs = driver.poll();
assertEquals(1, requestSpecs.size());
AdminApiDriver.RequestSpec<AllBrokersStrategy.BrokerKey> requestSpec = requestSpecs.get(0);
driver.onFailure(time.milliseconds(), requestSpec, new UnknownServerException());
assertTrue(future.isDone());
TestUtils.assertFutureThrows(UnknownServerException.class, future);
assertEquals(Collections.emptyList(), driver.poll());
}
private MetadataResponse responseWithBrokers(Set<Integer> brokerIds) {
MetadataResponseData response = new MetadataResponseData();
for (Integer brokerId : brokerIds) {
response.brokers().add(new MetadataResponseData.MetadataResponseBroker()
.setNodeId(brokerId)
.setHost("host" + brokerId)
.setPort(9092)
);
}
return new MetadataResponse(response, ApiKeys.METADATA.latestVersion());
}
private | AllBrokersStrategyIntegrationTest |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/api/junit/jupiter/TestKitUtils.java | {
"start": 1215,
"end": 3175
} | class ____ {
private TestKitUtils() {}
static AbstractThrowableAssert<?, ? extends Throwable> assertThatTest(Class<?> testClass, String... config) {
checkClass(testClass);
Logger logger = Logger.getLogger("org.junit.jupiter");
Level oldLevel = logger.getLevel();
try {
// Suppress log output while the testkit is running
logger.setLevel(Level.OFF);
EngineTestKit.Builder builder = EngineTestKit.engine("junit-jupiter")
.selectors(selectClass(testClass))
.configurationParameter("junit.jupiter.conditions.deactivate", "*");
if (config != null) {
if (config.length % 2 != 0) {
throw new IllegalStateException("Odd number of config parameters provided: " + Arrays.toString(config));
}
for (int i = 0; i < config.length; i++) {
builder.configurationParameter(config[i++], config[i]);
}
}
Event testEvent = builder.execute()
.allEvents()
.filter(event -> event.getType().equals(FINISHED))
.findAny()
.orElseThrow(() -> new IllegalStateException("Test failed to run at all"));
TestExecutionResult result = testEvent.getPayload(TestExecutionResult.class)
.orElseThrow(() -> new IllegalStateException("Test result payload missing"));
return assertThat(result.getThrowable().orElse(null));
} finally {
// Restore the filter to what it was so that we do not interfere with the parent test
logger.setLevel(oldLevel);
}
}
private static void checkClass(Class<?> testClass) {
// This is to protect against developer slip-ups that can be costly...
if (!Modifier.isStatic(testClass.getModifiers())) throw new IllegalStateException("Test | TestKitUtils |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/engine/extension/TestInstancePreConstructCallbackTests.java | {
"start": 9519,
"end": 10371
} | class ____ extends CallSequenceRecordingTestCase {
static AtomicInteger instanceCounter = new AtomicInteger();
private final String instanceId;
PreConstructInNestedTestCase() {
record("constructor");
instanceId = "#" + instanceCounter.incrementAndGet();
}
@BeforeAll
static void beforeAll() {
instanceCounter.set(0);
record("beforeAll");
}
@BeforeEach
void beforeEach() {
record("beforeEach");
}
@Test
void outerTest1() {
record("outerTest1");
}
@Test
void outerTest2() {
record("outerTest2");
}
@AfterEach
void afterEach() {
record("afterEach");
}
@AfterAll
static void afterAll() {
record("afterAll");
}
@Override
public String toString() {
return instanceId;
}
@ExtendWith(InstancePreConstructCallbackRecordingBar.class)
abstract | PreConstructInNestedTestCase |
java | google__gson | gson/src/test/java/com/google/gson/functional/EnumTest.java | {
"start": 3490,
"end": 7124
} | class ____ {
private final MyEnum value1 = MyEnum.VALUE1;
private final MyEnum value2 = MyEnum.VALUE2;
String getExpectedJson() {
return "{\"value1\":\"" + value1 + "\",\"value2\":\"" + value2 + "\"}";
}
}
/** Test for issue 226. */
@Test
@SuppressWarnings("GetClassOnEnum")
public void testEnumSubclass() {
assertThat(Roshambo.ROCK.getClass()).isNotEqualTo(Roshambo.class);
assertThat(gson.toJson(Roshambo.ROCK)).isEqualTo("\"ROCK\"");
assertThat(gson.toJson(EnumSet.allOf(Roshambo.class)))
.isEqualTo("[\"ROCK\",\"PAPER\",\"SCISSORS\"]");
assertThat(gson.fromJson("\"ROCK\"", Roshambo.class)).isEqualTo(Roshambo.ROCK);
Set<Roshambo> deserialized =
gson.fromJson("[\"ROCK\",\"PAPER\",\"SCISSORS\"]", new TypeToken<>() {});
assertThat(deserialized).isEqualTo(EnumSet.allOf(Roshambo.class));
// A bit contrived, but should also work if explicitly deserializing using anonymous enum
// subclass
assertThat(gson.fromJson("\"ROCK\"", Roshambo.ROCK.getClass())).isEqualTo(Roshambo.ROCK);
}
@Test
@SuppressWarnings("GetClassOnEnum")
public void testEnumSubclassWithRegisteredTypeAdapter() {
gson =
new GsonBuilder()
.registerTypeHierarchyAdapter(Roshambo.class, new MyEnumTypeAdapter())
.create();
assertThat(Roshambo.ROCK.getClass()).isNotEqualTo(Roshambo.class);
assertThat(gson.toJson(Roshambo.ROCK)).isEqualTo("\"123ROCK\"");
assertThat(gson.toJson(EnumSet.allOf(Roshambo.class)))
.isEqualTo("[\"123ROCK\",\"123PAPER\",\"123SCISSORS\"]");
assertThat(gson.fromJson("\"123ROCK\"", Roshambo.class)).isEqualTo(Roshambo.ROCK);
Set<Roshambo> deserialized =
gson.fromJson("[\"123ROCK\",\"123PAPER\",\"123SCISSORS\"]", new TypeToken<>() {});
assertThat(deserialized).isEqualTo(EnumSet.allOf(Roshambo.class));
}
@Test
public void testEnumSubclassAsParameterizedType() {
Collection<Roshambo> list = new ArrayList<>();
list.add(Roshambo.ROCK);
list.add(Roshambo.PAPER);
String json = gson.toJson(list);
assertThat(json).isEqualTo("[\"ROCK\",\"PAPER\"]");
Type collectionType = new TypeToken<Collection<Roshambo>>() {}.getType();
Collection<Roshambo> actualJsonList = gson.fromJson(json, collectionType);
MoreAsserts.assertContains(actualJsonList, Roshambo.ROCK);
MoreAsserts.assertContains(actualJsonList, Roshambo.PAPER);
}
@Test
public void testEnumCaseMapping() {
assertThat(gson.fromJson("\"boy\"", Gender.class)).isEqualTo(Gender.MALE);
assertThat(gson.toJson(Gender.MALE, Gender.class)).isEqualTo("\"boy\"");
}
@Test
public void testEnumSet() {
EnumSet<Roshambo> foo = EnumSet.of(Roshambo.ROCK, Roshambo.PAPER);
String json = gson.toJson(foo);
assertThat(json).isEqualTo("[\"ROCK\",\"PAPER\"]");
Type type = new TypeToken<EnumSet<Roshambo>>() {}.getType();
EnumSet<Roshambo> bar = gson.fromJson(json, type);
assertThat(bar).containsExactly(Roshambo.ROCK, Roshambo.PAPER).inOrder();
assertThat(bar).doesNotContain(Roshambo.SCISSORS);
}
@Test
public void testEnumMap() {
EnumMap<MyEnum, String> map = new EnumMap<>(MyEnum.class);
map.put(MyEnum.VALUE1, "test");
String json = gson.toJson(map);
assertThat(json).isEqualTo("{\"VALUE1\":\"test\"}");
Type type = new TypeToken<EnumMap<MyEnum, String>>() {}.getType();
EnumMap<?, ?> actualMap = gson.fromJson("{\"VALUE1\":\"test\"}", type);
Map<?, ?> expectedMap = Collections.singletonMap(MyEnum.VALUE1, "test");
assertThat(actualMap).isEqualTo(expectedMap);
}
private | ClassWithEnumFields |
java | apache__rocketmq | filter/src/main/java/org/apache/rocketmq/filter/parser/SelectorParser.java | {
"start": 39324,
"end": 45482
} | class ____ extends java.lang.Error {
}
final private LookaheadSuccess jjLs = new LookaheadSuccess();
private boolean jj_scan_token(int kind) {
if (jjScanpos == jjLastpos) {
jjLa--;
if (jjScanpos.next == null) {
jjLastpos = jjScanpos = jjScanpos.next = tokenSource.getNextToken();
} else {
jjLastpos = jjScanpos = jjScanpos.next;
}
} else {
jjScanpos = jjScanpos.next;
}
if (jjRescan) {
int i = 0;
Token tok = token;
while (tok != null && tok != jjScanpos) {
i++;
tok = tok.next;
}
if (tok != null) jj_add_error_token(kind, i);
}
if (jjScanpos.kind != kind) return true;
if (jjLa == 0 && jjScanpos == jjLastpos) throw jjLs;
return false;
}
/**
* Get the next Token.
*/
final public Token getNextToken() {
if (token.next != null) token = token.next;
else token = token.next = tokenSource.getNextToken();
jjNtk = -1;
jjGen++;
return token;
}
/**
* Get the specific Token.
*/
final public Token getToken(int index) {
Token t = token;
for (int i = 0; i < index; i++) {
if (t.next != null) t = t.next;
else t = t.next = tokenSource.getNextToken();
}
return t;
}
private int jj_ntk() {
if ((jjNt = token.next) == null)
return jjNtk = (token.next = tokenSource.getNextToken()).kind;
else
return jjNtk = jjNt.kind;
}
private java.util.List<int[]> jjExpentries = new java.util.ArrayList<>();
private int[] jjExpentry;
private int jjKind = -1;
private int[] jjLasttokens = new int[100];
private int jjEndpos;
private void jj_add_error_token(int kind, int pos) {
if (pos >= 100) return;
if (pos == jjEndpos + 1) {
jjLasttokens[jjEndpos++] = kind;
} else if (jjEndpos != 0) {
jjExpentry = new int[jjEndpos];
for (int i = 0; i < jjEndpos; i++) {
jjExpentry[i] = jjLasttokens[i];
}
boolean exists = false;
for (java.util.Iterator<?> it = jjExpentries.iterator(); it.hasNext(); ) {
exists = true;
int[] oldentry = (int[]) (it.next());
if (oldentry.length == jjExpentry.length) {
for (int i = 0; i < jjExpentry.length; i++) {
if (oldentry[i] != jjExpentry[i]) {
exists = false;
break;
}
}
if (exists) break;
}
}
if (!exists) jjExpentries.add(jjExpentry);
if (pos != 0) jjLasttokens[(jjEndpos = pos) - 1] = kind;
}
}
/**
* Generate ParseException.
*/
public ParseException generateParseException() {
jjExpentries.clear();
boolean[] la1tokens = new boolean[36];
if (jjKind >= 0) {
la1tokens[jjKind] = true;
jjKind = -1;
}
for (int i = 0; i < 16; i++) {
if (jjLa1[i] == jjGen) {
for (int j = 0; j < 32; j++) {
if ((jjLa10[i] & (1 << j)) != 0) {
la1tokens[j] = true;
}
if ((jjLa11[i] & (1 << j)) != 0) {
la1tokens[32 + j] = true;
}
}
}
}
for (int i = 0; i < 36; i++) {
if (la1tokens[i]) {
jjExpentry = new int[1];
jjExpentry[0] = i;
jjExpentries.add(jjExpentry);
}
}
jjEndpos = 0;
jj_rescan_token();
jj_add_error_token(0, 0);
int[][] exptokseq = new int[jjExpentries.size()][];
for (int i = 0; i < jjExpentries.size(); i++) {
exptokseq[i] = jjExpentries.get(i);
}
return new ParseException(token, exptokseq, TOKEN_IMAGE);
}
/**
* Enable tracing.
*/
final public void enable_tracing() {
}
/**
* Disable tracing.
*/
final public void disable_tracing() {
}
private void jj_rescan_token() {
jjRescan = true;
for (int i = 0; i < 7; i++) {
try {
JJCalls p = jj2Rtns[i];
do {
if (p.gen > jjGen) {
jjLa = p.arg;
jjLastpos = jjScanpos = p.first;
switch (i) {
case 0:
jj_3_1();
break;
case 1:
jj_3_2();
break;
case 2:
jj_3_3();
break;
case 3:
jj_3_4();
break;
case 4:
jj_3_5();
break;
case 5:
jj_3_6();
break;
case 6:
jj_3_7();
break;
}
}
p = p.next;
} while (p != null);
} catch (LookaheadSuccess ls) {
}
}
jjRescan = false;
}
private void jj_save(int index, int xla) {
JJCalls p = jj2Rtns[index];
while (p.gen > jjGen) {
if (p.next == null) {
p = p.next = new JJCalls();
break;
}
p = p.next;
}
p.gen = jjGen + xla - jjLa;
p.first = token;
p.arg = xla;
}
static final | LookaheadSuccess |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/engine/LazySoftDeletesDirectoryReaderWrapper.java | {
"start": 6991,
"end": 10005
} | class ____ implements Bits {
private final int maxDoc;
private final String field;
private final LeafReader reader;
private final int numSoftDeletes;
private final int numDocs;
volatile Bits materializedBits;
public LazyBits(int maxDoc, String field, LeafReader reader, int numSoftDeletes, int numDocs) {
this.maxDoc = maxDoc;
this.field = field;
this.reader = reader;
this.numSoftDeletes = numSoftDeletes;
this.numDocs = numDocs;
materializedBits = null;
assert numSoftDeletes > 0;
}
@Override
public boolean get(int index) {
if (materializedBits == null) {
synchronized (this) {
try {
if (materializedBits == null) {
materializedBits = init();
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
}
return materializedBits.get(index);
}
@Override
public int length() {
return maxDoc;
}
private Bits init() throws IOException {
assert Thread.holdsLock(this);
DocIdSetIterator iterator = getDocValuesDocIdSetIterator(field, reader);
assert iterator != null;
Bits liveDocs = reader.getLiveDocs();
final FixedBitSet bits;
if (liveDocs != null) {
bits = FixedBitSet.copyOf(liveDocs);
} else {
bits = new FixedBitSet(maxDoc);
bits.set(0, maxDoc);
}
int numComputedSoftDeletes = applySoftDeletes(iterator, bits);
assert numComputedSoftDeletes == numSoftDeletes
: "numComputedSoftDeletes: " + numComputedSoftDeletes + " expected: " + numSoftDeletes;
int numDeletes = reader.numDeletedDocs() + numComputedSoftDeletes;
int computedNumDocs = reader.maxDoc() - numDeletes;
assert computedNumDocs == numDocs : "computedNumDocs: " + computedNumDocs + " expected: " + numDocs;
return bits;
}
public boolean initialized() {
return materializedBits != null;
}
}
static int applySoftDeletes(DocIdSetIterator iterator, FixedBitSet bits) throws IOException {
assert iterator != null;
int newDeletes = 0;
int docID;
while ((docID = iterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
if (bits.get(docID)) { // doc is live - clear it
bits.clear(docID);
newDeletes++;
// now that we know we deleted it and we fully control the hard deletes we can do correct
// accounting
// below.
}
}
return newDeletes;
}
private static | LazyBits |
java | netty__netty | example/src/main/java/io/netty/example/http/snoop/HttpSnoopClient.java | {
"start": 1716,
"end": 4334
} | class ____ {
static final String URL = System.getProperty("url", "http://127.0.0.1:8080/");
public static void main(String[] args) throws Exception {
URI uri = new URI(URL);
String scheme = uri.getScheme() == null? "http" : uri.getScheme();
String host = uri.getHost() == null? "127.0.0.1" : uri.getHost();
int port = uri.getPort();
if (port == -1) {
if ("http".equalsIgnoreCase(scheme)) {
port = 80;
} else if ("https".equalsIgnoreCase(scheme)) {
port = 443;
}
}
if (!"http".equalsIgnoreCase(scheme) && !"https".equalsIgnoreCase(scheme)) {
System.err.println("Only HTTP(S) is supported.");
return;
}
// Configure SSL context if necessary.
final boolean ssl = "https".equalsIgnoreCase(scheme);
final SslContext sslCtx;
if (ssl) {
sslCtx = SslContextBuilder.forClient()
.trustManager(InsecureTrustManagerFactory.INSTANCE).build();
} else {
sslCtx = null;
}
// Configure the client.
EventLoopGroup group = new MultiThreadIoEventLoopGroup(NioIoHandler.newFactory());
try {
Bootstrap b = new Bootstrap();
b.group(group)
.channel(NioSocketChannel.class)
.handler(new HttpSnoopClientInitializer(sslCtx));
// Make the connection attempt.
Channel ch = b.connect(host, port).sync().channel();
// Prepare the HTTP request.
HttpRequest request = new DefaultFullHttpRequest(
HttpVersion.HTTP_1_1, HttpMethod.GET, uri.getRawPath(), Unpooled.EMPTY_BUFFER);
request.headers().set(HttpHeaderNames.HOST, host);
request.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE);
request.headers().set(HttpHeaderNames.ACCEPT_ENCODING, HttpHeaderValues.GZIP);
// Set some example cookies.
request.headers().set(
HttpHeaderNames.COOKIE,
ClientCookieEncoder.STRICT.encode(
new DefaultCookie("my-cookie", "foo"),
new DefaultCookie("another-cookie", "bar")));
// Send the HTTP request.
ch.writeAndFlush(request);
// Wait for the server to close the connection.
ch.closeFuture().sync();
} finally {
// Shut down executor threads to exit.
group.shutdownGracefully();
}
}
}
| HttpSnoopClient |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/rest/client/reactive/redirect/RedirectingResourceClient307.java | {
"start": 205,
"end": 599
} | interface ____ {
/**
* By default, the `quarkus.rest-client.follow-redirects` property only works in GET and HEAD resources, so POST resources
* are never redirect, unless users register a custom {@link org.jboss.resteasy.reactive.client.handlers.RedirectHandler}.
*/
@POST
Response post(@QueryParam("redirects") Integer numberOfRedirects);
}
| RedirectingResourceClient307 |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/get/MultiGetItemResponse.java | {
"start": 694,
"end": 2099
} | class ____ implements Writeable {
private final GetResponse response;
private final MultiGetResponse.Failure failure;
public MultiGetItemResponse(GetResponse response, MultiGetResponse.Failure failure) {
this.response = response;
this.failure = failure;
}
/**
* The index name of the document.
*/
public String getIndex() {
if (failure != null) {
return failure.getIndex();
}
return response.getIndex();
}
/**
* The id of the document.
*/
public String getId() {
if (failure != null) {
return failure.getId();
}
return response.getId();
}
/**
* Is this a failed execution?
*/
public boolean isFailed() {
return failure != null;
}
/**
* The actual get response, {@code null} if its a failure.
*/
public GetResponse getResponse() {
return this.response;
}
/**
* The failure if relevant.
*/
public MultiGetResponse.Failure getFailure() {
return this.failure;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
if (failure != null) {
out.writeBoolean(true);
failure.writeTo(out);
} else {
out.writeBoolean(false);
response.writeTo(out);
}
}
}
| MultiGetItemResponse |
java | apache__camel | components/camel-kubernetes/src/generated/java/org/apache/camel/component/kubernetes/service_accounts/KubernetesServiceAccountsEndpointConfigurer.java | {
"start": 754,
"end": 9119
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
KubernetesServiceAccountsEndpoint target = (KubernetesServiceAccountsEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "apiversion":
case "apiVersion": target.getConfiguration().setApiVersion(property(camelContext, java.lang.String.class, value)); return true;
case "cacertdata":
case "caCertData": target.getConfiguration().setCaCertData(property(camelContext, java.lang.String.class, value)); return true;
case "cacertfile":
case "caCertFile": target.getConfiguration().setCaCertFile(property(camelContext, java.lang.String.class, value)); return true;
case "clientcertdata":
case "clientCertData": target.getConfiguration().setClientCertData(property(camelContext, java.lang.String.class, value)); return true;
case "clientcertfile":
case "clientCertFile": target.getConfiguration().setClientCertFile(property(camelContext, java.lang.String.class, value)); return true;
case "clientkeyalgo":
case "clientKeyAlgo": target.getConfiguration().setClientKeyAlgo(property(camelContext, java.lang.String.class, value)); return true;
case "clientkeydata":
case "clientKeyData": target.getConfiguration().setClientKeyData(property(camelContext, java.lang.String.class, value)); return true;
case "clientkeyfile":
case "clientKeyFile": target.getConfiguration().setClientKeyFile(property(camelContext, java.lang.String.class, value)); return true;
case "clientkeypassphrase":
case "clientKeyPassphrase": target.getConfiguration().setClientKeyPassphrase(property(camelContext, java.lang.String.class, value)); return true;
case "connectiontimeout":
case "connectionTimeout": target.getConfiguration().setConnectionTimeout(property(camelContext, java.lang.Integer.class, value)); return true;
case "dnsdomain":
case "dnsDomain": target.getConfiguration().setDnsDomain(property(camelContext, java.lang.String.class, value)); return true;
case "kubernetesclient":
case "kubernetesClient": target.getConfiguration().setKubernetesClient(property(camelContext, io.fabric8.kubernetes.client.KubernetesClient.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
case "namespace": target.getConfiguration().setNamespace(property(camelContext, java.lang.String.class, value)); return true;
case "oauthtoken":
case "oauthToken": target.getConfiguration().setOauthToken(property(camelContext, java.lang.String.class, value)); return true;
case "operation": target.getConfiguration().setOperation(property(camelContext, java.lang.String.class, value)); return true;
case "password": target.getConfiguration().setPassword(property(camelContext, java.lang.String.class, value)); return true;
case "portname":
case "portName": target.getConfiguration().setPortName(property(camelContext, java.lang.String.class, value)); return true;
case "portprotocol":
case "portProtocol": target.getConfiguration().setPortProtocol(property(camelContext, java.lang.String.class, value)); return true;
case "trustcerts":
case "trustCerts": target.getConfiguration().setTrustCerts(property(camelContext, java.lang.Boolean.class, value)); return true;
case "username": target.getConfiguration().setUsername(property(camelContext, java.lang.String.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "apiversion":
case "apiVersion": return java.lang.String.class;
case "cacertdata":
case "caCertData": return java.lang.String.class;
case "cacertfile":
case "caCertFile": return java.lang.String.class;
case "clientcertdata":
case "clientCertData": return java.lang.String.class;
case "clientcertfile":
case "clientCertFile": return java.lang.String.class;
case "clientkeyalgo":
case "clientKeyAlgo": return java.lang.String.class;
case "clientkeydata":
case "clientKeyData": return java.lang.String.class;
case "clientkeyfile":
case "clientKeyFile": return java.lang.String.class;
case "clientkeypassphrase":
case "clientKeyPassphrase": return java.lang.String.class;
case "connectiontimeout":
case "connectionTimeout": return java.lang.Integer.class;
case "dnsdomain":
case "dnsDomain": return java.lang.String.class;
case "kubernetesclient":
case "kubernetesClient": return io.fabric8.kubernetes.client.KubernetesClient.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
case "namespace": return java.lang.String.class;
case "oauthtoken":
case "oauthToken": return java.lang.String.class;
case "operation": return java.lang.String.class;
case "password": return java.lang.String.class;
case "portname":
case "portName": return java.lang.String.class;
case "portprotocol":
case "portProtocol": return java.lang.String.class;
case "trustcerts":
case "trustCerts": return java.lang.Boolean.class;
case "username": return java.lang.String.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
KubernetesServiceAccountsEndpoint target = (KubernetesServiceAccountsEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "apiversion":
case "apiVersion": return target.getConfiguration().getApiVersion();
case "cacertdata":
case "caCertData": return target.getConfiguration().getCaCertData();
case "cacertfile":
case "caCertFile": return target.getConfiguration().getCaCertFile();
case "clientcertdata":
case "clientCertData": return target.getConfiguration().getClientCertData();
case "clientcertfile":
case "clientCertFile": return target.getConfiguration().getClientCertFile();
case "clientkeyalgo":
case "clientKeyAlgo": return target.getConfiguration().getClientKeyAlgo();
case "clientkeydata":
case "clientKeyData": return target.getConfiguration().getClientKeyData();
case "clientkeyfile":
case "clientKeyFile": return target.getConfiguration().getClientKeyFile();
case "clientkeypassphrase":
case "clientKeyPassphrase": return target.getConfiguration().getClientKeyPassphrase();
case "connectiontimeout":
case "connectionTimeout": return target.getConfiguration().getConnectionTimeout();
case "dnsdomain":
case "dnsDomain": return target.getConfiguration().getDnsDomain();
case "kubernetesclient":
case "kubernetesClient": return target.getConfiguration().getKubernetesClient();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
case "namespace": return target.getConfiguration().getNamespace();
case "oauthtoken":
case "oauthToken": return target.getConfiguration().getOauthToken();
case "operation": return target.getConfiguration().getOperation();
case "password": return target.getConfiguration().getPassword();
case "portname":
case "portName": return target.getConfiguration().getPortName();
case "portprotocol":
case "portProtocol": return target.getConfiguration().getPortProtocol();
case "trustcerts":
case "trustCerts": return target.getConfiguration().getTrustCerts();
case "username": return target.getConfiguration().getUsername();
default: return null;
}
}
}
| KubernetesServiceAccountsEndpointConfigurer |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/util/xml/DomContentHandler.java | {
"start": 1119,
"end": 3720
} | class ____ implements ContentHandler {
private final Document document;
private final List<Element> elements = new ArrayList<>();
private final Node node;
/**
* Create a new instance of the {@code DomContentHandler} with the given node.
* @param node the node to publish events to
*/
DomContentHandler(Node node) {
this.node = node;
// The following pattern variable "doc" cannot be named "document" due to lacking
// support in Checkstyle: https://github.com/checkstyle/checkstyle/issues/10969
if (node instanceof Document doc) {
this.document = doc;
}
else {
this.document = node.getOwnerDocument();
}
}
private Node getParent() {
if (!this.elements.isEmpty()) {
return this.elements.get(this.elements.size() - 1);
}
else {
return this.node;
}
}
@Override
public void startElement(String uri, String localName, String qName, Attributes attributes) {
Node parent = getParent();
Element element = this.document.createElementNS(uri, qName);
for (int i = 0; i < attributes.getLength(); i++) {
String attrUri = attributes.getURI(i);
String attrQname = attributes.getQName(i);
String value = attributes.getValue(i);
if (!attrQname.startsWith("xmlns")) {
element.setAttributeNS(attrUri, attrQname, value);
}
}
element = (Element) parent.appendChild(element);
this.elements.add(element);
}
@Override
public void endElement(String uri, String localName, String qName) {
this.elements.remove(this.elements.size() - 1);
}
@Override
public void characters(char[] ch, int start, int length) {
String data = new String(ch, start, length);
Node parent = getParent();
Node lastChild = parent.getLastChild();
if (lastChild != null && lastChild.getNodeType() == Node.TEXT_NODE) {
((Text) lastChild).appendData(data);
}
else {
Text text = this.document.createTextNode(data);
parent.appendChild(text);
}
}
@Override
public void processingInstruction(String target, String data) {
Node parent = getParent();
ProcessingInstruction pi = this.document.createProcessingInstruction(target, data);
parent.appendChild(pi);
}
// Unsupported
@Override
public void setDocumentLocator(Locator locator) {
}
@Override
public void startDocument() {
}
@Override
public void endDocument() {
}
@Override
public void startPrefixMapping(String prefix, String uri) {
}
@Override
public void endPrefixMapping(String prefix) {
}
@Override
public void ignorableWhitespace(char[] ch, int start, int length) {
}
@Override
public void skippedEntity(String name) {
}
}
| DomContentHandler |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/support/hsf/HSFJSONUtilsTest_1.java | {
"start": 250,
"end": 8742
} | class ____ extends TestCase {
private Method method_f2;
private Method method_f3;
private Method method_f4;
private Method method_f5;
private MethodLocator methodLocator;
protected void setUp() throws Exception {
method_f2 = Service.class.getMethod("f2", String.class, Model.class);
method_f3 = Service.class.getMethod("f3", String.class, List.class);
method_f4 = Service.class.getMethod("f3", String.class, Model[].class);
method_f5 = Service.class.getMethod("f3", int.class, long.class);
methodLocator = new MethodLocator() {
public Method findMethod(String[] types) {
if (types == null || types.length == 0) {
return null;
}
if (types[0].equals("int")) {
return method_f5;
}
if (types[1].equals("java.util.List")) {
return method_f3;
}
if (types[1].equals("com.alibaba.json.bvt.support.hsf.HSFJSONUtilsTest_0$Model[]")) {
return method_f4;
}
return method_f2;
}
};
}
public void test_invoke() throws Exception {
String json = "{ \n" +
" \"argsTypes\" : [ \"java.lang.String\", \"com.alibaba.json.bvt.support.hsf.HSFJSONUtilsTest_0$Model\"],\n" +
" \"argsObjs\" : [ \"abc\", {\"value\":\"xxx\"} ]\n" +
"}";
Object[] values = HSFJSONUtils.parseInvocationArguments(json, methodLocator);
assertNotNull(values);
assertEquals(2, values.length);
assertEquals("abc", values[0]);
assertEquals("xxx", ((Model) values[1]).value);
}
public void test_invoke_type() throws Exception {
String json = "{\"@type\":\"com.alibaba.fastjson.JSONObject\", \n" +
" \"argsTypes\" : [ \"java.lang.String\", \"com.alibaba.json.bvt.support.hsf.HSFJSONUtilsTest_0$Model\"],\n" +
" \"argsObjs\" : [ \"abc\", {\"value\":\"xxx\"} ]\n" +
"}";
Object[] values = HSFJSONUtils.parseInvocationArguments(json, methodLocator);
assertNotNull(values);
assertEquals(2, values.length);
assertEquals("abc", values[0]);
assertEquals("xxx", ((Model) values[1]).value);
}
public void test_invoke_reverse() throws Exception {
String json = "{ \n" +
" \"argsObjs\" : [ \"abc\", {\"value\":\"xxx\"} ],\n" +
" \"argsTypes\" : [ \"java.lang.String\", \"com.alibaba.json.bvt.support.hsf.HSFJSONUtilsTest_0$Model\"]\n" +
"}";
Object[] values = HSFJSONUtils.parseInvocationArguments(json, methodLocator);
assertNotNull(values);
assertEquals(2, values.length);
assertEquals("abc", values[0]);
assertEquals("xxx", ((Model) values[1]).value);
}
public void test_invoke_reverse_list() throws Exception {
String json = "{ \n" +
" \"argsObjs\" : [ \"abc\", [{\"value\":\"xxx\"}] ],\n" +
" \"argsTypes\" : [ \"java.lang.String\", \"java.util.List\"]\n" +
"}";
Object[] values = HSFJSONUtils.parseInvocationArguments(json, methodLocator);
assertNotNull(values);
assertEquals(2, values.length);
assertEquals("abc", values[0]);
List list = (List) values[1];
assertEquals("xxx", ((Model) list.get(0)).value);
}
public void test_invoke_reverse_array() throws Exception {
String json = "{ \n" +
" \"argsObjs\" : [ \"abc\", [{\"value\":\"xxx\"}] ],\n" +
" \"argsTypes\" : [ \"java.lang.String\", \"com.alibaba.json.bvt.support.hsf.HSFJSONUtilsTest_0$Model[]\"]\n" +
"}";
Object[] values = HSFJSONUtils.parseInvocationArguments(json, methodLocator);
assertNotNull(values);
assertEquals(2, values.length);
assertEquals("abc", values[0]);
Model[] list = (Model[]) values[1];
assertEquals("xxx", ((Model) list[0]).value);
}
public void test_invoke_array() throws Exception {
String json = "[ \n" +
" [ \"java.lang.String\", \"com.alibaba.json.bvt.support.hsf.HSFJSONUtilsTest_0$Model\"],\n" +
" [ \"abc\", {\"value\":\"xxx\"} ]\n" +
"]";
Object[] values = HSFJSONUtils.parseInvocationArguments(json, methodLocator);
assertNotNull(values);
assertEquals(2, values.length);
assertEquals("abc", values[0]);
assertEquals("xxx", ((Model) values[1]).value);
}
public void test_invoke_array_2() throws Exception {
String json = "[ \n" +
" [ \"java.lang.String\", \"java.util.List\"],\n" +
" [ \"abc\", [{\"value\":\"xxx\"}] ]\n" +
"]";
Object[] values = HSFJSONUtils.parseInvocationArguments(json, methodLocator);
assertNotNull(values);
assertEquals(2, values.length);
assertEquals("abc", values[0]);
List list = (List) values[1];
assertEquals("xxx", ((Model) list.get(0)).value);
}
public void test_invoke_array_3() throws Exception {
String json = "[ \n" +
" [ \"java.lang.String\", \"com.alibaba.json.bvt.support.hsf.HSFJSONUtilsTest_0$Model[]\"],\n" +
" [ \"abc\", [{\"value\":\"xxx\"}] ]\n" +
"]";
Object[] values = HSFJSONUtils.parseInvocationArguments(json, methodLocator);
assertNotNull(values);
assertEquals(2, values.length);
assertEquals("abc", values[0]);
Model[] list = (Model[]) values[1];
assertEquals("xxx", ((Model) list[0]).value);
}
public void test_invoke_int() throws Exception {
String json = "[ \n" +
" [ \"int\", \"long\"],\n" +
" [ 3,4 ]\n" +
"]";
Object[] values = HSFJSONUtils.parseInvocationArguments(json, methodLocator);
assertNotNull(values);
assertEquals(2, values.length);
assertEquals(3, ((Integer)values[0]).intValue());
assertEquals(4L, ((Long)values[1]).longValue());
}
public void test_invoke_int_obj_reverse() throws Exception {
String json = "{ \n" +
" \"argsObjs\" : [ 3, 4],\n" +
" \"argsTypes\" : [ \"int\", \"long\"]\n" +
"}";
Object[] values = HSFJSONUtils.parseInvocationArguments(json, methodLocator);
assertNotNull(values);
assertEquals(2, values.length);
assertEquals(3, ((Integer)values[0]).intValue());
assertEquals(4L, ((Long)values[1]).longValue());
}
public void test_invoke_int_obj() throws Exception {
String json = "{ \n" +
" \"argsTypes\" : [ \"int\", \"long\"],\n" +
" \"argsObjs\" : [ 3, 4 ]\n" +
"}";
Object[] values = HSFJSONUtils.parseInvocationArguments(json, methodLocator);
assertNotNull(values);
assertEquals(2, values.length);
assertEquals(3, ((Integer)values[0]).intValue());
assertEquals(4L, ((Long)values[1]).longValue());
}
public void test_invoke_int_obj_2() throws Exception {
String json = "{ \n" +
" \"argsObjs\" : [ 3, 4 ]\n" +
"}";
Object[] values = HSFJSONUtils.parseInvocationArguments(json, new MethodLocator() {
public Method findMethod(String[] types) {
return method_f5;
}
});
assertNotNull(values);
assertEquals(2, values.length);
assertEquals(3, ((Integer)values[0]).intValue());
assertEquals(4L, ((Long)values[1]).longValue());
}
public void test_invoke_int_2() throws Exception {
String json = "[ \n" +
" null, [ 3,4 ]\n" +
"]";
Object[] values = HSFJSONUtils.parseInvocationArguments(json, new MethodLocator() {
public Method findMethod(String[] types) {
return method_f5;
}
});
assertNotNull(values);
assertEquals(2, values.length);
assertEquals(3, ((Integer)values[0]).intValue());
assertEquals(4L, ((Long)values[1]).longValue());
}
//
public static | HSFJSONUtilsTest_1 |
java | jhy__jsoup | src/main/java/org/jsoup/nodes/Attribute.java | {
"start": 532,
"end": 13294
} | class ____ implements Map.Entry<String, String>, Cloneable {
private static final String[] booleanAttributes = {
"allowfullscreen", "async", "autofocus", "checked", "compact", "declare", "default", "defer", "disabled",
"formnovalidate", "hidden", "inert", "ismap", "itemscope", "multiple", "muted", "nohref", "noresize",
"noshade", "novalidate", "nowrap", "open", "readonly", "required", "reversed", "seamless", "selected",
"sortable", "truespeed", "typemustmatch"
};
private String key;
@Nullable private String val;
@Nullable Attributes parent; // used to update the holding Attributes when the key / value is changed via this interface
/**
* Create a new attribute from unencoded (raw) key and value.
* @param key attribute key; case is preserved.
* @param value attribute value (may be null)
* @see #createFromEncoded
*/
public Attribute(String key, @Nullable String value) {
this(key, value, null);
}
/**
* Create a new attribute from unencoded (raw) key and value.
* @param key attribute key; case is preserved.
* @param val attribute value (may be null)
* @param parent the containing Attributes (this Attribute is not automatically added to said Attributes)
* @see #createFromEncoded*/
public Attribute(String key, @Nullable String val, @Nullable Attributes parent) {
Validate.notNull(key);
key = key.trim();
Validate.notEmpty(key); // trimming could potentially make empty, so validate here
this.key = key;
this.val = val;
this.parent = parent;
}
/**
Get the attribute's key (aka name).
@return the attribute key
*/
@Override
public String getKey() {
return key;
}
/**
Set the attribute key; case is preserved.
@param key the new key; must not be null
*/
public void setKey(String key) {
Validate.notNull(key);
key = key.trim();
Validate.notEmpty(key); // trimming could potentially make empty, so validate here
if (parent != null) {
int i = parent.indexOfKey(this.key);
if (i != Attributes.NotFound) {
String oldKey = parent.keys[i];
parent.keys[i] = key;
// if tracking source positions, update the key in the range map
Map<String, Range.AttributeRange> ranges = parent.getRanges();
if (ranges != null) {
Range.AttributeRange range = ranges.remove(oldKey);
ranges.put(key, range);
}
}
}
this.key = key;
}
/**
Get the attribute value. Will return an empty string if the value is not set.
@return the attribute value
*/
@Override
public String getValue() {
return Attributes.checkNotNull(val);
}
/**
* Check if this Attribute has a value. Set boolean attributes have no value.
* @return if this is a boolean attribute / attribute without a value
*/
public boolean hasDeclaredValue() {
return val != null;
}
/**
Set the attribute value.
@param val the new attribute value; may be null (to set an enabled boolean attribute)
@return the previous value (if was null; an empty string)
*/
@Override public String setValue(@Nullable String val) {
String oldVal = this.val;
if (parent != null) {
int i = parent.indexOfKey(this.key);
if (i != Attributes.NotFound) {
oldVal = parent.get(this.key); // trust the container more
parent.vals[i] = val;
}
}
this.val = val;
return Attributes.checkNotNull(oldVal);
}
/**
Get this attribute's key prefix, if it has one; else the empty string.
<p>For example, the attribute {@code og:title} has prefix {@code og}, and local {@code title}.</p>
@return the tag's prefix
@since 1.20.1
*/
public String prefix() {
int pos = key.indexOf(':');
if (pos == -1) return "";
else return key.substring(0, pos);
}
/**
Get this attribute's local name. The local name is the name without the prefix (if any).
<p>For example, the attribute key {@code og:title} has local name {@code title}.</p>
@return the tag's local name
@since 1.20.1
*/
public String localName() {
int pos = key.indexOf(':');
if (pos == -1) return key;
else return key.substring(pos + 1);
}
/**
Get this attribute's namespace URI, if the attribute was prefixed with a defined namespace name. Otherwise, returns
the empty string. These will only be defined if using the XML parser.
@return the tag's namespace URI, or empty string if not defined
@since 1.20.1
*/
public String namespace() {
// set as el.attributes.userData(SharedConstants.XmlnsAttr + prefix, ns)
if (parent != null) {
String ns = (String) parent.userData(SharedConstants.XmlnsAttr + prefix());
if (ns != null)
return ns;
}
return "";
}
/**
Get the HTML representation of this attribute; e.g. {@code href="index.html"}.
@return HTML
*/
public String html() {
StringBuilder sb = StringUtil.borrowBuilder();
html(QuietAppendable.wrap(sb), new Document.OutputSettings());
return StringUtil.releaseBuilder(sb);
}
/**
Get the source ranges (start to end positions) in the original input source from which this attribute's <b>name</b>
and <b>value</b> were parsed.
<p>Position tracking must be enabled prior to parsing the content.</p>
@return the ranges for the attribute's name and value, or {@code untracked} if the attribute does not exist or its range
was not tracked.
@see org.jsoup.parser.Parser#setTrackPosition(boolean)
@see Attributes#sourceRange(String)
@see Node#sourceRange()
@see Element#endSourceRange()
@since 1.17.1
*/
public Range.AttributeRange sourceRange() {
if (parent == null) return Range.AttributeRange.UntrackedAttr;
return parent.sourceRange(key);
}
void html(QuietAppendable accum, Document.OutputSettings out) {
html(key, val, accum, out);
}
static void html(String key, @Nullable String val, QuietAppendable accum, Document.OutputSettings out) {
key = getValidKey(key, out.syntax());
if (key == null) return; // can't write it :(
htmlNoValidate(key, val, accum, out);
}
/** @deprecated internal method and will be removed in a future version */
@Deprecated
protected void html(Appendable accum, Document.OutputSettings out) throws IOException {
html(key, val, accum, out);
}
/** @deprecated internal method and will be removed in a future version */
@Deprecated
protected static void html(String key, @Nullable String val, Appendable accum, Document.OutputSettings out) throws IOException {
html(key, val, QuietAppendable.wrap(accum), out);
}
static void htmlNoValidate(String key, @Nullable String val, QuietAppendable accum, Document.OutputSettings out) {
// structured like this so that Attributes can check we can write first, so it can add whitespace correctly
accum.append(key);
if (!shouldCollapseAttribute(key, val, out)) {
accum.append("=\"");
Entities.escape(accum, Attributes.checkNotNull(val), out, Entities.ForAttribute); // preserves whitespace
accum.append('"');
}
}
private static final Pattern xmlKeyReplace = Pattern.compile("[^-a-zA-Z0-9_:.]+");
private static final Pattern htmlKeyReplace = Pattern.compile("[\\x00-\\x1f\\x7f-\\x9f \"'/=]+");
/**
* Get a valid attribute key for the given syntax. If the key is not valid, it will be coerced into a valid key.
* @param key the original attribute key
* @param syntax HTML or XML
* @return the original key if it's valid; a key with invalid characters replaced with "_" otherwise; or null if a valid key could not be created.
*/
@Nullable public static String getValidKey(String key, Syntax syntax) {
if (syntax == Syntax.xml && !isValidXmlKey(key)) {
key = xmlKeyReplace.matcher(key).replaceAll("_");
return isValidXmlKey(key) ? key : null; // null if could not be coerced
}
else if (syntax == Syntax.html && !isValidHtmlKey(key)) {
key = htmlKeyReplace.matcher(key).replaceAll("_");
return isValidHtmlKey(key) ? key : null; // null if could not be coerced
}
return key;
}
// perf critical in html() so using manual scan vs regex:
// note that we aren't using anything in supplemental space, so OK to iter charAt
private static boolean isValidXmlKey(String key) {
// =~ [a-zA-Z_:][-a-zA-Z0-9_:.]*
final int length = key.length();
if (length == 0) return false;
char c = key.charAt(0);
if (!((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_' || c == ':'))
return false;
for (int i = 1; i < length; i++) {
c = key.charAt(i);
if (!((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || c == '-' || c == '_' || c == ':' || c == '.'))
return false;
}
return true;
}
private static boolean isValidHtmlKey(String key) {
// =~ [\x00-\x1f\x7f-\x9f "'/=]+
final int length = key.length();
if (length == 0) return false;
for (int i = 0; i < length; i++) {
char c = key.charAt(i);
if ((c <= 0x1f) || (c >= 0x7f && c <= 0x9f) || c == ' ' || c == '"' || c == '\'' || c == '/' || c == '=')
return false;
}
return true;
}
/**
Get the string representation of this attribute, implemented as {@link #html()}.
@return string
*/
@Override
public String toString() {
return html();
}
/**
* Create a new Attribute from an unencoded key and a HTML attribute encoded value.
* @param unencodedKey assumes the key is not encoded, as can be only run of simple \w chars.
* @param encodedValue HTML attribute encoded value
* @return attribute
*/
public static Attribute createFromEncoded(String unencodedKey, String encodedValue) {
String value = Entities.unescape(encodedValue, true);
return new Attribute(unencodedKey, value, null); // parent will get set when Put
}
protected boolean isDataAttribute() {
return isDataAttribute(key);
}
protected static boolean isDataAttribute(String key) {
return key.startsWith(Attributes.dataPrefix) && key.length() > Attributes.dataPrefix.length();
}
/**
* Collapsible if it's a boolean attribute and value is empty or same as name
*
* @param out output settings
* @return Returns whether collapsible or not
* @deprecated internal method and will be removed in a future version
*/
@Deprecated
protected final boolean shouldCollapseAttribute(Document.OutputSettings out) {
return shouldCollapseAttribute(key, val, out);
}
// collapse unknown foo=null, known checked=null, checked="", checked=checked; write out others
protected static boolean shouldCollapseAttribute(final String key, @Nullable final String val, final Document.OutputSettings out) {
return (out.syntax() == Syntax.html &&
(val == null || (val.isEmpty() || val.equalsIgnoreCase(key)) && Attribute.isBooleanAttribute(key)));
}
/**
* Checks if this attribute name is defined as a boolean attribute in HTML5
*/
public static boolean isBooleanAttribute(final String key) {
return Arrays.binarySearch(booleanAttributes, Normalizer.lowerCase(key)) >= 0;
}
@Override
public boolean equals(@Nullable Object o) { // note parent not considered
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Attribute attribute = (Attribute) o;
return Objects.equals(key, attribute.key) && Objects.equals(val, attribute.val);
}
@Override
public int hashCode() { // note parent not considered
return Objects.hash(key, val);
}
@Override
public Attribute clone() {
try {
return (Attribute) super.clone();
} catch (CloneNotSupportedException e) {
throw new RuntimeException(e);
}
}
}
| Attribute |
java | apache__kafka | clients/src/main/java/org/apache/kafka/clients/consumer/internals/Fetcher.java | {
"start": 2671,
"end": 9385
} | class ____<K, V> extends AbstractFetch {
private final Logger log;
private final ConsumerNetworkClient client;
private final FetchCollector<K, V> fetchCollector;
public Fetcher(LogContext logContext,
ConsumerNetworkClient client,
ConsumerMetadata metadata,
SubscriptionState subscriptions,
FetchConfig fetchConfig,
Deserializers<K, V> deserializers,
FetchMetricsManager metricsManager,
Time time,
ApiVersions apiVersions) {
super(logContext, metadata, subscriptions, fetchConfig, new FetchBuffer(logContext), metricsManager, time, apiVersions);
this.log = logContext.logger(Fetcher.class);
this.client = client;
this.fetchCollector = new FetchCollector<>(logContext,
metadata,
subscriptions,
fetchConfig,
deserializers,
metricsManager,
time);
}
@Override
protected boolean isUnavailable(Node node) {
return client.isUnavailable(node);
}
@Override
protected void maybeThrowAuthFailure(Node node) {
client.maybeThrowAuthFailure(node);
}
public void clearBufferedDataForUnassignedPartitions(Collection<TopicPartition> assignedPartitions) {
fetchBuffer.retainAll(new HashSet<>(assignedPartitions));
}
/**
* Set up a fetch request for any node that we have assigned partitions for which doesn't already have
* an in-flight fetch or pending fetch data.
* @return number of fetches sent
*/
public synchronized int sendFetches() {
final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests();
sendFetchesInternal(
fetchRequests,
(fetchTarget, data, clientResponse) -> {
synchronized (Fetcher.this) {
handleFetchSuccess(fetchTarget, data, clientResponse);
}
},
(fetchTarget, data, error) -> {
synchronized (Fetcher.this) {
handleFetchFailure(fetchTarget, data, error);
}
});
return fetchRequests.size();
}
protected void maybeCloseFetchSessions(final Timer timer) {
final List<RequestFuture<ClientResponse>> requestFutures = sendFetchesInternal(
prepareCloseFetchSessionRequests(),
this::handleCloseFetchSessionSuccess,
this::handleCloseFetchSessionFailure
);
// Poll to ensure that request has been written to the socket. Wait until either the timer has expired or until
// all requests have received a response.
while (timer.notExpired() && !requestFutures.stream().allMatch(RequestFuture::isDone)) {
client.poll(timer, null, true);
timer.update();
}
if (!requestFutures.stream().allMatch(RequestFuture::isDone)) {
// we ran out of time before completing all futures. It is ok since we don't want to block the shutdown
// here.
log.debug("All requests couldn't be sent in the specific timeout period {}ms. " +
"This may result in unnecessary fetch sessions at the broker. Consider increasing the timeout passed for " +
"KafkaConsumer.close(...)", timer.timeoutMs());
}
}
public Fetch<K, V> collectFetch() {
return fetchCollector.collectFetch(fetchBuffer);
}
/**
* This method is called by {@link #close(Timer)} which is guarded by the {@link IdempotentCloser}) such as to only
* be executed once the first time that any of the {@link #close()} methods are called. Subclasses can override
* this method without the need for extra synchronization at the instance level.
*
* <p/>
*
* <em>Note</em>: this method is <code>synchronized</code> to reinstitute the 3.5 behavior:
*
* <blockquote>
* Shared states (e.g. sessionHandlers) could be accessed by multiple threads (such as heartbeat thread), hence,
* it is necessary to acquire a lock on the fetcher instance before modifying the states.
* </blockquote>
*
* @param timer Timer to enforce time limit
*/
// Visible for testing
protected synchronized void closeInternal(Timer timer) {
// we do not need to re-enable wake-ups since we are closing already
client.disableWakeups();
maybeCloseFetchSessions(timer);
super.closeInternal(timer);
}
/**
* Creates the {@link FetchRequest.Builder fetch request},
* {@link NetworkClient#send(ClientRequest, long) enqueues/sends it, and adds the {@link RequestFuture callback}
* for the response.
*
* @param fetchRequests {@link Map} of {@link Node nodes} to their
* {@link FetchSessionHandler.FetchRequestData request data}
* @param successHandler {@link ResponseHandler Handler for successful responses}
* @param errorHandler {@link ResponseHandler Handler for failure responses}
* @return List of {@link RequestFuture callbacks}
*/
private List<RequestFuture<ClientResponse>> sendFetchesInternal(Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests,
ResponseHandler<ClientResponse> successHandler,
ResponseHandler<Throwable> errorHandler) {
final List<RequestFuture<ClientResponse>> requestFutures = new ArrayList<>();
for (Map.Entry<Node, FetchSessionHandler.FetchRequestData> entry : fetchRequests.entrySet()) {
final Node fetchTarget = entry.getKey();
final FetchSessionHandler.FetchRequestData data = entry.getValue();
final FetchRequest.Builder request = createFetchRequest(fetchTarget, data);
final RequestFuture<ClientResponse> responseFuture = client.send(fetchTarget, request);
responseFuture.addListener(new RequestFutureListener<>() {
@Override
public void onSuccess(ClientResponse resp) {
successHandler.handle(fetchTarget, data, resp);
}
@Override
public void onFailure(RuntimeException e) {
errorHandler.handle(fetchTarget, data, e);
}
});
requestFutures.add(responseFuture);
}
return requestFutures;
}
} | Fetcher |
java | elastic__elasticsearch | x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java | {
"start": 9381,
"end": 10481
} | class ____ implements SecurityExtension {
private final String realmType;
private final ServiceAccountTokenStore serviceAccountTokenStore;
private final String extensionName;
DummyExtension(String realmType) {
this(realmType, "DummyExtension", null);
}
DummyExtension(String realmType, String extensionName, @Nullable ServiceAccountTokenStore serviceAccountTokenStore) {
this.realmType = realmType;
this.extensionName = extensionName;
this.serviceAccountTokenStore = serviceAccountTokenStore;
}
@Override
public String extensionName() {
return extensionName;
}
@Override
public Map<String, Realm.Factory> getRealms(SecurityComponents components) {
return Collections.singletonMap(realmType, config -> null);
}
@Override
public ServiceAccountTokenStore getServiceAccountTokenStore(SecurityComponents components) {
return serviceAccountTokenStore;
}
}
public static | DummyExtension |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/internal/char2darrays/Char2DArrays_assertEmpty_Test.java | {
"start": 1069,
"end": 1322
} | class ____ extends Char2DArraysBaseTest {
@Test
void should_delegate_to_Arrays2D() {
// WHEN
char2DArrays.assertEmpty(someInfo(), actual);
// THEN
verify(arrays2d).assertEmpty(info, failures, actual);
}
}
| Char2DArrays_assertEmpty_Test |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/NarrowCalculationTest.java | {
"start": 937,
"end": 1375
} | class ____ {
private final CompilationTestHelper helper =
CompilationTestHelper.newInstance(NarrowCalculation.class, getClass());
private final BugCheckerRefactoringTestHelper refactoring =
BugCheckerRefactoringTestHelper.newInstance(NarrowCalculation.class, getClass());
@Test
public void integerDivision() {
helper
.addSourceLines(
"Test.java",
"""
| NarrowCalculationTest |
java | apache__camel | components/camel-jdbc/src/main/java/org/apache/camel/component/jdbc/JdbcComponent.java | {
"start": 1321,
"end": 4750
} | class ____ extends DefaultComponent {
private static final Logger LOG = LoggerFactory.getLogger(JdbcComponent.class);
@Metadata
private DataSource dataSource;
@Metadata(label = "advanced")
private ConnectionStrategy connectionStrategy;
@Override
protected Endpoint createEndpoint(String uri, String remaining, Map<String, Object> parameters) throws Exception {
DataSource dataSource;
String dataSourceRef;
if (this.dataSource != null) {
// prefer to use datasource set by setter
dataSource = this.dataSource;
dataSourceRef = "component";
} else {
DataSource target = CamelContextHelper.lookup(getCamelContext(), remaining, DataSource.class);
if (target == null && !isDefaultDataSourceName(remaining)) {
throw new NoSuchBeanException(remaining, DataSource.class.getName());
} else if (target == null) {
// check if the registry contains a single instance of DataSource
Set<DataSource> dataSources = getCamelContext().getRegistry().findByType(DataSource.class);
if (dataSources.size() > 1) {
throw new IllegalArgumentException(
"Multiple DataSources found in the registry and no explicit configuration provided");
} else if (dataSources.size() == 1) {
target = dataSources.iterator().next();
}
if (target == null) {
throw new IllegalArgumentException("No default DataSource found in the registry");
}
LOG.debug("Using default DataSource discovered from registry: {}", target);
}
dataSource = target;
dataSourceRef = remaining;
}
Map<String, Object> params = PropertiesHelper.extractProperties(parameters, "statement.");
JdbcEndpoint jdbc = createEndpoint(uri, this, dataSource);
if (connectionStrategy != null) {
jdbc.setConnectionStrategy(connectionStrategy);
}
jdbc.setDataSourceName(dataSourceRef);
jdbc.setParameters(params);
setProperties(jdbc, parameters);
return jdbc;
}
protected JdbcEndpoint createEndpoint(String uri, JdbcComponent component, DataSource dataSource) {
return new JdbcEndpoint(uri, component, dataSource);
}
/**
* To use the {@link DataSource} instance instead of looking up the data source by name from the registry.
*/
public void setDataSource(DataSource dataSource) {
this.dataSource = dataSource;
}
public DataSource getDataSource() {
return dataSource;
}
public ConnectionStrategy getConnectionStrategy() {
return connectionStrategy;
}
/**
* To use a custom strategy for working with connections.
*
* Do not use a custom strategy when using the spring-jdbc component because a special Spring ConnectionStrategy is
* used by default to support Spring Transactions.
*/
public void setConnectionStrategy(ConnectionStrategy connectionStrategy) {
this.connectionStrategy = connectionStrategy;
}
private static boolean isDefaultDataSourceName(String remaining) {
return "dataSource".equalsIgnoreCase(remaining) || "default".equalsIgnoreCase(remaining);
}
}
| JdbcComponent |
java | elastic__elasticsearch | x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/codecs/lucene40/blocktree/SegmentTermsEnum.java | {
"start": 1843,
"end": 46482
} | class ____ extends BaseTermsEnum {
// Lazy init:
IndexInput in;
private SegmentTermsEnumFrame[] stack;
private final SegmentTermsEnumFrame staticFrame;
SegmentTermsEnumFrame currentFrame;
boolean termExists;
final FieldReader fr;
private int targetBeforeCurrentLength;
// static boolean DEBUG = BlockTreeTermsWriter.DEBUG;
private final ByteArrayDataInput scratchReader = new ByteArrayDataInput();
// What prefix of the current term was present in the index; when we only next() through the
// index, this stays at 0. It's only set when
// we seekCeil/Exact:
private int validIndexPrefix;
// assert only:
private boolean eof;
final BytesRefBuilder term = new BytesRefBuilder();
private final FST.BytesReader fstReader;
@SuppressWarnings({ "rawtypes", "unchecked" })
private FST.Arc<BytesRef>[] arcs = new FST.Arc[1];
SegmentTermsEnum(FieldReader fr) throws IOException {
this.fr = fr;
// if (DEBUG) {
// System.out.println("BTTR.init seg=" + fr.parent.segment);
// }
stack = new SegmentTermsEnumFrame[0];
// Used to hold seek by TermState, or cached seek
staticFrame = new SegmentTermsEnumFrame(this, -1);
if (fr.index == null) {
fstReader = null;
} else {
fstReader = fr.index.getBytesReader();
}
// Init w/ root block; don't use index since it may
// not (and need not) have been loaded
for (int arcIdx = 0; arcIdx < arcs.length; arcIdx++) {
arcs[arcIdx] = new FST.Arc<>();
}
currentFrame = staticFrame;
final FST.Arc<BytesRef> arc;
if (fr.index != null) {
arc = fr.index.getFirstArc(arcs[0]);
// Empty string prefix must have an output in the index!
assert arc.isFinal();
} else {
arc = null;
}
// currentFrame = pushFrame(arc, rootCode, 0);
// currentFrame.loadBlock();
validIndexPrefix = 0;
// if (DEBUG) {
// System.out.println("init frame state " + currentFrame.ord);
// printSeekState();
// }
// System.out.println();
// computeBlockStats().print(System.out);
}
// Not private to avoid synthetic access$NNN methods
void initIndexInput() {
if (this.in == null) {
this.in = fr.parent.termsIn.clone();
}
}
/** Runs next() through the entire terms dict, computing aggregate statistics. */
public Stats computeBlockStats() throws IOException {
Stats stats = new Stats(fr.parent.segment, fr.fieldInfo.name);
if (fr.index != null) {
stats.indexNumBytes = fr.index.ramBytesUsed();
}
currentFrame = staticFrame;
FST.Arc<BytesRef> arc;
if (fr.index != null) {
arc = fr.index.getFirstArc(arcs[0]);
// Empty string prefix must have an output in the index!
assert arc.isFinal();
} else {
arc = null;
}
// Empty string prefix must have an output in the
// index!
currentFrame = pushFrame(arc, fr.rootCode, 0);
currentFrame.fpOrig = currentFrame.fp;
currentFrame.loadBlock();
validIndexPrefix = 0;
stats.startBlock(currentFrame, currentFrame.isLastInFloor == false);
allTerms: while (true) {
// Pop finished blocks
while (currentFrame.nextEnt == currentFrame.entCount) {
stats.endBlock(currentFrame);
if (currentFrame.isLastInFloor == false) {
// Advance to next floor block
currentFrame.loadNextFloorBlock();
stats.startBlock(currentFrame, true);
break;
} else {
if (currentFrame.ord == 0) {
break allTerms;
}
final long lastFP = currentFrame.fpOrig;
currentFrame = stack[currentFrame.ord - 1];
assert lastFP == currentFrame.lastSubFP;
// if (DEBUG) {
// System.out.println(" reset validIndexPrefix=" + validIndexPrefix);
// }
}
}
while (true) {
if (currentFrame.next()) {
// Push to new block:
currentFrame = pushFrame(null, currentFrame.lastSubFP, term.length());
currentFrame.fpOrig = currentFrame.fp;
// This is a "next" frame -- even if it's
// floor'd we must pretend it isn't so we don't
// try to scan to the right floor frame:
currentFrame.loadBlock();
stats.startBlock(currentFrame, currentFrame.isLastInFloor == false);
} else {
stats.term(term.get());
break;
}
}
}
stats.finish();
// Put root frame back:
currentFrame = staticFrame;
if (fr.index != null) {
arc = fr.index.getFirstArc(arcs[0]);
// Empty string prefix must have an output in the index!
assert arc.isFinal();
} else {
arc = null;
}
currentFrame = pushFrame(arc, fr.rootCode, 0);
currentFrame.rewind();
currentFrame.loadBlock();
validIndexPrefix = 0;
term.clear();
return stats;
}
private SegmentTermsEnumFrame getFrame(int ord) throws IOException {
if (ord >= stack.length) {
final SegmentTermsEnumFrame[] next = new SegmentTermsEnumFrame[ArrayUtil.oversize(
1 + ord,
RamUsageEstimator.NUM_BYTES_OBJECT_REF
)];
System.arraycopy(stack, 0, next, 0, stack.length);
for (int stackOrd = stack.length; stackOrd < next.length; stackOrd++) {
next[stackOrd] = new SegmentTermsEnumFrame(this, stackOrd);
}
stack = next;
}
assert stack[ord].ord == ord;
return stack[ord];
}
private FST.Arc<BytesRef> getArc(int ord) {
if (ord >= arcs.length) {
@SuppressWarnings({ "rawtypes", "unchecked" })
final FST.Arc<BytesRef>[] next = new FST.Arc[ArrayUtil.oversize(1 + ord, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
System.arraycopy(arcs, 0, next, 0, arcs.length);
for (int arcOrd = arcs.length; arcOrd < next.length; arcOrd++) {
next[arcOrd] = new FST.Arc<>();
}
arcs = next;
}
return arcs[ord];
}
// Pushes a frame we seek'd to
SegmentTermsEnumFrame pushFrame(FST.Arc<BytesRef> arc, BytesRef frameData, int length) throws IOException {
scratchReader.reset(frameData.bytes, frameData.offset, frameData.length);
final long code = scratchReader.readVLong();
final long fpSeek = code >>> Lucene40BlockTreeTermsReader.OUTPUT_FLAGS_NUM_BITS;
final SegmentTermsEnumFrame f = getFrame(1 + currentFrame.ord);
f.hasTerms = (code & Lucene40BlockTreeTermsReader.OUTPUT_FLAG_HAS_TERMS) != 0;
f.hasTermsOrig = f.hasTerms;
f.isFloor = (code & Lucene40BlockTreeTermsReader.OUTPUT_FLAG_IS_FLOOR) != 0;
if (f.isFloor) {
f.setFloorData(scratchReader, frameData);
}
pushFrame(arc, fpSeek, length);
return f;
}
// Pushes next'd frame or seek'd frame; we later
// lazy-load the frame only when needed
SegmentTermsEnumFrame pushFrame(FST.Arc<BytesRef> arc, long fp, int length) throws IOException {
final SegmentTermsEnumFrame f = getFrame(1 + currentFrame.ord);
f.arc = arc;
if (f.fpOrig == fp && f.nextEnt != -1) {
// if (DEBUG) System.out.println(" push reused frame ord=" + f.ord + " fp=" + f.fp + "
// isFloor?=" + f.isFloor + " hasTerms=" + f.hasTerms + " pref=" + term + " nextEnt=" +
// f.nextEnt + " targetBeforeCurrentLength=" + targetBeforeCurrentLength + " term.length=" +
// term.length + " vs prefix=" + f.prefix);
// if (f.prefix > targetBeforeCurrentLength) {
if (f.ord > targetBeforeCurrentLength) {
f.rewind();
} else {
// if (DEBUG) {
// System.out.println(" skip rewind!");
// }
}
assert length == f.prefix;
} else {
f.nextEnt = -1;
f.prefix = length;
f.state.termBlockOrd = 0;
f.fpOrig = f.fp = fp;
f.lastSubFP = -1;
// if (DEBUG) {
// final int sav = term.length;
// term.length = length;
// System.out.println(" push new frame ord=" + f.ord + " fp=" + f.fp + " hasTerms=" +
// f.hasTerms + " isFloor=" + f.isFloor + " pref=" + brToString(term));
// term.length = sav;
// }
}
return f;
}
// asserts only
private boolean clearEOF() {
eof = false;
return true;
}
// asserts only
private boolean setEOF() {
eof = true;
return true;
}
/*
// for debugging
@SuppressWarnings("unused")
static String brToString(BytesRef b) {
try {
return b.utf8ToString() + " " + b;
} catch (Throwable t) {
// If BytesRef isn't actually UTF8, or it's eg a
// prefix of UTF8 that ends mid-unicode-char, we
// fallback to hex:
return b.toString();
}
}
// for debugging
@SuppressWarnings("unused")
static String brToString(BytesRefBuilder b) {
return brToString(b.get());
}
*/
@Override
public boolean seekExact(BytesRef target) throws IOException {
if (fr.index == null) {
throw new IllegalStateException("terms index was not loaded");
}
if (fr.size() > 0 && (target.compareTo(fr.getMin()) < 0 || target.compareTo(fr.getMax()) > 0)) {
return false;
}
term.grow(1 + target.length);
assert clearEOF();
// if (DEBUG) {
// System.out.println("\nBTTR.seekExact seg=" + fr.parent.segment + " target=" +
// fr.fieldInfo.name + ":" + brToString(target) + " current=" + brToString(term) + " (exists?="
// + termExists + ") validIndexPrefix=" + validIndexPrefix);
// printSeekState(System.out);
// }
FST.Arc<BytesRef> arc;
int targetUpto;
BytesRef output;
targetBeforeCurrentLength = currentFrame.ord;
if (currentFrame != staticFrame) {
// We are already seek'd; find the common
// prefix of new seek term vs current term and
// re-use the corresponding seek state. For
// example, if app first seeks to foobar, then
// seeks to foobaz, we can re-use the seek state
// for the first 5 bytes.
// if (DEBUG) {
// System.out.println(" re-use current seek state validIndexPrefix=" + validIndexPrefix);
// }
arc = arcs[0];
assert arc.isFinal();
output = arc.output();
targetUpto = 0;
SegmentTermsEnumFrame lastFrame = stack[0];
assert validIndexPrefix <= term.length();
final int targetLimit = Math.min(target.length, validIndexPrefix);
int cmp = 0;
// TODO: reverse vLong byte order for better FST
// prefix output sharing
// First compare up to valid seek frames:
while (targetUpto < targetLimit) {
cmp = (term.byteAt(targetUpto) & 0xFF) - (target.bytes[target.offset + targetUpto] & 0xFF);
// if (DEBUG) {
// System.out.println(" cycle targetUpto=" + targetUpto + " (vs limit=" + targetLimit
// + ") cmp=" + cmp + " (targetLabel=" + (char) (target.bytes[target.offset + targetUpto]) +
// " vs termLabel=" + (char) (term.bytes[targetUpto]) + ")" + " arc.output=" + arc.output
// + " output=" + output);
// }
if (cmp != 0) {
break;
}
arc = arcs[1 + targetUpto];
assert arc.label() == (target.bytes[target.offset + targetUpto] & 0xFF)
: "arc.label=" + (char) arc.label() + " targetLabel=" + (char) (target.bytes[target.offset + targetUpto] & 0xFF);
if (arc.output() != Lucene40BlockTreeTermsReader.NO_OUTPUT) {
output = Lucene40BlockTreeTermsReader.FST_OUTPUTS.add(output, arc.output());
}
if (arc.isFinal()) {
lastFrame = stack[1 + lastFrame.ord];
}
targetUpto++;
}
if (cmp == 0) {
final int targetUptoMid = targetUpto;
// Second compare the rest of the term, but
// don't save arc/output/frame; we only do this
// to find out if the target term is before,
// equal or after the current term
final int targetLimit2 = Math.min(target.length, term.length());
while (targetUpto < targetLimit2) {
cmp = (term.byteAt(targetUpto) & 0xFF) - (target.bytes[target.offset + targetUpto] & 0xFF);
// if (DEBUG) {
// System.out.println(" cycle2 targetUpto=" + targetUpto + " (vs limit=" +
// targetLimit + ") cmp=" + cmp + " (targetLabel=" + (char) (target.bytes[target.offset +
// targetUpto]) + " vs termLabel=" + (char) (term.bytes[targetUpto]) + ")");
// }
if (cmp != 0) {
break;
}
targetUpto++;
}
if (cmp == 0) {
cmp = term.length() - target.length;
}
targetUpto = targetUptoMid;
}
if (cmp < 0) {
// Common case: target term is after current
// term, ie, app is seeking multiple terms
// in sorted order
// if (DEBUG) {
// System.out.println(" target is after current (shares prefixLen=" + targetUpto + ");
// frame.ord=" + lastFrame.ord);
// }
currentFrame = lastFrame;
} else if (cmp > 0) {
// Uncommon case: target term
// is before current term; this means we can
// keep the currentFrame but we must rewind it
// (so we scan from the start)
targetBeforeCurrentLength = lastFrame.ord;
// if (DEBUG) {
// System.out.println(" target is before current (shares prefixLen=" + targetUpto + ");
// rewind frame ord=" + lastFrame.ord);
// }
currentFrame = lastFrame;
currentFrame.rewind();
} else {
// Target is exactly the same as current term
assert term.length() == target.length;
if (termExists) {
// if (DEBUG) {
// System.out.println(" target is same as current; return true");
// }
return true;
} else {
// if (DEBUG) {
// System.out.println(" target is same as current but term doesn't exist");
// }
}
// validIndexPrefix = currentFrame.depth;
// term.length = target.length;
// return termExists;
}
} else {
targetBeforeCurrentLength = -1;
arc = fr.index.getFirstArc(arcs[0]);
// Empty string prefix must have an output (block) in the index!
assert arc.isFinal();
assert arc.output() != null;
// if (DEBUG) {
// System.out.println(" no seek state; push root frame");
// }
output = arc.output();
currentFrame = staticFrame;
// term.length = 0;
targetUpto = 0;
currentFrame = pushFrame(arc, Lucene40BlockTreeTermsReader.FST_OUTPUTS.add(output, arc.nextFinalOutput()), 0);
}
// if (DEBUG) {
// System.out.println(" start index loop targetUpto=" + targetUpto + " output=" + output + "
// currentFrame.ord=" + currentFrame.ord + " targetBeforeCurrentLength=" +
// targetBeforeCurrentLength);
// }
// We are done sharing the common prefix with the incoming target and where we are currently
// seek'd; now continue walking the index:
while (targetUpto < target.length) {
final int targetLabel = target.bytes[target.offset + targetUpto] & 0xFF;
final FST.Arc<BytesRef> nextArc = fr.index.findTargetArc(targetLabel, arc, getArc(1 + targetUpto), fstReader);
if (nextArc == null) {
// Index is exhausted
// if (DEBUG) {
// System.out.println(" index: index exhausted label=" + ((char) targetLabel) + " " +
// toHex(targetLabel));
// }
validIndexPrefix = currentFrame.prefix;
// validIndexPrefix = targetUpto;
currentFrame.scanToFloorFrame(target);
if (currentFrame.hasTerms == false) {
termExists = false;
term.setByteAt(targetUpto, (byte) targetLabel);
term.setLength(1 + targetUpto);
// if (DEBUG) {
// System.out.println(" FAST NOT_FOUND term=" + brToString(term));
// }
return false;
}
currentFrame.loadBlock();
final SeekStatus result = currentFrame.scanToTerm(target, true);
if (result == SeekStatus.FOUND) {
// if (DEBUG) {
// System.out.println(" return FOUND term=" + term.utf8ToString() + " " + term);
// }
return true;
} else {
// if (DEBUG) {
// System.out.println(" got " + result + "; return NOT_FOUND term=" +
// brToString(term));
// }
return false;
}
} else {
// Follow this arc
arc = nextArc;
term.setByteAt(targetUpto, (byte) targetLabel);
// Aggregate output as we go:
assert arc.output() != null;
if (arc.output() != Lucene40BlockTreeTermsReader.NO_OUTPUT) {
output = Lucene40BlockTreeTermsReader.FST_OUTPUTS.add(output, arc.output());
}
// if (DEBUG) {
// System.out.println(" index: follow label=" + toHex(target.bytes[target.offset +
// targetUpto]&0xff) + " arc.output=" + arc.output + " arc.nfo=" + arc.nextFinalOutput);
// }
targetUpto++;
if (arc.isFinal()) {
// if (DEBUG) System.out.println(" arc is final!");
currentFrame = pushFrame(arc, Lucene40BlockTreeTermsReader.FST_OUTPUTS.add(output, arc.nextFinalOutput()), targetUpto);
// if (DEBUG) System.out.println(" curFrame.ord=" + currentFrame.ord + " hasTerms=" +
// currentFrame.hasTerms);
}
}
}
// validIndexPrefix = targetUpto;
validIndexPrefix = currentFrame.prefix;
currentFrame.scanToFloorFrame(target);
// Target term is entirely contained in the index:
if (currentFrame.hasTerms == false) {
termExists = false;
term.setLength(targetUpto);
// if (DEBUG) {
// System.out.println(" FAST NOT_FOUND term=" + brToString(term));
// }
return false;
}
currentFrame.loadBlock();
final SeekStatus result = currentFrame.scanToTerm(target, true);
if (result == SeekStatus.FOUND) {
// if (DEBUG) {
// System.out.println(" return FOUND term=" + term.utf8ToString() + " " + term);
// }
return true;
} else {
// if (DEBUG) {
// System.out.println(" got result " + result + "; return NOT_FOUND term=" +
// term.utf8ToString());
// }
return false;
}
}
@Override
public SeekStatus seekCeil(BytesRef target) throws IOException {
if (fr.index == null) {
throw new IllegalStateException("terms index was not loaded");
}
term.grow(1 + target.length);
assert clearEOF();
// if (DEBUG) {
// System.out.println("\nBTTR.seekCeil seg=" + fr.parent.segment + " target=" +
// fr.fieldInfo.name + ":" + brToString(target) + " " + target + " current=" + brToString(term)
// + " (exists?=" + termExists + ") validIndexPrefix= " + validIndexPrefix);
// printSeekState(System.out);
// }
FST.Arc<BytesRef> arc;
int targetUpto;
BytesRef output;
targetBeforeCurrentLength = currentFrame.ord;
if (currentFrame != staticFrame) {
// We are already seek'd; find the common
// prefix of new seek term vs current term and
// re-use the corresponding seek state. For
// example, if app first seeks to foobar, then
// seeks to foobaz, we can re-use the seek state
// for the first 5 bytes.
// if (DEBUG) {
// System.out.println(" re-use current seek state validIndexPrefix=" + validIndexPrefix);
// }
arc = arcs[0];
assert arc.isFinal();
output = arc.output();
targetUpto = 0;
SegmentTermsEnumFrame lastFrame = stack[0];
assert validIndexPrefix <= term.length();
final int targetLimit = Math.min(target.length, validIndexPrefix);
int cmp = 0;
// TODO: we should write our vLong backwards (MSB
// first) to get better sharing from the FST
// First compare up to valid seek frames:
while (targetUpto < targetLimit) {
cmp = (term.byteAt(targetUpto) & 0xFF) - (target.bytes[target.offset + targetUpto] & 0xFF);
// if (DEBUG) {
// System.out.println(" cycle targetUpto=" + targetUpto + " (vs limit=" + targetLimit +
// ") cmp=" + cmp + " (targetLabel=" + (char) (target.bytes[target.offset + targetUpto]) + "
// vs termLabel=" + (char) (term.byteAt(targetUpto)) + ")" + " arc.output=" + arc.output +
// " output=" + output);
// }
if (cmp != 0) {
break;
}
arc = arcs[1 + targetUpto];
assert arc.label() == (target.bytes[target.offset + targetUpto] & 0xFF)
: "arc.label=" + (char) arc.label() + " targetLabel=" + (char) (target.bytes[target.offset + targetUpto] & 0xFF);
// TODO: we could save the outputs in local
// byte[][] instead of making new objs ever
// seek; but, often the FST doesn't have any
// shared bytes (but this could change if we
// reverse vLong byte order)
if (arc.output() != Lucene40BlockTreeTermsReader.NO_OUTPUT) {
output = Lucene40BlockTreeTermsReader.FST_OUTPUTS.add(output, arc.output());
}
if (arc.isFinal()) {
lastFrame = stack[1 + lastFrame.ord];
}
targetUpto++;
}
if (cmp == 0) {
final int targetUptoMid = targetUpto;
// Second compare the rest of the term, but
// don't save arc/output/frame:
final int targetLimit2 = Math.min(target.length, term.length());
while (targetUpto < targetLimit2) {
cmp = (term.byteAt(targetUpto) & 0xFF) - (target.bytes[target.offset + targetUpto] & 0xFF);
// if (DEBUG) {
// System.out.println(" cycle2 targetUpto=" + targetUpto + " (vs limit=" + targetLimit
// + ") cmp=" + cmp + " (targetLabel=" + (char) (target.bytes[target.offset + targetUpto])
// + " vs termLabel=" + (char) (term.byteAt(targetUpto)) + ")");
// }
if (cmp != 0) {
break;
}
targetUpto++;
}
if (cmp == 0) {
cmp = term.length() - target.length;
}
targetUpto = targetUptoMid;
}
if (cmp < 0) {
// Common case: target term is after current
// term, ie, app is seeking multiple terms
// in sorted order
// if (DEBUG) {
// System.out.println(" target is after current (shares prefixLen=" + targetUpto + ");
// clear frame.scanned ord=" + lastFrame.ord);
// }
currentFrame = lastFrame;
} else if (cmp > 0) {
// Uncommon case: target term
// is before current term; this means we can
// keep the currentFrame but we must rewind it
// (so we scan from the start)
targetBeforeCurrentLength = 0;
// if (DEBUG) {
// System.out.println(" target is before current (shares prefixLen=" + targetUpto + ");
// rewind frame ord=" + lastFrame.ord);
// }
currentFrame = lastFrame;
currentFrame.rewind();
} else {
// Target is exactly the same as current term
assert term.length() == target.length;
if (termExists) {
// if (DEBUG) {
// System.out.println(" target is same as current; return FOUND");
// }
return SeekStatus.FOUND;
} else {
// if (DEBUG) {
// System.out.println(" target is same as current but term doesn't exist");
// }
}
}
} else {
targetBeforeCurrentLength = -1;
arc = fr.index.getFirstArc(arcs[0]);
// Empty string prefix must have an output (block) in the index!
assert arc.isFinal();
assert arc.output() != null;
// if (DEBUG) {
// System.out.println(" no seek state; push root frame");
// }
output = arc.output();
currentFrame = staticFrame;
// term.length = 0;
targetUpto = 0;
currentFrame = pushFrame(arc, Lucene40BlockTreeTermsReader.FST_OUTPUTS.add(output, arc.nextFinalOutput()), 0);
}
// if (DEBUG) {
// System.out.println(" start index loop targetUpto=" + targetUpto + " output=" + output + "
// currentFrame.ord+1=" + currentFrame.ord + " targetBeforeCurrentLength=" +
// targetBeforeCurrentLength);
// }
// We are done sharing the common prefix with the incoming target and where we are currently
// seek'd; now continue walking the index:
while (targetUpto < target.length) {
final int targetLabel = target.bytes[target.offset + targetUpto] & 0xFF;
final FST.Arc<BytesRef> nextArc = fr.index.findTargetArc(targetLabel, arc, getArc(1 + targetUpto), fstReader);
if (nextArc == null) {
// Index is exhausted
// if (DEBUG) {
// System.out.println(" index: index exhausted label=" + ((char) targetLabel) + " " +
// targetLabel);
// }
validIndexPrefix = currentFrame.prefix;
// validIndexPrefix = targetUpto;
currentFrame.scanToFloorFrame(target);
currentFrame.loadBlock();
// if (DEBUG) System.out.println(" now scanToTerm");
final SeekStatus result = currentFrame.scanToTerm(target, false);
if (result == SeekStatus.END) {
term.copyBytes(target);
termExists = false;
if (next() != null) {
// if (DEBUG) {
// System.out.println(" return NOT_FOUND term=" + brToString(term));
// }
return SeekStatus.NOT_FOUND;
} else {
// if (DEBUG) {
// System.out.println(" return END");
// }
return SeekStatus.END;
}
} else {
// if (DEBUG) {
// System.out.println(" return " + result + " term=" + brToString(term));
// }
return result;
}
} else {
// Follow this arc
term.setByteAt(targetUpto, (byte) targetLabel);
arc = nextArc;
// Aggregate output as we go:
assert arc.output() != null;
if (arc.output() != Lucene40BlockTreeTermsReader.NO_OUTPUT) {
output = Lucene40BlockTreeTermsReader.FST_OUTPUTS.add(output, arc.output());
}
// if (DEBUG) {
// System.out.println(" index: follow label=" + (target.bytes[target.offset +
// targetUpto]&0xff) + " arc.output=" + arc.output + " arc.nfo=" + arc.nextFinalOutput);
// }
targetUpto++;
if (arc.isFinal()) {
// if (DEBUG) System.out.println(" arc is final!");
currentFrame = pushFrame(arc, Lucene40BlockTreeTermsReader.FST_OUTPUTS.add(output, arc.nextFinalOutput()), targetUpto);
// if (DEBUG) System.out.println(" curFrame.ord=" + currentFrame.ord + " hasTerms=" +
// currentFrame.hasTerms);
}
}
}
// validIndexPrefix = targetUpto;
validIndexPrefix = currentFrame.prefix;
currentFrame.scanToFloorFrame(target);
currentFrame.loadBlock();
final SeekStatus result = currentFrame.scanToTerm(target, false);
if (result == SeekStatus.END) {
term.copyBytes(target);
termExists = false;
if (next() != null) {
// if (DEBUG) {
// System.out.println(" return NOT_FOUND term=" + term.get().utf8ToString() + " " + term);
// }
return SeekStatus.NOT_FOUND;
} else {
// if (DEBUG) {
// System.out.println(" return END");
// }
return SeekStatus.END;
}
} else {
return result;
}
}
    /**
     * Debug helper: prints the stack of frames describing the prior seek state to
     * {@code out}, and cross-checks each seek frame against the FST index
     * ({@code fr.index}), throwing {@link RuntimeException} if the recorded state
     * is inconsistent (arc label mismatch, non-final prefix, or output code
     * mismatch). Only used for manual debugging; never called in production.
     */
    @SuppressWarnings("unused")
    private void printSeekState(PrintStream out) throws IOException {
        if (currentFrame == staticFrame) {
            out.println("  no prior seek");
        } else {
            out.println("  prior seek state:");
            int ord = 0;
            boolean isSeekFrame = true;
            // Walk the frame stack from the root down to currentFrame.
            while (true) {
                SegmentTermsEnumFrame f = getFrame(ord);
                assert f != null;
                final BytesRef prefix = new BytesRef(term.get().bytes, 0, f.prefix);
                // nextEnt == -1 means the frame's block has not been loaded yet.
                if (f.nextEnt == -1) {
                    out.println(
                        "    frame "
                            + (isSeekFrame ? "(seek)" : "(next)")
                            + " ord="
                            + ord
                            + " fp="
                            + f.fp
                            + (f.isFloor ? (" (fpOrig=" + f.fpOrig + ")") : "")
                            + " prefixLen="
                            + f.prefix
                            + " prefix="
                            + prefix
                            + (f.nextEnt == -1 ? "" : (" (of " + f.entCount + ")"))
                            + " hasTerms="
                            + f.hasTerms
                            + " isFloor="
                            + f.isFloor
                            + " code="
                            + ((f.fp << Lucene40BlockTreeTermsReader.OUTPUT_FLAGS_NUM_BITS) + (f.hasTerms
                                ? Lucene40BlockTreeTermsReader.OUTPUT_FLAG_HAS_TERMS
                                : 0) + (f.isFloor ? Lucene40BlockTreeTermsReader.OUTPUT_FLAG_IS_FLOOR : 0))
                            + " isLastInFloor="
                            + f.isLastInFloor
                            + " mdUpto="
                            + f.metaDataUpto
                            + " tbOrd="
                            + f.getTermBlockOrd()
                    );
                } else {
                    out.println(
                        "    frame "
                            + (isSeekFrame ? "(seek, loaded)" : "(next, loaded)")
                            + " ord="
                            + ord
                            + " fp="
                            + f.fp
                            + (f.isFloor ? (" (fpOrig=" + f.fpOrig + ")") : "")
                            + " prefixLen="
                            + f.prefix
                            + " prefix="
                            + prefix
                            + " nextEnt="
                            + f.nextEnt
                            + (f.nextEnt == -1 ? "" : (" (of " + f.entCount + ")"))
                            + " hasTerms="
                            + f.hasTerms
                            + " isFloor="
                            + f.isFloor
                            + " code="
                            + ((f.fp << Lucene40BlockTreeTermsReader.OUTPUT_FLAGS_NUM_BITS) + (f.hasTerms
                                ? Lucene40BlockTreeTermsReader.OUTPUT_FLAG_HAS_TERMS
                                : 0) + (f.isFloor ? Lucene40BlockTreeTermsReader.OUTPUT_FLAG_IS_FLOOR : 0))
                            + " lastSubFP="
                            + f.lastSubFP
                            + " isLastInFloor="
                            + f.isLastInFloor
                            + " mdUpto="
                            + f.metaDataUpto
                            + " tbOrd="
                            + f.getTermBlockOrd()
                    );
                }
                if (fr.index != null) {
                    assert isSeekFrame == false || f.arc != null : "isSeekFrame=" + isSeekFrame + " f.arc=" + f.arc;
                    // A seek frame's arc label must agree with the corresponding term byte.
                    if (f.prefix > 0 && isSeekFrame && f.arc.label() != (term.byteAt(f.prefix - 1) & 0xFF)) {
                        out.println(
                            "      broken seek state: arc.label="
                                + (char) f.arc.label()
                                + " vs term byte="
                                + (char) (term.byteAt(f.prefix - 1) & 0xFF)
                        );
                        throw new RuntimeException("seek state is broken");
                    }
                    BytesRef output = Util.get(fr.index, prefix);
                    if (output == null) {
                        out.println("      broken seek state: prefix is not final in index");
                        throw new RuntimeException("seek state is broken");
                    } else if (isSeekFrame && f.isFloor == false) {
                        // Re-derive the frame's code from fp/hasTerms/isFloor and compare
                        // with the code stored in the index output.
                        final ByteArrayDataInput reader = new ByteArrayDataInput(output.bytes, output.offset, output.length);
                        final long codeOrig = reader.readVLong();
                        final long code = (f.fp << Lucene40BlockTreeTermsReader.OUTPUT_FLAGS_NUM_BITS) | (f.hasTerms
                            ? Lucene40BlockTreeTermsReader.OUTPUT_FLAG_HAS_TERMS
                            : 0) | (f.isFloor ? Lucene40BlockTreeTermsReader.OUTPUT_FLAG_IS_FLOOR : 0);
                        if (codeOrig != code) {
                            out.println("      broken seek state: output code=" + codeOrig + " doesn't match frame code=" + code);
                            throw new RuntimeException("seek state is broken");
                        }
                    }
                }
                if (f == currentFrame) {
                    break;
                }
                // Frames past validIndexPrefix were produced by next(), not by a seek.
                if (f.prefix == validIndexPrefix) {
                    isSeekFrame = false;
                }
                ord++;
            }
        }
    }
    /**
     * Decodes only the term bytes of the next term. If the caller then asks for
     * metadata (docFreq, totalTermFreq) or pulls a postings enum, all metadata up
     * to the current term is then (lazily) decoded.
     *
     * @return the next term, or {@code null} once the enum is exhausted
     */
    @Override
    public BytesRef next() throws IOException {
        if (in == null) {
            // Fresh TermsEnum; seek to first term:
            final FST.Arc<BytesRef> arc;
            if (fr.index != null) {
                arc = fr.index.getFirstArc(arcs[0]);
                // Empty string prefix must have an output in the index!
                assert arc.isFinal();
            } else {
                arc = null;
            }
            currentFrame = pushFrame(arc, fr.rootCode, 0);
            currentFrame.loadBlock();
        }
        targetBeforeCurrentLength = currentFrame.ord;
        assert eof == false;
        if (currentFrame == staticFrame) {
            // If seek was previously called and the term was
            // cached, or seek(TermState) was called, usually
            // caller is just going to pull a D/&PEnum or get
            // docFreq, etc. But, if they then call next(),
            // this method catches up all internal state so next()
            // works properly:
            final boolean result = seekExact(term.get());
            assert result;
        }
        // Pop finished blocks
        while (currentFrame.nextEnt == currentFrame.entCount) {
            if (currentFrame.isLastInFloor == false) {
                // Advance to next floor block
                currentFrame.loadNextFloorBlock();
                break;
            } else {
                // Current block exhausted: pop to the parent frame.
                if (currentFrame.ord == 0) {
                    // Already at the root frame: the whole field is exhausted.
                    assert setEOF();
                    term.clear();
                    validIndexPrefix = 0;
                    currentFrame.rewind();
                    termExists = false;
                    return null;
                }
                final long lastFP = currentFrame.fpOrig;
                currentFrame = stack[currentFrame.ord - 1];
                if (currentFrame.nextEnt == -1 || currentFrame.lastSubFP != lastFP) {
                    // We popped into a frame that's not loaded
                    // yet or not scan'd to the right entry
                    currentFrame.scanToFloorFrame(term.get());
                    currentFrame.loadBlock();
                    currentFrame.scanToSubBlock(lastFP);
                }
                // Note that the seek state (last seek) has been
                // invalidated beyond this depth
                validIndexPrefix = Math.min(validIndexPrefix, currentFrame.prefix);
            }
        }
        // Descend into sub-blocks until a real term entry is reached.
        while (true) {
            if (currentFrame.next()) {
                // Push to new block:
                currentFrame = pushFrame(null, currentFrame.lastSubFP, term.length());
                // This is a "next" frame -- even if it's
                // floor'd we must pretend it isn't so we don't
                // try to scan to the right floor frame:
                currentFrame.loadBlock();
            } else {
                return term.get();
            }
        }
    }
@Override
public BytesRef term() {
assert eof == false;
return term.get();
}
@Override
public int docFreq() throws IOException {
assert eof == false;
// if (DEBUG) System.out.println("BTR.docFreq");
currentFrame.decodeMetaData();
// if (DEBUG) System.out.println(" return " + currentFrame.state.docFreq);
return currentFrame.state.docFreq;
}
@Override
public long totalTermFreq() throws IOException {
assert eof == false;
currentFrame.decodeMetaData();
return currentFrame.state.totalTermFreq;
}
@Override
public PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException {
assert eof == false;
// if (DEBUG) {
// System.out.println("BTTR.docs seg=" + segment);
// }
currentFrame.decodeMetaData();
// if (DEBUG) {
// System.out.println(" state=" + currentFrame.state);
// }
return fr.parent.postingsReader.postings(fr.fieldInfo, currentFrame.state, reuse, flags);
}
@Override
public ImpactsEnum impacts(int flags) throws IOException {
assert eof == false;
// if (DEBUG) {
// System.out.println("BTTR.docs seg=" + segment);
// }
currentFrame.decodeMetaData();
// if (DEBUG) {
// System.out.println(" state=" + currentFrame.state);
// }
return fr.parent.postingsReader.impacts(fr.fieldInfo, currentFrame.state, flags);
}
@Override
public void seekExact(BytesRef target, TermState otherState) {
// if (DEBUG) {
// System.out.println("BTTR.seekExact termState seg=" + segment + " target=" +
// target.utf8ToString() + " " + target + " state=" + otherState);
// }
assert clearEOF();
if (target.compareTo(term.get()) != 0 || termExists == false) {
assert otherState != null && otherState instanceof BlockTermState;
currentFrame = staticFrame;
currentFrame.state.copyFrom(otherState);
term.copyBytes(target);
currentFrame.metaDataUpto = currentFrame.getTermBlockOrd();
assert currentFrame.metaDataUpto > 0;
validIndexPrefix = 0;
} else {
// if (DEBUG) {
// System.out.println(" skip seek: already on target state=" + currentFrame.state);
// }
}
}
@Override
public TermState termState() throws IOException {
assert eof == false;
currentFrame.decodeMetaData();
TermState ts = currentFrame.state.clone();
// if (DEBUG) System.out.println("BTTR.termState seg=" + segment + " state=" + ts);
return ts;
}
@Override
public void seekExact(long ord) {
throw new UnsupportedOperationException();
}
@Override
public long ord() {
throw new UnsupportedOperationException();
}
}
| SegmentTermsEnum |
java | quarkusio__quarkus | extensions/quartz/runtime/src/main/java/io/quarkus/quartz/runtime/jdbc/DBDelegateUtils.java | {
"start": 132,
"end": 751
} | class ____ {
/**
* A method to deserialize a marshalled object in an input stream.
* This implementation uses {@link QuarkusObjectInputStream} instead of {@link ObjectInputStream} to workaround
* a {@link ClassNotFoundException} issue observed in Test & Dev mode when `resolveClass(ObjectStreamClass)` is called.
*/
static Object getObjectFromInput(InputStream binaryInput) throws ClassNotFoundException, IOException {
if (binaryInput == null || binaryInput.available() == 0) {
return null;
}
// use an instance of the QuarkusObjectInputStream | DBDelegateUtils |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java | {
"start": 2440,
"end": 11273
} | class ____ extends SearchContext {
private final SearchContext in;
public FilteredSearchContext(SearchContext in) {
this.in = in;
}
@Override
public boolean hasStoredFields() {
return in.hasStoredFields();
}
@Override
public StoredFieldsContext storedFieldsContext() {
return in.storedFieldsContext();
}
@Override
public SearchContext storedFieldsContext(StoredFieldsContext storedFieldsContext) {
return in.storedFieldsContext(storedFieldsContext);
}
@Override
public void preProcess() {
in.preProcess();
}
@Override
public Query buildFilteredQuery(Query query) {
return in.buildFilteredQuery(query);
}
@Override
public ShardSearchContextId id() {
return in.id();
}
@Override
public String source() {
return in.source();
}
@Override
public ShardSearchRequest request() {
return in.request();
}
@Override
public SearchType searchType() {
return in.searchType();
}
@Override
public SearchShardTarget shardTarget() {
return in.shardTarget();
}
@Override
public int numberOfShards() {
return in.numberOfShards();
}
@Override
public ScrollContext scrollContext() {
return in.scrollContext();
}
@Override
public SearchContextAggregations aggregations() {
return in.aggregations();
}
@Override
public SearchContext aggregations(SearchContextAggregations aggregations) {
return in.aggregations(aggregations);
}
@Override
public SearchHighlightContext highlight() {
return in.highlight();
}
@Override
public void highlight(SearchHighlightContext highlight) {
in.highlight(highlight);
}
@Override
public InnerHitsContext innerHits() {
return in.innerHits();
}
@Override
public SuggestionSearchContext suggest() {
return in.suggest();
}
@Override
public QueryPhaseRankShardContext queryPhaseRankShardContext() {
return in.queryPhaseRankShardContext();
}
@Override
public void queryPhaseRankShardContext(QueryPhaseRankShardContext queryPhaseRankShardContext) {
in.queryPhaseRankShardContext(queryPhaseRankShardContext);
}
@Override
public List<RescoreContext> rescore() {
return in.rescore();
}
@Override
public boolean hasScriptFields() {
return in.hasScriptFields();
}
@Override
public ScriptFieldsContext scriptFields() {
return in.scriptFields();
}
@Override
public boolean sourceRequested() {
return in.sourceRequested();
}
@Override
public FetchSourceContext fetchSourceContext() {
return in.fetchSourceContext();
}
@Override
public SearchContext fetchSourceContext(FetchSourceContext fetchSourceContext) {
return in.fetchSourceContext(fetchSourceContext);
}
@Override
public ContextIndexSearcher searcher() {
return in.searcher();
}
@Override
public IndexShard indexShard() {
return in.indexShard();
}
@Override
public BitsetFilterCache bitsetFilterCache() {
return in.bitsetFilterCache();
}
@Override
public TimeValue timeout() {
return in.timeout();
}
@Override
public int terminateAfter() {
return in.terminateAfter();
}
@Override
public void terminateAfter(int terminateAfter) {
in.terminateAfter(terminateAfter);
}
@Override
public boolean lowLevelCancellation() {
return in.lowLevelCancellation();
}
@Override
public SearchContext minimumScore(float minimumScore) {
return in.minimumScore(minimumScore);
}
@Override
public Float minimumScore() {
return in.minimumScore();
}
@Override
public SearchContext sort(SortAndFormats sort) {
return in.sort(sort);
}
@Override
public SortAndFormats sort() {
return in.sort();
}
@Override
public SearchContext trackScores(boolean trackScores) {
return in.trackScores(trackScores);
}
@Override
public boolean trackScores() {
return in.trackScores();
}
@Override
public SearchContext trackTotalHitsUpTo(int trackTotalHitsUpTo) {
return in.trackTotalHitsUpTo(trackTotalHitsUpTo);
}
@Override
public int trackTotalHitsUpTo() {
return in.trackTotalHitsUpTo();
}
@Override
public SearchContext searchAfter(FieldDoc searchAfter) {
return in.searchAfter(searchAfter);
}
@Override
public FieldDoc searchAfter() {
return in.searchAfter();
}
@Override
public SearchContext parsedPostFilter(ParsedQuery postFilter) {
return in.parsedPostFilter(postFilter);
}
@Override
public ParsedQuery parsedPostFilter() {
return in.parsedPostFilter();
}
@Override
public SearchContext parsedQuery(ParsedQuery query) {
return in.parsedQuery(query);
}
@Override
public ParsedQuery parsedQuery() {
return in.parsedQuery();
}
@Override
public Query query() {
return in.query();
}
@Override
public int from() {
return in.from();
}
@Override
public SearchContext from(int from) {
return in.from(from);
}
@Override
public int size() {
return in.size();
}
@Override
public SearchContext size(int size) {
return in.size(size);
}
@Override
public boolean explain() {
return in.explain();
}
@Override
public void explain(boolean explain) {
in.explain(explain);
}
@Override
public List<String> groupStats() {
return in.groupStats();
}
@Override
public boolean version() {
return in.version();
}
@Override
public void version(boolean version) {
in.version(version);
}
@Override
public boolean seqNoAndPrimaryTerm() {
return in.seqNoAndPrimaryTerm();
}
@Override
public void seqNoAndPrimaryTerm(boolean seqNoAndPrimaryTerm) {
in.seqNoAndPrimaryTerm(seqNoAndPrimaryTerm);
}
@Override
public DfsSearchResult dfsResult() {
return in.dfsResult();
}
@Override
public void addDfsResult() {
in.addDfsResult();
}
@Override
public QuerySearchResult queryResult() {
return in.queryResult();
}
@Override
public void addQueryResult() {
in.addQueryResult();
}
@Override
public TotalHits getTotalHits() {
return in.getTotalHits();
}
@Override
public float getMaxScore() {
return in.getMaxScore();
}
@Override
public void addRankFeatureResult() {
in.addRankFeatureResult();
}
@Override
public RankFeatureResult rankFeatureResult() {
return in.rankFeatureResult();
}
@Override
public FetchSearchResult fetchResult() {
return in.fetchResult();
}
@Override
public void addFetchResult() {
in.addFetchResult();
}
@Override
public FetchPhase fetchPhase() {
return in.fetchPhase();
}
@Override
public long getRelativeTimeInMillis() {
return in.getRelativeTimeInMillis();
}
@Override
public SearchExtBuilder getSearchExt(String name) {
return in.getSearchExt(name);
}
@Override
public Profilers getProfilers() {
return in.getProfilers();
}
@Override
public SearchExecutionContext getSearchExecutionContext() {
return in.getSearchExecutionContext();
}
@Override
public void setTask(CancellableTask task) {
in.setTask(task);
}
@Override
public CancellableTask getTask() {
return in.getTask();
}
@Override
public boolean isCancelled() {
return in.isCancelled();
}
@Override
public CollapseContext collapse() {
return in.collapse();
}
@Override
public void addRescore(RescoreContext rescore) {
in.addRescore(rescore);
}
@Override
public ReaderContext readerContext() {
return in.readerContext();
}
@Override
public SourceLoader newSourceLoader(@Nullable SourceFilter filter) {
return in.newSourceLoader(filter);
}
@Override
public IdLoader newIdLoader() {
return in.newIdLoader();
}
@Override
public CircuitBreaker circuitBreaker() {
return in.circuitBreaker();
}
@Override
public long memAccountingBufferSize() {
return in.memAccountingBufferSize();
}
}
| FilteredSearchContext |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/introspect/AnnotationBundlesTest.java | {
"start": 2598,
"end": 2728
} | class ____ {
@InformativeHolder public int unimportant = 42;
}
@SuppressWarnings("serial")
static | InformingHolder |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/functions/sql/SqlListAggFunction.java | {
"start": 1818,
"end": 2989
} | class ____ extends SqlAggFunction {
public SqlListAggFunction() {
super(
"LISTAGG",
null,
SqlKind.LISTAGG,
ReturnTypes.ARG0_NULLABLE,
null,
OperandTypes.or(
OperandTypes.CHARACTER,
OperandTypes.sequence(
"'LISTAGG(<CHARACTER>, <CHARACTER_LITERAL>)'",
OperandTypes.CHARACTER,
OperandTypes.and(OperandTypes.CHARACTER, OperandTypes.LITERAL))),
SqlFunctionCategory.SYSTEM,
false,
false,
Optionality.FORBIDDEN);
}
@Override
public List<RelDataType> getParameterTypes(RelDataTypeFactory typeFactory) {
return ImmutableList.of(
typeFactory.createTypeWithNullability(
typeFactory.createSqlType(SqlTypeName.VARCHAR), true));
}
@Override
public RelDataType getReturnType(RelDataTypeFactory typeFactory) {
return typeFactory.createSqlType(SqlTypeName.VARCHAR);
}
}
| SqlListAggFunction |
java | spring-projects__spring-boot | module/spring-boot-graphql/src/main/java/org/springframework/boot/graphql/autoconfigure/rsocket/GraphQlRSocketController.java | {
"start": 987,
"end": 1507
} | class ____ {
private final GraphQlRSocketHandler handler;
GraphQlRSocketController(GraphQlRSocketHandler handler) {
this.handler = handler;
}
@MessageMapping("${spring.graphql.rsocket.mapping}")
Mono<Map<String, Object>> handle(Map<String, Object> payload) {
return this.handler.handle(payload);
}
@MessageMapping("${spring.graphql.rsocket.mapping}")
Flux<Map<String, Object>> handleSubscription(Map<String, Object> payload) {
return this.handler.handleSubscription(payload);
}
}
| GraphQlRSocketController |
java | apache__kafka | server-common/src/main/java/org/apache/kafka/server/share/persister/PersisterStateManager.java | {
"start": 6162,
"end": 7017
} | class ____ {
private final int maxAttempts;
private int attempts;
private final ExponentialBackoff backoff;
BackoffManager(int maxAttempts, long initialBackoffMs, long maxBackoffMs) {
this.maxAttempts = maxAttempts;
this.backoff = new ExponentialBackoff(
initialBackoffMs,
CommonClientConfigs.RETRY_BACKOFF_EXP_BASE,
maxBackoffMs,
CommonClientConfigs.RETRY_BACKOFF_JITTER
);
}
void incrementAttempt() {
attempts++;
}
void resetAttempts() {
attempts = 0;
}
boolean canAttempt() {
return attempts < maxAttempts;
}
long backOff() {
return this.backoff.backoff(attempts);
}
}
public | BackoffManager |
java | spring-projects__spring-boot | module/spring-boot-freemarker/src/test/java/org/springframework/boot/freemarker/autoconfigure/FreeMarkerWebFluxTestIntegrationTests.java | {
"start": 1269,
"end": 1576
} | class ____ {
@Autowired
private ApplicationContext applicationContext;
@Test
void freemarkerAutoConfigurationWasImported() {
assertThat(this.applicationContext).has(importedAutoConfiguration(FreeMarkerAutoConfiguration.class));
}
@SpringBootConfiguration
static | FreeMarkerWebFluxTestIntegrationTests |
java | apache__spark | common/utils-java/src/main/java/org/apache/spark/api/java/function/MapFunction.java | {
"start": 889,
"end": 982
} | interface ____ a map function used in Dataset's map function.
*/
@FunctionalInterface
public | for |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/JoinWithSolutionSetFirstDriver.java | {
"start": 3401,
"end": 9840
} | class ____
// re-instantiated for
// every iterations
return false;
}
// --------------------------------------------------------------------------------------------
@Override
@SuppressWarnings("unchecked")
public void initialize() {
final TypeSerializer<IT1> solutionSetSerializer;
final TypeComparator<IT1> solutionSetComparator;
// grab a handle to the hash table from the iteration broker
if (taskContext instanceof AbstractIterativeTask) {
AbstractIterativeTask<?, ?> iterativeTaskContext =
(AbstractIterativeTask<?, ?>) taskContext;
String identifier = iterativeTaskContext.brokerKey();
Object table = SolutionSetBroker.instance().get(identifier);
if (table instanceof CompactingHashTable) {
this.hashTable = (CompactingHashTable<IT1>) table;
solutionSetSerializer = this.hashTable.getBuildSideSerializer();
solutionSetComparator = this.hashTable.getBuildSideComparator().duplicate();
} else if (table instanceof JoinHashMap) {
this.objectMap = (JoinHashMap<IT1>) table;
solutionSetSerializer = this.objectMap.getBuildSerializer();
solutionSetComparator = this.objectMap.getBuildComparator().duplicate();
} else {
throw new RuntimeException("Unrecognized solution set index: " + table);
}
} else {
throw new RuntimeException(
"The task context of this driver is no iterative task context.");
}
TaskConfig config = taskContext.getTaskConfig();
ClassLoader classLoader = taskContext.getUserCodeClassLoader();
TypeSerializer<IT2> probeSideSerializer =
taskContext.<IT2>getInputSerializer(0).getSerializer();
TypeComparatorFactory<IT2> probeSideComparatorFactory =
config.getDriverComparator(0, classLoader);
this.probeSideComparator = probeSideComparatorFactory.createComparator();
ExecutionConfig executionConfig = taskContext.getExecutionConfig();
objectReuseEnabled = executionConfig.isObjectReuseEnabled();
if (objectReuseEnabled) {
solutionSideRecord = solutionSetSerializer.createInstance();
probeSideRecord = probeSideSerializer.createInstance();
}
TypePairComparatorFactory<IT1, IT2> factory =
taskContext
.getTaskConfig()
.getPairComparatorFactory(taskContext.getUserCodeClassLoader());
pairComparator =
factory.createComparator21(solutionSetComparator, this.probeSideComparator);
}
@Override
public void prepare() {
// nothing to prepare in each iteration
// later, if we support out-of-core operation, we need to put the code in here
// that brings the initial in-memory partitions into memory
}
@Override
public void run() throws Exception {
final FlatJoinFunction<IT1, IT2, OT> joinFunction = taskContext.getStub();
final Collector<OT> collector = taskContext.getOutputCollector();
final MutableObjectIterator<IT2> probeSideInput = taskContext.<IT2>getInput(0);
if (objectReuseEnabled) {
IT2 probeSideRecord = this.probeSideRecord;
if (hashTable != null) {
final CompactingHashTable<IT1> join = hashTable;
final CompactingHashTable<IT1>.HashTableProber<IT2> prober =
join.getProber(probeSideComparator, pairComparator);
IT1 buildSideRecord = this.solutionSideRecord;
while (this.running
&& ((probeSideRecord = probeSideInput.next(probeSideRecord)) != null)) {
IT1 matchedRecord = prober.getMatchFor(probeSideRecord, buildSideRecord);
joinFunction.join(matchedRecord, probeSideRecord, collector);
}
} else if (objectMap != null) {
final JoinHashMap<IT1> hashTable = this.objectMap;
final JoinHashMap<IT1>.Prober<IT2> prober =
this.objectMap.createProber(probeSideComparator, pairComparator);
final TypeSerializer<IT1> buildSerializer = hashTable.getBuildSerializer();
while (this.running
&& ((probeSideRecord = probeSideInput.next(probeSideRecord)) != null)) {
IT1 match = prober.lookupMatch(probeSideRecord);
joinFunction.join(buildSerializer.copy(match), probeSideRecord, collector);
}
} else {
throw new RuntimeException();
}
} else {
IT2 probeSideRecord;
if (hashTable != null) {
final CompactingHashTable<IT1> join = hashTable;
final CompactingHashTable<IT1>.HashTableProber<IT2> prober =
join.getProber(probeSideComparator, pairComparator);
IT1 buildSideRecord;
while (this.running && ((probeSideRecord = probeSideInput.next()) != null)) {
buildSideRecord = prober.getMatchFor(probeSideRecord);
joinFunction.join(buildSideRecord, probeSideRecord, collector);
}
} else if (objectMap != null) {
final JoinHashMap<IT1> hashTable = this.objectMap;
final JoinHashMap<IT1>.Prober<IT2> prober =
this.objectMap.createProber(probeSideComparator, pairComparator);
final TypeSerializer<IT1> buildSerializer = hashTable.getBuildSerializer();
while (this.running && ((probeSideRecord = probeSideInput.next()) != null)) {
IT1 match = prober.lookupMatch(probeSideRecord);
joinFunction.join(buildSerializer.copy(match), probeSideRecord, collector);
}
} else {
throw new RuntimeException();
}
}
}
@Override
public void cleanup() {}
@Override
public void reset() {}
@Override
public void teardown() {
// hash table is torn down by the iteration head task
}
@Override
public void cancel() {
this.running = false;
}
}
| is |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/BroadcastingOutputCollector.java | {
"start": 1567,
"end": 4376
} | class ____<T> implements WatermarkGaugeExposingOutput<StreamRecord<T>> {
protected final OutputWithChainingCheck<StreamRecord<T>>[] outputs;
private final Random random = new XORShiftRandom();
private final WatermarkGauge watermarkGauge = new WatermarkGauge();
protected final Counter numRecordsOutForTask;
public BroadcastingOutputCollector(
OutputWithChainingCheck<StreamRecord<T>>[] outputs, Counter numRecordsOutForTask) {
this.outputs = outputs;
this.numRecordsOutForTask = numRecordsOutForTask;
}
@Override
public void emitWatermark(Watermark mark) {
watermarkGauge.setCurrentWatermark(mark.getTimestamp());
for (Output<StreamRecord<T>> output : outputs) {
output.emitWatermark(mark);
}
}
@Override
public void emitWatermarkStatus(WatermarkStatus watermarkStatus) {
for (Output<StreamRecord<T>> output : outputs) {
output.emitWatermarkStatus(watermarkStatus);
}
}
@Override
public void emitLatencyMarker(LatencyMarker latencyMarker) {
if (outputs.length <= 0) {
// ignore
} else if (outputs.length == 1) {
outputs[0].emitLatencyMarker(latencyMarker);
} else {
// randomly select an output
outputs[random.nextInt(outputs.length)].emitLatencyMarker(latencyMarker);
}
}
@Override
public Gauge<Long> getWatermarkGauge() {
return watermarkGauge;
}
@Override
public void collect(StreamRecord<T> record) {
boolean emitted = false;
for (OutputWithChainingCheck<StreamRecord<T>> output : outputs) {
emitted |= output.collectAndCheckIfChained(record);
}
if (emitted) {
numRecordsOutForTask.inc();
}
}
@Override
public <X> void collect(OutputTag<X> outputTag, StreamRecord<X> record) {
boolean emitted = false;
for (OutputWithChainingCheck<StreamRecord<T>> output : outputs) {
emitted |= output.collectAndCheckIfChained(outputTag, record);
}
if (emitted) {
numRecordsOutForTask.inc();
}
}
@Override
public void close() {
for (Output<StreamRecord<T>> output : outputs) {
output.close();
}
}
@Override
public void emitRecordAttributes(RecordAttributes recordAttributes) {
for (OutputWithChainingCheck<StreamRecord<T>> output : outputs) {
output.emitRecordAttributes(recordAttributes);
}
}
@Override
public void emitWatermark(WatermarkEvent watermark) {
for (OutputWithChainingCheck<StreamRecord<T>> output : outputs) {
output.emitWatermark(watermark);
}
}
}
| BroadcastingOutputCollector |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/bug/Bug_for_agapple.java | {
"start": 141,
"end": 453
} | class ____ extends TestCase {
public void test_for_agapple() throws Exception {
Entity entity = new Entity();
entity.setProperties(new Properties());
String text = JSON.toJSONString(entity);
JSON.parseObject(text, Entity.class);
}
private static | Bug_for_agapple |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/index/query/MatchAllQueryBuilderTests.java | {
"start": 742,
"end": 1554
} | class ____ extends AbstractQueryTestCase<MatchAllQueryBuilder> {
@Override
protected MatchAllQueryBuilder doCreateTestQueryBuilder() {
return new MatchAllQueryBuilder();
}
@Override
protected void doAssertLuceneQuery(MatchAllQueryBuilder queryBuilder, Query query, SearchExecutionContext context) throws IOException {
assertThat(query, instanceOf(MatchAllDocsQuery.class));
}
public void testFromJson() throws IOException {
String json = """
{
"match_all" : {
"boost" : 1.2
}
}""";
MatchAllQueryBuilder parsed = (MatchAllQueryBuilder) parseQuery(json);
checkGeneratedJson(json, parsed);
assertEquals(json, 1.2, parsed.boost(), 0.0001);
}
}
| MatchAllQueryBuilderTests |
java | alibaba__nacos | naming/src/main/java/com/alibaba/nacos/naming/misc/SwitchManager.java | {
"start": 2662,
"end": 23749
} | class ____ extends RequestProcessor4CP {
private final SwitchDomain switchDomain;
private final ProtocolManager protocolManager;
private final ReentrantReadWriteLock raftLock;
private final ReentrantLock requestLock;
private final Serializer serializer;
private final SwitchDomainSnapshotOperation snapshotOperation;
private final File dataFile;
public SwitchManager(SwitchDomain switchDomain, ProtocolManager protocolManager) {
this.switchDomain = switchDomain;
this.protocolManager = protocolManager;
this.raftLock = new ReentrantReadWriteLock();
this.requestLock = new ReentrantLock();
this.serializer = SerializeFactory.getSerializer("JSON");
this.snapshotOperation = new SwitchDomainSnapshotOperation(this.raftLock, this, this.serializer);
this.dataFile = Paths.get(UtilsAndCommons.DATA_BASE_DIR, "data", KeyBuilder.getSwitchDomainKey()).toFile();
try {
DiskUtils.forceMkdir(this.dataFile.getParent());
} catch (IOException e) {
Loggers.RAFT.error("Init Switch Domain directory failed: ", e);
}
protocolManager.getCpProtocol().addRequestProcessors(Collections.singletonList(this));
}
/**
* Update switch information.
*
* @param entry item entry of switch, {@link SwitchEntry}
* @param value switch value
* @param debug whether debug
* @throws Exception exception
*/
@SuppressWarnings("PMD")
public void update(String entry, String value, boolean debug) throws Exception {
this.requestLock.lock();
try {
SwitchDomain tempSwitchDomain = this.switchDomain.clone();
if (entry.equals(SwitchEntry.DISTRO_THRESHOLD)) {
float threshold = Float.parseFloat(value);
if (threshold <= 0) {
throw new IllegalArgumentException("distroThreshold can not be zero or negative: " + threshold);
}
tempSwitchDomain.setDistroThreshold(threshold);
}
if (entry.equals(SwitchEntry.CLIENT_BEAT_INTERVAL)) {
long clientBeatInterval = Long.parseLong(value);
tempSwitchDomain.setClientBeatInterval(clientBeatInterval);
}
if (entry.equals(SwitchEntry.PUSH_VERSION)) {
String type = value.split(":")[0];
String version = value.split(":")[1];
if (!version.matches(UtilsAndCommons.VERSION_STRING_SYNTAX)) {
throw new IllegalArgumentException(
"illegal version, must match: " + UtilsAndCommons.VERSION_STRING_SYNTAX);
}
if (StringUtils.equals(SwitchEntry.CLIENT_JAVA, type)) {
tempSwitchDomain.setPushJavaVersion(version);
} else if (StringUtils.equals(SwitchEntry.CLIENT_PYTHON, type)) {
tempSwitchDomain.setPushPythonVersion(version);
} else if (StringUtils.equals(SwitchEntry.CLIENT_C, type)) {
tempSwitchDomain.setPushCVersion(version);
} else if (StringUtils.equals(SwitchEntry.CLIENT_GO, type)) {
tempSwitchDomain.setPushGoVersion(version);
} else if (StringUtils.equals(SwitchEntry.CLIENT_CSHARP, type)) {
tempSwitchDomain.setPushCSharpVersion(version);
} else {
throw new IllegalArgumentException("unsupported client type: " + type);
}
}
if (entry.equals(SwitchEntry.PUSH_CACHE_MILLIS)) {
long cacheMillis = Long.parseLong(value);
if (cacheMillis < SwitchEntry.MIN_PUSH_CACHE_TIME_MIILIS) {
throw new IllegalArgumentException("min cache time for http or tcp is too small(<10000)");
}
tempSwitchDomain.setDefaultPushCacheMillis(cacheMillis);
}
// extremely careful while modifying this, cause it will affect all clients without pushing enabled
if (entry.equals(SwitchEntry.DEFAULT_CACHE_MILLIS)) {
long cacheMillis = Long.parseLong(value);
if (cacheMillis < SwitchEntry.MIN_CACHE_TIME_MIILIS) {
throw new IllegalArgumentException("min default cache time is too small(<1000)");
}
tempSwitchDomain.setDefaultCacheMillis(cacheMillis);
}
if (entry.equals(SwitchEntry.MASTERS)) {
List<String> masters = Arrays.asList(value.split(","));
tempSwitchDomain.setMasters(masters);
}
if (entry.equals(SwitchEntry.DISTRO)) {
boolean enabled = Boolean.parseBoolean(value);
tempSwitchDomain.setDistroEnabled(enabled);
}
if (entry.equals(SwitchEntry.CHECK)) {
boolean enabled = Boolean.parseBoolean(value);
tempSwitchDomain.setHealthCheckEnabled(enabled);
}
if (entry.equals(SwitchEntry.PUSH_ENABLED)) {
boolean enabled = Boolean.parseBoolean(value);
tempSwitchDomain.setPushEnabled(enabled);
}
if (entry.equals(SwitchEntry.SERVICE_STATUS_SYNC_PERIOD)) {
long millis = Long.parseLong(value);
if (millis < SwitchEntry.MIN_SERVICE_SYNC_TIME_MIILIS) {
throw new IllegalArgumentException("serviceStatusSynchronizationPeriodMillis is too small(<5000)");
}
tempSwitchDomain.setServiceStatusSynchronizationPeriodMillis(millis);
}
if (entry.equals(SwitchEntry.SERVER_STATUS_SYNC_PERIOD)) {
long millis = Long.parseLong(value);
if (millis < SwitchEntry.MIN_SERVER_SYNC_TIME_MIILIS) {
throw new IllegalArgumentException("serverStatusSynchronizationPeriodMillis is too small(<15000)");
}
tempSwitchDomain.setServerStatusSynchronizationPeriodMillis(millis);
}
if (entry.equals(SwitchEntry.HEALTH_CHECK_TIMES)) {
int times = Integer.parseInt(value);
tempSwitchDomain.setCheckTimes(times);
}
if (entry.equals(SwitchEntry.DISABLE_ADD_IP)) {
boolean disableAddIp = Boolean.parseBoolean(value);
tempSwitchDomain.setDisableAddIP(disableAddIp);
}
if (entry.equals(SwitchEntry.SEND_BEAT_ONLY)) {
boolean sendBeatOnly = Boolean.parseBoolean(value);
tempSwitchDomain.setSendBeatOnly(sendBeatOnly);
}
if (entry.equals(SwitchEntry.LIMITED_URL_MAP)) {
Map<String, Integer> limitedUrlMap = new HashMap<>(16);
if (!StringUtils.isEmpty(value)) {
String[] entries = value.split(",");
for (String each : entries) {
String[] parts = each.split(":");
if (parts.length < 2) {
throw new IllegalArgumentException("invalid input for limited urls");
}
String limitedUrl = parts[0];
if (StringUtils.isEmpty(limitedUrl)) {
throw new IllegalArgumentException("url can not be empty, url: " + limitedUrl);
}
int statusCode = Integer.parseInt(parts[1]);
if (statusCode <= 0) {
throw new IllegalArgumentException("illegal normal status code: " + statusCode);
}
limitedUrlMap.put(limitedUrl, statusCode);
}
tempSwitchDomain.setLimitedUrlMap(limitedUrlMap);
}
}
if (entry.equals(SwitchEntry.ENABLE_STANDALONE)) {
if (!StringUtils.isNotEmpty(value)) {
tempSwitchDomain.setEnableStandalone(Boolean.parseBoolean(value));
}
}
if (entry.equals(SwitchEntry.OVERRIDDEN_SERVER_STATUS)) {
String status = value;
if (Constants.NULL_STRING.equals(status)) {
status = StringUtils.EMPTY;
}
tempSwitchDomain.setOverriddenServerStatus(status);
}
if (entry.equals(SwitchEntry.DEFAULT_INSTANCE_EPHEMERAL)) {
tempSwitchDomain.setDefaultInstanceEphemeral(Boolean.parseBoolean(value));
}
if (entry.equals(SwitchEntry.DISTRO_SERVER_EXPIRED_MILLIS)) {
tempSwitchDomain.setDistroServerExpiredMillis(Long.parseLong(value));
}
if (entry.equals(SwitchEntry.LIGHT_BEAT_ENABLED)) {
tempSwitchDomain.setLightBeatEnabled(ConvertUtils.toBoolean(value));
}
if (entry.equals(SwitchEntry.AUTO_CHANGE_HEALTH_CHECK_ENABLED)) {
tempSwitchDomain.setAutoChangeHealthCheckEnabled(ConvertUtils.toBoolean(value));
}
try {
if (SwitchEntry.HTTP_HEALTH_PARAMS.equals(entry)) {
SwitchDomain.HttpHealthParams httpHealthParams = JacksonUtils.toObj(value, SwitchDomain.HttpHealthParams.class);
tempSwitchDomain.setHttpHealthParams(httpHealthParams);
validateHealthParams(httpHealthParams);
}
if (SwitchEntry.TCP_HEALTH_PARAMS.equals(entry)) {
SwitchDomain.TcpHealthParams tcpHealthParams = JacksonUtils.toObj(value, SwitchDomain.TcpHealthParams.class);
tempSwitchDomain.setTcpHealthParams(tcpHealthParams);
validateHealthParams(tcpHealthParams);
}
if (SwitchEntry.MYSQL_HEALTH_PARAMS.equals(entry)) {
tempSwitchDomain.setMysqlHealthParams(JacksonUtils.toObj(value, SwitchDomain.MysqlHealthParams.class));
}
} catch (NacosDeserializationException e) {
throw new IllegalArgumentException("json param invalid.");
}
if (debug) {
update(tempSwitchDomain);
} else {
updateWithConsistency(tempSwitchDomain);
}
} finally {
this.requestLock.unlock();
}
}
/**
* Update switch information from new switch domain.
*
* @param newSwitchDomain new switch domain
*/
public void update(SwitchDomain newSwitchDomain) {
switchDomain.setMasters(newSwitchDomain.getMasters());
switchDomain.setAdWeightMap(newSwitchDomain.getAdWeightMap());
switchDomain.setDefaultPushCacheMillis(newSwitchDomain.getDefaultPushCacheMillis());
switchDomain.setClientBeatInterval(newSwitchDomain.getClientBeatInterval());
switchDomain.setDefaultCacheMillis(newSwitchDomain.getDefaultCacheMillis());
switchDomain.setDistroThreshold(newSwitchDomain.getDistroThreshold());
switchDomain.setHealthCheckEnabled(newSwitchDomain.isHealthCheckEnabled());
switchDomain.setAutoChangeHealthCheckEnabled(newSwitchDomain.isAutoChangeHealthCheckEnabled());
switchDomain.setDistroEnabled(newSwitchDomain.isDistroEnabled());
switchDomain.setPushEnabled(newSwitchDomain.isPushEnabled());
switchDomain.setEnableStandalone(newSwitchDomain.isEnableStandalone());
switchDomain.setCheckTimes(newSwitchDomain.getCheckTimes());
switchDomain.setHttpHealthParams(newSwitchDomain.getHttpHealthParams());
switchDomain.setTcpHealthParams(newSwitchDomain.getTcpHealthParams());
switchDomain.setMysqlHealthParams(newSwitchDomain.getMysqlHealthParams());
switchDomain.setIncrementalList(newSwitchDomain.getIncrementalList());
switchDomain.setServerStatusSynchronizationPeriodMillis(
newSwitchDomain.getServerStatusSynchronizationPeriodMillis());
switchDomain.setServiceStatusSynchronizationPeriodMillis(
newSwitchDomain.getServiceStatusSynchronizationPeriodMillis());
switchDomain.setDisableAddIP(newSwitchDomain.isDisableAddIP());
switchDomain.setSendBeatOnly(newSwitchDomain.isSendBeatOnly());
switchDomain.setLimitedUrlMap(newSwitchDomain.getLimitedUrlMap());
switchDomain.setDistroServerExpiredMillis(newSwitchDomain.getDistroServerExpiredMillis());
switchDomain.setPushGoVersion(newSwitchDomain.getPushVersionOfGo());
switchDomain.setPushJavaVersion(newSwitchDomain.getPushVersionOfJava());
switchDomain.setPushPythonVersion(newSwitchDomain.getPushVersionOfPython());
switchDomain.setPushCVersion(newSwitchDomain.getPushVersionOfC());
switchDomain.setPushCSharpVersion(newSwitchDomain.getPushVersionOfCsharp());
switchDomain.setEnableAuthentication(newSwitchDomain.isEnableAuthentication());
switchDomain.setOverriddenServerStatus(newSwitchDomain.getOverriddenServerStatus());
switchDomain.setDefaultInstanceEphemeral(newSwitchDomain.isDefaultInstanceEphemeral());
switchDomain.setLightBeatEnabled(newSwitchDomain.isLightBeatEnabled());
}
/**
* Validate health params.
*
* @param healthParams health params
*/
public void validateHealthParams(SwitchDomain.HealthParams healthParams) {
if (healthParams.getMin() < SwitchDomain.HttpHealthParams.MIN_MIN) {
throw new IllegalArgumentException("min check time for http or tcp is too small(<500)");
}
if (healthParams.getMax() < SwitchDomain.HttpHealthParams.MIN_MAX) {
throw new IllegalArgumentException("max check time for http or tcp is too small(<3000)");
}
if (healthParams.getFactor() < 0 || healthParams.getFactor() > 1) {
throw new IllegalArgumentException("malformed factor");
}
}
private void updateWithConsistency(SwitchDomain tempSwitchDomain) throws NacosException {
try {
final BatchWriteRequest req = new BatchWriteRequest();
String switchDomainKey = KeyBuilder.getSwitchDomainKey();
Datum datum = Datum.createDatum(switchDomainKey, tempSwitchDomain);
req.append(ByteUtils.toBytes(switchDomainKey), serializer.serialize(datum));
WriteRequest operationLog = WriteRequest.newBuilder().setGroup(group())
.setOperation(OldDataOperation.Write.getDesc()).setData(ByteString.copyFrom(serializer.serialize(req)))
.build();
protocolManager.getCpProtocol().write(operationLog);
} catch (Exception e) {
Loggers.RAFT.error("Submit switch domain failed: ", e);
throw new NacosException(HttpStatus.INTERNAL_SERVER_ERROR.value(), e.getMessage());
}
}
public SwitchDomain getSwitchDomain() {
return switchDomain;
}
@Override
public List<SnapshotOperation> loadSnapshotOperate() {
return Collections.singletonList(snapshotOperation);
}
/**
* Load Snapshot from snapshot dir.
*
* @param snapshotPath snapshot dir
*/
public void loadSnapshot(String snapshotPath) {
this.raftLock.writeLock().lock();
try {
File srcDir = Paths.get(snapshotPath).toFile();
// If snapshot path is non-exist, means snapshot is empty
if (srcDir.exists()) {
// First clean up the local file information, before the file copy
String baseDir = this.dataFile.getParent();
DiskUtils.deleteDirThenMkdir(baseDir);
File descDir = Paths.get(baseDir).toFile();
DiskUtils.copyDirectory(srcDir, descDir);
if (!this.dataFile.exists()) {
return;
}
byte[] snapshotData = DiskUtils.readFileBytes(this.dataFile);
final Datum datum = serializer.deserialize(snapshotData, getDatumType());
final Record value = null != datum ? datum.value : null;
if (!(value instanceof SwitchDomain)) {
return;
}
update((SwitchDomain) value);
}
} catch (IOException e) {
throw new NacosRuntimeException(ErrorCode.IOCopyDirError.getCode(), e);
} finally {
this.raftLock.writeLock().unlock();
}
}
/**
* Dump data from data dir to snapshot dir.
*
* @param backupPath snapshot dir
*/
public void dumpSnapshot(String backupPath) {
this.raftLock.writeLock().lock();
try {
File srcDir = Paths.get(this.dataFile.getParent()).toFile();
File descDir = Paths.get(backupPath).toFile();
DiskUtils.copyDirectory(srcDir, descDir);
} catch (IOException e) {
throw new NacosRuntimeException(ErrorCode.IOCopyDirError.getCode(), e);
} finally {
this.raftLock.writeLock().unlock();
}
}
@Override
public Response onRequest(ReadRequest request) {
this.raftLock.readLock().lock();
try {
final List<byte[]> keys = serializer.deserialize(request.getData().toByteArray(),
TypeUtils.parameterize(List.class, byte[].class));
if (isNotSwitchDomainKey(keys)) {
return Response.newBuilder().setSuccess(false).setErrMsg("not switch domain key").build();
}
Datum datum = Datum.createDatum(KeyBuilder.getSwitchDomainKey(), switchDomain);
final BatchReadResponse response = new BatchReadResponse();
response.append(ByteUtils.toBytes(KeyBuilder.getSwitchDomainKey()), serializer.serialize(datum));
return Response.newBuilder().setSuccess(true).setData(ByteString.copyFrom(serializer.serialize(response)))
.build();
} catch (Exception e) {
Loggers.RAFT.warn("On read switch domain failed, ", e);
return Response.newBuilder().setSuccess(false).setErrMsg(e.getMessage()).build();
} finally {
this.raftLock.readLock().unlock();
}
}
@Override
public Response onApply(WriteRequest log) {
this.raftLock.writeLock().lock();
try {
BatchWriteRequest bwRequest = serializer.deserialize(log.getData().toByteArray(), BatchWriteRequest.class);
if (isNotSwitchDomainKey(bwRequest.getKeys())) {
return Response.newBuilder().setSuccess(false).setErrMsg("not switch domain key").build();
}
final Datum datum = serializer.deserialize(bwRequest.getValues().get(0), getDatumType());
final Record value = null != datum ? datum.value : null;
if (!(value instanceof SwitchDomain)) {
return Response.newBuilder().setSuccess(false).setErrMsg("datum is not switch domain").build();
}
DiskUtils.touch(dataFile);
DiskUtils.writeFile(dataFile, bwRequest.getValues().get(0), false);
SwitchDomain switchDomain = (SwitchDomain) value;
update(switchDomain);
return Response.newBuilder().setSuccess(true).build();
} catch (Exception e) {
Loggers.RAFT.warn("On apply switch domain failed, ", e);
return Response.newBuilder().setSuccess(false).setErrMsg(e.getMessage()).build();
} finally {
this.raftLock.writeLock().unlock();
}
}
@Override
public String group() {
return com.alibaba.nacos.naming.constants.Constants.NAMING_PERSISTENT_SERVICE_GROUP;
}
private boolean isNotSwitchDomainKey(List<byte[]> keys) {
if (1 != keys.size()) {
return false;
}
String keyString = new String(keys.get(0));
return !KeyBuilder.getSwitchDomainKey().equals(keyString);
}
private Type getDatumType() {
return TypeUtils.parameterize(Datum.class, SwitchDomain.class);
}
}
| SwitchManager |
java | apache__maven | impl/maven-di/src/test/java/org/apache/maven/di/impl/InjectorImplTest.java | {
"start": 7518,
"end": 8777
} | class ____ implements MyService {}
}
@Test
void injectMapTest() {
Injector injector = Injector.create().bindImplicit(InjectMap.class);
Map<String, InjectMap.MyService> services =
injector.getInstance(new Key<Map<String, InjectMap.MyService>>() {});
assertNotNull(services);
assertEquals(2, services.size());
List<Map.Entry<String, InjectMap.MyService>> entries = new ArrayList<>(services.entrySet());
assertNotNull(entries.get(0));
assertInstanceOf(InjectMap.MyService.class, entries.get(0).getValue());
assertInstanceOf(String.class, entries.get(0).getKey());
assertNotNull(entries.get(1));
assertInstanceOf(String.class, entries.get(1).getKey());
assertInstanceOf(InjectMap.MyService.class, entries.get(1).getValue());
assertNotEquals(entries.get(0).getKey(), entries.get(1).getKey());
assertNotSame(
entries.get(0).getValue().getClass(), entries.get(1).getValue().getClass());
InjectMap.MyMojo mojo = injector.getInstance(InjectMap.MyMojo.class);
assertNotNull(mojo);
assertNotNull(mojo.services);
assertEquals(2, mojo.services.size());
}
static | LowPriorityServiceImpl |
java | spring-projects__spring-framework | spring-expression/src/test/java/org/springframework/expression/spel/SpelCompilationCoverageTests.java | {
"start": 252297,
"end": 252434
} | class ____ {
private FooObject foo = new FooObject();
public FooObject getFoo() {
return foo;
}
}
public static | FooObjectHolder |
java | spring-projects__spring-security | core/src/main/java/org/springframework/security/authentication/jaas/JaasPasswordCallbackHandler.java | {
"start": 1393,
"end": 2024
} | class ____ implements JaasAuthenticationCallbackHandler {
/**
* If the callback passed to the 'handle' method is an instance of PasswordCallback,
* the JaasPasswordCallbackHandler will call,
* callback.setPassword(authentication.getCredentials().toString()).
* @param callback
* @param auth
*
*/
@Override
public void handle(Callback callback, Authentication auth) {
if (callback instanceof PasswordCallback) {
Object credentials = auth.getCredentials();
if (credentials != null) {
((PasswordCallback) callback).setPassword(credentials.toString().toCharArray());
}
}
}
}
| JaasPasswordCallbackHandler |
java | dropwizard__dropwizard | dropwizard-configuration/src/main/java/io/dropwizard/configuration/JsonConfigurationFactory.java | {
"start": 192,
"end": 460
} | class ____ loading JSON configuration files, binding them to configuration objects, and
* validating their constraints. Allows for overriding configuration parameters from system properties.
*
* @param <T> the type of the configuration objects to produce
*/
public | for |
java | hibernate__hibernate-orm | tooling/metamodel-generator/src/test/java/org/hibernate/processor/test/generatedannotation/GenerationDateTest.java | {
"start": 840,
"end": 1556
} | class ____ {
@Test
@TestForIssue(jiraKey = "METAGEN-73")
@WithClasses(TestEntity.class)
@WithProcessorOption(key = HibernateProcessor.ADD_GENERATION_DATE, value = "true")
void testGeneratedAnnotationGenerated() {
assertMetamodelClassGeneratedFor( TestEntity.class );
// need to check the source because @Generated is not a runtime annotation
String metaModelSource = getMetaModelSourceAsString( TestEntity.class );
dumpMetaModelSourceFor( TestEntity.class );
String generatedString = "@Generated(value = \"org.hibernate.processor.HibernateProcessor\", date = \"";
assertTrue( metaModelSource.contains( generatedString ), "@Generated should also contain the date parameter." );
}
}
| GenerationDateTest |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/domain/blog/immutable/ImmutableComment.java | {
"start": 707,
"end": 1264
} | class ____ {
private final int id;
private final String name;
private final String comment;
public ImmutableComment(int id, String name, String comment) {
this.id = id;
this.name = name;
this.comment = comment;
}
public int getId() {
return id;
}
public String getName() {
return name;
}
public String getComment() {
return comment;
}
@Override
public String toString() {
return "ImmutableComment{" + "id=" + id + ", name='" + name + '\'' + ", comment='" + comment + '\'' + '}';
}
}
| ImmutableComment |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/LangChain4jAgentEndpointBuilderFactory.java | {
"start": 10310,
"end": 10677
} | class ____ extends AbstractEndpointBuilder implements LangChain4jAgentEndpointBuilder, AdvancedLangChain4jAgentEndpointBuilder {
public LangChain4jAgentEndpointBuilderImpl(String path) {
super(componentName, path);
}
}
return new LangChain4jAgentEndpointBuilderImpl(path);
}
} | LangChain4jAgentEndpointBuilderImpl |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java | {
"start": 24203,
"end": 26826
} | class ____ implements ClusterStateTaskExecutor<ReconcileDesiredBalanceTask> {
@Override
public ClusterState execute(BatchExecutionContext<ReconcileDesiredBalanceTask> batchExecutionContext) {
var latest = findLatest(batchExecutionContext.taskContexts());
var newState = applyBalance(batchExecutionContext, latest);
discardSupersededTasks(batchExecutionContext.taskContexts(), latest);
return newState;
}
private static TaskContext<ReconcileDesiredBalanceTask> findLatest(
List<? extends TaskContext<ReconcileDesiredBalanceTask>> taskContexts
) {
return taskContexts.stream().max(Comparator.comparing(context -> context.getTask().desiredBalance.lastConvergedIndex())).get();
}
private ClusterState applyBalance(
BatchExecutionContext<ReconcileDesiredBalanceTask> batchExecutionContext,
TaskContext<ReconcileDesiredBalanceTask> latest
) {
try (var ignored = batchExecutionContext.dropHeadersContext()) {
var newState = reconciler.apply(
batchExecutionContext.initialState(),
createReconcileAllocationAction(latest.getTask().desiredBalance)
);
latest.success(() -> pendingListenersQueue.complete(latest.getTask().desiredBalance.lastConvergedIndex()));
return newState;
}
}
private static void discardSupersededTasks(
List<? extends TaskContext<ReconcileDesiredBalanceTask>> taskContexts,
TaskContext<ReconcileDesiredBalanceTask> latest
) {
for (TaskContext<ReconcileDesiredBalanceTask> taskContext : taskContexts) {
if (taskContext != latest) {
taskContext.success(() -> {});
}
}
}
}
// only for tests - in production, this happens after reconciliation
protected final void completeToLastConvergedIndex() {
pendingListenersQueue.complete(currentDesiredBalanceRef.get().lastConvergedIndex());
}
private void recordTime(CounterMetric metric, Runnable action) {
final long started = threadPool.relativeTimeInMillis();
try {
action.run();
} finally {
final long finished = threadPool.relativeTimeInMillis();
metric.inc(finished - started);
}
}
// Visible for testing
Set<String> getProcessedNodeShutdowns() {
return Set.copyOf(processedNodeShutdowns);
}
}
| ReconcileDesiredBalanceExecutor |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/api/AbstractUniversalComparableAssert.java | {
"start": 2086,
"end": 2481
} | class ____ to allow writing
* <pre><code class='java'> // assertThatComparable resolves to AbstractUniversalComparableAssert
* assertThatComparable(name1).isLessThanOrEqualTo(name1);
*
* // it works with the concrete type too
* assertThatComparable(name).isEqualByComparingTo(name);</code></pre>
*
* @see Assertions#assertThatComparable(Comparable)
* @since 3.23.0
*/
public abstract | aims |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcNoSuchProtocolException.java | {
"start": 1101,
"end": 1641
} | class ____ extends RpcServerException {
private static final long serialVersionUID = 1L;
public RpcNoSuchProtocolException(final String message) {
super(message);
}
/**
* get the rpc status corresponding to this exception
*/
public RpcStatusProto getRpcStatusProto() {
return RpcStatusProto.ERROR;
}
/**
* get the detailed rpc status corresponding to this exception
*/
public RpcErrorCodeProto getRpcErrorCodeProto() {
return RpcErrorCodeProto.ERROR_NO_SUCH_PROTOCOL;
}
}
| RpcNoSuchProtocolException |
java | spring-projects__spring-boot | core/spring-boot/src/main/java/org/springframework/boot/web/error/ErrorAttributeOptions.java | {
"start": 4146,
"end": 4195
} | enum ____ {
/**
* Include the exception | Include |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/plan/utils/JavaUserDefinedAggFunctions.java | {
"start": 14618,
"end": 15502
} | class ____
extends AggregateFunction<UserDefinedObject, UserDefinedObject> {
private static final String KEY = "key";
@Override
public UserDefinedObject getValue(UserDefinedObject accumulator) {
return accumulator;
}
@Override
public UserDefinedObject createAccumulator() {
return new UserDefinedObject();
}
public void accumulate(UserDefinedObject acc, String a) {
if (a != null) {
acc.testObjectList.add(new TestObject(a));
acc.testObjectMap.put(KEY, new TestObject(a));
}
}
public void retract(UserDefinedObject acc, UserDefinedObject a) {
// do nothing.
}
}
/** User defined UDAF whose value and acc is user defined complex pojo object. */
public static | UserDefinedObjectUDAF |
java | micronaut-projects__micronaut-core | http/src/main/java/io/micronaut/http/uri/UriTypeMatchTemplate.java | {
"start": 4254,
"end": 5474
} | class ____ extends UriMatchTemplateParser {
private int variableIndex = 0;
/**
* @param templateText The template
* @param matchTemplate The match template
*/
TypedUriMatchTemplateParser(String templateText, UriTypeMatchTemplate matchTemplate) {
super(templateText, matchTemplate);
}
@Override
public UriTypeMatchTemplate getMatchTemplate() {
return (UriTypeMatchTemplate) super.getMatchTemplate();
}
@Override
protected String getVariablePattern(String variable, char operator) {
UriTypeMatchTemplate matchTemplate = getMatchTemplate();
Class<?>[] variableTypes = matchTemplate.variableTypes;
try {
if (variableIndex < variableTypes.length) {
Class<?> variableType = variableTypes[variableIndex];
return matchTemplate.resolveTypePattern(variableType, variable, operator);
} else {
return super.getVariablePattern(variable, operator);
}
} finally {
variableIndex++;
}
}
}
}
| TypedUriMatchTemplateParser |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementAttemptedItems.java | {
"start": 8232,
"end": 11457
} | class ____ implements Runnable {
@Override
public void run() {
while (monitorRunning) {
try {
blockStorageMovementReportedItemsCheck();
blocksStorageMovementUnReportedItemsCheck();
Thread.sleep(minCheckTimeout);
} catch (InterruptedException ie) {
LOG.info("BlocksStorageMovementAttemptMonitor thread "
+ "is interrupted.", ie);
} catch (IOException ie) {
LOG.warn("BlocksStorageMovementAttemptMonitor thread "
+ "received exception and exiting.", ie);
}
}
}
}
@VisibleForTesting
void blocksStorageMovementUnReportedItemsCheck() {
synchronized (storageMovementAttemptedItems) {
Iterator<AttemptedItemInfo> iter = storageMovementAttemptedItems
.iterator();
long now = monotonicNow();
while (iter.hasNext()) {
AttemptedItemInfo itemInfo = iter.next();
if (now > itemInfo.getLastAttemptedOrReportedTime()
+ selfRetryTimeout) {
long file = itemInfo.getFile();
ItemInfo candidate = new ItemInfo(itemInfo.getStartPath(), file,
itemInfo.getRetryCount() + 1);
blockStorageMovementNeeded.add(candidate);
iter.remove();
LOG.info("TrackID: {} becomes timed out and moved to needed "
+ "retries queue for next iteration.", file);
}
}
}
}
@VisibleForTesting
void blockStorageMovementReportedItemsCheck() throws IOException {
// Removes all available blocks from this queue and process it.
Collection<Block> finishedBlks = new ArrayList<>();
movementFinishedBlocks.drainTo(finishedBlks);
// Update attempted items list
for (Block blk : finishedBlks) {
synchronized (storageMovementAttemptedItems) {
Iterator<AttemptedItemInfo> iterator = storageMovementAttemptedItems
.iterator();
while (iterator.hasNext()) {
AttemptedItemInfo attemptedItemInfo = iterator.next();
attemptedItemInfo.getBlocks().remove(blk);
if (attemptedItemInfo.getBlocks().isEmpty()) {
blockStorageMovementNeeded.add(new ItemInfo(
attemptedItemInfo.getStartPath(), attemptedItemInfo.getFile(),
attemptedItemInfo.getRetryCount() + 1));
iterator.remove();
}
}
}
}
}
@VisibleForTesting
public int getMovementFinishedBlocksCount() {
return movementFinishedBlocks.size();
}
@VisibleForTesting
public int getAttemptedItemsCount() {
synchronized (storageMovementAttemptedItems) {
return storageMovementAttemptedItems.size();
}
}
@VisibleForTesting
public List<AttemptedItemInfo> getStorageMovementAttemptedItems() {
return storageMovementAttemptedItems;
}
@VisibleForTesting
public BlockingQueue<Block> getMovementFinishedBlocks() {
return movementFinishedBlocks;
}
public void clearQueues() {
movementFinishedBlocks.clear();
synchronized (storageMovementAttemptedItems) {
storageMovementAttemptedItems.clear();
}
synchronized (scheduledBlkLocs) {
scheduledBlkLocs.clear();
}
}
}
| BlocksStorageMovementAttemptMonitor |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/ThrowsUncheckedExceptionTest.java | {
"start": 5809,
"end": 5955
} | interface ____ {
void f() throws ReflectiveOperationException, IOException;
}
""")
.doTest();
}
}
| Test |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/types/inference/CallContext.java | {
"start": 1664,
"end": 6406
} | interface ____ {
/** Enables to lookup types in a catalog and resolve RAW types. */
DataTypeFactory getDataTypeFactory();
/** Returns the function definition that defines the function currently being called. */
FunctionDefinition getFunctionDefinition();
/** Returns whether the argument at the given position is a value literal. */
boolean isArgumentLiteral(int pos);
/**
* Returns {@code true} if the argument at the given position is a literal and {@code null},
* {@code false} otherwise. If the argument is declared as optional and has no value, true is
* returned.
*
* <p>Use {@link #isArgumentLiteral(int)} before to check if the argument is actually a literal.
*/
boolean isArgumentNull(int pos);
/**
* Returns the literal value of the argument at the given position, given that the argument is a
* literal, is not null, and can be expressed as an instance of the provided class.
*
* <p>It supports conversions to default conversion classes of {@link LogicalType LogicalTypes}.
* This method should not be called with other classes.
*
* <p>Use {@link #isArgumentLiteral(int)} before to check if the argument is actually a literal.
*/
<T> Optional<T> getArgumentValue(int pos, Class<T> clazz);
/**
* Returns information about the table that has been passed to a table argument.
*
* <p>This method applies only to {@link ProcessTableFunction}s.
*
* <p>Semantics are only available for table arguments that are annotated with
* {@code @ArgumentHint(SET_SEMANTIC_TABLE)} or {@code @ArgumentHint(ROW_SEMANTIC_TABLE)}).
*/
default Optional<TableSemantics> getTableSemantics(int pos) {
return Optional.empty();
}
/**
* Returns information about the model that has been passed to a model argument.
*
* <p>This method applies only to {@link ProcessTableFunction}s.
*/
default Optional<ModelSemantics> getModelSemantics(int pos) {
return Optional.empty();
}
/**
* Returns the {@link ChangelogMode} that the framework requires from the function.
*
* <p>This method applies only to {@link ProcessTableFunction}.
*
* <p>Returns empty during type inference phase as the changelog mode is still unknown. Returns
* an actual changelog mode, when the PTF implements the {@link ChangelogFunction} interface.
*/
default Optional<ChangelogMode> getOutputChangelogMode() {
return Optional.empty();
}
/**
* Returns the function's name usually referencing the function in a catalog.
*
* <p>Note: The name is meant for debugging purposes only.
*/
String getName();
/**
* Returns a resolved list of the call's argument types. It also includes a type for every
* argument in a vararg function call.
*/
List<DataType> getArgumentDataTypes();
/**
* Returns the inferred output data type of the function call.
*
* <p>It does this by inferring the input argument data type using {@link
* ArgumentTypeStrategy#inferArgumentType(CallContext, int, boolean)} of a wrapping call (if
* available) where this function call is an argument. For example, {@code
* takes_string(this_function(NULL))} would lead to a {@link DataTypes#STRING()} because the
* wrapping call expects a string argument.
*/
Optional<DataType> getOutputDataType();
/**
* Creates a validation exception for exiting the type inference process with a meaningful
* exception.
*/
default ValidationException newValidationError(String message, Object... args) {
final String formatted;
if (args.length > 0) {
formatted = String.format(message, args);
} else {
formatted = message;
}
return new ValidationException(formatted);
}
/**
* Helper method for handling failures during the type inference process while considering the
* {@code throwOnFailure} flag.
*
* <p>Shorthand for {@code if (throwOnFailure) throw ValidationException(...) else return
* Optional.empty()}.
*/
default <T> Optional<T> fail(boolean throwOnFailure, String message, Object... args) {
if (throwOnFailure) {
throw newValidationError(message, args);
}
return Optional.empty();
}
/**
* Returns whether the function call happens as part of an aggregation that defines grouping
* columns.
*
* <p>E.g. {@code SELECT COUNT(*) FROM t} is not a grouped aggregation but {@code SELECT
* COUNT(*) FROM t GROUP BY k} is.
*/
boolean isGroupedAggregation();
}
| CallContext |
java | apache__flink | flink-core/src/main/java/org/apache/flink/util/concurrent/FutureUtils.java | {
"start": 35045,
"end": 35180
} | interface ____ waits only for the completion of
* its futures and does not return their values.
*/
private static final | which |
java | apache__rocketmq | store/src/main/java/org/apache/rocketmq/store/exception/StoreException.java | {
"start": 855,
"end": 1375
} | class ____ extends Exception {
public StoreException() {
}
public StoreException(String message) {
super(message);
}
public StoreException(String message, Throwable cause) {
super(message, cause);
}
public StoreException(Throwable cause) {
super(cause);
}
public StoreException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) {
super(message, cause, enableSuppression, writableStackTrace);
}
}
| StoreException |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/appender/db/AbstractDatabaseManager.java | {
"start": 1636,
"end": 13555
} | class ____ extends AbstractManager.AbstractFactoryData {
private final int bufferSize;
private final Layout<? extends Serializable> layout;
/**
* Constructs the base factory data.
*
* @param bufferSize The size of the buffer.
* @param layout The appender-level layout
* @deprecated Use {@link AbstractFactoryData#AbstractFactoryData(Configuration, int, Layout)}.
*/
protected AbstractFactoryData(final int bufferSize, final Layout<? extends Serializable> layout) {
this(null, bufferSize, layout);
}
/**
* Constructs the base factory data.
* @param configuration Configuration creating this instance.
* @param bufferSize The size of the buffer.
* @param layout The appender-level layout
*/
protected AbstractFactoryData(
final Configuration configuration, final int bufferSize, final Layout<? extends Serializable> layout) {
super(configuration);
this.bufferSize = bufferSize;
this.layout = layout;
}
/**
* Gets the buffer size.
*
* @return the buffer size.
*/
public int getBufferSize() {
return bufferSize;
}
/**
* Gets the layout.
*
* @return the layout.
*/
public Layout<? extends Serializable> getLayout() {
return layout;
}
}
/**
* Implementations should define their own getManager method and call this method from that to create or get
* existing managers.
*
* @param name The manager name, which should include any configuration details that one might want to be able to
* reconfigure at runtime, such as database name, username, (hashed) password, etc.
* @param data The concrete instance of {@link AbstractFactoryData} appropriate for the given manager.
* @param factory A factory instance for creating the appropriate manager.
* @param <M> The concrete manager type.
* @param <T> The concrete {@link AbstractFactoryData} type.
* @return a new or existing manager of the specified type and name.
*/
protected static <M extends AbstractDatabaseManager, T extends AbstractFactoryData> M getManager(
final String name, final T data, final ManagerFactory<M, T> factory) {
return AbstractManager.getManager(name, factory, data);
}
private final ArrayList<LogEvent> buffer;
private final int bufferSize;
private final Layout<? extends Serializable> layout;
private boolean running;
/**
* Constructs the base manager.
*
* @param name The manager name, which should include any configuration details that one might want to be able to
* reconfigure at runtime, such as database name, username, (hashed) password, etc.
* @param bufferSize The size of the log event buffer.
* @deprecated Use {@link AbstractDatabaseManager#AbstractDatabaseManager(String, int, Layout, Configuration)}.
*/
@Deprecated
protected AbstractDatabaseManager(final String name, final int bufferSize) {
this(name, bufferSize, null);
}
/**
* Constructs the base manager.
*
* @param name The manager name, which should include any configuration details that one might want to be able to
* reconfigure at runtime, such as database name, username, (hashed) password, etc.
* @param layout the Appender-level layout.
* @param bufferSize The size of the log event buffer.
* @deprecated Use {@link AbstractDatabaseManager#AbstractDatabaseManager(String, int, Layout, Configuration)}.
*/
@Deprecated
protected AbstractDatabaseManager(
final String name, final int bufferSize, final Layout<? extends Serializable> layout) {
this(name, bufferSize, layout, null);
}
/**
* Constructs the base manager.
*
* @param name The manager name, which should include any configuration details that one might want to be able to
* reconfigure at runtime, such as database name, username, (hashed) password, etc.
* @param layout the Appender-level layout.
* @param bufferSize The size of the log event buffer.
* @param configuration My configuration.
*/
protected AbstractDatabaseManager(
final String name,
final int bufferSize,
final Layout<? extends Serializable> layout,
final Configuration configuration) {
// null configuration allowed for backward compatibility.
// TODO should super track Configuration instead of LoggerContext?
super(configuration != null ? configuration.getLoggerContext() : null, name);
this.bufferSize = bufferSize;
this.buffer = new ArrayList<>(bufferSize + 1);
this.layout = layout; // A null layout is allowed.
}
protected void buffer(final LogEvent event) {
this.buffer.add(event.toImmutable());
if (this.buffer.size() >= this.bufferSize || event.isEndOfBatch()) {
this.flush();
}
}
/**
* Commits any active transaction (if applicable) and disconnects from the database (returns the connection to the
* connection pool). With buffering enabled, this is called when flushing the buffer completes, after the last call
* to {@link #writeInternal}. With buffering disabled, this is called immediately after every invocation of
* {@link #writeInternal}.
* @return true if all resources were closed normally, false otherwise.
*/
protected abstract boolean commitAndClose();
/**
* Connects to the database and starts a transaction (if applicable). With buffering enabled, this is called when
* flushing the buffer begins, before the first call to {@link #writeInternal}. With buffering disabled, this is
* called immediately before every invocation of {@link #writeInternal}.
*/
protected abstract void connectAndStart();
/**
* This method is called automatically when the buffer size reaches its maximum or at the beginning of a call to
* {@link #shutdown()}. It can also be called manually to flush events to the database.
*/
@Override
public final synchronized void flush() {
if (this.isRunning() && isBuffered()) {
this.connectAndStart();
try {
for (final LogEvent event : this.buffer) {
this.writeInternal(event, layout != null ? layout.toSerializable(event) : null);
}
} finally {
this.commitAndClose();
// not sure if this should be done when writing the events failed
this.buffer.clear();
}
}
}
protected boolean isBuffered() {
return this.bufferSize > 0;
}
/**
* Indicates whether the manager is currently connected {@link #startup()} has been called and {@link #shutdown()}
* has not been called).
*
* @return {@code true} if the manager is connected.
*/
public final boolean isRunning() {
return this.running;
}
@Override
public final boolean releaseSub(final long timeout, final TimeUnit timeUnit) {
return this.shutdown();
}
/**
* This method is called from the {@link #close()} method when the appender is stopped or the appender's manager
* is replaced. If it has not already been called, it calls {@link #shutdownInternal()} and catches any exceptions
* it might throw.
* @return true if all resources were closed normally, false otherwise.
*/
public final synchronized boolean shutdown() {
boolean closed = true;
this.flush();
if (this.isRunning()) {
try {
closed &= this.shutdownInternal();
} catch (final Exception e) {
logWarn("Caught exception while performing database shutdown operations", e);
closed = false;
} finally {
this.running = false;
}
}
return closed;
}
/**
* Implementations should implement this method to perform any proprietary disconnection / shutdown operations. This
* method will never be called twice on the same instance, and it will only be called <em>after</em>
* {@link #startupInternal()}. It is safe to throw any exceptions from this method. This method does not
* necessarily disconnect from the database for the same reasons outlined in {@link #startupInternal()}.
* @return true if all resources were closed normally, false otherwise.
*/
protected abstract boolean shutdownInternal() throws Exception;
/**
* This method is called within the appender when the appender is started. If it has not already been called, it
* calls {@link #startupInternal()} and catches any exceptions it might throw.
*/
public final synchronized void startup() {
if (!this.isRunning()) {
try {
this.startupInternal();
this.running = true;
} catch (final Exception e) {
logError("Could not perform database startup operations", e);
}
}
}
/**
* Implementations should implement this method to perform any proprietary startup operations. This method will
* never be called twice on the same instance. It is safe to throw any exceptions from this method. This method
* does not necessarily connect to the database, as it is generally unreliable to connect once and use the same
* connection for hours.
*/
protected abstract void startupInternal() throws Exception;
@Override
public final String toString() {
return this.getName();
}
/**
* This method manages buffering and writing of events.
*
* @param event The event to write to the database.
* @deprecated since 2.11.0 Use {@link #write(LogEvent, Serializable)}.
*/
@Deprecated
public final synchronized void write(final LogEvent event) {
write(event, null);
}
/**
* This method manages buffering and writing of events.
*
* @param event The event to write to the database.
* @param serializable Serializable event
*/
public final synchronized void write(final LogEvent event, final Serializable serializable) {
if (isBuffered()) {
buffer(event);
} else {
writeThrough(event, serializable);
}
}
/**
* Performs the actual writing of the event in an implementation-specific way. This method is called immediately
* from {@link #write(LogEvent, Serializable)} if buffering is off, or from {@link #flush()} if the buffer has reached its limit.
*
* @param event The event to write to the database.
* @deprecated Use {@link #writeInternal(LogEvent, Serializable)}.
*/
@Deprecated
protected void writeInternal(final LogEvent event) {
writeInternal(event, null);
}
/**
* Performs the actual writing of the event in an implementation-specific way. This method is called immediately
* from {@link #write(LogEvent, Serializable)} if buffering is off, or from {@link #flush()} if the buffer has reached its limit.
*
* @param event The event to write to the database.
*/
protected abstract void writeInternal(LogEvent event, Serializable serializable);
protected void writeThrough(final LogEvent event, final Serializable serializable) {
this.connectAndStart();
try {
this.writeInternal(event, serializable);
} finally {
this.commitAndClose();
}
}
}
| AbstractFactoryData |
java | google__guava | android/guava/src/com/google/common/collect/Maps.java | {
"start": 125277,
"end": 129340
} | class ____<
K extends @Nullable Object, V extends @Nullable Object>
extends AbstractNavigableMap<K, V> {
/*
* It's less code to extend AbstractNavigableMap and forward the filtering logic to
* FilteredEntryMap than to extend FilteredEntrySortedMap and reimplement all the NavigableMap
* methods.
*/
private final NavigableMap<K, V> unfiltered;
private final Predicate<? super Entry<K, V>> entryPredicate;
private final Map<K, V> filteredDelegate;
FilteredEntryNavigableMap(
NavigableMap<K, V> unfiltered, Predicate<? super Entry<K, V>> entryPredicate) {
this.unfiltered = checkNotNull(unfiltered);
this.entryPredicate = entryPredicate;
this.filteredDelegate = new FilteredEntryMap<>(unfiltered, entryPredicate);
}
@Override
public @Nullable Comparator<? super K> comparator() {
return unfiltered.comparator();
}
@Override
public NavigableSet<K> navigableKeySet() {
return new NavigableKeySet<K, V>(this) {
@Override
public boolean removeAll(Collection<?> collection) {
return FilteredEntryMap.removeAllKeys(unfiltered, entryPredicate, collection);
}
@Override
public boolean retainAll(Collection<?> collection) {
return FilteredEntryMap.retainAllKeys(unfiltered, entryPredicate, collection);
}
};
}
@Override
public Collection<V> values() {
return new FilteredMapValues<>(this, unfiltered, entryPredicate);
}
@Override
Iterator<Entry<K, V>> entryIterator() {
return Iterators.filter(unfiltered.entrySet().iterator(), entryPredicate);
}
@Override
Iterator<Entry<K, V>> descendingEntryIterator() {
return Iterators.filter(unfiltered.descendingMap().entrySet().iterator(), entryPredicate);
}
@Override
public int size() {
return filteredDelegate.size();
}
@Override
public boolean isEmpty() {
return !Iterables.any(unfiltered.entrySet(), entryPredicate);
}
@Override
public @Nullable V get(@Nullable Object key) {
return filteredDelegate.get(key);
}
@Override
public boolean containsKey(@Nullable Object key) {
return filteredDelegate.containsKey(key);
}
@Override
public @Nullable V put(@ParametricNullness K key, @ParametricNullness V value) {
return filteredDelegate.put(key, value);
}
@Override
public @Nullable V remove(@Nullable Object key) {
return filteredDelegate.remove(key);
}
@Override
public void putAll(Map<? extends K, ? extends V> m) {
filteredDelegate.putAll(m);
}
@Override
public void clear() {
filteredDelegate.clear();
}
@Override
public Set<Entry<K, V>> entrySet() {
return filteredDelegate.entrySet();
}
@Override
public @Nullable Entry<K, V> pollFirstEntry() {
return Iterables.removeFirstMatching(unfiltered.entrySet(), entryPredicate);
}
@Override
public @Nullable Entry<K, V> pollLastEntry() {
return Iterables.removeFirstMatching(unfiltered.descendingMap().entrySet(), entryPredicate);
}
@Override
public NavigableMap<K, V> descendingMap() {
return filterEntries(unfiltered.descendingMap(), entryPredicate);
}
@Override
public NavigableMap<K, V> subMap(
@ParametricNullness K fromKey,
boolean fromInclusive,
@ParametricNullness K toKey,
boolean toInclusive) {
return filterEntries(
unfiltered.subMap(fromKey, fromInclusive, toKey, toInclusive), entryPredicate);
}
@Override
public NavigableMap<K, V> headMap(@ParametricNullness K toKey, boolean inclusive) {
return filterEntries(unfiltered.headMap(toKey, inclusive), entryPredicate);
}
@Override
public NavigableMap<K, V> tailMap(@ParametricNullness K fromKey, boolean inclusive) {
return filterEntries(unfiltered.tailMap(fromKey, inclusive), entryPredicate);
}
}
static final | FilteredEntryNavigableMap |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/webapp/GPGOverviewPage.java | {
"start": 1226,
"end": 1731
} | class ____ extends TwoColumnLayout {
@Override
protected void preHead(Page.HTML<__> html) {
commonPreHead(html);
setTitle("GPG");
}
protected void commonPreHead(Page.HTML<__> html) {
set(ACCORDION_ID, "nav");
set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}");
}
@Override
protected Class<? extends SubView> nav() {
return NavBlock.class;
}
@Override
protected Class<? extends SubView> content() {
return GPGOverviewBlock.class;
}
}
| GPGOverviewPage |
java | spring-projects__spring-boot | module/spring-boot-health/src/test/java/org/springframework/boot/health/autoconfigure/actuate/endpoint/AutoConfiguredHealthEndpointGroupsTests.java | {
"start": 19242,
"end": 19513
} | class ____ {
@Bean
@Primary
HttpCodeStatusMapper httpCodeStatusMapper() {
return new SimpleHttpCodeStatusMapper(Collections.singletonMap(Status.DOWN.getCode(), 200));
}
}
@Configuration(proxyBeanMethods = false)
static | CustomHttpCodeStatusMapperConfiguration |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/bootstrap/spi/metadatabuildercontributor/SqlFunctionMetadataBuilderContributorInstanceTest.java | {
"start": 407,
"end": 645
} | class ____
extends AbstractSqlFunctionMetadataBuilderContributorTest {
@Override
protected Object matadataBuilderContributor() {
return new SqlFunctionMetadataBuilderContributor();
}
}
| SqlFunctionMetadataBuilderContributorInstanceTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/VarifierTest.java | {
"start": 5505,
"end": 5645
} | class ____ {
static Builder newBuilder() {
return new Builder();
}
static | Foo |
java | apache__flink | flink-table/flink-table-runtime/src/test/java/org/apache/flink/table/runtime/typeutils/TimestampDataSerializerTest.java | {
"start": 1886,
"end": 2068
} | class ____ extends TimestampDataSerializerTest {
@Override
protected int getPrecision() {
return 0;
}
}
static final | TimestampSerializer0Test |
java | micronaut-projects__micronaut-core | inject-java/src/test/groovy/io/micronaut/inject/foreach/MyContextBeanListener.java | {
"start": 251,
"end": 680
} | class ____ implements BeanCreatedEventListener<MyContextBean> {
Set<String> createdNames = new HashSet<>();
@Override
public MyContextBean onCreated(BeanCreatedEvent<MyContextBean> event) {
String name = event.getBeanIdentifier().getName();
createdNames.add(name);
return event.getBean();
}
public Set<String> getCreatedNames() {
return createdNames;
}
}
| MyContextBeanListener |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/EndpointBuilderFactory.java | {
"start": 1184,
"end": 39595
} | interface ____
extends
org.apache.camel.builder.endpoint.dsl.AMQPEndpointBuilderFactory.AMQPBuilders,
org.apache.camel.builder.endpoint.dsl.AS2EndpointBuilderFactory.AS2Builders,
org.apache.camel.builder.endpoint.dsl.AWS2EC2EndpointBuilderFactory.AWS2EC2Builders,
org.apache.camel.builder.endpoint.dsl.AWS2S3EndpointBuilderFactory.AWS2S3Builders,
org.apache.camel.builder.endpoint.dsl.AWSConfigEndpointBuilderFactory.AWSConfigBuilders,
org.apache.camel.builder.endpoint.dsl.ActiveMQ6EndpointBuilderFactory.ActiveMQ6Builders,
org.apache.camel.builder.endpoint.dsl.ActiveMQEndpointBuilderFactory.ActiveMQBuilders,
org.apache.camel.builder.endpoint.dsl.ArangoDbEndpointBuilderFactory.ArangoDbBuilders,
org.apache.camel.builder.endpoint.dsl.AsteriskEndpointBuilderFactory.AsteriskBuilders,
org.apache.camel.builder.endpoint.dsl.Athena2EndpointBuilderFactory.Athena2Builders,
org.apache.camel.builder.endpoint.dsl.AtmosphereWebsocketEndpointBuilderFactory.AtmosphereWebsocketBuilders,
org.apache.camel.builder.endpoint.dsl.AtomEndpointBuilderFactory.AtomBuilders,
org.apache.camel.builder.endpoint.dsl.AvroEndpointBuilderFactory.AvroBuilders,
org.apache.camel.builder.endpoint.dsl.BeanEndpointBuilderFactory.BeanBuilders,
org.apache.camel.builder.endpoint.dsl.BeanValidatorEndpointBuilderFactory.BeanValidatorBuilders,
org.apache.camel.builder.endpoint.dsl.BedrockAgentEndpointBuilderFactory.BedrockAgentBuilders,
org.apache.camel.builder.endpoint.dsl.BedrockAgentRuntimeEndpointBuilderFactory.BedrockAgentRuntimeBuilders,
org.apache.camel.builder.endpoint.dsl.BedrockEndpointBuilderFactory.BedrockBuilders,
org.apache.camel.builder.endpoint.dsl.BlobEndpointBuilderFactory.BlobBuilders,
org.apache.camel.builder.endpoint.dsl.BonitaEndpointBuilderFactory.BonitaBuilders,
org.apache.camel.builder.endpoint.dsl.BoxEndpointBuilderFactory.BoxBuilders,
org.apache.camel.builder.endpoint.dsl.BraintreeEndpointBuilderFactory.BraintreeBuilders,
org.apache.camel.builder.endpoint.dsl.BrowseEndpointBuilderFactory.BrowseBuilders,
org.apache.camel.builder.endpoint.dsl.CMEndpointBuilderFactory.CMBuilders,
org.apache.camel.builder.endpoint.dsl.CaffeineCacheEndpointBuilderFactory.CaffeineCacheBuilders,
org.apache.camel.builder.endpoint.dsl.CaffeineLoadCacheEndpointBuilderFactory.CaffeineLoadCacheBuilders,
org.apache.camel.builder.endpoint.dsl.CassandraEndpointBuilderFactory.CassandraBuilders,
org.apache.camel.builder.endpoint.dsl.ChatScriptEndpointBuilderFactory.ChatScriptBuilders,
org.apache.camel.builder.endpoint.dsl.ChunkEndpointBuilderFactory.ChunkBuilders,
org.apache.camel.builder.endpoint.dsl.CinderEndpointBuilderFactory.CinderBuilders,
org.apache.camel.builder.endpoint.dsl.ClassEndpointBuilderFactory.ClassBuilders,
org.apache.camel.builder.endpoint.dsl.ClickUpEndpointBuilderFactory.ClickUpBuilders,
org.apache.camel.builder.endpoint.dsl.ClientEndpointBuilderFactory.ClientBuilders,
org.apache.camel.builder.endpoint.dsl.CloudtrailEndpointBuilderFactory.CloudtrailBuilders,
org.apache.camel.builder.endpoint.dsl.CoAPEndpointBuilderFactory.CoAPBuilders,
org.apache.camel.builder.endpoint.dsl.CometdEndpointBuilderFactory.CometdBuilders,
org.apache.camel.builder.endpoint.dsl.ConsulEndpointBuilderFactory.ConsulBuilders,
org.apache.camel.builder.endpoint.dsl.ControlBusEndpointBuilderFactory.ControlBusBuilders,
org.apache.camel.builder.endpoint.dsl.CosmosDbEndpointBuilderFactory.CosmosDbBuilders,
org.apache.camel.builder.endpoint.dsl.CouchDbEndpointBuilderFactory.CouchDbBuilders,
org.apache.camel.builder.endpoint.dsl.CouchbaseEndpointBuilderFactory.CouchbaseBuilders,
org.apache.camel.builder.endpoint.dsl.CronEndpointBuilderFactory.CronBuilders,
org.apache.camel.builder.endpoint.dsl.Cw2EndpointBuilderFactory.Cw2Builders,
org.apache.camel.builder.endpoint.dsl.CxfEndpointBuilderFactory.CxfBuilders,
org.apache.camel.builder.endpoint.dsl.CxfRsEndpointBuilderFactory.CxfRsBuilders,
org.apache.camel.builder.endpoint.dsl.CyberArkVaultEndpointBuilderFactory.CyberArkVaultBuilders,
org.apache.camel.builder.endpoint.dsl.DJLEndpointBuilderFactory.DJLBuilders,
org.apache.camel.builder.endpoint.dsl.DMSEndpointBuilderFactory.DMSBuilders,
org.apache.camel.builder.endpoint.dsl.DaprEndpointBuilderFactory.DaprBuilders,
org.apache.camel.builder.endpoint.dsl.DataFormatEndpointBuilderFactory.DataFormatBuilders,
org.apache.camel.builder.endpoint.dsl.DataLakeEndpointBuilderFactory.DataLakeBuilders,
org.apache.camel.builder.endpoint.dsl.DataSetEndpointBuilderFactory.DataSetBuilders,
org.apache.camel.builder.endpoint.dsl.DataSetTestEndpointBuilderFactory.DataSetTestBuilders,
org.apache.camel.builder.endpoint.dsl.Ddb2EndpointBuilderFactory.Ddb2Builders,
org.apache.camel.builder.endpoint.dsl.Ddb2StreamEndpointBuilderFactory.Ddb2StreamBuilders,
org.apache.camel.builder.endpoint.dsl.DebeziumDb2EndpointBuilderFactory.DebeziumDb2Builders,
org.apache.camel.builder.endpoint.dsl.DebeziumMongodbEndpointBuilderFactory.DebeziumMongodbBuilders,
org.apache.camel.builder.endpoint.dsl.DebeziumMySqlEndpointBuilderFactory.DebeziumMySqlBuilders,
org.apache.camel.builder.endpoint.dsl.DebeziumOracleEndpointBuilderFactory.DebeziumOracleBuilders,
org.apache.camel.builder.endpoint.dsl.DebeziumPostgresEndpointBuilderFactory.DebeziumPostgresBuilders,
org.apache.camel.builder.endpoint.dsl.DebeziumSqlserverEndpointBuilderFactory.DebeziumSqlserverBuilders,
org.apache.camel.builder.endpoint.dsl.DfdlEndpointBuilderFactory.DfdlBuilders,
org.apache.camel.builder.endpoint.dsl.Dhis2EndpointBuilderFactory.Dhis2Builders,
org.apache.camel.builder.endpoint.dsl.DigitalOceanEndpointBuilderFactory.DigitalOceanBuilders,
org.apache.camel.builder.endpoint.dsl.DigitalSignatureEndpointBuilderFactory.DigitalSignatureBuilders,
org.apache.camel.builder.endpoint.dsl.DirectEndpointBuilderFactory.DirectBuilders,
org.apache.camel.builder.endpoint.dsl.DisruptorEndpointBuilderFactory.DisruptorBuilders,
org.apache.camel.builder.endpoint.dsl.DisruptorVmEndpointBuilderFactory.DisruptorVmBuilders,
org.apache.camel.builder.endpoint.dsl.DnsEndpointBuilderFactory.DnsBuilders,
org.apache.camel.builder.endpoint.dsl.DockerEndpointBuilderFactory.DockerBuilders,
org.apache.camel.builder.endpoint.dsl.DoclingEndpointBuilderFactory.DoclingBuilders,
org.apache.camel.builder.endpoint.dsl.DrillEndpointBuilderFactory.DrillBuilders,
org.apache.camel.builder.endpoint.dsl.DropboxEndpointBuilderFactory.DropboxBuilders,
org.apache.camel.builder.endpoint.dsl.DynamicRouterControlEndpointBuilderFactory.DynamicRouterControlBuilders,
org.apache.camel.builder.endpoint.dsl.DynamicRouterEndpointBuilderFactory.DynamicRouterBuilders,
org.apache.camel.builder.endpoint.dsl.ECS2EndpointBuilderFactory.ECS2Builders,
org.apache.camel.builder.endpoint.dsl.EKS2EndpointBuilderFactory.EKS2Builders,
org.apache.camel.builder.endpoint.dsl.EhcacheEndpointBuilderFactory.EhcacheBuilders,
org.apache.camel.builder.endpoint.dsl.ElasticsearchEndpointBuilderFactory.ElasticsearchBuilders,
org.apache.camel.builder.endpoint.dsl.ElasticsearchRestClientEndpointBuilderFactory.ElasticsearchRestClientBuilders,
org.apache.camel.builder.endpoint.dsl.EventEndpointBuilderFactory.EventBuilders,
org.apache.camel.builder.endpoint.dsl.EventHubsEndpointBuilderFactory.EventHubsBuilders,
org.apache.camel.builder.endpoint.dsl.EventbridgeEndpointBuilderFactory.EventbridgeBuilders,
org.apache.camel.builder.endpoint.dsl.ExecEndpointBuilderFactory.ExecBuilders,
org.apache.camel.builder.endpoint.dsl.FaceRecognitionEndpointBuilderFactory.FaceRecognitionBuilders,
org.apache.camel.builder.endpoint.dsl.FhirEndpointBuilderFactory.FhirBuilders,
org.apache.camel.builder.endpoint.dsl.FileEndpointBuilderFactory.FileBuilders,
org.apache.camel.builder.endpoint.dsl.FileWatchEndpointBuilderFactory.FileWatchBuilders,
org.apache.camel.builder.endpoint.dsl.FilesEndpointBuilderFactory.FilesBuilders,
org.apache.camel.builder.endpoint.dsl.FlatpackEndpointBuilderFactory.FlatpackBuilders,
org.apache.camel.builder.endpoint.dsl.FlinkEndpointBuilderFactory.FlinkBuilders,
org.apache.camel.builder.endpoint.dsl.FlowableEndpointBuilderFactory.FlowableBuilders,
org.apache.camel.builder.endpoint.dsl.FopEndpointBuilderFactory.FopBuilders,
org.apache.camel.builder.endpoint.dsl.FreemarkerEndpointBuilderFactory.FreemarkerBuilders,
org.apache.camel.builder.endpoint.dsl.FtpEndpointBuilderFactory.FtpBuilders,
org.apache.camel.builder.endpoint.dsl.FtpsEndpointBuilderFactory.FtpsBuilders,
org.apache.camel.builder.endpoint.dsl.FunctionGraphEndpointBuilderFactory.FunctionGraphBuilders,
org.apache.camel.builder.endpoint.dsl.GeoCoderEndpointBuilderFactory.GeoCoderBuilders,
org.apache.camel.builder.endpoint.dsl.GitEndpointBuilderFactory.GitBuilders,
org.apache.camel.builder.endpoint.dsl.GitHubEndpointBuilderFactory.GitHubBuilders,
org.apache.camel.builder.endpoint.dsl.GlanceEndpointBuilderFactory.GlanceBuilders,
org.apache.camel.builder.endpoint.dsl.GoogleBigQueryEndpointBuilderFactory.GoogleBigQueryBuilders,
org.apache.camel.builder.endpoint.dsl.GoogleBigQuerySQLEndpointBuilderFactory.GoogleBigQuerySQLBuilders,
org.apache.camel.builder.endpoint.dsl.GoogleCalendarEndpointBuilderFactory.GoogleCalendarBuilders,
org.apache.camel.builder.endpoint.dsl.GoogleCalendarStreamEndpointBuilderFactory.GoogleCalendarStreamBuilders,
org.apache.camel.builder.endpoint.dsl.GoogleCloudFunctionsEndpointBuilderFactory.GoogleCloudFunctionsBuilders,
org.apache.camel.builder.endpoint.dsl.GoogleCloudStorageEndpointBuilderFactory.GoogleCloudStorageBuilders,
org.apache.camel.builder.endpoint.dsl.GoogleDriveEndpointBuilderFactory.GoogleDriveBuilders,
org.apache.camel.builder.endpoint.dsl.GoogleMailEndpointBuilderFactory.GoogleMailBuilders,
org.apache.camel.builder.endpoint.dsl.GoogleMailStreamEndpointBuilderFactory.GoogleMailStreamBuilders,
org.apache.camel.builder.endpoint.dsl.GooglePubsubEndpointBuilderFactory.GooglePubsubBuilders,
org.apache.camel.builder.endpoint.dsl.GooglePubsubLiteEndpointBuilderFactory.GooglePubsubLiteBuilders,
org.apache.camel.builder.endpoint.dsl.GoogleSecretManagerEndpointBuilderFactory.GoogleSecretManagerBuilders,
org.apache.camel.builder.endpoint.dsl.GoogleSheetsEndpointBuilderFactory.GoogleSheetsBuilders,
org.apache.camel.builder.endpoint.dsl.GoogleSheetsStreamEndpointBuilderFactory.GoogleSheetsStreamBuilders,
org.apache.camel.builder.endpoint.dsl.GrapeEndpointBuilderFactory.GrapeBuilders,
org.apache.camel.builder.endpoint.dsl.GraphqlEndpointBuilderFactory.GraphqlBuilders,
org.apache.camel.builder.endpoint.dsl.GridFsEndpointBuilderFactory.GridFsBuilders,
org.apache.camel.builder.endpoint.dsl.GrpcEndpointBuilderFactory.GrpcBuilders,
org.apache.camel.builder.endpoint.dsl.GuavaEventBusEndpointBuilderFactory.GuavaEventBusBuilders,
org.apache.camel.builder.endpoint.dsl.HashicorpVaultEndpointBuilderFactory.HashicorpVaultBuilders,
org.apache.camel.builder.endpoint.dsl.HazelcastAtomicnumberEndpointBuilderFactory.HazelcastAtomicnumberBuilders,
org.apache.camel.builder.endpoint.dsl.HazelcastInstanceEndpointBuilderFactory.HazelcastInstanceBuilders,
org.apache.camel.builder.endpoint.dsl.HazelcastListEndpointBuilderFactory.HazelcastListBuilders,
org.apache.camel.builder.endpoint.dsl.HazelcastMapEndpointBuilderFactory.HazelcastMapBuilders,
org.apache.camel.builder.endpoint.dsl.HazelcastMultimapEndpointBuilderFactory.HazelcastMultimapBuilders,
org.apache.camel.builder.endpoint.dsl.HazelcastQueueEndpointBuilderFactory.HazelcastQueueBuilders,
org.apache.camel.builder.endpoint.dsl.HazelcastReplicatedmapEndpointBuilderFactory.HazelcastReplicatedmapBuilders,
org.apache.camel.builder.endpoint.dsl.HazelcastRingbufferEndpointBuilderFactory.HazelcastRingbufferBuilders,
org.apache.camel.builder.endpoint.dsl.HazelcastSedaEndpointBuilderFactory.HazelcastSedaBuilders,
org.apache.camel.builder.endpoint.dsl.HazelcastSetEndpointBuilderFactory.HazelcastSetBuilders,
org.apache.camel.builder.endpoint.dsl.HazelcastTopicEndpointBuilderFactory.HazelcastTopicBuilders,
org.apache.camel.builder.endpoint.dsl.HttpEndpointBuilderFactory.HttpBuilders,
org.apache.camel.builder.endpoint.dsl.IAM2EndpointBuilderFactory.IAM2Builders,
org.apache.camel.builder.endpoint.dsl.IAMEndpointBuilderFactory.IAMBuilders,
org.apache.camel.builder.endpoint.dsl.IBMCOSEndpointBuilderFactory.IBMCOSBuilders,
org.apache.camel.builder.endpoint.dsl.IBMSecretsManagerEndpointBuilderFactory.IBMSecretsManagerBuilders,
org.apache.camel.builder.endpoint.dsl.IgniteCacheEndpointBuilderFactory.IgniteCacheBuilders,
org.apache.camel.builder.endpoint.dsl.IgniteComputeEndpointBuilderFactory.IgniteComputeBuilders,
org.apache.camel.builder.endpoint.dsl.IgniteEventsEndpointBuilderFactory.IgniteEventsBuilders,
org.apache.camel.builder.endpoint.dsl.IgniteIdGenEndpointBuilderFactory.IgniteIdGenBuilders,
org.apache.camel.builder.endpoint.dsl.IgniteMessagingEndpointBuilderFactory.IgniteMessagingBuilders,
org.apache.camel.builder.endpoint.dsl.IgniteQueueEndpointBuilderFactory.IgniteQueueBuilders,
org.apache.camel.builder.endpoint.dsl.IgniteSetEndpointBuilderFactory.IgniteSetBuilders,
org.apache.camel.builder.endpoint.dsl.ImageRecognitionEndpointBuilderFactory.ImageRecognitionBuilders,
org.apache.camel.builder.endpoint.dsl.InfinispanEmbeddedEndpointBuilderFactory.InfinispanEmbeddedBuilders,
org.apache.camel.builder.endpoint.dsl.InfinispanRemoteEndpointBuilderFactory.InfinispanRemoteBuilders,
org.apache.camel.builder.endpoint.dsl.InfluxDb2EndpointBuilderFactory.InfluxDb2Builders,
org.apache.camel.builder.endpoint.dsl.InfluxDbEndpointBuilderFactory.InfluxDbBuilders,
org.apache.camel.builder.endpoint.dsl.IrcEndpointBuilderFactory.IrcBuilders,
org.apache.camel.builder.endpoint.dsl.IronMQEndpointBuilderFactory.IronMQBuilders,
org.apache.camel.builder.endpoint.dsl.JCacheEndpointBuilderFactory.JCacheBuilders,
org.apache.camel.builder.endpoint.dsl.JGroupsEndpointBuilderFactory.JGroupsBuilders,
org.apache.camel.builder.endpoint.dsl.JGroupsRaftEndpointBuilderFactory.JGroupsRaftBuilders,
org.apache.camel.builder.endpoint.dsl.JMXEndpointBuilderFactory.JMXBuilders,
org.apache.camel.builder.endpoint.dsl.JcrEndpointBuilderFactory.JcrBuilders,
org.apache.camel.builder.endpoint.dsl.JdbcEndpointBuilderFactory.JdbcBuilders,
org.apache.camel.builder.endpoint.dsl.JettyHttp12EndpointBuilderFactory.JettyHttp12Builders,
org.apache.camel.builder.endpoint.dsl.JiraEndpointBuilderFactory.JiraBuilders,
org.apache.camel.builder.endpoint.dsl.JmsEndpointBuilderFactory.JmsBuilders,
org.apache.camel.builder.endpoint.dsl.JoltEndpointBuilderFactory.JoltBuilders,
org.apache.camel.builder.endpoint.dsl.JooqEndpointBuilderFactory.JooqBuilders,
org.apache.camel.builder.endpoint.dsl.JpaEndpointBuilderFactory.JpaBuilders,
org.apache.camel.builder.endpoint.dsl.JsltEndpointBuilderFactory.JsltBuilders,
org.apache.camel.builder.endpoint.dsl.JsonPatchEndpointBuilderFactory.JsonPatchBuilders,
org.apache.camel.builder.endpoint.dsl.JsonValidatorEndpointBuilderFactory.JsonValidatorBuilders,
org.apache.camel.builder.endpoint.dsl.JsonataEndpointBuilderFactory.JsonataBuilders,
org.apache.camel.builder.endpoint.dsl.Jt400EndpointBuilderFactory.Jt400Builders,
org.apache.camel.builder.endpoint.dsl.JteEndpointBuilderFactory.JteBuilders,
org.apache.camel.builder.endpoint.dsl.KMS2EndpointBuilderFactory.KMS2Builders,
org.apache.camel.builder.endpoint.dsl.KServeEndpointBuilderFactory.KServeBuilders,
org.apache.camel.builder.endpoint.dsl.KafkaEndpointBuilderFactory.KafkaBuilders,
org.apache.camel.builder.endpoint.dsl.KameletEndpointBuilderFactory.KameletBuilders,
org.apache.camel.builder.endpoint.dsl.KeyVaultEndpointBuilderFactory.KeyVaultBuilders,
org.apache.camel.builder.endpoint.dsl.KeycloakEndpointBuilderFactory.KeycloakBuilders,
org.apache.camel.builder.endpoint.dsl.KeystoneEndpointBuilderFactory.KeystoneBuilders,
org.apache.camel.builder.endpoint.dsl.Kinesis2EndpointBuilderFactory.Kinesis2Builders,
org.apache.camel.builder.endpoint.dsl.KinesisFirehose2EndpointBuilderFactory.KinesisFirehose2Builders,
org.apache.camel.builder.endpoint.dsl.KnativeEndpointBuilderFactory.KnativeBuilders,
org.apache.camel.builder.endpoint.dsl.KubernetesConfigMapsEndpointBuilderFactory.KubernetesConfigMapsBuilders,
org.apache.camel.builder.endpoint.dsl.KubernetesCronJobEndpointBuilderFactory.KubernetesCronJobBuilders,
org.apache.camel.builder.endpoint.dsl.KubernetesCustomResourcesEndpointBuilderFactory.KubernetesCustomResourcesBuilders,
org.apache.camel.builder.endpoint.dsl.KubernetesDeploymentsEndpointBuilderFactory.KubernetesDeploymentsBuilders,
org.apache.camel.builder.endpoint.dsl.KubernetesEventsEndpointBuilderFactory.KubernetesEventsBuilders,
org.apache.camel.builder.endpoint.dsl.KubernetesHPAEndpointBuilderFactory.KubernetesHPABuilders,
org.apache.camel.builder.endpoint.dsl.KubernetesJobEndpointBuilderFactory.KubernetesJobBuilders,
org.apache.camel.builder.endpoint.dsl.KubernetesNamespacesEndpointBuilderFactory.KubernetesNamespacesBuilders,
org.apache.camel.builder.endpoint.dsl.KubernetesNodesEndpointBuilderFactory.KubernetesNodesBuilders,
org.apache.camel.builder.endpoint.dsl.KubernetesPersistentVolumesClaimsEndpointBuilderFactory.KubernetesPersistentVolumesClaimsBuilders,
org.apache.camel.builder.endpoint.dsl.KubernetesPersistentVolumesEndpointBuilderFactory.KubernetesPersistentVolumesBuilders,
org.apache.camel.builder.endpoint.dsl.KubernetesPodsEndpointBuilderFactory.KubernetesPodsBuilders,
org.apache.camel.builder.endpoint.dsl.KubernetesReplicationControllersEndpointBuilderFactory.KubernetesReplicationControllersBuilders,
org.apache.camel.builder.endpoint.dsl.KubernetesResourcesQuotaEndpointBuilderFactory.KubernetesResourcesQuotaBuilders,
org.apache.camel.builder.endpoint.dsl.KubernetesSecretsEndpointBuilderFactory.KubernetesSecretsBuilders,
org.apache.camel.builder.endpoint.dsl.KubernetesServiceAccountsEndpointBuilderFactory.KubernetesServiceAccountsBuilders,
org.apache.camel.builder.endpoint.dsl.KubernetesServicesEndpointBuilderFactory.KubernetesServicesBuilders,
org.apache.camel.builder.endpoint.dsl.KuduEndpointBuilderFactory.KuduBuilders,
org.apache.camel.builder.endpoint.dsl.Lambda2EndpointBuilderFactory.Lambda2Builders,
org.apache.camel.builder.endpoint.dsl.LangChain4jAgentEndpointBuilderFactory.LangChain4jAgentBuilders,
org.apache.camel.builder.endpoint.dsl.LangChain4jChatEndpointBuilderFactory.LangChain4jChatBuilders,
org.apache.camel.builder.endpoint.dsl.LangChain4jEmbeddingStoreEndpointBuilderFactory.LangChain4jEmbeddingStoreBuilders,
org.apache.camel.builder.endpoint.dsl.LangChain4jEmbeddingsEndpointBuilderFactory.LangChain4jEmbeddingsBuilders,
org.apache.camel.builder.endpoint.dsl.LangChain4jToolsEndpointBuilderFactory.LangChain4jToolsBuilders,
org.apache.camel.builder.endpoint.dsl.LangChain4jWebSearchEndpointBuilderFactory.LangChain4jWebSearchBuilders,
org.apache.camel.builder.endpoint.dsl.LanguageEndpointBuilderFactory.LanguageBuilders,
org.apache.camel.builder.endpoint.dsl.LdapEndpointBuilderFactory.LdapBuilders,
org.apache.camel.builder.endpoint.dsl.LdifEndpointBuilderFactory.LdifBuilders,
org.apache.camel.builder.endpoint.dsl.LogEndpointBuilderFactory.LogBuilders,
org.apache.camel.builder.endpoint.dsl.LuceneEndpointBuilderFactory.LuceneBuilders,
org.apache.camel.builder.endpoint.dsl.LumberjackEndpointBuilderFactory.LumberjackBuilders,
org.apache.camel.builder.endpoint.dsl.MQ2EndpointBuilderFactory.MQ2Builders,
org.apache.camel.builder.endpoint.dsl.MSK2EndpointBuilderFactory.MSK2Builders,
org.apache.camel.builder.endpoint.dsl.MailEndpointBuilderFactory.MailBuilders,
org.apache.camel.builder.endpoint.dsl.MapstructEndpointBuilderFactory.MapstructBuilders,
org.apache.camel.builder.endpoint.dsl.MasterEndpointBuilderFactory.MasterBuilders,
org.apache.camel.builder.endpoint.dsl.MetricsEndpointBuilderFactory.MetricsBuilders,
org.apache.camel.builder.endpoint.dsl.MicrometerEndpointBuilderFactory.MicrometerBuilders,
org.apache.camel.builder.endpoint.dsl.MiloBrowseEndpointBuilderFactory.MiloBrowseBuilders,
org.apache.camel.builder.endpoint.dsl.MiloClientEndpointBuilderFactory.MiloClientBuilders,
org.apache.camel.builder.endpoint.dsl.MiloServerEndpointBuilderFactory.MiloServerBuilders,
org.apache.camel.builder.endpoint.dsl.MilvusEndpointBuilderFactory.MilvusBuilders,
org.apache.camel.builder.endpoint.dsl.MinaEndpointBuilderFactory.MinaBuilders,
org.apache.camel.builder.endpoint.dsl.MinioEndpointBuilderFactory.MinioBuilders,
org.apache.camel.builder.endpoint.dsl.MllpEndpointBuilderFactory.MllpBuilders,
org.apache.camel.builder.endpoint.dsl.MockEndpointBuilderFactory.MockBuilders,
org.apache.camel.builder.endpoint.dsl.MongoDbEndpointBuilderFactory.MongoDbBuilders,
org.apache.camel.builder.endpoint.dsl.MustacheEndpointBuilderFactory.MustacheBuilders,
org.apache.camel.builder.endpoint.dsl.MvelEndpointBuilderFactory.MvelBuilders,
org.apache.camel.builder.endpoint.dsl.MyBatisBeanEndpointBuilderFactory.MyBatisBeanBuilders,
org.apache.camel.builder.endpoint.dsl.MyBatisEndpointBuilderFactory.MyBatisBuilders,
org.apache.camel.builder.endpoint.dsl.NatsEndpointBuilderFactory.NatsBuilders,
org.apache.camel.builder.endpoint.dsl.Neo4jEndpointBuilderFactory.Neo4jBuilders,
org.apache.camel.builder.endpoint.dsl.NetWeaverEndpointBuilderFactory.NetWeaverBuilders,
org.apache.camel.builder.endpoint.dsl.NettyEndpointBuilderFactory.NettyBuilders,
org.apache.camel.builder.endpoint.dsl.NettyHttpEndpointBuilderFactory.NettyHttpBuilders,
org.apache.camel.builder.endpoint.dsl.NeutronEndpointBuilderFactory.NeutronBuilders,
org.apache.camel.builder.endpoint.dsl.NitriteEndpointBuilderFactory.NitriteBuilders,
org.apache.camel.builder.endpoint.dsl.NovaEndpointBuilderFactory.NovaBuilders,
org.apache.camel.builder.endpoint.dsl.OAIPMHEndpointBuilderFactory.OAIPMHBuilders,
org.apache.camel.builder.endpoint.dsl.OBSEndpointBuilderFactory.OBSBuilders,
org.apache.camel.builder.endpoint.dsl.Olingo2EndpointBuilderFactory.Olingo2Builders,
org.apache.camel.builder.endpoint.dsl.Olingo4EndpointBuilderFactory.Olingo4Builders,
org.apache.camel.builder.endpoint.dsl.OpenTelemetryEndpointBuilderFactory.OpenTelemetryBuilders,
org.apache.camel.builder.endpoint.dsl.OpensearchEndpointBuilderFactory.OpensearchBuilders,
org.apache.camel.builder.endpoint.dsl.OpenshiftBuildConfigsEndpointBuilderFactory.OpenshiftBuildConfigsBuilders,
org.apache.camel.builder.endpoint.dsl.OpenshiftBuildsEndpointBuilderFactory.OpenshiftBuildsBuilders,
org.apache.camel.builder.endpoint.dsl.OpenshiftDeploymentConfigsEndpointBuilderFactory.OpenshiftDeploymentConfigsBuilders,
org.apache.camel.builder.endpoint.dsl.OptaPlannerEndpointBuilderFactory.OptaPlannerBuilders,
org.apache.camel.builder.endpoint.dsl.PQCEndpointBuilderFactory.PQCBuilders,
org.apache.camel.builder.endpoint.dsl.PahoEndpointBuilderFactory.PahoBuilders,
org.apache.camel.builder.endpoint.dsl.PahoMqtt5EndpointBuilderFactory.PahoMqtt5Builders,
org.apache.camel.builder.endpoint.dsl.PdfEndpointBuilderFactory.PdfBuilders,
org.apache.camel.builder.endpoint.dsl.PgEventEndpointBuilderFactory.PgEventBuilders,
org.apache.camel.builder.endpoint.dsl.PgReplicationSlotEndpointBuilderFactory.PgReplicationSlotBuilders,
org.apache.camel.builder.endpoint.dsl.PineconeVectorDbEndpointBuilderFactory.PineconeVectorDbBuilders,
org.apache.camel.builder.endpoint.dsl.PlatformHttpEndpointBuilderFactory.PlatformHttpBuilders,
org.apache.camel.builder.endpoint.dsl.Plc4XEndpointBuilderFactory.Plc4XBuilders,
org.apache.camel.builder.endpoint.dsl.PrinterEndpointBuilderFactory.PrinterBuilders,
org.apache.camel.builder.endpoint.dsl.PubNubEndpointBuilderFactory.PubNubBuilders,
org.apache.camel.builder.endpoint.dsl.PulsarEndpointBuilderFactory.PulsarBuilders,
org.apache.camel.builder.endpoint.dsl.QdrantEndpointBuilderFactory.QdrantBuilders,
org.apache.camel.builder.endpoint.dsl.QuartzEndpointBuilderFactory.QuartzBuilders,
org.apache.camel.builder.endpoint.dsl.QueueEndpointBuilderFactory.QueueBuilders,
org.apache.camel.builder.endpoint.dsl.QuickfixjEndpointBuilderFactory.QuickfixjBuilders,
org.apache.camel.builder.endpoint.dsl.ReactiveStreamsEndpointBuilderFactory.ReactiveStreamsBuilders,
org.apache.camel.builder.endpoint.dsl.RedisEndpointBuilderFactory.RedisBuilders,
org.apache.camel.builder.endpoint.dsl.RedshiftData2EndpointBuilderFactory.RedshiftData2Builders,
org.apache.camel.builder.endpoint.dsl.RefEndpointBuilderFactory.RefBuilders,
org.apache.camel.builder.endpoint.dsl.RestApiEndpointBuilderFactory.RestApiBuilders,
org.apache.camel.builder.endpoint.dsl.RestEndpointBuilderFactory.RestBuilders,
org.apache.camel.builder.endpoint.dsl.RestOpenApiEndpointBuilderFactory.RestOpenApiBuilders,
org.apache.camel.builder.endpoint.dsl.RobotFrameworkEndpointBuilderFactory.RobotFrameworkBuilders,
org.apache.camel.builder.endpoint.dsl.RocketMQEndpointBuilderFactory.RocketMQBuilders,
org.apache.camel.builder.endpoint.dsl.RssEndpointBuilderFactory.RssBuilders,
org.apache.camel.builder.endpoint.dsl.STS2EndpointBuilderFactory.STS2Builders,
org.apache.camel.builder.endpoint.dsl.SagaEndpointBuilderFactory.SagaBuilders,
org.apache.camel.builder.endpoint.dsl.SalesforceEndpointBuilderFactory.SalesforceBuilders,
org.apache.camel.builder.endpoint.dsl.SchedulerEndpointBuilderFactory.SchedulerBuilders,
org.apache.camel.builder.endpoint.dsl.SchematronEndpointBuilderFactory.SchematronBuilders,
org.apache.camel.builder.endpoint.dsl.ScpEndpointBuilderFactory.ScpBuilders,
org.apache.camel.builder.endpoint.dsl.SecretsManagerEndpointBuilderFactory.SecretsManagerBuilders,
org.apache.camel.builder.endpoint.dsl.SedaEndpointBuilderFactory.SedaBuilders,
org.apache.camel.builder.endpoint.dsl.ServerEndpointBuilderFactory.ServerBuilders,
org.apache.camel.builder.endpoint.dsl.ServiceBusEndpointBuilderFactory.ServiceBusBuilders,
org.apache.camel.builder.endpoint.dsl.ServiceEndpointBuilderFactory.ServiceBuilders,
org.apache.camel.builder.endpoint.dsl.ServiceNowEndpointBuilderFactory.ServiceNowBuilders,
org.apache.camel.builder.endpoint.dsl.ServletEndpointBuilderFactory.ServletBuilders,
org.apache.camel.builder.endpoint.dsl.Ses2EndpointBuilderFactory.Ses2Builders,
org.apache.camel.builder.endpoint.dsl.SftpEndpointBuilderFactory.SftpBuilders,
org.apache.camel.builder.endpoint.dsl.SimpleNotificationEndpointBuilderFactory.SimpleNotificationBuilders,
org.apache.camel.builder.endpoint.dsl.Sjms2EndpointBuilderFactory.Sjms2Builders,
org.apache.camel.builder.endpoint.dsl.SjmsEndpointBuilderFactory.SjmsBuilders,
org.apache.camel.builder.endpoint.dsl.SlackEndpointBuilderFactory.SlackBuilders,
org.apache.camel.builder.endpoint.dsl.SmbEndpointBuilderFactory.SmbBuilders,
org.apache.camel.builder.endpoint.dsl.SmooksEndpointBuilderFactory.SmooksBuilders,
org.apache.camel.builder.endpoint.dsl.SmppEndpointBuilderFactory.SmppBuilders,
org.apache.camel.builder.endpoint.dsl.SnmpEndpointBuilderFactory.SnmpBuilders,
org.apache.camel.builder.endpoint.dsl.Sns2EndpointBuilderFactory.Sns2Builders,
org.apache.camel.builder.endpoint.dsl.SolrEndpointBuilderFactory.SolrBuilders,
org.apache.camel.builder.endpoint.dsl.SplunkEndpointBuilderFactory.SplunkBuilders,
org.apache.camel.builder.endpoint.dsl.SplunkHECEndpointBuilderFactory.SplunkHECBuilders,
org.apache.camel.builder.endpoint.dsl.SpringAiChatEndpointBuilderFactory.SpringAiChatBuilders,
org.apache.camel.builder.endpoint.dsl.SpringAiEmbeddingsEndpointBuilderFactory.SpringAiEmbeddingsBuilders,
org.apache.camel.builder.endpoint.dsl.SpringAiToolsEndpointBuilderFactory.SpringAiToolsBuilders,
org.apache.camel.builder.endpoint.dsl.SpringAiVectorStoreEndpointBuilderFactory.SpringAiVectorStoreBuilders,
org.apache.camel.builder.endpoint.dsl.SpringBatchEndpointBuilderFactory.SpringBatchBuilders,
org.apache.camel.builder.endpoint.dsl.SpringJdbcEndpointBuilderFactory.SpringJdbcBuilders,
org.apache.camel.builder.endpoint.dsl.SpringLdapEndpointBuilderFactory.SpringLdapBuilders,
org.apache.camel.builder.endpoint.dsl.SpringRabbitMQEndpointBuilderFactory.SpringRabbitMQBuilders,
org.apache.camel.builder.endpoint.dsl.SpringWebserviceEndpointBuilderFactory.SpringWebserviceBuilders,
org.apache.camel.builder.endpoint.dsl.SqlEndpointBuilderFactory.SqlBuilders,
org.apache.camel.builder.endpoint.dsl.SqlStoredEndpointBuilderFactory.SqlStoredBuilders,
org.apache.camel.builder.endpoint.dsl.Sqs2EndpointBuilderFactory.Sqs2Builders,
org.apache.camel.builder.endpoint.dsl.SshEndpointBuilderFactory.SshBuilders,
org.apache.camel.builder.endpoint.dsl.StAXEndpointBuilderFactory.StAXBuilders,
org.apache.camel.builder.endpoint.dsl.StepFunctions2EndpointBuilderFactory.StepFunctions2Builders,
org.apache.camel.builder.endpoint.dsl.StitchEndpointBuilderFactory.StitchBuilders,
org.apache.camel.builder.endpoint.dsl.StompEndpointBuilderFactory.StompBuilders,
org.apache.camel.builder.endpoint.dsl.StreamEndpointBuilderFactory.StreamBuilders,
org.apache.camel.builder.endpoint.dsl.StringTemplateEndpointBuilderFactory.StringTemplateBuilders,
org.apache.camel.builder.endpoint.dsl.StubEndpointBuilderFactory.StubBuilders,
org.apache.camel.builder.endpoint.dsl.SwiftEndpointBuilderFactory.SwiftBuilders,
org.apache.camel.builder.endpoint.dsl.TahuEdgeEndpointBuilderFactory.TahuEdgeBuilders,
org.apache.camel.builder.endpoint.dsl.TahuHostEndpointBuilderFactory.TahuHostBuilders,
org.apache.camel.builder.endpoint.dsl.TelegramEndpointBuilderFactory.TelegramBuilders,
org.apache.camel.builder.endpoint.dsl.TensorFlowServingEndpointBuilderFactory.TensorFlowServingBuilders,
org.apache.camel.builder.endpoint.dsl.Textract2EndpointBuilderFactory.Textract2Builders,
org.apache.camel.builder.endpoint.dsl.ThriftEndpointBuilderFactory.ThriftBuilders,
org.apache.camel.builder.endpoint.dsl.ThymeleafEndpointBuilderFactory.ThymeleafBuilders,
org.apache.camel.builder.endpoint.dsl.TikaEndpointBuilderFactory.TikaBuilders,
org.apache.camel.builder.endpoint.dsl.TimerEndpointBuilderFactory.TimerBuilders,
org.apache.camel.builder.endpoint.dsl.Timestream2EndpointBuilderFactory.Timestream2Builders,
org.apache.camel.builder.endpoint.dsl.TorchServeEndpointBuilderFactory.TorchServeBuilders,
org.apache.camel.builder.endpoint.dsl.Transcribe2EndpointBuilderFactory.Transcribe2Builders,
org.apache.camel.builder.endpoint.dsl.Translate2EndpointBuilderFactory.Translate2Builders,
org.apache.camel.builder.endpoint.dsl.TwilioEndpointBuilderFactory.TwilioBuilders,
org.apache.camel.builder.endpoint.dsl.TwitterDirectMessageEndpointBuilderFactory.TwitterDirectMessageBuilders,
org.apache.camel.builder.endpoint.dsl.TwitterSearchEndpointBuilderFactory.TwitterSearchBuilders,
org.apache.camel.builder.endpoint.dsl.TwitterTimelineEndpointBuilderFactory.TwitterTimelineBuilders,
org.apache.camel.builder.endpoint.dsl.UndertowEndpointBuilderFactory.UndertowBuilders,
org.apache.camel.builder.endpoint.dsl.ValidatorEndpointBuilderFactory.ValidatorBuilders,
org.apache.camel.builder.endpoint.dsl.VelocityEndpointBuilderFactory.VelocityBuilders,
org.apache.camel.builder.endpoint.dsl.VertxEndpointBuilderFactory.VertxBuilders,
org.apache.camel.builder.endpoint.dsl.VertxHttpEndpointBuilderFactory.VertxHttpBuilders,
org.apache.camel.builder.endpoint.dsl.VertxWebsocketEndpointBuilderFactory.VertxWebsocketBuilders,
org.apache.camel.builder.endpoint.dsl.WasmEndpointBuilderFactory.WasmBuilders,
org.apache.camel.builder.endpoint.dsl.WatsonDiscoveryEndpointBuilderFactory.WatsonDiscoveryBuilders,
org.apache.camel.builder.endpoint.dsl.WatsonLanguageEndpointBuilderFactory.WatsonLanguageBuilders,
org.apache.camel.builder.endpoint.dsl.WatsonSpeechToTextEndpointBuilderFactory.WatsonSpeechToTextBuilders,
org.apache.camel.builder.endpoint.dsl.WatsonTextToSpeechEndpointBuilderFactory.WatsonTextToSpeechBuilders,
org.apache.camel.builder.endpoint.dsl.WeatherEndpointBuilderFactory.WeatherBuilders,
org.apache.camel.builder.endpoint.dsl.WeaviateVectorDbEndpointBuilderFactory.WeaviateVectorDbBuilders,
org.apache.camel.builder.endpoint.dsl.Web3jEndpointBuilderFactory.Web3jBuilders,
org.apache.camel.builder.endpoint.dsl.WebhookEndpointBuilderFactory.WebhookBuilders,
org.apache.camel.builder.endpoint.dsl.WhatsAppEndpointBuilderFactory.WhatsAppBuilders,
org.apache.camel.builder.endpoint.dsl.WordpressEndpointBuilderFactory.WordpressBuilders,
org.apache.camel.builder.endpoint.dsl.WorkdayEndpointBuilderFactory.WorkdayBuilders,
org.apache.camel.builder.endpoint.dsl.XChangeEndpointBuilderFactory.XChangeBuilders,
org.apache.camel.builder.endpoint.dsl.XJEndpointBuilderFactory.XJBuilders,
org.apache.camel.builder.endpoint.dsl.XQueryEndpointBuilderFactory.XQueryBuilders,
org.apache.camel.builder.endpoint.dsl.XmlSignerEndpointBuilderFactory.XmlSignerBuilders,
org.apache.camel.builder.endpoint.dsl.XmlVerifierEndpointBuilderFactory.XmlVerifierBuilders,
org.apache.camel.builder.endpoint.dsl.XmppEndpointBuilderFactory.XmppBuilders,
org.apache.camel.builder.endpoint.dsl.XsltEndpointBuilderFactory.XsltBuilders,
org.apache.camel.builder.endpoint.dsl.XsltSaxonEndpointBuilderFactory.XsltSaxonBuilders,
org.apache.camel.builder.endpoint.dsl.ZeebeEndpointBuilderFactory.ZeebeBuilders,
org.apache.camel.builder.endpoint.dsl.ZendeskEndpointBuilderFactory.ZendeskBuilders,
org.apache.camel.builder.endpoint.dsl.ZooKeeperEndpointBuilderFactory.ZooKeeperBuilders,
org.apache.camel.builder.endpoint.dsl.ZooKeeperMasterEndpointBuilderFactory.ZooKeeperMasterBuilders {
default org.apache.camel.Expression endpoints(
org.apache.camel.builder.EndpointProducerBuilder... endpoints) {
return new org.apache.camel.support.ExpressionAdapter() {
private List<org.apache.camel.Expression> expressions = null;
@Override
public Object evaluate(org.apache.camel.Exchange exchange) {
return expressions.stream().map(e -> e.evaluate(exchange, Object.class)).collect(Collectors.toList());
}
@Override
public void init(org.apache.camel.CamelContext context) {
super.init(context);
expressions = Stream.of(endpoints)
.map(epb -> epb.expr(context))
.collect(Collectors.toList());
}
};
}
} | EndpointBuilderFactory |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsRecordImpl.java | {
"start": 1217,
"end": 2753
} | class ____ extends AbstractMetricsRecord {
protected static final String DEFAULT_CONTEXT = "default";
private final long timestamp;
private final MetricsInfo info;
private final List<MetricsTag> tags;
private final Iterable<AbstractMetric> metrics;
/**
* Construct a metrics record
* @param info {@link MetricsInfo} of the record
* @param timestamp of the record
* @param tags of the record
* @param metrics of the record
*/
public MetricsRecordImpl(MetricsInfo info, long timestamp,
List<MetricsTag> tags,
Iterable<AbstractMetric> metrics) {
this.timestamp = checkArg(timestamp, timestamp > 0, "timestamp");
this.info = checkNotNull(info, "info");
this.tags = checkNotNull(tags, "tags");
this.metrics = checkNotNull(metrics, "metrics");
}
@Override public long timestamp() {
return timestamp;
}
@Override public String name() {
return info.name();
}
MetricsInfo info() {
return info;
}
@Override public String description() {
return info.description();
}
@Override public String context() {
// usually the first tag
for (MetricsTag t : tags) {
if (t.info() == MsInfo.Context) {
return t.value();
}
}
return DEFAULT_CONTEXT;
}
@Override
public List<MetricsTag> tags() {
return tags; // already unmodifiable from MetricsRecordBuilderImpl#tags
}
@Override public Iterable<AbstractMetric> metrics() {
return metrics;
}
}
| MetricsRecordImpl |
java | apache__flink | flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/expressions/resolver/rules/ExpandColumnFunctionsRule.java | {
"start": 6655,
"end": 12274
} | class ____
extends ApiExpressionDefaultVisitor<List<UnresolvedReferenceExpression>> {
private final List<UnresolvedReferenceExpression> inputFieldReferences;
public ColumnsExpressionExpander(List<UnresolvedReferenceExpression> inputFieldReferences) {
this.inputFieldReferences = inputFieldReferences;
}
@Override
public List<UnresolvedReferenceExpression> visit(FieldReferenceExpression fieldReference) {
return Collections.singletonList(unresolvedRef(fieldReference.getName()));
}
@Override
public List<UnresolvedReferenceExpression> visit(ValueLiteralExpression valueLiteral) {
return ExpressionUtils.extractValue(valueLiteral, Integer.class)
.map(i -> Collections.singletonList(inputFieldReferences.get(i - 1)))
.orElseGet(() -> defaultMethod(valueLiteral));
}
@Override
public List<UnresolvedReferenceExpression> visit(
UnresolvedReferenceExpression unresolvedReference) {
if (unresolvedReference.getName().equals("*")) {
return inputFieldReferences;
} else {
return Collections.singletonList(unresolvedReference);
}
}
@Override
public List<UnresolvedReferenceExpression> visit(UnresolvedCallExpression unresolvedCall) {
if (isIndexRangeCall(unresolvedCall)) {
int start =
ExpressionUtils.extractValue(
unresolvedCall.getChildren().get(0), Integer.class)
.orElseThrow(
() ->
new ValidationException(
"Constant integer value expected."));
int end =
ExpressionUtils.extractValue(
unresolvedCall.getChildren().get(1), Integer.class)
.orElseThrow(
() ->
new ValidationException(
"Constant integer value expected."));
Preconditions.checkArgument(
start <= end,
String.format(
"The start:%s of %s() or %s() should not bigger than end:%s.",
start, WITH_COLUMNS.getName(), WITHOUT_COLUMNS.getName(), end));
return inputFieldReferences.subList(start - 1, end);
} else if (isNameRangeCall(unresolvedCall)) {
String startName =
((UnresolvedReferenceExpression) unresolvedCall.getChildren().get(0))
.getName();
String endName =
((UnresolvedReferenceExpression) unresolvedCall.getChildren().get(1))
.getName();
int start = indexOfName(inputFieldReferences, startName);
int end = indexOfName(inputFieldReferences, endName);
Preconditions.checkArgument(
start <= end,
String.format(
"The start name:%s of %s() or %s() should not behind the end:%s.",
startName,
WITH_COLUMNS.getName(),
WITHOUT_COLUMNS.getName(),
endName));
return inputFieldReferences.subList(start, end + 1);
} else {
return defaultMethod(unresolvedCall);
}
}
@Override
protected List<UnresolvedReferenceExpression> defaultMethod(Expression expression) {
throw new ValidationException(
String.format(
"The parameters of %s() or %s() only accept column names or column indices.",
WITH_COLUMNS.getName(), WITHOUT_COLUMNS.getName()));
}
/** Whether the expression is a column index range expression, e.g. withColumns(1 ~ 2). */
private boolean isIndexRangeCall(UnresolvedCallExpression expression) {
return expression.getFunctionDefinition() == RANGE_TO
&& expression.getChildren().get(0) instanceof ValueLiteralExpression
&& expression.getChildren().get(1) instanceof ValueLiteralExpression;
}
/** Whether the expression is a column name range expression, e.g. withColumns(a ~ b). */
private boolean isNameRangeCall(UnresolvedCallExpression expression) {
return expression.getFunctionDefinition() == RANGE_TO
&& expression.getChildren().get(0) instanceof UnresolvedReferenceExpression
&& expression.getChildren().get(1) instanceof UnresolvedReferenceExpression;
}
}
/** Find the index of targetName in the list. Return -1 if not found. */
private static int indexOfName(
List<UnresolvedReferenceExpression> inputFieldReferences, String targetName) {
int i;
for (i = 0; i < inputFieldReferences.size(); ++i) {
if (inputFieldReferences.get(i).getName().equals(targetName)) {
break;
}
}
return i == inputFieldReferences.size() ? -1 : i;
}
}
| ColumnsExpressionExpander |
java | apache__hadoop | hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/MiniKerberizedHadoopCluster.java | {
"start": 2730,
"end": 12403
} | class ____ extends CompositeService {
private static final Logger LOG =
LoggerFactory.getLogger(MiniKerberizedHadoopCluster.class);
public static final String ALICE = "alice";
public static final String BOB = "bob";
public static final String HTTP_LOCALHOST = "HTTP/localhost@$LOCALHOST";
/**
* The hostname is dynamically determined based on OS, either
* "localhost" (non-windows) or 127.0.0.1 (windows).
*/
public static final String LOCALHOST_NAME = Path.WINDOWS
? "127.0.0.1"
: "localhost";
private MiniKdc kdc;
private File keytab;
private File workDir;
private String krbInstance;
private String loginUsername;
private String loginPrincipal;
private String sslConfDir;
private String clientSSLConfigFileName;
private String serverSSLConfigFileName;
private String alicePrincipal;
private String bobPrincipal;
/**
* Create the cluster.
* If this class's log is at DEBUG level, this also turns
* Kerberos diagnostics on in the JVM.
*/
public MiniKerberizedHadoopCluster() {
super("MiniKerberizedHadoopCluster");
// load all the configs to force in the -default.xml files
new HdfsConfiguration();
new YarnConfiguration();
new JobConf();
if (LOG.isDebugEnabled()) {
// turn on kerberos logging @ debug.
System.setProperty(KDiag.SUN_SECURITY_KRB5_DEBUG, "true");
System.setProperty(KDiag.SUN_SECURITY_SPNEGO_DEBUG, "true");
}
}
public MiniKdc getKdc() {
return kdc;
}
public File getKeytab() {
return keytab;
}
public String getKeytabPath() {
return keytab.getAbsolutePath();
}
public UserGroupInformation createBobUser() throws IOException {
return loginUserFromKeytabAndReturnUGI(bobPrincipal,
keytab.getAbsolutePath());
}
public UserGroupInformation createAliceUser() throws IOException {
return loginUserFromKeytabAndReturnUGI(alicePrincipal,
keytab.getAbsolutePath());
}
public File getWorkDir() {
return workDir;
}
public String getKrbInstance() {
return krbInstance;
}
public String getLoginUsername() {
return loginUsername;
}
public String getLoginPrincipal() {
return loginPrincipal;
}
public String withRealm(String user) {
return user + "@EXAMPLE.COM";
}
/**
* Service init creates the KDC.
* @param conf configuration
*/
@Override
protected void serviceInit(final Configuration conf) throws Exception {
patchConfigAtInit(conf);
super.serviceInit(conf);
Properties kdcConf = MiniKdc.createConf();
workDir = GenericTestUtils.getTestDir("kerberos");
workDir.mkdirs();
kdc = new MiniKdc(kdcConf, workDir);
krbInstance = LOCALHOST_NAME;
}
/**
* Start the KDC, create the keytab and the alice and bob users,
* and UGI instances of them logged in from the keytab.
*/
@Override
protected void serviceStart() throws Exception {
super.serviceStart();
kdc.start();
keytab = new File(workDir, "keytab.bin");
loginUsername = UserGroupInformation.getLoginUser().getShortUserName();
loginPrincipal = loginUsername + "/" + krbInstance;
alicePrincipal = ALICE + "/" + krbInstance;
bobPrincipal = BOB + "/" + krbInstance;
kdc.createPrincipal(keytab,
alicePrincipal,
bobPrincipal,
"HTTP/" + krbInstance,
HTTP_LOCALHOST,
loginPrincipal);
final File keystoresDir = new File(workDir, "ssl");
keystoresDir.mkdirs();
sslConfDir = KeyStoreTestUtil.getClasspathDir(
this.getClass());
KeyStoreTestUtil.setupSSLConfig(keystoresDir.getAbsolutePath(),
sslConfDir, getConfig(), false);
clientSSLConfigFileName = KeyStoreTestUtil.getClientSSLConfigFileName();
serverSSLConfigFileName = KeyStoreTestUtil.getServerSSLConfigFileName();
}
@Override
protected void serviceStop() throws Exception {
super.serviceStop();
// this can throw an exception, but it will get caught by the superclass.
kdc.stop();
}
protected void patchConfigAtInit(final Configuration conf) {
// turn off some noise during debugging
int timeout = 60 * 60_1000;
conf.setInt("jvm.pause.info-threshold.ms", timeout);
conf.setInt("jvm.pause.warn-threshold.ms", timeout);
}
/**
* Set up HDFS to run securely.
* In secure mode, HDFS goes out of its way to refuse to start if it
* doesn't consider the configuration safe.
* This is good in production, and it stops an HDFS cluster coming
* up where things can't reliably talk to each other.
* But it does complicate test setup.
* Look at {@code org.apache.hadoop.hdfs.TestDFSInotifyEventInputStreamKerberized}
* and {@code org.apache.hadoop.hdfs.qjournal.TestSecureNNWithQJM}
* for the details on what options to set here.
* @param conf configuration to patch.
*/
protected void patchConfigWithHDFSBindings(final Configuration conf) {
Preconditions.checkState(isInState(STATE.STARTED));
enableKerberos(conf);
String path = getKeytabPath();
String spnegoPrincipal = "*";
String localhost = LOCALHOST_NAME;
String instance = getKrbInstance();
String hdfsPrincipal = getLoginPrincipal();
patchConfigAtInit(conf);
conf.setLong(CommonConfigurationKeys.FS_DU_INTERVAL_KEY, Long.MAX_VALUE);
conf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
conf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, path);
conf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
conf.set(DFS_DATANODE_KEYTAB_FILE_KEY, path);
conf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, spnegoPrincipal);
conf.set(DFS_JOURNALNODE_KEYTAB_FILE_KEY, path);
conf.set(DFS_JOURNALNODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
conf.set(DFS_JOURNALNODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
spnegoPrincipal);
conf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
conf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "authentication");
conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
conf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
conf.set(DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
KeyStoreTestUtil.getClientSSLConfigFileName());
conf.set(DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
KeyStoreTestUtil.getServerSSLConfigFileName());
}
/**
* Patch the YARN settings.
* Note how the yarn principal has to include the realm.
* @param conf configuration to patch.
*/
protected void patchConfigWithYARNBindings(final Configuration conf) {
Preconditions.checkState(isInState(STATE.STARTED));
enableKerberos(conf);
patchConfigAtInit(conf);
String path = getKeytabPath();
String localhost = LOCALHOST_NAME;
String yarnPrincipal = withRealm(getLoginPrincipal());
conf.set(RM_PRINCIPAL, yarnPrincipal);
conf.set(RM_KEYTAB, path);
conf.set(RM_HOSTNAME, localhost);
conf.set(RM_BIND_HOST, localhost);
conf.set(RM_ADDRESS,
localhost + ":" + DEFAULT_RM_PORT);
conf.set(NM_PRINCIPAL, yarnPrincipal);
conf.set(NM_KEYTAB, path);
conf.set(NM_ADDRESS,
localhost + ":" + DEFAULT_NM_PORT);
conf.setBoolean(TIMELINE_SERVICE_ENABLED, false);
conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, false);
conf.set(JHAdminConfig.MR_HISTORY_KEYTAB, path);
conf.set(JHAdminConfig.MR_HISTORY_PRINCIPAL, yarnPrincipal);
conf.set(JHAdminConfig.MR_HISTORY_ADDRESS,
localhost + ":" + DEFAULT_MR_HISTORY_PORT);
conf.setBoolean(JHAdminConfig.MR_HISTORY_CLEANER_ENABLE, false);
conf.setInt(RM_AM_MAX_ATTEMPTS, 1);
conf.setInt(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS,
100);
conf.setInt(YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS,
10_000);
}
public void resetUGI() {
UserGroupInformation.reset();
}
/**
* Given a shortname, built a long name with the krb instance and realm info.
* @param shortname short name of the user
* @return a long name
*/
private String userOnHost(final String shortname) {
return shortname + "/" + krbInstance + "@" + getRealm();
}
public String getRealm() {
return kdc.getRealm();
}
/**
* Log in a user to UGI.currentUser.
* @param user user to log in from
* @throws IOException failure
*/
public void loginUser(final String user) throws IOException {
UserGroupInformation.loginUserFromKeytab(user, getKeytabPath());
}
/**
* Log in the login principal as the current user.
* @throws IOException failure
*/
public void loginPrincipal() throws IOException {
loginUser(getLoginPrincipal());
}
/**
* General assertion that security is turred on for a cluster.
*/
public static void assertSecurityEnabled() {
assertTrue(UserGroupInformation.isSecurityEnabled(),
"Security is needed for this test");
}
/**
* Close filesystems for a user, downgrading a null user to a no-op.
* @param ugi user
* @throws IOException if a close operation raised one.
*/
public static void closeUserFileSystems(UserGroupInformation ugi)
throws IOException {
if (ugi != null) {
FileSystem.closeAllForUGI(ugi);
}
}
/**
* Modify a configuration to use Kerberos as the auth method.
* @param conf configuration to patch.
*/
public static void enableKerberos(Configuration conf) {
conf.set(HADOOP_SECURITY_AUTHENTICATION,
UserGroupInformation.AuthenticationMethod.KERBEROS.name());
}
}
| MiniKerberizedHadoopCluster |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java | {
"start": 45661,
"end": 46470
} | class ____ extends SaslPropertiesResolver {
@Override
public Map<String, String> getServerProperties(InetAddress address) {
Map<String, String> newPropertes = new HashMap<String, String>(getDefaultProperties());
newPropertes.put(Sasl.QOP, QualityOfProtection.AUTHENTICATION.getSaslQop());
return newPropertes;
}
}
public static void main(String[] args) throws Exception {
System.out.println("Testing Kerberos authentication over RPC");
if (args.length != 2) {
System.err
.println("Usage: java <options> org.apache.hadoop.ipc.TestSaslRPC "
+ " <serverPrincipal> <keytabFile>");
System.exit(-1);
}
String principal = args[0];
String keytab = args[1];
testKerberosRpc(principal, keytab);
}
}
| AuthSaslPropertiesResolver |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/type/TypeFactoryTest.java | {
"start": 1861,
"end": 1964
} | class ____ {
public IntLongMap intMap;
public MyList longList;
}
static | SneakyBean |
java | apache__rocketmq | remoting/src/main/java/org/apache/rocketmq/remoting/protocol/header/QuerySubscriptionByConsumerRequestHeader.java | {
"start": 1525,
"end": 2112
} | class ____ extends TopicRequestHeader {
@CFNotNull
@RocketMQResource(ResourceType.GROUP)
private String group;
@RocketMQResource(ResourceType.TOPIC)
private String topic;
@Override
public void checkFields() throws RemotingCommandException {
}
public String getGroup() {
return group;
}
public void setGroup(String group) {
this.group = group;
}
public String getTopic() {
return topic;
}
public void setTopic(String topic) {
this.topic = topic;
}
}
| QuerySubscriptionByConsumerRequestHeader |
java | bumptech__glide | annotation/compiler/src/main/java/com/bumptech/glide/annotation/compiler/RequestManagerGenerator.java | {
"start": 15128,
"end": 16224
} | class ____ have just been generated and therefore may not be found if we try to obtain
// it via Elements, so use just the String version instead.
String generatedRequestOptionsQualifiedName =
generatedCodePackageName + "." + generatedRequestOptions.name;
String methodName = "setRequestOptions";
String parameterName = "toSet";
return MethodSpec.methodBuilder(methodName)
.addAnnotation(Override.class)
.addModifiers(Modifier.PROTECTED)
.addParameter(
ParameterSpec.builder(ClassName.get(requestOptionsType), parameterName)
.addAnnotation(processorUtil.nonNull())
.build())
.beginControlFlow(
"if ($N instanceof $L)", parameterName, generatedRequestOptionsQualifiedName)
.addStatement("super.$N($N)", methodName, parameterName)
.nextControlFlow("else")
.addStatement(
"super.setRequestOptions(new $L().apply($N))",
generatedRequestOptionsQualifiedName,
parameterName)
.endControlFlow()
.build();
}
}
| may |
java | apache__camel | components/camel-mybatis/src/main/java/org/apache/camel/component/mybatis/MyBatisBeanProducer.java | {
"start": 4277,
"end": 4709
} | class
____ = getEndpoint().getCamelContext().getClassResolver().resolveMandatoryClass(endpoint.getBeanName());
}
LOG.debug("Resolved MyBatis Bean: {} as class: {}", endpoint.getBeanName(), clazz);
// find the mapper
Object mapper = session.getMapper(clazz);
if (mapper == null) {
throw new IllegalArgumentException(
"No Mapper with typeAlias or | clazz |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/script/ScriptMetrics.java | {
"start": 608,
"end": 1978
} | class ____ {
final CounterMetric compilationLimitTriggered = new CounterMetric();
final TimeSeriesCounter compilations;
final TimeSeriesCounter cacheEvictions;
public ScriptMetrics(LongSupplier timeProvider) {
compilations = new TimeSeriesCounter(timeProvider);
cacheEvictions = new TimeSeriesCounter(timeProvider);
}
public void onCompilation() {
compilations.inc();
}
public void onCacheEviction() {
cacheEvictions.inc();
}
public void onCompilationLimit() {
compilationLimitTriggered.inc();
}
public ScriptStats stats() {
TimeSeries compilationsTimeSeries = compilations.timeSeries();
TimeSeries cacheEvictionsTimeSeries = cacheEvictions.timeSeries();
return new ScriptStats(
compilationsTimeSeries.total,
cacheEvictionsTimeSeries.total,
compilationLimitTriggered.count(),
compilationsTimeSeries,
cacheEvictionsTimeSeries
);
}
public ScriptContextStats stats(String context) {
TimeSeries compilationsTimeSeries = compilations.timeSeries();
TimeSeries cacheEvictionsTimeSeries = cacheEvictions.timeSeries();
return new ScriptContextStats(context, compilationLimitTriggered.count(), compilationsTimeSeries, cacheEvictionsTimeSeries);
}
}
| ScriptMetrics |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/OverridingMethodInconsistentArgumentNamesCheckerTest.java | {
"start": 2379,
"end": 2644
} | class ____ extends A {
@Override
void m(int p1, int p2) {}
}
""")
.doTest();
}
@Test
public void negative2() {
testHelper
.addSourceLines(
"A.java",
"""
| B |
java | elastic__elasticsearch | modules/reindex/src/main/java/org/elasticsearch/reindex/AsyncDeleteByQueryAction.java | {
"start": 1128,
"end": 2645
} | class ____ extends AbstractAsyncBulkByScrollAction<DeleteByQueryRequest, TransportDeleteByQueryAction> {
public AsyncDeleteByQueryAction(
BulkByScrollTask task,
Logger logger,
ParentTaskAssigningClient client,
ThreadPool threadPool,
DeleteByQueryRequest request,
ScriptService scriptService,
ActionListener<BulkByScrollResponse> listener
) {
super(task, false, true, false, logger, client, threadPool, request, listener, scriptService, null);
}
@Override
protected boolean accept(ScrollableHitSource.Hit doc) {
// Delete-by-query does not require the source to delete a document
// and the default implementation checks for it
return true;
}
@Override
protected RequestWrapper<DeleteRequest> buildRequest(ScrollableHitSource.Hit doc) {
DeleteRequest delete = new DeleteRequest();
delete.index(doc.getIndex());
delete.id(doc.getId());
delete.setIfSeqNo(doc.getSeqNo());
delete.setIfPrimaryTerm(doc.getPrimaryTerm());
return wrap(delete);
}
/**
* Overrides the parent's implementation is much more Update/Reindex oriented and so also copies things like timestamp/ttl which we
* don't care for a deletion.
*/
@Override
protected RequestWrapper<?> copyMetadata(RequestWrapper<?> request, ScrollableHitSource.Hit doc) {
request.setRouting(doc.getRouting());
return request;
}
}
| AsyncDeleteByQueryAction |
java | spring-projects__spring-boot | core/spring-boot-test/src/main/java/org/springframework/boot/test/json/GsonTester.java | {
"start": 1665,
"end": 2057
} | class ____<T> extends AbstractJsonMarshalTester<T> {
private final Gson gson;
/**
* Create a new uninitialized {@link GsonTester} instance.
* @param gson the Gson instance
*/
protected GsonTester(Gson gson) {
Assert.notNull(gson, "'gson' must not be null");
this.gson = gson;
}
/**
* Create a new {@link GsonTester} instance.
* @param resourceLoadClass the source | GsonTester |
java | spring-projects__spring-security | saml2/saml2-service-provider/src/main/java/org/springframework/security/saml2/provider/service/registration/IterableRelyingPartyRegistrationRepository.java | {
"start": 711,
"end": 988
} | interface ____ simplifies APIs which require the
* {@link RelyingPartyRegistrationRepository} to also be {@link Iterable}
*
* @author Josh Cummings
* @since 6.4
* @see InMemoryRelyingPartyRegistrationRepository
* @see CachingRelyingPartyRegistrationRepository
*/
public | that |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/refresh/RefreshHandler.java | {
"start": 1202,
"end": 1572
} | interface ____ to different case.
*
* <p>In continuous mode, the meta information maybe contains { "clusterType": "yarn", "clusterId":
* "xxx", "jobId": "yyyy" }.
*
* <p>In full mode, the meta information maybe contains { "endpoint": "xxx", "workflowId": "yyy" }.
* Due to user may use different workflow scheduler in this mode, user should implement this
* | according |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/procedure/StoreProcedureStatementsClosedTest.java | {
"start": 1055,
"end": 2920
} | class ____ extends BaseSessionFactoryFunctionalTest {
private final PreparedStatementSpyConnectionProvider connectionProvider = new PreparedStatementSpyConnectionProvider(
);
@Override
protected Class[] getAnnotatedClasses() {
return new Class[] {
SimpleEntity.class
};
}
@Override
protected void applySettings(StandardServiceRegistryBuilder builer) {
ConnectionProvider connectionProvider = (ConnectionProvider) builer.getSettings()
.get( AvailableSettings.CONNECTION_PROVIDER );
this.connectionProvider.setConnectionProvider( connectionProvider );
builer.applySetting( AvailableSettings.CONNECTION_PROVIDER, this.connectionProvider );
}
@BeforeEach
public void setUp() {
inTransaction(
session ->
session.createNativeQuery(
"CREATE ALIAS " + MyStoredProcedure.NAME + " FOR \"" + MyStoredProcedure.class.getName() + ".execute\"" )
.executeUpdate()
);
inTransaction(
session -> {
SimpleEntity entity = new SimpleEntity( "initial name" );
entity.setId( 1L );
session.persist( entity );
}
);
}
@AfterAll
public void tearDown() {
inTransaction(
session ->
session.createNativeQuery( "DROP ALIAS " + MyStoredProcedure.NAME ).executeUpdate()
);
connectionProvider.stop();
}
@Test
public void testIt() throws Exception {
inTransaction(
session -> {
StoredProcedureQuery storedProcedure = session.createStoredProcedureQuery( MyStoredProcedure.NAME );
storedProcedure.registerStoredProcedureParameter( 0, Long.class, ParameterMode.IN );
storedProcedure.setParameter( 0, 1L );
storedProcedure.execute();
storedProcedure.getSingleResult();
}
);
for ( PreparedStatement statement : connectionProvider.getPreparedStatements() ) {
assertTrue( statement.isClosed() );
}
}
public static | StoreProcedureStatementsClosedTest |
java | apache__camel | components/camel-joor/src/test/java/org/apache/camel/language/joor/CompilationUnitTest.java | {
"start": 1746,
"end": 2044
} | class ____ {
}
}
""",
"InnerClass");
}
@Test
void shouldSupportStaticNestedClass() {
compile(
"""
package com.foo;
| InnerClass |
java | google__dagger | javatests/dagger/functional/basic/BasicComponent.java | {
"start": 870,
"end": 3014
} | interface ____
extends Injector<Thing>,
// Implements two types that define the same method, not overridden here, to test that the
// method is implemented only once.
ComponentSupertypeOne,
ComponentSupertypeTwo {
byte getByte();
char getChar();
short getShort();
int getInt();
long getLong();
boolean getBoolean();
float getFloat();
double getDouble();
Byte getBoxedByte();
Character getBoxedChar();
Short getBoxedShort();
Integer getBoxedInt();
Long getBoxedLong();
Boolean getBoxedBoolean();
Float getBoxedFloat();
Double getBoxedDouble();
Provider<Byte> getByteProvider();
Provider<Character> getCharProvider();
Provider<Short> getShortProvider();
Provider<Integer> getIntProvider();
Provider<Long> getLongProvider();
Provider<Boolean> getBooleanProvider();
Provider<Float> getFloatProvider();
Provider<Double> getDoubleProvider();
byte[] getByteArray();
char[] getCharArray();
short[] getShortArray();
int[] getIntArray();
long[] getLongArray();
boolean[] getBooleanArray();
float[] getFloatArray();
double[] getDoubleArray();
Provider<byte[]> getByteArrayProvider();
Provider<char[]> getCharArrayProvider();
Provider<short[]> getShortArrayProvider();
Provider<int[]> getIntArrayProvider();
Provider<long[]> getLongArrayProvider();
Provider<boolean[]> getBooleanArrayProvider();
Provider<float[]> getFloatArrayProvider();
Provider<double[]> getDoubleArrayProvider();
Object noOpMembersInjection(Object obviouslyDoesNotHaveMembersToInject);
Thing thing();
InjectedThing injectedThing();
Provider<InjectedThing> injectedThingProvider();
Lazy<InjectedThing> lazyInjectedThing();
Provider<Lazy<InjectedThing>> lazyInjectedThingProvider();
MembersInjector<InjectedThing> injectedThingMembersInjector();
@Nullable Object nullObject();
Provider<Object> nullObjectProvider();
Lazy<Object> lazyNullObject();
TypeWithInheritedMembersInjection typeWithInheritedMembersInjection();
MembersInjector<TypeWithInheritedMembersInjection>
typeWithInheritedMembersInjectionMembersInjector();
}
| BasicComponent |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java | {
"start": 2805,
"end": 7108
} | class ____ {
private static final ShutdownHookManager MGR = new ShutdownHookManager();
private static final Logger LOG =
LoggerFactory.getLogger(ShutdownHookManager.class);
/** Minimum shutdown timeout: {@value} second(s). */
public static final long TIMEOUT_MINIMUM = 1;
/** The default time unit used: seconds. */
public static final TimeUnit TIME_UNIT_DEFAULT = TimeUnit.SECONDS;
private static final ExecutorService EXECUTOR =
HadoopExecutors.newSingleThreadExecutor(new ThreadFactoryBuilder()
.setDaemon(true)
.setNameFormat("shutdown-hook-%01d")
.build());
static {
try {
Runtime.getRuntime().addShutdownHook(
new SubjectInheritingThread() {
@Override
public void work() {
if (MGR.shutdownInProgress.getAndSet(true)) {
LOG.info("Shutdown process invoked a second time: ignoring");
return;
}
long started = System.currentTimeMillis();
int timeoutCount = MGR.executeShutdown();
long ended = System.currentTimeMillis();
LOG.debug(String.format(
"Completed shutdown in %.3f seconds; Timeouts: %d",
(ended-started)/1000.0, timeoutCount));
// each of the hooks have executed; now shut down the
// executor itself.
shutdownExecutor(new Configuration());
}
}
);
} catch (IllegalStateException ex) {
// JVM is being shut down. Ignore
LOG.warn("Failed to add the ShutdownHook", ex);
}
}
/**
* Execute the shutdown.
* This is exposed purely for testing: do not invoke it.
* @return the number of shutdown hooks which timed out.
*/
@InterfaceAudience.Private
@VisibleForTesting
int executeShutdown() {
int timeouts = 0;
for (HookEntry entry: getShutdownHooksInOrder()) {
Future<?> future = EXECUTOR.submit(entry.getHook());
try {
future.get(entry.getTimeout(), entry.getTimeUnit());
} catch (TimeoutException ex) {
timeouts++;
future.cancel(true);
LOG.warn("ShutdownHook '" + entry.getHook().getClass().
getSimpleName() + "' timeout, " + ex.toString(), ex);
} catch (Throwable ex) {
LOG.warn("ShutdownHook '" + entry.getHook().getClass().
getSimpleName() + "' failed, " + ex.toString(), ex);
}
}
return timeouts;
}
/**
* Shutdown the executor thread itself.
* @param conf the configuration containing the shutdown timeout setting.
*/
private static void shutdownExecutor(final Configuration conf) {
try {
EXECUTOR.shutdown();
long shutdownTimeout = getShutdownTimeout(conf);
if (!EXECUTOR.awaitTermination(
shutdownTimeout,
TIME_UNIT_DEFAULT)) {
// timeout waiting for the
LOG.error("ShutdownHookManager shutdown forcefully after"
+ " {} seconds.", shutdownTimeout);
EXECUTOR.shutdownNow();
}
LOG.debug("ShutdownHookManager completed shutdown.");
} catch (InterruptedException ex) {
// interrupted.
LOG.error("ShutdownHookManager interrupted while waiting for " +
"termination.", ex);
EXECUTOR.shutdownNow();
Thread.currentThread().interrupt();
}
}
/**
* Return <code>ShutdownHookManager</code> singleton.
*
* @return <code>ShutdownHookManager</code> singleton.
*/
@InterfaceAudience.Public
public static ShutdownHookManager get() {
return MGR;
}
/**
* Get the shutdown timeout in seconds, from the supplied
* configuration.
* @param conf configuration to use.
* @return a timeout, always greater than or equal to {@link #TIMEOUT_MINIMUM}
*/
@InterfaceAudience.Private
@VisibleForTesting
static long getShutdownTimeout(Configuration conf) {
long duration = conf.getTimeDuration(
SERVICE_SHUTDOWN_TIMEOUT,
SERVICE_SHUTDOWN_TIMEOUT_DEFAULT,
TIME_UNIT_DEFAULT);
if (duration < TIMEOUT_MINIMUM) {
duration = TIMEOUT_MINIMUM;
}
return duration;
}
/**
* Private structure to store ShutdownHook, its priority and timeout
* settings.
*/
@InterfaceAudience.Private
@VisibleForTesting
static | ShutdownHookManager |
java | micronaut-projects__micronaut-core | test-suite/src/test/java/io/micronaut/docs/events/factory/EngineFactory.java | {
"start": 801,
"end": 1187
} | class ____ {
private V8Engine engine;
private double rodLength = 5.7;
@PostConstruct
public void initialize() {
engine = new V8Engine(rodLength); // <2>
}
@Singleton
public Engine v8Engine() {
return engine;// <3>
}
public void setRodLength(double rodLength) {
this.rodLength = rodLength;
}
}
// end::class[]
| EngineFactory |
java | quarkusio__quarkus | extensions/devui/deployment/src/main/java/io/quarkus/devui/deployment/build/BuildMetricsDevUIProcessor.java | {
"start": 740,
"end": 1565
} | class ____ {
@BuildStep
@Record(RUNTIME_INIT)
public void create(BuildMetricsDevUIRecorder recorder,
BuildSystemTargetBuildItem buildSystemTarget) {
recorder.setBuildMetricsPath(buildSystemTarget.getOutputDirectory().resolve("build-metrics.json").toString());
}
@BuildStep
AdditionalBeanBuildItem additionalBeans() {
return AdditionalBeanBuildItem
.builder()
.addBeanClass(BuildMetricsJsonRPCService.class)
.setUnremovable()
.setDefaultScope(DotNames.APPLICATION_SCOPED)
.build();
}
@BuildStep
JsonRPCProvidersBuildItem createJsonRPCService() {
return new JsonRPCProvidersBuildItem("devui-build-metrics", BuildMetricsJsonRPCService.class);
}
}
| BuildMetricsDevUIProcessor |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.