language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/annotation/MissingMergedAnnotationTests.java | {
"start": 8161,
"end": 8247
} | enum ____ {
ONE, TWO, THREE
}
@Retention(RetentionPolicy.RUNTIME)
private @ | TestEnum |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/criteria/OnKeywordTest.java | {
"start": 2012,
"end": 2686
} | class ____ {
@Test
public void basicTest(EntityManagerFactoryScope scope) {
scope.inEntityManager(
entityManager -> {
CriteriaQuery<Order> criteria = entityManager.getCriteriaBuilder().createQuery( Order.class );
Root<Order> root = criteria.from( Order.class );
criteria.select( root );
CollectionJoin<Order, LineItem> lineItemsJoin = root.join( Order_.lineItems );
lineItemsJoin.on(
entityManager.getCriteriaBuilder().gt(
lineItemsJoin.get( LineItem_.quantity ),
entityManager.getCriteriaBuilder().literal( 20 )
)
);
entityManager.createQuery( criteria ).getResultList();
}
);
}
}
| OnKeywordTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/checkreturnvalue/CanIgnoreReturnValueSuggesterTest.java | {
"start": 15537,
"end": 16121
} | interface ____ {}
private String name;
@CanIgnoreReturnValue
public Client setName(String name) {
this.name = name;
return this;
}
}
""")
.expectUnchanged()
.doTest();
}
@Test
public void simpleCaseAlreadyAnnotatedWithCrv() {
helper
.addInputLines(
"Client.java",
"package com.google.frobber;",
"import com.google.errorprone.annotations.CheckReturnValue;",
"public final | CanIgnoreReturnValue |
java | spring-projects__spring-boot | module/spring-boot-health/src/main/java/org/springframework/boot/health/autoconfigure/actuate/endpoint/AutoConfiguredHealthEndpointGroup.java | {
"start": 1415,
"end": 3620
} | class ____ implements HealthEndpointGroup {
private final Predicate<String> members;
private final StatusAggregator statusAggregator;
private final HttpCodeStatusMapper httpCodeStatusMapper;
private final @Nullable Show showComponents;
private final Show showDetails;
private final Collection<String> roles;
private final @Nullable AdditionalHealthEndpointPath additionalPath;
/**
* Create a new {@link AutoConfiguredHealthEndpointGroup} instance.
* @param members a predicate used to test for group membership
* @param statusAggregator the status aggregator to use
* @param httpCodeStatusMapper the HTTP code status mapper to use
* @param showComponents the show components setting
* @param showDetails the show details setting
* @param roles the roles to match
* @param additionalPath the additional path to use for this group
*/
AutoConfiguredHealthEndpointGroup(Predicate<String> members, StatusAggregator statusAggregator,
HttpCodeStatusMapper httpCodeStatusMapper, @Nullable Show showComponents, Show showDetails,
Collection<String> roles, @Nullable AdditionalHealthEndpointPath additionalPath) {
this.members = members;
this.statusAggregator = statusAggregator;
this.httpCodeStatusMapper = httpCodeStatusMapper;
this.showComponents = showComponents;
this.showDetails = showDetails;
this.roles = roles;
this.additionalPath = additionalPath;
}
@Override
public boolean isMember(String name) {
return this.members.test(name);
}
@Override
public boolean showComponents(SecurityContext securityContext) {
Show show = (this.showComponents != null) ? this.showComponents : this.showDetails;
return show.isShown(securityContext, this.roles);
}
@Override
public boolean showDetails(SecurityContext securityContext) {
return this.showDetails.isShown(securityContext, this.roles);
}
@Override
public StatusAggregator getStatusAggregator() {
return this.statusAggregator;
}
@Override
public HttpCodeStatusMapper getHttpCodeStatusMapper() {
return this.httpCodeStatusMapper;
}
@Override
public @Nullable AdditionalHealthEndpointPath getAdditionalPath() {
return this.additionalPath;
}
}
| AutoConfiguredHealthEndpointGroup |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/cluster/routing/allocation/IndexMetadataUpdater.java | {
"start": 2047,
"end": 21323
} | class ____ implements RoutingChangesObserver {
private final Map<ShardId, Updates> shardChanges = new HashMap<>();
@Override
public void shardInitialized(ShardRouting unassignedShard, ShardRouting initializedShard) {
assert initializedShard.isRelocationTarget() == false : "shardInitialized is not called on relocation target: " + initializedShard;
if (initializedShard.primary()) {
increasePrimaryTerm(initializedShard.shardId());
Updates updates = changes(initializedShard.shardId());
assert updates.initializedPrimary == null
: "Primary cannot be initialized more than once in same allocation round: "
+ "(previous: "
+ updates.initializedPrimary
+ ", next: "
+ initializedShard
+ ")";
updates.initializedPrimary = initializedShard;
}
}
@Override
public void shardStarted(ShardRouting initializingShard, ShardRouting startedShard) {
assert Objects.equals(initializingShard.allocationId().getId(), startedShard.allocationId().getId())
: "initializingShard.allocationId ["
+ initializingShard.allocationId().getId()
+ "] and startedShard.allocationId ["
+ startedShard.allocationId().getId()
+ "] have to have the same";
if (startedShard.isPromotableToPrimary()) {
Updates updates = changes(startedShard.shardId());
updates.addedAllocationIds.add(startedShard.allocationId().getId());
if (startedShard.primary()
// started shard has to have null recoverySource; have to pick up recoverySource from its initializing state
&& (initializingShard.recoverySource() == RecoverySource.ExistingStoreRecoverySource.FORCE_STALE_PRIMARY_INSTANCE)) {
updates.removedAllocationIds.add(RecoverySource.ExistingStoreRecoverySource.FORCED_ALLOCATION_ID);
}
}
}
@Override
public void shardFailed(ShardRouting failedShard, UnassignedInfo unassignedInfo) {
if (failedShard.active() && failedShard.primary()) {
Updates updates = changes(failedShard.shardId());
if (updates.firstFailedPrimary == null) {
// more than one primary can be failed (because of batching, primary can be failed, replica promoted and then failed...)
updates.firstFailedPrimary = failedShard;
}
increasePrimaryTerm(failedShard.shardId());
}
}
@Override
public void relocationCompleted(ShardRouting removedRelocationSource) {
removeAllocationId(removedRelocationSource);
}
/**
* Updates the current {@link Metadata} based on the changes of this RoutingChangesObserver. Specifically
* we update {@link IndexMetadata#getInSyncAllocationIds()} and {@link IndexMetadata#primaryTerm(int)} based on
* the changes made during this allocation.
*
* @param oldMetadata {@link Metadata} object from before the routing nodes was changed.
* @param newRoutingTable {@link RoutingTable} object after routing changes were applied.
* @return adapted {@link Metadata}, potentially the original one if no change was needed.
*/
public Metadata applyChanges(Metadata oldMetadata, GlobalRoutingTable newRoutingTable) {
final Map<Index, List<Map.Entry<ShardId, Updates>>> changesGroupedByIndex = shardChanges.entrySet()
.stream()
.collect(Collectors.groupingBy(e -> e.getKey().getIndex()));
final Map<ProjectMetadata, List<Index>> indicesByProject = changesGroupedByIndex.keySet()
.stream()
.collect(Collectors.groupingBy(oldMetadata::projectFor));
final Metadata.Builder updatedMetadata = Metadata.builder(oldMetadata);
indicesByProject.forEach((projectMetadata, indices) -> {
final Map<String, IndexMetadata> updatedIndices = Maps.newHashMapWithExpectedSize(indices.size());
for (Index index : indices) {
var indexChanges = changesGroupedByIndex.get(index);
final IndexMetadata oldIndexMetadata = projectMetadata.getIndexSafe(index);
IndexMetadata updatedIndexMetadata = oldIndexMetadata;
for (Map.Entry<ShardId, Updates> shardEntry : indexChanges) {
ShardId shardId = shardEntry.getKey();
Updates updates = shardEntry.getValue();
updatedIndexMetadata = updateInSyncAllocations(
newRoutingTable.routingTable(projectMetadata.id()),
oldIndexMetadata,
updatedIndexMetadata,
shardId,
updates
);
IndexReshardingMetadata reshardingMetadata = updatedIndexMetadata.getReshardingMetadata();
boolean splitTarget = reshardingMetadata != null
&& reshardingMetadata.isSplit()
&& reshardingMetadata.getSplit().isTargetShard(shardId.id());
updatedIndexMetadata = updates.increaseTerm
? splitTarget
? updatedIndexMetadata.withSetPrimaryTerm(
shardId.id(),
splitPrimaryTerm(updatedIndexMetadata, reshardingMetadata, shardId)
)
: updatedIndexMetadata.withIncrementedPrimaryTerm(shardId.id())
: updatedIndexMetadata;
}
if (updatedIndexMetadata != oldIndexMetadata) {
updatedIndices.put(updatedIndexMetadata.getIndex().getName(), updatedIndexMetadata.withIncrementedVersion());
}
}
updatedMetadata.put(projectMetadata.withAllocationAndTermUpdatesOnly(updatedIndices));
});
return updatedMetadata.build();
}
private static long splitPrimaryTerm(IndexMetadata updatedIndexMetadata, IndexReshardingMetadata reshardingMetadata, ShardId shardId) {
// We take the max of the source and target primary terms. This guarantees that the target primary term stays
// greater than or equal to the source.
return Math.max(
updatedIndexMetadata.primaryTerm(reshardingMetadata.getSplit().sourceShard(shardId.id())),
updatedIndexMetadata.primaryTerm(shardId.id()) + 1
);
}
/**
* Updates in-sync allocations with routing changes that were made to the routing table.
*/
private static IndexMetadata updateInSyncAllocations(
RoutingTable newRoutingTable,
IndexMetadata oldIndexMetadata,
IndexMetadata updatedIndexMetadata,
ShardId shardId,
Updates updates
) {
assert Sets.haveEmptyIntersection(updates.addedAllocationIds, updates.removedAllocationIds)
: "allocation ids cannot be both added and removed in the same allocation round, added ids: "
+ updates.addedAllocationIds
+ ", removed ids: "
+ updates.removedAllocationIds;
Set<String> oldInSyncAllocationIds = oldIndexMetadata.inSyncAllocationIds(shardId.id());
// check if we have been force-initializing an empty primary or a stale primary
if (updates.initializedPrimary != null
&& oldInSyncAllocationIds.isEmpty() == false
&& oldInSyncAllocationIds.contains(updates.initializedPrimary.allocationId().getId()) == false) {
// we're not reusing an existing in-sync allocation id to initialize a primary, which means that we're either force-allocating
// an empty or a stale primary (see AllocateEmptyPrimaryAllocationCommand or AllocateStalePrimaryAllocationCommand).
RecoverySource recoverySource = updates.initializedPrimary.recoverySource();
RecoverySource.Type recoverySourceType = recoverySource.getType();
boolean emptyPrimary = recoverySourceType == RecoverySource.Type.EMPTY_STORE;
assert updates.addedAllocationIds.isEmpty()
: (emptyPrimary ? "empty" : "stale")
+ " primary is not force-initialized in same allocation round where shards are started";
if (emptyPrimary) {
// forcing an empty primary resets the in-sync allocations to the empty set (ShardRouting.allocatedPostIndexCreate)
updatedIndexMetadata = updatedIndexMetadata.withInSyncAllocationIds(shardId.id(), Set.of());
} else {
final String allocationId;
if (recoverySource == RecoverySource.ExistingStoreRecoverySource.FORCE_STALE_PRIMARY_INSTANCE) {
allocationId = RecoverySource.ExistingStoreRecoverySource.FORCED_ALLOCATION_ID;
updatedIndexMetadata = updatedIndexMetadata.withTimestampRanges(
updatedIndexMetadata.getTimestampRange().removeShard(shardId.id(), oldIndexMetadata.getNumberOfShards()),
updatedIndexMetadata.getEventIngestedRange().removeShard(shardId.id(), oldIndexMetadata.getNumberOfShards())
);
} else {
assert recoverySource instanceof RecoverySource.SnapshotRecoverySource
|| isStatelessIndexRecovery(oldIndexMetadata, recoverySource) : recoverySource;
allocationId = updates.initializedPrimary.allocationId().getId();
}
// forcing a stale primary resets the in-sync allocations to the singleton set with the stale id
updatedIndexMetadata = updatedIndexMetadata.withInSyncAllocationIds(shardId.id(), Set.of(allocationId));
}
} else {
// standard path for updating in-sync ids
Set<String> inSyncAllocationIds = new HashSet<>(oldInSyncAllocationIds);
inSyncAllocationIds.addAll(updates.addedAllocationIds);
inSyncAllocationIds.removeAll(updates.removedAllocationIds);
assert oldInSyncAllocationIds.contains(RecoverySource.ExistingStoreRecoverySource.FORCED_ALLOCATION_ID) == false
|| inSyncAllocationIds.contains(RecoverySource.ExistingStoreRecoverySource.FORCED_ALLOCATION_ID) == false
: "fake allocation id has to be removed, inSyncAllocationIds:" + inSyncAllocationIds;
// Prevent set of inSyncAllocationIds to grow unboundedly. This can happen for example if we don't write to a primary
// but repeatedly shut down nodes that have active replicas.
// We use number_of_replicas + 1 (= possible active shard copies) to bound the inSyncAllocationIds set
// Only trim the set of allocation ids when it grows, otherwise we might trim too eagerly when the number
// of replicas was decreased while shards were unassigned.
int maxActiveShards = oldIndexMetadata.getNumberOfReplicas() + 1; // +1 for the primary
IndexShardRoutingTable newShardRoutingTable = newRoutingTable.shardRoutingTable(shardId);
assert newShardRoutingTable.assignedShards()
.stream()
.filter(ShardRouting::isRelocationTarget)
.map(s -> s.allocationId().getId())
.noneMatch(inSyncAllocationIds::contains) : newShardRoutingTable.assignedShards() + " vs " + inSyncAllocationIds;
if (inSyncAllocationIds.size() > oldInSyncAllocationIds.size() && inSyncAllocationIds.size() > maxActiveShards) {
// trim entries that have no corresponding shard routing in the cluster state (i.e. trim unavailable copies)
List<ShardRouting> assignedShards = newShardRoutingTable.assignedShards()
.stream()
.filter(s -> s.isRelocationTarget() == false)
.toList();
assert assignedShards.size() <= maxActiveShards
: "cannot have more assigned shards " + assignedShards + " than maximum possible active shards " + maxActiveShards;
Set<String> assignedAllocations = assignedShards.stream().map(s -> s.allocationId().getId()).collect(Collectors.toSet());
inSyncAllocationIds = inSyncAllocationIds.stream()
.sorted(Comparator.comparing(assignedAllocations::contains).reversed()) // values with routing entries first
.limit(maxActiveShards)
.collect(Collectors.toSet());
}
// only remove allocation id of failed active primary if there is at least one active shard remaining. Assume for example that
// the primary fails but there is no new primary to fail over to. If we were to remove the allocation id of the primary from the
// in-sync set, this could create an empty primary on the next allocation.
if (newShardRoutingTable.activeShards().isEmpty() && updates.firstFailedPrimary != null) {
// add back allocation id of failed primary
inSyncAllocationIds.add(updates.firstFailedPrimary.allocationId().getId());
}
assert inSyncAllocationIds.isEmpty() == false || oldInSyncAllocationIds.isEmpty()
: "in-sync allocations cannot become empty after they have been non-empty: " + oldInSyncAllocationIds;
// be extra safe here and only update in-sync set if it is non-empty
if (inSyncAllocationIds.isEmpty() == false) {
updatedIndexMetadata = updatedIndexMetadata.withInSyncAllocationIds(shardId.id(), inSyncAllocationIds);
}
}
return updatedIndexMetadata;
}
private static boolean isStatelessIndexRecovery(IndexMetadata indexMetadata, RecoverySource recoverySource) {
var allocator = indexMetadata.getSettings().get(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING.getKey());
return Objects.equals(allocator, "stateless") && recoverySource instanceof RecoverySource.ExistingStoreRecoverySource;
}
/**
* Removes allocation ids from the in-sync set for shard copies for which there is no routing entries in the routing table.
* This method is called in AllocationService before any changes to the routing table are made.
*/
public static ClusterState removeStaleIdsWithoutRoutings(ClusterState clusterState, List<StaleShard> staleShards, Logger logger) {
final Metadata oldMetadata = clusterState.metadata();
Metadata.Builder metadataBuilder = null;
// group staleShards entries by index
final var staleShardsByIndex = staleShards.stream().collect(Collectors.groupingBy(fs -> fs.shardId().getIndex()));
// group indices by project
final var indicesByProject = staleShardsByIndex.keySet().stream().collect(Collectors.groupingBy(oldMetadata::projectFor));
for (Map.Entry<ProjectMetadata, List<Index>> projectEntry : indicesByProject.entrySet()) {
final ProjectMetadata oldProject = projectEntry.getKey();
final RoutingTable oldRoutingTable = clusterState.routingTable(oldProject.id());
ProjectMetadata.Builder projectBuilder = null;
for (Index index : projectEntry.getValue()) {
final IndexMetadata oldIndexMetadata = oldProject.getIndexSafe(index);
IndexMetadata.Builder indexMetadataBuilder = null;
// group staleShards entries by shard id
for (Map.Entry<ShardId, List<StaleShard>> shardEntry : staleShardsByIndex.get(index)
.stream()
.collect(Collectors.groupingBy(StaleShard::shardId))
.entrySet()) {
int shardNumber = shardEntry.getKey().getId();
Set<String> oldInSyncAllocations = oldIndexMetadata.inSyncAllocationIds(shardNumber);
Set<String> idsToRemove = shardEntry.getValue().stream().map(StaleShard::allocationId).collect(Collectors.toSet());
assert idsToRemove.stream().allMatch(id -> oldRoutingTable.getByAllocationId(shardEntry.getKey(), id) == null)
: "removing stale ids: " + idsToRemove + ", some of which have still a routing entry: " + oldRoutingTable;
Set<String> remainingInSyncAllocations = Sets.difference(oldInSyncAllocations, idsToRemove);
assert remainingInSyncAllocations.isEmpty() == false
: "Set of in-sync ids cannot become empty for shard "
+ shardEntry.getKey()
+ " (before: "
+ oldInSyncAllocations
+ ", ids to remove: "
+ idsToRemove
+ ")";
// be extra safe here: if the in-sync set were to become empty, this would create an empty primary on the next
// allocation (see ShardRouting#allocatedPostIndexCreate)
if (remainingInSyncAllocations.isEmpty() == false) {
if (indexMetadataBuilder == null) {
indexMetadataBuilder = IndexMetadata.builder(oldIndexMetadata);
}
indexMetadataBuilder.putInSyncAllocationIds(shardNumber, remainingInSyncAllocations);
}
logger.warn("{} marking unavailable shards as stale: {}", shardEntry.getKey(), idsToRemove);
}
if (indexMetadataBuilder != null) {
if (projectBuilder == null) {
projectBuilder = ProjectMetadata.builder(oldProject);
}
projectBuilder.put(indexMetadataBuilder);
}
}
if (projectBuilder != null) {
if (metadataBuilder == null) {
metadataBuilder = Metadata.builder(oldMetadata);
}
metadataBuilder.put(projectBuilder);
}
}
if (metadataBuilder != null) {
return ClusterState.builder(clusterState).metadata(metadataBuilder).build();
} else {
return clusterState;
}
}
/**
* Helper method that creates update entry for the given shard id if such an entry does not exist yet.
*/
private Updates changes(ShardId shardId) {
return shardChanges.computeIfAbsent(shardId, k -> new Updates());
}
/**
* Remove allocation id of this shard from the set of in-sync shard copies
*/
void removeAllocationId(ShardRouting shardRouting) {
if (shardRouting.active()) {
changes(shardRouting.shardId()).removedAllocationIds.add(shardRouting.allocationId().getId());
}
}
/**
* Increase primary term for this shard id
*/
private void increasePrimaryTerm(ShardId shardId) {
changes(shardId).increaseTerm = true;
}
private static | IndexMetadataUpdater |
java | spring-projects__spring-framework | spring-test/src/main/java/org/springframework/test/web/reactive/server/DefaultWebTestClient.java | {
"start": 18040,
"end": 20100
} | class ____<B, S extends BodySpec<B, S>> implements BodySpec<B, S> {
private final EntityExchangeResult<B> result;
DefaultBodySpec(EntityExchangeResult<B> result) {
this.result = result;
}
protected EntityExchangeResult<B> getResult() {
return this.result;
}
@Override
public <T extends S> T isEqualTo(@Nullable B expected) {
this.result.assertWithDiagnostics(() ->
AssertionErrors.assertEquals("Response body", expected, this.result.getResponseBody()));
return self();
}
@SuppressWarnings("removal")
@Override
public <T extends S> T value(Matcher<? super @Nullable B> matcher) {
this.result.assertWithDiagnostics(() -> MatcherAssert.assertThat(this.result.getResponseBody(), matcher));
return self();
}
@Override
@SuppressWarnings({"NullAway", "removal"}) // https://github.com/uber/NullAway/issues/1129
public <T extends S, R> T value(Function<@Nullable B, @Nullable R> bodyMapper, Matcher<? super @Nullable R> matcher) {
this.result.assertWithDiagnostics(() -> {
B body = this.result.getResponseBody();
MatcherAssert.assertThat(bodyMapper.apply(body), matcher);
});
return self();
}
@Override
public <T extends S> T value(Consumer<@Nullable B> consumer) {
this.result.assertWithDiagnostics(() -> consumer.accept(this.result.getResponseBody()));
return self();
}
@Override
public <T extends S, R> T value(@NonNull Function<@Nullable B, @Nullable R> bodyMapper, Consumer<? super R> consumer) {
this.result.assertWithDiagnostics(() -> {
B body = this.result.getResponseBody();
consumer.accept(bodyMapper.apply(body));
});
return self();
}
@Override
public <T extends S> T consumeWith(Consumer<EntityExchangeResult<B>> consumer) {
this.result.assertWithDiagnostics(() -> consumer.accept(this.result));
return self();
}
@SuppressWarnings("unchecked")
private <T extends S> T self() {
return (T) this;
}
@Override
public EntityExchangeResult<B> returnResult() {
return this.result;
}
}
private static | DefaultBodySpec |
java | spring-projects__spring-boot | core/spring-boot-autoconfigure/src/test/java/org/springframework/boot/autoconfigure/context/filtersample/ExampleFilteredAutoConfiguration.java | {
"start": 846,
"end": 945
} | class ____ {
@Bean
String anotherExample() {
return "fail";
}
}
| ExampleFilteredAutoConfiguration |
java | spring-projects__spring-boot | module/spring-boot-data-jpa/src/main/java/org/springframework/boot/data/jpa/autoconfigure/DataJpaRepositoriesAutoConfiguration.java | {
"start": 5637,
"end": 6238
} | class ____ implements ImportSelector {
private static final boolean ENVERS_AVAILABLE = ClassUtils.isPresent(
"org.springframework.data.envers.repository.config.EnableEnversRepositories",
JpaRepositoriesImportSelector.class.getClassLoader());
@Override
public String[] selectImports(AnnotationMetadata importingClassMetadata) {
return new String[] { determineImport() };
}
private String determineImport() {
return ENVERS_AVAILABLE ? EnversRevisionRepositoriesRegistrar.class.getName()
: DataJpaRepositoriesRegistrar.class.getName();
}
}
}
| JpaRepositoriesImportSelector |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/SerializeEnumAsJavaBeanTest_private.java | {
"start": 256,
"end": 1212
} | class ____ extends TestCase {
public void test_serializeEnumAsJavaBean() throws Exception {
String text = JSON.toJSONString(OrderType.PayOrder);
assertEquals("{\"remark\":\"支付订单\",\"value\":1}", text);
}
public void test_field() throws Exception {
Model model = new Model();
model.orderType = OrderType.SettleBill;
String text = JSON.toJSONString(model);
assertEquals("{\"orderType\":{\"remark\":\"结算单\",\"value\":2}}", text);
}
public void test_field_2() throws Exception {
Model model = new Model();
model.orderType = OrderType.SettleBill;
model.orderType1 = OrderType.SettleBill;
String text = JSON.toJSONString(model);
assertEquals("{\"orderType\":{\"remark\":\"结算单\",\"value\":2},\"orderType1\":{\"remark\":\"结算单\",\"value\":2}}", text);
}
@JSONType(serializeEnumAsJavaBean = true)
private static | SerializeEnumAsJavaBeanTest_private |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/collection/document/entity/TimelineEntityDocument.java | {
"start": 1722,
"end": 8175
} | class ____ implements
TimelineDocument<TimelineEntityDocument> {
private final TimelineEntity timelineEntity;
private TimelineContext context;
private String flowVersion;
private String subApplicationUser;
private final Map<String, Set<TimelineMetricSubDoc>>
metrics = new HashMap<>();
private final Map<String, Set<TimelineEventSubDoc>>
events = new HashMap<>();
public TimelineEntityDocument() {
timelineEntity = new TimelineEntity();
}
public TimelineEntityDocument(TimelineEntity timelineEntity) {
this.timelineEntity = timelineEntity;
transformEvents(timelineEntity.getEvents());
timelineMetrics(timelineEntity.getMetrics());
}
// transforms TimelineMetric to TimelineMetricSubDoc
private void timelineMetrics(Set<TimelineMetric> timelineMetrics) {
for (TimelineMetric timelineMetric : timelineMetrics) {
if (this.metrics.containsKey(timelineMetric.getId())) {
this.metrics.get(timelineMetric.getId()).add(
new TimelineMetricSubDoc(timelineMetric));
} else {
Set<TimelineMetricSubDoc> metricSet = new HashSet<>();
metricSet.add(new TimelineMetricSubDoc(timelineMetric));
this.metrics.put(timelineMetric.getId(), metricSet);
}
}
}
// transforms TimelineEvent to TimelineEventSubDoc
private void transformEvents(Set<TimelineEvent> timelineEvents) {
for (TimelineEvent timelineEvent : timelineEvents) {
if (this.events.containsKey(timelineEvent.getId())) {
this.events.get(timelineEvent.getId())
.add(new TimelineEventSubDoc(timelineEvent));
} else {
Set<TimelineEventSubDoc> eventSet = new HashSet<>();
eventSet.add(new TimelineEventSubDoc(timelineEvent));
this.events.put(timelineEvent.getId(), eventSet);
}
}
}
/**
* Merge the TimelineEntityDocument that is passed with the current
* document for upsert.
*
* @param newTimelineDocument
* that has to be merged
*/
@Override
public void merge(TimelineEntityDocument newTimelineDocument) {
if(newTimelineDocument.getCreatedTime() > 0) {
timelineEntity.setCreatedTime(newTimelineDocument.getCreatedTime());
}
setMetrics(newTimelineDocument.getMetrics());
setEvents(newTimelineDocument.getEvents());
timelineEntity.getInfo().putAll(newTimelineDocument.getInfo());
timelineEntity.getConfigs().putAll(newTimelineDocument.getConfigs());
timelineEntity.getIsRelatedToEntities().putAll(newTimelineDocument
.getIsRelatedToEntities());
timelineEntity.getRelatesToEntities().putAll(newTimelineDocument
.getRelatesToEntities());
}
@Override
public String getId() {
return timelineEntity.getId();
}
public void setId(String key) {
timelineEntity.setId(key);
}
public String getType() {
return timelineEntity.getType();
}
public void setType(String type) {
timelineEntity.setType(type);
}
public Map<String, Object> getInfo() {
timelineEntity.getInfo().put(TimelineReaderUtils.FROMID_KEY, getId());
return timelineEntity.getInfo();
}
public void setInfo(Map<String, Object> info) {
timelineEntity.setInfo(info);
}
public Map<String, Set<TimelineMetricSubDoc>> getMetrics() {
return metrics;
}
public void setMetrics(Map<String, Set<TimelineMetricSubDoc>> metrics) {
for (Map.Entry<String, Set<TimelineMetricSubDoc>> metricEntry :
metrics.entrySet()) {
final String metricId = metricEntry.getKey();
final Set<TimelineMetricSubDoc> metricValue = metricEntry.getValue();
for(TimelineMetricSubDoc metricSubDoc : metricValue) {
timelineEntity.addMetric(metricSubDoc.fetchTimelineMetric());
}
if (this.metrics.containsKey(metricId)) {
this.metrics.get(metricId).addAll(metricValue);
} else {
this.metrics.put(metricId, new HashSet<>(metricValue));
}
}
}
public Map<String, Set<TimelineEventSubDoc>> getEvents() {
return events;
}
public void setEvents(Map<String, Set<TimelineEventSubDoc>> events) {
for (Map.Entry<String, Set<TimelineEventSubDoc>> eventEntry :
events.entrySet()) {
final String eventId = eventEntry.getKey();
final Set<TimelineEventSubDoc> eventValue = eventEntry.getValue();
for(TimelineEventSubDoc eventSubDoc : eventValue) {
timelineEntity.addEvent(eventSubDoc.fetchTimelineEvent());
}
if (this.events.containsKey(eventId)) {
this.events.get(eventId).addAll(events.get(eventId));
} else {
this.events.put(eventId, new HashSet<>(eventValue));
}
}
}
public Map<String, String> getConfigs() {
return timelineEntity.getConfigs();
}
public void setConfigs(Map<String, String> configs) {
timelineEntity.setConfigs(configs);
}
public Map<String, Set<String>> getIsRelatedToEntities() {
return timelineEntity.getIsRelatedToEntities();
}
public void setIsRelatedToEntities(Map<String, Set<String>>
isRelatedToEntities) {
timelineEntity.setIsRelatedToEntities(isRelatedToEntities);
}
public Map<String, Set<String>> getRelatesToEntities() {
return timelineEntity.getRelatesToEntities();
}
public void setRelatesToEntities(Map<String, Set<String>> relatesToEntities) {
timelineEntity.setRelatesToEntities(relatesToEntities);
}
public String getFlowVersion() {
return flowVersion;
}
public void setFlowVersion(String flowVersion) {
this.flowVersion = flowVersion;
}
public void setIdentifier(TimelineEntity.Identifier identifier) {
timelineEntity.setIdentifier(identifier);
}
public void setIdPrefix(long idPrefix) {
timelineEntity.setIdPrefix(idPrefix);
}
public String getSubApplicationUser() {
return subApplicationUser;
}
public void setSubApplicationUser(String subApplicationUser) {
this.subApplicationUser = subApplicationUser;
}
public long getCreatedTime() {
if (timelineEntity.getCreatedTime() == null) {
return 0;
}
return timelineEntity.getCreatedTime();
}
public void setCreatedTime(long createdTime) {
timelineEntity.setCreatedTime(createdTime);
}
public TimelineContext getContext() {
return context;
}
public void setContext(TimelineContext context) {
this.context = context;
}
public TimelineEntity fetchTimelineEntity() {
return timelineEntity;
}
} | TimelineEntityDocument |
java | spring-projects__spring-boot | module/spring-boot-restclient/src/test/java/org/springframework/boot/restclient/autoconfigure/service/HttpServiceClientAutoConfigurationTests.java | {
"start": 13369,
"end": 13445
} | interface ____ {
@GetExchange("/hello")
String hello();
}
| TestClientOne |
java | alibaba__nacos | naming/src/main/java/com/alibaba/nacos/naming/core/v2/metadata/ServiceMetadataProcessor.java | {
"start": 1800,
"end": 7266
} | class ____ extends RequestProcessor4CP {
private final NamingMetadataManager namingMetadataManager;
private final ServiceStorage serviceStorage;
private final Serializer serializer;
private final Type processType;
private final ReentrantReadWriteLock lock;
private final ReentrantReadWriteLock.ReadLock readLock;
@SuppressWarnings("unchecked")
public ServiceMetadataProcessor(NamingMetadataManager namingMetadataManager, ProtocolManager protocolManager,
ServiceStorage serviceStorage) {
this.namingMetadataManager = namingMetadataManager;
this.serviceStorage = serviceStorage;
this.serializer = SerializeFactory.getDefault();
this.processType = TypeUtils.parameterize(MetadataOperation.class, ServiceMetadata.class);
this.lock = new ReentrantReadWriteLock();
this.readLock = lock.readLock();
protocolManager.getCpProtocol().addRequestProcessors(Collections.singletonList(this));
}
@Override
public List<SnapshotOperation> loadSnapshotOperate() {
return Collections.singletonList(new ServiceMetadataSnapshotOperation(namingMetadataManager, lock));
}
@Override
public Response onRequest(ReadRequest request) {
return null;
}
@Override
public Response onApply(WriteRequest request) {
readLock.lock();
try {
MetadataOperation<ServiceMetadata> op = serializer.deserialize(request.getData().toByteArray(), processType);
switch (DataOperation.valueOf(request.getOperation())) {
case ADD:
addClusterMetadataToService(op);
break;
case CHANGE:
updateServiceMetadata(op);
break;
case DELETE:
deleteServiceMetadata(op);
break;
default:
return Response.newBuilder().setSuccess(false)
.setErrMsg("Unsupported operation " + request.getOperation()).build();
}
return Response.newBuilder().setSuccess(true).build();
} catch (Exception e) {
Loggers.RAFT.error("onApply {} service metadata operation failed. ", request.getOperation(), e);
String errorMessage = null == e.getMessage() ? e.getClass().getName() : e.getMessage();
return Response.newBuilder().setSuccess(false).setErrMsg(errorMessage).build();
} finally {
readLock.unlock();
}
}
private void addClusterMetadataToService(MetadataOperation<ServiceMetadata> op) {
Service service = Service
.newService(op.getNamespace(), op.getGroup(), op.getServiceName(), op.getMetadata().isEphemeral());
Optional<ServiceMetadata> currentMetadata = namingMetadataManager.getServiceMetadata(service);
if (currentMetadata.isPresent()) {
currentMetadata.get().getClusters().putAll(op.getMetadata().getClusters());
} else {
Service singleton = ServiceManager.getInstance().getSingleton(service);
namingMetadataManager.updateServiceMetadata(singleton, op.getMetadata());
}
}
private void updateServiceMetadata(MetadataOperation<ServiceMetadata> op) {
Service service = Service
.newService(op.getNamespace(), op.getGroup(), op.getServiceName(), op.getMetadata().isEphemeral());
Optional<ServiceMetadata> currentMetadata = namingMetadataManager.getServiceMetadata(service);
if (currentMetadata.isPresent()) {
ServiceMetadata newMetadata = mergeMetadata(currentMetadata.get(), op.getMetadata());
Service singleton = ServiceManager.getInstance().getSingleton(service);
namingMetadataManager.updateServiceMetadata(singleton, newMetadata);
} else {
Service singleton = ServiceManager.getInstance().getSingleton(service);
namingMetadataManager.updateServiceMetadata(singleton, op.getMetadata());
}
}
/**
* Do not modified old metadata directly to avoid read half status.
*
* <p>Ephemeral variable should only use the value the metadata create.
*
* @param oldMetadata old metadata
* @param newMetadata new metadata
* @return merged metadata
*/
private ServiceMetadata mergeMetadata(ServiceMetadata oldMetadata, ServiceMetadata newMetadata) {
ServiceMetadata result = new ServiceMetadata();
result.setEphemeral(oldMetadata.isEphemeral());
result.setClusters(oldMetadata.getClusters());
result.setProtectThreshold(newMetadata.getProtectThreshold());
result.setSelector(newMetadata.getSelector());
result.setExtendData(newMetadata.getExtendData());
return result;
}
private void deleteServiceMetadata(MetadataOperation<ServiceMetadata> op) {
Service service = Service.newService(op.getNamespace(), op.getGroup(), op.getServiceName());
namingMetadataManager.removeServiceMetadata(service);
Service removed = ServiceManager.getInstance().removeSingleton(service);
if (removed != null) {
service = removed;
}
serviceStorage.removeData(service);
}
@Override
public String group() {
return Constants.SERVICE_METADATA;
}
}
| ServiceMetadataProcessor |
java | spring-projects__spring-framework | spring-core/src/testFixtures/java/org/springframework/core/testfixture/aot/generator/visibility/ProtectedGenericParameter.java | {
"start": 729,
"end": 833
} | class ____ {
public ProtectedGenericParameter(List<ProtectedType> types) {
}
}
| ProtectedGenericParameter |
java | apache__hadoop | hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java | {
"start": 1240,
"end": 1619
} | class ____ starting mountd daemon. This daemon implements the NFS
* mount protocol. When receiving a MOUNT request from an NFS client, it checks
* the request against the list of currently exported file systems. If the
* client is permitted to mount the file system, rpc.mountd obtains a file
* handle for requested directory and returns it to the client.
*/
abstract public | for |
java | apache__camel | components/camel-ai/camel-chatscript/src/generated/java/org/apache/camel/component/chatscript/ChatScriptEndpointConfigurer.java | {
"start": 737,
"end": 2592
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
ChatScriptEndpoint target = (ChatScriptEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "chatusername":
case "chatUserName": target.setChatUserName(property(camelContext, java.lang.String.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
case "resetchat":
case "resetChat": target.setResetChat(property(camelContext, boolean.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "chatusername":
case "chatUserName": return java.lang.String.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
case "resetchat":
case "resetChat": return boolean.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
ChatScriptEndpoint target = (ChatScriptEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "chatusername":
case "chatUserName": return target.getChatUserName();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
case "resetchat":
case "resetChat": return target.isResetChat();
default: return null;
}
}
}
| ChatScriptEndpointConfigurer |
java | google__guava | android/guava-tests/test/com/google/common/reflect/TypeTokenTest.java | {
"start": 58318,
"end": 59341
} | class ____<V2> extends InnerType<String, V2> {}
}
public void testGetSubtype_innerTypeOfGenericClassTranslatesOwnerTypeVars() {
TypeToken<TwoTypeArgs<?, ?>.InnerType<?, ?>> supertype =
new TypeToken<TwoTypeArgs<?, ?>.InnerType<?, ?>>() {};
TypeToken<StringForFirstTypeArg<Integer>.StringInnerType<Long>> subtype =
new TypeToken<StringForFirstTypeArg<Integer>.StringInnerType<Long>>() {};
assertTrue(subtype.isSubtypeOf(supertype));
ParameterizedType actualSubtype =
(ParameterizedType) supertype.getSubtype(subtype.getRawType()).getType();
assertEquals(StringForFirstTypeArg.StringInnerType.class, actualSubtype.getRawType());
assertThat(actualSubtype.getActualTypeArguments()[0]).isInstanceOf(WildcardType.class);
ParameterizedType actualOwnerType = (ParameterizedType) actualSubtype.getOwnerType();
assertEquals(StringForFirstTypeArg.class, actualOwnerType.getRawType());
}
public void testGetSubtype_outerTypeVarTranslatesInnerTypeVar() {
| StringInnerType |
java | lettuce-io__lettuce-core | src/test/java/biz/paluch/redis/extensibility/LettuceGeoDemo.java | {
"start": 168,
"end": 2100
} | class ____ {
public static void main(String[] args) {
RedisClient redisClient = RedisClient.create(RedisURI.Builder.redis("localhost", 6379).build());
RedisCommands<String, String> redis = redisClient.connect().sync();
String key = "my-geo-set";
redis.geoadd(key, 8.6638775, 49.5282537, "Weinheim", 8.3796281, 48.9978127, "Office tower", 8.665351, 49.553302,
"Train station");
Set<String> georadius = redis.georadius(key, 8.6582861, 49.5285695, 5, GeoArgs.Unit.km);
System.out.println("Geo Radius: " + georadius);
// georadius contains "Weinheim" and "Train station"
Double distance = redis.geodist(key, "Weinheim", "Train station", GeoArgs.Unit.km);
System.out.println("Distance: " + distance + " km");
// distance ≈ 2.78km
GeoArgs geoArgs = new GeoArgs().withHash().withCoordinates().withDistance().withCount(2).asc();
List<GeoWithin<String>> georadiusWithArgs = redis.georadius(key, 8.665351, 49.5285695, 5, GeoArgs.Unit.km, geoArgs);
// georadiusWithArgs contains "Weinheim" and "Train station"
// ordered descending by distance and containing distance/coordinates
GeoWithin<String> weinheim = georadiusWithArgs.get(0);
System.out.println("Member: " + weinheim.getMember());
System.out.println("Geo hash: " + weinheim.getGeohash());
System.out.println("Distance: " + weinheim.getDistance());
System.out.println("Coordinates: " + weinheim.getCoordinates().getX() + "/" + weinheim.getCoordinates().getY());
List<GeoCoordinates> geopos = redis.geopos(key, "Weinheim", "Train station");
GeoCoordinates weinheimGeopos = geopos.get(0);
System.out.println("Coordinates: " + weinheimGeopos.getX() + "/" + weinheimGeopos.getY());
redis.getStatefulConnection().close();
redisClient.shutdown();
}
}
| LettuceGeoDemo |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/submitted/discriminator/Mapper.java | {
"start": 733,
"end": 924
} | interface ____ {
List<Vehicle> selectVehicles();
List<Owner> selectOwnersWithAVehicle();
List<Owner> selectOwnersWithAVehicleConstructor();
List<Contract> selectContracts();
}
| Mapper |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/context/properties/bind/ArrayBinderTests.java | {
"start": 11555,
"end": 11950
} | class ____<T> implements Answer<T> {
private final int index;
private InvocationArgument(int index) {
this.index = index;
}
@Override
public T answer(InvocationOnMock invocation) throws Throwable {
return invocation.getArgument(this.index);
}
private static <T> InvocationArgument<T> index(int index) {
return new InvocationArgument<>(index);
}
}
}
| InvocationArgument |
java | mockito__mockito | mockito-core/src/main/java/org/mockito/internal/creation/instance/ObjenesisInstantiator.java | {
"start": 495,
"end": 944
} | class ____ tries to instantiate ObjenesisStd and if it fails then
// show decent exception that dependency is missing
// TODO: for the same reason catch and give better feedback when hamcrest core is not found.
private final ObjenesisStd objenesis =
new ObjenesisStd(new GlobalConfiguration().enableClassCache());
@Override
public <T> T newInstance(Class<T> cls) {
return objenesis.newInstance(cls);
}
}
| that |
java | netty__netty | codec-http3/src/test/java/io/netty/handler/codec/http3/Http3PushStreamServerValidationHandlerTest.java | {
"start": 856,
"end": 1644
} | class ____ extends
AbstractHttp3FrameTypeValidationHandlerTest<Http3PushStreamFrame> {
public Http3PushStreamServerValidationHandlerTest() {
super(QuicStreamType.UNIDIRECTIONAL, false, true);
}
@Override
protected ChannelHandler newHandler(boolean server) {
return Http3PushStreamServerValidationHandler.INSTANCE;
}
@Override
protected List<Http3PushStreamFrame> newValidFrames() {
return Arrays.asList(new DefaultHttp3HeadersFrame(), new DefaultHttp3DataFrame(Unpooled.EMPTY_BUFFER));
}
@Override
protected List<Http3Frame> newInvalidFrames() {
return Arrays.asList(Http3TestUtils.newHttp3RequestStreamFrame(), Http3TestUtils.newHttp3ControlStreamFrame());
}
}
| Http3PushStreamServerValidationHandlerTest |
java | apache__dubbo | dubbo-plugin/dubbo-qos/src/test/java/org/apache/dubbo/qos/legacy/LogTelnetHandlerTest.java | {
"start": 1163,
"end": 1890
} | class ____ {
private static TelnetHandler log = new LogTelnetHandler();
private Channel mockChannel;
@Test
void testChangeLogLevel() throws RemotingException {
mockChannel = mock(Channel.class);
String result = log.telnet(mockChannel, "error");
assertTrue(result.contains("\r\nCURRENT LOG LEVEL:ERROR"));
String result2 = log.telnet(mockChannel, "warn");
assertTrue(result2.contains("\r\nCURRENT LOG LEVEL:WARN"));
}
@Test
void testPrintLog() throws RemotingException {
mockChannel = mock(Channel.class);
String result = log.telnet(mockChannel, "100");
assertTrue(result.contains("CURRENT LOG APPENDER"));
}
}
| LogTelnetHandlerTest |
java | apache__logging-log4j2 | log4j-core-test/src/test/java/org/apache/logging/log4j/core/util/ShutdownCallbackRegistryTest.java | {
"start": 2757,
"end": 3896
} | class ____ implements ShutdownCallbackRegistry {
private static final Logger LOGGER = StatusLogger.getLogger();
private static final Collection<Cancellable> CALLBACKS = new ConcurrentLinkedQueue<>();
@Override
public Cancellable addShutdownCallback(final Runnable callback) {
final Cancellable cancellable = new Cancellable() {
@Override
public void cancel() {
LOGGER.debug("Cancelled shutdown callback: {}", callback);
CALLBACKS.remove(this);
}
@Override
public void run() {
LOGGER.debug("Called shutdown callback: {}", callback);
callback.run();
}
};
CALLBACKS.add(cancellable);
return cancellable;
}
private static void shutdown() {
for (final Runnable callback : CALLBACKS) {
LOGGER.debug("Calling shutdown callback: {}", callback);
callback.run();
}
CALLBACKS.clear();
}
}
}
| Registry |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/UnnecessaryStringBuilder.java | {
"start": 2779,
"end": 7767
} | class ____ extends BugChecker implements NewClassTreeMatcher {
private static final Matcher<ExpressionTree> MATCHER =
constructor().forClass("java.lang.StringBuilder");
private static final Matcher<ExpressionTree> APPEND =
instanceMethod().onExactClass("java.lang.StringBuilder").named("append");
private static final Matcher<ExpressionTree> TO_STRING =
instanceMethod().onExactClass("java.lang.StringBuilder").named("toString");
@Override
public Description matchNewClass(NewClassTree tree, VisitorState state) {
if (!MATCHER.matches(tree, state)) {
return NO_MATCH;
}
List<ExpressionTree> parts = new ArrayList<>();
switch (tree.getArguments().size()) {
case 0 -> {}
case 1 -> {
ExpressionTree argument = getOnlyElement(tree.getArguments());
if (isSubtype(getType(argument), JAVA_LANG_CHARSEQUENCE.get(state), state)) {
parts.add(argument);
}
}
default -> {
return NO_MATCH;
}
}
TreePath path = state.getPath();
while (true) {
TreePath parentPath = path.getParentPath();
if (!(parentPath.getLeaf() instanceof MemberSelectTree)) {
break;
}
TreePath grandParent = parentPath.getParentPath();
if (!(grandParent.getLeaf() instanceof MethodInvocationTree methodInvocationTree)) {
break;
}
if (!methodInvocationTree.getMethodSelect().equals(parentPath.getLeaf())) {
break;
}
if (APPEND.matches(methodInvocationTree, state)) {
if (methodInvocationTree.getArguments().size() != 1) {
// an append method that doesn't transliterate to concat
return NO_MATCH;
}
parts.add(getOnlyElement(methodInvocationTree.getArguments()));
path = parentPath.getParentPath();
} else if (TO_STRING.matches(methodInvocationTree, state)) {
return describeMatch(
methodInvocationTree,
SuggestedFix.replace(methodInvocationTree, replacement(state, parts)));
} else {
// another instance method on StringBuilder
return NO_MATCH;
}
}
TargetType target = TargetType.targetType(state.withPath(path));
if (target == null) {
return NO_MATCH;
}
if (!isUsedAsStringBuilder(state, target)) {
return describeMatch(
path.getLeaf(), SuggestedFix.replace(path.getLeaf(), replacement(state, parts)));
}
Tree leaf = target.path().getLeaf();
if (leaf instanceof VariableTree variableTree) {
if (isRewritableVariable(variableTree, state)) {
SuggestedFix.Builder fix = SuggestedFix.builder();
if (!hasImplicitType(variableTree, state)) {
// If the variable is declared with `var`, there's no declaration type to change
fix.replace(variableTree.getType(), "String");
}
fix.replace(variableTree.getInitializer(), replacement(state, parts));
return describeMatch(variableTree, fix.build());
}
}
return NO_MATCH;
}
/**
* Returns true if the StringBuilder is assigned to a variable, and the type of the variable can
* safely be refactored to be a String.
*/
boolean isRewritableVariable(VariableTree variableTree, VisitorState state) {
Symbol sym = getSymbol(variableTree);
if (!sym.getKind().equals(ElementKind.LOCAL_VARIABLE)) {
return false;
}
boolean[] ok = {true};
new TreePathScanner<Void, Void>() {
@Override
public Void visitIdentifier(IdentifierTree tree, Void unused) {
if (sym.equals(getSymbol(tree))) {
TargetType target = targetType(state.withPath(getCurrentPath()));
if (isUsedAsStringBuilder(state, target)) {
ok[0] = false;
}
}
return super.visitIdentifier(tree, null);
}
}.scan(state.getPath().getCompilationUnit(), null);
return ok[0];
}
private static boolean isUsedAsStringBuilder(VisitorState state, TargetType target) {
if (target.path().getLeaf() instanceof MemberReferenceTree) {
// e.g. sb::append
return true;
}
return ASTHelpers.isSubtype(target.type(), JAVA_LANG_APPENDABLE.get(state), state);
}
private static String replacement(VisitorState state, List<ExpressionTree> parts) {
if (parts.isEmpty()) {
return "\"\"";
}
return parts.stream()
.map(
x -> {
String source = state.getSourceForNode(x);
if (requiresParentheses(x, state)) {
source = String.format("(%s)", source);
}
return source;
})
.collect(joining(" + "));
}
private static final Supplier<Type> JAVA_LANG_APPENDABLE =
VisitorState.memoize(state -> state.getTypeFromString("java.lang.Appendable"));
private static final Supplier<Type> JAVA_LANG_CHARSEQUENCE =
VisitorState.memoize(state -> state.getTypeFromString("java.lang.CharSequence"));
}
| UnnecessaryStringBuilder |
java | quarkusio__quarkus | extensions/keycloak-admin-client-common/deployment/src/test/java/io/quarkus/keycloak/admin/client/common/deployment/test/ConfigValidationTest.java | {
"start": 2733,
"end": 4081
} | class ____ implements KeycloakAdminClientConfig {
private Optional<String> password;
private Optional<String> username;
private Optional<String> clientSecret;
private Optional<String> scope;
private Optional<String> serverUrl;
private String realm;
private String clientId;
private KeycloakAdminClientConfig.GrantType grantType;
@Override
public Optional<String> serverUrl() {
return serverUrl;
}
@Override
public String realm() {
return realm;
}
@Override
public String clientId() {
return clientId;
}
@Override
public Optional<String> clientSecret() {
return clientSecret;
}
@Override
public Optional<String> username() {
return username;
}
@Override
public Optional<String> password() {
return password;
}
@Override
public Optional<String> scope() {
return scope;
}
@Override
public GrantType grantType() {
return grantType;
}
@Override
public Optional<String> tlsConfigurationName() {
return Optional.empty();
}
}
}
| KeycloakAdminClientConfigImpl |
java | elastic__elasticsearch | x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/SamlValidateAuthnRequestResponse.java | {
"start": 487,
"end": 2201
} | class ____ extends ActionResponse {
private final String spEntityId;
private final String assertionConsumerService;
private final boolean forceAuthn;
private final Map<String, Object> authnState;
public SamlValidateAuthnRequestResponse(String spEntityId, String acs, boolean forceAuthn, Map<String, Object> authnState) {
this.spEntityId = Objects.requireNonNull(spEntityId, "spEntityId is required for successful responses");
this.assertionConsumerService = Objects.requireNonNull(acs, "ACS is required for successful responses");
this.forceAuthn = forceAuthn;
this.authnState = Map.copyOf(Objects.requireNonNull(authnState));
}
public String getSpEntityId() {
return spEntityId;
}
public String getAssertionConsumerService() {
return assertionConsumerService;
}
public boolean isForceAuthn() {
return forceAuthn;
}
public Map<String, Object> getAuthnState() {
return authnState;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(spEntityId);
out.writeString(assertionConsumerService);
out.writeBoolean(forceAuthn);
out.writeGenericMap(authnState);
}
@Override
public String toString() {
return getClass().getSimpleName()
+ "{ spEntityId='"
+ getSpEntityId()
+ "',\n"
+ " acs='"
+ getAssertionConsumerService()
+ "',\n"
+ " forceAuthn='"
+ isForceAuthn()
+ "',\n"
+ " authnState='"
+ getAuthnState()
+ "' }";
}
}
| SamlValidateAuthnRequestResponse |
java | bumptech__glide | library/test/src/test/java/com/bumptech/glide/load/resource/bitmap/DrawableTransformationTest.java | {
"start": 1696,
"end": 7428
} | class ____ {
@Rule public final KeyTester keyTester = new KeyTester();
@Mock private Transformation<Bitmap> bitmapTransformation;
private BitmapPool bitmapPool;
private DrawableTransformation transformation;
private Context context;
@Before
public void setUp() {
MockitoAnnotations.initMocks(this);
transformation = new DrawableTransformation(bitmapTransformation, /* isRequired= */ true);
context = ApplicationProvider.getApplicationContext();
bitmapPool = new BitmapPoolAdapter();
Glide.init(context, new GlideBuilder().setBitmapPool(bitmapPool));
}
@After
public void tearDown() {
Glide.tearDown();
}
@Test
public void transform_withBitmapDrawable_andUnitBitmapTransformation_doesNotRecycle() {
when(bitmapTransformation.transform(
any(Context.class), anyBitmapResource(), anyInt(), anyInt()))
.thenAnswer(new ReturnGivenResource());
Bitmap bitmap = Bitmap.createBitmap(100, 200, Bitmap.Config.ARGB_8888);
BitmapDrawable drawable = new BitmapDrawable(context.getResources(), bitmap);
@SuppressWarnings("unchecked")
Resource<Drawable> input =
(Resource<Drawable>) (Resource<?>) new BitmapDrawableResource(drawable, bitmapPool);
transformation.transform(context, input, /* outWidth= */ 100, /* outHeight= */ 200);
assertThat(bitmap.isRecycled()).isFalse();
}
@Test
public void transform_withBitmapDrawable_andFunctionalBitmapTransformation_doesNotRecycle() {
when(bitmapTransformation.transform(
any(Context.class), anyBitmapResource(), anyInt(), anyInt()))
.thenAnswer(
new Answer<Resource<Bitmap>>() {
@Override
public Resource<Bitmap> answer(InvocationOnMock invocationOnMock) throws Throwable {
return BitmapResource.obtain(
Bitmap.createBitmap(200, 200, Bitmap.Config.ARGB_8888), bitmapPool);
}
});
Bitmap bitmap = Bitmap.createBitmap(100, 200, Bitmap.Config.ARGB_8888);
BitmapDrawable drawable = new BitmapDrawable(context.getResources(), bitmap);
@SuppressWarnings("unchecked")
Resource<Drawable> input =
(Resource<Drawable>) (Resource<?>) new BitmapDrawableResource(drawable, bitmapPool);
transformation.transform(context, input, /* outWidth= */ 100, /* outHeight= */ 200);
assertThat(bitmap.isRecycled()).isFalse();
}
@Test
public void transform_withColorDrawable_andUnitBitmapTransformation_recycles() {
bitmapPool = mock(BitmapPool.class);
Glide.tearDown();
Glide.init(context, new GlideBuilder().setBitmapPool(bitmapPool));
when(bitmapTransformation.transform(
any(Context.class), anyBitmapResource(), anyInt(), anyInt()))
.thenAnswer(new ReturnGivenResource());
ColorDrawable colorDrawable = new ColorDrawable(Color.RED);
final Resource<Drawable> input = new SimpleResource<Drawable>(colorDrawable);
doAnswer(
new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocationOnMock) throws Throwable {
Bitmap bitmap = (Bitmap) invocationOnMock.getArguments()[0];
assertThat(bitmap.getWidth()).isEqualTo(100);
assertThat(bitmap.getHeight()).isEqualTo(200);
return null;
}
})
.when(bitmapPool)
.put(any(Bitmap.class));
when(bitmapPool.get(anyInt(), anyInt(), any(Bitmap.Config.class)))
.thenAnswer(
new Answer<Bitmap>() {
@Override
public Bitmap answer(InvocationOnMock invocationOnMock) throws Throwable {
int width = (Integer) invocationOnMock.getArguments()[0];
int height = (Integer) invocationOnMock.getArguments()[1];
Bitmap.Config config = (Bitmap.Config) invocationOnMock.getArguments()[2];
return Bitmap.createBitmap(width, height, config);
}
});
transformation.transform(context, input, /* outWidth= */ 100, /* outHeight= */ 200);
verify(bitmapPool).put(isA(Bitmap.class));
}
@Test
public void testEquals() {
BitmapTransformation otherBitmapTransformation = mock(BitmapTransformation.class);
doAnswer(new Util.WriteDigest("bitmapTransformation"))
.when(bitmapTransformation)
.updateDiskCacheKey(any(MessageDigest.class));
doAnswer(new Util.WriteDigest("otherBitmapTransformation"))
.when(otherBitmapTransformation)
.updateDiskCacheKey(any(MessageDigest.class));
keyTester
.addEquivalenceGroup(
transformation,
new DrawableTransformation(bitmapTransformation, /* isRequired= */ true),
new DrawableTransformation(bitmapTransformation, /* isRequired= */ false))
.addEquivalenceGroup(bitmapTransformation)
.addEquivalenceGroup(otherBitmapTransformation)
.addEquivalenceGroup(
new DrawableTransformation(otherBitmapTransformation, /* isRequired= */ true),
new DrawableTransformation(otherBitmapTransformation, /* isRequired= */ false))
.addRegressionTest(
new DrawableTransformation(bitmapTransformation, /* isRequired= */ true),
"eddf60c557a6315a489b8a3a19b12439a90381256289fbe9a503afa726230bd9")
.addRegressionTest(
new DrawableTransformation(otherBitmapTransformation, /* isRequired= */ false),
"40931536ed0ec97c39d4be10c44f5b69a86030ec575317f5a0f17e15a0ea9be8")
.test();
}
@SuppressWarnings("unchecked")
private static Resource<Bitmap> anyBitmapResource() {
return any(Resource.class);
}
private static final | DrawableTransformationTest |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/functions/aggfunctions/LastValueWithRetractAggFunctionWithOrderTest.java | {
"start": 9523,
"end": 11898
} | class ____
extends LastValueWithRetractAggFunctionWithOrderTestBase<StringData> {
@Override
protected List<List<StringData>> getInputValueSets() {
return Arrays.asList(
Arrays.asList(
StringData.fromString("abc"),
StringData.fromString("def"),
StringData.fromString("ghi"),
null,
StringData.fromString("jkl"),
null,
StringData.fromString("zzz"),
StringData.fromString("abc"),
StringData.fromString("def"),
StringData.fromString("abc")),
Arrays.asList(null, null),
Arrays.asList(null, StringData.fromString("a")),
Arrays.asList(StringData.fromString("x"), null, StringData.fromString("e")));
}
@Override
protected List<List<Long>> getInputOrderSets() {
return Arrays.asList(
Arrays.asList(10L, 2L, 5L, null, 3L, 1L, 5L, 10L, 15L, 11L),
Arrays.asList(6L, 5L),
Arrays.asList(8L, 6L),
Arrays.asList(6L, 4L, 3L));
}
@Override
protected List<StringData> getExpectedResults() {
return Arrays.asList(
StringData.fromString("def"),
null,
StringData.fromString("a"),
StringData.fromString("x"));
}
@Override
protected AggregateFunction<StringData, LastValueWithRetractAccumulator<StringData>>
getAggregator() {
return new LastValueWithRetractAggFunction<>(DataTypes.STRING().getLogicalType());
}
}
// --------------------------------------------------------------------------------------------
// This section contain base classes that provide common inputs and accessor for retract
// function
// for tests declared above.
// --------------------------------------------------------------------------------------------
/** Test base for {@link LastValueWithRetractAggFunction} with order. */
abstract static | StringLastValueWithRetractAggFunctionWithOrderTest |
java | quarkusio__quarkus | extensions/micrometer/runtime/src/main/java/io/quarkus/micrometer/runtime/MicrometerTimedInterceptor.java | {
"start": 850,
"end": 6261
} | class ____ {
private static final Logger log = Logger.getLogger(MicrometerTimedInterceptor.class);
public static final String DEFAULT_METRIC_NAME = "method.timed";
private final MeterRegistry meterRegistry;
private final MeterTagsSupport meterTagsSupport;
public MicrometerTimedInterceptor(MeterRegistry meterRegistry, MeterTagsSupport meterTagsSupport) {
this.meterRegistry = meterRegistry;
this.meterTagsSupport = meterTagsSupport;
}
@AroundInvoke
@SuppressWarnings("unchecked")
Object timedMethod(ArcInvocationContext context) throws Exception {
final List<Sample> samples = getSamples(context);
if (samples.isEmpty()) {
// This should never happen - at least one @Timed binding must be present
return context.proceed();
}
Class<?> returnType = context.getMethod().getReturnType();
if (TypesUtil.isCompletionStage(returnType)) {
try {
return ((CompletionStage<?>) context.proceed()).whenComplete((result, throwable) -> {
stop(samples, MicrometerRecorder.getExceptionTag(throwable));
});
} catch (Exception ex) {
stop(samples, MicrometerRecorder.getExceptionTag(ex));
throw ex;
}
} else if (TypesUtil.isUni(returnType)) {
try {
return ((Uni<Object>) context.proceed()).onTermination().invoke(
new Functions.TriConsumer<>() {
@Override
public void accept(Object o, Throwable throwable, Boolean cancelled) {
stop(samples, MicrometerRecorder.getExceptionTag(throwable));
}
});
} catch (Exception ex) {
stop(samples, MicrometerRecorder.getExceptionTag(ex));
throw ex;
}
}
String exceptionClass = MicrometerRecorder.getExceptionTag(null);
try {
return context.proceed();
} catch (Exception ex) {
exceptionClass = MicrometerRecorder.getExceptionTag(ex);
throw ex;
} finally {
stop(samples, exceptionClass);
}
}
private List<Sample> getSamples(ArcInvocationContext context) {
List<Timed> timed = context.findIterceptorBindings(Timed.class);
if (timed.isEmpty()) {
return Collections.emptyList();
}
Tags tags = meterTagsSupport.getTags(context);
List<Sample> samples = new ArrayList<>(timed.size());
for (Timed t : timed) {
if (t.longTask()) {
samples.add(new LongTimerSample(t, tags));
} else {
samples.add(new TimerSample(t, tags));
}
}
return samples;
}
private void stop(List<Sample> samples, String throwableClassName) {
for (Sample sample : samples) {
sample.stop(throwableClassName);
}
}
private void record(Timed timed, Timer.Sample sample, String exceptionClass, Tags timerTags) {
final String metricName = timed.value().isEmpty() ? DEFAULT_METRIC_NAME : timed.value();
try {
Timer.Builder builder = Timer.builder(metricName)
.description(timed.description().isEmpty() ? null : timed.description())
.tags(timerTags)
.tag("exception", exceptionClass)
.publishPercentileHistogram(timed.histogram())
.publishPercentiles(timed.percentiles().length == 0 ? null : timed.percentiles());
sample.stop(builder.register(meterRegistry));
} catch (Exception e) {
// ignoring on purpose: possible meter registration error should not interrupt main code flow.
log.warnf(e, "Unable to record observed timer value for %s with exceptionClass %s",
metricName, exceptionClass);
}
}
LongTaskTimer.Sample startLongTaskTimer(Timed timed, Tags commonTags, String metricName) {
try {
// This will throw if the annotation is incorrect.
// Errors are checked for at build time, but ...
return LongTaskTimer.builder(metricName)
.description(timed.description().isEmpty() ? null : timed.description())
.tags(commonTags)
.tags(timed.extraTags())
.publishPercentileHistogram(timed.histogram())
.register(meterRegistry)
.start();
} catch (Exception e) {
// ignoring on purpose: possible meter registration error should not interrupt main code flow.
log.warnf(e, "Unable to create long task timer named %s", metricName);
return null;
}
}
private void stopLongTaskTimer(String metricName, LongTaskTimer.Sample sample) {
try {
sample.stop();
} catch (Exception e) {
// ignoring on purpose
log.warnf(e, "Unable to update long task timer named %s", metricName);
}
}
private Tags getCommonTags(String className, String methodName) {
return Tags.of("class", className, "method", methodName);
}
abstract static | MicrometerTimedInterceptor |
java | netty__netty | codec-http/src/main/java/io/netty/handler/codec/http/websocketx/ContinuationWebSocketFrame.java | {
"start": 983,
"end": 4015
} | class ____ extends WebSocketFrame {
/**
* Creates a new empty continuation frame.
*/
public ContinuationWebSocketFrame() {
this(Unpooled.buffer(0));
}
/**
* Creates a new continuation frame with the specified binary data. The final fragment flag is
* set to true.
*
* @param binaryData the content of the frame.
*/
public ContinuationWebSocketFrame(ByteBuf binaryData) {
super(binaryData);
}
/**
* Creates a new continuation frame with the specified binary data.
*
* @param finalFragment
* flag indicating if this frame is the final fragment
* @param rsv
* reserved bits used for protocol extensions
* @param binaryData
* the content of the frame.
*/
public ContinuationWebSocketFrame(boolean finalFragment, int rsv, ByteBuf binaryData) {
super(finalFragment, rsv, binaryData);
}
/**
* Creates a new continuation frame with the specified text data
*
* @param finalFragment
* flag indicating if this frame is the final fragment
* @param rsv
* reserved bits used for protocol extensions
* @param text
* text content of the frame.
*/
public ContinuationWebSocketFrame(boolean finalFragment, int rsv, String text) {
this(finalFragment, rsv, fromText(text));
}
/**
* Returns the text data in this frame.
*/
public String text() {
return content().toString(CharsetUtil.UTF_8);
}
/**
* Sets the string for this frame.
*
* @param text
* text to store.
*/
private static ByteBuf fromText(String text) {
if (text == null || text.isEmpty()) {
return Unpooled.EMPTY_BUFFER;
} else {
return Unpooled.copiedBuffer(text, CharsetUtil.UTF_8);
}
}
@Override
public ContinuationWebSocketFrame copy() {
return (ContinuationWebSocketFrame) super.copy();
}
@Override
public ContinuationWebSocketFrame duplicate() {
return (ContinuationWebSocketFrame) super.duplicate();
}
@Override
public ContinuationWebSocketFrame retainedDuplicate() {
return (ContinuationWebSocketFrame) super.retainedDuplicate();
}
@Override
public ContinuationWebSocketFrame replace(ByteBuf content) {
return new ContinuationWebSocketFrame(isFinalFragment(), rsv(), content);
}
@Override
public ContinuationWebSocketFrame retain() {
super.retain();
return this;
}
@Override
public ContinuationWebSocketFrame retain(int increment) {
super.retain(increment);
return this;
}
@Override
public ContinuationWebSocketFrame touch() {
super.touch();
return this;
}
@Override
public ContinuationWebSocketFrame touch(Object hint) {
super.touch(hint);
return this;
}
}
| ContinuationWebSocketFrame |
java | netty__netty | handler/src/main/java/io/netty/handler/ssl/OpenSslApplicationProtocolNegotiator.java | {
"start": 811,
"end": 1434
} | interface ____ extends ApplicationProtocolNegotiator {
/**
* Returns the {@link ApplicationProtocolConfig.Protocol} which should be used.
*/
ApplicationProtocolConfig.Protocol protocol();
/**
* Get the desired behavior for the peer who selects the application protocol.
*/
ApplicationProtocolConfig.SelectorFailureBehavior selectorFailureBehavior();
/**
* Get the desired behavior for the peer who is notified of the selected protocol.
*/
ApplicationProtocolConfig.SelectedListenerFailureBehavior selectedListenerFailureBehavior();
}
| OpenSslApplicationProtocolNegotiator |
java | apache__camel | components/camel-jasypt/src/main/java/org/apache/camel/component/jasypt/Main.java | {
"start": 2668,
"end": 8838
} | class ____ extends Option {
private String parameterName;
protected ParameterOption(String abbreviation, String fullName, String description, String parameterName) {
super(abbreviation, fullName, description);
this.parameterName = parameterName;
}
@Override
protected void doProcess(String arg, LinkedList<String> remainingArgs) {
if (remainingArgs.isEmpty()) {
System.err.println("Expected fileName for ");
showOptions();
} else {
String parameter = remainingArgs.removeFirst();
doProcess(arg, parameter, remainingArgs);
}
}
@Override
public String getInformation() {
return " " + getAbbreviation() + " or " + getFullName()
+ " <" + parameterName + "> = " + getDescription();
}
protected abstract void doProcess(String arg, String parameter, LinkedList<String> remainingArgs);
}
public Main() {
addOption(new Option("h", "help", "Displays the help screen") {
protected void doProcess(String arg, LinkedList<String> remainingArgs) {
showOptions();
// no need to process further if user just wants help
System.exit(0);
}
});
addOption(new ParameterOption("c", "command", "Command can be encrypt or decrypt", "command") {
protected void doProcess(String arg, String parameter, LinkedList<String> remainingArgs) {
if ("encrypt".equals(parameter) || "decrypt".equals(parameter)) {
command = parameter;
} else {
throw new IllegalArgumentException("Unknown command, was: " + parameter);
}
}
});
addOption(new ParameterOption("p", "password", "Password to use", "password") {
protected void doProcess(String arg, String parameter, LinkedList<String> remainingArgs) {
password = parameter;
}
});
addOption(new ParameterOption("i", "input", "Text to encrypt or decrypt", "input") {
protected void doProcess(String arg, String parameter, LinkedList<String> remainingArgs) {
input = parameter;
}
});
addOption(new ParameterOption("a", "algorithm", "Optional algorithm to use", "algorithm") {
protected void doProcess(String arg, String parameter, LinkedList<String> remainingArgs) {
algorithm = parameter;
}
});
addOption(new ParameterOption("rsga", "salt", "Optional random salt generator algorithm to use", "salt") {
protected void doProcess(String arg, String parameter, LinkedList<String> remainingArgs) {
randomSaltGeneratorAlgorithm = parameter;
}
});
addOption(new ParameterOption("riga", "iv", "Optional random iv generator algorithm to use", "iv") {
protected void doProcess(String arg, String parameter, LinkedList<String> remainingArgs) {
randomIvGeneratorAlgorithm = parameter;
}
});
}
private void addOption(Option option) {
options.add(option);
}
private void showOptions() {
System.out.println("Apache Camel Jasypt takes the following options:");
System.out.println();
for (Option option : options) {
System.out.println(option.getInformation());
}
System.out.println();
System.out.println();
}
private boolean parseArguments(String[] arguments) {
LinkedList<String> args = new LinkedList<>(Arrays.asList(arguments));
boolean valid = true;
while (!args.isEmpty()) {
String arg = args.removeFirst();
boolean handled = false;
for (Option option : options) {
if (option.processOption(arg, args)) {
handled = true;
break;
}
}
if (!handled) {
System.out.println("Error: Unknown option: " + arg);
System.out.println();
valid = false;
break;
}
}
return valid;
}
public void run(String[] args) {
if (!parseArguments(args)) {
showOptions();
return;
}
if (command == null) {
System.out.println("Error: Command is empty");
System.out.println();
showOptions();
return;
}
if (password == null) {
System.out.println("Error: Password is empty");
System.out.println();
showOptions();
return;
}
if (input == null) {
System.out.println("Error: Input is empty");
System.out.println();
showOptions();
return;
}
encryptor.setPassword(password);
if (algorithm != null) {
encryptor.setAlgorithm(algorithm);
}
if (randomSaltGeneratorAlgorithm != null) {
encryptor.setSaltGenerator(new RandomSaltGenerator(randomSaltGeneratorAlgorithm));
}
if (randomIvGeneratorAlgorithm != null) {
encryptor.setIvGenerator(new RandomIvGenerator(randomIvGeneratorAlgorithm));
}
if ("encrypt".equals(command)) {
System.out.println("Encrypted text: " + encryptor.encrypt(input));
} else {
System.out.println("Decrypted text: " + encryptor.decrypt(input));
}
}
/**
* The main entrypoint is required as the Jasypt dependency can be also used as CLI.
*
* @param args the main arguments
* @throws Exception throws any processing exception
*/
public static void main(String[] args) throws Exception {
Main main = new Main();
if (args.length == 0) {
main.showOptions();
return;
} else {
main.run(args);
}
}
}
| ParameterOption |
java | spring-projects__spring-boot | module/spring-boot-tomcat/src/main/java/org/springframework/boot/tomcat/servlet/NestedJarResourceSet.java | {
"start": 1481,
"end": 4535
} | class ____ extends AbstractSingleArchiveResourceSet {
private static final Name MULTI_RELEASE = new Name("Multi-Release");
private final URL url;
private @Nullable JarFile archive;
private long archiveUseCount;
private boolean useCaches;
private volatile @Nullable Boolean multiRelease;
NestedJarResourceSet(URL url, WebResourceRoot root, String webAppMount, String internalPath)
throws IllegalArgumentException {
this.url = url;
setRoot(root);
setWebAppMount(webAppMount);
setInternalPath(internalPath);
setStaticOnly(true);
if (getRoot().getState().isAvailable()) {
try {
start();
}
catch (LifecycleException ex) {
throw new IllegalStateException(ex);
}
}
}
@Override
protected WebResource createArchiveResource(JarEntry jarEntry, String webAppPath, Manifest manifest) {
return new JarResource(this, webAppPath, getBaseUrlString(), jarEntry);
}
@Override
protected void initInternal() throws LifecycleException {
try {
JarURLConnection connection = connect();
try {
setManifest(connection.getManifest());
setBaseUrl(connection.getJarFileURL());
}
finally {
if (!connection.getUseCaches()) {
connection.getJarFile().close();
}
}
}
catch (IOException ex) {
throw new IllegalStateException(ex);
}
}
@Override
protected JarFile openJarFile() throws IOException {
synchronized (this.archiveLock) {
if (this.archive == null) {
JarURLConnection connection = connect();
this.useCaches = connection.getUseCaches();
this.archive = connection.getJarFile();
}
this.archiveUseCount++;
return this.archive;
}
}
@Override
protected void closeJarFile() {
synchronized (this.archiveLock) {
this.archiveUseCount--;
}
}
@Override
protected boolean isMultiRelease() {
Boolean multiRelease = this.multiRelease;
if (multiRelease == null) {
synchronized (this.archiveLock) {
multiRelease = this.multiRelease;
if (multiRelease == null) {
// JarFile.isMultiRelease() is final so we must go to the manifest
Manifest manifest = getManifest();
Attributes attributes = (manifest != null) ? manifest.getMainAttributes() : null;
multiRelease = (attributes != null) && attributes.containsKey(MULTI_RELEASE);
this.multiRelease = multiRelease;
}
}
}
return multiRelease;
}
@Override
public void gc() {
synchronized (this.archiveLock) {
if (this.archive != null && this.archiveUseCount == 0) {
try {
if (!this.useCaches) {
this.archive.close();
}
}
catch (IOException ex) {
// Ignore
}
this.archive = null;
this.archiveEntries = null;
}
}
}
private JarURLConnection connect() throws IOException {
URLConnection connection = this.url.openConnection();
ResourceUtils.useCachesIfNecessary(connection);
Assert.state(connection instanceof JarURLConnection,
() -> "URL '%s' did not return a JAR connection".formatted(this.url));
connection.connect();
return (JarURLConnection) connection;
}
}
| NestedJarResourceSet |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/model/XmlTestSupport.java | {
"start": 1303,
"end": 2625
} | class ____ extends TestSupport {
protected final Logger log = LoggerFactory.getLogger(getClass());
protected JAXBContext jaxbContext;
protected RouteContainer assertParseAsJaxb(String uri) throws JAXBException {
Object value = parseUri(uri);
RouteContainer context = assertIsInstanceOf(RouteContainer.class, value);
log.info("Found: {}", context);
return context;
}
protected RestContainer assertParseRestAsJaxb(String uri) throws JAXBException {
Object value = parseUri(uri);
RestContainer context = assertIsInstanceOf(RestContainer.class, value);
log.info("Found: {}", context);
return context;
}
protected Object parseUri(String uri) throws JAXBException {
Unmarshaller unmarshaller = jaxbContext.createUnmarshaller();
URL resource = getClass().getResource(uri);
assertNotNull(resource, "Cannot find resource on the classpath: " + uri);
return unmarshaller.unmarshal(resource);
}
@Override
@BeforeEach
public void setUp() throws Exception {
super.setUp();
jaxbContext = createJaxbContext();
}
public static JAXBContext createJaxbContext() throws JAXBException {
return new DefaultModelJAXBContextFactory().newJAXBContext();
}
}
| XmlTestSupport |
java | junit-team__junit5 | platform-tests/src/test/java/org/junit/platform/engine/support/hierarchical/NodeTreeWalkerIntegrationTests.java | {
"start": 2065,
"end": 11885
} | class ____ {
LockManager lockManager = new LockManager();
NodeTreeWalker nodeTreeWalker = new NodeTreeWalker(lockManager);
@Test
void pullUpExclusiveChildResourcesToTestClass() {
var engineDescriptor = discover(TestCaseWithResourceLock.class);
var advisor = nodeTreeWalker.walk(engineDescriptor);
var testClassDescriptor = getOnlyElement(engineDescriptor.getChildren());
assertThat(advisor.getResourceLock(testClassDescriptor)).extracting(allLocks()) //
.isEqualTo(List.of(getLock(GLOBAL_READ), getReadWriteLock("a"), getReadWriteLock("b")));
assertThat(advisor.getForcedExecutionMode(testClassDescriptor)).isEmpty();
var testMethodDescriptor = getOnlyElement(testClassDescriptor.getChildren());
assertThat(advisor.getResourceLock(testMethodDescriptor)).extracting(allLocks()).isEqualTo(List.of());
assertThat(advisor.getForcedExecutionMode(testMethodDescriptor)).contains(SAME_THREAD);
}
@Test
void setsForceExecutionModeForChildrenWithWriteLocksOnClass() {
var engineDescriptor = discover(TestCaseWithResourceWriteLockOnClass.class);
var advisor = nodeTreeWalker.walk(engineDescriptor);
var testClassDescriptor = getOnlyElement(engineDescriptor.getChildren());
assertThat(advisor.getResourceLock(testClassDescriptor)).extracting(allLocks()) //
.isEqualTo(List.of(getLock(GLOBAL_READ), getReadWriteLock("a")));
assertThat(advisor.getForcedExecutionMode(testClassDescriptor)).isEmpty();
var testMethodDescriptor = getOnlyElement(testClassDescriptor.getChildren());
assertThat(advisor.getResourceLock(testMethodDescriptor)).extracting(allLocks()).isEqualTo(List.of());
assertThat(advisor.getForcedExecutionMode(testMethodDescriptor)).contains(SAME_THREAD);
}
@Test
void doesntSetForceExecutionModeForChildrenWithReadLocksOnClass() {
var engineDescriptor = discover(TestCaseWithResourceReadLockOnClass.class);
var advisor = nodeTreeWalker.walk(engineDescriptor);
var testClassDescriptor = getOnlyElement(engineDescriptor.getChildren());
assertThat(advisor.getResourceLock(testClassDescriptor)).extracting(allLocks()) //
.isEqualTo(List.of(getLock(GLOBAL_READ), getReadLock("a")));
assertThat(advisor.getForcedExecutionMode(testClassDescriptor)).isEmpty();
var testMethodDescriptor = getOnlyElement(testClassDescriptor.getChildren());
assertThat(advisor.getResourceLock(testMethodDescriptor)).extracting(allLocks()).isEqualTo(List.of());
assertThat(advisor.getForcedExecutionMode(testMethodDescriptor)).isEmpty();
}
@Test
void setsForceExecutionModeForChildrenWithReadLocksOnClassAndWriteLockOnTest() {
var engineDescriptor = discover(TestCaseWithResourceReadLockOnClassAndWriteClockOnTestCase.class);
var advisor = nodeTreeWalker.walk(engineDescriptor);
var testClassDescriptor = getOnlyElement(engineDescriptor.getChildren());
assertThat(advisor.getResourceLock(testClassDescriptor)).extracting(allLocks()) //
.isEqualTo(List.of(getLock(GLOBAL_READ), getReadWriteLock("a")));
assertThat(advisor.getForcedExecutionMode(testClassDescriptor)).isEmpty();
var testMethodDescriptor = getOnlyElement(testClassDescriptor.getChildren());
assertThat(advisor.getResourceLock(testMethodDescriptor)).extracting(allLocks()).isEqualTo(List.of());
assertThat(advisor.getForcedExecutionMode(testMethodDescriptor)).contains(SAME_THREAD);
}
@Test
void doesntSetForceExecutionModeForChildrenWithReadLocksOnClassAndReadLockOnTest() {
var engineDescriptor = discover(TestCaseWithResourceReadLockOnClassAndReadClockOnTestCase.class);
var advisor = nodeTreeWalker.walk(engineDescriptor);
var testClassDescriptor = getOnlyElement(engineDescriptor.getChildren());
assertThat(advisor.getResourceLock(testClassDescriptor)).extracting(allLocks()) //
.isEqualTo(List.of(getLock(GLOBAL_READ), getReadLock("a"), getReadLock("b")));
assertThat(advisor.getForcedExecutionMode(testClassDescriptor)).isEmpty();
var testMethodDescriptor = getOnlyElement(testClassDescriptor.getChildren());
assertThat(advisor.getResourceLock(testMethodDescriptor)).extracting(allLocks()).isEqualTo(List.of());
assertThat(advisor.getForcedExecutionMode(testMethodDescriptor)).isEmpty();
}
@Test
void leavesResourceLockOnTestMethodWhenClassDoesNotUseResource() {
var engineDescriptor = discover(TestCaseWithoutResourceLock.class);
var advisor = nodeTreeWalker.walk(engineDescriptor);
var testClassDescriptor = getOnlyElement(engineDescriptor.getChildren());
assertThat(advisor.getResourceLock(testClassDescriptor)).extracting(allLocks()) //
.isEqualTo(List.of(getLock(GLOBAL_READ)));
assertThat(advisor.getForcedExecutionMode(testClassDescriptor)).isEmpty();
assertThat(testClassDescriptor.getChildren()).hasSize(2);
var children = testClassDescriptor.getChildren().iterator();
var testMethodDescriptor = children.next();
assertThat(advisor.getResourceLock(testMethodDescriptor)).extracting(allLocks()) //
.isEqualTo(List.of(getReadWriteLock("a")));
assertThat(advisor.getForcedExecutionMode(testMethodDescriptor)).isEmpty();
var nestedTestClassDescriptor = children.next();
assertThat(advisor.getResourceLock(nestedTestClassDescriptor)).extracting(allLocks()) //
.isEqualTo(List.of(getReadWriteLock("b"), getReadWriteLock("c")));
assertThat(advisor.getForcedExecutionMode(nestedTestClassDescriptor)).isEmpty();
var nestedTestMethodDescriptor = getOnlyElement(nestedTestClassDescriptor.getChildren());
assertThat(advisor.getResourceLock(nestedTestMethodDescriptor)).extracting(allLocks()).isEqualTo(List.of());
assertThat(advisor.getForcedExecutionMode(nestedTestMethodDescriptor)).contains(SAME_THREAD);
}
@Test
void coarsensGlobalLockToEngineDescriptorChild() {
var engineDescriptor = discover(TestCaseWithGlobalLockRequiringChild.class);
var advisor = nodeTreeWalker.walk(engineDescriptor);
var testClassDescriptor = getOnlyElement(engineDescriptor.getChildren());
assertThat(advisor.getResourceLock(testClassDescriptor)).extracting(allLocks()) //
.isEqualTo(List.of(getLock(GLOBAL_READ_WRITE)));
assertThat(advisor.getForcedExecutionMode(testClassDescriptor)).isEmpty();
var nestedTestClassDescriptor = getOnlyElement(testClassDescriptor.getChildren());
assertThat(advisor.getResourceLock(nestedTestClassDescriptor)).extracting(allLocks()) //
.isEqualTo(List.of());
assertThat(advisor.getForcedExecutionMode(nestedTestClassDescriptor)).contains(SAME_THREAD);
var testMethodDescriptor = getOnlyElement(nestedTestClassDescriptor.getChildren());
assertThat(advisor.getResourceLock(testMethodDescriptor)).extracting(allLocks()) //
.isEqualTo(List.of());
assertThat(advisor.getForcedExecutionMode(testMethodDescriptor)).contains(SAME_THREAD);
}
@Test
void putsGlobalReadLockOnFirstNodeThatRequiresIt() {
var engineDescriptor = new EngineDescriptor(UniqueId.forEngine("dummy"), "Dummy");
var containerWithoutBehavior = new NodeStub(engineDescriptor.getUniqueId().append("container", "1"),
"Container 1") //
.withGlobalReadLockRequired(false);
var test1 = new NodeStub(containerWithoutBehavior.getUniqueId().append("test", "1"), "Test 1") //
.withExclusiveResource(new ExclusiveResource("key1", READ_WRITE));
containerWithoutBehavior.addChild(test1);
var containerWithBehavior = new NodeStub(engineDescriptor.getUniqueId().append("container", "2"), "Container 2") //
.withGlobalReadLockRequired(true);
var test2 = new NodeStub(containerWithBehavior.getUniqueId().append("test", "2"), "Test 2") //
.withExclusiveResource(new ExclusiveResource("key2", READ_WRITE));
containerWithBehavior.addChild(test2);
engineDescriptor.addChild(containerWithoutBehavior);
engineDescriptor.addChild(containerWithBehavior);
var advisor = nodeTreeWalker.walk(engineDescriptor);
assertThat(advisor.getResourceLock(containerWithoutBehavior)) //
.extracting(allLocks(), LIST) //
.isEmpty();
assertThat(advisor.getResourceLock(test1)) //
.extracting(allLocks(), LIST) //
.containsExactly(getLock(GLOBAL_READ), getReadWriteLock("key1"));
assertThat(advisor.getResourceLock(containerWithBehavior)) //
.extracting(allLocks(), LIST) //
.containsExactly(getLock(GLOBAL_READ));
assertThat(advisor.getResourceLock(test2)) //
.extracting(allLocks(), LIST) //
.containsExactly(getReadWriteLock("key2"));
}
@Test
void doesNotAllowExclusiveResourcesWithoutRequiringGlobalReadLock() {
var engineDescriptor = new EngineDescriptor(UniqueId.forEngine("dummy"), "Dummy");
var invalidNode = new NodeStub(engineDescriptor.getUniqueId().append("container", "1"), "Container") //
.withGlobalReadLockRequired(false) //
.withExclusiveResource(new ExclusiveResource("key", READ_WRITE));
engineDescriptor.addChild(invalidNode);
assertPreconditionViolationFor(() -> nodeTreeWalker.walk(engineDescriptor)) //
.withMessage("Node requiring exclusive resources must also require global read lock: " + invalidNode);
}
private static Function<org.junit.platform.engine.support.hierarchical.ResourceLock, List<Lock>> allLocks() {
return ResourceLockSupport::getLocks;
}
private Lock getReadWriteLock(String key) {
return getLock(new ExclusiveResource(key, READ_WRITE));
}
private Lock getReadLock(String key) {
return getLock(new ExclusiveResource(key, READ));
}
private Lock getLock(ExclusiveResource exclusiveResource) {
return getOnlyElement(ResourceLockSupport.getLocks(lockManager.getLockForResource(exclusiveResource)));
}
private TestDescriptor discover(Class<?> testClass) {
var discoveryRequest = request().selectors(selectClass(testClass)).build();
return new JupiterTestEngine().discover(discoveryRequest, UniqueId.forEngine("junit-jupiter"));
}
@SuppressWarnings("JUnitMalformedDeclaration")
@ResourceLock("a")
static | NodeTreeWalkerIntegrationTests |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/core/Emitter.java | {
"start": 1187,
"end": 1605
} | interface ____<@NonNull T> {
/**
* Signal a normal value.
* @param value the value to signal, not {@code null}
*/
void onNext(@NonNull T value);
/**
* Signal a {@link Throwable} exception.
* @param error the {@code Throwable} to signal, not {@code null}
*/
void onError(@NonNull Throwable error);
/**
* Signal a completion.
*/
void onComplete();
}
| Emitter |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/formatstring/InlineFormatStringTest.java | {
"start": 1403,
"end": 1677
} | class ____ {
private static final String FORMAT = "hello %s";
void f() {
System.err.printf(FORMAT, 42);
}
}
""")
.addOutputLines(
"Test.java",
"""
| Test |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/TruthContainsExactlyElementsInUsageTest.java | {
"start": 13197,
"end": 13402
} | class ____ {
void test() {
assertThat(ImmutableList.of(1, 2, 3)).containsExactly(1, 2, 3);
}
}
""")
.doTest();
}
}
| ExampleClassTest |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/rest/messages/checkpoints/CheckpointConfigInfo.java | {
"start": 11805,
"end": 12269
} | class ____ extends StdDeserializer<ProcessingMode> {
public ProcessingModeDeserializer() {
super(ProcessingMode.class);
}
@Override
public ProcessingMode deserialize(
JsonParser jsonParser, DeserializationContext deserializationContext)
throws IOException {
return ProcessingMode.valueOf(jsonParser.getValueAsString().toUpperCase());
}
}
}
| ProcessingModeDeserializer |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/query/hql/DeleteJoinTests.java | {
"start": 1329,
"end": 3033
} | class ____ {
@BeforeEach
public void prepareData(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
session.persist( new Contact(
1,
new Name( "A", "B" ),
Contact.Gender.FEMALE,
LocalDate.of( 2000, 1, 1 )
) );
session.persist( new BasicEntity( 1, "data" ) );
}
);
}
@AfterEach
public void cleanupData(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
}
@Test
public void testDeleteWithJoin(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
//tag::hql-delete-join-example[]
int updated = session.createMutationQuery(
"delete from BasicEntity b left join Contact c on b.id = c.id " +
"where c.id is not null"
).executeUpdate();
//end::hql-delete-join-example[]
assertEquals( 1, updated );
assertNull( session.find( BasicEntity.class, 1 ) );
}
);
}
@Test
public void testDeleteWithJoinCriteria(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
final HibernateCriteriaBuilder cb = session.getCriteriaBuilder();
final JpaCriteriaDelete<BasicEntity> criteriaDelete = cb.createCriteriaDelete( BasicEntity.class );
final JpaRoot<BasicEntity> b = criteriaDelete.from( BasicEntity.class );
final JpaEntityJoin<BasicEntity, Contact> c = b.join( Contact.class, JoinType.LEFT );
c.on( b.get( "id" ).equalTo( c.get( "id" ) ) );
criteriaDelete.where( c.get( "id" ).isNotNull() );
int updated = session.createMutationQuery( criteriaDelete ).executeUpdate();
assertEquals( 1, updated );
assertNull( session.find( BasicEntity.class, 1 ) );
}
);
}
}
| DeleteJoinTests |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/converted/converter/AttributeConverterTest.java | {
"start": 3705,
"end": 17635
} | class ____ implements AttributeConverter<String,String> {
public BlowsUpConverter() {
throw new BlewUpException();
}
@Override
public String convertToDatabaseColumn(String attribute) {
return null;
}
@Override
public String convertToEntityAttribute(String dbData) {
return null;
}
}
@Test
public void testBasicOperation() {
try ( StandardServiceRegistry serviceRegistry = ServiceRegistryUtil.serviceRegistry()) {
final MetadataBuildingContext buildingContext = new MetadataBuildingContextTestingImpl( serviceRegistry );
final JdbcTypeRegistry jdbcTypeRegistry = buildingContext.getBootstrapContext()
.getTypeConfiguration()
.getJdbcTypeRegistry();
final BasicValue basicValue = new BasicValue( buildingContext );
basicValue.setJpaAttributeConverterDescriptor(
ConverterDescriptors.of(
new StringClobConverter(),
new ClassmateContext()
)
);
basicValue.setTypeUsingReflection( IrrelevantEntity.class.getName(), "name" );
final Type type = basicValue.getType();
assertNotNull( type );
assertThat( type, instanceOf( ConvertedBasicTypeImpl.class ) );
final JdbcMapping jdbcMapping = (JdbcMapping) type;
assertThat( jdbcMapping.getJavaTypeDescriptor().getJavaTypeClass(), equalTo( String.class ) );
final JdbcType jdbcType = jdbcMapping.getJdbcType();
assertThat( jdbcType, is( jdbcTypeRegistry.getDescriptor( Types.CLOB ) ) );
}
}
@Test
public void testNonAutoApplyHandling() {
final StandardServiceRegistry ssr = ServiceRegistryUtil.serviceRegistry();
try {
MetadataImplementor metadata = (MetadataImplementor) new MetadataSources( ssr )
.addAnnotatedClass( Tester.class )
.getMetadataBuilder()
.applyAttributeConverter( NotAutoAppliedConverter.class, false )
.build();
PersistentClass tester = metadata.getEntityBinding( Tester.class.getName() );
Property nameProp = tester.getProperty( "name" );
SimpleValue nameValue = (SimpleValue) nameProp.getValue();
Type type = nameValue.getType();
assertNotNull( type );
if ( ConvertedBasicTypeImpl.class.isInstance( type ) ) {
fail( "AttributeConverter with autoApply=false was auto applied" );
}
}
finally {
StandardServiceRegistryBuilder.destroy( ssr );
}
}
@Test
public void testBasicConverterApplication() {
final StandardServiceRegistry ssr = ServiceRegistryUtil.serviceRegistry();
try {
final MetadataImplementor metadata = (MetadataImplementor) new MetadataSources( ssr )
.addAnnotatedClass( Tester.class )
.getMetadataBuilder()
.applyAttributeConverter( StringClobConverter.class, true )
.build();
final JdbcTypeRegistry jdbcTypeRegistry = metadata.getTypeConfiguration()
.getJdbcTypeRegistry();
final PersistentClass tester = metadata.getEntityBinding( Tester.class.getName() );
final Property nameProp = tester.getProperty( "name" );
final BasicValue nameValue = (BasicValue) nameProp.getValue();
final Type type = nameValue.getType();
assertNotNull( type );
assertThat( type, instanceOf( ConvertedBasicTypeImpl.class ) );
final JdbcMapping jdbcMapping = (JdbcMapping) type;
assertThat( jdbcMapping.getJavaTypeDescriptor().getJavaTypeClass(), Matchers.equalTo( String.class ) );
final JdbcType jdbcType = jdbcMapping.getJdbcType();
assertThat( jdbcType, is( jdbcTypeRegistry.getDescriptor( Types.CLOB ) ) );
}
finally {
StandardServiceRegistryBuilder.destroy( ssr );
}
}
@Test
@JiraKey(value = "HHH-8462")
public void testBasicOrmXmlConverterApplication() {
final StandardServiceRegistry ssr = ServiceRegistryUtil.serviceRegistry();
try {
MetadataImplementor metadata = (MetadataImplementor) new MetadataSources( ssr )
.addAnnotatedClass( Tester.class )
.addURL( ConfigHelper.findAsResource( "org/hibernate/test/converter/orm.xml" ) )
.getMetadataBuilder()
.build();
final JdbcTypeRegistry jdbcTypeRegistry = metadata.getTypeConfiguration()
.getJdbcTypeRegistry();
PersistentClass tester = metadata.getEntityBinding( Tester.class.getName() );
Property nameProp = tester.getProperty( "name" );
BasicValue nameValue = (BasicValue) nameProp.getValue();
Type type = nameValue.getType();
assertNotNull( type );
assertThat( type, instanceOf( ConvertedBasicTypeImpl.class ) );
final JdbcMapping jdbcMapping = (JdbcMapping) type;
assertThat( jdbcMapping.getJavaTypeDescriptor().getJavaTypeClass(), equalTo( String.class ) );
assertThat( jdbcMapping.getJdbcJavaType().getJavaTypeClass(), equalTo( Clob.class ) );
final JdbcType jdbcType = jdbcMapping.getJdbcType();
assertThat( jdbcType, is( jdbcTypeRegistry.getDescriptor( Types.CLOB ) ) );
}
finally {
StandardServiceRegistryBuilder.destroy( ssr );
}
}
@Test
@JiraKey(value = "HHH-14881")
public void testBasicOrmXmlConverterWithOrmXmlPackage() {
final StandardServiceRegistry ssr = ServiceRegistryUtil.serviceRegistry();
try {
MetadataImplementor metadata = (MetadataImplementor) new MetadataSources( ssr )
.addAnnotatedClass( Tester.class )
.addURL( ConfigHelper.findAsResource( "org/hibernate/test/converter/package.xml" ) )
.getMetadataBuilder()
.build();
final JdbcTypeRegistry jdbcTypeRegistry = metadata.getTypeConfiguration()
.getJdbcTypeRegistry();
PersistentClass tester = metadata.getEntityBinding( Tester.class.getName() );
Property nameProp = tester.getProperty( "name" );
SimpleValue nameValue = (SimpleValue) nameProp.getValue();
Type type = nameValue.getType();
assertNotNull( type );
if ( !ConvertedBasicTypeImpl.class.isInstance( type ) ) {
fail( "AttributeConverter not applied" );
}
final JdbcMapping jdbcMapping = (JdbcMapping) type;
assertThat( jdbcMapping.getJavaTypeDescriptor().getJavaTypeClass(), equalTo( String.class ) );
assertThat( jdbcMapping.getJdbcJavaType().getJavaTypeClass(), equalTo( Clob.class ) );
final JdbcType sqlTypeDescriptor = jdbcMapping.getJdbcType();
assertThat( sqlTypeDescriptor, is( jdbcTypeRegistry.getDescriptor( Types.CLOB ) ) );
}
finally {
StandardServiceRegistryBuilder.destroy( ssr );
}
}
@Test
public void testBasicConverterDisableApplication() {
final StandardServiceRegistry ssr = ServiceRegistryUtil.serviceRegistry();
try {
MetadataImplementor metadata = (MetadataImplementor) new MetadataSources( ssr )
.addAnnotatedClass( Tester2.class )
.getMetadataBuilder()
.applyAttributeConverter( StringClobConverter.class, true )
.build();
final JdbcTypeRegistry jdbcTypeRegistry = metadata.getTypeConfiguration()
.getJdbcTypeRegistry();
PersistentClass tester = metadata.getEntityBinding( Tester2.class.getName() );
Property nameProp = tester.getProperty( "name" );
SimpleValue nameValue = (SimpleValue) nameProp.getValue();
Type type = nameValue.getType();
assertNotNull( type );
if ( ConvertedBasicTypeImpl.class.isInstance( type ) ) {
fail( "AttributeConverter applied (should not have been)" );
}
AbstractStandardBasicType basicType = assertTyping( AbstractStandardBasicType.class, type );
assertSame( StringJavaType.INSTANCE, basicType.getJavaTypeDescriptor() );
assertEquals( jdbcTypeRegistry.getDescriptor( Types.VARCHAR ), basicType.getJdbcType() );
}
finally {
StandardServiceRegistryBuilder.destroy( ssr );
}
}
@Test
public void testBasicUsage() {
Configuration cfg = new Configuration();
ServiceRegistryUtil.applySettings( cfg.getStandardServiceRegistryBuilder() );
cfg.addAttributeConverter( IntegerToVarcharConverter.class, false );
cfg.addAnnotatedClass( Tester4.class );
cfg.setProperty( AvailableSettings.HBM2DDL_AUTO, "create-drop" );
cfg.setProperty( AvailableSettings.GENERATE_STATISTICS, "true" );
try (SessionFactory sf = cfg.buildSessionFactory()) {
Session session = sf.openSession();
session.beginTransaction();
session.persist( new Tester4( 1L, "steve", 200 ) );
session.getTransaction().commit();
session.close();
sf.getStatistics().clear();
session = sf.openSession();
session.beginTransaction();
session.find( Tester4.class, 1L );
session.getTransaction().commit();
session.close();
assertEquals( 0, sf.getStatistics().getEntityUpdateCount() );
session = sf.openSession();
session.beginTransaction();
Tester4 t4 = session.find( Tester4.class, 1L );
t4.code = 300;
session.getTransaction().commit();
session.close();
session = sf.openSession();
session.beginTransaction();
t4 = session.find( Tester4.class, 1L );
assertEquals( 300, t4.code.longValue() );
session.remove( t4 );
session.getTransaction().commit();
session.close();
}
}
@Test
@JiraKey( value = "HHH-14206" )
public void testPrimitiveTypeConverterAutoApplied() {
    // An auto-applied Integer <-> String converter must also kick in for a
    // primitive `int` attribute (Tester5#code): the resolved mapping should be a
    // converted type with Integer on the domain side and VARCHAR/String on the
    // JDBC side.
    final StandardServiceRegistry registry = ServiceRegistryUtil.serviceRegistry();
    try {
        final MetadataImplementor bootModel = (MetadataImplementor) new MetadataSources( registry )
                .addAnnotatedClass( Tester5.class )
                .getMetadataBuilder()
                .applyAttributeConverter( IntegerToVarcharConverter.class, true )
                .build();
        final JdbcTypeRegistry jdbcTypes = bootModel.getTypeConfiguration()
                .getJdbcTypeRegistry();

        final PersistentClass entityBinding = bootModel.getEntityBinding( Tester5.class.getName() );
        final Property codeAttribute = entityBinding.getProperty( "code" );
        final BasicValue codeValue = (BasicValue) codeAttribute.getValue();
        final Type resolvedType = codeValue.getType();
        assertNotNull( resolvedType );
        assertThat( resolvedType, instanceOf( ConvertedBasicTypeImpl.class ) );

        // Domain side is (boxed) Integer, relational side is String/VARCHAR.
        final JdbcMapping mapping = (JdbcMapping) resolvedType;
        assertThat( mapping.getJavaTypeDescriptor().getJavaTypeClass(), equalTo( Integer.class ) );
        assertThat( mapping.getJdbcJavaType().getJavaTypeClass(), equalTo( String.class ) );
        assertThat( mapping.getJdbcType(), is( jdbcTypes.getDescriptor( Types.VARCHAR ) ) );
    }
    finally {
        StandardServiceRegistryBuilder.destroy( registry );
    }
}
@Test
public void testBasicTimestampUsage() {
    // Same lifecycle check as testBasicUsage, but for a temporal attribute: the
    // Instant converter must round-trip cleanly so that re-reading the entity is
    // not seen as a dirty update.
    //
    // Fix: Sessions are opened with try-with-resources so they are closed even
    // when an assertion or Hibernate call throws (the original leaked the open
    // Session on failure).
    Configuration cfg = new Configuration();
    ServiceRegistryUtil.applySettings( cfg.getStandardServiceRegistryBuilder() );
    cfg.addAttributeConverter( InstantConverter.class, false );
    cfg.addAnnotatedClass( IrrelevantInstantEntity.class );
    cfg.setProperty( AvailableSettings.HBM2DDL_AUTO, "create-drop" );
    cfg.setProperty( AvailableSettings.GENERATE_STATISTICS, "true" );
    try (SessionFactory sf = cfg.buildSessionFactory()) {
        try (Session session = sf.openSession()) {
            session.beginTransaction();
            session.persist( new IrrelevantInstantEntity( 1L ) );
            session.getTransaction().commit();
        }
        sf.getStatistics().clear();

        // Load the entity in a fresh session; keep the (now detached) reference
        // for the removal below, matching the original test's flow.
        final IrrelevantInstantEntity loaded;
        try (Session session = sf.openSession()) {
            session.beginTransaction();
            loaded = session.find( IrrelevantInstantEntity.class, 1L );
            session.getTransaction().commit();
        }
        // A pure read must not have scheduled an UPDATE.
        assertEquals( 0, sf.getStatistics().getEntityUpdateCount() );

        try (Session session = sf.openSession()) {
            session.beginTransaction();
            session.remove( loaded );
            session.getTransaction().commit();
        }
    }
}
@Test
@JiraKey(value = "HHH-8866")
public void testEnumConverter() {
    // Verifies an auto-applied enum AttributeConverter end to end: boot-time
    // mapping resolution first, then runtime persist / reference load / HQL
    // enum-literal comparison / delete against a create-drop schema.
    final StandardServiceRegistry ssr = ServiceRegistryUtil.serviceRegistryBuilder()
            .applySetting( AvailableSettings.HBM2DDL_AUTO, "create-drop" )
            .build();
    try {
        MetadataImplementor metadata = (MetadataImplementor) new MetadataSources( ssr )
                .addAnnotatedClass( EntityWithConvertibleField.class )
                .getMetadataBuilder()
                .applyAttributeConverter( ConvertibleEnumConverter.class, true )
                .build();
        final JdbcTypeRegistry jdbcTypeRegistry = metadata.getTypeConfiguration()
                .getJdbcTypeRegistry();

        // first lets validate that the converter was applied...
        final PersistentClass tester = metadata.getEntityBinding( EntityWithConvertibleField.class.getName() );
        final Property nameProp = tester.getProperty( "convertibleEnum" );
        final BasicValue nameValue = (BasicValue) nameProp.getValue();
        final Type type = nameValue.getType();
        assertNotNull( type );
        assertThat( type, instanceOf( ConvertedBasicTypeImpl.class ) );
        final JdbcMapping jdbcMapping = (JdbcMapping) type;
        // Domain side stays the enum; the relational side is VARCHAR via the converter.
        assertThat( jdbcMapping.getJavaTypeDescriptor(), instanceOf( EnumJavaType.class ) );
        assertThat( jdbcMapping.getJdbcType(), is( jdbcTypeRegistry.getDescriptor( Types.VARCHAR ) ) );

        // then lets build the SF and verify its use...
        final SessionFactory sf = metadata.buildSessionFactory();
        try {
            // Persist one entity whose enum attribute goes through the converter.
            Session s = sf.openSession();
            s.getTransaction().begin();
            EntityWithConvertibleField entity = new EntityWithConvertibleField();
            entity.setId( "ID" );
            entity.setConvertibleEnum( ConvertibleEnum.VALUE );
            String entityID = entity.getId();
            s.persist( entity );
            s.getTransaction().commit();
            s.close();

            // getReference may hand back an uninitialized proxy; reading the
            // attribute forces loading through the converter.
            s = sf.openSession();
            s.beginTransaction();
            entity = s.getReference( EntityWithConvertibleField.class, entityID );
            assertEquals( ConvertibleEnum.VALUE, entity.getConvertibleEnum() );
            s.getTransaction().commit();
            s.close();

            // HQL comparison against a fully-qualified enum literal must also be
            // translated through the converter (the actual HHH-8866 scenario).
            s = sf.openSession();
            s.beginTransaction();
            s.createQuery( "FROM EntityWithConvertibleField e where e.convertibleEnum = org.hibernate.orm.test.mapping.converted.converter.AttributeConverterTest$ConvertibleEnum.VALUE" )
                    .list();
            s.getTransaction().commit();
            s.close();

            // Clean up the row.
            s = sf.openSession();
            s.beginTransaction();
            s.remove( entity );
            s.getTransaction().commit();
            s.close();
        }
        finally {
            try {
                sf.close();
            }
            catch (Exception ignore) {
                // best-effort close; a failure here must not mask the test outcome
            }
        }
    }
    finally {
        StandardServiceRegistryBuilder.destroy( ssr );
    }
}
// Entity declarations used in the test ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@Entity(name = "T1")
@SuppressWarnings("unused")
public static | BlowsUpConverter |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/StatementSwitchToExpressionSwitchTest.java | {
"start": 6715,
"end": 7590
} | class ____ {
public void foo(Suit suit) {
switch (suit) {
case HEART -> {
// Comment before first case
// Explanatory comment
System.out.println("this block cannot complete normally");
{
throw new NullPointerException();
}
}
case CLUB, SPADE, DIAMOND -> System.out.println("non-heart");
default -> System.out.println("default");
}
}
}
""")
.setArgs("-XepOpt:StatementSwitchToExpressionSwitch:EnableDirectConversion")
.setFixChooser(FixChoosers.FIRST)
.doTest(TEXT_MATCH);
refactoringHelper2
.addInputLines(
"Test.java",
"""
| Test |
java | assertj__assertj-core | assertj-core/src/test/java/org/example/test/MyProjectClassAssert.java | {
"start": 751,
"end": 1292
// Example custom AssertJ assertion for MyProjectClass (fluent, self-returning).
class ____ extends AbstractAssert<MyProjectClassAssert, MyProjectClass> {
    public MyProjectClassAssert(MyProjectClass actual) {
        super(actual, MyProjectClassAssert.class);
    }

    // Asserts that the actual object's value equals the given one.
    // Deliberately throws a checked IOException on null input — the message says
    // so itself; it exists only to exercise checked-exception handling in tests.
    public MyProjectClassAssert hasValue(Object value) throws Exception {
        if (value == null) throw new IOException("does not mean anything, it's just for the test");
        if (!Objects.equals(actual.getValue(), value)) {
            failWithMessage("Expecting value to be <%s> but was <%s>:", value, actual.getValue());
        }
        return this;
    }
} | MyProjectClassAssert
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/oracle/pl/Oracle_pl_forall_0.java | {
"start": 922,
"end": 4583
} | class ____ extends OracleTest {
public void test_0() throws Exception {
    // Round-trips a three-statement Oracle script (DROP TABLE, CTAS, and an
    // anonymous PL/SQL block using FORALL over a VARRAY) through the parser,
    // then checks schema statistics and both upper- and lower-case formatting.
    //
    // Cleanup: removed the large blocks of commented-out debug prints and
    // disabled assertions that obscured the live checks.
    String sql = "DROP TABLE employees_temp;\n" +
            "CREATE TABLE employees_temp AS SELECT * FROM employees;\n" +
            "\n" +
            "DECLARE\n" +
            " TYPE NumList IS VARRAY(20) OF NUMBER;\n" +
            " depts NumList := NumList(10, 30, 70); -- department numbers\n" +
            "BEGIN\n" +
            " FORALL i IN depts.FIRST..depts.LAST\n" +
            " DELETE FROM employees_temp\n" +
            " WHERE department_id = depts(i);\n" +
            "END;";

    List<SQLStatement> statementList = SQLUtils.parseStatements(sql, JdbcConstants.ORACLE);
    assertEquals(3, statementList.size());

    // Collect table/column usage statistics across all parsed statements.
    SchemaStatVisitor visitor = SQLUtils.createSchemaStatVisitor(JdbcConstants.ORACLE);
    for (SQLStatement statement : statementList) {
        statement.accept(visitor);
    }

    // Two distinct tables are referenced: employees and employees_temp.
    assertEquals(2, visitor.getTables().size());

    {
        // Default (upper-case keyword) formatting.
        String output = SQLUtils.toSQLString(statementList, JdbcConstants.ORACLE);
        System.out.println(output);
        assertEquals("DROP TABLE employees_temp;\n" +
                "CREATE TABLE employees_temp\n" +
                "AS\n" +
                "SELECT *\n" +
                "FROM employees;\n" +
                "DECLARE\n" +
                "\tTYPE NumList IS VARRAY(20) OF NUMBER;\n" +
                "\tdepts NumList := NumList(10, 30, 70);\n" +
                "BEGIN\n" +
                "\tFORALL i IN depts.FIRST..depts.LAST\n" +
                "\t\tDELETE FROM employees_temp\n" +
                "\t\tWHERE department_id = depts(i);\n" +
                "END;", //
                output);
    }
    {
        // Lower-case keyword formatting.
        String output = SQLUtils.toSQLString(statementList, JdbcConstants.ORACLE, SQLUtils.DEFAULT_LCASE_FORMAT_OPTION);
        assertEquals("drop table employees_temp;\n" +
                "create table employees_temp\n" +
                "as\n" +
                "select *\n" +
                "from employees;\n" +
                "declare\n" +
                "\ttype NumList is VARRAY(20) OF NUMBER;\n" +
                "\tdepts NumList := NumList(10, 30, 70);\n" +
                "begin\n" +
                "\tforall i in depts.FIRST..depts.LAST\n" +
                "\t\tdelete from employees_temp\n" +
                "\t\twhere department_id = depts(i);\n" +
                "end;", //
                output);
    }
}
}
| Oracle_pl_forall_0 |
java | elastic__elasticsearch | x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumErrorTests.java | {
"start": 803,
"end": 1381
} | class ____ extends ErrorsForCasesWithoutExamplesTestCase {
@Override
protected List<TestCaseSupplier> cases() {
    // Reuse the positive parameter sets from MvSumTests as the signature universe
    // from which invalid-type cases are derived.
    return paramsToSuppliers(MvSumTests.parameters());
}
@Override
protected Expression build(Source source, List<Expression> args) {
    // MvSum is unary: only the first argument is consumed.
    return new MvSum(source, args.get(0));
}
@Override
protected Matcher<String> expectedTypeErrorMatcher(List<Set<DataType>> validPerPosition, List<DataType> signature) {
    // Every invalid signature should produce an error stating a "numeric"
    // argument was expected, regardless of position.
    return equalTo(typeErrorMessage(false, validPerPosition, signature, (v, p) -> "numeric"));
}
}
| MvSumErrorTests |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/WebMonitorEndpoint.java | {
"start": 13802,
"end": 62663
} | class ____<T extends RestfulGateway> extends RestServerEndpoint
implements LeaderContender, JsonArchivist {
protected final GatewayRetriever<? extends T> leaderRetriever;
protected final Configuration clusterConfiguration;
protected final RestHandlerConfiguration restConfiguration;
private final GatewayRetriever<ResourceManagerGateway> resourceManagerRetriever;
private final TransientBlobService transientBlobService;
protected final ScheduledExecutorService executor;
private final ExecutionGraphCache executionGraphCache;
private final CheckpointStatsCache checkpointStatsCache;
private final Cache<JobID, CompletableFuture<CheckpointStatsSnapshot>>
checkpointStatsSnapshotCache;
private final MetricFetcher metricFetcher;
private final LeaderElection leaderElection;
private final FatalErrorHandler fatalErrorHandler;
private boolean hasWebUI = false;
private final Collection<JsonArchivist> archivingHandlers = new ArrayList<>(16);
@Nullable private ScheduledFuture<?> executionGraphCleanupTask;
/**
 * Creates the REST endpoint serving the monitoring REST API (and, when web content
 * can be loaded, the web UI).
 *
 * <p>This constructor only stores the (null-checked) collaborators and derives the
 * checkpoint caches from the REST configuration; handler registration happens later
 * in {@code initializeHandlers}.
 *
 * @param leaderRetriever retriever for the leading gateway that handlers talk to
 * @param clusterConfiguration cluster-wide Flink configuration
 * @param restConfiguration REST-specific settings (timeouts, cache sizes, web options)
 * @param resourceManagerRetriever retriever for the active ResourceManager gateway
 * @param transientBlobService blob service used by file-serving handlers
 * @param executor executor used by handlers and caches for asynchronous work
 * @param metricFetcher fetcher supplying metrics to the metric handlers
 * @param leaderElection leader election this endpoint participates in as a contender
 * @param executionGraphCache cache of execution graphs shared by job handlers
 *     (not null-checked here, unlike the other collaborators)
 * @param fatalErrorHandler handler notified of unrecoverable errors
 * @throws IOException propagated from {@code RestServerEndpoint} initialization
 * @throws ConfigurationException propagated from {@code RestServerEndpoint} initialization
 */
public WebMonitorEndpoint(
        GatewayRetriever<? extends T> leaderRetriever,
        Configuration clusterConfiguration,
        RestHandlerConfiguration restConfiguration,
        GatewayRetriever<ResourceManagerGateway> resourceManagerRetriever,
        TransientBlobService transientBlobService,
        ScheduledExecutorService executor,
        MetricFetcher metricFetcher,
        LeaderElection leaderElection,
        ExecutionGraphCache executionGraphCache,
        FatalErrorHandler fatalErrorHandler)
        throws IOException, ConfigurationException {
    super(clusterConfiguration);
    this.leaderRetriever = Preconditions.checkNotNull(leaderRetriever);
    this.clusterConfiguration = Preconditions.checkNotNull(clusterConfiguration);
    this.restConfiguration = Preconditions.checkNotNull(restConfiguration);
    this.resourceManagerRetriever = Preconditions.checkNotNull(resourceManagerRetriever);
    this.transientBlobService = Preconditions.checkNotNull(transientBlobService);
    this.executor = Preconditions.checkNotNull(executor);
    this.executionGraphCache = executionGraphCache;
    // History size bounds how many completed/failed checkpoints are retained.
    this.checkpointStatsCache =
            new CheckpointStatsCache(restConfiguration.getCheckpointHistorySize());
    // Snapshot futures are cached per job with both a size bound and a TTL,
    // so repeated REST polls do not hammer the JobMaster.
    this.checkpointStatsSnapshotCache =
            CacheBuilder.newBuilder()
                    .maximumSize(restConfiguration.getCheckpointCacheSize())
                    .expireAfterWrite(restConfiguration.getCheckpointCacheExpireAfterWrite())
                    .build();
    this.metricFetcher = metricFetcher;
    this.leaderElection = Preconditions.checkNotNull(leaderElection);
    this.fatalErrorHandler = Preconditions.checkNotNull(fatalErrorHandler);
}
private VertexThreadInfoTracker initializeThreadInfoTracker(ScheduledExecutorService executor) {
    // Builds the tracker backing the flame-graph REST endpoints: thread-info
    // samples are gathered from TaskExecutors through a coordinator, with all
    // sampling/cleanup cadence read from the REST configuration.
    final Duration rpcTimeout = clusterConfiguration.get(RpcOptions.ASK_TIMEOUT_DURATION);
    final Duration cleanUpInterval =
            clusterConfiguration.get(RestOptions.FLAMEGRAPH_CLEANUP_INTERVAL);

    final ThreadInfoRequestCoordinator coordinator =
            new ThreadInfoRequestCoordinator(executor, rpcTimeout);

    return VertexThreadInfoTrackerBuilder.newBuilder(
                    resourceManagerRetriever, executor, restConfiguration.getTimeout())
            .setCoordinator(coordinator)
            .setCleanUpInterval(cleanUpInterval)
            .setNumSamples(clusterConfiguration.get(RestOptions.FLAMEGRAPH_NUM_SAMPLES))
            .setStatsRefreshInterval(
                    clusterConfiguration.get(RestOptions.FLAMEGRAPH_REFRESH_INTERVAL))
            .setDelayBetweenSamples(clusterConfiguration.get(RestOptions.FLAMEGRAPH_DELAY))
            .setMaxThreadInfoDepth(
                    clusterConfiguration.get(RestOptions.FLAMEGRAPH_STACK_TRACE_DEPTH))
            .build();
}
@Override
protected List<Tuple2<RestHandlerSpecification, ChannelInboundHandler>> initializeHandlers(
final CompletableFuture<String> localAddressFuture) {
ArrayList<Tuple2<RestHandlerSpecification, ChannelInboundHandler>> handlers =
new ArrayList<>(30);
final Collection<Tuple2<RestHandlerSpecification, ChannelInboundHandler>>
webSubmissionHandlers = initializeWebSubmissionHandlers(localAddressFuture);
handlers.addAll(webSubmissionHandlers);
final boolean hasWebSubmissionHandlers = !webSubmissionHandlers.isEmpty();
final Duration asyncOperationStoreDuration =
clusterConfiguration.get(RestOptions.ASYNC_OPERATION_STORE_DURATION);
final Duration timeout = restConfiguration.getTimeout();
ClusterOverviewHandler clusterOverviewHandler =
new ClusterOverviewHandler(
leaderRetriever,
timeout,
responseHeaders,
ClusterOverviewHeaders.getInstance());
DashboardConfigHandler dashboardConfigHandler =
new DashboardConfigHandler(
leaderRetriever,
timeout,
responseHeaders,
DashboardConfigurationHeaders.getInstance(),
restConfiguration.getRefreshInterval(),
hasWebSubmissionHandlers,
restConfiguration.isWebCancelEnabled(),
restConfiguration.isWebRescaleEnabled());
JobIdsHandler jobIdsHandler =
new JobIdsHandler(
leaderRetriever,
timeout,
responseHeaders,
JobIdsWithStatusesOverviewHeaders.getInstance());
JobStatusHandler jobStatusHandler =
new JobStatusHandler(
leaderRetriever,
timeout,
responseHeaders,
JobStatusInfoHeaders.getInstance());
JobsOverviewHandler jobsOverviewHandler =
new JobsOverviewHandler(
leaderRetriever,
timeout,
responseHeaders,
JobsOverviewHeaders.getInstance());
ClusterConfigHandler clusterConfigurationHandler =
new ClusterConfigHandler(
leaderRetriever,
timeout,
responseHeaders,
ClusterConfigurationInfoHeaders.getInstance(),
clusterConfiguration);
JobManagerEnvironmentHandler jobManagerEnvironmentHandler =
new JobManagerEnvironmentHandler(
leaderRetriever,
timeout,
responseHeaders,
JobManagerEnvironmentHeaders.getInstance());
JobManagerJobEnvironmentHandler jobManagerJobEnvironmentHandler =
new JobManagerJobEnvironmentHandler(
leaderRetriever,
timeout,
responseHeaders,
JobManagerJobEnvironmentHeaders.getInstance());
JobConfigHandler jobConfigHandler =
new JobConfigHandler(
leaderRetriever,
timeout,
responseHeaders,
JobConfigHeaders.getInstance(),
executionGraphCache,
executor);
JobManagerJobConfigurationHandler jobManagerJobConfigurationHandler =
new JobManagerJobConfigurationHandler(
leaderRetriever,
timeout,
responseHeaders,
JobManagerJobConfigurationHeaders.getInstance(),
clusterConfiguration);
CheckpointConfigHandler checkpointConfigHandler =
new CheckpointConfigHandler(
leaderRetriever,
timeout,
responseHeaders,
CheckpointConfigHeaders.getInstance(),
executionGraphCache,
executor);
CheckpointingStatisticsHandler checkpointStatisticsHandler =
new CheckpointingStatisticsHandler(
leaderRetriever,
timeout,
responseHeaders,
CheckpointingStatisticsHeaders.getInstance(),
checkpointStatsSnapshotCache,
executor);
CheckpointStatisticDetailsHandler checkpointStatisticDetailsHandler =
new CheckpointStatisticDetailsHandler(
leaderRetriever,
timeout,
responseHeaders,
CheckpointStatisticDetailsHeaders.getInstance(),
executor,
checkpointStatsSnapshotCache,
checkpointStatsCache);
JobPlanHandler jobPlanHandler =
new JobPlanHandler(
leaderRetriever,
timeout,
responseHeaders,
JobPlanHeaders.getInstance(),
executionGraphCache,
executor);
TaskCheckpointStatisticDetailsHandler taskCheckpointStatisticDetailsHandler =
new TaskCheckpointStatisticDetailsHandler(
leaderRetriever,
timeout,
responseHeaders,
TaskCheckpointStatisticsHeaders.getInstance(),
executor,
checkpointStatsSnapshotCache,
checkpointStatsCache);
JobExceptionsHandler jobExceptionsHandler =
new JobExceptionsHandler(
leaderRetriever,
timeout,
responseHeaders,
JobExceptionsHeaders.getInstance(),
executionGraphCache,
executor);
JobVertexAccumulatorsHandler jobVertexAccumulatorsHandler =
new JobVertexAccumulatorsHandler(
leaderRetriever,
timeout,
responseHeaders,
JobVertexAccumulatorsHeaders.getInstance(),
executionGraphCache,
executor);
SubtasksAllAccumulatorsHandler subtasksAllAccumulatorsHandler =
new SubtasksAllAccumulatorsHandler(
leaderRetriever,
timeout,
responseHeaders,
SubtasksAllAccumulatorsHeaders.getInstance(),
executionGraphCache,
executor);
TaskManagersHandler taskManagersHandler =
new TaskManagersHandler(
leaderRetriever,
timeout,
responseHeaders,
TaskManagersHeaders.getInstance(),
resourceManagerRetriever);
TaskManagerDetailsHandler taskManagerDetailsHandler =
new TaskManagerDetailsHandler(
leaderRetriever,
timeout,
responseHeaders,
TaskManagerDetailsHeaders.getInstance(),
resourceManagerRetriever,
metricFetcher);
final JobDetailsHandler jobDetailsHandler =
new JobDetailsHandler(
leaderRetriever,
timeout,
responseHeaders,
JobDetailsHeaders.getInstance(),
executionGraphCache,
executor,
metricFetcher);
JobAccumulatorsHandler jobAccumulatorsHandler =
new JobAccumulatorsHandler(
leaderRetriever,
timeout,
responseHeaders,
JobAccumulatorsHeaders.getInstance(),
executionGraphCache,
executor);
SubtasksTimesHandler subtasksTimesHandler =
new SubtasksTimesHandler(
leaderRetriever,
timeout,
responseHeaders,
SubtasksTimesHeaders.getInstance(),
executionGraphCache,
executor);
final JobVertexMetricsHandler jobVertexMetricsHandler =
new JobVertexMetricsHandler(
leaderRetriever, timeout, responseHeaders, metricFetcher);
final JobVertexWatermarksHandler jobVertexWatermarksHandler =
new JobVertexWatermarksHandler(
leaderRetriever,
timeout,
responseHeaders,
metricFetcher,
executionGraphCache,
executor);
final JobMetricsHandler jobMetricsHandler =
new JobMetricsHandler(leaderRetriever, timeout, responseHeaders, metricFetcher);
final SubtaskMetricsHandler subtaskMetricsHandler =
new SubtaskMetricsHandler(leaderRetriever, timeout, responseHeaders, metricFetcher);
final TaskManagerMetricsHandler taskManagerMetricsHandler =
new TaskManagerMetricsHandler(
leaderRetriever, timeout, responseHeaders, metricFetcher);
final JobManagerOperatorMetricsHandler jobManagerOperatorMetricsHandler =
new JobManagerOperatorMetricsHandler(
leaderRetriever, timeout, responseHeaders, metricFetcher);
final JobManagerMetricsHandler jobManagerMetricsHandler =
new JobManagerMetricsHandler(
leaderRetriever, timeout, responseHeaders, metricFetcher);
final AggregatingTaskManagersMetricsHandler aggregatingTaskManagersMetricsHandler =
new AggregatingTaskManagersMetricsHandler(
leaderRetriever, timeout, responseHeaders, executor, metricFetcher);
final AggregatingJobsMetricsHandler aggregatingJobsMetricsHandler =
new AggregatingJobsMetricsHandler(
leaderRetriever, timeout, responseHeaders, executor, metricFetcher);
final AggregatingSubtasksMetricsHandler aggregatingSubtasksMetricsHandler =
new AggregatingSubtasksMetricsHandler(
leaderRetriever, timeout, responseHeaders, executor, metricFetcher);
final JobVertexTaskManagersHandler jobVertexTaskManagersHandler =
new JobVertexTaskManagersHandler(
leaderRetriever,
timeout,
responseHeaders,
JobVertexTaskManagersHeaders.getInstance(),
executionGraphCache,
executor,
metricFetcher);
final JobExecutionResultHandler jobExecutionResultHandler =
new JobExecutionResultHandler(leaderRetriever, timeout, responseHeaders);
final String defaultSavepointDir =
clusterConfiguration.get(CheckpointingOptions.SAVEPOINT_DIRECTORY);
final SavepointHandlers savepointHandlers = new SavepointHandlers(defaultSavepointDir);
final SavepointHandlers.StopWithSavepointHandler stopWithSavepointHandler =
savepointHandlers
.new StopWithSavepointHandler(leaderRetriever, timeout, responseHeaders);
final SavepointHandlers.SavepointTriggerHandler savepointTriggerHandler =
savepointHandlers
.new SavepointTriggerHandler(leaderRetriever, timeout, responseHeaders);
final SavepointHandlers.SavepointStatusHandler savepointStatusHandler =
new SavepointHandlers.SavepointStatusHandler(
leaderRetriever, timeout, responseHeaders);
final CheckpointHandlers.CheckpointTriggerHandler checkpointTriggerHandler =
new CheckpointHandlers.CheckpointTriggerHandler(
leaderRetriever, timeout, responseHeaders);
final CheckpointHandlers.CheckpointStatusHandler checkpointStatusHandler =
new CheckpointHandlers.CheckpointStatusHandler(
leaderRetriever, timeout, responseHeaders);
final SubtaskExecutionAttemptDetailsHandler subtaskExecutionAttemptDetailsHandler =
new SubtaskExecutionAttemptDetailsHandler(
leaderRetriever,
timeout,
responseHeaders,
SubtaskExecutionAttemptDetailsHeaders.getInstance(),
executionGraphCache,
executor,
metricFetcher);
final SubtaskExecutionAttemptAccumulatorsHandler
subtaskExecutionAttemptAccumulatorsHandler =
new SubtaskExecutionAttemptAccumulatorsHandler(
leaderRetriever,
timeout,
responseHeaders,
SubtaskExecutionAttemptAccumulatorsHeaders.getInstance(),
executionGraphCache,
executor);
final SubtaskCurrentAttemptDetailsHandler subtaskCurrentAttemptDetailsHandler =
new SubtaskCurrentAttemptDetailsHandler(
leaderRetriever,
timeout,
responseHeaders,
SubtaskCurrentAttemptDetailsHeaders.getInstance(),
executionGraphCache,
executor,
metricFetcher);
final RescalingHandlers rescalingHandlers =
new RescalingHandlers(asyncOperationStoreDuration);
final RescalingHandlers.RescalingTriggerHandler rescalingTriggerHandler =
rescalingHandlers
.new RescalingTriggerHandler(leaderRetriever, timeout, responseHeaders);
final RescalingHandlers.RescalingStatusHandler rescalingStatusHandler =
rescalingHandlers
.new RescalingStatusHandler(leaderRetriever, timeout, responseHeaders);
final JobVertexBackPressureHandler jobVertexBackPressureHandler =
new JobVertexBackPressureHandler(
leaderRetriever,
timeout,
responseHeaders,
JobVertexBackPressureHeaders.getInstance(),
metricFetcher);
final JobCancellationHandler jobCancelTerminationHandler =
new JobCancellationHandler(
leaderRetriever,
timeout,
responseHeaders,
JobCancellationHeaders.getInstance(),
TerminationModeQueryParameter.TerminationMode.CANCEL);
// use a separate handler for the yarn-cancel to ensure close() is only called once
final JobCancellationHandler yarnJobCancelTerminationHandler =
new JobCancellationHandler(
leaderRetriever,
timeout,
responseHeaders,
JobCancellationHeaders.getInstance(),
TerminationModeQueryParameter.TerminationMode.CANCEL);
// this is kept just for legacy reasons. STOP has been replaced by STOP-WITH-SAVEPOINT.
final JobCancellationHandler jobStopTerminationHandler =
new JobCancellationHandler(
leaderRetriever,
timeout,
responseHeaders,
JobCancellationHeaders.getInstance(),
TerminationModeQueryParameter.TerminationMode.STOP);
final JobVertexDetailsHandler jobVertexDetailsHandler =
new JobVertexDetailsHandler(
leaderRetriever,
timeout,
responseHeaders,
JobVertexDetailsHeaders.getInstance(),
executionGraphCache,
executor,
metricFetcher);
final GeneratedLogUrlHandler jobManagerLogUrlHandler =
new GeneratedLogUrlHandler(
localAddressFuture.thenApply(url -> url + "/#/job-manager/logs"));
final GeneratedLogUrlHandler taskManagerLogUrlHandler =
new GeneratedLogUrlHandler(
localAddressFuture.thenApply(url -> url + "/#/task-manager/<tmid>/logs"));
final SavepointDisposalHandlers savepointDisposalHandlers =
new SavepointDisposalHandlers(asyncOperationStoreDuration);
final SavepointDisposalHandlers.SavepointDisposalTriggerHandler
savepointDisposalTriggerHandler =
savepointDisposalHandlers
.new SavepointDisposalTriggerHandler(
leaderRetriever, timeout, responseHeaders);
final SavepointDisposalHandlers.SavepointDisposalStatusHandler
savepointDisposalStatusHandler =
savepointDisposalHandlers
.new SavepointDisposalStatusHandler(
leaderRetriever, timeout, responseHeaders);
final ClusterDataSetListHandler clusterDataSetListHandler =
new ClusterDataSetListHandler(
leaderRetriever, timeout, responseHeaders, resourceManagerRetriever);
final ClusterDataSetDeleteHandlers clusterDataSetDeleteHandlers =
new ClusterDataSetDeleteHandlers(asyncOperationStoreDuration);
final ClusterDataSetDeleteHandlers.ClusterDataSetDeleteTriggerHandler
clusterDataSetDeleteTriggerHandler =
clusterDataSetDeleteHandlers
.new ClusterDataSetDeleteTriggerHandler(
leaderRetriever,
timeout,
responseHeaders,
resourceManagerRetriever);
final ClusterDataSetDeleteHandlers.ClusterDataSetDeleteStatusHandler
clusterDataSetDeleteStatusHandler =
clusterDataSetDeleteHandlers
.new ClusterDataSetDeleteStatusHandler(
leaderRetriever, timeout, responseHeaders);
final ClientCoordinationHandler clientCoordinationHandler =
new ClientCoordinationHandler(
leaderRetriever,
timeout,
responseHeaders,
ClientCoordinationHeaders.getInstance());
final ShutdownHandler shutdownHandler =
new ShutdownHandler(
leaderRetriever, timeout, responseHeaders, ShutdownHeaders.getInstance());
final JobClientHeartbeatHandler jobClientHeartbeatHandler =
new JobClientHeartbeatHandler(
leaderRetriever,
timeout,
responseHeaders,
JobClientHeartbeatHeaders.getInstance());
final File webUiDir = restConfiguration.getWebUiDir();
Optional<StaticFileServerHandler<T>> optWebContent;
try {
optWebContent = WebMonitorUtils.tryLoadWebContent(leaderRetriever, timeout, webUiDir);
} catch (IOException e) {
log.warn("Could not load web content handler.", e);
optWebContent = Optional.empty();
}
handlers.add(Tuple2.of(clusterOverviewHandler.getMessageHeaders(), clusterOverviewHandler));
handlers.add(
Tuple2.of(
clusterConfigurationHandler.getMessageHeaders(),
clusterConfigurationHandler));
handlers.add(
Tuple2.of(
jobManagerEnvironmentHandler.getMessageHeaders(),
jobManagerEnvironmentHandler));
handlers.add(
Tuple2.of(
jobManagerJobEnvironmentHandler.getMessageHeaders(),
jobManagerJobEnvironmentHandler));
handlers.add(Tuple2.of(dashboardConfigHandler.getMessageHeaders(), dashboardConfigHandler));
handlers.add(Tuple2.of(jobIdsHandler.getMessageHeaders(), jobIdsHandler));
handlers.add(Tuple2.of(jobStatusHandler.getMessageHeaders(), jobStatusHandler));
handlers.add(Tuple2.of(jobsOverviewHandler.getMessageHeaders(), jobsOverviewHandler));
handlers.add(Tuple2.of(jobConfigHandler.getMessageHeaders(), jobConfigHandler));
handlers.add(
Tuple2.of(checkpointConfigHandler.getMessageHeaders(), checkpointConfigHandler));
handlers.add(
Tuple2.of(
checkpointStatisticsHandler.getMessageHeaders(),
checkpointStatisticsHandler));
handlers.add(
Tuple2.of(
checkpointStatisticDetailsHandler.getMessageHeaders(),
checkpointStatisticDetailsHandler));
handlers.add(Tuple2.of(jobPlanHandler.getMessageHeaders(), jobPlanHandler));
handlers.add(
Tuple2.of(
taskCheckpointStatisticDetailsHandler.getMessageHeaders(),
taskCheckpointStatisticDetailsHandler));
handlers.add(Tuple2.of(jobExceptionsHandler.getMessageHeaders(), jobExceptionsHandler));
handlers.add(
Tuple2.of(
jobVertexAccumulatorsHandler.getMessageHeaders(),
jobVertexAccumulatorsHandler));
handlers.add(
Tuple2.of(
subtasksAllAccumulatorsHandler.getMessageHeaders(),
subtasksAllAccumulatorsHandler));
handlers.add(Tuple2.of(jobDetailsHandler.getMessageHeaders(), jobDetailsHandler));
handlers.add(Tuple2.of(jobAccumulatorsHandler.getMessageHeaders(), jobAccumulatorsHandler));
handlers.add(Tuple2.of(taskManagersHandler.getMessageHeaders(), taskManagersHandler));
handlers.add(
Tuple2.of(
taskManagerDetailsHandler.getMessageHeaders(), taskManagerDetailsHandler));
handlers.add(Tuple2.of(subtasksTimesHandler.getMessageHeaders(), subtasksTimesHandler));
handlers.add(
Tuple2.of(jobVertexMetricsHandler.getMessageHeaders(), jobVertexMetricsHandler));
handlers.add(
Tuple2.of(
jobVertexWatermarksHandler.getMessageHeaders(),
jobVertexWatermarksHandler));
handlers.add(Tuple2.of(jobMetricsHandler.getMessageHeaders(), jobMetricsHandler));
handlers.add(Tuple2.of(subtaskMetricsHandler.getMessageHeaders(), subtaskMetricsHandler));
handlers.add(
Tuple2.of(
taskManagerMetricsHandler.getMessageHeaders(), taskManagerMetricsHandler));
handlers.add(
Tuple2.of(jobManagerMetricsHandler.getMessageHeaders(), jobManagerMetricsHandler));
handlers.add(
Tuple2.of(
jobManagerOperatorMetricsHandler.getMessageHeaders(),
jobManagerOperatorMetricsHandler));
handlers.add(
Tuple2.of(
aggregatingTaskManagersMetricsHandler.getMessageHeaders(),
aggregatingTaskManagersMetricsHandler));
handlers.add(
Tuple2.of(
aggregatingJobsMetricsHandler.getMessageHeaders(),
aggregatingJobsMetricsHandler));
handlers.add(
Tuple2.of(
aggregatingSubtasksMetricsHandler.getMessageHeaders(),
aggregatingSubtasksMetricsHandler));
handlers.add(
Tuple2.of(
jobExecutionResultHandler.getMessageHeaders(), jobExecutionResultHandler));
handlers.add(
Tuple2.of(savepointTriggerHandler.getMessageHeaders(), savepointTriggerHandler));
handlers.add(
Tuple2.of(stopWithSavepointHandler.getMessageHeaders(), stopWithSavepointHandler));
handlers.add(Tuple2.of(savepointStatusHandler.getMessageHeaders(), savepointStatusHandler));
handlers.add(
Tuple2.of(checkpointTriggerHandler.getMessageHeaders(), checkpointTriggerHandler));
handlers.add(
Tuple2.of(checkpointStatusHandler.getMessageHeaders(), checkpointStatusHandler));
handlers.add(
Tuple2.of(
subtaskExecutionAttemptDetailsHandler.getMessageHeaders(),
subtaskExecutionAttemptDetailsHandler));
handlers.add(
Tuple2.of(
subtaskExecutionAttemptAccumulatorsHandler.getMessageHeaders(),
subtaskExecutionAttemptAccumulatorsHandler));
handlers.add(
Tuple2.of(
subtaskCurrentAttemptDetailsHandler.getMessageHeaders(),
subtaskCurrentAttemptDetailsHandler));
handlers.add(
Tuple2.of(
jobVertexTaskManagersHandler.getMessageHeaders(),
jobVertexTaskManagersHandler));
handlers.add(
Tuple2.of(
jobVertexBackPressureHandler.getMessageHeaders(),
jobVertexBackPressureHandler));
handlers.add(
Tuple2.of(
jobManagerJobConfigurationHandler.getMessageHeaders(),
jobManagerJobConfigurationHandler));
handlers.add(Tuple2.of(JobManagerLogUrlHeaders.getInstance(), jobManagerLogUrlHandler));
handlers.add(Tuple2.of(TaskManagerLogUrlHeaders.getInstance(), taskManagerLogUrlHandler));
final AbstractRestHandler<?, ?, ?, ?> jobVertexFlameGraphHandler;
if (clusterConfiguration.get(RestOptions.ENABLE_FLAMEGRAPH)) {
jobVertexFlameGraphHandler =
new JobVertexFlameGraphHandler(
leaderRetriever,
timeout,
responseHeaders,
executionGraphCache,
executor,
initializeThreadInfoTracker(executor));
} else {
jobVertexFlameGraphHandler =
JobVertexFlameGraphHandler.disabledHandler(
leaderRetriever, timeout, responseHeaders);
}
handlers.add(
Tuple2.of(
jobVertexFlameGraphHandler.getMessageHeaders(),
jobVertexFlameGraphHandler));
handlers.add(
Tuple2.of(
jobCancelTerminationHandler.getMessageHeaders(),
jobCancelTerminationHandler));
handlers.add(
Tuple2.of(jobVertexDetailsHandler.getMessageHeaders(), jobVertexDetailsHandler));
handlers.add(
Tuple2.of(rescalingTriggerHandler.getMessageHeaders(), rescalingTriggerHandler));
handlers.add(Tuple2.of(rescalingStatusHandler.getMessageHeaders(), rescalingStatusHandler));
handlers.add(
Tuple2.of(
savepointDisposalTriggerHandler.getMessageHeaders(),
savepointDisposalTriggerHandler));
handlers.add(
Tuple2.of(
savepointDisposalStatusHandler.getMessageHeaders(),
savepointDisposalStatusHandler));
handlers.add(
Tuple2.of(
clusterDataSetListHandler.getMessageHeaders(), clusterDataSetListHandler));
handlers.add(
Tuple2.of(
clusterDataSetDeleteTriggerHandler.getMessageHeaders(),
clusterDataSetDeleteTriggerHandler));
handlers.add(
Tuple2.of(
clusterDataSetDeleteStatusHandler.getMessageHeaders(),
clusterDataSetDeleteStatusHandler));
handlers.add(
Tuple2.of(
clientCoordinationHandler.getMessageHeaders(), clientCoordinationHandler));
// TODO: Remove once the Yarn proxy can forward all REST verbs
handlers.add(
Tuple2.of(
YarnCancelJobTerminationHeaders.getInstance(),
yarnJobCancelTerminationHandler));
handlers.add(
Tuple2.of(YarnStopJobTerminationHeaders.getInstance(), jobStopTerminationHandler));
handlers.add(Tuple2.of(shutdownHandler.getMessageHeaders(), shutdownHandler));
handlers.add(
Tuple2.of(
jobClientHeartbeatHandler.getMessageHeaders(), jobClientHeartbeatHandler));
optWebContent.ifPresent(
webContent -> {
handlers.add(
Tuple2.of(WebContentHandlerSpecification.getInstance(), webContent));
hasWebUI = true;
});
// load the log and stdout file handler for the main cluster component
final WebMonitorUtils.LogFileLocation logFileLocation =
WebMonitorUtils.LogFileLocation.find(clusterConfiguration);
final JobManagerLogFileHandler jobManagerLogFileHandler =
new JobManagerLogFileHandler(
leaderRetriever,
timeout,
responseHeaders,
JobManagerLogFileHeader.getInstance(),
logFileLocation.logFile);
final JobManagerLogFileHandler jobManagerStdoutFileHandler =
new JobManagerLogFileHandler(
leaderRetriever,
timeout,
responseHeaders,
JobManagerStdoutFileHeader.getInstance(),
logFileLocation.stdOutFile);
final JobManagerCustomLogHandler jobManagerCustomLogHandler =
new JobManagerCustomLogHandler(
leaderRetriever,
timeout,
responseHeaders,
JobManagerCustomLogHeaders.getInstance(),
logFileLocation.logDir);
final JobManagerLogListHandler jobManagerLogListHandler =
new JobManagerLogListHandler(
leaderRetriever,
timeout,
responseHeaders,
JobManagerLogListHeaders.getInstance(),
logFileLocation.logDir);
final JobManagerThreadDumpHandler jobManagerThreadDumpHandler =
new JobManagerThreadDumpHandler(
leaderRetriever,
timeout,
responseHeaders,
JobManagerThreadDumpHeaders.getInstance());
handlers.add(Tuple2.of(JobManagerLogFileHeader.getInstance(), jobManagerLogFileHandler));
handlers.add(
Tuple2.of(JobManagerStdoutFileHeader.getInstance(), jobManagerStdoutFileHandler));
handlers.add(
Tuple2.of(JobManagerCustomLogHeaders.getInstance(), jobManagerCustomLogHandler));
handlers.add(Tuple2.of(JobManagerLogListHeaders.getInstance(), jobManagerLogListHandler));
handlers.add(
Tuple2.of(JobManagerThreadDumpHeaders.getInstance(), jobManagerThreadDumpHandler));
final Duration cacheEntryDuration =
Duration.ofMillis(restConfiguration.getRefreshInterval());
// load profiler relative handlers
if (clusterConfiguration.get(RestOptions.ENABLE_PROFILER)) {
final JobManagerProfilingHandler jobManagerProfilingHandler =
new JobManagerProfilingHandler(
leaderRetriever,
timeout,
responseHeaders,
JobManagerProfilingHeaders.getInstance(),
clusterConfiguration);
final JobManagerProfilingListHandler jobManagerProfilingListHandler =
new JobManagerProfilingListHandler(
leaderRetriever,
timeout,
responseHeaders,
JobManagerProfilingListHeaders.getInstance(),
clusterConfiguration);
final JobManagerProfilingFileHandler jobManagerProfilingFileHandler =
new JobManagerProfilingFileHandler(
leaderRetriever,
timeout,
responseHeaders,
JobManagerProfilingFileHeaders.getInstance(),
clusterConfiguration);
final TaskManagerProfilingHandler taskManagerProfilingHandler =
new TaskManagerProfilingHandler(
leaderRetriever,
timeout,
responseHeaders,
TaskManagerProfilingHeaders.getInstance(),
resourceManagerRetriever,
clusterConfiguration);
final TaskManagerProfilingListHandler taskManagerProfilingListHandler =
new TaskManagerProfilingListHandler(
leaderRetriever,
timeout,
responseHeaders,
TaskManagerProfilingListHeaders.getInstance(),
resourceManagerRetriever);
final TaskManagerProfilingFileHandler taskManagerProfilingFileHandler =
new TaskManagerProfilingFileHandler(
leaderRetriever,
timeout,
responseHeaders,
TaskManagerProfilingFileHeaders.getInstance(),
resourceManagerRetriever,
transientBlobService,
cacheEntryDuration);
handlers.add(
Tuple2.of(
JobManagerProfilingHeaders.getInstance(), jobManagerProfilingHandler));
handlers.add(
Tuple2.of(
JobManagerProfilingListHeaders.getInstance(),
jobManagerProfilingListHandler));
handlers.add(
Tuple2.of(
JobManagerProfilingFileHeaders.getInstance(),
jobManagerProfilingFileHandler));
handlers.add(
Tuple2.of(
TaskManagerProfilingHeaders.getInstance(),
taskManagerProfilingHandler));
handlers.add(
Tuple2.of(
TaskManagerProfilingListHeaders.getInstance(),
taskManagerProfilingListHandler));
handlers.add(
Tuple2.of(
TaskManagerProfilingFileHeaders.getInstance(),
taskManagerProfilingFileHandler));
}
// TaskManager log and stdout file handler
final TaskManagerLogFileHandler taskManagerLogFileHandler =
new TaskManagerLogFileHandler(
leaderRetriever,
timeout,
responseHeaders,
TaskManagerLogFileHeaders.getInstance(),
resourceManagerRetriever,
transientBlobService,
cacheEntryDuration);
final TaskManagerStdoutFileHandler taskManagerStdoutFileHandler =
new TaskManagerStdoutFileHandler(
leaderRetriever,
timeout,
responseHeaders,
TaskManagerStdoutFileHeaders.getInstance(),
resourceManagerRetriever,
transientBlobService,
cacheEntryDuration);
final TaskManagerCustomLogHandler taskManagerCustomLogHandler =
new TaskManagerCustomLogHandler(
leaderRetriever,
timeout,
responseHeaders,
TaskManagerCustomLogHeaders.getInstance(),
resourceManagerRetriever,
transientBlobService,
cacheEntryDuration);
final TaskManagerLogListHandler taskManagerLogListHandler =
new TaskManagerLogListHandler(
leaderRetriever,
timeout,
responseHeaders,
TaskManagerLogsHeaders.getInstance(),
resourceManagerRetriever);
final TaskManagerThreadDumpHandler taskManagerThreadDumpFileHandler =
new TaskManagerThreadDumpHandler(
leaderRetriever,
timeout,
responseHeaders,
TaskManagerThreadDumpHeaders.getInstance(),
resourceManagerRetriever);
handlers.add(Tuple2.of(TaskManagerLogFileHeaders.getInstance(), taskManagerLogFileHandler));
handlers.add(
Tuple2.of(
TaskManagerStdoutFileHeaders.getInstance(), taskManagerStdoutFileHandler));
handlers.add(
Tuple2.of(TaskManagerCustomLogHeaders.getInstance(), taskManagerCustomLogHandler));
handlers.add(Tuple2.of(TaskManagerLogsHeaders.getInstance(), taskManagerLogListHandler));
handlers.add(
Tuple2.of(
TaskManagerThreadDumpHeaders.getInstance(),
taskManagerThreadDumpFileHandler));
final JobResourceRequirementsHandler jobResourceRequirementsHandler =
new JobResourceRequirementsHandler(leaderRetriever, timeout, responseHeaders);
final JobResourceRequirementsUpdateHandler jobResourceRequirementsUpdateHandler =
new JobResourceRequirementsUpdateHandler(leaderRetriever, timeout, responseHeaders);
handlers.add(
Tuple2.of(
jobResourceRequirementsHandler.getMessageHeaders(),
jobResourceRequirementsHandler));
handlers.add(
Tuple2.of(
jobResourceRequirementsUpdateHandler.getMessageHeaders(),
jobResourceRequirementsUpdateHandler));
handlers.stream()
.map(tuple -> tuple.f1)
.filter(handler -> handler instanceof JsonArchivist)
.forEachOrdered(handler -> archivingHandlers.add((JsonArchivist) handler));
return handlers;
}
protected Collection<Tuple2<RestHandlerSpecification, ChannelInboundHandler>>
initializeWebSubmissionHandlers(final CompletableFuture<String> localAddressFuture) {
return Collections.emptyList();
}
@Override
public void startInternal() throws Exception {
leaderElection.startLeaderElection(this);
startExecutionGraphCacheCleanupTask();
if (hasWebUI) {
log.info("Web frontend listening at {}.", getRestBaseUrl());
}
}
private void startExecutionGraphCacheCleanupTask() {
final long cleanupInterval = 2 * restConfiguration.getRefreshInterval();
executionGraphCleanupTask =
executor.scheduleWithFixedDelay(
executionGraphCache::cleanup,
cleanupInterval,
cleanupInterval,
TimeUnit.MILLISECONDS);
}
@Override
protected CompletableFuture<Void> shutDownInternal() {
if (executionGraphCleanupTask != null) {
executionGraphCleanupTask.cancel(false);
}
executionGraphCache.close();
final CompletableFuture<Void> shutdownFuture =
FutureUtils.runAfterwards(
super.shutDownInternal(),
() -> ExecutorUtils.gracefulShutdown(10, TimeUnit.SECONDS, executor));
final File webUiDir = restConfiguration.getWebUiDir();
return FutureUtils.runAfterwardsAsync(
shutdownFuture,
() -> {
Exception exception = null;
try {
log.info("Removing cache directory {}", webUiDir);
FileUtils.deleteDirectory(webUiDir);
} catch (Exception e) {
exception = e;
}
try {
if (leaderElection != null) {
leaderElection.close();
}
} catch (Exception e) {
exception = ExceptionUtils.firstOrSuppressed(e, exception);
}
if (exception != null) {
throw exception;
}
});
}
// -------------------------------------------------------------------------
// LeaderContender
// -------------------------------------------------------------------------
@Override
public void grantLeadership(final UUID leaderSessionID) {
log.info(
"{} was granted leadership with leaderSessionID={}",
getRestBaseUrl(),
leaderSessionID);
leaderElection.confirmLeadershipAsync(leaderSessionID, getRestBaseUrl());
}
@Override
public void revokeLeadership() {
log.info("{} lost leadership", getRestBaseUrl());
}
@Override
public void handleError(final Exception exception) {
fatalErrorHandler.onFatalError(exception);
}
@Override
public Collection<ArchivedJson> archiveJsonWithPath(ExecutionGraphInfo executionGraphInfo)
throws IOException {
Collection<ArchivedJson> archivedJson = new ArrayList<>(archivingHandlers.size());
for (JsonArchivist archivist : archivingHandlers) {
Collection<ArchivedJson> subArchive = archivist.archiveJsonWithPath(executionGraphInfo);
archivedJson.addAll(subArchive);
}
return archivedJson;
}
public static ScheduledExecutorService createExecutorService(
int numThreads, int threadPriority, String componentName) {
if (threadPriority < Thread.MIN_PRIORITY || threadPriority > Thread.MAX_PRIORITY) {
throw new IllegalArgumentException(
String.format(
"The thread priority must be within (%s, %s) but it was %s.",
Thread.MIN_PRIORITY, Thread.MAX_PRIORITY, threadPriority));
}
return Executors.newScheduledThreadPool(
numThreads,
new ExecutorThreadFactory.Builder()
.setThreadPriority(threadPriority)
.setPoolName("Flink-" + componentName)
.build());
}
}
| WebMonitorEndpoint |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java | {
"start": 44823,
"end": 45790
} | class ____ implements FileSystemAccess.FileSystemExecutor<Void> {
private Path path;
private long mTime;
private long aTime;
/**
* Creates a set-times executor.
*
* @param path path to set the times.
* @param mTime modified time to set.
* @param aTime access time to set.
*/
public FSSetTimes(String path, long mTime, long aTime) {
this.path = new Path(path);
this.mTime = mTime;
this.aTime = aTime;
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return void.
*
* @throws IOException thrown if an IO error occurred.
*/
@Override
public Void execute(FileSystem fs) throws IOException {
fs.setTimes(path, mTime, aTime);
return null;
}
}
/**
* Executor that performs a setxattr FileSystemAccess files system operation.
*/
@InterfaceAudience.Private
public static | FSSetTimes |
java | apache__kafka | server-common/src/main/java/org/apache/kafka/server/util/json/DecodeJson.java | {
"start": 1114,
"end": 1588
} | interface ____<T> {
/**
* Decode the JSON node provided into an instance of `T`.
*
* @throws JsonMappingException if `node` cannot be decoded into `T`.
*/
T decode(JsonNode node) throws JsonMappingException;
static JsonMappingException throwJsonMappingException(String expectedType, JsonNode node) {
return new JsonMappingException(null, String.format("Expected `%s` value, received %s", expectedType, node));
}
final | DecodeJson |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/MlDenseEmbeddingResultsTests.java | {
"start": 640,
"end": 2323
} | class ____ extends InferenceResultsTestCase<MlDenseEmbeddingResults> {
public static MlDenseEmbeddingResults createRandomResults() {
int columns = randomIntBetween(1, 10);
double[] arr = new double[columns];
for (int i = 0; i < columns; i++) {
arr[i] = randomDouble();
}
return new MlDenseEmbeddingResults(DEFAULT_RESULTS_FIELD, arr, randomBoolean());
}
@Override
protected Writeable.Reader<MlDenseEmbeddingResults> instanceReader() {
return MlDenseEmbeddingResults::new;
}
@Override
protected MlDenseEmbeddingResults createTestInstance() {
return createRandomResults();
}
@Override
protected MlDenseEmbeddingResults mutateInstance(MlDenseEmbeddingResults instance) {
return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929
}
public void testAsMap() {
MlDenseEmbeddingResults testInstance = createTestInstance();
Map<String, Object> asMap = testInstance.asMap();
int size = testInstance.isTruncated ? 2 : 1;
assertThat(asMap.keySet(), hasSize(size));
assertArrayEquals(testInstance.getInference(), (double[]) asMap.get(DEFAULT_RESULTS_FIELD), 1e-10);
if (testInstance.isTruncated) {
assertThat(asMap.get("is_truncated"), is(true));
}
}
@Override
void assertFieldValues(MlDenseEmbeddingResults createdInstance, IngestDocument document, String parentField, String resultsField) {
assertArrayEquals(document.getFieldValue(parentField + resultsField, double[].class), createdInstance.getInference(), 1e-10);
}
}
| MlDenseEmbeddingResultsTests |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/inheritance/complex/AdditionalFooSource.java | {
"start": 207,
"end": 269
} | interface ____ {
int getAdditionalFoo();
}
| AdditionalFooSource |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/utils/SimpleAckingTaskManagerGateway.java | {
"start": 6873,
"end": 7139
} | interface ____ {
void accept(
ExecutionAttemptID executionAttemptID,
JobID jobId,
long checkpointId,
long timestamp,
CheckpointOptions checkpointOptions);
}
}
| CheckpointConsumer |
java | spring-projects__spring-framework | spring-expression/src/test/java/org/springframework/expression/spel/support/ReflectiveIndexAccessorTests.java | {
"start": 1455,
"end": 2910
} | class ____ {
@Test
void nonexistentReadMethod() {
Class<?> targetType = getClass();
assertThatIllegalArgumentException()
.isThrownBy(() -> new ReflectiveIndexAccessor(targetType, int.class, "bogus"))
.withMessage("Failed to find public read-method 'bogus(int)' in class '%s'.", targetType.getCanonicalName());
}
@Test
void nonPublicReadMethod() {
Class<?> targetType = PrivateReadMethod.class;
assertThatIllegalArgumentException()
.isThrownBy(() -> new ReflectiveIndexAccessor(targetType, int.class, "get"))
.withMessage("Failed to find public read-method 'get(int)' in class '%s'.", targetType.getCanonicalName());
}
@Test
void nonPublicWriteMethod() {
Class<?> targetType = PrivateWriteMethod.class;
assertThatIllegalArgumentException()
.isThrownBy(() -> new ReflectiveIndexAccessor(targetType, int.class, "get", "set"))
.withMessage("Failed to find public write-method 'set(int, java.lang.Object)' in class '%s'.",
targetType.getCanonicalName());
}
@Test
void nonPublicDeclaringClass() {
Class<?> targetType = NonPublicTargetType.class;
Method readMethod = ReflectionUtils.findMethod(targetType, "get", int.class);
ReflectiveIndexAccessor accessor = new ReflectiveIndexAccessor(targetType, int.class, "get");
assertThatIllegalStateException()
.isThrownBy(() -> accessor.generateCode(mock(), mock(), mock()))
.withMessage("Failed to find public declaring | ReflectiveIndexAccessorTests |
java | quarkusio__quarkus | extensions/redis-client/runtime/src/main/java/io/quarkus/redis/datasource/stream/XClaimArgs.java | {
"start": 288,
"end": 3990
} | class ____ implements RedisCommandExtraArguments {
private Duration idle;
private long time = -1;
private int retryCount = -1;
private boolean force;
private boolean justId;
private String lastId;
/**
* Set the idle time (last time it was delivered) of the message. If {@code IDLE} is not specified, an {@code IDLE}
* of 0 is assumed, that is, the time count is reset because the message has now a new owner trying to process it.
*
* @param idle the idle duration, must not be {@code null}
* @return the current {@code XClaimArgs}
*/
public XClaimArgs idle(Duration idle) {
this.idle = idle;
return this;
}
/**
* This is the same as {@code IDLE} but instead of a relative amount of milliseconds, it sets the idle time to a
* specific Unix time (in milliseconds). This is useful in order to rewrite the {@code AOF} file
* generating {@code XCLAIM} commands.
*
* @param time the timestamp
* @return the current {@code XClaimArgs}
*/
public XClaimArgs time(long time) {
this.time = time;
return this;
}
/**
* Set the retry counter to the specified value. This counter is incremented every time a message is delivered again.
* Normally {@code XCLAIM} does not alter this counter, which is just served to clients when the {@code XPENDING}
* command is called: this way clients can detect anomalies, like messages that are never processed for some reason
* after a big number of delivery attempts.
*
* @param retryCount the retry count, must be positive
* @return the current {@code XClaimArgs}
*/
public XClaimArgs retryCount(int retryCount) {
this.retryCount = retryCount;
return this;
}
/**
* Creates the pending message entry in the PEL even if certain specified IDs are not already in the PEL assigned
* to a different client.
* However, the message must exist in the stream, otherwise the IDs of non-existing messages are ignored.
*
* @return the current {@code XClaimArgs}
*/
public XClaimArgs force() {
this.force = true;
return this;
}
/**
* In the returned structure, only set the IDs of messages successfully claimed, without returning the actual message.
* Using this option means the retry counter is not incremented.
*
* @return the current {@code XClaimArgs}
*/
public XClaimArgs justId() {
this.justId = true;
return this;
}
/**
* Sets the last id of the message to claim.
*
* @param lastId the last id, must not be {@code null}
* @return the current {@code XClaimArgs}
*/
public XClaimArgs lastId(String lastId) {
this.lastId = lastId;
return this;
}
@Override
public List<Object> toArgs() {
List<Object> args = new ArrayList<>();
if (idle != null) {
args.add("IDLE");
args.add(Long.toString(idle.toMillis()));
if (time > 0) {
throw new IllegalStateException("Cannot combine `IDLE` and `TIME`");
}
}
if (time > 0) {
args.add("TIME");
args.add(Long.toString(time));
}
if (retryCount > 0) {
args.add("RETRYCOUNT");
args.add(Integer.toString(retryCount));
}
if (force) {
args.add("FORCE");
}
if (justId) {
args.add("JUSTID");
}
if (lastId != null) {
args.add("LASTID");
args.add(lastId);
}
return args;
}
}
| XClaimArgs |
java | elastic__elasticsearch | x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownPluginsIT.java | {
"start": 4262,
"end": 4873
} | class ____ extends Plugin implements ShutdownAwarePlugin {
@Override
public boolean safeToShutdown(String nodeId, SingleNodeShutdownMetadata.Type shutdownType) {
logger.info("--> checking whether safe to shutdown for node [{}], type [{}] answer: ({})", nodeId, shutdownType, safe.get());
return safe.get();
}
@Override
public void signalShutdown(Collection<String> shutdownNodeIds) {
logger.info("--> shutdown triggered for {}", shutdownNodeIds);
triggeredNodes.set(shutdownNodeIds);
}
}
}
| TestShutdownAwarePlugin |
java | google__guava | guava-testlib/test/com/google/common/testing/ArbitraryInstancesTest.java | {
"start": 5344,
"end": 20335
} | class ____ extends TestCase {
public void testGet_primitives() {
assertThat(ArbitraryInstances.get(void.class)).isNull();
assertThat(ArbitraryInstances.get(Void.class)).isNull();
assertEquals(Boolean.FALSE, ArbitraryInstances.get(boolean.class));
assertEquals(Boolean.FALSE, ArbitraryInstances.get(Boolean.class));
assertEquals(Character.valueOf('\0'), ArbitraryInstances.get(char.class));
assertEquals(Character.valueOf('\0'), ArbitraryInstances.get(Character.class));
assertEquals(Byte.valueOf((byte) 0), ArbitraryInstances.get(byte.class));
assertEquals(Byte.valueOf((byte) 0), ArbitraryInstances.get(Byte.class));
assertEquals(Short.valueOf((short) 0), ArbitraryInstances.get(short.class));
assertEquals(Short.valueOf((short) 0), ArbitraryInstances.get(Short.class));
assertEquals(Integer.valueOf(0), ArbitraryInstances.get(int.class));
assertEquals(Integer.valueOf(0), ArbitraryInstances.get(Integer.class));
assertEquals(Long.valueOf(0), ArbitraryInstances.get(long.class));
assertEquals(Long.valueOf(0), ArbitraryInstances.get(Long.class));
assertEquals(Float.valueOf(0), ArbitraryInstances.get(float.class));
assertEquals(Float.valueOf(0), ArbitraryInstances.get(Float.class));
assertThat(ArbitraryInstances.get(double.class)).isEqualTo(Double.valueOf(0));
assertThat(ArbitraryInstances.get(Double.class)).isEqualTo(Double.valueOf(0));
assertEquals(UnsignedInteger.ZERO, ArbitraryInstances.get(UnsignedInteger.class));
assertEquals(UnsignedLong.ZERO, ArbitraryInstances.get(UnsignedLong.class));
assertEquals(0, ArbitraryInstances.get(BigDecimal.class).intValue());
assertEquals(0, ArbitraryInstances.get(BigInteger.class).intValue());
assertEquals("", ArbitraryInstances.get(String.class));
assertEquals("", ArbitraryInstances.get(CharSequence.class));
assertEquals(SECONDS, ArbitraryInstances.get(TimeUnit.class));
assertThat(ArbitraryInstances.get(Object.class)).isNotNull();
assertEquals(0, ArbitraryInstances.get(Number.class));
assertEquals(UTF_8, ArbitraryInstances.get(Charset.class));
assertEquals(Optional.empty(), ArbitraryInstances.get(Optional.class));
assertEquals(OptionalInt.empty(), ArbitraryInstances.get(OptionalInt.class));
assertEquals(OptionalLong.empty(), ArbitraryInstances.get(OptionalLong.class));
assertEquals(OptionalDouble.empty(), ArbitraryInstances.get(OptionalDouble.class));
assertThat(ArbitraryInstances.get(UUID.class)).isNotNull();
}
public void testGet_collections() {
assertEquals(ImmutableSet.of().iterator(), ArbitraryInstances.get(Iterator.class));
assertFalse(ArbitraryInstances.get(PeekingIterator.class).hasNext());
assertFalse(ArbitraryInstances.get(ListIterator.class).hasNext());
assertEquals(ImmutableSet.of(), ArbitraryInstances.get(Iterable.class));
assertEquals(ImmutableSet.of(), ArbitraryInstances.get(Set.class));
assertEquals(ImmutableSet.of(), ArbitraryInstances.get(ImmutableSet.class));
assertEquals(ImmutableSortedSet.of(), ArbitraryInstances.get(SortedSet.class));
assertEquals(ImmutableSortedSet.of(), ArbitraryInstances.get(ImmutableSortedSet.class));
assertEquals(ImmutableList.of(), ArbitraryInstances.get(Collection.class));
assertEquals(ImmutableList.of(), ArbitraryInstances.get(ImmutableCollection.class));
assertEquals(ImmutableList.of(), ArbitraryInstances.get(List.class));
assertEquals(ImmutableList.of(), ArbitraryInstances.get(ImmutableList.class));
assertEquals(ImmutableMap.of(), ArbitraryInstances.get(Map.class));
assertEquals(ImmutableMap.of(), ArbitraryInstances.get(ImmutableMap.class));
assertEquals(ImmutableSortedMap.of(), ArbitraryInstances.get(SortedMap.class));
assertEquals(ImmutableSortedMap.of(), ArbitraryInstances.get(ImmutableSortedMap.class));
assertEquals(ImmutableMultiset.of(), ArbitraryInstances.get(Multiset.class));
assertEquals(ImmutableMultiset.of(), ArbitraryInstances.get(ImmutableMultiset.class));
assertTrue(ArbitraryInstances.get(SortedMultiset.class).isEmpty());
assertEquals(ImmutableMultimap.of(), ArbitraryInstances.get(Multimap.class));
assertEquals(ImmutableMultimap.of(), ArbitraryInstances.get(ImmutableMultimap.class));
assertTrue(ArbitraryInstances.get(SortedSetMultimap.class).isEmpty());
assertEquals(ImmutableTable.of(), ArbitraryInstances.get(Table.class));
assertEquals(ImmutableTable.of(), ArbitraryInstances.get(ImmutableTable.class));
assertTrue(ArbitraryInstances.get(RowSortedTable.class).isEmpty());
assertEquals(ImmutableBiMap.of(), ArbitraryInstances.get(BiMap.class));
assertEquals(ImmutableBiMap.of(), ArbitraryInstances.get(ImmutableBiMap.class));
assertTrue(ArbitraryInstances.get(ImmutableClassToInstanceMap.class).isEmpty());
assertTrue(ArbitraryInstances.get(ClassToInstanceMap.class).isEmpty());
assertTrue(ArbitraryInstances.get(ListMultimap.class).isEmpty());
assertTrue(ArbitraryInstances.get(ImmutableListMultimap.class).isEmpty());
assertTrue(ArbitraryInstances.get(SetMultimap.class).isEmpty());
assertTrue(ArbitraryInstances.get(ImmutableSetMultimap.class).isEmpty());
assertTrue(ArbitraryInstances.get(MapDifference.class).areEqual());
assertTrue(ArbitraryInstances.get(SortedMapDifference.class).areEqual());
assertEquals(Range.all(), ArbitraryInstances.get(Range.class));
assertTrue(ArbitraryInstances.get(NavigableSet.class).isEmpty());
assertTrue(ArbitraryInstances.get(NavigableMap.class).isEmpty());
assertTrue(ArbitraryInstances.get(LinkedList.class).isEmpty());
assertTrue(ArbitraryInstances.get(Deque.class).isEmpty());
assertTrue(ArbitraryInstances.get(Queue.class).isEmpty());
assertTrue(ArbitraryInstances.get(PriorityQueue.class).isEmpty());
assertTrue(ArbitraryInstances.get(BitSet.class).isEmpty());
assertTrue(ArbitraryInstances.get(TreeSet.class).isEmpty());
assertTrue(ArbitraryInstances.get(TreeMap.class).isEmpty());
assertFreshInstanceReturned(
LinkedList.class,
Deque.class,
Queue.class,
PriorityQueue.class,
BitSet.class,
TreeSet.class,
TreeMap.class);
}
public void testGet_misc() {
assertThat(ArbitraryInstances.get(CharMatcher.class)).isNotNull();
assertThat(ArbitraryInstances.get(Currency.class).getCurrencyCode()).isNotNull();
assertThat(ArbitraryInstances.get(Locale.class)).isNotNull();
assertThat(ArbitraryInstances.get(Joiner.class).join(ImmutableList.of("a"))).isNotNull();
assertThat(ArbitraryInstances.get(Splitter.class).split("a,b")).isNotNull();
assertThat(ArbitraryInstances.get(com.google.common.base.Optional.class)).isAbsent();
ArbitraryInstances.get(Stopwatch.class).start();
assertThat(ArbitraryInstances.get(Ticker.class)).isNotNull();
assertFreshInstanceReturned(Random.class);
assertEquals(
ArbitraryInstances.get(Random.class).nextInt(),
ArbitraryInstances.get(Random.class).nextInt());
}
public void testGet_concurrent() {
assertTrue(ArbitraryInstances.get(BlockingDeque.class).isEmpty());
assertTrue(ArbitraryInstances.get(BlockingQueue.class).isEmpty());
assertTrue(ArbitraryInstances.get(DelayQueue.class).isEmpty());
assertTrue(ArbitraryInstances.get(SynchronousQueue.class).isEmpty());
assertTrue(ArbitraryInstances.get(PriorityBlockingQueue.class).isEmpty());
assertTrue(ArbitraryInstances.get(ConcurrentMap.class).isEmpty());
assertTrue(ArbitraryInstances.get(ConcurrentNavigableMap.class).isEmpty());
ArbitraryInstances.get(Executor.class).execute(ArbitraryInstances.get(Runnable.class));
assertThat(ArbitraryInstances.get(ThreadFactory.class)).isNotNull();
assertFreshInstanceReturned(
BlockingQueue.class,
BlockingDeque.class,
PriorityBlockingQueue.class,
DelayQueue.class,
SynchronousQueue.class,
ConcurrentMap.class,
ConcurrentNavigableMap.class,
AtomicReference.class,
AtomicBoolean.class,
AtomicInteger.class,
AtomicLong.class,
AtomicDouble.class);
}
@SuppressWarnings("unchecked") // functor classes have no type parameters
public void testGet_functors() {
assertEquals(0, ArbitraryInstances.get(Comparator.class).compare("abc", 123));
assertTrue(ArbitraryInstances.get(Predicate.class).apply("abc"));
assertTrue(ArbitraryInstances.get(Equivalence.class).equivalent(1, 1));
assertFalse(ArbitraryInstances.get(Equivalence.class).equivalent(1, 2));
}
@SuppressWarnings("SelfComparison")
public void testGet_comparable() {
@SuppressWarnings("unchecked") // The null value can compare with any Object
Comparable<Object> comparable = ArbitraryInstances.get(Comparable.class);
assertEquals(0, comparable.compareTo(comparable));
assertThat(comparable.compareTo("")).isGreaterThan(0);
assertThrows(NullPointerException.class, () -> comparable.compareTo(null));
}
public void testGet_array() {
assertThat(ArbitraryInstances.get(int[].class)).isEmpty();
assertThat(ArbitraryInstances.get(Object[].class)).isEmpty();
assertThat(ArbitraryInstances.get(String[].class)).isEmpty();
}
public void testGet_enum() {
assertThat(ArbitraryInstances.get(EmptyEnum.class)).isNull();
assertEquals(Direction.UP, ArbitraryInstances.get(Direction.class));
}
public void testGet_interface() {
assertThat(ArbitraryInstances.get(SomeInterface.class)).isNull();
}
public void testGet_runnable() {
ArbitraryInstances.get(Runnable.class).run();
}
public void testGet_class() {
assertSame(SomeAbstractClass.INSTANCE, ArbitraryInstances.get(SomeAbstractClass.class));
assertSame(
WithPrivateConstructor.INSTANCE, ArbitraryInstances.get(WithPrivateConstructor.class));
assertThat(ArbitraryInstances.get(NoDefaultConstructor.class)).isNull();
assertSame(
WithExceptionalConstructor.INSTANCE,
ArbitraryInstances.get(WithExceptionalConstructor.class));
assertThat(ArbitraryInstances.get(NonPublicClass.class)).isNull();
}
public void testGet_mutable() {
assertEquals(0, ArbitraryInstances.get(ArrayList.class).size());
assertEquals(0, ArbitraryInstances.get(HashMap.class).size());
assertThat(ArbitraryInstances.get(Appendable.class).toString()).isEmpty();
assertThat(ArbitraryInstances.get(StringBuilder.class).toString()).isEmpty();
assertThat(ArbitraryInstances.get(StringBuffer.class).toString()).isEmpty();
assertFreshInstanceReturned(
ArrayList.class,
HashMap.class,
Appendable.class,
StringBuilder.class,
StringBuffer.class,
Throwable.class,
Exception.class);
}
public void testGet_io() throws IOException {
assertEquals(-1, ArbitraryInstances.get(InputStream.class).read());
assertEquals(-1, ArbitraryInstances.get(ByteArrayInputStream.class).read());
assertEquals(-1, ArbitraryInstances.get(Readable.class).read(CharBuffer.allocate(1)));
assertEquals(-1, ArbitraryInstances.get(Reader.class).read());
assertEquals(-1, ArbitraryInstances.get(StringReader.class).read());
assertEquals(0, ArbitraryInstances.get(Buffer.class).capacity());
assertEquals(0, ArbitraryInstances.get(CharBuffer.class).capacity());
assertEquals(0, ArbitraryInstances.get(ByteBuffer.class).capacity());
assertEquals(0, ArbitraryInstances.get(ShortBuffer.class).capacity());
assertEquals(0, ArbitraryInstances.get(IntBuffer.class).capacity());
assertEquals(0, ArbitraryInstances.get(LongBuffer.class).capacity());
assertEquals(0, ArbitraryInstances.get(FloatBuffer.class).capacity());
assertEquals(0, ArbitraryInstances.get(DoubleBuffer.class).capacity());
ArbitraryInstances.get(PrintStream.class).println("test");
ArbitraryInstances.get(PrintWriter.class).println("test");
assertThat(ArbitraryInstances.get(File.class)).isNotNull();
assertFreshInstanceReturned(
ByteArrayOutputStream.class, OutputStream.class,
Writer.class, StringWriter.class,
PrintStream.class, PrintWriter.class);
assertEquals(ByteSource.empty(), ArbitraryInstances.get(ByteSource.class));
assertEquals(CharSource.empty(), ArbitraryInstances.get(CharSource.class));
assertThat(ArbitraryInstances.get(ByteSink.class)).isNotNull();
assertThat(ArbitraryInstances.get(CharSink.class)).isNotNull();
}
public void testGet_reflect() {
assertThat(ArbitraryInstances.get(Type.class)).isNotNull();
assertThat(ArbitraryInstances.get(AnnotatedElement.class)).isNotNull();
assertThat(ArbitraryInstances.get(GenericDeclaration.class)).isNotNull();
}
public void testGet_regex() {
assertEquals(Pattern.compile("").pattern(), ArbitraryInstances.get(Pattern.class).pattern());
assertEquals(0, ArbitraryInstances.get(MatchResult.class).groupCount());
}
public void testGet_usePublicConstant() {
assertSame(WithPublicConstant.INSTANCE, ArbitraryInstances.get(WithPublicConstant.class));
}
public void testGet_useFirstPublicConstant() {
assertSame(WithPublicConstants.FIRST, ArbitraryInstances.get(WithPublicConstants.class));
}
public void testGet_nullConstantIgnored() {
assertSame(FirstConstantIsNull.SECOND, ArbitraryInstances.get(FirstConstantIsNull.class));
}
public void testGet_constantWithGenericsNotUsed() {
assertThat(ArbitraryInstances.get(WithGenericConstant.class)).isNull();
}
public void testGet_nullConstant() {
assertThat(ArbitraryInstances.get(WithNullConstant.class)).isNull();
}
public void testGet_constantTypeDoesNotMatch() {
assertThat(ArbitraryInstances.get(ParentClassHasConstant.class)).isNull();
}
public void testGet_nonPublicConstantNotUsed() {
assertThat(ArbitraryInstances.get(NonPublicConstantIgnored.class)).isNull();
}
public void testGet_nonStaticFieldNotUsed() {
assertThat(ArbitraryInstances.get(NonStaticFieldIgnored.class)).isNull();
}
public void testGet_constructorPreferredOverConstants() {
assertThat(ArbitraryInstances.get(WithPublicConstructorAndConstant.class)).isNotNull();
assertTrue(
ArbitraryInstances.get(WithPublicConstructorAndConstant.class)
!= ArbitraryInstances.get(WithPublicConstructorAndConstant.class));
}
public void testGet_nonFinalFieldNotUsed() {
assertThat(ArbitraryInstances.get(NonFinalFieldIgnored.class)).isNull();
}
private static void assertFreshInstanceReturned(Class<?>... mutableClasses) {
for (Class<?> mutableClass : mutableClasses) {
Object instance = ArbitraryInstances.get(mutableClass);
assertWithMessage("Expected to return non-null for: " + mutableClass)
.that(instance)
.isNotNull();
assertNotSame(
"Expected to return fresh instance for: " + mutableClass,
instance,
ArbitraryInstances.get(mutableClass));
}
}
private | ArbitraryInstancesTest |
java | spring-projects__spring-boot | smoke-test/spring-boot-smoke-test-aspectj/src/main/java/smoketest/aspectj/SampleAspectJApplication.java | {
"start": 972,
"end": 1391
} | class ____ implements CommandLineRunner {
// Simple example shows how an application can spy on itself with AOP
@Autowired
private HelloWorldService helloWorldService;
@Override
public void run(String... args) {
System.out.println(this.helloWorldService.getHelloMessage());
}
public static void main(String[] args) {
SpringApplication.run(SampleAspectJApplication.class, args);
}
}
| SampleAspectJApplication |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/IncompatibleModifiersCheckerTest.java | {
"start": 5205,
"end": 5633
} | class ____ {}
""")
.doTest();
}
// Regression test for #313
@Test
public void negativePackageAnnotation() {
compilationHelper
.addSourceLines(
"testdata/Anno.java",
"""
package testdata;
import java.lang.annotation.Target;
import java.lang.annotation.ElementType;
@Target(ElementType.PACKAGE)
public @ | Test |
java | redisson__redisson | redisson/src/main/java/org/redisson/connection/SequentialDnsAddressResolverFactory.java | {
"start": 1436,
"end": 1630
} | class ____ implements AddressResolverGroupFactory {
static final Logger log = LoggerFactory.getLogger(SequentialDnsAddressResolverFactory.class);
static | SequentialDnsAddressResolverFactory |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/cluster/project/ProjectDeletedListener.java | {
"start": 730,
"end": 848
} | class ____ make it easy to run a block of code whenever a project is deleted (e.g. to cleanup cache entries)
*/
public | to |
java | apache__kafka | raft/src/main/java/org/apache/kafka/raft/FileQuorumStateStore.java | {
"start": 2297,
"end": 7734
} | class ____ implements QuorumStateStore {
private static final Logger log = LoggerFactory.getLogger(FileQuorumStateStore.class);
private static final String DATA_VERSION = "data_version";
static final short LOWEST_SUPPORTED_VERSION = 0;
static final short HIGHEST_SUPPORTED_VERSION = 1;
public static final String DEFAULT_FILE_NAME = "quorum-state";
private final File stateFile;
public FileQuorumStateStore(final File stateFile) {
this.stateFile = stateFile;
}
private QuorumStateData readStateFromFile(File file) {
try (final BufferedReader reader = Files.newBufferedReader(file.toPath(), StandardCharsets.UTF_8)) {
final String line = reader.readLine();
if (line == null) {
throw new EOFException("File ended prematurely.");
}
final ObjectMapper objectMapper = new ObjectMapper();
JsonNode readNode = objectMapper.readTree(line);
if (!(readNode instanceof ObjectNode dataObject)) {
throw new IOException("Deserialized node " + readNode +
" is not an object node");
}
JsonNode dataVersionNode = dataObject.get(DATA_VERSION);
if (dataVersionNode == null) {
throw new IOException("Deserialized node " + readNode +
" does not have " + DATA_VERSION + " field");
}
final short dataVersion = dataVersionNode.shortValue();
if (dataVersion < LOWEST_SUPPORTED_VERSION || dataVersion > HIGHEST_SUPPORTED_VERSION) {
throw new IllegalStateException(
String.format(
"data_version (%d) is not within the min (%d) and max (%d) supported version",
dataVersion,
LOWEST_SUPPORTED_VERSION,
HIGHEST_SUPPORTED_VERSION
)
);
}
return QuorumStateDataJsonConverter.read(dataObject, dataVersion);
} catch (IOException e) {
throw new UncheckedIOException(
String.format("Error while reading the Quorum status from the file %s", file), e);
}
}
/**
* Reads the election state from local file.
*/
@Override
public Optional<ElectionState> readElectionState() {
if (!stateFile.exists()) {
return Optional.empty();
}
return Optional.of(ElectionState.fromQuorumStateData(readStateFromFile(stateFile)));
}
@Override
public void writeElectionState(ElectionState latest, KRaftVersion kraftVersion) {
short quorumStateVersion = kraftVersion.quorumStateVersion();
writeElectionStateToFile(
stateFile,
latest.toQuorumStateData(quorumStateVersion),
quorumStateVersion
);
}
@Override
public Path path() {
return stateFile.toPath();
}
private void writeElectionStateToFile(final File stateFile, QuorumStateData state, short version) {
if (version > HIGHEST_SUPPORTED_VERSION) {
throw new IllegalArgumentException(
String.format(
"Quorum state data version (%d) is greater than the supported version (%d)",
version,
HIGHEST_SUPPORTED_VERSION
)
);
}
final File temp = new File(stateFile.getAbsolutePath() + ".tmp");
deleteFileIfExists(temp);
log.trace("Writing tmp quorum state {}", temp.getAbsolutePath());
try {
try (final FileOutputStream fileOutputStream = new FileOutputStream(temp);
final BufferedWriter writer = new BufferedWriter(
new OutputStreamWriter(fileOutputStream, StandardCharsets.UTF_8)
)
) {
ObjectNode jsonState = (ObjectNode) QuorumStateDataJsonConverter.write(state, version);
jsonState.set(DATA_VERSION, new ShortNode(version));
writer.write(jsonState.toString());
writer.flush();
fileOutputStream.getFD().sync();
}
Utils.atomicMoveWithFallback(temp.toPath(), stateFile.toPath());
} catch (IOException e) {
throw new UncheckedIOException(
String.format(
"Error while writing the Quorum status from the file %s",
stateFile.getAbsolutePath()
),
e
);
} finally {
// cleanup the temp file when the write finishes (either success or fail).
deleteFileIfExists(temp);
}
}
/**
* Clear state store by deleting the local quorum state file
*/
@Override
public void clear() {
deleteFileIfExists(stateFile);
deleteFileIfExists(new File(stateFile.getAbsolutePath() + ".tmp"));
}
@Override
public String toString() {
return "Quorum state filepath: " + stateFile.getAbsolutePath();
}
private void deleteFileIfExists(File file) {
try {
Files.deleteIfExists(file.toPath());
} catch (IOException e) {
throw new UncheckedIOException(
String.format("Error while deleting file %s", file.getAbsoluteFile()), e);
}
}
}
| FileQuorumStateStore |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/CachedDataStream.java | {
"start": 1494,
"end": 2878
} | class ____<T> extends DataStream<T> {
/**
* Create a new {@link CachedDataStream} in the given execution environment that wrap the given
* physical transformation to indicates that the transformation should be cached.
*
* @param environment The StreamExecutionEnvironment
* @param transformation The physical transformation whose intermediate result should be cached.
*/
public CachedDataStream(
StreamExecutionEnvironment environment, Transformation<T> transformation) {
super(
environment,
new CacheTransformation<>(
transformation, String.format("Cache: %s", transformation.getName())));
final CacheTransformation<T> t = (CacheTransformation<T>) this.getTransformation();
environment.registerCacheTransformation(t.getDatasetId(), t);
}
/**
* Invalidate the cache intermediate result of this DataStream to release the physical
* resources. Users are not required to invoke this method to release physical resources unless
* they want to. Cache will be recreated if it is used after invalidated.
*/
public void invalidate() throws Exception {
final CacheTransformation<T> t = (CacheTransformation<T>) this.getTransformation();
environment.invalidateClusterDataset(t.getDatasetId());
}
}
| CachedDataStream |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsPage.java | {
"start": 1480,
"end": 2319
} | class ____ extends TwoColumnLayout {
/* (non-Javadoc)
* @see org.apache.hadoop.yarn.server.nodemanager.webapp.NMView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
*/
@Override
protected void preHead(Page.HTML<__> html) {
String logEntity = $(ENTITY_STRING);
if (logEntity == null || logEntity.isEmpty()) {
logEntity = $(CONTAINER_ID);
}
if (logEntity == null || logEntity.isEmpty()) {
logEntity = "UNKNOWN";
}
set(TITLE, join("Logs for ", logEntity));
set(ACCORDION_ID, "nav");
set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}");
}
@Override
protected Class<? extends SubView> content() {
return AggregatedLogsBlock.class;
}
@Override
protected Class<? extends SubView> nav() {
return AggregatedLogsNavBlock.class;
}
}
| AggregatedLogsPage |
java | google__guava | android/guava-tests/test/com/google/common/hash/Murmur3Hash128Test.java | {
"start": 1007,
"end": 3303
} | class ____ extends TestCase {
public void testKnownValues() {
assertHash(0, 0x629942693e10f867L, 0x92db0b82baeb5347L, "hell");
assertHash(1, 0xa78ddff5adae8d10L, 0x128900ef20900135L, "hello");
assertHash(2, 0x8a486b23f422e826L, 0xf962a2c58947765fL, "hello ");
assertHash(3, 0x2ea59f466f6bed8cL, 0xc610990acc428a17L, "hello w");
assertHash(4, 0x79f6305a386c572cL, 0x46305aed3483b94eL, "hello wo");
assertHash(5, 0xc2219d213ec1f1b5L, 0xa1d8e2e0a52785bdL, "hello wor");
assertHash(
0, 0xe34bbc7bbc071b6cL, 0x7a433ca9c49a9347L, "The quick brown fox jumps over the lazy dog");
assertHash(
0, 0x658ca970ff85269aL, 0x43fee3eaa68e5c3eL, "The quick brown fox jumps over the lazy cog");
// Known output from Python smhasher
HashCode foxHash =
murmur3_128(0).hashString("The quick brown fox jumps over the lazy dog", UTF_8);
assertEquals("6c1b07bc7bbc4be347939ac4a93c437a", foxHash.toString());
}
private static void assertHash(int seed, long expected1, long expected2, String stringInput) {
HashCode expected = toHashCode(expected1, expected2);
byte[] input = HashTestUtils.ascii(stringInput);
assertEquals(expected, murmur3_128(seed).hashBytes(input));
assertEquals(expected, murmur3_128(seed).newHasher().putBytes(input).hash());
}
/** Returns a {@link HashCode} for a sequence of longs, in big-endian order. */
private static HashCode toHashCode(long... longs) {
ByteBuffer bb = ByteBuffer.wrap(new byte[longs.length * 8]).order(ByteOrder.LITTLE_ENDIAN);
for (long x : longs) {
bb.putLong(x);
}
return HashCode.fromBytes(bb.array());
}
public void testParanoid() {
HashFn hf =
new HashFn() {
@Override
public byte[] hash(byte[] input, int seed) {
Hasher hasher = murmur3_128(seed).newHasher();
Funnels.byteArrayFunnel().funnel(input, hasher);
return hasher.hash().asBytes();
}
};
// Murmur3F, MurmurHash3 for x64, 128-bit (MurmurHash3_x64_128)
// From http://code.google.com/p/smhasher/source/browse/trunk/main.cpp
HashTestUtils.verifyHashFunction(hf, 128, 0x6384BA69);
}
public void testInvariants() {
HashTestUtils.assertInvariants(murmur3_128());
}
}
| Murmur3Hash128Test |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/util/xml/StaxSourceTests.java | {
"start": 1406,
"end": 4532
} | class ____ {
private static final String XML = "<root xmlns='namespace'><child/></root>";
private Transformer transformer;
private XMLInputFactory inputFactory;
private DocumentBuilder documentBuilder;
@BeforeEach
void setUp() throws Exception {
TransformerFactory transformerFactory = TransformerFactory.newInstance();
transformer = transformerFactory.newTransformer();
inputFactory = XMLInputFactory.newInstance();
DocumentBuilderFactory documentBuilderFactory = DocumentBuilderFactory.newInstance();
documentBuilderFactory.setNamespaceAware(true);
documentBuilder = documentBuilderFactory.newDocumentBuilder();
}
@Test
void streamReaderSourceToStreamResult() throws Exception {
XMLStreamReader streamReader = inputFactory.createXMLStreamReader(new StringReader(XML));
StaxSource source = new StaxSource(streamReader);
assertThat(source.getXMLStreamReader()).as("Invalid streamReader returned").isEqualTo(streamReader);
assertThat(source.getXMLEventReader()).as("EventReader returned").isNull();
StringWriter writer = new StringWriter();
transformer.transform(source, new StreamResult(writer));
assertThat(XmlContent.from(writer)).as("Invalid result").isSimilarTo(XML);
}
@Test
void streamReaderSourceToDOMResult() throws Exception {
XMLStreamReader streamReader = inputFactory.createXMLStreamReader(new StringReader(XML));
StaxSource source = new StaxSource(streamReader);
assertThat(source.getXMLStreamReader()).as("Invalid streamReader returned").isEqualTo(streamReader);
assertThat(source.getXMLEventReader()).as("EventReader returned").isNull();
Document expected = documentBuilder.parse(new InputSource(new StringReader(XML)));
Document result = documentBuilder.newDocument();
transformer.transform(source, new DOMResult(result));
assertThat(XmlContent.of(result)).as("Invalid result").isSimilarTo(expected);
}
@Test
void eventReaderSourceToStreamResult() throws Exception {
XMLEventReader eventReader = inputFactory.createXMLEventReader(new StringReader(XML));
StaxSource source = new StaxSource(eventReader);
assertThat(source.getXMLEventReader()).as("Invalid eventReader returned").isEqualTo(eventReader);
assertThat(source.getXMLStreamReader()).as("StreamReader returned").isNull();
StringWriter writer = new StringWriter();
transformer.transform(source, new StreamResult(writer));
assertThat(XmlContent.from(writer)).as("Invalid result").isSimilarTo(XML);
}
@Test
void eventReaderSourceToDOMResult() throws Exception {
XMLEventReader eventReader = inputFactory.createXMLEventReader(new StringReader(XML));
StaxSource source = new StaxSource(eventReader);
assertThat(source.getXMLEventReader()).as("Invalid eventReader returned").isEqualTo(eventReader);
assertThat(source.getXMLStreamReader()).as("StreamReader returned").isNull();
Document expected = documentBuilder.parse(new InputSource(new StringReader(XML)));
Document result = documentBuilder.newDocument();
transformer.transform(source, new DOMResult(result));
assertThat(XmlContent.of(result)).as("Invalid result").isSimilarTo(expected);
}
}
| StaxSourceTests |
java | apache__flink | flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/SourceStreamTaskTestBase.java | {
"start": 1952,
"end": 4420
} | class ____ {
public void testMetrics(
FunctionWithException<Environment, ? extends StreamTask<Integer, ?>, Exception>
taskFactory,
StreamOperatorFactory<?> operatorFactory,
Consumer<AbstractDoubleAssert<?>> busyTimeMatcher)
throws Exception {
long sleepTime = 42;
StreamTaskMailboxTestHarnessBuilder<Integer> builder =
new StreamTaskMailboxTestHarnessBuilder<>(taskFactory, INT_TYPE_INFO);
final Map<String, Metric> metrics = new ConcurrentHashMap<>();
final TaskMetricGroup taskMetricGroup =
StreamTaskTestHarness.createTaskMetricGroup(metrics);
try (StreamTaskMailboxTestHarness<Integer> harness =
builder.setupOutputForSingletonOperatorChain(operatorFactory)
.setTaskMetricGroup(taskMetricGroup)
.build()) {
Future<Boolean> triggerFuture =
harness.streamTask.triggerCheckpointAsync(
new CheckpointMetaData(1L, System.currentTimeMillis()),
CheckpointOptions.forCheckpointWithDefaultLocation());
OneShotLatch checkpointAcknowledgeLatch = new OneShotLatch();
harness.getCheckpointResponder().setAcknowledgeLatch(checkpointAcknowledgeLatch);
assertThat(triggerFuture).isNotDone();
Thread.sleep(sleepTime);
while (!triggerFuture.isDone()) {
harness.streamTask.runMailboxStep();
}
Gauge<Long> checkpointStartDelayGauge =
(Gauge<Long>) metrics.get(MetricNames.CHECKPOINT_START_DELAY_TIME);
assertThat(checkpointStartDelayGauge.getValue())
.isGreaterThanOrEqualTo(sleepTime * 1_000_000);
Gauge<Double> busyTimeGauge = (Gauge<Double>) metrics.get(MetricNames.TASK_BUSY_TIME);
busyTimeMatcher.accept(assertThat(busyTimeGauge.getValue()));
checkpointAcknowledgeLatch.await();
TestCheckpointResponder.AcknowledgeReport acknowledgeReport =
Iterables.getOnlyElement(
harness.getCheckpointResponder().getAcknowledgeReports());
assertThat(acknowledgeReport.getCheckpointMetrics().getCheckpointStartDelayNanos())
.isGreaterThanOrEqualTo(sleepTime * 1_000_000);
}
}
}
| SourceStreamTaskTestBase |
java | apache__camel | components/camel-stomp/src/test/java/org/apache/camel/component/stomp/StompConsumerHeaderFilterStrategyTest.java | {
"start": 3589,
"end": 3849
} | class ____ extends DefaultHeaderFilterStrategy {
ConsumerHeaderFilterStrategy() {
// allow all outbound headers to pass through except the below one
getInFilter().add("content-length");
}
}
}
| ConsumerHeaderFilterStrategy |
java | quarkusio__quarkus | devtools/project-core-extension-codestarts/src/main/resources/codestarts/quarkus/examples/google-cloud-functions-http-example/java/src/main/java/org/acme/googlecloudfunctions/GreetingServlet.java | {
"start": 376,
"end": 998
} | class ____ extends HttpServlet {
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
resp.setStatus(200);
resp.addHeader("Content-Type", "text/plain");
resp.getWriter().write("hello");
}
@Override
protected void doPost(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
String name = req.getReader().readLine();
resp.setStatus(200);
resp.addHeader("Content-Type", "text/plain");
resp.getWriter().write("hello " + name);
}
}
| GreetingServlet |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java | {
"start": 20592,
"end": 31995
} | class ____ extends AbstractRunnable {
@Override
protected void doRun() {
countDownLatch.countDown();
if (countDownLatch.getCount() > 0) {
executorService.execute(TestTask.this);
}
}
@Override
public void onFailure(Exception e) {
fail(e);
}
}
executorService.execute(new TestTask());
safeAwait(countDownLatch);
} finally {
ThreadPool.terminate(executorService, 10, TimeUnit.SECONDS);
}
}
public void testScalingDropOnShutdown() {
final var executor = EsExecutors.newScaling(
getName(),
0,
between(1, 5),
60,
TimeUnit.SECONDS,
false,
TestEsExecutors.testOnlyDaemonThreadFactory(getName()),
new ThreadContext(Settings.EMPTY)
);
ThreadPool.terminate(executor, 10, TimeUnit.SECONDS);
executor.execute(() -> fail("should not run")); // no-op
executor.execute(new AbstractRunnable() {
@Override
public void onFailure(Exception e) {
fail("should not call onFailure");
}
@Override
protected void doRun() {
fail("should not call doRun");
}
@Override
public boolean isForceExecution() {
return randomBoolean();
}
@Override
public void onRejection(Exception e) {
fail("should not call onRejection");
}
@Override
public void onAfter() {
fail("should not call onAfter");
}
});
}
public void testScalingRejectOnShutdown() {
runRejectOnShutdownTest(
EsExecutors.newScaling(
getName(),
0,
between(1, 5),
60,
TimeUnit.SECONDS,
true,
TestEsExecutors.testOnlyDaemonThreadFactory(getName()),
new ThreadContext(Settings.EMPTY)
)
);
}
public void testFixedBoundedRejectOnShutdown() {
runRejectOnShutdownTest(
EsExecutors.newFixed(
getName(),
between(1, 5),
between(1, 5),
TestEsExecutors.testOnlyDaemonThreadFactory(getName()),
threadContext,
randomFrom(DEFAULT, DO_NOT_TRACK)
)
);
}
public void testFixedUnboundedRejectOnShutdown() {
runRejectOnShutdownTest(
EsExecutors.newFixed(
getName(),
between(1, 5),
-1,
TestEsExecutors.testOnlyDaemonThreadFactory(getName()),
threadContext,
randomFrom(DEFAULT, DO_NOT_TRACK)
)
);
}
public void testParseExecutorName() throws InterruptedException {
final var executorName = randomAlphaOfLength(10);
final String nodeName = rarely() ? null : randomIdentifier();
final ThreadFactory threadFactory;
final boolean isSystem;
if (nodeName == null) {
isSystem = false;
threadFactory = EsExecutors.daemonThreadFactory(Settings.EMPTY, executorName);
} else if (randomBoolean()) {
isSystem = false;
threadFactory = EsExecutors.daemonThreadFactory(
Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), nodeName).build(),
executorName
);
} else {
isSystem = randomBoolean();
threadFactory = EsExecutors.daemonThreadFactory(nodeName, executorName, isSystem);
}
final var thread = threadFactory.newThread(() -> {});
try {
assertThat(EsExecutors.executorName(thread), equalTo(executorName));
assertThat(((EsExecutors.EsThread) thread).isSystem(), equalTo(isSystem));
} finally {
thread.join();
}
final var testThread = TestEsExecutors.testOnlyDaemonThreadFactory("test").newThread(() -> {});
try {
assertNull("No executor name expected for test thread factory", EsExecutors.executorName(testThread));
} finally {
testThread.join();
}
}
public void testScalingWithTaskTimeTracking() {
final int min = between(1, 3);
final int max = between(min + 1, 6);
{
var executionTimeEwma = randomDoubleBetween(0.01, 0.1, true);
ThreadPoolExecutor pool = EsExecutors.newScaling(
getClass().getName() + "/" + getTestName(),
min,
max,
between(1, 100),
randomTimeUnit(),
randomBoolean(),
TestEsExecutors.testOnlyDaemonThreadFactory("test"),
threadContext,
randomBoolean()
? EsExecutors.TaskTrackingConfig.builder().trackOngoingTasks().trackExecutionTime(executionTimeEwma).build()
: EsExecutors.TaskTrackingConfig.builder().trackExecutionTime(executionTimeEwma).build()
);
assertThat(pool, instanceOf(TaskExecutionTimeTrackingEsThreadPoolExecutor.class));
}
{
ThreadPoolExecutor pool = EsExecutors.newScaling(
getClass().getName() + "/" + getTestName(),
min,
max,
between(1, 100),
randomTimeUnit(),
randomBoolean(),
TestEsExecutors.testOnlyDaemonThreadFactory("test"),
threadContext
);
assertThat(pool, instanceOf(EsThreadPoolExecutor.class));
}
{
ThreadPoolExecutor pool = EsExecutors.newScaling(
getClass().getName() + "/" + getTestName(),
min,
max,
between(1, 100),
randomTimeUnit(),
randomBoolean(),
TestEsExecutors.testOnlyDaemonThreadFactory("test"),
threadContext,
DO_NOT_TRACK
);
assertThat(pool, instanceOf(EsThreadPoolExecutor.class));
}
}
private static void runRejectOnShutdownTest(ExecutorService executor) {
for (int i = between(0, 10); i > 0; i--) {
final var delayMillis = between(0, 100);
executor.execute(ActionRunnable.wrap(ActionListener.noop(), l -> safeSleep(delayMillis)));
}
try {
executor.shutdown();
assertShutdownAndRejectingTasks(executor);
} finally {
ThreadPool.terminate(executor, 10, TimeUnit.SECONDS);
}
assertShutdownAndRejectingTasks(executor);
}
private static void assertShutdownAndRejectingTasks(Executor executor) {
final var rejected = new AtomicBoolean();
final var shouldBeRejected = new AbstractRunnable() {
@Override
public void onFailure(Exception e) {
fail("should not call onFailure");
}
@Override
protected void doRun() {
fail("should not call doRun");
}
@Override
public boolean isForceExecution() {
return randomBoolean();
}
@Override
public void onRejection(Exception e) {
assertTrue(asInstanceOf(EsRejectedExecutionException.class, e).isExecutorShutdown());
assertTrue(rejected.compareAndSet(false, true));
}
};
assertTrue(expectThrows(EsRejectedExecutionException.class, () -> executor.execute(shouldBeRejected::doRun)).isExecutorShutdown());
executor.execute(shouldBeRejected);
assertTrue(rejected.get());
}
public void testScalingWithEmptyCore() {
testScalingWithEmptyCoreAndMaxSingleThread(
EsExecutors.newScaling(
getTestName(),
0,
1,
0,
TimeUnit.MILLISECONDS,
true,
TestEsExecutors.testOnlyDaemonThreadFactory(getTestName()),
threadContext
)
);
}
public void testScalingWithEmptyCoreAndKeepAlive() {
testScalingWithEmptyCoreAndMaxSingleThread(
EsExecutors.newScaling(
getTestName(),
0,
1,
1,
TimeUnit.MILLISECONDS,
true,
TestEsExecutors.testOnlyDaemonThreadFactory(getTestName()),
threadContext
)
);
}
public void testScalingWithEmptyCoreAndLargerMaxSize() {
testScalingWithEmptyCoreAndMaxMultipleThreads(
EsExecutors.newScaling(
getTestName(),
0,
between(2, 5),
0,
TimeUnit.MILLISECONDS,
true,
TestEsExecutors.testOnlyDaemonThreadFactory(getTestName()),
threadContext
)
);
}
public void testScalingWithEmptyCoreAndKeepAliveAndLargerMaxSize() {
testScalingWithEmptyCoreAndMaxMultipleThreads(
EsExecutors.newScaling(
getTestName(),
0,
between(2, 5),
1,
TimeUnit.MILLISECONDS,
true,
TestEsExecutors.testOnlyDaemonThreadFactory(getTestName()),
threadContext
)
);
}
public void testScalingWithEmptyCoreAndWorkerPoolProbing() {
// the executor is created directly here, newScaling doesn't use ExecutorScalingQueue & probing if max pool size = 1.
testScalingWithEmptyCoreAndMaxSingleThread(
new EsThreadPoolExecutor(
getTestName(),
0,
1,
0,
TimeUnit.MILLISECONDS,
new EsExecutors.ExecutorScalingQueue<>(),
TestEsExecutors.testOnlyDaemonThreadFactory(getTestName()),
new EsExecutors.ForceQueuePolicy(true, true),
threadContext
)
);
}
public void testScalingWithEmptyCoreAndKeepAliveAndWorkerPoolProbing() {
// the executor is created directly here, newScaling doesn't use ExecutorScalingQueue & probing if max pool size = 1.
testScalingWithEmptyCoreAndMaxSingleThread(
new EsThreadPoolExecutor(
getTestName(),
0,
1,
1,
TimeUnit.MILLISECONDS,
new EsExecutors.ExecutorScalingQueue<>(),
TestEsExecutors.testOnlyDaemonThreadFactory(getTestName()),
new EsExecutors.ForceQueuePolicy(true, true),
threadContext
)
);
}
private void testScalingWithEmptyCoreAndMaxSingleThread(EsThreadPoolExecutor testSubject) {
try {
final var keepAliveNanos = testSubject.getKeepAliveTime(TimeUnit.NANOSECONDS);
| TestTask |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_3142/Source.java | {
"start": 232,
"end": 433
} | class ____ {
private final Nested nested;
public Source(Nested nested) {
this.nested = nested;
}
public Nested getNested() {
return nested;
}
public static | Source |
java | quarkusio__quarkus | extensions/smallrye-openapi/runtime/src/main/java/io/quarkus/smallrye/openapi/runtime/filter/AutoUrl.java | {
"start": 221,
"end": 1313
} | class ____ {
private String defaultValue;
private String configKey;
private String path;
public AutoUrl() {
}
public AutoUrl(String defaultValue, String configKey, String path) {
this.defaultValue = defaultValue;
this.configKey = configKey;
this.path = path;
}
public String getDefaultValue() {
return defaultValue;
}
public void setDefaultValue(String defaultValue) {
this.defaultValue = defaultValue;
}
public String getConfigKey() {
return configKey;
}
public void setConfigKey(String configKey) {
this.configKey = configKey;
}
public String getPath() {
return path;
}
public void setPath(String path) {
this.path = path;
}
public String getFinalUrlValue() {
Config c = ConfigProvider.getConfig();
String u = c.getOptionalValue(this.configKey, String.class).orElse(defaultValue);
if (u != null && path != null && !u.endsWith(path)) {
u = u + path;
}
return u;
}
}
| AutoUrl |
java | apache__commons-lang | src/test/java/org/apache/commons/lang3/reflect/TypeUtilsTest.java | {
"start": 6622,
"end": 8369
} | interface ____<K, V> {
// empty
}
public static Comparable<Integer> intComparable;
public static Comparable<Long> longComparable;
public static Comparable<String> stringComparable;
public static List<String>[] stringListArray;
public static URI uri;
public static Comparable<URI> uriComparable;
public static Comparable<?> wildcardComparable;
public static <G extends Comparable<G>> G stub() {
return null;
}
public static <G extends Comparable<? super G>> G stub2() {
return null;
}
public static <T extends Comparable<? extends T>> T stub3() {
return null;
}
static Stream<Type> testTypeToString() {
// @formatter:off
return Stream.of(Comparator.class, Comparable.class, ArrayList.class, HashMap.class)
.flatMap(cls -> Stream.of(cls.getDeclaredMethods()))
.flatMap(m ->
Stream.concat(Stream.of(m.getGenericExceptionTypes()),
Stream.concat(Stream.of(m.getGenericParameterTypes()),
Stream.concat(Stream.of(m.getGenericReturnType()), Stream.of(m.getTypeParameters())))));
// @formatter:on
}
public The<String, String> da;
public That<String, String> dat;
public TypeUtilsTest<String>.That<String, String> dat2;
public TypeUtilsTest<Number>.That<String, String> dat3;
public Thing ding;
public This<String, String> dis;
public Comparable<? extends Integer>[] intWildcardComparable;
public Iterable<? extends Map<Integer, ? extends Collection<?>>> iterable;
public TypeUtilsTest<String>.Tester tester;
public Tester tester2;
public Other<String> uhder;
/** The inner | This |
java | quarkusio__quarkus | core/deployment/src/main/java/io/quarkus/deployment/builditem/Startable.java | {
"start": 239,
"end": 310
} | interface ____ use that in dev services
String getContainerId();
}
| and |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/orphan/onetomany/merge/MergeTest.java | {
"start": 3114,
"end": 3873
} | class ____ {
@Id
private Long id;
private String name;
@OneToMany
private List<Child> children;
public Parent() {
}
public Parent(Long id, String name) {
this.id = id;
this.name = name;
}
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public List<Child> getChildren() {
return children;
}
public void setChildren(List<Child> children) {
this.children = children;
}
public void addChild(Child child) {
if ( children == null ) {
children = new ArrayList<Child>();
}
children.add( child );
}
}
@Entity(name = "Child")
public static | Parent |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java | {
"start": 33824,
"end": 36475
} | class ____ extends HttpServlet {
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
UserGroupInformation ugi = HttpUserGroupInformation.get();
if (ugi != null) {
String ret = "remoteuser=" + req.getRemoteUser() + ":ugi=" +
ugi.getShortUserName();
if (ugi.getAuthenticationMethod() ==
UserGroupInformation.AuthenticationMethod.PROXY) {
ret = "realugi=" + ugi.getRealUser().getShortUserName() + ":" + ret;
}
resp.setStatus(HttpServletResponse.SC_OK);
resp.getWriter().write(ret);
} else {
resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
}
}
}
@Test
public void testHttpUGI() throws Exception {
final Server jetty = createJettyServer();
ServletContextHandler context = new ServletContextHandler();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(PseudoDTAFilter.class), "/*",
EnumSet.of(DispatcherType.REQUEST));
context.addServlet(new ServletHolder(UGIServlet.class), "/bar");
try {
jetty.start();
final URL url = new URL(getJettyURL() + "/foo/bar");
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(FOO_USER);
ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
DelegationTokenAuthenticatedURL.Token token =
new DelegationTokenAuthenticatedURL.Token();
DelegationTokenAuthenticatedURL aUrl =
new DelegationTokenAuthenticatedURL();
// user foo
HttpURLConnection conn = aUrl.openConnection(url, token);
assertEquals(HttpURLConnection.HTTP_OK,
conn.getResponseCode());
List<String> ret = IOUtils
.readLines(conn.getInputStream(), StandardCharsets.UTF_8);
assertEquals(1, ret.size());
assertEquals("remoteuser=" + FOO_USER+ ":ugi=" + FOO_USER,
ret.get(0));
// user ok-user via proxyuser foo
conn = aUrl.openConnection(url, token, OK_USER);
assertEquals(HttpURLConnection.HTTP_OK,
conn.getResponseCode());
ret = IOUtils.readLines(conn.getInputStream(), StandardCharsets.UTF_8);
assertEquals(1, ret.size());
assertEquals("realugi=" + FOO_USER + ":remoteuser=" + OK_USER +
":ugi=" + OK_USER, ret.get(0));
return null;
}
});
} finally {
jetty.stop();
}
}
public static | UGIServlet |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/common/runtime/src/main/java/org/jboss/resteasy/reactive/common/model/PreMatchInterceptorContainer.java | {
"start": 2037,
"end": 2245
} | class ____ '"
+ filter.getClassName() + "'");
}
} else {
unsetNonBlockingInterceptorFound = true;
}
}
}
}
| is |
java | apache__camel | components/camel-xmlsecurity/src/main/java/org/apache/camel/component/xmlsecurity/XmlVerifierEndpoint.java | {
"start": 1897,
"end": 3551
} | class ____ extends DefaultEndpoint {
@UriPath
@Metadata(required = true)
private String name;
@UriParam
private XmlVerifierConfiguration configuration;
public XmlVerifierEndpoint(String uri, XmlVerifierComponent component,
XmlVerifierConfiguration configuration) {
super(uri, component);
this.configuration = configuration;
}
@Override
public boolean isRemote() {
return false;
}
public String getName() {
return name;
}
/**
* The name part in the URI can be chosen by the user to distinguish between different verify endpoints within the
* camel context.
*/
public void setName(String name) {
this.name = name;
}
public XmlVerifierConfiguration getConfiguration() {
return configuration;
}
/**
* Configuration
*/
public void setConfiguration(XmlVerifierConfiguration configuration) {
this.configuration = configuration;
}
@Override
public Producer createProducer() throws Exception {
Processor processor = new XmlVerifierProcessor(getCamelContext(), getConfiguration());
return new XmlSecurityProducer(this, processor);
}
@Override
public Consumer createConsumer(Processor processor) throws Exception {
return null;
}
@Override
protected void doInit() throws Exception {
super.doInit();
Object ns = configuration.getOutputNodeSearch();
if (ns instanceof String && ns.toString().startsWith("#")) {
// its a reference lookup
}
}
}
| XmlVerifierEndpoint |
java | google__guava | android/guava/src/com/google/common/base/FinalizableReferenceQueue.java | {
"start": 2165,
"end": 2208
} | class ____:
*
* {@snippet :
* public | instead |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/language/simple/SimplePredicateParserNodesTest.java | {
"start": 1367,
"end": 3154
} | class ____ extends ExchangeTestSupport {
@Test
public void testParserNodes() {
exchange.getIn().setBody("foo");
SimplePredicateParser parser = new SimplePredicateParser(null, "${body} == 'foo'", true, null);
List<SimpleNode> nodes = parser.parseTokens();
Assertions.assertEquals(1, nodes.size());
BinaryExpression be = (BinaryExpression) nodes.get(0);
Assertions.assertEquals(BinaryOperatorType.EQ, be.getOperator());
SingleQuoteStart qe = (SingleQuoteStart) be.getRight();
LiteralNode ln = (LiteralNode) qe.getBlock().getChildren().get(0);
Assertions.assertEquals("foo", ln.getText());
SimpleFunctionStart fe = (SimpleFunctionStart) be.getLeft();
ln = (LiteralNode) fe.getBlock().getChildren().get(0);
Assertions.assertEquals("body", ln.toString());
}
@Test
public void testParserNodesEmbeddedFunction() {
exchange.getIn().setBody("foo");
SimplePredicateParser parser = new SimplePredicateParser(null, "${body} != 'Hello ${header.bar}'", true, null);
List<SimpleNode> nodes = parser.parseTokens();
Assertions.assertEquals(1, nodes.size());
BinaryExpression be = (BinaryExpression) nodes.get(0);
Assertions.assertEquals(BinaryOperatorType.NOT_EQ, be.getOperator());
SingleQuoteStart qe = (SingleQuoteStart) be.getRight();
LiteralNode ln = (LiteralNode) qe.getBlock().getChildren().get(0);
Assertions.assertEquals("Hello ", ln.getText());
SimpleFunctionStart fe = (SimpleFunctionStart) qe.getBlock().getChildren().get(1);
ln = (LiteralNode) fe.getBlock().getChildren().get(0);
Assertions.assertEquals("header.bar", ln.toString());
}
}
| SimplePredicateParserNodesTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/entitygraph/EntityGraphWithInheritanceTest.java | {
"start": 6479,
"end": 6612
} | class ____ extends Course {
@ManyToOne(fetch = FetchType.LAZY, cascade = CascadeType.ALL)
Teacher moneyReceiver;
}
}
| PayingCourse |
java | apache__flink | flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/sequencedmultisetstate/SequencedMultiSetState.java | {
"start": 1438,
"end": 2180
} | interface ____ managing an ordered multi-set state in Apache Flink. It
* provides methods to add, append, and remove elements while maintaining insertion order.
*
* <p>The state supports two types of semantics for adding elements:
*
* <ul>
* <li><b>Normal Set Semantics:</b> Replaces an existing matching element with the new one.
* <li><b>Multi-Set Semantics:</b> Appends the new element, allowing duplicates.
* </ul>
*
* <p>Removal operations are supported with different result types, indicating the outcome of the
* removal process, such as whether all elements were removed, the last added element was removed,
* or no elements were removed.
*
* @param <T> The type of elements stored in the state.
*/
@Internal
public | for |
java | spring-projects__spring-security | test/src/test/java/org/springframework/security/test/web/servlet/showcase/login/CustomConfigAuthenticationTests.java | {
"start": 4250,
"end": 5236
} | class ____ {
@Bean
SecurityFilterChain filterChain(HttpSecurity http) throws Exception {
// @formatter:off
http
.authorizeHttpRequests((requests) -> requests
.anyRequest().authenticated())
.securityContext((context) -> context
.securityContextRepository(securityContextRepository()))
.formLogin((login) -> login
.usernameParameter("user")
.passwordParameter("pass")
.loginPage("/authenticate"));
return http.build();
// @formatter:on
}
// @formatter:off
@Bean
UserDetailsService userDetailsService() {
UserDetails user = User.withDefaultPasswordEncoder().username("user").password("password").roles("USER").build();
return new InMemoryUserDetailsManager(user);
}
// @formatter:on
@Bean
SecurityContextRepository securityContextRepository() {
HttpSessionSecurityContextRepository repo = new HttpSessionSecurityContextRepository();
repo.setSpringSecurityContextKey("CUSTOM");
return repo;
}
}
}
| Config |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/context/properties/bind/MapBinderTests.java | {
"start": 32345,
"end": 32684
} | class ____ extends AbstractMap<String, Object> {
private final String source;
CustomMap(String source) {
this.source = source;
}
@Override
public Set<Entry<String, Object>> entrySet() {
return Collections.emptySet();
}
String getSource() {
return this.source;
}
}
}
private static final | CustomMap |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/client/runtime/src/main/java/org/jboss/resteasy/reactive/client/impl/RequestSpec.java | {
"start": 125,
"end": 707
} | class ____ {
final ConfigurationImpl configuration;
final ClientRequestHeaders headers;
boolean chunked;
public RequestSpec(ConfigurationImpl configuration) {
this.configuration = configuration;
headers = new ClientRequestHeaders(configuration);
}
public RequestSpec(RequestSpec requestSpec) {
this.configuration = requestSpec.configuration;
this.headers = new ClientRequestHeaders(configuration);
this.headers.headers.putAll(requestSpec.headers.headers);
this.chunked = requestSpec.chunked;
}
}
| RequestSpec |
java | apache__camel | dsl/camel-yaml-dsl/camel-yaml-dsl-deserializers/src/generated/java/org/apache/camel/dsl/yaml/deserializers/ModelDeserializers.java | {
"start": 299894,
"end": 300056
} | class ____ the input data. Append a to the end of the name if you want the input to be an array type.", displayName = "Type")
}
)
public static | of |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/engine/extension/TempDirectoryCleanupTests.java | {
"start": 8256,
"end": 8569
} | class ____ {
@TempDir(cleanup = ON_SUCCESS)
static Path onSuccessFailingFieldDir;
@Test
@Order(1)
void failing() {
TempDirFieldTests.onSuccessFailingFieldDir = onSuccessFailingFieldDir;
fail();
}
@Test
@Order(2)
void passing() {
}
}
static | OnSuccessFailingStaticFieldCase |
java | spring-projects__spring-security | oauth2/oauth2-client/src/main/java/org/springframework/security/oauth2/client/web/DefaultOAuth2AuthorizationRequestResolver.java | {
"start": 2927,
"end": 12383
} | class ____ implements OAuth2AuthorizationRequestResolver {
/**
* The default base {@code URI} used for authorization requests.
*/
public static final String DEFAULT_AUTHORIZATION_REQUEST_BASE_URI = "/oauth2/authorization";
private static final String REGISTRATION_ID_URI_VARIABLE_NAME = "registrationId";
private static final char PATH_DELIMITER = '/';
private static final StringKeyGenerator DEFAULT_STATE_GENERATOR = new Base64StringKeyGenerator(
Base64.getUrlEncoder());
private static final StringKeyGenerator DEFAULT_SECURE_KEY_GENERATOR = new Base64StringKeyGenerator(
Base64.getUrlEncoder().withoutPadding(), 96);
private static final Consumer<OAuth2AuthorizationRequest.Builder> DEFAULT_PKCE_APPLIER = OAuth2AuthorizationRequestCustomizers
.withPkce();
private final ClientRegistrationRepository clientRegistrationRepository;
private final RequestMatcher authorizationRequestMatcher;
private Consumer<OAuth2AuthorizationRequest.Builder> authorizationRequestCustomizer = (customizer) -> {
};
/**
* Constructs a {@code DefaultOAuth2AuthorizationRequestResolver} using the provided
* parameters.
* @param clientRegistrationRepository the repository of client registrations
* authorization requests
*/
public DefaultOAuth2AuthorizationRequestResolver(ClientRegistrationRepository clientRegistrationRepository) {
this(clientRegistrationRepository, DEFAULT_AUTHORIZATION_REQUEST_BASE_URI);
}
/**
* Constructs a {@code DefaultOAuth2AuthorizationRequestResolver} using the provided
* parameters.
* @param clientRegistrationRepository the repository of client registrations
* @param authorizationRequestBaseUri the base {@code URI} used for resolving
* authorization requests
*/
public DefaultOAuth2AuthorizationRequestResolver(ClientRegistrationRepository clientRegistrationRepository,
String authorizationRequestBaseUri) {
Assert.notNull(clientRegistrationRepository, "clientRegistrationRepository cannot be null");
Assert.hasText(authorizationRequestBaseUri, "authorizationRequestBaseUri cannot be empty");
this.clientRegistrationRepository = clientRegistrationRepository;
this.authorizationRequestMatcher = PathPatternRequestMatcher.withDefaults()
.matcher(authorizationRequestBaseUri + "/{" + REGISTRATION_ID_URI_VARIABLE_NAME + "}");
}
@Override
public OAuth2AuthorizationRequest resolve(HttpServletRequest request) {
String registrationId = resolveRegistrationId(request);
if (registrationId == null) {
return null;
}
String redirectUriAction = getAction(request, "login");
return resolve(request, registrationId, redirectUriAction);
}
@Override
public OAuth2AuthorizationRequest resolve(HttpServletRequest request, String registrationId) {
if (registrationId == null) {
return null;
}
String redirectUriAction = getAction(request, "authorize");
return resolve(request, registrationId, redirectUriAction);
}
/**
* Sets the {@code Consumer} to be provided the
* {@link OAuth2AuthorizationRequest.Builder} allowing for further customizations.
* @param authorizationRequestCustomizer the {@code Consumer} to be provided the
* {@link OAuth2AuthorizationRequest.Builder}
* @since 5.3
* @see OAuth2AuthorizationRequestCustomizers
*/
public void setAuthorizationRequestCustomizer(
Consumer<OAuth2AuthorizationRequest.Builder> authorizationRequestCustomizer) {
Assert.notNull(authorizationRequestCustomizer, "authorizationRequestCustomizer cannot be null");
this.authorizationRequestCustomizer = authorizationRequestCustomizer;
}
private String getAction(HttpServletRequest request, String defaultAction) {
String action = request.getParameter("action");
if (action == null) {
return defaultAction;
}
return action;
}
private OAuth2AuthorizationRequest resolve(HttpServletRequest request, String registrationId,
String redirectUriAction) {
if (registrationId == null) {
return null;
}
ClientRegistration clientRegistration = this.clientRegistrationRepository.findByRegistrationId(registrationId);
if (clientRegistration == null) {
throw new InvalidClientRegistrationIdException("Invalid Client Registration with Id: " + registrationId);
}
OAuth2AuthorizationRequest.Builder builder = getBuilder(clientRegistration);
String redirectUriStr = expandRedirectUri(request, clientRegistration, redirectUriAction);
// @formatter:off
builder.clientId(clientRegistration.getClientId())
.authorizationUri(clientRegistration.getProviderDetails().getAuthorizationUri())
.redirectUri(redirectUriStr)
.scopes(clientRegistration.getScopes())
.state(DEFAULT_STATE_GENERATOR.generateKey());
// @formatter:on
this.authorizationRequestCustomizer.accept(builder);
return builder.build();
}
private OAuth2AuthorizationRequest.Builder getBuilder(ClientRegistration clientRegistration) {
if (AuthorizationGrantType.AUTHORIZATION_CODE.equals(clientRegistration.getAuthorizationGrantType())) {
// @formatter:off
OAuth2AuthorizationRequest.Builder builder = OAuth2AuthorizationRequest.authorizationCode()
.attributes((attrs) ->
attrs.put(OAuth2ParameterNames.REGISTRATION_ID, clientRegistration.getRegistrationId()));
// @formatter:on
if (!CollectionUtils.isEmpty(clientRegistration.getScopes())
&& clientRegistration.getScopes().contains(OidcScopes.OPENID)) {
// Section 3.1.2.1 Authentication Request -
// https://openid.net/specs/openid-connect-core-1_0.html#AuthRequest scope
// REQUIRED. OpenID Connect requests MUST contain the "openid" scope
// value.
applyNonce(builder);
}
if (ClientAuthenticationMethod.NONE.equals(clientRegistration.getClientAuthenticationMethod())
|| clientRegistration.getClientSettings().isRequireProofKey()) {
DEFAULT_PKCE_APPLIER.accept(builder);
}
return builder;
}
throw new IllegalArgumentException(
"Invalid Authorization Grant Type (" + clientRegistration.getAuthorizationGrantType().getValue()
+ ") for Client Registration with Id: " + clientRegistration.getRegistrationId());
}
private String resolveRegistrationId(HttpServletRequest request) {
if (this.authorizationRequestMatcher.matches(request)) {
return this.authorizationRequestMatcher.matcher(request)
.getVariables()
.get(REGISTRATION_ID_URI_VARIABLE_NAME);
}
return null;
}
/**
* Expands the {@link ClientRegistration#getRedirectUri()} with following provided
* variables:<br/>
* - baseUrl (e.g. https://localhost/app) <br/>
* - baseScheme (e.g. https) <br/>
* - baseHost (e.g. localhost) <br/>
* - basePort (e.g. :8080) <br/>
* - basePath (e.g. /app) <br/>
* - registrationId (e.g. google) <br/>
* - action (e.g. login) <br/>
* <p/>
* Null variables are provided as empty strings.
* <p/>
* Default redirectUri is:
* {@code org.springframework.security.config.oauth2.client.CommonOAuth2Provider#DEFAULT_REDIRECT_URL}
* @return expanded URI
*/
private static String expandRedirectUri(HttpServletRequest request, ClientRegistration clientRegistration,
String action) {
Map<String, String> uriVariables = new HashMap<>();
uriVariables.put("registrationId", clientRegistration.getRegistrationId());
// @formatter:off
UriComponents uriComponents = UriComponentsBuilder.fromUriString(UrlUtils.buildFullRequestUrl(request))
.replacePath(request.getContextPath())
.replaceQuery(null)
.fragment(null)
.build();
// @formatter:on
String scheme = uriComponents.getScheme();
uriVariables.put("baseScheme", (scheme != null) ? scheme : "");
String host = uriComponents.getHost();
uriVariables.put("baseHost", (host != null) ? host : "");
// following logic is based on HierarchicalUriComponents#toUriString()
int port = uriComponents.getPort();
uriVariables.put("basePort", (port == -1) ? "" : ":" + port);
String path = uriComponents.getPath();
if (StringUtils.hasLength(path)) {
if (path.charAt(0) != PATH_DELIMITER) {
path = PATH_DELIMITER + path;
}
}
uriVariables.put("basePath", (path != null) ? path : "");
uriVariables.put("baseUrl", uriComponents.toUriString());
uriVariables.put("action", (action != null) ? action : "");
return UriComponentsBuilder.fromUriString(clientRegistration.getRedirectUri())
.buildAndExpand(uriVariables)
.toUriString();
}
/**
* Creates nonce and its hash for use in OpenID Connect 1.0 Authentication Requests.
* @param builder where the {@link OidcParameterNames#NONCE} and hash is stored for
* the authentication request
*
* @since 5.2
* @see <a target="_blank" href=
* "https://openid.net/specs/openid-connect-core-1_0.html#AuthRequest">3.1.2.1.
* Authentication Request</a>
*/
private static void applyNonce(OAuth2AuthorizationRequest.Builder builder) {
try {
String nonce = DEFAULT_SECURE_KEY_GENERATOR.generateKey();
String nonceHash = createHash(nonce);
builder.attributes((attrs) -> attrs.put(OidcParameterNames.NONCE, nonce));
builder.additionalParameters((params) -> params.put(OidcParameterNames.NONCE, nonceHash));
}
catch (NoSuchAlgorithmException ex) {
}
}
private static String createHash(String value) throws NoSuchAlgorithmException {
MessageDigest md = MessageDigest.getInstance("SHA-256");
byte[] digest = md.digest(value.getBytes(StandardCharsets.US_ASCII));
return Base64.getUrlEncoder().withoutPadding().encodeToString(digest);
}
}
| DefaultOAuth2AuthorizationRequestResolver |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/nullness/ReturnMissingNullableTest.java | {
"start": 52461,
"end": 53034
} | class ____ {
public String getMessage() {
checkState(false);
return null;
}
}
""")
.doTest();
}
@Test
public void negativeCases_unreachableVerifyFalse() {
createCompilationTestHelper()
.addSourceLines(
"com/google/errorprone/bugpatterns/nullness/LiteralNullReturnTest.java",
"""
package com.google.errorprone.bugpatterns.nullness;
import static com.google.common.base.Verify.verify;
| LiteralNullReturnTest |
java | elastic__elasticsearch | x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/gen/pipeline/AggPathInput.java | {
"start": 631,
"end": 2233
} | class ____ extends CommonNonExecutableInput<AggRef> {
// used in case the agg itself is not returned in a suitable format (like date aggs)
private final Processor action;
public AggPathInput(Expression expression, AggRef context) {
this(Source.EMPTY, expression, context, null);
}
/**
*
* Constructs a new <code>AggPathInput</code> instance.
* The action is used for handling corner-case results such as date histogram which returns
* a full date object for year which requires additional extraction.
*/
public AggPathInput(Source source, Expression expression, AggRef context, Processor action) {
super(source, expression, context);
this.action = action;
}
@Override
protected NodeInfo<AggPathInput> info() {
return NodeInfo.create(this, AggPathInput::new, expression(), context(), action);
}
public Processor action() {
return action;
}
@Override
public boolean resolved() {
return true;
}
@Override
public final boolean supportedByAggsOnlyQuery() {
return true;
}
@Override
public int hashCode() {
return Objects.hash(context(), action);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
AggPathInput other = (AggPathInput) obj;
return Objects.equals(context(), other.context()) && Objects.equals(action, other.action);
}
}
| AggPathInput |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/FileEndpointBuilderFactory.java | {
"start": 82168,
"end": 114247
} | interface ____
extends
EndpointConsumerBuilder {
default FileEndpointConsumerBuilder basic() {
return (FileEndpointConsumerBuilder) this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder bridgeErrorHandler(boolean bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder bridgeErrorHandler(String bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Similar to the startingDirectoryMustExist option, but this applies
* during polling (after starting the consumer).
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param directoryMustExist the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder directoryMustExist(boolean directoryMustExist) {
doSetProperty("directoryMustExist", directoryMustExist);
return this;
}
/**
* Similar to the startingDirectoryMustExist option, but this applies
* during polling (after starting the consumer).
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param directoryMustExist the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder directoryMustExist(String directoryMustExist) {
doSetProperty("directoryMustExist", directoryMustExist);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option is a: <code>org.apache.camel.spi.ExceptionHandler</code>
* type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder exceptionHandler(org.apache.camel.spi.ExceptionHandler exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option will be converted to a
* <code>org.apache.camel.spi.ExceptionHandler</code> type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder exceptionHandler(String exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option is a: <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder exchangePattern(org.apache.camel.ExchangePattern exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option will be converted to a
* <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder exchangePattern(String exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
/**
* To define which file attributes of interest. Like
* posix:permissions,posix:owner,basic:lastAccessTime, it supports basic
* wildcard like posix:, basic:lastAccessTime.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: consumer (advanced)
*
* @param extendedAttributes the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder extendedAttributes(String extendedAttributes) {
doSetProperty("extendedAttributes", extendedAttributes);
return this;
}
/**
* Whether to accept hidden directories. Directories which names starts
* with dot are regarded as a hidden directory, and by default are not
* included. Set this option to true to include hidden directories in
* the file consumer.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param includeHiddenDirs the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder includeHiddenDirs(boolean includeHiddenDirs) {
doSetProperty("includeHiddenDirs", includeHiddenDirs);
return this;
}
/**
* Whether to accept hidden directories. Directories which names starts
* with dot are regarded as a hidden directory, and by default are not
* included. Set this option to true to include hidden directories in
* the file consumer.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param includeHiddenDirs the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder includeHiddenDirs(String includeHiddenDirs) {
doSetProperty("includeHiddenDirs", includeHiddenDirs);
return this;
}
/**
* Whether to accept hidden files. Files which names starts with dot is
* regarded as a hidden file, and by default not included. Set this
* option to true to include hidden files in the file consumer.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param includeHiddenFiles the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder includeHiddenFiles(boolean includeHiddenFiles) {
doSetProperty("includeHiddenFiles", includeHiddenFiles);
return this;
}
/**
* Whether to accept hidden files. Files which names starts with dot is
* regarded as a hidden file, and by default not included. Set this
* option to true to include hidden files in the file consumer.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param includeHiddenFiles the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder includeHiddenFiles(String includeHiddenFiles) {
doSetProperty("includeHiddenFiles", includeHiddenFiles);
return this;
}
/**
* A pluggable in-progress repository
* org.apache.camel.spi.IdempotentRepository. The in-progress repository
* is used to account the current in progress files being consumed. By
* default a memory based repository is used.
*
* The option is a:
* <code>org.apache.camel.spi.IdempotentRepository</code> type.
*
* Group: consumer (advanced)
*
* @param inProgressRepository the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder inProgressRepository(org.apache.camel.spi.IdempotentRepository inProgressRepository) {
doSetProperty("inProgressRepository", inProgressRepository);
return this;
}
/**
* A pluggable in-progress repository
* org.apache.camel.spi.IdempotentRepository. The in-progress repository
* is used to account the current in progress files being consumed. By
* default a memory based repository is used.
*
* The option will be converted to a
* <code>org.apache.camel.spi.IdempotentRepository</code> type.
*
* Group: consumer (advanced)
*
* @param inProgressRepository the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder inProgressRepository(String inProgressRepository) {
doSetProperty("inProgressRepository", inProgressRepository);
return this;
}
/**
* When consuming, a local work directory can be used to store the
* remote file content directly in local files, to avoid loading the
* content into memory. This is beneficial, if you consume a very big
* remote file and thus can conserve memory.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: consumer (advanced)
*
* @param localWorkDirectory the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder localWorkDirectory(String localWorkDirectory) {
doSetProperty("localWorkDirectory", localWorkDirectory);
return this;
}
/**
* To use a custom org.apache.camel.spi.ExceptionHandler to handle any
* thrown exceptions that happens during the file on completion process
* where the consumer does either a commit or rollback. The default
* implementation will log any exception at WARN level and ignore.
*
* The option is a: <code>org.apache.camel.spi.ExceptionHandler</code>
* type.
*
* Group: consumer (advanced)
*
* @param onCompletionExceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder onCompletionExceptionHandler(org.apache.camel.spi.ExceptionHandler onCompletionExceptionHandler) {
doSetProperty("onCompletionExceptionHandler", onCompletionExceptionHandler);
return this;
}
/**
* To use a custom org.apache.camel.spi.ExceptionHandler to handle any
* thrown exceptions that happens during the file on completion process
* where the consumer does either a commit or rollback. The default
* implementation will log any exception at WARN level and ignore.
*
* The option will be converted to a
* <code>org.apache.camel.spi.ExceptionHandler</code> type.
*
* Group: consumer (advanced)
*
* @param onCompletionExceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder onCompletionExceptionHandler(String onCompletionExceptionHandler) {
doSetProperty("onCompletionExceptionHandler", onCompletionExceptionHandler);
return this;
}
/**
* A pluggable org.apache.camel.PollingConsumerPollingStrategy allowing
* you to provide your custom implementation to control error handling
* usually occurred during the poll operation before an Exchange have
* been created and being routed in Camel.
*
* The option is a:
* <code>org.apache.camel.spi.PollingConsumerPollStrategy</code> type.
*
* Group: consumer (advanced)
*
* @param pollStrategy the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder pollStrategy(org.apache.camel.spi.PollingConsumerPollStrategy pollStrategy) {
doSetProperty("pollStrategy", pollStrategy);
return this;
}
/**
* A pluggable org.apache.camel.PollingConsumerPollingStrategy allowing
* you to provide your custom implementation to control error handling
* usually occurred during the poll operation before an Exchange have
* been created and being routed in Camel.
*
* The option will be converted to a
* <code>org.apache.camel.spi.PollingConsumerPollStrategy</code> type.
*
* Group: consumer (advanced)
*
* @param pollStrategy the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder pollStrategy(String pollStrategy) {
doSetProperty("pollStrategy", pollStrategy);
return this;
}
/**
* Whether to enable probing of the content type. If enable then the
* consumer uses Files#probeContentType(java.nio.file.Path) to determine
* the content-type of the file, and store that as a header with key
* Exchange#FILE_CONTENT_TYPE on the Message.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param probeContentType the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder probeContentType(boolean probeContentType) {
doSetProperty("probeContentType", probeContentType);
return this;
}
/**
* Whether to enable probing of the content type. If enable then the
* consumer uses Files#probeContentType(java.nio.file.Path) to determine
* the content-type of the file, and store that as a header with key
* Exchange#FILE_CONTENT_TYPE on the Message.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param probeContentType the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder probeContentType(String probeContentType) {
doSetProperty("probeContentType", probeContentType);
return this;
}
/**
* A pluggable
* org.apache.camel.component.file.GenericFileProcessStrategy allowing
* you to implement your own readLock option or similar. Can also be
* used when special conditions must be met before a file can be
* consumed, such as a special ready file exists. If this option is set
* then the readLock option does not apply.
*
* The option is a:
* <code>org.apache.camel.component.file.GenericFileProcessStrategy<java.io.File></code> type.
*
* Group: consumer (advanced)
*
* @param processStrategy the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder processStrategy(org.apache.camel.component.file.GenericFileProcessStrategy<java.io.File> processStrategy) {
doSetProperty("processStrategy", processStrategy);
return this;
}
/**
* A pluggable
* org.apache.camel.component.file.GenericFileProcessStrategy allowing
* you to implement your own readLock option or similar. Can also be
* used when special conditions must be met before a file can be
* consumed, such as a special ready file exists. If this option is set
* then the readLock option does not apply.
*
* The option will be converted to a
* <code>org.apache.camel.component.file.GenericFileProcessStrategy<java.io.File></code> type.
*
* Group: consumer (advanced)
*
* @param processStrategy the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder processStrategy(String processStrategy) {
doSetProperty("processStrategy", processStrategy);
return this;
}
/**
* Whether the starting directory must exist. Mind that the autoCreate
* option is default enabled, which means the starting directory is
* normally auto created if it doesn't exist. You can disable autoCreate
* and enable this to ensure the starting directory must exist. Will
* throw an exception if the directory doesn't exist.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param startingDirectoryMustExist the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder startingDirectoryMustExist(boolean startingDirectoryMustExist) {
doSetProperty("startingDirectoryMustExist", startingDirectoryMustExist);
return this;
}
/**
* Whether the starting directory must exist. Mind that the autoCreate
* option is default enabled, which means the starting directory is
* normally auto created if it doesn't exist. You can disable autoCreate
* and enable this to ensure the starting directory must exist. Will
* throw an exception if the directory doesn't exist.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param startingDirectoryMustExist the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder startingDirectoryMustExist(String startingDirectoryMustExist) {
doSetProperty("startingDirectoryMustExist", startingDirectoryMustExist);
return this;
}
/**
* Whether the starting directory has access permissions. Mind that the
* startingDirectoryMustExist parameter must be set to true to verify
* that the directory exists. Will throw an exception if the directory
* doesn't have read and write permissions.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param startingDirectoryMustHaveAccess the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder startingDirectoryMustHaveAccess(boolean startingDirectoryMustHaveAccess) {
doSetProperty("startingDirectoryMustHaveAccess", startingDirectoryMustHaveAccess);
return this;
}
/**
* Whether the starting directory has access permissions. Mind that the
* startingDirectoryMustExist parameter must be set to true to verify
* that the directory exists. Will throw an exception if the directory
* doesn't have read and write permissions.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param startingDirectoryMustHaveAccess the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder startingDirectoryMustHaveAccess(String startingDirectoryMustHaveAccess) {
doSetProperty("startingDirectoryMustHaveAccess", startingDirectoryMustHaveAccess);
return this;
}
/**
* Automatically create missing directories in the file's pathname. For
* the file consumer, that means creating the starting directory. For
* the file producer, it means the directory the files should be written
* to.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param autoCreate the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder autoCreate(boolean autoCreate) {
doSetProperty("autoCreate", autoCreate);
return this;
}
/**
* Automatically create missing directories in the file's pathname. For
* the file consumer, that means creating the starting directory. For
* the file producer, it means the directory the files should be written
* to.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param autoCreate the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder autoCreate(String autoCreate) {
doSetProperty("autoCreate", autoCreate);
return this;
}
/**
* When auto-creating directories should each subdirectory be created
* one at a time. This may be needed due to security issues on some
* file-shares.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param autoCreateStepwise the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder autoCreateStepwise(boolean autoCreateStepwise) {
doSetProperty("autoCreateStepwise", autoCreateStepwise);
return this;
}
/**
* When auto-creating directories should each subdirectory be created
* one at a time. This may be needed due to security issues on some
* file-shares.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param autoCreateStepwise the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder autoCreateStepwise(String autoCreateStepwise) {
doSetProperty("autoCreateStepwise", autoCreateStepwise);
return this;
}
/**
* Maximum number of messages to keep in memory available for browsing.
* Use 0 for unlimited.
*
* The option is a: <code>int</code> type.
*
* Default: 100
* Group: advanced
*
* @param browseLimit the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder browseLimit(int browseLimit) {
doSetProperty("browseLimit", browseLimit);
return this;
}
/**
* Maximum number of messages to keep in memory available for browsing.
* Use 0 for unlimited.
*
* The option will be converted to a <code>int</code> type.
*
* Default: 100
* Group: advanced
*
* @param browseLimit the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder browseLimit(String browseLimit) {
doSetProperty("browseLimit", browseLimit);
return this;
}
/**
* Buffer size in bytes used for writing files (or in case of FTP for
* downloading and uploading files).
*
* The option is a: <code>int</code> type.
*
* Default: 131072
* Group: advanced
*
* @param bufferSize the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder bufferSize(int bufferSize) {
doSetProperty("bufferSize", bufferSize);
return this;
}
/**
* Buffer size in bytes used for writing files (or in case of FTP for
* downloading and uploading files).
*
* The option will be converted to a <code>int</code> type.
*
* Default: 131072
* Group: advanced
*
* @param bufferSize the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder bufferSize(String bufferSize) {
doSetProperty("bufferSize", bufferSize);
return this;
}
/**
* Whether to fall back and do a copy and delete file, in case the file
* could not be renamed directly. This option is not available for the
* FTP component.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param copyAndDeleteOnRenameFail the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder copyAndDeleteOnRenameFail(boolean copyAndDeleteOnRenameFail) {
doSetProperty("copyAndDeleteOnRenameFail", copyAndDeleteOnRenameFail);
return this;
}
/**
* Whether to fall back and do a copy and delete file, in case the file
* could not be renamed directly. This option is not available for the
* FTP component.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param copyAndDeleteOnRenameFail the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder copyAndDeleteOnRenameFail(String copyAndDeleteOnRenameFail) {
doSetProperty("copyAndDeleteOnRenameFail", copyAndDeleteOnRenameFail);
return this;
}
/**
* Perform rename operations using a copy and delete strategy. This is
* primarily used in environments where the regular rename operation is
* unreliable (e.g., across different file systems or networks). This
* option takes precedence over the copyAndDeleteOnRenameFail parameter
* that will automatically fall back to the copy and delete strategy,
* but only after additional delays.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param renameUsingCopy the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder renameUsingCopy(boolean renameUsingCopy) {
doSetProperty("renameUsingCopy", renameUsingCopy);
return this;
}
/**
* Perform rename operations using a copy and delete strategy. This is
* primarily used in environments where the regular rename operation is
* unreliable (e.g., across different file systems or networks). This
* option takes precedence over the copyAndDeleteOnRenameFail parameter
* that will automatically fall back to the copy and delete strategy,
* but only after additional delays.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param renameUsingCopy the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder renameUsingCopy(String renameUsingCopy) {
doSetProperty("renameUsingCopy", renameUsingCopy);
return this;
}
/**
* Sets whether synchronous processing should be strictly used.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param synchronous the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder synchronous(boolean synchronous) {
doSetProperty("synchronous", synchronous);
return this;
}
/**
* Sets whether synchronous processing should be strictly used.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param synchronous the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointConsumerBuilder synchronous(String synchronous) {
doSetProperty("synchronous", synchronous);
return this;
}
}
/**
* Builder for endpoint producers for the File component.
*/
public | AdvancedFileEndpointConsumerBuilder |
java | apache__flink | flink-connectors/flink-file-sink-common/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/InProgressFileWriter.java | {
"start": 1132,
"end": 2337
} | interface ____<IN, BucketID>
extends PartFileInfo<BucketID>, RecordWiseCompactingFileWriter<IN> {
/**
* Write an element to the part file.
*
* @param element the element to be written.
* @param currentTime the writing time.
* @throws IOException Thrown if writing the element fails.
*/
void write(final IN element, final long currentTime) throws IOException;
/**
* @return The state of the current part file.
* @throws IOException Thrown if persisting the part file fails.
*/
InProgressFileRecoverable persist() throws IOException;
/**
* @return The state of the pending part file. {@link Bucket} uses this to commit the pending
* file.
* @throws IOException Thrown if an I/O error occurs.
*/
PendingFileRecoverable closeForCommit() throws IOException;
/** Dispose the part file. */
void dispose();
@Override
default void write(IN element) throws IOException {
write(element, System.currentTimeMillis());
}
// ------------------------------------------------------------------------
/** A handle can be used to recover in-progress file.. */
| InProgressFileWriter |
java | lettuce-io__lettuce-core | src/test/java/io/lettuce/core/cluster/ClusterReadOnlyCommandsUnitTests.java | {
"start": 411,
"end": 761
} | class ____ {
@Test
void testCount() {
assertThat(ClusterReadOnlyCommands.getReadOnlyCommands()).hasSize(101);
}
@Test
void testResolvableCommandNames() {
for (ProtocolKeyword readOnlyCommand : ClusterReadOnlyCommands.getReadOnlyCommands()) {
// Convert command string to | ClusterReadOnlyCommandsUnitTests |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java | {
"start": 16477,
"end": 24542
} | class ____ implements Comparator<ShardRouting> {
private final Map<String, Double> nodeRanks;
NodeRankComparator(Map<String, Double> nodeRanks) {
this.nodeRanks = nodeRanks;
}
@Override
public int compare(ShardRouting s1, ShardRouting s2) {
if (s1.currentNodeId().equals(s2.currentNodeId())) {
// these shards on the same node
return 0;
}
Double shard1rank = nodeRanks.get(s1.currentNodeId());
Double shard2rank = nodeRanks.get(s2.currentNodeId());
if (shard1rank != null) {
if (shard2rank != null) {
return shard1rank.compareTo(shard2rank);
} else {
// place non-nulls after null values
return 1;
}
} else {
if (shard2rank != null) {
// place nulls before non-null values
return -1;
} else {
// Both nodes do not have stats, they are equal
return 0;
}
}
}
}
/**
* Returns an iterator only on the primary shard.
*/
public ShardIterator primaryShardIt() {
if (primary != null) {
return new ShardIterator(shardId, Collections.singletonList(primary));
}
return new ShardIterator(shardId, Collections.emptyList());
}
public ShardIterator onlyNodeActiveInitializingShardsIt(String nodeId) {
ArrayList<ShardRouting> ordered = new ArrayList<>(activeShards.size() + allInitializingShards.size());
int seed = shuffler.nextSeed();
for (ShardRouting shardRouting : shuffler.shuffle(activeShards, seed)) {
if (nodeId.equals(shardRouting.currentNodeId())) {
ordered.add(shardRouting);
}
}
for (ShardRouting shardRouting : shuffler.shuffle(allInitializingShards, seed)) {
if (nodeId.equals(shardRouting.currentNodeId())) {
ordered.add(shardRouting);
}
}
return new ShardIterator(shardId, ordered);
}
public ShardIterator onlyNodeSelectorActiveInitializingShardsIt(String nodeAttributes, DiscoveryNodes discoveryNodes) {
return onlyNodeSelectorActiveInitializingShardsIt(new String[] { nodeAttributes }, discoveryNodes);
}
/**
* Returns shards based on nodeAttributes given such as node name , node attribute, node IP
* Supports node specifications in cluster API
*/
public ShardIterator onlyNodeSelectorActiveInitializingShardsIt(String[] nodeAttributes, DiscoveryNodes discoveryNodes) {
ArrayList<ShardRouting> ordered = new ArrayList<>(activeShards.size() + allInitializingShards.size());
Set<String> selectedNodes = Sets.newHashSet(discoveryNodes.resolveNodes(nodeAttributes));
int seed = shuffler.nextSeed();
for (ShardRouting shardRouting : shuffler.shuffle(activeShards, seed)) {
if (selectedNodes.contains(shardRouting.currentNodeId())) {
ordered.add(shardRouting);
}
}
for (ShardRouting shardRouting : shuffler.shuffle(allInitializingShards, seed)) {
if (selectedNodes.contains(shardRouting.currentNodeId())) {
ordered.add(shardRouting);
}
}
if (ordered.isEmpty()) {
final String message = String.format(
Locale.ROOT,
"no data nodes with %s [%s] found for shard: %s",
nodeAttributes.length == 1 ? "criteria" : "criterion",
String.join(",", nodeAttributes),
shardId()
);
throw new IllegalArgumentException(message);
}
return new ShardIterator(shardId, ordered);
}
public ShardIterator preferNodeActiveInitializingShardsIt(Set<String> nodeIds) {
ArrayList<ShardRouting> preferred = new ArrayList<>(activeShards.size() + allInitializingShards.size());
ArrayList<ShardRouting> notPreferred = new ArrayList<>(activeShards.size() + allInitializingShards.size());
// fill it in a randomized fashion
for (ShardRouting shardRouting : shuffler.shuffle(activeShards)) {
if (nodeIds.contains(shardRouting.currentNodeId())) {
preferred.add(shardRouting);
} else {
notPreferred.add(shardRouting);
}
}
preferred.addAll(notPreferred);
if (allInitializingShards.isEmpty() == false) {
preferred.addAll(allInitializingShards);
}
return new ShardIterator(shardId, preferred);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
IndexShardRoutingTable that = (IndexShardRoutingTable) o;
if (shardId.equals(that.shardId) == false) return false;
return Arrays.equals(shards, that.shards);
}
@Override
public int hashCode() {
int result = shardId.hashCode();
result = 31 * result + Arrays.hashCode(shards);
return result;
}
/**
* Returns <code>true</code> iff all shards in the routing table are started otherwise <code>false</code>
*/
public boolean allShardsStarted() {
return allShardsStarted;
}
/**
* @return the count of active searchable shards
*/
public int getActiveSearchShardCount() {
return activeSearchShardCount;
}
/**
* @return the total count of searchable shards
*/
public int getTotalSearchShardCount() {
return totalSearchShardCount;
}
public boolean hasSearchShards() {
return totalSearchShardCount > 0;
}
@Nullable
public ShardRouting getByAllocationId(String allocationId) {
for (ShardRouting shardRouting : assignedShards()) {
if (shardRouting.allocationId().getId().equals(allocationId)) {
return shardRouting;
}
}
return null;
}
public Set<String> getPromotableAllocationIds() {
assert MasterService.assertNotMasterUpdateThread("not using this on the master thread so we don't have to pre-compute this");
Set<String> allAllocationIds = new HashSet<>();
for (ShardRouting shard : shards) {
if (shard.isPromotableToPrimary()) {
if (shard.relocating()) {
allAllocationIds.add(shard.getTargetRelocatingShard().allocationId().getId());
}
if (shard.assignedToNode()) {
allAllocationIds.add(shard.allocationId().getId());
}
}
}
return allAllocationIds;
}
record AttributesKey(List<String> attributes) {}
public ShardRouting primaryShard() {
return primary;
}
public List<ShardRouting> replicaShards() {
return this.replicas;
}
public List<ShardRouting> replicaShardsWithState(ShardRoutingState... states) {
List<ShardRouting> shards = new ArrayList<>();
for (ShardRouting shardEntry : replicas) {
for (ShardRoutingState state : states) {
if (shardEntry.state() == state) {
shards.add(shardEntry);
}
}
}
return shards;
}
public List<ShardRouting> shardsWithState(ShardRoutingState state) {
if (state == ShardRoutingState.INITIALIZING) {
return allInitializingShards;
}
List<ShardRouting> shards = new ArrayList<>();
for (ShardRouting shardEntry : this.shards) {
if (shardEntry.state() == state) {
shards.add(shardEntry);
}
}
return shards;
}
public static Builder builder(ShardId shardId) {
return new Builder(shardId);
}
public static | NodeRankComparator |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserBaseVisitor.java | {
"start": 409,
"end": 754
} | class ____ an empty implementation of {@link EsqlBaseParserVisitor},
* which can be extended to create a visitor which only needs to handle a subset
* of the available methods.
*
* @param <T> The return type of the visit operation. Use {@link Void} for
* operations with no return type.
*/
@SuppressWarnings("CheckReturnValue")
public | provides |
java | quarkusio__quarkus | extensions/arc/deployment/src/test/java/io/quarkus/arc/test/profile/UnlessBuildProfileStereotypeTest.java | {
"start": 5600,
"end": 6709
} | class ____ {
static final String SIMPLE = "Producers.simple";
static final String TEST_NEVER_DIRECT = "Producers.testNeverDirect";
static final String TEST_NEVER_TRANSITIVE = "Producers.testNeverTransitive";
@Produces
@ApplicationScoped
MyService simple() {
return new MyService() {
@Override
public String hello() {
return SIMPLE;
}
};
}
@Produces
@ApplicationScoped
@TestNever
MyService testNeverDirect() {
return new MyService() {
@Override
public String hello() {
return TEST_NEVER_DIRECT;
}
};
}
@Produces
@ApplicationScoped
@TransitiveTestNever
MyService testNeverTransitive() {
return new MyService() {
@Override
public String hello() {
return TEST_NEVER_TRANSITIVE;
}
};
}
}
}
| Producers |
java | apache__camel | components/camel-servlet/src/test/java/org/apache/camel/component/servlet/MultipartUploadTest.java | {
"start": 1352,
"end": 3346
} | class ____ extends ServletCamelRouterTestSupport {
@Override
protected DeploymentInfo getDeploymentInfo() {
DeploymentInfo deploymentInfo = super.getDeploymentInfo();
String tmpDir = System.getProperty("java.io.tmpdir");
MultipartConfigElement defaultMultipartConfig = new MultipartConfigElement(tmpDir);
deploymentInfo.setDefaultMultipartConfig(defaultMultipartConfig);
return deploymentInfo;
}
@Test
void testMultipartUpload() throws IOException {
String content = "Hello World";
InputStream inputStream = context.getTypeConverter().convertTo(InputStream.class, content);
PostMethodWebRequest request
= new PostMethodWebRequest(
contextUrl + "/services/multipartUpload", inputStream, "multipart/form-data; boundary=----Boundary");
WebResponse response = query(request);
assertEquals(content, response.getText());
}
@Override
protected RoutesBuilder createRouteBuilder() throws Exception {
return new RouteBuilder() {
@Override
public void configure() throws Exception {
from("servlet:multipartUpload?attachmentMultipartBinding=true")
.process(new Processor() {
@Override
public void process(Exchange exchange) throws Exception {
AttachmentMessage message = exchange.getMessage(AttachmentMessage.class);
DataHandler file = message.getAttachment("file");
if (file != null) {
exchange.getMessage().setBody(file.getContent());
} else {
exchange.getMessage().setBody(null);
}
}
});
}
};
}
}
| MultipartUploadTest |
java | apache__kafka | server/src/test/java/org/apache/kafka/server/MonitorablePluginsIntegrationTest.java | {
"start": 6176,
"end": 6605
} | class ____ extends RackAwareReplicaSelector implements Monitorable {
private static final int METRICS_COUNT = 1;
@Override
public void withPluginMetrics(PluginMetrics metrics) {
MetricName name = metrics.metricName("name", "description", new LinkedHashMap<>());
metrics.addMetric(name, (Measurable) (config, now) -> 123);
}
}
public static | MonitorableReplicaSelector |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/TotalOrderPartitioner.java | {
"start": 1259,
"end": 2389
} | class ____<K ,V>
extends org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner<K, V>
implements Partitioner<K,V> {
public TotalOrderPartitioner() { }
public void configure(JobConf job) {
super.setConf(job);
}
/**
* Set the path to the SequenceFile storing the sorted partition keyset.
* It must be the case that for <code>R</code> reduces, there are <code>R-1</code>
* keys in the SequenceFile.
* @deprecated Use
* {@link #setPartitionFile(Configuration, Path)}
* instead
*/
@Deprecated
public static void setPartitionFile(JobConf job, Path p) {
org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner.
setPartitionFile(job, p);
}
/**
* Get the path to the SequenceFile storing the sorted partition keyset.
* @see #setPartitionFile(JobConf,Path)
* @deprecated Use
* {@link #getPartitionFile(Configuration)}
* instead
*/
@Deprecated
public static String getPartitionFile(JobConf job) {
return org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner.
getPartitionFile(job);
}
}
| TotalOrderPartitioner |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/operators/mixed/ObservableSwitchMapCompletable.java | {
"start": 1453,
"end": 2201
} | class ____<T> extends Completable {
final Observable<T> source;
final Function<? super T, ? extends CompletableSource> mapper;
final boolean delayErrors;
public ObservableSwitchMapCompletable(Observable<T> source,
Function<? super T, ? extends CompletableSource> mapper, boolean delayErrors) {
this.source = source;
this.mapper = mapper;
this.delayErrors = delayErrors;
}
@Override
protected void subscribeActual(CompletableObserver observer) {
if (!ScalarXMapZHelper.tryAsCompletable(source, mapper, observer)) {
source.subscribe(new SwitchMapCompletableObserver<>(observer, mapper, delayErrors));
}
}
static final | ObservableSwitchMapCompletable |
java | apache__hadoop | hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsScaleTest.java | {
"start": 1249,
"end": 2046
} | class ____ extends AbstractAbfsIntegrationTest {
protected static final Logger LOG =
LoggerFactory.getLogger(AbstractAbfsScaleTest.class);
public AbstractAbfsScaleTest() throws Exception {
super();
}
@Override
protected int getTestTimeoutMillis() {
return AzureTestConstants.SCALE_TEST_TIMEOUT_MILLIS;
}
@BeforeEach
@Override
public void setup() throws Exception {
super.setup();
LOG.debug("Scale test operation count = {}", getOperationCount());
Configuration rawConfiguration = getRawConfiguration();
assumeScaleTestsEnabled(rawConfiguration);
}
protected long getOperationCount() {
return getConfiguration().getLong(AzureTestConstants.KEY_OPERATION_COUNT,
AzureTestConstants.DEFAULT_OPERATION_COUNT);
}
}
| AbstractAbfsScaleTest |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/InputSplitWithLocationInfo.java | {
"start": 1033,
"end": 1505
} | interface ____ extends InputSplit {
/**
* Gets info about which nodes the input split is stored on and how it is
* stored at each location.
*
* @return list of <code>SplitLocationInfo</code>s describing how the split
* data is stored at each location. A null value indicates that all the
* locations have the data stored on disk.
* @throws IOException
*/
SplitLocationInfo[] getLocationInfo() throws IOException;
}
| InputSplitWithLocationInfo |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.