language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/jpa/xml/XmlAttributeOverrideTest.java
|
{
"start": 457,
"end": 2412
}
|
class ____ {
@Test
public void testAttributeOverriding(EntityManagerFactoryScope scope) {
scope.inEntityManager(
entityManager -> {
try {
entityManager.getTransaction().begin();
Employee e = new Employee();
e.setId( 100L );
e.setName( "Bubba" );
e.setHomeAddress( new Address( "123 Main St", "New York", "NY", "11111" ) );
e.setMailAddress( new Address( "P.O. Box 123", "New York", "NY", "11111" ) );
entityManager.persist( e );
entityManager.flush();
entityManager.getTransaction().rollback();
}
catch (Exception e) {
if ( entityManager.getTransaction().isActive() ) {
entityManager.getTransaction().rollback();
}
throw e;
}
}
);
}
@Test
public void testDefaultEventListener(EntityManagerFactoryScope scope) {
scope.inEntityManager(
entityManager -> {
try {
entityManager.getTransaction().begin();
CounterListener.reset();
Employee e = new Employee();
e.setId( 100L );
e.setName( "Bubba" );
e.setHomeAddress( new Address( "123 Main St", "New York", "NY", "11111" ) );
e.setMailAddress( new Address( "P.O. Box 123", "New York", "NY", "11111" ) );
entityManager.persist( e );
entityManager.flush();
entityManager.clear();
entityManager.find( Employee.class, e.getId() ).setName( "Bibo" );
entityManager.flush();
entityManager.clear();
entityManager.remove( entityManager.find( Employee.class, e.getId() ) );
entityManager.flush();
entityManager.getTransaction().rollback();
}
catch (Exception e) {
if ( entityManager.getTransaction().isActive() ) {
entityManager.getTransaction().rollback();
}
throw e;
}
}
);
assertEquals( 1, CounterListener.insert );
assertEquals( 1, CounterListener.update );
assertEquals( 1, CounterListener.delete );
}
}
|
XmlAttributeOverrideTest
|
java
|
quarkusio__quarkus
|
independent-projects/tools/analytics-common/src/main/java/io/quarkus/analytics/rest/SegmentClient.java
|
{
"start": 301,
"end": 804
}
|
interface ____ {
/**
* Posts the anonymous identity to the upstream collection tool.
* Usually this is done once per user's UUID
*
* @param identity
*/
CompletableFuture<HttpResponse<String>> postIdentity(final Identity identity);
/**
* Posts the trace to the upstream collection tool.
* This contains the actual data to be collected.
*
* @param track
*/
CompletableFuture<HttpResponse<String>> postTrack(final Track track);
}
|
SegmentClient
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/time/JavaTimeDefaultTimeZoneTest.java
|
{
"start": 10866,
"end": 11463
}
|
class ____ {
// BUG: Diagnostic matches: REPLACEME
JapaneseDate now = JapaneseChronology.INSTANCE.dateNow();
JapaneseDate nowWithZone = JapaneseChronology.INSTANCE.dateNow(systemDefault());
}
""")
.doTest();
}
@Test
public void minguoChronology() {
helper
.addSourceLines(
"TestClass.java",
"""
import static java.time.ZoneId.systemDefault;
import java.time.chrono.MinguoChronology;
import java.time.chrono.MinguoDate;
public
|
TestClass
|
java
|
spring-projects__spring-framework
|
spring-aop/src/main/java/org/springframework/aop/framework/autoproxy/BeanNameAutoProxyCreator.java
|
{
"start": 1596,
"end": 3713
}
|
class ____ extends AbstractAutoProxyCreator {
private static final String[] NO_ALIASES = new String[0];
private @Nullable List<String> beanNames;
/**
* Set the names of the beans that should automatically get wrapped with proxies.
* A name can specify a prefix to match by ending with "*", for example, "myBean,tx*"
* will match the bean named "myBean" and all beans whose name start with "tx".
* <p><b>NOTE:</b> In case of a FactoryBean, only the objects created by the
* FactoryBean will get proxied. If you intend to proxy a FactoryBean instance
* itself (a rare use case), specify the bean name of the FactoryBean
* including the factory-bean prefix "&": for example, "&myFactoryBean".
* @see org.springframework.beans.factory.FactoryBean
* @see org.springframework.beans.factory.BeanFactory#FACTORY_BEAN_PREFIX
*/
public void setBeanNames(String... beanNames) {
Assert.notEmpty(beanNames, "'beanNames' must not be empty");
this.beanNames = new ArrayList<>(beanNames.length);
for (String mappedName : beanNames) {
this.beanNames.add(mappedName.strip());
}
}
/**
* Delegate to {@link AbstractAutoProxyCreator#getCustomTargetSource(Class, String)}
* if the bean name matches one of the names in the configured list of supported
* names, returning {@code null} otherwise.
* @since 5.3
* @see #setBeanNames(String...)
*/
@Override
protected @Nullable TargetSource getCustomTargetSource(Class<?> beanClass, String beanName) {
return (isSupportedBeanName(beanClass, beanName) ?
super.getCustomTargetSource(beanClass, beanName) : null);
}
/**
* Identify as a bean to proxy if the bean name matches one of the names in
* the configured list of supported names.
* @see #setBeanNames(String...)
*/
@Override
protected Object @Nullable [] getAdvicesAndAdvisorsForBean(
Class<?> beanClass, String beanName, @Nullable TargetSource targetSource) {
return (isSupportedBeanName(beanClass, beanName) ?
PROXY_WITHOUT_ADDITIONAL_INTERCEPTORS : DO_NOT_PROXY);
}
/**
* Determine if the bean name for the given bean
|
BeanNameAutoProxyCreator
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/boot/model/source/internal/hbm/EntityHierarchyBuilder.java
|
{
"start": 1086,
"end": 1483
}
|
class ____ to:<ol>
* <li>
* validate that all hierarchies are complete (make sure a mapping does not reference
* an unknown entity as its super)
* </li>
* <li>
* ultimately order the processing of every entity to make sure we process each
* hierarchy "downward" (from super to sub(s)).
* </li>
* </ol>
*
* @author Steve Ebersole
*/
public
|
is
|
java
|
apache__flink
|
flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/hybrid/HybridSourceTest.java
|
{
"start": 4040,
"end": 5385
}
|
class ____ extends MockSplitEnumerator {
public ExtendedMockSplitEnumerator(
List<MockSourceSplit> splits, SplitEnumeratorContext<MockSourceSplit> context) {
super(splits, context);
}
}
@Test
void testBuilderWithEnumeratorSuperclass() {
HybridSource.SourceFactory<Integer, Source<Integer, ?, ?>, MockSplitEnumerator>
sourceFactory =
(HybridSource.SourceFactory<
Integer, Source<Integer, ?, ?>, MockSplitEnumerator>)
context -> {
MockSplitEnumerator enumerator =
context.getPreviousEnumerator();
return new MockBaseSource(1, 1, Boundedness.BOUNDED);
};
HybridSource<Integer> source =
new HybridSource.HybridSourceBuilder<Integer, MockSplitEnumerator>()
.<ExtendedMockSplitEnumerator, Source<Integer, ?, ?>>addSource(
new MockBaseSource(1, 1, Boundedness.BOUNDED))
.addSource(sourceFactory, Boundedness.BOUNDED)
.build();
assertThat(source).isNotNull();
}
}
|
ExtendedMockSplitEnumerator
|
java
|
apache__logging-log4j2
|
log4j-1.2-api/src/test/java/org/apache/log4j/config/XmlRollingWithPropertiesTest.java
|
{
"start": 1171,
"end": 2011
}
|
class ____ {
private static final String TEST_DIR = "target/" + XmlRollingWithPropertiesTest.class.getSimpleName();
@BeforeAll
static void setupSystemProperties() {
System.setProperty("test.directory", TEST_DIR);
System.setProperty("log4j.configuration", "target/test-classes/log4j1-rolling-properties.xml");
}
@Test
void testProperties() throws Exception {
// ${test.directory}/logs/etl.log
final Path path = Paths.get(TEST_DIR, "logs/etl.log");
Files.deleteIfExists(path);
final Logger logger = LogManager.getLogger("test");
logger.debug("This is a test of the root logger");
assertTrue(Files.exists(path), "Log file was not created " + path);
assertTrue(Files.size(path) > 0, "Log file is empty " + path);
}
}
|
XmlRollingWithPropertiesTest
|
java
|
square__retrofit
|
samples/src/main/java/com/example/retrofit/RxJavaObserveOnMainThread.java
|
{
"start": 1688,
"end": 2981
}
|
class ____ extends CallAdapter.Factory {
final Scheduler scheduler;
ObserveOnMainCallAdapterFactory(Scheduler scheduler) {
this.scheduler = scheduler;
}
@Override
public @Nullable CallAdapter<?, ?> get(
Type returnType, Annotation[] annotations, Retrofit retrofit) {
if (getRawType(returnType) != Observable.class) {
return null; // Ignore non-Observable types.
}
// Look up the next call adapter which would otherwise be used if this one was not present.
//noinspection unchecked returnType checked above to be Observable.
final CallAdapter<Object, Observable<?>> delegate =
(CallAdapter<Object, Observable<?>>)
retrofit.nextCallAdapter(this, returnType, annotations);
return new CallAdapter<Object, Object>() {
@Override
public Object adapt(Call<Object> call) {
// Delegate to get the normal Observable...
Observable<?> o = delegate.adapt(call);
// ...and change it to send notifications to the observer on the specified scheduler.
return o.observeOn(scheduler);
}
@Override
public Type responseType() {
return delegate.responseType();
}
};
}
}
}
|
ObserveOnMainCallAdapterFactory
|
java
|
apache__camel
|
dsl/camel-yaml-dsl/camel-yaml-dsl-deserializers/src/generated/java/org/apache/camel/dsl/yaml/deserializers/ModelDeserializers.java
|
{
"start": 673331,
"end": 676055
}
|
class ____ extends YamlDeserializerBase<OnFallbackDefinition> {
public OnFallbackDefinitionDeserializer() {
super(OnFallbackDefinition.class);
}
@Override
protected OnFallbackDefinition newInstance() {
return new OnFallbackDefinition();
}
@Override
protected boolean setProperty(OnFallbackDefinition target, String propertyKey,
String propertyName, Node node) {
propertyKey = org.apache.camel.util.StringHelper.dashToCamelCase(propertyKey);
switch(propertyKey) {
case "fallbackViaNetwork": {
String val = asText(node);
target.setFallbackViaNetwork(val);
break;
}
case "id": {
String val = asText(node);
target.setId(val);
break;
}
case "description": {
String val = asText(node);
target.setDescription(val);
break;
}
case "note": {
String val = asText(node);
target.setNote(val);
break;
}
case "steps": {
setSteps(target, node);
break;
}
default: {
return false;
}
}
return true;
}
}
@YamlType(
nodes = "onWhen",
types = org.apache.camel.model.OnWhenDefinition.class,
order = org.apache.camel.dsl.yaml.common.YamlDeserializerResolver.ORDER_LOWEST - 1,
displayName = "On When",
description = "To use a predicate to determine when to trigger this.",
deprecated = false,
properties = {
@YamlProperty(name = "__extends", type = "object:org.apache.camel.model.language.ExpressionDefinition", oneOf = "expression"),
@YamlProperty(name = "description", type = "string", description = "Sets the description of this node", displayName = "Description"),
@YamlProperty(name = "expression", type = "object:org.apache.camel.model.language.ExpressionDefinition", displayName = "Expression", oneOf = "expression"),
@YamlProperty(name = "id", type = "string", description = "Sets the id of this node", displayName = "Id"),
@YamlProperty(name = "note", type = "string", description = "Sets the note of this node", displayName = "Note")
}
)
public static
|
OnFallbackDefinitionDeserializer
|
java
|
apache__camel
|
core/camel-support/src/main/java/org/apache/camel/support/component/ApiCollection.java
|
{
"start": 1750,
"end": 2952
}
|
class ____ work with {@link ApiMethod}
*/
public final ApiMethodHelper<? extends ApiMethod> getHelper(E apiName) {
return apiHelpers.get(apiName);
}
/**
* Returns a list of API name strings.
*
* @return list of API names.
*/
public final Set<String> getApiNames() {
return apiNames;
}
public final E getApiName(Class<? extends ApiMethod> apiMethod) {
return apiMethods.get(apiMethod);
}
/**
* Creates an endpoint configuration for a particular API
*
* @param apiName name of the API.
* @return Endpoint configuration object for the API.
*/
public abstract T getEndpointConfiguration(E apiName);
protected final void setApiHelpers(Map<E, ApiMethodHelper<? extends ApiMethod>> apiHelpers) {
this.apiHelpers = Collections.unmodifiableMap(apiHelpers);
this.apiNames = apiHelpers.keySet()
.stream()
.map(ApiName::getName).collect(Collectors.toUnmodifiableSet());
}
protected final void setApiMethods(Map<Class<? extends ApiMethod>, E> apiMethods) {
this.apiMethods = Collections.unmodifiableMap(apiMethods);
}
}
|
to
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/engine/spi/Resolution.java
|
{
"start": 255,
"end": 345
}
|
interface ____ {
Object getNaturalIdValue();
boolean isSame(Object otherValue);
}
|
Resolution
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java
|
{
"start": 2380,
"end": 30297
}
|
class ____ implements ClusterStateTaskExecutor<JoinTask> {
private static final Logger logger = LogManager.getLogger(NodeJoinExecutor.class);
/**
* The transport versions that are forbidden from joining a cluster with this node due to critical bugs with those versions
*/
private static final Set<TransportVersion> FORBIDDEN_VERSIONS = Set.of();
private final AllocationService allocationService;
private final RerouteService rerouteService;
private final FeatureService featureService;
private final Function<ClusterState, ClusterState> maybeReconfigureAfterMasterElection;
public NodeJoinExecutor(AllocationService allocationService, RerouteService rerouteService, FeatureService featureService) {
this(allocationService, rerouteService, featureService, Function.identity());
}
public NodeJoinExecutor(
AllocationService allocationService,
RerouteService rerouteService,
FeatureService featureService,
Function<ClusterState, ClusterState> maybeReconfigureAfterMasterElection
) {
this.allocationService = allocationService;
this.rerouteService = rerouteService;
this.featureService = featureService;
this.maybeReconfigureAfterMasterElection = maybeReconfigureAfterMasterElection;
}
@Override
public ClusterState execute(BatchExecutionContext<JoinTask> batchExecutionContext) throws Exception {
// The current state that MasterService uses might have been updated by a (different) master in a higher term already. If so, stop
// processing the current cluster state update, there's no point in continuing to compute it as it will later be rejected by
// Coordinator#publish anyhow.
assert batchExecutionContext.taskContexts().isEmpty() == false : "Expected to have non empty join tasks list";
var term = batchExecutionContext.taskContexts().stream().mapToLong(t -> t.getTask().term()).max().getAsLong();
var split = batchExecutionContext.taskContexts().stream().collect(Collectors.partitioningBy(t -> t.getTask().term() == term));
for (TaskContext<JoinTask> outdated : split.get(false)) {
outdated.onFailure(
new NotMasterException("Higher term encountered (encountered: " + term + " > used: " + outdated.getTask().term() + ")")
);
}
final var joinTaskContexts = split.get(true);
final var initialState = batchExecutionContext.initialState();
if (initialState.term() > term) {
logger.trace("encountered higher term {} than current {}, there is a newer master", initialState.term(), term);
throw new NotMasterException(
"Higher term encountered (current: " + initialState.term() + " > used: " + term + "), there is a newer master"
);
}
final boolean isBecomingMaster = joinTaskContexts.stream().anyMatch(t -> t.getTask().isBecomingMaster());
final DiscoveryNodes currentNodes = initialState.nodes();
boolean nodesChanged = false;
ClusterState.Builder newState;
if (currentNodes.getMasterNode() == null && isBecomingMaster) {
assert initialState.term() < term : "there should be at most one become master task per election (= by term)";
// use these joins to try and become the master.
// Note that we don't have to do any validation of the amount of joining nodes - the commit
// during the cluster state publishing guarantees that we have enough
try (var ignored = batchExecutionContext.dropHeadersContext()) {
// suppress deprecation warnings e.g. from reroute()
newState = becomeMasterAndTrimConflictingNodes(initialState, joinTaskContexts, term);
}
nodesChanged = true;
} else if (currentNodes.isLocalNodeElectedMaster()) {
assert initialState.term() == term : "term should be stable for the same master";
newState = ClusterState.builder(initialState);
} else {
logger.trace("processing node joins, but we are not the master. current master: {}", currentNodes.getMasterNode());
throw new NotMasterException(
Strings.format(
"Node [%s] not master for join request. Current known master [%s], current term [%d]",
currentNodes.getLocalNode(),
currentNodes.getMasterNode(),
term
)
);
}
DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(newState.nodes());
Map<String, CompatibilityVersions> compatibilityVersionsMap = new HashMap<>(newState.compatibilityVersions());
Map<String, Set<String>> nodeFeatures = new HashMap<>(newState.nodeFeatures()); // as present in cluster state
Set<String> effectiveClusterFeatures = calculateEffectiveClusterFeatures(newState.nodes(), nodeFeatures);
assert nodesBuilder.isLocalNodeElectedMaster();
Version minClusterNodeVersion = newState.nodes().getMinNodeVersion();
Version maxClusterNodeVersion = newState.nodes().getMaxNodeVersion();
// if the cluster is not fully-formed then the min version is not meaningful
final boolean enforceVersionBarrier = initialState.getBlocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK) == false;
// processing any joins
Map<String, String> joinedNodeIdsByNodeName = new HashMap<>();
for (final var joinTaskContext : joinTaskContexts) {
final var joinTask = joinTaskContext.getTask();
final List<Runnable> onTaskSuccess = new ArrayList<>(joinTask.nodeCount());
for (final JoinTask.NodeJoinTask nodeJoinTask : joinTask.nodeJoinTasks()) {
final DiscoveryNode node = nodeJoinTask.node();
if (currentNodes.nodeExistsWithSameRoles(node)) {
logger.debug("received a join request for an existing node [{}]", node);
// update the node's feature set if it has one
// this can happen if the master has just moved from a pre-features version to a post-features version
if (Objects.equals(nodeFeatures.get(node.getId()), nodeJoinTask.features()) == false) {
logger.debug("updating node [{}] features {}", node.getId(), nodeJoinTask.features());
nodeFeatures.put(node.getId(), nodeJoinTask.features());
nodesChanged = true;
}
} else {
try {
CompatibilityVersions compatibilityVersions = nodeJoinTask.compatibilityVersions();
Set<String> features = nodeJoinTask.features();
if (enforceVersionBarrier) {
ensureVersionBarrier(node.getVersion(), minClusterNodeVersion);
CompatibilityVersions.ensureVersionsCompatibility(compatibilityVersions, compatibilityVersionsMap.values());
}
blockForbiddenVersions(compatibilityVersions.transportVersion());
ensureNodesCompatibility(node.getVersion(), minClusterNodeVersion, maxClusterNodeVersion);
Set<String> newNodeEffectiveFeatures = enforceNodeFeatureBarrier(node, effectiveClusterFeatures, features);
// we do this validation quite late to prevent race conditions between nodes joining and importing dangling indices
// we have to reject nodes that don't support all indices we have in this cluster
ensureIndexCompatibility(
node.getMinIndexVersion(),
node.getMinReadOnlyIndexVersion(),
node.getMaxIndexVersion(),
initialState.getMetadata()
);
nodesBuilder.add(node);
compatibilityVersionsMap.put(node.getId(), compatibilityVersions);
// store the actual node features here, not including assumed features, as this is persisted in cluster state
nodeFeatures.put(node.getId(), features);
effectiveClusterFeatures.retainAll(newNodeEffectiveFeatures);
nodesChanged = true;
minClusterNodeVersion = Version.min(minClusterNodeVersion, node.getVersion());
maxClusterNodeVersion = Version.max(maxClusterNodeVersion, node.getVersion());
if (node.isMasterNode()) {
joinedNodeIdsByNodeName.put(node.getName(), node.getId());
}
} catch (IllegalArgumentException | IllegalStateException e) {
onTaskSuccess.add(() -> nodeJoinTask.listener().onFailure(e));
continue;
}
}
onTaskSuccess.add(() -> {
final var reason = nodeJoinTask.reason();
if (reason.guidanceDocs() == null) {
logger.info(
"node-join: [{}] with reason [{}]",
nodeJoinTask.node().descriptionWithoutAttributes(),
reason.message()
);
} else {
logger.warn(
"node-join: [{}] with reason [{}]; for troubleshooting guidance, see {}",
nodeJoinTask.node().descriptionWithoutAttributes(),
reason.message(),
reason.guidanceDocs()
);
}
nodeJoinTask.listener().onResponse(null);
});
}
joinTaskContext.success(() -> {
for (Runnable joinCompleter : onTaskSuccess) {
joinCompleter.run();
}
});
}
if (nodesChanged) {
rerouteService.reroute(
"post-join reroute",
Priority.HIGH,
ActionListener.wrap(r -> logger.trace("post-join reroute completed"), e -> logger.debug("post-join reroute failed", e))
);
if (joinedNodeIdsByNodeName.isEmpty() == false) {
final var currentVotingConfigExclusions = initialState.getVotingConfigExclusions();
final var newVotingConfigExclusions = currentVotingConfigExclusions.stream().map(e -> {
// Update nodeId in VotingConfigExclusion when a new node with excluded node name joins
if (CoordinationMetadata.VotingConfigExclusion.MISSING_VALUE_MARKER.equals(e.getNodeId())
&& joinedNodeIdsByNodeName.containsKey(e.getNodeName())) {
return new CoordinationMetadata.VotingConfigExclusion(
joinedNodeIdsByNodeName.get(e.getNodeName()),
e.getNodeName()
);
} else {
return e;
}
}).collect(Collectors.toSet());
// if VotingConfigExclusions did get updated
if (newVotingConfigExclusions.equals(currentVotingConfigExclusions) == false) {
final var coordMetadataBuilder = CoordinationMetadata.builder(initialState.coordinationMetadata())
.term(term)
.clearVotingConfigExclusions();
newVotingConfigExclusions.forEach(coordMetadataBuilder::addVotingConfigExclusion);
newState.metadata(Metadata.builder(initialState.metadata()).coordinationMetadata(coordMetadataBuilder.build()).build());
}
}
final ClusterState clusterStateWithNewNodesAndDesiredNodes = DesiredNodes.updateDesiredNodesStatusIfNeeded(
newState.nodes(nodesBuilder).nodeIdsToCompatibilityVersions(compatibilityVersionsMap).nodeFeatures(nodeFeatures).build()
);
final ClusterState updatedState = allocationService.adaptAutoExpandReplicas(clusterStateWithNewNodesAndDesiredNodes);
assert enforceVersionBarrier == false
|| updatedState.nodes().getMinNodeVersion().onOrAfter(initialState.nodes().getMinNodeVersion())
: "min node version decreased from ["
+ initialState.nodes().getMinNodeVersion()
+ "] to ["
+ updatedState.nodes().getMinNodeVersion()
+ "]";
return updatedState;
} else {
// we must return a new cluster state instance to force publishing. This is important
// for the joining node to finalize its join and set us as a master
return newState.build();
}
}
protected ClusterState.Builder becomeMasterAndTrimConflictingNodes(
ClusterState currentState,
List<? extends TaskContext<JoinTask>> taskContexts,
long term
) {
final ClusterState initialState = currentState;
currentState = taskContexts.stream()
.map(TaskContext::getTask)
.map(JoinTask::initialState)
.filter(Objects::nonNull)
.max(Comparator.comparingLong(ClusterState::term).thenComparingLong(ClusterState::version))
.filter(
clusterState -> clusterState.term() > initialState.term()
|| (clusterState.term() == initialState.term() && clusterState.version() > initialState.version())
)
.orElse(currentState);
assert currentState.nodes().getMasterNodeId() == null : currentState;
assert currentState.term() < term : term + " vs " + currentState;
ClusterState.Builder builder = ClusterState.builder(currentState);
DiscoveryNodes currentNodes = builder.nodes();
DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(currentNodes);
Map<String, CompatibilityVersions> compatibilityVersions = new HashMap<>(builder.compatibilityVersions());
Map<String, Set<String>> nodeFeatures = new HashMap<>(builder.nodeFeatures());
nodesBuilder.masterNodeId(currentState.nodes().getLocalNodeId());
nodesBuilder.resetNodeLeftGeneration();
for (final var taskContext : taskContexts) {
for (final var joiningNode : taskContext.getTask().nodes()) {
final DiscoveryNode nodeWithSameId = nodesBuilder.get(joiningNode.getId());
if (nodeWithSameId != null && nodeWithSameId.equals(joiningNode) == false) {
logger.debug("removing existing node [{}], which conflicts with incoming join from [{}]", nodeWithSameId, joiningNode);
nodesBuilder.remove(nodeWithSameId.getId());
compatibilityVersions.remove(nodeWithSameId.getId());
nodeFeatures.remove(nodeWithSameId.getId());
}
final DiscoveryNode nodeWithSameAddress = currentNodes.findByAddress(joiningNode.getAddress());
if (nodeWithSameAddress != null && nodeWithSameAddress.equals(joiningNode) == false) {
logger.debug(
"removing existing node [{}], which conflicts with incoming join from [{}]",
nodeWithSameAddress,
joiningNode
);
nodesBuilder.remove(nodeWithSameAddress.getId());
compatibilityVersions.remove(nodeWithSameAddress.getId());
nodeFeatures.remove(nodeWithSameAddress.getId());
}
}
}
// now trim any left over dead nodes - either left there when the previous master stepped down
// or removed by us above
ClusterState tmpState = builder.nodes(nodesBuilder)
.nodeIdsToCompatibilityVersions(compatibilityVersions)
.nodeFeatures(nodeFeatures)
.blocks(ClusterBlocks.builder().blocks(currentState.blocks()).removeGlobalBlock(NoMasterBlockService.NO_MASTER_BLOCK_ID))
.metadata(
Metadata.builder(currentState.metadata())
.coordinationMetadata(CoordinationMetadata.builder(currentState.coordinationMetadata()).term(term).build())
.build()
)
.build();
logger.trace("becomeMasterAndTrimConflictingNodes: {}", tmpState.nodes());
allocationService.cleanCaches();
tmpState = PersistentTasksCustomMetadata.disassociateDeadNodes(tmpState);
tmpState = maybeReconfigureAfterMasterElection.apply(tmpState);
return ClusterState.builder(allocationService.disassociateDeadNodes(tmpState, false, "removed dead nodes on election"));
}
@Override
public boolean runOnlyOnMaster() {
// we validate that we are allowed to change the cluster state during cluster state processing
return false;
}
private static void blockForbiddenVersions(TransportVersion joiningTransportVersion) {
if (FORBIDDEN_VERSIONS.contains(joiningTransportVersion)) {
throw new IllegalStateException(
"A node with transport version " + joiningTransportVersion.toReleaseVersion() + " is forbidden from joining this cluster"
);
}
}
/**
* Calculate the cluster's effective features. This includes all features that are assumed on any nodes in the cluster,
* that are also present across the whole cluster as a result.
*/
private Set<String> calculateEffectiveClusterFeatures(DiscoveryNodes nodes, Map<String, Set<String>> nodeFeatures) {
if (FeatureService.featuresCanBeAssumedForNodes(nodes)) {
Set<String> assumedFeatures = featureService.getNodeFeatures()
.values()
.stream()
.filter(NodeFeature::assumedAfterNextCompatibilityBoundary)
.map(NodeFeature::id)
.collect(Collectors.toSet());
// add all assumed features to the featureset of all nodes of the next major version
nodeFeatures = new HashMap<>(nodeFeatures);
for (var node : nodes.getNodes().entrySet()) {
if (FeatureService.featuresCanBeAssumedForNode(node.getValue())) {
assert nodeFeatures.containsKey(node.getKey()) : "Node " + node.getKey() + " does not have any features";
nodeFeatures.computeIfPresent(node.getKey(), (k, v) -> {
var newFeatures = new HashSet<>(v);
return newFeatures.addAll(assumedFeatures) ? newFeatures : v;
});
}
}
}
return ClusterFeatures.calculateAllNodeFeatures(nodeFeatures.values());
}
/**
* Ensures that all indices are compatible with the given index version. This will ensure that all indices in the given metadata
* will not be created with a newer version of elasticsearch as well as that all indices are newer or equal to the minimum index
* compatibility version.
* @see IndexVersions#MINIMUM_COMPATIBLE
* @see IndexVersions#MINIMUM_READONLY_COMPATIBLE
* @throws IllegalStateException if any index is incompatible with the given version
*/
public static void ensureIndexCompatibility(
IndexVersion minSupportedVersion,
IndexVersion minReadOnlySupportedVersion,
IndexVersion maxSupportedVersion,
Metadata metadata
) {
// we ensure that all indices in the cluster we join are compatible with us no matter if they are
// closed or not we can't read mappings of these indices so we need to reject the join...
for (IndexMetadata idxMetadata : metadata.indicesAllProjects()) {
if (idxMetadata.getCompatibilityVersion().after(maxSupportedVersion)) {
throw new IllegalStateException(
"index "
+ idxMetadata.getIndex()
+ " version not supported: "
+ idxMetadata.getCompatibilityVersion().toReleaseVersion()
+ " maximum compatible index version is: "
+ maxSupportedVersion.toReleaseVersion()
);
}
if (idxMetadata.getCompatibilityVersion().before(minSupportedVersion)) {
boolean isReadOnlySupported = isReadOnlySupportedVersion(idxMetadata, minSupportedVersion, minReadOnlySupportedVersion);
if (isReadOnlySupported == false) {
throw new IllegalStateException(
"index "
+ idxMetadata.getIndex()
+ " version not supported: "
+ idxMetadata.getCompatibilityVersion().toReleaseVersion()
+ " minimum compatible index version is: "
+ minSupportedVersion.toReleaseVersion()
);
}
}
}
}
/** ensures that the joining node has a version that's compatible with all current nodes*/
public static void ensureNodesCompatibility(final Version joiningNodeVersion, DiscoveryNodes currentNodes) {
final Version minNodeVersion = currentNodes.getMinNodeVersion();
final Version maxNodeVersion = currentNodes.getMaxNodeVersion();
ensureNodesCompatibility(joiningNodeVersion, minNodeVersion, maxNodeVersion);
}
/** ensures that the joining node has a version that's compatible with a given version range */
public static void ensureNodesCompatibility(Version joiningNodeVersion, Version minClusterNodeVersion, Version maxClusterNodeVersion) {
assert minClusterNodeVersion.onOrBefore(maxClusterNodeVersion) : minClusterNodeVersion + " > " + maxClusterNodeVersion;
if (joiningNodeVersion.isCompatible(maxClusterNodeVersion) == false) {
throw new IllegalStateException(
"node version ["
+ joiningNodeVersion
+ "] is not supported. "
+ "The cluster contains nodes with version ["
+ maxClusterNodeVersion
+ "], which is incompatible."
);
}
if (joiningNodeVersion.isCompatible(minClusterNodeVersion) == false) {
throw new IllegalStateException(
"node version ["
+ joiningNodeVersion
+ "] is not supported."
+ "The cluster contains nodes with version ["
+ minClusterNodeVersion
+ "], which is incompatible."
);
}
}
/**
* ensures that the joining node's transport version is equal or higher to the minClusterTransportVersion. This is needed
* to ensure that the minimum transport version of the cluster doesn't go backwards.
**/
static void ensureTransportVersionBarrier(
CompatibilityVersions joiningCompatibilityVersions,
Collection<CompatibilityVersions> existingTransportVersions
) {
TransportVersion minClusterTransportVersion = existingTransportVersions.stream()
.map(CompatibilityVersions::transportVersion)
.min(Comparator.naturalOrder())
.orElse(TransportVersion.current());
if (joiningCompatibilityVersions.transportVersion().before(minClusterTransportVersion)) {
throw new IllegalStateException(
"node with transport version ["
+ joiningCompatibilityVersions.transportVersion().toReleaseVersion()
+ "] may not join a cluster with minimum transport version ["
+ minClusterTransportVersion.toReleaseVersion()
+ "]"
);
}
}
/**
* ensures that the joining node's version is equal or higher to the minClusterNodeVersion. This is needed
* to ensure that if the master is already fully operating under the new version, it doesn't go back to mixed
* version mode
**/
public static void ensureVersionBarrier(Version joiningNodeVersion, Version minClusterNodeVersion) {
if (joiningNodeVersion.before(minClusterNodeVersion)) {
throw new IllegalStateException(
"node version ["
+ joiningNodeVersion
+ "] may not join a cluster comprising only nodes of version ["
+ minClusterNodeVersion
+ "] or greater"
);
}
}
/**
* Enforces the feature join barrier - a joining node should have all features already present in all existing nodes in the cluster
*
* @return The set of features that this node has (including assumed features)
*/
private Set<String> enforceNodeFeatureBarrier(DiscoveryNode node, Set<String> effectiveClusterFeatures, Set<String> newNodeFeatures) {
// prevent join if it does not have one or more features that all other nodes have
Set<String> missingFeatures = new HashSet<>(effectiveClusterFeatures);
missingFeatures.removeAll(newNodeFeatures);
if (missingFeatures.isEmpty()) {
// nothing missing - all ok
return newNodeFeatures;
}
if (FeatureService.featuresCanBeAssumedForNode(node)) {
// it might still be ok for this node to join if this node can have assumed features,
// and all the missing features are assumed
// we can get the NodeFeature object direct from this node's registered features
// as all existing nodes in the cluster have the features present in existingNodesFeatures, including this one
newNodeFeatures = new HashSet<>(newNodeFeatures);
for (Iterator<String> it = missingFeatures.iterator(); it.hasNext();) {
String feature = it.next();
NodeFeature nf = featureService.getNodeFeatures().get(feature);
if (nf.assumedAfterNextCompatibilityBoundary()) {
// its ok for this feature to be missing from this node
it.remove();
// and it should be assumed to still be in the cluster
newNodeFeatures.add(feature);
}
// even if we don't remove it, still continue, so the exception message below is accurate
}
}
if (missingFeatures.isEmpty()) {
return newNodeFeatures;
} else {
throw new IllegalStateException("Node " + node.getId() + " is missing required features " + missingFeatures);
}
}
public static Collection<BiConsumer<DiscoveryNode, ClusterState>> addBuiltInJoinValidators(
Collection<BiConsumer<DiscoveryNode, ClusterState>> onJoinValidators
) {
final Collection<BiConsumer<DiscoveryNode, ClusterState>> validators = new ArrayList<>();
validators.add((node, state) -> {
ensureNodesCompatibility(node.getVersion(), state.getNodes());
ensureIndexCompatibility(
node.getMinIndexVersion(),
node.getMinReadOnlyIndexVersion(),
node.getMaxIndexVersion(),
state.getMetadata()
);
});
validators.addAll(onJoinValidators);
return Collections.unmodifiableCollection(validators);
}
}
|
NodeJoinExecutor
|
java
|
spring-projects__spring-boot
|
documentation/spring-boot-docs/src/main/java/org/springframework/boot/docs/features/springapplication/applicationexit/MyApplication.java
|
{
"start": 963,
"end": 1201
}
|
class ____ {
@Bean
public ExitCodeGenerator exitCodeGenerator() {
return () -> 42;
}
public static void main(String[] args) {
System.exit(SpringApplication.exit(SpringApplication.run(MyApplication.class, args)));
}
}
|
MyApplication
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/util/TestSampleStat.java
|
{
"start": 1006,
"end": 2702
}
|
class ____ {
private static final double EPSILON = 1e-42;
/**
* Some simple use cases
*/
@Test public void testSimple() {
SampleStat stat = new SampleStat();
assertEquals(0, stat.numSamples(), "num samples");
assertEquals(0.0, stat.mean(), EPSILON, "mean");
assertEquals(0.0, stat.variance(), EPSILON, "variance");
assertEquals(0.0, stat.stddev(), EPSILON, "stddev");
assertEquals(SampleStat.MinMax.DEFAULT_MIN_VALUE, stat.min(), EPSILON, "min");
assertEquals(SampleStat.MinMax.DEFAULT_MAX_VALUE, stat.max(), EPSILON, "max");
stat.add(3);
assertEquals(1L, stat.numSamples(), "num samples");
assertEquals(3.0, stat.mean(), EPSILON, "mean");
assertEquals(0.0, stat.variance(), EPSILON, "variance");
assertEquals(0.0, stat.stddev(), EPSILON, "stddev");
assertEquals(3.0, stat.min(), EPSILON, "min");
assertEquals(3.0, stat.max(), EPSILON, "max");
stat.add(2).add(1);
assertEquals(3L, stat.numSamples(), "num samples");
assertEquals(2.0, stat.mean(), EPSILON, "mean");
assertEquals(1.0, stat.variance(), EPSILON, "variance");
assertEquals(1.0, stat.stddev(), EPSILON, "stddev");
assertEquals(1.0, stat.min(), EPSILON, "min");
assertEquals(3.0, stat.max(), EPSILON, "max");
stat.reset();
assertEquals(0, stat.numSamples(), "num samples");
assertEquals(0.0, stat.mean(), EPSILON, "mean");
assertEquals(0.0, stat.variance(), EPSILON, "variance");
assertEquals(0.0, stat.stddev(), EPSILON, "stddev");
assertEquals(SampleStat.MinMax.DEFAULT_MIN_VALUE, stat.min(), EPSILON, "min");
assertEquals(SampleStat.MinMax.DEFAULT_MAX_VALUE, stat.max(), EPSILON, "max");
}
}
|
TestSampleStat
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/annotations/CollectionId.java
|
{
"start": 772,
"end": 1363
}
|
interface ____ {
/**
* The column containing the collection id.
*/
Column column() default @Column;
/**
* Implementation for generating values.
*
* @apiNote Mutually exclusive with {@link #generator()}
*/
Class<? extends IdentifierGenerator> generatorImplementation() default IdentifierGenerator.class;
/**
* The generator name.
* <p>
* Can specify either a built-in strategy ({@code "sequence"}, for example)
* or a named JPA id generator.
*
* @apiNote Mutually exclusive with {@link #generatorImplementation()}
*/
String generator() default "";
}
|
CollectionId
|
java
|
eclipse-vertx__vert.x
|
vertx-core/src/main/java/io/vertx/core/impl/VertxImpl.java
|
{
"start": 44886,
"end": 45058
}
|
class ____ {
ContextInternal context;
ClassLoader topLevelTCCL;
}
/**
* Begin the emission of a context event.
* <p>
* This is a low level
|
ContextDispatch
|
java
|
dropwizard__dropwizard
|
dropwizard-lifecycle/src/main/java/io/dropwizard/lifecycle/AutoCloseableManager.java
|
{
"start": 675,
"end": 1325
}
|
class ____ implements Managed {
private final AutoCloseable autoCloseable;
/**
* @param autoCloseable instance to close when the HTTP server stops.
*/
public AutoCloseableManager(final AutoCloseable autoCloseable) {
this.autoCloseable = autoCloseable;
}
/**
* Calls {@link AutoCloseable#close()} on the closable provided in
* {@link AutoCloseableManager#AutoCloseableManager(AutoCloseable)}.
*
* @throws Exception propagates {@link AutoCloseable#close()} exception
*/
@Override
public void stop() throws Exception {
this.autoCloseable.close();
}
}
|
AutoCloseableManager
|
java
|
spring-projects__spring-framework
|
spring-jdbc/src/main/java/org/springframework/jdbc/support/xml/SqlXmlHandler.java
|
{
"start": 7067,
"end": 8899
}
|
class ____ be used
* @return the content as character stream, or {@code null} in case of SQL NULL
* @throws SQLException if thrown by JDBC methods
* @see java.sql.ResultSet#getSQLXML
* @see java.sql.SQLXML#getSource
*/
@Nullable Source getXmlAsSource(ResultSet rs, int columnIndex, @Nullable Class<? extends Source> sourceClass) throws SQLException;
//-------------------------------------------------------------------------
// Convenience methods for building XML content
//-------------------------------------------------------------------------
/**
* Create a {@code SqlXmlValue} instance for the given XML data,
* as supported by the underlying JDBC driver.
* @param value the XML String value providing XML data
* @return the implementation specific instance
* @see SqlXmlValue
* @see java.sql.SQLXML#setString(String)
*/
SqlXmlValue newSqlXmlValue(String value);
/**
* Create a {@code SqlXmlValue} instance for the given XML data,
* as supported by the underlying JDBC driver.
* @param provider the {@code XmlBinaryStreamProvider} providing XML data
* @return the implementation specific instance
* @see SqlXmlValue
* @see java.sql.SQLXML#setBinaryStream()
*/
SqlXmlValue newSqlXmlValue(XmlBinaryStreamProvider provider);
/**
* Create a {@code SqlXmlValue} instance for the given XML data,
* as supported by the underlying JDBC driver.
* @param provider the {@code XmlCharacterStreamProvider} providing XML data
* @return the implementation specific instance
* @see SqlXmlValue
* @see java.sql.SQLXML#setCharacterStream()
*/
SqlXmlValue newSqlXmlValue(XmlCharacterStreamProvider provider);
/**
* Create a {@code SqlXmlValue} instance for the given XML data,
* as supported by the underlying JDBC driver.
* @param resultClass the Result implementation
|
to
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/InfiniteRecursionTest.java
|
{
"start": 4526,
"end": 4854
}
|
class ____ {
Test test;
void f() {
test.f();
}
}
""")
.doTest();
}
@Test
public void positiveDelegateCannotBeOverridden() {
compilationHelper
.addSourceLines(
"Test.java",
"""
final
|
Test
|
java
|
apache__camel
|
components/camel-minio/src/main/java/org/apache/camel/component/minio/MinioOperations.java
|
{
"start": 853,
"end": 1065
}
|
enum ____ {
copyObject,
listObjects,
deleteObject,
deleteObjects,
deleteBucket,
listBuckets,
getObject,
getPartialObject,
createDownloadLink,
createUploadLink
}
|
MinioOperations
|
java
|
alibaba__nacos
|
config/src/main/java/com/alibaba/nacos/config/server/service/ConfigDetailService.java
|
{
"start": 6723,
"end": 8356
}
|
class ____ {
private String type;
private int pageNo;
private int pageSize;
private String dataId;
private String group;
private String tenant;
private Map<String, Object> configAdvanceInfo;
private Page<ConfigInfo> response;
public SearchEvent() {
}
public SearchEvent(String type, int pageNo, int pageSize, String dataId, String group, String tenant,
Map<String, Object> configAdvanceInfo) {
this.type = type;
this.pageNo = pageNo;
this.pageSize = pageSize;
this.dataId = dataId;
this.group = group;
this.tenant = tenant;
this.configAdvanceInfo = configAdvanceInfo;
}
public String getType() {
return type;
}
public int getPageNo() {
return pageNo;
}
public int getPageSize() {
return pageSize;
}
public String getDataId() {
return dataId;
}
public String getGroup() {
return group;
}
public String getTenant() {
return tenant;
}
public Map<String, Object> getConfigAdvanceInfo() {
return configAdvanceInfo;
}
public Page<ConfigInfo> getResponse() {
return response;
}
public void setResponse(Page<ConfigInfo> response) {
this.response = response;
}
}
}
|
SearchEvent
|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/internal/functions/Functions.java
|
{
"start": 4887,
"end": 5475
}
|
class ____ implements Action {
final Future<?> future;
FutureAction(Future<?> future) {
this.future = future;
}
@Override
public void run() throws Exception {
future.get();
}
}
/**
* Wraps the blocking get call of the Future into an Action.
* @param future the future to call get() on, not null
* @return the new Action instance
*/
@NonNull
public static Action futureAction(@NonNull Future<?> future) {
return new FutureAction(future);
}
static final
|
FutureAction
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/parser/bug/Bug2.java
|
{
"start": 207,
"end": 844
}
|
class ____ extends TestCase {
public void test_0() throws Exception {
String text = "{children:[{id:3}]}";
Page page = JSON.parseObject(text, Page.class);
Assert.assertEquals(1, page.getChildren().size());
Assert.assertEquals(JSONObject.class, page.getChildren().get(0).getClass());
}
public void test_1() throws Exception {
String text = "{children:['aa']}";
Page page = JSON.parseObject(text, Page.class);
Assert.assertEquals(1, page.getChildren().size());
Assert.assertEquals(String.class, page.getChildren().get(0).getClass());
}
public static
|
Bug2
|
java
|
quarkusio__quarkus
|
extensions/reactive-routes/deployment/src/main/java/io/quarkus/vertx/web/deployment/Methods.java
|
{
"start": 2186,
"end": 14727
}
|
class ____ {
static final MethodDesc GET_HEADERS = MethodDesc.of(HttpServerResponse.class, "headers", MultiMap.class);
static final MethodDesc MULTIMAP_GET = MethodDesc.of(MultiMap.class, "get", String.class, String.class);
static final MethodDesc MULTIMAP_SET = MethodDesc.of(MultiMap.class, "set", MultiMap.class, String.class, String.class);
static final MethodDesc MULTIMAP_GET_ALL = MethodDesc.of(MultiMap.class, "getAll", List.class, String.class);
static final MethodDesc REQUEST = MethodDesc.of(RoutingContext.class, "request", HttpServerRequest.class);
static final MethodDesc REQUEST_GET_PARAM = MethodDesc.of(HttpServerRequest.class, "getParam", String.class, String.class);
static final MethodDesc REQUEST_GET_HEADER = MethodDesc.of(HttpServerRequest.class, "getHeader",
String.class, String.class);
static final MethodDesc GET_BODY = MethodDesc.of(RoutingContext.class, "getBody", Buffer.class);
static final MethodDesc GET_BODY_AS_STRING = MethodDesc.of(RoutingContext.class, "getBodyAsString", String.class);
static final MethodDesc GET_BODY_AS_JSON = MethodDesc.of(RoutingContext.class, "getBodyAsJson", JsonObject.class);
static final MethodDesc GET_BODY_AS_JSON_ARRAY = MethodDesc.of(RoutingContext.class, "getBodyAsJsonArray", JsonArray.class);
static final MethodDesc JSON_OBJECT_MAP_TO = MethodDesc.of(JsonObject.class, "mapTo", Object.class, Class.class);
static final MethodDesc REQUEST_PARAMS = MethodDesc.of(HttpServerRequest.class, "params", MultiMap.class);
static final MethodDesc REQUEST_HEADERS = MethodDesc.of(HttpServerRequest.class, "headers", MultiMap.class);
static final MethodDesc RESPONSE = MethodDesc.of(RoutingContext.class, "response", HttpServerResponse.class);
static final MethodDesc FAIL = MethodDesc.of(RoutingContext.class, "fail", void.class, Throwable.class);
static final MethodDesc FAILURE = MethodDesc.of(RoutingContext.class, "failure", Throwable.class);
static final MethodDesc NEXT = MethodDesc.of(RoutingContext.class, "next", void.class);
static final MethodDesc UNI_SUBSCRIBE = MethodDesc.of(Uni.class, "subscribe", UniSubscribe.class);
static final MethodDesc UNI_SUBSCRIBE_WITH = MethodDesc.of(UniSubscribe.class, "with",
Cancellable.class, Consumer.class, Consumer.class);
static final MethodDesc MULTI_SUBSCRIBE_VOID = MethodDesc.of(MultiSupport.class, "subscribeVoid",
void.class, Multi.class, RoutingContext.class);
static final MethodDesc MULTI_SUBSCRIBE_STRING = MethodDesc.of(MultiSupport.class, "subscribeString",
void.class, Multi.class, RoutingContext.class);
static final MethodDesc MULTI_SUBSCRIBE_BUFFER = MethodDesc.of(MultiSupport.class, "subscribeBuffer",
void.class, Multi.class, RoutingContext.class);
static final MethodDesc MULTI_SUBSCRIBE_MUTINY_BUFFER = MethodDesc.of(MultiSupport.class, "subscribeMutinyBuffer",
void.class, Multi.class, RoutingContext.class);
static final MethodDesc MULTI_SUBSCRIBE_OBJECT = MethodDesc.of(MultiSupport.class, "subscribeObject",
void.class, Multi.class, RoutingContext.class);
static final MethodDesc IS_SSE = MethodDesc.of(MultiSseSupport.class, "isSSE", boolean.class, Multi.class);
static final MethodDesc MULTI_SSE_SUBSCRIBE_STRING = MethodDesc.of(MultiSseSupport.class, "subscribeString",
void.class, Multi.class, RoutingContext.class);
static final MethodDesc MULTI_SSE_SUBSCRIBE_BUFFER = MethodDesc.of(MultiSseSupport.class, "subscribeBuffer",
void.class, Multi.class, RoutingContext.class);
static final MethodDesc MULTI_SSE_SUBSCRIBE_MUTINY_BUFFER = MethodDesc.of(MultiSseSupport.class, "subscribeMutinyBuffer",
void.class, Multi.class, RoutingContext.class);
static final MethodDesc MULTI_SSE_SUBSCRIBE_OBJECT = MethodDesc.of(MultiSseSupport.class, "subscribeObject",
void.class, Multi.class, RoutingContext.class);
static final MethodDesc IS_NDJSON = MethodDesc.of(MultiNdjsonSupport.class, "isNdjson",
boolean.class, Multi.class);
static final MethodDesc MULTI_NDJSON_SUBSCRIBE_STRING = MethodDesc.of(MultiNdjsonSupport.class, "subscribeString",
void.class, Multi.class, RoutingContext.class);
static final MethodDesc MULTI_NDJSON_SUBSCRIBE_OBJECT = MethodDesc.of(MultiNdjsonSupport.class, "subscribeObject",
void.class, Multi.class, RoutingContext.class);
static final MethodDesc IS_JSON_ARRAY = MethodDesc.of(MultiJsonArraySupport.class, "isJsonArray",
boolean.class, Multi.class);
static final MethodDesc MULTI_JSON_SUBSCRIBE_VOID = MethodDesc.of(MultiJsonArraySupport.class, "subscribeVoid",
void.class, Multi.class, RoutingContext.class);
static final MethodDesc MULTI_JSON_SUBSCRIBE_STRING = MethodDesc.of(MultiJsonArraySupport.class, "subscribeString",
void.class, Multi.class, RoutingContext.class);
static final MethodDesc MULTI_JSON_SUBSCRIBE_OBJECT = MethodDesc.of(MultiJsonArraySupport.class, "subscribeObject",
void.class, Multi.class, RoutingContext.class);
static final MethodDesc MULTI_JSON_FAIL = MethodDesc.of(MultiJsonArraySupport.class, "fail",
void.class, RoutingContext.class);
static final MethodDesc END = MethodDesc.of(HttpServerResponse.class, "end", Future.class);
static final MethodDesc END_WITH_STRING = MethodDesc.of(HttpServerResponse.class, "end", Future.class, String.class);
static final MethodDesc END_WITH_BUFFER = MethodDesc.of(HttpServerResponse.class, "end", Future.class, Buffer.class);
static final MethodDesc SET_STATUS = MethodDesc.of(HttpServerResponse.class, "setStatusCode",
HttpServerResponse.class, Integer.TYPE);
static final MethodDesc MUTINY_GET_DELEGATE = MethodDesc.of(io.vertx.mutiny.core.buffer.Buffer.class, "getDelegate",
Buffer.class);
static final MethodDesc JSON_ENCODE = MethodDesc.of(Json.class, "encode", String.class, Object.class);
static final MethodDesc ARC_CONTAINER = MethodDesc.of(Arc.class, "container", ArcContainer.class);
static final MethodDesc ARC_CONTAINER_GET_ACTIVE_CONTEXT = MethodDesc.of(ArcContainer.class, "getActiveContext",
InjectableContext.class, Class.class);
static final MethodDesc ARC_CONTAINER_BEAN = MethodDesc.of(ArcContainer.class, "bean",
InjectableBean.class, String.class);
static final MethodDesc BEAN_GET_SCOPE = MethodDesc.of(InjectableBean.class, "getScope", Class.class);
static final MethodDesc CONTEXT_GET = MethodDesc.of(Context.class, "get",
Object.class, Contextual.class, CreationalContext.class);
static final MethodDesc CONTEXT_GET_IF_PRESENT = MethodDesc.of(Context.class, "get",
Object.class, Contextual.class);
static final MethodDesc INJECTABLE_REF_PROVIDER_GET = MethodDesc.of(InjectableReferenceProvider.class, "get",
Object.class, CreationalContext.class);
static final MethodDesc INJECTABLE_BEAN_DESTROY = MethodDesc.of(InjectableBean.class, "destroy",
void.class, Object.class, CreationalContext.class);
static final ConstructorDesc CREATIONAL_CONTEXT_IMPL_CTOR = ConstructorDesc.of(CreationalContextImpl.class,
Contextual.class);
static final ConstructorDesc ROUTE_HANDLER_CTOR = ConstructorDesc.of(RouteHandler.class);
static final MethodDesc ROUTE_HANDLERS_SET_CONTENT_TYPE = MethodDesc.of(RouteHandlers.class, "setContentType",
void.class, RoutingContext.class, String.class);
static final MethodDesc OPTIONAL_OF_NULLABLE = MethodDesc.of(Optional.class, "ofNullable", Optional.class, Object.class);
private static final String VALIDATOR = "jakarta.validation.Validator";
private static final String CONSTRAINT_VIOLATION_EXCEPTION = "jakarta.validation.ConstraintViolationException";
static final ClassDesc VALIDATION_VALIDATOR = ClassDesc.of(VALIDATOR);
static final ClassDesc VALIDATION_CONSTRAINT_VIOLATION_EXCEPTION = ClassDesc.of(CONSTRAINT_VIOLATION_EXCEPTION);
static final MethodDesc VALIDATOR_VALIDATE = InterfaceMethodDesc.of(VALIDATION_VALIDATOR, "validate",
Set.class, Object.class, Class[].class);
static final MethodDesc VALIDATION_GET_VALIDATOR = MethodDesc.of(ValidationSupport.class, "getValidator",
MethodTypeDesc.of(VALIDATION_VALIDATOR, classDescOf(ArcContainer.class)));
static final MethodDesc VALIDATION_MAP_VIOLATIONS_TO_JSON = MethodDesc.of(ValidationSupport.class, "mapViolationsToJson",
String.class, Set.class, HttpServerResponse.class);
static final MethodDesc VALIDATION_HANDLE_VIOLATION = MethodDesc.of(ValidationSupport.class, "handleViolationException",
MethodTypeDesc.of(CD_void, VALIDATION_CONSTRAINT_VIOLATION_EXCEPTION, classDescOf(RoutingContext.class),
CD_boolean));
static final MethodDesc IS_ASSIGNABLE_FROM = MethodDesc.of(Class.class, "isAssignableFrom", boolean.class, Class.class);
static final MethodDesc INTEGER_VALUE_OF = MethodDesc.of(Integer.class, "valueOf", Integer.class, String.class);
static final MethodDesc LONG_VALUE_OF = MethodDesc.of(Long.class, "valueOf", Long.class, String.class);
static final MethodDesc BOOLEAN_VALUE_OF = MethodDesc.of(Boolean.class, "valueOf", Boolean.class, String.class);
static final MethodDesc CHARACTER_VALUE_OF = MethodDesc.of(Character.class, "valueOf", Character.class, char.class);
static final MethodDesc FLOAT_VALUE_OF = MethodDesc.of(Float.class, "valueOf", Float.class, String.class);
static final MethodDesc DOUBLE_VALUE_OF = MethodDesc.of(Double.class, "valueOf", Double.class, String.class);
static final MethodDesc SHORT_VALUE_OF = MethodDesc.of(Short.class, "valueOf", Short.class, String.class);
static final MethodDesc BYTE_VALUE_OF = MethodDesc.of(Byte.class, "valueOf", Byte.class, String.class);
public static final MethodDesc CS_WHEN_COMPLETE = MethodDesc.of(CompletionStage.class, "whenComplete",
CompletionStage.class, BiConsumer.class);
private Methods() {
// Avoid direct instantiation
}
static boolean isNoContent(HandlerDescriptor descriptor) {
return descriptor.getPayloadType().name().equals(DotName.createSimple(Void.class));
}
static Expr createNpeItemIsNull(BlockCreator bc) {
return bc.new_(ConstructorDesc.of(NullPointerException.class, String.class),
Const.of("Invalid value returned by Uni: `null`"));
}
static MethodDesc getEndMethodForContentType(HandlerDescriptor descriptor) {
if (descriptor.isPayloadBuffer() || descriptor.isPayloadMutinyBuffer()) {
return END_WITH_BUFFER;
}
return END_WITH_STRING;
}
static void setContentTypeToJson(Var response, BlockCreator b0) {
Const contentType = Const.of("Content-Type");
LocalVar headers = b0.localVar("headers", b0.invokeInterface(GET_HEADERS, response));
Expr current = b0.invokeInterface(MULTIMAP_GET, headers, contentType);
b0.ifNull(current, b1 -> {
b1.invokeInterface(MULTIMAP_SET, headers, contentType, Const.of("application/json"));
});
}
/**
* Generate the following code:
*
* <pre>
* String result = null;
* Set<ConstraintViolation<Object>> violations = validator.validate(res);
* if (violations.isEmpty()) {
* result = res.encode()
* } else {
* result = ValidationSupport.mapViolationsToJson(violations, response);
* }
* </pre>
*
* Note that {@code this_} is always either {@code This} or a captured {@code Var}.
*
*/
public static Var validateProducedItem(Var response, BlockCreator b0, Var res,
Expr this_, FieldDesc validatorField) {
FieldVar validator = this_.field(validatorField);
LocalVar violations = b0.localVar("violations",
b0.invokeInterface(VALIDATOR_VALIDATE, validator, res, b0.newArray(Class.class)));
LocalVar result = b0.localVar("result", Const.ofDefault(String.class));
b0.ifElse(b0.withSet(violations).isEmpty(), b1 -> {
Expr json = b1.invokeStatic(JSON_ENCODE, res);
b1.set(result, json);
}, b1 -> {
Expr json = b1.invokeStatic(VALIDATION_MAP_VIOLATIONS_TO_JSON, violations, response);
b1.set(result, json);
});
return result;
}
}
|
Methods
|
java
|
apache__flink
|
flink-python/src/test/java/org/apache/flink/table/runtime/typeutils/serializers/python/StringSerializerTest.java
|
{
"start": 1046,
"end": 1510
}
|
class ____ extends SerializerTestBase<String> {
@Override
protected TypeSerializer<String> createSerializer() {
return StringSerializer.INSTANCE;
}
@Override
protected int getLength() {
return -1;
}
@Override
protected Class<String> getTypeClass() {
return String.class;
}
@Override
protected String[] getTestData() {
return new String[] {"pyflink", "flink"};
}
}
|
StringSerializerTest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/tool/schema/internal/StandardTableMigrator.java
|
{
"start": 1317,
"end": 3776
}
|
class ____ implements TableMigrator {
private static final Logger LOG = Logger.getLogger( Table.class );
protected final Dialect dialect;
public StandardTableMigrator(Dialect dialect) {
this.dialect = dialect;
}
@Override
public String[] getSqlAlterStrings(
Table table,
Metadata metadata,
TableInformation tableInfo,
SqlStringGenerationContext context) {
if ( table.isView() ) {
// perhaps we could execute a 'create or replace view'
return EMPTY_STRING_ARRAY;
}
else {
return sqlAlterStrings( table, dialect, metadata, tableInfo, context )
.toArray( EMPTY_STRING_ARRAY );
}
}
@Internal
public static List<String> sqlAlterStrings(
Table table,
Dialect dialect,
Metadata metadata,
TableInformation tableInformation,
SqlStringGenerationContext context) throws HibernateException {
final String tableName = getTableName( table, context );
final String alterTable = dialect.getAlterTableString( tableName ) + ' ';
final List<String> results = new ArrayList<>();
for ( var column : table.getColumns() ) {
final var columnInformation =
tableInformation.getColumn( toIdentifier( column.getName(), column.isQuoted() ) );
if ( columnInformation == null ) {
// the column doesn't exist at all
final String addColumn =
dialect.getAddColumnString() + ' '
+ getFullColumnDeclaration( column, table, metadata, dialect, context )
+ dialect.getAddColumnSuffixString();
results.add( alterTable + addColumn );
}
else if ( dialect.supportsAlterColumnType() ) {
if ( !hasMatchingType( column, columnInformation, metadata, dialect )
|| !hasMatchingLength( column, columnInformation, metadata, dialect ) ) {
final String alterColumn =
dialect.getAlterColumnTypeString(
column.getQuotedName( dialect ),
column.getSqlType(metadata),
getColumnDefinition( column, metadata, dialect )
);
results.add( alterTable + alterColumn );
}
}
}
if ( results.isEmpty() ) {
LOG.debugf( "No alter strings for table: %s", table.getQuotedName() );
}
return results;
}
private static String getTableName(Table table, SqlStringGenerationContext context) {
return context.format( new QualifiedTableName(
toIdentifier( table.getCatalog(), table.isCatalogQuoted() ),
toIdentifier( table.getSchema(), table.isSchemaQuoted() ),
table.getNameIdentifier() )
);
}
}
|
StandardTableMigrator
|
java
|
apache__flink
|
flink-table/flink-sql-gateway/src/main/java/org/apache/flink/table/gateway/rest/handler/AbstractSqlGatewayRestHandler.java
|
{
"start": 2250,
"end": 5188
}
|
class ____<
R extends RequestBody, P extends ResponseBody, M extends MessageParameters>
extends AbstractHandler<NonLeaderRetrievalRestfulGateway, R, M> {
protected SqlGatewayService service;
private final MessageHeaders<R, P, M> messageHeaders;
protected AbstractSqlGatewayRestHandler(
SqlGatewayService service,
Map<String, String> responseHeaders,
MessageHeaders<R, P, M> messageHeaders) {
super(
() -> CompletableFuture.completedFuture(NonLeaderRetrievalRestfulGateway.INSTANCE),
// SqlGatewayRestHandler doesn't support RPC timeout option, this property is used
// for placeholder
Duration.ofSeconds(1),
responseHeaders,
messageHeaders);
this.service = service;
this.messageHeaders = messageHeaders;
}
@Override
protected CompletableFuture<Void> respondToRequest(
ChannelHandlerContext ctx,
HttpRequest httpRequest,
HandlerRequest<R> handlerRequest,
NonLeaderRetrievalRestfulGateway gateway) {
CompletableFuture<P> response;
try {
response =
handleRequest(
SqlGatewayRestAPIVersion.fromURIToVersion(httpRequest.uri()),
handlerRequest);
} catch (RestHandlerException e) {
response = FutureUtils.completedExceptionally(e);
}
return response.thenAccept(
resp ->
HandlerUtils.sendResponse(
ctx,
httpRequest,
resp,
messageHeaders.getResponseStatusCode(),
responseHeaders));
}
/**
* This method is called for every incoming request and returns a {@link CompletableFuture}
* containing a the response.
*
* <p>Implementations may decide whether to throw {@link RestHandlerException}s or fail the
* returned {@link CompletableFuture} with a {@link RestHandlerException}.
*
* <p>Failing the future with another exception type or throwing unchecked exceptions is
* regarded as an implementation error as it does not allow us to provide a meaningful HTTP
* status code. In this case a {@link HttpResponseStatus#INTERNAL_SERVER_ERROR} will be
* returned.
*
* @param version request version
* @param request request that should be handled
* @return future containing a handler response
* @throws RestHandlerException if the handling failed
*/
protected abstract CompletableFuture<P> handleRequest(
@Nullable SqlGatewayRestAPIVersion version, @Nonnull HandlerRequest<R> request)
throws RestHandlerException;
}
|
AbstractSqlGatewayRestHandler
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/local/IgnoreNullMetrics.java
|
{
"start": 1840,
"end": 3255
}
|
class ____ extends OptimizerRules.OptimizerRule<TimeSeriesAggregate> {
@Override
public LogicalPlan rule(TimeSeriesAggregate agg) {
Set<Attribute> metrics = new HashSet<>();
agg.forEachExpression(Attribute.class, attr -> {
if (attr.isMetric()) {
metrics.add(attr);
}
});
if (metrics.isEmpty()) {
return agg;
}
Expression conditional = null;
for (Attribute metric : metrics) {
// Create an is not null check for each metric
if (conditional == null) {
conditional = new IsNotNull(agg.source(), metric);
} else {
// Join the is not null checks with OR nodes
conditional = new Or(agg.source(), conditional, new IsNotNull(agg.source(), metric));
}
}
return agg.replaceChild(new Filter(agg.source(), agg.child(), conditional));
}
/**
* Scans the given {@link LogicalPlan} to see if it is a "metrics mode" query
*/
private static boolean isMetricsQuery(LogicalPlan logicalPlan) {
if (logicalPlan instanceof EsRelation r) {
return r.indexMode() == IndexMode.TIME_SERIES;
}
if (logicalPlan instanceof UnresolvedRelation r) {
return r.indexMode() == IndexMode.TIME_SERIES;
}
return false;
}
}
|
IgnoreNullMetrics
|
java
|
apache__flink
|
flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/typeutils/WritableTypeInfo.java
|
{
"start": 5127,
"end": 5200
}
|
class ____ no subclass of " + Writable.class.getName());
}
}
}
|
is
|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/internal/operators/observable/ObservableReduceSeedSingle.java
|
{
"start": 1803,
"end": 3633
}
|
class ____<T, R> implements Observer<T>, Disposable {
final SingleObserver<? super R> downstream;
final BiFunction<R, ? super T, R> reducer;
R value;
Disposable upstream;
ReduceSeedObserver(SingleObserver<? super R> actual, BiFunction<R, ? super T, R> reducer, R value) {
this.downstream = actual;
this.value = value;
this.reducer = reducer;
}
@Override
public void onSubscribe(Disposable d) {
if (DisposableHelper.validate(this.upstream, d)) {
this.upstream = d;
downstream.onSubscribe(this);
}
}
@Override
public void onNext(T value) {
R v = this.value;
if (v != null) {
try {
this.value = Objects.requireNonNull(reducer.apply(v, value), "The reducer returned a null value");
} catch (Throwable ex) {
Exceptions.throwIfFatal(ex);
upstream.dispose();
onError(ex);
}
}
}
@Override
public void onError(Throwable e) {
R v = value;
if (v != null) {
value = null;
downstream.onError(e);
} else {
RxJavaPlugins.onError(e);
}
}
@Override
public void onComplete() {
R v = value;
if (v != null) {
value = null;
downstream.onSuccess(v);
}
}
@Override
public void dispose() {
upstream.dispose();
}
@Override
public boolean isDisposed() {
return upstream.isDisposed();
}
}
}
|
ReduceSeedObserver
|
java
|
apache__kafka
|
streams/src/test/java/org/apache/kafka/streams/utils/UniqueTopicSerdeScope.java
|
{
"start": 1378,
"end": 2137
}
|
class ____ {
private final Map<String, Class<?>> topicTypeRegistry = new TreeMap<>();
public <T> UniqueTopicSerdeDecorator<T> decorateSerde(final Serde<T> delegate,
final Properties config,
final boolean isKey) {
final UniqueTopicSerdeDecorator<T> decorator = new UniqueTopicSerdeDecorator<>(delegate);
decorator.configure(config.entrySet().stream().collect(Collectors.toMap(e -> e.getKey().toString(), Map.Entry::getValue)), isKey);
return decorator;
}
public Set<String> registeredTopics() {
return Collections.unmodifiableSet(topicTypeRegistry.keySet());
}
public
|
UniqueTopicSerdeScope
|
java
|
apache__flink
|
flink-core/src/test/java/org/apache/flink/api/common/io/BinaryInputFormatTest.java
|
{
"start": 1541,
"end": 1633
}
|
class ____ {
@TempDir private Path tempDir;
private static final
|
BinaryInputFormatTest
|
java
|
apache__rocketmq
|
store/src/main/java/org/apache/rocketmq/store/PutMessageLock.java
|
{
"start": 888,
"end": 954
}
|
interface ____ {
void lock();
void unlock();
}
|
PutMessageLock
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/sql/mysql/select/MySqlSelectTest_BinaryConcat.java
|
{
"start": 1128,
"end": 2723
}
|
class ____ extends MysqlTest {
public void test_0() throws Exception {
String sql = "select campaignId from Campaign" +
" where advertiserId = ?" +
" and deleteStatus = false" +
" and campaignName like binary CONCAT('%',?,'%')";
MySqlStatementParser parser = new MySqlStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement stmt = statementList.get(0);
SQLSelectStatement selectStmt = (SQLSelectStatement) stmt;
SQLSelect select = selectStmt.getSelect();
assertNotNull(select.getQuery());
MySqlSelectQueryBlock queryBlock = (MySqlSelectQueryBlock) select.getQuery();
assertNull(queryBlock.getOrderBy());
// print(statementList);
assertEquals(1, statementList.size());
MySqlSchemaStatVisitor visitor = new MySqlSchemaStatVisitor();
stmt.accept(visitor);
// System.out.println("Tables : " + visitor.getTables());
// System.out.println("fields : " + visitor.getColumns());
// System.out.println("coditions : " + visitor.getConditions());
// System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(1, visitor.getTables().size());
assertEquals(4, visitor.getColumns().size());
assertEquals(3, visitor.getConditions().size());
assertEquals(0, visitor.getOrderByColumns().size());
// assertTrue(visitor.getTables().containsKey(new TableStat.Name("mytable")));
}
}
|
MySqlSelectTest_BinaryConcat
|
java
|
google__error-prone
|
core/src/main/java/com/google/errorprone/bugpatterns/IsInstanceOfClass.java
|
{
"start": 1793,
"end": 3455
}
|
class ____ extends BugChecker implements MethodInvocationTreeMatcher {
private static final Matcher<MethodInvocationTree> INSTANCE_OF_CLASS =
Matchers.allOf(
instanceMethod().onExactClass("java.lang.Class").named("isInstance"),
argument(
0,
// Class is final so we could just use isSameType, but we want to
// test for the same _erased_ type.
Matchers.<ExpressionTree>isSubtypeOf("java.lang.Class")));
/** Suggests removing getClass() or changing to Class.class. */
@Override
public Description matchMethodInvocation(MethodInvocationTree tree, VisitorState state) {
if (!INSTANCE_OF_CLASS.matches(tree, state)) {
return Description.NO_MATCH;
}
return describeMatch(tree, SuggestedFix.replace(tree, buildReplacement(tree, state)));
}
static String buildReplacement(MethodInvocationTree tree, VisitorState state) {
Operand lhs = classify((JCTree) ASTHelpers.getReceiver(tree.getMethodSelect()), state);
Operand rhs = classify((JCTree) Iterables.getOnlyElement(tree.getArguments()), state);
// expr.getClass().isInstance(Bar.class) -> expr instanceof Bar
if (lhs.kind() == Kind.GET_CLASS && rhs.kind() == Kind.LITERAL) {
return String.format("%s instanceof %s", lhs.value(), rhs.value());
}
// expr1.getClass().isInstance(expr2.getClass()) -> expr2.getClass().isInstance(expr1)
if (lhs.kind() == Kind.GET_CLASS && rhs.kind() == Kind.GET_CLASS) {
return String.format("%s.getClass().isInstance(%s)", rhs.value(), lhs.value());
}
// Foo.class.isInstance(Bar.class) -> Bar.class == Class.
|
IsInstanceOfClass
|
java
|
spring-projects__spring-framework
|
framework-docs/src/main/java/org/springframework/docs/core/aot/hints/importruntimehints/SpellCheckService.java
|
{
"start": 1093,
"end": 1281
}
|
class ____ {
public void loadDictionary(Locale locale) {
ClassPathResource resource = new ClassPathResource("dicts/" + locale.getLanguage() + ".txt");
//...
}
static
|
SpellCheckService
|
java
|
ReactiveX__RxJava
|
src/test/java/io/reactivex/rxjava3/subscribers/SafeSubscriberTest.java
|
{
"start": 3993,
"end": 9827
}
|
class ____ implements Publisher<String> {
Subscriber<? super String> subscriber;
/* used to simulate subscription */
public void sendOnCompleted() {
subscriber.onComplete();
}
/* used to simulate subscription */
public void sendOnNext(String value) {
subscriber.onNext(value);
}
/* used to simulate subscription */
public void sendOnError(Throwable e) {
subscriber.onError(e);
}
@Override
public void subscribe(Subscriber<? super String> subscriber) {
this.subscriber = subscriber;
subscriber.onSubscribe(new Subscription() {
@Override
public void cancel() {
// going to do nothing to pretend I'm a bad Observable that keeps allowing events to be sent
System.out.println("==> SynchronizeTest unsubscribe that does nothing!");
}
@Override
public void request(long n) {
}
});
}
}
@Test
public void onNextFailure() {
AtomicReference<Throwable> onError = new AtomicReference<>();
try {
OBSERVER_ONNEXT_FAIL(onError).onNext("one");
fail("expects exception to be thrown");
} catch (Exception e) {
assertNull(onError.get());
assertTrue(e instanceof SafeSubscriberTestException);
assertEquals("onNextFail", e.getMessage());
}
}
@Test
public void onNextFailureSafe() {
AtomicReference<Throwable> onError = new AtomicReference<>();
try {
SafeSubscriber<String> safeObserver = new SafeSubscriber<>(OBSERVER_ONNEXT_FAIL(onError));
safeObserver.onSubscribe(new BooleanSubscription());
safeObserver.onNext("one");
assertNotNull(onError.get());
assertTrue(onError.get() instanceof SafeSubscriberTestException);
assertEquals("onNextFail", onError.get().getMessage());
} catch (Exception e) {
fail("expects exception to be passed to onError");
}
}
@Test
public void onCompleteFailure() {
AtomicReference<Throwable> onError = new AtomicReference<>();
try {
OBSERVER_ONCOMPLETED_FAIL(onError).onComplete();
fail("expects exception to be thrown");
} catch (Exception e) {
assertNull(onError.get());
assertTrue(e instanceof SafeSubscriberTestException);
assertEquals("onCompleteFail", e.getMessage());
}
}
@Test
public void onErrorFailure() {
try {
subscriberOnErrorFail().onError(new SafeSubscriberTestException("error!"));
fail("expects exception to be thrown");
} catch (Exception e) {
assertTrue(e instanceof SafeSubscriberTestException);
assertEquals("onErrorFail", e.getMessage());
}
}
@Test
public void onNextOnErrorFailure() {
try {
OBSERVER_ONNEXT_ONERROR_FAIL().onNext("one");
fail("expects exception to be thrown");
} catch (Exception e) {
e.printStackTrace();
assertTrue(e instanceof SafeSubscriberTestException);
assertEquals("onNextFail", e.getMessage());
}
}
static final Subscription THROWING_DISPOSABLE = new Subscription() {
@Override
public void cancel() {
// break contract by throwing exception
throw new SafeSubscriberTestException("failure from unsubscribe");
}
@Override
public void request(long n) {
// ignored
}
};
private static Subscriber<String> OBSERVER_ONNEXT_FAIL(final AtomicReference<Throwable> onError) {
return new DefaultSubscriber<String>() {
@Override
public void onComplete() {
}
@Override
public void onError(Throwable e) {
onError.set(e);
}
@Override
public void onNext(String args) {
throw new SafeSubscriberTestException("onNextFail");
}
};
}
private static Subscriber<String> OBSERVER_ONNEXT_ONERROR_FAIL() {
return new DefaultSubscriber<String>() {
@Override
public void onComplete() {
}
@Override
public void onError(Throwable e) {
throw new SafeSubscriberTestException("onErrorFail");
}
@Override
public void onNext(String args) {
throw new SafeSubscriberTestException("onNextFail");
}
};
}
private static Subscriber<String> subscriberOnErrorFail() {
return new DefaultSubscriber<String>() {
@Override
public void onComplete() {
}
@Override
public void onError(Throwable e) {
throw new SafeSubscriberTestException("onErrorFail");
}
@Override
public void onNext(String args) {
}
};
}
private static Subscriber<String> OBSERVER_ONCOMPLETED_FAIL(final AtomicReference<Throwable> onError) {
return new DefaultSubscriber<String>() {
@Override
public void onComplete() {
throw new SafeSubscriberTestException("onCompleteFail");
}
@Override
public void onError(Throwable e) {
onError.set(e);
}
@Override
public void onNext(String args) {
}
};
}
@SuppressWarnings("serial")
private static
|
TestObservable
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/notfound/OptionalEagerNotFoundTest.java
|
{
"start": 7521,
"end": 8087
}
|
class ____ extends Person {
@Id
private Long id;
@OneToOne(cascade = CascadeType.PERSIST)
@NotFound(action = NotFoundAction.IGNORE)
@JoinColumn(foreignKey = @ForeignKey(ConstraintMode.NO_CONSTRAINT))
@Fetch(FetchMode.JOIN)
private City city;
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public City getCity() {
return city;
}
@Override
public void setCity(City city) {
this.city = city;
}
}
@Entity
@Table(name = "PersonOneToOneSelectIgnore")
public static
|
PersonOneToOneJoinIgnore
|
java
|
quarkusio__quarkus
|
independent-projects/arc/runtime/src/main/java/io/quarkus/arc/impl/AroundInvokeInvocationContext.java
|
{
"start": 1164,
"end": 3273
}
|
class ____ extends AbstractInvocationContext {
static Object perform(Object target, Object[] args, InterceptedMethodMetadata metadata) throws Exception {
if (metadata.chain.isEmpty()) {
return metadata.aroundInvokeForward.apply(target, new AroundInvokeInvocationContext(target, args, metadata));
}
return metadata.chain.get(0).invoke(new AroundInvokeInvocationContext(target, args, metadata));
}
private final InterceptedMethodMetadata metadata;
AroundInvokeInvocationContext(Object target, Object[] args, InterceptedMethodMetadata metadata) {
super(target, args, new ContextDataMap(metadata.bindings));
this.metadata = metadata;
}
@Override
public Set<Annotation> getInterceptorBindings() {
return metadata.bindings;
}
public Method getMethod() {
return metadata.method;
}
@Override
public Object[] getParameters() {
return parameters;
}
@Override
public void setParameters(Object[] params) {
validateParameters(metadata.method, params);
this.parameters = params;
}
@Override
public Object proceed() throws Exception {
return proceed(1);
}
private Object proceed(int currentPosition) throws Exception {
try {
if (currentPosition < metadata.chain.size()) {
// Invoke the next interceptor in the chain
return metadata.chain.get(currentPosition)
.invoke(new NextAroundInvokeInvocationContext(currentPosition + 1));
} else {
// Invoke the target method
return metadata.aroundInvokeForward.apply(target, this);
}
} catch (InvocationTargetException e) {
Throwable cause = e.getCause();
if (cause instanceof Error) {
throw (Error) cause;
}
if (cause instanceof Exception) {
throw (Exception) cause;
}
throw new RuntimeException(cause);
}
}
|
AroundInvokeInvocationContext
|
java
|
quarkusio__quarkus
|
extensions/resteasy-classic/resteasy-client/runtime/src/test/java/io/quarkus/restclient/runtime/RestClientBaseTest.java
|
{
"start": 11753,
"end": 11838
}
|
interface ____ {
String echo(String message);
}
public static
|
TestClient
|
java
|
apache__logging-log4j2
|
log4j-jpl/src/main/java/org/apache/logging/log4j/jpl/Log4jSystemLoggerAdapter.java
|
{
"start": 1219,
"end": 1724
}
|
class ____ extends AbstractLoggerAdapter<Logger> {
@Override
protected Logger newLogger(final String name, final LoggerContext context) {
return new Log4jSystemLogger(context.getLogger(name));
}
@Override
protected LoggerContext getContext() {
return getContext(
LogManager.getFactory().isClassLoaderDependent()
? StackLocatorUtil.getCallerClass(LoggerFinder.class)
: null);
}
}
|
Log4jSystemLoggerAdapter
|
java
|
FasterXML__jackson-databind
|
src/main/java/tools/jackson/databind/cfg/MutableConfigOverride.java
|
{
"start": 586,
"end": 2396
}
|
class ____
extends ConfigOverride
implements java.io.Serializable
{
private static final long serialVersionUID = 1L;
public MutableConfigOverride() { super(); }
protected MutableConfigOverride(MutableConfigOverride src) {
super(src);
}
public MutableConfigOverride copy() {
return new MutableConfigOverride(this);
}
public MutableConfigOverride setFormat(JsonFormat.Value v) {
_format = v;
return this;
}
/**
* Override inclusion setting for all properties contained in POJOs of the
* associated type.
*
* @param v Inclusion setting to apply contained properties.
*/
public MutableConfigOverride setInclude(JsonInclude.Value v) {
_include = v;
return this;
}
/**
* Override inclusion setting for properties of the associated type
* regardless of the type of the POJO containing it.
*
* @param v Inclusion setting to apply for properties of associated type.
*
* @since 2.9
*/
public MutableConfigOverride setIncludeAsProperty(JsonInclude.Value v) {
_includeAsProperty = v;
return this;
}
public MutableConfigOverride setIgnorals(JsonIgnoreProperties.Value v) {
_ignorals = v;
return this;
}
public MutableConfigOverride setIsIgnoredType(Boolean v) {
_isIgnoredType = v;
return this;
}
public MutableConfigOverride setNullHandling(JsonSetter.Value v) {
_nullHandling = v;
return this;
}
public MutableConfigOverride setVisibility(JsonAutoDetect.Value v) {
_visibility = v;
return this;
}
public MutableConfigOverride setMergeable(Boolean v) {
_mergeable = v;
return this;
}
}
|
MutableConfigOverride
|
java
|
netty__netty
|
codec-classes-quic/src/main/java/io/netty/handler/codec/quic/QuicChannel.java
|
{
"start": 1140,
"end": 12470
}
|
interface ____ extends Channel {
@Override
default ChannelFuture bind(SocketAddress localAddress) {
return pipeline().bind(localAddress);
}
@Override
default ChannelFuture connect(SocketAddress remoteAddress) {
return pipeline().connect(remoteAddress);
}
@Override
default ChannelFuture connect(SocketAddress remoteAddress, SocketAddress localAddress) {
return pipeline().connect(remoteAddress, localAddress);
}
@Override
default ChannelFuture disconnect() {
return pipeline().disconnect();
}
@Override
default ChannelFuture close() {
return pipeline().close();
}
@Override
default ChannelFuture deregister() {
return pipeline().deregister();
}
@Override
default ChannelFuture bind(SocketAddress localAddress, ChannelPromise promise) {
return pipeline().bind(localAddress, promise);
}
@Override
default ChannelFuture connect(SocketAddress remoteAddress, ChannelPromise promise) {
return pipeline().connect(remoteAddress, promise);
}
@Override
default ChannelFuture connect(SocketAddress remoteAddress, SocketAddress localAddress, ChannelPromise promise) {
return pipeline().connect(remoteAddress, localAddress, promise);
}
@Override
default ChannelFuture disconnect(ChannelPromise promise) {
return pipeline().disconnect(promise);
}
@Override
default ChannelFuture close(ChannelPromise promise) {
return pipeline().close(promise);
}
@Override
default ChannelFuture deregister(ChannelPromise promise) {
return pipeline().deregister(promise);
}
@Override
default ChannelFuture write(Object msg) {
return pipeline().write(msg);
}
@Override
default ChannelFuture write(Object msg, ChannelPromise promise) {
return pipeline().write(msg, promise);
}
@Override
default ChannelFuture writeAndFlush(Object msg, ChannelPromise promise) {
return pipeline().writeAndFlush(msg, promise);
}
@Override
default ChannelFuture writeAndFlush(Object msg) {
return pipeline().writeAndFlush(msg);
}
@Override
default ChannelPromise newPromise() {
return pipeline().newPromise();
}
@Override
default ChannelProgressivePromise newProgressivePromise() {
return pipeline().newProgressivePromise();
}
@Override
default ChannelFuture newSucceededFuture() {
return pipeline().newSucceededFuture();
}
@Override
default ChannelFuture newFailedFuture(Throwable cause) {
return pipeline().newFailedFuture(cause);
}
@Override
default ChannelPromise voidPromise() {
return pipeline().voidPromise();
}
@Override
QuicChannel read();
@Override
QuicChannel flush();
/**
* Returns the configuration of this channel.
*/
@Override
QuicChannelConfig config();
/**
* Returns the used {@link SSLEngine} or {@code null} if none is used (yet).
*
* @return the engine.
*/
@Nullable
SSLEngine sslEngine();
/**
* Returns the number of streams that can be created before stream creation will fail
* with {@link QuicTransportError#STREAM_LIMIT_ERROR} error.
*
* @param type the stream type.
* @return the number of streams left.
*/
long peerAllowedStreams(QuicStreamType type);
/**
* Returns {@code true} if the connection was closed because of idle timeout.
*
* @return {@code true} if the connection was closed because of idle timeout, {@code false}.
*/
boolean isTimedOut();
/**
* Returns the {@link QuicTransportParameters} of the peer once received, or {@code null} if not known yet.
*
* @return peerTransportParams.
*/
@Nullable
QuicTransportParameters peerTransportParameters();
/**
* Returns the local {@link QuicConnectionAddress}. This address might change over the life-time of the
* channel.
*
* @return local the local {@link QuicConnectionAddress} or {@code null} if none is assigned yet,
* or assigned anymore.
*/
@Override
@Nullable
QuicConnectionAddress localAddress();
/**
* Returns the remote {@link QuicConnectionAddress}. This address might change over the life-time of the
* channel.
*
* @return remote the remote {@link QuicConnectionAddress} or {@code null} if none is assigned yet,
* or assigned anymore.
*/
@Override
@Nullable
QuicConnectionAddress remoteAddress();
/**
* Returns the local {@link SocketAddress} of the underlying transport that received the data.
* This address might change over the life-time of the channel.
*
* @return local the local {@link SocketAddress} of the underlying transport or {@code null} if none is assigned
* yet, or assigned anymore.
*/
@Nullable
SocketAddress localSocketAddress();
/**
* Returns the remote {@link SocketAddress} of the underlying transport to which the data is sent.
* This address might change over the life-time of the channel.
*
* @return local the remote {@link SocketAddress} of the underlying transport or {@code null} if none is assigned
* yet, or assigned anymore.
*/
@Nullable
SocketAddress remoteSocketAddress();
/**
* Creates a stream that is using this {@link QuicChannel} and notifies the {@link Future} once done.
* The {@link ChannelHandler} (if not {@code null}) is added to the {@link io.netty.channel.ChannelPipeline} of the
* {@link QuicStreamChannel} automatically.
*
* @param type the {@link QuicStreamType} of the {@link QuicStreamChannel}.
* @param handler the {@link ChannelHandler} that will be added to the {@link QuicStreamChannel}s
* {@link io.netty.channel.ChannelPipeline} during the stream creation.
* @return the {@link Future} that will be notified once the operation completes.
*/
default Future<QuicStreamChannel> createStream(QuicStreamType type, @Nullable ChannelHandler handler) {
return createStream(type, handler, eventLoop().newPromise());
}
/**
* Creates a stream that is using this {@link QuicChannel} and notifies the {@link Promise} once done.
* The {@link ChannelHandler} (if not {@code null}) is added to the {@link io.netty.channel.ChannelPipeline} of the
* {@link QuicStreamChannel} automatically.
*
* @param type the {@link QuicStreamType} of the {@link QuicStreamChannel}.
* @param handler the {@link ChannelHandler} that will be added to the {@link QuicStreamChannel}s
* {@link io.netty.channel.ChannelPipeline} during the stream creation.
* @param promise the {@link ChannelPromise} that will be notified once the operation completes.
* @return the {@link Future} that will be notified once the operation completes.
*/
Future<QuicStreamChannel> createStream(QuicStreamType type, @Nullable ChannelHandler handler,
Promise<QuicStreamChannel> promise);
/**
* Returns a new {@link QuicStreamChannelBootstrap} which makes it easy to bootstrap new {@link QuicStreamChannel}s
* with custom options and attributes. For simpler use-cases you may want to consider using
* {@link #createStream(QuicStreamType, ChannelHandler)} or
* {@link #createStream(QuicStreamType, ChannelHandler, Promise)} directly.
*
* @return {@link QuicStreamChannelBootstrap} that can be used to bootstrap a {@link QuicStreamChannel}.
*/
default QuicStreamChannelBootstrap newStreamBootstrap() {
return new QuicStreamChannelBootstrap(this);
}
/**
* Close the {@link QuicChannel}
*
* @param applicationClose {@code true} if an application close should be used,
* {@code false} if a normal close should be used.
* @param error the application error number, or {@code 0} if no special error should be signaled.
* @param reason the reason for the closure (which may be an empty {@link ByteBuf}.
* @return the future that is notified.
*/
default ChannelFuture close(boolean applicationClose, int error, ByteBuf reason) {
return close(applicationClose, error, reason, newPromise());
}
/**
* Close the {@link QuicChannel}
*
* @param applicationClose {@code true} if an application close should be used,
* {@code false} if a normal close should be used.
* @param error the application error number, or {@code 0} if no special error should be signaled.
* @param reason the reason for the closure (which may be an empty {@link ByteBuf}.
* @param promise the {@link ChannelPromise} that will be notified.
* @return the future that is notified.
*/
ChannelFuture close(boolean applicationClose, int error, ByteBuf reason, ChannelPromise promise);
/**
* Collects statistics about the connection and notifies the {@link Future} once done.
*
* @return the {@link Future} that is notified once the stats were collected.
*/
default Future<QuicConnectionStats> collectStats() {
return collectStats(eventLoop().newPromise());
}
/**
* Collects statistics about the connection and notifies the {@link Promise} once done.
*
* @param promise the {@link ChannelPromise} that is notified once the stats were collected.
* @return the {@link Future} that is notified once the stats were collected.
*/
Future<QuicConnectionStats> collectStats(Promise<QuicConnectionStats> promise);
/**
* Collects statistics about the path of the connection and notifies the {@link Future} once done.
*
* @return the {@link Future} that is notified once the stats were collected.
*/
default Future<QuicConnectionPathStats> collectPathStats(int pathIdx) {
return collectPathStats(pathIdx, eventLoop().newPromise());
}
/**
* Collects statistics about the path of the connection and notifies the {@link Promise} once done.
*
* @param promise the {@link ChannelPromise} that is notified once the stats were collected.
* @return the {@link Future} that is notified once the stats were collected.
*/
Future<QuicConnectionPathStats> collectPathStats(int pathIdx, Promise<QuicConnectionPathStats> promise);
/**
* Creates a new {@link QuicChannelBootstrap} that can be used to create and connect new {@link QuicChannel}s to
* endpoints using the given {@link Channel} as transport layer.
*
* @param channel the {@link Channel} that is used as transport layer.
* @return {@link QuicChannelBootstrap} that can be used to bootstrap a client side {@link QuicChannel}.
*/
static QuicChannelBootstrap newBootstrap(Channel channel) {
return new QuicChannelBootstrap(channel);
}
}
|
QuicChannel
|
java
|
apache__camel
|
components/camel-clickup/src/main/java/org/apache/camel/component/clickup/model/TaskTimeTrackedUpdatedEventAction.java
|
{
"start": 861,
"end": 943
}
|
enum ____ {
CREATION,
UPDATE,
DELETION
}
|
TaskTimeTrackedUpdatedEventAction
|
java
|
apache__logging-log4j2
|
log4j-mongodb/src/test/java/org/apache/logging/log4j/mongodb/MongoDbIT.java
|
{
"start": 1542,
"end": 2833
}
|
class ____ {
@Test
void test(final LoggerContext ctx, final MongoClient mongoClient) {
final Logger logger = ctx.getLogger(MongoDbIT.class);
logger.info("Hello log 1");
logger.info("Hello log 2", new RuntimeException("Hello ex 2"));
final MongoDatabase database = mongoClient.getDatabase(MongoDbTestConstants.DATABASE_NAME);
assertNotNull(database);
final MongoCollection<Document> collection =
database.getCollection(getClass().getSimpleName());
assertNotNull(collection);
final FindIterable<Document> found = collection.find();
final Document first = found.first();
assertNotNull(first, "first");
assertEquals("Hello log 1", first.getString("message"), first.toJson());
assertEquals("INFO", first.getString("level"), first.toJson());
//
found.skip(1);
final Document second = found.first();
assertNotNull(second);
assertEquals("Hello log 2", second.getString("message"), second.toJson());
assertEquals("INFO", second.getString("level"), second.toJson());
final Document thrown = second.get("thrown", Document.class);
assertEquals("Hello ex 2", thrown.getString("message"), thrown.toJson());
}
}
|
MongoDbIT
|
java
|
spring-projects__spring-boot
|
cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/shell/ShellExitException.java
|
{
"start": 845,
"end": 1011
}
|
class ____ extends CommandException {
private static final long serialVersionUID = 1L;
public ShellExitException() {
super(Option.RETHROW);
}
}
|
ShellExitException
|
java
|
grpc__grpc-java
|
api/src/main/java/io/grpc/Grpc.java
|
{
"start": 2238,
"end": 5271
}
|
interface ____ {}
/**
* Creates a channel builder with a target string and credentials. The target can be either a
* valid {@link NameResolver}-compliant URI, or an authority string.
*
* <p>A {@code NameResolver}-compliant URI is an absolute hierarchical URI as defined by {@link
* java.net.URI}. Example URIs:
* <ul>
* <li>{@code "dns:///foo.googleapis.com:8080"}</li>
* <li>{@code "dns:///foo.googleapis.com"}</li>
* <li>{@code "dns:///%5B2001:db8:85a3:8d3:1319:8a2e:370:7348%5D:443"}</li>
* <li>{@code "dns://8.8.8.8/foo.googleapis.com:8080"}</li>
* <li>{@code "dns://8.8.8.8/foo.googleapis.com"}</li>
* <li>{@code "zookeeper://zk.example.com:9900/example_service"}</li>
* </ul>
*
* <p>An authority string will be converted to a {@code NameResolver}-compliant URI, which has
* the scheme from the name resolver with the highest priority (e.g. {@code "dns"}),
* no authority, and the original authority string as its path after properly escaped.
* We recommend libraries to specify the schema explicitly if it is known, since libraries cannot
* know which NameResolver will be default during runtime.
* Example authority strings:
* <ul>
* <li>{@code "localhost"}</li>
* <li>{@code "127.0.0.1"}</li>
* <li>{@code "localhost:8080"}</li>
* <li>{@code "foo.googleapis.com:8080"}</li>
* <li>{@code "127.0.0.1:8080"}</li>
* <li>{@code "[2001:db8:85a3:8d3:1319:8a2e:370:7348]"}</li>
* <li>{@code "[2001:db8:85a3:8d3:1319:8a2e:370:7348]:443"}</li>
* </ul>
*/
public static ManagedChannelBuilder<?> newChannelBuilder(
String target, ChannelCredentials creds) {
return ManagedChannelRegistry.getDefaultRegistry().newChannelBuilder(target, creds);
}
/**
* Creates a channel builder from a host, port, and credentials. The host and port are combined to
* form an authority string and then passed to {@link #newChannelBuilder(String,
* ChannelCredentials)}. IPv6 addresses are properly surrounded by square brackets ("[]").
*/
public static ManagedChannelBuilder<?> newChannelBuilderForAddress(
String host, int port, ChannelCredentials creds) {
return newChannelBuilder(authorityFromHostAndPort(host, port), creds);
}
/**
* Combine a host and port into an authority string.
*/
// A copy of GrpcUtil.authorityFromHostAndPort
private static String authorityFromHostAndPort(String host, int port) {
try {
return new URI(null, null, host, port, null, null, null).getAuthority();
} catch (URISyntaxException ex) {
throw new IllegalArgumentException("Invalid host or port: " + host + " " + port, ex);
}
}
/**
* Static factory for creating a new ServerBuilder.
*
* @param port the port to listen on
* @param creds the server identity
*/
public static ServerBuilder<?> newServerBuilderForPort(int port, ServerCredentials creds) {
return ServerRegistry.getDefaultRegistry().newServerBuilderForPort(port, creds);
}
}
|
TransportAttr
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/loaders/SqlSelectTest.java
|
{
"start": 1855,
"end": 2436
}
|
class ____ {
@Id @GeneratedValue
@Column(name = "sql_select_id")
Long id;
String name;
@ElementCollection
@CollectionTable(name = "With_Uuids",
joinColumns = @JoinColumn(name = "sql_select_id", referencedColumnName = "sql_select_id"))
@SQLSelect(sql = "select Random_Uuids as uuid from With_Uuids where sql_select_id = ?",
resultSetMapping = @SqlResultSetMapping(name = "",
columns = @ColumnResult(name = "uuid", type = UUID.class)),
querySpaces = "With_Uuids")
@Column(name = "Random_Uuids")
List<UUID> uuids = new ArrayList<>();
}
}
|
WithSqlSelect
|
java
|
apache__kafka
|
group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/assignor/MockAssignor.java
|
{
"start": 1199,
"end": 4900
}
|
class ____ implements TaskAssignor {
public static final String MOCK_ASSIGNOR_NAME = "mock";
@Override
public String name() {
return MOCK_ASSIGNOR_NAME;
}
@Override
public String toString() {
return name();
}
@Override
public GroupAssignment assign(
final GroupSpec groupSpec,
final TopologyDescriber topologyDescriber
) throws TaskAssignorException {
Map<String, MemberAssignment> newTargetAssignment = new HashMap<>();
Map<String, String[]> subtopologyToActiveMember = new HashMap<>();
for (String subtopology : topologyDescriber.subtopologies()) {
int numberOfPartitions = topologyDescriber.maxNumInputPartitions(subtopology);
subtopologyToActiveMember.put(subtopology, new String[numberOfPartitions]);
}
// Copy existing assignment and fill temporary data structures
for (Map.Entry<String, AssignmentMemberSpec> memberEntry : groupSpec.members().entrySet()) {
final String memberId = memberEntry.getKey();
final AssignmentMemberSpec memberSpec = memberEntry.getValue();
Map<String, Set<Integer>> activeTasks = new HashMap<>(memberSpec.activeTasks());
newTargetAssignment.put(memberId, new MemberAssignment(activeTasks, new HashMap<>(), new HashMap<>()));
for (Map.Entry<String, Set<Integer>> entry : activeTasks.entrySet()) {
final String subtopologyId = entry.getKey();
final Set<Integer> taskIds = entry.getValue();
final String[] activeMembers = subtopologyToActiveMember.get(subtopologyId);
for (int taskId : taskIds) {
if (activeMembers[taskId] != null) {
throw new TaskAssignorException(
"Task " + taskId + " of subtopology " + subtopologyId + " is assigned to multiple members");
}
activeMembers[taskId] = memberId;
}
}
}
// Define priority queue to sort members by task count
PriorityQueue<MemberAndTaskCount> memberAndTaskCount = new PriorityQueue<>(Comparator.comparingInt(m -> m.taskCount));
memberAndTaskCount.addAll(
newTargetAssignment.keySet().stream()
.map(memberId -> new MemberAndTaskCount(memberId,
newTargetAssignment.get(memberId).activeTasks().values().stream().mapToInt(Set::size).sum()))
.collect(Collectors.toSet())
);
// Assign unassigned tasks to members with the fewest tasks
for (Map.Entry<String, String[]> entry : subtopologyToActiveMember.entrySet()) {
final String subtopologyId = entry.getKey();
final String[] activeMembers = entry.getValue();
for (int i = 0; i < activeMembers.length; i++) {
if (activeMembers[i] == null) {
final MemberAndTaskCount m = memberAndTaskCount.poll();
if (m == null) {
throw new TaskAssignorException("No member available to assign task " + i + " of subtopology " + subtopologyId);
}
newTargetAssignment.get(m.memberId).activeTasks().computeIfAbsent(subtopologyId, k -> new HashSet<>()).add(i);
activeMembers[i] = m.memberId;
memberAndTaskCount.add(new MemberAndTaskCount(m.memberId, m.taskCount + 1));
}
}
}
return new GroupAssignment(newTargetAssignment);
}
private record MemberAndTaskCount(String memberId, int taskCount) {
}
}
|
MockAssignor
|
java
|
spring-projects__spring-framework
|
spring-webflux/src/main/java/org/springframework/web/reactive/function/client/WebClientResponseException.java
|
{
"start": 12850,
"end": 13263
}
|
class ____ extends WebClientResponseException {
Forbidden(
String statusText, HttpHeaders headers, byte[] body, @Nullable Charset charset,
@Nullable HttpRequest request) {
super(HttpStatus.FORBIDDEN, statusText, headers, body, charset, request);
}
}
/**
* {@link WebClientResponseException} for status HTTP 404 Not Found.
* @since 5.1
*/
@SuppressWarnings("serial")
public static
|
Forbidden
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/leaderelection/ZooKeeperLeaderElectionDriverTest.java
|
{
"start": 20436,
"end": 21503
}
|
class ____ {
protected final TestingLeaderElectionListener leaderElectionListener;
protected final CuratorFrameworkWithUnhandledErrorListener curatorFramework;
protected final ZooKeeperLeaderElectionDriver leaderElectionDriver;
private Context() throws Exception {
this.leaderElectionListener = new TestingLeaderElectionListener();
this.curatorFramework = startCuratorFramework();
this.leaderElectionDriver =
new ZooKeeperLeaderElectionDriver(
curatorFramework.asCuratorFramework(), leaderElectionListener);
}
protected final void runTest(RunnableWithException test) throws Exception {
try {
test.run();
} finally {
close();
leaderElectionListener.failIfErrorEventHappened();
}
}
private void close() throws Exception {
this.leaderElectionDriver.close();
this.curatorFramework.close();
}
}
}
|
Context
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobConfig.java
|
{
"start": 1586,
"end": 11219
}
|
class ____ implements NamedWriteable, ToXContentObject {
private static final String NAME = "xpack/rollup/jobconfig";
private static final TimeValue DEFAULT_TIMEOUT = TimeValue.timeValueSeconds(20);
private static final String ID = "id";
private static final String TIMEOUT = "timeout";
private static final String CRON = "cron";
private static final String PAGE_SIZE = "page_size";
private static final String INDEX_PATTERN = "index_pattern";
private static final String ROLLUP_INDEX = "rollup_index";
private final String id;
private final String indexPattern;
private final String rollupIndex;
private final GroupConfig groupConfig;
private final List<MetricConfig> metricsConfig;
private final TimeValue timeout;
private final String cron;
private final int pageSize;
private final String[] indices;
private static final ConstructingObjectParser<RollupJobConfig, String> PARSER;
static {
PARSER = new ConstructingObjectParser<>(NAME, false, (args, optionalId) -> {
String id = args[0] != null ? (String) args[0] : optionalId;
String indexPattern = (String) args[1];
String rollupIndex = (String) args[2];
GroupConfig groupConfig = (GroupConfig) args[3];
@SuppressWarnings("unchecked")
List<MetricConfig> metricsConfig = (List<MetricConfig>) args[4];
TimeValue timeout = (TimeValue) args[5];
String cron = (String) args[6];
int pageSize = (int) args[7];
return new RollupJobConfig(id, indexPattern, rollupIndex, cron, pageSize, groupConfig, metricsConfig, timeout);
});
PARSER.declareString(optionalConstructorArg(), new ParseField(ID));
PARSER.declareString(constructorArg(), new ParseField(INDEX_PATTERN));
PARSER.declareString(constructorArg(), new ParseField(ROLLUP_INDEX));
PARSER.declareObject(optionalConstructorArg(), (p, c) -> GroupConfig.fromXContent(p), new ParseField(GroupConfig.NAME));
PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> MetricConfig.fromXContent(p), new ParseField(MetricConfig.NAME));
PARSER.declareField(
optionalConstructorArg(),
(p, c) -> TimeValue.parseTimeValue(p.textOrNull(), TIMEOUT),
new ParseField(TIMEOUT),
ObjectParser.ValueType.STRING_OR_NULL
);
PARSER.declareString(constructorArg(), new ParseField(CRON));
PARSER.declareInt(constructorArg(), new ParseField(PAGE_SIZE));
}
public RollupJobConfig(
final String id,
final String indexPattern,
final String rollupIndex,
final String cron,
final int pageSize,
final GroupConfig groupConfig,
final List<MetricConfig> metricsConfig,
final @Nullable TimeValue timeout
) {
if (id == null || id.isEmpty()) {
throw new IllegalArgumentException("Id must be a non-null, non-empty string");
}
if (indexPattern == null || indexPattern.isEmpty()) {
throw new IllegalArgumentException("Index pattern must be a non-null, non-empty string");
}
this.indices = Strings.splitStringByCommaToArray(indexPattern);
for (String index : this.indices) {
if (Regex.isMatchAllPattern(index)) {
throw new IllegalArgumentException("Index pattern must not match all indices (as it would match it's own rollup index");
}
if (Regex.isSimpleMatchPattern(index)) {
if (Regex.simpleMatch(index, rollupIndex)) {
throw new IllegalArgumentException("Index pattern would match rollup index name which is not allowed");
}
}
if (index.equals(rollupIndex)) {
throw new IllegalArgumentException("Rollup index may not be the same as the index pattern");
}
}
if (rollupIndex == null || rollupIndex.isEmpty()) {
throw new IllegalArgumentException("Rollup index must be a non-null, non-empty string");
}
if (cron == null || cron.isEmpty()) {
throw new IllegalArgumentException("Cron schedule must be a non-null, non-empty string");
}
if (pageSize <= 0) {
throw new IllegalArgumentException("Page size is mandatory and must be a positive long");
}
if (groupConfig == null && (metricsConfig == null || metricsConfig.isEmpty())) {
throw new IllegalArgumentException("At least one grouping or metric must be configured");
}
this.id = id;
this.indexPattern = indexPattern;
this.rollupIndex = rollupIndex;
this.groupConfig = groupConfig;
this.metricsConfig = metricsConfig != null ? metricsConfig : Collections.emptyList();
this.timeout = timeout != null ? timeout : DEFAULT_TIMEOUT;
this.cron = cron;
this.pageSize = pageSize;
}
public RollupJobConfig(final StreamInput in) throws IOException {
id = in.readString();
indexPattern = in.readString();
rollupIndex = in.readString();
cron = in.readString();
groupConfig = in.readOptionalWriteable(GroupConfig::new);
metricsConfig = in.readCollectionAsList(MetricConfig::new);
timeout = in.readTimeValue();
pageSize = in.readInt();
indices = Strings.splitStringByCommaToArray(indexPattern);
}
public String getId() {
return id;
}
public GroupConfig getGroupConfig() {
return groupConfig;
}
public List<MetricConfig> getMetricsConfig() {
return metricsConfig;
}
public TimeValue getTimeout() {
return timeout;
}
public String getIndexPattern() {
return indexPattern;
}
public String getRollupIndex() {
return rollupIndex;
}
public String getCron() {
return cron;
}
public int getPageSize() {
return pageSize;
}
@Override
public String getWriteableName() {
return NAME;
}
public Set<String> getAllFields() {
final Set<String> fields = new HashSet<>();
if (groupConfig != null) {
fields.addAll(groupConfig.getAllFields());
}
if (metricsConfig != null) {
for (MetricConfig metric : metricsConfig) {
fields.add(metric.getField());
}
}
return Collections.unmodifiableSet(fields);
}
public String[] indices() {
return indices;
}
public void validateMappings(
final Map<String, Map<String, FieldCapabilities>> fieldCapsResponse,
final ActionRequestValidationException validationException
) {
groupConfig.validateMappings(fieldCapsResponse, validationException);
for (MetricConfig m : metricsConfig) {
m.validateMappings(fieldCapsResponse, validationException);
}
}
@Override
public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException {
builder.startObject();
{
builder.field(ID, id);
builder.field(INDEX_PATTERN, indexPattern);
builder.field(ROLLUP_INDEX, rollupIndex);
builder.field(CRON, cron);
if (groupConfig != null) {
builder.field(GroupConfig.NAME, groupConfig);
}
if (metricsConfig != null) {
builder.startArray(MetricConfig.NAME);
for (MetricConfig metric : metricsConfig) {
metric.toXContent(builder, params);
}
builder.endArray();
}
if (timeout != null) {
builder.field(TIMEOUT, timeout.getStringRep());
}
builder.field(PAGE_SIZE, pageSize);
}
builder.endObject();
return builder;
}
@Override
public void writeTo(final StreamOutput out) throws IOException {
out.writeString(id);
out.writeString(indexPattern);
out.writeString(rollupIndex);
out.writeString(cron);
out.writeOptionalWriteable(groupConfig);
out.writeCollection(metricsConfig);
out.writeTimeValue(timeout);
out.writeInt(pageSize);
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other == null || getClass() != other.getClass()) {
return false;
}
final RollupJobConfig that = (RollupJobConfig) other;
return Objects.equals(this.id, that.id)
&& Objects.equals(this.indexPattern, that.indexPattern)
&& Objects.equals(this.rollupIndex, that.rollupIndex)
&& Objects.equals(this.cron, that.cron)
&& Objects.equals(this.groupConfig, that.groupConfig)
&& Objects.equals(this.metricsConfig, that.metricsConfig)
&& Objects.equals(this.timeout, that.timeout)
&& Objects.equals(this.pageSize, that.pageSize);
}
@Override
public int hashCode() {
return Objects.hash(id, indexPattern, rollupIndex, cron, groupConfig, metricsConfig, timeout, pageSize);
}
@Override
public String toString() {
return Strings.toString(this, true, true);
}
public static RollupJobConfig fromXContent(final XContentParser parser, @Nullable final String optionalJobId) throws IOException {
return PARSER.parse(parser, optionalJobId);
}
}
|
RollupJobConfig
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/UnnecessaryStaticImportTest.java
|
{
"start": 1472,
"end": 1698
}
|
class ____ {}
""")
.doTest();
}
@Test
public void positiveRename() {
compilationHelper
.addSourceLines(
"a/A.java",
"""
package a;
public
|
Test
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/api/iterable/IterableAssert_areNot_Test.java
|
{
"start": 1086,
"end": 1563
}
|
class ____ extends IterableAssertBaseTest {
private static Condition<Object> condition;
@BeforeAll
static void beforeOnce() {
condition = new TestCondition<>();
}
@Override
protected ConcreteIterableAssert<Object> invoke_api_method() {
return assertions.areNot(condition);
}
@Override
protected void verify_internal_effects() {
verify(iterables).assertAreNot(getInfo(assertions), getActual(assertions), condition);
}
}
|
IterableAssert_areNot_Test
|
java
|
spring-projects__spring-boot
|
module/spring-boot-jackson/src/main/java/org/springframework/boot/jackson/ObjectValueDeserializer.java
|
{
"start": 1343,
"end": 5159
}
|
class ____<T> extends ValueDeserializer<T> {
@Override
public final T deserialize(JsonParser jp, DeserializationContext ctxt) {
TreeNode tree = jp.readValueAsTree();
if (tree instanceof JsonNode jsonNode) {
return deserializeObject(jp, ctxt, jsonNode);
}
throw new IllegalStateException(
"JsonParser " + jp + " produced " + tree.getClass() + " that is not a JsonNode");
}
/**
* Deserialize JSON content into the value type this serializer handles.
* @param jsonParser the source parser used for reading JSON content
* @param context context that can be used to access information about this
* deserialization activity
* @param tree deserialized JSON content as tree expressed using set of
* {@link TreeNode} instances
* @return the deserialized object
* @see #deserialize(JsonParser, DeserializationContext)
*/
protected abstract T deserializeObject(JsonParser jsonParser, DeserializationContext context, JsonNode tree);
/**
* Helper method to extract a value from the given {@code jsonNode} or return
* {@code null} when the node itself is {@code null}.
* @param jsonNode the source node (may be {@code null})
* @param type the data type. May be {@link String}, {@link Boolean}, {@link Long},
* {@link Integer}, {@link Short}, {@link Double}, {@link Float}, {@link BigDecimal}
* or {@link BigInteger}.
* @param <D> the data type requested
* @param <R> the result type
* @param mapper a mapper to convert the value when it is not {@code null}
* @return the node value or {@code null}
*/
protected final <D, R> @Nullable R nullSafeValue(@Nullable JsonNode jsonNode, Class<D> type,
Function<D, R> mapper) {
D value = nullSafeValue(jsonNode, type);
return (value != null) ? mapper.apply(value) : null;
}
/**
* Helper method to extract a value from the given {@code jsonNode} or return
* {@code null} when the node itself is {@code null}.
* @param jsonNode the source node (may be {@code null})
* @param type the data type. May be {@link String}, {@link Boolean}, {@link Long},
* {@link Integer}, {@link Short}, {@link Double}, {@link Float}, {@link BigDecimal}
* or {@link BigInteger}.
* @param <D> the data type requested
* @return the node value or {@code null}
*/
@SuppressWarnings({ "unchecked" })
protected final <D> @Nullable D nullSafeValue(@Nullable JsonNode jsonNode, Class<D> type) {
Assert.notNull(type, "'type' must not be null");
if (jsonNode == null) {
return null;
}
if (type == String.class) {
return (D) jsonNode.stringValue();
}
if (type == Boolean.class) {
return (D) Boolean.valueOf(jsonNode.booleanValue());
}
if (type == Long.class) {
return (D) Long.valueOf(jsonNode.longValue());
}
if (type == Integer.class) {
return (D) Integer.valueOf(jsonNode.intValue());
}
if (type == Short.class) {
return (D) Short.valueOf(jsonNode.shortValue());
}
if (type == Double.class) {
return (D) Double.valueOf(jsonNode.doubleValue());
}
if (type == Float.class) {
return (D) Float.valueOf(jsonNode.floatValue());
}
if (type == BigDecimal.class) {
return (D) jsonNode.decimalValue();
}
if (type == BigInteger.class) {
return (D) jsonNode.bigIntegerValue();
}
throw new IllegalArgumentException("Unsupported value type " + type.getName());
}
/**
* Helper method to return a {@link JsonNode} from the tree.
* @param tree the source tree
* @param fieldName the field name to extract
* @return the {@link JsonNode}
*/
protected final JsonNode getRequiredNode(JsonNode tree, String fieldName) {
Assert.notNull(tree, "'tree' must not be null");
JsonNode node = tree.get(fieldName);
Assert.state(node != null && !(node instanceof NullNode), () -> "Missing JSON field '" + fieldName + "'");
return node;
}
}
|
ObjectValueDeserializer
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/sql/results/spi/ScrollableResultsConsumer.java
|
{
"start": 917,
"end": 2707
}
|
class ____<R> implements ResultsConsumer<ScrollableResultsImplementor<R>, R> {
/**
* Singleton access to the standard scrollable-results consumer instance
*
* @deprecated in favor of {@link #instance()}
*/
@SuppressWarnings( "rawtypes" )
@Deprecated( forRemoval = true )
public static final ScrollableResultsConsumer INSTANCE = new ScrollableResultsConsumer();
@SuppressWarnings("unchecked")
public static <R> ScrollableResultsConsumer<R> instance() {
return INSTANCE;
}
@Override
public ScrollableResultsImplementor<R> consume(
JdbcValues jdbcValues,
SharedSessionContractImplementor session,
JdbcValuesSourceProcessingOptions processingOptions,
JdbcValuesSourceProcessingState jdbcValuesSourceProcessingState,
RowProcessingStateStandardImpl rowProcessingState,
RowReader<R> rowReader) {
rowReader.startLoading( rowProcessingState );
if ( containsCollectionFetches( jdbcValues.getValuesMapping() ) ) {
return new FetchingScrollableResultsImpl<>(
jdbcValues,
processingOptions,
jdbcValuesSourceProcessingState,
rowProcessingState,
rowReader,
session
);
}
else {
return new ScrollableResultsImpl<>(
jdbcValues,
processingOptions,
jdbcValuesSourceProcessingState,
rowProcessingState,
rowReader,
session
);
}
}
@Override
public boolean canResultsBeCached() {
return false;
}
private boolean containsCollectionFetches(JdbcValuesMapping valuesMapping) {
final List<DomainResult<?>> domainResults = valuesMapping.getDomainResults();
for ( DomainResult<?> domainResult : domainResults ) {
if ( domainResult instanceof EntityResult entityResult && entityResult.containsCollectionFetches() ) {
return true;
}
}
return false;
}
}
|
ScrollableResultsConsumer
|
java
|
redisson__redisson
|
redisson-spring-data/redisson-spring-data-26/src/main/java/org/redisson/spring/data/connection/ObjectMapEntryReplayDecoder.java
|
{
"start": 1025,
"end": 1749
}
|
class ____ implements MultiDecoder<List<Entry<Object, Object>>> {
@Override
public Decoder<Object> getDecoder(Codec codec, int paramNum, State state, long size) {
if (paramNum % 2 != 0) {
return codec.getMapValueDecoder();
}
return codec.getMapKeyDecoder();
}
@Override
public List<Entry<Object, Object>> decode(List<Object> parts, State state) {
Map<Object, Object> result = new LinkedHashMap<>(parts.size() / 2);
for (int i = 0; i < parts.size(); i++) {
if (i % 2 != 0) {
result.put(parts.get(i-1), parts.get(i));
}
}
return new ArrayList<>(result.entrySet());
}
}
|
ObjectMapEntryReplayDecoder
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/TimerService.java
|
{
"start": 929,
"end": 1715
}
|
interface ____ life cycle methods.
*
* <p>The registration of timers follows a life cycle of three phases:
*
* <ol>
* <li>In the initial state, it accepts timer registrations and triggers when the time is reached.
* <li>After calling {@link #quiesce()}, further calls to {@link #registerTimer(long,
* ProcessingTimeCallback)} will not register any further timers, and will return a "dummy"
* future as a result. This is used for clean shutdown, where currently firing timers are
* waited for and no future timers can be scheduled, without causing hard exceptions.
* <li>After a call to {@link #shutdownService()}, all calls to {@link #registerTimer(long,
* ProcessingTimeCallback)} will result in a hard exception.
* </ol>
*/
@Internal
public
|
with
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/jpa/criteria/valuehandlingmode/inline/NonPkAssociationEqualityPredicateTest.java
|
{
"start": 1102,
"end": 2284
}
|
class ____ {
@Test
public void testEqualityCheck(EntityManagerFactoryScope scope) {
scope.inTransaction(
entityManager -> {
CriteriaBuilder builder = entityManager.getCriteriaBuilder();
CriteriaQuery<Order> orderCriteria = builder.createQuery( Order.class );
Root<Order> orderRoot = orderCriteria.from( Order.class );
orderCriteria.select( orderRoot );
Customer c = new Customer();
c.customerNumber = 123L;
orderCriteria.where(
builder.equal( orderRoot.get( "customer" ), c )
);
List<Order> orders = entityManager.createQuery( orderCriteria ).getResultList();
assertEquals( 0, orders.size() );
}
);
}
@Test
public void testDifferentAssociationsEqualityCheck(EntityManagerFactoryScope scope) {
scope.inTransaction(
entityManager -> {
// This fails because we compare a ToOne with non-PK to something with a EntityValuedModelPart which defaults to the PK mapping
entityManager.createQuery( "from Order o, Customer c where o.customer = c", Object[].class ).getResultList();
}
);
}
@Entity(name = "Order")
@Table(name = "ORDER_TABLE")
public static
|
NonPkAssociationEqualityPredicateTest
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java
|
{
"start": 2421,
"end": 47735
}
|
class ____ extends AbstractCatAction {
private static final Set<String> RESPONSE_PARAMS = addToCopy(AbstractCatAction.RESPONSE_PARAMS, "local", "health");
private static final DateFormatter STRICT_DATE_TIME_FORMATTER = DateFormatter.forPattern("strict_date_time");
private final ProjectIdResolver projectIdResolver;
public RestIndicesAction(ProjectIdResolver projectIdResolver) {
this.projectIdResolver = projectIdResolver;
}
@Override
public List<Route> routes() {
return List.of(new Route(GET, "/_cat/indices"), new Route(GET, "/_cat/indices/{index}"));
}
@Override
public String getName() {
return "cat_indices_action";
}
@Override
public boolean allowSystemIndexAccessByDefault() {
return true;
}
@Override
protected void documentation(StringBuilder sb) {
sb.append("/_cat/indices\n");
sb.append("/_cat/indices/{index}\n");
}
@Override
public RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) {
final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
final IndicesOptions indicesOptions = IndicesOptions.fromRequest(request, IndicesOptions.strictExpand());
final TimeValue masterNodeTimeout = getMasterNodeTimeout(request);
final boolean includeUnloadedSegments = request.paramAsBoolean("include_unloaded_segments", false);
return channel -> {
final var indexSettingsRef = new AtomicReference<GetSettingsResponse>();
final var indicesStatsRef = new AtomicReference<IndicesStatsResponse>();
final var clusterStateRef = new AtomicReference<ClusterStateResponse>();
try (var listeners = new RefCountingListener(new RestResponseListener<>(channel) {
@Override
public RestResponse buildResponse(Void ignored) throws Exception {
return RestTable.buildResponse(
buildTable(
request,
indexSettingsRef.get().getIndexToSettings(),
clusterStateRef.get().getState(),
indicesStatsRef.get().getIndices()
),
channel
);
}
})) {
// [NOTE: WHY GET SETTINGS] Use the Get Settings API to determine the indices we should return because:
//
// - the Cluster State API does not filter output based on index privileges, so they can't be used to determine which
// indices are authorized or not.
//
// - the Indices Stats API does not provide information for all existing indices (for example recovering indices or
// non-replicated closed indices are not reported in its response).
//
// Note that the user must be authorized to request the cluster state to use this API, which means they can use the cluster
// state API to get a list of all the indices in the cluster anyway. Thus filtering out the unauthorized indices has limited
// security benefits - it's more of a convenience thing.
client.admin()
.indices()
.prepareGetSettings(masterNodeTimeout, indices)
.setIndicesOptions(indicesOptions)
.execute(listeners.acquire(indexSettingsRef::set));
// The other requests just provide additional detail, and wildcards may be resolved differently depending on the type of
// request in the presence of security plugins, so set the IndicesOptions for all the sub-requests to be as inclusive as
// possible.
final IndicesOptions subRequestIndicesOptions = IndicesOptions.lenientExpandHidden();
client.admin()
.cluster()
.prepareState(masterNodeTimeout)
.clear()
.setMetadata(true)
.setRoutingTable(true)
.setIndices(indices)
.setIndicesOptions(subRequestIndicesOptions)
.execute(listeners.acquire(clusterStateRef::set));
client.admin()
.indices()
.prepareStats(indices)
.setIndicesOptions(subRequestIndicesOptions)
.all()
.setIncludeUnloadedSegments(includeUnloadedSegments)
.execute(listeners.acquire(indicesStatsRef::set));
}
};
}
@Override
protected Set<String> responseParams() {
return RESPONSE_PARAMS;
}
@Override
protected Table getTableWithHeader(final RestRequest request) {
Table table = new Table();
table.startHeaders();
table.addCell("health", "alias:h;desc:current health status");
table.addCell("status", "alias:s;desc:open/close status");
table.addCell("index", "alias:i,idx;desc:index name");
table.addCell("uuid", "alias:id,uuid;desc:index uuid");
table.addCell("pri", "alias:p,shards.primary,shardsPrimary;text-align:right;desc:number of primary shards");
table.addCell("rep", "alias:r,shards.replica,shardsReplica;text-align:right;desc:number of replica shards");
table.addCell("docs.count", "alias:dc,docsCount;text-align:right;desc:available docs");
table.addCell("docs.deleted", "alias:dd,docsDeleted;text-align:right;desc:deleted docs");
table.addCell("creation.date", "alias:cd;default:false;desc:index creation date (millisecond value)");
table.addCell("creation.date.string", "alias:cds;default:false;desc:index creation date (as string)");
table.addCell("store.size", "sibling:pri;alias:ss,storeSize;text-align:right;desc:store size of primaries & replicas");
table.addCell("pri.store.size", "text-align:right;desc:store size of primaries");
table.addCell("dataset.size", "text-align:right;desc:total size of dataset");
table.addCell("completion.size", "sibling:pri;alias:cs,completionSize;default:false;text-align:right;desc:size of completion");
table.addCell("pri.completion.size", "default:false;text-align:right;desc:size of completion");
table.addCell(
"fielddata.memory_size",
"sibling:pri;alias:fm,fielddataMemory;default:false;text-align:right;desc:used fielddata cache"
);
table.addCell("pri.fielddata.memory_size", "default:false;text-align:right;desc:used fielddata cache");
table.addCell(
"fielddata.evictions",
"sibling:pri;alias:fe,fielddataEvictions;default:false;text-align:right;desc:fielddata evictions"
);
table.addCell("pri.fielddata.evictions", "default:false;text-align:right;desc:fielddata evictions");
table.addCell(
"query_cache.memory_size",
"sibling:pri;alias:qcm,queryCacheMemory;default:false;text-align:right;desc:used query cache"
);
table.addCell("pri.query_cache.memory_size", "default:false;text-align:right;desc:used query cache");
table.addCell(
"query_cache.evictions",
"sibling:pri;alias:qce,queryCacheEvictions;default:false;text-align:right;desc:query cache evictions"
);
table.addCell("pri.query_cache.evictions", "default:false;text-align:right;desc:query cache evictions");
table.addCell(
"request_cache.memory_size",
"sibling:pri;alias:rcm,requestCacheMemory;default:false;text-align:right;desc:used request cache"
);
table.addCell("pri.request_cache.memory_size", "default:false;text-align:right;desc:used request cache");
table.addCell(
"request_cache.evictions",
"sibling:pri;alias:rce,requestCacheEvictions;default:false;text-align:right;desc:request cache evictions"
);
table.addCell("pri.request_cache.evictions", "default:false;text-align:right;desc:request cache evictions");
table.addCell(
"request_cache.hit_count",
"sibling:pri;alias:rchc,requestCacheHitCount;default:false;text-align:right;desc:request cache hit count"
);
table.addCell("pri.request_cache.hit_count", "default:false;text-align:right;desc:request cache hit count");
table.addCell(
"request_cache.miss_count",
"sibling:pri;alias:rcmc,requestCacheMissCount;default:false;text-align:right;desc:request cache miss count"
);
table.addCell("pri.request_cache.miss_count", "default:false;text-align:right;desc:request cache miss count");
table.addCell("flush.total", "sibling:pri;alias:ft,flushTotal;default:false;text-align:right;desc:number of flushes");
table.addCell("pri.flush.total", "default:false;text-align:right;desc:number of flushes");
table.addCell("flush.total_time", "sibling:pri;alias:ftt,flushTotalTime;default:false;text-align:right;desc:time spent in flush");
table.addCell("pri.flush.total_time", "default:false;text-align:right;desc:time spent in flush");
table.addCell("get.current", "sibling:pri;alias:gc,getCurrent;default:false;text-align:right;desc:number of current get ops");
table.addCell("pri.get.current", "default:false;text-align:right;desc:number of current get ops");
table.addCell("get.time", "sibling:pri;alias:gti,getTime;default:false;text-align:right;desc:time spent in get");
table.addCell("pri.get.time", "default:false;text-align:right;desc:time spent in get");
table.addCell("get.total", "sibling:pri;alias:gto,getTotal;default:false;text-align:right;desc:number of get ops");
table.addCell("pri.get.total", "default:false;text-align:right;desc:number of get ops");
table.addCell(
"get.exists_time",
"sibling:pri;alias:geti,getExistsTime;default:false;text-align:right;desc:time spent in successful gets"
);
table.addCell("pri.get.exists_time", "default:false;text-align:right;desc:time spent in successful gets");
table.addCell(
"get.exists_total",
"sibling:pri;alias:geto,getExistsTotal;default:false;text-align:right;desc:number of successful gets"
);
table.addCell("pri.get.exists_total", "default:false;text-align:right;desc:number of successful gets");
table.addCell(
"get.missing_time",
"sibling:pri;alias:gmti,getMissingTime;default:false;text-align:right;desc:time spent in failed gets"
);
table.addCell("pri.get.missing_time", "default:false;text-align:right;desc:time spent in failed gets");
table.addCell(
"get.missing_total",
"sibling:pri;alias:gmto,getMissingTotal;default:false;text-align:right;desc:number of failed gets"
);
table.addCell("pri.get.missing_total", "default:false;text-align:right;desc:number of failed gets");
table.addCell(
"indexing.delete_current",
"sibling:pri;alias:idc,indexingDeleteCurrent;default:false;text-align:right;desc:number of current deletions"
);
table.addCell("pri.indexing.delete_current", "default:false;text-align:right;desc:number of current deletions");
table.addCell(
"indexing.delete_time",
"sibling:pri;alias:idti,indexingDeleteTime;default:false;text-align:right;desc:time spent in deletions"
);
table.addCell("pri.indexing.delete_time", "default:false;text-align:right;desc:time spent in deletions");
table.addCell(
"indexing.delete_total",
"sibling:pri;alias:idto,indexingDeleteTotal;default:false;text-align:right;desc:number of delete ops"
);
table.addCell("pri.indexing.delete_total", "default:false;text-align:right;desc:number of delete ops");
table.addCell(
"indexing.index_current",
"sibling:pri;alias:iic,indexingIndexCurrent;default:false;text-align:right;desc:number of current indexing ops"
);
table.addCell("pri.indexing.index_current", "default:false;text-align:right;desc:number of current indexing ops");
table.addCell(
"indexing.index_time",
"sibling:pri;alias:iiti,indexingIndexTime;default:false;text-align:right;desc:time spent in indexing"
);
table.addCell("pri.indexing.index_time", "default:false;text-align:right;desc:time spent in indexing");
table.addCell(
"indexing.index_total",
"sibling:pri;alias:iito,indexingIndexTotal;default:false;text-align:right;desc:number of indexing ops"
);
table.addCell("pri.indexing.index_total", "default:false;text-align:right;desc:number of indexing ops");
table.addCell(
"indexing.index_failed",
"sibling:pri;alias:iif,indexingIndexFailed;default:false;text-align:right;desc:number of failed indexing ops"
);
table.addCell("pri.indexing.index_failed", "default:false;text-align:right;desc:number of failed indexing ops");
table.addCell(
"indexing.index_failed_due_to_version_conflict",
"sibling:pri;alias:iifvc,indexingIndexFailedDueToVersionConflict;default:false;text-align:right;"
+ "desc:number of failed indexing ops due to version conflict"
);
table.addCell(
"pri.indexing.index_failed_due_to_version_conflict",
"default:false;text-align:right;desc:number of failed indexing ops due to version conflict"
);
table.addCell("merges.current", "sibling:pri;alias:mc,mergesCurrent;default:false;text-align:right;desc:number of current merges");
table.addCell("pri.merges.current", "default:false;text-align:right;desc:number of current merges");
table.addCell(
"merges.current_docs",
"sibling:pri;alias:mcd,mergesCurrentDocs;default:false;text-align:right;desc:number of current merging docs"
);
table.addCell("pri.merges.current_docs", "default:false;text-align:right;desc:number of current merging docs");
table.addCell(
"merges.current_size",
"sibling:pri;alias:mcs,mergesCurrentSize;default:false;text-align:right;desc:size of current merges"
);
table.addCell("pri.merges.current_size", "default:false;text-align:right;desc:size of current merges");
table.addCell("merges.total", "sibling:pri;alias:mt,mergesTotal;default:false;text-align:right;desc:number of completed merge ops");
table.addCell("pri.merges.total", "default:false;text-align:right;desc:number of completed merge ops");
table.addCell("merges.total_docs", "sibling:pri;alias:mtd,mergesTotalDocs;default:false;text-align:right;desc:docs merged");
table.addCell("pri.merges.total_docs", "default:false;text-align:right;desc:docs merged");
table.addCell("merges.total_size", "sibling:pri;alias:mts,mergesTotalSize;default:false;text-align:right;desc:size merged");
table.addCell("pri.merges.total_size", "default:false;text-align:right;desc:size merged");
table.addCell(
"merges.total_time",
"sibling:pri;alias:mtt,mergesTotalTime;default:false;text-align:right;desc:time spent in merges"
);
table.addCell("pri.merges.total_time", "default:false;text-align:right;desc:time spent in merges");
table.addCell("refresh.total", "sibling:pri;alias:rto,refreshTotal;default:false;text-align:right;desc:total refreshes");
table.addCell("pri.refresh.total", "default:false;text-align:right;desc:total refreshes");
table.addCell("refresh.time", "sibling:pri;alias:rti,refreshTime;default:false;text-align:right;desc:time spent in refreshes");
table.addCell("pri.refresh.time", "default:false;text-align:right;desc:time spent in refreshes");
table.addCell(
"refresh.external_total",
"sibling:pri;alias:rto,refreshTotal;default:false;text-align:right;desc:total external refreshes"
);
table.addCell("pri.refresh.external_total", "default:false;text-align:right;desc:total external refreshes");
table.addCell(
"refresh.external_time",
"sibling:pri;alias:rti,refreshTime;default:false;text-align:right;desc:time spent in external refreshes"
);
table.addCell("pri.refresh.external_time", "default:false;text-align:right;desc:time spent in external refreshes");
table.addCell(
"refresh.listeners",
"sibling:pri;alias:rli,refreshListeners;default:false;text-align:right;desc:number of pending refresh listeners"
);
table.addCell("pri.refresh.listeners", "default:false;text-align:right;desc:number of pending refresh listeners");
table.addCell(
"search.fetch_current",
"sibling:pri;alias:sfc,searchFetchCurrent;default:false;text-align:right;desc:current fetch phase ops"
);
table.addCell("pri.search.fetch_current", "default:false;text-align:right;desc:current fetch phase ops");
table.addCell(
"search.fetch_time",
"sibling:pri;alias:sfti,searchFetchTime;default:false;text-align:right;desc:time spent in fetch phase"
);
table.addCell("pri.search.fetch_time", "default:false;text-align:right;desc:time spent in fetch phase");
table.addCell("search.fetch_total", "sibling:pri;alias:sfto,searchFetchTotal;default:false;text-align:right;desc:total fetch ops");
table.addCell("pri.search.fetch_total", "default:false;text-align:right;desc:total fetch ops");
table.addCell(
"search.open_contexts",
"sibling:pri;alias:so,searchOpenContexts;default:false;text-align:right;desc:open search contexts"
);
table.addCell("pri.search.open_contexts", "default:false;text-align:right;desc:open search contexts");
table.addCell(
"search.query_current",
"sibling:pri;alias:sqc,searchQueryCurrent;default:false;text-align:right;desc:current query phase ops"
);
table.addCell("pri.search.query_current", "default:false;text-align:right;desc:current query phase ops");
table.addCell(
"search.query_time",
"sibling:pri;alias:sqti,searchQueryTime;default:false;text-align:right;desc:time spent in query phase"
);
table.addCell("pri.search.query_time", "default:false;text-align:right;desc:time spent in query phase");
table.addCell(
"search.query_total",
"sibling:pri;alias:sqto,searchQueryTotal;default:false;text-align:right;desc:total query phase ops"
);
table.addCell("pri.search.query_total", "default:false;text-align:right;desc:total query phase ops");
table.addCell(
"search.scroll_current",
"sibling:pri;alias:scc,searchScrollCurrent;default:false;text-align:right;desc:open scroll contexts"
);
table.addCell("pri.search.scroll_current", "default:false;text-align:right;desc:open scroll contexts");
table.addCell(
"search.scroll_time",
"sibling:pri;alias:scti,searchScrollTime;default:false;text-align:right;desc:time scroll contexts held open"
);
table.addCell("pri.search.scroll_time", "default:false;text-align:right;desc:time scroll contexts held open");
table.addCell(
"search.scroll_total",
"sibling:pri;alias:scto,searchScrollTotal;default:false;text-align:right;desc:completed scroll contexts"
);
table.addCell("pri.search.scroll_total", "default:false;text-align:right;desc:completed scroll contexts");
table.addCell("segments.count", "sibling:pri;alias:sc,segmentsCount;default:false;text-align:right;desc:number of segments");
table.addCell("pri.segments.count", "default:false;text-align:right;desc:number of segments");
table.addCell("segments.memory", "sibling:pri;alias:sm,segmentsMemory;default:false;text-align:right;desc:memory used by segments");
table.addCell("pri.segments.memory", "default:false;text-align:right;desc:memory used by segments");
table.addCell(
"segments.index_writer_memory",
"sibling:pri;alias:siwm,segmentsIndexWriterMemory;default:false;text-align:right;desc:memory used by index writer"
);
table.addCell("pri.segments.index_writer_memory", "default:false;text-align:right;desc:memory used by index writer");
table.addCell(
"segments.version_map_memory",
"sibling:pri;alias:svmm,segmentsVersionMapMemory;default:false;text-align:right;desc:memory used by version map"
);
table.addCell("pri.segments.version_map_memory", "default:false;text-align:right;desc:memory used by version map");
table.addCell(
"segments.fixed_bitset_memory",
"sibling:pri;alias:sfbm,fixedBitsetMemory;default:false;text-align:right;desc:memory used by fixed bit sets for"
+ " nested object field types and type filters for types referred in _parent fields"
);
table.addCell(
"pri.segments.fixed_bitset_memory",
"default:false;text-align:right;desc:memory used by fixed bit sets for nested object"
+ " field types and type filters for types referred in _parent fields"
);
table.addCell("warmer.current", "sibling:pri;alias:wc,warmerCurrent;default:false;text-align:right;desc:current warmer ops");
table.addCell("pri.warmer.current", "default:false;text-align:right;desc:current warmer ops");
table.addCell("warmer.total", "sibling:pri;alias:wto,warmerTotal;default:false;text-align:right;desc:total warmer ops");
table.addCell("pri.warmer.total", "default:false;text-align:right;desc:total warmer ops");
table.addCell(
"warmer.total_time",
"sibling:pri;alias:wtt,warmerTotalTime;default:false;text-align:right;desc:time spent in warmers"
);
table.addCell("pri.warmer.total_time", "default:false;text-align:right;desc:time spent in warmers");
table.addCell(
"suggest.current",
"sibling:pri;alias:suc,suggestCurrent;default:false;text-align:right;desc:number of current suggest ops"
);
table.addCell("pri.suggest.current", "default:false;text-align:right;desc:number of current suggest ops");
table.addCell("suggest.time", "sibling:pri;alias:suti,suggestTime;default:false;text-align:right;desc:time spend in suggest");
table.addCell("pri.suggest.time", "default:false;text-align:right;desc:time spend in suggest");
table.addCell("suggest.total", "sibling:pri;alias:suto,suggestTotal;default:false;text-align:right;desc:number of suggest ops");
table.addCell("pri.suggest.total", "default:false;text-align:right;desc:number of suggest ops");
table.addCell("memory.total", "sibling:pri;alias:tm,memoryTotal;default:false;text-align:right;desc:total used memory");
table.addCell("pri.memory.total", "default:false;text-align:right;desc:total user memory");
table.addCell(
"bulk.total_operations",
"sibling:pri;alias:bto,bulkTotalOperation;default:false;text-align:right;desc:number of bulk shard ops"
);
table.addCell("pri.bulk.total_operations", "default:false;text-align:right;desc:number of bulk shard ops");
table.addCell(
"bulk.total_time",
"sibling:pri;alias:btti,bulkTotalTime;default:false;text-align:right;desc:time spend in shard bulk"
);
table.addCell("pri.bulk.total_time", "default:false;text-align:right;desc:time spend in shard bulk");
table.addCell(
"bulk.total_size_in_bytes",
"sibling:pri;alias:btsi,bulkTotalSizeInBytes;default:false;text-align:right;desc:total size in bytes of shard bulk"
);
table.addCell("pri.bulk.total_size_in_bytes", "default:false;text-align:right;desc:total size in bytes of shard bulk");
table.addCell(
"bulk.avg_time",
"sibling:pri;alias:bati,bulkAvgTime;default:false;text-align:right;desc:average time spend in shard bulk"
);
table.addCell("pri.bulk.avg_time", "default:false;text-align:right;desc:average time spend in shard bulk");
table.addCell(
"bulk.avg_size_in_bytes",
"sibling:pri;alias:basi,bulkAvgSizeInBytes;default:false;text-align:right;desc:average size in bytes of shard bulk"
);
table.addCell("pri.bulk.avg_size_in_bytes", "default:false;text-align:right;desc:average size in bytes of shard bulk");
table.addCell(
"dense_vector.value_count",
"sibling:pri;alias:dvc,denseVectorCount;default:false;text-align:right;desc:total count of indexed dense vector"
);
table.addCell("pri.dense_vector.value_count", "default:false;text-align:right;desc:total count of indexed dense vector");
table.addCell(
"sparse_vector.value_count",
"sibling:pri;alias:svc,sparseVectorCount;default:false;text-align:right;desc:total count of indexed sparse vectors"
);
table.addCell("pri.sparse_vector.value_count", "default:false;text-align:right;desc:total count of indexed sparse vectors");
table.endHeaders();
return table;
}
    // package private for testing
    /**
     * Builds one table row per index visible in {@code indicesSettings}, in the exact
     * column order declared by {@code getTableWithHeader}. Stats cells come in
     * (total, primary) pairs; any stat group whose holder is null renders as null.
     * Closed indices (or indices with no stats) render empty stats rather than stale ones.
     */
    Table buildTable(
        final RestRequest request,
        final Map<String, Settings> indicesSettings,
        final ClusterState clusterState,
        final Map<String, IndexStats> indicesStats
    ) {
        // Optional ?health=green|yellow|red filter; null means "no filtering".
        final String healthParam = request.param("health");
        final ClusterHealthStatus healthStatusFilter = healthParam == null ? null : ClusterHealthStatus.fromString(healthParam);
        final Table table = getTableWithHeader(request);
        // This action only supports single-project clusters; fail loudly otherwise.
        if (clusterState.metadata().projects().size() != 1) {
            throw new IllegalStateException(
                clusterState.metadata().projects().isEmpty() ? "No project available" : "Cluster has multiple projects"
            );
        }
        final ProjectMetadata project = clusterState.metadata().getProject(projectIdResolver.getProjectId());
        // Use indicesSettings to determine the indices returned - see [NOTE: WHY GET SETTINGS] above for details.
        indicesSettings.forEach((indexName, settings) -> {
            final IndexMetadata indexMetadata = project.index(indexName);
            if (indexMetadata == null) {
                // The index exists in indicesSettings but its metadata is missing, which means it was created or deleted
                // concurrently with this action. However none of the requests returned an IndexNotFoundException so we do not require it to
                // exist, and therefore we can just skip it.
                return;
            }
            final IndexMetadata.State indexState = indexMetadata.getState();
            final IndexStats indexStats = indicesStats.get(indexName);
            final IndexRoutingTable indexRoutingTable = clusterState.routingTable(project.id()).index(indexName);
            final ClusterHealthStatus indexHealthStatus = indexRoutingTable == null
                ? ClusterHealthStatus.RED // no routing table => cluster not recovered
                : new ClusterIndexHealth(indexMetadata, indexRoutingTable).getStatus();
            if (healthStatusFilter != null && indexHealthStatus != healthStatusFilter) {
                // index health does not match the one requested
                return;
            }
            // "red*" marks an index that has stats but no routing table yet (recovery in progress).
            final String health;
            if (indexRoutingTable != null) {
                health = indexHealthStatus.toString().toLowerCase(Locale.ROOT);
            } else if (indexStats != null) {
                health = "red*";
            } else {
                health = "";
            }
            final CommonStats primaryStats;
            final CommonStats totalStats;
            if (indexStats == null || indexState == IndexMetadata.State.CLOSE) {
                // TODO: expose docs stats for replicated closed indices
                primaryStats = new CommonStats();
                totalStats = new CommonStats();
            } else {
                primaryStats = indexStats.getPrimaries();
                totalStats = indexStats.getTotal();
            }
            // --- identity / layout columns ---
            table.startRow();
            table.addCell(health);
            table.addCell(indexState.toString().toLowerCase(Locale.ROOT));
            table.addCell(indexName);
            table.addCell(indexMetadata.getIndexUUID());
            table.addCell(indexMetadata.getNumberOfShards());
            table.addCell(indexMetadata.getNumberOfReplicas());
            // --- docs / creation / store ---
            table.addCell(primaryStats.getDocs() == null ? null : primaryStats.getDocs().getCount());
            table.addCell(primaryStats.getDocs() == null ? null : primaryStats.getDocs().getDeleted());
            table.addCell(indexMetadata.getCreationDate());
            ZonedDateTime creationTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(indexMetadata.getCreationDate()), ZoneOffset.UTC);
            table.addCell(STRICT_DATE_TIME_FORMATTER.format(creationTime));
            table.addCell(totalStats.getStore() == null ? null : totalStats.getStore().size());
            table.addCell(primaryStats.getStore() == null ? null : primaryStats.getStore().size());
            table.addCell(primaryStats.getStore() == null ? null : primaryStats.getStore().totalDataSetSize());
            // --- completion / fielddata / query cache / request cache ---
            table.addCell(totalStats.getCompletion() == null ? null : totalStats.getCompletion().getSize());
            table.addCell(primaryStats.getCompletion() == null ? null : primaryStats.getCompletion().getSize());
            table.addCell(totalStats.getFieldData() == null ? null : totalStats.getFieldData().getMemorySize());
            table.addCell(primaryStats.getFieldData() == null ? null : primaryStats.getFieldData().getMemorySize());
            table.addCell(totalStats.getFieldData() == null ? null : totalStats.getFieldData().getEvictions());
            table.addCell(primaryStats.getFieldData() == null ? null : primaryStats.getFieldData().getEvictions());
            table.addCell(totalStats.getQueryCache() == null ? null : totalStats.getQueryCache().getMemorySize());
            table.addCell(primaryStats.getQueryCache() == null ? null : primaryStats.getQueryCache().getMemorySize());
            table.addCell(totalStats.getQueryCache() == null ? null : totalStats.getQueryCache().getEvictions());
            table.addCell(primaryStats.getQueryCache() == null ? null : primaryStats.getQueryCache().getEvictions());
            table.addCell(totalStats.getRequestCache() == null ? null : totalStats.getRequestCache().getMemorySize());
            table.addCell(primaryStats.getRequestCache() == null ? null : primaryStats.getRequestCache().getMemorySize());
            table.addCell(totalStats.getRequestCache() == null ? null : totalStats.getRequestCache().getEvictions());
            table.addCell(primaryStats.getRequestCache() == null ? null : primaryStats.getRequestCache().getEvictions());
            table.addCell(totalStats.getRequestCache() == null ? null : totalStats.getRequestCache().getHitCount());
            table.addCell(primaryStats.getRequestCache() == null ? null : primaryStats.getRequestCache().getHitCount());
            table.addCell(totalStats.getRequestCache() == null ? null : totalStats.getRequestCache().getMissCount());
            table.addCell(primaryStats.getRequestCache() == null ? null : primaryStats.getRequestCache().getMissCount());
            // --- flush / get ---
            table.addCell(totalStats.getFlush() == null ? null : totalStats.getFlush().getTotal());
            table.addCell(primaryStats.getFlush() == null ? null : primaryStats.getFlush().getTotal());
            table.addCell(totalStats.getFlush() == null ? null : totalStats.getFlush().getTotalTime());
            table.addCell(primaryStats.getFlush() == null ? null : primaryStats.getFlush().getTotalTime());
            table.addCell(totalStats.getGet() == null ? null : totalStats.getGet().current());
            table.addCell(primaryStats.getGet() == null ? null : primaryStats.getGet().current());
            table.addCell(totalStats.getGet() == null ? null : totalStats.getGet().getTime());
            table.addCell(primaryStats.getGet() == null ? null : primaryStats.getGet().getTime());
            table.addCell(totalStats.getGet() == null ? null : totalStats.getGet().getCount());
            table.addCell(primaryStats.getGet() == null ? null : primaryStats.getGet().getCount());
            table.addCell(totalStats.getGet() == null ? null : totalStats.getGet().getExistsTime());
            table.addCell(primaryStats.getGet() == null ? null : primaryStats.getGet().getExistsTime());
            table.addCell(totalStats.getGet() == null ? null : totalStats.getGet().getExistsCount());
            table.addCell(primaryStats.getGet() == null ? null : primaryStats.getGet().getExistsCount());
            table.addCell(totalStats.getGet() == null ? null : totalStats.getGet().getMissingTime());
            table.addCell(primaryStats.getGet() == null ? null : primaryStats.getGet().getMissingTime());
            table.addCell(totalStats.getGet() == null ? null : totalStats.getGet().getMissingCount());
            table.addCell(primaryStats.getGet() == null ? null : primaryStats.getGet().getMissingCount());
            // --- indexing (delete + index + failures) ---
            table.addCell(totalStats.getIndexing() == null ? null : totalStats.getIndexing().getTotal().getDeleteCurrent());
            table.addCell(primaryStats.getIndexing() == null ? null : primaryStats.getIndexing().getTotal().getDeleteCurrent());
            table.addCell(totalStats.getIndexing() == null ? null : totalStats.getIndexing().getTotal().getDeleteTime());
            table.addCell(primaryStats.getIndexing() == null ? null : primaryStats.getIndexing().getTotal().getDeleteTime());
            table.addCell(totalStats.getIndexing() == null ? null : totalStats.getIndexing().getTotal().getDeleteCount());
            table.addCell(primaryStats.getIndexing() == null ? null : primaryStats.getIndexing().getTotal().getDeleteCount());
            table.addCell(totalStats.getIndexing() == null ? null : totalStats.getIndexing().getTotal().getIndexCurrent());
            table.addCell(primaryStats.getIndexing() == null ? null : primaryStats.getIndexing().getTotal().getIndexCurrent());
            table.addCell(totalStats.getIndexing() == null ? null : totalStats.getIndexing().getTotal().getIndexTime());
            table.addCell(primaryStats.getIndexing() == null ? null : primaryStats.getIndexing().getTotal().getIndexTime());
            table.addCell(totalStats.getIndexing() == null ? null : totalStats.getIndexing().getTotal().getIndexCount());
            table.addCell(primaryStats.getIndexing() == null ? null : primaryStats.getIndexing().getTotal().getIndexCount());
            table.addCell(totalStats.getIndexing() == null ? null : totalStats.getIndexing().getTotal().getIndexFailedCount());
            table.addCell(primaryStats.getIndexing() == null ? null : primaryStats.getIndexing().getTotal().getIndexFailedCount());
            table.addCell(
                totalStats.getIndexing() == null ? null : totalStats.getIndexing().getTotal().getIndexFailedDueToVersionConflictCount()
            );
            table.addCell(
                primaryStats.getIndexing() == null ? null : primaryStats.getIndexing().getTotal().getIndexFailedDueToVersionConflictCount()
            );
            // --- merges / refresh ---
            table.addCell(totalStats.getMerge() == null ? null : totalStats.getMerge().getCurrent());
            table.addCell(primaryStats.getMerge() == null ? null : primaryStats.getMerge().getCurrent());
            table.addCell(totalStats.getMerge() == null ? null : totalStats.getMerge().getCurrentNumDocs());
            table.addCell(primaryStats.getMerge() == null ? null : primaryStats.getMerge().getCurrentNumDocs());
            table.addCell(totalStats.getMerge() == null ? null : totalStats.getMerge().getCurrentSize());
            table.addCell(primaryStats.getMerge() == null ? null : primaryStats.getMerge().getCurrentSize());
            table.addCell(totalStats.getMerge() == null ? null : totalStats.getMerge().getTotal());
            table.addCell(primaryStats.getMerge() == null ? null : primaryStats.getMerge().getTotal());
            table.addCell(totalStats.getMerge() == null ? null : totalStats.getMerge().getTotalNumDocs());
            table.addCell(primaryStats.getMerge() == null ? null : primaryStats.getMerge().getTotalNumDocs());
            table.addCell(totalStats.getMerge() == null ? null : totalStats.getMerge().getTotalSize());
            table.addCell(primaryStats.getMerge() == null ? null : primaryStats.getMerge().getTotalSize());
            table.addCell(totalStats.getMerge() == null ? null : totalStats.getMerge().getTotalTime());
            table.addCell(primaryStats.getMerge() == null ? null : primaryStats.getMerge().getTotalTime());
            table.addCell(totalStats.getRefresh() == null ? null : totalStats.getRefresh().getTotal());
            table.addCell(primaryStats.getRefresh() == null ? null : primaryStats.getRefresh().getTotal());
            table.addCell(totalStats.getRefresh() == null ? null : totalStats.getRefresh().getTotalTime());
            table.addCell(primaryStats.getRefresh() == null ? null : primaryStats.getRefresh().getTotalTime());
            table.addCell(totalStats.getRefresh() == null ? null : totalStats.getRefresh().getExternalTotal());
            table.addCell(primaryStats.getRefresh() == null ? null : primaryStats.getRefresh().getExternalTotal());
            table.addCell(totalStats.getRefresh() == null ? null : totalStats.getRefresh().getExternalTotalTime());
            table.addCell(primaryStats.getRefresh() == null ? null : primaryStats.getRefresh().getExternalTotalTime());
            table.addCell(totalStats.getRefresh() == null ? null : totalStats.getRefresh().getListeners());
            table.addCell(primaryStats.getRefresh() == null ? null : primaryStats.getRefresh().getListeners());
            // --- search (fetch / query / scroll) ---
            table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getFetchCurrent());
            table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getFetchCurrent());
            table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getFetchTime());
            table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getFetchTime());
            table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getFetchCount());
            table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getFetchCount());
            table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getOpenContexts());
            table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getOpenContexts());
            table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getQueryCurrent());
            table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getQueryCurrent());
            table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getQueryTime());
            table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getQueryTime());
            table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getQueryCount());
            table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getQueryCount());
            table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getScrollCurrent());
            table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getScrollCurrent());
            table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getScrollTime());
            table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getScrollTime());
            table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getScrollCount());
            table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getScrollCount());
            // --- segments (the "segments.memory" columns always report ZERO here) ---
            table.addCell(totalStats.getSegments() == null ? null : totalStats.getSegments().getCount());
            table.addCell(primaryStats.getSegments() == null ? null : primaryStats.getSegments().getCount());
            table.addCell(totalStats.getSegments() == null ? null : ByteSizeValue.ZERO);
            table.addCell(primaryStats.getSegments() == null ? null : ByteSizeValue.ZERO);
            table.addCell(totalStats.getSegments() == null ? null : totalStats.getSegments().getIndexWriterMemory());
            table.addCell(primaryStats.getSegments() == null ? null : primaryStats.getSegments().getIndexWriterMemory());
            table.addCell(totalStats.getSegments() == null ? null : totalStats.getSegments().getVersionMapMemory());
            table.addCell(primaryStats.getSegments() == null ? null : primaryStats.getSegments().getVersionMapMemory());
            table.addCell(totalStats.getSegments() == null ? null : totalStats.getSegments().getBitsetMemory());
            table.addCell(primaryStats.getSegments() == null ? null : primaryStats.getSegments().getBitsetMemory());
            // --- warmer / suggest / memory total ---
            table.addCell(totalStats.getWarmer() == null ? null : totalStats.getWarmer().current());
            table.addCell(primaryStats.getWarmer() == null ? null : primaryStats.getWarmer().current());
            table.addCell(totalStats.getWarmer() == null ? null : totalStats.getWarmer().total());
            table.addCell(primaryStats.getWarmer() == null ? null : primaryStats.getWarmer().total());
            table.addCell(totalStats.getWarmer() == null ? null : totalStats.getWarmer().totalTime());
            table.addCell(primaryStats.getWarmer() == null ? null : primaryStats.getWarmer().totalTime());
            table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getSuggestCurrent());
            table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getSuggestCurrent());
            table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getSuggestTime());
            table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getSuggestTime());
            table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getSuggestCount());
            table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getSuggestCount());
            table.addCell(totalStats.getTotalMemory());
            table.addCell(primaryStats.getTotalMemory());
            // --- bulk / vectors ---
            table.addCell(totalStats.getBulk() == null ? null : totalStats.getBulk().getTotalOperations());
            table.addCell(primaryStats.getBulk() == null ? null : primaryStats.getBulk().getTotalOperations());
            table.addCell(totalStats.getBulk() == null ? null : totalStats.getBulk().getTotalTime());
            table.addCell(primaryStats.getBulk() == null ? null : primaryStats.getBulk().getTotalTime());
            table.addCell(totalStats.getBulk() == null ? null : totalStats.getBulk().getTotalSizeInBytes());
            table.addCell(primaryStats.getBulk() == null ? null : primaryStats.getBulk().getTotalSizeInBytes());
            table.addCell(totalStats.getBulk() == null ? null : totalStats.getBulk().getAvgTime());
            table.addCell(primaryStats.getBulk() == null ? null : primaryStats.getBulk().getAvgTime());
            table.addCell(totalStats.getBulk() == null ? null : totalStats.getBulk().getAvgSizeInBytes());
            table.addCell(primaryStats.getBulk() == null ? null : primaryStats.getBulk().getAvgSizeInBytes());
            table.addCell(totalStats.getDenseVectorStats() == null ? null : totalStats.getDenseVectorStats().getValueCount());
            table.addCell(primaryStats.getDenseVectorStats() == null ? null : primaryStats.getDenseVectorStats().getValueCount());
            table.addCell(totalStats.getSparseVectorStats() == null ? null : totalStats.getSparseVectorStats().getValueCount());
            table.addCell(primaryStats.getSparseVectorStats() == null ? null : primaryStats.getSparseVectorStats().getValueCount());
            table.endRow();
        });
        return table;
    }
}
|
RestIndicesAction
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/type/descriptor/java/FloatPrimitiveArrayJavaType.java
|
{
"start": 782,
"end": 5919
}
|
class ____ extends AbstractArrayJavaType<float[], Float> {
	// Shared stateless singleton; the type descriptor carries no mutable state.
	public static final FloatPrimitiveArrayJavaType INSTANCE = new FloatPrimitiveArrayJavaType();
	/**
	 * Private: use {@link #INSTANCE}. Delegates to the protected constructor with
	 * {@link FloatJavaType#INSTANCE} as the element descriptor.
	 */
	private FloatPrimitiveArrayJavaType() {
		this( FloatJavaType.INSTANCE );
	}
	/**
	 * Extension hook for subclasses that want a custom element descriptor.
	 *
	 * @param baseDescriptor descriptor for the {@code Float} element type
	 */
	protected FloatPrimitiveArrayJavaType(JavaType<Float> baseDescriptor) {
		super( float[].class, baseDescriptor, new ArrayMutabilityPlan() );
	}
@Override
public boolean isInstance(Object value) {
return value instanceof float[];
}
@Override
public String extractLoggableRepresentation(float[] value) {
return value == null ? super.extractLoggableRepresentation( null ) : Arrays.toString( value );
}
@Override
public boolean areEqual(float[] one, float[] another) {
return Arrays.equals( one, another );
}
@Override
public int extractHashCode(float[] value) {
return Arrays.hashCode( value );
}
@Override
public String toString(float[] value) {
if ( value == null ) {
return null;
}
final StringBuilder sb = new StringBuilder();
sb.append( '{' );
sb.append( value[0] );
for ( int i = 1; i < value.length; i++ ) {
sb.append( value[i] );
sb.append( ',' );
}
sb.append( '}' );
return sb.toString();
}
@Override
public float[] fromString(CharSequence charSequence) {
if ( charSequence == null ) {
return null;
}
final List<Float> list = new ArrayList<>();
final char lastChar = charSequence.charAt( charSequence.length() - 1 );
final char firstChar = charSequence.charAt( 0 );
if ( firstChar != '{' || lastChar != '}' ) {
throw new IllegalArgumentException( "Cannot parse given string into array of Floats. First and last character must be { and }" );
}
final int len = charSequence.length();
int elementStart = 1;
for ( int i = elementStart; i < len; i ++ ) {
final char c = charSequence.charAt( i );
if ( c == ',' ) {
list.add( Float.parseFloat( charSequence.subSequence( elementStart, i ).toString() ) );
elementStart = i + 1;
}
}
final float[] result = new float[list.size()];
for ( int i = 0; i < result.length; i ++ ) {
result[ i ] = list.get( i );
}
return result;
}
	/**
	 * Converts a {@code float[]} into the requested target type {@code X}.
	 * Supported targets, checked in order: the array's own type, any
	 * {@code Object[]} subtype (per-element unwrap), {@code byte[]} /
	 * {@link BinaryStream} (Java serialization of the whole array), and any
	 * other array type (per-element unwrap via reflection).
	 *
	 * @throws org.hibernate.HibernateException via {@code unknownUnwrap} when
	 *         the target type is not supported
	 */
	@Override
	public <X> X unwrap(float[] value, Class<X> type, WrapperOptions options) {
		if ( value == null ) {
			return null;
		}
		if ( type.isInstance( value ) ) {
			// Target already matches float[]; hand back the same instance.
			return (X) value;
		}
		else if ( Object[].class.isAssignableFrom( type ) ) {
			// Boxed/object array target: unwrap each element through the element descriptor.
			final Class<?> preferredJavaTypeClass = type.getComponentType();
			final Object[] unwrapped = (Object[]) Array.newInstance( preferredJavaTypeClass, value.length );
			for ( int i = 0; i < value.length; i++ ) {
				unwrapped[i] = getElementJavaType().unwrap( value[i], preferredJavaTypeClass, options );
			}
			return (X) unwrapped;
		}
		else if ( type == byte[].class ) {
			// byte[] can only be requested if the value should be serialized
			return (X) SerializationHelper.serialize( value );
		}
		else if ( type == BinaryStream.class ) {
			// BinaryStream can only be requested if the value should be serialized
			//noinspection unchecked
			return (X) new ArrayBackedBinaryStream( SerializationHelper.serialize( value ) );
		}
		else if ( type.isArray() ) {
			// Any remaining array target (e.g. another primitive array): element-wise via reflection.
			final Class<?> preferredJavaTypeClass = type.getComponentType();
			final Object unwrapped = Array.newInstance( preferredJavaTypeClass, value.length );
			for ( int i = 0; i < value.length; i++ ) {
				Array.set( unwrapped, i, getElementJavaType().unwrap( value[i], preferredJavaTypeClass, options ) );
			}
			return (X) unwrapped;
		}
		throw unknownUnwrap( type );
	}
	/**
	 * Converts an arbitrary supported value into a {@code float[]}.
	 * Accepts: a JDBC {@link java.sql.Array} (unwrapped first), a
	 * {@code float[]} (returned as-is), {@code byte[]} / {@link BinaryStream}
	 * (Java deserialization), any other array type (element-wise wrap), a
	 * single {@link Float} (becomes a one-element array), and any
	 * {@link Collection} (element-wise wrap).
	 *
	 * @throws org.hibernate.HibernateException via {@code unknownWrap} when the
	 *         value's type is not supported, or wrapping a SQLException from
	 *         {@code java.sql.Array#getArray()}
	 */
	@Override
	public <X> float[] wrap(X value, WrapperOptions options) {
		if ( value == null ) {
			return null;
		}
		if ( value instanceof java.sql.Array array ) {
			// Materialize the JDBC array first, then fall through to the checks below.
			try {
				//noinspection unchecked
				value = (X) array.getArray();
			}
			catch ( SQLException ex ) {
				// This basically shouldn't happen unless you've lost connection to the database.
				throw new HibernateException( ex );
			}
		}
		if ( value instanceof float[] floats ) {
			return floats;
		}
		else if ( value instanceof byte[] bytes ) {
			// When the value is a byte[], this is a deserialization request
			return (float[]) SerializationHelper.deserialize( bytes );
		}
		else if ( value instanceof BinaryStream binaryStream ) {
			// When the value is a BinaryStream, this is a deserialization request
			return (float[]) SerializationHelper.deserialize( binaryStream.getBytes() );
		}
		else if ( value.getClass().isArray() ) {
			// Any other array type (e.g. Float[], double[]): wrap element-by-element via reflection.
			final float[] wrapped = new float[Array.getLength( value )];
			for ( int i = 0; i < wrapped.length; i++ ) {
				wrapped[i] = getElementJavaType().wrap( Array.get( value, i ), options );
			}
			return wrapped;
		}
		else if ( value instanceof Float floatValue) {
			// Support binding a single element as parameter value
			return new float[]{ floatValue };
		}
		else if ( value instanceof Collection<?> collection ) {
			// Collection input: wrap each element in iteration order.
			final float[] wrapped = new float[collection.size()];
			int i = 0;
			for ( Object e : collection ) {
				wrapped[i++] = getElementJavaType().wrap( e, options );
			}
			return wrapped;
		}
		throw unknownWrap( value.getClass() );
	}
private static
|
FloatPrimitiveArrayJavaType
|
java
|
apache__maven
|
impl/maven-core/src/test/java/org/apache/maven/lifecycle/internal/builder/multithreaded/SmartProjectComparatorTest.java
|
{
"start": 1606,
"end": 9162
}
|
class ____ {
    // Comparator under test; orders projects by their downstream dependency-chain weight.
    private SmartProjectComparator comparator;
    // Stub fixture graph used by all tests: A -> B,C; B -> X,Y; C -> X,Z.
    private ProjectDependencyGraph dependencyGraph;
    @BeforeEach
    void setUp() {
        // Rebuild the fixture for every test so any weight caching inside the
        // comparator cannot leak state between test methods.
        dependencyGraph = new ProjectDependencyGraphStub();
        comparator = new SmartProjectComparator(dependencyGraph);
    }
@Test
void testProjectWeightCalculation() {
// Test that projects with longer downstream chains get higher weights
// Graph: A -> B,C; B -> X,Y; C -> X,Z
MavenProject projectA = ProjectDependencyGraphStub.A;
MavenProject projectB = ProjectDependencyGraphStub.B;
MavenProject projectC = ProjectDependencyGraphStub.C;
MavenProject projectX = ProjectDependencyGraphStub.X;
long weightA = comparator.getProjectWeight(projectA);
long weightB = comparator.getProjectWeight(projectB);
long weightC = comparator.getProjectWeight(projectC);
long weightX = comparator.getProjectWeight(projectX);
// Project A should have the highest weight as it's at the root
assertTrue(weightA > weightB, "Project A should have weight > Project B");
assertTrue(weightA > weightC, "Project A should have weight > Project C");
assertTrue(weightB > weightX, "Project B should have weight > Project X");
assertTrue(weightC > weightX, "Project C should have weight > Project X");
}
    @Test
    void testComparatorOrdering() {
        // Deliberately shuffled input: the comparator alone must restore weight order.
        List<MavenProject> projects = Arrays.asList(
            ProjectDependencyGraphStub.X,
            ProjectDependencyGraphStub.C,
            ProjectDependencyGraphStub.A,
            ProjectDependencyGraphStub.B);
        // Sort using the comparator
        projects.sort(comparator.getComparator());
        // Project A should come first (highest weight)
        assertEquals(
            ProjectDependencyGraphStub.A,
            projects.get(0),
            "Project A should be first (highest critical path weight)");
        // B and C should come before X (they have higher weights)
        assertTrue(
            projects.indexOf(ProjectDependencyGraphStub.B) < projects.indexOf(ProjectDependencyGraphStub.X),
            "Project B should come before X");
        assertTrue(
            projects.indexOf(ProjectDependencyGraphStub.C) < projects.indexOf(ProjectDependencyGraphStub.X),
            "Project C should come before X");
    }
@Test
void testWeightConsistency() {
// Test that weights are consistent across multiple calls
MavenProject project = ProjectDependencyGraphStub.A;
long weight1 = comparator.getProjectWeight(project);
long weight2 = comparator.getProjectWeight(project);
assertEquals(weight1, weight2, "Project weight should be consistent");
}
@Test
void testDependencyChainLength() {
// Test that projects with longer dependency chains get higher weights
// In the stub: A -> B,C; B -> X,Y; C -> X,Z
long weightA = comparator.getProjectWeight(ProjectDependencyGraphStub.A);
long weightB = comparator.getProjectWeight(ProjectDependencyGraphStub.B);
long weightC = comparator.getProjectWeight(ProjectDependencyGraphStub.C);
long weightX = comparator.getProjectWeight(ProjectDependencyGraphStub.X);
long weightY = comparator.getProjectWeight(ProjectDependencyGraphStub.Y);
long weightZ = comparator.getProjectWeight(ProjectDependencyGraphStub.Z);
// Verify the actual chain length calculation
// Leaf nodes (no downstream dependencies)
assertEquals(1L, weightX, "Project X should have weight 1 (1 + 0)");
assertEquals(1L, weightY, "Project Y should have weight 1 (1 + 0)");
assertEquals(1L, weightZ, "Project Z should have weight 1 (1 + 0)");
// Middle nodes
assertEquals(2L, weightB, "Project B should have weight 2 (1 + max(X=1, Y=1))");
assertEquals(2L, weightC, "Project C should have weight 2 (1 + max(X=1, Z=1))");
// Root node
assertEquals(3L, weightA, "Project A should have weight 3 (1 + max(B=2, C=2))");
}
@Test
void testSameWeightOrdering() {
// Test that projects with the same weight are ordered by project ID
// Projects B and C both have weight 2, so they should be ordered by project ID
List<MavenProject> projects = Arrays.asList(
ProjectDependencyGraphStub.C, // weight=2, ID contains "C"
ProjectDependencyGraphStub.B // weight=2, ID contains "B"
);
projects.sort(comparator.getComparator());
// Both have same weight (2), so ordering should be by project ID
// Project B should come before C alphabetically by project ID
assertEquals(
ProjectDependencyGraphStub.B,
projects.get(0),
"Project B should come before C when they have the same weight (ordered by project ID)");
assertEquals(
ProjectDependencyGraphStub.C,
projects.get(1),
"Project C should come after B when they have the same weight (ordered by project ID)");
// Verify they actually have the same weight
long weightB = comparator.getProjectWeight(ProjectDependencyGraphStub.B);
long weightC = comparator.getProjectWeight(ProjectDependencyGraphStub.C);
assertEquals(weightB, weightC, "Projects B and C should have the same weight");
}
@Test
void testConcurrentWeightCalculation() throws Exception {
// Test that concurrent weight calculation doesn't cause recursive update issues
// This test simulates the scenario that causes the IllegalStateException
int numThreads = 10;
int numIterations = 100;
ExecutorService executor = Executors.newFixedThreadPool(numThreads);
CountDownLatch latch = new CountDownLatch(numThreads);
AtomicReference<Exception> exception = new AtomicReference<>();
for (int i = 0; i < numThreads; i++) {
executor.submit(() -> {
try {
for (int j = 0; j < numIterations; j++) {
// Simulate concurrent access to weight calculation
// This can trigger the recursive update issue
List<MavenProject> projects = Arrays.asList(
ProjectDependencyGraphStub.A,
ProjectDependencyGraphStub.B,
ProjectDependencyGraphStub.C,
ProjectDependencyGraphStub.X,
ProjectDependencyGraphStub.Y,
ProjectDependencyGraphStub.Z);
// Sort projects concurrently - this triggers weight calculation
projects.sort(comparator.getComparator());
// Also directly access weights to increase contention
for (MavenProject project : projects) {
comparator.getProjectWeight(project);
}
}
} catch (Exception e) {
exception.set(e);
} finally {
latch.countDown();
}
});
}
latch.await(30, TimeUnit.SECONDS);
executor.shutdown();
if (exception.get() != null) {
throw exception.get();
}
}
}
|
SmartProjectComparatorTest
|
java
|
processing__processing4
|
java/test/processing/mode/java/preproc/MissingVariableNameMessageSimplifierStrategyTest.java
|
{
"start": 864,
"end": 924
}
|
class ____ {");
Assert.assertTrue(msg.isEmpty());
}
}
|
test
|
java
|
apache__kafka
|
streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedWindowStoreWithoutIndexTest.java
|
{
"start": 857,
"end": 1068
}
|
class ____ extends AbstractRocksDBWindowStoreTest {
@Override
StoreType storeType() {
return StoreType.RocksDBTimeOrderedWindowStoreWithoutIndex;
}
}
|
RocksDBTimeOrderedWindowStoreWithoutIndexTest
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/score/DecayDoubleEvaluator.java
|
{
"start": 4600,
"end": 5801
}
|
class ____ implements EvalOperator.ExpressionEvaluator.Factory {
private final Source source;
private final EvalOperator.ExpressionEvaluator.Factory value;
private final double origin;
private final double scale;
private final double offset;
private final double decay;
private final Decay.DecayFunction decayFunction;
public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory value, double origin,
double scale, double offset, double decay, Decay.DecayFunction decayFunction) {
this.source = source;
this.value = value;
this.origin = origin;
this.scale = scale;
this.offset = offset;
this.decay = decay;
this.decayFunction = decayFunction;
}
@Override
public DecayDoubleEvaluator get(DriverContext context) {
return new DecayDoubleEvaluator(source, value.get(context), origin, scale, offset, decay, decayFunction, context);
}
@Override
public String toString() {
return "DecayDoubleEvaluator[" + "value=" + value + ", origin=" + origin + ", scale=" + scale + ", offset=" + offset + ", decay=" + decay + ", decayFunction=" + decayFunction + "]";
}
}
}
|
Factory
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhancement/detached/reference/DetachedReferenceInitializationBatchFetchTest.java
|
{
"start": 1157,
"end": 6249
}
|
class ____ {
@Test
public void testDetachedAndPersistentEntity(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final var entityB = session.find( EntityB.class, 1L );
session.clear();
// put a different instance of EntityB in the persistence context
final var ignored = session.find( EntityB.class, 1L );
fetchQuery( entityB, session );
} );
}
@Test
public void testDetachedEntityAndPersistentInitializedProxy(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final var entityB = session.find( EntityB.class, 1L );
session.clear();
// put a different instance of EntityB in the persistence context
final var ignored = session.getReference( EntityB.class, 1L );
Hibernate.initialize( ignored );
fetchQuery( entityB, session );
} );
}
@Test
public void testDetachedEntityAndPersistentProxy(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final var entityB = session.find( EntityB.class, 1L );
session.clear();
// put a different instance of EntityB in the persistence context
final var ignored = session.getReference( EntityB.class, 1L );
fetchQuery( entityB, session );
} );
}
@Test
public void testDetachedProxyAndPersistentEntity(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final var entityB = session.getReference( EntityB.class, 1L );
session.clear();
// put a different instance of EntityB in the persistence context
final var ignored = session.find( EntityB.class, 1L );
fetchQuery( entityB, session );
} );
}
@Test
public void testDetachedProxyAndPersistentInitializedProxy(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final var entityB = session.getReference( EntityB.class, 1L );
session.clear();
// put a different instance of EntityB in the persistence context
final var ignored = session.getReference( EntityB.class, 1L );
Hibernate.initialize( ignored );
fetchQuery( entityB, session );
} );
}
@Test
public void testDetachedAndPersistentProxy(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final var entityB = session.getReference( EntityB.class, 1L );
session.clear();
// put a different instance of EntityB in the persistence context
final var ignored = session.getReference( EntityB.class, 1L );
fetchQuery( entityB, session );
} );
}
@Test
public void testDetachedInitializedProxyAndPersistentEntity(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final var entityB = session.getReference( EntityB.class, 1L );
Hibernate.initialize( entityB );
session.clear();
// put a different instance of EntityB in the persistence context
final var ignored = session.find( EntityB.class, 1L );
fetchQuery( entityB, session );
} );
}
@Test
public void testDetachedAndPersistentInitializedProxy(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final var entityB = session.getReference( EntityB.class, 1L );
Hibernate.initialize( entityB );
session.clear();
// put a different instance of EntityB in the persistence context
final var ignored = session.getReference( EntityB.class, 1L );
Hibernate.initialize( ignored );
fetchQuery( entityB, session );
} );
}
@Test
public void testDetachedInitializedProxyAndPersistentProxy(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final var entityB = session.getReference( EntityB.class, 1L );
Hibernate.initialize( entityB );
session.clear();
// put a different instance of EntityB in the persistence context
final var ignored = session.getReference( EntityB.class, 1L );
fetchQuery( entityB, session );
} );
}
@BeforeAll
public void setUp(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final var entityB = new EntityB();
entityB.id = 1L;
entityB.name = "b_1";
session.persist( entityB );
} );
}
@AfterEach
public void tearDown(SessionFactoryScope scope) {
scope.inTransaction( session -> {
session.createMutationQuery( "delete from EntityA" ).executeUpdate();
} );
}
private void fetchQuery(EntityB entityB, SessionImplementor session) {
final var entityA = new EntityA();
entityA.id = 1L;
entityA.b = entityB;
session.persist( entityA );
final var entityA2 = new EntityA();
entityA2.id = 2L;
session.persist( entityA2 );
final var wasInitialized = Hibernate.isInitialized( entityB );
final var result = session.createQuery(
"from EntityA a order by a.id",
EntityA.class
).getResultList().get( 0 );
assertThat( Hibernate.isInitialized( entityB ) ).isEqualTo( wasInitialized );
assertThat( result.b ).isSameAs( entityB );
final var id = session.getSessionFactory().getPersistenceUnitUtil().getIdentifier( entityB );
final var reference = session.getReference( EntityB.class, id );
assertThat( Hibernate.isInitialized( reference ) ).isTrue();
assertThat( reference ).isNotSameAs( entityB );
}
@Entity(name = "EntityA")
static
|
DetachedReferenceInitializationBatchFetchTest
|
java
|
spring-projects__spring-framework
|
spring-test/src/main/java/org/springframework/test/context/bean/override/mockito/MockitoSpyBeanOverrideHandler.java
|
{
"start": 3744,
"end": 4009
}
|
class ____ implements VerificationStartedListener {
@Override
public void onVerificationStarted(VerificationStartedEvent event) {
event.setMock(SpringMockResolver.getUltimateTargetObject(event.getMock()));
}
}
}
|
SpringAopBypassingVerificationStartedListener
|
java
|
apache__logging-log4j2
|
log4j-1.2-api/src/main/java/org/apache/log4j/builders/layout/PatternLayoutBuilder.java
|
{
"start": 1816,
"end": 4577
}
|
class ____ extends AbstractBuilder<Layout> implements LayoutBuilder {
private static final Logger LOGGER = StatusLogger.getLogger();
private static final String PATTERN = "ConversionPattern";
public PatternLayoutBuilder() {}
public PatternLayoutBuilder(final String prefix, final Properties props) {
super(prefix, props);
}
@Override
public Layout parse(final Element layoutElement, final XmlConfiguration config) {
final NodeList params = layoutElement.getElementsByTagName("param");
final int length = params.getLength();
String pattern = null;
for (int index = 0; index < length; ++index) {
final Node currentNode = params.item(index);
if (currentNode.getNodeType() == Node.ELEMENT_NODE) {
final Element currentElement = (Element) currentNode;
if (currentElement.getTagName().equals(PARAM_TAG)) {
if (PATTERN.equalsIgnoreCase(currentElement.getAttribute("name"))) {
pattern = currentElement.getAttribute("value");
break;
}
}
}
}
return createLayout(pattern, config);
}
@Override
public Layout parse(final PropertiesConfiguration config) {
final String pattern = getProperty(PATTERN);
return createLayout(pattern, config);
}
Layout createLayout(String pattern, final Log4j1Configuration config) {
if (pattern == null) {
LOGGER.info("No pattern provided for pattern layout, using default pattern");
pattern = PatternLayout.DEFAULT_CONVERSION_PATTERN;
}
return LayoutWrapper.adapt(PatternLayout.newBuilder()
.setPattern(pattern
// Log4j 2 and Log4j 1 level names differ for custom levels
.replaceAll("%([-\\.\\d]*)p(?!\\w)", "%$1v1Level")
// Log4j 2's %x (NDC) is not compatible with Log4j 1's
// %x
// Log4j 1: "foo bar baz"
// Log4j 2: "[foo, bar, baz]"
// Use %ndc to get the Log4j 1 format
.replaceAll("%([-\\.\\d]*)x(?!\\w)", "%$1ndc")
// Log4j 2's %X (MDC) is not compatible with Log4j 1's
// %X
// Log4j 1: "{{foo,bar}{hoo,boo}}"
// Log4j 2: "{foo=bar,hoo=boo}"
// Use %properties to get the Log4j 1 format
.replaceAll("%([-\\.\\d]*)X(?!\\w)", "%$1properties"))
.setConfiguration(config)
.build());
}
}
|
PatternLayoutBuilder
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/tofix/ObjectIdWithInjectable639Test.java
|
{
"start": 348,
"end": 542
}
|
class ____ extends DatabindTestUtil {
// for [databind#639]
@JsonIdentityInfo(generator = ObjectIdGenerators.IntSequenceGenerator.class)
public static final
|
ObjectIdWithInjectable639Test
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java
|
{
"start": 1802,
"end": 3850
}
|
class ____
extends AbstractFSNodeStore<CommonNodeLabelsManager>
implements NodeLabelsStore {
protected static final Logger LOG =
LoggerFactory.getLogger(FileSystemNodeLabelsStore.class);
protected static final String DEFAULT_DIR_NAME = "node-labels";
protected static final String MIRROR_FILENAME = "nodelabel.mirror";
protected static final String EDITLOG_FILENAME = "nodelabel.editlog";
FileSystemNodeLabelsStore() {
super(StoreType.NODE_LABEL_STORE);
}
private String getDefaultFSNodeLabelsRootDir() throws IOException {
// default is in local: /tmp/hadoop-yarn-${user}/node-labels/
return "file:///tmp/hadoop-yarn-" + UserGroupInformation.getCurrentUser()
.getShortUserName() + "/" + DEFAULT_DIR_NAME;
}
@Override
public void init(Configuration conf, CommonNodeLabelsManager mgr)
throws Exception {
StoreSchema schema = new StoreSchema(EDITLOG_FILENAME, MIRROR_FILENAME);
initStore(conf, new Path(
conf.get(YarnConfiguration.FS_NODE_LABELS_STORE_ROOT_DIR,
getDefaultFSNodeLabelsRootDir())), schema, mgr);
}
@Override
public void close() throws IOException {
super.closeFSStore();
}
@Override
public void updateNodeToLabelsMappings(Map<NodeId, Set<String>> nodeToLabels)
throws IOException {
NodeToLabelOp op = new NodeToLabelOp();
writeToLog(op.setNodeToLabels(nodeToLabels));
}
@Override
public void storeNewClusterNodeLabels(List<NodeLabel> labels)
throws IOException {
AddClusterLabelOp op = new AddClusterLabelOp();
writeToLog(op.setLabels(labels));
}
@Override
public void removeClusterNodeLabels(Collection<String> labels)
throws IOException {
RemoveClusterLabelOp op = new RemoveClusterLabelOp();
writeToLog(op.setLabels(labels));
}
/* (non-Javadoc)
* @see org.apache.hadoop.yarn.nodelabels.NodeLabelsStore#recover(boolean)
*/
@Override
public void recover() throws YarnException, IOException {
super.recoverFromStore();
}
}
|
FileSystemNodeLabelsStore
|
java
|
apache__kafka
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/WakeupTrigger.java
|
{
"start": 7414,
"end": 7732
}
|
class ____ implements Wakeupable {
private final ShareFetchBuffer fetchBuffer;
public ShareFetchAction(ShareFetchBuffer fetchBuffer) {
this.fetchBuffer = fetchBuffer;
}
public ShareFetchBuffer fetchBuffer() {
return fetchBuffer;
}
}
}
|
ShareFetchAction
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/index/mapper/IpRangeFieldMapperTests.java
|
{
"start": 1180,
"end": 8908
}
|
class ____ extends RangeFieldMapperTests {
@Override
protected void minimalMapping(XContentBuilder b) throws IOException {
b.field("type", "ip_range");
}
@Override
protected XContentBuilder rangeSource(XContentBuilder in) throws IOException {
return in.startObject("field").field("gt", "::ffff:c0a8:107").field("lt", "2001:db8::").endObject();
}
@Override
protected String storedValue() {
return InetAddresses.toAddrString(InetAddresses.forString("192.168.1.7"))
+ " : "
+ InetAddresses.toAddrString(InetAddresses.forString("2001:db8:0:0:0:0:0:0"));
}
@Override
protected boolean supportsCoerce() {
return false;
}
@Override
protected Object rangeValue() {
return "192.168.1.7";
}
@Override
protected boolean supportsDecimalCoerce() {
return false;
}
public void testStoreCidr() throws Exception {
DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", "ip_range").field("store", true)));
final Map<String, String> cases = new HashMap<>();
cases.put("192.168.0.0/15", "192.169.255.255");
cases.put("192.168.0.0/16", "192.168.255.255");
cases.put("192.168.0.0/17", "192.168.127.255");
for (final Map.Entry<String, String> entry : cases.entrySet()) {
ParsedDocument doc = mapper.parse(source(b -> b.field("field", entry.getKey())));
List<IndexableField> fields = doc.rootDoc().getFields("field");
assertEquals(3, fields.size());
IndexableField dvField = fields.get(0);
assertEquals(DocValuesType.BINARY, dvField.fieldType().docValuesType());
IndexableField pointField = fields.get(1);
assertEquals(2, pointField.fieldType().pointIndexDimensionCount());
IndexableField storedField = fields.get(2);
assertTrue(storedField.fieldType().stored());
String strVal = InetAddresses.toAddrString(InetAddresses.forString("192.168.0.0"))
+ " : "
+ InetAddresses.toAddrString(InetAddresses.forString(entry.getValue()));
assertThat(storedField.stringValue(), containsString(strVal));
}
}
@SuppressWarnings("unchecked")
public void testValidSyntheticSource() throws IOException {
CheckedConsumer<XContentBuilder, IOException> mapping = b -> {
b.startObject("field");
b.field("type", "ip_range");
if (rarely()) {
b.field("index", false);
}
if (rarely()) {
b.field("store", false);
}
b.endObject();
};
var values = randomList(1, 5, this::generateValue);
var inputValues = values.stream().map(Tuple::v1).toList();
var expectedValues = values.stream().map(Tuple::v2).toList();
var source = getSourceFor(mapping, inputValues);
// This is the main reason why we need custom logic.
// IP ranges are serialized into binary doc values in unpredictable order
// because API uses a set.
// So this assert needs to be not sensitive to order and in "reference"
// implementation of tests from MapperTestCase it is.
var actual = source.source().get("field");
var expected = new HashSet<>(expectedValues);
if (expected.size() == 1) {
assertEquals(expectedValues.get(0), actual);
} else {
assertThat(actual, instanceOf(List.class));
assertTrue(((List<Object>) actual).containsAll(expected));
}
}
private Tuple<Object, Map<String, Object>> generateValue() {
String cidr = randomCidrBlock();
InetAddresses.IpRange range = InetAddresses.parseIpRangeFromCidr(cidr);
var includeFrom = randomBoolean();
var includeTo = randomBoolean();
Object input;
// "to" field always comes first.
Map<String, Object> output = new LinkedHashMap<>();
if (randomBoolean()) {
// CIDRs are always inclusive ranges.
input = cidr;
var from = InetAddresses.toAddrString(range.lowerBound());
inclusiveFrom(output, from);
var to = InetAddresses.toAddrString(range.upperBound());
inclusiveTo(output, to);
} else {
var fromKey = includeFrom ? "gte" : "gt";
var toKey = includeTo ? "lte" : "lt";
var from = rarely() ? null : InetAddresses.toAddrString(range.lowerBound());
var to = rarely() ? null : InetAddresses.toAddrString(range.upperBound());
input = (ToXContent) (builder, params) -> {
builder.startObject();
if (includeFrom && from == null && randomBoolean()) {
// skip field entirely since it is equivalent to a default value
} else {
builder.field(fromKey, from);
}
if (includeTo && to == null && randomBoolean()) {
// skip field entirely since it is equivalent to a default value
} else {
builder.field(toKey, to);
}
return builder.endObject();
};
if (includeFrom) {
inclusiveFrom(output, from);
} else {
var fromWithDefaults = from != null ? range.lowerBound() : (InetAddress) rangeType().minValue();
var adjustedFrom = (InetAddress) rangeType().nextUp(fromWithDefaults);
output.put("gte", InetAddresses.toAddrString(adjustedFrom));
}
if (includeTo) {
inclusiveTo(output, to);
} else {
var toWithDefaults = to != null ? range.upperBound() : (InetAddress) rangeType().maxValue();
var adjustedTo = (InetAddress) rangeType().nextDown(toWithDefaults);
output.put("lte", InetAddresses.toAddrString(adjustedTo));
}
}
return Tuple.tuple(input, output);
}
private void inclusiveFrom(Map<String, Object> output, String from) {
// This is helpful since different representations can map to "::"
var normalizedMin = InetAddresses.toAddrString((InetAddress) rangeType().minValue());
if (from != null && from.equals(normalizedMin) == false) {
output.put("gte", from);
} else {
output.put("gte", null);
}
}
private void inclusiveTo(Map<String, Object> output, String to) {
var normalizedMax = InetAddresses.toAddrString((InetAddress) rangeType().maxValue());
if (to != null && to.equals(normalizedMax) == false) {
output.put("lte", to);
} else {
output.put("lte", null);
}
}
@Override
protected SyntheticSourceSupport syntheticSourceSupport(boolean ignoreMalformed) {
throw new AssumptionViolatedException("custom version of synthetic source tests is implemented");
}
private static String randomCidrBlock() {
boolean ipv4 = randomBoolean();
InetAddress address = randomIp(ipv4);
// exclude smallest prefix lengths to avoid empty ranges
int prefixLength = ipv4 ? randomIntBetween(0, 30) : randomIntBetween(0, 126);
return InetAddresses.toCidrString(address, prefixLength);
}
@Override
protected RangeType rangeType() {
return RangeType.IP;
}
@Override
protected IngestScriptSupport ingestScriptSupport() {
throw new AssumptionViolatedException("not supported");
}
}
|
IpRangeFieldMapperTests
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/dialect/function/json/HANAJsonObjectFunction.java
|
{
"start": 642,
"end": 4749
}
|
class ____ extends JsonObjectFunction {
public HANAJsonObjectFunction(TypeConfiguration typeConfiguration) {
super( typeConfiguration, true );
}
@Override
public void render(
SqlAppender sqlAppender,
List<? extends SqlAstNode> sqlAstArguments,
ReturnableType<?> returnType,
SqlAstTranslator<?> walker) {
if ( sqlAstArguments.isEmpty() ) {
sqlAppender.appendSql( "'{}'" );
return;
}
final JsonNullBehavior nullBehavior;
final int argumentsCount;
if ( ( sqlAstArguments.size() & 1 ) == 1 ) {
nullBehavior = (JsonNullBehavior) sqlAstArguments.get( sqlAstArguments.size() - 1 );
argumentsCount = sqlAstArguments.size() - 1;
}
else {
nullBehavior = JsonNullBehavior.NULL;
argumentsCount = sqlAstArguments.size();
}
final List<String> jsonArgumentFields = getJsonArgumentFields( sqlAstArguments, argumentsCount, walker );
sqlAppender.appendSql( '(' );
replaceJsonArgumentsEscaping(
sqlAppender,
sqlAstArguments,
walker,
0,
jsonArgumentFields,
argumentsCount,
nullBehavior
);
sqlAppender.appendSql( ')' );
}
private static void replaceJsonArgumentsEscaping(
SqlAppender sqlAppender,
List<? extends SqlAstNode> sqlAstArguments,
SqlAstTranslator<?> walker,
int jsonArg,
List<String> jsonArgumentFields,
int argumentsCount,
JsonNullBehavior nullBehavior) {
if ( jsonArg < jsonArgumentFields.size() ) {
// Take the substring before the match
sqlAppender.appendSql( "select substring(t.x, 1, locate_regexpr(r.x in t.x) - 2)" );
// The match itself after replacing double backslashes and backslash escaped quotes
sqlAppender.appendSql( "|| replace(replace(substr_regexpr(r.x in t.x),'\\\\','\\'),'\\\"','\"')" );
// And the rest of the string after the match
sqlAppender.appendSql( "|| substring(t.x, locate_regexpr(r.x in t.x) + length(substr_regexpr(r.x in t.x)) + 1) x");
sqlAppender.appendSql( " from (" );
replaceJsonArgumentsEscaping(
sqlAppender,
sqlAstArguments,
walker,
jsonArg + 1,
jsonArgumentFields,
argumentsCount,
nullBehavior
);
sqlAppender.appendSql( ") t" );
sqlAppender.appendSql( ",(select '" );
sqlAppender.appendSql( valueExtractionPattern( jsonArgumentFields.get( jsonArg ) ) );
sqlAppender.appendSql( "' x from sys.dummy) r" );
}
else {
sqlAppender.appendSql( "select t.jsonresult x from (select" );
char separator = ' ';
for ( int i = 0; i < argumentsCount; i += 2 ) {
sqlAppender.appendSql( separator );
final SqlAstNode key = sqlAstArguments.get( i );
final SqlAstNode value = sqlAstArguments.get( i + 1 );
value.accept( walker );
sqlAppender.appendSql( ' ' );
final String literalValue = walker.getLiteralValue( (Expression) key );
sqlAppender.appendDoubleQuoteEscapedString( literalValue );
separator = ',';
}
sqlAppender.appendSql( " from sys.dummy for json('arraywrap'='no'" );
if ( nullBehavior == JsonNullBehavior.NULL ) {
sqlAppender.appendSql( ",'omitnull'='no'" );
}
sqlAppender.appendSql( ")) t" );
}
}
private List<String> getJsonArgumentFields(
List<? extends SqlAstNode> sqlAstArguments,
int argumentsCount,
SqlAstTranslator<?> walker) {
final ArrayList<String> jsonArgumentIndexes = new ArrayList<>();
for ( int i = 0; i < argumentsCount; i += 2 ) {
if ( ExpressionTypeHelper.isJson( sqlAstArguments.get( i + 1 ) ) ) {
jsonArgumentIndexes.add( walker.getLiteralValue( (Expression) sqlAstArguments.get( i ) ) );
}
}
return jsonArgumentIndexes;
}
private static String valueExtractionPattern(String attributeName) {
// (?<!\\) ensures the next character is not preceded by a backslash
// (?<=\"" + attributeName + "\":\") ensures the match is preceded by `"attributeName":"`
// .*? is a non-greedy match for all chars
// (?<!\\) ensures that the next character is not preceded by a backslash
// (?=") ensures that the character after our match is a double quote
return "(?<!\\\\)(?<=\"" + Pattern.quote( attributeName ) + "\":\").*?(?<!\\\\)(?=\")";
}
}
|
HANAJsonObjectFunction
|
java
|
spring-projects__spring-boot
|
module/spring-boot-security-saml2/src/main/java/org/springframework/boot/security/saml2/autoconfigure/Saml2RelyingPartyProperties.java
|
{
"start": 4916,
"end": 5274
}
|
class ____ {
/**
* Credentials used for decrypting the SAML authentication request.
*/
private List<Credential> credentials = new ArrayList<>();
public List<Credential> getCredentials() {
return this.credentials;
}
public void setCredentials(List<Credential> credentials) {
this.credentials = credentials;
}
public static
|
Decryption
|
java
|
apache__kafka
|
connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/Plugins.java
|
{
"start": 25559,
"end": 28525
}
|
class ____ not be found
*/
public HeaderConverter newHeaderConverter(AbstractConfig config, String classPropertyName, String versionPropertyName) {
ClassLoaderUsage classLoader = config.getString(versionPropertyName) == null ? ClassLoaderUsage.CURRENT_CLASSLOADER : ClassLoaderUsage.PLUGINS;
return newHeaderConverter(config, classPropertyName, versionPropertyName, classLoader);
}
private HeaderConverter newHeaderConverter(AbstractConfig config, String classPropertyName, String versionPropertyName, ClassLoaderUsage classLoaderUsage) {
if (config.getClass(classPropertyName) == null && classLoaderUsage == ClassLoaderUsage.CURRENT_CLASSLOADER) {
// This configuration does not define the Header Converter via the specified property name
return null;
}
HeaderConverter plugin = newVersionedPlugin(config, classPropertyName, versionPropertyName,
HeaderConverter.class, classLoaderUsage, scanResult.headerConverters());
String configPrefix = classPropertyName + ".";
Map<String, Object> converterConfig = config.originalsWithPrefix(configPrefix);
converterConfig.put(ConverterConfig.TYPE_CONFIG, ConverterType.HEADER.getName());
log.debug("Configuring the header converter with configuration keys:{}{}", System.lineSeparator(), converterConfig.keySet());
try (LoaderSwap loaderSwap = safeLoaderSwapper().apply(plugin.getClass().getClassLoader())) {
plugin.configure(converterConfig);
}
return plugin;
}
@SuppressWarnings({"unchecked", "rawtypes"})
private <U> U newVersionedPlugin(
AbstractConfig config,
String classPropertyName,
String versionPropertyName,
Class basePluginClass,
ClassLoaderUsage classLoaderUsage,
SortedSet<PluginDesc<U>> availablePlugins
) {
String version = versionPropertyName == null ? null : config.getString(versionPropertyName);
VersionRange range = null;
if (version != null) {
try {
range = PluginUtils.connectorVersionRequirement(version);
} catch (InvalidVersionSpecificationException e) {
throw new ConnectException(String.format("Invalid version range for %s: %s", classPropertyName, version), e);
}
}
assert range == null || classLoaderUsage == ClassLoaderUsage.PLUGINS;
Class<? extends U> klass = null;
String basePluginClassName = basePluginClass.getSimpleName();
switch (classLoaderUsage) {
case CURRENT_CLASSLOADER:
// Attempt to load first with the current classloader, and plugins as a fallback.
klass = pluginClassFromConfig(config, classPropertyName, basePluginClass, availablePlugins);
break;
case PLUGINS:
// Attempt to load with the plugin
|
could
|
java
|
assertj__assertj-core
|
assertj-core/src/main/java/org/assertj/core/api/AssertWithComparator.java
|
{
"start": 724,
"end": 5211
}
|
interface ____<SELF extends Assert<SELF, ACTUAL>, ACTUAL> {
/**
* Use the given custom comparator instead of relying on actual type A equals method for incoming assertion checks.
* <p>
* The custom comparator is bound to assertion instance, meaning that if a new assertion instance is created, the default
* comparison strategy will be used.
* <p>
* Examples :
* <pre><code class='java'> // frodo and sam are instances of Character with Hobbit race (obviously :).
* // raceComparator implements Comparator<Character>
* assertThat(frodo).usingComparator(raceComparator).isEqualTo(sam);</code></pre>
*
* @param customComparator the comparator to use for the incoming assertion checks.
* @return {@code this} assertion object.
* @throws NullPointerException if the given comparator is {@code null}.
*/
default SELF usingComparator(Comparator<? super ACTUAL> customComparator) {
return usingComparator(customComparator, null);
}
/**
* Use the given custom comparator instead of relying on actual type A equals method for incoming assertion checks.
* <p>
* The custom comparator is bound to the current assertion chain, meaning that if a new assertion instance is created, the default
* comparison strategy will be used.
* <p>
* Examples :
* <pre><code class='java'> // frodo and sam are instances of Character with Hobbit race (obviously :).
* // raceComparator implements Comparator<Character>
* assertThat(frodo).usingComparator(raceComparator, "Hobbit Race Comparator").isEqualTo(sam);</code></pre>
*
* @param customComparator the comparator to use for the incoming assertion checks.
* @param customComparatorDescription comparator description to be used in assertion error messages
* @return {@code this} assertion object.
* @throws NullPointerException if the given comparator is {@code null}.
*/
SELF usingComparator(Comparator<? super ACTUAL> customComparator, String customComparatorDescription);
/**
* Uses the given custom {@link BiPredicate} instead of relying on actual type A {@code equals} method
* for incoming assertion checks.
* <p>
* The custom equals is bound to the current assertion chain, meaning that if a new assertion instance is created, the default
* comparison strategy will be used.
* <p>
* Examples:
* <pre><code class='java'> // frodo and sam are instances of Character of Hobbit race (obviously :).
* assertThat(frodo).usingEquals((f, s) -> f.race() == s.race()).isEqualTo(sam);</code></pre>
*
* @param predicate the predicate to use for the incoming assertion checks.
* @return {@code this} assertion object.
* @throws NullPointerException if the given biPredicate is {@code null}.
*/
default SELF usingEquals(BiPredicate<? super ACTUAL, ? super ACTUAL> predicate) {
return usingEquals(predicate, null);
}
/**
* Uses the given custom {@link BiPredicate} instead of relying on actual type A {@code equals} method
* for incoming assertion checks. The given description is present in the assertion error if the assertion fails.
* <p>
* The custom equals is bound to the current assertion chain, meaning that if a new assertion instance is created, the default
* comparison strategy will be used.
* <p>
* Examples:
* <pre><code class='java'> // frodo and sam are instances of Character of Hobbit race (obviously :).
* assertThat(frodo).usingEquals((f, s) -> f.race() == s.race(), "comparing race").isEqualTo(sam);</code></pre>
*
* @param predicate the predicate to use for the incoming assertion checks.
* @param customEqualsDescription comparator description to be used in assertion error messages
* @return {@code this} assertion object.
* @throws NullPointerException if the given comparator is {@code null}.
*/
@SuppressWarnings("ComparatorMethodParameterNotUsed")
default SELF usingEquals(BiPredicate<? super ACTUAL, ? super ACTUAL> predicate, String customEqualsDescription) {
return usingComparator((o1, o2) -> predicate.test(o1, o2) ? 0 : -1, customEqualsDescription);
}
/**
* Revert to standard comparison for the incoming assertion checks.
* <p>
* This method should be used to disable a custom comparison strategy set by calling {@link #usingComparator(Comparator) usingComparator}.
*
* @return {@code this} assertion object.
*/
SELF usingDefaultComparator();
}
|
AssertWithComparator
|
java
|
quarkusio__quarkus
|
integration-tests/jpa-postgresql/src/main/java/io/quarkus/it/jpa/postgresql/JPATestReflectionEndpoint.java
|
{
"start": 909,
"end": 6457
}
|
class ____ {
@GET
public String test() throws SQLException, TransformerException, IOException {
List<String> errors = new ArrayList<>();
makeSureNonEntityAreDCE(errors);
makeSureEntitiesAreAccessibleViaReflection(errors);
makeSureNonAnnotatedEmbeddableAreAccessibleViaReflection(errors);
makeSureAnnotatedEmbeddableAreAccessibleViaReflection(errors);
String packageName = this.getClass().getPackage().getName();
makeSureClassAreAccessibleViaReflection(packageName + ".defaultpu.Human", "Unable to enlist @MappedSuperclass", errors);
makeSureClassAreAccessibleViaReflection(packageName + ".defaultpu.Animal", "Unable to enlist entity superclass",
errors);
if (errors.isEmpty()) {
return "OK";
} else {
return String.join("\n", errors);
}
}
private void makeSureClassAreAccessibleViaReflection(String className, String errorMessage, List<String> errors)
throws IOException {
try {
className = getTrickedClassName(className);
Class<?> custClass = Class.forName(className);
Object instance = custClass.getDeclaredConstructor().newInstance();
} catch (Exception e) {
reportException(errorMessage, e, errors);
}
}
private void makeSureEntitiesAreAccessibleViaReflection(List<String> errors) throws IOException {
try {
String className = getTrickedClassName(Customer.class.getName());
Class<?> custClass = Class.forName(className);
Object instance = custClass.getDeclaredConstructor().newInstance();
Field id = custClass.getDeclaredField("id");
id.setAccessible(true);
if (id.get(instance) != null) {
errors.add("id should be reachable and null");
}
Method setter = custClass.getDeclaredMethod("setName", String.class);
Method getter = custClass.getDeclaredMethod("getName");
setter.invoke(instance, "Emmanuel");
if (!"Emmanuel".equals(getter.invoke(instance))) {
errors.add("getter / setter should be reachable and usable");
}
} catch (Exception e) {
reportException(e, errors);
}
}
private void makeSureAnnotatedEmbeddableAreAccessibleViaReflection(List<String> errors) throws IOException {
try {
String className = getTrickedClassName(WorkAddress.class.getName());
Class<?> custClass = Class.forName(className);
Object instance = custClass.getDeclaredConstructor().newInstance();
Method setter = custClass.getDeclaredMethod("setCompany", String.class);
Method getter = custClass.getDeclaredMethod("getCompany");
setter.invoke(instance, "Red Hat");
if (!"Red Hat".equals(getter.invoke(instance))) {
errors.add("@Embeddable embeddable should be reachable and usable");
}
} catch (Exception e) {
reportException(e, errors);
}
}
private void makeSureNonAnnotatedEmbeddableAreAccessibleViaReflection(List<String> errors) throws IOException {
try {
String className = getTrickedClassName(Address.class.getName());
Class<?> custClass = Class.forName(className);
Object instance = custClass.getDeclaredConstructor().newInstance();
Method setter = custClass.getDeclaredMethod("setStreet1", String.class);
Method getter = custClass.getDeclaredMethod("getStreet1");
setter.invoke(instance, "1 rue du General Leclerc");
if (!"1 rue du General Leclerc".equals(getter.invoke(instance))) {
errors.add("Non @Embeddable embeddable getter / setter should be reachable and usable");
}
} catch (Exception e) {
reportException(e, errors);
}
}
private void makeSureNonEntityAreDCE(List<String> errors) {
try {
String className = getTrickedClassName(NotAnEntityNotReferenced.class.getName());
Class<?> custClass = Class.forName(className);
errors.add("Should not be able to find a non referenced non entity class");
Object instance = custClass.getDeclaredConstructor().newInstance();
} catch (Exception e) {
// Expected outcome
}
}
/**
* Trick SubstrateVM not to detect a simple use of Class.forname
*/
private String getTrickedClassName(String className) {
className = className + " ITrickYou";
className = className.subSequence(0, className.indexOf(' ')).toString();
return className;
}
private void reportException(final Exception e, final List<String> errors) throws IOException {
reportException(null, e, errors);
}
private void reportException(String errorMessage, final Exception e, final List<String> errors) throws IOException {
StringWriter stringWriter = new StringWriter();
final PrintWriter writer = new PrintWriter(stringWriter);
if (errorMessage != null) {
writer.write(errorMessage);
writer.write(" ");
}
if (e.getMessage() != null) {
writer.write(e.getMessage());
}
writer.append("\n\t");
e.printStackTrace(writer);
writer.append("\n\t");
errors.add(stringWriter.toString());
}
}
|
JPATestReflectionEndpoint
|
java
|
redisson__redisson
|
redisson/src/main/java/org/redisson/cache/LocalCacheView.java
|
{
"start": 6858,
"end": 11126
}
|
class ____ extends AbstractMap<K, V> {
@Override
public V get(Object key) {
CacheKey cacheKey = toCacheKey(key);
CacheValue e = cache.get(cacheKey);
if (e != null) {
return (V) e.getValue();
}
return null;
}
@Override
public boolean containsKey(Object key) {
CacheKey cacheKey = toCacheKey(key);
return cache.containsKey(cacheKey);
}
@Override
public boolean containsValue(Object value) {
CacheValue cacheValue = new CacheValue(null, value);
return cache.containsValue(cacheValue);
}
@Override
public Set<Entry<K, V>> entrySet() {
return cachedEntrySet();
}
}
protected V toValue(CacheValue cv) {
return (V) cv.getValue();
}
public CacheKey toCacheKey(Object key) {
CacheKey cacheKey;
if (useObjectAsCacheKey) {
cacheKey = cacheKeyMap.get(key);
if (cacheKey != null) {
return cacheKey;
}
}
ByteBuf encoded = encodeMapKey(key);
try {
return toCacheKey(encoded);
} finally {
encoded.release();
}
}
protected ByteBuf encodeMapKey(Object key) {
return object.encodeMapKey(key);
}
public void putCacheKey(Object key, CacheKey cacheKey) {
if (useObjectAsCacheKey) {
cacheKeyMap.put(key, cacheKey);
}
}
public CacheKey toCacheKey(ByteBuf encodedKey) {
return new CacheKey(Hash.hash128toArray(encodedKey));
}
public <K1, V1> ConcurrentMap<K1, V1> getCache() {
return (ConcurrentMap<K1, V1>) cache;
}
public ConcurrentMap<Object, CacheKey> getCacheKeyMap() {
return cacheKeyMap;
}
public <K1, V1> ConcurrentMap<K1, V1> createCache(LocalCachedMapOptions<?, ?> options) {
if (options.getCacheSize() == -1) {
return new NoOpCacheMap<>();
}
if (options.getCacheProvider() == LocalCachedMapOptions.CacheProvider.CAFFEINE) {
Caffeine<Object, Object> caffeineBuilder = Caffeine.newBuilder();
if (options.getTimeToLiveInMillis() > 0) {
caffeineBuilder.expireAfterWrite(options.getTimeToLiveInMillis(), TimeUnit.MILLISECONDS);
}
if (options.getMaxIdleInMillis() > 0) {
caffeineBuilder.expireAfterAccess(options.getMaxIdleInMillis(), TimeUnit.MILLISECONDS);
}
if (options.getCacheSize() > 0) {
caffeineBuilder.maximumSize(options.getCacheSize());
}
if (options.getEvictionPolicy() == LocalCachedMapOptions.EvictionPolicy.SOFT) {
caffeineBuilder.softValues();
}
if (options.getEvictionPolicy() == LocalCachedMapOptions.EvictionPolicy.WEAK) {
caffeineBuilder.weakValues();
}
return caffeineBuilder.<K1, V1>build().asMap();
}
if (options.getEvictionPolicy() == LocalCachedMapOptions.EvictionPolicy.NONE) {
return new NoneCacheMap<>(options.getTimeToLiveInMillis(), options.getMaxIdleInMillis());
}
if (options.getEvictionPolicy() == LocalCachedMapOptions.EvictionPolicy.LRU) {
return new LRUCacheMap<>(options.getCacheSize(), options.getTimeToLiveInMillis(), options.getMaxIdleInMillis());
}
if (options.getEvictionPolicy() == LocalCachedMapOptions.EvictionPolicy.LFU) {
return new LFUCacheMap<>(options.getCacheSize(), options.getTimeToLiveInMillis(), options.getMaxIdleInMillis());
}
if (options.getEvictionPolicy() == LocalCachedMapOptions.EvictionPolicy.SOFT) {
return ReferenceCacheMap.soft(options.getTimeToLiveInMillis(), options.getMaxIdleInMillis());
}
if (options.getEvictionPolicy() == LocalCachedMapOptions.EvictionPolicy.WEAK) {
return ReferenceCacheMap.weak(options.getTimeToLiveInMillis(), options.getMaxIdleInMillis());
}
throw new IllegalArgumentException("Invalid eviction policy: " + options.getEvictionPolicy());
}
}
|
LocalMap
|
java
|
quarkusio__quarkus
|
extensions/arc/deployment/src/test/java/io/quarkus/arc/test/transform/injectionPoint/SomeDecorator.java
|
{
"start": 280,
"end": 581
}
|
class ____ implements PrivateFieldInjectionTest.DecoratedBean {
@Inject
@Delegate
PrivateFieldInjectionTest.DecoratedBean delegate;
@Inject
private DummyBean bean;
@Override
public String ping() {
return bean.generateString() + delegate.ping();
}
}
|
SomeDecorator
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/boot/jaxb/mapping/spi/db/JaxbColumnStandard.java
|
{
"start": 281,
"end": 533
}
|
interface ____
extends JaxbColumn, JaxbColumnMutable, JaxbCheckable, JaxbColumnNullable, JaxbColumnUniqueable,
JaxbColumnDefinable, JaxbColumnSizable, JaxbColumnDefaultable, JaxbCommentable {
String getRead();
String getWrite();
}
|
JaxbColumnStandard
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/manager/FederationPolicyManager.java
|
{
"start": 2279,
"end": 3661
}
|
interface ____ {
/**
* If the current instance is compatible, this method returns the same
* instance of {@link FederationAMRMProxyPolicy} reinitialized with the
* current context, otherwise a new instance initialized with the current
* context is provided. If the instance is compatible with the current class
* the implementors should attempt to reinitialize (retaining state). To affect
* a complete policy reset oldInstance should be null.
*
* @param policyContext the current context
* @param oldInstance the existing (possibly null) instance.
*
* @return an updated {@link FederationAMRMProxyPolicy }.
*
* @throws FederationPolicyInitializationException if the initialization
* cannot be completed properly. The oldInstance should be still
* valid in case of failed initialization.
*/
FederationAMRMProxyPolicy getAMRMPolicy(
FederationPolicyInitializationContext policyContext,
FederationAMRMProxyPolicy oldInstance)
throws FederationPolicyInitializationException;
/**
* If the current instance is compatible, this method returns the same
* instance of {@link FederationRouterPolicy} reinitialized with the current
* context, otherwise a new instance initialized with the current context is
* provided. If the instance is compatible with the current
|
FederationPolicyManager
|
java
|
netty__netty
|
handler/src/test/java/io/netty/handler/ssl/SSLEngineTest.java
|
{
"start": 199245,
"end": 217338
}
|
class ____ extends KeyManagerFactory {
TestKeyManagerFactory(final KeyManagerFactory factory) {
super(new KeyManagerFactorySpi() {
private final KeyManager[] managers = factory.getKeyManagers();
@Override
protected void engineInit(KeyStore keyStore, char[] chars) {
throw new UnsupportedOperationException();
}
@Override
protected void engineInit(ManagerFactoryParameters managerFactoryParameters) {
throw new UnsupportedOperationException();
}
@Override
protected KeyManager[] engineGetKeyManagers() {
KeyManager[] array = new KeyManager[managers.length];
for (int i = 0 ; i < array.length; i++) {
final X509ExtendedKeyManager x509ExtendedKeyManager = (X509ExtendedKeyManager) managers[i];
array[i] = new X509ExtendedKeyManager() {
@Override
public String[] getClientAliases(String s, Principal[] principals) {
fail();
return null;
}
@Override
public String chooseClientAlias(
String[] strings, Principal[] principals, Socket socket) {
fail();
return null;
}
@Override
public String[] getServerAliases(String s, Principal[] principals) {
fail();
return null;
}
@Override
public String chooseServerAlias(String s, Principal[] principals, Socket socket) {
fail();
return null;
}
@Override
public String chooseEngineClientAlias(
String[] strings, Principal[] principals, SSLEngine sslEngine) {
assertNotEquals(0, ((ExtendedSSLSession) sslEngine.getHandshakeSession())
.getPeerSupportedSignatureAlgorithms().length);
assertNotEquals(0, ((ExtendedSSLSession) sslEngine.getHandshakeSession())
.getLocalSupportedSignatureAlgorithms().length);
return x509ExtendedKeyManager.chooseEngineClientAlias(
strings, principals, sslEngine);
}
@Override
public String chooseEngineServerAlias(
String s, Principal[] principals, SSLEngine sslEngine) {
assertNotEquals(0, ((ExtendedSSLSession) sslEngine.getHandshakeSession())
.getPeerSupportedSignatureAlgorithms().length);
assertNotEquals(0, ((ExtendedSSLSession) sslEngine.getHandshakeSession())
.getLocalSupportedSignatureAlgorithms().length);
return x509ExtendedKeyManager.chooseEngineServerAlias(s, principals, sslEngine);
}
@Override
public java.security.cert.X509Certificate[] getCertificateChain(String s) {
return x509ExtendedKeyManager.getCertificateChain(s);
}
@Override
public PrivateKey getPrivateKey(String s) {
return x509ExtendedKeyManager.getPrivateKey(s);
}
};
}
return array;
}
}, factory.getProvider(), factory.getAlgorithm());
}
}
clientSslCtx = wrapContext(param, SslContextBuilder.forClient()
.keyManager(new TestKeyManagerFactory(newKeyManagerFactory(ssc)))
.trustManager(InsecureTrustManagerFactory.INSTANCE)
.sslProvider(sslClientProvider())
.sslContextProvider(clientSslContextProvider())
.protocols(param.protocols())
.ciphers(param.ciphers())
.build());
serverSslCtx = wrapContext(param, SslContextBuilder.forServer(
new TestKeyManagerFactory(newKeyManagerFactory(ssc)))
.trustManager(InsecureTrustManagerFactory.INSTANCE)
.sslContextProvider(serverSslContextProvider())
.sslProvider(sslServerProvider())
.protocols(param.protocols())
.ciphers(param.ciphers())
.clientAuth(ClientAuth.REQUIRE)
.build());
SSLEngine clientEngine = null;
SSLEngine serverEngine = null;
try {
clientEngine = wrapEngine(clientSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
serverEngine = wrapEngine(serverSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
handshake(param.type(), param.delegate(), clientEngine, serverEngine);
} finally {
cleanupClientSslEngine(clientEngine);
cleanupServerSslEngine(serverEngine);
}
}
@MethodSource("newTestParams")
@ParameterizedTest
public void testHandshakeSession(SSLEngineTestParam param) throws Exception {
final SelfSignedCertificate ssc = CachedSelfSignedCertificate.getCachedCertificate();
final TestTrustManagerFactory clientTmf = new TestTrustManagerFactory(ssc.cert());
final TestTrustManagerFactory serverTmf = new TestTrustManagerFactory(ssc.cert());
clientSslCtx = wrapContext(param, SslContextBuilder.forClient()
.trustManager(new SimpleTrustManagerFactory() {
@Override
protected void engineInit(KeyStore keyStore) {
// NOOP
}
@Override
protected void engineInit(ManagerFactoryParameters managerFactoryParameters) {
// NOOP
}
@Override
protected TrustManager[] engineGetTrustManagers() {
return new TrustManager[] { clientTmf };
}
})
.keyManager(newKeyManagerFactory(ssc))
.sslProvider(sslClientProvider())
.sslContextProvider(clientSslContextProvider())
.protocols(param.protocols())
.ciphers(param.ciphers())
.build());
serverSslCtx = wrapContext(param, SslContextBuilder.forServer(newKeyManagerFactory(ssc))
.trustManager(new SimpleTrustManagerFactory() {
@Override
protected void engineInit(KeyStore keyStore) {
// NOOP
}
@Override
protected void engineInit(ManagerFactoryParameters managerFactoryParameters) {
// NOOP
}
@Override
protected TrustManager[] engineGetTrustManagers() {
return new TrustManager[] { serverTmf };
}
})
.sslProvider(sslServerProvider())
.sslContextProvider(serverSslContextProvider())
.protocols(param.protocols())
.ciphers(param.ciphers())
.clientAuth(ClientAuth.REQUIRE)
.build());
SSLEngine clientEngine = null;
SSLEngine serverEngine = null;
try {
clientEngine = wrapEngine(clientSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
serverEngine = wrapEngine(serverSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
handshake(param.type(), param.delegate(), clientEngine, serverEngine);
assertTrue(clientTmf.isVerified());
assertTrue(serverTmf.isVerified());
} finally {
cleanupClientSslEngine(clientEngine);
cleanupServerSslEngine(serverEngine);
}
}
@MethodSource("newTestParams")
@ParameterizedTest
public void testSessionLocalWhenNonMutualWithKeyManager(SSLEngineTestParam param) throws Exception {
testSessionLocalWhenNonMutual(param, true);
}
@MethodSource("newTestParams")
@ParameterizedTest
public void testSessionLocalWhenNonMutualWithoutKeyManager(SSLEngineTestParam param) throws Exception {
testSessionLocalWhenNonMutual(param, false);
}
private void testSessionLocalWhenNonMutual(SSLEngineTestParam param, boolean useKeyManager) throws Exception {
final SelfSignedCertificate ssc = CachedSelfSignedCertificate.getCachedCertificate();
SslContextBuilder clientSslCtxBuilder = SslContextBuilder.forClient()
.trustManager(InsecureTrustManagerFactory.INSTANCE)
.sslProvider(sslClientProvider())
.sslContextProvider(clientSslContextProvider())
.protocols(param.protocols())
.ciphers(param.ciphers());
if (useKeyManager) {
clientSslCtxBuilder.keyManager(newKeyManagerFactory(ssc));
} else {
clientSslCtxBuilder.keyManager(ssc.certificate(), ssc.privateKey());
}
clientSslCtx = wrapContext(param, clientSslCtxBuilder.build());
final SslContextBuilder serverSslCtxBuilder;
if (useKeyManager) {
serverSslCtxBuilder = SslContextBuilder.forServer(newKeyManagerFactory(ssc));
} else {
serverSslCtxBuilder = SslContextBuilder.forServer(ssc.certificate(), ssc.privateKey());
}
serverSslCtxBuilder.trustManager(InsecureTrustManagerFactory.INSTANCE)
.sslProvider(sslServerProvider())
.sslContextProvider(serverSslContextProvider())
.protocols(param.protocols())
.ciphers(param.ciphers())
.clientAuth(ClientAuth.NONE);
serverSslCtx = wrapContext(param, serverSslCtxBuilder.build());
SSLEngine clientEngine = null;
SSLEngine serverEngine = null;
try {
clientEngine = wrapEngine(clientSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
serverEngine = wrapEngine(serverSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
handshake(param.type(), param.delegate(), clientEngine, serverEngine);
SSLSession clientSession = clientEngine.getSession();
assertNull(clientSession.getLocalCertificates());
assertNull(clientSession.getLocalPrincipal());
SSLSession serverSession = serverEngine.getSession();
assertNotNull(serverSession.getLocalCertificates());
assertNotNull(serverSession.getLocalPrincipal());
} finally {
cleanupClientSslEngine(clientEngine);
cleanupServerSslEngine(serverEngine);
}
}
@MethodSource("newTestParams")
@ParameterizedTest
public void testEnabledProtocolsAndCiphers(SSLEngineTestParam param) throws Exception {
clientSslCtx = wrapContext(param, SslContextBuilder.forClient()
.trustManager(InsecureTrustManagerFactory.INSTANCE)
.sslProvider(sslClientProvider())
.sslContextProvider(clientSslContextProvider())
.protocols(param.protocols())
.ciphers(param.ciphers())
.build());
SelfSignedCertificate ssc = CachedSelfSignedCertificate.getCachedCertificate();
serverSslCtx = wrapContext(param, SslContextBuilder.forServer(ssc.certificate(), ssc.privateKey())
.sslProvider(sslServerProvider())
.sslContextProvider(serverSslContextProvider())
.protocols(param.protocols())
.ciphers(param.ciphers())
.build());
SSLEngine clientEngine = null;
SSLEngine serverEngine = null;
try {
clientEngine = wrapEngine(clientSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
serverEngine = wrapEngine(serverSslCtx.newEngine(UnpooledByteBufAllocator.DEFAULT));
handshake(param.type(), param.delegate(), clientEngine, serverEngine);
assertEnabledProtocolsAndCipherSuites(clientEngine);
assertEnabledProtocolsAndCipherSuites(serverEngine);
} finally {
cleanupClientSslEngine(clientEngine);
cleanupServerSslEngine(serverEngine);
}
}
private static void assertEnabledProtocolsAndCipherSuites(SSLEngine engine) {
String protocol = engine.getSession().getProtocol();
String cipherSuite = engine.getSession().getCipherSuite();
assertArrayContains(protocol, engine.getEnabledProtocols());
assertArrayContains(cipherSuite, engine.getEnabledCipherSuites());
}
private static void assertArrayContains(String expected, String[] array) {
for (String value: array) {
if (expected.equals(value)) {
return;
}
}
fail("Array did not contain '" + expected + "':" + Arrays.toString(array));
}
@MethodSource("newTestParams")
@ParameterizedTest
public void testMasterKeyLogging(final SSLEngineTestParam param) throws Exception {
if (param.combo() != ProtocolCipherCombo.tlsv12()) {
return;
}
/*
* At the moment master key logging is not supported for conscrypt
*/
assumeFalse(serverSslContextProvider() instanceof OpenSSLProvider);
/*
* The JDK SSL engine master key retrieval relies on being able to set field access to true.
* That is not available in JDK9+
*/
assumeFalse(sslServerProvider() == SslProvider.JDK && PlatformDependent.javaVersion() > 8);
String originalSystemPropertyValue = SystemPropertyUtil.get(SslMasterKeyHandler.SYSTEM_PROP_KEY);
System.setProperty(SslMasterKeyHandler.SYSTEM_PROP_KEY, Boolean.TRUE.toString());
SelfSignedCertificate ssc = CachedSelfSignedCertificate.getCachedCertificate();
serverSslCtx = wrapContext(param, SslContextBuilder.forServer(ssc.certificate(), ssc.privateKey())
.sslProvider(sslServerProvider())
.sslContextProvider(serverSslContextProvider())
.protocols(param.protocols())
.ciphers(param.ciphers())
.build());
try {
sb = new ServerBootstrap();
sb.group(new MultiThreadIoEventLoopGroup(NioIoHandler.newFactory()));
sb.channel(NioServerSocketChannel.class);
final Promise<SecretKey> promise = sb.config().group().next().newPromise();
serverChannel = sb.childHandler(new ChannelInitializer<Channel>() {
@Override
protected void initChannel(Channel ch) {
ch.config().setAllocator(new TestByteBufAllocator(ch.config().getAllocator(), param.type()));
SslHandler sslHandler = !param.delegate() ?
serverSslCtx.newHandler(ch.alloc()) :
serverSslCtx.newHandler(ch.alloc(), delegatingExecutor);
ch.pipeline().addLast(sslHandler);
ch.pipeline().addLast(new SslMasterKeyHandler() {
@Override
protected void accept(SecretKey masterKey, SSLSession session) {
promise.setSuccess(masterKey);
}
});
serverConnectedChannel = ch;
}
}).bind(new InetSocketAddress(0)).sync().channel();
int port = ((InetSocketAddress) serverChannel.localAddress()).getPort();
SSLContext sslContext = SSLContext.getInstance("TLS");
sslContext.init(null, InsecureTrustManagerFactory.INSTANCE.getTrustManagers(), null);
try (Socket socket = sslContext.getSocketFactory().createSocket(NetUtil.LOCALHOST, port)) {
OutputStream out = socket.getOutputStream();
out.write(1);
out.flush();
assertTrue(promise.await(10, TimeUnit.SECONDS));
SecretKey key = promise.get();
assertEquals(48, key.getEncoded().length, "AES secret key must be 48 bytes");
}
} finally {
if (originalSystemPropertyValue != null) {
System.setProperty(SslMasterKeyHandler.SYSTEM_PROP_KEY, originalSystemPropertyValue);
} else {
System.clearProperty(SslMasterKeyHandler.SYSTEM_PROP_KEY);
}
}
}
private static KeyManagerFactory newKeyManagerFactory(SelfSignedCertificate ssc)
throws UnrecoverableKeyException, KeyStoreException, NoSuchAlgorithmException,
CertificateException, IOException {
return SslContext.buildKeyManagerFactory(
new java.security.cert.X509Certificate[] { ssc.cert() }, null, ssc.key(), null, null, null);
}
private static final
|
TestKeyManagerFactory
|
java
|
spring-projects__spring-boot
|
core/spring-boot/src/test/java/org/springframework/boot/context/config/ConfigTreeConfigDataResourceTests.java
|
{
"start": 1013,
"end": 2393
}
|
class ____ {
@Test
@SuppressWarnings("NullAway") // Test null check
void constructorWhenPathStringIsNullThrowsException() {
assertThatIllegalArgumentException().isThrownBy(() -> new ConfigTreeConfigDataResource((String) null))
.withMessage("'path' must not be null");
}
@Test
@SuppressWarnings("NullAway") // Test null check
void constructorWhenPathIsNullThrowsException() {
assertThatIllegalArgumentException().isThrownBy(() -> new ConfigTreeConfigDataResource((Path) null))
.withMessage("'path' must not be null");
}
@Test
void equalsWhenPathIsTheSameReturnsTrue() {
ConfigTreeConfigDataResource location = new ConfigTreeConfigDataResource("/etc/config");
ConfigTreeConfigDataResource other = new ConfigTreeConfigDataResource("/etc/config");
assertThat(location).isEqualTo(other);
}
@Test
void equalsWhenPathIsDifferentReturnsFalse() {
ConfigTreeConfigDataResource location = new ConfigTreeConfigDataResource("/etc/config");
ConfigTreeConfigDataResource other = new ConfigTreeConfigDataResource("other-location");
assertThat(location).isNotEqualTo(other);
}
@Test
void toStringReturnsDescriptiveString() {
ConfigTreeConfigDataResource location = new ConfigTreeConfigDataResource("/etc/config");
assertThat(location).hasToString("config tree [" + new File("/etc/config").getAbsolutePath() + "]");
}
}
|
ConfigTreeConfigDataResourceTests
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
|
{
"start": 3135,
"end": 10911
}
|
class ____ extends CompositeService {
private static final Logger LOG =
LoggerFactory.getLogger(TimelineReaderServer.class);
private static final int SHUTDOWN_HOOK_PRIORITY = 30;
static final String TIMELINE_READER_MANAGER_ATTR =
"timeline.reader.manager";
private HttpServer2 readerWebServer;
private TimelineReaderManager timelineReaderManager;
private String webAppURLWithoutScheme;
public TimelineReaderServer() {
super(TimelineReaderServer.class.getName());
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
if (!YarnConfiguration.timelineServiceV2Enabled(conf)) {
throw new YarnException("timeline service v.2 is not enabled");
}
webAppURLWithoutScheme =
WebAppUtils.getTimelineReaderWebAppURLWithoutScheme(conf);
InetSocketAddress bindAddr =
NetUtils.createSocketAddr(webAppURLWithoutScheme);
// Login from keytab if security is enabled.
try {
SecurityUtil.login(conf, YarnConfiguration.TIMELINE_SERVICE_KEYTAB,
YarnConfiguration.TIMELINE_SERVICE_PRINCIPAL, bindAddr.getHostName());
} catch(IOException e) {
throw new YarnRuntimeException("Failed to login from keytab", e);
}
TimelineReader timelineReaderStore = createTimelineReaderStore(conf);
timelineReaderStore.init(conf);
addService(timelineReaderStore);
timelineReaderManager = createTimelineReaderManager(timelineReaderStore);
addService(timelineReaderManager);
super.serviceInit(conf);
}
private TimelineReader createTimelineReaderStore(final Configuration conf) {
String timelineReaderClassName = conf.get(
YarnConfiguration.TIMELINE_SERVICE_READER_CLASS,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_READER_CLASS);
LOG.info("Using store: {}", timelineReaderClassName);
try {
Class<?> timelineReaderClazz = Class.forName(timelineReaderClassName);
if (TimelineReader.class.isAssignableFrom(timelineReaderClazz)) {
return (TimelineReader) ReflectionUtils.newInstance(
timelineReaderClazz, conf);
} else {
throw new YarnRuntimeException("Class: " + timelineReaderClassName
+ " not instance of " + TimelineReader.class.getCanonicalName());
}
} catch (ClassNotFoundException e) {
throw new YarnRuntimeException("Could not instantiate TimelineReader: "
+ timelineReaderClassName, e);
}
}
private TimelineReaderManager createTimelineReaderManager(
TimelineReader timelineReaderStore) {
return new TimelineReaderManager(timelineReaderStore);
}
@Override
protected void serviceStart() throws Exception {
super.serviceStart();
startTimelineReaderWebApp();
}
private void join() {
// keep the main thread that started the server up until it receives a stop
// signal
if (readerWebServer != null) {
try {
readerWebServer.join();
} catch (InterruptedException ignore) {}
}
}
@Override
protected void serviceStop() throws Exception {
if (readerWebServer != null) {
readerWebServer.stop();
}
super.serviceStop();
}
protected void addFilters(Configuration conf) {
boolean enableCorsFilter = conf.getBoolean(
YarnConfiguration.TIMELINE_SERVICE_HTTP_CROSS_ORIGIN_ENABLED,
YarnConfiguration.TIMELINE_SERVICE_HTTP_CROSS_ORIGIN_ENABLED_DEFAULT);
// Setup CORS
if (enableCorsFilter) {
conf.setBoolean(HttpCrossOriginFilterInitializer.PREFIX
+ HttpCrossOriginFilterInitializer.ENABLED_SUFFIX, true);
}
String initializers = conf.get("hadoop.http.filter.initializers", "");
Set<String> defaultInitializers = new LinkedHashSet<String>();
if (!initializers.contains(
ProxyUserAuthenticationFilterInitializer.class.getName())) {
if (!initializers.contains(
TimelineReaderAuthenticationFilterInitializer.class.getName())) {
defaultInitializers.add(
TimelineReaderAuthenticationFilterInitializer.class.getName());
}
} else {
defaultInitializers.add(
ProxyUserAuthenticationFilterInitializer.class.getName());
}
defaultInitializers.add(
TimelineReaderWhitelistAuthorizationFilterInitializer.class.getName());
TimelineServerUtils.setTimelineFilters(
conf, initializers, defaultInitializers);
}
private void startTimelineReaderWebApp() {
Configuration conf = getConfig();
addFilters(conf);
String hostProperty = YarnConfiguration.TIMELINE_SERVICE_READER_BIND_HOST;
String host = conf.getTrimmed(hostProperty);
if (host == null || host.isEmpty()) {
// if reader bind-host is not set, fall back to timeline-service.bind-host
// to maintain compatibility
hostProperty = YarnConfiguration.TIMELINE_SERVICE_BIND_HOST;
}
String bindAddress = WebAppUtils
.getWebAppBindURL(conf, hostProperty, webAppURLWithoutScheme);
LOG.info("Instantiating TimelineReaderWebApp at {}", bindAddress);
try {
String httpScheme = WebAppUtils.getHttpSchemePrefix(conf);
HttpServer2.Builder builder = new HttpServer2.Builder()
.setName("timeline")
.setConf(conf)
.addEndpoint(URI.create(httpScheme + bindAddress));
if (httpScheme.equals(WebAppUtils.HTTPS_PREFIX)) {
WebAppUtils.loadSslConfiguration(builder, conf);
}
readerWebServer = builder.build();
readerWebServer.addJerseyResourceConfig(configure(), "/*", null);
readerWebServer.setAttribute(TIMELINE_READER_MANAGER_ATTR,
timelineReaderManager);
readerWebServer.start();
} catch (Exception e) {
String msg = "TimelineReaderWebApp failed to start.";
LOG.error(msg, e);
throw new YarnRuntimeException(msg, e);
}
}
@VisibleForTesting
public int getWebServerPort() {
return readerWebServer.getConnectorAddress(0).getPort();
}
static TimelineReaderServer startTimelineReaderServer(String[] args,
Configuration conf) {
Thread.setDefaultUncaughtExceptionHandler(
new YarnUncaughtExceptionHandler());
StringUtils.startupShutdownMessage(TimelineReaderServer.class,
args, LOG);
TimelineReaderServer timelineReaderServer = null;
try {
timelineReaderServer = new TimelineReaderServer();
ShutdownHookManager.get().addShutdownHook(
new CompositeServiceShutdownHook(timelineReaderServer),
SHUTDOWN_HOOK_PRIORITY);
timelineReaderServer.init(conf);
timelineReaderServer.start();
} catch (Throwable t) {
LOG.error("Error starting TimelineReaderWebServer", t);
ExitUtil.terminate(-1, "Error starting TimelineReaderWebServer");
}
return timelineReaderServer;
}
protected static ResourceConfig configure() {
ResourceConfig config = new ResourceConfig();
config.packages("org.apache.hadoop.yarn.server.timelineservice.reader");
config.packages("org.apache.hadoop.yarn.api.records.writer");
config.register(LogWebService.class);
config.register(GenericExceptionHandler.class);
config.register(TimelineReaderWebServices.class);
config.register(TimelineEntitySetWriter.class);
config.register(TimelineEntityWriter.class);
config.register(TimelineHealthWriter.class);
config.register(new JettisonFeature()).register(YarnJacksonJaxbJsonProvider.class);
return config;
}
public static void main(String[] args) {
Configuration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSIONS, 2.0f);
TimelineReaderServer server = startTimelineReaderServer(args, conf);
server.join();
}
}
|
TimelineReaderServer
|
java
|
spring-projects__spring-framework
|
spring-aop/src/test/java/org/springframework/aop/framework/SentenceFragment.java
|
{
"start": 1263,
"end": 1313
}
|
interface ____ {
String value();
}
|
SentenceFragment
|
java
|
quarkusio__quarkus
|
extensions/spring-cloud-config-client/runtime/src/main/java/io/quarkus/spring/cloud/config/client/runtime/SpringCloudConfigClientConfig.java
|
{
"start": 4161,
"end": 4596
}
|
interface ____ {
/**
* Enable discovery of the Spring Cloud Config Server
*/
@WithDefault("false")
boolean enabled();
/**
* The service ID to use when discovering the Spring Cloud Config Server
*/
Optional<String> serviceId();
/**
* Eureka server configuration
*/
Optional<EurekaConfig> eurekaConfig();
|
DiscoveryConfig
|
java
|
apache__camel
|
components/camel-netty-http/src/test/java/org/apache/camel/component/netty/http/NettyRecipientListHttpBaseTest.java
|
{
"start": 1027,
"end": 2371
}
|
class ____ extends BaseNettyTest {
@Test
public void testRecipientListHttpBase() throws Exception {
getMockEndpoint("mock:foo").expectedHeaderValuesReceivedInAnyOrder(Exchange.HTTP_PATH, "/bar", "/baz", "/bar/baz",
"/baz/bar");
getMockEndpoint("mock:foo").expectedHeaderValuesReceivedInAnyOrder("num", "1", "2", "3", "4");
template.sendBodyAndHeader("direct:start", "A", Exchange.HTTP_PATH, "/foo/bar?num=1");
template.sendBodyAndHeader("direct:start", "B", Exchange.HTTP_PATH, "/foo/baz?num=2");
template.sendBodyAndHeader("direct:start", "C", Exchange.HTTP_PATH, "/foo/bar/baz?num=3");
template.sendBodyAndHeader("direct:start", "D", Exchange.HTTP_PATH, "/foo/baz/bar?num=4");
MockEndpoint.assertIsSatisfied(context);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("netty-http:http://0.0.0.0:{{port}}/foo?matchOnUriPrefix=true")
.to("mock:foo")
.transform(body().prepend("Bye "));
from("direct:start")
.recipientList().constant("netty-http:http://localhost:{{port}}");
}
};
}
}
|
NettyRecipientListHttpBaseTest
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/error/ShouldBeEqual_newAssertionError_Test.java
|
{
"start": 1365,
"end": 2227
}
|
class ____ {
@ParameterizedTest
@MethodSource("parameters")
void should_create_AssertionFailedError_if_JUnit5_is_present_and_trim_spaces_in_formatted_description(String formattedDescription) {
// GIVEN
Description description = new TestDescription("Jedi");
var shouldBeEqual = shouldBeEqual("Luke", "Yoda", STANDARD_REPRESENTATION);
// WHEN
var assertionError = shouldBeEqual.toAssertionError(description, STANDARD_REPRESENTATION);
// THEN
then(assertionError).isInstanceOf(AssertionFailedError.class)
.hasMessage(format("[Jedi] %n" +
"expected: \"Yoda\"%n" +
" but was: \"Luke\""));
}
public static Stream<String> parameters() {
return Stream.of("[Jedi]", "[Jedi] ");
}
}
|
ShouldBeEqual_newAssertionError_Test
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/type/descriptor/java/ZoneOffsetJavaType.java
|
{
"start": 491,
"end": 653
}
|
class ____ extends AbstractClassJavaType<ZoneOffset> {
public static final ZoneOffsetJavaType INSTANCE = new ZoneOffsetJavaType();
public static
|
ZoneOffsetJavaType
|
java
|
spring-projects__spring-security
|
web/src/test/java/org/springframework/security/web/authentication/logout/SimpleUrlLogoutSuccessHandlerTests.java
|
{
"start": 1043,
"end": 2144
}
|
class ____ {
@Test
public void doesntRedirectIfResponseIsCommitted() throws Exception {
SimpleUrlLogoutSuccessHandler lsh = new SimpleUrlLogoutSuccessHandler();
lsh.setDefaultTargetUrl("/target");
MockHttpServletRequest request = new MockHttpServletRequest();
MockHttpServletResponse response = new MockHttpServletResponse();
response.setCommitted(true);
lsh.onLogoutSuccess(request, response, mock(Authentication.class));
assertThat(request.getSession(false)).isNull();
assertThat(response.getRedirectedUrl()).isNull();
assertThat(response.getForwardedUrl()).isNull();
}
@Test
public void absoluteUrlIsSupported() throws Exception {
SimpleUrlLogoutSuccessHandler lsh = new SimpleUrlLogoutSuccessHandler();
lsh.setDefaultTargetUrl("https://someurl.com/");
MockHttpServletRequest request = new MockHttpServletRequest();
MockHttpServletResponse response = new MockHttpServletResponse();
lsh.onLogoutSuccess(request, response, mock(Authentication.class));
assertThat(response.getRedirectedUrl()).isEqualTo("https://someurl.com/");
}
}
|
SimpleUrlLogoutSuccessHandlerTests
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/contextual/ContextAttributeWithDeserTest.java
|
{
"start": 1118,
"end": 4237
}
|
class ____
{
@JsonDeserialize(using=PrefixStringDeserializer.class)
public String value;
}
/*
/**********************************************************
/* Test methods
/**********************************************************
*/
final ObjectMapper MAPPER = newJsonMapper();
@Test
public void testSimplePerCall() throws Exception
{
final String INPUT = a2q("[{'value':'a'},{'value':'b'}]");
TestPOJO[] pojos = MAPPER.readerFor(TestPOJO[].class).readValue(INPUT);
assertEquals(2, pojos.length);
assertEquals("a/0", pojos[0].value);
assertEquals("b/1", pojos[1].value);
// and verify that state does not linger
TestPOJO[] pojos2 = MAPPER.readerFor(TestPOJO[].class).readValue(INPUT);
assertEquals(2, pojos2.length);
assertEquals("a/0", pojos2[0].value);
assertEquals("b/1", pojos2[1].value);
}
@Test
public void testSimpleDefaults() throws Exception
{
final String INPUT = a2q("{'value':'x'}");
TestPOJO pojo = MAPPER.readerFor(TestPOJO.class)
.withAttribute(KEY, Integer.valueOf(3))
.readValue(INPUT);
assertEquals("x/3", pojo.value);
// as above, should not carry on state
TestPOJO pojo2 = MAPPER.readerFor(TestPOJO.class)
.withAttribute(KEY, Integer.valueOf(5))
.readValue(INPUT);
assertEquals("x/5", pojo2.value);
}
@Test
public void testHierarchic() throws Exception
{
final String INPUT = a2q("[{'value':'x'},{'value':'y'}]");
ObjectReader r = MAPPER.readerFor(TestPOJO[].class).withAttribute(KEY, Integer.valueOf(2));
TestPOJO[] pojos = r.readValue(INPUT);
assertEquals(2, pojos.length);
assertEquals("x/2", pojos[0].value);
assertEquals("y/3", pojos[1].value);
// and once more to verify transiency of per-call state
TestPOJO[] pojos2 = r.readValue(INPUT);
assertEquals(2, pojos2.length);
assertEquals("x/2", pojos2[0].value);
assertEquals("y/3", pojos2[1].value);
}
@Test
// [databind#3001]
public void testDefaultsViaMapper() throws Exception
{
final String INPUT = a2q("{'value':'x'}");
ContextAttributes attrs = ContextAttributes.getEmpty()
.withSharedAttribute(KEY, Integer.valueOf(72));
ObjectMapper mapper = jsonMapperBuilder()
.defaultAttributes(attrs)
.build();
TestPOJO pojo = mapper.readerFor(TestPOJO.class)
.readValue(INPUT);
assertEquals("x/72", pojo.value);
// as above, should not carry on state
TestPOJO pojo2 = mapper.readerFor(TestPOJO.class)
.readValue(INPUT);
assertEquals("x/72", pojo2.value);
// And should be overridable too
TestPOJO pojo3 = mapper.readerFor(TestPOJO.class)
.withAttribute(KEY, Integer.valueOf(19))
.readValue(INPUT);
assertEquals("x/19", pojo3.value);
}
}
|
TestPOJO
|
java
|
playframework__playframework
|
documentation/manual/working/javaGuide/main/forms/code/javaguide/forms/JavaForms.java
|
{
"start": 10806,
"end": 11648
}
|
class ____ extends javaguide.forms.u1.User {
User(String email, String password) {
this.email = email;
this.password = password;
}
}
Form<javaguide.forms.u1.User> userForm = formFactory().form(javaguide.forms.u1.User.class);
// #fill
userForm = userForm.fill(new User("bob@gmail.com", "secret"));
// #fill
assertThat(userForm.field("email").value()).hasValue("bob@gmail.com");
assertThat(userForm.field("password").value()).hasValue("secret");
}
@Test
public void dynamicForm() {
Result result =
call(
new Controller3(instanceOf(JavaHandlerComponents.class)),
fakeRequest("POST", "/").bodyForm(ImmutableMap.of("firstname", "a", "lastname", "b")),
mat);
assertThat(contentAsString(result)).isEqualTo("Hello a b");
}
public
|
User
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/registration/DefaultTestRegistrationGateway.java
|
{
"start": 1079,
"end": 2180
}
|
class ____ implements TestRegistrationGateway {
private final String address;
private final String hostname;
private final BiFunction<UUID, Long, CompletableFuture<RegistrationResponse>>
registrationFunction;
private DefaultTestRegistrationGateway(
String address,
String hostname,
BiFunction<UUID, Long, CompletableFuture<RegistrationResponse>> registrationFunction) {
this.address = address;
this.hostname = hostname;
this.registrationFunction = registrationFunction;
}
@Override
public String getAddress() {
return address;
}
@Override
public String getHostname() {
return hostname;
}
@Override
public CompletableFuture<RegistrationResponse> registrationCall(UUID leaderId, long timeout) {
return registrationFunction.apply(leaderId, timeout);
}
public static Builder newBuilder() {
return new Builder();
}
/** Builder for the {@link DefaultTestRegistrationGateway}. */
public static final
|
DefaultTestRegistrationGateway
|
java
|
grpc__grpc-java
|
benchmarks/src/generated/main/grpc/io/grpc/benchmarks/proto/ReportQpsScenarioServiceGrpc.java
|
{
"start": 10318,
"end": 12141
}
|
class ____<Req, Resp> implements
io.grpc.stub.ServerCalls.UnaryMethod<Req, Resp>,
io.grpc.stub.ServerCalls.ServerStreamingMethod<Req, Resp>,
io.grpc.stub.ServerCalls.ClientStreamingMethod<Req, Resp>,
io.grpc.stub.ServerCalls.BidiStreamingMethod<Req, Resp> {
private final AsyncService serviceImpl;
private final int methodId;
MethodHandlers(AsyncService serviceImpl, int methodId) {
this.serviceImpl = serviceImpl;
this.methodId = methodId;
}
@java.lang.Override
@java.lang.SuppressWarnings("unchecked")
public void invoke(Req request, io.grpc.stub.StreamObserver<Resp> responseObserver) {
switch (methodId) {
case METHODID_REPORT_SCENARIO:
serviceImpl.reportScenario((io.grpc.benchmarks.proto.Control.ScenarioResult) request,
(io.grpc.stub.StreamObserver<io.grpc.benchmarks.proto.Control.Void>) responseObserver);
break;
default:
throw new AssertionError();
}
}
@java.lang.Override
@java.lang.SuppressWarnings("unchecked")
public io.grpc.stub.StreamObserver<Req> invoke(
io.grpc.stub.StreamObserver<Resp> responseObserver) {
switch (methodId) {
default:
throw new AssertionError();
}
}
}
public static final io.grpc.ServerServiceDefinition bindService(AsyncService service) {
return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor())
.addMethod(
getReportScenarioMethod(),
io.grpc.stub.ServerCalls.asyncUnaryCall(
new MethodHandlers<
io.grpc.benchmarks.proto.Control.ScenarioResult,
io.grpc.benchmarks.proto.Control.Void>(
service, METHODID_REPORT_SCENARIO)))
.build();
}
private static abstract
|
MethodHandlers
|
java
|
lettuce-io__lettuce-core
|
src/main/java/io/lettuce/core/ZAggregateArgs.java
|
{
"start": 1769,
"end": 5304
}
|
class ____ {
/**
* Utility constructor.
*/
Builder() {
}
/**
* Creates new {@link ZAggregateArgs} setting {@literal WEIGHTS}.
*
* @return new {@link ZAddArgs} with {@literal WEIGHTS} set.
* @see ZAggregateArgs#weights(double...)
*/
public static ZAggregateArgs weights(double... weights) {
return new ZAggregateArgs().weights(weights);
}
/**
* Creates new {@link ZAggregateArgs} setting {@literal AGGREGATE SUM}.
*
* @return new {@link ZAddArgs} with {@literal AGGREGATE SUM} set.
* @see ZAggregateArgs#sum()
*/
public static ZAggregateArgs sum() {
return new ZAggregateArgs().sum();
}
/**
* Creates new {@link ZAggregateArgs} setting {@literal AGGREGATE MIN}.
*
* @return new {@link ZAddArgs} with {@literal AGGREGATE MIN} set.
* @see ZAggregateArgs#sum()
*/
public static ZAggregateArgs min() {
return new ZAggregateArgs().min();
}
/**
* Creates new {@link ZAggregateArgs} setting {@literal AGGREGATE MAX}.
*
* @return new {@link ZAddArgs} with {@literal AGGREGATE MAX} set.
* @see ZAggregateArgs#sum()
*/
public static ZAggregateArgs max() {
return new ZAggregateArgs().max();
}
}
/**
* Specify a multiplication factor for each input sorted set.
*
* @param weights must not be {@code null}.
* @return {@code this} {@link ZAggregateArgs}.
*/
public ZAggregateArgs weights(double... weights) {
LettuceAssert.notNull(weights, "Weights must not be null");
this.weights = new ArrayList<>(weights.length);
for (double weight : weights) {
this.weights.add(weight);
}
return this;
}
/**
* Aggregate scores of elements existing across multiple sets by summing up.
*
* @return {@code this} {@link ZAggregateArgs}.
*/
public ZAggregateArgs sum() {
this.aggregate = Aggregate.SUM;
return this;
}
/**
* Aggregate scores of elements existing across multiple sets by using the lowest score.
*
* @return {@code this} {@link ZAggregateArgs}.
*/
public ZAggregateArgs min() {
this.aggregate = Aggregate.MIN;
return this;
}
/**
* Aggregate scores of elements existing across multiple sets by using the highest score.
*
* @return {@code this} {@link ZAggregateArgs}.
*/
public ZAggregateArgs max() {
this.aggregate = Aggregate.MAX;
return this;
}
@Override
public <K, V> void build(CommandArgs<K, V> args) {
if (weights != null && !weights.isEmpty()) {
args.add(WEIGHTS);
for (double weight : weights) {
args.add(weight);
}
}
if (aggregate != null) {
args.add(AGGREGATE);
switch (aggregate) {
case SUM:
args.add(SUM);
break;
case MIN:
args.add(MIN);
break;
case MAX:
args.add(MAX);
break;
default:
throw new IllegalArgumentException("Aggregation " + aggregate + " not supported");
}
}
}
}
|
Builder
|
java
|
spring-projects__spring-framework
|
framework-docs/src/main/java/org/springframework/docs/integration/jms/jmsjcamessageendpointmanager/AlternativeJmsConfiguration.java
|
{
"start": 1054,
"end": 1673
}
|
class ____ {
// tag::snippet[]
@Bean
JmsMessageEndpointManager jmsMessageEndpointManager(ResourceAdapter resourceAdapter,
MessageListener myMessageListener) {
ActiveMQActivationSpec spec = new ActiveMQActivationSpec();
spec.setDestination("myQueue");
spec.setDestinationType("jakarta.jms.Queue");
JmsMessageEndpointManager endpointManager = new JmsMessageEndpointManager();
endpointManager.setResourceAdapter(resourceAdapter);
endpointManager.setActivationSpec(spec);
endpointManager.setMessageListener(myMessageListener);
return endpointManager;
}
// end::snippet[]
}
|
AlternativeJmsConfiguration
|
java
|
netty__netty
|
transport/src/main/java/io/netty/channel/SingleThreadIoEventLoop.java
|
{
"start": 1347,
"end": 13121
}
|
class ____ extends SingleThreadEventLoop implements IoEventLoop {
// TODO: Is this a sensible default ?
private static final long DEFAULT_MAX_TASK_PROCESSING_QUANTUM_NS = TimeUnit.MILLISECONDS.toNanos(Math.max(100,
SystemPropertyUtil.getInt("io.netty.eventLoop.maxTaskProcessingQuantumMs", 1000)));
private final long maxTaskProcessingQuantumNs;
private final IoHandlerContext context = new IoHandlerContext() {
@Override
public boolean canBlock() {
assert inEventLoop();
return !hasTasks() && !hasScheduledTasks();
}
@Override
public long delayNanos(long currentTimeNanos) {
assert inEventLoop();
return SingleThreadIoEventLoop.this.delayNanos(currentTimeNanos);
}
@Override
public long deadlineNanos() {
assert inEventLoop();
return SingleThreadIoEventLoop.this.deadlineNanos();
}
@Override
public void reportActiveIoTime(long activeNanos) {
SingleThreadIoEventLoop.this.reportActiveIoTime(activeNanos);
}
@Override
public boolean shouldReportActiveIoTime() {
return isSuspensionSupported();
}
};
private final IoHandler ioHandler;
private final AtomicInteger numRegistrations = new AtomicInteger();
/**
* Creates a new instance
*
* @param parent the parent that holds this {@link IoEventLoop}.
* @param threadFactory the {@link ThreadFactory} that is used to create the underlying {@link Thread}.
* @param ioHandlerFactory the {@link IoHandlerFactory} that should be used to obtain {@link IoHandler} to
* handle IO.
*/
public SingleThreadIoEventLoop(IoEventLoopGroup parent, ThreadFactory threadFactory,
IoHandlerFactory ioHandlerFactory) {
super(parent, threadFactory, false,
ObjectUtil.checkNotNull(ioHandlerFactory, "ioHandlerFactory").isChangingThreadSupported());
this.maxTaskProcessingQuantumNs = DEFAULT_MAX_TASK_PROCESSING_QUANTUM_NS;
this.ioHandler = ioHandlerFactory.newHandler(this);
}
/**
* Creates a new instance
*
* @param parent the parent that holds this {@link IoEventLoop}.
* @param executor the {@link Executor} that is used for dispatching the work.
* @param ioHandlerFactory the {@link IoHandlerFactory} that should be used to obtain {@link IoHandler} to
* handle IO.
*/
public SingleThreadIoEventLoop(IoEventLoopGroup parent, Executor executor, IoHandlerFactory ioHandlerFactory) {
super(parent, executor, false,
ObjectUtil.checkNotNull(ioHandlerFactory, "ioHandlerFactory").isChangingThreadSupported());
this.maxTaskProcessingQuantumNs = DEFAULT_MAX_TASK_PROCESSING_QUANTUM_NS;
this.ioHandler = ioHandlerFactory.newHandler(this);
}
/**
* Creates a new instance
*
* @param parent the parent that holds this {@link IoEventLoop}.
* @param threadFactory the {@link ThreadFactory} that is used to create the underlying
* {@link Thread}.
* @param ioHandlerFactory the {@link IoHandlerFactory} that should be used to obtain {@link IoHandler}
* to handle IO.
* @param maxPendingTasks the maximum pending tasks that are allowed before
* {@link RejectedExecutionHandler#rejected(Runnable,
* SingleThreadEventExecutor)}
* is called to handle it.
* @param rejectedExecutionHandler the {@link RejectedExecutionHandler} that handles when more tasks are added
* then allowed per {@code maxPendingTasks}.
* @param maxTaskProcessingQuantumMs the maximum number of milliseconds that will be spent to run tasks before
* trying to run IO again.
*/
public SingleThreadIoEventLoop(IoEventLoopGroup parent, ThreadFactory threadFactory,
IoHandlerFactory ioHandlerFactory, int maxPendingTasks,
RejectedExecutionHandler rejectedExecutionHandler, long maxTaskProcessingQuantumMs) {
super(parent, threadFactory, false,
ObjectUtil.checkNotNull(ioHandlerFactory, "ioHandlerFactory").isChangingThreadSupported(),
maxPendingTasks, rejectedExecutionHandler);
this.maxTaskProcessingQuantumNs =
ObjectUtil.checkPositiveOrZero(maxTaskProcessingQuantumMs, "maxTaskProcessingQuantumMs") == 0 ?
DEFAULT_MAX_TASK_PROCESSING_QUANTUM_NS :
TimeUnit.MILLISECONDS.toNanos(maxTaskProcessingQuantumMs);
this.ioHandler = ioHandlerFactory.newHandler(this);
}
/**
* Creates a new instance
*
* @param parent the parent that holds this {@link IoEventLoop}.
* @param ioHandlerFactory the {@link IoHandlerFactory} that should be used to obtain {@link IoHandler}
* to handle IO.
* @param maxPendingTasks the maximum pending tasks that are allowed before
* {@link RejectedExecutionHandler#rejected(Runnable,
* SingleThreadEventExecutor)}
* is called to handle it.
* @param rejectedExecutionHandler the {@link RejectedExecutionHandler} that handles when more tasks are added
* then allowed per {@code maxPendingTasks}.
* @param maxTaskProcessingQuantumMs the maximum number of milliseconds that will be spent to run tasks before
* trying to run IO again.
*/
public SingleThreadIoEventLoop(IoEventLoopGroup parent, Executor executor,
IoHandlerFactory ioHandlerFactory, int maxPendingTasks,
RejectedExecutionHandler rejectedExecutionHandler,
long maxTaskProcessingQuantumMs) {
super(parent, executor, false,
ObjectUtil.checkNotNull(ioHandlerFactory, "ioHandlerFactory").isChangingThreadSupported(),
maxPendingTasks, rejectedExecutionHandler);
this.maxTaskProcessingQuantumNs =
ObjectUtil.checkPositiveOrZero(maxTaskProcessingQuantumMs, "maxTaskProcessingQuantumMs") == 0 ?
DEFAULT_MAX_TASK_PROCESSING_QUANTUM_NS :
TimeUnit.MILLISECONDS.toNanos(maxTaskProcessingQuantumMs);
this.ioHandler = ioHandlerFactory.newHandler(this);
}
/**
*
* Creates a new instance
*
* @param parent the parent that holds this {@link IoEventLoop}.
* @param executor the {@link Executor} that is used for dispatching the work.
* @param ioHandlerFactory the {@link IoHandlerFactory} that should be used to obtain {@link IoHandler}
* to handle IO.
* @param taskQueue the {@link Queue} used for storing pending tasks.
* @param tailTaskQueue the {@link Queue} used for storing tail pending tasks.
* @param rejectedExecutionHandler the {@link RejectedExecutionHandler} that handles when more tasks are added
* then allowed.
*/
protected SingleThreadIoEventLoop(IoEventLoopGroup parent, Executor executor,
IoHandlerFactory ioHandlerFactory, Queue<Runnable> taskQueue,
Queue<Runnable> tailTaskQueue,
RejectedExecutionHandler rejectedExecutionHandler) {
super(parent, executor, false,
ObjectUtil.checkNotNull(ioHandlerFactory, "ioHandlerFactory").isChangingThreadSupported(),
taskQueue, tailTaskQueue, rejectedExecutionHandler);
this.maxTaskProcessingQuantumNs = DEFAULT_MAX_TASK_PROCESSING_QUANTUM_NS;
this.ioHandler = ioHandlerFactory.newHandler(this);
}
@Override
protected void run() {
assert inEventLoop();
ioHandler.initialize();
do {
runIo();
if (isShuttingDown()) {
ioHandler.prepareToDestroy();
}
// Now run all tasks for the maximum configured amount of time before trying to run IO again.
runAllTasks(maxTaskProcessingQuantumNs);
// We should continue with our loop until we either confirmed a shutdown or we can suspend it.
} while (!confirmShutdown() && !canSuspend());
}
protected final IoHandler ioHandler() {
return ioHandler;
}
@Override
protected boolean canSuspend(int state) {
// We should only allow to suspend if there are no registrations on this loop atm.
return super.canSuspend(state) && numRegistrations.get() == 0;
}
/**
* Called when IO will be processed for all the {@link IoHandle}s on this {@link SingleThreadIoEventLoop}.
* This method returns the number of {@link IoHandle}s for which IO was processed.
*
* This method must be called from the {@link EventLoop} thread.
*/
protected int runIo() {
assert inEventLoop();
return ioHandler.run(context);
}
@Override
public IoEventLoop next() {
return this;
}
@Override
public final Future<IoRegistration> register(final IoHandle handle) {
Promise<IoRegistration> promise = newPromise();
if (inEventLoop()) {
registerForIo0(handle, promise);
} else {
execute(() -> registerForIo0(handle, promise));
}
return promise;
}
@Override
protected int getNumOfRegisteredChannels() {
return numRegistrations.get();
}
private void registerForIo0(final IoHandle handle, Promise<IoRegistration> promise) {
assert inEventLoop();
final IoRegistration registration;
try {
registration = ioHandler.register(handle);
} catch (Exception e) {
promise.setFailure(e);
return;
}
numRegistrations.incrementAndGet();
promise.setSuccess(new IoRegistrationWrapper(registration));
}
@Override
protected final void wakeup(boolean inEventLoop) {
ioHandler.wakeup();
}
@Override
protected final void cleanup() {
assert inEventLoop();
ioHandler.destroy();
}
@Override
public boolean isCompatible(Class<? extends IoHandle> handleType) {
return ioHandler.isCompatible(handleType);
}
@Override
public boolean isIoType(Class<? extends IoHandler> handlerType) {
return ioHandler.getClass().equals(handlerType);
}
@Override
protected Queue<Runnable> newTaskQueue(int maxPendingTasks) {
return newTaskQueue0(maxPendingTasks);
}
protected static Queue<Runnable> newTaskQueue0(int maxPendingTasks) {
// This event loop never calls takeTask()
return maxPendingTasks == Integer.MAX_VALUE ? PlatformDependent.<Runnable>newMpscQueue()
: PlatformDependent.<Runnable>newMpscQueue(maxPendingTasks);
}
private final
|
SingleThreadIoEventLoop
|
java
|
apache__spark
|
core/src/main/java/org/apache/spark/api/resource/ResourceDiscoveryPlugin.java
|
{
"start": 1402,
"end": 3002
}
|
interface ____ {
/**
* Discover the addresses of the requested resource.
* <p>
* This method is called early in the initialization of the Spark Executor/Driver/Worker.
* This function is responsible for discovering the addresses of the resource which Spark will
* then use for scheduling and eventually providing to the user.
* Depending on the deployment mode and and configuration of custom resources, this could be
* called by the Spark Driver, the Spark Executors, in standalone mode the Workers, or all of
* them. The ResourceRequest has a ResourceID component that can be used to distinguish which
* component it is called from and what resource its being called for.
* This will get called once for each resource type requested and its the responsibility of
* this function to return enough addresses of that resource based on the request. If
* the addresses do not meet the requested amount, Spark will fail.
* If this plugin doesn't handle a particular resource, it should return an empty Optional
* and Spark will try other plugins and then last fall back to the default discovery script
* plugin.
*
* @param request The ResourceRequest that to be discovered.
* @param sparkConf SparkConf
* @return An {@link Optional} containing a {@link ResourceInformation} object containing
* the resource name and the addresses of the resource. If it returns {@link Optional#EMPTY}
* other plugins will be called.
*/
Optional<ResourceInformation> discoverResource(ResourceRequest request, SparkConf sparkConf);
}
|
ResourceDiscoveryPlugin
|
java
|
spring-projects__spring-framework
|
spring-webmvc/src/test/java/org/springframework/web/servlet/mvc/method/annotation/ReactiveTypeHandlerTests.java
|
{
"start": 18532,
"end": 18887
}
|
class ____ {
String handleString() { return null; }
Mono<String> handleMono() { return null; }
Single<String> handleSingle() { return null; }
Flux<Bar> handleFlux() { return null; }
Flux<String> handleFluxString() { return null; }
Flux<ServerSentEvent<String>> handleFluxSseEventBuilder() { return null; }
}
private static
|
TestController
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/search/query/QuerySearchRequest.java
|
{
"start": 1282,
"end": 3580
}
|
class ____ extends AbstractTransportRequest implements IndicesRequest {
private final ShardSearchContextId contextId;
private final AggregatedDfs dfs;
private final OriginalIndices originalIndices;
private final ShardSearchRequest shardSearchRequest;
public QuerySearchRequest(
OriginalIndices originalIndices,
ShardSearchContextId contextId,
ShardSearchRequest shardSearchRequest,
AggregatedDfs dfs
) {
this.contextId = contextId;
this.dfs = dfs;
this.shardSearchRequest = shardSearchRequest;
this.originalIndices = originalIndices;
}
public QuerySearchRequest(StreamInput in) throws IOException {
super(in);
contextId = new ShardSearchContextId(in);
dfs = new AggregatedDfs(in);
originalIndices = OriginalIndices.readOriginalIndices(in);
this.shardSearchRequest = in.readOptionalWriteable(ShardSearchRequest::new);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
contextId.writeTo(out);
dfs.writeTo(out);
OriginalIndices.writeOriginalIndices(originalIndices, out);
out.writeOptionalWriteable(shardSearchRequest);
}
public ShardSearchContextId contextId() {
return contextId;
}
public AggregatedDfs dfs() {
return dfs;
}
@Nullable
public ShardSearchRequest shardSearchRequest() {
return shardSearchRequest;
}
@Override
public String[] indices() {
return originalIndices.indices();
}
@Override
public IndicesOptions indicesOptions() {
return originalIndices.indicesOptions();
}
@Override
public Task createTask(long id, String type, String action, TaskId parentTaskId, Map<String, String> headers) {
return new SearchShardTask(id, type, action, getDescription(), parentTaskId, headers);
}
public String getDescription() {
StringBuilder sb = new StringBuilder();
sb.append("id[");
sb.append(contextId);
sb.append("], ");
sb.append("indices[");
Strings.arrayToDelimitedString(originalIndices.indices(), ",", sb);
sb.append("]");
return sb.toString();
}
}
|
QuerySearchRequest
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.