language stringclasses 1
value | repo stringclasses 60
values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | apache__camel | components/camel-pqc/src/main/java/org/apache/camel/component/pqc/crypto/PQCDefaultMAYOMaterial.java | {
"start": 1133,
"end": 2370
} | class ____ {
public static final KeyPair keyPair;
public static final Signature signer;
static {
if (Security.getProvider(BouncyCastleProvider.PROVIDER_NAME) == null) {
Security.addProvider(new BouncyCastleProvider());
}
if (Security.getProvider(BouncyCastlePQCProvider.PROVIDER_NAME) == null) {
Security.addProvider(new BouncyCastlePQCProvider());
}
KeyPairGenerator generator;
try {
generator = prepareKeyPair();
keyPair = generator.generateKeyPair();
signer = Signature.getInstance(PQCSignatureAlgorithms.MAYO.getAlgorithm(),
PQCSignatureAlgorithms.MAYO.getBcProvider());
} catch (Exception e) {
throw new RuntimeException(e);
}
}
protected static KeyPairGenerator prepareKeyPair()
throws NoSuchAlgorithmException, NoSuchProviderException, InvalidAlgorithmParameterException {
KeyPairGenerator kpGen = KeyPairGenerator.getInstance(PQCSignatureAlgorithms.MAYO.getAlgorithm(),
PQCSignatureAlgorithms.MAYO.getBcProvider());
kpGen.initialize(MayoParameterSpec.mayo5);
return kpGen;
}
}
| PQCDefaultMAYOMaterial |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/stream/NestedCollectionFetchStreamTest.java | {
"start": 3777,
"end": 3818
} | class ____ extends BasicEntity {
}
}
| EntityC |
java | apache__camel | tooling/maven/camel-repackager-maven-plugin/src/main/java/org/apache/camel/maven/RepackageMojo.java | {
"start": 2181,
"end": 2557
} | class ____ extends AbstractMojo {
/**
* The Maven project.
*/
@Parameter(defaultValue = "${project}", readonly = true, required = true)
private MavenProject project;
/**
* The source JAR file to repackage. If not specified, uses the project's main artifact.
*/
@Parameter
private File sourceJar;
/**
* The main | RepackageMojo |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java | {
"start": 3727,
"end": 67734
} | class ____ implements ClusterStateListener {
private final ClusterService clusterService;
private final TransportService transportService;
private final Executor clusterCoordinationExecutor;
private final Coordinator coordinator;
private final MasterHistoryService masterHistoryService;
/**
* This is the amount of time we use to make the initial decision -- have we seen a master node in the very recent past?
*/
private final TimeValue nodeHasMasterLookupTimeframe;
/**
* If the master transitions from a non-null master to a null master at least this many times it starts impacting the health status.
*/
private final int unacceptableNullTransitions;
/**
* If the master transitions from one non-null master to a different non-null master at least this many times it starts impacting the
* health status.
*/
private final int unacceptableIdentityChanges;
// ThreadLocal because our unit testing framework does not like sharing Randoms across threads
private final ThreadLocal<Random> random = ThreadLocal.withInitial(Randomness::get);
/*
* This is a Map of tasks that are periodically reaching out to other master eligible nodes to get their ClusterFormationStates for
* diagnosis. The key is the DiscoveryNode for the master eligible node being polled, and the value is a Cancellable.
* The field is accessed (reads/writes) from multiple threads, but the reference itself is only ever changed on the cluster change
* event thread.
*/
// Non-private for testing
volatile Map<DiscoveryNode, Scheduler.Cancellable> clusterFormationInfoTasks = null;
/*
* This field holds the results of the tasks in the clusterFormationInfoTasks field above. The field is accessed (reads/writes) from
* multiple threads, but the reference itself is only ever changed on the cluster change event thread.
*/
// Non-private for testing
volatile ConcurrentMap<DiscoveryNode, ClusterFormationStateOrException> clusterFormationResponses = null;
/*
* This is a reference to the task that is periodically reaching out to a master eligible node to get its CoordinationDiagnosticsResult
* for diagnosis. It is null when no polling is occurring.
* The field is accessed (reads/writes) from multiple threads. It is only reassigned on the initialization thread and the cluster
* change event thread.
*/
volatile AtomicReference<Scheduler.Cancellable> remoteCoordinationDiagnosisTask = null;
/*
* This field holds the result of the task in the remoteCoordinationDiagnosisTask field above. The field is accessed
* (reads/writes) from multiple threads, but is only ever reassigned on the initialization thread and the cluster change event thread.
*/
volatile AtomicReference<RemoteMasterHealthResult> remoteCoordinationDiagnosisResult = null;
/**
* This is the amount of time that we wait before scheduling a remote request to gather diagnostic information. It is not
* user-configurable, but is non-final so that integration tests don't have to waste 10 seconds.
*/
// Non-private for testing
static TimeValue remoteRequestInitialDelay = new TimeValue(10, TimeUnit.SECONDS);
private static final Logger logger = LogManager.getLogger(CoordinationDiagnosticsService.class);
/**
* This is the default amount of time we look back to see if we have had a master at all, before moving on with other checks
*/
public static final Setting<TimeValue> NODE_HAS_MASTER_LOOKUP_TIMEFRAME_SETTING = Setting.timeSetting(
"health.master_history.has_master_lookup_timeframe",
new TimeValue(30, TimeUnit.SECONDS),
new TimeValue(1, TimeUnit.SECONDS),
Setting.Property.NodeScope
);
/**
* This is the number of times that it is not OK to have a master go null. This many transitions or more will be reported as a problem.
*/
public static final Setting<Integer> NO_MASTER_TRANSITIONS_THRESHOLD_SETTING = Setting.intSetting(
"health.master_history.no_master_transitions_threshold",
4,
0,
Setting.Property.NodeScope
);
/**
* This is the number of times that it is not OK to have a master change identity. This many changes or more will be reported as a
* problem.
*/
public static final Setting<Integer> IDENTITY_CHANGES_THRESHOLD_SETTING = Setting.intSetting(
"health.master_history.identity_changes_threshold",
4,
0,
Setting.Property.NodeScope
);
public CoordinationDiagnosticsService(
ClusterService clusterService,
TransportService transportService,
Coordinator coordinator,
MasterHistoryService masterHistoryService
) {
this.clusterService = clusterService;
this.transportService = transportService;
this.clusterCoordinationExecutor = transportService.getThreadPool().executor(ThreadPool.Names.CLUSTER_COORDINATION);
this.coordinator = coordinator;
this.masterHistoryService = masterHistoryService;
this.nodeHasMasterLookupTimeframe = NODE_HAS_MASTER_LOOKUP_TIMEFRAME_SETTING.get(clusterService.getSettings());
this.unacceptableNullTransitions = NO_MASTER_TRANSITIONS_THRESHOLD_SETTING.get(clusterService.getSettings());
this.unacceptableIdentityChanges = IDENTITY_CHANGES_THRESHOLD_SETTING.get(clusterService.getSettings());
}
/**
* This method completes the initialization of the CoordinationDiagnosticsService. It kicks off polling for remote master stability
* results on non-master-eligible nodes, and registers the service as a cluster service listener on all nodes.
*/
public void start() {
/*
* This is called here to cover an edge case -- when there are master-eligible nodes in the cluster but none of them has been
* elected master. In the most common case this node will receive a ClusterChangedEvent that results in this polling being
* cancelled almost immediately. If that does not happen, then we do in fact need to be polling. Note that
* beginPollingRemoteMasterStabilityDiagnostic results in several internal transport actions being called, so it must run in the
* system context.
*/
if (clusterService.localNode().isMasterNode() == false) {
try (var ignored = transportService.getThreadPool().getThreadContext().newEmptySystemContext()) {
beginPollingRemoteMasterStabilityDiagnostic();
}
}
clusterService.addListener(this);
}
/**
* This method calculates the master stability as seen from this node.
* @param verbose If true, the result will contain a non-empty CoordinationDiagnosticsDetails if the resulting status is non-GREEN
* @return Information about the current stability of the master node, as seen from this node
*/
public CoordinationDiagnosticsResult diagnoseMasterStability(boolean verbose) {
MasterHistory localMasterHistory = masterHistoryService.getLocalMasterHistory();
if (hasSeenMasterInHasMasterLookupTimeframe()) {
return diagnoseOnHaveSeenMasterRecently(localMasterHistory, verbose);
} else {
return diagnoseOnHaveNotSeenMasterRecently(localMasterHistory, verbose);
}
}
/**
* Returns the health result for the case when we have seen a master recently (at some point in the last 30 seconds).
* @param localMasterHistory The master history as seen from the local machine
* @param verbose Whether to calculate and include the details and user actions in the result
* @return The CoordinationDiagnosticsResult for the given localMasterHistory
*/
private CoordinationDiagnosticsResult diagnoseOnHaveSeenMasterRecently(MasterHistory localMasterHistory, boolean verbose) {
int masterChanges = MasterHistory.getNumberOfMasterIdentityChanges(localMasterHistory.getNodes());
logger.trace(
"Have seen a master in the last {}): {}",
nodeHasMasterLookupTimeframe,
localMasterHistory.getMostRecentNonNullMaster()
);
final CoordinationDiagnosticsResult result;
if (masterChanges >= unacceptableIdentityChanges) {
result = diagnoseOnMasterHasChangedIdentity(localMasterHistory, masterChanges, verbose);
} else if (localMasterHistory.hasMasterGoneNullAtLeastNTimes(unacceptableNullTransitions)) {
result = diagnoseOnMasterHasFlappedNull(localMasterHistory, verbose);
} else {
result = getMasterIsStableResult(verbose, localMasterHistory);
}
return result;
}
/**
* Returns the health result when we have detected locally that the master has changed identity repeatedly (by default more than 3
* times in the last 30 minutes)
* @param localMasterHistory The master history as seen from the local machine
* @param masterChanges The number of times that the local machine has seen the master identity change in the last 30 minutes
* @param verbose Whether to calculate and include the details in the result
* @return The CoordinationDiagnosticsResult for the given localMasterHistory
*/
private static CoordinationDiagnosticsResult diagnoseOnMasterHasChangedIdentity(
MasterHistory localMasterHistory,
int masterChanges,
boolean verbose
) {
logger.trace("Have seen {} master changes in the last {}", masterChanges, localMasterHistory.getMaxHistoryAge());
CoordinationDiagnosticsStatus coordinationDiagnosticsStatus = CoordinationDiagnosticsStatus.YELLOW;
String summary = String.format(
Locale.ROOT,
"The elected master node has changed %d times in the last %s",
masterChanges,
localMasterHistory.getMaxHistoryAge()
);
CoordinationDiagnosticsDetails details = getDetails(verbose, localMasterHistory, null, null);
return new CoordinationDiagnosticsResult(coordinationDiagnosticsStatus, summary, details);
}
/**
* This returns CoordinationDiagnosticsDetails.EMPTY if verbose is false, otherwise a CoordinationDiagnosticsDetails object
* containing only a "current_master" object and a "recent_masters" array. The "current_master" object will have "node_id" and "name"
* fields for the master node. Both will be null if the last-seen master was null. The "recent_masters" array will contain
* "recent_master" objects. Each "recent_master" object will have "node_id" and "name" fields for the master node. These fields will
* never be null because null masters are not written to this array.
* @param verbose If true, the CoordinationDiagnosticsDetails will contain "current_master" and "recent_masters". Otherwise it will
* be empty.
* @param localMasterHistory The MasterHistory object to pull current and recent master info from
* @return An empty CoordinationDiagnosticsDetails if verbose is false, otherwise a CoordinationDiagnosticsDetails containing only
* "current_master" and "recent_masters"
*/
private static CoordinationDiagnosticsDetails getDetails(
boolean verbose,
MasterHistory localMasterHistory,
@Nullable Exception remoteException,
@Nullable Map<String, String> clusterFormationMessages
) {
if (verbose == false) {
return CoordinationDiagnosticsDetails.EMPTY;
}
DiscoveryNode masterNode = localMasterHistory.getMostRecentMaster();
List<DiscoveryNode> recentNonNullMasters = localMasterHistory.getNodes().stream().filter(Objects::nonNull).toList();
return new CoordinationDiagnosticsDetails(masterNode, recentNonNullMasters, remoteException, clusterFormationMessages);
}
/**
* Returns the health result when we have detected locally that the master has changed to null repeatedly (by default more than 3 times
* in the last 30 minutes). This method attempts to use the master history from a remote node to confirm what we are seeing locally.
* If the information from the remote node confirms that the master history has been unstable, a YELLOW status is returned. If the
* information from the remote node shows that the master history has been stable, then we assume that the problem is with this node
* and a GREEN status is returned (the problems with this node will be covered in a separate health indicator). If there had been
* problems fetching the remote master history, the exception seen will be included in the details of the result.
* @param localMasterHistory The master history as seen from the local machine
* @param verbose Whether to calculate and include the details in the result
* @return The CoordinationDiagnosticsResult for the given localMasterHistory
*/
private CoordinationDiagnosticsResult diagnoseOnMasterHasFlappedNull(MasterHistory localMasterHistory, boolean verbose) {
DiscoveryNode master = localMasterHistory.getMostRecentNonNullMaster();
boolean localNodeIsMaster = clusterService.localNode().equals(master);
List<DiscoveryNode> remoteHistory;
Exception remoteHistoryException = null;
if (localNodeIsMaster) {
remoteHistory = null; // We don't need to fetch the remote master's history if we are that remote master
} else {
try {
remoteHistory = masterHistoryService.getRemoteMasterHistory();
} catch (Exception e) {
remoteHistory = null;
remoteHistoryException = e;
}
}
/*
* If the local node is master, then we have a confirmed problem (since we now know that from this node's point of view the
* master is unstable).
* If the local node is not master but the remote history is null then we have a problem (since from this node's point of view the
* master is unstable, and we were unable to get the master's own view of its history). It could just be a short-lived problem
* though if the remote history has not arrived yet.
* If the local node is not master and the master history from the master itself reports that the master has gone null repeatedly
* or changed identity repeatedly, then we have a problem (the master has confirmed what the local node saw).
*/
boolean masterConfirmedUnstable = localNodeIsMaster
|| remoteHistoryException != null
|| (remoteHistory != null
&& (MasterHistory.hasMasterGoneNullAtLeastNTimes(remoteHistory, unacceptableNullTransitions)
|| MasterHistory.getNumberOfMasterIdentityChanges(remoteHistory) >= unacceptableIdentityChanges));
if (masterConfirmedUnstable) {
logger.trace("The master node {} thinks it is unstable", master);
String summary = String.format(
Locale.ROOT,
"The cluster's master has alternated between %s and no master multiple times in the last %s",
localMasterHistory.getNodes().stream().filter(Objects::nonNull).collect(Collectors.toSet()),
localMasterHistory.getMaxHistoryAge()
);
final CoordinationDiagnosticsDetails details = getDetails(verbose, localMasterHistory, remoteHistoryException, null);
return new CoordinationDiagnosticsResult(CoordinationDiagnosticsStatus.YELLOW, summary, details);
} else {
logger.trace("This node thinks the master is unstable, but the master node {} thinks it is stable", master);
return getMasterIsStableResult(verbose, localMasterHistory);
}
}
/**
* Returns a CoordinationDiagnosticsResult for the case when the master is seen as stable
* @return A CoordinationDiagnosticsResult for the case when the master is seen as stable (GREEN status, no impacts or details)
*/
private static CoordinationDiagnosticsResult getMasterIsStableResult(boolean verbose, MasterHistory localMasterHistory) {
String summary = "The cluster has a stable master node";
logger.trace("The cluster has a stable master node");
CoordinationDiagnosticsDetails details = getDetails(verbose, localMasterHistory, null, null);
return new CoordinationDiagnosticsResult(CoordinationDiagnosticsStatus.GREEN, summary, details);
}
/**
* Returns the health result for the case when we have NOT seen a master recently (at some point in the last 30 seconds).
* @param localMasterHistory The master history as seen from the local machine
* @param verbose Whether to calculate and include the details in the result
* @return The CoordinationDiagnosticsResult for the given localMasterHistory
*/
private CoordinationDiagnosticsResult diagnoseOnHaveNotSeenMasterRecently(MasterHistory localMasterHistory, boolean verbose) {
Collection<DiscoveryNode> masterEligibleNodes = getMasterEligibleNodes();
final CoordinationDiagnosticsResult result;
boolean clusterHasLeader = coordinator.getPeerFinder().getLeader().isPresent();
boolean noLeaderAndNoMasters = clusterHasLeader == false && masterEligibleNodes.isEmpty();
boolean isLocalNodeMasterEligible = clusterService.localNode().isMasterNode();
if (noLeaderAndNoMasters) {
result = getResultOnNoMasterEligibleNodes(localMasterHistory, verbose);
} else if (clusterHasLeader) {
DiscoveryNode currentMaster = coordinator.getPeerFinder().getLeader().get();
result = getResultOnCannotJoinLeader(localMasterHistory, currentMaster, verbose);
} else if (isLocalNodeMasterEligible == false) { // none is elected master and we aren't master eligible
result = diagnoseOnHaveNotSeenMasterRecentlyAndWeAreNotMasterEligible(
localMasterHistory,
coordinator,
nodeHasMasterLookupTimeframe,
remoteCoordinationDiagnosisResult,
verbose
);
} else { // none is elected master and we are master eligible
result = diagnoseOnHaveNotSeenMasterRecentlyAndWeAreMasterEligible(
localMasterHistory,
masterEligibleNodes,
coordinator,
clusterFormationResponses,
nodeHasMasterLookupTimeframe,
verbose
);
}
return result;
}
/**
* This method handles the case when we have not had an elected master node recently, and we are on a node that is not
* master-eligible. In this case we reach out to some master-eligible node in order to see what it knows about master stability.
* @param localMasterHistory The master history, as seen from this node
* @param coordinator The Coordinator for this node
* @param nodeHasMasterLookupTimeframe The value of health.master_history.has_master_lookup_timeframe
* @param remoteCoordinationDiagnosisResult A reference to the result of polling a master-eligible node for diagnostic information
* @param verbose If true, details are returned
* @return A CoordinationDiagnosticsResult that will be determined by the CoordinationDiagnosticsResult returned by the remote
* master-eligible node
*/
static CoordinationDiagnosticsResult diagnoseOnHaveNotSeenMasterRecentlyAndWeAreNotMasterEligible(
MasterHistory localMasterHistory,
Coordinator coordinator,
TimeValue nodeHasMasterLookupTimeframe,
AtomicReference<RemoteMasterHealthResult> remoteCoordinationDiagnosisResult,
boolean verbose
) {
RemoteMasterHealthResult remoteResultOrException = remoteCoordinationDiagnosisResult == null
? null
: remoteCoordinationDiagnosisResult.get();
final CoordinationDiagnosticsStatus status;
final String summary;
final CoordinationDiagnosticsDetails details;
if (remoteResultOrException == null) {
status = CoordinationDiagnosticsStatus.RED;
summary = String.format(
Locale.ROOT,
"No master node observed in the last %s, and this node is not master eligible. Reaching out to a master-eligible node"
+ " for more information",
nodeHasMasterLookupTimeframe
);
if (verbose) {
details = getDetails(
true,
localMasterHistory,
null,
Map.of(coordinator.getLocalNode().getId(), coordinator.getClusterFormationState().getDescription())
);
} else {
details = CoordinationDiagnosticsDetails.EMPTY;
}
} else {
DiscoveryNode remoteNode = remoteResultOrException.node;
CoordinationDiagnosticsResult remoteResult = remoteResultOrException.result;
Exception exception = remoteResultOrException.remoteException;
if (remoteResult != null) {
if (remoteResult.status().equals(CoordinationDiagnosticsStatus.GREEN) == false) {
status = remoteResult.status();
summary = remoteResult.summary();
} else {
status = CoordinationDiagnosticsStatus.RED;
summary = String.format(
Locale.ROOT,
"No master node observed in the last %s from this node, but %s reports that the status is GREEN. This "
+ "indicates that there is a discovery problem on %s",
nodeHasMasterLookupTimeframe,
remoteNode.getName(),
coordinator.getLocalNode().getName()
);
}
if (verbose) {
details = remoteResult.details();
} else {
details = CoordinationDiagnosticsDetails.EMPTY;
}
} else {
status = CoordinationDiagnosticsStatus.RED;
summary = String.format(
Locale.ROOT,
"No master node observed in the last %s from this node, and received an exception while reaching out to %s for "
+ "diagnosis",
nodeHasMasterLookupTimeframe,
remoteNode.getName()
);
if (verbose) {
details = getDetails(true, localMasterHistory, exception, null);
} else {
details = CoordinationDiagnosticsDetails.EMPTY;
}
}
}
return new CoordinationDiagnosticsResult(status, summary, details);
}
/**
* This method handles the case when we have not had an elected master node recently, and we are on a master-eligible node. In this
* case we look at the cluster formation information from all master-eligible nodes, trying to understand if we have a discovery
* problem, a problem forming a quorum, or something else.
* @param localMasterHistory The master history, as seen from this node
* @param masterEligibleNodes The known master eligible nodes in the cluster
* @param coordinator The Coordinator for this node
* @param clusterFormationResponses A map that contains the cluster formation information (or exception encountered while requesting
* it) from each master eligible node in the cluster
* @param nodeHasMasterLookupTimeframe The value of health.master_history.has_master_lookup_timeframe
* @param verbose If true, details are returned
* @return A CoordinationDiagnosticsResult with a RED status
*/
static CoordinationDiagnosticsResult diagnoseOnHaveNotSeenMasterRecentlyAndWeAreMasterEligible(
MasterHistory localMasterHistory,
Collection<DiscoveryNode> masterEligibleNodes,
Coordinator coordinator,
ConcurrentMap<DiscoveryNode, ClusterFormationStateOrException> clusterFormationResponses,
TimeValue nodeHasMasterLookupTimeframe,
boolean verbose
) {
final CoordinationDiagnosticsResult result;
/*
* We want to make sure that the same elements are in this set every time we loop through it. We don't care if values are added
* while we're copying it, which is why this is not synchronized. We only care that once we have a copy it is not changed.
*/
final Map<DiscoveryNode, ClusterFormationStateOrException> nodeToClusterFormationResponses = clusterFormationResponses == null
? Map.of()
: Map.copyOf(clusterFormationResponses);
for (Map.Entry<DiscoveryNode, ClusterFormationStateOrException> entry : nodeToClusterFormationResponses.entrySet()) {
Exception remoteException = entry.getValue().exception();
if (remoteException != null) {
return new CoordinationDiagnosticsResult(
CoordinationDiagnosticsStatus.RED,
String.format(
Locale.ROOT,
"No master node observed in the last %s, and an exception occurred while reaching out to %s for diagnosis",
nodeHasMasterLookupTimeframe,
entry.getKey().getName()
),
getDetails(
verbose,
localMasterHistory,
remoteException,
Map.of(coordinator.getLocalNode().getId(), coordinator.getClusterFormationState().getDescription())
)
);
}
}
Map<DiscoveryNode, ClusterFormationFailureHelper.ClusterFormationState> nodeClusterFormationStateMap =
nodeToClusterFormationResponses.entrySet()
.stream()
.collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().clusterFormationState()));
if (nodeClusterFormationStateMap.isEmpty()) {
/*
* The most likely reason we are here is that polling for cluster formation info never began because there has been no cluster
* changed event because there has never been a master node. So we just use the local cluster formation state.
*/
nodeClusterFormationStateMap = Map.of(coordinator.getLocalNode(), coordinator.getClusterFormationState());
}
Map<String, String> nodeIdToClusterFormationDescription = nodeClusterFormationStateMap.entrySet()
.stream()
.collect(Collectors.toMap(entry -> entry.getKey().getId(), entry -> entry.getValue().getDescription()));
if (anyNodeInClusterReportsDiscoveryProblems(masterEligibleNodes, nodeClusterFormationStateMap)) {
result = new CoordinationDiagnosticsResult(
CoordinationDiagnosticsStatus.RED,
String.format(
Locale.ROOT,
"No master node observed in the last %s, and some master eligible nodes are unable to discover other master "
+ "eligible nodes",
nodeHasMasterLookupTimeframe
),
getDetails(verbose, localMasterHistory, null, nodeIdToClusterFormationDescription)
);
} else {
if (anyNodeInClusterReportsQuorumProblems(nodeClusterFormationStateMap)) {
result = new CoordinationDiagnosticsResult(
CoordinationDiagnosticsStatus.RED,
String.format(
Locale.ROOT,
"No master node observed in the last %s, and the master eligible nodes are unable to form a quorum",
nodeHasMasterLookupTimeframe
),
getDetails(verbose, localMasterHistory, null, nodeIdToClusterFormationDescription)
);
} else {
result = new CoordinationDiagnosticsResult(
CoordinationDiagnosticsStatus.RED,
String.format(
Locale.ROOT,
"No master node observed in the last %s, and the cause has not been determined.",
nodeHasMasterLookupTimeframe
),
getDetails(verbose, localMasterHistory, null, nodeIdToClusterFormationDescription)
);
}
}
return result;
}
/**
* This method checks whether each master eligible node has discovered each of the other master eligible nodes. For the sake of this
* method, a discovery problem is when the foundPeers of any ClusterFormationState on any node we have that information for does not
* contain all of the nodes in the local coordinator.getFoundPeers().
* @param masterEligibleNodes The collection of all master eligible nodes
* @param nodeToClusterFormationStateMap A map of each master node to its ClusterFormationState
* @return true if there are discovery problems, false otherwise
*/
static boolean anyNodeInClusterReportsDiscoveryProblems(
Collection<DiscoveryNode> masterEligibleNodes,
Map<DiscoveryNode, ClusterFormationFailureHelper.ClusterFormationState> nodeToClusterFormationStateMap
) {
Map<DiscoveryNode, Collection<DiscoveryNode>> nodesNotDiscoveredMap = new HashMap<>();
for (Map.Entry<DiscoveryNode, ClusterFormationFailureHelper.ClusterFormationState> entry : nodeToClusterFormationStateMap
.entrySet()) {
Set<DiscoveryNode> foundPeersOnNode = new HashSet<>(entry.getValue().foundPeers());
if (foundPeersOnNode.containsAll(masterEligibleNodes) == false) {
Collection<DiscoveryNode> nodesNotDiscovered = masterEligibleNodes.stream()
.filter(node -> foundPeersOnNode.contains(node) == false)
.toList();
nodesNotDiscoveredMap.put(entry.getKey(), nodesNotDiscovered);
}
}
if (nodesNotDiscoveredMap.isEmpty()) {
return false;
} else {
String nodeDiscoveryProblemsMessage = nodesNotDiscoveredMap.entrySet()
.stream()
.map(
entry -> String.format(
Locale.ROOT,
"%s cannot discover [%s]",
entry.getKey().getName(),
entry.getValue().stream().map(DiscoveryNode::getName).collect(Collectors.joining(", "))
)
)
.collect(Collectors.joining("; "));
logger.debug("The following nodes report discovery problems: {}", nodeDiscoveryProblemsMessage);
return true;
}
}
/**
* This method checks that each master eligible node in the quorum thinks that it can form a quorum. If there are nodes that report a
* problem forming a quorum, this method returns true. This method determines whether a node thinks that a quorum can be formed by
* checking the value of that node's ClusterFormationState.hasDiscoveredQuorum field.
* @param nodeToClusterFormationStateMap A map of each master node to its ClusterFormationState
* @return True if any nodes in nodeToClusterFormationStateMap report a problem forming a quorum, false otherwise.
*/
static boolean anyNodeInClusterReportsQuorumProblems(
Map<DiscoveryNode, ClusterFormationFailureHelper.ClusterFormationState> nodeToClusterFormationStateMap
) {
Map<DiscoveryNode, String> quorumProblems = new HashMap<>();
for (Map.Entry<DiscoveryNode, ClusterFormationFailureHelper.ClusterFormationState> entry : nodeToClusterFormationStateMap
.entrySet()) {
ClusterFormationFailureHelper.ClusterFormationState clusterFormationState = entry.getValue();
if (clusterFormationState.hasDiscoveredQuorum() == false) {
quorumProblems.put(entry.getKey(), clusterFormationState.getDescription());
}
}
if (quorumProblems.isEmpty()) {
return false;
} else {
String quorumProblemsMessage = quorumProblems.entrySet()
.stream()
.map(
entry -> String.format(
Locale.ROOT,
"%s reports that a quorum cannot be formed: [%s]",
entry.getKey().getName(),
entry.getValue()
)
)
.collect(Collectors.joining("; "));
logger.debug("Some master eligible nodes report that a quorum cannot be formed: {}", quorumProblemsMessage);
return true;
}
}
/**
* Creates a CoordinationDiagnosticsResult in the case that there has been no master in the last few seconds, there is no elected
* master known, and there are no master eligible nodes. The status will be RED, and the details (if verbose is true) will contain
* the list of any masters seen previously and a description of known problems from this node's Coordinator.
* @param localMasterHistory Used to pull recent master nodes for the details if verbose is true
* @param verbose If true, details are returned
* @return A CoordinationDiagnosticsResult with a RED status
*/
private CoordinationDiagnosticsResult getResultOnNoMasterEligibleNodes(MasterHistory localMasterHistory, boolean verbose) {
String summary = "No master eligible nodes found in the cluster";
CoordinationDiagnosticsDetails details = getDetails(
verbose,
localMasterHistory,
null,
Map.of(coordinator.getLocalNode().getId(), coordinator.getClusterFormationState().getDescription())
);
return new CoordinationDiagnosticsResult(CoordinationDiagnosticsStatus.RED, summary, details);
}
/**
* Creates a CoordinationDiagnosticsResult in the case that there has been no master in the last few seconds in this node's cluster
* state, but PeerFinder reports that there is an elected master. The assumption is that this node is having a problem joining the
* elected master. The status will be RED, and the details (if verbose is true) will contain the list of any masters seen previously
* and a description of known problems from this node's Coordinator.
* @param localMasterHistory Used to pull recent master nodes for the details if verbose is true
* @param currentMaster The node that PeerFinder reports as the elected master
* @param verbose If true, details are returned
* @return A CoordinationDiagnosticsResult with a RED status
*/
private CoordinationDiagnosticsResult getResultOnCannotJoinLeader(
MasterHistory localMasterHistory,
DiscoveryNode currentMaster,
boolean verbose
) {
String summary = String.format(
Locale.ROOT,
"%s has been elected master, but the node being queried, %s, is unable to join it",
currentMaster,
clusterService.localNode()
);
CoordinationDiagnosticsDetails details = getDetails(
verbose,
localMasterHistory,
null,
Map.of(coordinator.getLocalNode().getId(), coordinator.getClusterFormationState().getDescription())
);
return new CoordinationDiagnosticsResult(CoordinationDiagnosticsStatus.RED, summary, details);
}
/**
* Returns the master eligible nodes as found in this node's Coordinator, plus the local node if it is master eligible.
* @return All known master eligible nodes in this cluster
*/
private Collection<DiscoveryNode> getMasterEligibleNodes() {
Set<DiscoveryNode> masterEligibleNodes = new HashSet<>();
coordinator.getFoundPeers().forEach(node -> {
if (node.isMasterNode()) {
masterEligibleNodes.add(node);
}
});
// Coordinator does not report the local node, so add it:
if (clusterService.localNode().isMasterNode()) {
masterEligibleNodes.add(clusterService.localNode());
}
return masterEligibleNodes;
}
/**
* Returns a random master eligible node, or null if this node does not know about any master eligible nodes
* @return A random master eligible node or null
*/
// Non-private for unit testing
@Nullable
DiscoveryNode getRandomMasterEligibleNode() {
Collection<DiscoveryNode> masterEligibleNodes = getMasterEligibleNodes();
if (masterEligibleNodes.isEmpty()) {
return null;
}
return masterEligibleNodes.toArray(new DiscoveryNode[0])[random.get().nextInt(masterEligibleNodes.size())];
}
/**
* This returns true if this node has seen a master node within the last few seconds
* @return true if this node has seen a master node within the last few seconds, false otherwise
*/
private boolean hasSeenMasterInHasMasterLookupTimeframe() {
return masterHistoryService.getLocalMasterHistory().hasSeenMasterInLastNSeconds((int) nodeHasMasterLookupTimeframe.seconds());
}
    /*
     * If we detect that the master has gone null 3 or more times (by default), we ask the MasterHistoryService to fetch the master
     * history as seen from the most recent master node so that it is ready in case a health API request comes in. The request to the
     * MasterHistoryService is made asynchronously, and populates the value that MasterHistoryService.getRemoteMasterHistory() will return.
     * The remote master history is ordinarily returned very quickly if it is going to be returned, so the odds are very good it will be
     * in place by the time a request for it comes in. If not, this service's status will briefly switch to yellow.
     */
    @Override
    public void clusterChanged(ClusterChangedEvent event) {
        DiscoveryNode currentMaster = event.state().nodes().getMasterNode();
        DiscoveryNode previousMaster = event.previousState().nodes().getMasterNode();
        // Only react when the elected master flipped between null and non-null, in either direction:
        if ((currentMaster == null && previousMaster != null) || (currentMaster != null && previousMaster == null)) {
            if (masterHistoryService.getLocalMasterHistory().hasMasterGoneNullAtLeastNTimes(unacceptableNullTransitions)) {
                /*
                 * If the master node has been going to null repeatedly, we want to make a remote request to it to see what it thinks of
                 * master stability. We want to query the most recent master whether the current master has just transitioned to null or
                 * just transitioned from null to not null. The reason that we make the latter request is that sometimes when the elected
                 * master goes to null the most recent master is not responsive for the duration of the request timeout (for example if
                 * that node is in the middle of a long GC pause which would be both the reason for it not being master and the reason it
                 * does not respond quickly to transport requests).
                 */
                DiscoveryNode master = masterHistoryService.getLocalMasterHistory().getMostRecentNonNullMaster();
                /*
                 * If the most recent master was this box, there is no point in making a transport request -- we already know what this
                 * box's view of the master history is
                 */
                if (master != null && clusterService.localNode().equals(master) == false) {
                    masterHistoryService.refreshRemoteMasterHistory(master);
                }
            }
        }
        if (currentMaster == null && clusterService.localNode().isMasterNode()) {
            /*
             * This begins polling all master-eligible nodes for cluster formation information. However there's a 10-second delay
             * before it starts, so in the normal situation where during a master transition it flips from master1 -> null ->
             * master2 the polling tasks will be canceled before any requests are actually made.
             */
            beginPollingClusterFormationInfo();
        } else {
            cancelPollingClusterFormationInfo();
        }
        // Non-master-eligible nodes instead poll a (random) master-eligible node for a full stability diagnostic while there is no
        // elected master, and stop as soon as one appears:
        if (clusterService.localNode().isMasterNode() == false) {
            if (currentMaster == null) {
                beginPollingRemoteMasterStabilityDiagnostic();
            } else {
                cancelPollingRemoteMasterStabilityDiagnostic();
            }
        }
    }
    /**
     * This method begins polling all known master-eligible nodes for cluster formation information. After a 10-second initial delay, it
     * polls each node every 10 seconds until cancelPollingClusterFormationInfo() is called.
     * Must run on the cluster applier thread (asserted below), which is the single writer of the polling fields.
     */
    void beginPollingClusterFormationInfo() {
        assert ThreadPool.assertCurrentThreadPool(ClusterApplierService.CLUSTER_UPDATE_THREAD_NAME);
        assert ThreadPool.assertInSystemContext(transportService.getThreadPool());
        // Tear down any previous polling round before starting a fresh one:
        cancelPollingClusterFormationInfo();
        ConcurrentMap<DiscoveryNode, ClusterFormationStateOrException> responses = new ConcurrentHashMap<>();
        Map<DiscoveryNode, Scheduler.Cancellable> cancellables = new ConcurrentHashMap<>();
        /*
         * Assignment of clusterFormationInfoTasks must be done before the call to beginPollingClusterFormationInfo because it is used
         * asynchronously by rescheduleClusterFormationFetchConsumer, called from beginPollingClusterFormationInfo.
         */
        clusterFormationInfoTasks = cancellables;
        clusterFormationResponses = responses;
        beginPollingClusterFormationInfo(getMasterEligibleNodes(), responses::put, cancellables);
    }
/**
* This method returns quickly, but in the background schedules to query the remote node's cluster formation state in 10 seconds, and
* repeats doing that until cancel() is called on all of the Cancellable that this method inserts into cancellables. This method
* exists (rather than being just part of the beginPollingClusterFormationInfo() above) in order to facilitate unit testing.
* @param nodeResponseConsumer A consumer for any results produced for a node by this method
* @param cancellables The Map of Cancellables, one for each node being polled
*/
// Non-private for testing
void beginPollingClusterFormationInfo(
Collection<DiscoveryNode> masterEligibleNodes,
BiConsumer<DiscoveryNode, ClusterFormationStateOrException> nodeResponseConsumer,
Map<DiscoveryNode, Scheduler.Cancellable> cancellables
) {
masterEligibleNodes.forEach(masterEligibleNode -> {
Consumer<ClusterFormationStateOrException> responseConsumer = result -> nodeResponseConsumer.accept(masterEligibleNode, result);
try {
cancellables.put(
masterEligibleNode,
fetchClusterFormationInfo(
masterEligibleNode,
responseConsumer.andThen(
rescheduleClusterFormationFetchConsumer(masterEligibleNode, responseConsumer, cancellables)
)
)
);
} catch (EsRejectedExecutionException e) {
if (e.isExecutorShutdown()) {
logger.trace("Not rescheduling request for cluster coordination info because this node is being shutdown", e);
} else {
throw e;
}
}
});
}
    /**
     * This wraps the responseConsumer in a Consumer that will run rescheduleClusterFormationFetchConsumer() after responseConsumer has
     * completed, adding the resulting Cancellable to cancellables.
     * @param masterEligibleNode The node being polled
     * @param responseConsumer The response consumer to be wrapped
     * @param cancellables The Map of Cancellables, one for each node being polled
     * @return A Consumer that, after each poll result, schedules the next fetch for masterEligibleNode if this polling round is still
     *         the current one, or else cancels everything remaining in cancellables
     */
    private Consumer<CoordinationDiagnosticsService.ClusterFormationStateOrException> rescheduleClusterFormationFetchConsumer(
        DiscoveryNode masterEligibleNode,
        Consumer<CoordinationDiagnosticsService.ClusterFormationStateOrException> responseConsumer,
        Map<DiscoveryNode, Scheduler.Cancellable> cancellables
    ) {
        return response -> {
            /*
             * If clusterFormationInfoTasks is null, that means that cancelPollingClusterFormationInfo() has been called, so we don't
             * want to run anything new, and we want to cancel anything that might still be running in our cancellables just to be safe.
             */
            if (clusterFormationInfoTasks != null) {
                /*
                 * If cancellables is not the same as clusterFormationInfoTasks, that means that the current polling track has been
                 * cancelled and a new polling track has been started. So we don't want to run anything new, and we want to cancel
                 * anything that might still be running in our cancellables just to be safe. Note that it is possible for
                 * clusterFormationInfoTasks to be null at this point (since it is assigned in a different thread), so it is important
                 * that we don't call equals on it.
                 */
                if (cancellables.equals(clusterFormationInfoTasks)) {
                    /*
                     * As mentioned in the comment in cancelPollingClusterFormationInfo(), there is a slim possibility here that we will
                     * add a task here for a poll that has already been cancelled. But when it completes and runs
                     * rescheduleClusterFormationFetchConsumer() we will then see that clusterFormationInfoTasks does not equal
                     * cancellables, so it will not be run again.
                     */
                    try {
                        // Re-arm the poll for this node: same consumer chain, new Cancellable replaces the old map entry.
                        cancellables.put(
                            masterEligibleNode,
                            fetchClusterFormationInfo(
                                masterEligibleNode,
                                responseConsumer.andThen(
                                    rescheduleClusterFormationFetchConsumer(masterEligibleNode, responseConsumer, cancellables)
                                )
                            )
                        );
                    } catch (EsRejectedExecutionException e) {
                        if (e.isExecutorShutdown()) {
                            logger.trace("Not rescheduling request for cluster coordination info because this node is being shutdown", e);
                        } else {
                            throw e;
                        }
                    }
                } else {
                    cancellables.values().forEach(Scheduler.Cancellable::cancel);
                }
            } else {
                cancellables.values().forEach(Scheduler.Cancellable::cancel);
            }
        };
    }
void cancelPollingClusterFormationInfo() {
assert ThreadPool.assertCurrentThreadPool(ClusterApplierService.CLUSTER_UPDATE_THREAD_NAME);
if (clusterFormationInfoTasks != null) {
/*
* There is a slight risk here that a new Cancellable is added to clusterFormationInfoTasks after we begin iterating in the next
* line. We are calling this an acceptable risk because it will result in an un-cancelled un-cancellable task, but it will not
* reschedule itself so it will not be around long. It is possible that cancel() will be called on a Cancellable concurrently
* by multiple threads, but that will not cause any problems.
*/
clusterFormationInfoTasks.values().forEach(Scheduler.Cancellable::cancel);
clusterFormationInfoTasks = null;
clusterFormationResponses = null;
}
}
/**
* This method returns quickly, but in the background schedules to query the remote node's cluster formation state in 10 seconds
* unless cancel() is called on the Cancellable that this method returns.
* @param node The node to poll for cluster formation information
* @param responseConsumer The consumer of the cluster formation info for the node, or the exception encountered while contacting it
* @return A Cancellable for the task that is scheduled to fetch cluster formation information
* @throws EsRejectedExecutionException If the task cannot be scheduled, possibly because the node is shutting down.
*/
private Scheduler.Cancellable fetchClusterFormationInfo(
DiscoveryNode node,
Consumer<ClusterFormationStateOrException> responseConsumer
) {
return sendTransportRequest(
node,
responseConsumer,
ClusterFormationInfoAction.NAME,
ClusterFormationInfoAction.Response::new,
new ClusterFormationInfoAction.Request(),
(response, e) -> {
assert response != null || e != null : "a response or an exception must be provided";
if (response != null) {
return new ClusterFormationStateOrException(response.getClusterFormationState());
} else {
return new ClusterFormationStateOrException(e);
}
}
);
}
    /**
     * Begins repeatedly polling a random master-eligible node for its full coordination diagnostics. Used on
     * non-master-eligible nodes when there is no elected master (see clusterChanged()). The task and result holders are published to
     * the corresponding fields before polling begins so that the rescheduling/cancellation logic can observe them.
     */
    void beginPollingRemoteMasterStabilityDiagnostic() {
        assert ThreadPool.assertInSystemContext(transportService.getThreadPool());
        AtomicReference<Scheduler.Cancellable> cancellableReference = new AtomicReference<>();
        AtomicReference<RemoteMasterHealthResult> resultReference = new AtomicReference<>();
        // Field assignment must precede the polling call below, which reads these fields asynchronously:
        remoteCoordinationDiagnosisTask = cancellableReference;
        remoteCoordinationDiagnosisResult = resultReference;
        beginPollingRemoteMasterStabilityDiagnostic(resultReference::set, cancellableReference);
    }
/**
* This method returns quickly, but in the background schedules to query a remote master node's cluster diagnostics in 10 seconds, and
* repeats doing that until cancelPollingRemoteMasterStabilityDiagnostic() is called. This method
* exists (rather than being just part of the beginPollingRemoteMasterStabilityDiagnostic() above) in order to facilitate
* unit testing.
* @param responseConsumer A consumer for any results produced for a node by this method
* @param cancellableReference The Cancellable reference to assign the current Cancellable for this polling attempt
*/
// Non-private for testing
void beginPollingRemoteMasterStabilityDiagnostic(
Consumer<RemoteMasterHealthResult> responseConsumer,
AtomicReference<Scheduler.Cancellable> cancellableReference
) {
DiscoveryNode masterEligibleNode = getRandomMasterEligibleNode();
try {
cancellableReference.set(
fetchCoordinationDiagnostics(
masterEligibleNode,
responseConsumer.andThen(rescheduleDiagnosticsFetchConsumer(responseConsumer, cancellableReference))
)
);
} catch (EsRejectedExecutionException e) {
if (e.isExecutorShutdown()) {
logger.trace("Not rescheduling request for cluster coordination info because this node is being shutdown", e);
} else {
throw e;
}
}
}
    /**
     * This wraps the responseConsumer in a Consumer that will run rescheduleDiagnosticsFetchConsumer() after responseConsumer has
     * completed, adding the resulting Cancellable to cancellableReference.
     * @param responseConsumer The response consumer to be wrapped
     * @param cancellableReference The Cancellable reference to assign the current Cancellable for this polling attempt
     * @return A wrapped Consumer that will run fetchCoordinationDiagnostics()
     */
    private Consumer<RemoteMasterHealthResult> rescheduleDiagnosticsFetchConsumer(
        Consumer<RemoteMasterHealthResult> responseConsumer,
        AtomicReference<Scheduler.Cancellable> cancellableReference
    ) {
        return response -> {
            /*
             * If the cancellableReference for this poll attempt is equal to remoteCoordinationDiagnosisTask, then that means that
             * this poll attempt is the current one. If they are not equal, that means that
             * cancelPollingRemoteMasterStabilityDiagnostic() has been called on this poll attempt but this thread is not yet
             * aware. So we cancel the Cancellable in cancellableReference if it is not null. Note that
             * remoteCoordinationDiagnosisTask can be null.
             */
            if (cancellableReference.equals(remoteCoordinationDiagnosisTask)) {
                /*
                 * Because this is not synchronized with the cancelPollingRemoteMasterStabilityDiagnostic() method, there is a
                 * slim chance that we will add a task here for a poll that has already been cancelled. But when it completes and runs
                 * rescheduleDiagnosticsFetchConsumer() we will then see that remoteCoordinationDiagnosisTask does not equal
                 * cancellableReference, so it will not be run again.
                 */
                try {
                    // Pick a fresh random master-eligible node for each poll, in case the previous one has gone away:
                    DiscoveryNode masterEligibleNode = getRandomMasterEligibleNode();
                    cancellableReference.set(
                        fetchCoordinationDiagnostics(
                            masterEligibleNode,
                            responseConsumer.andThen(rescheduleDiagnosticsFetchConsumer(responseConsumer, cancellableReference))
                        )
                    );
                } catch (EsRejectedExecutionException e) {
                    if (e.isExecutorShutdown()) {
                        logger.trace("Not rescheduling request for cluster coordination info because this node is being shutdown", e);
                    } else {
                        throw e;
                    }
                }
            } else {
                Scheduler.Cancellable cancellable = cancellableReference.get();
                if (cancellable != null) {
                    cancellable.cancel();
                }
            }
        };
    }
/**
* This method returns quickly, but in the background schedules to query the remote masterEligibleNode's cluster diagnostics in 10
* seconds unless cancel() is called on the Cancellable that this method returns.
* @param masterEligibleNode The masterEligibleNode to poll for cluster diagnostics. This masterEligibleNode can be null in the case
* when there are not yet any master-eligible nodes known to this masterEligibleNode's PeerFinder.
* @param responseConsumer The consumer of the cluster diagnostics for the masterEligibleNode, or the exception encountered while
* contacting it
* @return A Cancellable for the task that is scheduled to fetch cluster diagnostics
*/
private Scheduler.Cancellable fetchCoordinationDiagnostics(
@Nullable DiscoveryNode masterEligibleNode,
Consumer<RemoteMasterHealthResult> responseConsumer
) {
return sendTransportRequest(
masterEligibleNode,
responseConsumer,
CoordinationDiagnosticsAction.NAME,
CoordinationDiagnosticsAction.Response::new,
new CoordinationDiagnosticsAction.Request(true),
(response, e) -> {
assert response != null || e != null : "a response or an exception must be provided";
if (response != null) {
return new RemoteMasterHealthResult(masterEligibleNode, response.getCoordinationDiagnosticsResult(), null);
} else {
return new RemoteMasterHealthResult(masterEligibleNode, null, e);
}
}
);
}
    /**
     * This method connects to masterEligibleNode and sends it a transport request for a response of type R. The response or exception
     * are transformed into a common type T with responseToResultFunction or exceptionToResultFunction, and then consumed by
     * responseConsumer. This method is meant to be used when there is potentially no elected master node, so it first calls
     * connectToNode before sending the request.
     * @param masterEligibleNode The master eligible node to be queried, or null if we do not yet know of a master eligible node.
     *                           If this is null, the responseConsumer will be given a null response
     * @param responseConsumer The consumer of the transformed response
     * @param actionName The name of the transport action
     * @param responseReader How to deserialize the transport response
     * @param transportActionRequest The ActionRequest to be sent
     * @param responseTransformationFunction A function that converts a response or exception to the response type expected by the
     *                                       responseConsumer
     * @return A Cancellable for the task that is scheduled to fetch the remote information
     */
    private <R extends ActionResponse, T> Scheduler.Cancellable sendTransportRequest(
        @Nullable DiscoveryNode masterEligibleNode,
        Consumer<T> responseConsumer,
        String actionName,
        Writeable.Reader<R> responseReader,
        ActionRequest transportActionRequest,
        BiFunction<R, Exception, T> responseTransformationFunction
    ) {
        ListenableFuture<Releasable> connectionListener = new ListenableFuture<>();
        ListenableFuture<R> fetchRemoteResultListener = new ListenableFuture<>();
        long startTimeMillis = transportService.getThreadPool().relativeTimeInMillis();
        // Step 2 (runs once the delayed connect below completes): send the request over the now-open connection.
        connectionListener.addListener(ActionListener.wrap(releasable -> {
            if (masterEligibleNode == null) {
                Releasables.close(releasable);
                responseConsumer.accept(null);
            } else {
                logger.trace("Opened connection to {}, making transport request", masterEligibleNode);
                // If we don't get a response in 10 seconds that is a failure worth capturing on its own:
                final TimeValue transportTimeout = TimeValue.timeValueSeconds(10);
                transportService.sendRequest(
                    masterEligibleNode,
                    actionName,
                    transportActionRequest,
                    TransportRequestOptions.timeout(transportTimeout),
                    new ActionListenerResponseHandler<>(
                        ActionListener.runBefore(fetchRemoteResultListener, () -> Releasables.close(releasable)),
                        responseReader,
                        clusterCoordinationExecutor
                    )
                );
            }
        }, e -> {
            logger.warn("Exception connecting to master " + masterEligibleNode, e);
            responseConsumer.accept(responseTransformationFunction.apply(null, e));
        }));
        // Step 3: transform the response (or failure) and hand it to the responseConsumer.
        fetchRemoteResultListener.addListener(ActionListener.wrap(response -> {
            long endTimeMillis = transportService.getThreadPool().relativeTimeInMillis();
            logger.trace(
                "Received remote response from {} in {}",
                masterEligibleNode,
                TimeValue.timeValueMillis(endTimeMillis - startTimeMillis)
            );
            responseConsumer.accept(responseTransformationFunction.apply(response, null));
        }, e -> {
            logger.warn("Exception in remote request to master " + masterEligibleNode, e);
            responseConsumer.accept(responseTransformationFunction.apply(null, e));
        }));
        // Step 1: after the initial delay, connect to the node (if one is known and new enough), which triggers the listeners above.
        return transportService.getThreadPool().schedule(new Runnable() {
            @Override
            public void run() {
                if (masterEligibleNode == null) {
                    /*
                     * This node's PeerFinder hasn't yet discovered the master-eligible nodes. By notifying the responseConsumer with a null
                     * value we effectively do nothing, and allow this request to be rescheduled.
                     */
                    responseConsumer.accept(null);
                } else {
                    Version minSupportedVersion = Version.V_8_4_0;
                    if (masterEligibleNode.getVersion().onOrAfter(minSupportedVersion) == false) {
                        // NOTE(review): in this branch the responseConsumer is never invoked, so the polling chain ends here for
                        // this attempt — confirm that is intended for nodes older than the minimum supported version.
                        logger.trace(
                            "Cannot get remote result from {} because it is at version {} and {} is required",
                            masterEligibleNode,
                            masterEligibleNode.getVersion(),
                            minSupportedVersion
                        );
                    } else {
                        transportService.connectToNode(
                            // Note: This connection must be explicitly closed in the connectionListener
                            masterEligibleNode,
                            connectionListener
                        );
                    }
                }
            }

            @Override
            public String toString() {
                return "delayed retrieval of coordination diagnostics info from " + masterEligibleNode;
            }
        }, remoteRequestInitialDelay, clusterCoordinationExecutor);
    }
void cancelPollingRemoteMasterStabilityDiagnostic() {
assert ThreadPool.assertCurrentThreadPool(ClusterApplierService.CLUSTER_UPDATE_THREAD_NAME);
if (remoteCoordinationDiagnosisTask != null) {
Scheduler.Cancellable task = remoteCoordinationDiagnosisTask.get();
if (task != null) {
task.cancel();
}
remoteCoordinationDiagnosisResult = null;
remoteCoordinationDiagnosisTask = null;
}
}
    // Non-private for testing
    /**
     * Holds either the cluster formation state fetched from a node or the exception encountered while fetching it.
     * At most one of the two components may be non-null (enforced in the compact constructor); both may be null, which is how
     * a "no result" is represented.
     */
    record ClusterFormationStateOrException(
        ClusterFormationFailureHelper.ClusterFormationState clusterFormationState,
        Exception exception
    ) {
        ClusterFormationStateOrException {
            if (clusterFormationState != null && exception != null) {
                throw new IllegalArgumentException("Cluster formation state and exception cannot both be non-null");
            }
        }

        /** Creates a result holding a successfully fetched cluster formation state (no exception). */
        ClusterFormationStateOrException(ClusterFormationFailureHelper.ClusterFormationState clusterFormationState) {
            this(clusterFormationState, null);
        }

        /** Creates a result holding the exception encountered while fetching the cluster formation state. */
        ClusterFormationStateOrException(Exception exception) {
            this(null, exception);
        }
    }
    /**
     * The outcome of a master-stability diagnosis: an overall status, a human-readable summary, and supporting details.
     * Writeable so that the result can be sent over the wire (e.g. fetched from a master-eligible node by other nodes).
     */
    public record CoordinationDiagnosticsResult(
        CoordinationDiagnosticsStatus status,
        String summary,
        CoordinationDiagnosticsDetails details
    ) implements Writeable {
        /** Deserializes a result, reading the components in the same order that {@link #writeTo} writes them. */
        public CoordinationDiagnosticsResult(StreamInput in) throws IOException {
            this(CoordinationDiagnosticsStatus.fromStreamInput(in), in.readString(), new CoordinationDiagnosticsDetails(in));
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            status.writeTo(out);
            out.writeString(summary);
            details.writeTo(out);
        }
    }
public | CoordinationDiagnosticsService |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/processor/internals/InternalTopologyBuilder.java | {
"start": 80394,
"end": 82708
} | class ____ extends AbstractNode implements TopologyDescription.Source {
private final Set<String> topics;
private final Pattern topicPattern;
public Source(final String name,
final Set<String> topics,
final Pattern pattern) {
super(name);
if (topics == null && pattern == null) {
throw new IllegalArgumentException("Either topics or pattern must be not-null, but both are null.");
}
if (topics != null && pattern != null) {
throw new IllegalArgumentException("Either topics or pattern must be null, but both are not null.");
}
this.topics = topics;
this.topicPattern = pattern;
}
@Override
public Set<String> topicSet() {
return topics;
}
@Override
public Pattern topicPattern() {
return topicPattern;
}
@Override
public void addPredecessor(final TopologyDescription.Node predecessor) {
throw new UnsupportedOperationException("Sources don't have predecessors.");
}
@Override
public String toString() {
final String topicsString = topics == null ? topicPattern.toString() : topics.toString();
return "Source: " + name + " (topics: " + topicsString + ")\n --> " + nodeNames(successors);
}
@Override
public boolean equals(final Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
final Source source = (Source) o;
// omit successor to avoid infinite loops
return name.equals(source.name)
&& Objects.equals(topics, source.topics)
&& (topicPattern == null ?
source.topicPattern == null :
topicPattern.pattern().equals(source.topicPattern.pattern()));
}
@Override
public int hashCode() {
// omit successor as it might change and alter the hash code
return Objects.hash(name, topics, topicPattern);
}
}
public static final | Source |
java | spring-projects__spring-framework | spring-webmvc/src/test/java/org/springframework/web/servlet/mvc/method/annotation/ServletAnnotationControllerHandlerMethodTests.java | {
"start": 141606,
"end": 141891
} | class ____ extends EntityPredicate<Article> {
public String query;
@Override
public boolean accept(Article entity) {
return super.accept(entity) && (query == null || (entity.title.contains(query) || entity.content.contains(query)));
}
}
@Controller
static | ArticlePredicate |
java | micronaut-projects__micronaut-core | http-server-tck/src/main/java/io/micronaut/http/server/tck/tests/CookiesTest.java | {
"start": 1447,
"end": 2946
} | class ____ {
public static final String SPEC_NAME = "CookiesTest";
@Test
void testCookieBind() throws IOException {
asserts(SPEC_NAME,
HttpRequest.GET("/cookies-test/bind")
.cookie(Cookie.of("one", "foo"))
.cookie(Cookie.of("two", "bar")),
(server, request) -> AssertionUtils.assertDoesNotThrow(server, request, HttpResponseAssertion.builder()
.status(HttpStatus.OK)
.body("{\"one\":\"foo\",\"two\":\"bar\"}")
.build()));
}
@Test
void testGetCookiesMethod() throws IOException {
asserts(SPEC_NAME,
HttpRequest.GET("/cookies-test/all")
.cookie(Cookie.of("one", "foo"))
.cookie(Cookie.of("two", "bar")),
(server, request) -> AssertionUtils.assertDoesNotThrow(server, request, HttpResponseAssertion.builder()
.status(HttpStatus.OK)
.body("{\"one\":\"foo\",\"two\":\"bar\"}")
.build()));
}
@Test
void testNoCookie() throws IOException {
asserts(SPEC_NAME,
HttpRequest.GET("/cookies-test/all"),
(server, request) -> AssertionUtils.assertDoesNotThrow(server, request, HttpResponseAssertion.builder()
.status(HttpStatus.OK)
.body("{}")
.build()));
}
@Controller("/cookies-test")
@Requires(property = "spec.name", value = SPEC_NAME)
static | CookiesTest |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/modifiedflags/entities/EnumEntity.java | {
"start": 443,
"end": 973
} | class ____ {
@Id
private Integer id;
@Enumerated(EnumType.STRING)
@Column(name = "client_option")
@Audited(modifiedColumnName = "client_option_mod")
private EnumOption option;
EnumEntity() {
}
public EnumEntity(Integer id, EnumOption option) {
this.id = id;
this.option = option;
}
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public EnumOption getOption() {
return option;
}
public void setOption(EnumOption option) {
this.option = option;
}
}
| EnumEntity |
java | elastic__elasticsearch | x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java | {
"start": 1551,
"end": 9014
} | class ____ {
/**
* Maximum time we'll wait for jobs to gracefully persist their state and stop their associated
* processes. We expect this to take a minute or two at most if all goes to plan. The longer
* timeout here is to avoid the need for user intervention if something doesn't work and the
* graceful shutdown gets stuck.
*/
public static final Duration MAX_GRACEFUL_SHUTDOWN_TIME = Duration.of(10, ChronoUnit.MINUTES);
private static final Logger logger = LogManager.getLogger(MlLifeCycleService.class);
private final ClusterService clusterService;
private final DatafeedRunner datafeedRunner;
private final MlController mlController;
private final AutodetectProcessManager autodetectProcessManager;
private final DataFrameAnalyticsManager analyticsManager;
private final MlMemoryTracker memoryTracker;
private final Map<String, Instant> shutdownStartTimes = new ConcurrentHashMap<>();
MlLifeCycleService(
ClusterService clusterService,
DatafeedRunner datafeedRunner,
MlController mlController,
AutodetectProcessManager autodetectProcessManager,
DataFrameAnalyticsManager analyticsManager,
MlMemoryTracker memoryTracker
) {
this.clusterService = Objects.requireNonNull(clusterService);
this.datafeedRunner = Objects.requireNonNull(datafeedRunner);
this.mlController = Objects.requireNonNull(mlController);
this.autodetectProcessManager = Objects.requireNonNull(autodetectProcessManager);
this.analyticsManager = Objects.requireNonNull(analyticsManager);
this.memoryTracker = Objects.requireNonNull(memoryTracker);
clusterService.addLifecycleListener(new LifecycleListener() {
@Override
public void beforeStop() {
stop();
}
});
}
public synchronized void stop() {
try {
// This prevents data frame analytics from being marked as failed due to exceptions occurring while the node is shutting down.
analyticsManager.markNodeAsShuttingDown();
// This prevents datafeeds from sending data to autodetect processes WITHOUT stopping the datafeeds, so they get reassigned.
// We have to do this first, otherwise the datafeeds could fail if they send data to a dead autodetect process.
datafeedRunner.prepareForImmediateShutdown();
// This kills autodetect processes WITHOUT closing the jobs, so they get reassigned.
autodetectProcessManager.killAllProcessesOnThisNode();
mlController.stop();
} catch (IOException e) {
// We're stopping anyway, so don't let this complicate the shutdown sequence
}
memoryTracker.stop();
}
/**
* Is it safe to shut down a particular node without any ML rework being required?
* @param nodeId ID of the node being shut down.
* @return Has all active ML work vacated the specified node?
*/
public boolean isNodeSafeToShutdown(String nodeId) {
return isNodeSafeToShutdown(nodeId, clusterService.state(), shutdownStartTimes.get(nodeId), Clock.systemUTC());
}
static boolean isNodeSafeToShutdown(String nodeId, ClusterState state, Instant shutdownStartTime, Clock clock) {
// If the shutdown has taken too long then any remaining tasks will just be cut off when the node dies
if (shutdownStartTime != null && shutdownStartTime.isBefore(clock.instant().minus(MAX_GRACEFUL_SHUTDOWN_TIME))) {
return true;
}
logger.debug(() -> format("Checking shutdown safety for node id [%s]", nodeId));
boolean nodeHasRunningDeployments = nodeHasRunningDeployments(nodeId, state);
logger.debug(() -> format("Node id [%s] has running deployments: %s", nodeId, nodeHasRunningDeployments));
PersistentTasksCustomMetadata tasks = state.metadata().getProject().custom(PersistentTasksCustomMetadata.TYPE);
// Ignore failed jobs - the persistent task still exists to remember the failure (because no
// persistent task means closed), but these don't need to be relocated to another node.
return MlTasks.nonFailedJobTasksOnNode(tasks, nodeId).isEmpty()
&& MlTasks.nonFailedSnapshotUpgradeTasksOnNode(tasks, nodeId).isEmpty()
&& nodeHasRunningDeployments == false;
}
private static boolean nodeHasRunningDeployments(String nodeId, ClusterState state) {
TrainedModelAssignmentMetadata metadata = TrainedModelAssignmentMetadata.fromState(state);
return metadata.allAssignments().values().stream().anyMatch(assignment -> {
if (assignment.isRoutedToNode(nodeId)) {
RoutingInfo routingInfo = assignment.getNodeRoutingTable().get(nodeId);
logger.debug(
() -> format(
"Assignment deployment id [%s] is routed to shutting down nodeId %s state: %s",
assignment.getDeploymentId(),
nodeId,
routingInfo.getState()
)
);
// A routing could exist in the stopped state if the deployment has successfully drained any remaining requests
// If a route is starting, started, or stopping then the node is not ready to shut down yet
return routingInfo.getState().isNoneOf(RoutingState.STOPPED, RoutingState.FAILED);
}
return false;
});
}
/**
* Called when nodes have been marked for shutdown.
* This method will only react if the local node is in the collection provided.
* (The assumption is that this method will be called on every node, so each node will get to react.)
* If the local node is marked for shutdown then ML jobs running on it will be told to gracefully
* persist state and then unassigned so that they relocate to a different node.
* @param shutdownNodeIds IDs of all nodes being shut down.
*/
public void signalGracefulShutdown(Collection<String> shutdownNodeIds) {
signalGracefulShutdown(clusterService.state(), shutdownNodeIds, Clock.systemUTC());
}
void signalGracefulShutdown(ClusterState state, Collection<String> shutdownNodeIds, Clock clock) {
String localNodeId = state.nodes().getLocalNodeId();
updateShutdownStartTimes(shutdownNodeIds, localNodeId, clock);
if (shutdownNodeIds.contains(localNodeId)) {
datafeedRunner.vacateAllDatafeedsOnThisNode(
"previously assigned node [" + state.nodes().getLocalNode().getName() + "] is shutting down"
);
autodetectProcessManager.vacateOpenJobsOnThisNode();
}
}
Instant getShutdownStartTime(String nodeId) {
return shutdownStartTimes.get(nodeId);
}
private void updateShutdownStartTimes(Collection<String> shutdownNodeIds, String localNodeId, Clock clock) {
for (String shutdownNodeId : shutdownNodeIds) {
shutdownStartTimes.computeIfAbsent(shutdownNodeId, key -> {
if (key.equals(localNodeId)) {
logger.info("Starting node shutdown sequence for ML");
}
return Instant.now(clock);
});
}
shutdownStartTimes.keySet().retainAll(shutdownNodeIds);
}
}
| MlLifeCycleService |
java | micronaut-projects__micronaut-core | websocket/src/main/java/io/micronaut/websocket/event/WebSocketSessionClosedEvent.java | {
"start": 835,
"end": 1090
} | class ____ extends WebSocketEvent {
/**
* Default constructor.
*
* @param session The web socket session
*/
public WebSocketSessionClosedEvent(WebSocketSession session) {
super(session);
}
}
| WebSocketSessionClosedEvent |
java | spring-projects__spring-boot | module/spring-boot-restdocs/src/main/java/org/springframework/boot/restdocs/test/autoconfigure/RestDocsTestExecutionListener.java | {
"start": 1381,
"end": 2112
} | class ____ extends AbstractTestExecutionListener {
private static final boolean REST_DOCS_PRESENT = ClassUtils.isPresent(
"org.springframework.restdocs.ManualRestDocumentation",
RestDocsTestExecutionListener.class.getClassLoader());
@Override
public int getOrder() {
return Ordered.LOWEST_PRECEDENCE - 100;
}
@Override
public void beforeTestMethod(TestContext testContext) throws Exception {
if (REST_DOCS_PRESENT) {
new DocumentationHandler().beforeTestMethod(testContext);
}
}
@Override
public void afterTestMethod(TestContext testContext) throws Exception {
if (REST_DOCS_PRESENT) {
new DocumentationHandler().afterTestMethod(testContext);
}
}
private static final | RestDocsTestExecutionListener |
java | spring-projects__spring-boot | core/spring-boot-autoconfigure/src/test/java/org/springframework/boot/autoconfigure/condition/SpringBootConditionTests.java | {
"start": 2133,
"end": 2358
} | class ____ extends SpringBootCondition {
@Override
public ConditionOutcome getMatchOutcome(ConditionContext context, AnnotatedTypeMetadata metadata) {
throw new RuntimeException("Oh no!");
}
}
}
| AlwaysThrowsCondition |
java | spring-projects__spring-boot | module/spring-boot-micrometer-metrics/src/test/java/org/springframework/boot/micrometer/metrics/autoconfigure/export/statsd/StatsdMetricsExportAutoConfigurationTests.java | {
"start": 3885,
"end": 4064
} | class ____ {
@Bean
StatsdMeterRegistry customRegistry(StatsdConfig config, Clock clock) {
return new StatsdMeterRegistry(config, clock);
}
}
}
| CustomRegistryConfiguration |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/TypeConverter.java | {
"start": 970,
"end": 2938
} | class ____ {
private final String evaluatorName;
private final ExpressionEvaluator convertEvaluator;
private TypeConverter(String evaluatorName, ExpressionEvaluator convertEvaluator) {
this.evaluatorName = evaluatorName;
this.convertEvaluator = convertEvaluator;
}
public static TypeConverter fromScalarFunction(EsqlScalarFunction convertFunction) {
DriverContext driverContext1 = new DriverContext(
BigArrays.NON_RECYCLING_INSTANCE,
new org.elasticsearch.compute.data.BlockFactory(
new NoopCircuitBreaker(CircuitBreaker.REQUEST),
BigArrays.NON_RECYCLING_INSTANCE
)
);
return new TypeConverter(convertFunction.functionName(), convertFunction.toEvaluator(new EvaluatorMapper.ToEvaluator() {
@Override
public ExpressionEvaluator.Factory apply(Expression expression) {
return driverContext -> new ExpressionEvaluator() {
@Override
public org.elasticsearch.compute.data.Block eval(Page page) {
// This is a pass-through evaluator, since it sits directly on the source loading (no prior expressions)
return page.getBlock(0);
}
@Override
public long baseRamBytesUsed() {
throw new UnsupportedOperationException("not used");
}
@Override
public void close() {}
};
}
@Override
public FoldContext foldCtx() {
throw new IllegalStateException("not folding");
}
}).get(driverContext1));
}
public Block convert(Block block) {
return convertEvaluator.eval(new Page(block));
}
@Override
public String toString() {
return evaluatorName;
}
}
| TypeConverter |
java | spring-cloud__spring-cloud-gateway | spring-cloud-gateway-server-webmvc/src/test/java/org/springframework/cloud/gateway/server/mvc/filter/RetryFilterFunctionSelectionLogicTests.java | {
"start": 1221,
"end": 5028
} | class ____ {
private ListAppender<ILoggingEvent> listAppender;
@BeforeEach
void setUp() {
Logger logger = (Logger) LoggerFactory.getLogger(RetryFilterFunctions.class);
listAppender = new ListAppender<>();
listAppender.start();
logger.addAppender(listAppender);
logger.setLevel(ch.qos.logback.classic.Level.DEBUG);
}
@AfterEach
void tearDown() {
if (listAppender != null) {
Logger logger = (Logger) LoggerFactory.getLogger(RetryFilterFunctions.class);
logger.detachAppender(listAppender);
listAppender.stop();
}
}
@AfterEach
void reset() {
// Reset the useFrameworkRetry flag after each test
RetryFilterFunctions.setUseFrameworkRetry(false);
}
@Test
void retryWithIntDelegatesCorrectly() {
// Test that retry(int) delegates to the correct implementation
HandlerFilterFunction<ServerResponse, ServerResponse> filter = RetryFilterFunctions.retry(3);
assertThat(filter).isNotNull();
// The actual implementation depends on classpath, but it should not be null
// Verify log message indicates filter selection
assertThat(listAppender.list).hasSizeGreaterThanOrEqualTo(1);
assertThat(listAppender.list.get(0).getMessage()).contains("Retry filter selection");
}
@Test
void retryWithConfigConsumerDelegatesCorrectly() {
// Test that retry(Consumer<RetryConfig>) delegates to the correct implementation
HandlerFilterFunction<ServerResponse, ServerResponse> filter = RetryFilterFunctions
.retry(config -> config.setRetries(3));
assertThat(filter).isNotNull();
}
@Test
void retryWithConfigDelegatesCorrectly() {
// Test that retry(RetryConfig) delegates to the correct implementation
RetryFilterFunctions.RetryConfig config = new RetryFilterFunctions.RetryConfig();
config.setRetries(3);
HandlerFilterFunction<ServerResponse, ServerResponse> filter = RetryFilterFunctions.retry(config);
assertThat(filter).isNotNull();
}
@Test
void forcedFrameworkRetryUsesFrameworkRetry() {
// When forced to use Framework Retry, should use FrameworkRetryFilterFunctions
RetryFilterFunctions.setUseFrameworkRetry(true);
HandlerFilterFunction<ServerResponse, ServerResponse> filter = RetryFilterFunctions.retry(3);
assertThat(filter).isNotNull();
// Verify log message indicates FrameworkRetryFilterFunctions is selected
assertThat(listAppender.list).hasSizeGreaterThanOrEqualTo(1);
String logMessage = listAppender.list.get(listAppender.list.size() - 1).getMessage();
assertThat(logMessage).contains("Retry filter selection");
assertThat(logMessage).contains("selected filter=FrameworkRetryFilterFunctions");
assertThat(logMessage).contains("useFrameworkRetry=true");
// Reset for other tests
RetryFilterFunctions.setUseFrameworkRetry(false);
}
@Test
void defaultBehaviorRespectsClasspath() {
// Default behavior should use Spring Retry if on classpath, Framework Retry
// otherwise
RetryFilterFunctions.setUseFrameworkRetry(false);
HandlerFilterFunction<ServerResponse, ServerResponse> filter = RetryFilterFunctions.retry(3);
assertThat(filter).isNotNull();
// The actual implementation depends on whether Spring Retry is on classpath
// In this project, Spring Retry is on classpath, so it should use
// GatewayRetryFilterFunctions
// Verify log message indicates correct filter selection
assertThat(listAppender.list).hasSizeGreaterThanOrEqualTo(1);
String logMessage = listAppender.list.get(listAppender.list.size() - 1).getMessage();
assertThat(logMessage).contains("Retry filter selection");
// Since Spring Retry is on classpath in this project, it should select
// GatewayRetryFilterFunctions
assertThat(logMessage).contains("selected filter=GatewayRetryFilterFunctions");
assertThat(logMessage).contains("useFrameworkRetry=false");
}
}
| RetryFilterFunctionSelectionLogicTests |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/context/annotation/CommonAnnotationBeanPostProcessorTests.java | {
"start": 20964,
"end": 21920
} | class ____ implements DestructionAwareBeanPostProcessor {
@Override
public Object postProcessBeforeInitialization(Object bean, String beanName) throws BeansException {
if (bean instanceof AnnotatedInitDestroyBean) {
assertThat(((AnnotatedInitDestroyBean) bean).initCalled).isFalse();
}
return bean;
}
@Override
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException {
if (bean instanceof AnnotatedInitDestroyBean) {
assertThat(((AnnotatedInitDestroyBean) bean).initCalled).isTrue();
}
return bean;
}
@Override
public void postProcessBeforeDestruction(Object bean, String beanName) throws BeansException {
if (bean instanceof AnnotatedInitDestroyBean) {
assertThat(((AnnotatedInitDestroyBean) bean).destroyCalled).isFalse();
}
}
@Override
public boolean requiresDestruction(Object bean) {
return true;
}
}
public static | InitDestroyBeanPostProcessor |
java | elastic__elasticsearch | plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/lucene/ReplaceCharToNumber.java | {
"start": 680,
"end": 1114
} | class ____ extends MappingCharFilter {
public ReplaceCharToNumber(Reader in, String oldChar, int newNumber) {
super(charMap(oldChar, newNumber), in);
}
private static NormalizeCharMap charMap(String oldChar, int newNumber) {
NormalizeCharMap.Builder builder = new NormalizeCharMap.Builder();
builder.add(oldChar, String.valueOf(newNumber));
return builder.build();
}
}
| ReplaceCharToNumber |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/inheritance/TreatedLeftJoinInheritanceTest.java | {
"start": 7269,
"end": 7428
} | class ____ {
@Id
@GeneratedValue
private Long id;
}
@SuppressWarnings("unused")
@Entity( name = "SingleTableSubEntity" )
public static | SingleTableEntity |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/core/publisher/MonoFlatMapMany.java | {
"start": 5574,
"end": 6555
} | class ____<R> implements InnerConsumer<R> {
final FlatMapManyMain<?, R> parent;
final CoreSubscriber<? super R> actual;
FlatMapManyInner(FlatMapManyMain<?, R> parent,
CoreSubscriber<? super R> actual) {
this.parent = parent;
this.actual = actual;
}
@Override
public Context currentContext() {
return actual.currentContext();
}
@Override
public @Nullable Object scanUnsafe(Attr key) {
if (key == Attr.PARENT) return parent.inner;
if (key == Attr.ACTUAL) return parent;
if (key == Attr.REQUESTED_FROM_DOWNSTREAM) return parent.requested;
if (key == Attr.RUN_STYLE) return Attr.RunStyle.SYNC;
return null;
}
@Override
public void onSubscribe(Subscription s) {
parent.onSubscribeInner(s);
}
@Override
public void onNext(R t) {
actual.onNext(t);
}
@Override
public void onError(Throwable t) {
actual.onError(t);
}
@Override
public void onComplete() {
actual.onComplete();
}
}
}
| FlatMapManyInner |
java | spring-projects__spring-boot | smoke-test/spring-boot-smoke-test-web-application-type/src/main/java/smoketest/webapplicationtype/SampleWebApplicationTypeApplication.java | {
"start": 816,
"end": 983
} | class ____ {
public static void main(String[] args) {
SpringApplication.run(SampleWebApplicationTypeApplication.class, args);
}
}
| SampleWebApplicationTypeApplication |
java | apache__camel | components/camel-cxf/camel-cxf-transport/src/main/java/org/apache/camel/component/cxf/transport/message/CxfMessageHelper.java | {
"start": 1227,
"end": 2769
} | class ____ {
private CxfMessageHelper() {
//Helper class
}
public static org.apache.cxf.message.Message getCxfInMessage(
HeaderFilterStrategy headerFilterStrategy,
org.apache.camel.Exchange exchange,
boolean isClient) {
MessageImpl answer = new MessageImpl();
org.apache.cxf.message.Exchange cxfExchange = exchange
.getProperty(CamelTransportConstants.CXF_EXCHANGE, org.apache.cxf.message.Exchange.class);
org.apache.camel.Message message;
if (isClient) {
message = exchange.getMessage();
} else {
message = exchange.getIn();
}
ObjectHelper.notNull(message, "message");
if (cxfExchange == null) {
cxfExchange = new ExchangeImpl();
exchange.setProperty(CamelTransportConstants.CXF_EXCHANGE, cxfExchange);
}
CxfHeaderHelper.propagateCamelToCxf(headerFilterStrategy, message.getHeaders(), answer, exchange);
// body can be empty in case of GET etc.
InputStream body = message.getBody(InputStream.class);
if (body != null) {
answer.setContent(InputStream.class, body);
} else if (message.getBody() != null) {
// fallback and set the body as what it is
answer.setContent(Object.class, body);
}
answer.putAll(message.getHeaders());
answer.setExchange(cxfExchange);
cxfExchange.setInMessage(answer);
return answer;
}
}
| CxfMessageHelper |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/source/coordinator/SourceCoordinatorProvider.java | {
"start": 1614,
"end": 4345
} | class ____<SplitT extends SourceSplit>
extends RecreateOnResetOperatorCoordinator.Provider {
private static final long serialVersionUID = -1921681440009738462L;
private final String operatorName;
private final Source<?, SplitT, ?> source;
private final int numWorkerThreads;
private final WatermarkAlignmentParams alignmentParams;
@Nullable private final String coordinatorListeningID;
/**
* Construct the {@link SourceCoordinatorProvider}.
*
* @param operatorName the name of the operator.
* @param operatorID the ID of the operator this coordinator corresponds to.
* @param source the Source that will be used for this coordinator.
* @param numWorkerThreads the number of threads the should provide to the SplitEnumerator for
* doing async calls. See {@link
* org.apache.flink.api.connector.source.SplitEnumeratorContext#callAsync(Callable,
* BiConsumer) SplitEnumeratorContext.callAsync()}.
*/
public SourceCoordinatorProvider(
String operatorName,
OperatorID operatorID,
Source<?, SplitT, ?> source,
int numWorkerThreads,
WatermarkAlignmentParams alignmentParams,
@Nullable String coordinatorListeningID) {
super(operatorID);
this.operatorName = operatorName;
this.source = source;
this.numWorkerThreads = numWorkerThreads;
this.alignmentParams = alignmentParams;
this.coordinatorListeningID = coordinatorListeningID;
}
@Override
public OperatorCoordinator getCoordinator(OperatorCoordinator.Context context) {
final String coordinatorThreadName = "SourceCoordinator-" + operatorName;
CoordinatorExecutorThreadFactory coordinatorThreadFactory =
new CoordinatorExecutorThreadFactory(coordinatorThreadName, context);
SimpleVersionedSerializer<SplitT> splitSerializer = source.getSplitSerializer();
SourceCoordinatorContext<SplitT> sourceCoordinatorContext =
new SourceCoordinatorContext<>(
context.getJobID(),
coordinatorThreadFactory,
numWorkerThreads,
context,
splitSerializer,
context.isConcurrentExecutionAttemptsSupported());
return new SourceCoordinator<>(
context.getJobID(),
operatorName,
source,
sourceCoordinatorContext,
context.getCoordinatorStore(),
alignmentParams,
coordinatorListeningID);
}
/**
* A thread factory | SourceCoordinatorProvider |
java | apache__camel | components/camel-braintree/src/test/java/org/apache/camel/component/braintree/AddressGatewayIT.java | {
"start": 1739,
"end": 8935
} | class ____ extends AbstractBraintreeTestSupport {
private static final Logger LOG = LoggerFactory.getLogger(AddressGatewayIT.class);
private static final String PATH_PREFIX
= BraintreeApiCollection.getCollection().getApiName(AddressGatewayApiMethod.class).getName();
private BraintreeGateway gateway;
private Customer customer;
private final List<String> addressIds;
// *************************************************************************
//
// *************************************************************************
public AddressGatewayIT() {
this.customer = null;
this.gateway = null;
this.addressIds = new LinkedList<>();
}
@Override
protected void doPostSetup() {
this.gateway = getGateway();
this.customer = gateway.customer().create(
new CustomerRequest()
.firstName("user")
.lastName(UUID.randomUUID().toString()))
.getTarget();
if (customer != null) {
LOG.info("Customer created - id={}", this.customer.getId());
}
}
@Override
public void doPostTearDown() {
if (this.gateway != null && customer != null) {
for (String id : this.addressIds) {
if (this.gateway.address().delete(customer.getId(), id).isSuccess()) {
LOG.info("Address deleted - customer={}, id={}", customer.getId(), id);
} else {
LOG.warn("Unable to delete address - customer={}, id={}", customer.getId(), id);
}
}
this.addressIds.clear();
if (this.gateway.customer().delete(this.customer.getId()).isSuccess()) {
LOG.info("Customer deleted - id={}", this.customer.getId());
} else {
LOG.warn("Unable to delete customer - id={}", this.customer.getId());
}
}
}
private Address createAddress() {
// Create address
final Result<Address> result = gateway.address().create(
this.customer.getId(),
new AddressRequest()
.company("Apache")
.streetAddress("1901 Munsey Drive")
.locality("Forest Hill"));
assertNotNull(result, "create");
assertTrue(result.isSuccess());
LOG.info("Address created - customer={}, id={}", this.customer.getId(), result.getTarget().getId());
return result.getTarget();
}
// *************************************************************************
//
// *************************************************************************
@Test
public void testCreate() {
assertNotNull(this.gateway, "BraintreeGateway can't be null");
assertNotNull(this.customer, "Customer can't be null");
final Result<Address> address = requestBodyAndHeaders(
"direct://CREATE",
null,
new BraintreeHeaderBuilder()
.add("customerId", customer.getId())
.add("request", new AddressRequest()
.company("Apache")
.streetAddress("1901 Munsey Drive")
.locality("Forest Hill"))
.build(),
Result.class);
assertNotNull(address, "create");
assertTrue(address.isSuccess());
LOG.info("Address created - customer={}, id={}", customer.getId(), address.getTarget().getId());
this.addressIds.add(address.getTarget().getId());
}
@Test
public void testDelete() {
assertNotNull(this.gateway, "BraintreeGateway can't be null");
assertNotNull(this.customer, "Customer can't be null");
final Address address = createAddress();
final Result<Address> result = requestBodyAndHeaders(
"direct://DELETE",
null,
new BraintreeHeaderBuilder()
.add("customerId", customer.getId())
.add("id", address.getId())
.build(),
Result.class);
assertNotNull(address, "delete");
assertTrue(result.isSuccess());
LOG.info("Address deleted - customer={}, id={}", customer.getId(), address.getId());
}
@Test
public void testFind() {
assertNotNull(this.gateway, "BraintreeGateway can't be null");
assertNotNull(this.customer, "Customer can't be null");
final Address addressRef = createAddress();
this.addressIds.add(addressRef.getId());
final Address address = requestBodyAndHeaders(
"direct://FIND", null,
new BraintreeHeaderBuilder()
.add("customerId", customer.getId())
.add("id", addressRef.getId())
.build(),
Address.class);
assertNotNull(address, "find");
LOG.info("Address found - customer={}, id={}", customer.getId(), address.getId());
}
@Test
public void testUpdate() {
assertNotNull(this.gateway, "BraintreeGateway can't be null");
assertNotNull(this.customer, "Customer can't be null");
final Address addressRef = createAddress();
this.addressIds.add(addressRef.getId());
final Result<Address> result = requestBodyAndHeaders(
"direct://UPDATE", null,
new BraintreeHeaderBuilder()
.add("customerId", customer.getId())
.add("id", addressRef.getId())
.add("request", new AddressRequest()
.company("Apache")
.streetAddress(customer.getId())
.locality(customer.getId()))
.build(),
Result.class);
assertNotNull(result, "update");
assertTrue(result.isSuccess());
LOG.info("Address updated - customer={}, id={}", customer.getId(), result.getTarget().getId());
}
// *************************************************************************
// Routes
// *************************************************************************
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
// test route for create
from("direct://CREATE")
.to("braintree://" + PATH_PREFIX + "/create");
// test route for delete
from("direct://DELETE")
.to("braintree://" + PATH_PREFIX + "/delete");
// test route for find
from("direct://FIND")
.to("braintree://" + PATH_PREFIX + "/find");
// test route for update
from("direct://UPDATE")
.to("braintree://" + PATH_PREFIX + "/update");
}
};
}
}
| AddressGatewayIT |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/http/converter/FormHttpMessageConverter.java | {
"start": 23386,
"end": 24854
} | class ____ implements HttpOutputMessage {
private final OutputStream outputStream;
private final Charset charset;
private final HttpHeaders headers = new HttpHeaders();
private boolean headersWritten = false;
public MultipartHttpOutputMessage(OutputStream outputStream, Charset charset) {
this.outputStream = new MultipartOutputStream(outputStream);
this.charset = charset;
}
@Override
public HttpHeaders getHeaders() {
return (this.headersWritten ? HttpHeaders.readOnlyHttpHeaders(this.headers) : this.headers);
}
@Override
public OutputStream getBody() throws IOException {
writeHeaders();
return this.outputStream;
}
private void writeHeaders() throws IOException {
if (!this.headersWritten) {
for (Map.Entry<String, List<String>> entry : this.headers.headerSet()) {
byte[] headerName = getBytes(entry.getKey());
for (String headerValueString : entry.getValue()) {
byte[] headerValue = getBytes(headerValueString);
this.outputStream.write(headerName);
this.outputStream.write(':');
this.outputStream.write(' ');
this.outputStream.write(headerValue);
writeNewLine(this.outputStream);
}
}
writeNewLine(this.outputStream);
this.headersWritten = true;
}
}
private byte[] getBytes(String name) {
return name.getBytes(this.charset);
}
}
/**
* OutputStream that neither flushes nor closes.
*/
private static | MultipartHttpOutputMessage |
java | junit-team__junit5 | junit-jupiter-api/src/main/java/org/junit/jupiter/api/extension/ConditionEvaluationResult.java | {
"start": 750,
"end": 3855
} | class ____ {
/**
* Factory for creating <em>enabled</em> results.
*
* @param reason the reason why the container or test should be enabled; may
* be {@code null} or <em>blank</em> if the reason is unknown
* @return an enabled {@code ConditionEvaluationResult} with the given reason
* or an <em>empty</em> reason if the reason is unknown
* @see StringUtils#isBlank(String)
*/
public static ConditionEvaluationResult enabled(@Nullable String reason) {
return new ConditionEvaluationResult(true, reason);
}
/**
* Factory for creating <em>disabled</em> results.
*
* @param reason the reason why the container or test should be disabled; may
* be {@code null} or <em>blank</em> if the reason is unknown
* @return a disabled {@code ConditionEvaluationResult} with the given reason
* or an <em>empty</em> reason if the reason is unknown
* @see StringUtils#isBlank(String)
*/
public static ConditionEvaluationResult disabled(@Nullable String reason) {
return new ConditionEvaluationResult(false, reason);
}
/**
* Factory for creating <em>disabled</em> results with custom reasons
* added by the user.
*
* <p>If non-blank default and custom reasons are provided, they will be
* concatenated using the format: <code>"reason ==> customReason"</code>.
*
* @param reason the default reason why the container or test should be disabled;
* may be {@code null} or <em>blank</em> if the default reason is unknown
* @param customReason the custom reason why the container or test should be
* disabled; may be {@code null} or <em>blank</em> if the custom reason is unknown
* @return a disabled {@code ConditionEvaluationResult} with the given reason(s)
* or an <em>empty</em> reason if the reasons are unknown
* @since 5.7
* @see StringUtils#isBlank(String)
*/
@API(status = STABLE, since = "5.7")
public static ConditionEvaluationResult disabled(@Nullable String reason, @Nullable String customReason) {
if (StringUtils.isBlank(reason)) {
return disabled(customReason);
}
if (StringUtils.isBlank(customReason)) {
return disabled(reason);
}
return disabled("%s ==> %s".formatted(reason.strip(), customReason.strip()));
}
private final boolean enabled;
private final Optional<String> reason;
private ConditionEvaluationResult(boolean enabled, @Nullable String reason) {
this.enabled = enabled;
this.reason = StringUtils.isNotBlank(reason) ? Optional.of(reason.strip()) : Optional.empty();
}
/**
* Whether the container or test should be disabled.
*
* @return {@code true} if the container or test should be disabled
*/
public boolean isDisabled() {
return !this.enabled;
}
/**
* Get the reason why the container or test should be enabled or disabled,
* if available.
*/
public Optional<String> getReason() {
return this.reason;
}
@Override
public String toString() {
// @formatter:off
return new ToStringBuilder(this)
.append("enabled", this.enabled)
.append("reason", this.reason.orElse("<unknown>"))
.toString();
// @formatter:on
}
}
| ConditionEvaluationResult |
java | alibaba__nacos | client-basic/src/main/java/com/alibaba/nacos/client/address/AbstractServerListProvider.java | {
"start": 1099,
"end": 2709
} | class ____ implements ServerListProvider {
protected String contextPath = ClientBasicParamUtil.getDefaultContextPath();
protected String namespace = "";
@Override
public void init(final NacosClientProperties properties, final NacosRestTemplate nacosRestTemplate) throws NacosException {
if (null == properties) {
throw new NacosException(NacosException.INVALID_PARAM, "properties is null");
}
initContextPath(properties);
initNameSpace(properties);
}
/**
* Get server list.
* @return server list
*/
@Override
public abstract List<String> getServerList();
/**
* Get server name.
* @return server name
*/
@Override
public abstract String getServerName();
/**
* Get order.
* @return order
*/
@Override
public abstract int getOrder();
public String getContextPath() {
return contextPath;
}
public String getNamespace() {
return namespace;
}
private void initContextPath(NacosClientProperties properties) {
String contentPathTmp = properties.getProperty(PropertyKeyConst.CONTEXT_PATH);
if (!StringUtils.isBlank(contentPathTmp)) {
this.contextPath = contentPathTmp;
}
}
private void initNameSpace(NacosClientProperties properties) {
String namespace = properties.getProperty(PropertyKeyConst.NAMESPACE);
if (StringUtils.isNotBlank(namespace)) {
this.namespace = namespace;
}
}
}
| AbstractServerListProvider |
java | apache__camel | components/camel-box/camel-box-component/src/generated/java/org/apache/camel/component/box/internal/BoxFilesManagerApiMethod.java | {
"start": 665,
"end": 5723
} | enum ____ implements ApiMethod {
CHECK_UPLOAD(
void.class,
"checkUpload",
arg("fileName", String.class),
arg("parentFolderId", String.class),
arg("size", Long.class)),
COPY_FILE(
com.box.sdk.BoxFile.class,
"copyFile",
arg("fileId", String.class),
arg("destinationFolderId", String.class),
arg("newName", String.class)),
CREATE_FILE_METADATA(
com.box.sdk.Metadata.class,
"createFileMetadata",
arg("fileId", String.class),
arg("metadata", com.box.sdk.Metadata.class),
arg("typeName", String.class)),
CREATE_FILE_SHARED_LINK(
com.box.sdk.BoxSharedLink.class,
"createFileSharedLink",
arg("fileId", String.class),
arg("access", com.box.sdk.BoxSharedLink.Access.class),
arg("unshareDate", java.util.Date.class),
arg("permissions", com.box.sdk.BoxSharedLink.Permissions.class)),
DELETE_FILE(
void.class,
"deleteFile",
arg("fileId", String.class)),
DELETE_FILE_METADATA(
void.class,
"deleteFileMetadata",
arg("fileId", String.class)),
DELETE_FILE_VERSION(
void.class,
"deleteFileVersion",
arg("fileId", String.class),
arg("version", Integer.class)),
DOWNLOAD_FILE(
java.io.OutputStream.class,
"downloadFile",
arg("fileId", String.class),
arg("output", java.io.OutputStream.class),
arg("rangeStart", Long.class),
arg("rangeEnd", Long.class),
arg("listener", com.box.sdk.ProgressListener.class)),
DOWNLOAD_PREVIOUS_FILE_VERSION(
java.io.OutputStream.class,
"downloadPreviousFileVersion",
arg("fileId", String.class),
arg("version", Integer.class),
arg("output", java.io.OutputStream.class),
arg("listener", com.box.sdk.ProgressListener.class)),
GET_DOWNLOAD_URL(
java.net.URL.class,
"getDownloadURL",
arg("fileId", String.class)),
GET_FILE_INFO(
com.box.sdk.BoxFile.Info.class,
"getFileInfo",
arg("fileId", String.class),
arg("fields", new String[0].getClass())),
GET_FILE_METADATA(
com.box.sdk.Metadata.class,
"getFileMetadata",
arg("fileId", String.class),
arg("typeName", String.class)),
GET_FILE_PREVIEW_LINK(
java.net.URL.class,
"getFilePreviewLink",
arg("fileId", String.class)),
GET_FILE_VERSIONS(
java.util.Collection.class,
"getFileVersions",
arg("fileId", String.class)),
MOVE_FILE(
com.box.sdk.BoxFile.class,
"moveFile",
arg("fileId", String.class),
arg("destinationFolderId", String.class),
arg("newName", String.class)),
PROMOTE_FILE_VERSION(
com.box.sdk.BoxFileVersion.class,
"promoteFileVersion",
arg("fileId", String.class),
arg("version", Integer.class)),
RENAME_FILE(
com.box.sdk.BoxFile.class,
"renameFile",
arg("fileId", String.class),
arg("newFileName", String.class)),
UPDATE_FILE_INFO(
com.box.sdk.BoxFile.class,
"updateFileInfo",
arg("fileId", String.class),
arg("info", com.box.sdk.BoxFile.Info.class)),
UPDATE_FILE_METADATA(
com.box.sdk.Metadata.class,
"updateFileMetadata",
arg("fileId", String.class),
arg("metadata", com.box.sdk.Metadata.class)),
UPLOAD_FILE(
com.box.sdk.BoxFile.class,
"uploadFile",
arg("parentFolderId", String.class),
arg("content", java.io.InputStream.class),
arg("fileName", String.class),
arg("created", java.util.Date.class),
arg("modified", java.util.Date.class),
arg("size", Long.class),
arg("check", Boolean.class),
arg("listener", com.box.sdk.ProgressListener.class)),
UPLOAD_NEW_FILE_VERSION(
com.box.sdk.BoxFile.class,
"uploadNewFileVersion",
arg("fileId", String.class),
arg("fileContent", java.io.InputStream.class),
arg("modified", java.util.Date.class),
arg("fileSize", Long.class),
arg("listener", com.box.sdk.ProgressListener.class));
private final ApiMethod apiMethod;
BoxFilesManagerApiMethod(Class<?> resultType, String name, ApiMethodArg... args) {
this.apiMethod = new ApiMethodImpl(BoxFilesManager.class, resultType, name, args);
}
@Override
public String getName() { return apiMethod.getName(); }
@Override
public Class<?> getResultType() { return apiMethod.getResultType(); }
@Override
public List<String> getArgNames() { return apiMethod.getArgNames(); }
@Override
public List<String> getSetterArgNames() { return apiMethod.getSetterArgNames(); }
@Override
public List<Class<?>> getArgTypes() { return apiMethod.getArgTypes(); }
@Override
public Method getMethod() { return apiMethod.getMethod(); }
}
| BoxFilesManagerApiMethod |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/core/publisher/FluxJoin.java | {
"start": 3480,
"end": 12519
} | class ____<TLeft, TRight, TLeftEnd, TRightEnd, R>
implements JoinSupport<R> {
final Queue<Object> queue;
final BiPredicate<Object, Object> queueBiOffer;
final Disposable.Composite cancellations;
final Map<Integer, TLeft> lefts;
final Map<Integer, TRight> rights;
final Function<? super TLeft, ? extends Publisher<TLeftEnd>> leftEnd;
final Function<? super TRight, ? extends Publisher<TRightEnd>> rightEnd;
final BiFunction<? super TLeft, ? super TRight, ? extends R> resultSelector;
final CoreSubscriber<? super R> actual;
volatile int wip;
static final AtomicIntegerFieldUpdater<JoinSubscription> WIP =
AtomicIntegerFieldUpdater.newUpdater(JoinSubscription.class, "wip");
volatile int active;
static final AtomicIntegerFieldUpdater<JoinSubscription> ACTIVE =
AtomicIntegerFieldUpdater.newUpdater(JoinSubscription.class,
"active");
volatile long requested;
static final AtomicLongFieldUpdater<JoinSubscription> REQUESTED =
AtomicLongFieldUpdater.newUpdater(JoinSubscription.class,
"requested");
volatile @Nullable Throwable error;
// https://github.com/uber/NullAway/issues/1157
@SuppressWarnings("DataFlowIssue")
static final AtomicReferenceFieldUpdater<JoinSubscription, @Nullable Throwable> ERROR =
AtomicReferenceFieldUpdater.newUpdater(JoinSubscription.class,
Throwable.class,
"error");
int leftIndex;
int rightIndex;
static final Integer LEFT_VALUE = 1;
static final Integer RIGHT_VALUE = 2;
static final Integer LEFT_CLOSE = 3;
static final Integer RIGHT_CLOSE = 4;
@SuppressWarnings("unchecked")
JoinSubscription(CoreSubscriber<? super R> actual,
Function<? super TLeft, ? extends Publisher<TLeftEnd>> leftEnd,
Function<? super TRight, ? extends Publisher<TRightEnd>> rightEnd,
BiFunction<? super TLeft, ? super TRight, ? extends R> resultSelector) {
this.actual = actual;
this.cancellations = Disposables.composite();
this.queue = Queues.unboundedMultiproducer().get();
this.queueBiOffer = (BiPredicate) queue;
this.lefts = new LinkedHashMap<>();
this.rights = new LinkedHashMap<>();
this.leftEnd = leftEnd;
this.rightEnd = rightEnd;
this.resultSelector = resultSelector;
ACTIVE.lazySet(this, 2);
}
@Override
public final CoreSubscriber<? super R> actual() {
return actual;
}
@Override
public Stream<? extends Scannable> inners() {
return Scannable.from(cancellations).inners();
}
@Override
public @Nullable Object scanUnsafe(Attr key) {
if (key == Attr.REQUESTED_FROM_DOWNSTREAM) return requested;
if (key == Attr.CANCELLED) return cancellations.isDisposed();
if (key == Attr.BUFFERED) return queue.size() / 2;
if (key == Attr.TERMINATED) return active == 0;
if (key == Attr.ERROR) return error;
if (key == Attr.RUN_STYLE) return Attr.RunStyle.SYNC;
return JoinSupport.super.scanUnsafe(key);
}
@Override
public void request(long n) {
if (Operators.validate(n)) {
Operators.addCap(REQUESTED, this, n);
}
}
@Override
public void cancel() {
if (cancellations.isDisposed()) {
return;
}
cancellations.dispose();
if (WIP.getAndIncrement(this) == 0) {
queue.clear();
}
}
void errorAll(Subscriber<?> a) {
Throwable ex = Exceptions.terminate(ERROR, this);
lefts.clear();
rights.clear();
a.onError(ex);
}
void drain() {
if (WIP.getAndIncrement(this) != 0) {
return;
}
int missed = 1;
Queue<Object> q = queue;
Subscriber<? super R> a = actual;
for (; ; ) {
for (; ; ) {
if (cancellations.isDisposed()) {
q.clear();
return;
}
Throwable ex = error;
if (ex != null) {
q.clear();
cancellations.dispose();
errorAll(a);
return;
}
boolean d = active == 0;
Integer mode = (Integer) q.poll();
boolean empty = mode == null;
if (d && empty) {
lefts.clear();
rights.clear();
cancellations.dispose();
a.onComplete();
return;
}
if (empty) {
break;
}
Object val = q.poll();
if (mode == LEFT_VALUE) {
@SuppressWarnings("unchecked") TLeft left = (TLeft) val;
int idx = leftIndex++;
lefts.put(idx, left);
Publisher<TLeftEnd> p;
try {
p = Objects.requireNonNull(leftEnd.apply(left),
"The leftEnd returned a null Publisher");
}
catch (Throwable exc) {
Exceptions.addThrowable(ERROR,
this,
Operators.onOperatorError(this, exc, left,
actual.currentContext()));
errorAll(a);
return;
}
LeftRightEndSubscriber end =
new LeftRightEndSubscriber(this, true, idx);
cancellations.add(end);
p = Operators.toFluxOrMono(p);
p.subscribe(end);
ex = error;
if (ex != null) {
q.clear();
cancellations.dispose();
errorAll(a);
return;
}
long r = requested;
long e = 0L;
for (TRight right : rights.values()) {
R w;
try {
w = Objects.requireNonNull(resultSelector.apply(left,
right),
"The resultSelector returned a null value");
}
catch (Throwable exc) {
Exceptions.addThrowable(ERROR,
this,
Operators.onOperatorError(this,
exc, right, actual.currentContext()));
errorAll(a);
return;
}
if (e != r) {
a.onNext(w);
e++;
}
else {
Exceptions.addThrowable(ERROR,
this,
Exceptions.failWithOverflow("Could not " + "emit value due to lack of requests"));
q.clear();
cancellations.dispose();
errorAll(a);
return;
}
}
if (e != 0L) {
Operators.produced(REQUESTED, this, e);
}
}
else if (mode == RIGHT_VALUE) {
@SuppressWarnings("unchecked") TRight right = (TRight) val;
int idx = rightIndex++;
rights.put(idx, right);
Publisher<TRightEnd> p;
try {
p = Objects.requireNonNull(rightEnd.apply(right),
"The rightEnd returned a null Publisher");
}
catch (Throwable exc) {
Exceptions.addThrowable(ERROR,
this,
Operators.onOperatorError(this, exc, right,
actual.currentContext()));
errorAll(a);
return;
}
LeftRightEndSubscriber end =
new LeftRightEndSubscriber(this, false, idx);
cancellations.add(end);
p = Operators.toFluxOrMono(p);
p.subscribe(end);
ex = error;
if (ex != null) {
q.clear();
cancellations.dispose();
errorAll(a);
return;
}
long r = requested;
long e = 0L;
for (TLeft left : lefts.values()) {
R w;
try {
w = Objects.requireNonNull(resultSelector.apply(left,
right),
"The resultSelector returned a null value");
}
catch (Throwable exc) {
Exceptions.addThrowable(ERROR,
this,
Operators.onOperatorError(this, exc, left,
actual.currentContext()));
errorAll(a);
return;
}
if (e != r) {
a.onNext(w);
e++;
}
else {
Exceptions.addThrowable(ERROR,
this,
Exceptions.failWithOverflow("Could not emit " + "value due to lack of requests"));
q.clear();
cancellations.dispose();
errorAll(a);
return;
}
}
if (e != 0L) {
Operators.produced(REQUESTED, this, e);
}
}
else if (mode == LEFT_CLOSE) {
LeftRightEndSubscriber end = (LeftRightEndSubscriber) val;
lefts.remove(end.index);
cancellations.remove(end);
}
else if (mode == RIGHT_CLOSE) {
LeftRightEndSubscriber end = (LeftRightEndSubscriber) val;
rights.remove(end.index);
cancellations.remove(end);
}
}
missed = WIP.addAndGet(this, -missed);
if (missed == 0) {
break;
}
}
}
@Override
public void innerError(Throwable ex) {
if (Exceptions.addThrowable(ERROR, this, ex)) {
ACTIVE.decrementAndGet(this);
drain();
}
else {
Operators.onErrorDropped(ex, actual.currentContext());
}
}
@Override
public void innerComplete(LeftRightSubscriber sender) {
cancellations.remove(sender);
ACTIVE.decrementAndGet(this);
drain();
}
@Override
public void innerValue(boolean isLeft, Object o) {
queueBiOffer.test(isLeft ? LEFT_VALUE : RIGHT_VALUE, o);
drain();
}
@Override
public void innerClose(boolean isLeft, LeftRightEndSubscriber index) {
queueBiOffer.test(isLeft ? LEFT_CLOSE : RIGHT_CLOSE, index);
drain();
}
@Override
public void innerCloseError(Throwable ex) {
if (Exceptions.addThrowable(ERROR, this, ex)) {
drain();
}
else {
Operators.onErrorDropped(ex, actual.currentContext());
}
}
}
}
| JoinSubscription |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/dynamic/ReactiveTypeAdapters.java | {
"start": 14693,
"end": 15077
} | enum ____ implements Function<io.reactivex.Single<?>, Flux<?>> {
INSTANCE;
@Override
public Flux<?> apply(io.reactivex.Single<?> source) {
return Flux.from(source.toFlowable());
}
}
/**
* An adapter {@link Function} to adopt a {@link io.reactivex.Completable} to {@link Publisher}.
*/
public | RxJava2SingleToFluxAdapter |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/collection/embeddable/Product.java | {
"start": 552,
"end": 1933
} | class ____ {
@Id
private Integer id;
private String name;
@ElementCollection
@CollectionTable(name = "items", joinColumns = @JoinColumn(name = "productId"))
@OrderColumn(name = "position")
private List<Item> items = new ArrayList<Item>();
Product() {
}
Product(Integer id, String name) {
this.id = id;
this.name = name;
}
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public List<Item> getItems() {
return items;
}
public void setItems(List<Item> items) {
this.items = items;
}
@Override
public int hashCode() {
int result = id != null ? id.hashCode() : 0;
result = 31 * result + ( name != null ? name.hashCode() : 0 );
result = 31 * result + ( items != null ? items.hashCode() : 0 );
return result;
}
@Override
public boolean equals(Object object) {
if ( this == object ) {
return true;
}
if ( object == null | getClass() != object.getClass() ) {
return false;
}
Product that = (Product) object;
if ( id != null ? !id.equals( that.id ) : that.id != null ) {
return false;
}
if ( name != null ? !name.equals( that.name ) : that.name != null ) {
return false;
}
return !( items != null ? !items.equals( that.items ) : that.items != null );
}
}
| Product |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/threadsafety/ThreadPriorityCheckTest.java | {
"start": 1992,
"end": 2711
} | class ____ {
public void foo() {
Thread thread =
new Thread(
new Runnable() {
@Override
public void run() {
System.out.println("Run, thread, run!");
}
});
thread.start();
// BUG: Diagnostic contains: ThreadPriorityCheck
thread.setPriority(Thread.MAX_PRIORITY);
}
}
""")
.doTest();
}
@Test
public void negative() {
compilationHelper
.addSourceLines(
"Test.java",
"""
| Test |
java | spring-projects__spring-framework | spring-test/src/main/java/org/springframework/test/context/junit4/SpringJUnit4ClassRunner.java | {
"start": 6394,
"end": 6968
} | class ____ be run
* @see #createTestContextManager(Class)
*/
public SpringJUnit4ClassRunner(Class<?> clazz) throws InitializationError {
super(clazz);
if (logger.isTraceEnabled()) {
logger.trace("SpringJUnit4ClassRunner constructor called with [%s]"
.formatted((clazz != null ? clazz.getName() : null)));
}
ensureSpringRulesAreNotPresent(clazz);
this.testContextManager = createTestContextManager(clazz);
}
/**
* Create a new {@link TestContextManager} for the supplied test class.
* <p>Can be overridden by subclasses.
* @param clazz the test | to |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/configurers/HttpSecuritySecurityMatchersTests.java | {
"start": 8360,
"end": 9183
} | class ____ {
@Bean
@Order(Ordered.HIGHEST_PRECEDENCE)
SecurityFilterChain first(HttpSecurity http) throws Exception {
// @formatter:off
http
.securityMatchers((security) -> security
.requestMatchers("/test-1")
.requestMatchers("/test-2")
.requestMatchers("/test-3"))
.authorizeHttpRequests((authorize) -> authorize
.anyRequest().denyAll())
.httpBasic(withDefaults());
// @formatter:on
return http.build();
}
@Bean
SecurityFilterChain second(HttpSecurity http) throws Exception {
// @formatter:off
http
.securityMatchers((security) -> security
.requestMatchers("/test-1"))
.authorizeHttpRequests((authorize) -> authorize
.anyRequest().permitAll());
// @formatter:on
return http.build();
}
@RestController
static | MultiMvcMatcherConfig |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/source/coordinator/SourceCoordinatorContextTest.java | {
"start": 2145,
"end": 13043
} | class ____ extends SourceCoordinatorTestBase {
@Test
void testRegisterReader() throws Exception {
sourceReady();
List<ReaderInfo> readerInfo = registerReaders();
assertThat(context.registeredReaders()).containsKey(0);
assertThat(context.registeredReaders()).containsKey(1);
assertThat(context.registeredReaders().get(0)).isEqualTo(readerInfo.get(0));
assertThat(context.registeredReaders().get(1)).isEqualTo(readerInfo.get(1));
final TestingSplitEnumerator<?> enumerator = getEnumerator();
assertThat(enumerator.getRegisteredReaders()).containsExactlyInAnyOrder(0, 1, 2);
ReaderInfo readerInfoOfSubtask1 =
ReaderInfo.createReaderInfo(
1, "subtask_1_location", Collections.singletonList(new MockSourceSplit(1)));
sourceCoordinator.subtaskReset(1, 1);
sourceCoordinator.handleEventFromOperator(
1,
1,
ReaderRegistrationEvent.createReaderRegistrationEvent(
readerInfoOfSubtask1.getSubtaskId(),
readerInfoOfSubtask1.getLocation(),
readerInfoOfSubtask1.getReportedSplitsOnRegistration(),
new MockSourceSplitSerializer()));
waitForCoordinatorToProcessActions();
assertThat(context.registeredReaders().get(1)).isEqualTo(readerInfoOfSubtask1);
}
@Test
void testTaskFailureUnregistersReader() throws Exception {
sourceReady();
List<ReaderInfo> readerInfo = registerReaders();
sourceCoordinator.executionAttemptFailed(0, 0, null);
waitForCoordinatorToProcessActions();
assertThat(context.registeredReaders())
.as("Only reader 2 should be registered.")
.hasSize(2);
assertThat(context.registeredReaders().get(0)).isNull();
assertThat(context.registeredReaders().get(1)).isEqualTo(readerInfo.get(1));
assertThat(context.registeredReaders().get(2)).isEqualTo(readerInfo.get(2));
}
@Test
void testUnregisterUnregisteredReader() {
context.unregisterSourceReader(0, 0);
}
@Test
void testAssignSplitsFromCoordinatorExecutor() throws Exception {
testAssignSplits(true);
}
@Test
void testAssignSplitsFromOtherThread() throws Exception {
testAssignSplits(false);
}
@SuppressWarnings("unchecked")
private void testAssignSplits(boolean fromCoordinatorExecutor) throws Exception {
sourceReady();
registerReaders();
// Assign splits to the readers.
SplitsAssignment<MockSourceSplit> splitsAssignment = getSplitsAssignment(2, 0);
if (fromCoordinatorExecutor) {
context.submitTask(() -> context.assignSplits(splitsAssignment)).get();
} else {
context.assignSplits(splitsAssignment);
}
// The tracker should have recorded the assignments.
verifyAssignment(
Collections.singletonList("0"),
splitSplitAssignmentTracker.uncheckpointedAssignments().get(0));
verifyAssignment(
Arrays.asList("1", "2"),
splitSplitAssignmentTracker.uncheckpointedAssignments().get(1));
// The OperatorCoordinatorContext should have received the event sending call.
assertThat(receivingTasks.getNumberOfSentEvents())
.as("There should be two events sent to the subtasks.")
.isEqualTo(2);
// Assert the events to subtask0.
List<OperatorEvent> eventsToSubtask0 = receivingTasks.getSentEventsForSubtask(0);
assertThat(eventsToSubtask0).hasSize(1);
OperatorEvent event = eventsToSubtask0.get(0);
assertThat(event).isInstanceOf(AddSplitEvent.class);
verifyAssignment(
Collections.singletonList("0"),
((AddSplitEvent<MockSourceSplit>) event).splits(new MockSourceSplitSerializer()));
}
@Test
void testAssignSplitToUnregisteredReaderFromCoordinatorExecutor() throws Exception {
testAssignSplitToUnregisterdReader(true);
}
@Test
void testAssignSplitToUnregisteredReaderFromOtherThread() throws Exception {
testAssignSplitToUnregisterdReader(false);
}
private void testAssignSplitToUnregisterdReader(boolean fromCoordinatorExecutor)
throws Exception {
sourceReady();
SplitsAssignment<MockSourceSplit> splitsAssignment = getSplitsAssignment(2, 0);
verifyException(
() -> {
if (fromCoordinatorExecutor) {
context.submitTask(() -> context.assignSplits(splitsAssignment)).get();
} else {
context.assignSplits(splitsAssignment);
}
},
"assignSplits() should fail to assign the splits to a reader that is not registered.",
"Cannot assign splits " + splitsAssignment.assignment().get(0));
}
@Test
void testExceptionInRunnableFailsTheJob() throws InterruptedException, ExecutionException {
ManuallyTriggeredScheduledExecutorService manualWorkerExecutor =
new ManuallyTriggeredScheduledExecutorService();
// need the factory to have the exception handler set
final ManuallyTriggeredScheduledExecutorService coordinatorExecutorWithExceptionHandler =
new ManuallyTriggeredScheduledExecutorService();
SourceCoordinatorContext<MockSourceSplit> testingContext =
new SourceCoordinatorContext<>(
new JobID(),
coordinatorExecutorWithExceptionHandler,
manualWorkerExecutor,
new SourceCoordinatorProvider.CoordinatorExecutorThreadFactory(
coordinatorThreadName, operatorCoordinatorContext),
operatorCoordinatorContext,
new MockSourceSplitSerializer(),
splitSplitAssignmentTracker,
false);
testingContext.runInCoordinatorThread(
() -> {
throw new RuntimeException();
});
manualWorkerExecutor.triggerAll();
// shutdown coordinatorExecutor and blocks until tasks are finished
testingContext.close();
coordinatorExecutorWithExceptionHandler.triggerAll();
// blocks until the job is failed: wait that the uncaught exception handler calls
// operatorCoordinatorContext#failJob() which completes the future
operatorCoordinatorContext.getJobFailedFuture().get();
assertThat(operatorCoordinatorContext.isJobFailed()).isTrue();
}
@Test
void testCallableInterruptedDuringShutdownDoNotFailJob() throws InterruptedException {
AtomicReference<Throwable> expectedError = new AtomicReference<>(null);
ManuallyTriggeredScheduledExecutorService manualWorkerExecutor =
new ManuallyTriggeredScheduledExecutorService();
ManuallyTriggeredScheduledExecutorService manualCoordinatorExecutor =
new ManuallyTriggeredScheduledExecutorService();
SourceCoordinatorContext<MockSourceSplit> testingContext =
new SourceCoordinatorContext<>(
new JobID(),
manualCoordinatorExecutor,
manualWorkerExecutor,
new SourceCoordinatorProvider.CoordinatorExecutorThreadFactory(
TEST_OPERATOR_ID.toHexString(), operatorCoordinatorContext),
operatorCoordinatorContext,
new MockSourceSplitSerializer(),
splitSplitAssignmentTracker,
false);
testingContext.callAsync(
() -> {
throw new InterruptedException();
},
(ignored, e) -> {
if (e != null) {
expectedError.set(e);
throw new RuntimeException(e);
}
});
manualWorkerExecutor.triggerAll();
testingContext.close();
manualCoordinatorExecutor.triggerAll();
assertThat(expectedError.get()).isInstanceOf(InterruptedException.class);
assertThat(operatorCoordinatorContext.isJobFailed()).isFalse();
}
@Test
void testSupportsIntermediateNoMoreSplits() throws Exception {
sourceReady();
registerReaders();
SplitsAssignment<MockSourceSplit> splitsAssignment = getSplitsAssignment(2, 0);
context.assignSplits(splitsAssignment);
context.signalIntermediateNoMoreSplits(0);
context.signalIntermediateNoMoreSplits(1);
assertThat(context.hasNoMoreSplits(0)).isFalse();
assertThat(context.hasNoMoreSplits(1)).isFalse();
assertThat(context.hasNoMoreSplits(2)).isFalse();
context.signalNoMoreSplits(0);
context.signalNoMoreSplits(1);
assertThat(context.hasNoMoreSplits(0)).isTrue();
assertThat(context.hasNoMoreSplits(1)).isTrue();
assertThat(context.hasNoMoreSplits(2)).isFalse();
}
// ------------------------
private List<ReaderInfo> registerReaders() {
final List<ReaderInfo> infos =
Arrays.asList(
new ReaderInfo(0, "subtask_0_location"),
new ReaderInfo(1, "subtask_1_location"),
new ReaderInfo(2, "subtask_2_location"));
for (ReaderInfo info : infos) {
sourceCoordinator.handleEventFromOperator(
info.getSubtaskId(),
0,
new ReaderRegistrationEvent(info.getSubtaskId(), info.getLocation()));
}
waitForCoordinatorToProcessActions();
return infos;
}
@Test
void testSetIsProcessingBacklog() throws Exception {
sourceReady();
registerReader(0, 0);
context.setIsProcessingBacklog(true);
for (int i = 0; i < context.currentParallelism(); ++i) {
final List<OperatorEvent> events = receivingTasks.getSentEventsForSubtask(i);
assertThat(events.get(events.size() - 1)).isEqualTo(new IsProcessingBacklogEvent(true));
}
registerReader(1, 0);
context.setIsProcessingBacklog(false);
registerReader(2, 0);
for (int i = 0; i < context.currentParallelism(); ++i) {
final List<OperatorEvent> events = receivingTasks.getSentEventsForSubtask(i);
assertThat(events.get(events.size() - 1))
.isEqualTo(new IsProcessingBacklogEvent(false));
}
}
}
| SourceCoordinatorContextTest |
java | google__guava | android/guava-tests/test/com/google/common/collect/MultimapsCollectionTest.java | {
"start": 17915,
"end": 19164
} | class ____<M extends Multimap<String, Integer>>
implements TestMultimapGenerator<String, Integer, M> {
@Override
public SampleElements<Entry<String, Integer>> samples() {
return new SampleElements<>(
mapEntry("one", 114),
mapEntry("two", 37),
mapEntry("three", 42),
mapEntry("four", 19),
mapEntry("five", 82));
}
@SuppressWarnings("unchecked")
@Override
public Entry<String, Integer>[] createArray(int length) {
return (Entry<String, Integer>[]) new Entry<?, ?>[length];
}
@Override
public Iterable<Entry<String, Integer>> order(List<Entry<String, Integer>> insertionOrder) {
return insertionOrder;
}
@Override
public String[] createKeyArray(int length) {
return new String[length];
}
@Override
public Integer[] createValueArray(int length) {
return new Integer[length];
}
@Override
public SampleElements<String> sampleKeys() {
return new SampleElements<>("one", "two", "three", "four", "five");
}
@Override
public SampleElements<Integer> sampleValues() {
return new SampleElements<>(114, 37, 42, 19, 82);
}
}
abstract static | TestFilteredMultimapGenerator |
java | spring-projects__spring-framework | spring-jdbc/src/main/java/org/springframework/jdbc/support/SQLErrorCodesFactory.java | {
"start": 2312,
"end": 5059
} | class ____.
*/
public static final String SQL_ERROR_CODE_DEFAULT_PATH = "org/springframework/jdbc/support/sql-error-codes.xml";
private static final Log logger = LogFactory.getLog(SQLErrorCodesFactory.class);
/**
* Keep track of a single instance, so we can return it to classes that request it.
* Lazily initialized in order to avoid making {@code SQLErrorCodesFactory} constructor
* reachable on native images when not needed.
*/
private static @Nullable SQLErrorCodesFactory instance;
/**
* Return the singleton instance.
*/
public static SQLErrorCodesFactory getInstance() {
if (instance == null) {
instance = new SQLErrorCodesFactory();
}
return instance;
}
/**
* Map to hold error codes for all databases defined in the config file.
* Key is the database product name, value is the SQLErrorCodes instance.
*/
private final Map<String, SQLErrorCodes> errorCodesMap;
/**
* Map to cache the SQLErrorCodes instance per DataSource.
*/
private final Map<DataSource, SQLErrorCodes> dataSourceCache = new ConcurrentReferenceHashMap<>(16);
/**
* Create a new instance of the {@link SQLErrorCodesFactory} class.
* <p>Not public to enforce Singleton design pattern. Would be private
* except to allow testing via overriding the
* {@link #loadResource(String)} method.
* <p><b>Do not subclass in application code.</b>
* @see #loadResource(String)
*/
protected SQLErrorCodesFactory() {
Map<String, SQLErrorCodes> errorCodes;
try {
DefaultListableBeanFactory lbf = new DefaultListableBeanFactory();
lbf.setBeanClassLoader(getClass().getClassLoader());
XmlBeanDefinitionReader bdr = new XmlBeanDefinitionReader(lbf);
// Load default SQL error codes.
Resource resource = loadResource(SQL_ERROR_CODE_DEFAULT_PATH);
if (resource != null && resource.exists()) {
bdr.loadBeanDefinitions(resource);
}
else {
logger.info("Default sql-error-codes.xml not found (should be included in spring-jdbc jar)");
}
// Load custom SQL error codes, overriding defaults.
resource = loadResource(SQL_ERROR_CODE_OVERRIDE_PATH);
if (resource != null && resource.exists()) {
bdr.loadBeanDefinitions(resource);
logger.debug("Found custom sql-error-codes.xml file at the root of the classpath");
}
// Check all beans of type SQLErrorCodes.
errorCodes = lbf.getBeansOfType(SQLErrorCodes.class, true, false);
if (logger.isTraceEnabled()) {
logger.trace("SQLErrorCodes loaded: " + errorCodes.keySet());
}
}
catch (BeansException ex) {
logger.warn("Error loading SQL error codes from config file", ex);
errorCodes = Collections.emptyMap();
}
this.errorCodesMap = errorCodes;
}
/**
* Load the given resource from the | path |
java | elastic__elasticsearch | modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/UpperCaseTokenFilterFactory.java | {
"start": 902,
"end": 1299
} | class ____ extends AbstractTokenFilterFactory implements NormalizingTokenFilterFactory {
public UpperCaseTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(name);
}
@Override
public TokenStream create(TokenStream tokenStream) {
return new UpperCaseFilter(tokenStream);
}
}
| UpperCaseTokenFilterFactory |
java | spring-projects__spring-boot | core/spring-boot-autoconfigure/src/test/java/org/springframework/boot/autoconfigure/condition/ConditionalOnPropertyTests.java | {
"start": 12969,
"end": 13118
} | class ____ {
@Bean
String foo() {
return "foo";
}
}
@Configuration(proxyBeanMethods = false)
@ConditionalOnProperty
static | ValueAttribute |
java | apache__camel | components/camel-jms/src/test/java/org/apache/camel/component/jms/integration/issues/JmsToFileMessageIdIT.java | {
"start": 1704,
"end": 4054
} | class ____ extends AbstractJMSTest {
@Order(2)
@RegisterExtension
public static CamelContextExtension camelContextExtension = new DefaultCamelContextExtension();
protected CamelContext context;
protected ProducerTemplate template;
protected ConsumerTemplate consumer;
@Test
public void testFromJmsToFileAndMessageId() throws Exception {
// Mock endpoint to collect message at the end of the route
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMessageCount(1);
template.sendBody("activemq:JmsToFileMessageIdTest", "Hello World");
MockEndpoint.assertIsSatisfied(context);
}
@Override
protected String getComponentName() {
return "activemq";
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
// Make a route from an activemq queue to a file endpoint, then try to call getMessageId()
from("activemq:JmsToFileMessageIdTest")
.process(exchange -> {
// assert camel id is based on jms id
String camelId = exchange.getIn().getMessageId();
assertNotNull(camelId);
JmsMessage jms = exchange.getIn(JmsMessage.class);
String jmsId = jms.getJmsMessage().getJMSMessageID();
assertNotNull(jmsId);
assertEquals(jmsId, camelId);
})
.to("file://target/tofile")
.process(exchange -> {
// in Camel 1.4 or older this caused a NPE
assertNotNull(exchange.getIn().getMessageId());
})
.to("mock:result");
}
};
}
@Override
public CamelContextExtension getCamelContextExtension() {
return camelContextExtension;
}
@BeforeEach
void setUpRequirements() {
context = camelContextExtension.getContext();
template = camelContextExtension.getProducerTemplate();
consumer = camelContextExtension.getConsumerTemplate();
}
}
| JmsToFileMessageIdIT |
java | apache__flink | flink-formats/flink-json/src/main/java/org/apache/flink/formats/json/JsonParserRowDataDeserializationSchema.java | {
"start": 1578,
"end": 1713
} | class ____ to convert fields from {@link JsonParser} to {@link RowData} which has a higher
* parsing efficiency.
*/
@Internal
public | used |
java | quarkusio__quarkus | integration-tests/picocli-native/src/test/java/io/quarkus/it/picocli/PicocliTest.java | {
"start": 610,
"end": 5214
} | class ____ {
private String value;
@Test
public void testExitCode(QuarkusMainLauncher launcher) {
LaunchResult result = launcher.launch("exitcode", "--code", Integer.toString(42));
assertThat(result.exitCode()).isEqualTo(42);
result = launcher.launch("exitcode", "--code", Integer.toString(0));
assertThat(result.exitCode()).isEqualTo(0);
result = launcher.launch("exitcode", "--code", Integer.toString(2));
assertThat(result.exitCode()).isEqualTo(2);
}
@Test
@Launch({ "test-command", "-f", "test.txt", "-f", "test2.txt", "-f", "test3.txt", "-s", "ERROR", "-h", "SOCKS=5.5.5.5",
"-p", "privateValue", "pos1", "pos2" })
public void testBasicReflection(LaunchResult result) throws UnknownHostException {
assertThat(result.getOutput())
.contains("-s", "ERROR")
.contains("-p:privateValue")
.contains("-p:privateValue")
.contains("positional:[pos1, pos2]");
assertThat(value).isNotNull();
}
@Test
public void testMethodSubCommand(QuarkusMainLauncher launcher) {
LaunchResult result = launcher.launch("with-method-sub-command", "hello", "-n", "World!");
assertThat(result.exitCode()).isZero();
assertThat(result.getOutput()).contains("Hello World!");
result = launcher.launch("with-method-sub-command", "goodBye", "-n", "Test?");
assertThat(result.exitCode()).isZero();
assertThat(result.getOutput()).contains("Goodbye Test?");
}
@Test
public void testExcludeLogCapturing(QuarkusMainLauncher launcher) {
org.jboss.logging.Logger.getLogger("test").error("error");
LaunchResult result = launcher.launch("with-method-sub-command", "hello", "-n", "World!");
assertThat(result.exitCode()).isZero();
assertThat(result.getOutput()).contains("Hello World!");
}
@Test
public void testIncludeLogCommand(QuarkusMainLauncher launcher) {
org.jboss.logging.Logger.getLogger("test").error("error");
LaunchResult result = launcher.launch("with-method-sub-command", "loggingHello", "-n", "World!");
assertThat(result.exitCode()).isZero();
assertThat(result.getOutput()).contains("ERROR [io.quarkus.it.picocli.WithMethodSubCommand] (main) Hello World!");
assertThat(result.getOutput()).doesNotContain("ERROR [test] (main) error");
}
@Test
@Launch({ "command-used-as-parent", "-p", "testValue", "child" })
public void testParentCommand(LaunchResult result) {
assertThat(result.getOutput()).contains("testValue");
assertThat(value).isNotNull();
}
@Test
@Launch({ "exclusivedemo", "-b", "150" })
public void testCommandWithArgGroup(LaunchResult result) {
assertThat(result.getOutput())
.contains("-a:0")
.contains("-b:150")
.contains("-c:0");
assertThat(value).isNotNull();
}
@Test
@Launch({ "dynamic-proxy" })
public void testDynamicProxy(LaunchResult result) {
assertThat(result.getOutput()).contains("2007-12-03T10:15:30");
assertThat(value).isNotNull();
}
@Test
@Launch("quarkus")
public void testDynamicVersionProvider(LaunchResult launchResult) {
assertThat(launchResult.getOutput()).contains("quarkus version 1.0");
assertThat(value).isNotNull();
}
@Test
@Launch({ "unmatched", "-x", "-a", "AAA", "More" })
public void testUnmatched(LaunchResult launchResult) {
assertThat(launchResult.getOutput())
.contains("-a:AAA")
.contains("-b:null")
.contains("remainder:[More]")
.contains("unmatched[-x]");
assertThat(value).isNotNull();
}
@Test
public void testI18s(QuarkusMainLauncher launcher) {
LaunchResult result = launcher.launch("localized-command-one", "--help");
assertThat(result.getOutput())
.contains("First in CommandOne");
result = launcher.launch("localized-command-two", "--help");
assertThat(result.getOutput())
.contains("First in CommandTwo");
}
@Test
@Launch({ "completion-reflection", "test" })
public void testCompletionReflection() {
}
@Test
@Launch("default-value-provider")
public void testDefaultValueProvider(LaunchResult result) {
assertThat(result.getOutput()).contains("default:default-value");
assertThat(value).isNotNull();
}
public static | PicocliTest |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-client-jackson/deployment/src/test/java/io/quarkus/rest/client/reactive/jackson/test/DifferentObjectMapperForClientAndServerTest.java | {
"start": 5118,
"end": 5885
} | class ____ {
private String value;
public Request() {
}
public Request(String value) {
this.value = value;
}
public String getValue() {
return value;
}
public void setValue(String value) {
this.value = value;
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
Request request = (Request) o;
return Objects.equals(value, request.value);
}
@Override
public int hashCode() {
return Objects.hash(value);
}
}
public static | Request |
java | spring-projects__spring-security | oauth2/oauth2-resource-server/src/main/java/org/springframework/security/oauth2/server/resource/web/access/server/BearerTokenServerAccessDeniedHandler.java | {
"start": 1996,
"end": 4262
} | class ____ implements ServerAccessDeniedHandler {
private static final Collection<String> WELL_KNOWN_SCOPE_ATTRIBUTE_NAMES = Arrays.asList("scope", "scp");
private String realmName;
@Override
public Mono<Void> handle(ServerWebExchange exchange, AccessDeniedException denied) {
Map<String, String> parameters = new LinkedHashMap<>();
if (this.realmName != null) {
parameters.put("realm", this.realmName);
}
// @formatter:off
return exchange.getPrincipal()
.filter(AbstractOAuth2TokenAuthenticationToken.class::isInstance)
.map((token) -> errorMessageParameters(parameters))
.switchIfEmpty(Mono.just(parameters))
.flatMap((params) -> respond(exchange, params));
// @formatter:on
}
/**
* Set the default realm name to use in the bearer token error response
* @param realmName
*/
public final void setRealmName(String realmName) {
this.realmName = realmName;
}
private static Map<String, String> errorMessageParameters(Map<String, String> parameters) {
parameters.put("error", BearerTokenErrorCodes.INSUFFICIENT_SCOPE);
parameters.put("error_description",
"The request requires higher privileges than provided by the access token.");
parameters.put("error_uri", "https://tools.ietf.org/html/rfc6750#section-3.1");
return parameters;
}
private static Mono<Void> respond(ServerWebExchange exchange, Map<String, String> parameters) {
String wwwAuthenticate = computeWWWAuthenticateHeaderValue(parameters);
exchange.getResponse().setStatusCode(HttpStatus.FORBIDDEN);
exchange.getResponse().getHeaders().set(HttpHeaders.WWW_AUTHENTICATE, wwwAuthenticate);
return exchange.getResponse().setComplete();
}
private static String computeWWWAuthenticateHeaderValue(Map<String, String> parameters) {
StringBuilder wwwAuthenticate = new StringBuilder();
wwwAuthenticate.append("Bearer");
if (!parameters.isEmpty()) {
wwwAuthenticate.append(" ");
int i = 0;
for (Map.Entry<String, String> entry : parameters.entrySet()) {
wwwAuthenticate.append(entry.getKey()).append("=\"").append(entry.getValue()).append("\"");
if (i != parameters.size() - 1) {
wwwAuthenticate.append(", ");
}
i++;
}
}
return wwwAuthenticate.toString();
}
}
| BearerTokenServerAccessDeniedHandler |
java | mockito__mockito | mockito-core/src/test/java/org/mockitousage/annotation/SpyAnnotationTest.java | {
"start": 10557,
"end": 10688
} | class ____ {
@Spy private InnerPrivateStaticAbstract spy_field;
private abstract static | WithInnerPrivateStaticAbstract |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/security/AbstractAuthFailureResponseBodyDevModeTest.java | {
"start": 1153,
"end": 3130
} | enum ____ {
AUTH_FAILED_WITH_BODY(() -> new AuthenticationFailedException(RESPONSE_BODY), true),
AUTH_COMPLETION_WITH_BODY(() -> new AuthenticationCompletionException(RESPONSE_BODY), true),
AUTH_FAILED_WITHOUT_BODY(AuthenticationFailedException::new, false),
AUTH_COMPLETION_WITHOUT_BODY(AuthenticationCompletionException::new, false);
public final Supplier<Throwable> failureSupplier;
private final boolean expectBody;
AuthFailure(Supplier<Throwable> failureSupplier, boolean expectBody) {
this.failureSupplier = failureSupplier;
this.expectBody = expectBody;
}
}
@Test
public void testAuthenticationFailedExceptionBody() {
assertExceptionBody(AuthFailure.AUTH_FAILED_WITHOUT_BODY, false);
assertExceptionBody(AuthFailure.AUTH_FAILED_WITHOUT_BODY, true);
assertExceptionBody(AuthFailure.AUTH_FAILED_WITH_BODY, false);
assertExceptionBody(AuthFailure.AUTH_FAILED_WITH_BODY, true);
}
@Test
public void testAuthenticationCompletionExceptionBody() {
assertExceptionBody(AuthFailure.AUTH_COMPLETION_WITHOUT_BODY, false);
assertExceptionBody(AuthFailure.AUTH_COMPLETION_WITH_BODY, false);
}
private static void assertExceptionBody(AuthFailure failure, boolean challenge) {
int statusCode = challenge ? 302 : 401;
boolean expectBody = failure.expectBody && statusCode == 401;
RestAssured
.given()
.redirects().follow(false)
.header("auth-failure", failure.toString())
.header("challenge-data", challenge)
.get("/secured")
.then()
.statusCode(statusCode)
.body(expectBody ? Matchers.equalTo(RESPONSE_BODY)
: Matchers.not(Matchers.containsString(RESPONSE_BODY)));
}
@Authenticated
@Path("secured")
public static | AuthFailure |
java | netty__netty | testsuite-jpms/src/test/java/io/netty/testsuite_jpms/test/NativeTransportTest.java | {
"start": 2158,
"end": 6046
} | class ____ {
@EnabledOnOs(OS.MAC)
@Test
public void testKQueue() throws Exception {
mySetupClientHostnameValidation(
KQueueIoHandler.newFactory(),
KQueueServerSocketChannel.class,
KQueueSocketChannel.class);
}
@EnabledOnOs(OS.LINUX)
@Test
public void testEpoll() throws Exception {
mySetupClientHostnameValidation(
EpollIoHandler.newFactory(),
EpollServerSocketChannel.class,
EpollSocketChannel.class);
}
@EnabledOnOs(OS.LINUX)
@Test
public void testIoUring() throws Exception {
mySetupClientHostnameValidation(
IoUringIoHandler.newFactory(),
IoUringServerSocketChannel.class,
IoUringSocketChannel.class);
}
private void mySetupClientHostnameValidation(IoHandlerFactory ioHandlerFactory,
Class<? extends ServerSocketChannel> serverSocketChannelFactory,
Class<? extends SocketChannel> socketChannelFactory
) throws InterruptedException {
ServerBootstrap sb = new ServerBootstrap();
Bootstrap cb = new Bootstrap();
sb.group(new MultiThreadIoEventLoopGroup(ioHandlerFactory));
sb.channel(serverSocketChannelFactory);
sb.childHandler(new ChannelInitializer() {
@Override
protected void initChannel(Channel ch) throws Exception {
ChannelPipeline p = ch.pipeline();
p.addLast(new ChannelInboundHandlerAdapter() {
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
if (msg instanceof ByteBuf) {
ctx.write(msg);
} else {
super.channelRead(ctx, msg);
}
}
@Override
public void channelReadComplete(ChannelHandlerContext ctx) throws Exception {
ctx.flush();
}
});
}
});
List<String> received = Collections.synchronizedList(new ArrayList<>());
cb.group(new MultiThreadIoEventLoopGroup(ioHandlerFactory));
cb.channel(socketChannelFactory);
cb.handler(new ChannelInitializer<Channel>() {
@Override
protected void initChannel(Channel ch) throws Exception {
ChannelPipeline p = ch.pipeline();
p.addLast(new ChannelInboundHandlerAdapter() {
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
if (msg instanceof ByteBuf) {
ByteBuf buff = (ByteBuf) msg;
try {
received.add(buff.toString(StandardCharsets.UTF_8));
} finally {
buff.release();
}
}
}
});
}
});
Channel serverChannel = sb.bind(new InetSocketAddress("localhost", 0)).sync().channel();
final int port = ((InetSocketAddress) serverChannel.localAddress()).getPort();
ChannelFuture ccf = cb.connect(new InetSocketAddress("localhost", port));
assertTrue(ccf.awaitUninterruptibly().isSuccess());
Channel clientChannel = ccf.channel();
clientChannel.writeAndFlush(Unpooled.copiedBuffer("hello", StandardCharsets.UTF_8)).sync();
while (received.isEmpty()) {
Thread.sleep(100);
}
assertEquals("hello", received.get(0));
}
}
| NativeTransportTest |
java | square__retrofit | retrofit/java-test/src/test/java/retrofit2/RequestFactoryTest.java | {
"start": 8351,
"end": 8846
} | class ____ {
@Multipart //
@GET("/") //
Call<ResponseBody> method() {
return null;
}
}
try {
buildRequest(Example.class);
fail();
} catch (IllegalArgumentException e) {
assertThat(e)
.hasMessageThat()
.isEqualTo(
"Multipart can only be specified on HTTP methods with request body (e.g., @POST).\n for method Example.method");
}
}
@Test
public void multipartFailsWithNoParts() {
| Example |
java | mapstruct__mapstruct | processor/src/main/java/org/mapstruct/ap/internal/model/BeanMappingMethod.java | {
"start": 95446,
"end": 106359
} | class ____ {
private final boolean hasError;
private final List<ParameterBinding> parameterBindings;
private final Map<String, Accessor> constructorAccessors;
private ConstructorAccessor(
List<ParameterBinding> parameterBindings,
Map<String, Accessor> constructorAccessors) {
this( false, parameterBindings, constructorAccessors );
}
private ConstructorAccessor(boolean hasError, List<ParameterBinding> parameterBindings,
Map<String, Accessor> constructorAccessors) {
this.hasError = hasError;
this.parameterBindings = parameterBindings;
this.constructorAccessors = constructorAccessors;
}
}
//CHECKSTYLE:OFF
private BeanMappingMethod(Method method,
List<Annotation> annotations,
Collection<String> existingVariableNames,
List<PropertyMapping> propertyMappings,
MethodReference factoryMethod,
boolean mapNullToDefault,
Type returnTypeToConstruct,
BuilderType returnTypeBuilder,
List<LifecycleCallbackMethodReference> beforeMappingReferences,
List<LifecycleCallbackMethodReference> afterMappingReferences,
List<LifecycleCallbackMethodReference> beforeMappingReferencesWithFinalizedReturnType,
List<LifecycleCallbackMethodReference> afterMappingReferencesWithFinalizedReturnType,
MethodReference finalizerMethod,
MappingReferences mappingReferences,
List<SubclassMapping> subclassMappings,
Map<String, PresenceCheck> presenceChecksByParameter,
Type subclassExhaustiveException) {
super(
method,
annotations,
existingVariableNames,
factoryMethod,
mapNullToDefault,
beforeMappingReferences,
afterMappingReferences
);
//CHECKSTYLE:ON
this.propertyMappings = propertyMappings;
this.returnTypeBuilder = returnTypeBuilder;
this.finalizerMethod = finalizerMethod;
this.subclassExhaustiveException = subclassExhaustiveException;
if ( this.finalizerMethod != null ) {
this.finalizedResultName =
Strings.getSafeVariableName( getResultName() + "Result", existingVariableNames );
existingVariableNames.add( this.finalizedResultName );
}
else {
this.finalizedResultName = null;
}
this.mappingReferences = mappingReferences;
this.beforeMappingReferencesWithFinalizedReturnType = beforeMappingReferencesWithFinalizedReturnType;
this.afterMappingReferencesWithFinalizedReturnType = afterMappingReferencesWithFinalizedReturnType;
// initialize constant mappings as all mappings, but take out the ones that can be contributed to a
// parameter mapping.
this.mappingsByParameter = new HashMap<>();
this.constantMappings = new ArrayList<>( propertyMappings.size() );
this.presenceChecksByParameter = presenceChecksByParameter;
this.constructorMappingsByParameter = new LinkedHashMap<>();
this.constructorConstantMappings = new ArrayList<>();
Set<String> sourceParameterNames = new HashSet<>();
for ( Parameter sourceParameter : getSourceParameters() ) {
sourceParameterNames.add( sourceParameter.getName() );
}
for ( PropertyMapping mapping : propertyMappings ) {
if ( mapping.isConstructorMapping() ) {
if ( sourceParameterNames.contains( mapping.getSourceBeanName() ) ) {
constructorMappingsByParameter.computeIfAbsent(
mapping.getSourceBeanName(),
key -> new ArrayList<>()
).add( mapping );
}
else {
constructorConstantMappings.add( mapping );
}
}
else if ( sourceParameterNames.contains( mapping.getSourceBeanName() ) ) {
mappingsByParameter.computeIfAbsent( mapping.getSourceBeanName(), key -> new ArrayList<>() )
.add( mapping );
}
else {
constantMappings.add( mapping );
}
}
this.returnTypeToConstruct = returnTypeToConstruct;
this.subclassMappings = subclassMappings;
}
public Type getSubclassExhaustiveException() {
return subclassExhaustiveException;
}
public List<PropertyMapping> getConstantMappings() {
return constantMappings;
}
public List<PropertyMapping> getConstructorConstantMappings() {
return constructorConstantMappings;
}
public List<SubclassMapping> getSubclassMappings() {
return subclassMappings;
}
public String getFinalizedResultName() {
return finalizedResultName;
}
public List<LifecycleCallbackMethodReference> getBeforeMappingReferencesWithFinalizedReturnType() {
return beforeMappingReferencesWithFinalizedReturnType;
}
public List<LifecycleCallbackMethodReference> getAfterMappingReferencesWithFinalizedReturnType() {
return afterMappingReferencesWithFinalizedReturnType;
}
public List<PropertyMapping> propertyMappingsByParameter(Parameter parameter) {
// issues: #909 and #1244. FreeMarker has problem getting values from a map when the search key is size or value
return mappingsByParameter.getOrDefault( parameter.getName(), Collections.emptyList() );
}
public List<PropertyMapping> constructorPropertyMappingsByParameter(Parameter parameter) {
// issues: #909 and #1244. FreeMarker has problem getting values from a map when the search key is size or value
return constructorMappingsByParameter.getOrDefault( parameter.getName(), Collections.emptyList() );
}
public Type getReturnTypeToConstruct() {
return returnTypeToConstruct;
}
public boolean hasSubclassMappings() {
return !subclassMappings.isEmpty();
}
public boolean isAbstractReturnType() {
return getFactoryMethod() == null && returnTypeToConstruct != null
&& returnTypeToConstruct.isAbstract();
}
public boolean hasConstructorMappings() {
return !constructorMappingsByParameter.isEmpty() || !constructorConstantMappings.isEmpty();
}
public MethodReference getFinalizerMethod() {
return finalizerMethod;
}
@Override
public Set<Type> getImportTypes() {
Set<Type> types = super.getImportTypes();
for ( PropertyMapping propertyMapping : propertyMappings ) {
types.addAll( propertyMapping.getImportTypes() );
if ( propertyMapping.isConstructorMapping() ) {
// We need to add the target type imports for a constructor mapper since we define its parameters
types.addAll( propertyMapping.getTargetType().getImportTypes() );
}
}
for ( SubclassMapping subclassMapping : subclassMappings ) {
types.addAll( subclassMapping.getImportTypes() );
}
if ( returnTypeToConstruct != null ) {
types.addAll( returnTypeToConstruct.getImportTypes() );
}
if ( returnTypeBuilder != null ) {
types.add( returnTypeBuilder.getOwningType() );
}
for ( LifecycleCallbackMethodReference reference : beforeMappingReferencesWithFinalizedReturnType ) {
types.addAll( reference.getImportTypes() );
}
for ( LifecycleCallbackMethodReference reference : afterMappingReferencesWithFinalizedReturnType ) {
types.addAll( reference.getImportTypes() );
}
return types;
}
public Collection<PresenceCheck> getSourcePresenceChecks() {
return presenceChecksByParameter.values();
}
public Map<String, PresenceCheck> getPresenceChecksByParameter() {
return presenceChecksByParameter;
}
public PresenceCheck getPresenceCheckByParameter(Parameter parameter) {
return presenceChecksByParameter.get( parameter.getName() );
}
public List<Parameter> getSourceParametersNeedingPresenceCheck() {
return getSourceParameters().stream()
.filter( this::needsPresenceCheck )
.collect( Collectors.toList() );
}
public List<Parameter> getSourceParametersNotNeedingPresenceCheck() {
return getSourceParameters().stream()
.filter( parameter -> !needsPresenceCheck( parameter ) )
.collect( Collectors.toList() );
}
private boolean needsPresenceCheck(Parameter parameter) {
if ( !presenceChecksByParameter.containsKey( parameter.getName() ) ) {
return false;
}
List<PropertyMapping> mappings = propertyMappingsByParameter( parameter );
if ( mappings.size() == 1 && doesNotNeedPresenceCheckForSourceParameter( mappings.get( 0 ) ) ) {
return false;
}
mappings = constructorPropertyMappingsByParameter( parameter );
if ( mappings.size() == 1 && doesNotNeedPresenceCheckForSourceParameter( mappings.get( 0 ) ) ) {
return false;
}
return true;
}
private boolean doesNotNeedPresenceCheckForSourceParameter(PropertyMapping mapping) {
if ( mapping.getAssignment().isCallingUpdateMethod() ) {
// If the mapping assignment is calling an update method then we should do a null check
// in the bean mapping
return false;
}
return mapping.getAssignment().isSourceReferenceParameter();
}
@Override
public int hashCode() {
//Needed for Checkstyle, otherwise it fails due to EqualsHashCode rule
return super.hashCode();
}
@Override
public boolean equals(Object obj) {
if ( this == obj ) {
return true;
}
if ( obj == null || getClass() != obj.getClass() ) {
return false;
}
BeanMappingMethod that = (BeanMappingMethod) obj;
if ( !super.equals( obj ) ) {
return false;
}
if ( !Objects.equals( propertyMappings, that.propertyMappings ) ) {
return false;
}
if ( !Objects.equals( mappingReferences, that.mappingReferences ) ) {
return false;
}
return true;
}
}
| ConstructorAccessor |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/jmx/access/MBeanProxyFactoryBean.java | {
"start": 1827,
"end": 2035
} | interface ____ lead
* to an {@code InvalidInvocationException}.
*
* @author Rob Harrop
* @author Juergen Hoeller
* @since 1.2
* @see MBeanClientInterceptor
* @see InvalidInvocationException
*/
public | will |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/PagerUtilsTest_Count_SQLServer_0.java | {
"start": 163,
"end": 1958
} | class ____ extends TestCase {
public void test_sqlserver_0() throws Exception {
String sql = "select * from t";
String result = PagerUtils.count(sql, JdbcConstants.SQL_SERVER);
assertEquals("SELECT COUNT(*)\n" +
"FROM t", result);
}
public void test_sqlserver_1() throws Exception {
String sql = "select id, name from t";
String result = PagerUtils.count(sql, JdbcConstants.SQL_SERVER);
assertEquals("SELECT COUNT(*)\n" +
"FROM t", result);
}
public void test_sqlserver_2() throws Exception {
String sql = "select id, name from t order by id";
String result = PagerUtils.count(sql, JdbcConstants.SQL_SERVER);
assertEquals("SELECT COUNT(*)\n" +
"FROM t", result);
}
public void test_sqlserver_group_0() throws Exception {
String sql = "select type, count(*) from t group by type";
String result = PagerUtils.count(sql, JdbcConstants.SQL_SERVER);
assertEquals("SELECT COUNT(*)\n" +
"FROM (\n" +
"\tSELECT type, count(*)\n" +
"\tFROM t\n" +
"\tGROUP BY type\n" +
") ALIAS_COUNT", result);
}
public void test_sqlserver_union_0() throws Exception {
String sql = "select id, name from t1 union select id, name from t2 order by id";
String result = PagerUtils.count(sql, JdbcConstants.SQL_SERVER);
assertEquals("SELECT COUNT(*)\n" +
"FROM (\n" +
"\tSELECT id, name\n" +
"\tFROM t1\n" +
"\tUNION\n" +
"\tSELECT id, name\n" +
"\tFROM t2\n" +
") ALIAS_COUNT", result);
}
}
| PagerUtilsTest_Count_SQLServer_0 |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/type/ConvertedBasicCollectionType.java | {
"start": 673,
"end": 2505
} | class ____<C extends Collection<E>, E> extends BasicCollectionType<C, E> {
private final BasicValueConverter<C, ?> converter;
private final ValueExtractor<C> jdbcValueExtractor;
private final ValueBinder<C> jdbcValueBinder;
private final JdbcLiteralFormatter<C> jdbcLiteralFormatter;
@SuppressWarnings("unchecked")
public ConvertedBasicCollectionType(
BasicType<E> baseDescriptor,
JdbcType arrayJdbcType,
BasicCollectionJavaType<C, E> arrayTypeDescriptor,
BasicValueConverter<C, ?> converter) {
super( baseDescriptor, arrayJdbcType, arrayTypeDescriptor );
this.converter = converter;
this.jdbcValueBinder = (ValueBinder<C>) arrayJdbcType.getBinder( converter.getRelationalJavaType() );
this.jdbcValueExtractor = (ValueExtractor<C>) arrayJdbcType.getExtractor( converter.getRelationalJavaType() );
this.jdbcLiteralFormatter = (JdbcLiteralFormatter<C>) arrayJdbcType.getJdbcLiteralFormatter( converter.getRelationalJavaType() );
}
@Override
public BasicValueConverter<C, ?> getValueConverter() {
return converter;
}
@Override
public JavaType<?> getJdbcJavaType() {
return converter.getRelationalJavaType();
}
@Override
public ValueExtractor<C> getJdbcValueExtractor() {
return jdbcValueExtractor;
}
@Override
public ValueBinder<C> getJdbcValueBinder() {
return jdbcValueBinder;
}
@Override
public JdbcLiteralFormatter<C> getJdbcLiteralFormatter() {
return jdbcLiteralFormatter;
}
@Override
public boolean equals(Object object) {
return object == this || super.equals( object )
&& object instanceof ConvertedBasicCollectionType<?, ?> that
&& Objects.equals( converter, that.converter );
}
@Override
public int hashCode() {
int result = super.hashCode();
result = 31 * result + converter.hashCode();
return result;
}
}
| ConvertedBasicCollectionType |
java | apache__camel | core/camel-support/src/main/java/org/apache/camel/support/resume/Resumables.java | {
"start": 1370,
"end": 4597
} | class ____<K, V> implements Resumable {
private final K addressable;
private V offset;
/**
* Creates a new anonymous resumable type
*
* @param addressable the key, name or object that can be addressed by the given offset
*/
public AnonymousResumable(K addressable) {
this.addressable = addressable;
}
/**
* Creates a new anonymous resumable type
*
* @param addressable the key, name or object that can be addressed by the given offset
* @param offset the offset value
*/
public AnonymousResumable(K addressable, V offset) {
this.addressable = addressable;
this.offset = offset;
}
@Override
public Offset<V> getLastOffset() {
return Offsets.of(offset);
}
@Override
public OffsetKey<K> getOffsetKey() {
return new OffsetKey<>() {
private final K key = addressable;
@Override
public void setValue(K key) {
throw new UnsupportedOperationException("Setting offset keys for anonymous resumables is unsupported");
}
@Override
public K getValue() {
return key;
}
};
}
}
private Resumables() {
}
/**
* Creates a new resumable for an addressable
*
* @param addressable the key, name or object that can be addressed by the given offset
* @param offset the offset value
* @param <K> the type of the key, name or object that can be addressed by the given offset (aka
* addressable)
* @param <V> the type of offset
* @return A new resumable entity for the given addressable with the given offset value
*/
public static <K, V> Resumable of(K addressable, V offset) {
return new AnonymousResumable<>(addressable, offset);
}
/**
* Iterates over the set of input checking if they should be resumed or not.
*
* @param input the input array to check for resumables
* @param resumableCheck a checker method that returns true if a single entry of the input should be resumed or
* false otherwise. For instance: given a set A, B and C, where B has already been processed,
* then a test for A and C returns true, whereas a test for B returns false.
* @return a new array containing the elements that still need to be processed
*/
public static <T> T[] resumeEach(T[] input, Predicate<T> resumableCheck) {
@SuppressWarnings("unchecked")
T[] tmp = (T[]) Array.newInstance(input.getClass().getComponentType(), input.length);
int count = 0;
for (T entry : input) {
if (resumableCheck.test(entry)) {
tmp[count] = entry;
count++;
}
}
if (count != input.length) {
return Arrays.copyOf(tmp, count);
}
return input;
}
}
| AnonymousResumable |
java | alibaba__nacos | core/src/main/java/com/alibaba/nacos/core/ability/RemoteAbilityInitializer.java | {
"start": 794,
"end": 1017
} | class ____ implements ServerAbilityInitializer {
@Override
public void initialize(ServerAbilities abilities) {
abilities.getRemoteAbility().setSupportRemoteConnection(true);
}
}
| RemoteAbilityInitializer |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/query/PrefixQueryBuilder.java | {
"start": 1561,
"end": 11006
} | class ____ extends AbstractQueryBuilder<PrefixQueryBuilder> implements MultiTermQueryBuilder {
public static final String NAME = "prefix";
private static final ParseField PREFIX_FIELD = new ParseField("value");
private static final ParseField REWRITE_FIELD = new ParseField("rewrite");
private final String fieldName;
private final String value;
public static final boolean DEFAULT_CASE_INSENSITIVITY = false;
private static final ParseField CASE_INSENSITIVE_FIELD = new ParseField("case_insensitive");
private boolean caseInsensitive = DEFAULT_CASE_INSENSITIVITY;
private String rewrite;
/**
* A Query that matches documents containing terms with a specified prefix.
*
* @param fieldName The name of the field
* @param value The prefix query
*/
public PrefixQueryBuilder(String fieldName, String value) {
if (Strings.isEmpty(fieldName)) {
throw new IllegalArgumentException("field name is null or empty");
}
if (value == null) {
throw new IllegalArgumentException("value cannot be null");
}
this.fieldName = fieldName;
this.value = value;
}
/**
* Read from a stream.
*/
public PrefixQueryBuilder(StreamInput in) throws IOException {
super(in);
fieldName = in.readString();
value = in.readString();
rewrite = in.readOptionalString();
caseInsensitive = in.readBoolean();
}
@Override
protected void doWriteTo(StreamOutput out) throws IOException {
out.writeString(fieldName);
out.writeString(value);
out.writeOptionalString(rewrite);
out.writeBoolean(caseInsensitive);
}
@Override
public String fieldName() {
return this.fieldName;
}
public String value() {
return this.value;
}
public PrefixQueryBuilder caseInsensitive(boolean caseInsensitive) {
this.caseInsensitive = caseInsensitive;
return this;
}
public boolean caseInsensitive() {
return this.caseInsensitive;
}
public PrefixQueryBuilder rewrite(String rewrite) {
this.rewrite = rewrite;
return this;
}
public String rewrite() {
return this.rewrite;
}
@Override
public void doXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(NAME);
builder.startObject(fieldName);
builder.field(PREFIX_FIELD.getPreferredName(), this.value);
if (rewrite != null) {
builder.field(REWRITE_FIELD.getPreferredName(), rewrite);
}
if (caseInsensitive != DEFAULT_CASE_INSENSITIVITY) {
builder.field(CASE_INSENSITIVE_FIELD.getPreferredName(), caseInsensitive);
}
printBoostAndQueryName(builder);
builder.endObject();
builder.endObject();
}
public static PrefixQueryBuilder fromXContent(XContentParser parser) throws IOException {
String fieldName = null;
String value = null;
String rewrite = null;
String queryName = null;
float boost = AbstractQueryBuilder.DEFAULT_BOOST;
boolean caseInsensitive = DEFAULT_CASE_INSENSITIVITY;
String currentFieldName = null;
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, currentFieldName);
fieldName = currentFieldName;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else {
if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
queryName = parser.text();
} else if (PREFIX_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
value = parser.textOrNull();
} else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
boost = parser.floatValue();
} else if (REWRITE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
rewrite = parser.textOrNull();
} else if (CASE_INSENSITIVE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
caseInsensitive = parser.booleanValue();
} else {
throw new ParsingException(
parser.getTokenLocation(),
"[prefix] query does not support [" + currentFieldName + "]"
);
}
}
}
} else {
throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, parser.currentName());
fieldName = currentFieldName;
value = parser.textOrNull();
}
}
PrefixQueryBuilder result = new PrefixQueryBuilder(fieldName, value).rewrite(rewrite).boost(boost).queryName(queryName);
result.caseInsensitive(caseInsensitive);
return result;
}
@Override
public String getWriteableName() {
return NAME;
}
@Override
protected QueryBuilder doIndexMetadataRewrite(QueryRewriteContext context) {
MappedFieldType fieldType = context.getFieldType(this.fieldName);
if (fieldType == null) {
return new MatchNoneQueryBuilder("The \"" + getName() + "\" query is against a field that does not exist");
}
return maybeRewriteBasedOnConstantFields(fieldType, context);
}
@Override
protected QueryBuilder doCoordinatorRewrite(CoordinatorRewriteContext coordinatorRewriteContext) {
MappedFieldType fieldType = coordinatorRewriteContext.getFieldType(this.fieldName);
// we don't rewrite a null field type to `match_none` on the coordinator because the coordinator has access
// to only a subset of fields see {@link CoordinatorRewriteContext#getFieldType}
return maybeRewriteBasedOnConstantFields(fieldType, coordinatorRewriteContext);
}
private QueryBuilder maybeRewriteBasedOnConstantFields(@Nullable MappedFieldType fieldType, QueryRewriteContext context) {
if (fieldType instanceof ConstantFieldType constantFieldType) {
// This logic is correct for all field types, but by only applying it to constant
// fields we also have the guarantee that it doesn't perform I/O, which is important
// since rewrites might happen on a network thread.
Query query = constantFieldType.prefixQuery(value, caseInsensitive, context);
if (query instanceof MatchAllDocsQuery) {
return new MatchAllQueryBuilder();
} else if (query instanceof MatchNoDocsQuery) {
return new MatchNoneQueryBuilder("The \"" + getName() + "\" query was rewritten to a \"match_none\" query.");
} else {
assert false : "Constant fields must produce match-all or match-none queries, got " + query;
}
}
return this;
}
@Override
protected Query doToQuery(SearchExecutionContext context) throws IOException {
final int maxAllowedRegexLength = context.getIndexSettings().getMaxRegexLength();
if (value.length() > maxAllowedRegexLength) {
throw new IllegalArgumentException(
"The length of prefix ["
+ value.length()
+ "] used in the Prefix Query request has exceeded "
+ "the allowed maximum of ["
+ maxAllowedRegexLength
+ "]. "
+ "This maximum can be set by changing the ["
+ IndexSettings.MAX_REGEX_LENGTH_SETTING.getKey()
+ "] index level setting."
);
}
MultiTermQuery.RewriteMethod method = QueryParsers.parseRewriteMethod(rewrite, null, LoggingDeprecationHandler.INSTANCE);
MappedFieldType fieldType = context.getFieldType(fieldName);
if (fieldType == null) {
throw new IllegalStateException("Rewrite first");
}
return fieldType.prefixQuery(value, method, caseInsensitive, context);
}
@Override
protected final int doHashCode() {
return Objects.hash(fieldName, value, rewrite, caseInsensitive);
}
@Override
protected boolean doEquals(PrefixQueryBuilder other) {
return Objects.equals(fieldName, other.fieldName)
&& Objects.equals(value, other.value)
&& Objects.equals(rewrite, other.rewrite)
&& Objects.equals(caseInsensitive, other.caseInsensitive);
}
@Override
public TransportVersion getMinimalSupportedVersion() {
return TransportVersion.zero();
}
}
| PrefixQueryBuilder |
java | quarkusio__quarkus | integration-tests/spring-data-jpa/src/main/java/io/quarkus/it/spring/data/jpa/CountryResource.java | {
"start": 487,
"end": 2721
} | class ____ {
private final CountryRepository countryRepository;
public CountryResource(CountryRepository countryRepository) {
this.countryRepository = countryRepository;
}
@GET
@Path("/all")
@Produces("application/json")
public List<Country> all() {
return countryRepository.findAll(Sort.by(new Sort.Order(Sort.Direction.ASC, "iso3")));
}
@GET
@Path("/page/{size}/{num}")
public String page(@PathParam("size") int pageSize, @PathParam("num") int pageNum) {
Page<Country> page = countryRepository.findAll(PageRequest.of(pageNum, pageSize));
return page.hasPrevious() + " - " + page.hasNext() + " / " + page.getNumberOfElements();
}
@GET
@Path("/page-sorted/{size}/{num}")
@Produces("text/plain")
public String pageSorted(@PathParam("size") int pageSize, @PathParam("num") int pageNum) {
Page<Country> page = countryRepository.findAll(PageRequest.of(pageNum, pageSize, Sort.by(Sort.Direction.DESC, "id")));
return page.stream().map(Country::getId).map(Object::toString).collect(Collectors.joining(","));
}
@GET
@Path("/new/{name}/{iso3}")
@Produces("application/json")
public Country newCountry(@PathParam("name") String name, @PathParam("iso3") String iso3) {
countryRepository.flush();
return countryRepository.saveAndFlush(new Country(name, iso3));
}
@GET
@Path("/editIso3/{id}/{iso3}")
@Produces("application/json")
public Country editIso3(@PathParam("id") Long id, @PathParam("iso3") String iso3) {
Optional<Country> optional = countryRepository.findById(id);
if (optional.isPresent()) {
Country country = optional.get();
country.setIso3(iso3);
return countryRepository.save(country);
} else {
throw new NoResultException("No Country found with id =" + id);
}
}
@GET
@Path("/getOne/{id}")
@Produces("application/json")
public Country getOne(@PathParam("id") Long id) {
return countryRepository.getOne(id);
}
@DELETE
@Path("/")
public void deleteAllInBatch() {
this.countryRepository.deleteAllInBatch();
}
}
| CountryResource |
java | apache__dubbo | dubbo-metrics/dubbo-metrics-default/src/main/java/org/apache/dubbo/metrics/aot/DefaultMetricsReflectionTypeDescriberRegistrar.java | {
"start": 1179,
"end": 1924
} | class ____ implements ReflectionTypeDescriberRegistrar {
@Override
public List<TypeDescriber> getTypeDescribers() {
List<TypeDescriber> typeDescribers = new ArrayList<>();
typeDescribers.add(buildTypeDescriberWithDeclaredConstructors(HistogramMetricsCollector.class));
return typeDescribers;
}
private TypeDescriber buildTypeDescriberWithDeclaredConstructors(Class<?> cl) {
Set<MemberCategory> memberCategories = new HashSet<>();
memberCategories.add(MemberCategory.INVOKE_DECLARED_CONSTRUCTORS);
return new TypeDescriber(
cl.getName(), null, new HashSet<>(), new HashSet<>(), new HashSet<>(), memberCategories);
}
}
| DefaultMetricsReflectionTypeDescriberRegistrar |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/jmx/export/MBeanExporterTests.java | {
"start": 27258,
"end": 27824
} | class ____ implements MBeanExporterListener {
private List<ObjectName> registered = new ArrayList<>();
private List<ObjectName> unregistered = new ArrayList<>();
@Override
public void mbeanRegistered(ObjectName objectName) {
registered.add(objectName);
}
@Override
public void mbeanUnregistered(ObjectName objectName) {
unregistered.add(objectName);
}
public List<ObjectName> getRegistered() {
return registered;
}
public List<ObjectName> getUnregistered() {
return unregistered;
}
}
private static | MockMBeanExporterListener |
java | quarkusio__quarkus | integration-tests/test-extension/tests/src/test/java/io/quarkus/it/extension/it/ParameterTestModeIT.java | {
"start": 973,
"end": 2791
} | class ____ extends RunAndCheckMojoTestBase {
@Override
protected LaunchMode getDefaultLaunchMode() {
return LaunchMode.TEST;
}
@Override
public void shutdownTheApp() {
if (running != null) {
running.stop();
}
// There's no http server, so there's nothing to check to make sure we're stopped, except by the maven invoker itself, or the logs
}
/**
* This is actually more like runAndDoNotCheck, because
* we can't really check anything via a HTTP get, because this is a test mode application
*/
@Override
protected void runAndCheck(boolean performCompile, LaunchMode mode, String... options)
throws FileNotFoundException, MavenInvocationException {
run(performCompile, mode, options);
// We don't need to try and pause, because the continuous testing utils will wait for tests to finish
}
@Test
public void testThatTheTestsPassed() throws MavenInvocationException, IOException {
//we also check continuous testing
String executionDir = "projects/project-using-test-parameter-injection-processed";
testDir = initProject("projects/project-using-test-parameter-injection", executionDir);
runAndCheck();
ContinuousTestingMavenTestUtils testingTestUtils = new ContinuousTestingMavenTestUtils(getPort());
ContinuousTestingMavenTestUtils.TestStatus results = testingTestUtils.waitForNextCompletion();
// This is a bit brittle when we add tests, but failures are often so catastrophic they're not even reported as failures,
// so we need to check the pass count explicitly
Assertions.assertEquals(0, results.getTestsFailed());
Assertions.assertEquals(1, results.getTestsPassed());
}
}
| ParameterTestModeIT |
java | spring-projects__spring-security | oauth2/oauth2-jose/src/main/java/org/springframework/security/oauth2/jwt/JwtTypeValidator.java | {
"start": 1317,
"end": 2763
} | class ____ implements OAuth2TokenValidator<Jwt> {
private final Collection<String> validTypes;
private boolean allowEmpty;
public JwtTypeValidator(Collection<String> validTypes) {
Assert.notEmpty(validTypes, "validTypes cannot be empty");
this.validTypes = new ArrayList<>(validTypes);
}
public JwtTypeValidator(String... validTypes) {
this(List.of(validTypes));
}
/**
* Require that the {@code typ} header be {@code JWT} or absent
*/
public static JwtTypeValidator jwt() {
JwtTypeValidator validator = new JwtTypeValidator(List.of("JWT"));
validator.setAllowEmpty(true);
return validator;
}
/**
* Whether to allow the {@code typ} header to be empty. The default value is
* {@code false}
*/
public void setAllowEmpty(boolean allowEmpty) {
this.allowEmpty = allowEmpty;
}
@Override
public OAuth2TokenValidatorResult validate(Jwt token) {
String typ = (String) token.getHeaders().get(JoseHeaderNames.TYP);
if (this.allowEmpty && !StringUtils.hasText(typ)) {
return OAuth2TokenValidatorResult.success();
}
for (String validType : this.validTypes) {
if (validType.equalsIgnoreCase(typ)) {
return OAuth2TokenValidatorResult.success();
}
}
return OAuth2TokenValidatorResult.failure(new OAuth2Error(OAuth2ErrorCodes.INVALID_TOKEN,
"the given typ value needs to be one of " + this.validTypes,
"https://datatracker.ietf.org/doc/html/rfc7515#section-4.1.9"));
}
}
| JwtTypeValidator |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstLongEvaluator.java | {
"start": 2784,
"end": 3273
} | class ____ implements EvalOperator.ExpressionEvaluator.Factory {
private final EvalOperator.ExpressionEvaluator.Factory field;
public Factory(EvalOperator.ExpressionEvaluator.Factory field) {
this.field = field;
}
@Override
public MvFirstLongEvaluator get(DriverContext context) {
return new MvFirstLongEvaluator(field.get(context), context);
}
@Override
public String toString() {
return "MvFirst[field=" + field + "]";
}
}
}
| Factory |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/pool/dynamic/MaxActiveChangeTest.java | {
"start": 886,
"end": 4335
} | class ____ extends PoolTestCase {
private DruidDataSource dataSource;
protected void setUp() throws Exception {
super.setUp();
dataSource = new DruidDataSource();
dataSource.setUrl("jdbc:mock:xxx");
dataSource.setTestOnBorrow(false);
dataSource.setMaxActive(3);
dataSource.setMinIdle(2);
dataSource.setMinEvictableIdleTimeMillis(1000 * 60 * 5);
dataSource.setMaxWait(20);
dataSource.init();
}
protected void tearDown() throws Exception {
dataSource.close();
super.tearDown();
}
public void test_maxActive() throws Exception {
for (int i = 0; i < 10; ++i) {
assertEquals(1, connect(1));
assertEquals(1, dataSource.getPoolingCount());
}
for (int i = 0; i < 10; ++i) {
assertEquals(2, connect(2));
assertEquals(2, dataSource.getPoolingCount());
}
for (int i = 0; i < 10; ++i) {
assertEquals(3, connect(3));
assertEquals(3, dataSource.getPoolingCount());
}
for (int i = 0; i < 10; ++i) {
assertEquals(3, connect(4));
assertEquals(3, dataSource.getPoolingCount());
}
dataSource.setMaxActive(5);
for (int i = 0; i < 10; ++i) {
assertEquals(5, connect(5));
assertEquals(5, dataSource.getPoolingCount());
}
dataSource.shrink();
assertEquals(2, dataSource.getPoolingCount());
for (int i = 0; i < 10; ++i) {
assertEquals(5, connect(5));
assertEquals(5, dataSource.getPoolingCount());
}
assertEquals(5, dataSource.getPoolingCount());
dataSource.setMaxActive(3);
assertEquals(5, dataSource.getPoolingCount());
dataSource.shrink();
assertEquals(2, dataSource.getPoolingCount());
// 确保收缩之后不会再长上去
for (int i = 0; i < 10; ++i) {
assertEquals(3, connect(5));
assertEquals(3, dataSource.getPoolingCount());
}
dataSource.setMaxActive(2);
dataSource.shrink();
assertEquals(2, dataSource.getPoolingCount());
for (int i = 0; i < 10; ++i) {
assertEquals(2, connect(3));
assertEquals(2, dataSource.getPoolingCount());
}
dataSource.setMinIdle(1);
dataSource.setMaxActive(1);
dataSource.shrink();
assertEquals(1, dataSource.getPoolingCount());
for (int i = 0; i < 10; ++i) {
assertEquals(1, connect(2));
assertEquals(1, dataSource.getPoolingCount());
}
Exception error = null;
try {
dataSource.setMaxActive(0);
} catch (IllegalArgumentException e) {
error = e;
}
assertNotNull(error);
assertEquals(1, dataSource.getMaxActive());
}
public int connect(int count) throws Exception {
int successCount = 0;
Connection[] connections = new Connection[count];
for (int i = 0; i < count; ++i) {
try {
connections[i] = dataSource.getConnection();
successCount++;
} catch (GetConnectionTimeoutException e) {
// skip
}
}
for (int i = 0; i < count; ++i) {
JdbcUtils.close(connections[i]);
}
return successCount;
}
}
| MaxActiveChangeTest |
java | spring-projects__spring-boot | module/spring-boot-actuator/src/test/java/org/springframework/boot/actuate/endpoint/OperationFilterTests.java | {
"start": 932,
"end": 3165
} | class ____ {
private final EndpointAccessResolver accessResolver = mock(EndpointAccessResolver.class);
private final Operation operation = mock(Operation.class);
private final OperationFilter<Operation> filter = OperationFilter.byAccess(this.accessResolver);
@Test
void whenAccessIsUnrestrictedThenMatchReturnsTrue() {
EndpointId endpointId = EndpointId.of("test");
Access defaultAccess = Access.READ_ONLY;
given(this.accessResolver.accessFor(endpointId, defaultAccess)).willReturn(Access.UNRESTRICTED);
assertThat(this.filter.match(this.operation, endpointId, defaultAccess)).isTrue();
}
@Test
void whenAccessIsNoneThenMatchReturnsFalse() {
EndpointId endpointId = EndpointId.of("test");
Access defaultAccess = Access.READ_ONLY;
given(this.accessResolver.accessFor(endpointId, defaultAccess)).willReturn(Access.NONE);
assertThat(this.filter.match(this.operation, endpointId, defaultAccess)).isFalse();
}
@Test
void whenAccessIsReadOnlyAndOperationTypeIsReadThenMatchReturnsTrue() {
EndpointId endpointId = EndpointId.of("test");
Access defaultAccess = Access.READ_ONLY;
given(this.accessResolver.accessFor(endpointId, defaultAccess)).willReturn(Access.READ_ONLY);
given(this.operation.getType()).willReturn(OperationType.READ);
assertThat(this.filter.match(this.operation, endpointId, defaultAccess)).isTrue();
}
@Test
void whenAccessIsReadOnlyAndOperationTypeIsWriteThenMatchReturnsFalse() {
EndpointId endpointId = EndpointId.of("test");
Access defaultAccess = Access.READ_ONLY;
given(this.accessResolver.accessFor(endpointId, defaultAccess)).willReturn(Access.READ_ONLY);
given(this.operation.getType()).willReturn(OperationType.WRITE);
assertThat(this.filter.match(this.operation, endpointId, defaultAccess)).isFalse();
}
@Test
void whenAccessIsReadOnlyAndOperationTypeIsDeleteThenMatchReturnsFalse() {
EndpointId endpointId = EndpointId.of("test");
Access defaultAccess = Access.READ_ONLY;
given(this.accessResolver.accessFor(endpointId, defaultAccess)).willReturn(Access.READ_ONLY);
given(this.operation.getType()).willReturn(OperationType.DELETE);
assertThat(this.filter.match(this.operation, endpointId, defaultAccess)).isFalse();
}
}
| OperationFilterTests |
java | resilience4j__resilience4j | resilience4j-micronaut/src/main/java/io/github/resilience4j/micronaut/circuitbreaker/CircuitBreakerProperties.java | {
"start": 2498,
"end": 2863
} | class ____ extends CommonCircuitBreakerConfigurationProperties.InstanceProperties implements Named {
private final String name;
public InstancePropertiesInstances(@Parameter String name) {
this.name = name;
}
@Override
public String getName() {
return name;
}
}
}
| InstancePropertiesInstances |
java | netty__netty | microbench/src/main/java/io/netty/util/AsciiStringCaseConversionBenchmark.java | {
"start": 1382,
"end": 3628
} | class ____ {
@Param({ "7", "16", "23", "32" })
int size;
@Param({ "4", "11" })
int logPermutations;
@Param({ "0" })
int seed;
int permutations;
AsciiString[] asciiStringData;
String[] stringData;
byte[] ret;
private int i;
@Param({ "true", "false" })
private boolean noUnsafe;
@Setup(Level.Trial)
public void init() {
System.setProperty("io.netty.noUnsafe", Boolean.valueOf(noUnsafe).toString());
final SplittableRandom random = new SplittableRandom(seed);
permutations = 1 << logPermutations;
ret = new byte[size];
asciiStringData = new AsciiString[permutations];
stringData = new String[permutations];
for (int i = 0; i < permutations; ++i) {
final int foundIndex = random.nextInt(Math.max(0, size - 8), size);
final byte[] byteArray = new byte[size];
int j = 0;
for (; j < size; j++) {
byte value = (byte) random.nextInt(0, (int) Byte.MAX_VALUE + 1);
// turn any found value into something different
if (j < foundIndex) {
if (AsciiStringUtil.isUpperCase(value)) {
value = AsciiStringUtil.toLowerCase(value);
}
}
if (j == foundIndex) {
value = 'N';
}
byteArray[j] = value;
}
asciiStringData[i] = new AsciiString(byteArray, false);
stringData[i] = asciiStringData[i].toString();
}
}
private AsciiString getData() {
return asciiStringData[i++ & permutations - 1];
}
private String getStringData() {
return stringData[i++ & permutations - 1];
}
@Benchmark
public AsciiString toLowerCase() {
return getData().toLowerCase();
}
@Benchmark
public AsciiString toUpperCase() {
return getData().toUpperCase();
}
@Benchmark
public String stringToLowerCase() {
return getStringData().toLowerCase();
}
@Benchmark
public String stringtoUpperCase() {
return getStringData().toUpperCase();
}
}
| AsciiStringCaseConversionBenchmark |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java | {
"start": 1278,
"end": 1358
} | class ____ be able to compare arrays that start at non-zero offsets.
*/
abstract | to |
java | junit-team__junit5 | junit-jupiter-engine/src/main/java/org/junit/jupiter/engine/extension/SameThreadTimeoutInvocation.java | {
"start": 771,
"end": 2364
} | class ____<T extends @Nullable Object> implements Invocation<T> {
private final Invocation<T> delegate;
private final TimeoutDuration timeout;
private final ScheduledExecutorService executor;
private final Supplier<String> descriptionSupplier;
private final PreInterruptCallbackInvocation preInterruptCallback;
SameThreadTimeoutInvocation(Invocation<T> delegate, TimeoutDuration timeout, ScheduledExecutorService executor,
Supplier<String> descriptionSupplier, PreInterruptCallbackInvocation preInterruptCallback) {
this.delegate = delegate;
this.timeout = timeout;
this.executor = executor;
this.descriptionSupplier = descriptionSupplier;
this.preInterruptCallback = preInterruptCallback;
}
@SuppressWarnings("NullAway")
@Override
public T proceed() throws Throwable {
InterruptTask interruptTask = new InterruptTask(Thread.currentThread(), preInterruptCallback);
ScheduledFuture<?> future = executor.schedule(interruptTask, timeout.value(), timeout.unit());
Throwable failure = null;
T result = null;
try {
result = delegate.proceed();
}
catch (Throwable t) {
UnrecoverableExceptions.rethrowIfUnrecoverable(t);
failure = t;
}
finally {
boolean cancelled = future.cancel(false);
if (!cancelled) {
future.get();
}
if (interruptTask.executed) {
Thread.interrupted();
failure = TimeoutExceptionFactory.create(descriptionSupplier.get(), timeout, failure);
interruptTask.attachSuppressedExceptions(failure);
}
}
if (failure != null) {
throw failure;
}
return result;
}
static | SameThreadTimeoutInvocation |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/annotations/Parameter.java | {
"start": 588,
"end": 715
} | interface ____ {
/**
* The parameter name.
*/
String name();
/**
* The parameter value.
*/
String value();
}
| Parameter |
java | google__guice | core/test/com/google/inject/DefaultMethodInterceptionTest.java | {
"start": 2074,
"end": 2179
} | interface ____ {}
/** Interface with a default method annotated to be intercepted. */
public | InterceptMe |
java | micronaut-projects__micronaut-core | inject-java/src/test/java/io/micronaut/aop/JdkRuntimeProxy.java | {
"start": 921,
"end": 1268
} | interface ____ only: " + targetType);
}
InvocationHandler handler = new RuntimeInvocationHandler<>(proxyDefinition, targetType);
Object proxy = Proxy.newProxyInstance(proxyDefinition.beanContext().getClassLoader(), new Class[]{targetType}, handler);
return targetType.cast(proxy);
}
private static final | types |
java | elastic__elasticsearch | x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/TextClassificationProcessorTests.java | {
"start": 1344,
"end": 4150
} | class ____ extends ESTestCase {
public void testInvalidResult() {
{
PyTorchInferenceResult torchResult = new PyTorchInferenceResult(new double[][][] {});
var e = expectThrows(
ElasticsearchStatusException.class,
() -> TextClassificationProcessor.processResult(
null,
torchResult,
randomInt(),
List.of("a", "b"),
randomAlphaOfLength(10),
false
)
);
assertThat(e, instanceOf(ElasticsearchStatusException.class));
assertThat(e.getMessage(), containsString("Text classification result has no data"));
}
{
PyTorchInferenceResult torchResult = new PyTorchInferenceResult(new double[][][] { { { 1.0 } } });
var e = expectThrows(
ElasticsearchStatusException.class,
() -> TextClassificationProcessor.processResult(
null,
torchResult,
randomInt(),
List.of("a", "b"),
randomAlphaOfLength(10),
false
)
);
assertThat(e, instanceOf(ElasticsearchStatusException.class));
assertThat(e.getMessage(), containsString("Expected exactly [2] values in text classification result; got [1]"));
}
}
@SuppressWarnings("unchecked")
public void testBuildRequest() throws IOException {
NlpTokenizer tokenizer = NlpTokenizer.build(
new Vocabulary(TEST_CASED_VOCAB, randomAlphaOfLength(10), List.of(), List.of()),
new BertTokenization(null, null, 512, Tokenization.Truncate.NONE, -1)
);
TextClassificationConfig config = new TextClassificationConfig(
new VocabularyConfig("test-index"),
null,
List.of("a", "b"),
null,
null
);
TextClassificationProcessor processor = new TextClassificationProcessor(tokenizer, config);
NlpTask.Request request = processor.getRequestBuilder(config)
.buildRequest(List.of("Elasticsearch fun"), "request1", Tokenization.Truncate.NONE, -1, null);
Map<String, Object> jsonDocAsMap = XContentHelper.convertToMap(request.processInput(), true, XContentType.JSON).v2();
assertThat(jsonDocAsMap.keySet(), hasSize(5));
assertEquals("request1", jsonDocAsMap.get("request_id"));
assertEquals(Arrays.asList(12, 0, 1, 3, 13), ((List<List<Integer>>) jsonDocAsMap.get("tokens")).get(0));
assertEquals(Arrays.asList(1, 1, 1, 1, 1), ((List<List<Integer>>) jsonDocAsMap.get("arg_1")).get(0));
}
}
| TextClassificationProcessorTests |
java | apache__camel | core/camel-main/src/test/java/org/apache/camel/main/MainIoCBeanConfigInjectConfigurerTest.java | {
"start": 2352,
"end": 2748
} | class ____ {
private String name;
private int age;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public int getAge() {
return age;
}
public void setAge(int age) {
this.age = age;
}
}
public static | MyBarConfig |
java | apache__camel | components/camel-netty-http/src/main/java/org/apache/camel/component/netty/http/SecurityAuthenticator.java | {
"start": 1301,
"end": 1384
} | class ____ (separated by comma)
* <p/>
* By default if no explicit role | names |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java | {
"start": 886,
"end": 5103
} | class ____ implements ToXContentObject, Writeable {
/**
* Field Names
*/
public static final ParseField TYPE = new ParseField("flush");
public static final ParseField ID = new ParseField("id");
public static final ParseField LAST_FINALIZED_BUCKET_END = new ParseField("last_finalized_bucket_end");
public static final ParseField REFRESH_REQUIRED = new ParseField("refresh_required");
public static final ConstructingObjectParser<FlushAcknowledgement, Void> PARSER = new ConstructingObjectParser<>(
TYPE.getPreferredName(),
a -> new FlushAcknowledgement((String) a[0], (Long) a[1], (Boolean) a[2])
);
static {
PARSER.declareString(ConstructingObjectParser.constructorArg(), ID);
PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), LAST_FINALIZED_BUCKET_END);
PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), REFRESH_REQUIRED);
}
private final String id;
private final Instant lastFinalizedBucketEnd;
private final boolean refreshRequired;
public FlushAcknowledgement(String id, Long lastFinalizedBucketEndMs, Boolean refreshRequired) {
this.id = id;
// The C++ passes 0 when last finalized bucket end is not available, so treat 0 as null
this.lastFinalizedBucketEnd = (lastFinalizedBucketEndMs != null && lastFinalizedBucketEndMs > 0)
? Instant.ofEpochMilli(lastFinalizedBucketEndMs)
: null;
this.refreshRequired = refreshRequired == null || refreshRequired;
}
public FlushAcknowledgement(String id, Instant lastFinalizedBucketEnd, Boolean refreshRequired) {
this.id = id;
// Round to millisecond accuracy to ensure round-tripping via XContent results in an equal object
long epochMillis = (lastFinalizedBucketEnd != null) ? lastFinalizedBucketEnd.toEpochMilli() : 0;
this.lastFinalizedBucketEnd = (epochMillis > 0) ? Instant.ofEpochMilli(epochMillis) : null;
this.refreshRequired = refreshRequired == null || refreshRequired;
}
public FlushAcknowledgement(StreamInput in) throws IOException {
id = in.readString();
lastFinalizedBucketEnd = in.readOptionalInstant();
if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
refreshRequired = in.readBoolean();
} else {
refreshRequired = true;
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(id);
out.writeOptionalInstant(lastFinalizedBucketEnd);
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
out.writeBoolean(refreshRequired);
}
}
public String getId() {
return id;
}
public Instant getLastFinalizedBucketEnd() {
return lastFinalizedBucketEnd;
}
public boolean getRefreshRequired() {
return refreshRequired;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(ID.getPreferredName(), id);
if (lastFinalizedBucketEnd != null) {
builder.timestampFieldsFromUnixEpochMillis(
LAST_FINALIZED_BUCKET_END.getPreferredName(),
LAST_FINALIZED_BUCKET_END.getPreferredName() + "_string",
lastFinalizedBucketEnd.toEpochMilli()
);
}
builder.field(REFRESH_REQUIRED.getPreferredName(), refreshRequired);
builder.endObject();
return builder;
}
@Override
public int hashCode() {
return Objects.hash(id, lastFinalizedBucketEnd, refreshRequired);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
FlushAcknowledgement other = (FlushAcknowledgement) obj;
return Objects.equals(id, other.id)
&& Objects.equals(lastFinalizedBucketEnd, other.lastFinalizedBucketEnd)
&& refreshRequired == other.refreshRequired;
}
}
| FlushAcknowledgement |
java | spring-projects__spring-framework | spring-test/src/main/java/org/springframework/test/context/ContextHierarchy.java | {
"start": 5401,
"end": 6270
} | class ____ extends BaseTests {}</pre>
*
* <h4>Class Hierarchy with Overridden Context Hierarchy Configuration</h4>
*
* <p>In contrast to the previous example, this example demonstrates how to
* <em>override</em> the configuration for a given named level in a context hierarchy
* by setting the {@link ContextConfiguration#inheritLocations} flag to {@code false}.
* Consequently, the application context for {@code ExtendedTests} will be loaded
* only from {@code "/test-user-config.xml"} and will have its parent set to the
* context loaded from {@code "/app-config.xml"}.
*
* <pre class="code">
* @ExtendWith(SpringExtension.class)
* @ContextHierarchy({
* @ContextConfiguration(name = "parent", locations = "/app-config.xml"),
* @ContextConfiguration(name = "child", locations = "/user-config.xml")
* })
* public | ExtendedTests |
java | spring-projects__spring-security | core/src/test/java/org/springframework/security/core/annotation/UniqueSecurityAnnotationScannerTests.java | {
"start": 17386,
"end": 17534
} | interface ____ extends AnnotationOnInterface {
@PreAuthorize("eight")
String method();
}
private | InterfaceMethodOverridingAnnotationOnInterface |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapper.java | {
"start": 2083,
"end": 5711
} | class ____ implements CheckedFunction<DirectoryReader, DirectoryReader, IOException> {
private static final Logger logger = LogManager.getLogger(SecurityIndexReaderWrapper.class);
private final Function<ShardId, SearchExecutionContext> searchExecutionContextProvider;
private final DocumentSubsetBitsetCache bitsetCache;
private final XPackLicenseState licenseState;
private final SecurityContext securityContext;
private final ScriptService scriptService;
public SecurityIndexReaderWrapper(
Function<ShardId, SearchExecutionContext> searchExecutionContextProvider,
DocumentSubsetBitsetCache bitsetCache,
SecurityContext securityContext,
XPackLicenseState licenseState,
ScriptService scriptService
) {
this.scriptService = scriptService;
this.searchExecutionContextProvider = searchExecutionContextProvider;
this.bitsetCache = bitsetCache;
this.securityContext = securityContext;
this.licenseState = licenseState;
}
@Override
public DirectoryReader apply(final DirectoryReader reader) {
if (false == DOCUMENT_LEVEL_SECURITY_FEATURE.checkWithoutTracking(licenseState)) {
return reader;
}
try {
final IndicesAccessControl indicesAccessControl = getIndicesAccessControl();
assert indicesAccessControl.isGranted();
ShardId shardId = ShardUtils.extractShardId(reader);
if (shardId == null) {
throw new IllegalStateException(LoggerMessageFormat.format("couldn't extract shardId from reader [{}]", reader));
}
final IndicesAccessControl.IndexAccessControl permissions = indicesAccessControl.getIndexPermissions(shardId.getIndexName());
// No permissions have been defined for an index, so don't intercept the index reader for access control
if (permissions == null) {
return reader;
}
DirectoryReader wrappedReader = reader;
DocumentPermissions documentPermissions = permissions.getDocumentPermissions();
if (documentPermissions.hasDocumentLevelPermissions()) {
BooleanQuery filterQuery = documentPermissions.filter(getUser(), scriptService, shardId, searchExecutionContextProvider);
if (filterQuery != null) {
wrappedReader = DocumentSubsetReader.wrap(wrappedReader, bitsetCache, new ConstantScoreQuery(filterQuery));
}
}
var searchContext = searchExecutionContextProvider.apply(shardId);
var indexVersionCreated = searchContext.indexVersionCreated();
Function<String, Boolean> isMapped = searchContext::isFieldMapped;
return permissions.getFieldPermissions().filter(wrappedReader, indexVersionCreated, isMapped);
} catch (IOException e) {
logger.error("Unable to apply field level security");
throw ExceptionsHelper.convertToElastic(e);
}
}
protected IndicesAccessControl getIndicesAccessControl() {
final ThreadContext threadContext = securityContext.getThreadContext();
IndicesAccessControl indicesAccessControl = AuthorizationServiceField.INDICES_PERMISSIONS_VALUE.get(threadContext);
if (indicesAccessControl == null) {
throw Exceptions.authorizationError("no indices permissions found");
}
return indicesAccessControl;
}
protected User getUser() {
return Objects.requireNonNull(securityContext.getUser());
}
}
| SecurityIndexReaderWrapper |
java | micronaut-projects__micronaut-core | http/src/main/java/io/micronaut/http/body/stream/UpstreamBalancer.java | {
"start": 10300,
"end": 11548
} | class ____ extends UpstreamImpl {
PassthroughUpstreamImpl() {
super(false);
}
@Override
public void start() {
upstream.start();
}
@Override
public void onBytesConsumed(long bytesConsumed) {
// save already-demanded bytes to delta to calculate demand for other side in case of disregardBackpressure
DELTA.updateAndGet(UpstreamBalancer.this, old -> subtractSaturating(old, bytesConsumed));
upstream.onBytesConsumed(bytesConsumed);
}
@Override
protected void disregardBackpressureThisSide() {
// when disregardBackpressure is called on this side, the previously "ignoring" side takes over backpressure.
pushSomeFromIgnored();
}
}
/**
* Pair of {@link BufferConsumer.Upstream} objects.
*
* @param left left {@link BufferConsumer.Upstream} object
* @param right right {@link BufferConsumer.Upstream} object
*/
public record UpstreamPair(
BufferConsumer.Upstream left,
BufferConsumer.Upstream right
) {
UpstreamPair flip() {
return new UpstreamPair(right, left);
}
}
}
| PassthroughUpstreamImpl |
java | apache__camel | core/camel-core-processor/src/main/java/org/apache/camel/processor/errorhandler/NoErrorHandler.java | {
"start": 1344,
"end": 3633
} | class ____ extends ErrorHandlerSupport implements AsyncProcessor, ErrorHandler {
private final AsyncProcessor output;
public NoErrorHandler(Processor processor) {
this.output = AsyncProcessorConverterHelper.convert(processor);
}
@Override
public void process(Exchange exchange) throws Exception {
AsyncProcessorHelper.process(this, exchange);
}
@Override
public boolean process(Exchange exchange, final AsyncCallback callback) {
return output.process(exchange, new AsyncCallback() {
@Override
public void done(boolean doneSync) {
exchange.getExchangeExtension().setRedeliveryExhausted(false);
callback.done(doneSync);
}
});
}
@Override
public CompletableFuture<Exchange> processAsync(Exchange exchange) {
AsyncCallbackToCompletableFutureAdapter<Exchange> callback = new AsyncCallbackToCompletableFutureAdapter<>(exchange);
process(exchange, callback);
return callback.getFuture();
}
@Override
public String toString() {
if (output == null) {
// if no output then dont do any description
return "";
}
return "NoErrorHandler[" + output + "]";
}
@Override
public boolean supportTransacted() {
return false;
}
@Override
public Processor getOutput() {
return output;
}
@Override
public ErrorHandler clone(Processor output) {
return new NoErrorHandler(output);
}
@Override
protected void doBuild() throws Exception {
ServiceHelper.buildService(output);
}
@Override
protected void doInit() throws Exception {
ServiceHelper.initService(output);
}
@Override
protected void doStart() throws Exception {
ServiceHelper.startService(output);
}
@Override
protected void doStop() throws Exception {
// noop, do not stop any services which we only do when shutting down
// as the error handler can be context scoped, and should not stop in case
// a route stops
}
@Override
protected void doShutdown() throws Exception {
ServiceHelper.stopAndShutdownServices(output);
}
}
| NoErrorHandler |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/dialect/function/json/H2JsonTableFunction.java | {
"start": 5733,
"end": 14119
} | class ____ implements QueryTransformer {
private final FunctionTableGroup functionTableGroup;
private final JsonTableArguments arguments;
private final int maximumArraySize;
public JsonTableQueryTransformer(FunctionTableGroup functionTableGroup, JsonTableArguments arguments, int maximumArraySize) {
this.functionTableGroup = functionTableGroup;
this.arguments = arguments;
this.maximumArraySize = maximumArraySize;
}
@Override
public QuerySpec transform(CteContainer cteContainer, QuerySpec querySpec, SqmToSqlAstConverter converter) {
final boolean isArray;
if ( arguments.jsonPath() != null ) {
if ( !( arguments.jsonPath() instanceof Literal literal) ) {
throw new QueryException( "H2 json_table() only supports literal json paths, but got " + arguments.jsonPath() );
}
final String rawJsonPath = (String) literal.getLiteralValue();
isArray = isArrayAccess( rawJsonPath );
}
else {
// We have to assume this is an array
isArray = true;
}
if ( isArray ) {
final TableGroup parentTableGroup = querySpec.getFromClause().queryTableGroups(
tg -> tg.findTableGroupJoin( functionTableGroup ) == null ? null : tg
);
final PredicateContainer predicateContainer;
if ( parentTableGroup != null ) {
predicateContainer = parentTableGroup.findTableGroupJoin( functionTableGroup );
}
else {
predicateContainer = querySpec;
}
final BasicType<Integer> integerType = converter.getSqmCreationContext()
.getNodeBuilder()
.getIntegerType();
final Expression jsonDocument;
if ( arguments.jsonDocument().getColumnReference() == null ) {
jsonDocument = new ColumnReference(
functionTableGroup.getPrimaryTableReference().getIdentificationVariable() + "_",
"d",
false,
null,
arguments.jsonDocument().getExpressionType().getSingleJdbcMapping()
);
}
else {
jsonDocument = arguments.jsonDocument();
}
final Expression lhs = new ArrayLengthExpression( jsonDocument, integerType );
final Expression rhs = new ColumnReference(
functionTableGroup.getPrimaryTableReference().getIdentificationVariable(),
// The default column name for the system_range function
"x",
false,
null,
integerType
);
predicateContainer.applyPredicate(
new ComparisonPredicate( lhs, ComparisonOperator.GREATER_THAN_OR_EQUAL, rhs ) );
}
final int lastArrayIndex = getLastArrayIndex( arguments.columnsClause(), 0 );
if ( lastArrayIndex != 0 ) {
// Create a synthetic function table group which will render system_range() joins
// for every nested path for arrays
final String tableIdentifierVariable = functionTableGroup.getPrimaryTableReference()
.getIdentificationVariable();
final Expression jsonDocument;
if ( arguments.jsonDocument().getColumnReference() == null ) {
jsonDocument = new ColumnReference(
tableIdentifierVariable + "_",
"d",
false,
null,
arguments.jsonDocument().getExpressionType().getSingleJdbcMapping()
);
}
else {
jsonDocument = arguments.jsonDocument();
}
final TableGroup tableGroup = new FunctionTableGroup(
functionTableGroup.getNavigablePath().append( "{synthetic}" ),
null,
new SelfRenderingFunctionSqlAstExpression(
"json_table_emulation",
new NestedPathFunctionRenderer(
tableIdentifierVariable,
arguments,
jsonDocument,
maximumArraySize,
lastArrayIndex
),
emptyList(),
null,
null
),
tableIdentifierVariable + "_synthetic_",
emptyList(),
Set.of( "" ),
false,
false,
true,
converter.getCreationContext().getSessionFactory()
);
final BasicType<Integer> integerType = converter.getSqmCreationContext()
.getNodeBuilder()
.getIntegerType();
// The join predicate compares the length of the last array expression against system_range() index.
// Since a table function expression can't render its own `on` clause, this split of logic is necessary
final Expression lhs = new ArrayLengthExpression(
determineLastArrayExpression( tableIdentifierVariable, arguments, jsonDocument ),
integerType
);
final Expression rhs = new ColumnReference(
tableIdentifierVariable + "_" + lastArrayIndex + "_",
// The default column name for the system_range function
"x",
false,
null,
integerType
);
final Predicate predicate = new ComparisonPredicate( lhs, ComparisonOperator.GREATER_THAN_OR_EQUAL, rhs );
functionTableGroup.addTableGroupJoin(
new TableGroupJoin( tableGroup.getNavigablePath(), SqlAstJoinType.LEFT, tableGroup, predicate )
);
}
return querySpec;
}
private static Expression determineLastArrayExpression(String tableIdentifierVariable, JsonTableArguments arguments, Expression jsonDocument) {
final ArrayExpressionEntry arrayExpressionEntry = determineLastArrayExpression(
tableIdentifierVariable,
determineJsonElement( tableIdentifierVariable, arguments, jsonDocument ),
arguments.columnsClause(),
new ArrayExpressionEntry( 0, null )
);
return NullnessUtil.castNonNull( arrayExpressionEntry.expression() );
}
record ArrayExpressionEntry(int arrayIndex, @Nullable Expression expression) {
}
private static ArrayExpressionEntry determineLastArrayExpression(String tableIdentifierVariable, Expression parentJson, JsonTableColumnsClause jsonTableColumnsClause, ArrayExpressionEntry parentEntry) {
// Depth-first traversal to obtain the last nested path that refers to an array within this tree
ArrayExpressionEntry currentArrayEntry = parentEntry;
for ( JsonTableColumnDefinition columnDefinition : jsonTableColumnsClause.getColumnDefinitions() ) {
if ( columnDefinition instanceof JsonTableNestedColumnDefinition nestedColumnDefinition ) {
final String rawJsonPath = nestedColumnDefinition.jsonPath();
final boolean isArray = isArrayAccess( rawJsonPath );
final String jsonPath = isArray ? rawJsonPath.substring( 0, rawJsonPath.length() - 3 ) : rawJsonPath;
final Expression jsonQueryResult = new JsonValueExpression( parentJson, jsonPath, null );
final Expression jsonElement;
final ArrayExpressionEntry nextArrayExpression;
if ( isArray ) {
final int nextArrayIndex = currentArrayEntry.arrayIndex() + 1;
jsonElement = new ArrayAccessExpression( jsonQueryResult, ordinalityExpression( tableIdentifierVariable, nextArrayIndex ) );
nextArrayExpression = new ArrayExpressionEntry( nextArrayIndex, jsonQueryResult );
}
else {
jsonElement = jsonQueryResult;
nextArrayExpression = currentArrayEntry;
}
currentArrayEntry = determineLastArrayExpression(
tableIdentifierVariable,
jsonElement,
nestedColumnDefinition.columns(),
nextArrayExpression
);
}
}
return currentArrayEntry;
}
private static Expression determineJsonElement(String tableIdentifierVariable, JsonTableArguments arguments, Expression jsonDocument) {
// Applies the json path and array index access to obtain the "current" processing element
final boolean isArray;
final Expression jsonQueryResult;
if ( arguments.jsonPath() != null ) {
if ( !(arguments.jsonPath() instanceof Literal literal) ) {
throw new QueryException(
"H2 json_table() only supports literal json paths, but got " + arguments.jsonPath() );
}
final String rawJsonPath = (String) literal.getLiteralValue();
isArray = isArrayAccess( rawJsonPath );
final String jsonPath = isArray ? rawJsonPath.substring( 0, rawJsonPath.length() - 3 ) : rawJsonPath;
jsonQueryResult = "$".equals( jsonPath )
? jsonDocument
: new JsonValueExpression( jsonDocument, arguments.isJsonType(), jsonPath, arguments.passingClause() );
}
else {
// We have to assume this is an array
isArray = true;
jsonQueryResult = jsonDocument;
}
final Expression jsonElement;
if ( isArray ) {
jsonElement = new ArrayAccessExpression( jsonQueryResult, tableIdentifierVariable + ".x" );
}
else {
jsonElement = jsonQueryResult;
}
return jsonElement;
}
private static | JsonTableQueryTransformer |
java | micronaut-projects__micronaut-core | core/src/main/java/io/micronaut/core/beans/AbstractBeanIntrospectionReference.java | {
"start": 1135,
"end": 1827
} | class ____<T> implements BeanIntrospectionReference<T> {
private Boolean present = null;
/**
* Default constructor.
*/
@UsedByGeneratedCode
@Internal
protected AbstractBeanIntrospectionReference() {
}
@SuppressWarnings("ConstantConditions")
@Override
public final boolean isPresent() {
if (present == null) {
try {
present = getBeanType() != null;
} catch (Throwable e) {
present = false;
}
}
return present;
}
@NonNull
@Override
public String getName() {
return getBeanType().getName();
}
}
| AbstractBeanIntrospectionReference |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/context/erroneous/ErroneousNodeMapperWithNonUniqueContextTypes.java | {
"start": 447,
"end": 622
} | interface ____ {
NodeDto nodeToNodeDto(Node node, @Context CycleContext cycleContext, @Context CycleContext otherCycleContext);
}
| ErroneousNodeMapperWithNonUniqueContextTypes |
java | mockito__mockito | mockito-core/src/main/java/org/mockito/listeners/StubbingLookupListener.java | {
"start": 1239,
"end": 1612
} | interface ____ {
/**
* Called by the framework when Mockito looked up an answer for invocation on a mock.
* For details, see {@link StubbingLookupListener}.
*
* @param stubbingLookupEvent - Information about the looked up stubbing
* @since 2.24.6
*/
void onStubbingLookup(StubbingLookupEvent stubbingLookupEvent);
}
| StubbingLookupListener |
java | apache__kafka | clients/src/test/java/org/apache/kafka/common/requests/AddPartitionsToTxnRequestTest.java | {
"start": 1918,
"end": 8494
} | class ____ {
private static final int PRODUCER_ID = 10;
private static final short PRODUCER_EPOCH = 1;
private static final int THROTTLE_TIME_MS = 10;
private static final TopicPartition TP_0 = new TopicPartition("topic", 0);
private static final TopicPartition TP_1 = new TopicPartition("topic", 1);
private final String transactionalId1 = "transaction1";
private final String transactionalId2 = "transaction2";
@ParameterizedTest
@ApiKeyVersionsSource(apiKey = ApiKeys.ADD_PARTITIONS_TO_TXN)
public void testConstructor(short version) {
AddPartitionsToTxnRequest request;
if (version < 4) {
List<TopicPartition> partitions = new ArrayList<>();
partitions.add(TP_0);
partitions.add(TP_1);
AddPartitionsToTxnRequest.Builder builder = AddPartitionsToTxnRequest.Builder.forClient(transactionalId1, PRODUCER_ID, PRODUCER_EPOCH, partitions);
request = builder.build(version);
assertEquals(transactionalId1, request.data().v3AndBelowTransactionalId());
assertEquals(PRODUCER_ID, request.data().v3AndBelowProducerId());
assertEquals(PRODUCER_EPOCH, request.data().v3AndBelowProducerEpoch());
assertEquals(partitions, AddPartitionsToTxnRequest.getPartitions(request.data().v3AndBelowTopics()));
} else {
AddPartitionsToTxnTransactionCollection transactions = createTwoTransactionCollection();
AddPartitionsToTxnRequest.Builder builder = AddPartitionsToTxnRequest.Builder.forBroker(transactions);
request = builder.build(version);
AddPartitionsToTxnTransaction reqTxn1 = request.data().transactions().find(transactionalId1);
AddPartitionsToTxnTransaction reqTxn2 = request.data().transactions().find(transactionalId2);
assertEquals(transactions.find(transactionalId1), reqTxn1);
assertEquals(transactions.find(transactionalId2), reqTxn2);
}
AddPartitionsToTxnResponse response = request.getErrorResponse(THROTTLE_TIME_MS, Errors.UNKNOWN_TOPIC_OR_PARTITION.exception());
assertEquals(THROTTLE_TIME_MS, response.throttleTimeMs());
if (version >= 4) {
assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION.code(), response.data().errorCode());
// Since the error is top level, we count it as one error in the counts.
assertEquals(Collections.singletonMap(Errors.UNKNOWN_TOPIC_OR_PARTITION, 1), response.errorCounts());
} else {
assertEquals(Collections.singletonMap(Errors.UNKNOWN_TOPIC_OR_PARTITION, 2), response.errorCounts());
}
}
@Test
public void testBatchedRequests() {
AddPartitionsToTxnTransactionCollection transactions = createTwoTransactionCollection();
AddPartitionsToTxnRequest.Builder builder = AddPartitionsToTxnRequest.Builder.forBroker(transactions);
AddPartitionsToTxnRequest request = builder.build(ApiKeys.ADD_PARTITIONS_TO_TXN.latestVersion());
Map<String, List<TopicPartition>> expectedMap = new HashMap<>();
expectedMap.put(transactionalId1, Collections.singletonList(TP_0));
expectedMap.put(transactionalId2, Collections.singletonList(TP_1));
assertEquals(expectedMap, request.partitionsByTransaction());
AddPartitionsToTxnResponseData.AddPartitionsToTxnResultCollection results = new AddPartitionsToTxnResponseData.AddPartitionsToTxnResultCollection();
results.add(request.errorResponseForTransaction(transactionalId1, Errors.UNKNOWN_TOPIC_OR_PARTITION));
results.add(request.errorResponseForTransaction(transactionalId2, Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED));
AddPartitionsToTxnResponse response = new AddPartitionsToTxnResponse(new AddPartitionsToTxnResponseData()
.setResultsByTransaction(results)
.setThrottleTimeMs(THROTTLE_TIME_MS));
assertEquals(Collections.singletonMap(TP_0, Errors.UNKNOWN_TOPIC_OR_PARTITION), errorsForTransaction(response.getTransactionTopicResults(transactionalId1)));
assertEquals(Collections.singletonMap(TP_1, Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED), errorsForTransaction(response.getTransactionTopicResults(transactionalId2)));
}
@Test
public void testNormalizeRequest() {
List<TopicPartition> partitions = new ArrayList<>();
partitions.add(TP_0);
partitions.add(TP_1);
AddPartitionsToTxnRequest.Builder builder = AddPartitionsToTxnRequest.Builder.forClient(transactionalId1, PRODUCER_ID, PRODUCER_EPOCH, partitions);
AddPartitionsToTxnRequest request = builder.build((short) 3);
AddPartitionsToTxnRequest singleton = request.normalizeRequest();
assertEquals(partitions, singleton.partitionsByTransaction().get(transactionalId1));
AddPartitionsToTxnTransaction transaction = singleton.data().transactions().find(transactionalId1);
assertEquals(PRODUCER_ID, transaction.producerId());
assertEquals(PRODUCER_EPOCH, transaction.producerEpoch());
}
private AddPartitionsToTxnTransactionCollection createTwoTransactionCollection() {
AddPartitionsToTxnTopicCollection topics0 = new AddPartitionsToTxnTopicCollection();
topics0.add(new AddPartitionsToTxnTopic()
.setName(TP_0.topic())
.setPartitions(Collections.singletonList(TP_0.partition())));
AddPartitionsToTxnTopicCollection topics1 = new AddPartitionsToTxnTopicCollection();
topics1.add(new AddPartitionsToTxnTopic()
.setName(TP_1.topic())
.setPartitions(Collections.singletonList(TP_1.partition())));
AddPartitionsToTxnTransactionCollection transactions = new AddPartitionsToTxnTransactionCollection();
transactions.add(new AddPartitionsToTxnTransaction()
.setTransactionalId(transactionalId1)
.setProducerId(PRODUCER_ID)
.setProducerEpoch(PRODUCER_EPOCH)
.setVerifyOnly(true)
.setTopics(topics0));
transactions.add(new AddPartitionsToTxnTransaction()
.setTransactionalId(transactionalId2)
.setProducerId(PRODUCER_ID + 1)
.setProducerEpoch((short) (PRODUCER_EPOCH + 1))
.setVerifyOnly(false)
.setTopics(topics1));
return transactions;
}
}
| AddPartitionsToTxnRequestTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/JUnitParameterMethodNotFoundTest.java | {
"start": 5963,
"end": 6565
} | class ____ {
public Object named1() {
return new Object[] {1};
}
}\
""")
.addSourceLines(
"JUnitParameterMethodNotFoundNegativeCaseSuperClass.java",
"""
package com.google.errorprone.bugpatterns.testdata;
import junitparams.JUnitParamsRunner;
import junitparams.Parameters;
import org.junit.Test;
import org.junit.runner.RunWith;
@RunWith(JUnitParamsRunner.class)
public | JUnitParameterMethodNotFoundNegativeCaseBaseClass |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/internal/objectarrays/ObjectArrays_assertDoesNotHaveAnyElementsOfTypes_Test.java | {
"start": 1224,
"end": 2937
} | class ____ extends ObjectArraysBaseTest {
private static final Object[] array = { 6, 7.0, 8L };
@Test
void should_pass_if_actual_does_not_have_any_elements_of_the_unexpected_types() {
arrays.assertDoesNotHaveAnyElementsOfTypes(INFO, array, array(Float.class, BigDecimal.class));
}
@Test
void should_fail_if_actual_is_null() {
// WHEN
var error = expectAssertionError(() -> arrays.assertDoesNotHaveAnyElementsOfTypes(INFO, null, Integer.class));
// THEN
then(error).hasMessage(actualIsNull());
}
@Test
void should_fail_if_one_element_is_one_of_the_unexpected_types() {
// GIVEN
Map<Class<?>, List<Object>> nonMatchingElementsByType = new LinkedHashMap<>();
nonMatchingElementsByType.put(Long.class, newArrayList(8L));
Class<?>[] unexpectedTypes = { Long.class };
// WHEN
var error = expectAssertionError(() -> arrays.assertDoesNotHaveAnyElementsOfTypes(INFO, array, Long.class));
// THEN
then(error).hasMessage(shouldNotHaveAnyElementsOfTypes(array, unexpectedTypes, nonMatchingElementsByType).create());
}
@Test
void should_fail_if_one_element_type_is_a_subclass_one_of_the_unexpected_types() {
// GIVEN
Map<Class<?>, List<Object>> nonMatchingElementsByType = new LinkedHashMap<>();
nonMatchingElementsByType.put(Number.class, newArrayList(6, 7.0, 8L));
Class<?>[] unexpectedTypes = { Number.class };
// WHEN
var error = expectAssertionError(() -> arrays.assertDoesNotHaveAnyElementsOfTypes(INFO, array, Number.class));
// THEN
then(error).hasMessage(shouldNotHaveAnyElementsOfTypes(array, unexpectedTypes, nonMatchingElementsByType).create());
}
}
| ObjectArrays_assertDoesNotHaveAnyElementsOfTypes_Test |
java | elastic__elasticsearch | x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtSignatureValidator.java | {
"start": 2107,
"end": 11401
} | class ____ implements JwtSignatureValidator {
private static final Logger logger = LogManager.getLogger(DelegatingJwtSignatureValidator.class);
private final RealmConfig realmConfig;
final List<String> allowedJwksAlgsPkc;
final List<String> allowedJwksAlgsHmac;
@Nullable
private final HmacJwtSignatureValidator hmacJwtSignatureValidator;
@Nullable
private final PkcJwtSignatureValidator pkcJwtSignatureValidator;
public DelegatingJwtSignatureValidator(
final RealmConfig realmConfig,
final SSLService sslService,
final PkcJwkSetReloadNotifier reloadNotifier,
final ThreadPool threadPool
) {
this.realmConfig = realmConfig;
// Split configured signature algorithms by PKC and HMAC. Useful during validation, error logging, and JWK vs Alg filtering.
final List<String> algs = realmConfig.getSetting(JwtRealmSettings.ALLOWED_SIGNATURE_ALGORITHMS);
this.allowedJwksAlgsHmac = algs.stream().filter(JwtRealmSettings.SUPPORTED_SIGNATURE_ALGORITHMS_HMAC::contains).toList();
this.allowedJwksAlgsPkc = algs.stream().filter(JwtRealmSettings.SUPPORTED_SIGNATURE_ALGORITHMS_PKC::contains).toList();
final String jwkSetPath = realmConfig.getSetting(JwtRealmSettings.PKC_JWKSET_PATH);
final SecureString hmacJwkSetContents = realmConfig.getSetting(JwtRealmSettings.HMAC_JWKSET);
final SecureString hmacKeyContents = realmConfig.getSetting(JwtRealmSettings.HMAC_KEY);
final boolean isConfiguredJwkSetPkc = Strings.hasText(jwkSetPath);
final boolean isConfiguredJwkSetHmac = Strings.hasText(hmacJwkSetContents);
final boolean isConfiguredJwkOidcHmac = Strings.hasText(hmacKeyContents);
validateJwkSettings(realmConfig, isConfiguredJwkSetPkc, isConfiguredJwkSetHmac, isConfiguredJwkOidcHmac);
final List<JWK> jwksHmac;
if (isConfiguredJwkSetHmac) {
jwksHmac = JwkValidateUtil.loadJwksFromJwkSetString(
RealmSettings.getFullSettingKey(realmConfig, JwtRealmSettings.HMAC_JWKSET),
hmacJwkSetContents.toString()
);
} else if (isConfiguredJwkOidcHmac) {
final OctetSequenceKey hmacKey = JwkValidateUtil.loadHmacJwkFromJwkString(
RealmSettings.getFullSettingKey(realmConfig, JwtRealmSettings.HMAC_KEY),
hmacKeyContents
);
assert hmacKey != null : "Null HMAC key should not happen here";
jwksHmac = List.of(hmacKey);
} else {
jwksHmac = null;
}
if (jwksHmac != null) {
final JwkSetLoader.JwksAlgs jwksAlgs = JwkValidateUtil.filterJwksAndAlgorithms(jwksHmac, allowedJwksAlgsHmac);
logger.info("Usable HMAC: JWKs [{}]. Algorithms [{}].", jwksAlgs.jwks().size(), String.join(",", jwksAlgs.algs()));
// Filter JWK(s) vs signature algorithms. Only keep JWKs with a matching alg. Only keep algorithms with a matching JWK.
this.hmacJwtSignatureValidator = new HmacJwtSignatureValidator(jwksAlgs);
} else {
this.hmacJwtSignatureValidator = null;
}
if (isConfiguredJwkSetPkc) {
this.pkcJwtSignatureValidator = new PkcJwtSignatureValidator(
new JwkSetLoader(realmConfig, allowedJwksAlgsPkc, sslService, threadPool, reloadNotifier)
);
} else {
this.pkcJwtSignatureValidator = null;
}
logWarnIfAuthenticationWillAlwaysFail();
}
@Override
public void validate(String tokenPrincipal, SignedJWT jwt, ActionListener<Void> listener) {
final String algorithm = jwt.getHeader().getAlgorithm().getName();
if (allowedJwksAlgsHmac.contains(algorithm)) {
if (hmacJwtSignatureValidator != null) {
hmacJwtSignatureValidator.validate(tokenPrincipal, jwt, listener);
} else {
listener.onFailure(
new ElasticsearchSecurityException(
"algorithm [%s] is a HMAC signing algorithm, but none of the HMAC JWK settings ["
+ RealmSettings.getFullSettingKey(realmConfig, JwtRealmSettings.HMAC_KEY)
+ ", "
+ RealmSettings.getFullSettingKey(realmConfig, JwtRealmSettings.HMAC_JWKSET)
+ "] is configured",
RestStatus.BAD_REQUEST,
algorithm
)
);
}
} else if (allowedJwksAlgsPkc.contains(algorithm)) {
if (pkcJwtSignatureValidator != null) {
pkcJwtSignatureValidator.validate(tokenPrincipal, jwt, listener);
} else {
listener.onFailure(
new ElasticsearchSecurityException(
"algorithm [%s] is a PKC signing algorithm, but PKC JWK setting ["
+ RealmSettings.getFullSettingKey(realmConfig, JwtRealmSettings.PKC_JWKSET_PATH)
+ "] is not configured",
RestStatus.BAD_REQUEST,
algorithm
)
);
}
} else {
listener.onFailure(
new ElasticsearchSecurityException(
"algorithm [%s] is not in the list of supported algorithms [%s]",
RestStatus.BAD_REQUEST,
algorithm,
Strings.collectionToCommaDelimitedString(
Stream.of(allowedJwksAlgsHmac.stream(), allowedJwksAlgsPkc.stream()).toList()
)
)
);
}
}
@Override
public void close() {
if (pkcJwtSignatureValidator != null) {
pkcJwtSignatureValidator.close();
}
}
private void logWarnIfAuthenticationWillAlwaysFail() {
final boolean hasUsableJwksAndAlgorithms = (hmacJwtSignatureValidator != null
&& false == hmacJwtSignatureValidator.jwksAlgs.isEmpty())
|| (pkcJwtSignatureValidator != null
&& false == pkcJwtSignatureValidator.jwkSetLoader.getContentAndJwksAlgs().jwksAlgs().isEmpty());
if (false == hasUsableJwksAndAlgorithms) {
logger.warn(
"No available JWK and algorithm for HMAC or PKC. JWT realm authentication expected to fail until this is fixed."
);
}
}
private static void validateJwkSettings(
RealmConfig realmConfig,
boolean isConfiguredJwkSetPkc,
boolean isConfiguredJwkSetHmac,
boolean isConfiguredJwkOidcHmac
) {
if (isConfiguredJwkSetPkc == false && isConfiguredJwkSetHmac == false && isConfiguredJwkOidcHmac == false) {
throw new SettingsException(
"At least one of ["
+ RealmSettings.getFullSettingKey(realmConfig, JwtRealmSettings.HMAC_KEY)
+ "] or ["
+ RealmSettings.getFullSettingKey(realmConfig, JwtRealmSettings.HMAC_JWKSET)
+ "] or ["
+ RealmSettings.getFullSettingKey(realmConfig, JwtRealmSettings.PKC_JWKSET_PATH)
+ "] must be set"
);
}
// HMAC Key vs HMAC JWKSet settings must be mutually exclusive
if (isConfiguredJwkSetHmac && isConfiguredJwkOidcHmac) {
throw new SettingsException(
"Settings ["
+ RealmSettings.getFullSettingKey(realmConfig, JwtRealmSettings.HMAC_JWKSET)
+ "] and ["
+ RealmSettings.getFullSettingKey(realmConfig, JwtRealmSettings.HMAC_KEY)
+ "] are not allowed at the same time."
);
}
}
// Package private for testing Only
Tuple<JwkSetLoader.JwksAlgs, JwkSetLoader.JwksAlgs> getAllJwksAlgs() {
final JwkSetLoader.JwksAlgs jwksAlgsHmac;
if (hmacJwtSignatureValidator == null) {
jwksAlgsHmac = new JwkSetLoader.JwksAlgs(List.of(), List.of());
} else {
jwksAlgsHmac = hmacJwtSignatureValidator.jwksAlgs;
}
final JwkSetLoader.JwksAlgs jwksAlgsPkc;
if (pkcJwtSignatureValidator == null) {
jwksAlgsPkc = new JwkSetLoader.JwksAlgs(List.of(), List.of());
} else {
jwksAlgsPkc = pkcJwtSignatureValidator.jwkSetLoader.getContentAndJwksAlgs().jwksAlgs();
}
return new Tuple<>(jwksAlgsHmac, jwksAlgsPkc);
}
}
| DelegatingJwtSignatureValidator |
java | alibaba__nacos | naming/src/test/java/com/alibaba/nacos/naming/selector/SelectorManagerTest.java | {
"start": 1278,
"end": 3363
} | class ____ {
private SelectorManager selectorManager;
@BeforeEach
void setUp() {
selectorManager = new SelectorManager();
selectorManager.init();
}
@Test
void testGetAllSelectorTypes() {
List<String> selectorTypes = selectorManager.getAllSelectorTypes();
assertTrue(selectorTypes.contains("mock"));
}
@Test
void testParseSelector() throws NacosException {
Selector selector = selectorManager.parseSelector("mock", "key=value");
assertTrue(selector instanceof MockSelector);
assertEquals("mock", selector.getType());
}
@Test
void testSelect() throws NacosException {
Selector selector = selectorManager.parseSelector("mock", "key=value");
Instance instance = new Instance();
instance.setIp("2.2.2.2");
List<Instance> providers = Collections.singletonList(instance);
List<Instance> instances0 = selectorManager.select(selector, "1.1.1.1", providers);
assertEquals(1, instances0.size());
assertEquals("2.2.2.2", instances0.get(0).getIp());
// test json serial for Selector
Serializer serializer0 = SerializeFactory.getSerializer("JSON");
byte[] bytes = serializer0.serialize(selector);
Selector jsonSelector = serializer0.deserialize(bytes, Selector.class);
List<Instance> instances1 = selectorManager.select(jsonSelector, "1.1.1.1", providers);
assertEquals(1, instances1.size());
assertEquals("2.2.2.2", instances1.get(0).getIp());
// test hessian serial for Selector
Serializer serializer1 = SerializeFactory.getDefault();
byte[] bytes1 = serializer1.serialize(selector);
Selector hessianSelector = serializer1.deserialize(bytes1);
List<Instance> instances2 = selectorManager.select(hessianSelector, "1.1.1.1", providers);
assertEquals(1, instances2.size());
assertEquals("2.2.2.2", instances2.get(0).getIp());
}
}
| SelectorManagerTest |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/error/FakeFile.java | {
"start": 731,
"end": 1507
} | class ____ extends File {
private final String absolutePath;
private boolean noParent;
@SuppressWarnings("unused")
private String parent;
FakeFile(String absolutePath) {
super(absolutePath);
this.absolutePath = absolutePath;
}
FakeFile(String absolutePath, boolean noParent) {
super(absolutePath);
this.absolutePath = absolutePath;
this.noParent = noParent;
}
FakeFile(String absolutePath, String parent) {
super(absolutePath);
this.absolutePath = absolutePath;
this.parent = parent;
}
@Override
public String getAbsolutePath() {
// ToStringOf uses absolute path instead of toString
return absolutePath;
}
@Override
public String getParent() {
return noParent ? null : super.getParent();
}
} | FakeFile |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/deser/SettableBeanProperty.java | {
"start": 1986,
"end": 24809
} | class ____ is
* deserialized using deserializer that contains this property.
*/
protected final transient Annotations _contextAnnotations;
/**
* Deserializer used for handling property value.
*/
protected final ValueDeserializer<Object> _valueDeserializer;
/**
* If value will contain type information (to support
* polymorphic handling), this is the type deserializer
* used to handle type resolution.
*/
protected final TypeDeserializer _valueTypeDeserializer;
/**
* Entity used for possible translation from `null` into non-null
* value of type of this property.
* Often same as <code>_valueDeserializer</code>, but not always.
*/
protected final NullValueProvider _nullProvider;
/*
/**********************************************************************
/* Configuration that is not yet immutable; generally assigned
/* during initialization process but cannot be passed to constructor.
/**********************************************************************
*/
/**
* If property represents a managed (forward) reference, we will need
* the name of reference for later linking.
*<p>
* TODO: should try to make immutable.
*/
protected String _managedReferenceName;
/**
* This is the information for object identity associated with the property.
* <p>
* TODO: should try to make immutable.
*/
protected ObjectIdInfo _objectIdInfo;
/**
* Helper object used for checking whether this property is to
* be included in the active view, if property is view-specific;
* null otherwise.
*<p>
* TODO: should try to make immutable.
*/
protected ViewMatcher _viewMatcher;
/**
* Index of property (within all property of a bean); assigned
* when all properties have been collected. Order of entries
* is arbitrary, but once indexes are assigned they are not
* changed.
*<p>
* TODO: should try to make immutable if at all possible
*/
protected int _propertyIndex = -1;
/*
/**********************************************************************
/* Life-cycle (construct & configure)
/**********************************************************************
*/
protected SettableBeanProperty(BeanPropertyDefinition propDef,
JavaType type, TypeDeserializer typeDeser, Annotations contextAnnotations)
{
this(propDef.getFullName(), type, propDef.getWrapperName(), typeDeser,
contextAnnotations, propDef.getMetadata());
}
protected SettableBeanProperty(PropertyName propName, JavaType type, PropertyName wrapper,
TypeDeserializer typeDeser, Annotations contextAnnotations,
PropertyMetadata metadata)
{
super(metadata);
// 09-Jan-2009, tatu: Intern()ing makes sense since Jackson parsed
// field names are (usually) interned too, hence lookups will be faster.
// 23-Oct-2009, tatu: should this be disabled wrt [JACKSON-180]?
// Probably need not, given that namespace of field/method names
// is not unbounded, unlike potential JSON names.
if (propName == null) {
_propName = PropertyName.NO_NAME;
} else {
_propName = propName.internSimpleName();
}
_type = type;
_wrapperName = wrapper;
_contextAnnotations = contextAnnotations;
_viewMatcher = null;
// 30-Jan-2012, tatu: Important: contextualize TypeDeserializer now...
if (typeDeser != null) {
typeDeser = typeDeser.forProperty(this);
}
_valueTypeDeserializer = typeDeser;
_valueDeserializer = MISSING_VALUE_DESERIALIZER;
_nullProvider = MISSING_VALUE_DESERIALIZER;
}
/**
* Constructor only used by {@link tools.jackson.databind.deser.impl.ObjectIdValueProperty}.
*/
protected SettableBeanProperty(PropertyName propName, JavaType type,
PropertyMetadata metadata, ValueDeserializer<Object> valueDeser)
{
super(metadata);
// as with above ctor, intern()ing probably fine
if (propName == null) {
_propName = PropertyName.NO_NAME;
} else {
_propName = propName.internSimpleName();
}
_type = type;
_wrapperName = null;
_contextAnnotations = null;
_viewMatcher = null;
_valueTypeDeserializer = null;
_valueDeserializer = valueDeser;
// 29-Jan-2017, tatu: Presumed to be irrelevant for ObjectId values...
_nullProvider = valueDeser;
}
/**
* Basic copy-constructor for sub-classes to use.
*/
protected SettableBeanProperty(SettableBeanProperty src)
{
super(src);
_propName = src._propName;
_type = src._type;
_wrapperName = src._wrapperName;
_contextAnnotations = src._contextAnnotations;
_valueDeserializer = src._valueDeserializer;
_valueTypeDeserializer = src._valueTypeDeserializer;
_managedReferenceName = src._managedReferenceName;
_propertyIndex = src._propertyIndex;
_viewMatcher = src._viewMatcher;
_objectIdInfo = src._objectIdInfo;
_nullProvider = src._nullProvider;
}
/**
* Copy-with-deserializer-change constructor for sub-classes to use.
*/
@SuppressWarnings("unchecked")
protected SettableBeanProperty(SettableBeanProperty src,
ValueDeserializer<?> deser, NullValueProvider nuller)
{
super(src);
_propName = src._propName;
_type = src._type;
_wrapperName = src._wrapperName;
_contextAnnotations = src._contextAnnotations;
_valueTypeDeserializer = src._valueTypeDeserializer;
_managedReferenceName = src._managedReferenceName;
_propertyIndex = src._propertyIndex;
if (deser == null) {
_valueDeserializer = MISSING_VALUE_DESERIALIZER;
} else {
_valueDeserializer = (ValueDeserializer<Object>) deser;
}
_viewMatcher = src._viewMatcher;
_objectIdInfo = src._objectIdInfo;
// 29-Jan-2017, tatu: Bit messy, but for now has to do...
if (nuller == MISSING_VALUE_DESERIALIZER) {
nuller = _valueDeserializer;
}
_nullProvider = nuller;
}
/**
* Copy-with-deserializer-change constructor for sub-classes to use.
*/
protected SettableBeanProperty(SettableBeanProperty src, PropertyName newName)
{
super(src);
_propName = newName;
_type = src._type;
_wrapperName = src._wrapperName;
_contextAnnotations = src._contextAnnotations;
_valueDeserializer = src._valueDeserializer;
_valueTypeDeserializer = src._valueTypeDeserializer;
_managedReferenceName = src._managedReferenceName;
_propertyIndex = src._propertyIndex;
_viewMatcher = src._viewMatcher;
_objectIdInfo = src._objectIdInfo;
_nullProvider = src._nullProvider;
}
/**
* Copy-with-type-deserializer-change constructor for sub-classes to use.
*/
protected SettableBeanProperty(SettableBeanProperty src, TypeDeserializer typeDeser)
{
super(src);
_propName = src._propName;
_type = src._type;
_wrapperName = src._wrapperName;
_contextAnnotations = src._contextAnnotations;
_valueDeserializer = src._valueDeserializer;
if (typeDeser != null) {
typeDeser = typeDeser.forProperty(this);
}
_valueTypeDeserializer = typeDeser;
_managedReferenceName = src._managedReferenceName;
_propertyIndex = src._propertyIndex;
_viewMatcher = src._viewMatcher;
_objectIdInfo = src._objectIdInfo;
_nullProvider = src._nullProvider;
}
    // Fluent "mutant factory" methods: return a (possibly new) instance with a
    // single setting changed; never mutate 'this'.
    /**
     * Fluent factory method for constructing and returning a new instance
     * with specified value deserializer.
     * Note that this method should NOT change configuration of this instance.
     *
     * @param deser Deserializer to assign to the new property instance
     *
     * @return Newly constructed instance, if value deserializer differs from the
     *    one used for this instance; or 'this' if not.
     */
    public abstract SettableBeanProperty withValueDeserializer(ValueDeserializer<?> deser);
    /**
     * Fluent factory method for constructing and returning a new instance
     * with specified property name.
     * Note that this method should NOT change configuration of this instance.
     *
     * @param newName Name to use for the new instance.
     *
     * @return Newly constructed instance, if property name differs from the
     *    one used for this instance; or 'this' if not.
     */
    public abstract SettableBeanProperty withName(PropertyName newName);
public SettableBeanProperty withSimpleName(String simpleName) {
PropertyName n;
if (_propName == null) {
n = new PropertyName(simpleName);
} else {
n = _propName.withSimpleName(simpleName);
}
n = n.internSimpleName();
return (n == _propName) ? this : withName(n);
}
    /**
     * Fluent factory for constructing and returning an instance that uses the
     * specified {@code NullValueProvider} for handling incoming nulls.
     */
    public abstract SettableBeanProperty withNullProvider(NullValueProvider nva);
    // Mutators below are called during deserializer resolution (build time),
    // before the property is shared; not meant for use after that.
    public void setManagedReferenceName(String n) {
        _managedReferenceName = n;
    }
    public void setObjectIdInfo(ObjectIdInfo objectIdInfo) {
        _objectIdInfo = objectIdInfo;
    }
public void setViews(Class<?>[] views) {
if (views == null) {
_viewMatcher = null;
} else {
_viewMatcher = ViewMatcher.construct(views);
}
}
/**
* Method used to assign index for property.
*/
public void assignIndex(int index) {
if (_propertyIndex != -1) {
if (_propertyIndex != index) {
throw new IllegalStateException("Property '"+getName()+"' already had index ("+_propertyIndex+"), trying to assign "+index);
}
}
_propertyIndex = index;
}
/**
* Method called to ensure that the mutator has proper access rights to
* be called, as per configuration. Overridden by implementations that
* have mutators that require access, fields and setters.
*/
public void fixAccess(DeserializationConfig config) {
;
}
    // Marks this property as one whose value should be read but discarded;
    // base implementation is a no-op (only some subtypes support ignoral).
    public void markAsIgnorable() { }
    // True if this property's value is to be discarded; base default is false.
    public boolean isIgnorable() { return false; }
    /**
     * Whether this property requires merging of values (read-then-write)
     *
     * @since 2.20
     */
    public boolean isMerging() {
        // Most are not merging so default to this implementation
        return false;
    }
/*
/**********************************************************************
/* BeanProperty impl
/**********************************************************************
*/
    @Override
    public final String getName() {
        // simple (unqualified) name only; full name exposed via getFullName()
        return _propName.getSimpleName();
    }
    @Override
    public PropertyName getFullName() {
        return _propName;
    }
    @Override
    public JavaType getType() { return _type; }
    @Override
    public PropertyName getWrapperName() {
        return _wrapperName;
    }
    // Underlying physical member (field, setter or creator parameter)
    @Override
    public abstract AnnotatedMember getMember();
    // Annotation directly on the underlying member, if any
    @Override
    public abstract <A extends Annotation> A getAnnotation(Class<A> acls);
    @Override
    public <A extends Annotation> A getContextAnnotation(Class<A> acls) {
        // context annotations come from the enclosing class, not the member
        return _contextAnnotations.get(acls);
    }
    @Override
    public void depositSchemaProperty(JsonObjectFormatVisitor objectVisitor,
            SerializationContext provider)
    {
        // Schema generation: required-ness chooses which visitor callback to use
        if (isRequired()) {
            objectVisitor.property(this);
        } else {
            objectVisitor.optionalProperty(this);
        }
    }
/*
/**********************************************************************
/* Accessors
/**********************************************************************
*/
    // Class that declares the underlying member; used mostly for error reporting
    public Class<?> getDeclaringClass() {
        return getMember().getDeclaringClass();
    }
    public String getManagedReferenceName() { return _managedReferenceName; }
    public ObjectIdInfo getObjectIdInfo() { return _objectIdInfo; }
    // NOTE: internal MISSING_VALUE_DESERIALIZER marker counts as "no deserializer"
    public boolean hasValueDeserializer() {
        return (_valueDeserializer != null) && (_valueDeserializer != MISSING_VALUE_DESERIALIZER);
    }
    public boolean hasValueTypeDeserializer() { return (_valueTypeDeserializer != null); }
    public ValueDeserializer<Object> getValueDeserializer() {
        ValueDeserializer<Object> deser = _valueDeserializer;
        if (deser == MISSING_VALUE_DESERIALIZER) {
            // normalize internal "missing" marker to null for callers
            return null;
        }
        return deser;
    }
    public TypeDeserializer getValueTypeDeserializer() { return _valueTypeDeserializer; }
    public NullValueProvider getNullValueProvider() { return _nullProvider; }
    public boolean visibleInView(Class<?> activeView) {
        // no matcher means visible in every view
        return (_viewMatcher == null) || _viewMatcher.isVisibleForView(activeView);
    }
    public boolean hasViews() { return _viewMatcher != null; }
    /**
     * Method for accessing unique index of this property; indexes are
     * assigned once all properties of a {@link BeanDeserializer} have
     * been collected.
     *
     * @return Index of this property
     */
    public int getPropertyIndex() { return _propertyIndex; }
    /**
     * Method for accessing index of the creator property: only supported by
     * creator-backed properties; for all other property types calling this
     * is a coding error and results in {@link IllegalStateException}.
     * (Original doc claimed "-1" was returned; behavior changed in 2.7.9 / 2.8.7.)
     */
    public int getCreatorIndex() {
        // changed from 'return -1' in 2.7.9 / 2.8.7
        throw new IllegalStateException(String.format(
                "Internal error: no creator index for property '%s' (of type %s)",
                this.getName(), getClass().getName()));
    }
    /**
     * Accessor for id of injectable value, if this bean property supports
     * value injection. Base implementation: no injection, returns null.
     */
    public Object getInjectableValueId() { return null; }
    /**
     * Accessor for injection definition, if this bean property supports
     * value injection. Base implementation: no injection, returns null.
     */
    public JacksonInject.Value getInjectionDefinition() { return null; }
    /**
     * Accessor for checking whether this property is injectable, and if so,
     * ONLY injectable (will not bind from input).
     * Currently (2.11) can only return {@code true} for Creator-backed properties.
     *
     * @return True if (and only if) property has injector that is also defined NOT
     *    to bind from input.
     */
    public boolean isInjectionOnly() { return false; } // overridden by CreatorProperty
/*
/**********************************************************************
/* Public API
/**********************************************************************
*/
    // Core abstract API: how values are deserialized and assigned varies by
    // concrete property type (field, setter, creator parameter, ...).
    /**
     * Method called to deserialize appropriate value, given parser (and
     * context), and set it using appropriate mechanism.
     * Pre-condition is that passed parser must point to the first token
     * that should be consumed to produce the value (the only value for
     * scalars, multiple for Objects and Arrays).
     */
    public abstract void deserializeAndSet(JsonParser p,
            DeserializationContext ctxt, Object instance) throws JacksonException;
    /**
     * Alternative to {@link #deserializeAndSet} that returns
     * either return value of setter method called (if one is),
     * or null to indicate that no return value is available.
     * Mostly used to support Builder style deserialization.
     */
    public abstract Object deserializeSetAndReturn(JsonParser p,
            DeserializationContext ctxt, Object instance) throws JacksonException;
    /**
     * Method called to assign given value to this property, on
     * specified Object.
     *<p>
     * Note: this is an optional operation, not supported by all
     * implementations, creator-backed properties for example do not
     * support this method.
     */
    public abstract void set(DeserializationContext ctxt,
            Object instance, Object value);
    /**
     * Method called to assign given value to this property, on
     * specified Object, and return whatever delegating accessor
     * returned (if anything)
     *<p>
     * Note: this is an optional operation, not supported by all
     * implementations, creator-backed properties for example do not
     * support this method.
     */
    public abstract Object setAndReturn(DeserializationContext ctxt,
            Object instance, Object value);
    /**
     * This method is needed by some specialized bean deserializers,
     * and also called by some {@link #deserializeAndSet} implementations.
     *<p>
     * Pre-condition is that passed parser must point to the first token
     * that should be consumed to produce the value (the only value for
     * scalars, multiple for Objects and Arrays).
     *<p>
     * Note that this method is final for performance reasons: to override
     * functionality you must override other methods that call this method;
     * this method should also not be called directly unless you really know
     * what you are doing (and probably not even then).
     */
    public final Object deserialize(JsonParser p, DeserializationContext ctxt) throws JacksonException
    {
        if (p.hasToken(JsonToken.VALUE_NULL)) {
            // explicit input null: delegate to configured null-value provider
            return _nullProvider.getNullValue(ctxt);
        }
        if (_valueTypeDeserializer != null) {
            // polymorphic value: type deserializer resolves actual subtype first
            return _valueDeserializer.deserializeWithType(p, ctxt, _valueTypeDeserializer);
        }
        // 04-May-2018, tatu: [databind#2023] Coercion from String (mostly) can give null
        Object value = _valueDeserializer.deserialize(p, ctxt);
        if (value == null) {
            value = _nullProvider.getNullValue(ctxt);
        }
        return value;
    }
    /**
     * Variant of {@code deserialize()} used for merging: deserializes a value
     * "into" the given existing value {@code toUpdate} where supported.
     * Same pre-condition as {@code deserialize()}: parser must point to the
     * first token of the value.
     *
     * @param p Parser positioned at the first value token
     * @param ctxt Active deserialization context
     * @param toUpdate Existing value to merge into
     * @return Updated value, or replacement/null-substitute if merge not applicable
     */
    public final Object deserializeWith(JsonParser p, DeserializationContext ctxt,
            Object toUpdate) throws JacksonException
    {
        // 20-Oct-2016, tatu: Not 100% sure what to do; probably best to simply return
        //    null value and let caller decide what to do.
        if (p.hasToken(JsonToken.VALUE_NULL)) {
            // ... except for "skip nulls" case which should just do that:
            if (NullsConstantProvider.isSkipper(_nullProvider)) {
                return toUpdate;
            }
            return _nullProvider.getNullValue(ctxt);
        }
        if (_valueTypeDeserializer != null) {
            // 25-Oct-2021 Added by James to support merging polymorphic property
            // https://github.com/FasterXML/jackson-databind/issues/2541
            // Please note we only support merging same type polymorphic property for now,
            // merging different type is hard and usually doesn't make sense.
            // Please note you need to configure {@link DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES} as false to
            // enable this feature otherwise the unknown property exception will be thrown.
            JavaType subType = ctxt.getTypeFactory().constructType(toUpdate.getClass());
            ValueDeserializer<Object> subTypeValueDeserializer = ctxt.findContextualValueDeserializer(subType, this);
            return subTypeValueDeserializer.deserialize(p, ctxt, toUpdate);
        }
        // 04-May-2018, tatu: [databind#2023] Coercion from String (mostly) can give null
        Object value = _valueDeserializer.deserialize(p, ctxt, toUpdate);
        if (value == null) {
            // skip-nulls keeps the existing value; otherwise substitute null value
            if (NullsConstantProvider.isSkipper(_nullProvider)) {
                return toUpdate;
            }
            value = _nullProvider.getNullValue(ctxt);
        }
        return value;
    }
    /**
     * Returns a copy of this property, unwrapped using given {@link NameTransformer}:
     * the simple name is transformed (and interned via the shared cache), and if
     * the value deserializer supports unwrapping, it is replaced accordingly.
     *
     * @since 2.19
     */
    public SettableBeanProperty unwrapped(DeserializationContext ctxt, NameTransformer xf)
    {
        String newName = xf.transform(getName());
        newName = InternCache.instance.intern(newName);
        SettableBeanProperty renamed = withSimpleName(newName);
        ValueDeserializer<?> deser = renamed.getValueDeserializer();
        if (deser != null) {
            @SuppressWarnings("unchecked")
            ValueDeserializer<Object> newDeser = (ValueDeserializer<Object>)
                    deser.unwrappingDeserializer(ctxt, xf);
            if (newDeser != deser) {
                // deserializer supports unwrapping: propagate to renamed copy
                renamed = renamed.withValueDeserializer(newDeser);
            }
        }
        return renamed;
    }
/*
/**********************************************************************
/* Helper methods
/**********************************************************************
*/
    /**
     * Helper for wrapping a failure that occurred while assigning {@code value}
     * to this property. {@code IllegalArgumentException}s (typically from a bad
     * setter/field assignment) get a contextual message including property name
     * and expected-vs-actual types; everything else is delegated to the
     * two-argument overload.
     *
     * @param p Parser, for error location information
     * @param e Underlying failure
     * @param value Value that was being assigned when failure occurred
     */
    protected void _throwAsJacksonE(JsonParser p, Throwable e, Object value)
        throws JacksonException
    {
        if (e instanceof IllegalArgumentException) {
            String actType = ClassUtil.classNameOf(value);
            StringBuilder msg = new StringBuilder("Problem deserializing property '")
                    .append(getName())
                    .append("' (expected type: ")
                    .append(getType())
                    .append("; actual type: ")
                    .append(actType).append(")");
            String origMsg = ClassUtil.exceptionMessage(e);
            if (origMsg != null) {
                msg.append(", problem: ")
                        .append(origMsg);
            } else {
                msg.append(" (no error message provided)");
            }
            throw DatabindException.from(p, msg.toString(), e);
        }
        _throwAsJacksonE(p, e);
    }
    /**
     * Helper for rethrowing a failure: passes through {@code Error}s, runtime
     * exceptions and Jackson exceptions as-is; unwraps
     * {@link InvocationTargetException} (wrapper adds no information); and
     * wraps anything else as a {@link DatabindException} with parser location.
     */
    protected void _throwAsJacksonE(JsonParser p, Throwable e) throws JacksonException
    {
        ClassUtil.throwIfError(e);
        ClassUtil.throwIfRTE(e);
        ClassUtil.throwIfJacksonE(e);
        // 10-Apr-2025: [databind#4603] no more unwrapping, retain exception
        //   Throwable th = ClassUtil.getRootCause(e);
        // ... except for InvocationTargetException which we still unwrap as it
        //    adds no value
        if (e instanceof InvocationTargetException ite) {
            Throwable t = ite.getTargetException();
            ClassUtil.throwIfRTE(t);
            ClassUtil.throwIfJacksonE(t);
            throw DatabindException.from(p, ClassUtil.exceptionMessage(t), t);
        }
        throw DatabindException.from(p, ClassUtil.exceptionMessage(e), e);
    }
    // Diagnostic representation only (used in error messages); not parseable
    @Override public String toString() { return "[property '"+getName()+"']"; }
/*
/**********************************************************************
/* Helper classes
/**********************************************************************
*/
/**
* Helper | that |
java | hibernate__hibernate-orm | hibernate-envers/src/main/java/org/hibernate/envers/query/AuditQueryCreator.java | {
"start": 8584,
"end": 11361
} | enum ____ of class {@link org.hibernate.envers.RevisionType})</li>
* </ol>
* Additional conditions that the results must satisfy may be specified.
*
* @param c Class of the entities for which to query.
* @param entityName Name of the entity (if it can't be guessed basing on the {@code c}).
* @param selectEntitiesOnly If true, instead of a list of three-element arrays, a list of entities will be
* returned as a result of executing this query.
* @param selectDeletedEntities If true, also revisions where entities were deleted will be returned. The additional
* entities will have revision type "delete", and contain no data (all fields null), except for the id field.
*
* @return A query for revisions at which instances of the given entity were modified, to which
* conditions can be added (for example - a specific id of an entity of class <code>c</code>), and which
* can then be executed. The results of the query will be sorted in ascending order by the revision number,
* unless an order or projection is added.
*/
public AuditQuery forRevisionsOfEntity(
Class<?> c,
String entityName,
boolean selectEntitiesOnly,
boolean selectDeletedEntities) {
c = getTargetClassIfProxied( c );
checkEntityAudited( entityName );
return new RevisionsOfEntityQuery(
enversService,
auditReaderImplementor,
c,
entityName,
selectEntitiesOnly,
selectDeletedEntities,
false,
false
);
}
/**
* Creates a query that selects the revision entities associated with the specified entity. You may also
* specify whether the revision entities list should include those for deletions of the entity class.
*
* @param clazz Class of the entities for which to query.
* @param selectDeletedEntities If true, the result will include revision entities where deletions occurred.
*
* @return A query of revision entities based on the specified entity class. The results of the query will
* be stored in ascending order by the revision number unless an order is specified.
*
* @since 5.3
*/
@Incubating
public AuditQuery forRevisionsOfEntity(Class<?> clazz, boolean selectDeletedEntities) {
clazz = getTargetClassIfProxied( clazz );
return new RevisionsOfEntityQuery(
enversService,
auditReaderImplementor,
clazz,
false,
selectDeletedEntities,
true,
false
);
}
/**
* Creates a query that selects the revision entities associated with the specified entity. You may also
* specify whether the revision entities list should include those for deletions of the entity class.
*
* @param clazz Class of the entities for which to query.
* @param entityName Name of the entity (for cases where it cannot be guessed based on | instance |
java | apache__dubbo | dubbo-remoting/dubbo-remoting-http12/src/main/java/org/apache/dubbo/remoting/http12/message/ServerSentEvent.java | {
"start": 1243,
"end": 1731
} | class ____ the structure of a Server-Sent Event, which may include:
* <ul>
* <li>An event ID</li>
* <li>An event type</li>
* <li>A retry interval</li>
* <li>A comment</li>
* <li>Data payload</li>
* </ul>
* <p>
* Use the {@link #builder()} method to create instances of this class.
*
* @param <T> the type of data that this event contains
* @see <a href="https://html.spec.whatwg.org/multipage/server-sent-events.html">Server-Sent Events</a>
*/
public final | encapsulates |
java | spring-projects__spring-framework | spring-webmvc/src/main/java/org/springframework/web/servlet/mvc/method/annotation/ResponseEntityExceptionHandler.java | {
"start": 27410,
"end": 30003
} | class ____ to, for
* common handling, and for the creation of a {@link ResponseEntity}.
* <p>The default implementation does the following:
* <ul>
* <li>return {@code null} if response is already committed
* <li>set the {@code "jakarta.servlet.error.exception"} request attribute
* if the response status is 500 (INTERNAL_SERVER_ERROR).
* <li>extract the {@link ErrorResponse#getBody() body} from
* {@link ErrorResponse} exceptions, if the {@code body} is {@code null}.
* </ul>
* @param ex the exception to handle
* @param body the body to use for the response
* @param headers the headers to use for the response
* @param statusCode the status code to use for the response
* @param request the current request
* @return a {@code ResponseEntity} for the response to use, possibly
* {@code null} when the response is already committed
*/
protected @Nullable ResponseEntity<Object> handleExceptionInternal(
Exception ex, @Nullable Object body, HttpHeaders headers, HttpStatusCode statusCode, WebRequest request) {
if (request instanceof ServletWebRequest servletWebRequest) {
HttpServletResponse response = servletWebRequest.getResponse();
if (response != null && response.isCommitted()) {
if (logger.isWarnEnabled()) {
logger.warn("Response already committed. Ignoring: " + ex);
}
return null;
}
}
if (body == null && ex instanceof ErrorResponse errorResponse) {
body = errorResponse.updateAndGetBody(this.messageSource, LocaleContextHolder.getLocale());
}
if (statusCode.equals(HttpStatus.INTERNAL_SERVER_ERROR) && body == null) {
request.setAttribute(WebUtils.ERROR_EXCEPTION_ATTRIBUTE, ex, WebRequest.SCOPE_REQUEST);
}
return createResponseEntity(body, headers, statusCode, request);
}
/**
* Create the {@link ResponseEntity} to use from the given body, headers,
* and statusCode. Subclasses can override this method to inspect and possibly
* modify the body, headers, or statusCode, for example, to re-create an instance of
* {@link ProblemDetail} as an extension of {@link ProblemDetail}.
* @param body the body to use for the response
* @param headers the headers to use for the response
* @param statusCode the status code to use for the response
* @param request the current request
* @return the {@code ResponseEntity} instance to use
* @since 6.0
*/
protected ResponseEntity<Object> createResponseEntity(
@Nullable Object body, HttpHeaders headers, HttpStatusCode statusCode, WebRequest request) {
return new ResponseEntity<>(body, headers, statusCode);
}
}
| delegate |
java | elastic__elasticsearch | modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java | {
"start": 3648,
"end": 11537
} | class ____ extends ESMockAPIBasedRepositoryIntegTestCase {
@Override
protected String repositoryType() {
return GoogleCloudStorageRepository.TYPE;
}
@Override
protected Settings repositorySettings(String repoName) {
Settings.Builder settingsBuilder = Settings.builder()
.put(super.repositorySettings(repoName))
.put(BUCKET.getKey(), "bucket")
.put(CLIENT_NAME.getKey(), "test");
if (randomBoolean()) {
settingsBuilder.put(BASE_PATH.getKey(), randomFrom("test", "test/1"));
}
return settingsBuilder.build();
}
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return Collections.singletonList(TestGoogleCloudStoragePlugin.class);
}
@Override
protected Map<String, HttpHandler> createHttpHandlers() {
return Map.of(
"/",
new GoogleCloudStorageStatsCollectorHttpHandler(new GoogleCloudStorageBlobStoreHttpHandler("bucket")),
"/token",
new FakeOAuth2HttpHandler()
);
}
@Override
protected HttpHandler createErroneousHttpHandler(final HttpHandler delegate) {
if (delegate instanceof FakeOAuth2HttpHandler) {
return new GoogleErroneousHttpHandler(delegate, randomIntBetween(2, 3));
} else {
return new GoogleCloudStorageStatsCollectorHttpHandler(new GoogleErroneousHttpHandler(delegate, randomIntBetween(2, 3)));
}
}
@Override
protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
final Settings.Builder settings = Settings.builder();
settings.put(super.nodeSettings(nodeOrdinal, otherSettings));
settings.put(ENDPOINT_SETTING.getConcreteSettingForNamespace("test").getKey(), httpServerUrl());
settings.put(TOKEN_URI_SETTING.getConcreteSettingForNamespace("test").getKey(), httpServerUrl() + "/token");
final MockSecureSettings secureSettings = new MockSecureSettings();
final byte[] serviceAccount = TestUtils.createServiceAccount(random());
secureSettings.setFile(CREDENTIALS_FILE_SETTING.getConcreteSettingForNamespace("test").getKey(), serviceAccount);
settings.setSecureSettings(secureSettings);
return settings.build();
}
public void testDeleteSingleItem() throws IOException {
final String repoName = createRepository(randomRepositoryName());
final RepositoriesService repositoriesService = internalCluster().getAnyMasterNodeInstance(RepositoriesService.class);
final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(repoName);
repository.blobStore()
.blobContainer(repository.basePath())
.deleteBlobsIgnoringIfNotExists(randomPurpose(), Iterators.single("foo"));
}
public void testChunkSize() {
// default chunk size
RepositoryMetadata repositoryMetadata = new RepositoryMetadata("repo", GoogleCloudStorageRepository.TYPE, Settings.EMPTY);
ByteSizeValue chunkSize = GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repositoryMetadata);
assertEquals(GoogleCloudStorageRepository.MAX_CHUNK_SIZE, chunkSize);
// chunk size in settings
final int size = randomIntBetween(1, 100);
repositoryMetadata = new RepositoryMetadata(
"repo",
GoogleCloudStorageRepository.TYPE,
Settings.builder().put("chunk_size", size + "mb").build()
);
chunkSize = GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repositoryMetadata);
assertEquals(ByteSizeValue.of(size, ByteSizeUnit.MB), chunkSize);
// zero bytes is not allowed
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> {
final RepositoryMetadata repoMetadata = new RepositoryMetadata(
"repo",
GoogleCloudStorageRepository.TYPE,
Settings.builder().put("chunk_size", "0").build()
);
GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repoMetadata);
});
assertEquals("failed to parse value [0] for setting [chunk_size], must be >= [1b]", e.getMessage());
// negative bytes not allowed
e = expectThrows(IllegalArgumentException.class, () -> {
final RepositoryMetadata repoMetadata = new RepositoryMetadata(
"repo",
GoogleCloudStorageRepository.TYPE,
Settings.builder().put("chunk_size", "-1").build()
);
GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repoMetadata);
});
assertEquals("failed to parse value [-1] for setting [chunk_size], must be >= [1b]", e.getMessage());
// greater than max chunk size not allowed
e = expectThrows(IllegalArgumentException.class, () -> {
final RepositoryMetadata repoMetadata = new RepositoryMetadata(
"repo",
GoogleCloudStorageRepository.TYPE,
Settings.builder().put("chunk_size", "6tb").build()
);
GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repoMetadata);
});
assertEquals("failed to parse value [6tb] for setting [chunk_size], must be <= [5tb]", e.getMessage());
}
public void testWriteReadLarge() throws IOException {
try (BlobStore store = newBlobStore()) {
final BlobContainer container = store.blobContainer(BlobPath.EMPTY);
byte[] data = randomBytes(GoogleCloudStorageBlobStore.LARGE_BLOB_THRESHOLD_BYTE_SIZE + 1);
writeBlob(container, "foobar", new BytesArray(data), randomBoolean());
if (randomBoolean()) {
// override file, to check if we get latest contents
random().nextBytes(data);
writeBlob(container, "foobar", new BytesArray(data), false);
}
try (InputStream stream = container.readBlob(randomPurpose(), "foobar")) {
BytesRefBuilder target = new BytesRefBuilder();
while (target.length() < data.length) {
byte[] buffer = new byte[scaledRandomIntBetween(1, data.length - target.length())];
int offset = scaledRandomIntBetween(0, buffer.length - 1);
int read = stream.read(buffer, offset, buffer.length - offset);
target.append(new BytesRef(buffer, offset, read));
}
assertEquals(data.length, target.length());
assertArrayEquals(data, Arrays.copyOfRange(target.bytes(), 0, target.length()));
}
container.delete(randomPurpose());
}
}
public void testWriteFileMultipleOfChunkSize() throws IOException {
final int uploadSize = randomIntBetween(2, 4) * GoogleCloudStorageBlobStore.SDK_DEFAULT_CHUNK_SIZE;
try (BlobStore store = newBlobStore()) {
final BlobContainer container = store.blobContainer(BlobPath.EMPTY);
final String key = randomIdentifier();
byte[] initialValue = randomByteArrayOfLength(uploadSize);
container.writeBlob(randomPurpose(), key, new BytesArray(initialValue), true);
BytesReference reference = readFully(container.readBlob(randomPurpose(), key));
assertThat(reference, equalBytes(new BytesArray(initialValue)));
container.deleteBlobsIgnoringIfNotExists(randomPurpose(), Iterators.single(key));
}
}
@Override
public void testRequestStats() throws Exception {
super.testRequestStats();
}
public static | GoogleCloudStorageBlobStoreRepositoryTests |
java | elastic__elasticsearch | x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestDeleteCCMConfigurationAction.java | {
"start": 1162,
"end": 2181
} | class ____ extends BaseRestHandler {
private final CCMFeature ccmFeature;
public RestDeleteCCMConfigurationAction(CCMFeature ccmFeature) {
this.ccmFeature = Objects.requireNonNull(ccmFeature);
}
@Override
public String getName() {
return "delete_inference_ccm_configuration_action";
}
@Override
public List<Route> routes() {
return List.of(new Route(DELETE, INFERENCE_CCM_PATH));
}
@Override
protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException {
if (ccmFeature.isCcmSupportedEnvironment() == false) {
throw CCM_FORBIDDEN_EXCEPTION;
}
return channel -> client.execute(
DeleteCCMConfigurationAction.INSTANCE,
new DeleteCCMConfigurationAction.Request(RestUtils.getMasterNodeTimeout(restRequest), RestUtils.getAckTimeout(restRequest)),
new RestToXContentListener<>(channel)
);
}
}
| RestDeleteCCMConfigurationAction |
java | apache__camel | components/camel-kafka/src/main/java/org/apache/camel/component/kafka/consumer/support/KafkaRecordProcessorFacade.java | {
"start": 1018,
"end": 1399
} | interface ____ {
/**
* Sends a set of records polled from Kafka for processing
*
* @param allRecords All records received from a call to the Kafka's consumer poll method
* @return The result of processing this set of records
*/
ProcessingResult processPolledRecords(ConsumerRecords<Object, Object> allRecords);
}
| KafkaRecordProcessorFacade |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/operators/maybe/MaybeDoOnTerminate.java | {
"start": 851,
"end": 1274
} | class ____<T> extends Maybe<T> {
final MaybeSource<T> source;
final Action onTerminate;
public MaybeDoOnTerminate(MaybeSource<T> source, Action onTerminate) {
this.source = source;
this.onTerminate = onTerminate;
}
@Override
protected void subscribeActual(MaybeObserver<? super T> observer) {
source.subscribe(new DoOnTerminate(observer));
}
final | MaybeDoOnTerminate |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/sql/dialect/oracle/ast/stmt/OracleAlterTableItem.java | {
"start": 820,
"end": 908
} | class ____ extends OracleSQLObjectImpl implements SQLAlterTableItem {
}
| OracleAlterTableItem |
java | google__guava | guava/src/com/google/common/collect/ImmutableMultimap.java | {
"start": 22108,
"end": 25646
} | class ____<K, V> extends ImmutableCollection<Entry<K, V>> {
@Weak final ImmutableMultimap<K, V> multimap;
EntryCollection(ImmutableMultimap<K, V> multimap) {
this.multimap = multimap;
}
@Override
public UnmodifiableIterator<Entry<K, V>> iterator() {
return multimap.entryIterator();
}
@Override
boolean isPartialView() {
return multimap.isPartialView();
}
@Override
public int size() {
return multimap.size();
}
@Override
public boolean contains(@Nullable Object object) {
if (object instanceof Entry) {
Entry<?, ?> entry = (Entry<?, ?>) object;
return multimap.containsEntry(entry.getKey(), entry.getValue());
}
return false;
}
// redeclare to help optimizers with b/310253115
@SuppressWarnings("RedundantOverride")
@Override
@J2ktIncompatible
@GwtIncompatible
Object writeReplace() {
return super.writeReplace();
}
@GwtIncompatible @J2ktIncompatible private static final long serialVersionUID = 0;
}
@Override
UnmodifiableIterator<Entry<K, V>> entryIterator() {
return new UnmodifiableIterator<Entry<K, V>>() {
final Iterator<? extends Entry<K, ? extends ImmutableCollection<V>>> asMapItr =
map.entrySet().iterator();
@Nullable K currentKey = null;
Iterator<V> valueItr = emptyIterator();
@Override
public boolean hasNext() {
return valueItr.hasNext() || asMapItr.hasNext();
}
@Override
public Entry<K, V> next() {
if (!valueItr.hasNext()) {
Entry<K, ? extends ImmutableCollection<V>> entry = asMapItr.next();
currentKey = entry.getKey();
valueItr = entry.getValue().iterator();
}
/*
* requireNonNull is safe: The first call to this method always enters the !hasNext() case
* and populates currentKey, after which it's never cleared.
*/
return immutableEntry(requireNonNull(currentKey), valueItr.next());
}
};
}
@Override
@GwtIncompatible("Spliterator")
Spliterator<Entry<K, V>> entrySpliterator() {
return CollectSpliterators.flatMap(
asMap().entrySet().spliterator(),
keyToValueCollectionEntry -> {
K key = keyToValueCollectionEntry.getKey();
Collection<V> valueCollection = keyToValueCollectionEntry.getValue();
return CollectSpliterators.map(
valueCollection.spliterator(),
Spliterator.ORDERED | Spliterator.NONNULL | Spliterator.IMMUTABLE,
(V value) -> immutableEntry(key, value));
},
Spliterator.SIZED | (this instanceof SetMultimap ? Spliterator.DISTINCT : 0),
size());
}
@Override
public void forEach(BiConsumer<? super K, ? super V> action) {
checkNotNull(action);
asMap()
.forEach(
(key, valueCollection) -> valueCollection.forEach(value -> action.accept(key, value)));
}
/**
* Returns an immutable multiset containing all the keys in this multimap, in the same order and
* with the same frequencies as they appear in this multimap; to get only a single occurrence of
* each key, use {@link #keySet}.
*/
@Override
public ImmutableMultiset<K> keys() {
return (ImmutableMultiset<K>) super.keys();
}
@Override
ImmutableMultiset<K> createKeys() {
return new Keys();
}
@SuppressWarnings("serial") // Uses writeReplace, not default serialization
@WeakOuter
private final | EntryCollection |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistanceCartesianPointDocValuesAndConstantEvaluator.java | {
"start": 3547,
"end": 4330
} | class ____ implements EvalOperator.ExpressionEvaluator.Factory {
private final Source source;
private final EvalOperator.ExpressionEvaluator.Factory left;
private final Point right;
public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory left, Point right) {
this.source = source;
this.left = left;
this.right = right;
}
@Override
public StDistanceCartesianPointDocValuesAndConstantEvaluator get(DriverContext context) {
return new StDistanceCartesianPointDocValuesAndConstantEvaluator(source, left.get(context), right, context);
}
@Override
public String toString() {
return "StDistanceCartesianPointDocValuesAndConstantEvaluator[" + "left=" + left + ", right=" + right + "]";
}
}
}
| Factory |
java | spring-projects__spring-boot | module/spring-boot-liquibase/src/test/java/org/springframework/boot/liquibase/endpoint/LiquibaseEndpointTests.java | {
"start": 7509,
"end": 7693
} | class ____ {
@Bean
LiquibaseEndpoint endpoint(ApplicationContext context) {
return new LiquibaseEndpoint(context);
}
}
@Configuration(proxyBeanMethods = false)
static | Config |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.