language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
quarkusio__quarkus
|
integration-tests/maven/src/test/resources-filtered/projects/classic-no-build/src/main/java/org/acme/HelloResource.java
|
{
"start": 257,
"end": 1339
}
|
class ____ {
@ConfigProperty(name = "greeting")
String greeting;
@ConfigProperty(name = "quarkus.application.version")
String applicationVersion;
@ConfigProperty(name = "quarkus.application.name")
String applicationName;
@ConfigProperty(name = "other.greeting", defaultValue = "other")
String otherGreeting;
@GET
@Produces(MediaType.TEXT_PLAIN)
public String hello() {
return "hello";
}
@GET
@Path("/greeting")
@Produces(MediaType.TEXT_PLAIN)
public String greeting() {
return greeting;
}
@GET
@Path("/package")
@Produces(MediaType.TEXT_PLAIN)
public String pkg() {
return Blah.class.getPackage().getName();
}
@GET
@Path("/nameAndVersion")
@Produces(MediaType.TEXT_PLAIN)
public String nameAndVersion() {
return applicationName + "/" + applicationVersion;
}
@GET
@Path("/otherGreeting")
@Produces(MediaType.TEXT_PLAIN)
public String otherGreeting() {
return otherGreeting;
}
public static
|
HelloResource
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/webmonitor/stats/TaskStatsRequestCoordinator.java
|
{
"start": 1982,
"end": 7733
}
|
class ____<T, V> {
protected final Logger log = LoggerFactory.getLogger(getClass());
protected static final int NUM_GHOST_SAMPLE_IDS = 10;
protected final Object lock = new Object();
/** Executor used to run the futures. */
protected final Executor executor;
/** Request time out of the triggered tasks stats request. */
protected final Duration requestTimeout;
/** In progress samples. */
@GuardedBy("lock")
protected final Map<Integer, PendingStatsRequest<T, V>> pendingRequests = new HashMap<>();
/** A list of recent request IDs to identify late messages vs. invalid ones. */
@GuardedBy("lock")
protected final ArrayDeque<Integer> recentPendingRequestIds =
new ArrayDeque<>(NUM_GHOST_SAMPLE_IDS);
/** Sample ID counter. */
@GuardedBy("lock")
protected int requestIdCounter;
/** Flag indicating whether the coordinator is still running. */
@GuardedBy("lock")
protected boolean isShutDown;
/**
* Creates a new coordinator for the cluster.
*
* @param executor Used to execute the futures.
* @param requestTimeout Request time out of the triggered tasks stats request.
*/
public TaskStatsRequestCoordinator(Executor executor, Duration requestTimeout) {
checkNotNull(requestTimeout, "The request timeout must cannot be null.");
checkArgument(requestTimeout.toMillis() >= 0L, "The request timeout must be non-negative.");
this.executor = checkNotNull(executor);
this.requestTimeout = requestTimeout;
}
/**
* Handles the failed stats response by canceling the corresponding unfinished pending request.
*
* @param requestId ID of the request to cancel.
* @param cause Cause of the cancelling (can be <code>null</code>).
*/
public void handleFailedResponse(int requestId, @Nullable Throwable cause) {
synchronized (lock) {
if (isShutDown) {
return;
}
PendingStatsRequest<T, V> pendingRequest = pendingRequests.remove(requestId);
if (pendingRequest != null) {
log.info("Cancelling request {}", requestId, cause);
pendingRequest.discard(cause);
rememberRecentRequestId(requestId);
}
}
}
/**
* Shuts down the coordinator.
*
* <p>After shut down, no further operations are executed.
*/
public void shutDown() {
synchronized (lock) {
if (!isShutDown) {
log.info("Shutting down task stats request coordinator.");
for (PendingStatsRequest<T, V> pending : pendingRequests.values()) {
pending.discard(new RuntimeException("Shut down"));
}
pendingRequests.clear();
recentPendingRequestIds.clear();
isShutDown = true;
}
}
}
/**
* Handles the successfully returned tasks stats response by collecting the corresponding
* subtask samples.
*
* @param requestId ID of the request.
* @param executionIds ID of the sampled task.
* @param result Result of stats request returned by an individual task.
* @throws IllegalStateException If unknown request ID and not recently finished or cancelled
* sample.
*/
public void handleSuccessfulResponse(
int requestId, ImmutableSet<ExecutionAttemptID> executionIds, T result) {
synchronized (lock) {
if (isShutDown) {
return;
}
final String ids =
executionIds.stream()
.map(ExecutionAttemptID::toString)
.collect(Collectors.joining(", "));
if (log.isDebugEnabled()) {
log.debug("Collecting stats sample {} of tasks {}", requestId, ids);
}
PendingStatsRequest<T, V> pending = pendingRequests.get(requestId);
if (pending != null) {
pending.collectTaskStats(executionIds, result);
// Publish the sample
if (pending.isComplete()) {
pendingRequests.remove(requestId);
rememberRecentRequestId(requestId);
pending.completePromiseAndDiscard();
}
} else if (recentPendingRequestIds.contains(requestId)) {
if (log.isDebugEnabled()) {
log.debug("Received late stats sample {} of tasks {}", requestId, ids);
}
} else {
if (log.isDebugEnabled()) {
log.debug(String.format("Unknown request ID %d.", requestId));
}
}
}
}
private void rememberRecentRequestId(int sampleId) {
if (recentPendingRequestIds.size() >= NUM_GHOST_SAMPLE_IDS) {
recentPendingRequestIds.removeFirst();
}
recentPendingRequestIds.addLast(sampleId);
}
@VisibleForTesting
public int getNumberOfPendingRequests() {
synchronized (lock) {
return pendingRequests.size();
}
}
// ------------------------------------------------------------------------
/**
* A pending task stats request, which collects samples from individual tasks and completes the
* response future upon gathering all of of them.
*
* <p>Has to be accessed in lock scope.
*
* @param <T> Type of the result collected from tasks.
* @param <V> Type of the result assembled and returned when all tasks where sampled.
*/
@NotThreadSafe
protected abstract static
|
TaskStatsRequestCoordinator
|
java
|
google__auto
|
value/src/it/functional/src/test/java/com/google/auto/value/AutoBuilderKotlinTest.java
|
{
"start": 7028,
"end": 8989
}
|
interface ____ {
static KotlinDataSomeDefaultsBuilder builder() {
return new AutoBuilder_AutoBuilderKotlinTest_KotlinDataSomeDefaultsBuilder();
}
static KotlinDataSomeDefaultsBuilder fromInstance(KotlinDataSomeDefaults instance) {
return new AutoBuilder_AutoBuilderKotlinTest_KotlinDataSomeDefaultsBuilder(instance);
}
KotlinDataSomeDefaultsBuilder requiredInt(int x);
KotlinDataSomeDefaultsBuilder requiredString(String x);
KotlinDataSomeDefaultsBuilder optionalInt(int x);
KotlinDataSomeDefaultsBuilder optionalString(String x);
KotlinDataSomeDefaults build();
}
@Test
public void kotlinSomeDefaults_someDefaulted() {
KotlinDataSomeDefaults someDefaulted =
KotlinDataSomeDefaultsBuilder.builder().requiredInt(12).requiredString("Monkeys").build();
assertThat(someDefaulted.getOptionalInt()).isEqualTo(23);
assertThat(someDefaulted.getOptionalString()).isEqualTo("Skidoo");
assertThat(KotlinDataSomeDefaultsBuilder.fromInstance(someDefaulted).build())
.isEqualTo(someDefaulted);
}
@Test
public void kotlinSomeDefaults_noneDefaulted() {
KotlinDataSomeDefaults noneDefaulted =
KotlinDataSomeDefaultsBuilder.builder()
.requiredInt(12)
.requiredString("Monkeys")
.optionalInt(3)
.optionalString("Oranges")
.build();
KotlinDataSomeDefaults copy = KotlinDataSomeDefaultsBuilder.fromInstance(noneDefaulted).build();
assertThat(copy).isEqualTo(noneDefaulted);
}
@Test
public void kotlinSomeDefaults_missingRequired() {
IllegalStateException e =
assertThrows(
IllegalStateException.class, () -> KotlinDataSomeDefaultsBuilder.builder().build());
assertThat(e).hasMessageThat().contains("requiredInt");
assertThat(e).hasMessageThat().contains("requiredString");
}
@AutoBuilder(ofClass = KotlinDataSomeDefaultsBig.class)
|
KotlinDataSomeDefaultsBuilder
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailureBase.java
|
{
"start": 2740,
"end": 14983
}
|
class ____ {
public static final Logger LOG = LoggerFactory.getLogger(
TestDFSStripedOutputStreamWithFailureBase.class);
static {
GenericTestUtils.setLogLevel(DFSOutputStream.LOG, Level.TRACE);
GenericTestUtils.setLogLevel(DataStreamer.LOG, Level.TRACE);
GenericTestUtils.setLogLevel(DFSClient.LOG, Level.TRACE);
GenericTestUtils.setLogLevel(
LoggerFactory.getLogger(BlockPlacementPolicy.class), Level.TRACE);
}
protected final int cellSize = 64 * 1024; // 8k
protected final int stripesPerBlock = 4;
protected ErasureCodingPolicy ecPolicy;
protected int dataBlocks;
protected int parityBlocks;
protected int blockSize;
protected int blockGroupSize;
private int[][] dnIndexSuite;
protected List<Integer> lengths;
protected static final Random RANDOM = new Random();
MiniDFSCluster cluster;
DistributedFileSystem dfs;
final Path dir = new Path("/"
+ TestDFSStripedOutputStreamWithFailureBase.class.getSimpleName());
protected static final int FLUSH_POS =
9 * DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT + 1;
public ECSchema getEcSchema() {
return StripedFileTestUtil.getDefaultECPolicy().getSchema();
}
/*
* Initialize erasure coding policy.
*/
@BeforeEach
public void init() {
ecPolicy = new ErasureCodingPolicy(getEcSchema(), cellSize);
dataBlocks = ecPolicy.getNumDataUnits();
parityBlocks = ecPolicy.getNumParityUnits();
blockSize = cellSize * stripesPerBlock;
blockGroupSize = blockSize * dataBlocks;
dnIndexSuite = getDnIndexSuite();
lengths = newLengths();
}
List<Integer> newLengths() {
final List<Integer> lens = new ArrayList<>();
lens.add(FLUSH_POS + 2);
for(int b = 0; b <= 2; b++) {
for(int c = 0; c < stripesPerBlock * dataBlocks; c++) {
for(int delta = -1; delta <= 1; delta++) {
final int length = b * blockGroupSize + c * cellSize + delta;
System.out.println(lens.size() + ": length=" + length
+ ", (b, c, d) = (" + b + ", " + c + ", " + delta + ")");
lens.add(length);
}
}
}
return lens;
}
private int[][] getDnIndexSuite() {
final int maxNumLevel = 2;
final int maxPerLevel = 5;
List<List<Integer>> allLists = new ArrayList<>();
int numIndex = parityBlocks;
for (int i = 0; i < maxNumLevel && numIndex > 1; i++) {
List<List<Integer>> lists =
combinations(dataBlocks + parityBlocks, numIndex);
if (lists.size() > maxPerLevel) {
Collections.shuffle(lists);
lists = lists.subList(0, maxPerLevel);
}
allLists.addAll(lists);
numIndex--;
}
int[][] dnIndexArray = new int[allLists.size()][];
for (int i = 0; i < dnIndexArray.length; i++) {
int[] list = new int[allLists.get(i).size()];
for (int j = 0; j < list.length; j++) {
list[j] = allLists.get(i).get(j);
}
dnIndexArray[i] = list;
}
return dnIndexArray;
}
// get all combinations of k integers from {0,...,n-1}
private static List<List<Integer>> combinations(int n, int k) {
List<List<Integer>> res = new LinkedList<List<Integer>>();
if (k >= 1 && n >= k) {
getComb(n, k, new Stack<Integer>(), res);
}
return res;
}
private static void getComb(int n, int k, Stack<Integer> stack,
List<List<Integer>> res) {
if (stack.size() == k) {
List<Integer> list = new ArrayList<Integer>(stack);
res.add(list);
} else {
int next = stack.empty() ? 0 : stack.peek() + 1;
while (next < n) {
stack.push(next);
getComb(n, k, stack, res);
next++;
}
}
if (!stack.empty()) {
stack.pop();
}
}
int[] getKillPositions(int fileLen, int num) {
int[] positions = new int[num];
for (int i = 0; i < num; i++) {
positions[i] = fileLen * (i + 1) / (num + 1);
}
return positions;
}
Integer getLength(int i) {
return i >= 0 && i < lengths.size() ? lengths.get(i): null;
}
void setup(Configuration conf) throws IOException {
System.out.println("NUM_DATA_BLOCKS = " + dataBlocks);
System.out.println("NUM_PARITY_BLOCKS= " + parityBlocks);
System.out.println("CELL_SIZE = " + cellSize + " (=" +
StringUtils.TraditionalBinaryPrefix.long2String(cellSize, "B", 2)
+ ")");
System.out.println("BLOCK_SIZE = " + blockSize + " (=" +
StringUtils.TraditionalBinaryPrefix.long2String(blockSize, "B", 2)
+ ")");
System.out.println("BLOCK_GROUP_SIZE = " + blockGroupSize + " (=" +
StringUtils.TraditionalBinaryPrefix.long2String(blockGroupSize, "B", 2)
+ ")");
final int numDNs = dataBlocks + parityBlocks;
if (ErasureCodeNative.isNativeCodeLoaded()) {
conf.set(
CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODERS_KEY,
NativeRSRawErasureCoderFactory.CODER_NAME);
}
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
cluster.waitActive();
dfs = cluster.getFileSystem();
AddErasureCodingPolicyResponse[] res =
dfs.addErasureCodingPolicies(new ErasureCodingPolicy[]{ecPolicy});
ecPolicy = res[0].getPolicy();
dfs.enableErasureCodingPolicy(ecPolicy.getName());
DFSTestUtil.enableAllECPolicies(dfs);
dfs.mkdirs(dir);
dfs.setErasureCodingPolicy(dir, ecPolicy.getName());
}
void tearDown() {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
HdfsConfiguration newHdfsConfiguration() {
final HdfsConfiguration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
false);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
return conf;
}
void runTest(final int length) {
final HdfsConfiguration conf = newHdfsConfiguration();
for (int dn = 0; dn < dataBlocks + parityBlocks; dn++) {
try {
LOG.info("runTest: dn=" + dn + ", length=" + length);
setup(conf);
runTest(length, new int[]{length / 2}, new int[]{dn}, false);
} catch (Throwable e) {
final String err = "failed, dn=" + dn + ", length=" + length
+ StringUtils.stringifyException(e);
LOG.error(err);
fail(err);
} finally {
tearDown();
}
}
}
void runTestWithMultipleFailure(final int length) throws Exception {
final HdfsConfiguration conf = newHdfsConfiguration();
for (int[] dnIndex : dnIndexSuite) {
int[] killPos = getKillPositions(length, dnIndex.length);
try {
LOG.info("runTestWithMultipleFailure: length==" + length + ", killPos="
+ Arrays.toString(killPos) + ", dnIndex="
+ Arrays.toString(dnIndex));
setup(conf);
runTest(length, killPos, dnIndex, false);
} catch (Throwable e) {
final String err = "failed, killPos=" + Arrays.toString(killPos)
+ ", dnIndex=" + Arrays.toString(dnIndex) + ", length=" + length;
LOG.error(err);
throw e;
} finally {
tearDown();
}
}
}
/**
* runTest implementation.
* @param length file length
* @param killPos killing positions in ascending order
* @param dnIndex DN index to kill when meets killing positions
* @param tokenExpire wait token to expire when kill a DN
* @throws Exception
*/
void runTest(final int length, final int[] killPos,
final int[] dnIndex, final boolean tokenExpire) throws Exception {
if (killPos[0] <= FLUSH_POS) {
LOG.warn("killPos=" + Arrays.toString(killPos) + " <= FLUSH_POS="
+ FLUSH_POS + ", length=" + length + ", dnIndex="
+ Arrays.toString(dnIndex));
return; //skip test
}
Preconditions.checkArgument(length > killPos[0], "length=%s <= killPos=%s",
length, killPos);
Preconditions.checkArgument(killPos.length == dnIndex.length);
final Path p = new Path(dir, "dn" + Arrays.toString(dnIndex)
+ "len" + length + "kill" + Arrays.toString(killPos));
final String fullPath = p.toString();
LOG.info("fullPath=" + fullPath);
if (tokenExpire) {
final NameNode nn = cluster.getNameNode();
final BlockManager bm = nn.getNamesystem().getBlockManager();
final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
// set a short token lifetime (6 second)
SecurityTestUtil.setBlockTokenLifetime(sm, 6000L);
}
final AtomicInteger pos = new AtomicInteger();
final FSDataOutputStream out = dfs.create(p);
final DFSStripedOutputStream stripedOut
= (DFSStripedOutputStream)out.getWrappedStream();
// first GS of this block group which never proceeds blockRecovery
long firstGS = -1;
long oldGS = -1; // the old GS before bumping
List<Long> gsList = new ArrayList<>();
final List<DatanodeInfo> killedDN = new ArrayList<>();
int numKilled = 0;
for(; pos.get() < length;) {
final int i = pos.getAndIncrement();
if (numKilled < killPos.length && i == killPos[numKilled]) {
assertTrue(firstGS != -1);
final long gs = getGenerationStamp(stripedOut);
if (numKilled == 0) {
assertEquals(firstGS, gs);
} else {
//TODO: implement hflush/hsync and verify gs strict greater than oldGS
assertTrue(gs >= oldGS);
}
oldGS = gs;
if (tokenExpire) {
DFSTestUtil.flushInternal(stripedOut);
waitTokenExpires(out);
}
killedDN.add(
killDatanode(cluster, stripedOut, dnIndex[numKilled], pos));
numKilled++;
}
write(out, i);
if (i % blockGroupSize == FLUSH_POS) {
firstGS = getGenerationStamp(stripedOut);
oldGS = firstGS;
}
if (i > 0 && (i + 1) % blockGroupSize == 0) {
gsList.add(oldGS);
}
}
gsList.add(oldGS);
out.close();
assertEquals(dnIndex.length, numKilled);
StripedFileTestUtil.waitBlockGroupsReported(dfs, fullPath, numKilled);
cluster.triggerBlockReports();
StripedFileTestUtil.checkData(dfs, p, length, killedDN, gsList,
blockGroupSize);
}
static void write(FSDataOutputStream out, int i) throws IOException {
try {
out.write(StripedFileTestUtil.getByte(i));
} catch(IOException ioe) {
throw new IOException("Failed at i=" + i, ioe);
}
}
static long getGenerationStamp(DFSStripedOutputStream out)
throws IOException {
final long gs = out.getBlock().getGenerationStamp();
LOG.info("getGenerationStamp returns " + gs);
return gs;
}
static DatanodeInfo getDatanodes(StripedDataStreamer streamer) {
for(;;) {
DatanodeInfo[] datanodes = streamer.getNodes();
if (datanodes == null) {
// try peeking following block.
final LocatedBlock lb = streamer.peekFollowingBlock();
if (lb != null) {
datanodes = lb.getLocations();
}
}
if (datanodes != null) {
assertEquals(1, datanodes.length);
assertNotNull(datanodes[0]);
return datanodes[0];
}
try {
Thread.sleep(100);
} catch (InterruptedException ie) {
fail(StringUtils.stringifyException(ie));
return null;
}
}
}
static DatanodeInfo killDatanode(MiniDFSCluster cluster,
DFSStripedOutputStream out, final int dnIndex, final AtomicInteger pos) {
final StripedDataStreamer s = out.getStripedDataStreamer(dnIndex);
final DatanodeInfo datanode = getDatanodes(s);
LOG.info("killDatanode " + dnIndex + ": " + datanode + ", pos=" + pos);
if (datanode != null) {
cluster.stopDataNode(datanode.getXferAddr());
}
return datanode;
}
private void waitTokenExpires(FSDataOutputStream out) throws IOException {
Token<BlockTokenIdentifier> token = DFSTestUtil.getBlockToken(out);
while (!SecurityTestUtil.isBlockTokenExpired(token)) {
try {
Thread.sleep(10);
} catch (InterruptedException ignored) {
}
}
}
}
|
TestDFSStripedOutputStreamWithFailureBase
|
java
|
quarkusio__quarkus
|
extensions/vertx-http/runtime/src/main/java/io/quarkus/vertx/http/security/CSRF.java
|
{
"start": 934,
"end": 4798
}
|
interface ____ {
/**
* Form field name which keeps a CSRF token. The default field name is "csrf-token".
*
* @param formFieldName form field name
* @return this builder
*/
Builder formFieldName(String formFieldName);
/**
* The token header name which can provide a CSRF token. The default name is "X-CSRF-TOKEN".
*
* @param tokenHeaderName the CSRF token header name
* @return this builder
*/
Builder tokenHeaderName(String tokenHeaderName);
/**
* The CSRF cookie name. The default name is "csrf-token".
*
* @param cookieName the CSRF cookie name
* @return this builder
*/
Builder cookieName(String cookieName);
/**
* The CSRF cookie max age. The default max age are 2 hours.
*
* @param cookieMaxAge the CSRF cookie max age
* @return this builder
*/
Builder cookieMaxAge(Duration cookieMaxAge);
/**
* The CSRF cookie path. The default path is "/".
*
* @param cookiePath the CSRF cookie path
* @return this builder
*/
Builder cookiePath(String cookiePath);
/**
* The CSRF cookie domain.
*
* @param cookieDomain the CSRF cookie domain
* @return this builder
*/
Builder cookieDomain(String cookieDomain);
/**
* Set the 'secure' parameter on the CSRF cookie to 'true' when the HTTP protocol is used.
* The cookie will always be secure if the HTTPS protocol is used, even if this method is not called.
*
* @return this builder
*/
Builder cookieForceSecure();
/**
* Set the HttpOnly attribute to prevent access to the cookie via JavaScript.
* The HttpOnly attribute is set by default.
*
* @param cookieHttpOnly if the HttpOnly attribute should be set
* @return this builder
*/
Builder cookieHttpOnly(boolean cookieHttpOnly);
/**
* This method is a shortcut for {@code createTokenPath(Set.of(createTokenPath))}.
*
* @return this builder
* @see #createTokenPath(Set) for more information
*/
Builder createTokenPath(String createTokenPath);
/**
* Create CSRF token only if the HTTP GET relative request path matches one of configured paths.
*
* @param createTokenPath list of the HTTP GET requests paths for which Quarkus should create a token
* @return this builder
*/
Builder createTokenPath(Set<String> createTokenPath);
/**
* Random CSRF token size in bytes. The default size in bytes is 16.
*
* @param tokenSize the token size in bytes
* @return this builder
*/
Builder tokenSize(int tokenSize);
/**
* The CSRF token signature key.
*
* @param tokenSignatureKey the CSRF token signature key
* @return this builder
*/
Builder tokenSignatureKey(String tokenSignatureKey);
/**
* Require that only 'application/x-www-form-urlencoded' or 'multipart/form-data' body is accepted for the token
* verification to proceed. Required by default.
*
* @param requireFormUrlEncoded if only 'application/x-www-form-urlencoded' or 'multipart/form-data' body is allowed
* @return this builder
*/
Builder requireFormUrlEncoded(boolean requireFormUrlEncoded);
/**
* Create a new CSRF configuration.
*
* @return CSRF instance, which should be passed to the {@link HttpSecurity} event
*/
CSRF build();
}
}
|
Builder
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/api/assumptions/BDDAssumptionsTest.java
|
{
"start": 39436,
"end": 39845
}
|
class ____ {
private final Path actual = new File("file.ext").toPath();
@Test
void should_run_test_when_assumption_passes() {
thenCode(() -> given(actual).isRelative()).doesNotThrowAnyException();
}
@Test
void should_ignore_test_when_assumption_fails() {
expectAssumptionNotMetException(() -> given(actual).isAbsolute());
}
}
@Nested
|
BDDAssumptions_given_Path_Test
|
java
|
spring-projects__spring-framework
|
spring-core/src/main/java/org/springframework/util/AutoPopulatingList.java
|
{
"start": 6327,
"end": 6509
}
|
interface ____ creating elements for an index-based access
* data structure such as a {@link java.util.List}.
*
* @param <E> the element type
*/
@FunctionalInterface
public
|
for
|
java
|
spring-projects__spring-framework
|
spring-beans/src/main/java/org/springframework/beans/factory/config/BeanExpressionResolver.java
|
{
"start": 1192,
"end": 1734
}
|
interface ____ {
/**
* Evaluate the given value as an expression, if applicable;
* return the value as-is otherwise.
* @param value the value to evaluate as an expression
* @param beanExpressionContext the bean expression context to use when
* evaluating the expression
* @return the resolved value (potentially the given value as-is)
* @throws BeansException if evaluation failed
*/
@Nullable Object evaluate(@Nullable String value, BeanExpressionContext beanExpressionContext) throws BeansException;
}
|
BeanExpressionResolver
|
java
|
quarkusio__quarkus
|
extensions/vertx-http/deployment/src/main/java/io/quarkus/vertx/http/deployment/BodyHandlerBuildItem.java
|
{
"start": 175,
"end": 468
}
|
class ____ extends SimpleBuildItem {
private final Handler<RoutingContext> handler;
public BodyHandlerBuildItem(Handler<RoutingContext> handler) {
this.handler = handler;
}
public Handler<RoutingContext> getHandler() {
return handler;
}
}
|
BodyHandlerBuildItem
|
java
|
junit-team__junit5
|
jupiter-tests/src/test/java/org/junit/jupiter/engine/extension/TempDirectoryTests.java
|
{
"start": 51840,
"end": 52060
}
|
class ____ {
@Test
void test(@SuppressWarnings("unused") @TempDir(factory = Factory.class) File tempDir) {
// never called
}
private static
|
FactoryReturningNonDefaultFileSystemPathForFileAnnotatedElementTestCase
|
java
|
quarkusio__quarkus
|
extensions/grpc/deployment/src/test/java/io/quarkus/grpc/deployment/GrpcServerProcessorTest.java
|
{
"start": 6661,
"end": 6941
}
|
class ____ extends NoClassAnnotationsRoot {
static final Set<String> EXPECTED = ImmutableSet.of("noAnnotation", "blocking", "transactional");
void blocking() {
}
void transactional() {
}
}
@Blocking
static
|
TransactionalOverriding
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/AnalyzerRules.java
|
{
"start": 910,
"end": 1526
}
|
class ____<SubPlan extends LogicalPlan> extends Rule<SubPlan, LogicalPlan> {
// transformUp (post-order) - that is first children and then the node
// but with a twist; only if the tree is not resolved or analyzed
@Override
public final LogicalPlan apply(LogicalPlan plan) {
return plan.transformUp(typeToken(), t -> t.analyzed() || skipResolved() && t.resolved() ? t : rule(t));
}
protected abstract LogicalPlan rule(SubPlan plan);
protected boolean skipResolved() {
return true;
}
}
public abstract static
|
AnalyzerRule
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/GoogleCalendarStreamEndpointBuilderFactory.java
|
{
"start": 38690,
"end": 40917
}
|
interface ____ {
/**
* Google Calendar Stream (camel-google-calendar)
* Poll for changes in a Google Calendar.
*
* Category: cloud
* Since: 2.23
* Maven coordinates: org.apache.camel:camel-google-calendar
*
* @return the dsl builder for the headers' name.
*/
default GoogleCalendarStreamHeaderNameBuilder googleCalendarStream() {
return GoogleCalendarStreamHeaderNameBuilder.INSTANCE;
}
/**
* Google Calendar Stream (camel-google-calendar)
* Poll for changes in a Google Calendar.
*
* Category: cloud
* Since: 2.23
* Maven coordinates: org.apache.camel:camel-google-calendar
*
* Syntax: <code>google-calendar-stream:index</code>
*
* Path parameter: index (required)
* Specifies an index for the endpoint
*
* @param path index
* @return the dsl builder
*/
default GoogleCalendarStreamEndpointBuilder googleCalendarStream(String path) {
return GoogleCalendarStreamEndpointBuilderFactory.endpointBuilder("google-calendar-stream", path);
}
/**
* Google Calendar Stream (camel-google-calendar)
* Poll for changes in a Google Calendar.
*
* Category: cloud
* Since: 2.23
* Maven coordinates: org.apache.camel:camel-google-calendar
*
* Syntax: <code>google-calendar-stream:index</code>
*
* Path parameter: index (required)
* Specifies an index for the endpoint
*
* @param componentName to use a custom component name for the endpoint
* instead of the default name
* @param path index
* @return the dsl builder
*/
default GoogleCalendarStreamEndpointBuilder googleCalendarStream(String componentName, String path) {
return GoogleCalendarStreamEndpointBuilderFactory.endpointBuilder(componentName, path);
}
}
/**
* The builder of headers' name for the Google Calendar Stream component.
*/
public static
|
GoogleCalendarStreamBuilders
|
java
|
elastic__elasticsearch
|
x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plan/physical/UnaryExec.java
|
{
"start": 505,
"end": 1517
}
|
class ____ extends PhysicalPlan {
private final PhysicalPlan child;
protected UnaryExec(Source source, PhysicalPlan child) {
super(source, Collections.singletonList(child));
this.child = child;
}
@Override
public final PhysicalPlan replaceChildren(List<PhysicalPlan> newChildren) {
return replaceChild(newChildren.get(0));
}
protected abstract UnaryExec replaceChild(PhysicalPlan newChild);
public PhysicalPlan child() {
return child;
}
@Override
public List<Attribute> output() {
return child.output();
}
@Override
public int hashCode() {
return Objects.hashCode(child());
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
UnaryExec other = (UnaryExec) obj;
return Objects.equals(child, other.child);
}
}
|
UnaryExec
|
java
|
spring-projects__spring-boot
|
module/spring-boot-security-saml2/src/main/java/org/springframework/boot/security/saml2/autoconfigure/Saml2LoginConfiguration.java
|
{
"start": 1574,
"end": 1863
}
|
class ____ {
@Bean
SecurityFilterChain samlSecurityFilterChain(HttpSecurity http) {
http.authorizeHttpRequests((requests) -> requests.anyRequest().authenticated());
http.saml2Login(withDefaults());
http.saml2Logout(withDefaults());
return http.build();
}
}
|
Saml2LoginConfiguration
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/ProtectedMembersInFinalClassTest.java
|
{
"start": 2129,
"end": 2310
}
|
class ____ {
protected void protectedMethod() {}
}
""")
.addSourceLines(
"in/Test.java",
"""
final
|
Base
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/deser/creators/FactoryAndConstructor2962Test.java
|
{
"start": 331,
"end": 403
}
|
class ____
{
// [databind#2962]
static
|
FactoryAndConstructor2962Test
|
java
|
quarkusio__quarkus
|
extensions/tls-registry/deployment/src/test/java/io/quarkus/tls/JKSKeyStoreCredentialsProviderWithCustomKeysTest.java
|
{
"start": 974,
"end": 2444
}
|
class ____ {
private static final String configuration = """
quarkus.tls.key-store.jks.path=target/certs/test-credentials-provider-keystore.jks
quarkus.tls.key-store.credentials-provider.name=tls
quarkus.tls.key-store.credentials-provider.password-key=pwd
quarkus.tls.key-store.credentials-provider.alias-password-key=ak
""";
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest().setArchiveProducer(
() -> ShrinkWrap.create(JavaArchive.class)
.addClass(MyCredentialProvider.class)
.add(new StringAsset(configuration), "application.properties"));
@Inject
TlsConfigurationRegistry certificates;
@Test
void test() throws KeyStoreException, CertificateParsingException {
TlsConfiguration def = certificates.getDefault().orElseThrow();
assertThat(def.getKeyStoreOptions()).isNotNull();
assertThat(def.getKeyStore()).isNotNull();
X509Certificate certificate = (X509Certificate) def.getKeyStore().getCertificate("test-credentials-provider");
assertThat(certificate).isNotNull();
assertThat(certificate.getSubjectAlternativeNames()).anySatisfy(l -> {
assertThat(l.get(0)).isEqualTo(2);
assertThat(l.get(1)).isEqualTo("localhost");
});
}
@ApplicationScoped
public static
|
JKSKeyStoreCredentialsProviderWithCustomKeysTest
|
java
|
apache__camel
|
components/camel-disruptor/src/test/java/org/apache/camel/component/disruptor/DisruptorComplexInOutTest.java
|
{
"start": 1108,
"end": 1997
}
|
class ____ extends CamelTestSupport {
@Test
void testInOut() throws Exception {
getMockEndpoint("mock:result").expectedBodiesReceived("Bye World");
final String out = template.requestBody("direct:start", "Hello World", String.class);
assertEquals("Bye World", out);
MockEndpoint.assertIsSatisfied(context);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
getContext().setTracing(true);
from("direct:start").to("disruptor:a");
from("disruptor:a").to("log:bar", "disruptor:b");
from("disruptor:b").delay(10).to("direct:c");
from("direct:c").transform(constant("Bye World")).to("mock:result");
}
};
}
}
|
DisruptorComplexInOutTest
|
java
|
spring-projects__spring-framework
|
spring-core/src/test/java/org/springframework/core/type/AbstractAnnotationMetadataTests.java
|
{
"start": 6655,
"end": 6721
}
|
interface ____ extends TestInterface {
}
public @
|
TestSubInterface
|
java
|
apache__camel
|
components/camel-jms/src/test/java/org/apache/camel/component/jms/integration/spring/tx/JMSNestedTransactionRollbackIT.java
|
{
"start": 1110,
"end": 2116
}
|
class ____ extends SpringJMSBasic {
@Override
protected ClassPathXmlApplicationContext createApplicationContext() {
return new ClassPathXmlApplicationContext(
"/org/apache/camel/component/jms/integration/spring/tx/JMSNestedTransactionRollbackIT.xml");
}
@Test
void testNestedTransactionRolledackSuccessfully() throws Exception {
context.start();
// error handler should catch 1 exception and rollback producer transaction
MockEndpoint error = getMockEndpoint("mock:got-message");
error.expectedMessageCount(1);
error.setAssertPeriod(100);
// if transaction rolled back successfully, then no messages should go there
MockEndpoint mock = getMockEndpoint("mock:not-okay");
mock.expectedMessageCount(0);
mock.setAssertPeriod(100);
template.sendBody("jms:queue:okay", "test");
error.assertIsSatisfied();
mock.assertIsSatisfied();
}
}
|
JMSNestedTransactionRollbackIT
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestSecureRegistryDNS.java
|
{
"start": 978,
"end": 1718
}
|
class ____ extends TestRegistryDNS {
@Override protected Configuration createConfiguration() {
Configuration conf = super.createConfiguration();
conf.setBoolean(RegistryConstants.KEY_DNSSEC_ENABLED, true);
conf.set(RegistryConstants.KEY_DNSSEC_PUBLIC_KEY,
"AwEAAe1Jev0Az1khlQCvf0nud1/CNHQwwPEu8BNchZthdDxKPVn29yrD "
+ "CHoAWjwiGsOSw3SzIPrawSbHzyJsjn0oLBhGrH6QedFGnydoxjNsw3m/ "
+ "SCmOjR/a7LGBAMDFKqFioi4gOyuN66svBeY+/5uw72+0ei9AQ20gqf6q "
+ "l9Ozs5bV");
conf.set(RegistryConstants.KEY_DNSSEC_PRIVATE_KEY_FILE,
getClass().getResource("/test.private").getFile());
return conf;
}
@Override protected boolean isSecure() {
return true;
}
}
|
TestSecureRegistryDNS
|
java
|
apache__flink
|
flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/test/util/JavaProgramTestBase.java
|
{
"start": 1212,
"end": 1597
}
|
class ____ unit tests that run a single test with object reuse enabled/disabled and against
* collection environments.
*
* <p>To write a unit test against this test base, simply extend it and implement the {@link
* #testProgram()} method.
*
* <p>To skip the execution against collection environments you have to override {@link
* #skipCollectionExecution()}.
*/
public abstract
|
for
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/StatementSwitchToExpressionSwitchTest.java
|
{
"start": 2301,
"end": 3085
}
|
class ____ {
public void foo(Suit suit) {
switch (suit) {
case HEART /* left comment */ /* and there is more: */ // to end of line
:
// Explanatory comment
System.out.println("the best suit is called the");
// Middle comment
System.out.println("heart");
// Break comment
break;
// End comment
case SPADE:
case CLUB:
case DIAMOND:
System.out.println("non-heart");
}
}
}
""")
.addOutputLines(
"Test.java",
"""
|
Test
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/action/ActionRunnable.java
|
{
"start": 810,
"end": 1004
}
|
class ____ {@link Runnable}s that need to call {@link ActionListener#onFailure(Exception)} in case an uncaught
* exception or error is thrown while the actual action is run.
*/
public abstract
|
for
|
java
|
spring-projects__spring-framework
|
spring-core/src/test/java/org/springframework/core/annotation/MergedAnnotationsTests.java
|
{
"start": 140436,
"end": 140636
}
|
interface ____ {
String text() default "enigma";
boolean predicate() default true;
char[] characters() default { 'a', 'b', 'c' };
}
@Retention(RetentionPolicy.RUNTIME)
@
|
AnnotationWithDefaults
|
java
|
apache__dubbo
|
dubbo-plugin/dubbo-rest-openapi/src/main/java/org/apache/dubbo/rpc/protocol/tri/rest/openapi/OpenAPIDefinitionResolver.java
|
{
"start": 1781,
"end": 1917
}
|
interface ____ {
Operation resolve(Operation operation, MethodMeta methodMeta, OperationContext context);
}
|
OperationChain
|
java
|
spring-projects__spring-framework
|
spring-context/src/main/java/org/springframework/context/aot/RuntimeHintsBeanFactoryInitializationAotProcessor.java
|
{
"start": 4089,
"end": 5010
}
|
class ____ implements BeanFactoryInitializationAotContribution {
private final Iterable<RuntimeHintsRegistrar> registrars;
private final @Nullable ClassLoader beanClassLoader;
RuntimeHintsRegistrarContribution(Iterable<RuntimeHintsRegistrar> registrars,
@Nullable ClassLoader beanClassLoader) {
this.registrars = registrars;
this.beanClassLoader = beanClassLoader;
}
@Override
public void applyTo(GenerationContext generationContext,
BeanFactoryInitializationCode beanFactoryInitializationCode) {
RuntimeHints hints = generationContext.getRuntimeHints();
this.registrars.forEach(registrar -> {
if (logger.isTraceEnabled()) {
logger.trace(LogMessage.format(
"Processing RuntimeHints contribution from [%s]",
registrar.getClass().getCanonicalName()));
}
registrar.registerHints(hints, this.beanClassLoader);
});
}
}
}
|
RuntimeHintsRegistrarContribution
|
java
|
apache__dubbo
|
dubbo-plugin/dubbo-rest-jaxrs/src/test/java/org/apache/dubbo/rpc/protocol/tri/rest/support/jaxrs/compatible/JaxrsRestProtocolTest.java
|
{
"start": 3266,
"end": 23122
}
|
class ____ {
private final Protocol tProtocol =
ApplicationModel.defaultModel().getExtensionLoader(Protocol.class).getExtension("tri");
private final Protocol protocol =
ApplicationModel.defaultModel().getExtensionLoader(Protocol.class).getExtension("rest");
private final ProxyFactory proxy = ApplicationModel.defaultModel()
.getExtensionLoader(ProxyFactory.class)
.getAdaptiveExtension();
private final int availablePort = NetUtils.getAvailablePort();
private final URL exportUrl =
URL.valueOf("tri://127.0.0.1:" + availablePort + "/rest?interface=" + DemoService.class.getName());
private static final String SERVER = "netty4";
private final ModuleServiceRepository repository =
ApplicationModel.defaultModel().getDefaultModule().getServiceRepository();
@AfterEach
public void tearDown() {
tProtocol.destroy();
protocol.destroy();
FrameworkModel.destroyAll();
}
@Test
void testRestProtocol() {
URL url = URL.valueOf("tri://127.0.0.1:" + NetUtils.getAvailablePort() + "/?version=1.0.0&interface="
+ DemoService.class.getName());
DemoServiceImpl server = new DemoServiceImpl();
url = registerProvider(url, server, DemoService.class);
Exporter<DemoService> exporter = tProtocol.export(proxy.getInvoker(server, DemoService.class, url));
Invoker<DemoService> invoker = protocol.refer(DemoService.class, url);
Assertions.assertFalse(server.isCalled());
DemoService client = proxy.getProxy(invoker);
String result = client.sayHello("haha");
Assertions.assertTrue(server.isCalled());
Assertions.assertEquals("Hello, haha", result);
String header = client.header("header test");
Assertions.assertEquals("header test", header);
Assertions.assertEquals(1, client.headerInt(1));
invoker.destroy();
exporter.unexport();
}
@Test
void testAnotherUserRestProtocolByDifferentRestClient() {
testAnotherUserRestProtocol(org.apache.dubbo.remoting.Constants.OK_HTTP);
testAnotherUserRestProtocol(org.apache.dubbo.remoting.Constants.APACHE_HTTP_CLIENT);
testAnotherUserRestProtocol(org.apache.dubbo.remoting.Constants.URL_CONNECTION);
}
void testAnotherUserRestProtocol(String restClient) {
URL url = URL.valueOf("tri://127.0.0.1:" + NetUtils.getAvailablePort() + "/?version=1.0.0&interface="
+ AnotherUserRestService.class.getName() + "&" + org.apache.dubbo.remoting.Constants.CLIENT_KEY + "="
+ restClient);
AnotherUserRestServiceImpl server = new AnotherUserRestServiceImpl();
url = this.registerProvider(url, server, AnotherUserRestService.class);
Exporter<AnotherUserRestService> exporter =
tProtocol.export(proxy.getInvoker(server, AnotherUserRestService.class, url));
Invoker<AnotherUserRestService> invoker = protocol.refer(AnotherUserRestService.class, url);
AnotherUserRestService client = proxy.getProxy(invoker);
User result = client.getUser(123l);
Assertions.assertEquals(123l, result.getId());
result.setName("dubbo");
Assertions.assertEquals(123l, client.registerUser(result).getId());
Assertions.assertEquals("context", client.getContext());
byte[] bytes = {1, 2, 3, 4};
Assertions.assertTrue(Arrays.equals(bytes, client.bytes(bytes)));
Assertions.assertEquals(1l, client.number(1l));
HashMap<String, String> map = new HashMap<>();
map.put("headers", "h1");
Assertions.assertEquals("h1", client.headerMap(map));
Assertions.assertEquals(null, client.headerMap(null));
invoker.destroy();
exporter.unexport();
}
@Test
void testRestProtocolWithContextPath() {
DemoServiceImpl server = new DemoServiceImpl();
Assertions.assertFalse(server.isCalled());
int port = NetUtils.getAvailablePort();
URL url = URL.valueOf(
"tri://127.0.0.1:" + port + "/a/b/c?version=1.0.0&interface=" + DemoService.class.getName());
url = this.registerProvider(url, server, DemoService.class);
Exporter<DemoService> exporter = tProtocol.export(proxy.getInvoker(server, DemoService.class, url));
url = URL.valueOf(
"rest://127.0.0.1:" + port + "/a/b/c/?version=1.0.0&interface=" + DemoService.class.getName());
Invoker<DemoService> invoker = protocol.refer(DemoService.class, url);
DemoService client = proxy.getProxy(invoker);
String result = client.sayHello("haha");
Assertions.assertTrue(server.isCalled());
Assertions.assertEquals("Hello, haha", result);
invoker.destroy();
exporter.unexport();
}
@Test
void testExport() {
DemoService server = new DemoServiceImpl();
URL url = this.registerProvider(exportUrl, server, DemoService.class);
RpcContext.getClientAttachment().setAttachment("timeout", "20000");
Exporter<DemoService> exporter = tProtocol.export(proxy.getInvoker(server, DemoService.class, url));
DemoService demoService = this.proxy.getProxy(protocol.refer(DemoService.class, url));
Integer echoString = demoService.hello(1, 2);
assertThat(echoString, is(3));
exporter.unexport();
}
@Test
void testNettyServer() {
DemoService server = new DemoServiceImpl();
URL url = this.registerProvider(exportUrl, server, DemoService.class);
URL nettyUrl = url.addParameter(SERVER_KEY, SERVER);
Exporter<DemoService> exporter =
tProtocol.export(proxy.getInvoker(new DemoServiceImpl(), DemoService.class, nettyUrl));
DemoService demoService = this.proxy.getProxy(protocol.refer(DemoService.class, nettyUrl));
Integer echoString = demoService.hello(10, 10);
assertThat(echoString, is(20));
exporter.unexport();
}
@Test
void testInvoke() {
DemoService server = new DemoServiceImpl();
URL url = this.registerProvider(exportUrl, server, DemoService.class);
Exporter<DemoService> exporter = tProtocol.export(proxy.getInvoker(server, DemoService.class, url));
RpcInvocation rpcInvocation = new RpcInvocation(
"hello", DemoService.class.getName(), "", new Class[] {Integer.class, Integer.class}, new Integer[] {
2, 3
});
rpcInvocation.setTargetServiceUniqueName(url.getServiceKey());
Result result = exporter.getInvoker().invoke(rpcInvocation);
assertThat(result.getValue(), CoreMatchers.<Object>is(5));
}
@Test
void testDefaultPort() {
assertThat(protocol.getDefaultPort(), is(80));
}
@Test
void testRestExceptionMapper() {
DemoService server = new DemoServiceImpl();
URL url = this.registerProvider(exportUrl, server, DemoService.class);
URL exceptionUrl = url.addParameter(EXTENSION_KEY, ResteasyExceptionMapper.class.getName());
tProtocol.export(proxy.getInvoker(server, DemoService.class, exceptionUrl));
DemoService referDemoService = this.proxy.getProxy(protocol.refer(DemoService.class, exceptionUrl));
Assertions.assertEquals("test-exception", referDemoService.error());
}
@Test
void testFormConsumerParser() {
DemoService server = new DemoServiceImpl();
URL nettyUrl = this.registerProvider(exportUrl, server, DemoService.class);
Exporter<DemoService> exporter = tProtocol.export(proxy.getInvoker(server, DemoService.class, nettyUrl));
DemoService demoService = this.proxy.getProxy(protocol.refer(DemoService.class, nettyUrl));
Long number = demoService.testFormBody(18l);
Assertions.assertEquals(18l, number);
exporter.unexport();
}
@Test
void test404() {
Assertions.assertThrows(RpcException.class, () -> {
DemoService server = new DemoServiceImpl();
URL nettyUrl = this.registerProvider(exportUrl, server, DemoService.class);
Exporter<DemoService> exporter = tProtocol.export(proxy.getInvoker(server, DemoService.class, nettyUrl));
URL referUrl = URL.valueOf(
"tri://127.0.0.1:" + availablePort + "/rest?interface=" + RestDemoForTestException.class.getName());
RestDemoForTestException restDemoForTestException =
this.proxy.getProxy(protocol.refer(RestDemoForTestException.class, referUrl));
restDemoForTestException.test404();
exporter.unexport();
});
}
@Test
void test400() {
Assertions.assertThrows(RpcException.class, () -> {
DemoService server = new DemoServiceImpl();
URL nettyUrl = this.registerProvider(exportUrl, server, DemoService.class);
Exporter<DemoService> exporter = tProtocol.export(proxy.getInvoker(server, DemoService.class, nettyUrl));
URL referUrl = URL.valueOf(
"tri://127.0.0.1:" + availablePort + "/rest?interface=" + RestDemoForTestException.class.getName());
RestDemoForTestException restDemoForTestException =
this.proxy.getProxy(protocol.refer(RestDemoForTestException.class, referUrl));
restDemoForTestException.test400("abc", "edf");
exporter.unexport();
});
}
@Test
void testPrimitive() {
DemoService server = new DemoServiceImpl();
URL nettyUrl = this.registerProvider(exportUrl, server, DemoService.class);
Exporter<DemoService> exporter = tProtocol.export(proxy.getInvoker(server, DemoService.class, nettyUrl));
DemoService demoService = this.proxy.getProxy(protocol.refer(DemoService.class, nettyUrl));
Integer result = demoService.primitiveInt(1, 2);
Long resultLong = demoService.primitiveLong(1, 2l);
long resultByte = demoService.primitiveByte((byte) 1, 2l);
long resultShort = demoService.primitiveShort((short) 1, 2l, 1);
assertThat(result, is(3));
assertThat(resultShort, is(3l));
assertThat(resultLong, is(3l));
assertThat(resultByte, is(3l));
exporter.unexport();
}
@Test
void testMapParam() {
DemoService server = new DemoServiceImpl();
URL nettyUrl = this.registerProvider(exportUrl, server, DemoService.class);
Exporter<DemoService> exporter = tProtocol.export(proxy.getInvoker(server, DemoService.class, nettyUrl));
DemoService demoService = this.proxy.getProxy(protocol.refer(DemoService.class, nettyUrl));
Map<String, String> params = new HashMap<>();
params.put("param", "P1");
;
Map<String, String> headers = new HashMap<>();
headers.put("header", "H1");
Assertions.assertEquals("P1", demoService.testMapParam(params));
Assertions.assertEquals("H1", demoService.testMapHeader(headers));
MultivaluedMapImpl<String, String> forms = new MultivaluedMapImpl<>();
forms.put("form", Arrays.asList("F1"));
Assertions.assertEquals(Arrays.asList("F1"), demoService.testMapForm(forms));
exporter.unexport();
}
@Test
void testNoArgParam() {
DemoService server = new DemoServiceImpl();
URL url = this.registerProvider(exportUrl, server, DemoService.class);
URL nettyUrl = url.addParameter(SERVER_KEY, SERVER);
Exporter<DemoService> exporter = tProtocol.export(proxy.getInvoker(server, DemoService.class, nettyUrl));
DemoService demoService = this.proxy.getProxy(protocol.refer(DemoService.class, nettyUrl));
Assertions.assertEquals(null, demoService.noStringHeader(null));
Assertions.assertEquals(null, demoService.noStringParam(null));
/* Assertions.assertThrows(RpcException.class, () -> {
demoService.noIntHeader(1);
});
Assertions.assertThrows(RpcException.class, () -> {
demoService.noIntParam(1);
});*/
Assertions.assertEquals(null, demoService.noBodyArg(null));
exporter.unexport();
}
@Test
void testHttpMethods() {
testHttpMethod(org.apache.dubbo.remoting.Constants.OK_HTTP);
testHttpMethod(org.apache.dubbo.remoting.Constants.APACHE_HTTP_CLIENT);
testHttpMethod(org.apache.dubbo.remoting.Constants.URL_CONNECTION);
}
void testHttpMethod(String restClient) {
HttpMethodService server = new HttpMethodServiceImpl();
URL url = URL.valueOf("tri://127.0.0.1:" + NetUtils.getAvailablePort() + "/?version=1.0.0&interface="
+ HttpMethodService.class.getName() + "&"
+ org.apache.dubbo.remoting.Constants.CLIENT_KEY + "=" + restClient);
url = this.registerProvider(url, server, HttpMethodService.class);
Exporter<HttpMethodService> exporter = tProtocol.export(proxy.getInvoker(server, HttpMethodService.class, url));
HttpMethodService demoService = this.proxy.getProxy(protocol.refer(HttpMethodService.class, url));
String expect = "hello";
Assertions.assertEquals(null, demoService.sayHelloHead());
Assertions.assertEquals(expect, demoService.sayHelloDelete("hello"));
Assertions.assertEquals(expect, demoService.sayHelloGet("hello"));
Assertions.assertEquals(expect, demoService.sayHelloOptions("hello"));
// Assertions.assertEquals(expect, demoService.sayHelloPatch("hello"));
Assertions.assertEquals(expect, demoService.sayHelloPost("hello"));
Assertions.assertEquals(expect, demoService.sayHelloPut("hello"));
exporter.unexport();
}
@Test
void test405() {
int availablePort = NetUtils.getAvailablePort();
URL url = URL.valueOf("tri://127.0.0.1:" + availablePort
+ "/?version=1.0.0&interface=org.apache.dubbo.rpc.protocol.rest.rest.RestDemoService&");
RestDemoServiceImpl server = new RestDemoServiceImpl();
url = this.registerProvider(url, server, RestDemoService.class);
Exporter<RestDemoService> exporter = tProtocol.export(proxy.getInvoker(server, RestDemoService.class, url));
URL consumer = URL.valueOf("rest://127.0.0.1:" + availablePort + "/?version=1.0.0&interface="
+ RestDemoForTestException.class.getName());
consumer = this.registerProvider(consumer, server, RestDemoForTestException.class);
Invoker<RestDemoForTestException> invoker = protocol.refer(RestDemoForTestException.class, consumer);
RestDemoForTestException client = proxy.getProxy(invoker);
Assertions.assertThrows(RpcException.class, () -> {
client.testMethodDisallowed("aaa");
});
invoker.destroy();
exporter.unexport();
}
@Test
void testContainerRequestFilter() {
DemoService server = new DemoServiceImpl();
URL url = this.registerProvider(exportUrl, server, DemoService.class);
URL nettyUrl = url.addParameter(EXTENSION_KEY, TestContainerRequestFilter.class.getName());
Exporter<DemoService> exporter = tProtocol.export(proxy.getInvoker(server, DemoService.class, nettyUrl));
DemoService demoService = this.proxy.getProxy(protocol.refer(DemoService.class, nettyUrl));
Assertions.assertEquals("return-success", demoService.sayHello("hello"));
exporter.unexport();
}
@Test
void testIntercept() {
DemoService server = new DemoServiceImpl();
URL url = this.registerProvider(exportUrl, server, DemoService.class);
URL nettyUrl = url.addParameter(EXTENSION_KEY, DynamicTraceInterceptor.class.getName());
Exporter<DemoService> exporter = tProtocol.export(proxy.getInvoker(server, DemoService.class, nettyUrl));
DemoService demoService = this.proxy.getProxy(protocol.refer(DemoService.class, nettyUrl));
Assertions.assertEquals("intercept", demoService.sayHello("hello"));
exporter.unexport();
}
@Test
void testResponseFilter() {
DemoService server = new DemoServiceImpl();
URL url = this.registerProvider(exportUrl, server, DemoService.class);
URL nettyUrl = url.addParameter(EXTENSION_KEY, TraceFilter.class.getName());
Exporter<DemoService> exporter = tProtocol.export(proxy.getInvoker(server, DemoService.class, nettyUrl));
DemoService demoService = this.proxy.getProxy(protocol.refer(DemoService.class, nettyUrl));
Assertions.assertEquals("response-filter", demoService.sayHello("hello"));
exporter.unexport();
}
@Test
void testCollectionResult() {
DemoService server = new DemoServiceImpl();
URL nettyUrl = this.registerProvider(exportUrl, server, DemoService.class);
Exporter<DemoService> exporter = tProtocol.export(proxy.getInvoker(server, DemoService.class, nettyUrl));
DemoService demoService = this.proxy.getProxy(protocol.refer(DemoService.class, nettyUrl));
Assertions.assertEquals(
User.getInstance(),
demoService.list(Arrays.asList(User.getInstance())).get(0));
HashSet<User> objects = new HashSet<>();
objects.add(User.getInstance());
Assertions.assertEquals(User.getInstance(), new ArrayList<>(demoService.set(objects)).get(0));
Assertions.assertEquals(User.getInstance(), demoService.array(objects.toArray(new User[0]))[0]);
Map<String, User> map = new HashMap<>();
map.put("map", User.getInstance());
Assertions.assertEquals(User.getInstance(), demoService.stringMap(map).get("map"));
// Map<User, User> maps = new HashMap<>();
// maps.put(User.getInstance(), User.getInstance());
// Assertions.assertEquals(User.getInstance(), demoService.userMap(maps).get(User.getInstance()));
exporter.unexport();
}
@Test
void testRequestAndResponseFilter() {
DemoService server = new DemoServiceImpl();
URL exportUrl = URL.valueOf("tri://127.0.0.1:" + availablePort + "/rest?interface="
+ DemoService.class.getName() + "&extension=" + TraceRequestAndResponseFilter.class.getName());
URL nettyUrl = this.registerProvider(exportUrl, server, DemoService.class);
Exporter<DemoService> exporter = tProtocol.export(proxy.getInvoker(server, DemoService.class, nettyUrl));
DemoService demoService = this.proxy.getProxy(protocol.refer(DemoService.class, nettyUrl));
Assertions.assertEquals("header-result", demoService.sayHello("hello"));
exporter.unexport();
}
@Test
void testFormBody() {
DemoService server = new DemoServiceImpl();
URL url = this.registerProvider(exportUrl, server, DemoService.class);
URL nettyUrl = url.addParameter(SERVER_KEY, SERVER);
Exporter<DemoService> exporter = tProtocol.export(proxy.getInvoker(server, DemoService.class, nettyUrl));
DemoService demoService = this.proxy.getProxy(protocol.refer(DemoService.class, nettyUrl));
User user = demoService.formBody(User.getInstance());
Assertions.assertEquals("formBody", user.getName());
exporter.unexport();
}
private URL registerProvider(URL url, Object impl, Class<?> interfaceClass) {
ServiceDescriptor serviceDescriptor = repository.registerService(interfaceClass);
ProviderModel providerModel = new ProviderModel(url.getServiceKey(), impl, serviceDescriptor, null, null);
repository.registerProvider(providerModel);
return url.setServiceModel(providerModel);
}
}
|
JaxrsRestProtocolTest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/query/hql/ImplicitInstantiationTest.java
|
{
"start": 6724,
"end": 7104
}
|
class ____ {
private Long id;
private String name;
public Thing(Long id, String name) {
this.id = id;
this.name = name;
}
Thing() {
}
@Id
public Long getId() {
return this.id;
}
public void setId(Long id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
}
|
Thing
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/Bug89.java
|
{
"start": 177,
"end": 437
}
|
class ____ {
@Test
public void testBug89() {
try {
String s = "{\"a\":з」∠)_,\"}";
JSON.parseObject(s);
fail("Expect JSONException");
} catch (JSONException e) {
// good
}
}
}
|
Bug89
|
java
|
quarkusio__quarkus
|
extensions/qute/deployment/src/test/java/io/quarkus/qute/deployment/tag/UserTagWithQuteExtensionIncludeTest.java
|
{
"start": 360,
"end": 1100
}
|
class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addAsResource(new StringAsset("{name}"),
"templates/tags/hello.qute.txt")
.addAsResource(new StringAsset("{#include base}{#item}{#hello name=name /}{/item}{/include}"),
"templates/foo.txt")
.addAsResource(new StringAsset("{#insert item}NOK{/}"),
"templates/base.html"));
@Inject
Template foo;
@Test
public void testInjection() {
assertEquals("OK", foo.data("name", "OK").render());
}
}
|
UserTagWithQuteExtensionIncludeTest
|
java
|
mockito__mockito
|
mockito-core/src/main/java/org/mockito/creation/instance/InstantiationException.java
|
{
"start": 325,
"end": 646
}
|
class ____ extends MockitoException {
/**
* @since 3.5.0
*/
public InstantiationException(String message) {
super(message);
}
/**
* @since 2.15.4
*/
public InstantiationException(String message, Throwable cause) {
super(message, cause);
}
}
|
InstantiationException
|
java
|
apache__camel
|
components/camel-twilio/src/generated/java/org/apache/camel/component/twilio/internal/NotificationApiMethod.java
|
{
"start": 660,
"end": 2031
}
|
enum ____ implements ApiMethod {
FETCHER(
com.twilio.rest.api.v2010.account.NotificationFetcher.class,
"fetcher",
arg("pathSid", String.class)),
FETCHER_1(
com.twilio.rest.api.v2010.account.NotificationFetcher.class,
"fetcher",
arg("pathAccountSid", String.class),
arg("pathSid", String.class)),
READER(
com.twilio.rest.api.v2010.account.NotificationReader.class,
"reader"),
READER_1(
com.twilio.rest.api.v2010.account.NotificationReader.class,
"reader",
arg("pathAccountSid", String.class));
private final ApiMethod apiMethod;
NotificationApiMethod(Class<?> resultType, String name, ApiMethodArg... args) {
this.apiMethod = new ApiMethodImpl(Notification.class, resultType, name, args);
}
@Override
public String getName() { return apiMethod.getName(); }
@Override
public Class<?> getResultType() { return apiMethod.getResultType(); }
@Override
public List<String> getArgNames() { return apiMethod.getArgNames(); }
@Override
public List<String> getSetterArgNames() { return apiMethod.getSetterArgNames(); }
@Override
public List<Class<?>> getArgTypes() { return apiMethod.getArgTypes(); }
@Override
public Method getMethod() { return apiMethod.getMethod(); }
}
|
NotificationApiMethod
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
|
{
"start": 5365,
"end": 46633
}
|
class ____ implements DataEncryptionKeyFactory {
public static final Logger LOG =
LoggerFactory.getLogger(NameNode.class.getName());
// return string marking fsck status
public static final String CORRUPT_STATUS = "is CORRUPT";
public static final String HEALTHY_STATUS = "is HEALTHY";
public static final String DECOMMISSIONING_STATUS = "is DECOMMISSIONING";
public static final String DECOMMISSIONED_STATUS = "is DECOMMISSIONED";
public static final String ENTERING_MAINTENANCE_STATUS =
"is ENTERING MAINTENANCE";
public static final String IN_MAINTENANCE_STATUS = "is IN MAINTENANCE";
public static final String STALE_STATUS = "is STALE";
public static final String EXCESS_STATUS = "is EXCESS";
public static final String NONEXISTENT_STATUS = "does not exist";
public static final String FAILURE_STATUS = "FAILED";
public static final String UNDEFINED = "undefined";
private final NameNode namenode;
private final BlockManager blockManager;
private final NetworkTopology networktopology;
private final int totalDatanodes;
private final InetAddress remoteAddress;
private long totalDirs = 0L;
private long totalSymlinks = 0L;
private String lostFound = null;
private boolean lfInited = false;
private boolean lfInitedOk = false;
private boolean showFiles = false;
private boolean showOpenFiles = false;
private boolean showBlocks = false;
private boolean showLocations = false;
private boolean showRacks = false;
private boolean showStoragePolcies = false;
private boolean showCorruptFileBlocks = false;
private boolean showReplicaDetails = false;
private boolean showUpgradeDomains = false;
private boolean showMaintenanceState = false;
private long staleInterval;
private Tracer tracer;
private String auditSource;
/**
* True if we encountered an internal error during FSCK, such as not being
* able to delete a corrupt file.
*/
private boolean internalError = false;
/**
* True if the user specified the -move option.
*
* Whe this option is in effect, we will copy salvaged blocks into the lost
* and found. */
private boolean doMove = false;
/**
* True if the user specified the -delete option.
*
* Whe this option is in effect, we will delete corrupted files.
*/
private boolean doDelete = false;
/**
* True if the user specified the -replicate option.
*
* When this option is in effect, we will initiate replication work to make
* mis-replicated blocks confirm the block placement policy.
*/
private boolean doReplicate = false;
String path = "/";
private String[] blockIds = null;
// We return back N files that are corrupt; the list of files returned is
// ordered by block id; to allow continuation support, pass in the last block
// # from previous call
private final String[] currentCookie = new String[] { null };
private final Configuration conf;
private final PrintWriter out;
private List<String> snapshottableDirs = null;
private final BlockPlacementPolicies bpPolicies;
private StoragePolicySummary storageTypeSummary = null;
/**
* Filesystem checker.
* @param conf configuration (namenode config)
* @param namenode namenode that this fsck is going to use
* @param pmap key=value[] map passed to the http servlet as url parameters
* @param out output stream to write the fsck output
* @param totalDatanodes number of live datanodes
* @param remoteAddress source address of the fsck request
*/
NamenodeFsck(Configuration conf, NameNode namenode,
NetworkTopology networktopology,
Map<String, String[]> pmap, PrintWriter out,
int totalDatanodes, InetAddress remoteAddress) {
this.conf = conf;
this.namenode = namenode;
this.blockManager = namenode.getNamesystem().getBlockManager();
this.networktopology = networktopology;
this.out = out;
this.totalDatanodes = totalDatanodes;
this.remoteAddress = remoteAddress;
this.bpPolicies = new BlockPlacementPolicies(conf, null,
networktopology,
namenode.getNamesystem().getBlockManager().getDatanodeManager()
.getHost2DatanodeMap());
this.staleInterval =
conf.getLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT);
this.tracer = new Tracer.Builder("NamenodeFsck").
conf(TraceUtils.wrapHadoopConf("namenode.fsck.htrace.", conf)).
build();
for (Iterator<String> it = pmap.keySet().iterator(); it.hasNext();) {
String key = it.next();
if (key.equals("path")) { this.path = pmap.get("path")[0]; }
else if (key.equals("move")) { this.doMove = true; }
else if (key.equals("delete")) { this.doDelete = true; }
else if (key.equals("files")) { this.showFiles = true; }
else if (key.equals("blocks")) { this.showBlocks = true; }
else if (key.equals("locations")) { this.showLocations = true; }
else if (key.equals("racks")) { this.showRacks = true; }
else if (key.equals("replicadetails")) {
this.showReplicaDetails = true;
} else if (key.equals("upgradedomains")) {
this.showUpgradeDomains = true;
} else if (key.equals("maintenance")) {
this.showMaintenanceState = true;
} else if (key.equals("storagepolicies")) {
this.showStoragePolcies = true;
} else if (key.equals("showprogress")) {
out.println("The fsck switch -showprogress is deprecated and no " +
"longer has any effect. Progress is now shown by default.");
LOG.warn("The fsck switch -showprogress is deprecated and no longer " +
"has any effect. Progress is now shown by default.");
} else if (key.equals("openforwrite")) {
this.showOpenFiles = true;
} else if (key.equals("listcorruptfileblocks")) {
this.showCorruptFileBlocks = true;
} else if (key.equals("startblockafter")) {
this.currentCookie[0] = pmap.get("startblockafter")[0];
} else if (key.equals("includeSnapshots")) {
this.snapshottableDirs = new ArrayList<String>();
} else if (key.equals("blockId")) {
this.blockIds = pmap.get("blockId")[0].split(" ");
} else if (key.equals("replicate")) {
this.doReplicate = true;
}
}
this.auditSource = (blockIds != null)
? "blocksIds=" + Arrays.asList(blockIds) : path;
}
public String getAuditSource() {
return auditSource;
}
  /**
   * Prints a detailed report for a single block given its id: the owning
   * file, replica counts broken down by state, and the per-datanode status
   * of every stored replica (for striped groups: every internal block).
   * Runs entirely under the namesystem read lock.
   *
   * @param blockId block id string as passed on the command line
   */
  public void blockIdCK(String blockId) {
    if(blockId == null) {
      out.println("Please provide valid blockId!");
      return;
    }
    // TODO: Just hold the BM read lock.
    namenode.getNamesystem().readLock(RwLockMode.GLOBAL);
    try {
      //get blockInfo
      Block block = new Block(Block.getBlockId(blockId));
      //find which file this block belongs to
      BlockInfo blockInfo = blockManager.getStoredBlock(block);
      if (blockInfo == null || blockInfo.isDeleted()) {
        out.println("Block "+ blockId +" " + NONEXISTENT_STATUS);
        LOG.warn("Block "+ blockId + " " + NONEXISTENT_STATUS);
        return;
      }
      final INodeFile iNode = namenode.getNamesystem().getBlockCollection(blockInfo);
      NumberReplicas numberReplicas= blockManager.countNodes(blockInfo);
      out.println("Block Id: " + blockId);
      out.println("Block belongs to: "+iNode.getFullPathName());
      out.println("No. of Expected Replica: " +
          blockManager.getExpectedRedundancyNum(blockInfo));
      out.println("No. of live Replica: " + numberReplicas.liveReplicas());
      out.println("No. of excess Replica: " + numberReplicas.excessReplicas());
      out.println("No. of stale Replica: " +
          numberReplicas.replicasOnStaleNodes());
      out.println("No. of decommissioned Replica: "
          + numberReplicas.decommissioned());
      out.println("No. of decommissioning Replica: "
          + numberReplicas.decommissioning());
      // Maintenance counts are only printed when the maintenance flag is set.
      if (this.showMaintenanceState) {
        out.println("No. of entering maintenance Replica: "
            + numberReplicas.liveEnteringMaintenanceReplicas());
        out.println("No. of in maintenance Replica: "
            + numberReplicas.maintenanceNotForReadReplicas());
      }
      out.println("No. of corrupted Replica: " +
          numberReplicas.corruptReplicas());
      // for striped blocks only and number of redundant internal block replicas.
      if (blockInfo.isStriped()) {
        out.println("No. of redundant Replica: " + numberReplicas.redundantInternalBlocks());
      }
      //record datanodes that have corrupted block replica
      Collection<DatanodeDescriptor> corruptionRecord = null;
      if (blockManager.getCorruptReplicas(block) != null) {
        corruptionRecord = blockManager.getCorruptReplicas(block);
      }
      // report block replicas status on datanodes
      if (blockInfo.isStriped()) {
        // Striped groups are walked over their full capacity; empty
        // internal-block slots yield a null datanode and are skipped.
        for (int idx = (blockInfo.getCapacity() - 1); idx >= 0; idx--) {
          DatanodeDescriptor dn = blockInfo.getDatanode(idx);
          if (dn == null) {
            continue;
          }
          printDatanodeReplicaStatus(block, corruptionRecord, dn);
        }
      } else {
        for (int idx = (blockInfo.numNodes() - 1); idx >= 0; idx--) {
          DatanodeDescriptor dn = blockInfo.getDatanode(idx);
          printDatanodeReplicaStatus(block, corruptionRecord, dn);
        }
      }
    } catch (Exception e) {
      String errMsg = "Fsck on blockId '" + blockId;
      LOG.warn(errMsg, e);
      out.println(e.getMessage());
      out.print("\n\n" + errMsg);
      // NOTE(review): the same exception is logged twice (here and above);
      // looks redundant — confirm before removing either call.
      LOG.warn("Error in looking up block", e);
    } finally {
      namenode.getNamesystem().readUnlock(RwLockMode.GLOBAL, "fsck");
    }
  }
private void printDatanodeReplicaStatus(Block block,
Collection<DatanodeDescriptor> corruptionRecord, DatanodeDescriptor dn) {
out.print("Block replica on datanode/rack: " + dn.getHostName() +
dn.getNetworkLocation() + " ");
if (corruptionRecord != null && corruptionRecord.contains(dn)) {
out.print(CORRUPT_STATUS + "\t ReasonCode: " +
blockManager.getCorruptReason(block, dn));
} else if (dn.isDecommissioned()){
out.print(DECOMMISSIONED_STATUS);
} else if (dn.isDecommissionInProgress()) {
out.print(DECOMMISSIONING_STATUS);
} else if (this.showMaintenanceState && dn.isEnteringMaintenance()) {
out.print(ENTERING_MAINTENANCE_STATUS);
} else if (this.showMaintenanceState && dn.isInMaintenance()) {
out.print(IN_MAINTENANCE_STATUS);
} else if (dn.isStale(this.staleInterval)) {
out.print(STALE_STATUS);
} else if (blockManager.isExcess(dn, blockManager.getStoredBlock(block))) {
out.print(EXCESS_STATUS);
} else {
out.print(HEALTHY_STATUS);
}
out.print("\n");
}
  /**
   * Top-level entry point: checks files on DFS starting from {@link #path},
   * or — when block ids were supplied — reports on each block id instead.
   * Writes the human-readable report to {@code out} and always closes it.
   *
   * @throws AccessControlException if the caller lacks the required privilege
   */
  public void fsck() throws AccessControlException {
    final long startTime = Time.monotonicNow();
    String operationName = "fsck";
    try {
      // Block-id mode: superuser only; each id is reported independently.
      if(blockIds != null) {
        namenode.getNamesystem().checkSuperuserPrivilege(operationName, path);
        StringBuilder sb = new StringBuilder();
        sb.append("FSCK started by " +
            UserGroupInformation.getCurrentUser() + " from " +
            remoteAddress + " at " + new Date());
        out.println(sb);
        // NOTE(review): sb is printed before the block ids are appended, so
        // only the NameNode log (below) gets the full id list — appears
        // intentional; confirm before changing.
        sb.append(" for blockIds: \n");
        for (String blk: blockIds) {
          if(blk == null || !blk.contains(Block.BLOCK_FILE_PREFIX)) {
            out.println("Incorrect blockId format: " + blk);
            continue;
          }
          out.print("\n");
          blockIdCK(blk);
          sb.append(blk + "\n");
        }
        LOG.info("{}", sb.toString());
        out.flush();
        return;
      }
      String msg = "FSCK started by " + UserGroupInformation.getCurrentUser()
          + " from " + remoteAddress + " for path " + path + " at " + new Date();
      LOG.info(msg);
      out.println(msg);
      // -includeSnapshots: collect all snapshottable dirs so checkDir() can
      // descend into their .snapshot subtrees as well.
      if (snapshottableDirs != null) {
        SnapshottableDirectoryStatus[] snapshotDirs =
            namenode.getRpcServer().getSnapshottableDirListing();
        if (snapshotDirs != null) {
          for (SnapshottableDirectoryStatus dir : snapshotDirs) {
            snapshottableDirs.add(dir.getFullPath().toString());
          }
        }
      }
      final HdfsFileStatus file = namenode.getRpcServer().getFileInfo(path);
      if (file != null) {
        if (showCorruptFileBlocks) {
          listCorruptFileBlocks();
          return;
        }
        if (this.showStoragePolcies) {
          storageTypeSummary = new StoragePolicySummary(
              namenode.getNamesystem().getBlockManager().getStoragePolicies());
        }
        // Replicated files and EC files are accumulated into separate results.
        Result replRes = new ReplicationResult(conf);
        Result ecRes = new ErasureCodingResult(conf);
        check(path, file, replRes, ecRes);
        out.print("\nStatus: ");
        out.println(replRes.isHealthy() && ecRes.isHealthy() ? "HEALTHY" : "CORRUPT");
        out.println(" Number of data-nodes:\t" + totalDatanodes);
        out.println(" Number of racks:\t\t" + networktopology.getNumOfRacks());
        out.println(" Total dirs:\t\t\t" + totalDirs);
        out.println(" Total symlinks:\t\t" + totalSymlinks);
        out.println("\nReplicated Blocks:");
        out.println(replRes);
        out.println("\nErasure Coded Block Groups:");
        out.println(ecRes);
        if (this.showStoragePolcies) {
          out.print(storageTypeSummary);
        }
        out.println("FSCK ended at " + new Date() + " in "
            + (Time.monotonicNow() - startTime + " milliseconds"));
        // If there were internal errors during the fsck operation, we want to
        // return FAILURE_STATUS, even if those errors were not immediately
        // fatal.  Otherwise many unit tests will pass even when there are bugs.
        if (internalError) {
          throw new IOException("fsck encountered internal errors!");
        }
        // DFSck client scans for the string HEALTHY/CORRUPT to check the status
        // of file system and return appropriate code. Changing the output
        // string might break testcases. Also note this must be the last line
        // of the report.
        if (replRes.isHealthy() && ecRes.isHealthy()) {
          out.print("\n\nThe filesystem under path '" + path + "' " + HEALTHY_STATUS);
        } else {
          out.print("\n\nThe filesystem under path '" + path + "' " + CORRUPT_STATUS);
        }
      } else {
        out.print("\n\nPath '" + path + "' " + NONEXISTENT_STATUS);
      }
    } catch (Exception e) {
      String errMsg = "Fsck on path '" + path + "' " + FAILURE_STATUS;
      LOG.warn(errMsg, e);
      out.println("FSCK ended at " + new Date() + " in "
          + (Time.monotonicNow() - startTime + " milliseconds"));
      out.println(e.getMessage());
      out.print("\n\n" + errMsg);
    } finally {
      out.close();
    }
  }
private void listCorruptFileBlocks() throws IOException {
final List<String> corruptBlocksFiles = namenode.getNamesystem()
.listCorruptFileBlocksWithSnapshot(path, snapshottableDirs,
currentCookie);
int numCorruptFiles = corruptBlocksFiles.size();
String filler;
if (numCorruptFiles > 0) {
filler = Integer.toString(numCorruptFiles);
} else if (currentCookie[0].equals("0")) {
filler = "no";
} else {
filler = "no more";
}
out.println("Cookie:\t" + currentCookie[0]);
for (String s : corruptBlocksFiles) {
out.println(s);
}
out.println("\n\nThe filesystem under path '" + path + "' has " + filler
+ " CORRUPT files");
out.println();
}
@VisibleForTesting
void check(String parent, HdfsFileStatus file, Result replRes, Result ecRes)
throws IOException {
String path = file.getFullName(parent);
if ((totalDirs + totalSymlinks + replRes.totalFiles + ecRes.totalFiles)
% 1000 == 0) {
out.println();
out.flush();
}
if (file.isDirectory()) {
checkDir(path, replRes, ecRes);
return;
}
if (file.isSymlink()) {
if (showFiles) {
out.println(path + " <symlink>");
}
totalSymlinks++;
return;
}
LocatedBlocks blocks = getBlockLocations(path, file);
if (blocks == null) { // the file is deleted
return;
}
final Result r = file.getErasureCodingPolicy() != null ? ecRes: replRes;
collectFileSummary(path, file, r, blocks);
collectBlocksSummary(parent, file, r, blocks);
}
private void checkDir(String path, Result replRes, Result ecRes) throws IOException {
if (snapshottableDirs != null && snapshottableDirs.contains(path)) {
String snapshotPath = (path.endsWith(Path.SEPARATOR) ? path : path
+ Path.SEPARATOR)
+ HdfsConstants.DOT_SNAPSHOT_DIR;
HdfsFileStatus snapshotFileInfo = namenode.getRpcServer().getFileInfo(
snapshotPath);
check(snapshotPath, snapshotFileInfo, replRes, ecRes);
}
byte[] lastReturnedName = HdfsFileStatus.EMPTY_NAME;
DirectoryListing thisListing;
if (showFiles) {
out.println(path + " <dir>");
}
totalDirs++;
do {
assert lastReturnedName != null;
thisListing = namenode.getRpcServer().getListing(
path, lastReturnedName, false);
if (thisListing == null) {
return;
}
HdfsFileStatus[] files = thisListing.getPartialListing();
for (int i = 0; i < files.length; i++) {
check(path, files[i], replRes, ecRes);
}
lastReturnedName = thisListing.getLastName();
} while (thisListing.hasMore());
}
private LocatedBlocks getBlockLocations(String path, HdfsFileStatus file)
throws IOException {
long fileLen = file.getLen();
LocatedBlocks blocks = null;
final FSNamesystem fsn = namenode.getNamesystem();
final String operationName = "fsckGetBlockLocations";
FSPermissionChecker.setOperationType(operationName);
FSPermissionChecker pc = fsn.getPermissionChecker();
fsn.readLock(RwLockMode.GLOBAL);
try {
blocks = FSDirStatAndListingOp.getBlockLocations(
fsn.getFSDirectory(), pc,
path, 0, fileLen, false)
.blocks;
} catch (FileNotFoundException fnfe) {
blocks = null;
} finally {
fsn.readUnlock(RwLockMode.GLOBAL, operationName);
}
return blocks;
}
private void collectFileSummary(String path, HdfsFileStatus file, Result res,
LocatedBlocks blocks) throws IOException {
long fileLen = file.getLen();
boolean isOpen = blocks.isUnderConstruction();
if (isOpen && !showOpenFiles) {
// We collect these stats about open files to report with default options
res.totalOpenFilesSize += fileLen;
res.totalOpenFilesBlocks += blocks.locatedBlockCount();
res.totalOpenFiles++;
return;
}
res.totalFiles++;
res.totalSize += fileLen;
res.totalBlocks += blocks.locatedBlockCount();
String redundancyPolicy;
ErasureCodingPolicy ecPolicy = file.getErasureCodingPolicy();
if (ecPolicy == null) { // a replicated file
redundancyPolicy = "replicated: replication=" +
file.getReplication() + ",";
} else {
redundancyPolicy = "erasure-coded: policy=" + ecPolicy.getName() + ",";
}
if (showOpenFiles && isOpen) {
out.print(path + " " + fileLen + " bytes, " + redundancyPolicy + " " +
blocks.locatedBlockCount() + " block(s), OPENFORWRITE: ");
} else if (showFiles) {
out.print(path + " " + fileLen + " bytes, " + redundancyPolicy + " " +
blocks.locatedBlockCount() + " block(s): ");
} else if (res.totalFiles % 100 == 0) {
out.print('.');
}
}
  /**
   * Builds the bracketed per-replica detail string for a stored block:
   * one entry per storage holding the block (or expected to, while under
   * construction), each optionally annotated with rack path, upgrade domain
   * and replica state depending on the show* flags. Returns "" when none of
   * the detail flags are enabled.
   */
  private String getReplicaInfo(BlockInfo storedBlock) {
    if (!(showLocations || showRacks || showReplicaDetails ||
        showUpgradeDomains)) {
      return "";
    }
    final boolean isComplete = storedBlock.isComplete();
    Iterator<DatanodeStorageInfo> storagesItr;
    StringBuilder sb = new StringBuilder(" [");
    final boolean isStriped = storedBlock.isStriped();
    // For striped groups: maps each storage to its internal-block id
    // (group id + block index) so entries can be labelled "blk_<id>:".
    Map<DatanodeStorageInfo, Long> storage2Id = new HashMap<>();
    if (isComplete) {
      if (isStriped) {
        long blockId = storedBlock.getBlockId();
        Iterable<StorageAndBlockIndex> sis =
            ((BlockInfoStriped) storedBlock).getStorageAndIndexInfos();
        for (StorageAndBlockIndex si : sis) {
          storage2Id.put(si.getStorage(), blockId + si.getBlockIndex());
        }
      }
      storagesItr = storedBlock.getStorageInfos();
    } else {
      // Under construction: report the expected locations instead.
      storagesItr = storedBlock.getUnderConstructionFeature()
          .getExpectedStorageLocationsIterator();
    }
    while (storagesItr.hasNext()) {
      DatanodeStorageInfo storage = storagesItr.next();
      if (isStriped && isComplete) {
        long index = storage2Id.get(storage);
        sb.append("blk_" + index + ":");
      }
      DatanodeDescriptor dnDesc = storage.getDatanodeDescriptor();
      if (showRacks) {
        sb.append(NodeBase.getPath(dnDesc));
      } else {
        sb.append(new DatanodeInfoWithStorage(dnDesc, storage.getStorageID(),
            storage.getStorageType()));
      }
      if (showUpgradeDomains) {
        String upgradeDomain = (dnDesc.getUpgradeDomain() != null) ?
            dnDesc.getUpgradeDomain() : UNDEFINED;
        sb.append("(ud=" + upgradeDomain +")");
      }
      if (showReplicaDetails) {
        Collection<DatanodeDescriptor> corruptReplicas =
            blockManager.getCorruptReplicas(storedBlock);
        sb.append("(");
        // First matching state wins; ordering defines the display priority.
        if (dnDesc.isDecommissioned()) {
          sb.append("DECOMMISSIONED)");
        } else if (dnDesc.isDecommissionInProgress()) {
          sb.append("DECOMMISSIONING)");
        } else if (this.showMaintenanceState &&
            dnDesc.isEnteringMaintenance()) {
          sb.append("ENTERING MAINTENANCE)");
        } else if (this.showMaintenanceState &&
            dnDesc.isInMaintenance()) {
          sb.append("IN MAINTENANCE)");
        } else if (corruptReplicas != null
            && corruptReplicas.contains(dnDesc)) {
          sb.append("CORRUPT)");
        } else if (blockManager.isExcess(dnDesc, storedBlock)) {
          sb.append("EXCESS)");
        } else if (dnDesc.isStale(this.staleInterval)) {
          sb.append("STALE_NODE)");
        } else if (storage.areBlockContentsStale()) {
          sb.append("STALE_BLOCK_CONTENT)");
        } else {
          sb.append("LIVE)");
        }
      }
      if (storagesItr.hasNext()) {
        sb.append(", ");
      }
    }
    sb.append(']');
    return sb.toString();
  }
  /**
   * Per-block accounting for one file: walks every completed located block,
   * classifying it (missing / corrupt / under- or over-replicated /
   * mis-placed), accumulating counters into {@code res}, printing warnings
   * inline, and — when {@code -move}/{@code -delete}/{@code -replicate} are
   * active — triggering the corresponding remediation at the end.
   */
  private void collectBlocksSummary(String parent, HdfsFileStatus file,
      Result res, LocatedBlocks blocks) throws IOException {
    String path = file.getFullName(parent);
    boolean isOpen = blocks.isUnderConstruction();
    if (isOpen && !showOpenFiles) {
      return;
    }
    // Per-file tallies; res holds the filesystem-wide aggregates.
    int missing = 0;
    int corrupt = 0;
    long missize = 0;
    long corruptSize = 0;
    int underReplicatedPerFile = 0;
    int misReplicatedPerFile = 0;
    StringBuilder report = new StringBuilder();
    int blockNumber = 0;
    final LocatedBlock lastBlock = blocks.getLastLocatedBlock();
    List<BlockInfo> misReplicatedBlocks = new LinkedList<>();
    for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
      ExtendedBlock block = lBlk.getBlock();
      if (!blocks.isLastBlockComplete() && lastBlock != null &&
          lastBlock.getBlock().equals(block)) {
        // this is the last block and this is not complete. ignore it since
        // it is under construction
        continue;
      }
      final BlockInfo storedBlock = blockManager.getStoredBlock(
          block.getLocalBlock());
      final int minReplication = blockManager.getMinStorageNum(storedBlock);
      // count decommissionedReplicas / decommissioningReplicas
      NumberReplicas numberReplicas = blockManager.countNodes(storedBlock);
      int decommissionedReplicas = numberReplicas.decommissioned();
      int decommissioningReplicas = numberReplicas.decommissioning();
      int enteringMaintenanceReplicas =
          numberReplicas.liveEnteringMaintenanceReplicas();
      int inMaintenanceReplicas =
          numberReplicas.maintenanceNotForReadReplicas();
      res.decommissionedReplicas +=  decommissionedReplicas;
      res.decommissioningReplicas += decommissioningReplicas;
      res.enteringMaintenanceReplicas += enteringMaintenanceReplicas;
      res.inMaintenanceReplicas += inMaintenanceReplicas;
      // count total replicas
      int liveReplicas = numberReplicas.liveReplicas();
      int totalReplicasPerBlock = liveReplicas + decommissionedReplicas
          + decommissioningReplicas
          + enteringMaintenanceReplicas
          + inMaintenanceReplicas;
      res.totalReplicas += totalReplicasPerBlock;
      // A striped group is unrecoverable below minReplication; a replicated
      // block is only "missing" when it has no replicas at all.
      boolean isMissing;
      if (storedBlock.isStriped()) {
        isMissing = totalReplicasPerBlock < minReplication;
      } else {
        isMissing = totalReplicasPerBlock == 0;
      }
      // count expected replicas
      short targetFileReplication;
      if (file.getErasureCodingPolicy() != null) {
        assert storedBlock instanceof BlockInfoStriped;
        targetFileReplication = ((BlockInfoStriped) storedBlock)
            .getRealTotalBlockNum();
      } else {
        targetFileReplication = file.getReplication();
      }
      res.numExpectedReplicas += targetFileReplication;
      // count under min repl'd blocks
      if(totalReplicasPerBlock < minReplication){
        res.numUnderMinReplicatedBlocks++;
      }
      // count excessive Replicas / over replicated blocks
      if (liveReplicas > targetFileReplication) {
        res.excessiveReplicas += (liveReplicas - targetFileReplication);
        res.numOverReplicatedBlocks += 1;
      }
      // count corrupt blocks
      boolean isCorrupt = lBlk.isCorrupt();
      if (isCorrupt) {
        res.addCorrupt(block.getNumBytes());
        corrupt++;
        corruptSize += block.getNumBytes();
        out.print("\n" + path + ": CORRUPT blockpool " +
            block.getBlockPoolId() + " block " + block.getBlockName() + "\n");
      }
      // count minimally replicated blocks
      if (totalReplicasPerBlock >= minReplication)
        res.numMinReplicatedBlocks++;
      // count missing replicas / under replicated blocks
      if (totalReplicasPerBlock < targetFileReplication && !isMissing) {
        res.missingReplicas += (targetFileReplication - totalReplicasPerBlock);
        res.numUnderReplicatedBlocks += 1;
        underReplicatedPerFile++;
        if (!showFiles) {
          out.print("\n" + path + ": ");
        }
        out.println(" Under replicated " + block + ". Target Replicas is "
            + targetFileReplication + " but found "
            + liveReplicas+ " live replica(s), "
            + decommissionedReplicas + " decommissioned replica(s), "
            + decommissioningReplicas + " decommissioning replica(s)"
            + (this.showMaintenanceState ? (enteringMaintenanceReplicas
            + ", entering maintenance replica(s) and " + inMaintenanceReplicas
            + " in maintenance replica(s).") : "."));
      }
      // count mis replicated blocks
      BlockPlacementStatus blockPlacementStatus = bpPolicies.getPolicy(
          lBlk.getBlockType()).verifyBlockPlacement(lBlk.getLocations(),
          targetFileReplication);
      if (!blockPlacementStatus.isPlacementPolicySatisfied()) {
        res.numMisReplicatedBlocks++;
        misReplicatedPerFile++;
        if (!showFiles) {
          if(underReplicatedPerFile == 0)
            out.println();
          out.print(path + ": ");
        }
        out.println(" Replica placement policy is violated for " +
            block + ". " + blockPlacementStatus.getErrorDescription());
        if (doReplicate) {
          misReplicatedBlocks.add(storedBlock);
        }
      }
      // count storage summary
      if (this.showStoragePolcies && lBlk.getStorageTypes() != null) {
        countStorageTypeSummary(file, lBlk);
      }
      // report
      String blkName = block.toString();
      report.append(blockNumber + ". " + blkName + " len=" +
          block.getNumBytes());
      if (isMissing && !isCorrupt) {
        // If the block is corrupted, it means all its available replicas are
        // corrupted in the case of replication, and it means the state of the
        // block group is unrecoverable due to some corrupted intenal blocks in
        // the case of EC. We don't mark it as missing given these available
        // replicas/internal-blocks might still be accessible as the block might
        // be incorrectly marked as corrupted by client machines.
        report.append(" MISSING!");
        res.addMissing(blkName, block.getNumBytes());
        missing++;
        missize += block.getNumBytes();
        if (storedBlock.isStriped()) {
          report.append(" Live_repl=" + liveReplicas);
          String info = getReplicaInfo(storedBlock);
          if (!info.isEmpty()){
            report.append(" ").append(info);
          }
        }
      } else {
        report.append(" Live_repl=" + liveReplicas);
        String info = getReplicaInfo(storedBlock);
        if (!info.isEmpty()){
          report.append(" ").append(info);
        }
      }
      report.append('\n');
      blockNumber++;
    }
    //display under construction block info.
    if (!blocks.isLastBlockComplete() && lastBlock != null) {
      ExtendedBlock block = lastBlock.getBlock();
      String blkName = block.toString();
      BlockInfo storedBlock = blockManager.getStoredBlock(
          block.getLocalBlock());
      BlockUnderConstructionFeature uc =
          storedBlock.getUnderConstructionFeature();
      if (uc != null) {
        // BlockUnderConstructionFeature can be null, in case the block was
        // in committed state, and the IBR came just after the check.
        DatanodeStorageInfo[] storages = uc.getExpectedStorageLocations();
        report.append('\n').append("Under Construction Block:\n")
            .append(blockNumber).append(". ").append(blkName).append(" len=")
            .append(block.getNumBytes())
            .append(" Expected_repl=" + storages.length);
        String info = getReplicaInfo(storedBlock);
        if (!info.isEmpty()) {
          report.append(" ").append(info);
        }
      }
    }
    // count corrupt file & move or delete if necessary
    if ((missing > 0) || (corrupt > 0)) {
      if (!showFiles) {
        if (missing > 0) {
          out.print("\n" + path + ": MISSING " + missing
              + " blocks of total size " + missize + " B.");
        }
        if (corrupt > 0) {
          out.print("\n" + path + ": CORRUPT " + corrupt
              + " blocks of total size " + corruptSize + " B.");
        }
      }
      res.corruptFiles++;
      if (isOpen) {
        LOG.info("Fsck: ignoring open file " + path);
      } else {
        // Remediation only applies to closed files.
        if (doMove) copyBlocksToLostFound(parent, file, blocks);
        if (doDelete) deleteCorruptedFile(path);
      }
    }
    if (showFiles) {
      if (missing > 0 || corrupt > 0) {
        if (missing > 0) {
          out.print(" MISSING " + missing + " blocks of total size " +
              missize + " B\n");
        }
        if (corrupt > 0) {
          out.print(" CORRUPT " + corrupt + " blocks of total size " +
              corruptSize + " B\n");
        }
      } else if (underReplicatedPerFile == 0 && misReplicatedPerFile == 0) {
        out.print(" OK\n");
      }
      if (showBlocks) {
        out.print(report + "\n");
      }
    }
    // -replicate: hand the collected mis-placed blocks to the block manager.
    if (doReplicate && !misReplicatedBlocks.isEmpty()) {
      int processedBlocks = this.blockManager.processMisReplicatedBlocks(
          misReplicatedBlocks);
      if (processedBlocks < misReplicatedBlocks.size()) {
        LOG.warn("Fsck: Block manager is able to process only " +
            processedBlocks +
            " mis-replicated blocks (Total count : " +
            misReplicatedBlocks.size() +
            " ) for path " + path);
      }
      res.numBlocksQueuedForReplication += processedBlocks;
    }
  }
private void countStorageTypeSummary(HdfsFileStatus file, LocatedBlock lBlk) {
StorageType[] storageTypes = lBlk.getStorageTypes();
storageTypeSummary.add(Arrays.copyOf(storageTypes, storageTypes.length),
namenode.getNamesystem().getBlockManager()
.getStoragePolicy(file.getStoragePolicy()));
}
  /**
   * Deletes a corrupted file via the NameNode RPC server (used with -delete).
   * Failures are logged and flagged as internal errors, never thrown.
   */
  private void deleteCorruptedFile(String path) {
    try {
      // 'true' requests recursive delete, though fsck only targets files here.
      namenode.getRpcServer().delete(path, true);
      LOG.info("Fsck: deleted corrupt file " + path);
    } catch (Exception e) {
      LOG.error("Fsck: error deleting corrupted file " + path, e);
      internalError = true;
    }
  }
boolean hdfsPathExists(String path)
throws AccessControlException, UnresolvedLinkException, IOException {
try {
HdfsFileStatus hfs = namenode.getRpcServer().getFileInfo(path);
return (hfs != null);
} catch (FileNotFoundException e) {
return false;
}
}
  /**
   * Salvages whatever data remains of a corrupt file into /lost+found (used
   * with -move). Readable block runs are written as numbered "chain" files
   * under a directory mirroring the original path; a new chain starts after
   * every unreadable block. Errors are logged and flagged, never thrown.
   */
  private void copyBlocksToLostFound(String parent, HdfsFileStatus file,
      LocatedBlocks blocks) throws IOException {
    final DFSClient dfs = new DFSClient(DFSUtilClient.getNNAddress(conf), conf);
    final String fullName = file.getFullName(parent);
    OutputStream fos = null;
    try {
      if (!lfInited) {
        lostFoundInit(dfs);
      }
      if (!lfInitedOk) {
        throw new IOException("failed to initialize lost+found");
      }
      String target = lostFound + fullName;
      if (hdfsPathExists(target)) {
        // Never overwrite the remains of an earlier salvage attempt.
        LOG.warn("Fsck: can't copy the remains of " + fullName + " to " +
            "lost+found, because " + target + " already exists.");
        return;
      }
      if (!namenode.getRpcServer().mkdirs(
          target, file.getPermission(), true)) {
        throw new IOException("failed to create directory " + target);
      }
      // create chains
      int chain = 0;
      boolean copyError = false;
      for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
        LocatedBlock lblock = lBlk;
        DatanodeInfo[] locs = lblock.getLocations();
        if (locs == null || locs.length == 0) {
          // Block has no locations: close the current chain so the gap is
          // visible as a file boundary.
          if (fos != null) {
            fos.flush();
            fos.close();
            fos = null;
          }
          continue;
        }
        if (fos == null) {
          fos = dfs.create(target + "/" + chain, true);
          chain++;
        }
        // copy the block. It's a pity it's not abstracted from DFSInputStream ...
        try {
          copyBlock(dfs, lblock, fos);
        } catch (Exception e) {
          // Copy failure also ends the current chain.
          LOG.error("Fsck: could not copy block " + lblock.getBlock() +
              " to " + target, e);
          fos.flush();
          fos.close();
          fos = null;
          internalError = true;
          copyError = true;
        }
      }
      if (copyError) {
        LOG.warn("Fsck: there were errors copying the remains of the " +
            "corrupted file " + fullName + " to /lost+found");
      } else {
        LOG.info("Fsck: copied the remains of the corrupted file " +
            fullName + " to /lost+found");
      }
    } catch (Exception e) {
      LOG.error("copyBlocksToLostFound: error processing " + fullName, e);
      internalError = true;
    } finally {
      if (fos != null) fos.close();
      dfs.close();
    }
  }
/*
* XXX (ab) Bulk of this method is copied verbatim from {@link DFSClient}, which is
* bad. Both places should be refactored to provide a method to copy blocks
* around.
*/
  /**
   * Streams one block's bytes from a datanode into {@code fos}. Repeatedly
   * picks a random live replica; a connect failure marks the node dead and
   * retries. When no node can be chosen, sleeps 10s, clears the dead set and
   * retries, up to the client's max block-acquire failures. Verifies the byte
   * count against the block's recorded length.
   */
  private void copyBlock(final DFSClient dfs, LocatedBlock lblock,
      OutputStream fos) throws Exception {
    int failures = 0;
    InetSocketAddress targetAddr = null;
    Set<DatanodeInfo> deadNodes = new HashSet<DatanodeInfo>();
    BlockReader blockReader = null;
    ExtendedBlock block = lblock.getBlock();
    while (blockReader == null) {
      DatanodeInfo chosenNode;
      try {
        chosenNode = bestNode(dfs, lblock.getLocations(), deadNodes);
        targetAddr = NetUtils.createSocketAddr(chosenNode.getXferAddr());
      } catch (IOException ie) {
        if (failures >= HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT) {
          throw new IOException("Could not obtain block " + lblock, ie);
        }
        LOG.info("Could not obtain block from any node:  " + ie);
        try {
          Thread.sleep(10000);
        } catch (InterruptedException iex) {
        }
        // Start over: previously-dead nodes may have recovered.
        deadNodes.clear();
        failures++;
        continue;
      }
      try {
        String file = BlockReaderFactory.getFileName(targetAddr,
            block.getBlockPoolId(), block.getBlockId());
        blockReader = new BlockReaderFactory(dfs.getConf()).
            setFileName(file).
            setBlock(block).
            setBlockToken(lblock.getBlockToken()).
            setStartOffset(0).
            setLength(block.getNumBytes()).
            setVerifyChecksum(true).
            setClientName("fsck").
            setDatanodeInfo(chosenNode).
            setInetSocketAddress(targetAddr).
            setCachingStrategy(CachingStrategy.newDropBehind()).
            setClientCacheContext(dfs.getClientContext()).
            setConfiguration(namenode.getConf()).
            setRemotePeerFactory(new RemotePeerFactory() {
              @Override
              public Peer newConnectedPeer(InetSocketAddress addr,
                  Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
                  throws IOException {
                Peer peer = null;
                Socket s = NetUtils.getDefaultSocketFactory(conf).createSocket();
                try {
                  s.connect(addr, HdfsConstants.READ_TIMEOUT);
                  s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
                  peer = DFSUtilClient.peerFromSocketAndKey(
                        dfs.getSaslDataTransferClient(), s, NamenodeFsck.this,
                        blockToken, datanodeId, HdfsConstants.READ_TIMEOUT);
                } finally {
                  // Only close the raw socket on failure; on success the
                  // peer owns it.
                  if (peer == null) {
                    IOUtils.closeStream(s);
                  }
                }
                return peer;
              }
            }).
            build();
      } catch (IOException ex) {
        // Put chosen node into dead list, continue
        LOG.info("Failed to connect to " + targetAddr + ":" + ex);
        deadNodes.add(chosenNode);
      }
    }
    long bytesRead = 0L;
    try {
      bytesRead = copyBock(blockReader, fos);
    } catch (Exception e) {
      throw new Exception("Could not copy block data for " + lblock.getBlock(),
          e);
    } finally {
      blockReader.close();
    }
    if (bytesRead != block.getNumBytes()) {
      throw new IOException("Recorded block size is " + block.getNumBytes()
          + ", but datanode returned " + bytesRead + " bytes");
    }
  }
private long copyBock(BlockReader blockReader, OutputStream os)
throws IOException {
final byte[] buf = new byte[8192];
int cnt = 0;
long bytesRead = 0L;
while ((cnt = blockReader.read(buf, 0, buf.length)) > 0) {
os.write(buf, 0, cnt);
bytesRead += cnt;
}
return bytesRead;
}
  /** {@inheritDoc} Delegates to the NameNode RPC server. */
  @Override
  public DataEncryptionKey newDataEncryptionKey() throws IOException {
    return namenode.getRpcServer().getDataEncryptionKey();
  }
/*
* XXX (ab) See comment above for copyBlock().
*
* Pick the best node from which to stream the data.
* That's the local one, if available.
*/
private DatanodeInfo bestNode(DFSClient dfs, DatanodeInfo[] nodes,
Set<DatanodeInfo> deadNodes) throws IOException {
if ((nodes == null) || (nodes.length - deadNodes.size() < 1)) {
throw new IOException("No live nodes contain current block");
}
DatanodeInfo chosenNode;
do {
chosenNode = nodes[ThreadLocalRandom.current().nextInt(nodes.length)];
} while (deadNodes.contains(chosenNode));
return chosenNode;
}
private void lostFoundInit(DFSClient dfs) {
lfInited = true;
try {
String lfName = "/lost+found";
final HdfsFileStatus lfStatus = dfs.getFileInfo(lfName);
if (lfStatus == null) { // not exists
lfInitedOk = dfs.mkdirs(lfName, null, true);
lostFound = lfName;
} else if (!lfStatus.isDirectory()) { // exists but not a directory
LOG.warn("Cannot use /lost+found : a regular file with this name exists.");
lfInitedOk = false;
} else { // exists and is a directory
lostFound = lfName;
lfInitedOk = true;
}
} catch (Exception e) {
e.printStackTrace();
lfInitedOk = false;
}
if (lostFound == null) {
LOG.warn("Cannot initialize /lost+found .");
lfInitedOk = false;
internalError = true;
}
}
/**
* FsckResult of checking, plus overall DFS statistics.
*/
@VisibleForTesting
static
|
NamenodeFsck
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/IgniteCacheEndpointBuilderFactory.java
|
{
"start": 17833,
"end": 24445
}
|
interface ____
extends
EndpointProducerBuilder {
default AdvancedIgniteCacheEndpointProducerBuilder advanced() {
return (AdvancedIgniteCacheEndpointProducerBuilder) this;
}
/**
* Sets whether to propagate the incoming body if the return type of the
* underlying Ignite operation is void.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param propagateIncomingBodyIfNoReturnValue the value to set
* @return the dsl builder
*/
default IgniteCacheEndpointProducerBuilder propagateIncomingBodyIfNoReturnValue(boolean propagateIncomingBodyIfNoReturnValue) {
doSetProperty("propagateIncomingBodyIfNoReturnValue", propagateIncomingBodyIfNoReturnValue);
return this;
}
/**
* Sets whether to propagate the incoming body if the return type of the
* underlying Ignite operation is void.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param propagateIncomingBodyIfNoReturnValue the value to set
* @return the dsl builder
*/
default IgniteCacheEndpointProducerBuilder propagateIncomingBodyIfNoReturnValue(String propagateIncomingBodyIfNoReturnValue) {
doSetProperty("propagateIncomingBodyIfNoReturnValue", propagateIncomingBodyIfNoReturnValue);
return this;
}
/**
* Sets whether to treat Collections as cache objects or as Collections
* of items to insert/update/compute, etc.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param treatCollectionsAsCacheObjects the value to set
* @return the dsl builder
*/
default IgniteCacheEndpointProducerBuilder treatCollectionsAsCacheObjects(boolean treatCollectionsAsCacheObjects) {
doSetProperty("treatCollectionsAsCacheObjects", treatCollectionsAsCacheObjects);
return this;
}
/**
* Sets whether to treat Collections as cache objects or as Collections
* of items to insert/update/compute, etc.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param treatCollectionsAsCacheObjects the value to set
* @return the dsl builder
*/
default IgniteCacheEndpointProducerBuilder treatCollectionsAsCacheObjects(String treatCollectionsAsCacheObjects) {
doSetProperty("treatCollectionsAsCacheObjects", treatCollectionsAsCacheObjects);
return this;
}
/**
* The CachePeekMode, only needed for operations that require it
* (IgniteCacheOperation#SIZE).
*
* The option is a: <code>org.apache.ignite.cache.CachePeekMode</code>
* type.
*
* Default: ALL
* Group: producer
*
* @param cachePeekMode the value to set
* @return the dsl builder
*/
default IgniteCacheEndpointProducerBuilder cachePeekMode(org.apache.ignite.cache.CachePeekMode cachePeekMode) {
doSetProperty("cachePeekMode", cachePeekMode);
return this;
}
/**
* The CachePeekMode, only needed for operations that require it
* (IgniteCacheOperation#SIZE).
*
* The option will be converted to a
* <code>org.apache.ignite.cache.CachePeekMode</code> type.
*
* Default: ALL
* Group: producer
*
* @param cachePeekMode the value to set
* @return the dsl builder
*/
default IgniteCacheEndpointProducerBuilder cachePeekMode(String cachePeekMode) {
doSetProperty("cachePeekMode", cachePeekMode);
return this;
}
/**
* Whether to fail the initialization if the cache doesn't exist.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param failIfInexistentCache the value to set
* @return the dsl builder
*/
default IgniteCacheEndpointProducerBuilder failIfInexistentCache(boolean failIfInexistentCache) {
doSetProperty("failIfInexistentCache", failIfInexistentCache);
return this;
}
/**
* Whether to fail the initialization if the cache doesn't exist.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param failIfInexistentCache the value to set
* @return the dsl builder
*/
default IgniteCacheEndpointProducerBuilder failIfInexistentCache(String failIfInexistentCache) {
doSetProperty("failIfInexistentCache", failIfInexistentCache);
return this;
}
/**
* The cache operation to invoke. Possible values: GET, PUT, REMOVE,
* SIZE, REBALANCE, QUERY, CLEAR.
*
* The option is a:
* <code>org.apache.camel.component.ignite.cache.IgniteCacheOperation</code> type.
*
* Group: producer
*
* @param operation the value to set
* @return the dsl builder
*/
default IgniteCacheEndpointProducerBuilder operation(org.apache.camel.component.ignite.cache.IgniteCacheOperation operation) {
doSetProperty("operation", operation);
return this;
}
/**
* The cache operation to invoke. Possible values: GET, PUT, REMOVE,
* SIZE, REBALANCE, QUERY, CLEAR.
*
* The option will be converted to a
* <code>org.apache.camel.component.ignite.cache.IgniteCacheOperation</code> type.
*
* Group: producer
*
* @param operation the value to set
* @return the dsl builder
*/
default IgniteCacheEndpointProducerBuilder operation(String operation) {
doSetProperty("operation", operation);
return this;
}
}
/**
* Advanced builder for endpoint producers for the Ignite Cache component.
*/
public
|
IgniteCacheEndpointProducerBuilder
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/InconsistentCapitalizationTest.java
|
{
"start": 11584,
"end": 12019
}
|
interface ____ {} // ignore
private final WaitHandler waitHandler;
// BUG: Diagnostic contains:
protected Callback(final WaitHandler waithandler) {
this.waitHandler = waithandler;
}
public static Callback doOnSuccess() {
return new Callback(null) {};
}
}
""")
.doTest();
}
}
|
WaitHandler
|
java
|
quarkusio__quarkus
|
extensions/info/deployment/src/main/java/io/quarkus/info/deployment/InfoBuildTimeConfig.java
|
{
"start": 1926,
"end": 2099
}
|
interface ____ {
/**
* Whether java info will be included in the info endpoint
*/
@WithDefault("true")
boolean enabled();
}
}
|
Java
|
java
|
apache__camel
|
components/camel-couchdb/src/generated/java/org/apache/camel/component/couchdb/CouchDbEndpointConfigurer.java
|
{
"start": 734,
"end": 5066
}
|
class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
CouchDbEndpoint target = (CouchDbEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "bridgeerrorhandler":
case "bridgeErrorHandler": target.setBridgeErrorHandler(property(camelContext, boolean.class, value)); return true;
case "createdatabase":
case "createDatabase": target.setCreateDatabase(property(camelContext, boolean.class, value)); return true;
case "deletes": target.setDeletes(property(camelContext, boolean.class, value)); return true;
case "exceptionhandler":
case "exceptionHandler": target.setExceptionHandler(property(camelContext, org.apache.camel.spi.ExceptionHandler.class, value)); return true;
case "exchangepattern":
case "exchangePattern": target.setExchangePattern(property(camelContext, org.apache.camel.ExchangePattern.class, value)); return true;
case "heartbeat": target.setHeartbeat(property(camelContext, java.time.Duration.class, value).toMillis()); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
case "maxmessagesperpoll":
case "maxMessagesPerPoll": target.setMaxMessagesPerPoll(property(camelContext, int.class, value)); return true;
case "password": target.setPassword(property(camelContext, java.lang.String.class, value)); return true;
case "style": target.setStyle(property(camelContext, java.lang.String.class, value)); return true;
case "updates": target.setUpdates(property(camelContext, boolean.class, value)); return true;
case "username": target.setUsername(property(camelContext, java.lang.String.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "bridgeerrorhandler":
case "bridgeErrorHandler": return boolean.class;
case "createdatabase":
case "createDatabase": return boolean.class;
case "deletes": return boolean.class;
case "exceptionhandler":
case "exceptionHandler": return org.apache.camel.spi.ExceptionHandler.class;
case "exchangepattern":
case "exchangePattern": return org.apache.camel.ExchangePattern.class;
case "heartbeat": return long.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
case "maxmessagesperpoll":
case "maxMessagesPerPoll": return int.class;
case "password": return java.lang.String.class;
case "style": return java.lang.String.class;
case "updates": return boolean.class;
case "username": return java.lang.String.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
CouchDbEndpoint target = (CouchDbEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "bridgeerrorhandler":
case "bridgeErrorHandler": return target.isBridgeErrorHandler();
case "createdatabase":
case "createDatabase": return target.isCreateDatabase();
case "deletes": return target.isDeletes();
case "exceptionhandler":
case "exceptionHandler": return target.getExceptionHandler();
case "exchangepattern":
case "exchangePattern": return target.getExchangePattern();
case "heartbeat": return target.getHeartbeat();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
case "maxmessagesperpoll":
case "maxMessagesPerPoll": return target.getMaxMessagesPerPoll();
case "password": return target.getPassword();
case "style": return target.getStyle();
case "updates": return target.isUpdates();
case "username": return target.getUsername();
default: return null;
}
}
}
|
CouchDbEndpointConfigurer
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/rest/handler/job/metrics/AbstractMetricsHandlerTest.java
|
{
"start": 7844,
"end": 8881
}
|
class ____ extends AbstractMetricsHandler<TestMessageParameters> {
private boolean returnComponentMetricStore = true;
private TestMetricsHandler(
GatewayRetriever<DispatcherGateway> leaderRetriever,
Duration timeout,
Map<String, String> headers,
MessageHeaders<
EmptyRequestBody,
MetricCollectionResponseBody,
TestMessageParameters>
messageHeaders,
MetricFetcher metricFetcher) {
super(leaderRetriever, timeout, headers, messageHeaders, metricFetcher);
}
@Nullable
@Override
protected MetricStore.ComponentMetricStore getComponentMetricStore(
HandlerRequest<EmptyRequestBody> request, MetricStore metricStore) {
return returnComponentMetricStore ? metricStore.getJobManagerMetricStore() : null;
}
}
private static
|
TestMetricsHandler
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/SlotCountExceedingParallelismTest.java
|
{
"start": 6022,
"end": 7120
}
|
class ____ extends AbstractInvokable {
public static final String CONFIG_KEY = "number-of-times-to-send";
public RoundRobinSubtaskIndexSender(Environment environment) {
super(environment);
}
@Override
public void invoke() throws Exception {
RecordWriter<IntValue> writer =
new RecordWriterBuilder<IntValue>().build(getEnvironment().getWriter(0));
final int numberOfTimesToSend =
getTaskConfiguration().get(getIntConfigOption(CONFIG_KEY), 0);
final IntValue subtaskIndex =
new IntValue(getEnvironment().getTaskInfo().getIndexOfThisSubtask());
try {
for (int i = 0; i < numberOfTimesToSend; i++) {
writer.emit(subtaskIndex);
}
writer.flushAll();
} finally {
writer.close();
}
}
}
/** Expects to receive the subtask index from a configurable number of sender tasks. */
public static
|
RoundRobinSubtaskIndexSender
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestUserGroupMappingPlacementRule.java
|
{
"start": 23421,
"end": 23486
}
|
class ____ prepare the test data.
*
*/
public static final
|
to
|
java
|
assertj__assertj-core
|
assertj-guava/src/main/java/org/assertj/guava/api/RangeSetAssert.java
|
{
"start": 2314,
"end": 43788
}
|
class ____<T extends Comparable<T>> extends AbstractAssert<RangeSetAssert<T>, RangeSet<T>> {
protected RangeSetAssert(RangeSet<T> actual) {
super(actual, RangeSetAssert.class);
}
/**
* Verifies that the given {@code RangeSet} has specific {@code size} of disconnected {@code Range} elements.
* <p>
* Example:
*
* <pre><code class='java'> RangeSet<Integer> rangeSet = TreeRangeSet.create();
*
* rangeSet.add(Range.closed(0, 100));
* rangeSet.add(Range.closed(200, 300));
* rangeSet.add(Range.closed(500, 600));
*
* assertThat(rangeSet).hasSize(3);</code></pre>
*
* @param size expected amount of disconnected {@code Range} elements.
* @return this {@link RangeSetAssert} for assertions chaining.
* @throws AssertionError if the actual {@code RangeSet} is {@code null}.
* @throws AssertionError if the actual size of {@code RangeSet} is different from the expected {@code size}.
*/
public RangeSetAssert<T> hasSize(int size) {
isNotNull();
assertHasSize(size);
return myself;
}
private void assertHasSize(int expectedSize) {
int actualSize = actual.asRanges().size();
if (actualSize != expectedSize) throwAssertionError(shouldHaveSize(actual, actualSize, expectedSize));
}
/**
* Verifies that the given {@code RangeSet} contains the given values.
* <p>
* Example:
*
* <pre><code class='java'> RangeSet<Integer> rangeSet = TreeRangeSet.create();
*
* rangeSet.add(Range.closed(0, 100));
* rangeSet.add(Range.closed(200, 300));
* rangeSet.add(Range.closed(500, 600));
*
* assertThat(rangeSet).contains(50, 270, 550);</code></pre>
*
* @param values the values to look for in actual {@code RangeSet}.
* @return this {@link RangeSetAssert} for assertions chaining.
* @throws AssertionError if the actual {@code RangeSet} is {@code null}.
* @throws AssertionError if the actual {@code RangeSet} does not contain the given {@code values}.
* @throws NullPointerException if values are null.
* @throws IllegalArgumentException if values are empty while actual is not empty.
*/
@SafeVarargs
public final RangeSetAssert<T> contains(T... values) {
isNotNull();
assertContains(values);
return myself;
}
private void assertContains(T[] values) {
requireNonNull(values, shouldNotBeNull("values")::create);
if (actual.isEmpty() && values.length == 0) return;
failIfEmpty(values, "values");
assertRangeSetContainsGivenValues(actual, values);
}
/**
* Verifies that the given {@code RangeSet} contains all the given values.
* <p>
* Example:
*
* <pre><code class='java'> RangeSet<Integer> rangeSet = TreeRangeSet.create();
*
* rangeSet.add(Range.closed(0, 100));
* rangeSet.add(Range.closed(200, 300));
* rangeSet.add(Range.closed(500, 600));
*
* assertThat(rangeSet).containsAll(Arrays.asList(50, 270, 550));</code></pre>
*
* @param values the values to look for in actual {@code RangeSet}.
* @return this {@link RangeSetAssert} for assertions chaining.
* @throws AssertionError if the actual {@code RangeSet} is {@code null}.
* @throws AssertionError if the actual {@code RangeSet} does not contain all the given {@code values}.
* @throws NullPointerException if values are null.
* @throws IllegalArgumentException if values are empty while actual is not empty.
*/
public RangeSetAssert<T> containsAll(Iterable<T> values) {
isNotNull();
assertContainsAll(values);
return myself;
}
private void assertContainsAll(Iterable<T> values) {
requireNonNull(values, shouldNotBeNull("values")::create);
if (actual.isEmpty() && !values.iterator().hasNext()) return;
failIfEmpty(values, "values");
assertRangeSetContainsGivenValues(actual, toArray(values, Comparable.class));
}
@SuppressWarnings({ "rawtypes", "unchecked" })
private void assertRangeSetContainsGivenValues(RangeSet actual, Comparable[] values) {
List<?> elementsNotFound = stream(values).filter(value -> !actual.contains(value)).collect(toList());
if (!elementsNotFound.isEmpty()) throwAssertionError(shouldContain(actual, values, elementsNotFound));
}
/**
* Verifies that the given {@code RangeSet} contains at least one of the given values.
* <p>
* Example:
*
* <pre><code class='java'> RangeSet<Integer> rangeSet = TreeRangeSet.create();
*
* rangeSet.add(Range.closed(0, 100));
* rangeSet.add(Range.closed(200, 300));
* rangeSet.add(Range.closed(500, 600));
*
* assertThat(rangeSet).containsAnyOf(150, 250, 700);</code></pre>
*
* @param values the values to look for in actual {@code RangeSet}.
* @return this {@link RangeSetAssert} for assertions chaining.
* @throws AssertionError if the actual {@code RangeSet} is {@code null}.
* @throws AssertionError if the actual {@code RangeSet} does not contain at least one of the given {@code values}.
* @throws NullPointerException if values are null.
* @throws IllegalArgumentException if values are empty while actual is not empty.
*/
@SafeVarargs
public final RangeSetAssert<T> containsAnyOf(T... values) {
isNotNull();
assertContainsAnyOf(values);
return myself;
}
private void assertContainsAnyOf(T[] values) {
requireNonNull(values, shouldNotBeNull("values")::create);
// Should pass if both actual and expected are empty
if (actual.isEmpty() && values.length == 0) return;
failIfEmpty(values, "values");
assertRangeSetContainsAnyGivenValues(actual, values);
}
/**
* Verifies that the given {@code RangeSet} contains at least one of the given values.
* <p>
* Example:
*
* <pre><code class='java'> RangeSet<Integer> rangeSet = TreeRangeSet.create();
*
* rangeSet.add(Range.closed(0, 100));
* rangeSet.add(Range.closed(200, 300));
* rangeSet.add(Range.closed(500, 600));
*
* assertThat(rangeSet).containsAnyRangesOf(Arrays.asList(150, 250, 700));</code></pre>
*
* @param values the values to look for in actual {@code RangeSet}.
* @return this {@link RangeSetAssert} for assertions chaining.
* @throws AssertionError if the actual {@code RangeSet} is {@code null}.
* @throws AssertionError if the actual {@code RangeSet} does not contain at least one of the given {@code values}.
* @throws NullPointerException if values are null.
* @throws IllegalArgumentException if values are empty while actual is not empty.
*/
public RangeSetAssert<T> containsAnyRangesOf(Iterable<T> values) {
isNotNull();
assertContainsAnyRangesOf(values);
return myself;
}
private void assertContainsAnyRangesOf(Iterable<T> values) {
requireNonNull(values, shouldNotBeNull("values")::create);
if (actual.isEmpty() && !values.iterator().hasNext()) return;
failIfEmpty(values, "values");
assertRangeSetContainsAnyGivenValues(actual, toArray(values, Comparable.class));
}
@SuppressWarnings({ "rawtypes", "unchecked" })
private void assertRangeSetContainsAnyGivenValues(RangeSet actual, Comparable[] values) {
boolean match = stream(values).anyMatch(actual::contains);
if (!match) throwAssertionError(shouldContainAnyOf(actual, values));
}
/**
* Verifies that the given {@code RangeSet} does not contain any of the given values.
* <p>
* Example:
*
* <pre><code class='java'> RangeSet<Integer> rangeSet = TreeRangeSet.create();
*
* rangeSet.add(Range.closed(0, 100));
* rangeSet.add(Range.closed(200, 300));
* rangeSet.add(Range.closed(500, 600));
*
* assertThat(rangeSet).doesNotContain(150, 320, 650);</code></pre>
*
* @param values the values that should not be present in actual {@code RangeSet}
* @return this {@link RangeSetAssert} for assertions chaining.
* @throws AssertionError if the actual {@code RangeSet} is {@code null}.
* @throws AssertionError if the actual {@code RangeSet} contains any of the given {@code values}.
* @throws NullPointerException if values are null.
* @throws IllegalArgumentException if values are empty.
*/
@SafeVarargs
public final RangeSetAssert<T> doesNotContain(T... values) {
isNotNull();
assertDoesNotContain(values);
return myself;
}
private void assertDoesNotContain(T[] values) {
requireNonNull(values, shouldNotBeNull("values")::create);
failIfEmpty(values, "values");
assertRangeSetDoesNotContainGivenValues(actual, values);
}
/**
* Verifies that the given {@code RangeSet} does not contain any of the given values.
* <p>
* Example:
*
* <pre><code class='java'> RangeSet<Integer> rangeSet = TreeRangeSet.create();
*
* rangeSet.add(Range.closed(0, 100));
* rangeSet.add(Range.closed(200, 300));
* rangeSet.add(Range.closed(500, 600));
*
* assertThat(rangeSet).doesNotContain(Arrays.asList(150, 320, 650));</code></pre>
*
* @param values the values that should not be present in actual {@code RangeSet}
* @return this {@link RangeSetAssert} for assertions chaining.
* @throws AssertionError if the actual {@code RangeSet} is {@code null}.
* @throws AssertionError if the actual {@code RangeSet} contains any of the given {@code values}.
* @throws NullPointerException if values are null.
* @throws IllegalArgumentException if values are empty.
*/
public RangeSetAssert<T> doesNotContainAll(Iterable<T> values) {
isNotNull();
assertDoesNotContainAll(values);
return myself;
}
private void assertDoesNotContainAll(Iterable<T> values) {
requireNonNull(values, shouldNotBeNull("values")::create);
failIfEmpty(values, "values");
assertRangeSetDoesNotContainGivenValues(actual, toArray(values, Comparable.class));
}
@SuppressWarnings({ "rawtypes", "unchecked" })
private void assertRangeSetDoesNotContainGivenValues(RangeSet actual, Comparable[] values) {
List<?> elementsFound = stream(values).filter(actual::contains).collect(toList());
if (!elementsFound.isEmpty()) throwAssertionError(shouldNotContain(actual, values, elementsFound));
}
/**
* Verifies that the actual {@code RangeSet} is empty.
* <p>
* Example:
*
* <pre><code class='java'> RangeSet<Integer> rangeSet = TreeRangeSet.create();
*
* assertThat(rangeSet).isEmpty();</code></pre>
*
* @return this {@link RangeSetAssert} for assertions chaining.
* @throws AssertionError if the actual {@code RangeSet} is {@code null}.
* @throws AssertionError if the actual {@code RangeSet} is not empty.
*/
public RangeSetAssert<T> isEmpty() {
isNotNull();
assertEmpty();
return myself;
}
private void assertEmpty() {
if (!actual.isEmpty()) throwAssertionError(shouldBeEmpty(actual));
}
/**
* Verifies that the actual {@code RangeSet} is not empty.
* <p>
* Example:
*
* <pre><code class='java'> RangeSet<Integer> rangeSet = TreeRangeSet.create();
*
* rangeSet.add(Range.closed(0, 100));
* rangeSet.add(Range.closed(200, 300));
* rangeSet.add(Range.closed(500, 600));
*
* assertThat(rangeSet).isNotEmpty();</code></pre>
*
* @return this {@link RangeSetAssert} for assertions chaining.
* @throws AssertionError if the actual {@code RangeSet} is {@code null}.
* @throws AssertionError if the actual {@code RangeSet} is empty.
*/
public RangeSetAssert<T> isNotEmpty() {
isNotNull();
assertNotEmpty();
return myself;
}
private void assertNotEmpty() {
if (actual.isEmpty()) throwAssertionError(shouldNotBeEmpty());
}
/**
* Verifies that the actual {@code RangeSet} is {@code null} or empty.
* <p>
* Example:
*
* <pre><code class='java'> RangeSet<Integer> rangeSet = TreeRangeSet.create();
*
* assertThat(rangeSet).isNullOrEmpty();</code></pre>
*
* @return this {@link RangeSetAssert} for assertions chaining.
* @throws AssertionError if the actual {@code RangeSet} is not {@code null} or not empty.
*/
public RangeSetAssert<T> isNullOrEmpty() {
assertNullOrEmpty();
return myself;
}
private void assertNullOrEmpty() {
if (actual != null && !actual.isEmpty()) throwAssertionError(shouldBeNullOrEmpty(actual));
}
/**
* Verifies that the given {@code RangeSet} intersects all the given ranges.
* <p>
* Example:
*
* <pre><code class='java'> RangeSet<Integer> rangeSet = TreeRangeSet.create();
*
* rangeSet.add(Range.closed(0, 100));
* rangeSet.add(Range.closed(200, 300));
* rangeSet.add(Range.closed(500, 600));
*
* assertThat(rangeSet).intersects(Range.closed(50, 150),
* Range.openClosed(170, 220),
* Range.open(520, 570));</code></pre>
*
* @param ranges the ranges to check whether they intersect the given {@code RangeSet}.
* @return this {@link RangeSetAssert} for assertions chaining.
* @throws AssertionError if the actual {@code RangeSet} is {@code null}.
* @throws AssertionError if the actual {@code RangeSet} does not intersect all the given ranges.
* @throws NullPointerException if ranges are null.
* @throws IllegalArgumentException if ranges are empty while actual is not empty.
*/
@SafeVarargs
public final RangeSetAssert<T> intersects(Range<T>... ranges) {
isNotNull();
assertIntersects(ranges);
return myself;
}
private void assertIntersects(Range<T>[] ranges) {
requireNonNull(ranges, shouldNotBeNull("ranges")::create);
if (actual.isEmpty() && ranges.length == 0) return;
failIfEmpty(ranges, "ranges");
assertRangeSetIntersectsGivenValues(ranges);
}
/**
* Verifies that the given {@code RangeSet} intersects all ranges from the given range set.
* <p>
* Example:
*
* <pre><code class='java'> RangeSet<Integer> rangeSet = TreeRangeSet.create();
*
* rangeSet.add(Range.closed(0, 100));
* rangeSet.add(Range.closed(200, 300));
* rangeSet.add(Range.closed(500, 600));
*
* assertThat(rangeSet).intersectsAll(ImmutableRangeSet.of(Range.closed(50, 250)));</code></pre>
*
* @param rangeSet the range set to check whether it intersects the actual {@code RangeSet}.
* @return this {@link RangeSetAssert} for assertions chaining.
* @throws AssertionError if the actual {@code RangeSet} is {@code null}.
* @throws AssertionError if the actual {@code RangeSet} does not intersect all the ranges from the given range set.
* @throws NullPointerException if range set is null.
* @throws IllegalArgumentException if range set is empty while actual is not empty.
*/
public RangeSetAssert<T> intersectsAll(RangeSet<T> rangeSet) {
isNotNull();
assertIntersectsAll(rangeSet);
return myself;
}
@SuppressWarnings("unchecked")
private void assertIntersectsAll(RangeSet<T> rangeSet) {
requireNonNull(rangeSet, shouldNotBeNull("rangeSet")::create);
// Should pass if both actual and expected are empty
if (actual.isEmpty() && rangeSet.isEmpty()) return;
failIfEmpty(rangeSet);
assertRangeSetIntersectsGivenValues(toArray(rangeSet.asRanges(), Range.class));
}
/**
* Verifies that the given {@code RangeSet} intersects all the given ranges.
* <p>
* Example:
*
* <pre><code class='java'> RangeSet<Integer> rangeSet = TreeRangeSet.create();
*
* rangeSet.add(Range.closed(0, 100));
* rangeSet.add(Range.closed(200, 300));
* rangeSet.add(Range.closed(500, 600));
*
* assertThat(rangeSet).intersectsAll(Arrays.asList(Range.closed(50, 150),
* Range.openClosed(170, 220),
* Range.open(520, 570)));</code></pre>
*
* @param ranges the ranges to check whether they all intersect the given {@code RangeSet}.
* @return this {@link RangeSetAssert} for assertions chaining.
* @throws AssertionError if the actual {@code RangeSet} is {@code null}.
* @throws AssertionError if the actual {@code RangeSet} does not intersect all the given ranges.
* @throws NullPointerException if ranges are null.
* @throws IllegalArgumentException if ranges are empty while actual is not empty.
*/
public RangeSetAssert<T> intersectsAll(Iterable<Range<T>> ranges) {
isNotNull();
assertIntersectsAll(ranges);
return myself;
}
@SuppressWarnings("unchecked")
private void assertIntersectsAll(Iterable<Range<T>> ranges) {
requireNonNull(ranges, shouldNotBeNull("ranges")::create);
if (actual.isEmpty() && !ranges.iterator().hasNext()) return;
failIfEmpty(ranges, "ranges");
assertRangeSetIntersectsGivenValues(toArray(ranges, Range.class));
}
private void assertRangeSetIntersectsGivenValues(Range<T>[] ranges) {
List<?> notIntersected = stream(ranges).filter(range -> !actual.intersects(range)).collect(toList());
if (!notIntersected.isEmpty()) throwAssertionError(shouldIntersect(actual, ranges, notIntersected));
}
/**
* Verifies that the given {@link RangeSet} intersects at least one of the given ranges.
* <p>
* Example:
*
* <pre><code class='java'> RangeSet<Integer> rangeSet = TreeRangeSet.create();
*
* rangeSet.add(Range.closed(0, 100));
* rangeSet.add(Range.closed(200, 300));
* rangeSet.add(Range.closed(500, 600));
*
* assertThat(rangeSet).intersectsAnyOf(Range.closed(50, 150),
* Range.open(170, 190),
* Range.open(600, 670));</code></pre>
*
* @param ranges the ranges to check whether the actual {@code RangeSet} intersects at least one of them.
* @return this {@link RangeSetAssert} for assertions chaining.
* @throws AssertionError if the actual {@code RangeSet} is {@code null}.
* @throws AssertionError if the actual {@code RangeSet} does not intersect any of the given ranges.
* @throws NullPointerException if ranges are null.
* @throws IllegalArgumentException if ranges are empty while actual is not empty.
*/
@SafeVarargs
public final RangeSetAssert<T> intersectsAnyOf(Range<T>... ranges) {
isNotNull();
assertIntersectsAnyOf(ranges);
return myself;
}
private void assertIntersectsAnyOf(Range<T>[] ranges) {
requireNonNull(ranges, shouldNotBeNull("ranges")::create);
if (actual.isEmpty() && ranges.length == 0) return;
failIfEmpty(ranges, "ranges");
assertRangeSetIntersectsAnyOfGivenValues(ranges);
}
/**
* Verifies that the given {@code RangeSet} intersects at least one of the given ranges.
* <p>
* Example:
*
* <pre><code class='java'> RangeSet<Integer> rangeSet = TreeRangeSet.create();
*
* rangeSet.add(Range.closed(0, 100));
* rangeSet.add(Range.closed(200, 300));
* rangeSet.add(Range.closed(500, 600));
*
* assertThat(rangeSet).intersectsAnyRangesOf(Arrays.asList(Range.closed(50, 150),
* Range.open(170, 190),
* Range.open(600, 670));</code></pre>
*
* @param ranges the ranges to check whether the actual {@code RangeSet} intersects at least one of them.
* @return this {@link RangeSetAssert} for assertions chaining.
* @throws AssertionError if the actual {@code RangeSet} is {@code null}.
* @throws AssertionError if the actual {@code RangeSet} does not intersect any of the given ranges.
* @throws NullPointerException if ranges are null.
* @throws IllegalArgumentException if ranges are empty while actual is not empty.
*/
public RangeSetAssert<T> intersectsAnyRangesOf(Iterable<Range<T>> ranges) {
isNotNull();
assertIntersectsAnyRangesOf(ranges);
return myself;
}
@SuppressWarnings("unchecked")
private void assertIntersectsAnyRangesOf(Iterable<Range<T>> ranges) {
requireNonNull(ranges, shouldNotBeNull("ranges")::create);
if (actual.isEmpty() && !ranges.iterator().hasNext()) return;
failIfEmpty(ranges, "ranges");
assertRangeSetIntersectsAnyOfGivenValues(toArray(ranges, Range.class));
}
/**
* Verifies that the given {@code RangeSet} intersects at least one range of the given range set.
* <p>
* Example:
*
* <pre><code class='java'> RangeSet<Integer> rangeSet = TreeRangeSet.create();
*
* rangeSet.add(Range.closed(0, 100));
* rangeSet.add(Range.closed(200, 300));
* rangeSet.add(Range.closed(500, 600));
*
* assertThat(rangeSet).intersectsAnyRangesOf(ImmutableRangeSet.of(Range.close(50, 150)));</code></pre>
*
* @param rangeSet the range set with ranges to check whether the actual {@code RangeSet} intersects at least one of
* them.
* @return this {@link RangeSetAssert} for assertions chaining.
* @throws AssertionError if the actual {@code RangeSet} is {@code null}.
* @throws AssertionError if the actual {@code RangeSet} does not intersect any of the ranges from the given ranges
* set.
* @throws NullPointerException if range set is null.
* @throws IllegalArgumentException if range set is empty while actual is not empty.
*/
public RangeSetAssert<T> intersectsAnyRangesOf(RangeSet<T> rangeSet) {
isNotNull();
assertIntersectsAnyRangesOf(rangeSet);
return myself;
}
@SuppressWarnings("unchecked")
private void assertIntersectsAnyRangesOf(RangeSet<T> rangeSet) {
requireNonNull(rangeSet, shouldNotBeNull("rangeSet")::create);
if (actual.isEmpty() && rangeSet.isEmpty()) return;
failIfEmpty(rangeSet);
assertRangeSetIntersectsAnyOfGivenValues(toArray(rangeSet.asRanges(), Range.class));
}
private void assertRangeSetIntersectsAnyOfGivenValues(Range<T>[] ranges) {
boolean intersects = stream(ranges).anyMatch(actual::intersects);
if (!intersects) throwAssertionError(shouldIntersectAnyOf(actual, ranges));
}
/**
* Verifies that the given {@code RangeSet} does not intersect the given ranges.
* <p>
* Example:
*
* <pre><code class='java'> RangeSet<Integer> rangeSet = TreeRangeSet.create();
*
* rangeSet.add(Range.closed(0, 100));
* rangeSet.add(Range.closed(200, 300));
* rangeSet.add(Range.closed(500, 600));
*
* assertThat(rangeSet).doesNotIntersect(Range.closed(120, 150),
* Range.open(302, 490),
* Range.open(600, 670));</code></pre>
*
* @param ranges the ranges to check whether the actual {@code RangeSet} does not intersect them.
* @return this {@link RangeSetAssert} for assertions chaining.
* @throws AssertionError if the actual {@code RangeSet} is {@code null}.
* @throws AssertionError if the actual {@code RangeSet} intersects the given ranges.
* @throws NullPointerException if ranges are null.
* @throws IllegalArgumentException if ranges are empty.
*/
@SafeVarargs
public final RangeSetAssert<T> doesNotIntersect(Range<T>... ranges) {
isNotNull();
assertDoesNotIntersect(ranges);
return myself;
}
private void assertDoesNotIntersect(Range<T>[] ranges) {
requireNonNull(ranges, shouldNotBeNull("ranges")::create);
failIfEmpty(ranges, "ranges");
assertRangeSetDoesNotIntersectGivenValues(ranges);
}
/**
* Verifies that the given {@code RangeSet} does not intersect ranges from the given range set.
* <p>
* Example:
*
* <pre><code class='java'> RangeSet<Integer> rangeSet = TreeRangeSet.create();
*
* rangeSet.add(Range.closed(0, 100));
* rangeSet.add(Range.closed(200, 300));
* rangeSet.add(Range.closed(500, 600));
*
* assertThat(rangeSet).doesNotIntersectAnyRangeFrom(ImmutableRangeSet.of(Range.close(120, 170)));</code></pre>
*
* @param rangeSet the range set to check whether the actual {@code RangeSet} does not intersect ranges from it.
* @return this {@link RangeSetAssert} for assertions chaining.
* @throws AssertionError if the actual {@code RangeSet} is {@code null}.
* @throws AssertionError if the actual {@code RangeSet} intersects the ranges from the given range set.
* @throws NullPointerException if range set is null.
* @throws IllegalArgumentException if range set is empty.
*/
public RangeSetAssert<T> doesNotIntersectAnyRangeFrom(RangeSet<T> rangeSet) {
isNotNull();
assertDoesNotIntersectAnyRangeFrom(rangeSet);
return myself;
}
@SuppressWarnings("unchecked")
private void assertDoesNotIntersectAnyRangeFrom(RangeSet<T> rangeSet) {
requireNonNull(rangeSet, shouldNotBeNull("rangeSet")::create);
failIfEmpty(rangeSet);
assertRangeSetDoesNotIntersectGivenValues(toArray(rangeSet.asRanges(), Range.class));
}
/**
* Verifies that the given {@code RangeSet} does not intersect all the given ranges.
* <p>
* Example:
*
* <pre><code class='java'> RangeSet<Integer> rangeSet = TreeRangeSet.create();
*
* rangeSet.add(Range.closed(0, 100));
* rangeSet.add(Range.closed(200, 300));
* rangeSet.add(Range.closed(500, 600));
*
* assertThat(rangeSet).doesNotIntersectAnyRangeFrom(Arrays.asList(Range.closed(120, 150),
* Range.open(302, 490),
* Range.open(600, 670));</code></pre>
*
* @param ranges the ranges to check whether the actual {@code RangeSet} does not intersect them.
* @return this {@link RangeSetAssert} for assertions chaining.
* @throws AssertionError if the actual {@code RangeSet} is {@code null}.
* @throws AssertionError if the actual {@code RangeSet} intersects all the given ranges.
* @throws NullPointerException if ranges are null.
* @throws IllegalArgumentException if ranges are empty.
*/
public RangeSetAssert<T> doesNotIntersectAnyRangeFrom(Iterable<Range<T>> ranges) {
isNotNull();
assertDoesNotIntersectAnyRangeFrom(ranges);
return myself;
}
@SuppressWarnings("unchecked")
private void assertDoesNotIntersectAnyRangeFrom(Iterable<Range<T>> ranges) {
requireNonNull(ranges, shouldNotBeNull("ranges")::create);
failIfEmpty(ranges, "ranges");
assertRangeSetDoesNotIntersectGivenValues(toArray(ranges, Range.class));
}
private void assertRangeSetDoesNotIntersectGivenValues(Range<T>[] ranges) {
List<?> intersected = stream(ranges).filter(actual::intersects).collect(toList());
if (!intersected.isEmpty()) throwAssertionError(shouldNotIntersect(actual, ranges, intersected));
}
/**
* Verifies that the given {@code RangeSet} encloses the given ranges.
* <p>
* Example:
*
* <pre><code class='java'> RangeSet<Integer> rangeSet = TreeRangeSet.create();
*
* rangeSet.add(Range.closed(0, 100));
* rangeSet.add(Range.closed(200, 300));
* rangeSet.add(Range.closed(500, 600));
*
* assertThat(rangeSet).encloses(Range.closed(0, 10),
* Range.open(50, 60),
* Range.open(90, 100));</code></pre>
*
* @param ranges the ranges to check whether the actual {@code RangeSet} encloses them.
* @return this {@link RangeSetAssert} for assertions chaining.
* @throws AssertionError if the actual {@code RangeSet} is {@code null}.
* @throws AssertionError if the actual {@code RangeSet} does not enclose the given ranges.
* @throws NullPointerException if ranges are null.
* @throws IllegalArgumentException if ranges are empty while actual is not empty.
*/
@SafeVarargs
public final RangeSetAssert<T> encloses(Range<T>... ranges) {
isNotNull();
assertEncloses(ranges);
return myself;
}
private void assertEncloses(Range<T>[] ranges) {
requireNonNull(ranges, shouldNotBeNull("ranges")::create);
if (actual.isEmpty() && ranges.length == 0) return;
failIfEmpty(ranges, "ranges");
assertRangeSetEnclosesGivenValues(ranges);
}
/**
* Verifies that the given {@code RangeSet} encloses all the given ranges.
* <p>
* Example:
*
* <pre><code class='java'> RangeSet<Integer> rangeSet = TreeRangeSet.create();
*
* rangeSet.add(Range.closed(0, 100));
* rangeSet.add(Range.closed(200, 300));
* rangeSet.add(Range.closed(500, 600));
*
* assertThat(rangeSet).enclosesAll(Arrays.asList(Range.closed(0, 10),
* Range.open(50, 60),
* Range.open(90, 100)));</code></pre>
*
* @param ranges the ranges to check whether the actual {@code RangeSet} encloses all of them.
* @return this {@link RangeSetAssert} for assertions chaining.
* @throws AssertionError if the actual {@code RangeSet} is {@code null}.
* @throws AssertionError if the actual {@code RangeSet} does not enclose all the given ranges.
* @throws NullPointerException if ranges are null.
* @throws IllegalArgumentException if ranges are empty while actual is not empty.
*/
public RangeSetAssert<T> enclosesAll(Iterable<Range<T>> ranges) {
isNotNull();
assertEnclosesAll(ranges);
return myself;
}
@SuppressWarnings("unchecked")
private void assertEnclosesAll(Iterable<Range<T>> ranges) {
requireNonNull(ranges, shouldNotBeNull("ranges")::create);
if (actual.isEmpty() && !ranges.iterator().hasNext()) return;
failIfEmpty(ranges, "ranges");
assertRangeSetEnclosesGivenValues(toArray(ranges, Range.class));
}
/**
* Verifies that the given {@code RangeSet} encloses all ranges from the given range set.
* <p>
* Example:
*
* <pre><code class='java'> RangeSet<Integer> rangeSet = TreeRangeSet.create();
*
* rangeSet.add(Range.closed(0, 100));
* rangeSet.add(Range.closed(200, 300));
* rangeSet.add(Range.closed(500, 600));
*
* assertThat(rangeSet).enclosesAll(ImmutableRangeSet.of(Range.closed(0, 50));</code></pre>
*
* @param rangeSet the range set to check whether the actual {@code RangeSet} encloses all range from it.
* @return this {@link RangeSetAssert} for assertions chaining.
* @throws AssertionError if the actual {@code RangeSet} is {@code null}.
* @throws AssertionError if the actual {@code RangeSet} does not enclose all ranges from the given range set.
* @throws NullPointerException if range set is null.
* @throws IllegalArgumentException if range set is empty while actual is not empty.
*/
public RangeSetAssert<T> enclosesAll(RangeSet<T> rangeSet) {
isNotNull();
assertEnclosesAll(rangeSet);
return myself;
}
@SuppressWarnings("unchecked")
private void assertEnclosesAll(RangeSet<T> rangeSet) {
requireNonNull(rangeSet, shouldNotBeNull("rangeSet")::create);
if (actual.isEmpty() && rangeSet.isEmpty()) return;
failIfEmpty(rangeSet);
assertRangeSetEnclosesGivenValues(toArray(rangeSet.asRanges(), Range.class));
}
private void assertRangeSetEnclosesGivenValues(Range<T>[] ranges) {
List<?> notEnclosed = stream(ranges).filter(range -> !actual.encloses(range)).collect(toList());
if (!notEnclosed.isEmpty()) throwAssertionError(shouldEnclose(actual, ranges, notEnclosed));
}
/**
* Verifies that the given {@code RangeSet} encloses at least one of the given ranges.
* <p>
* Example:
*
* <pre><code class='java'> RangeSet<Integer> rangeSet = TreeRangeSet.create();
*
* rangeSet.add(Range.closed(0, 100));
* rangeSet.add(Range.closed(200, 300));
* rangeSet.add(Range.closed(500, 600));
*
* assertThat(rangeSet).enclosesAnyOf(Range.closed(-10, 10),
* Range.open(150, 260),
* Range.open(290, 296));</code></pre>
*
* @param ranges the ranges to check whether the actual {@code RangeSet} encloses at least one of them.
* @return this {@link RangeSetAssert} for assertions chaining.
* @throws AssertionError if the actual {@code RangeSet} is {@code null}.
* @throws AssertionError if the actual {@code RangeSet} does not enclose at least one of the given ranges.
* @throws NullPointerException if ranges are null.
* @throws IllegalArgumentException if ranges are empty while actual is not empty.
*/
@SafeVarargs
public final RangeSetAssert<T> enclosesAnyOf(Range<T>... ranges) {
isNotNull();
assertEnclosesAnyOf(ranges);
return myself;
}
private void assertEnclosesAnyOf(Range<T>[] ranges) {
requireNonNull(ranges, shouldNotBeNull("ranges")::create);
if (actual.isEmpty() && ranges.length == 0) return;
failIfEmpty(ranges, "ranges");
assertRangeSetEnclosesAnyOfGivenValues(ranges);
}
/**
* Verifies that the given {@code RangeSet} encloses at least one range of the given ranges.
* <p>
* Example:
*
* <pre><code class='java'> RangeSet<Integer> rangeSet = TreeRangeSet.create();
*
* rangeSet.add(Range.closed(0, 100));
* rangeSet.add(Range.closed(200, 300));
* rangeSet.add(Range.closed(500, 600));
*
* assertThat(rangeSet).enclosesAnyRangesOf(Arrays.asList(Range.closed(-10, 10),
* Range.open(150, 260),
* Range.open(290, 296)));</code></pre>
*
* @param ranges the ranges to check whether the actual {@code RangeSet} encloses at least one of them.
* @return this {@link RangeSetAssert} for assertions chaining.
* @throws AssertionError if the actual {@code RangeSet} is {@code null}.
* @throws AssertionError if the actual {@code RangeSet} does not enclose at least one of the given ranges.
* @throws NullPointerException if ranges are null.
* @throws IllegalArgumentException if ranges are empty while actual is not empty.
*/
public RangeSetAssert<T> enclosesAnyRangesOf(Iterable<Range<T>> ranges) {
isNotNull();
assertEnclosesAnyRangesOf(ranges);
return myself;
}
@SuppressWarnings("unchecked")
private void assertEnclosesAnyRangesOf(Iterable<Range<T>> ranges) {
requireNonNull(ranges, shouldNotBeNull("ranges")::create);
if (actual.isEmpty() && !ranges.iterator().hasNext()) return;
failIfEmpty(ranges, "ranges");
assertRangeSetEnclosesAnyOfGivenValues(toArray(ranges, Range.class));
}
/**
* Verifies that the given {@code RangeSet} encloses at least one range from the given range set.
* <p>
* Example:
*
* <pre><code class='java'> RangeSet<Integer> rangeSet = TreeRangeSet.create();
*
* rangeSet.add(Range.closed(0, 100));
* rangeSet.add(Range.closed(200, 300));
* rangeSet.add(Range.closed(500, 600));
*
* RangeSet<Integer> enclosedSet = TreeRangeSet.create();
*
* enclosedSet.add(Range.closed(-10, 10));
* enclosedSet.add(Range.open(150, 260));
* enclosedSet.add(Range.open(290, 296));
*
* assertThat(rangeSet).enclosesAll(enclosedSet);</code></pre>
*
* @param rangeSet the range set to check whether the actual {@code RangeSet} encloses at least one range from it.
* @return this {@link RangeSetAssert} for assertions chaining.
* @throws AssertionError if the actual {@code RangeSet} is {@code null}.
* @throws AssertionError if the actual {@code RangeSet} does not enclose at least one range from the given range set.
* @throws NullPointerException if range set is null.
* @throws IllegalArgumentException if range set is empty while actual is not empty.
*/
public RangeSetAssert<T> enclosesAnyRangesOf(RangeSet<T> rangeSet) {
isNotNull();
assertEnclosesAnyRangesOf(rangeSet);
return myself;
}
@SuppressWarnings("unchecked")
private void assertEnclosesAnyRangesOf(RangeSet<T> rangeSet) {
requireNonNull(rangeSet, shouldNotBeNull("rangeSet")::create);
if (actual.isEmpty() && rangeSet.isEmpty()) return;
failIfEmpty(rangeSet);
assertRangeSetEnclosesAnyOfGivenValues(toArray(rangeSet.asRanges(), Range.class));
}
private void assertRangeSetEnclosesAnyOfGivenValues(Range<T>[] ranges) {
boolean match = stream(ranges).anyMatch(actual::encloses);
if (!match) throwAssertionError(shouldEncloseAnyOf(actual, ranges));
}
/**
* Verifies that the given {@code RangeSet} does not enclose the given ranges.
* <p>
* Example:
*
* <pre><code class='java'> RangeSet<Integer> rangeSet = TreeRangeSet.create();
*
* rangeSet.add(Range.closed(0, 100));
* rangeSet.add(Range.closed(200, 300));
* rangeSet.add(Range.closed(500, 600));
*
* assertThat(rangeSet).doesNotEnclose(Range.closed(-10, 10),
* Range.open(150, 160),
* Range.open(590, 700));</code></pre>
*
* @param ranges the ranges to check whether the actual {@code RangeSet} does not enclose them.
* @return this {@link RangeSetAssert} for assertions chaining.
* @throws AssertionError if the actual {@code RangeSet} is {@code null}.
* @throws AssertionError if the actual {@code RangeSet} encloses any of the given ranges.
* @throws NullPointerException if ranges are null.
* @throws IllegalArgumentException if ranges are empty.
*/
@SafeVarargs
public final RangeSetAssert<T> doesNotEnclose(Range<T>... ranges) {
isNotNull();
assertDoesNotEnclose(ranges);
return myself;
}
private void assertDoesNotEnclose(Range<T>[] ranges) {
requireNonNull(ranges, shouldNotBeNull("ranges")::create);
failIfEmpty(ranges, "ranges");
assertRangeSetDoesNotEncloseGivenValues(ranges);
}
/**
* Verifies that the given {@code RangeSet} does not enclose any of the given ranges.
* <p>
* Example:
*
* <pre><code class='java'> RangeSet<Integer> rangeSet = TreeRangeSet.create();
*
* rangeSet.add(Range.closed(0, 100));
* rangeSet.add(Range.closed(200, 300));
* rangeSet.add(Range.closed(500, 600));
*
* assertThat(rangeSet).doesNotEncloseAnyRangesOf(Arrays.asList(Range.closed(-10, 10),
* Range.open(150, 160),
* Range.open(590, 700));</code></pre>
*
* @param ranges the ranges to check whether the actual {@code RangeSet} does not enclose any of them.
* @return this {@link RangeSetAssert} for assertions chaining.
* @throws AssertionError if the actual {@code RangeSet} is {@code null}.
* @throws AssertionError if the actual {@code RangeSet} encloses any of the given ranges.
* @throws NullPointerException if ranges are null.
* @throws IllegalArgumentException if ranges are empty.
*/
public RangeSetAssert<T> doesNotEncloseAnyRangesOf(Iterable<Range<T>> ranges) {
isNotNull();
assertDoesNotEncloseAnyRangesOf(ranges);
return myself;
}
@SuppressWarnings("unchecked")
private void assertDoesNotEncloseAnyRangesOf(Iterable<Range<T>> ranges) {
requireNonNull(ranges, shouldNotBeNull("ranges")::create);
failIfEmpty(ranges, "ranges");
assertRangeSetDoesNotEncloseGivenValues(toArray(ranges, Range.class));
}
/**
* Verifies that the given {@code RangeSet} does not enclose any range from the given range set.
* <p>
* Example:
*
* <pre><code class='java'> RangeSet<Integer> rangeSet = TreeRangeSet.create();
*
* rangeSet.add(Range.closed(0, 100));
* rangeSet.add(Range.closed(200, 300));
* rangeSet.add(Range.closed(500, 600));
*
* RangeSet<Integer> enclosedSet = TreeRangeSet.create();
*
* enclosedSet.add(Range.closed(-10, 10));
* enclosedSet.add(Range.open(150, 360));
* enclosedSet.add(Range.open(590, 690));
*
* assertThat(rangeSet).doesNotEncloseAnyRangesOf(enclosedSet);</code></pre>
*
* @param rangeSet the range set to check whether the actual {@code RangeSet} does not enclose any ranges from it.
* @return this {@link RangeSetAssert} for assertions chaining.
* @throws AssertionError if the actual {@code RangeSet} is {@code null}.
* @throws AssertionError if the actual {@code RangeSet} encloses any range from the given range set.
* @throws NullPointerException if range set is null.
* @throws IllegalArgumentException if range set is empty.
*/
public RangeSetAssert<T> doesNotEncloseAnyRangesOf(RangeSet<T> rangeSet) {
isNotNull();
assertDoesNotEncloseAnyRangesOf(rangeSet);
return myself;
}
@SuppressWarnings("unchecked")
private void assertDoesNotEncloseAnyRangesOf(RangeSet<T> rangeSet) {
requireNonNull(rangeSet, shouldNotBeNull("rangeSet")::create);
failIfEmpty(rangeSet);
assertRangeSetDoesNotEncloseGivenValues(toArray(rangeSet.asRanges(), Range.class));
}
private void assertRangeSetDoesNotEncloseGivenValues(Range<T>[] ranges) {
List<?> enclosed = stream(ranges).filter(actual::encloses).collect(toList());
if (!enclosed.isEmpty()) throwAssertionError(shouldNotEnclose(actual, ranges, enclosed));
}
private static <T> void failIfEmpty(T[] array, String label) {
if (array.length == 0) throw new IllegalArgumentException("Expecting " + label + " not to be empty");
}
private static <T> void failIfEmpty(Iterable<T> iterable, String label) {
if (!iterable.iterator().hasNext()) throw new IllegalArgumentException("Expecting " + label + " not to be empty");
}
private static <T> void failIfEmpty(RangeSet<?> rangeSet) {
if (rangeSet.isEmpty()) throw new IllegalArgumentException("Expecting rangeSet not to be empty");
}
}
|
RangeSetAssert
|
java
|
quarkusio__quarkus
|
extensions/hibernate-orm/deployment/src/test/java/io/quarkus/hibernate/orm/applicationfieldaccess/PublicFieldAccessInheritanceTest.java
|
{
"start": 4181,
"end": 4293
}
|
class ____ extends MyAbstractEntity {
public Long concreteEntityField;
}
private
|
MyConcreteEntity
|
java
|
apache__camel
|
components/camel-salesforce/camel-salesforce-component/src/test/java/org/apache/camel/component/salesforce/NotFoundManualIT.java
|
{
"start": 1256,
"end": 2293
}
|
class ____ extends AbstractSalesforceTestBase {
@Test
public void shouldNotReportNotFoundExceptionFromRestApiIfConfiguredNotTo() {
final Account got = template.requestBody("salesforce:getSObjectWithId?sObjectName=Account&sObjectIdName=Name"
+ "¬FoundBehaviour=NULL",
"NonExistent",
Account.class);
assertNull(got, "Expecting null when `notFoundBehaviour` is set to NULL");
}
@Test
public void shouldReportNotFoundExceptionFromRestApi() {
try {
template.requestBody("salesforce:getSObjectWithId?sObjectName=Account&sObjectIdName=Name",
"NonExistant", Account.class);
fail("Expecting CamelExecutionException");
} catch (final CamelExecutionException e) {
assertTrue(e.getCause() instanceof NoSuchSObjectException,
"Expecting the cause of CamelExecutionException to be NoSuchSObjectException");
}
}
}
|
NotFoundManualIT
|
java
|
netty__netty
|
codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocket08FrameEncoder.java
|
{
"start": 2891,
"end": 10175
}
|
class ____ extends MessageToMessageEncoder<WebSocketFrame> implements WebSocketFrameEncoder {
private static final InternalLogger logger = InternalLoggerFactory.getInstance(WebSocket08FrameEncoder.class);
private static final byte OPCODE_CONT = 0x0;
private static final byte OPCODE_TEXT = 0x1;
private static final byte OPCODE_BINARY = 0x2;
private static final byte OPCODE_CLOSE = 0x8;
private static final byte OPCODE_PING = 0x9;
private static final byte OPCODE_PONG = 0xA;
/**
* The size threshold for gathering writes. Non-Masked messages bigger than this size will be sent fragmented as
* a header and a content ByteBuf whereas messages smaller than the size will be merged into a single buffer and
* sent at once.<br>
* Masked messages will always be sent at once.
*/
private static final int GATHERING_WRITE_THRESHOLD = 1024;
private final WebSocketFrameMaskGenerator maskGenerator;
/**
* Constructor
*
* @param maskPayload
* Web socket clients must set this to true to mask payload. Server implementations must set this to
* false.
*/
public WebSocket08FrameEncoder(boolean maskPayload) {
this(maskPayload ? RandomWebSocketFrameMaskGenerator.INSTANCE : null);
}
/**
* Constructor
*
* @param maskGenerator
* Web socket clients must set this to {@code non null} to mask payload.
* Server implementations must set this to {@code null}.
*/
public WebSocket08FrameEncoder(WebSocketFrameMaskGenerator maskGenerator) {
super(WebSocketFrame.class);
this.maskGenerator = maskGenerator;
}
@Override
protected void encode(ChannelHandlerContext ctx, WebSocketFrame msg, List<Object> out) throws Exception {
final ByteBuf data = msg.content();
byte opcode = getOpCode(msg);
int length = data.readableBytes();
if (logger.isTraceEnabled()) {
logger.trace("Encoding WebSocket Frame opCode={} length={}", opcode, length);
}
int b0 = 0;
if (msg.isFinalFragment()) {
b0 |= 1 << 7;
}
b0 |= (msg.rsv() & 0x07) << 4;
b0 |= opcode & 0x7F;
if (opcode == OPCODE_PING && length > 125) {
throw new TooLongFrameException("invalid payload for PING (payload length must be <= 125, was " + length);
}
boolean release = true;
ByteBuf buf = null;
try {
int maskLength = maskGenerator != null ? 4 : 0;
if (length <= 125) {
int size = 2 + maskLength + length;
buf = ctx.alloc().buffer(size);
buf.writeByte(b0);
byte b = (byte) (maskGenerator != null ? 0x80 | length : length);
buf.writeByte(b);
} else if (length <= 0xFFFF) {
int size = 4 + maskLength;
if (maskGenerator != null || length <= GATHERING_WRITE_THRESHOLD) {
size += length;
}
buf = ctx.alloc().buffer(size);
buf.writeByte(b0);
buf.writeByte(maskGenerator != null ? 0xFE : 126);
buf.writeByte(length >>> 8 & 0xFF);
buf.writeByte(length & 0xFF);
} else {
int size = 10 + maskLength;
if (maskGenerator != null) {
size += length;
}
buf = ctx.alloc().buffer(size);
buf.writeByte(b0);
buf.writeByte(maskGenerator != null ? 0xFF : 127);
buf.writeLong(length);
}
// Write payload
if (maskGenerator != null) {
int mask = maskGenerator.nextMask();
buf.writeInt(mask);
// If the mask is 0 we can skip all the XOR operations.
if (mask != 0) {
if (length > 0) {
ByteOrder srcOrder = data.order();
ByteOrder dstOrder = buf.order();
int i = data.readerIndex();
int end = data.writerIndex();
if (srcOrder == dstOrder) {
// Use the optimized path only when byte orders match.
// Avoid sign extension on widening primitive conversion
long longMask = mask & 0xFFFFFFFFL;
longMask |= longMask << 32;
// If the byte order of our buffers it little endian we have to bring our mask
// into the same format, because getInt() and writeInt() will use a reversed byte order
if (srcOrder == ByteOrder.LITTLE_ENDIAN) {
longMask = Long.reverseBytes(longMask);
}
for (int lim = end - 7; i < lim; i += 8) {
buf.writeLong(data.getLong(i) ^ longMask);
}
if (i < end - 3) {
buf.writeInt(data.getInt(i) ^ (int) longMask);
i += 4;
}
}
int maskOffset = 0;
for (; i < end; i++) {
byte byteData = data.getByte(i);
buf.writeByte(byteData ^ WebSocketUtil.byteAtIndex(mask, maskOffset++ & 3));
}
}
out.add(buf);
} else {
addBuffers(buf, data, out);
}
} else {
addBuffers(buf, data, out);
}
release = false;
} finally {
if (release && buf != null) {
buf.release();
}
}
}
private static byte getOpCode(WebSocketFrame msg) {
if (msg instanceof TextWebSocketFrame) {
return OPCODE_TEXT;
}
if (msg instanceof BinaryWebSocketFrame) {
return OPCODE_BINARY;
}
if (msg instanceof PingWebSocketFrame) {
return OPCODE_PING;
}
if (msg instanceof PongWebSocketFrame) {
return OPCODE_PONG;
}
if (msg instanceof CloseWebSocketFrame) {
return OPCODE_CLOSE;
}
if (msg instanceof ContinuationWebSocketFrame) {
return OPCODE_CONT;
}
throw new UnsupportedOperationException("Cannot encode frame of type: " + msg.getClass().getName());
}
private static void addBuffers(ByteBuf buf, ByteBuf data, List<Object> out) {
int readableBytes = data.readableBytes();
if (buf.writableBytes() >= readableBytes) {
// merge buffers as this is cheaper then a gathering write if the payload is small enough
buf.writeBytes(data);
out.add(buf);
} else {
out.add(buf);
if (readableBytes > 0) {
out.add(data.retain());
}
}
}
}
|
WebSocket08FrameEncoder
|
java
|
spring-projects__spring-boot
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/condition/OnClassCondition.java
|
{
"start": 1712,
"end": 6183
}
|
class ____ extends FilteringSpringBootCondition {
@Override
protected final @Nullable ConditionOutcome[] getOutcomes(@Nullable String[] autoConfigurationClasses,
AutoConfigurationMetadata autoConfigurationMetadata) {
// Split the work and perform half in a background thread if more than one
// processor is available. Using a single additional thread seems to offer the
// best performance. More threads make things worse.
if (autoConfigurationClasses.length > 1 && Runtime.getRuntime().availableProcessors() > 1) {
return resolveOutcomesThreaded(autoConfigurationClasses, autoConfigurationMetadata);
}
else {
OutcomesResolver outcomesResolver = new StandardOutcomesResolver(autoConfigurationClasses, 0,
autoConfigurationClasses.length, autoConfigurationMetadata, getBeanClassLoader());
return outcomesResolver.resolveOutcomes();
}
}
private @Nullable ConditionOutcome[] resolveOutcomesThreaded(@Nullable String[] autoConfigurationClasses,
AutoConfigurationMetadata autoConfigurationMetadata) {
int split = autoConfigurationClasses.length / 2;
OutcomesResolver firstHalfResolver = createOutcomesResolver(autoConfigurationClasses, 0, split,
autoConfigurationMetadata);
OutcomesResolver secondHalfResolver = new StandardOutcomesResolver(autoConfigurationClasses, split,
autoConfigurationClasses.length, autoConfigurationMetadata, getBeanClassLoader());
@Nullable ConditionOutcome[] secondHalf = secondHalfResolver.resolveOutcomes();
@Nullable ConditionOutcome[] firstHalf = firstHalfResolver.resolveOutcomes();
@Nullable ConditionOutcome[] outcomes = new ConditionOutcome[autoConfigurationClasses.length];
System.arraycopy(firstHalf, 0, outcomes, 0, firstHalf.length);
System.arraycopy(secondHalf, 0, outcomes, split, secondHalf.length);
return outcomes;
}
private OutcomesResolver createOutcomesResolver(@Nullable String[] autoConfigurationClasses, int start, int end,
AutoConfigurationMetadata autoConfigurationMetadata) {
OutcomesResolver outcomesResolver = new StandardOutcomesResolver(autoConfigurationClasses, start, end,
autoConfigurationMetadata, getBeanClassLoader());
return new ThreadedOutcomesResolver(outcomesResolver);
}
@Override
public ConditionOutcome getMatchOutcome(ConditionContext context, AnnotatedTypeMetadata metadata) {
ClassLoader classLoader = context.getClassLoader();
ConditionMessage matchMessage = ConditionMessage.empty();
List<String> onClasses = getCandidates(metadata, ConditionalOnClass.class);
if (onClasses != null) {
List<String> missing = filter(onClasses, ClassNameFilter.MISSING, classLoader);
if (!missing.isEmpty()) {
return ConditionOutcome.noMatch(ConditionMessage.forCondition(ConditionalOnClass.class)
.didNotFind("required class", "required classes")
.items(Style.QUOTE, missing));
}
matchMessage = matchMessage.andCondition(ConditionalOnClass.class)
.found("required class", "required classes")
.items(Style.QUOTE, filter(onClasses, ClassNameFilter.PRESENT, classLoader));
}
List<String> onMissingClasses = getCandidates(metadata, ConditionalOnMissingClass.class);
if (onMissingClasses != null) {
List<String> present = filter(onMissingClasses, ClassNameFilter.PRESENT, classLoader);
if (!present.isEmpty()) {
return ConditionOutcome.noMatch(ConditionMessage.forCondition(ConditionalOnMissingClass.class)
.found("unwanted class", "unwanted classes")
.items(Style.QUOTE, present));
}
matchMessage = matchMessage.andCondition(ConditionalOnMissingClass.class)
.didNotFind("unwanted class", "unwanted classes")
.items(Style.QUOTE, filter(onMissingClasses, ClassNameFilter.MISSING, classLoader));
}
return ConditionOutcome.match(matchMessage);
}
private @Nullable List<String> getCandidates(AnnotatedTypeMetadata metadata, Class<?> annotationType) {
MultiValueMap<String, @Nullable Object> attributes = metadata
.getAllAnnotationAttributes(annotationType.getName(), true);
if (attributes == null) {
return null;
}
List<String> candidates = new ArrayList<>();
addAll(candidates, attributes.get("value"));
addAll(candidates, attributes.get("name"));
return candidates;
}
private void addAll(List<String> list, @Nullable List<@Nullable Object> itemsToAdd) {
if (itemsToAdd != null) {
for (Object item : itemsToAdd) {
if (item != null) {
Collections.addAll(list, (String[]) item);
}
}
}
}
private
|
OnClassCondition
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/serializer/filters/PropertyPathTest.java
|
{
"start": 350,
"end": 1204
}
|
class ____ extends TestCase {
public void test_path() throws Exception {
A a = new A();
a.setId(123);
B b = new B();
b.setId(234);
C c = new C();
c.setId(345);
D d = new D();
d.setId(456);
a.setB(b);
b.setC(c);
b.setD(d);
Assert.assertEquals("{\"b\":{\"c\":{\"id\":345},\"d\":{\"id\":456},\"id\":234},\"id\":123}",
JSON.toJSONString(a));
Assert.assertEquals("{\"b\":{\"c\":{\"id\":345},\"id\":234},\"id\":123}",
JSON.toJSONString(a, new MyPropertyPreFilter()));
Assert.assertEquals("{'b':{'c':{'id':345},'id':234},'id':123}",
JSON.toJSONString(a, new MyPropertyPreFilter(), SerializerFeature.UseSingleQuotes));
}
public static
|
PropertyPathTest
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/LookupFromIndexIT.java
|
{
"start": 7776,
"end": 7921
}
|
interface ____ {
void populate(int docCount, List<String> expected, Predicate<Integer> filter) throws IOException;
}
|
PopulateIndices
|
java
|
apache__dubbo
|
dubbo-config/dubbo-config-spring/src/test/java/org/apache/dubbo/config/spring/boot/conditional1/XmlReferenceBeanConditionalTest.java
|
{
"start": 2760,
"end": 2863
}
|
class ____ {}
@Order(Integer.MAX_VALUE - 1)
@Configuration
public static
|
ConsumerConfiguration
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/RoleReference.java
|
{
"start": 1563,
"end": 2812
}
|
class ____ implements RoleReference {
private final String[] roleNames;
public NamedRoleReference(String[] roleNames) {
this.roleNames = roleNames;
}
public String[] getRoleNames() {
return roleNames;
}
@Override
public RoleKey id() {
if (roleNames.length == 0) {
return RoleKey.ROLE_KEY_EMPTY;
} else {
final Set<String> distinctRoles = new HashSet<>(List.of(roleNames));
if (distinctRoles.size() == 1 && distinctRoles.contains(ReservedRolesStore.SUPERUSER_ROLE_DESCRIPTOR.getName())) {
return RoleKey.ROLE_KEY_SUPERUSER;
} else {
return new RoleKey(Set.copyOf(distinctRoles), RoleKey.ROLES_STORE_SOURCE);
}
}
}
@Override
public void resolve(RoleReferenceResolver resolver, ActionListener<RolesRetrievalResult> listener) {
resolver.resolveNamedRoleReference(this, listener);
}
}
/**
* Referencing API Key role descriptors. Can be either the assigned (key) role descriptors or the limited-by (owner's) role descriptors
*/
final
|
NamedRoleReference
|
java
|
apache__flink
|
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/CatalogDatabase.java
|
{
"start": 1018,
"end": 2021
}
|
interface ____ {
/** Get a map of properties associated with the database. */
Map<String, String> getProperties();
/**
* Get comment of the database.
*
* @return comment of the database
*/
String getComment();
/**
* Get a deep copy of the CatalogDatabase instance.
*
* @return a copy of CatalogDatabase instance
*/
CatalogDatabase copy();
/**
* Returns a copy of this {@code CatalogDatabase} with the given properties.
*
* @return a new copy of this database with replaced properties
*/
CatalogDatabase copy(Map<String, String> properties);
/**
* Get a brief description of the database.
*
* @return an optional short description of the database
*/
Optional<String> getDescription();
/**
* Get a detailed description of the database.
*
* @return an optional long description of the database
*/
Optional<String> getDetailedDescription();
}
|
CatalogDatabase
|
java
|
apache__camel
|
components/camel-controlbus/src/main/java/org/apache/camel/component/controlbus/ControlBusProducer.java
|
{
"start": 3470,
"end": 4929
}
|
class ____ implements Runnable {
private final Exchange exchange;
private final Language language;
private LanguageTask(Exchange exchange, Language language) {
this.exchange = exchange;
this.language = language;
}
@Override
public void run() {
String task = null;
Object result = null;
try {
// create copy of exchange to not cause side effect
Exchange copy = ExchangeHelper.createCopy(exchange, true);
task = copy.getIn().getMandatoryBody(String.class);
if (task != null) {
Expression exp = language.createExpression(task);
result = exp.evaluate(copy, Object.class);
}
if (result != null && !getEndpoint().isAsync()) {
// can only set result on exchange if sync
exchange.getIn().setBody(result);
}
if (task != null) {
logger.log("ControlBus task done [" + task + "] with result -> " + (result != null ? result : "void"));
}
} catch (Exception e) {
logger.log("Error executing ControlBus task [" + task + "]. This exception will be ignored.", e);
}
}
}
/**
* Tasks to run when processing by route action.
*/
private final
|
LanguageTask
|
java
|
google__auto
|
factory/src/test/resources/expected/SimpleClassFactory.java
|
{
"start": 841,
"end": 966
}
|
class ____ {
@Inject
SimpleClassFactory() {}
SimpleClass create() {
return new SimpleClass();
}
}
|
SimpleClassFactory
|
java
|
apache__logging-log4j2
|
log4j-core/src/main/java/org/apache/logging/log4j/core/impl/MementoMessage.java
|
{
"start": 1287,
"end": 2499
}
|
class ____ implements Message, StringBuilderFormattable {
private final String formattedMessage;
private final String format;
private final Object[] parameters;
public MementoMessage(final String formattedMessage, final String format, final Object[] parameters) {
this.formattedMessage = formattedMessage;
this.format = format;
this.parameters = parameters;
}
@Override
public String getFormattedMessage() {
return formattedMessage;
}
@Override
public String getFormat() {
return format;
}
@Override
public Object[] getParameters() {
return parameters;
}
/**
* Always returns null.
*
* @return null
*/
@Override
public Throwable getThrowable() {
return null;
}
@Override
public void formatTo(final StringBuilder buffer) {
buffer.append(formattedMessage);
}
@Override
public String toString() {
return "MementoMessage{" + "formattedMessage='"
+ formattedMessage + '\'' + ", format='"
+ format + '\'' + ", parameters="
+ Arrays.toString(parameters) + '}';
}
}
|
MementoMessage
|
java
|
apache__flink
|
flink-formats/flink-protobuf/src/test/java/org/apache/flink/formats/protobuf/NullValueToProtoTest.java
|
{
"start": 1535,
"end": 2633
}
|
class ____ {
@Test
public void testSimple() throws Exception {
RowData row =
GenericRowData.of(
// string
new GenericMapData(
mapOf(
StringData.fromString("key"),
null,
StringData.fromString(""),
StringData.fromString("value"))),
// int32
new GenericMapData(mapOf(0, 1, 1, null)),
// int64
new GenericMapData(mapOf(0L, 1L, 1L, null)),
// boolean
new GenericMapData(mapOf(false, true, true, null)),
// float
new GenericMapData(mapOf(StringData.fromString("key"), null)),
// double
new GenericMapData(mapOf(StringData.fromString("key"), null)),
//
|
NullValueToProtoTest
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/state/v2/SyncIteratorWrapper.java
|
{
"start": 1086,
"end": 1917
}
|
class ____<T> implements Iterator<T> {
private final ArrayList<T> cacheEntries;
private int cacheIndex;
public SyncIteratorWrapper(StateIterator<T> stateIterator) {
this.cacheEntries = new ArrayList<>();
this.cacheIndex = 0;
if (stateIterator instanceof AbstractStateIterator) {
((AbstractStateIterator<T>) stateIterator).onNextSync(this.cacheEntries::add);
}
}
@Override
public boolean hasNext() {
return (cacheIndex < cacheEntries.size());
}
@Override
public T next() {
if (cacheIndex == cacheEntries.size()) {
throw new NoSuchElementException("Iterator has no more elements!");
}
T currentEntry = cacheEntries.get(cacheIndex);
cacheIndex++;
return currentEntry;
}
}
|
SyncIteratorWrapper
|
java
|
quarkusio__quarkus
|
extensions/liquibase/liquibase/deployment/src/test/java/io/quarkus/liquibase/test/LiquibaseExtensionConfigUrlMissingNamedDataSourceStaticInjectionTest.java
|
{
"start": 501,
"end": 2654
}
|
class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
// The URL won't be missing if dev services are enabled
.overrideConfigKey("quarkus.devservices.enabled", "false")
// We need at least one build-time property for the datasource,
// otherwise it's considered unconfigured at build time...
.overrideConfigKey("quarkus.datasource.users.db-kind", "h2")
// We need this otherwise the *default* datasource may impact this test
.overrideConfigKey("quarkus.datasource.db-kind", "h2")
.overrideConfigKey("quarkus.datasource.username", "sa")
.overrideConfigKey("quarkus.datasource.password", "sa")
.overrideConfigKey("quarkus.datasource.jdbc.url",
"jdbc:h2:tcp://localhost/mem:test-quarkus-migrate-at-start;DB_CLOSE_DELAY=-1")
.assertException(e -> assertThat(e)
// Can't use isInstanceOf due to weird classloading in tests
.satisfies(t -> assertThat(t.getClass().getName()).isEqualTo(InactiveBeanException.class.getName()))
.hasMessageContainingAll(
"Liquibase for datasource 'users' was deactivated automatically because this datasource was deactivated.",
"Datasource 'users' was deactivated automatically because its URL is not set.",
"To avoid this exception while keeping the bean inactive", // Message from Arc with generic hints
"To activate the datasource, set configuration property 'quarkus.datasource.\"users\".jdbc.url'.",
"Refer to https://quarkus.io/guides/datasource for guidance.",
"This bean is injected into",
MyBean.class.getName() + "#liquibase"));
@Inject
MyBean myBean;
@Test
public void test() {
Assertions.fail("Startup should have failed");
}
@ApplicationScoped
public static
|
LiquibaseExtensionConfigUrlMissingNamedDataSourceStaticInjectionTest
|
java
|
spring-projects__spring-boot
|
core/spring-boot/src/main/java/org/springframework/boot/info/InfoProperties.java
|
{
"start": 2700,
"end": 3358
}
|
class ____ implements Iterator<Entry> {
private final Iterator<Map.Entry<Object, Object>> iterator;
private PropertiesIterator(Properties properties) {
this.iterator = properties.entrySet().iterator();
}
@Override
public boolean hasNext() {
return this.iterator.hasNext();
}
@Override
public Entry next() {
Map.Entry<Object, Object> entry = this.iterator.next();
return new Entry((String) entry.getKey(), (String) entry.getValue());
}
@Override
public void remove() {
throw new UnsupportedOperationException("InfoProperties are immutable.");
}
}
/**
* Property entry.
*/
public static final
|
PropertiesIterator
|
java
|
spring-projects__spring-boot
|
module/spring-boot-webflux/src/main/java/org/springframework/boot/webflux/autoconfigure/WebFluxAutoConfiguration.java
|
{
"start": 15326,
"end": 19378
}
|
class ____ extends DelegatingWebFluxConfiguration {
private final WebFluxProperties webFluxProperties;
private final WebProperties webProperties;
private final ServerProperties serverProperties;
private final @Nullable WebFluxRegistrations webFluxRegistrations;
EnableWebFluxConfiguration(WebFluxProperties webFluxProperties, WebProperties webProperties,
ServerProperties serverProperties, ObjectProvider<WebFluxRegistrations> webFluxRegistrations) {
this.webFluxProperties = webFluxProperties;
this.webProperties = webProperties;
this.serverProperties = serverProperties;
this.webFluxRegistrations = webFluxRegistrations.getIfUnique();
}
@Bean
@Override
public FormattingConversionService webFluxConversionService() {
Format format = this.webFluxProperties.getFormat();
WebConversionService conversionService = new WebConversionService(
new DateTimeFormatters().dateFormat(format.getDate())
.timeFormat(format.getTime())
.dateTimeFormat(format.getDateTime()));
addFormatters(conversionService);
return conversionService;
}
@Bean
@Override
public Validator webFluxValidator() {
if (!ClassUtils.isPresent("jakarta.validation.Validator", getClass().getClassLoader())
|| !ClassUtils.isPresent("org.springframework.boot.validation.autoconfigure.ValidatorAdapter",
getClass().getClassLoader())) {
return super.webFluxValidator();
}
ApplicationContext applicationContext = getApplicationContext();
Assert.state(applicationContext != null, "'applicationContext' must not be null");
return ValidatorAdapter.get(applicationContext, getValidator());
}
@Override
protected RequestMappingHandlerAdapter createRequestMappingHandlerAdapter() {
if (this.webFluxRegistrations != null) {
RequestMappingHandlerAdapter adapter = this.webFluxRegistrations.getRequestMappingHandlerAdapter();
if (adapter != null) {
return adapter;
}
}
return super.createRequestMappingHandlerAdapter();
}
@Override
protected RequestMappingHandlerMapping createRequestMappingHandlerMapping() {
if (this.webFluxRegistrations != null) {
RequestMappingHandlerMapping mapping = this.webFluxRegistrations.getRequestMappingHandlerMapping();
if (mapping != null) {
return mapping;
}
}
return super.createRequestMappingHandlerMapping();
}
@Bean
@Override
@ConditionalOnMissingBean(name = WebHttpHandlerBuilder.LOCALE_CONTEXT_RESOLVER_BEAN_NAME)
public LocaleContextResolver localeContextResolver() {
Locale locale = this.webProperties.getLocale();
if (this.webProperties.getLocaleResolver() == WebProperties.LocaleResolver.FIXED) {
Assert.state(locale != null, "'locale' must not be null");
return new FixedLocaleContextResolver(locale);
}
AcceptHeaderLocaleContextResolver localeContextResolver = new AcceptHeaderLocaleContextResolver();
localeContextResolver.setDefaultLocale(locale);
return localeContextResolver;
}
@Bean
@ConditionalOnMissingBean(name = WebHttpHandlerBuilder.WEB_SESSION_MANAGER_BEAN_NAME)
WebSessionManager webSessionManager(ObjectProvider<WebSessionIdResolver> webSessionIdResolver) {
DefaultWebSessionManager webSessionManager = new DefaultWebSessionManager();
Duration timeout = this.serverProperties.getReactive().getSession().getTimeout();
int maxSessions = this.serverProperties.getReactive().getSession().getMaxSessions();
MaxIdleTimeInMemoryWebSessionStore sessionStore = new MaxIdleTimeInMemoryWebSessionStore(timeout);
sessionStore.setMaxSessions(maxSessions);
webSessionManager.setSessionStore(sessionStore);
webSessionIdResolver.ifAvailable(webSessionManager::setSessionIdResolver);
return webSessionManager;
}
@Override
@ConditionalOnMissingBean(name = "webFluxApiVersionStrategy")
public @Nullable ApiVersionStrategy webFluxApiVersionStrategy() {
return super.webFluxApiVersionStrategy();
}
}
@Configuration(proxyBeanMethods = false)
@ConditionalOnEnabledResourceChain
static
|
EnableWebFluxConfiguration
|
java
|
spring-projects__spring-boot
|
smoke-test/spring-boot-smoke-test-actuator/src/test/java/smoketest/actuator/ManagementAddressActuatorApplicationTests.java
|
{
"start": 1488,
"end": 2399
}
|
class ____ {
@LocalServerPort
private int port;
@LocalManagementPort
private int managementPort;
@Test
void testHome() {
ResponseEntity<Map<String, Object>> entity = asMapEntity(
new TestRestTemplate().getForEntity("http://localhost:" + this.port, Map.class));
assertThat(entity.getStatusCode()).isEqualTo(HttpStatus.UNAUTHORIZED);
}
@Test
void testHealth() {
ResponseEntity<String> entity = new TestRestTemplate().withBasicAuth("user", "password")
.getForEntity("http://localhost:" + this.managementPort + "/admin/actuator/health", String.class);
assertThat(entity.getStatusCode()).isEqualTo(HttpStatus.OK);
assertThat(entity.getBody()).contains("\"status\":\"UP\"");
}
@SuppressWarnings({ "unchecked", "rawtypes" })
static <K, V> ResponseEntity<Map<K, V>> asMapEntity(ResponseEntity<Map> entity) {
return (ResponseEntity) entity;
}
}
|
ManagementAddressActuatorApplicationTests
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java
|
{
"start": 3659,
"end": 20833
}
|
class ____ {
private final static org.apache.hadoop.yarn.factories.RecordFactory recordFactory = RecordFactoryProvider
.getRecordFactory(null);
/**
* Test RegisterNodeManagerResponsePBImpl. Test getters and setters. The
* RegisterNodeManagerResponsePBImpl should generate a prototype and data
* restore from prototype
*/
@Test
public void testRegisterNodeManagerResponsePBImpl() {
RegisterNodeManagerResponsePBImpl original =
new RegisterNodeManagerResponsePBImpl();
original.setContainerTokenMasterKey(getMasterKey());
original.setNMTokenMasterKey(getMasterKey());
original.setNodeAction(NodeAction.NORMAL);
original.setDiagnosticsMessage("testDiagnosticMessage");
RegisterNodeManagerResponsePBImpl copy =
new RegisterNodeManagerResponsePBImpl(
original.getProto());
assertEquals(1, copy.getContainerTokenMasterKey().getKeyId());
assertEquals(1, copy.getNMTokenMasterKey().getKeyId());
assertEquals(NodeAction.NORMAL, copy.getNodeAction());
assertEquals("testDiagnosticMessage", copy.getDiagnosticsMessage());
assertFalse(copy.getAreNodeLabelsAcceptedByRM());
}
@Test
public void testRegisterNodeManagerResponsePBImplWithRMAcceptLbls() {
RegisterNodeManagerResponsePBImpl original =
new RegisterNodeManagerResponsePBImpl();
original.setAreNodeLabelsAcceptedByRM(true);
RegisterNodeManagerResponsePBImpl copy =
new RegisterNodeManagerResponsePBImpl(original.getProto());
assertTrue(copy.getAreNodeLabelsAcceptedByRM());
}
/**
* Test NodeHeartbeatRequestPBImpl.
*/
@Test
public void testNodeHeartbeatRequestPBImpl() {
NodeHeartbeatRequestPBImpl original = new NodeHeartbeatRequestPBImpl();
original.setLastKnownContainerTokenMasterKey(getMasterKey());
original.setLastKnownNMTokenMasterKey(getMasterKey());
original.setNodeStatus(getNodeStatus());
original.setNodeLabels(getValidNodeLabels());
Map<ApplicationId, AppCollectorData> collectors = getCollectors(false);
original.setRegisteringCollectors(collectors);
NodeHeartbeatRequestPBImpl copy = new NodeHeartbeatRequestPBImpl(
original.getProto());
assertEquals(1, copy.getLastKnownContainerTokenMasterKey().getKeyId());
assertEquals(1, copy.getLastKnownNMTokenMasterKey().getKeyId());
assertEquals("localhost", copy.getNodeStatus().getNodeId().getHost());
assertEquals(collectors, copy.getRegisteringCollectors());
// check labels are coming with valid values
assertTrue(original.getNodeLabels()
.containsAll(copy.getNodeLabels()));
// check for empty labels
original.setNodeLabels(new HashSet<NodeLabel> ());
copy = new NodeHeartbeatRequestPBImpl(
original.getProto());
assertNotNull(copy.getNodeLabels());
assertEquals(0, copy.getNodeLabels().size());
}
@Test
public void testNodeHBRequestPBImplWithNullCollectorToken() {
NodeHeartbeatRequestPBImpl original = new NodeHeartbeatRequestPBImpl();
Map<ApplicationId, AppCollectorData> collectors = getCollectors(true);
original.setRegisteringCollectors(collectors);
NodeHeartbeatRequestPBImpl copy = new NodeHeartbeatRequestPBImpl(
original.getProto());
assertEquals(collectors, copy.getRegisteringCollectors());
}
/**
* Test NodeHeartbeatRequestPBImpl.
*/
@Test
public void testNodeHeartbeatRequestPBImplWithNullLabels() {
NodeHeartbeatRequestPBImpl original = new NodeHeartbeatRequestPBImpl();
NodeHeartbeatRequestPBImpl copy =
new NodeHeartbeatRequestPBImpl(original.getProto());
assertNull(copy.getNodeLabels());
}
/**
* Test NodeHeartbeatResponsePBImpl.
*
* @throws IOException
*/
@Test
public void testNodeHeartbeatResponsePBImpl() throws IOException {
NodeHeartbeatResponsePBImpl original = new NodeHeartbeatResponsePBImpl();
original.setDiagnosticsMessage("testDiagnosticMessage");
original.setContainerTokenMasterKey(getMasterKey());
original.setNMTokenMasterKey(getMasterKey());
original.setNextHeartBeatInterval(1000);
original.setNodeAction(NodeAction.NORMAL);
original.setResponseId(100);
Map<ApplicationId, AppCollectorData> collectors = getCollectors(false);
original.setAppCollectors(collectors);
// create token1
Text userText1 = new Text("user1");
DelegationTokenIdentifier dtId1 = new DelegationTokenIdentifier(userText1,
new Text("renewer1"), userText1);
final Token<DelegationTokenIdentifier> expectedToken1 =
new Token<DelegationTokenIdentifier>(dtId1.getBytes(),
"password12".getBytes(), dtId1.getKind(), new Text("service1"));
Credentials credentials1 = new Credentials();
credentials1.addToken(expectedToken1.getService(), expectedToken1);
DataOutputBuffer dob1 = new DataOutputBuffer();
credentials1.writeTokenStorageToStream(dob1);
ByteBuffer byteBuffer1 =
ByteBuffer.wrap(dob1.getData(), 0, dob1.getLength());
Map<ApplicationId, ByteBuffer> systemCredentials =
new HashMap<ApplicationId, ByteBuffer>();
systemCredentials.put(getApplicationId(1), byteBuffer1);
original.setSystemCredentialsForApps(
YarnServerBuilderUtils.convertToProtoFormat(systemCredentials));
NodeHeartbeatResponsePBImpl copy = new NodeHeartbeatResponsePBImpl(
original.getProto());
assertEquals(100, copy.getResponseId());
assertEquals(NodeAction.NORMAL, copy.getNodeAction());
assertEquals(1000, copy.getNextHeartBeatInterval());
assertEquals(1, copy.getContainerTokenMasterKey().getKeyId());
assertEquals(1, copy.getNMTokenMasterKey().getKeyId());
assertEquals("testDiagnosticMessage", copy.getDiagnosticsMessage());
assertEquals(collectors, copy.getAppCollectors());
assertEquals(false, copy.getAreNodeLabelsAcceptedByRM());
assertEquals(1, copy.getSystemCredentialsForApps().size());
Credentials credentials1Out = new Credentials();
DataInputByteBuffer buf = new DataInputByteBuffer();
ByteBuffer buffer =
YarnServerBuilderUtils
.convertFromProtoFormat(copy.getSystemCredentialsForApps())
.get(getApplicationId(1));
assertNotNull(buffer);
buffer.rewind();
buf.reset(buffer);
credentials1Out.readTokenStorageStream(buf);
assertEquals(1, credentials1Out.getAllTokens().size());
// Ensure token1's password "password12" is available from proto response
assertEquals(10,
credentials1Out.getAllTokens().iterator().next().getPassword().length);
}
@Test
public void testNodeHeartbeatResponsePBImplWithRMAcceptLbls() {
NodeHeartbeatResponsePBImpl original = new NodeHeartbeatResponsePBImpl();
original.setAreNodeLabelsAcceptedByRM(true);
NodeHeartbeatResponsePBImpl copy =
new NodeHeartbeatResponsePBImpl(original.getProto());
assertTrue(copy.getAreNodeLabelsAcceptedByRM());
}
@Test
public void testNodeHBResponsePBImplWithNullCollectorToken() {
NodeHeartbeatResponsePBImpl original = new NodeHeartbeatResponsePBImpl();
Map<ApplicationId, AppCollectorData> collectors = getCollectors(true);
original.setAppCollectors(collectors);
NodeHeartbeatResponsePBImpl copy = new NodeHeartbeatResponsePBImpl(
original.getProto());
assertEquals(collectors, copy.getAppCollectors());
}
@Test
public void testNodeHeartbeatResponsePBImplWithDecreasedContainers() {
NodeHeartbeatResponsePBImpl original = new NodeHeartbeatResponsePBImpl();
original.addAllContainersToUpdate(
Arrays.asList(getDecreasedContainer(1, 2, 2048, 2),
getDecreasedContainer(2, 3, 1024, 1)));
NodeHeartbeatResponsePBImpl copy =
new NodeHeartbeatResponsePBImpl(original.getProto());
assertEquals(1, copy.getContainersToUpdate().get(0)
.getId().getContainerId());
assertEquals(1024, copy.getContainersToUpdate().get(1)
.getResource().getMemorySize());
}
/**
* Test RegisterNodeManagerRequestPBImpl.
*/
@Test
public void testRegisterNodeManagerRequestPBImpl() {
RegisterNodeManagerRequestPBImpl original = new RegisterNodeManagerRequestPBImpl();
original.setHttpPort(8080);
original.setNodeId(getNodeId());
Resource resource = recordFactory.newRecordInstance(Resource.class);
resource.setMemorySize(10000);
resource.setVirtualCores(2);
original.setResource(resource);
original.setPhysicalResource(resource);
RegisterNodeManagerRequestPBImpl copy = new RegisterNodeManagerRequestPBImpl(
original.getProto());
assertEquals(8080, copy.getHttpPort());
assertEquals(9090, copy.getNodeId().getPort());
assertEquals(10000, copy.getResource().getMemorySize());
assertEquals(2, copy.getResource().getVirtualCores());
assertEquals(10000, copy.getPhysicalResource().getMemorySize());
assertEquals(2, copy.getPhysicalResource().getVirtualCores());
}
/**
* Test MasterKeyPBImpl.
*/
@Test
public void testMasterKeyPBImpl() {
MasterKeyPBImpl original = new MasterKeyPBImpl();
original.setBytes(ByteBuffer.allocate(0));
original.setKeyId(1);
MasterKeyPBImpl copy = new MasterKeyPBImpl(original.getProto());
assertEquals(1, copy.getKeyId());
assertTrue(original.equals(copy));
assertEquals(original.hashCode(), copy.hashCode());
}
/**
* Test SerializedExceptionPBImpl.
*/
@Test
public void testSerializedExceptionPBImpl() {
SerializedExceptionPBImpl original = new SerializedExceptionPBImpl();
original.init("testMessage");
SerializedExceptionPBImpl copy = new SerializedExceptionPBImpl(
original.getProto());
assertEquals("testMessage", copy.getMessage());
original = new SerializedExceptionPBImpl();
original.init("testMessage", new Throwable(new Throwable("parent")));
copy = new SerializedExceptionPBImpl(original.getProto());
assertEquals("testMessage", copy.getMessage());
assertEquals("parent", copy.getCause().getMessage());
assertTrue( copy.getRemoteTrace().startsWith(
"java.lang.Throwable: java.lang.Throwable: parent"));
}
/**
* Test NodeStatusPBImpl.
*/
@Test
public void testNodeStatusPBImpl() {
NodeStatusPBImpl original = new NodeStatusPBImpl();
original.setContainersStatuses(Arrays.asList(getContainerStatus(1, 2, 1),
getContainerStatus(2, 3, 1)));
original.setKeepAliveApplications(Arrays.asList(getApplicationId(3),
getApplicationId(4)));
original.setNodeHealthStatus(getNodeHealthStatus());
original.setNodeId(getNodeId());
original.setResponseId(1);
original.setIncreasedContainers(
Arrays.asList(getIncreasedContainer(1, 2, 2048, 2),
getIncreasedContainer(2, 3, 4096, 3)));
NodeStatusPBImpl copy = new NodeStatusPBImpl(original.getProto());
assertEquals(3L, copy.getContainersStatuses().get(1).getContainerId()
.getContainerId());
assertEquals(3, copy.getKeepAliveApplications().get(0).getId());
assertEquals(1000, copy.getNodeHealthStatus().getLastHealthReportTime());
assertEquals(9090, copy.getNodeId().getPort());
assertEquals(1, copy.getResponseId());
assertEquals(1, copy.getIncreasedContainers().get(0)
.getId().getContainerId());
assertEquals(4096, copy.getIncreasedContainers().get(1)
.getResource().getMemorySize());
}
@Test
public void testRegisterNodeManagerRequestWithNullLabels() {
RegisterNodeManagerRequest request =
RegisterNodeManagerRequest.newInstance(
NodeId.newInstance("host", 1234), 1234, Resource.newInstance(0, 0),
"version", null, null);
// serialze to proto, and get request from proto
RegisterNodeManagerRequest request1 =
new RegisterNodeManagerRequestPBImpl(
((RegisterNodeManagerRequestPBImpl) request).getProto());
// check labels are coming with no values
assertNull(request1.getNodeLabels());
}
@Test
public void testRegisterNodeManagerRequestWithValidLabels() {
HashSet<NodeLabel> nodeLabels = getValidNodeLabels();
RegisterNodeManagerRequest request =
RegisterNodeManagerRequest.newInstance(
NodeId.newInstance("host", 1234), 1234, Resource.newInstance(0, 0),
"version", null, null, nodeLabels);
// serialze to proto, and get request from proto
RegisterNodeManagerRequest copy =
new RegisterNodeManagerRequestPBImpl(
((RegisterNodeManagerRequestPBImpl) request).getProto());
// check labels are coming with valid values
assertEquals(true, nodeLabels.containsAll(copy.getNodeLabels()));
// check for empty labels
request.setNodeLabels(new HashSet<NodeLabel> ());
copy = new RegisterNodeManagerRequestPBImpl(
((RegisterNodeManagerRequestPBImpl) request).getProto());
assertNotNull(copy.getNodeLabels());
assertEquals(0, copy.getNodeLabels().size());
}
@Test
public void testUnRegisterNodeManagerRequestPBImpl() throws Exception {
UnRegisterNodeManagerRequestPBImpl request = new UnRegisterNodeManagerRequestPBImpl();
NodeId nodeId = NodeId.newInstance("host", 1234);
request.setNodeId(nodeId);
UnRegisterNodeManagerRequestPBImpl copy = new UnRegisterNodeManagerRequestPBImpl(
request.getProto());
assertEquals(nodeId, copy.getNodeId());
}
private HashSet<NodeLabel> getValidNodeLabels() {
HashSet<NodeLabel> nodeLabels = new HashSet<NodeLabel>();
nodeLabels.add(NodeLabel.newInstance("java"));
nodeLabels.add(NodeLabel.newInstance("windows"));
nodeLabels.add(NodeLabel.newInstance("gpu"));
nodeLabels.add(NodeLabel.newInstance("x86"));
return nodeLabels;
}
private Map<ApplicationId, AppCollectorData> getCollectors(
boolean hasNullCollectorToken) {
ApplicationId appID = ApplicationId.newInstance(1L, 1);
String collectorAddr = "localhost:0";
AppCollectorData data = AppCollectorData.newInstance(appID, collectorAddr);
if (!hasNullCollectorToken) {
data.setCollectorToken(
org.apache.hadoop.yarn.api.records.Token.newInstance(new byte[0],
"kind", new byte[0], "s"));
}
Map<ApplicationId, AppCollectorData> collectorMap =
new HashMap<>();
collectorMap.put(appID, data);
return collectorMap;
}
private ContainerStatus getContainerStatus(int applicationId,
int containerID, int appAttemptId) {
ContainerStatus status = recordFactory
.newRecordInstance(ContainerStatus.class);
status.setContainerId(getContainerId(containerID, appAttemptId));
return status;
}
private ApplicationAttemptId getApplicationAttemptId(int appAttemptId) {
ApplicationAttemptId result = ApplicationAttemptIdPBImpl.newInstance(
getApplicationId(appAttemptId), appAttemptId);
return result;
}
private ContainerId getContainerId(int containerID, int appAttemptId) {
ContainerId containerId = ContainerIdPBImpl.newContainerId(
getApplicationAttemptId(appAttemptId), containerID);
return containerId;
}
private ApplicationId getApplicationId(int applicationId) {
ApplicationIdPBImpl appId = new ApplicationIdPBImpl() {
public ApplicationIdPBImpl setParameters(int id, long timestamp) {
setClusterTimestamp(timestamp);
setId(id);
build();
return this;
}
}.setParameters(applicationId, 1000);
return new ApplicationIdPBImpl(appId.getProto());
}
private Container getDecreasedContainer(int containerID,
int appAttemptId, int memory, int vCores) {
ContainerId containerId = getContainerId(containerID, appAttemptId);
Resource capability = Resource.newInstance(memory, vCores);
return Container.newInstance(
containerId, null, null, capability, null, null);
}
private Container getIncreasedContainer(int containerID,
int appAttemptId, int memory, int vCores) {
ContainerId containerId = getContainerId(containerID, appAttemptId);
Resource capability = Resource.newInstance(memory, vCores);
return Container.newInstance(
containerId, null, null, capability, null, null);
}
private NodeStatus getNodeStatus() {
NodeStatus status = recordFactory.newRecordInstance(NodeStatus.class);
status.setContainersStatuses(new ArrayList<ContainerStatus>());
status.setKeepAliveApplications(new ArrayList<ApplicationId>());
status.setNodeHealthStatus(getNodeHealthStatus());
status.setNodeId(getNodeId());
status.setResponseId(1);
return status;
}
private NodeId getNodeId() {
return NodeId.newInstance("localhost", 9090);
}
private NodeHealthStatus getNodeHealthStatus() {
NodeHealthStatus healStatus = recordFactory
.newRecordInstance(NodeHealthStatus.class);
healStatus.setHealthReport("healthReport");
healStatus.setIsNodeHealthy(true);
healStatus.setLastHealthReportTime(1000);
return healStatus;
}
private MasterKey getMasterKey() {
MasterKey key = recordFactory.newRecordInstance(MasterKey.class);
key.setBytes(ByteBuffer.allocate(0));
key.setKeyId(1);
return key;
}
}
|
TestYarnServerApiClasses
|
java
|
quarkusio__quarkus
|
extensions/hibernate-orm/runtime/src/main/java/io/quarkus/hibernate/orm/runtime/Hibernate.java
|
{
"start": 84,
"end": 474
}
|
class ____ {
public static void featureInit(boolean enabled) {
// Override the JPA persistence unit resolver so to use our custom boot
// strategy:
PersistenceProviderSetup.registerStaticInitPersistenceProvider();
if (enabled) {
Logger.getLogger("org.hibernate.quarkus.feature").debug("Hibernate Features Enabled");
}
}
}
|
Hibernate
|
java
|
spring-projects__spring-boot
|
module/spring-boot-jooq/src/test/java/org/springframework/boot/jooq/autoconfigure/JooqAutoConfigurationTests.java
|
{
"start": 14124,
"end": 14398
}
|
class ____ implements TransactionProvider {
@Override
public void begin(TransactionContext ctx) {
}
@Override
public void commit(TransactionContext ctx) {
}
@Override
public void rollback(TransactionContext ctx) {
}
}
static
|
CustomTransactionProvider
|
java
|
apache__flink
|
flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/StateMetadata.java
|
{
"start": 2771,
"end": 9488
}
|
class ____ {
public static final String FIELD_NAME_STATE_INDEX = "index";
public static final String FIELD_NAME_STATE_TTL = "ttl";
public static final String FIELD_NAME_STATE_NAME = "name";
@JsonProperty(value = FIELD_NAME_STATE_INDEX, index = 0)
private final int stateIndex;
@JsonProperty(value = FIELD_NAME_STATE_TTL, index = 1)
private final Duration stateTtl;
@JsonProperty(value = FIELD_NAME_STATE_NAME, index = 2)
private final String stateName;
@JsonCreator
public StateMetadata(
@JsonProperty(FIELD_NAME_STATE_INDEX) int stateIndex,
@JsonProperty(FIELD_NAME_STATE_TTL) String stateTtl,
@JsonProperty(FIELD_NAME_STATE_NAME) String stateName) {
this(
stateIndex,
TimeUtils.parseDuration(
Preconditions.checkNotNull(stateTtl, "state ttl should not be null")),
stateName);
}
public StateMetadata(int stateIndex, Duration stateTtl, String stateName) {
Preconditions.checkArgument(stateIndex >= 0, "state index should start from 0");
this.stateIndex = stateIndex;
this.stateTtl = Preconditions.checkNotNull(stateTtl, "state ttl should not be null");
this.stateName = Preconditions.checkNotNull(stateName, "state name should not be null");
}
public int getStateIndex() {
return stateIndex;
}
@JsonGetter(value = FIELD_NAME_STATE_TTL)
public String getStateTtl() {
return TimeUtils.formatWithHighestUnit(stateTtl);
}
public static List<StateMetadata> getOneInputOperatorDefaultMeta(
ReadableConfig tableConfig, String stateName) {
return getOneInputOperatorDefaultMeta(null, tableConfig, stateName);
}
public static List<StateMetadata> getOneInputOperatorDefaultMeta(
@Nullable Long stateTtlFromHint, ReadableConfig tableConfig, String stateName) {
return Collections.singletonList(
new StateMetadata(
0,
stateTtlFromHint == null
? tableConfig.get(ExecutionConfigOptions.IDLE_STATE_RETENTION)
: Duration.ofMillis(stateTtlFromHint),
stateName));
}
public static List<StateMetadata> getMultiInputOperatorDefaultMeta(
Map<Integer, Long> stateTtlFromHint,
ReadableConfig tableConfig,
String... stateNameList) {
Duration ttlFromTableConf = tableConfig.get(ExecutionConfigOptions.IDLE_STATE_RETENTION);
List<StateMetadata> stateMetadataList = new ArrayList<>(stateNameList.length);
for (int i = 0; i < stateNameList.length; i++) {
Duration stateTtl =
stateTtlFromHint.containsKey(i)
? Duration.ofMillis(stateTtlFromHint.get(i))
: ttlFromTableConf;
stateMetadataList.add(new StateMetadata(i, stateTtl, stateNameList[i]));
}
return stateMetadataList;
}
public static long getStateTtlForOneInputOperator(
ExecNodeConfig config, @Nullable List<StateMetadata> stateMetadataList) {
return getStateTtlForMultiInputOperator(config, 1, stateMetadataList).get(0);
}
public static List<Long> getStateTtlForMultiInputOperator(
ExecNodeConfig config,
int inputNumOfOperator,
@Nullable List<StateMetadata> stateMetadataList) {
// for backward compatibility
if (CollectionUtil.isNullOrEmpty(stateMetadataList)) {
return Stream.generate(config::getStateRetentionTime)
.limit(inputNumOfOperator)
.collect(Collectors.toList());
}
// in case malformed json plan
validateStateMetadata(inputNumOfOperator, stateMetadataList);
return stateMetadataList.stream()
.sorted(Comparator.comparing(StateMetadata::getStateIndex))
.map(metadata -> metadata.stateTtl.toMillis())
.collect(Collectors.toList());
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof StateMetadata)) {
return false;
}
StateMetadata that = (StateMetadata) o;
return stateIndex == that.stateIndex
&& stateTtl.equals(that.stateTtl)
&& stateName.equals(that.stateName);
}
@Override
public int hashCode() {
return Objects.hash(stateIndex, stateTtl, stateName);
}
/**
* Validate deserialized state metadata from json content of {@link
* org.apache.flink.table.api.CompiledPlan}.
*
* @param inputNumOfOperator the input number of the stateful operator that the exec node to
* translate to.
* @param stateMetadataList the deserialized state metadata list.
*/
private static void validateStateMetadata(
int inputNumOfOperator, List<StateMetadata> stateMetadataList) {
// the state metadata list size should be equal to the input number of the operator
Preconditions.checkArgument(
inputNumOfOperator == stateMetadataList.size(),
String.format(
"The compiled plan contains inconsistent state metadata configuration.\n"
+ "Received %s state meta for a %sInputStreamOperator.",
stateMetadataList.size(),
inputNumOfOperator > 2
? "Multiple"
: inputNumOfOperator == 2 ? "Two" : "One"));
// the state index should not contain duplicates, and should start from 0 to inputNum - 1
List<Integer> normalizedIndexList =
stateMetadataList.stream()
.map(StateMetadata::getStateIndex)
.sorted()
.distinct()
.collect(Collectors.toList());
Preconditions.checkArgument(
normalizedIndexList.size() == inputNumOfOperator
&& normalizedIndexList.get(0) == 0
&& normalizedIndexList.get(inputNumOfOperator - 1)
== inputNumOfOperator - 1,
"The compiled plan contains inconsistent state metadata configuration.\n"
+ "The state index should not contain duplicates and start from 0 (inclusive) "
+ "and monotonically increase to the input size (exclusive) of the operator.");
}
}
|
StateMetadata
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureLogins.java
|
{
"start": 2378,
"end": 9438
}
|
class ____ extends AbstractSecureRegistryTest {
private static final Logger LOG =
LoggerFactory.getLogger(TestSecureLogins.class);
@Test
public void testHasRealm() throws Throwable {
assertNotNull(getRealm());
LOG.info("ZK principal = {}", getPrincipalAndRealm(ZOOKEEPER_LOCALHOST));
}
@Test
public void testJaasFileSetup() throws Throwable {
// the JVM has seemed inconsistent on setting up here
assertNotNull(jaasFile, "jaasFile");
String confFilename = System.getProperty(Environment.JAAS_CONF_KEY);
assertEquals(jaasFile.getAbsolutePath(), confFilename);
}
@Test
public void testJaasFileBinding() throws Throwable {
// the JVM has seemed inconsistent on setting up here
assertNotNull(jaasFile, "jaasFile");
RegistrySecurity.bindJVMtoJAASFile(jaasFile);
String confFilename = System.getProperty(Environment.JAAS_CONF_KEY);
assertEquals(jaasFile.getAbsolutePath(), confFilename);
}
@Test
public void testClientLogin() throws Throwable {
LoginContext client = login(ALICE_LOCALHOST,
ALICE_CLIENT_CONTEXT,
keytab_alice);
try {
logLoginDetails(ALICE_LOCALHOST, client);
String confFilename = System.getProperty(Environment.JAAS_CONF_KEY);
assertNotNull(confFilename, "Unset: "+ Environment.JAAS_CONF_KEY);
String config = FileUtils.readFileToString(new File(confFilename), StandardCharsets.UTF_8);
LOG.info("{}=\n{}", confFilename, config);
RegistrySecurity.setZKSaslClientProperties(ALICE, ALICE_CLIENT_CONTEXT);
} finally {
client.logout();
}
}
@Test
public void testZKServerContextLogin() throws Throwable {
LoginContext client = login(ZOOKEEPER_LOCALHOST,
ZOOKEEPER_SERVER_CONTEXT,
keytab_zk);
logLoginDetails(ZOOKEEPER_LOCALHOST, client);
client.logout();
}
@Test
public void testServerLogin() throws Throwable {
LoginContext loginContext = createLoginContextZookeeperLocalhost();
loginContext.login();
loginContext.logout();
}
public LoginContext createLoginContextZookeeperLocalhost() throws
LoginException {
String principalAndRealm = getPrincipalAndRealm(ZOOKEEPER_LOCALHOST);
Set<Principal> principals = new HashSet<Principal>();
principals.add(new KerberosPrincipal(ZOOKEEPER_LOCALHOST));
Subject subject = new Subject(false, principals, new HashSet<Object>(),
new HashSet<Object>());
return new LoginContext("", subject, null,
KerberosConfiguration.createServerConfig(ZOOKEEPER_LOCALHOST, keytab_zk));
}
@Test
public void testKerberosAuth() throws Throwable {
File krb5conf = getKdc().getKrb5conf();
String krbConfig = FileUtils.readFileToString(krb5conf, StandardCharsets.UTF_8);
LOG.info("krb5.conf at {}:\n{}", krb5conf, krbConfig);
Subject subject = new Subject();
Class<?> kerb5LoginClass =
Class.forName(KerberosUtil.getKrb5LoginModuleName());
Constructor<?> kerb5LoginConstr = kerb5LoginClass.getConstructor();
Object kerb5LoginObject = kerb5LoginConstr.newInstance();
final Map<String, String> options = new HashMap<String, String>();
options.put("debug", "true");
if (IBM_JAVA) {
options.put("useKeytab",
keytab_alice.getAbsolutePath().startsWith("file://")
? keytab_alice.getAbsolutePath()
: "file://" + keytab_alice.getAbsolutePath());
options.put("principal", ALICE_LOCALHOST);
options.put("refreshKrb5Config", "true");
options.put("credsType", "both");
String ticketCache = System.getenv("KRB5CCNAME");
if (ticketCache != null) {
// IBM JAVA only respect system property and not env variable
// The first value searched when "useDefaultCcache" is used.
System.setProperty("KRB5CCNAME", ticketCache);
options.put("useDefaultCcache", "true");
options.put("renewTGT", "true");
}
} else {
options.put("keyTab", keytab_alice.getAbsolutePath());
options.put("principal", ALICE_LOCALHOST);
options.put("doNotPrompt", "true");
options.put("isInitiator", "true");
options.put("refreshKrb5Config", "true");
options.put("renewTGT", "true");
options.put("storeKey", "true");
options.put("useKeyTab", "true");
options.put("useTicketCache", "true");
}
Method methodInitialize =
kerb5LoginObject.getClass().getMethod("initialize", Subject.class,
CallbackHandler.class, Map.class, Map.class);
methodInitialize.invoke(kerb5LoginObject, subject, null,
new HashMap<String, String>(), options);
Method methodLogin = kerb5LoginObject.getClass().getMethod("login");
boolean loginOk = (Boolean) methodLogin.invoke(kerb5LoginObject);
assertTrue(loginOk, "Failed to login");
Method methodCommit = kerb5LoginObject.getClass().getMethod("commit");
boolean commitOk = (Boolean) methodCommit.invoke(kerb5LoginObject);
assertTrue(commitOk, "Failed to Commit");
}
@Test
public void testDefaultRealmValid() throws Throwable {
String defaultRealm = KerberosUtil.getDefaultRealm();
assertNotEmpty("No default Kerberos Realm",
defaultRealm);
LOG.info("Default Realm '{}'", defaultRealm);
}
@Test
public void testKerberosRulesValid() throws Throwable {
assertTrue(KerberosName.hasRulesBeenSet(),
"!KerberosName.hasRulesBeenSet()");
String rules = KerberosName.getRules();
assertEquals(kerberosRule, rules);
LOG.info(rules);
}
@Test
public void testValidKerberosName() throws Throwable {
KerberosName.setRuleMechanism(MECHANISM_HADOOP);
new HadoopKerberosName(ZOOKEEPER).getShortName();
// MECHANISM_MIT allows '/' and '@' in username
KerberosName.setRuleMechanism(MECHANISM_MIT);
new HadoopKerberosName(ZOOKEEPER).getShortName();
new HadoopKerberosName(ZOOKEEPER_LOCALHOST).getShortName();
new HadoopKerberosName(ZOOKEEPER_REALM).getShortName();
new HadoopKerberosName(ZOOKEEPER_LOCALHOST_REALM).getShortName();
KerberosName.setRuleMechanism(DEFAULT_MECHANISM);
}
@Test
public void testUGILogin() throws Throwable {
UserGroupInformation ugi = loginUGI(ZOOKEEPER, keytab_zk);
RegistrySecurity.UgiInfo ugiInfo =
new RegistrySecurity.UgiInfo(ugi);
LOG.info("logged in as: {}", ugiInfo);
assertTrue(UserGroupInformation.isSecurityEnabled(),
"security is not enabled: " + ugiInfo);
assertTrue(ugi.isFromKeytab(), "login is keytab based: " + ugiInfo);
// now we are here, build a SASL ACL
ACL acl = ugi.doAs(new PrivilegedExceptionAction<ACL>() {
@Override
public ACL run() throws Exception {
return registrySecurity.createSaslACLFromCurrentUser(0);
}
});
assertEquals(ZOOKEEPER_REALM, acl.getId().getId());
assertEquals(ZookeeperConfigOptions.SCHEME_SASL, acl.getId().getScheme());
registrySecurity.addSystemACL(acl);
}
}
|
TestSecureLogins
|
java
|
apache__kafka
|
generator/src/main/java/org/apache/kafka/message/FieldType.java
|
{
"start": 2473,
"end": 2994
}
|
class ____ implements FieldType {
static final Uint16FieldType INSTANCE = new Uint16FieldType();
private static final String NAME = "uint16";
@Override
public String getBoxedJavaType(HeaderGenerator headerGenerator) {
return "Integer";
}
@Override
public Optional<Integer> fixedLength() {
return Optional.of(2);
}
@Override
public String toString() {
return NAME;
}
}
final
|
Uint16FieldType
|
java
|
junit-team__junit5
|
documentation/src/test/java/example/exception/IgnoreIOExceptionTests.java
|
{
"start": 538,
"end": 741
}
|
class ____ {
@Test
void shouldSucceed() throws IOException {
throw new IOException("any");
}
@Test
@ExpectToFail
void shouldFail() {
throw new RuntimeException("any");
}
}
|
IgnoreIOExceptionTests
|
java
|
quarkusio__quarkus
|
extensions/amazon-lambda-http/runtime/src/main/java/io/quarkus/amazon/lambda/http/LambdaHttpRecorder.java
|
{
"start": 179,
"end": 809
}
|
class ____ {
/**
* @deprecated Properly use the config object
*/
@Deprecated
static LambdaHttpConfig config;
/**
* @deprecated Properly use the config object
*/
@Deprecated
static Pattern groupPattern;
private final RuntimeValue<LambdaHttpConfig> runtimeConfig;
public LambdaHttpRecorder(final RuntimeValue<LambdaHttpConfig> runtimeConfig) {
this.runtimeConfig = runtimeConfig;
}
public void setConfig() {
config = runtimeConfig.getValue();
groupPattern = Pattern.compile(runtimeConfig.getValue().cognitoClaimMatcher());
}
}
|
LambdaHttpRecorder
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueAllocationSettings.java
|
{
"start": 1240,
"end": 1395
}
|
class ____ minimum and maximum allocation settings based on the
* {@link CapacitySchedulerConfiguration} and other queue
* properties.
**/
public
|
determines
|
java
|
apache__camel
|
components/camel-jt400/src/test/java/org/apache/camel/component/jt400/Jt400SecureTestSupport.java
|
{
"start": 1244,
"end": 1897
}
|
class ____ extends CamelTestSupport {
@BindToRegistry("mockPool")
private AS400ConnectionPool connectionPool;
protected Jt400SecureTestSupport() {
}
@Override
@BeforeEach
public void doPreSetup() {
connectionPool = new MockAS400SecureConnectionPool();
}
@Override
public void doPostTearDown() {
if (connectionPool != null) {
connectionPool.close();
}
}
/**
* Returns the mock connection pool.
*
* @return the mock connection pool
*/
public AS400ConnectionPool getConnectionPool() {
return connectionPool;
}
}
|
Jt400SecureTestSupport
|
java
|
quarkusio__quarkus
|
extensions/hibernate-reactive/runtime/src/main/java/io/quarkus/hibernate/reactive/runtime/HibernateReactivePersistenceUnitProviderHelper.java
|
{
"start": 286,
"end": 662
}
|
class ____ implements QuarkusPersistenceUnitProviderHelper {
@Override
public boolean isActive(String persistenceUnitName) {
var instance = Arc.container().select(Mutiny.SessionFactory.class, qualifier(persistenceUnitName));
return instance.isResolvable() && instance.getHandle().getBean().isActive();
}
}
|
HibernateReactivePersistenceUnitProviderHelper
|
java
|
spring-projects__spring-boot
|
loader/spring-boot-loader-tools/src/test/java/org/springframework/boot/loader/tools/DefaultTimeZoneOffsetTests.java
|
{
"start": 1037,
"end": 2974
}
|
class ____ {
// gh-34424
@Test
void removeFromWithLongInDifferentTimeZonesReturnsSameValue() {
long time = OffsetDateTime.of(2000, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC).toInstant().toEpochMilli();
TimeZone timeZone1 = TimeZone.getTimeZone("GMT");
TimeZone timeZone2 = TimeZone.getTimeZone("GMT+8");
TimeZone timeZone3 = TimeZone.getTimeZone("GMT-8");
long result1 = new DefaultTimeZoneOffset(timeZone1).removeFrom(time);
long result2 = new DefaultTimeZoneOffset(timeZone2).removeFrom(time);
long result3 = new DefaultTimeZoneOffset(timeZone3).removeFrom(time);
long dosTime1 = toDosTime(Calendar.getInstance(timeZone1), result1);
long dosTime2 = toDosTime(Calendar.getInstance(timeZone2), result2);
long dosTime3 = toDosTime(Calendar.getInstance(timeZone3), result3);
assertThat(dosTime1).isEqualTo(dosTime2).isEqualTo(dosTime3);
}
@Test
void removeFromWithFileTimeReturnsFileTime() {
long time = OffsetDateTime.of(2000, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC).toInstant().toEpochMilli();
long result = new DefaultTimeZoneOffset(TimeZone.getTimeZone("GMT+8")).removeFrom(time);
assertThat(result).isNotEqualTo(time).isEqualTo(946656000000L);
}
/**
* Identical functionality to package-private
* org.apache.commons.compress.archivers.zip.ZipUtil.toDosTime(Calendar, long, byte[],
* int) method used by {@link ZipArchiveOutputStream} to convert times.
* @param calendar the source calendar
* @param time the time to convert
* @return the DOS time
*/
private long toDosTime(Calendar calendar, long time) {
calendar.setTimeInMillis(time);
final int year = calendar.get(Calendar.YEAR);
final int month = calendar.get(Calendar.MONTH) + 1;
return ((year - 1980) << 25) | (month << 21) | (calendar.get(Calendar.DAY_OF_MONTH) << 16)
| (calendar.get(Calendar.HOUR_OF_DAY) << 11) | (calendar.get(Calendar.MINUTE) << 5)
| (calendar.get(Calendar.SECOND) >> 1);
}
}
|
DefaultTimeZoneOffsetTests
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/writeAsArray/WriteAsArray_boolean_public.java
|
{
"start": 204,
"end": 554
}
|
class ____ extends TestCase {
public void test_0 () throws Exception {
VO vo = new VO();
vo.setId(true);
vo.setName("wenshao");
String text = JSON.toJSONString(vo, SerializerFeature.BeanToArray);
Assert.assertEquals("[true,\"wenshao\"]", text);
}
public static
|
WriteAsArray_boolean_public
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/util/internal/CLHMTestlibTests.java
|
{
"start": 157,
"end": 633
}
|
class ____ extends TestCase {
public static Test suite() {
TestSuite suite = new TestSuite();
addCLHMViewTests(suite);
return suite;
}
private static void addCLHMViewTests(TestSuite suite) {
suite.addTest(MapTestFactory.suite("CLHMView", MapTestFactory.synchronousGenerator(
() -> new PrivateMaxEntriesMap.Builder<String, String>()
.maximumCapacity(10).build()))
);
}
}
|
CLHMTestlibTests
|
java
|
spring-cloud__spring-cloud-gateway
|
spring-cloud-gateway-server-webflux/src/main/java/org/springframework/cloud/gateway/support/MessageHeaderUtils.java
|
{
"start": 1034,
"end": 3824
}
|
class ____ {
/**
* Message Header name which contains HTTP request parameters.
*/
public static final String HTTP_REQUEST_PARAM = "http_request_param";
private static final HttpHeaders IGNORED = new HttpHeaders();
private static final HttpHeaders REQUEST_ONLY = new HttpHeaders();
static {
IGNORED.add(MessageHeaders.ID, "");
IGNORED.add(HttpHeaders.CONTENT_LENGTH, "0");
// Headers that would typically be added by a downstream client
REQUEST_ONLY.add(HttpHeaders.ACCEPT, "");
REQUEST_ONLY.add(HttpHeaders.CONTENT_LENGTH, "");
REQUEST_ONLY.add(HttpHeaders.CONTENT_TYPE, "");
REQUEST_ONLY.add(HttpHeaders.HOST, "");
}
private MessageHeaderUtils() {
throw new IllegalStateException("Can't instantiate a utility class");
}
public static HttpHeaders fromMessage(MessageHeaders headers, List<String> ignoredHeders) {
HttpHeaders result = new HttpHeaders();
for (String name : headers.keySet()) {
Object value = headers.get(name);
name = name.toLowerCase(Locale.ROOT);
if (!IGNORED.containsHeader(name) && !ignoredHeders.contains(name)) {
Collection<?> values = multi(value);
for (Object object : values) {
result.set(name, object.toString());
}
}
}
return result;
}
@SuppressWarnings("unchecked")
public static HttpHeaders fromMessage(MessageHeaders headers) {
return fromMessage(headers, Collections.EMPTY_LIST);
}
public static HttpHeaders sanitize(HttpHeaders request, List<String> ignoredHeders,
List<String> requestOnlyHeaders) {
HttpHeaders result = new HttpHeaders();
for (String name : request.headerNames()) {
List<String> value = request.get(name);
name = name.toLowerCase(Locale.ROOT);
if (!IGNORED.containsHeader(name) && !REQUEST_ONLY.containsHeader(name) && !ignoredHeders.contains(name)
&& !requestOnlyHeaders.contains(name)) {
result.put(name, value);
}
}
return result;
}
@SuppressWarnings("unchecked")
public static HttpHeaders sanitize(HttpHeaders request) {
return sanitize(request, Collections.EMPTY_LIST, Collections.EMPTY_LIST);
}
public static MessageHeaders fromHttp(HttpHeaders headers) {
Map<String, Object> map = new LinkedHashMap<>();
for (String name : headers.headerNames()) {
Collection<?> values = multi(headers.get(name));
name = name.toLowerCase(Locale.ROOT);
Object value = values == null ? null : (values.size() == 1 ? values.iterator().next() : values);
if (name.toLowerCase(Locale.ROOT).equals(HttpHeaders.CONTENT_TYPE.toLowerCase(Locale.ROOT))) {
name = MessageHeaders.CONTENT_TYPE;
}
map.put(name, value);
}
return new MessageHeaders(map);
}
private static Collection<?> multi(Object value) {
return value instanceof Collection ? (Collection<?>) value : Arrays.asList(value);
}
}
|
MessageHeaderUtils
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
|
{
"start": 118583,
"end": 120997
}
|
class ____ extends AbstractAppender {
public boolean sawIndexWriterMessage;
public boolean sawIndexWriterIFDMessage;
MockAppender(final String name) throws IllegalAccessException {
super(name, RegexFilter.createFilter(".*(\n.*)*", new String[0], false, null, null), null);
}
@Override
public void append(LogEvent event) {
final String formattedMessage = event.getMessage().getFormattedMessage();
if (event.getLevel() == Level.TRACE && event.getMarker().getName().contains("[index][0]")) {
if (event.getLoggerName().endsWith(".IW") && formattedMessage.contains("IW: now apply all deletes")) {
sawIndexWriterMessage = true;
}
if (event.getLoggerName().endsWith(".IFD")) {
sawIndexWriterIFDMessage = true;
}
}
}
}
// #5891: make sure IndexWriter's infoStream output is
// sent to lucene.iw with log level TRACE:
public void testIndexWriterInfoStream() throws IllegalAccessException, IOException {
assumeFalse("who tests the tester?", VERBOSE);
MockAppender mockAppender = new MockAppender("testIndexWriterInfoStream");
mockAppender.start();
Logger theLogger = LogManager.getLogger("org.elasticsearch.index");
Level savedLevel = theLogger.getLevel();
Loggers.addAppender(theLogger, mockAppender);
Loggers.setLevel(theLogger, Level.DEBUG);
theLogger = LogManager.getLogger("org.elasticsearch.index");
try {
// First, with DEBUG, which should NOT log IndexWriter output:
ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), B_1, null);
engine.index(indexForDoc(doc));
engine.flush();
assertFalse(mockAppender.sawIndexWriterMessage);
// Again, with TRACE, which should log IndexWriter output:
Loggers.setLevel(theLogger, Level.TRACE);
engine.index(indexForDoc(doc));
engine.flush();
assertTrue(mockAppender.sawIndexWriterMessage);
engine.close();
} finally {
Loggers.removeAppender(theLogger, mockAppender);
mockAppender.stop();
Loggers.setLevel(theLogger, savedLevel);
}
}
private static
|
MockAppender
|
java
|
apache__maven
|
api/maven-api-spi/src/main/java/org/apache/maven/api/spi/ExtensibleEnumProvider.java
|
{
"start": 1120,
"end": 1331
}
|
enum ____ for extensible enumerations.
* <p>
* Maven uses extensible enumerations to allow plugins and extensions to add new values
* to various categories like languages, scopes, and packaging types. This
|
values
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/util/ClassUtilTest.java
|
{
"start": 543,
"end": 644
}
|
class ____ extends DatabindTestUtil
{
/* Test classes and interfaces needed for testing
|
ClassUtilTest
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/ShouldHaveEvenArgsTest.java
|
{
"start": 1730,
"end": 3965
}
|
class ____ {
private static final Map map = new HashMap<String, String>();
public void testWithOddArgs() {
// BUG: Diagnostic contains: even number of arguments
assertThat(map).containsExactly("hello", "there", "rest");
// BUG: Diagnostic contains: even number of arguments
assertThat(map).containsExactly("hello", "there", "hello", "there", "rest");
// BUG: Diagnostic contains: even number of arguments
assertThat(map).containsExactly(null, null, null, null, new Object[] {});
}
public void testWithArrayArgs() {
String key = "hello";
Object[] value = new Object[] {};
Object[][] args = new Object[][] {};
// BUG: Diagnostic contains: even number of arguments
assertThat(map).containsExactly(key, value, (Object) args);
}
public void testWithOddArgsWithCorrespondence() {
assertThat(map)
.comparingValuesUsing(Correspondence.from((a, b) -> true, "dummy"))
// BUG: Diagnostic contains: even number of arguments
.containsExactly("hello", "there", "rest");
assertThat(map)
.comparingValuesUsing(Correspondence.from((a, b) -> true, "dummy"))
// BUG: Diagnostic contains: even number of arguments
.containsExactly("hello", "there", "hello", "there", "rest");
}
}\
""")
.doTest();
}
@Test
public void negativeCase() {
compilationHelper
.addSourceLines(
"ShouldHaveEvenArgsNegativeCases.java",
"""
package com.google.errorprone.bugpatterns.testdata;
import static com.google.common.truth.Truth.assertThat;
import java.util.HashMap;
import java.util.Map;
/**
* Negative test cases for {@link ShouldHaveEvenArgs} check.
*
* @author bhagwani@google.com (Sumit Bhagwani)
*/
public
|
ShouldHaveEvenArgsPositiveCases
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/action/support/UnsafePlainActionFuture.java
|
{
"start": 1372,
"end": 2160
}
|
class ____<T> extends PlainActionFuture<T> {
private final Set<String> unsafeExecutors;
/**
* Create a future which permits any of the given named executors to be used unsafely (i.e. used for both waiting for the future's
* completion and completing the future).
*/
public UnsafePlainActionFuture(String... unsafeExecutors) {
assert unsafeExecutors.length > 0 : "use PlainActionFuture if there are no executors to use unsafely";
this.unsafeExecutors = Set.of(unsafeExecutors);
}
@Override
boolean allowedExecutors(Thread blockedThread, Thread completingThread) {
return super.allowedExecutors(blockedThread, completingThread) || unsafeExecutors.contains(EsExecutors.executorName(blockedThread));
}
}
|
UnsafePlainActionFuture
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/processor/TryCatchMustHaveExceptionConfiguredTest.java
|
{
"start": 1088,
"end": 1957
}
|
class ____ extends ContextTestSupport {
@Test
public void testTryCatchMustHaveExceptionConfigured() throws Exception {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
from("direct:a").doTry().to("mock:b").throwException(new IllegalArgumentException("Damn")).doCatch()
.to("mock:catch").end();
}
});
try {
context.start();
fail("Should throw exception");
} catch (Exception e) {
assertIsInstanceOf(IllegalArgumentException.class, e.getCause());
assertEquals("At least one Exception must be configured to catch", e.getCause().getMessage());
}
}
@Override
public boolean isUseRouteBuilder() {
return false;
}
}
|
TryCatchMustHaveExceptionConfiguredTest
|
java
|
alibaba__druid
|
core/src/main/java/com/alibaba/druid/sql/dialect/oracle/ast/stmt/OracleXmlColumnProperties.java
|
{
"start": 1476,
"end": 2819
}
|
class ____ extends OracleSQLObjectImpl {
private boolean secureFile;
private boolean basicFile;
private boolean clob;
private boolean binaryXml;
private OracleLobParameters lobParameters;
@Override
public void accept0(OracleASTVisitor visitor) {
}
public boolean isSecureFile() {
return secureFile;
}
public void setSecureFile(boolean secureFile) {
this.secureFile = secureFile;
}
public boolean isBasicFile() {
return basicFile;
}
public void setBasicFile(boolean basicFile) {
this.basicFile = basicFile;
}
public boolean isClob() {
return clob;
}
public void setClob(boolean clob) {
this.clob = clob;
}
public boolean isBinaryXml() {
return binaryXml;
}
public void setBinaryXml(boolean binaryXml) {
this.binaryXml = binaryXml;
}
public OracleLobParameters getLobParameters() {
return lobParameters;
}
public void setLobParameters(OracleLobParameters x) {
if (x != null) {
x.setParent(this);
}
this.lobParameters = x;
}
}
}
|
OracleXMLTypeStorage
|
java
|
alibaba__nacos
|
api/src/main/java/com/alibaba/nacos/api/remote/Requester.java
|
{
"start": 982,
"end": 1894
}
|
interface ____ {
/**
* send request.
*
* @param request request.
* @param timeoutMills mills of timeouts.
* @return response response returned.
* @throws NacosException exception throw.
*/
Response request(Request request, long timeoutMills) throws NacosException;
/**
* send request.
*
* @param request request.
* @return request future.
* @throws NacosException exception throw.
*/
RequestFuture requestFuture(Request request) throws NacosException;
/**
* send async request.
*
* @param request request.
* @param requestCallBack callback of request.
* @throws NacosException exception throw.
*/
void asyncRequest(Request request, RequestCallBack requestCallBack) throws NacosException;
/**
* close connection.
*/
void close();
}
|
Requester
|
java
|
mapstruct__mapstruct
|
processor/src/main/java/org/mapstruct/ap/spi/StripSuffixEnumTransformationStrategy.java
|
{
"start": 237,
"end": 662
}
|
class ____ implements EnumTransformationStrategy {
@Override
public String getStrategyName() {
return "stripSuffix";
}
@Override
public String transform(String value, String configuration) {
if ( value.endsWith( configuration ) ) {
return value.substring( 0, value.length() - configuration.length() );
}
return value;
}
}
|
StripSuffixEnumTransformationStrategy
|
java
|
apache__camel
|
components/camel-spring-parent/camel-spring-rabbitmq/src/main/java/org/apache/camel/component/springrabbit/SpringRabbitMQHelper.java
|
{
"start": 866,
"end": 1174
}
|
class ____ {
private SpringRabbitMQHelper() {
}
public static boolean isDefaultExchange(String exchangeName) {
return exchangeName == null || exchangeName.isEmpty()
|| exchangeName.equalsIgnoreCase(SpringRabbitMQConstants.DEFAULT_EXCHANGE_NAME);
}
}
|
SpringRabbitMQHelper
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/inlineme/InlinerTest.java
|
{
"start": 9094,
"end": 9559
}
|
class ____ {
public void doTest() {
String str = Client.<T>after();
}
}
""")
.doTest(TEXT_MATCH);
}
@Test
public void instanceMethod_withConflictingImport() {
refactoringTestHelper
.addInputLines(
"Client.java",
"""
import com.google.errorprone.annotations.InlineMe;
import java.time.Duration;
public final
|
Caller
|
java
|
spring-projects__spring-framework
|
spring-test/src/main/java/org/springframework/test/web/servlet/setup/MockMvcBuilders.java
|
{
"start": 1361,
"end": 4493
}
|
class ____ {
private MockMvcBuilders() {
}
/**
* Build a {@link MockMvc} instance using the given, fully initialized
* (i.e., <em>refreshed</em>) {@link WebApplicationContext}.
* <p>The {@link org.springframework.web.servlet.DispatcherServlet DispatcherServlet}
* will use the context to discover Spring MVC infrastructure and application
* controllers in it. The context must have been configured with a
* {@link jakarta.servlet.ServletContext ServletContext}.
*/
public static DefaultMockMvcBuilder webAppContextSetup(WebApplicationContext context) {
return new DefaultMockMvcBuilder(context);
}
/**
* Build a {@link MockMvc} instance by registering one or more
* {@code @Controller} instances and configuring Spring MVC infrastructure
* programmatically.
* <p>This allows full control over the instantiation and initialization of
* controllers and their dependencies, similar to plain unit tests while
* also making it possible to test one controller at a time.
* <p>When this builder is used, the minimum infrastructure required by the
* {@link org.springframework.web.servlet.DispatcherServlet DispatcherServlet}
* to serve requests with annotated controllers is created automatically
* and can be customized, resulting in configuration that is equivalent to
* what MVC Java configuration provides except using builder-style methods.
* <p>If the Spring MVC configuration of an application is relatively
* straight-forward — for example, when using the MVC namespace in
* XML or MVC Java config — then using this builder might be a good
* option for testing a majority of controllers. In such cases, a much
* smaller number of tests can be used to focus on testing and verifying
* the actual Spring MVC configuration.
* @param controllers one or more {@code @Controller} instances to test
* (specified {@code Class} will be turned into instance)
*/
public static StandaloneMockMvcBuilder standaloneSetup(Object... controllers) {
return new StandaloneMockMvcBuilder(controllers);
}
/**
* Build a {@link MockMvc} instance by registering one or more
* {@link RouterFunction RouterFunction} instances and configuring Spring
* MVC infrastructure programmatically.
* <p>This allows full control over the instantiation and initialization of
* router functions and their dependencies, similar to plain unit tests while
* also making it possible to test one router function at a time.
* <p>When this builder is used, the minimum infrastructure required by the
* {@link org.springframework.web.servlet.DispatcherServlet DispatcherServlet}
* to serve requests with router functions is created automatically
* and can be customized, resulting in configuration that is equivalent to
* what MVC Java configuration provides except using builder-style methods.
* @param routerFunctions one or more {@code RouterFunction} instances to test
* @since 6.2
*/
public static RouterFunctionMockMvcBuilder routerFunctions(RouterFunction<?>... routerFunctions) {
return new RouterFunctionMockMvcBuilder(routerFunctions);
}
}
|
MockMvcBuilders
|
java
|
spring-projects__spring-boot
|
configuration-metadata/spring-boot-configuration-processor/src/test/java/org/springframework/boot/configurationprocessor/metadata/TestJsonConverter.java
|
{
"start": 1039,
"end": 1500
}
|
class ____ extends JsonConverter {
@Override
public JSONArray toJsonArray(ConfigurationMetadata metadata, ItemType itemType) throws Exception {
return super.toJsonArray(metadata, itemType);
}
@Override
public JSONArray toJsonArray(Collection<ItemHint> hints) throws Exception {
return super.toJsonArray(hints);
}
@Override
public JSONObject toJsonObject(ItemMetadata item) throws Exception {
return super.toJsonObject(item);
}
}
|
TestJsonConverter
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/tofix/ExternalTypeIdDup1410Test.java
|
{
"start": 1300,
"end": 2908
}
|
class ____ extends EnvironmentEvent {
private String status;
private Object resultData;
protected BackendEvent() {
} // for deserializer
public BackendEvent(String envName, String message, String status, Object results) {
super(envName, message);
this.status = status;
resultData = results;
}
public static BackendEvent create(String environmentName, String message,
String status, Object results) {
return new BackendEvent(environmentName, message,
status, results);
}
@Override
public EnvironmentEventSource getSource() {
return EnvironmentEventSource.BACKEND;
}
public String getStatus() {
return status;
}
public Object getResultData() {
return resultData;
}
@Override
public String toString() {
return String.format("(%s): %s", status, getMessage());
}
}
@JacksonTestFailureExpected
@Test
void dupProps() throws Exception {
ObjectMapper mapper = jsonMapperBuilder()
.enable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES)
.build();
EnvironmentEvent event = new BackendEvent("foo", "hello", "bar", null);
String ser = mapper
.writerWithDefaultPrettyPrinter()
.writeValueAsString(event);
mapper.readValue(ser, EnvironmentEvent.class);
assertNotNull(ser);
}
}
|
BackendEvent
|
java
|
netty__netty
|
handler/src/main/java/io/netty/handler/ssl/OpenSslKeyMaterialManager.java
|
{
"start": 1117,
"end": 1172
}
|
class ____ {
// Code in this
|
OpenSslKeyMaterialManager
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/InMemoryBlockBlobStore.java
|
{
"start": 1191,
"end": 1475
}
|
class ____ {
private final HashMap<String, Entry> blobs = new HashMap<String, Entry>();
private HashMap<String, String> containerMetadata;
public synchronized Iterable<String> getKeys() {
return new ArrayList<String>(blobs.keySet());
}
public static
|
InMemoryBlockBlobStore
|
java
|
apache__flink
|
flink-table/flink-table-common/src/test/java/org/apache/flink/table/types/inference/InputTypeStrategiesTestBase.java
|
{
"start": 2340,
"end": 7093
}
|
class ____ {
@ParameterizedTest(name = "{index}: {0}")
@MethodSource("testData")
void testStrategy(TestSpec testSpec) {
if (testSpec.expectedSignature != null) {
assertThat(generateSignature(testSpec)).isEqualTo(testSpec.expectedSignature);
}
for (List<DataType> actualArgumentTypes : testSpec.actualArgumentTypes) {
if (testSpec.expectedErrorMessage != null) {
assertThatThrownBy(() -> runTypeInference(actualArgumentTypes, testSpec))
.satisfies(
anyCauseMatches(
ValidationException.class, testSpec.expectedErrorMessage));
} else if (testSpec.expectedArgumentTypes != null) {
assertThat(
runTypeInference(actualArgumentTypes, testSpec)
.getExpectedArgumentTypes())
.isEqualTo(testSpec.expectedArgumentTypes);
}
}
}
protected abstract Stream<TestSpec> testData();
// --------------------------------------------------------------------------------------------
private String generateSignature(TestSpec testSpec) {
final FunctionDefinitionMock functionDefinitionMock = new FunctionDefinitionMock();
functionDefinitionMock.functionKind = FunctionKind.SCALAR;
return TypeInferenceUtil.generateSignature(
createTypeInference(testSpec), "f", functionDefinitionMock);
}
private TypeInferenceUtil.Result runTypeInference(
List<DataType> actualArgumentTypes, TestSpec testSpec) {
final FunctionDefinitionMock functionDefinitionMock = new FunctionDefinitionMock();
functionDefinitionMock.functionKind = FunctionKind.SCALAR;
final CallContextMock callContextMock = new CallContextMock();
callContextMock.typeFactory = new DataTypeFactoryMock();
callContextMock.functionDefinition = functionDefinitionMock;
callContextMock.argumentDataTypes = actualArgumentTypes;
callContextMock.argumentLiterals =
IntStream.range(0, actualArgumentTypes.size())
.mapToObj(i -> testSpec.literals.containsKey(i))
.collect(Collectors.toList());
callContextMock.argumentValues =
IntStream.range(0, actualArgumentTypes.size())
.mapToObj(i -> Optional.ofNullable(testSpec.literals.get(i)))
.collect(Collectors.toList());
callContextMock.argumentNulls =
IntStream.range(0, actualArgumentTypes.size())
.mapToObj(i -> false)
.collect(Collectors.toList());
callContextMock.name = "f";
callContextMock.outputDataType = Optional.empty();
callContextMock.tableSemantics = testSpec.tableSemantics;
callContextMock.modelSemantics = testSpec.modelSemantics;
final TypeInferenceUtil.SurroundingInfo surroundingInfo;
if (testSpec.surroundingStrategy != null) {
final TypeInference outerTypeInference =
TypeInference.newBuilder()
.inputTypeStrategy(testSpec.surroundingStrategy)
.outputTypeStrategy(TypeStrategies.MISSING)
.build();
surroundingInfo =
TypeInferenceUtil.SurroundingInfo.of(
"f_outer",
functionDefinitionMock,
outerTypeInference,
1,
0,
callContextMock.isGroupedAggregation);
} else {
surroundingInfo = null;
}
return TypeInferenceUtil.runTypeInference(
createTypeInference(testSpec), callContextMock, surroundingInfo);
}
private TypeInference createTypeInference(TestSpec testSpec) {
final TypeInference.Builder builder =
TypeInference.newBuilder()
.inputTypeStrategy(testSpec.strategy)
.outputTypeStrategy(TypeStrategies.explicit(DataTypes.BOOLEAN()));
if (testSpec.namedArguments != null) {
builder.namedArguments(testSpec.namedArguments);
}
if (testSpec.typedArguments != null) {
builder.typedArguments(testSpec.typedArguments);
}
return builder.build();
}
// --------------------------------------------------------------------------------------------
/** A specification for tests to execute. */
protected static
|
InputTypeStrategiesTestBase
|
java
|
apache__rocketmq
|
client/src/main/java/org/apache/rocketmq/client/trace/TraceView.java
|
{
"start": 1001,
"end": 5081
}
|
/**
 * Flattened, per-message view of a RocketMQ trace record, decoded from the
 * raw trace-topic messages produced by the client trace feature.
 */
class TraceView {
    private String msgId;
    private String tags;
    private String keys;
    private String storeHost;
    private String clientHost;
    private int costTime;
    private String msgType;
    private String offSetMsgId;
    private long timeStamp;
    // NOTE(review): bornTime is never populated by decodeFromTraceTransData —
    // confirm whether callers are expected to set it separately.
    private long bornTime;
    private String topic;
    private String groupName;
    private String status;

    /**
     * Decodes the body of a trace-topic message into views whose message id
     * matches {@code key}.
     *
     * @param key message id to filter on; trace contexts for other messages are skipped
     * @param messageExt raw trace message whose body holds the encoded trace contexts
     * @return matching views; empty when the body is absent/empty or nothing matches
     */
    public static List<TraceView> decodeFromTraceTransData(String key, MessageExt messageExt) {
        List<TraceView> messageTraceViewList = new ArrayList<>();
        // Guard the raw bytes before decoding: new String(null, charset) would
        // throw NPE, and a null check on the constructed String can never fire.
        byte[] body = messageExt.getBody();
        if (body == null || body.length == 0) {
            return messageTraceViewList;
        }
        String messageBody = new String(body, StandardCharsets.UTF_8);
        List<TraceContext> traceContextList = TraceDataEncoder.decoderFromTraceDataString(messageBody);
        for (TraceContext context : traceContextList) {
            TraceView messageTraceView = new TraceView();
            // Only the first bean of each context is inspected, matching the encoder's layout.
            TraceBean traceBean = context.getTraceBeans().get(0);
            if (!traceBean.getMsgId().equals(key)) {
                continue;
            }
            messageTraceView.setCostTime(context.getCostTime());
            messageTraceView.setGroupName(context.getGroupName());
            if (context.isSuccess()) {
                messageTraceView.setStatus("success");
            } else {
                messageTraceView.setStatus("failed");
            }
            messageTraceView.setKeys(traceBean.getKeys());
            messageTraceView.setMsgId(traceBean.getMsgId());
            messageTraceView.setTags(traceBean.getTags());
            messageTraceView.setTopic(traceBean.getTopic());
            messageTraceView.setMsgType(context.getTraceType().name());
            messageTraceView.setOffSetMsgId(traceBean.getOffsetMsgId());
            messageTraceView.setTimeStamp(context.getTimeStamp());
            messageTraceView.setStoreHost(traceBean.getStoreHost());
            messageTraceView.setClientHost(messageExt.getBornHostString());
            messageTraceViewList.add(messageTraceView);
        }
        return messageTraceViewList;
    }

    public String getMsgId() {
        return msgId;
    }
    public void setMsgId(String msgId) {
        this.msgId = msgId;
    }
    public String getTags() {
        return tags;
    }
    public void setTags(String tags) {
        this.tags = tags;
    }
    public String getKeys() {
        return keys;
    }
    public void setKeys(String keys) {
        this.keys = keys;
    }
    public String getStoreHost() {
        return storeHost;
    }
    public void setStoreHost(String storeHost) {
        this.storeHost = storeHost;
    }
    public String getClientHost() {
        return clientHost;
    }
    public void setClientHost(String clientHost) {
        this.clientHost = clientHost;
    }
    public int getCostTime() {
        return costTime;
    }
    public void setCostTime(int costTime) {
        this.costTime = costTime;
    }
    public String getMsgType() {
        return msgType;
    }
    public void setMsgType(String msgType) {
        this.msgType = msgType;
    }
    public String getOffSetMsgId() {
        return offSetMsgId;
    }
    public void setOffSetMsgId(String offSetMsgId) {
        this.offSetMsgId = offSetMsgId;
    }
    public long getTimeStamp() {
        return timeStamp;
    }
    public void setTimeStamp(long timeStamp) {
        this.timeStamp = timeStamp;
    }
    public long getBornTime() {
        return bornTime;
    }
    public void setBornTime(long bornTime) {
        this.bornTime = bornTime;
    }
    public String getTopic() {
        return topic;
    }
    public void setTopic(String topic) {
        this.topic = topic;
    }
    public String getGroupName() {
        return groupName;
    }
    public void setGroupName(String groupName) {
        this.groupName = groupName;
    }
    public String getStatus() {
        return status;
    }
    public void setStatus(String status) {
        this.status = status;
    }
}
|
TraceView
|
java
|
spring-projects__spring-boot
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/condition/ConditionalOnSingleCandidate.java
|
{
"start": 2325,
"end": 2733
}
|
class ____ is contained in the {@link BeanFactory} and a primary candidate
* exists in case of multiple instances. Beans that are not autowire candidates, that
* are not default candidates, or that are fallback candidates are ignored.
* <p>
* This attribute may <strong>not</strong> be used in conjunction with
* {@link #type()}, but it may be used instead of {@link #type()}.
* @return the
|
specified
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/simple/FeatureRequestFilterWithHighestPriority.java
|
{
"start": 312,
"end": 586
}
|
/**
 * Container request filter (registered via a feature) that tags every incoming
 * request with a marker header so tests can verify the filter actually ran.
 */
class FeatureRequestFilterWithHighestPriority implements ContainerRequestFilter {

    @Override
    public void filter(ContainerRequestContext requestContext) throws IOException {
        // Marker header observed by the test's assertions; the value itself is arbitrary.
        requestContext.getHeaders().add("feature-filter-request", "authentication");
    }
}
|
FeatureRequestFilterWithHighestPriority
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/sql/model/ast/AbstractTableDelete.java
|
{
"start": 449,
"end": 1934
}
|
class ____ extends AbstractRestrictedTableMutation<JdbcDeleteMutation> implements TableDelete {
public AbstractTableDelete(
MutatingTableReference mutatingTable,
MutationTarget<?> mutationTarget,
List<ColumnValueBinding> keyRestrictionBindings,
List<ColumnValueBinding> optLockRestrictionBindings,
List<ColumnValueParameter> parameters) {
this(
mutatingTable,
mutationTarget,
"delete for " + mutationTarget.getRolePath(),
keyRestrictionBindings,
optLockRestrictionBindings,
parameters
);
}
public AbstractTableDelete(
MutatingTableReference mutatingTable,
MutationTarget<?> mutationTarget,
String sqlComment,
List<ColumnValueBinding> keyRestrictionBindings,
List<ColumnValueBinding> optLockRestrictionBindings,
List<ColumnValueParameter> parameters) {
super( mutatingTable, mutationTarget, sqlComment, keyRestrictionBindings, optLockRestrictionBindings, parameters );
}
@Override
protected String getLoggableName() {
return "TableDelete";
}
@Override
public Expectation getExpectation() {
return getMutatingTable().getTableMapping().getDeleteDetails().getExpectation();
}
@Override
protected JdbcDeleteMutation createMutationOperation(
TableMapping tableDetails,
String sql,
List<JdbcParameterBinder> effectiveBinders) {
return new JdbcDeleteMutation(
tableDetails,
getMutationTarget(),
sql,
isCallable(),
getExpectation(),
effectiveBinders
);
}
}
|
AbstractTableDelete
|
java
|
spring-projects__spring-framework
|
spring-jdbc/src/main/java/org/springframework/jdbc/support/CustomSQLErrorCodesTranslation.java
|
{
"start": 1579,
"end": 2031
}
|
class ____ the specified error codes.
*/
public void setExceptionClass(@Nullable Class<?> exceptionClass) {
    // A null argument clears the mapping; otherwise the class must be a
    // DataAccessException subtype, since translation produces that hierarchy.
    boolean acceptable = (exceptionClass == null ||
            DataAccessException.class.isAssignableFrom(exceptionClass));
    if (!acceptable) {
        throw new IllegalArgumentException("Invalid exception class [" + exceptionClass +
                "]: needs to be a subclass of [org.springframework.dao.DataAccessException]");
    }
    this.exceptionClass = exceptionClass;
}
/**
* Return the exception
|
for
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/adapter/AwsV1BindingSupport.java
|
{
"start": 2781,
"end": 4026
}
|
class ____ implement one of the following means of construction, which are
* attempted in order:
*
* <ol>
* <li>a public constructor accepting java.net.URI and
* org.apache.hadoop.conf.Configuration</li>
* <li>a public constructor accepting
* org.apache.hadoop.conf.Configuration</li>
* <li>a public static method named getInstance that accepts no
* arguments and returns an instance of
* com.amazonaws.auth.AWSCredentialsProvider, or</li>
* <li>a public default constructor.</li>
* </ol>
* @param conf configuration
* @param className credential classname
* @param uri URI of the FS
* @param key configuration key to use
* @return the instantiated class
* @throws InstantiationIOException on any instantiation failure, including v1 SDK not found
* @throws IOException anything else.
*/
public static AwsCredentialsProvider createAWSV1CredentialProvider(
    Configuration conf,
    String className,
    @Nullable URI uri,
    final String key) throws IOException {
  // Happy path first: when the v1 SDK is on the classpath, delegate to the
  // v1->v2 adapter; otherwise raise the standard "unavailable" failure.
  if (isAwsV1SdkAvailable()) {
    return V1ToV2AwsCredentialProviderAdapter.create(conf, className, uri);
  }
  throw unavailable(uri, className, key, "No AWS v1 SDK available");
}
}
|
must
|
java
|
apache__camel
|
components/camel-undertow/src/test/java/org/apache/camel/component/undertow/rest/RestManagementTest.java
|
{
"start": 1163,
"end": 2679
}
|
/**
 * Verifies that REST endpoints defined on the Undertow component are exposed
 * through Camel's JMX management (endpoint MBeans are registered).
 */
class RestManagementTest extends BaseUndertowTest {

    @Override
    protected boolean useJmx() {
        // JMX must be enabled for endpoint MBeans to be registered at all.
        return true;
    }

    protected MBeanServer getMBeanServer() {
        return context.getManagementStrategy().getManagementAgent().getMBeanServer();
    }

    @Test
    public void testRestManagement() throws Exception {
        MBeanServer mbeanServer = getMBeanServer();
        Set<ObjectName> s = mbeanServer.queryNames(
                new ObjectName("org.apache.camel:context=" + context.getManagementName() + ",type=endpoints,*"), null);
        // Message now matches the asserted count (it previously said 8 while expecting 6).
        assertEquals(6, s.size(), "Could not find 6 endpoints: " + s);
        // there should be 3 rest endpoints
        long count = s.stream().filter(p -> p.getCanonicalName().contains("rest")).count();
        assertEquals(3, count, "There should be 3 rest endpoints");
    }

    @Override
    protected RouteBuilder createRouteBuilder() {
        return new RouteBuilder() {
            public void configure() {
                restConfiguration().component("undertow").host("localhost").port(getPort());
                rest("/say")
                        .get("/hello").to("direct:hello")
                        .get("/bye").consumes("application/json").to("direct:bye")
                        .post("/bye").to("mock:update");
                from("direct:hello")
                        .transform().constant("Hello World");
                from("direct:bye")
                        .transform().constant("Bye World");
            }
        };
    }
}
|
RestManagementTest
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.