language stringclasses 1
value | repo stringclasses 60
values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/web/servlet/client/samples/XmlContentTests.java | {
"start": 1822,
"end": 4624
} | class ____ {
private static final String persons_XML = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<persons>
<person><name>Jane</name></person>
<person><name>Jason</name></person>
<person><name>John</name></person>
</persons>
""";
private final RestTestClient client = RestTestClient.bindToController(new PersonController()).build();
@Test
void xmlContent() {
this.client.get().uri("/persons")
.accept(MediaType.APPLICATION_XML)
.exchange()
.expectStatus().isOk()
.expectBody().xml(persons_XML);
}
@Test
void xpathIsEqualTo() {
this.client.get().uri("/persons")
.accept(MediaType.APPLICATION_XML)
.exchange()
.expectStatus().isOk()
.expectBody()
.xpath("/").exists()
.xpath("/persons").exists()
.xpath("/persons/person").exists()
.xpath("/persons/person").nodeCount(3)
.xpath("/persons/person[1]/name").isEqualTo("Jane")
.xpath("/persons/person[2]/name").isEqualTo("Jason")
.xpath("/persons/person[3]/name").isEqualTo("John");
}
@Test
void xpathDoesNotExist() {
this.client.get().uri("/persons")
.accept(MediaType.APPLICATION_XML)
.exchange()
.expectStatus().isOk()
.expectBody()
.xpath("/persons/person[4]").doesNotExist();
}
@Test
void xpathNodeCount() {
this.client.get().uri("/persons")
.accept(MediaType.APPLICATION_XML)
.exchange()
.expectStatus().isOk()
.expectBody()
.xpath("/persons/person").nodeCount(3)
.xpath("/persons/person").nodeCount(count -> MatcherAssert.assertThat(count, equalTo(3)));
}
@Test
void xpathMatches() {
this.client.get().uri("/persons")
.accept(MediaType.APPLICATION_XML)
.exchange()
.expectStatus().isOk()
.expectBody()
.xpath("//person/name").string(name -> MatcherAssert.assertThat(name, startsWith("J")))
.xpath("//person/name").string(name -> {
if (!name.startsWith("J")) {
throw new AssertionError("Name does not start with J: " + name);
}
});
}
@Test
void xpathContainsSubstringViaRegex() {
this.client.get().uri("/persons/John")
.accept(MediaType.APPLICATION_XML)
.exchange()
.expectStatus().isOk()
.expectBody()
.xpath("//name[contains(text(), 'oh')]").exists();
}
@Test
void postXmlContent() {
String content =
"<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>" +
"<person><name>John</name></person>";
this.client.post().uri("/persons")
.contentType(MediaType.APPLICATION_XML)
.body(content)
.exchange()
.expectStatus().isCreated()
.expectHeader().valueEquals(HttpHeaders.LOCATION, "/persons/John")
.expectBody().isEmpty();
}
@SuppressWarnings("unused")
@XmlRootElement(name="persons")
@XmlAccessorType(XmlAccessType.FIELD)
private static | XmlContentTests |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/superclass/auditoverride/EmbeddableTest.java | {
"start": 8392,
"end": 8668
} | class ____ extends SimpleAbstractMappedSuperclass {
}
// an embeddable that should introduce 'intValue' as audited based on audit overrides locally
@Embeddable
@AuditOverride(forClass = SimpleAbstractMappedSuperclass.class, name = "intValue")
public static | SimpleEmbeddable |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java | {
"start": 21550,
"end": 36428
} | class ____ stead of SocketException.
IOException se = new SocketException("Original Exception : " + ioe);
se.initCause(ioe);
/* Change the stacktrace so that original trace is not truncated
* when printed.*/
se.setStackTrace(ioe.getStackTrace());
return se;
}
// otherwise just return the same exception.
return ioe;
}
/**
* @param datalen Length of data
* @return number of chunks for data of given size
*/
private int numberOfChunks(long datalen) {
return (int) ((datalen + chunkSize - 1)/chunkSize);
}
/**
* Sends a packet with up to maxChunks chunks of data.
*
* @param pkt buffer used for writing packet data
* @param maxChunks maximum number of chunks to send
* @param out stream to send data to
* @param transferTo use transferTo to send data
* @param throttler used for throttling data transfer bandwidth
*/
private int sendPacket(ByteBuffer pkt, int maxChunks, OutputStream out,
boolean transferTo, DataTransferThrottler throttler) throws IOException {
int dataLen = (int) Math.min(endOffset - offset,
(chunkSize * (long) maxChunks));
int numChunks = numberOfChunks(dataLen); // Number of chunks be sent in the packet
int checksumDataLen = numChunks * checksumSize;
int packetLen = dataLen + checksumDataLen + 4;
boolean lastDataPacket = offset + dataLen == endOffset && dataLen > 0;
// The packet buffer is organized as follows:
// _______HHHHCCCCD?D?D?D?
// ^ ^
// | \ checksumOff
// \ headerOff
// _ padding, since the header is variable-length
// H = header and length prefixes
// C = checksums
// D? = data, if transferTo is false.
int headerLen = writePacketHeader(pkt, dataLen, packetLen);
// Per above, the header doesn't start at the beginning of the
// buffer
int headerOff = pkt.position() - headerLen;
int checksumOff = pkt.position();
byte[] buf = pkt.array();
if (checksumSize > 0 && ris.getChecksumIn() != null) {
readChecksum(buf, checksumOff, checksumDataLen);
// write in progress that we need to use to get last checksum
if (lastDataPacket && lastChunkChecksum != null) {
int start = checksumOff + checksumDataLen - checksumSize;
byte[] updatedChecksum = lastChunkChecksum.getChecksum();
if (updatedChecksum != null) {
System.arraycopy(updatedChecksum, 0, buf, start, checksumSize);
}
}
}
int dataOff = checksumOff + checksumDataLen;
if (!transferTo) { // normal transfer
try {
ris.readDataFully(buf, dataOff, dataLen);
} catch (IOException ioe) {
if (ioe.getMessage().startsWith(EIO_ERROR)) {
throw new DiskFileCorruptException("A disk IO error occurred", ioe);
}
throw ioe;
}
if (verifyChecksum) {
verifyChecksum(buf, dataOff, dataLen, numChunks, checksumOff);
}
}
try {
if (transferTo) {
SocketOutputStream sockOut = (SocketOutputStream)out;
// First write header and checksums
sockOut.write(buf, headerOff, dataOff - headerOff);
// no need to flush since we know out is not a buffered stream
FileChannel fileCh = ((FileInputStream)ris.getDataIn()).getChannel();
LongWritable waitTime = new LongWritable();
LongWritable transferTime = new LongWritable();
fileIoProvider.transferToSocketFully(
ris.getVolumeRef().getVolume(), sockOut, fileCh, blockInPosition,
dataLen, waitTime, transferTime);
datanode.metrics.addSendDataPacketBlockedOnNetworkNanos(waitTime.get());
datanode.metrics.addSendDataPacketTransferNanos(transferTime.get());
blockInPosition += dataLen;
} else {
// normal transfer
out.write(buf, headerOff, dataOff + dataLen - headerOff);
}
} catch (IOException e) {
if (e instanceof SocketTimeoutException) {
/*
* writing to client timed out. This happens if the client reads
* part of a block and then decides not to read the rest (but leaves
* the socket open).
*
* Reporting of this case is done in DataXceiver#run
*/
LOG.warn("Sending packets timed out.", e);
} else {
/* Exception while writing to the client. Connection closure from
* the other end is mostly the case and we do not care much about
* it. But other things can go wrong, especially in transferTo(),
* which we do not want to ignore.
*
* The message parsing below should not be considered as a good
* coding example. NEVER do it to drive a program logic. NEVER.
* It was done here because the NIO throws an IOException for EPIPE.
*/
String ioem = e.getMessage();
if (ioem != null) {
/*
* If we got an EIO when reading files or transferTo the client
* socket, it's very likely caused by bad disk track or other file
* corruptions.
*/
if (ioem.startsWith(EIO_ERROR)) {
throw new DiskFileCorruptException("A disk IO error occurred", e);
}
String causeMessage = e.getCause() != null ? e.getCause().getMessage() : "";
causeMessage = causeMessage != null ? causeMessage : "";
if (!ioem.startsWith("Broken pipe")
&& !ioem.startsWith("Connection reset")
&& !causeMessage.startsWith("Broken pipe")
&& !causeMessage.startsWith("Connection reset")) {
LOG.error("BlockSender.sendChunks() exception: ", e);
datanode.getBlockScanner().markSuspectBlock(
ris.getVolumeRef().getVolume().getStorageID(), block);
}
}
}
throw ioeToSocketException(e);
}
if (throttler != null) { // rebalancing so throttle
throttler.throttle(packetLen);
}
return dataLen;
}
/**
* Read checksum into given buffer
* @param buf buffer to read the checksum into
* @param checksumOffset offset at which to write the checksum into buf
* @param checksumLen length of checksum to write
* @throws IOException on error
*/
private void readChecksum(byte[] buf, final int checksumOffset,
final int checksumLen) throws IOException {
if (checksumSize <= 0 && ris.getChecksumIn() == null) {
return;
}
try {
ris.readChecksumFully(buf, checksumOffset, checksumLen);
} catch (IOException e) {
LOG.warn(" Could not read or failed to verify checksum for data"
+ " at offset " + offset + " for block " + block, e);
ris.closeChecksumStream();
if (corruptChecksumOk) {
if (checksumLen > 0) {
// Just fill the array with zeros.
Arrays.fill(buf, checksumOffset, checksumOffset + checksumLen,
(byte) 0);
}
} else {
throw e;
}
}
}
/**
* Compute checksum for chunks and verify the checksum that is read from
* the metadata file is correct.
*
* @param buf buffer that has checksum and data
* @param dataOffset position where data is written in the buf
* @param datalen length of data
* @param numChunks number of chunks corresponding to data
* @param checksumOffset offset where checksum is written in the buf
* @throws ChecksumException on failed checksum verification
*/
public void verifyChecksum(final byte[] buf, final int dataOffset,
final int datalen, final int numChunks, final int checksumOffset)
throws ChecksumException {
int dOff = dataOffset;
int cOff = checksumOffset;
int dLeft = datalen;
for (int i = 0; i < numChunks; i++) {
checksum.reset();
int dLen = Math.min(dLeft, chunkSize);
checksum.update(buf, dOff, dLen);
if (!checksum.compare(buf, cOff)) {
long failedPos = offset + datalen - dLeft;
StringBuilder replicaInfoString = new StringBuilder();
if (replica != null) {
replicaInfoString.append(" for replica: " + replica.toString());
}
throw new ChecksumException("Checksum failed at " + failedPos
+ replicaInfoString, failedPos);
}
dLeft -= dLen;
dOff += dLen;
cOff += checksumSize;
}
}
/**
* sendBlock() is used to read block and its metadata and stream the data to
* either a client or to another datanode.
*
* @param out stream to which the block is written to
* @param baseStream optional. if non-null, <code>out</code> is assumed to
* be a wrapper over this stream. This enables optimizations for
* sending the data, e.g.
* {@link SocketOutputStream#transferToFully(FileChannel,
* long, int)}.
* @param throttler for sending data.
* @return total bytes read, including checksum data.
*/
long sendBlock(DataOutputStream out, OutputStream baseStream,
DataTransferThrottler throttler) throws IOException {
final TraceScope scope = FsTracer.get(null)
.newScope("sendBlock_" + block.getBlockId());
try {
return doSendBlock(out, baseStream, throttler);
} finally {
scope.close();
}
}
private long doSendBlock(DataOutputStream out, OutputStream baseStream,
DataTransferThrottler throttler) throws IOException {
if (out == null) {
throw new IOException( "out stream is null" );
}
initialOffset = offset;
long totalRead = 0;
OutputStream streamForSendChunks = out;
lastCacheDropOffset = initialOffset;
if (isLongRead() && ris.getDataInFd() != null) {
// Advise that this file descriptor will be accessed sequentially.
ris.dropCacheBehindReads(block.getBlockName(), 0, 0,
POSIX_FADV_SEQUENTIAL);
}
// Trigger readahead of beginning of file if configured.
manageOsCache();
final long startTime = CLIENT_TRACE_LOG.isDebugEnabled() ? System.nanoTime() : 0;
try {
int maxChunksPerPacket;
int pktBufSize = PacketHeader.PKT_MAX_HEADER_LEN;
boolean transferTo = transferToAllowed && !verifyChecksum
&& baseStream instanceof SocketOutputStream
&& ris.getDataIn() instanceof FileInputStream;
if (transferTo) {
FileChannel fileChannel =
((FileInputStream)ris.getDataIn()).getChannel();
blockInPosition = fileChannel.position();
streamForSendChunks = baseStream;
maxChunksPerPacket = numberOfChunks(TRANSFERTO_BUFFER_SIZE);
// Smaller packet size to only hold checksum when doing transferTo
pktBufSize += checksumSize * maxChunksPerPacket;
} else {
maxChunksPerPacket = Math.max(1,
numberOfChunks(IO_FILE_BUFFER_SIZE));
// Packet size includes both checksum and data
pktBufSize += (chunkSize + checksumSize) * maxChunksPerPacket;
}
ByteBuffer pktBuf = ByteBuffer.allocate(pktBufSize);
while (endOffset > offset && !Thread.currentThread().isInterrupted()) {
manageOsCache();
long len = sendPacket(pktBuf, maxChunksPerPacket, streamForSendChunks,
transferTo, throttler);
offset += len;
totalRead += len + (numberOfChunks(len) * checksumSize);
seqno++;
}
// If this thread was interrupted, then it did not send the full block.
if (!Thread.currentThread().isInterrupted()) {
try {
// send an empty packet to mark the end of the block
sendPacket(pktBuf, maxChunksPerPacket, streamForSendChunks, transferTo,
throttler);
out.flush();
} catch (IOException e) { //socket error
throw ioeToSocketException(e);
}
sentEntireByteRange = true;
}
} finally {
if ((clientTraceFmt != null) && CLIENT_TRACE_LOG.isDebugEnabled()) {
final long endTime = System.nanoTime();
CLIENT_TRACE_LOG.debug(String.format(clientTraceFmt, totalRead,
initialOffset, endTime - startTime));
}
close();
}
return totalRead;
}
/**
* Manage the OS buffer cache by performing read-ahead
* and drop-behind.
*/
private void manageOsCache() throws IOException {
// We can't manage the cache for this block if we don't have a file
// descriptor to work with.
if (ris.getDataInFd() == null) {
return;
}
// Perform readahead if necessary
if ((readaheadLength > 0) && (datanode.readaheadPool != null) &&
(alwaysReadahead || isLongRead())) {
curReadahead = datanode.readaheadPool.readaheadStream(
clientTraceFmt, ris.getDataInFd(), offset, readaheadLength,
Long.MAX_VALUE, curReadahead);
}
// Drop what we've just read from cache, since we aren't
// likely to need it again
if (dropCacheBehindAllReads ||
(dropCacheBehindLargeReads && isLongRead())) {
long nextCacheDropOffset = lastCacheDropOffset + CACHE_DROP_INTERVAL_BYTES;
if (offset >= nextCacheDropOffset) {
long dropLength = offset - lastCacheDropOffset;
ris.dropCacheBehindReads(block.getBlockName(), lastCacheDropOffset,
dropLength, POSIX_FADV_DONTNEED);
lastCacheDropOffset = offset;
}
}
}
/**
* Returns true if we have done a long enough read for this block to qualify
* for the DataNode-wide cache management defaults. We avoid applying the
* cache management defaults to smaller reads because the overhead would be
* too high.
*
* Note that if the client explicitly asked for dropBehind, we will do it
* even on short reads.
*
* This is also used to determine when to invoke
* posix_fadvise(POSIX_FADV_SEQUENTIAL).
*/
private boolean isLongRead() {
return (endOffset - initialOffset) > LONG_READ_THRESHOLD_BYTES;
}
/**
* Write packet header into {@code pkt},
* return the length of the header written.
*/
private int writePacketHeader(ByteBuffer pkt, int dataLen, int packetLen) {
pkt.clear();
// both syncBlock and syncPacket are false
PacketHeader header = new PacketHeader(packetLen, offset, seqno,
(dataLen == 0), dataLen, false);
int size = header.getSerializedSize();
pkt.position(PacketHeader.PKT_MAX_HEADER_LEN - size);
header.putInBuffer(pkt);
return size;
}
boolean didSendEntireByteRange() {
return sentEntireByteRange;
}
/**
* @return the checksum type that will be used with this block transfer.
*/
DataChecksum getChecksum() {
return checksum;
}
/**
* @return the offset into the block file where the sender is currently
* reading.
*/
long getOffset() {
return offset;
}
}
| in |
java | mockito__mockito | mockito-core/src/test/java/org/mockitousage/bugs/MockitoStubbedCallInAnswerTest.java | {
"start": 3689,
"end": 3765
} | interface ____ {
String doString();
int doInt();
}
| Foo |
java | resilience4j__resilience4j | resilience4j-micrometer/src/test/java/io/github/resilience4j/micrometer/tagged/TaggedRateLimiterMetricsPublisherTest.java | {
"start": 6946,
"end": 8318
} | class
____.acquirePermission();
assertThat(taggedRateLimiterMetricsPublisher.meterIdMap).containsKeys("backendC");
assertThat(taggedRateLimiterMetricsPublisher.meterIdMap.get("backendC")).hasSize(2);
Collection<Gauge> gauges = meterRegistry.get(DEFAULT_AVAILABLE_PERMISSIONS_METRIC_NAME)
.gauges();
Optional<Gauge> available = findMeterByNamesTag(gauges, oldOne.getName());
assertThat(available).isPresent();
assertThat(available.get().value())
.isEqualTo(oldOne.getMetrics().getAvailablePermissions());
RateLimiter newOne = RateLimiter.of("backendC", RateLimiterConfig.ofDefaults());
// add meters of old
taggedRateLimiterMetricsPublisher.addMetrics(meterRegistry, newOne);
// three permission call
newOne.acquirePermission(3);
assertThat(taggedRateLimiterMetricsPublisher.meterIdMap).containsKeys("backendC");
assertThat(taggedRateLimiterMetricsPublisher.meterIdMap.get("backendC")).hasSize(2);
gauges = meterRegistry.get(DEFAULT_AVAILABLE_PERMISSIONS_METRIC_NAME)
.gauges();
available = findMeterByNamesTag(gauges, newOne.getName());
assertThat(available).isPresent();
assertThat(available.get().value())
.isEqualTo(newOne.getMetrics().getAvailablePermissions());
}
} | oldOne |
java | spring-projects__spring-boot | module/spring-boot-jackson2/src/main/java/org/springframework/boot/jackson2/JsonObjectSerializer.java | {
"start": 1366,
"end": 2186
} | class ____<T> extends JsonSerializer<T> {
@Override
public final void serialize(T value, JsonGenerator jgen, SerializerProvider provider) throws IOException {
try {
jgen.writeStartObject();
serializeObject(value, jgen, provider);
jgen.writeEndObject();
}
catch (Exception ex) {
if (ex instanceof IOException ioException) {
throw ioException;
}
throw new JsonMappingException(jgen, "Object serialize error", ex);
}
}
/**
* Serialize JSON content into the value type this serializer handles.
* @param value the source value
* @param jgen the JSON generator
* @param provider the serializer provider
* @throws IOException on error
*/
protected abstract void serializeObject(T value, JsonGenerator jgen, SerializerProvider provider)
throws IOException;
}
| JsonObjectSerializer |
java | mockito__mockito | mockito-core/src/test/java/org/mockitousage/annotation/SpyAnnotationTest.java | {
"start": 2453,
"end": 2821
} | class ____ {
@Spy List<String> list;
}
WithSpy withSpy = new WithSpy();
MockitoAnnotations.openMocks(withSpy);
when(withSpy.list.size()).thenReturn(3);
assertEquals(3, withSpy.list.size());
}
@Test
public void should_allow_spying_on_interfaces_when_instance_is_concrete() throws Exception {
| WithSpy |
java | google__truth | core/src/main/java/com/google/common/truth/Platform.java | {
"start": 3672,
"end": 4096
} | class ____ that we're looking up. The obfuscation prevents R8
* from detecting the usage of ActualValueInference. That in turn lets users exclude it from
* the compile-time classpath if they want. (And then *that* probably makes it easier and/or
* safer for R8 users (i.e., Android users) to exclude it from the *runtime* classpath. It
* would do no good there, anyway, since ASM won't find any . | name |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhancement/dirty/DirtyTrackingDynamicUpdateTest.java | {
"start": 2378,
"end": 3112
} | class ____ {
@Id
private Integer id;
private String aSuper;
private String bSuper;
private String aChild;
private String bChild;
public TestEntity() {
}
public TestEntity(Integer id) {
this.id = id;
}
public String getaSuper() {
return aSuper;
}
public void setaSuper(String aSuper) {
this.aSuper = aSuper;
}
public String getbSuper() {
return bSuper;
}
public void setbSuper(String bSuper) {
this.bSuper = bSuper;
}
public String getaChild() {
return aChild;
}
public void setaChild(String aChild) {
this.aChild = aChild;
}
public String getbChild() {
return bChild;
}
public void setbChild(String bChild) {
this.bChild = bChild;
}
}
}
| TestEntity |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/ser/TestRootType.java | {
"start": 3953,
"end": 7336
} | interface ____ through due to root declaration; static typing
assertEquals("[{\"b\":3}]", json);
}
/**
* Unit test to ensure that proper exception is thrown if declared
* root type is not compatible with given value instance.
*/
@Test
public void testIncompatibleRootType() throws Exception
{
ObjectMapper mapper = newJsonMapper();
SubType bean = new SubType();
// and then let's try using incompatible type
ObjectWriter w = mapper.writerFor(HashMap.class);
try {
w.writeValueAsString(bean);
fail("Should have failed due to incompatible type");
} catch (InvalidDefinitionException e) {
verifyException(e, "Incompatible types");
}
// and also with alternate output method
try {
w.writeValueAsBytes(bean);
fail("Should have failed due to incompatible type");
} catch (InvalidDefinitionException e) {
verifyException(e, "Incompatible types");
}
}
@Test
public void testJackson398() throws Exception
{
ObjectMapper mapper = newJsonMapper();
JavaType collectionType = defaultTypeFactory().constructCollectionType(ArrayList.class, BaseClass398.class);
List<TestClass398> typedList = new ArrayList<TestClass398>();
typedList.add(new TestClass398());
final String EXP = "[{\"beanClass\":\"TestRootType$TestClass398\",\"property\":\"aa\"}]";
// First simplest way:
String json = mapper.writerFor(collectionType).writeValueAsString(typedList);
assertEquals(EXP, json);
StringWriter out = new StringWriter();
mapper.writerFor(collectionType).writeValue(mapper.createGenerator(out), typedList);
assertEquals(EXP, out.toString());
}
// [JACKSON-163]
@Test
public void testRootWrapping() throws Exception
{
String json = WRAP_ROOT_MAPPER.writeValueAsString(new StringWrapper("abc"));
assertEquals("{\"StringWrapper\":{\"str\":\"abc\"}}", json);
}
/**
* Test to verify that there is support for specifying root type as primitive,
* even if wrapper value is passed (there is no way to pass primitive values as
* Objects); this to support frameworks that may pass unprocessed
* {@link java.lang.reflect.Type} from field or method.
*/
@Test
public void testIssue456WrapperPart() throws Exception
{
ObjectMapper mapper = newJsonMapper();
assertEquals("123", mapper.writerFor(Integer.TYPE).writeValueAsString(Integer.valueOf(123)));
assertEquals("456", mapper.writerFor(Long.TYPE).writeValueAsString(Long.valueOf(456L)));
}
@Test
public void testRootNameAnnotation() throws Exception
{
String json = WRAP_ROOT_MAPPER.writeValueAsString(new WithRootName());
assertEquals("{\"root\":{\"a\":3}}", json);
}
// [databind#412]
@Test
public void testRootNameWithExplicitType() throws Exception
{
TestCommandChild cmd = new TestCommandChild();
cmd.uuid = "1234";
cmd.type = 1;
ObjectWriter writer = WRAP_ROOT_MAPPER.writerFor(TestCommandParent.class);
String json = writer.writeValueAsString(cmd);
assertEquals("{\"TestCommandParent\":{\"uuid\":\"1234\",\"type\":1}}", json);
}
}
| type |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/AuthorizedProjectsResolver.java | {
"start": 819,
"end": 1075
} | class ____ implements AuthorizedProjectsResolver {
@Override
public void resolveAuthorizedProjects(ActionListener<TargetProjects> listener) {
listener.onResponse(TargetProjects.LOCAL_ONLY_FOR_CPS_DISABLED);
}
}
}
| Default |
java | google__error-prone | core/src/test/java/com/google/errorprone/refaster/testdata/template/AnyOfTemplate.java | {
"start": 956,
"end": 1182
} | class ____ {
@BeforeTemplate
boolean signumIsZero(double d) {
return Refaster.anyOf(Math.signum(d) == 0.0, 0.0 == Math.signum(d));
}
@AfterTemplate
boolean isZero(double d) {
return d == 0.0;
}
}
| AnyOfTemplate |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/query/hql/instantiation/DynamicInstantiationWithJoinAndGroupByAndParameterTest.java | {
"start": 4062,
"end": 4426
} | class ____ {
private final String name;
private final Long count;
public UserStatistic(UserEntity user, Long count) {
this.name = user != null ? user.getFirstname() + " " + user.getLastname() : null;
this.count = count;
}
public String getName() {
return name;
}
public Integer getCount() {
return count.intValue();
}
}
}
| UserStatistic |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/internal/FloatsBaseTest.java | {
"start": 1431,
"end": 2278
} | class ____ {
protected static final WritableAssertionInfo INFO = someInfo();
protected Failures failures;
protected Floats floats;
protected ComparatorBasedComparisonStrategy absValueComparisonStrategy;
protected Floats floatsWithAbsValueComparisonStrategy;
@BeforeEach
public void setUp() {
failures = spy(Failures.instance());
floats = new Floats();
floats.setFailures(failures);
absValueComparisonStrategy = new ComparatorBasedComparisonStrategy(new AbsValueComparator<Float>());
floatsWithAbsValueComparisonStrategy = new Floats(absValueComparisonStrategy);
floatsWithAbsValueComparisonStrategy.failures = failures;
}
protected Float NaN() {
return floats.NaN();
}
protected Float absDiff(Float actual, Float other) {
return Floats.instance().absDiff(actual, other);
}
}
| FloatsBaseTest |
java | spring-projects__spring-framework | spring-beans/src/main/java/org/springframework/beans/factory/xml/UtilNamespaceHandler.java | {
"start": 2697,
"end": 3940
} | class ____ extends AbstractSingleBeanDefinitionParser {
@Override
protected Class<?> getBeanClass(Element element) {
return PropertyPathFactoryBean.class;
}
@Override
protected void doParse(Element element, ParserContext parserContext, BeanDefinitionBuilder builder) {
String path = element.getAttribute("path");
if (!StringUtils.hasText(path)) {
parserContext.getReaderContext().error("Attribute 'path' must not be empty", element);
return;
}
int dotIndex = path.indexOf('.');
if (dotIndex == -1) {
parserContext.getReaderContext().error(
"Attribute 'path' must follow pattern 'beanName.propertyName'", element);
return;
}
String beanName = path.substring(0, dotIndex);
String propertyPath = path.substring(dotIndex + 1);
builder.addPropertyValue("targetBeanName", beanName);
builder.addPropertyValue("propertyPath", propertyPath);
}
@Override
protected String resolveId(Element element, AbstractBeanDefinition definition, ParserContext parserContext) {
String id = super.resolveId(element, definition, parserContext);
if (!StringUtils.hasText(id)) {
id = element.getAttribute("path");
}
return id;
}
}
private static | PropertyPathBeanDefinitionParser |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/client/processor/src/main/java/org/jboss/resteasy/reactive/client/processor/scanning/ClientEndpointIndexer.java | {
"start": 11828,
"end": 11935
} | class ____ extends IndexedParameter<ClientIndexedParam> {
}
public static abstract | ClientIndexedParam |
java | spring-projects__spring-boot | module/spring-boot-micrometer-tracing-opentelemetry/src/main/java/org/springframework/boot/micrometer/tracing/opentelemetry/autoconfigure/OpenTelemetryEventPublisherBeansApplicationListener.java | {
"start": 4469,
"end": 5859
} | class ____ {
static final Wrapper instance = new Wrapper();
private final MultiValueMap<ApplicationContext, EventPublishingContextWrapper> beans = new LinkedMultiValueMap<>();
private volatile @Nullable ContextStorage storageDelegate;
private Wrapper() {
}
private void addWrapper() {
ContextStorage.addWrapper(Storage::new);
}
void put(ApplicationContext applicationContext, List<EventPublishingContextWrapper> publishers) {
synchronized (this) {
this.beans.addAll(applicationContext, publishers);
this.storageDelegate = null;
}
}
void remove(ApplicationContext applicationContext) {
synchronized (this) {
this.beans.remove(applicationContext);
this.storageDelegate = null;
}
}
ContextStorage getStorageDelegate(ContextStorage parent) {
ContextStorage delegate = this.storageDelegate;
if (delegate == null) {
synchronized (this) {
delegate = this.storageDelegate;
if (delegate == null) {
delegate = parent;
for (List<EventPublishingContextWrapper> publishers : this.beans.values()) {
for (EventPublishingContextWrapper publisher : publishers) {
delegate = publisher.apply(delegate);
}
}
this.storageDelegate = delegate;
}
}
}
return delegate;
}
/**
* {@link ContextStorage} that delegates to the {@link EventPublisher} beans.
*/
| Wrapper |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/context/properties/source/SystemEnvironmentPropertyMapperTests.java | {
"start": 936,
"end": 4507
} | class ____ extends AbstractPropertyMapperTests {
@Override
protected PropertyMapper getMapper() {
return SystemEnvironmentPropertyMapper.INSTANCE;
}
@Test
void mapFromStringShouldReturnBestGuess() {
assertThat(mapPropertySourceName("SERVER")).isEqualTo("server");
assertThat(mapPropertySourceName("SERVER_PORT")).isEqualTo("server.port");
assertThat(mapPropertySourceName("HOST_0")).isEqualTo("host[0]");
assertThat(mapPropertySourceName("HOST_0_1")).isEqualTo("host[0][1]");
assertThat(mapPropertySourceName("HOST_0_NAME")).isEqualTo("host[0].name");
assertThat(mapPropertySourceName("HOST_F00_NAME")).isEqualTo("host.f00.name");
assertThat(mapPropertySourceName("S-ERVER")).isEqualTo("s-erver");
}
@Test
void mapFromConfigurationShouldReturnBestGuess() {
assertThat(mapConfigurationPropertyName("server")).containsExactly("SERVER", "server");
assertThat(mapConfigurationPropertyName("server.port")).containsExactly("SERVER_PORT", "server_port");
assertThat(mapConfigurationPropertyName("host[0]")).containsExactly("HOST_0", "host_0");
assertThat(mapConfigurationPropertyName("host[0][1]")).containsExactly("HOST_0_1", "host_0_1");
assertThat(mapConfigurationPropertyName("host[0].name")).containsExactly("HOST_0_NAME", "host_0_name");
assertThat(mapConfigurationPropertyName("host.f00.name")).containsExactly("HOST_F00_NAME", "host_f00_name");
assertThat(mapConfigurationPropertyName("foo.the-bar")).containsExactly("FOO_THEBAR", "FOO_THE_BAR",
"foo_thebar", "foo_the_bar");
}
@Test
void underscoreShouldMapToEmptyString() {
ConfigurationPropertyName mapped = getMapper().map("_");
assertThat(mapped.isEmpty()).isTrue();
}
@Test
void underscoreWithWhitespaceShouldMapToEmptyString() {
ConfigurationPropertyName mapped = getMapper().map(" _");
assertThat(mapped.isEmpty()).isTrue();
}
@Test
void isAncestorOfConsidersLegacyNames() {
ConfigurationPropertyName name = ConfigurationPropertyName.of("my.spring-boot");
BiPredicate<ConfigurationPropertyName, ConfigurationPropertyName> check = getMapper().getAncestorOfCheck();
assertThat(check.test(name, ConfigurationPropertyName.adapt("MY_SPRING_BOOT_PROPERTY", '_'))).isTrue();
assertThat(check.test(name, ConfigurationPropertyName.adapt("MY_SPRINGBOOT_PROPERTY", '_'))).isTrue();
assertThat(check.test(name, ConfigurationPropertyName.adapt("MY_BOOT_PROPERTY", '_'))).isFalse();
}
@Test
void isAncestorOfWhenNonCanonicalSource() {
ConfigurationPropertyName name = ConfigurationPropertyName.adapt("my.springBoot", '.');
BiPredicate<ConfigurationPropertyName, ConfigurationPropertyName> check = getMapper().getAncestorOfCheck();
assertThat(check.test(name, ConfigurationPropertyName.of("my.spring-boot.property"))).isTrue();
assertThat(check.test(name, ConfigurationPropertyName.of("my.springboot.property"))).isTrue();
assertThat(check.test(name, ConfigurationPropertyName.of("my.boot.property"))).isFalse();
}
@Test
void isAncestorOfWhenNonCanonicalAndDashedSource() {
ConfigurationPropertyName name = ConfigurationPropertyName.adapt("my.springBoot.input-value", '.');
BiPredicate<ConfigurationPropertyName, ConfigurationPropertyName> check = getMapper().getAncestorOfCheck();
assertThat(check.test(name, ConfigurationPropertyName.of("my.spring-boot.input-value.property"))).isTrue();
assertThat(check.test(name, ConfigurationPropertyName.of("my.springboot.inputvalue.property"))).isTrue();
assertThat(check.test(name, ConfigurationPropertyName.of("my.boot.property"))).isFalse();
}
}
| SystemEnvironmentPropertyMapperTests |
java | quarkusio__quarkus | extensions/amazon-lambda/deployment/src/main/java/io/quarkus/amazon/lambda/deployment/AmazonLambdaBuildItem.java | {
"start": 107,
"end": 693
} | class ____ extends MultiBuildItem {
private final String handlerClass;
private final String name;
private final boolean streamHandler;
public AmazonLambdaBuildItem(String handlerClass, String name, boolean streamHandler) {
this.handlerClass = handlerClass;
this.name = name;
this.streamHandler = streamHandler;
}
public String getHandlerClass() {
return handlerClass;
}
public String getName() {
return name;
}
public boolean isStreamHandler() {
return streamHandler;
}
}
| AmazonLambdaBuildItem |
java | apache__camel | tooling/maven/camel-package-maven-plugin/src/main/java/org/apache/camel/maven/packaging/GenerateDataTypeTransformerMojo.java | {
"start": 2190,
"end": 2828
} | class ____ extends AbstractGeneratorMojo {
public static final DotName DATA_TYPE_ANNOTATION = DotName.createSimple("org.apache.camel.spi.DataTypeTransformer");
/**
* The project build directory
*/
@Parameter(defaultValue = "${project.build.directory}")
protected File buildDir;
@Parameter(defaultValue = "${project.basedir}/src/generated/resources")
protected File resourcesOutputDir;
@Inject
public GenerateDataTypeTransformerMojo(MavenProjectHelper projectHelper, BuildContext buildContext) {
super(projectHelper, buildContext);
}
private static | GenerateDataTypeTransformerMojo |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/threadsafety/ImmutableCheckerTest.java | {
"start": 72646,
"end": 73156
} | interface ____ {}
void test(I i) {
g(f(i), f(i));
}
}
""")
.doTest();
}
// regression test for b/148734874
@Test
public void immutableTypeParameter_instantiations_negative() {
compilationHelper
.addSourceLines(
"Test.java",
"""
import com.google.errorprone.annotations.ImmutableTypeParameter;
import com.google.errorprone.annotations.Immutable;
abstract | I |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java | {
"start": 1860,
"end": 3244
} | class ____ {
private MiniDFSCluster cluster;
private Configuration config;
static int mockQueueConstructions;
static int mockQueuePuts;
private int nnPort = 0;
private void setUp(Class<?> queueClass) throws IOException {
int portRetries = 5;
Random rand = new Random();
for (; portRetries > 0; --portRetries) {
// Pick a random port in the range [30000,60000).
nnPort = 30000 + rand.nextInt(30000);
config = new Configuration();
String callQueueConfigKey = "ipc." + nnPort + ".callqueue.impl";
config.setClass(callQueueConfigKey, queueClass, BlockingQueue.class);
config.set("hadoop.security.authorization", "true");
FileSystem.setDefaultUri(config, "hdfs://localhost:" + nnPort);
try {
cluster = new MiniDFSCluster.Builder(config).nameNodePort(nnPort)
.build();
cluster.waitActive();
break;
} catch (BindException be) {
// Retry with a different port number.
}
}
if (portRetries == 0) {
// Bail if we get very unlucky with our choice of ports.
fail("Failed to pick an ephemeral port for the NameNode RPC server.");
}
}
@AfterEach
public void tearDown() throws IOException {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
@SuppressWarnings("serial")
public static | TestRefreshCallQueue |
java | jhy__jsoup | src/main/java/org/jsoup/parser/Token.java | {
"start": 12467,
"end": 13999
} | class ____ extends Token {
final TokenData data = new TokenData();
Character() {
super(TokenType.Character);
}
/** Deep copy */
Character(Character source) {
super(TokenType.Character);
this.startPos = source.startPos;
this.endPos = source.endPos;
this.data.set(source.data.value());
}
@Override
Token reset() {
super.reset();
data.reset();
return this;
}
Character data(String str) {
data.set(str);
return this;
}
Character append(String str) {
data.append(str);
return this;
}
String getData() {
return data.value();
}
@Override
public String toString() {
return getData();
}
/**
Normalize null chars in the data. If replace is true, replaces with the replacement char; if false, removes.
*/
public void normalizeNulls(boolean replace) {
String data = this.data.value();
if (data.indexOf(TokeniserState.nullChar) == -1) return;
data = (replace ?
data.replace(TokeniserState.nullChar, Tokeniser.replacementChar) :
data.replace(nullString, ""));
this.data.set(data);
}
private static final String nullString = String.valueOf(TokeniserState.nullChar);
}
final static | Character |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/UnresolvedPathException.java | {
"start": 1191,
"end": 2765
} | class ____ extends UnresolvedLinkException {
private static final long serialVersionUID = 1L;
private String path; // The path containing the link
private String preceding; // The path part preceding the link
private String remainder; // The path part following the link
private String linkTarget; // The link's target
/**
* Used by RemoteException to instantiate an UnresolvedPathException.
*/
public UnresolvedPathException(String msg) {
super(msg);
}
public UnresolvedPathException(String path, String preceding,
String remainder, String linkTarget) {
this.path = path;
this.preceding = preceding;
this.remainder = remainder;
this.linkTarget = linkTarget;
}
/**
* Return a path with the link resolved with the target.
*/
public Path getResolvedPath() {
// If the path is absolute we cam throw out the preceding part and
// just append the remainder to the target, otherwise append each
// piece to resolve the link in path.
boolean noRemainder = (remainder == null || "".equals(remainder));
Path target = new Path(linkTarget);
if (target.isUriPathAbsolute()) {
return noRemainder ? target : new Path(target, remainder);
} else {
return noRemainder
? new Path(preceding, target)
: new Path(new Path(preceding, linkTarget), remainder);
}
}
@Override
public String getMessage() {
String msg = super.getMessage();
if (msg != null) {
return msg;
}
return getResolvedPath().toString();
}
}
| UnresolvedPathException |
java | processing__processing4 | java/src/processing/mode/java/runner/Runner.java | {
"start": 18033,
"end": 28093
} | class ____, but it was
// removed for 3.0a6 because it would break the args passed to sketches.
params.append(PApplet.ARGS_SKETCH_FOLDER + "=" + build.getSketchPath());
/*
if (Toolkit.zoom(100) >= 200) { // Use 100 to bypass possible rounding in zoom()
params.append(PApplet.ARGS_DENSITY + "=2");
}
*/
// Using this causes behavior to be different between exported
// applications and when run from the PDE. Turning it off for 4.2
// so there's a path for debugging all of this. [fry 230218]
/*
if (Platform.isWindows()) {
// Pass the DPI setting to the app to avoid using the helper app.
int dpi = Toolkit.getDefaultToolkit().getScreenResolution();
int uiScale = PApplet.constrain(dpi / 96, 1, 2);
params.append(PApplet.ARGS_UI_SCALE + "=" + uiScale);
}
*/
params.append(build.getSketchClassName());
}
// Add command-line arguments to be given to the sketch itself
if (args != null) {
params.append(args);
}
// Pass back the whole list
return params;
}
protected void launchJava(final String[] args) {
new Thread(() -> {
// PApplet.println("java starting");
vmReturnedError = false;
process = PApplet.exec(args);
try {
// PApplet.println("java waiting");
int result = process.waitFor();
// PApplet.println("java done waiting");
if (result != 0) {
String[] errorStrings = PApplet.loadStrings(process.getErrorStream());
String[] inputStrings = PApplet.loadStrings(process.getInputStream());
// PApplet.println("launchJava stderr:");
// PApplet.println(errorStrings);
// PApplet.println("launchJava stdout:");
PApplet.printArray(inputStrings);
if (errorStrings != null && errorStrings.length > 1) {
if (errorStrings[0].contains("Invalid maximum heap size")) {
Messages.showWarning("Way Too High",
"Please lower the value for \u201Cmaximum available memory\u201D in the\n" +
"Preferences window. For more information, read Help \u2192 Troubleshooting.", null);
} else {
for (String err : errorStrings) {
sketchErr.println(err);
}
sketchErr.println("Using startup command: " + PApplet.join(args, " "));
}
} else {
//exc.printStackTrace();
sketchErr.println("Could not run the sketch (Target VM failed to initialize).");
if (Preferences.getBoolean("run.options.memory")) {
// Only mention this if they've even altered the memory setup
sketchErr.println("Make sure that you haven't set the maximum available memory too high.");
}
sketchErr.println("For more information, read Help \u2192 Troubleshooting.");
}
// changing this to separate editor and listener [091124]
//if (editor != null) {
listener.statusError("Could not run the sketch.");
vmReturnedError = true;
//}
// return null;
}
} catch (InterruptedException e) {
e.printStackTrace();
}
}).start();
}
/**
* Generate the trace.
* Enable events, start thread to display events,
* start threads to forward remote error and output streams,
* resume the remote VM, wait for the final event, and shutdown.
*/
protected void generateTrace() {
//vm.setDebugTraceMode(debugTraceMode);
// vm.setDebugTraceMode(VirtualMachine.TRACE_ALL);
// vm.setDebugTraceMode(VirtualMachine.TRACE_NONE); // formerly, seems to have no effect
try {
// Calling this seems to set something internally to make the
// Eclipse JDI wake up. Without it, an ObjectCollectedException
// is thrown on excReq.enable(). No idea why this works,
// but at least exception handling has returned. (Suspect that it may
// block until all or at least some threads are available, meaning
// that the app has launched and we have legit objects to talk to).
vm.allThreads();
// The bug may not have been noticed because the test suite waits for
// a thread to be available, and queries it by calling allThreads().
// See org.eclipse.debug.jdi.tests.AbstractJDITest for the example.
EventRequestManager mgr = vm.eventRequestManager();
// get only the uncaught exceptions
ExceptionRequest excReq = mgr.createExceptionRequest(null, false, true);
// this version reports all exceptions, caught or uncaught
// suspend so we can step
excReq.setSuspendPolicy(EventRequest.SUSPEND_ALL);
excReq.enable();
} catch (VMDisconnectedException ignore) {
return;
}
Thread eventThread = new Thread(() -> {
try {
boolean connected = true;
while (connected) {
EventQueue eventQueue = vm.eventQueue();
// remove() blocks until event(s) available
EventSet eventSet = eventQueue.remove();
// listener.vmEvent(eventSet);
for (Event event : eventSet) {
// System.out.println("EventThread.handleEvent -> " + event);
if (event instanceof VMStartEvent) {
vm.resume();
} else if (event instanceof ExceptionEvent) {
// for (ThreadReference thread : vm.allThreads()) {
// System.out.println("thread : " + thread);
//// thread.suspend();
// }
exceptionEvent((ExceptionEvent) event);
} else if (event instanceof VMDisconnectEvent) {
connected = false;
}
}
}
// } catch (VMDisconnectedException e) {
// Logger.getLogger(VMEventReader.class.getName()).log(Level.INFO, "VMEventReader quit on VM disconnect");
} catch (Exception e) {
System.err.println("crashed in event thread due to " + e.getMessage());
// Logger.getLogger(VMEventReader.class.getName()).log(Level.SEVERE, "VMEventReader quit", e);
e.printStackTrace();
}
});
eventThread.start();
errThread =
new MessageSiphon(process.getErrorStream(), this).getThread();
outThread = new StreamRedirectThread("JVM stdout Reader",
process.getInputStream(),
sketchOut);
errThread.start();
outThread.start();
// Shutdown begins when event thread terminates
try {
if (eventThread != null) eventThread.join(); // is this the problem?
// System.out.println("in here");
// Bug #852 tracked to this next line in the code.
// https://download.processing.org/bugzilla/852.html
errThread.join(); // Make sure output is forwarded
// System.out.println("and then");
outThread.join(); // before we exit
// System.out.println("finished join for errThread and outThread");
// At this point, disable the run button.
// This happens when the sketch is exited by hitting ESC,
// or the user manually closes the sketch window.
// TODO this should be handled better, should it not?
if (editor != null) {
java.awt.EventQueue.invokeLater(() -> editor.onRunnerExiting(Runner.this));
}
} catch (InterruptedException exc) {
// we don't interrupt
}
//System.out.println("and leaving");
}
protected Connector findConnector(String connectorName) {
List<Connector> connectors =
com.sun.jdi.Bootstrap.virtualMachineManager().allConnectors();
// // debug: code to list available connectors
// Iterator iter2 = connectors.iterator();
// while (iter2.hasNext()) {
// Connector connector = (Connector)iter2.next();
// System.out.println("connector name is " + connector.name());
// }
for (Connector connector : connectors) {
if (connector.name().equals(connectorName)) {
return connector;
}
}
Messages.showError("Compiler Error",
"findConnector() failed to find " +
connectorName + " inside Runner", null);
return null; // Not reachable
}
public void exceptionEvent(ExceptionEvent event) {
ObjectReference or = event.exception();
ReferenceType rt = or.referenceType();
String exceptionName = rt.name();
//Field messageField = Throwable.class.getField("detailMessage");
Field messageField = rt.fieldByName("detailMessage");
// System.out.println("field " + messageField);
Value messageValue = or.getValue(messageField);
// System.out.println("mess val " + messageValue);
//"java.lang.ArrayIndexOutOfBoundsException"
int last = exceptionName.lastIndexOf('.');
String message = exceptionName.substring(last + 1);
if (messageValue != null) {
String messageStr = messageValue.toString();
if (messageStr.startsWith("\"")) {
messageStr = messageStr.substring(1, messageStr.length() - 1);
}
message += ": " + messageStr;
}
// System.out.println("mess type " + messageValue.type());
//StringReference messageReference = (StringReference) messageValue.type();
// First just report the exception and its placement
reportException(message, or, event.thread());
// Then try to pretty it up with a better message
handleCommonErrors(exceptionName, message, listener, sketchErr);
if (editor != null) {
java.awt.EventQueue.invokeLater(() -> editor.onRunnerExiting(Runner.this));
}
}
/**
* Provide more useful explanations of common error messages, perhaps with
* a short message in the status area, and (if necessary) a longer message
* in the console.
*
* @param exceptionClass Class name causing the error (with full package name)
* @param message The message from the exception
* @param listener The Editor or command line | name |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhancement/batch/BatchEntityOneToManyTest.java | {
"start": 4066,
"end": 4322
} | class ____ {
@Id
Long id;
String name;
public Product() {
}
public Product(Long id, String name) {
this.id = id;
this.name = name;
}
public Long getId() {
return id;
}
public String getName() {
return name;
}
}
}
| Product |
java | apache__dubbo | dubbo-compatible/src/test/java/org/apache/dubbo/config/ApplicationConfigTest.java | {
"start": 1801,
"end": 7455
} | class ____ {
@Test
void testName() {
ApplicationConfig application = new ApplicationConfig();
application.setName("app");
assertThat(application.getName(), equalTo("app"));
application = new ApplicationConfig("app2");
assertThat(application.getName(), equalTo("app2"));
Map<String, String> parameters = new HashMap<String, String>();
ApplicationConfig.appendParameters(parameters, application);
assertThat(parameters, hasEntry(APPLICATION_KEY, "app2"));
}
@Test
void testVersion() {
ApplicationConfig application = new ApplicationConfig("app");
application.setVersion("1.0.0");
assertThat(application.getVersion(), equalTo("1.0.0"));
Map<String, String> parameters = new HashMap<String, String>();
ApplicationConfig.appendParameters(parameters, application);
assertThat(parameters, hasEntry("application.version", "1.0.0"));
}
@Test
void testOwner() {
ApplicationConfig application = new ApplicationConfig("app");
application.setOwner("owner");
assertThat(application.getOwner(), equalTo("owner"));
}
@Test
void testOrganization() {
ApplicationConfig application = new ApplicationConfig("app");
application.setOrganization("org");
assertThat(application.getOrganization(), equalTo("org"));
}
@Test
void testArchitecture() {
ApplicationConfig application = new ApplicationConfig("app");
application.setArchitecture("arch");
assertThat(application.getArchitecture(), equalTo("arch"));
}
@Test
void testEnvironment1() {
ApplicationConfig application = new ApplicationConfig("app");
application.setEnvironment("develop");
assertThat(application.getEnvironment(), equalTo("develop"));
application.setEnvironment("test");
assertThat(application.getEnvironment(), equalTo("test"));
application.setEnvironment("product");
assertThat(application.getEnvironment(), equalTo("product"));
}
@Test
void testEnvironment2() {
Assertions.assertThrows(IllegalStateException.class, () -> {
ApplicationConfig application = new ApplicationConfig("app");
application.setEnvironment("illegal-env");
});
}
@Test
void testRegistry() {
ApplicationConfig application = new ApplicationConfig("app");
RegistryConfig registry = new RegistryConfig();
application.setRegistry(registry);
assertThat(application.getRegistry(), sameInstance(registry));
application.setRegistries(Collections.singletonList(registry));
assertThat(application.getRegistries(), contains(registry));
assertThat(application.getRegistries(), hasSize(1));
}
@Test
void testMonitor() {
ApplicationConfig application = new ApplicationConfig("app");
application.setMonitor(new MonitorConfig("monitor-addr"));
assertThat(application.getMonitor().getAddress(), equalTo("monitor-addr"));
application.setMonitor("monitor-addr");
assertThat(application.getMonitor().getAddress(), equalTo("monitor-addr"));
}
@Test
void testLogger() {
ApplicationConfig application = new ApplicationConfig("app");
application.setLogger("log4j2");
assertThat(application.getLogger(), equalTo("log4j2"));
}
@Test
void testDefault() {
ApplicationConfig application = new ApplicationConfig("app");
application.setDefault(true);
assertThat(application.isDefault(), is(true));
}
@Test
void testDumpDirectory() {
ApplicationConfig application = new ApplicationConfig("app");
application.setDumpDirectory("/dump");
assertThat(application.getDumpDirectory(), equalTo("/dump"));
Map<String, String> parameters = new HashMap<String, String>();
ApplicationConfig.appendParameters(parameters, application);
assertThat(parameters, hasEntry(DUMP_DIRECTORY, "/dump"));
}
@Test
void testQosEnable() {
ApplicationConfig application = new ApplicationConfig("app");
application.setQosEnable(true);
assertThat(application.getQosEnable(), is(true));
Map<String, String> parameters = new HashMap<String, String>();
ApplicationConfig.appendParameters(parameters, application);
assertThat(parameters, hasEntry(QOS_ENABLE, "true"));
}
@Test
void testQosPort() {
ApplicationConfig application = new ApplicationConfig("app");
application.setQosPort(8080);
assertThat(application.getQosPort(), equalTo(8080));
}
@Test
void testQosAcceptForeignIp() {
ApplicationConfig application = new ApplicationConfig("app");
application.setQosAcceptForeignIp(true);
assertThat(application.getQosAcceptForeignIp(), is(true));
Map<String, String> parameters = new HashMap<String, String>();
ApplicationConfig.appendParameters(parameters, application);
assertThat(parameters, hasEntry(ACCEPT_FOREIGN_IP, "true"));
}
@Test
void testParameters() {
ApplicationConfig application = new ApplicationConfig("app");
application.setQosAcceptForeignIp(true);
Map<String, String> parameters = new HashMap<String, String>();
parameters.put("k1", "v1");
ApplicationConfig.appendParameters(parameters, application);
assertThat(parameters, hasEntry("k1", "v1"));
assertThat(parameters, hasEntry(ACCEPT_FOREIGN_IP, "true"));
}
}
| ApplicationConfigTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/query/criteria/internal/hhh13058/Task.java | {
"start": 511,
"end": 1156
} | class ____ {
@Id
@GeneratedValue
Long id;
@ManyToOne(fetch = FetchType.LAZY)
Patient patient;
String description;
public Task() {
}
public Task(Patient patient) {
this.patient = patient;
}
@Override
public boolean equals(Object o) {
if ( this == o ) {
return true;
}
if ( o == null || getClass() != o.getClass() ) {
return false;
}
Task task = (Task) o;
return id.equals( task.id );
}
@Override
public int hashCode() {
return Objects.hash( id );
}
@Override
public String toString() {
return String.format( "Task(id: %d; description: %s)", id, description == null ? "null" : description );
}
}
| Task |
java | apache__camel | components/camel-irc/src/generated/java/org/apache/camel/component/irc/IrcEndpointConfigurer.java | {
"start": 730,
"end": 10181
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
IrcEndpoint target = (IrcEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autorejoin":
case "autoRejoin": target.getConfiguration().setAutoRejoin(property(camelContext, boolean.class, value)); return true;
case "bridgeerrorhandler":
case "bridgeErrorHandler": target.setBridgeErrorHandler(property(camelContext, boolean.class, value)); return true;
case "channels": target.getConfiguration().setChannels(property(camelContext, java.lang.String.class, value)); return true;
case "colors": target.getConfiguration().setColors(property(camelContext, boolean.class, value)); return true;
case "commandtimeout":
case "commandTimeout": target.getConfiguration().setCommandTimeout(property(camelContext, long.class, value)); return true;
case "exceptionhandler":
case "exceptionHandler": target.setExceptionHandler(property(camelContext, org.apache.camel.spi.ExceptionHandler.class, value)); return true;
case "exchangepattern":
case "exchangePattern": target.setExchangePattern(property(camelContext, org.apache.camel.ExchangePattern.class, value)); return true;
case "keys": target.getConfiguration().setKeys(property(camelContext, java.lang.String.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
case "namesonjoin":
case "namesOnJoin": target.getConfiguration().setNamesOnJoin(property(camelContext, boolean.class, value)); return true;
case "nickpassword":
case "nickPassword": target.getConfiguration().setNickPassword(property(camelContext, java.lang.String.class, value)); return true;
case "nickname": target.getConfiguration().setNickname(property(camelContext, java.lang.String.class, value)); return true;
case "onjoin":
case "onJoin": target.getConfiguration().setOnJoin(property(camelContext, boolean.class, value)); return true;
case "onkick":
case "onKick": target.getConfiguration().setOnKick(property(camelContext, boolean.class, value)); return true;
case "onmode":
case "onMode": target.getConfiguration().setOnMode(property(camelContext, boolean.class, value)); return true;
case "onnick":
case "onNick": target.getConfiguration().setOnNick(property(camelContext, boolean.class, value)); return true;
case "onpart":
case "onPart": target.getConfiguration().setOnPart(property(camelContext, boolean.class, value)); return true;
case "onprivmsg":
case "onPrivmsg": target.getConfiguration().setOnPrivmsg(property(camelContext, boolean.class, value)); return true;
case "onquit":
case "onQuit": target.getConfiguration().setOnQuit(property(camelContext, boolean.class, value)); return true;
case "onreply":
case "onReply": target.getConfiguration().setOnReply(property(camelContext, boolean.class, value)); return true;
case "ontopic":
case "onTopic": target.getConfiguration().setOnTopic(property(camelContext, boolean.class, value)); return true;
case "password": target.getConfiguration().setPassword(property(camelContext, java.lang.String.class, value)); return true;
case "persistent": target.getConfiguration().setPersistent(property(camelContext, boolean.class, value)); return true;
case "realname": target.getConfiguration().setRealname(property(camelContext, java.lang.String.class, value)); return true;
case "sslcontextparameters":
case "sslContextParameters": target.getConfiguration().setSslContextParameters(property(camelContext, org.apache.camel.support.jsse.SSLContextParameters.class, value)); return true;
case "trustmanager":
case "trustManager": target.getConfiguration().setTrustManager(property(camelContext, org.schwering.irc.lib.ssl.SSLTrustManager.class, value)); return true;
case "username": target.getConfiguration().setUsername(property(camelContext, java.lang.String.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "autorejoin":
case "autoRejoin": return boolean.class;
case "bridgeerrorhandler":
case "bridgeErrorHandler": return boolean.class;
case "channels": return java.lang.String.class;
case "colors": return boolean.class;
case "commandtimeout":
case "commandTimeout": return long.class;
case "exceptionhandler":
case "exceptionHandler": return org.apache.camel.spi.ExceptionHandler.class;
case "exchangepattern":
case "exchangePattern": return org.apache.camel.ExchangePattern.class;
case "keys": return java.lang.String.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
case "namesonjoin":
case "namesOnJoin": return boolean.class;
case "nickpassword":
case "nickPassword": return java.lang.String.class;
case "nickname": return java.lang.String.class;
case "onjoin":
case "onJoin": return boolean.class;
case "onkick":
case "onKick": return boolean.class;
case "onmode":
case "onMode": return boolean.class;
case "onnick":
case "onNick": return boolean.class;
case "onpart":
case "onPart": return boolean.class;
case "onprivmsg":
case "onPrivmsg": return boolean.class;
case "onquit":
case "onQuit": return boolean.class;
case "onreply":
case "onReply": return boolean.class;
case "ontopic":
case "onTopic": return boolean.class;
case "password": return java.lang.String.class;
case "persistent": return boolean.class;
case "realname": return java.lang.String.class;
case "sslcontextparameters":
case "sslContextParameters": return org.apache.camel.support.jsse.SSLContextParameters.class;
case "trustmanager":
case "trustManager": return org.schwering.irc.lib.ssl.SSLTrustManager.class;
case "username": return java.lang.String.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
IrcEndpoint target = (IrcEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autorejoin":
case "autoRejoin": return target.getConfiguration().isAutoRejoin();
case "bridgeerrorhandler":
case "bridgeErrorHandler": return target.isBridgeErrorHandler();
case "channels": return target.getConfiguration().getChannels();
case "colors": return target.getConfiguration().isColors();
case "commandtimeout":
case "commandTimeout": return target.getConfiguration().getCommandTimeout();
case "exceptionhandler":
case "exceptionHandler": return target.getExceptionHandler();
case "exchangepattern":
case "exchangePattern": return target.getExchangePattern();
case "keys": return target.getConfiguration().getKeys();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
case "namesonjoin":
case "namesOnJoin": return target.getConfiguration().isNamesOnJoin();
case "nickpassword":
case "nickPassword": return target.getConfiguration().getNickPassword();
case "nickname": return target.getConfiguration().getNickname();
case "onjoin":
case "onJoin": return target.getConfiguration().isOnJoin();
case "onkick":
case "onKick": return target.getConfiguration().isOnKick();
case "onmode":
case "onMode": return target.getConfiguration().isOnMode();
case "onnick":
case "onNick": return target.getConfiguration().isOnNick();
case "onpart":
case "onPart": return target.getConfiguration().isOnPart();
case "onprivmsg":
case "onPrivmsg": return target.getConfiguration().isOnPrivmsg();
case "onquit":
case "onQuit": return target.getConfiguration().isOnQuit();
case "onreply":
case "onReply": return target.getConfiguration().isOnReply();
case "ontopic":
case "onTopic": return target.getConfiguration().isOnTopic();
case "password": return target.getConfiguration().getPassword();
case "persistent": return target.getConfiguration().isPersistent();
case "realname": return target.getConfiguration().getRealname();
case "sslcontextparameters":
case "sslContextParameters": return target.getConfiguration().getSslContextParameters();
case "trustmanager":
case "trustManager": return target.getConfiguration().getTrustManager();
case "username": return target.getConfiguration().getUsername();
default: return null;
}
}
}
| IrcEndpointConfigurer |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/DatabindContext.java | {
"start": 9954,
"end": 19663
} | class ____:
if (vld != Validity.ALLOWED) {
if (ptv.validateSubType(this, baseType, subType) != Validity.ALLOWED) {
return _throwSubtypeClassNotAllowed(baseType, subClass, ptv);
}
}
return subType;
}
protected <T> T _throwNotASubtype(JavaType baseType, String subType) throws DatabindException {
throw invalidTypeIdException(baseType, subType, "Not a subtype");
}
protected <T> T _throwSubtypeNameNotAllowed(JavaType baseType, String subType,
PolymorphicTypeValidator ptv)
throws DatabindException
{
throw invalidTypeIdException(baseType, subType,
"Configured `PolymorphicTypeValidator` (of type "+ClassUtil.classNameOf(ptv)+") denied resolution");
}
protected <T> T _throwSubtypeClassNotAllowed(JavaType baseType, String subType,
PolymorphicTypeValidator ptv)
throws DatabindException
{
throw invalidTypeIdException(baseType, subType,
"Configured `PolymorphicTypeValidator` (of type "+ClassUtil.classNameOf(ptv)+") denied resolution");
}
/**
* Helper method for constructing exception to indicate that given type id
* could not be resolved to a valid subtype of specified base type.
* Most commonly called during polymorphic deserialization.
*<p>
* Note that most of the time this method should NOT be called directly: instead,
* method <code>handleUnknownTypeId()</code> should be called which will call this method
* if necessary.
*/
protected abstract DatabindException invalidTypeIdException(JavaType baseType, String typeId,
String extraDesc);
public abstract TypeFactory getTypeFactory();
/*
/**********************************************************************
/* Annotation, BeanDescription introspection
/**********************************************************************
*/
/**
* Convenience method for doing full "for serialization or deserialization"
* introspection of specified type; results may be cached for duration (lifespan)
* of this context as well.
*/
public final BeanDescription introspectBeanDescription(JavaType type) {
return introspectBeanDescription(type, introspectClassAnnotations(type));
}
public abstract BeanDescription introspectBeanDescription(JavaType type,
AnnotatedClass classDef);
/**
 * Lazy variant of {@link #introspectBeanDescription(JavaType)}: returns a
 * supplier that defers both class-annotation resolution and full bean
 * introspection until actually requested (caching semantics are those of
 * {@link BeanDescription.LazySupplier}).
 *
 * @param type Type to (lazily) introspect
 * @return Supplier that performs introspection on first access
 */
public BeanDescription.Supplier lazyIntrospectBeanDescription(JavaType type) {
    return new BeanDescription.LazySupplier(getConfig(), type) {
        @Override
        protected BeanDescription _construct(JavaType forType, AnnotatedClass ac) {
            return introspectBeanDescription(forType);
        }

        @Override
        protected AnnotatedClass _introspect(JavaType forType) {
            return introspectClassAnnotations(forType);
        }
    };
}
/**
 * Accessor for resolved class-annotation view of given type; delegates to
 * this context's {@link ClassIntrospector}.
 *
 * @param type Type to resolve annotations for
 */
public AnnotatedClass introspectClassAnnotations(JavaType type) {
    return classIntrospector().introspectClassAnnotations(type);
}
/**
 * "Direct" variant of {@link #introspectClassAnnotations(JavaType)}; delegates
 * to {@code ClassIntrospector.introspectDirectClassAnnotations}. Presumably
 * limits resolution to annotations declared directly on the class — see
 * {@link ClassIntrospector} for exact semantics.
 */
public AnnotatedClass introspectDirectClassAnnotations(JavaType type) {
    return classIntrospector().introspectDirectClassAnnotations(type);
}
/**
 * Convenience overload of {@link #introspectClassAnnotations(JavaType)} that
 * first resolves given raw class into a {@link JavaType}.
 */
public AnnotatedClass introspectClassAnnotations(Class<?> rawType) {
    return introspectClassAnnotations(constructType(rawType));
}
/**
 * Accessor for {@link ClassIntrospector} this context uses for class-annotation
 * and bean introspection.
 */
protected abstract ClassIntrospector classIntrospector();
/*
/**********************************************************************
/* Helper object construction
/**********************************************************************
*/
/**
 * Constructs an {@link ObjectIdGenerator} of the type indicated by given
 * {@link ObjectIdInfo}: configured {@link HandlerInstantiator} gets the first
 * chance, with fall-back to plain zero-argument construction; generator is
 * then bound to the scope from {@code objectIdInfo}.
 *
 * @param annotated Annotated element the object-id definition came from
 * @param objectIdInfo Definition carrying generator type and scope
 * @return Scoped generator instance
 */
public ObjectIdGenerator<?> objectIdGeneratorInstance(Annotated annotated,
        ObjectIdInfo objectIdInfo)
{
    final Class<?> genClass = objectIdInfo.getGeneratorType();
    final MapperConfig<?> config = getConfig();
    final HandlerInstantiator inst = config.getHandlerInstantiator();
    ObjectIdGenerator<?> generator = null;
    if (inst != null) {
        generator = inst.objectIdGeneratorInstance(config, annotated, genClass);
    }
    if (generator == null) {
        // No instantiator, or it declined: construct directly
        generator = (ObjectIdGenerator<?>) ClassUtil.createInstance(genClass,
                config.canOverrideAccessModifiers());
    }
    return generator.forScope(objectIdInfo.getScope());
}
/**
 * Constructs an {@link ObjectIdResolver} of the type indicated by given
 * {@link ObjectIdInfo}: configured {@link HandlerInstantiator} gets the first
 * chance, with fall-back to plain zero-argument construction.
 *
 * @param annotated Annotated element the object-id definition came from
 * @param objectIdInfo Definition carrying resolver type
 * @return Resolver instance
 */
public ObjectIdResolver objectIdResolverInstance(Annotated annotated, ObjectIdInfo objectIdInfo)
{
    final Class<? extends ObjectIdResolver> resolverClass = objectIdInfo.getResolverType();
    final MapperConfig<?> config = getConfig();
    final HandlerInstantiator inst = config.getHandlerInstantiator();
    ObjectIdResolver resolver = null;
    if (inst != null) {
        resolver = inst.resolverIdGeneratorInstance(config, annotated, resolverClass);
    }
    if (resolver == null) {
        // No instantiator, or it declined: construct directly
        resolver = ClassUtil.createInstance(resolverClass, config.canOverrideAccessModifiers());
    }
    return resolver;
}
/**
 * Helper method to use to construct a {@link Converter}, given a definition
 * that may be either actual converter instance, or Class for instantiating one.
 *
 * @param annotated Annotated element the converter definition came from
 * @param converterDef Either a {@code Converter} instance, a {@code Class} of one,
 *   or {@code null}/marker value for "no converter"
 * @return Converter to use, or {@code null} if none defined
 * @throws IllegalStateException if definition is neither a Converter nor a Class of one
 */
@SuppressWarnings("unchecked")
public Converter<Object,Object> converterInstance(Annotated annotated,
        Object converterDef)
{
    if (converterDef == null) {
        return null;
    }
    // Already-constructed instance? Just cast and return
    if (converterDef instanceof Converter<?,?>) {
        return (Converter<Object,Object>) converterDef;
    }
    if (!(converterDef instanceof Class)) {
        throw new IllegalStateException("AnnotationIntrospector returned Converter definition of type "
                +converterDef.getClass().getName()+"; expected type Converter or Class<Converter> instead");
    }
    final Class<?> rawClass = (Class<?>) converterDef;
    // there are some known "no class" markers to consider too:
    if (rawClass == Converter.None.class || ClassUtil.isBogusClass(rawClass)) {
        return null;
    }
    if (!Converter.class.isAssignableFrom(rawClass)) {
        throw new IllegalStateException("AnnotationIntrospector returned Class "
                +rawClass.getName()+"; expected Class<Converter>");
    }
    // Configured HandlerInstantiator gets first chance; fall back to direct construction
    final MapperConfig<?> config = getConfig();
    final HandlerInstantiator inst = config.getHandlerInstantiator();
    Converter<?,?> converter = (inst == null) ? null
            : inst.converterInstance(config, annotated, rawClass);
    if (converter == null) {
        converter = (Converter<?,?>) ClassUtil.createInstance(rawClass,
                config.canOverrideAccessModifiers());
    }
    return (Converter<Object,Object>) converter;
}
/*
/**********************************************************************
/* Misc config access
/**********************************************************************
*/
/**
 * Accessor for logical "root name" to use for given root-level type (e.g. for
 * root-value wrapping) — exact semantics defined by concrete implementation.
 */
public abstract PropertyName findRootName(JavaType rootType);

/**
 * Overload of {@link #findRootName(JavaType)} that takes a raw class.
 */
public abstract PropertyName findRootName(Class<?> rawRootType);
/*
/**********************************************************************
/* Error reporting
/**********************************************************************
*/
/**
 * Helper method called to indicate a generic problem that stems from type
 * definition(s), not input data, or input/output state; typically this
 * means throwing a {@link tools.jackson.databind.exc.InvalidDefinitionException}.
 */
public abstract <T> T reportBadDefinition(JavaType type, String msg)
    throws DatabindException;

/**
 * Convenience overload of {@link #reportBadDefinition(JavaType, String)} that
 * first resolves given raw class into a {@link JavaType}.
 */
public <T> T reportBadDefinition(Class<?> type, String msg)
    throws DatabindException
{
    return reportBadDefinition(constructType(type), msg);
}
/**
 * Helper method called to indicate a problem with given type's definition,
 * with {@link String#format}-style message and arguments.
 */
public abstract <T> T reportBadTypeDefinition(BeanDescription bean,
        String msg, Object... msgArgs)
    throws DatabindException;

/**
 * Overload that takes a lazily-resolved {@link BeanDescription.Supplier},
 * forcing resolution before delegating to
 * {@link #reportBadTypeDefinition(BeanDescription, String, Object...)}.
 */
public <T> T reportBadTypeDefinition(BeanDescription.Supplier beanDescRef,
        String msg, Object... msgArgs)
    throws DatabindException {
    return reportBadTypeDefinition(beanDescRef.get(), msg, msgArgs);
}
/*
/**********************************************************************
/* Helper methods
/**********************************************************************
*/
/**
 * Formats given message with arguments if any were passed; returns message
 * as-is otherwise. Skipping {@link String#format} for the no-argument case
 * avoids misinterpreting literal '%' characters in the message.
 */
protected final String _format(String msg, Object... msgArgs) {
    return (msgArgs.length == 0) ? msg : String.format(msg, msgArgs);
}
/**
 * Null-safe truncation of (possibly long) description Strings for error
 * messages: short-enough input is returned as-is; longer input keeps head
 * and tail of {@code MAX_ERROR_STR_LEN} characters each, with the elided
 * middle marked by {@code "]...["}.
 */
protected final String _truncate(String desc) {
    if (desc == null) {
        return "";
    }
    final int len = desc.length();
    if (len <= MAX_ERROR_STR_LEN) {
        return desc;
    }
    return desc.substring(0, MAX_ERROR_STR_LEN)
            + "]...["
            + desc.substring(len - MAX_ERROR_STR_LEN);
}
/**
 * Returns double-quoted, truncated form of given description for inclusion
 * in error messages; {@code null} maps to {@code "[N/A]"}.
 */
protected String _quotedString(String desc) {
    // !!! should we quote contents too? (in case there are control chars, linefeeds)
    return (desc == null) ? "[N/A]" : String.format("\"%s\"", _truncate(desc));
}
/**
 * Appends optional extra description to base message, separated by ": ";
 * returns base message unchanged if extra is {@code null}.
 */
protected String _colonConcat(String msgBase, String extra) {
    return (extra == null) ? msgBase : (msgBase + ": " + extra);
}
/**
 * Returns truncated (but unquoted) form of given description for inclusion
 * in error messages; {@code null} maps to {@code "[N/A]"}.
 */
protected String _desc(String desc) {
    // !!! should we quote it? (in case there are control chars, linefeeds)
    return (desc == null) ? "[N/A]" : _truncate(desc);
}
}
| acceptable |
java | spring-projects__spring-boot | module/spring-boot-jersey/src/test/java/org/springframework/boot/jersey/autoconfigure/JerseyAutoConfigurationTests.java | {
"start": 6157,
"end": 6389
} | class ____ {
@Bean
FilterRegistrationBean<RequestContextFilter> customRequestContextFilterRegistration() {
return new FilterRegistrationBean<>(new RequestContextFilter());
}
}
}
| RequestContextFilterRegistrationConfiguration |
java | quarkusio__quarkus | extensions/smallrye-metrics/deployment/src/main/java/io/quarkus/smallrye/metrics/deployment/jandex/JandexAnnotationInfoAdapter.java | {
"start": 327,
"end": 1171
} | class ____ implements AnnotationInfoAdapter<AnnotationInstance> {
private final IndexView indexView;
public JandexAnnotationInfoAdapter(IndexView indexView) {
this.indexView = indexView;
}
@Override
public AnnotationInfo convert(AnnotationInstance input) {
return new RawAnnotationInfo(
input.valueWithDefault(indexView, "name").asString(),
input.valueWithDefault(indexView, "absolute").asBoolean(),
input.valueWithDefault(indexView, "tags").asStringArray(),
input.valueWithDefault(indexView, "unit").asString(),
input.valueWithDefault(indexView, "description").asString(),
input.valueWithDefault(indexView, "displayName").asString(),
input.name().toString());
}
}
| JandexAnnotationInfoAdapter |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/common/time/DateTimeParser.java | {
"start": 718,
"end": 1441
} | interface ____ {
ZoneId getZone();
Locale getLocale();
DateTimeParser withZone(ZoneId zone);
DateTimeParser withLocale(Locale locale);
/**
* Parses the specified string.
* <p>
* The pattern must fully match, using the whole string.
* If the string cannot be fully parsed, {@link DateTimeParseException} is thrown.
* @throws DateTimeParseException The string could not be fully parsed
*/
TemporalAccessor parse(CharSequence str);
/**
* Try to parse the specified string.
* <p>
* The pattern must fully match, using the whole string. It must not throw exceptions if parsing fails.
*/
ParseResult tryParse(CharSequence str);
}
| DateTimeParser |
java | google__auto | value/src/main/java/com/google/auto/value/AutoOneOf.java | {
"start": 1314,
"end": 2308
} | enum ____ {STRING, INTEGER}
*
* public abstract Kind getKind();
*
* public abstract String string();
* public abstract int integer();
*
* public static StringOrInteger ofString(String s) {
* return AutoOneOf_StringOrInteger.string(s);
* }
*
* public static StringOrInteger ofInteger(int i) {
* return AutoOneOf_StringOrInteger.integer(i);
* }
* }
*
* String client(StringOrInteger stringOrInteger) {
* switch (stringOrInteger.getKind()) {
* case STRING:
* return "the string '" + stringOrInteger.string() + "'";
* case INTEGER:
* return "the integer " + stringOrInteger.integer();
* }
* throw new AssertionError();
* }
* }</pre>
*
* <p>{@code @AutoOneOf} is explained in more detail in the <a
* href="https://github.com/google/auto/blob/main/value/userguide/howto.md#oneof">user guide</a>.
*
* @author Chris Nokleberg
* @author Éamonn McManus
*/
@Retention(RetentionPolicy.CLASS)
@Target(ElementType.TYPE)
public @ | Kind |
java | apache__hadoop | hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/util/FSUtils.java | {
"start": 1064,
"end": 2673
} | class ____ {
private static final String OVERFLOW_ERROR_HINT =
FSExceptionMessages.TOO_MANY_BYTES_FOR_DEST_BUFFER
+ ": request length = %s, with offset = %s, buffer capacity = %s";
private FSUtils() {
}
public static void checkReadParameters(byte[] buffer, int offset, int length) {
Preconditions.checkArgument(buffer != null, "Null buffer");
if (offset < 0 || offset > buffer.length) {
throw new IndexOutOfBoundsException(
String.format("offset: %s is out of range [%s, %s]", offset, 0, buffer.length));
}
Preconditions.checkArgument(length >= 0, "length: %s is negative", length);
if (buffer.length < offset + length) {
throw new IndexOutOfBoundsException(
String.format(OVERFLOW_ERROR_HINT, length, offset, (buffer.length - offset)));
}
}
public static URI normalizeURI(URI fsUri, Configuration hadoopConfig) {
final String scheme = fsUri.getScheme();
final String authority = fsUri.getAuthority();
if (scheme == null && authority == null) {
fsUri = FileSystem.getDefaultUri(hadoopConfig);
} else if (scheme != null && authority == null) {
URI defaultUri = FileSystem.getDefaultUri(hadoopConfig);
if (scheme.equals(defaultUri.getScheme()) && defaultUri.getAuthority() != null) {
fsUri = defaultUri;
}
}
return fsUri;
}
public static String scheme(Configuration conf, URI uri) {
if (uri.getScheme() == null || uri.getScheme().isEmpty()) {
return FileSystem.getDefaultUri(conf).getScheme();
} else {
return uri.getScheme();
}
}
}
| FSUtils |
java | micronaut-projects__micronaut-core | core-processor/src/main/java/io/micronaut/context/visitor/InternalApiTypeElementVisitor.java | {
"start": 1359,
"end": 5103
} | class ____ implements TypeElementVisitor<Object, Object> {
private static final String IO_MICRONAUT = "io.micronaut";
private static final String MICRONAUT_PROCESSING_INTERNAL_WARNINGS = "micronaut.processing.internal.warnings";
private boolean isMicronautClass;
private boolean hasMicronautSuperClass;
private boolean warned;
@Override
public Set<String> getSupportedAnnotationNames() {
return Set.of(
Internal.class.getName(),
Experimental.class.getName()
);
}
@Override
public Set<String> getSupportedOptions() {
return Set.of(MICRONAUT_PROCESSING_INTERNAL_WARNINGS);
}
@NonNull
@Override
public VisitorKind getVisitorKind() {
return VisitorKind.ISOLATING;
}
@Override
public void visitClass(ClassElement element, VisitorContext context) {
reset();
isMicronautClass = isMicronautElement(element);
if (isMicronautClass) {
return;
}
ClassElement currentElement = element;
while (true) {
currentElement = currentElement.getSuperType().orElse(null);
if (currentElement == null) {
hasMicronautSuperClass = false;
break;
}
if (isMicronautElement(currentElement)) {
hasMicronautSuperClass = true;
if (isInternalOrExperimental(currentElement)) {
warn(element, context);
}
break;
}
}
}
private void reset() {
warned = false;
hasMicronautSuperClass = false;
isMicronautClass = false;
}
private boolean isMicronautElement(ClassElement element) {
return element.getName().startsWith(IO_MICRONAUT);
}
@Override
public void visitMethod(MethodElement element, VisitorContext context) {
warnMember(element, context);
}
@Override
public void visitConstructor(ConstructorElement element, VisitorContext context) {
warnMember(element, context);
}
private void warnMember(MethodElement element, VisitorContext context) {
if (isMicronautClass || !hasMicronautSuperClass) {
return;
}
if (!element.getDeclaringType().equals(element.getOwningType())) {
// We are only interested in declared methods
return;
}
if (!isInternalOrExperimental(element.getMethodAnnotationMetadata())) {
return;
}
// We can probably check if the method is actually overridden but let's avoid it for perf reasons
warn(element, context);
}
private void warn(Element element, VisitorContext context) {
warned = true;
if (warnEnabled(context)) {
context.warn("Element extends or implements an internal or experimental Micronaut API", element);
}
}
private boolean isInternalOrExperimental(AnnotationMetadata annotationMetadata) {
return annotationMetadata.hasAnnotation(Internal.class) || annotationMetadata.hasAnnotation(Experimental.class);
}
@Override
public void finish(VisitorContext visitorContext) {
if (warned && warnEnabled(visitorContext)) {
visitorContext.warn("Overriding an internal Micronaut API may result in breaking changes in minor or patch versions of the framework. Proceed with caution!", null);
}
reset();
}
private boolean warnEnabled(VisitorContext visitorContext) {
String value = visitorContext.getOptions().get(MICRONAUT_PROCESSING_INTERNAL_WARNINGS);
return value == null || StringUtils.TRUE.equals(value);
}
}
| InternalApiTypeElementVisitor |
java | spring-projects__spring-security | saml2/saml2-service-provider/src/main/java/org/springframework/security/saml2/internal/Saml2Utils.java | {
"start": 2898,
"end": 3669
} | class ____ {
private static final Base64Checker BASE_64_CHECKER = new Base64Checker();
private final String encoded;
private boolean inflate;
private boolean requireBase64;
private DecodingConfigurer(String encoded) {
this.encoded = encoded;
}
DecodingConfigurer inflate(boolean inflate) {
this.inflate = inflate;
return this;
}
DecodingConfigurer requireBase64(boolean requireBase64) {
this.requireBase64 = requireBase64;
return this;
}
String decode() {
if (this.requireBase64) {
BASE_64_CHECKER.checkAcceptable(this.encoded);
}
byte[] bytes = Saml2Utils.samlDecode(this.encoded);
return (this.inflate) ? Saml2Utils.samlInflate(bytes) : new String(bytes, StandardCharsets.UTF_8);
}
static | DecodingConfigurer |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/chararray/CharArrayAssert_containsOnlyOnce_with_Character_array_Test.java | {
"start": 1198,
"end": 1932
} | class ____ extends CharArrayAssertBaseTest {
@Test
void should_fail_if_values_is_null() {
// GIVEN
Character[] values = null;
// WHEN
Throwable thrown = catchThrowable(() -> assertions.containsOnlyOnce(values));
// THEN
then(thrown).isInstanceOf(NullPointerException.class)
.hasMessage(shouldNotBeNull("values").create());
}
@Override
protected CharArrayAssert invoke_api_method() {
return assertions.containsOnlyOnce(new Character[] { 'a', 'b' });
}
@Override
protected void verify_internal_effects() {
verify(arrays).assertContainsOnlyOnce(getInfo(assertions), getActual(assertions), arrayOf('a', 'b'));
}
}
| CharArrayAssert_containsOnlyOnce_with_Character_array_Test |
java | apache__camel | components/camel-jms/src/test/java/org/apache/camel/component/jms/integration/consumers/TwoConsumerOnSameQueueIT.java | {
"start": 2090,
"end": 6599
} | class ____ extends CamelTestSupport {
@RegisterExtension
public static ArtemisService service = ArtemisServiceFactory.createVMService();
@Override
protected CamelContext createCamelContext() throws Exception {
CamelContext camelContext = super.createCamelContext();
createConnectionFactory(camelContext);
return camelContext;
}
protected void createConnectionFactory(CamelContext camelContext) {
ConnectionFactory connectionFactory = ConnectionFactoryHelper.createConnectionFactory(service);
camelContext.addComponent("activemq", jmsComponentAutoAcknowledge(connectionFactory));
}
@Test
public void testTwoConsumerOnSameQueue() throws Exception {
sendTwoMessagesWhichShouldReceivedOnBothEndpointsAndAssert();
}
@Test
@DisabledIfSystemProperty(named = "ci.env.name", matches = ".*", disabledReason = "Flaky on Github CI")
public void testStopAndStartOneRoute() throws Exception {
sendTwoMessagesWhichShouldReceivedOnBothEndpointsAndAssert();
// now stop route A
context.getRouteController().stopRoute("a");
// send new message should go to B only
MockEndpoint.resetMocks(context);
getMockEndpoint("mock:a").expectedMessageCount(0);
getMockEndpoint("mock:b").expectedBodiesReceived("Bye World", "Bye World");
template.sendBody("activemq:queue:TwoConsumerOnSameQueueTest", "Bye World");
template.sendBody("activemq:queue:TwoConsumerOnSameQueueTest", "Bye World");
MockEndpoint.assertIsSatisfied(context);
// now start route A
context.getRouteController().startRoute("a");
// send new message should go to both A and B
MockEndpoint.resetMocks(context);
sendTwoMessagesWhichShouldReceivedOnBothEndpointsAndAssert();
}
@Test
@DisabledIfSystemProperty(named = "ci.env.name", matches = ".*", disabledReason = "Flaky on Github CI")
public void testRemoveOneRoute() throws Exception {
sendTwoMessagesWhichShouldReceivedOnBothEndpointsAndAssert();
// now stop and remove route A
context.getRouteController().stopRoute("a");
assertTrue(context.removeRoute("a"));
// send new message should go to B only
MockEndpoint.resetMocks(context);
getMockEndpoint("mock:a").expectedMessageCount(0);
getMockEndpoint("mock:b").expectedBodiesReceived("Bye World", "Bye World");
template.sendBody("activemq:queue:TwoConsumerOnSameQueueTest", "Bye World");
template.sendBody("activemq:queue:TwoConsumerOnSameQueueTest", "Bye World");
MockEndpoint.assertIsSatisfied(context);
}
private void sendTwoMessagesWhichShouldReceivedOnBothEndpointsAndAssert() throws InterruptedException {
final MockEndpoint mockB = getMockEndpoint("mock:b");
final MockEndpoint mockA = getMockEndpoint("mock:a");
template.sendBody("activemq:queue:TwoConsumerOnSameQueueTest", "Hello World");
template.sendBody("activemq:queue:TwoConsumerOnSameQueueTest", "Hello World");
Awaitility.await().atMost(5, TimeUnit.SECONDS).untilAsserted(
() -> assertEquals(2, mockA.getReceivedCounter() + mockB.getReceivedCounter()));
for (Exchange exchange : mockA.getReceivedExchanges()) {
assertExchange(exchange);
}
for (Exchange exchange : mockB.getReceivedExchanges()) {
assertExchange(exchange);
}
}
private static void assertExchange(Exchange exchange) {
assertNotNull(exchange.getIn(), "There should be an in message");
assertNotNull(exchange.getIn().getBody(), "There should be an in body");
assertNotNull(exchange.getIn().getBody(String.class), "The in message body should be of type String");
assertEquals("Hello World", exchange.getIn().getBody(), "The in message body should be 'Hello World");
}
@AfterEach
void resetMocks() {
MockEndpoint.resetMocks(context);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("activemq:queue:TwoConsumerOnSameQueueTest").routeId("a")
.to("log:a", "mock:a");
from("activemq:queue:TwoConsumerOnSameQueueTest").routeId("b")
.to("log:b", "mock:b");
}
};
}
}
| TwoConsumerOnSameQueueIT |
java | apache__camel | core/camel-api/src/main/java/org/apache/camel/Exchange.java | {
"start": 30312,
"end": 36428
} | class ____ for this {@link Exchange} for more details and this
* <a href="http://camel.apache.org/using-getin-or-getout-methods-on-exchange.html">FAQ entry</a>.
*
* @param type the given type
* @return the message as the given type or <tt>null</tt> if not possible to covert to given type
* @see #getIn(Class)
* @deprecated use {@link #getMessage(Class)}
*/
@Deprecated(since = "3.0.0")
<T> T getOut(Class<T> type);
/**
* Returns whether an OUT message has been set or not.
*
* @return <tt>true</tt> if an OUT message exists, <tt>false</tt> otherwise.
* @deprecated use {@link #getMessage()}
*/
@Deprecated(since = "3.0.0")
boolean hasOut();
/**
* Sets the outbound message
*
* @param out the outbound message
* @deprecated use {@link #setMessage(Message)}
*/
@Deprecated(since = "3.0.0")
void setOut(Message out);
/**
* Returns the exception associated with this exchange
*
* @return the exception (or null if no faults)
*/
Exception getException();
/**
* Returns the exception associated with this exchange.
* <p/>
* Is used to get the caused exception that typically have been wrapped in some sort of Camel wrapper exception
* <p/>
* The strategy is to look in the exception hierarchy to find the first given cause that matches the type. Will
* start from the bottom (the real cause) and walk upwards.
*
* @param type the exception type
* @return the exception (or <tt>null</tt> if no caused exception matched)
*/
<T> T getException(Class<T> type);
/**
* Sets the exception associated with this exchange
* <p/>
* Camel will wrap {@link Throwable} into {@link Exception} type to accommodate for the {@link #getException()}
* method returning a plain {@link Exception} type.
*
* @param t the caused exception
*/
void setException(Throwable t);
/**
* Returns true if this exchange failed due to an exception
*
* @return true if this exchange failed due to an exception
* @see Exchange#getException()
*/
boolean isFailed();
/**
* Returns true if this exchange is transacted
*/
boolean isTransacted();
/**
* Returns true if this exchange is marked to stop and not continue routing.
*/
boolean isRouteStop();
/**
* Sets whether this exchange is marked to stop and not continue routing.
*
* @param routeStop <tt>true</tt> to stop routing
*/
void setRouteStop(boolean routeStop);
/**
* Returns true if this exchange is an external initiated redelivered message (such as a JMS broker).
* <p/>
* <b>Important: </b> It is not always possible to determine if the message is a redelivery or not, and therefore
* <tt>false</tt> is returned. Such an example would be a JDBC message. However JMS brokers provides details if a
* message is redelivered.
*
* @return <tt>true</tt> if redelivered, <tt>false</tt> if not or not able to determine
*/
boolean isExternalRedelivered();
/**
* Returns true if this exchange is marked for rollback
*/
boolean isRollbackOnly();
/**
* Sets whether to mark this exchange for rollback
*/
void setRollbackOnly(boolean rollbackOnly);
/**
* Returns true if this exchange is marked for rollback (only last transaction section)
*/
boolean isRollbackOnlyLast();
/**
* Sets whether to mark this exchange for rollback (only last transaction section)
*/
void setRollbackOnlyLast(boolean rollbackOnlyLast);
/**
* Returns the container so that a processor can resolve endpoints from URIs
*
* @return the container which owns this exchange
*/
CamelContext getContext();
/**
* Creates a copy of the current message exchange so that it can be forwarded to another destination
*/
Exchange copy();
/**
* Returns the endpoint which originated this message exchange if a consumer on an endpoint created the message
* exchange, otherwise his property will be <tt>null</tt>.
*
* Note: In case this message exchange has been cloned through another parent message exchange (which itself has
* been created through the consumer of it's own endpoint), then if desired one could still retrieve the consumer
* endpoint of such a parent message exchange as the following:
*
* <pre>
* getContext().getRoute(getFromRouteId()).getEndpoint()
* </pre>
*/
Endpoint getFromEndpoint();
/**
* Returns the route id which originated this message exchange if a route consumer on an endpoint created the
* message exchange, otherwise his property will be <tt>null</tt>.
*
* Note: In case this message exchange has been cloned through another parent message exchange then this method
* would return the <tt>fromRouteId<tt> property of that exchange.
*/
String getFromRouteId();
/**
* Returns the unit of work that this exchange belongs to; which may map to zero, one or more physical transactions
*/
UnitOfWork getUnitOfWork();
/**
* Returns the exchange id (unique)
*/
String getExchangeId();
/**
* Set the exchange id
*/
void setExchangeId(String id);
/**
* Gets the timestamp in millis when this exchange was created.
*
* @see Clock#getCreated()
*/
@Deprecated(since = "4.4.0")
long getCreated();
/**
* Gets the {@link ExchangeExtension} that contains the extension points for internal exchange APIs. These APIs are
* intended for internal usage within Camel and end-users should avoid using them.
*
* @return the {@link ExchangeExtension} point for this exchange.
*/
ExchangeExtension getExchangeExtension();
/**
* Gets {@link Clock} that holds time information about the exchange
*/
Clock getClock();
}
| Javadoc |
java | apache__kafka | trogdor/src/main/java/org/apache/kafka/trogdor/workload/ConsumeBenchWorker.java | {
"start": 14225,
"end": 14947
} | class ____ implements Runnable {
final Map<String, JsonNode> statuses;
StatusUpdater() {
statuses = new HashMap<>();
}
@Override
public void run() {
try {
update();
} catch (Exception e) {
WorkerUtils.abort(log, "ConsumeStatusUpdater", e, doneFuture);
}
}
synchronized void update() {
workerStatus.update(JsonUtil.JSON_SERDE.valueToTree(statuses));
}
synchronized void updateConsumeStatus(String clientId, StatusData status) {
statuses.put(clientId, JsonUtil.JSON_SERDE.valueToTree(status));
}
}
/**
* Runnable | StatusUpdater |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/filter/wall/mysql/MySqlWallTest107.java | {
"start": 804,
"end": 1135
} | class ____ extends TestCase {
public void test_false() throws Exception {
WallProvider provider = new MySqlWallProvider();
provider.getConfig().setCommentAllow(false);
String sql = "select * from t where id = ? or bin(94) = 1011110";
assertFalse(provider.checkValid(sql));
}
}
| MySqlWallTest107 |
java | apache__kafka | clients/src/test/java/org/apache/kafka/common/errors/TransactionExceptionHierarchyTest.java | {
"start": 2742,
"end": 2834
} | class ____ `RefreshRetriableException`
*
* @param exceptionClass the exception | extends |
java | junit-team__junit5 | junit-platform-launcher/src/main/java/org/junit/platform/launcher/jfr/FlightRecordingDiscoveryListener.java | {
"start": 3674,
"end": 3915
} | class ____ extends DiscoveryEvent {
@UniqueId
@Label("Unique Id")
@Nullable
String uniqueId;
@Label("Result")
@Nullable
String result;
}
@Label("Discovery Issue")
@Name("org.junit.DiscoveryIssue")
static | EngineDiscoveryEvent |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/EqualsNullTest.java | {
"start": 4068,
"end": 4424
} | class ____ {",
" boolean m(Object x) {",
EXPECTED_BUG_COMMENT,
" return x.equals(null);",
" }",
"}")
.doTest();
}
@Test
public void positiveReturnObjectEqualsNullFix() {
refactoringTestHelper
.addInputLines(
"in/Test.java",
"""
| Test |
java | apache__camel | components/camel-openapi-java/src/main/java/org/apache/camel/openapi/OpenApiRestApiProcessorFactory.java | {
"start": 1068,
"end": 3287
} | class ____ implements RestApiProcessorFactory {
@Override
public Processor createApiProcessor(
CamelContext camelContext, String contextPath,
RestConfiguration configuration, Map<String, Object> parameters)
throws Exception {
Map<String, Object> options = new HashMap<>(parameters);
if (configuration.getApiProperties() != null) {
options.putAll(configuration.getApiProperties());
}
// need to include host in options
String host = (String) options.get("host");
if (host != null) {
options.put("host", host);
} else {
// favor using explicit configured host for the api
host = configuration.getApiHost();
if (host != null) {
options.put("host", host);
} else {
host = configuration.getHost();
int port = configuration.getPort();
if (host != null && port > 0) {
options.put("host", host + ":" + port);
} else if (host != null) {
options.put("host", host);
} else {
options.put("host", "localhost");
}
}
}
// and include the default scheme as well if not explicit configured
if (!options.containsKey("schemes") && !options.containsKey("schemas")) {
// NOTE schemas is a typo but kept for backwards compatible
String scheme = configuration.getScheme();
if (scheme != null) {
options.put("schemes", scheme);
}
}
// and context path is the base.path
String path = configuration.getContextPath();
if (path != null) {
options.put("base.path", path);
}
// is cors enabled?
Object cors = options.get("cors");
if (cors == null && configuration.isEnableCORS()) {
options.put("cors", "true");
}
RestOpenApiProcessor answer = new RestOpenApiProcessor(options, configuration);
answer.setCamelContext(camelContext);
return answer;
}
}
| OpenApiRestApiProcessorFactory |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/embedded/EmbeddableWithManyToOneCircularityTest.java | {
"start": 2758,
"end": 3286
} | class ____ {
@Id
private Integer id;
@Embedded
private EmbeddableTest embeddedAttribute;
public EntityTest2() {
}
public EntityTest2(int id) {
this.id = id;
}
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public EmbeddableTest getEmbeddedAttribute() {
return embeddedAttribute;
}
public void setEmbeddedAttribute(EmbeddableTest embeddedAttribute) {
this.embeddedAttribute = embeddedAttribute;
}
}
@Embeddable
public static | EntityTest2 |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/SelfSetTest.java | {
"start": 879,
"end": 1259
} | class ____ {
private final CompilationTestHelper compilationHelper =
CompilationTestHelper.newInstance(SelfSet.class, getClass());
@Test
public void positiveCase() {
compilationHelper
.addSourceLines(
"Test.java",
"""
import com.google.errorprone.bugpatterns.proto.ProtoTest.TestProtoMessage;
final | SelfSetTest |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/legacy/metrics/MetricStore.java | {
"start": 2790,
"end": 21520
} | class ____ {
private static final Logger LOG = LoggerFactory.getLogger(MetricStore.class);
/**
* The set holds the names of the transient metrics which are no longer useful after a subtask
* reaches terminal state and shall be removed to avoid misleading users. Note that there may be
* other transient metrics, we currently only support cleaning these three.
*/
private static final Set<String> TRANSIENT_METRIC_NAMES =
new HashSet<>(
Arrays.asList(
MetricNames.TASK_IDLE_TIME,
MetricNames.TASK_BACK_PRESSURED_TIME,
MetricNames.TASK_BUSY_TIME));
private final ComponentMetricStore jobManager = new ComponentMetricStore();
private final Map<String, TaskManagerMetricStore> taskManagers = new ConcurrentHashMap<>();
private final Map<String, JobMetricStore> jobs = new ConcurrentHashMap<>();
/**
* The map holds the attempt number of the representing execution for each subtask of each
* vertex. The keys and values are JobID -> JobVertexID -> SubtaskIndex ->
* CurrentExecutionAttemptNumber. When a metric of an execution attempt is added, the metric can
* also be added to the SubtaskMetricStore when it is of the representing execution.
*/
private final Map<String, Map<String, Map<Integer, Integer>>> representativeAttempts =
new ConcurrentHashMap<>();
/**
* Remove inactive task managers.
*
* @param activeTaskManagers to retain.
*/
synchronized void retainTaskManagers(List<String> activeTaskManagers) {
taskManagers.keySet().retainAll(activeTaskManagers);
}
/**
* Remove inactive jobs..
*
* @param activeJobs to retain.
*/
synchronized void retainJobs(List<String> activeJobs) {
jobs.keySet().retainAll(activeJobs);
representativeAttempts.keySet().retainAll(activeJobs);
}
public synchronized void updateCurrentExecutionAttempts(Collection<JobDetails> jobs) {
for (JobDetails job : jobs) {
String jobId = job.getJobId().toString();
Map<String, Map<Integer, CurrentAttempts>> currentAttempts =
job.getCurrentExecutionAttempts();
Map<String, Map<Integer, Integer>> jobRepresentativeAttempts =
representativeAttempts.compute(
jobId,
(k, overwritten) ->
CollectionUtil.newHashMapWithExpectedSize(
currentAttempts.size()));
currentAttempts.forEach(
(vertexId, subtaskAttempts) -> {
Map<Integer, Integer> vertexAttempts =
jobRepresentativeAttempts.compute(
vertexId, (k, overwritten) -> new HashMap<>());
Optional<TaskMetricStore> taskMetricStoreOptional =
Optional.ofNullable(this.jobs.get(jobId))
.map(map -> map.getTaskMetricStore(vertexId));
// Retains current active subtasks to accommodate dynamic scaling
taskMetricStoreOptional.ifPresent(
taskMetricStore ->
taskMetricStore.retainSubtasks(subtaskAttempts.keySet()));
subtaskAttempts.forEach(
(subtaskIndex, attempts) -> {
// Updates representative attempts
vertexAttempts.put(
subtaskIndex, attempts.getRepresentativeAttempt());
// Retains current attempt metrics to avoid memory leak
taskMetricStoreOptional
.map(
taskMetricStore ->
taskMetricStore.getSubtaskMetricStore(
subtaskIndex))
.ifPresent(
subtaskMetricStore ->
subtaskMetricStore.retainAttempts(
attempts.getCurrentAttempts()));
// Remove transient metrics for terminal subtasks
if (attempts.isTerminalState()) {
taskMetricStoreOptional.ifPresent(
taskMetricStore ->
taskMetricStore.removeTransientMetrics(
subtaskIndex));
}
});
});
}
}
public Map<String, Map<String, Map<Integer, Integer>>> getRepresentativeAttempts() {
return representativeAttempts;
}
/**
* Add metric dumps to the store.
*
* @param metricDumps to add.
*/
synchronized void addAll(List<MetricDump> metricDumps) {
for (MetricDump metric : metricDumps) {
add(metric);
}
}
// -----------------------------------------------------------------------------------------------------------------
// Accessors for sub MetricStores
// -----------------------------------------------------------------------------------------------------------------
/**
* Returns the {@link ComponentMetricStore} for the JobManager.
*
* @return ComponentMetricStore for the JobManager
*/
public synchronized ComponentMetricStore getJobManagerMetricStore() {
return ComponentMetricStore.unmodifiable(jobManager);
}
public synchronized ComponentMetricStore getJobManagerOperatorMetricStore(
String jobID, String taskID) {
if (jobID == null || taskID == null) {
return null;
}
JobMetricStore job = jobs.get(jobID);
if (job == null) {
return null;
}
TaskMetricStore task = job.getTaskMetricStore(taskID);
if (task == null) {
return null;
}
return ComponentMetricStore.unmodifiable(task.getJobManagerOperatorMetricStore());
}
/**
* Returns the {@link TaskManagerMetricStore} for the given taskmanager ID.
*
* @param tmID taskmanager ID
* @return TaskManagerMetricStore for the given ID, or null if no store for the given argument
* exists
*/
public synchronized TaskManagerMetricStore getTaskManagerMetricStore(String tmID) {
return tmID == null ? null : TaskManagerMetricStore.unmodifiable(taskManagers.get(tmID));
}
/**
* Returns the {@link ComponentMetricStore} for the given job ID.
*
* @param jobID job ID
* @return ComponentMetricStore for the given ID, or null if no store for the given argument
* exists
*/
public synchronized ComponentMetricStore getJobMetricStore(String jobID) {
return jobID == null ? null : ComponentMetricStore.unmodifiable(jobs.get(jobID));
}
/**
* Returns the {@link ComponentMetricStore} for the given job/task ID.
*
* @param jobID job ID
* @param taskID task ID
* @return ComponentMetricStore for given IDs, or null if no store for the given arguments
* exists
*/
public synchronized TaskMetricStore getTaskMetricStore(String jobID, String taskID) {
JobMetricStore job = jobID == null ? null : jobs.get(jobID);
if (job == null || taskID == null) {
return null;
}
return TaskMetricStore.unmodifiable(job.getTaskMetricStore(taskID));
}
public synchronized JobMetricStoreSnapshot getJobs() {
return new JobMetricStoreSnapshot(unmodifiableMap(jobs));
}
public synchronized Map<String, TaskManagerMetricStore> getTaskManagers() {
return unmodifiableMap(taskManagers);
}
@VisibleForTesting
public void add(MetricDump metric) {
try {
QueryScopeInfo info = metric.scopeInfo;
TaskManagerMetricStore tm;
JobMetricStore job;
TaskMetricStore task;
SubtaskMetricStore subtask;
ComponentMetricStore attempt;
ComponentMetricStore jmOperator;
boolean isRepresentativeAttempt;
String name = info.scope.isEmpty() ? metric.name : info.scope + "." + metric.name;
if (name.isEmpty()) { // malformed transmission
return;
}
switch (info.getCategory()) {
case INFO_CATEGORY_JM:
addMetric(jobManager, name, metric);
break;
case INFO_CATEGORY_TM:
String tmID = ((QueryScopeInfo.TaskManagerQueryScopeInfo) info).taskManagerID;
tm = taskManagers.computeIfAbsent(tmID, k -> new TaskManagerMetricStore());
if (name.contains("GarbageCollector")) {
String gcName =
name.substring(
"Status.JVM.GarbageCollector.".length(),
name.lastIndexOf('.'));
tm.addGarbageCollectorName(gcName);
}
addMetric(tm, name, metric);
break;
case INFO_CATEGORY_JOB:
QueryScopeInfo.JobQueryScopeInfo jobInfo =
(QueryScopeInfo.JobQueryScopeInfo) info;
job = jobs.computeIfAbsent(jobInfo.jobID, k -> new JobMetricStore());
addMetric(job, name, metric);
break;
case INFO_CATEGORY_TASK:
QueryScopeInfo.TaskQueryScopeInfo taskInfo =
(QueryScopeInfo.TaskQueryScopeInfo) info;
job = jobs.computeIfAbsent(taskInfo.jobID, k -> new JobMetricStore());
task = job.tasks.computeIfAbsent(taskInfo.vertexID, k -> new TaskMetricStore());
subtask =
task.subtasks.computeIfAbsent(
taskInfo.subtaskIndex, k -> new SubtaskMetricStore());
// The attempt is the representative one if the current execution attempt
// number for the subtask is not present in the currentExecutionAttempts,
// which means there should be only one execution
isRepresentativeAttempt =
isRepresentativeAttempt(
taskInfo.jobID,
taskInfo.vertexID,
taskInfo.subtaskIndex,
taskInfo.attemptNumber);
attempt =
subtask.attempts.computeIfAbsent(
taskInfo.attemptNumber, k -> new ComponentMetricStore());
addMetric(attempt, name, metric);
// If the attempt is representative one, its metrics can be updated to the
// subtask and task metric store.
if (isRepresentativeAttempt) {
/**
* The duplication is intended. Metrics scoped by subtask are useful for
* several job/task handlers, while the WebInterface task metric queries
* currently do not account for subtasks, so we don't divide by subtask and
* instead use the concatenation of subtask index and metric name as the
* name for those.
*/
addMetric(subtask, name, metric);
addMetric(task, taskInfo.subtaskIndex + "." + name, metric);
}
break;
case INFO_CATEGORY_OPERATOR:
QueryScopeInfo.OperatorQueryScopeInfo operatorInfo =
(QueryScopeInfo.OperatorQueryScopeInfo) info;
job = jobs.computeIfAbsent(operatorInfo.jobID, k -> new JobMetricStore());
task =
job.tasks.computeIfAbsent(
operatorInfo.vertexID, k -> new TaskMetricStore());
subtask =
task.subtasks.computeIfAbsent(
operatorInfo.subtaskIndex, k -> new SubtaskMetricStore());
isRepresentativeAttempt =
isRepresentativeAttempt(
operatorInfo.jobID,
operatorInfo.vertexID,
operatorInfo.subtaskIndex,
operatorInfo.attemptNumber);
attempt =
subtask.attempts.computeIfAbsent(
operatorInfo.attemptNumber, k -> new ComponentMetricStore());
addMetric(attempt, operatorInfo.operatorName + "." + name, metric);
// If the attempt is representative one, its metrics can be updated to the
// subtask and task metric store.
if (isRepresentativeAttempt) {
/**
* As the WebInterface does not account for operators (because it can't) we
* don't divide by operator and instead use the concatenation of subtask
* index, operator name and metric name as the name.
*/
addMetric(subtask, operatorInfo.operatorName + "." + name, metric);
addMetric(
task,
operatorInfo.subtaskIndex
+ "."
+ operatorInfo.operatorName
+ "."
+ name,
metric);
}
break;
case INFO_CATEGORY_JM_OPERATOR:
QueryScopeInfo.JobManagerOperatorQueryScopeInfo jmOperatorInfo =
(QueryScopeInfo.JobManagerOperatorQueryScopeInfo) info;
job = jobs.computeIfAbsent(jmOperatorInfo.jobID, k -> new JobMetricStore());
task =
job.tasks.computeIfAbsent(
jmOperatorInfo.vertexID, k -> new TaskMetricStore());
jmOperator =
task.jmOperator == null ? new ComponentMetricStore() : task.jmOperator;
addMetric(
jmOperator,
// to distinguish between operators
jmOperatorInfo.operatorName + "." + name,
metric);
break;
default:
LOG.debug("Invalid metric dump category: " + info.getCategory());
}
} catch (Exception e) {
LOG.debug("Malformed metric dump.", e);
}
}
// Returns whether the attempt is the representative one. It's also true if the current
// execution attempt number for the subtask is not present in the currentExecutionAttempts,
// which means there should be only one execution
private boolean isRepresentativeAttempt(
String jobID, String vertexID, int subtaskIndex, int attemptNumber) {
return Optional.of(representativeAttempts)
.map(m -> m.get(jobID))
.map(m -> m.get(vertexID))
.map(m -> m.get(subtaskIndex))
.orElse(attemptNumber)
== attemptNumber;
}
private void addMetric(ComponentMetricStore target, String name, MetricDump metric) {
switch (metric.getCategory()) {
case METRIC_CATEGORY_COUNTER:
MetricDump.CounterDump counter = (MetricDump.CounterDump) metric;
target.addMetric(name, String.valueOf(counter.count));
break;
case METRIC_CATEGORY_GAUGE:
MetricDump.GaugeDump gauge = (MetricDump.GaugeDump) metric;
target.addMetric(name, gauge.value);
break;
case METRIC_CATEGORY_HISTOGRAM:
MetricDump.HistogramDump histogram = (MetricDump.HistogramDump) metric;
target.addMetric(name + "_min", String.valueOf(histogram.min));
target.addMetric(name + "_max", String.valueOf(histogram.max));
target.addMetric(name + "_mean", String.valueOf(histogram.mean));
target.addMetric(name + "_median", String.valueOf(histogram.median));
target.addMetric(name + "_stddev", String.valueOf(histogram.stddev));
target.addMetric(name + "_p75", String.valueOf(histogram.p75));
target.addMetric(name + "_p90", String.valueOf(histogram.p90));
target.addMetric(name + "_p95", String.valueOf(histogram.p95));
target.addMetric(name + "_p98", String.valueOf(histogram.p98));
target.addMetric(name + "_p99", String.valueOf(histogram.p99));
target.addMetric(name + "_p999", String.valueOf(histogram.p999));
break;
case METRIC_CATEGORY_METER:
MetricDump.MeterDump meter = (MetricDump.MeterDump) metric;
target.addMetric(name, String.valueOf(meter.rate));
break;
}
}
// -----------------------------------------------------------------------------------------------------------------
// sub MetricStore classes
// -----------------------------------------------------------------------------------------------------------------
/** Structure containing metrics of a single component. */
@ThreadSafe
public static | MetricStore |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/criteria/subquery/SubqueryCorrelatedEntityJoinTest.java | {
"start": 1256,
"end": 3159
} | class ____ {
@BeforeAll
public void setUp(EntityManagerFactoryScope scope) {
scope.inTransaction( em -> List.of(
new Primary( 1, 10 ),
new Secondary( 10, "n10" ),
new Tertiary( 100, 10, "n100" )
).forEach( em::persist ) );
}
@AfterAll
public void tearDown(EntityManagerFactoryScope scope) {
scope.inTransaction( em -> {
em.createQuery( "delete from PrimaryEntity" ).executeUpdate();
em.createQuery( "delete from SecondaryEntity" ).executeUpdate();
em.createQuery( "delete from TertiaryEntity" ).executeUpdate();
} );
}
@Test
public void testCorrelatedEntityJoin(EntityManagerFactoryScope scope) {
scope.inTransaction( em -> {
final HibernateCriteriaBuilder cb = em.unwrap( Session.class ).getCriteriaBuilder();
final JpaCriteriaQuery<Tuple> query = cb.createTupleQuery();
final JpaRoot<Primary> primary = query.from( Primary.class );
final JpaEntityJoin<Primary,Secondary> secondaryJoin = primary.join( Secondary.class );
secondaryJoin.on(
cb.equal( primary.get( "secondaryFk" ), secondaryJoin.get( "id" ) )
);
final JpaSubQuery<String> subquery = query.subquery( String.class );
final JpaRoot<Tertiary> tertiary = subquery.from( Tertiary.class );
final JpaJoin<Primary,Secondary> correlatedSecondaryJoin = subquery.correlate( secondaryJoin );
subquery.select( tertiary.get( "name" ) ).where( cb.equal(
tertiary.get( "secondaryFk" ),
correlatedSecondaryJoin.get( "id" )
) );
query.multiselect( primary.get( "id" ), secondaryJoin.get( "name" ), subquery );
final Tuple result = em.createQuery( query ).getSingleResult();
assertThat( result.get( 0, Integer.class ) ).isEqualTo( 1 );
assertThat( result.get( 1, String.class ) ).isEqualTo( "n10" );
assertThat( result.get( 2, String.class ) ).isEqualTo( "n100" );
} );
}
@Entity( name = "PrimaryEntity" )
public static | SubqueryCorrelatedEntityJoinTest |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/creator/JSONCreatorTest_default_long.java | {
"start": 551,
"end": 836
} | class ____ {
private final long id;
private final String name;
@JSONCreator
public Model(@JSONField(name="id") long id, @JSONField(name="name") String name) {
this.id = id;
this.name = name;
}
}
public static | Model |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/tofix/JsonIdentityInfoAndBackReferences3964Test.java | {
"start": 3663,
"end": 4156
} | class ____ {
public int id;
public List<Food> foods;
@JsonBackReference("id")
public Animal animal;
@JsonCreator
public Cat(@JsonProperty("id") int id, @JsonProperty("foods") List<Food> foods) {
this.id = id;
this.foods = foods;
}
}
@JsonIdentityInfo(
generator = ObjectIdGenerators.PropertyGenerator.class,
property = "id",
scope = Food.class
)
public static | Cat |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/script/ReindexMetadata.java | {
"start": 1328,
"end": 4380
} | class ____ extends Metadata {
static final Map<String, FieldProperty<?>> PROPERTIES = Map.of(
INDEX,
ObjectField.withWritable(),
ID,
ObjectField.withWritable().withNullable(),
VERSION,
LongField.withWritable().withNullable(),
ROUTING,
StringField.withWritable().withNullable(),
OP,
StringField.withWritable().withValidation(stringSetValidator(Set.of("noop", "index", "delete"))),
NOW,
LongField
);
protected final String index;
protected final String id;
protected final Long version;
protected final String routing;
public ReindexMetadata(String index, String id, Long version, String routing, String op, long timestamp) {
super(metadataMap(index, id, version, routing, op, timestamp), PROPERTIES);
this.index = index;
this.id = id;
this.version = version;
this.routing = routing;
}
/**
* Create the backing metadata map with the standard contents assuming default validators.
*/
protected static Map<String, Object> metadataMap(String index, String id, Long version, String routing, String op, long timestamp) {
Map<String, Object> metadata = Maps.newHashMapWithExpectedSize(IngestDocument.Metadata.values().length);
metadata.put(INDEX, index);
metadata.put(ID, id);
metadata.put(VERSION, version);
metadata.put(ROUTING, routing);
metadata.put(OP, op);
metadata.put(NOW, timestamp);
return metadata;
}
/**
* Get version, if it's null, return sentinel value {@link Long#MIN_VALUE}
*/
@Override
public long getVersion() {
Number version = getNumber(VERSION);
if (version == null) {
return Long.MIN_VALUE;
}
return version.longValue();
}
public boolean isVersionInternal() {
return get(VERSION) == null;
}
/**
* Augmentation to allow {@link ReindexScript}s to check if the version is set to "internal"
*/
public static boolean isVersionInternal(Metadata receiver) {
return receiver.get(VERSION) == null;
}
/**
* Augmentation to allow {@link ReindexScript}s to set the version to "internal".
*
* This is necessary because {@link #setVersion(long)} takes a primitive long.
*/
public static void setVersionToInternal(Metadata receiver) {
receiver.put(VERSION, null);
}
public boolean versionChanged() {
Number updated = getNumber(VERSION);
if (version == null || updated == null) {
return version != updated;
}
return version != updated.longValue();
}
public boolean indexChanged() {
return Objects.equals(index, getString(INDEX)) == false;
}
public boolean idChanged() {
return Objects.equals(id, getString(ID)) == false;
}
public boolean routingChanged() {
return Objects.equals(routing, getString(ROUTING)) == false;
}
}
| ReindexMetadata |
java | apache__camel | components/camel-jta/src/main/java/org/apache/camel/jta/TransactionErrorHandler.java | {
"start": 2107,
"end": 14441
} | class ____ extends ErrorHandlerSupport
implements AsyncProcessor, ShutdownPrepared, Navigate<Processor> {
private static final Logger LOG = LoggerFactory.getLogger(TransactionErrorHandler.class);
protected final CamelContext camelContext;
protected final Processor output;
protected volatile boolean preparingShutdown;
private JtaTransactionPolicy transactionPolicy;
private final String transactionKey;
private final LoggingLevel rollbackLoggingLevel;
/**
* Creates the transaction error handler.
*
* @param camelContext the camel context
* @param output outer processor that should use this default error handler
* @param transactionPolicy the transaction policy
* @param rollbackLoggingLevel logging level to use for logging transaction rollback occurred
*/
public TransactionErrorHandler(CamelContext camelContext, Processor output,
JtaTransactionPolicy transactionPolicy, LoggingLevel rollbackLoggingLevel) {
this.camelContext = camelContext;
this.output = output;
this.transactionPolicy = transactionPolicy;
this.rollbackLoggingLevel = rollbackLoggingLevel;
this.transactionKey = ObjectHelper.getIdentityHashCode(transactionPolicy);
}
@Override
public ErrorHandler clone(Processor output) {
TransactionErrorHandler answer = new TransactionErrorHandler(
camelContext, output, transactionPolicy, rollbackLoggingLevel);
// shallow clone is okay as we do not mutate these
if (exceptionPolicies != null) {
answer.exceptionPolicies = exceptionPolicies;
}
return answer;
}
@Override
public void process(Exchange exchange) throws Exception {
// we have to run this synchronously as a JTA Transaction does *not*
// support using multiple threads to span a transaction
if (exchange.getUnitOfWork().isTransactedBy(transactionKey)) {
// already transacted by this transaction template
// so lets just let the error handler process it
processByErrorHandler(exchange);
} else {
// not yet wrapped in transaction so lets do that
// and then have it invoke the error handler from within that
// transaction
processInTransaction(exchange);
}
}
@Override
public boolean process(Exchange exchange, AsyncCallback callback) {
// invoke this synchronous method as JTA Transaction does *not*
// support using multiple threads to span a transaction
try {
process(exchange);
} catch (Exception e) {
exchange.setException(e);
}
// notify callback we are done synchronously
callback.done(true);
return true;
}
@Override
public CompletableFuture<Exchange> processAsync(Exchange exchange) {
AsyncCallbackToCompletableFutureAdapter<Exchange> callback = new AsyncCallbackToCompletableFutureAdapter<>(exchange);
process(exchange, callback);
return callback.getFuture();
}
protected void processInTransaction(final Exchange exchange) {
// is the exchange redelivered, for example JMS brokers support such details
final String redelivered = Boolean.toString(exchange.isExternalRedelivered());
final String ids = ExchangeHelper.logIds(exchange);
try {
// mark the beginning of this transaction boundary
exchange.getUnitOfWork().beginTransactedBy(transactionKey);
// do in transaction
logTransactionBegin(redelivered, ids);
doInTransactionTemplate(exchange);
logTransactionCommit(redelivered, ids);
} catch (TransactionRolledbackException e) {
// do not set as exception, as its just a dummy exception to force
// spring TX to rollback
logTransactionRollback(redelivered, ids, null, true);
} catch (Throwable e) {
exchange.setException(e);
logTransactionRollback(redelivered, ids, e, false);
} finally {
// mark the end of this transaction boundary
exchange.getUnitOfWork().endTransactedBy(transactionKey);
}
// if it was a local rollback only then remove its marker so outer
// transaction wont see the marker
boolean onlyLast = exchange.isRollbackOnlyLast();
exchange.setRollbackOnlyLast(false);
if (onlyLast) {
// we only want this logged at debug level
if (LOG.isDebugEnabled()) {
// log exception if there was a cause exception so we have the
// stack trace
Exception cause = exchange.getException();
if (cause != null) {
LOG.debug("Transaction rollback ({}) redelivered({}) for {} "
+ "due exchange was marked for rollbackOnlyLast and caught: ",
transactionKey, redelivered, ids, cause);
} else {
LOG.debug("Transaction rollback ({}) redelivered({}) for {} "
+ "due exchange was marked for rollbackOnlyLast",
transactionKey, redelivered, ids);
}
}
// remove caused exception due we was marked as rollback only last
// so by removing the exception, any outer transaction will not be
// affected
exchange.setException(null);
}
}
public void setTransactionPolicy(JtaTransactionPolicy transactionPolicy) {
this.transactionPolicy = transactionPolicy;
}
protected void doInTransactionTemplate(final Exchange exchange) throws Throwable {
// spring transaction template is working best with rollback if you
// throw it a runtime exception
// otherwise it may not rollback messages send to JMS queues etc.
transactionPolicy.run(new JtaTransactionPolicy.Runnable() {
@Override
public void run() throws Throwable {
// wrapper exception to throw if the exchange failed
// IMPORTANT: Must be a runtime exception to let Spring regard
// it as to do "rollback"
Throwable rce;
// and now let process the exchange by the error handler
processByErrorHandler(exchange);
// after handling and still an exception or marked as rollback
// only then rollback
if (exchange.getException() != null || exchange.isRollbackOnly()) {
// wrap exception in transacted exception
if (exchange.getException() != null) {
rce = exchange.getException();
} else {
// create dummy exception to force spring transaction
// manager to rollback
rce = new TransactionRolledbackException();
}
// throw runtime exception to force rollback (which works
// best to rollback with Spring transaction manager)
if (LOG.isTraceEnabled()) {
LOG.trace("Throwing runtime exception to force transaction to rollback on {}",
transactionPolicy);
}
throw rce;
}
}
});
}
/**
* Processes the {@link Exchange} using the error handler.
* <p/>
* This implementation will invoke ensure this occurs synchronously, that means if the async routing engine did kick
* in, then this implementation will wait for the task to complete before it continues.
*
* @param exchange the exchange
*/
protected void processByErrorHandler(final Exchange exchange) {
try {
output.process(exchange);
} catch (Throwable e) {
throw new RuntimeCamelException(e);
}
}
/**
* Logs the transaction begin
*/
private void logTransactionBegin(String redelivered, String ids) {
if (LOG.isDebugEnabled()) {
LOG.debug("Transaction begin ({}) redelivered({}) for {})",
transactionKey, redelivered, ids);
}
}
/**
* Logs the transaction commit
*/
private void logTransactionCommit(String redelivered, String ids) {
if ("true".equals(redelivered)) {
// okay its a redelivered message so log at INFO level if
// rollbackLoggingLevel is INFO or higher
// this allows people to know that the redelivered message was
// committed this time
if (rollbackLoggingLevel == LoggingLevel.INFO || rollbackLoggingLevel == LoggingLevel.WARN
|| rollbackLoggingLevel == LoggingLevel.ERROR) {
LOG.info("Transaction commit ({}) redelivered({}) for {})",
transactionKey, redelivered, ids);
// return after we have logged
return;
}
}
// log non redelivered by default at DEBUG level
LOG.debug("Transaction commit ({}) redelivered({}) for {})", transactionKey, redelivered, ids);
}
public void doLog(String redelivered, String ids, Throwable e, boolean rollbackOnly, Level level) {
if (rollbackOnly) {
LOG.atLevel(level).log("Transaction rollback ({}) redelivered({}) for {} due exchange was marked for rollbackOnly",
transactionKey, redelivered, ids);
} else {
LOG.atLevel(level).log("Transaction rollback ({}) redelivered({}) for {} caught: {}",
transactionKey, redelivered, ids, e.getMessage());
}
}
/**
* Logs the transaction rollback.
*/
private void logTransactionRollback(String redelivered, String ids, Throwable e, boolean rollbackOnly) {
if (rollbackLoggingLevel != LoggingLevel.OFF) {
if (rollbackLoggingLevel == LoggingLevel.ERROR && LOG.isErrorEnabled()) {
doLog(redelivered, ids, e, rollbackOnly, Level.ERROR);
} else if (rollbackLoggingLevel == LoggingLevel.WARN && LOG.isWarnEnabled()) {
doLog(redelivered, ids, e, rollbackOnly, Level.WARN);
} else if (rollbackLoggingLevel == LoggingLevel.INFO && LOG.isInfoEnabled()) {
doLog(redelivered, ids, e, rollbackOnly, Level.INFO);
} else if (rollbackLoggingLevel == LoggingLevel.DEBUG && LOG.isDebugEnabled()) {
doLog(redelivered, ids, e, rollbackOnly, Level.DEBUG);
} else if (rollbackLoggingLevel == LoggingLevel.TRACE && LOG.isTraceEnabled()) {
doLog(redelivered, ids, e, rollbackOnly, Level.TRACE);
}
}
}
@Override
public Processor getOutput() {
return output;
}
@Override
protected void doStart() throws Exception {
ServiceHelper.startService(output);
preparingShutdown = false;
}
@Override
protected void doStop() throws Exception {
// noop, do not stop any services which we only do when shutting down
// as the error handler can be context scoped, and should not stop in
// case a route stops
}
@Override
protected void doShutdown() throws Exception {
ServiceHelper.stopAndShutdownServices(output);
}
@Override
public boolean supportTransacted() {
return true;
}
@Override
public boolean hasNext() {
return output != null;
}
@Override
public List<Processor> next() {
if (!hasNext()) {
return null;
}
List<Processor> answer = new ArrayList<>(1);
answer.add(output);
return answer;
}
@Override
public void prepareShutdown(boolean suspendOnly, boolean forced) {
// prepare for shutdown, eg do not allow redelivery if configured
LOG.trace("Prepare shutdown on error handler {}", this);
preparingShutdown = true;
}
}
| TransactionErrorHandler |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/PagerUtilsTest_Count_PG_0.java | {
"start": 163,
"end": 2977
} | class ____ extends TestCase {
public void test_pg_0() throws Exception {
String sql = "select * from t";
String result = PagerUtils.count(sql, JdbcConstants.POSTGRESQL);
assertEquals("SELECT COUNT(*)\n" +
"FROM t", result);
}
public void test_pg_1() throws Exception {
String sql = "select id, name from t";
String result = PagerUtils.count(sql, JdbcConstants.POSTGRESQL);
assertEquals("SELECT COUNT(*)\n" +
"FROM t", result);
}
public void test_pg_2() throws Exception {
String sql = "select id, name from t order by id";
String result = PagerUtils.count(sql, JdbcConstants.POSTGRESQL);
assertEquals("SELECT COUNT(*)\n" +
"FROM t", result);
}
public void test_pg_3() throws Exception {
String sql = "select * from test where shape.STIntersects(geometry::STGeomFromText('POLYGON ((86610.054 86610.054,112372.95799999963 88785.5940000005,112372.91199999955 88675.996999999508,86610.054 86610.054))',0))=1;";
String countSql = PagerUtils.count(sql, JdbcConstants.POSTGRESQL);
assertEquals("SELECT COUNT(*)\n" +
"FROM test\n" +
"WHERE shape.STIntersects(geometry::STGeomFromText('POLYGON ((86610.054 86610.054,112372.95799999963 88785.5940000005,112372.91199999955 88675.996999999508,86610.054 86610.054))', 0)) = 1", countSql);
String limitSql = PagerUtils.limit(sql, JdbcConstants.POSTGRESQL, 100, 10);
assertEquals("SELECT *\n" +
"FROM test\n" +
"WHERE shape.STIntersects(geometry::STGeomFromText('POLYGON ((86610.054 86610.054,112372.95799999963 88785.5940000005,112372.91199999955 88675.996999999508,86610.054 86610.054))', 0)) = 1\n" +
"LIMIT 10 OFFSET 100", limitSql);
}
public void test_pg_group_0() throws Exception {
String sql = "select type, count(*) from t group by type";
String result = PagerUtils.count(sql, JdbcConstants.POSTGRESQL);
assertEquals("SELECT COUNT(*)\n" +
"FROM (\n" +
"\tSELECT type, count(*)\n" +
"\tFROM t\n" +
"\tGROUP BY type\n" +
") ALIAS_COUNT", result);
}
public void test_pg_union_0() throws Exception {
String sql = "select id, name from t1 union select id, name from t2 order by id";
String result = PagerUtils.count(sql, JdbcConstants.POSTGRESQL);
assertEquals("SELECT COUNT(*)\n" +
"FROM (\n" +
"\tSELECT id, name\n" +
"\tFROM t1\n" +
"\tUNION\n" +
"\tSELECT id, name\n" +
"\tFROM t2\n" +
") ALIAS_COUNT", result);
}
}
| PagerUtilsTest_Count_PG_0 |
java | elastic__elasticsearch | x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPutTransformAction.java | {
"start": 3067,
"end": 9772
} | class ____ extends AcknowledgedTransportMasterNodeAction<Request> {
private static final Logger logger = LogManager.getLogger(TransportPutTransformAction.class);
private final Settings settings;
private final IndexNameExpressionResolver indexNameExpressionResolver;
private final Client client;
private final TransformConfigManager transformConfigManager;
private final SecurityContext securityContext;
private final TransformAuditor auditor;
private final TransformConfigAutoMigration transformConfigAutoMigration;
private final ProjectResolver projectResolver;
@Inject
public TransportPutTransformAction(
Settings settings,
TransportService transportService,
ThreadPool threadPool,
ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
ClusterService clusterService,
TransformServices transformServices,
Client client,
TransformConfigAutoMigration transformConfigAutoMigration,
ProjectResolver projectResolver
) {
super(
PutTransformAction.NAME,
transportService,
clusterService,
threadPool,
actionFilters,
PutTransformAction.Request::new,
EsExecutors.DIRECT_EXECUTOR_SERVICE
);
this.settings = settings;
this.indexNameExpressionResolver = indexNameExpressionResolver;
this.client = client;
this.transformConfigManager = transformServices.configManager();
this.securityContext = XPackSettings.SECURITY_ENABLED.get(settings)
? new SecurityContext(settings, threadPool.getThreadContext())
: null;
this.auditor = transformServices.auditor();
this.transformConfigAutoMigration = transformConfigAutoMigration;
this.projectResolver = projectResolver;
}
@Override
protected void masterOperation(Task task, Request request, ClusterState clusterState, ActionListener<AcknowledgedResponse> listener) {
XPackPlugin.checkReadyForXPackCustomMetadata(clusterState);
if (TransformMetadata.upgradeMode(clusterState)) {
listener.onFailure(
new ElasticsearchStatusException(
"Cannot create new Transform while the Transform feature is upgrading.",
RestStatus.CONFLICT
)
);
return;
}
TransformConfig config = request.getConfig().setCreateTime(Instant.now()).setVersion(TransformConfigVersion.CURRENT);
config.setHeaders(getSecurityHeadersPreferringSecondary(threadPool, securityContext, clusterState));
String transformId = config.getId();
// quick check whether a transform has already been created under that name
if (PersistentTasksCustomMetadata.getTaskWithId(clusterState, transformId) != null) {
listener.onFailure(
new ResourceAlreadyExistsException(TransformMessages.getMessage(TransformMessages.REST_PUT_TRANSFORM_EXISTS, transformId))
);
return;
}
// <3> Create the transform
ActionListener<ValidateTransformAction.Response> validateTransformListener = listener.delegateFailureAndWrap(
(l, unused) -> putTransform(request, l)
);
// <2> Validate source and destination indices
var parentTaskId = new TaskId(clusterService.localNode().getId(), task.getId());
ActionListener<Void> checkPrivilegesListener = validateTransformListener.delegateFailureAndWrap(
(l, aVoid) -> ClientHelper.executeAsyncWithOrigin(
new ParentTaskAssigningClient(client, parentTaskId),
ClientHelper.TRANSFORM_ORIGIN,
ValidateTransformAction.INSTANCE,
new ValidateTransformAction.Request(config, request.isDeferValidation(), request.ackTimeout()),
l
)
);
// <1> Early check to verify that the user can create the destination index and can read from the source
if (XPackSettings.SECURITY_ENABLED.get(settings)) {
TransformPrivilegeChecker.checkPrivileges(
"create",
settings,
securityContext,
indexNameExpressionResolver,
clusterState,
client,
config,
true,
ActionListener.wrap(
aVoid -> AuthorizationStatePersistenceUtils.persistAuthState(
settings,
transformConfigManager,
transformId,
AuthorizationState.green(),
checkPrivilegesListener
),
e -> {
if (request.isDeferValidation()) {
AuthorizationStatePersistenceUtils.persistAuthState(
settings,
transformConfigManager,
transformId,
AuthorizationState.red(e),
checkPrivilegesListener
);
} else {
checkPrivilegesListener.onFailure(e);
}
}
)
);
} else { // No security enabled, just move on
checkPrivilegesListener.onResponse(null);
}
}
@Override
protected ClusterBlockException checkBlock(PutTransformAction.Request request, ClusterState state) {
return state.blocks().globalBlockedException(projectResolver.getProjectId(), ClusterBlockLevel.METADATA_WRITE);
}
private void putTransform(Request request, ActionListener<AcknowledgedResponse> listener) {
var config = transformConfigAutoMigration.migrate(request.getConfig());
transformConfigManager.putTransformConfiguration(config, listener.delegateFailureAndWrap((l, unused) -> {
var transformId = config.getId();
logger.info("[{}] created transform", transformId);
auditor.info(transformId, "Created transform.");
var validationFunc = FunctionFactory.create(config);
TransformConfigLinter.getWarnings(validationFunc, config.getSource(), config.getSyncConfig()).forEach(warning -> {
logger.warn("[{}] {}", transformId, warning);
auditor.warning(transformId, warning);
});
l.onResponse(AcknowledgedResponse.TRUE);
}));
}
}
| TransportPutTransformAction |
java | apache__camel | catalog/camel-route-parser/src/test/java/org/apache/camel/parser/java/RoasterCSimpleRouteBuilderConfigureTest.java | {
"start": 1372,
"end": 3081
} | class ____ {
private static final Logger LOG = LoggerFactory.getLogger(RoasterCSimpleRouteBuilderConfigureTest.class);
@Test
void parse() throws Exception {
JavaClassSource clazz = (JavaClassSource) Roaster
.parse(new File("src/test/java/org/apache/camel/parser/java/MyCSimpleRouteBuilder.java"));
MethodSource<JavaClassSource> method = clazz.getMethod("configure");
List<ParserResult> list = CamelJavaParserHelper.parseCamelLanguageExpressions(method, "csimple");
for (ParserResult csimple : list) {
LOG.info("CSimple: {}", csimple.getElement());
LOG.info(" Line: {}", findLineNumber(csimple.getPosition()));
}
assertEquals("${body} > 99", list.get(0).getElement());
assertEquals(true, list.get(0).getPredicate());
assertEquals(27, findLineNumber(list.get(0).getPosition()));
assertEquals("${body} > 201", list.get(1).getElement());
assertEquals(true, list.get(1).getPredicate());
assertEquals(30, findLineNumber(list.get(1).getPosition()));
}
public static int findLineNumber(int pos) throws Exception {
int lines = 0;
int current = 0;
File file = new File("src/test/java/org/apache/camel/parser/java/MyCSimpleRouteBuilder.java");
try (BufferedReader br = new BufferedReader(new FileReader(file))) {
String line;
while ((line = br.readLine()) != null) {
lines++;
current += line.length();
if (current > pos) {
return lines;
}
}
}
return -1;
}
}
| RoasterCSimpleRouteBuilderConfigureTest |
java | quarkusio__quarkus | integration-tests/injectmock/src/test/java/io/quarkus/it/mockbean/WithoutMocksTest.java | {
"start": 214,
"end": 639
} | class ____ {
@Test
public void testGreet() {
given()
.when().get("/greeting")
.then()
.statusCode(200)
.body(is("HELLO"));
}
@Test
public void testDummy() {
given()
.when().get("/dummy")
.then()
.statusCode(200)
.body(is("first/second"));
}
}
| WithoutMocksTest |
java | apache__camel | components/camel-aws/camel-aws-config/src/main/java/org/apache/camel/component/aws/config/client/AWSConfigClientFactory.java | {
"start": 1378,
"end": 2314
} | class ____ {
private AWSConfigClientFactory() {
}
/**
* Return the correct AWS Config client (based on remote vs local).
*
* @param configuration configuration
* @return ConfigClient
*/
public static AWSConfigInternalClient getConfigClient(AWSConfigConfiguration configuration) {
if (Boolean.TRUE.equals(configuration.isUseDefaultCredentialsProvider())) {
return new AWSConfigClientIAMOptimizedImpl(configuration);
} else if (Boolean.TRUE.equals(configuration.isUseProfileCredentialsProvider())) {
return new AWSConfigClientIAMProfileOptimizedImpl(configuration);
} else if (Boolean.TRUE.equals(configuration.isUseSessionCredentials())) {
return new AWSConfigClientSessionTokenImpl(configuration);
} else {
return new AWSConfigClientStandardImpl(configuration);
}
}
}
| AWSConfigClientFactory |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/script/ReindexMetadataTests.java | {
"start": 554,
"end": 4790
} | class ____ extends ESTestCase {
private static final String INDEX = "myIndex";
private static final String ID = "myId";
private static final long VERSION = 5;
private static final String ROUTING = "myRouting";
private static final String OP = "index";
private static final long TIMESTAMP = 1_658_000_000_000L;
private ReindexMetadata metadata;
@Override
public void setUp() throws Exception {
super.setUp();
reset();
}
protected void reset() {
metadata = new ReindexMetadata(INDEX, ID, VERSION, ROUTING, OP, TIMESTAMP);
}
public void testIndex() {
assertFalse(metadata.indexChanged());
metadata.put("_index", INDEX);
assertFalse(metadata.indexChanged());
IllegalArgumentException err = expectThrows(IllegalArgumentException.class, () -> metadata.remove("_index"));
assertEquals("_index cannot be removed", err.getMessage());
err = expectThrows(IllegalArgumentException.class, () -> metadata.put("_index", null));
assertEquals("_index cannot be null", err.getMessage());
assertFalse(metadata.indexChanged());
metadata.put("_index", "myIndex2");
assertTrue(metadata.indexChanged());
metadata.put("_index", INDEX);
assertFalse(metadata.indexChanged());
metadata.setIndex("myIndex3");
assertTrue(metadata.indexChanged());
}
public void testId() {
assertFalse(metadata.idChanged());
metadata.put("_id", ID);
assertFalse(metadata.idChanged());
metadata.remove("_id");
assertTrue(metadata.idChanged());
assertNull(metadata.getId());
metadata.put("_id", "myId2");
assertTrue(metadata.idChanged());
metadata.setId(ID);
assertFalse(metadata.idChanged());
metadata.setId("myId3");
assertTrue(metadata.idChanged());
}
public void testRouting() {
assertFalse(metadata.routingChanged());
metadata.put("_routing", ROUTING);
assertFalse(metadata.routingChanged());
metadata.remove("_routing");
assertTrue(metadata.routingChanged());
assertNull(metadata.getRouting());
metadata.put("_routing", "myRouting2");
assertTrue(metadata.routingChanged());
metadata.setRouting(ROUTING);
assertFalse(metadata.routingChanged());
metadata.setRouting("myRouting3");
assertTrue(metadata.routingChanged());
}
public void testVersion() {
assertFalse(metadata.versionChanged());
metadata.put("_version", VERSION);
assertFalse(metadata.versionChanged());
metadata.remove("_version");
assertTrue(metadata.versionChanged());
assertTrue(metadata.isVersionInternal());
assertEquals(Long.MIN_VALUE, metadata.getVersion());
assertNull(metadata.get("_version"));
metadata.put("_version", VERSION + 5);
assertTrue(metadata.versionChanged());
metadata.setVersion(VERSION);
assertFalse(metadata.versionChanged());
metadata.setVersion(VERSION + 10);
assertTrue(metadata.versionChanged());
assertEquals(VERSION + 10, metadata.getVersion());
ReindexMetadata.setVersionToInternal(metadata);
assertTrue(metadata.isVersionInternal());
assertEquals(Long.MIN_VALUE, metadata.getVersion());
assertNull(metadata.get("_version"));
}
public void testOp() {
assertEquals("index", metadata.getOp());
assertEquals("index", metadata.get("op"));
metadata.setOp("noop");
assertEquals("noop", metadata.getOp());
assertEquals("noop", metadata.get("op"));
metadata.put("op", "delete");
assertEquals("delete", metadata.getOp());
IllegalArgumentException err = expectThrows(IllegalArgumentException.class, () -> metadata.setOp("bad"));
assertEquals("[op] must be one of delete, index, noop, not [bad]", err.getMessage());
err = expectThrows(IllegalArgumentException.class, () -> metadata.put("op", "malo"));
assertEquals("[op] must be one of delete, index, noop, not [malo]", err.getMessage());
}
}
| ReindexMetadataTests |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java | {
"start": 3336,
"end": 3490
} | class ____ {@link MultiMatchQueryParser} to build the text query around operators and {@link QueryParser}
* to assemble the result logically.
*/
public | uses |
java | micronaut-projects__micronaut-core | test-suite/src/test/java/io/micronaut/docs/ioc/injection/field/Vehicle.java | {
"start": 237,
"end": 319
} | class ____ {
void start() {
System.out.println("Vrooom!" );
}
}
| Engine |
java | elastic__elasticsearch | modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomMustacheFactory.java | {
"start": 14152,
"end": 15149
} | class ____ {
private String mediaType = DEFAULT_MEDIA_TYPE;
private boolean detectMissingParams = DEFAULT_DETECT_MISSING_PARAMS;
private Builder() {}
public Builder mediaType(String mediaType) {
this.mediaType = mediaType;
return this;
}
/**
* Sets the behavior for handling missing parameters during template execution.
*
* @param detectMissingParams If true, an exception is thrown when executing the template with missing parameters.
* If false, the template gracefully handles missing parameters without throwing an exception.
*/
public Builder detectMissingParams(boolean detectMissingParams) {
this.detectMissingParams = detectMissingParams;
return this;
}
public CustomMustacheFactory build() {
return new CustomMustacheFactory(mediaType, detectMissingParams);
}
}
}
| Builder |
java | redisson__redisson | redisson/src/main/java/org/redisson/api/RTransferQueue.java | {
"start": 828,
"end": 1042
} | interface ____<V> extends TransferQueue<V>, RBlockingQueue<V>, RTransferQueueAsync<V> {
/**
* Returns all queue elements at once
*
* @return elements
*/
List<V> readAll();
}
| RTransferQueue |
java | bumptech__glide | library/src/main/java/com/bumptech/glide/util/LruCache.java | {
"start": 6315,
"end": 6480
} | class ____<Y> {
final Y value;
final int size;
@Synthetic
Entry(Y value, int size) {
this.value = value;
this.size = size;
}
}
}
| Entry |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/TestingInputsLocationsRetriever.java | {
"start": 5511,
"end": 7614
} | class ____ {
private final Map<ExecutionVertexID, Collection<ConsumedPartitionGroup>>
vertexToConsumedPartitionGroups = new HashMap<>();
private final Map<IntermediateResultPartitionID, ExecutionVertexID> partitionToProducer =
new HashMap<>();
public Builder connectConsumerToProducer(
final ExecutionVertexID consumer, final ExecutionVertexID producer) {
return connectConsumerToProducers(consumer, Collections.singletonList(producer));
}
public Builder connectConsumerToProducers(
final ExecutionVertexID consumer, final List<ExecutionVertexID> producers) {
return connectConsumersToProducers(Collections.singletonList(consumer), producers);
}
public Builder connectConsumersToProducers(
final List<ExecutionVertexID> consumers, final List<ExecutionVertexID> producers) {
TestingSchedulingTopology.ConnectionResult connectionResult =
connectConsumersToProducersById(
consumers,
producers,
new IntermediateDataSetID(),
ResultPartitionType.PIPELINED);
for (int i = 0; i < producers.size(); i++) {
partitionToProducer.put(
connectionResult.getResultPartitions().get(i), producers.get(i));
}
for (ExecutionVertexID consumer : consumers) {
final Collection<ConsumedPartitionGroup> consumedPartitionGroups =
vertexToConsumedPartitionGroups.computeIfAbsent(
consumer, ignore -> new ArrayList<>());
consumedPartitionGroups.add(connectionResult.getConsumedPartitionGroup());
}
return this;
}
public TestingInputsLocationsRetriever build() {
return new TestingInputsLocationsRetriever(
vertexToConsumedPartitionGroups, partitionToProducer);
}
}
}
| Builder |
java | processing__processing4 | build/shared/tools/MovieMaker/src/ch/randelshofer/gui/datatransfer/CompositeTransferable.java | {
"start": 1304,
"end": 2781
} | class ____ the flavor.
*
* @param flavor the requested flavor for the data
* @see DataFlavor#getRepresentationClass
* @exception IOException if the data is no longer available
* in the requested flavor.
* @exception UnsupportedFlavorException if the requested data flavor is
* not supported.
*/
public Object getTransferData(DataFlavor flavor) throws UnsupportedFlavorException, IOException {
Transferable t = transferables.get(flavor);
if (t == null) throw new UnsupportedFlavorException(flavor);
return t.getTransferData(flavor);
}
/**
* Returns an array of DataFlavor objects indicating the flavors the data
* can be provided in. The array should be ordered according to preference
* for providing the data (from most richly descriptive to least descriptive).
* @return an array of data flavors in which this data can be transferred
*/
public DataFlavor[] getTransferDataFlavors() {
return flavors.toArray(new DataFlavor[transferables.size()]);
}
/**
* Returns whether or not the specified data flavor is supported for
* this object.
* @param flavor the requested flavor for the data
* @return boolean indicating wjether or not the data flavor is supported
*/
public boolean isDataFlavorSupported(DataFlavor flavor) {
return transferables.containsKey(flavor);
}
}
| of |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/rest/client/reactive/beanparam/BeanFormParamTest.java | {
"start": 698,
"end": 1312
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest TEST = new QuarkusUnitTest();
@TestHTTPResource
URI baseUri;
@Test
void shouldPassFormParamsFromBeanParam() {
assertThat(formTestClient().postFormParams(new BeanWithFormParams("value1", "value2", Param.SECOND)))
.isEqualTo(
"received value1-value2-2");
}
private FormTestClient formTestClient() {
return RestClientBuilder.newBuilder().baseUri(baseUri).register(ParamConverter.class).build(FormTestClient.class);
}
@Path("/form")
public | BeanFormParamTest |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/lock/LockInterceptorTest.java | {
"start": 3314,
"end": 4124
} | class ____ {
static CountDownLatch FIRST_INSIDE_LATCH;
static CountDownLatch MAY_COMPLETE_LATCH;
static AtomicInteger COMPLETED;
static void reset() {
FIRST_INSIDE_LATCH = new CountDownLatch(1);
MAY_COMPLETE_LATCH = new CountDownLatch(1);
COMPLETED = new AtomicInteger();
}
void ping(int idx) throws InterruptedException {
if (FIRST_INSIDE_LATCH.getCount() == 0 && COMPLETED.get() == 0) {
fail("Locked method invocation not finished yet");
}
FIRST_INSIDE_LATCH.countDown();
assertTrue(MAY_COMPLETE_LATCH.await(5, TimeUnit.SECONDS), MAY_COMPLETE_LATCH.toString());
COMPLETED.incrementAndGet();
}
}
@Lock
@Singleton
static | Ping |
java | elastic__elasticsearch | x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/ResultsPersisterService.java | {
"start": 12601,
"end": 13225
} | class ____ extends ElasticsearchStatusException {
IrrecoverableException(String msg, RestStatus status, Throwable cause, Object... args) {
super(msg, status, cause, args);
}
}
/**
* @param ex The exception to check
* @return true when the failure will persist no matter how many times we retry.
*/
private static boolean isIrrecoverable(Exception ex) {
Throwable t = ExceptionsHelper.unwrapCause(ex);
return IRRECOVERABLE_REST_STATUSES.contains(status(t));
}
@SuppressWarnings("NonAtomicOperationOnVolatileField")
static | IrrecoverableException |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/language/XPathRouteConcurrentBigTest.java | {
"start": 1069,
"end": 5697
} | class ____ extends ContextTestSupport {
private static final Logger LOG = LoggerFactory.getLogger(XPathRouteConcurrentBigTest.class);
private static final String XMLTEST1 = "<message><messageType>AAA</messageType><sender>0123456789101112131415</sender>"
+ "<rawData>Uyw7TSVkUMxUyw7TSgGUMQAyw7TSVkUMxUyA7TSgGUMQAyw7TSVkUMxUyA</rawData>"
+ "<sentDate>2009-10-12T12:22:02+02:00</sentDate> <receivedDate>2009-10-12T12:23:31.248+02:00</receivedDate>"
+ "<intproperty>1</intproperty><stringproperty>aaaaaaabbbbbbbccccccccdddddddd</stringproperty></message>";
private static final String XMLTEST2 = "<message><messageType>AAB</messageType><sender>0123456789101112131415</sender>"
+ "<rawData>Uyw7TSVkUMxUyw7TSgGUMQAyw7TSVkUMxUyA7TSgGUMQAyw7TSVkUMxUyA</rawData>"
+ "<sentDate>2009-10-12T12:22:02+02:00</sentDate> <receivedDate>2009-10-12T12:23:31.248+02:00</receivedDate>"
+ "<intproperty>1</intproperty><stringproperty>aaaaaaabbbbbbbccccccccdddddddd</stringproperty></message>";
private static final String XMLTEST3 = "<message><messageType>ZZZ</messageType><sender>0123456789101112131415</sender>"
+ "<rawData>Uyw7TSVkUMxUyw7TSgGUMQAyw7TSVkUMxUyA7TSgGUMQAyw7TSVkUMxUyA</rawData>"
+ "<sentDate>2009-10-12T12:22:02+02:00</sentDate> <receivedDate>2009-10-12T12:23:31.248+02:00</receivedDate>"
+ "<intproperty>1</intproperty><stringproperty>aaaaaaabbbbbbbccccccccdddddddd</stringproperty></message>";
@Test
public void testConcurrent() throws Exception {
doSendMessages(333);
}
private void doSendMessages(int messageCount) throws Exception {
LOG.info("Sending {} messages", messageCount);
int forResult = (messageCount * 2 / 3) + messageCount % 3;
int forOther = messageCount - forResult;
StopWatch watch = new StopWatch();
// give more time on slow servers
getMockEndpoint("mock:result").setResultWaitTime(30000);
getMockEndpoint("mock:other").setResultWaitTime(30000);
getMockEndpoint("mock:result").expectedMessageCount(forResult);
getMockEndpoint("mock:other").expectedMessageCount(forOther);
for (int i = 0; i < messageCount; i++) {
switch (i % 3) {
case 0:
template.sendBody("seda:foo", XMLTEST1);
break;
case 1:
template.sendBody("seda:foo", XMLTEST2);
break;
case 2:
template.sendBody("seda:foo", XMLTEST3);
break;
default:
break;
}
}
LOG.info("Sent {} messages in {} ms", messageCount, watch.taken());
assertMockEndpointsSatisfied();
LOG.info("Processed {} messages in {} ms", messageCount, watch.taken());
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("seda:foo?concurrentConsumers=50&size=250000").choice().when()
.xpath("//messageType = 'AAA' or " + "//messageType = 'AAB' or " + "//messageType = 'AAC' or "
+ "//messageType = 'AAD' or " + "//messageType = 'AAE' or "
+ "//messageType = 'AAF' or " + "//messageType = 'AAG' or " + "//messageType = 'AAH' or "
+ "//messageType = 'AAI' or " + "//messageType = 'AAJ' or "
+ "//messageType = 'AAK' or " + "//messageType = 'AAL' or " + "//messageType = 'AAM' or "
+ "//messageType = 'AAN' or " + "//messageType = 'AAO' or "
+ "//messageType = 'AAP' or " + "//messageType = 'AAQ' or " + "//messageType = 'AAR' or "
+ "//messageType = 'AAS' or " + "//messageType = 'AAT' or "
+ "//messageType = 'AAU' or " + "//messageType = 'AAV' or " + "//messageType = 'AAW' or "
+ "//messageType = 'AAX' or " + "//messageType = 'AAY'")
.to("mock:result").otherwise().to("mock:other").end();
}
};
}
}
| XPathRouteConcurrentBigTest |
java | google__guava | android/guava/src/com/google/common/collect/ImmutableRangeMap.java | {
"start": 1994,
"end": 4518
} | class ____<K extends Comparable<?>, V> implements RangeMap<K, V>, Serializable {
private static final ImmutableRangeMap<Comparable<?>, Object> EMPTY =
new ImmutableRangeMap<>(ImmutableList.of(), ImmutableList.of());
/**
* Returns a {@code Collector} that accumulates the input elements into a new {@code
* ImmutableRangeMap}. As in {@link Builder}, overlapping ranges are not permitted.
*
* @since 33.2.0 (available since 23.1 in guava-jre)
*/
@IgnoreJRERequirement // Users will use this only if they're already using streams.
public static <T extends @Nullable Object, K extends Comparable<? super K>, V>
Collector<T, ?, ImmutableRangeMap<K, V>> toImmutableRangeMap(
Function<? super T, Range<K>> keyFunction,
Function<? super T, ? extends V> valueFunction) {
return CollectCollectors.toImmutableRangeMap(keyFunction, valueFunction);
}
/**
* Returns an empty immutable range map.
*
* <p><b>Performance note:</b> the instance returned is a singleton.
*/
@SuppressWarnings("unchecked")
public static <K extends Comparable<?>, V> ImmutableRangeMap<K, V> of() {
return (ImmutableRangeMap<K, V>) EMPTY;
}
/** Returns an immutable range map mapping a single range to a single value. */
public static <K extends Comparable<?>, V> ImmutableRangeMap<K, V> of(Range<K> range, V value) {
return new ImmutableRangeMap<>(ImmutableList.of(range), ImmutableList.of(value));
}
@SuppressWarnings("unchecked")
public static <K extends Comparable<?>, V> ImmutableRangeMap<K, V> copyOf(
RangeMap<K, ? extends V> rangeMap) {
if (rangeMap instanceof ImmutableRangeMap) {
return (ImmutableRangeMap<K, V>) rangeMap;
}
Map<Range<K>, ? extends V> map = rangeMap.asMapOfRanges();
ImmutableList.Builder<Range<K>> rangesBuilder = new ImmutableList.Builder<>(map.size());
ImmutableList.Builder<V> valuesBuilder = new ImmutableList.Builder<>(map.size());
for (Entry<Range<K>, ? extends V> entry : map.entrySet()) {
rangesBuilder.add(entry.getKey());
valuesBuilder.add(entry.getValue());
}
return new ImmutableRangeMap<>(rangesBuilder.build(), valuesBuilder.build());
}
/** Returns a new builder for an immutable range map. */
public static <K extends Comparable<?>, V> Builder<K, V> builder() {
return new Builder<>();
}
/**
* A builder for immutable range maps. Overlapping ranges are prohibited.
*
* @since 14.0
*/
@DoNotMock
public static final | ImmutableRangeMap |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/test/state/SavepointStateBackendSwitchTestBase.java | {
"start": 3290,
"end": 13906
} | class ____ {
private static final KeyGroupRange KEY_GROUP_RANGE = new KeyGroupRange(0, 1);
private static final int NUM_KEY_GROUPS = KEY_GROUP_RANGE.getNumberOfKeyGroups();
@ClassRule public static TemporaryFolder tempFolder = new TemporaryFolder();
private final BackendSwitchSpec fromBackend;
private final BackendSwitchSpec toBackend;
protected SavepointStateBackendSwitchTestBase(
BackendSwitchSpec fromBackend, BackendSwitchSpec toBackend) {
this.fromBackend = fromBackend;
this.toBackend = toBackend;
}
@Test
public void switchStateBackend() throws Exception {
final File pathToWrite = tempFolder.newFile();
final MapStateDescriptor<Long, Long> mapStateDescriptor =
new MapStateDescriptor<>("my-map-state", Long.class, Long.class);
mapStateDescriptor.initializeSerializerUnlessSet(new ExecutionConfig());
final ValueStateDescriptor<Long> valueStateDescriptor =
new ValueStateDescriptor<>("my-value-state", Long.class);
valueStateDescriptor.initializeSerializerUnlessSet(new ExecutionConfig());
final ListStateDescriptor<Long> listStateDescriptor =
new ListStateDescriptor<>("my-list-state", Long.class);
listStateDescriptor.initializeSerializerUnlessSet(new ExecutionConfig());
final Integer namespace1 = 1;
final Integer namespace2 = 2;
final Integer namespace3 = 3;
final Integer namespace4 = 4;
try (final CheckpointableKeyedStateBackend<String> keyedBackend =
fromBackend.createBackend(
KEY_GROUP_RANGE, NUM_KEY_GROUPS, Collections.emptyList())) {
takeSavepoint(
keyedBackend,
pathToWrite,
mapStateDescriptor,
valueStateDescriptor,
listStateDescriptor,
namespace1,
namespace2,
namespace3,
namespace4);
}
final SnapshotResult<KeyedStateHandle> stateHandles;
try (BufferedInputStream bis =
new BufferedInputStream((new FileInputStream(pathToWrite)))) {
stateHandles =
InstantiationUtil.deserializeObject(
bis, Thread.currentThread().getContextClassLoader());
}
final KeyedStateHandle stateHandle = stateHandles.getJobManagerOwnedSnapshot();
try (final CheckpointableKeyedStateBackend<String> keyedBackend =
toBackend.createBackend(
KEY_GROUP_RANGE,
NUM_KEY_GROUPS,
StateObjectCollection.singleton(stateHandle))) {
verifyRestoredState(
mapStateDescriptor,
valueStateDescriptor,
listStateDescriptor,
namespace1,
namespace2,
namespace3,
namespace4,
keyedBackend);
}
}
private <K, N, UK, UV> int getStateSize(InternalMapState<K, N, UK, UV> mapState)
throws Exception {
int i = 0;
Iterator<Map.Entry<UK, UV>> itt = mapState.iterator();
while (itt.hasNext()) {
i++;
itt.next();
}
return i;
}
private void takeSavepoint(
CheckpointableKeyedStateBackend<String> keyedBackend,
File pathToWrite,
MapStateDescriptor<Long, Long> stateDescr,
ValueStateDescriptor<Long> valueStateDescriptor,
ListStateDescriptor<Long> listStateDescriptor,
Integer namespace1,
Integer namespace2,
Integer namespace3,
Integer namespace4)
throws Exception {
InternalMapState<String, Integer, Long, Long> mapState =
keyedBackend.createOrUpdateInternalState(IntSerializer.INSTANCE, stateDescr);
InternalValueState<String, Integer, Long> valueState =
keyedBackend.createOrUpdateInternalState(
IntSerializer.INSTANCE, valueStateDescriptor);
InternalListState<String, Integer, Long> listState =
keyedBackend.createOrUpdateInternalState(
IntSerializer.INSTANCE, listStateDescriptor);
keyedBackend.setCurrentKey("abc");
mapState.setCurrentNamespace(namespace1);
mapState.put(33L, 33L);
mapState.put(55L, 55L);
mapState.setCurrentNamespace(namespace2);
mapState.put(22L, 22L);
mapState.put(11L, 11L);
listState.setCurrentNamespace(namespace2);
listState.add(4L);
listState.add(5L);
listState.add(6L);
mapState.setCurrentNamespace(namespace3);
mapState.put(44L, 44L);
keyedBackend.setCurrentKey("mno");
mapState.setCurrentNamespace(namespace3);
mapState.put(11L, 11L);
mapState.put(22L, 22L);
mapState.put(33L, 33L);
mapState.put(44L, 44L);
mapState.put(55L, 55L);
valueState.setCurrentNamespace(namespace3);
valueState.update(1239L);
listState.setCurrentNamespace(namespace3);
listState.add(1L);
listState.add(2L);
listState.add(3L);
mapState.setCurrentNamespace(namespace4);
mapState.put(1L, 1L);
// HEAP state backend will keep an empty map as an entry in the underlying State Table
// we should skip such entries when serializing
Iterator<Map.Entry<Long, Long>> iterator = mapState.iterator();
while (iterator.hasNext()) {
iterator.next();
iterator.remove();
}
KeyGroupedInternalPriorityQueue<TimerHeapInternalTimer<String, Integer>> priorityQueue =
keyedBackend.create(
"event-time",
new TimerSerializer<>(
keyedBackend.getKeySerializer(), IntSerializer.INSTANCE));
priorityQueue.add(new TimerHeapInternalTimer<>(1234L, "mno", namespace3));
priorityQueue.add(new TimerHeapInternalTimer<>(2345L, "mno", namespace2));
priorityQueue.add(new TimerHeapInternalTimer<>(3456L, "mno", namespace3));
SnapshotStrategyRunner<KeyedStateHandle, ? extends FullSnapshotResources<?>>
savepointRunner =
StreamOperatorStateHandler.prepareCanonicalSavepoint(
keyedBackend, new CloseableRegistry());
RunnableFuture<SnapshotResult<KeyedStateHandle>> snapshot =
savepointRunner.snapshot(
0L,
0L,
new MemCheckpointStreamFactory(4 * 1024 * 1024),
new CheckpointOptions(
SavepointType.savepoint(SavepointFormatType.CANONICAL),
CheckpointStorageLocationReference.getDefault()));
snapshot.run();
try (BufferedOutputStream bis =
new BufferedOutputStream(new FileOutputStream(pathToWrite))) {
InstantiationUtil.serializeObject(bis, snapshot.get());
}
}
private void verifyRestoredState(
MapStateDescriptor<Long, Long> mapStateDescriptor,
ValueStateDescriptor<Long> valueStateDescriptor,
ListStateDescriptor<Long> listStateDescriptor,
Integer namespace1,
Integer namespace2,
Integer namespace3,
Integer namespace4,
CheckpointableKeyedStateBackend<String> keyedBackend)
throws Exception {
InternalMapState<String, Integer, Long, Long> mapState =
keyedBackend.createOrUpdateInternalState(
IntSerializer.INSTANCE, mapStateDescriptor);
InternalValueState<String, Integer, Long> valueState =
keyedBackend.createOrUpdateInternalState(
IntSerializer.INSTANCE, valueStateDescriptor);
InternalListState<String, Integer, Long> listState =
keyedBackend.createOrUpdateInternalState(
IntSerializer.INSTANCE, listStateDescriptor);
keyedBackend.setCurrentKey("abc");
mapState.setCurrentNamespace(namespace1);
assertEquals(33L, (long) mapState.get(33L));
assertEquals(55L, (long) mapState.get(55L));
assertEquals(2, getStateSize(mapState));
mapState.setCurrentNamespace(namespace2);
assertEquals(22L, (long) mapState.get(22L));
assertEquals(11L, (long) mapState.get(11L));
assertEquals(2, getStateSize(mapState));
listState.setCurrentNamespace(namespace2);
assertThat(listState.get(), contains(4L, 5L, 6L));
mapState.setCurrentNamespace(namespace3);
assertEquals(44L, (long) mapState.get(44L));
assertEquals(1, getStateSize(mapState));
keyedBackend.setCurrentKey("mno");
mapState.setCurrentNamespace(namespace3);
assertEquals(11L, (long) mapState.get(11L));
assertEquals(22L, (long) mapState.get(22L));
assertEquals(33L, (long) mapState.get(33L));
assertEquals(44L, (long) mapState.get(44L));
assertEquals(55L, (long) mapState.get(55L));
assertEquals(5, getStateSize(mapState));
valueState.setCurrentNamespace(namespace3);
assertEquals(1239L, (long) valueState.value());
listState.setCurrentNamespace(namespace3);
assertThat(listState.get(), contains(1L, 2L, 3L));
mapState.setCurrentNamespace(namespace4);
assertThat(mapState.isEmpty(), is(true));
KeyGroupedInternalPriorityQueue<TimerHeapInternalTimer<String, Integer>> priorityQueue =
keyedBackend.create(
"event-time",
new TimerSerializer<>(
keyedBackend.getKeySerializer(), IntSerializer.INSTANCE));
assertThat(priorityQueue.size(), equalTo(3));
assertThat(
priorityQueue.poll(),
equalTo(new TimerHeapInternalTimer<>(1234L, "mno", namespace3)));
assertThat(
priorityQueue.poll(),
equalTo(new TimerHeapInternalTimer<>(2345L, "mno", namespace2)));
assertThat(
priorityQueue.poll(),
equalTo(new TimerHeapInternalTimer<>(3456L, "mno", namespace3)));
}
}
| SavepointStateBackendSwitchTestBase |
java | apache__logging-log4j2 | log4j-1.2-api/src/main/java/org/apache/log4j/builders/layout/TTCCLayoutBuilder.java | {
"start": 1712,
"end": 5943
} | class ____ extends AbstractBuilder<Layout> implements LayoutBuilder {
private static final String THREAD_PRINTING_PARAM = "ThreadPrinting";
private static final String CATEGORY_PREFIXING_PARAM = "CategoryPrefixing";
private static final String CONTEXT_PRINTING_PARAM = "ContextPrinting";
private static final String DATE_FORMAT_PARAM = "DateFormat";
private static final String TIMEZONE_FORMAT = "TimeZone";
public TTCCLayoutBuilder() {}
public TTCCLayoutBuilder(final String prefix, final Properties props) {
super(prefix, props);
}
@Override
public Layout parse(final Element layoutElement, final XmlConfiguration config) {
final AtomicBoolean threadPrinting = new AtomicBoolean(Boolean.TRUE);
final AtomicBoolean categoryPrefixing = new AtomicBoolean(Boolean.TRUE);
final AtomicBoolean contextPrinting = new AtomicBoolean(Boolean.TRUE);
final AtomicReference<String> dateFormat = new AtomicReference<>(RELATIVE);
final AtomicReference<String> timezone = new AtomicReference<>();
forEachElement(layoutElement.getElementsByTagName("param"), currentElement -> {
if (currentElement.getTagName().equals(PARAM_TAG)) {
switch (getNameAttributeKey(currentElement)) {
case THREAD_PRINTING_PARAM:
threadPrinting.set(getBooleanValueAttribute(currentElement));
break;
case CATEGORY_PREFIXING_PARAM:
categoryPrefixing.set(getBooleanValueAttribute(currentElement));
break;
case CONTEXT_PRINTING_PARAM:
contextPrinting.set(getBooleanValueAttribute(currentElement));
break;
case DATE_FORMAT_PARAM:
dateFormat.set(getValueAttribute(currentElement));
break;
case TIMEZONE_FORMAT:
timezone.set(getValueAttribute(currentElement));
break;
}
}
});
return createLayout(
threadPrinting.get(),
categoryPrefixing.get(),
contextPrinting.get(),
dateFormat.get(),
timezone.get(),
config);
}
@Override
public Layout parse(final PropertiesConfiguration config) {
final boolean threadPrinting = getBooleanProperty(THREAD_PRINTING_PARAM, true);
final boolean categoryPrefixing = getBooleanProperty(CATEGORY_PREFIXING_PARAM, true);
final boolean contextPrinting = getBooleanProperty(CONTEXT_PRINTING_PARAM, true);
final String dateFormat = getProperty(DATE_FORMAT_PARAM, RELATIVE);
final String timezone = getProperty(TIMEZONE_FORMAT);
return createLayout(threadPrinting, categoryPrefixing, contextPrinting, dateFormat, timezone, config);
}
private Layout createLayout(
final boolean threadPrinting,
final boolean categoryPrefixing,
final boolean contextPrinting,
final String dateFormat,
final String timezone,
final Log4j1Configuration config) {
final StringBuilder sb = new StringBuilder();
if (dateFormat != null) {
if (RELATIVE.equalsIgnoreCase(dateFormat)) {
sb.append("%r ");
} else if (!NULL.equalsIgnoreCase(dateFormat)) {
sb.append("%d{").append(dateFormat).append("}");
if (timezone != null) {
sb.append("{").append(timezone).append("}");
}
sb.append(" ");
}
}
if (threadPrinting) {
sb.append("[%t] ");
}
sb.append("%p ");
if (categoryPrefixing) {
sb.append("%c ");
}
if (contextPrinting) {
sb.append("%notEmpty{%ndc }");
}
sb.append("- %m%n");
return LayoutWrapper.adapt(PatternLayout.newBuilder()
.setPattern(sb.toString())
.setConfiguration(config)
.build());
}
}
| TTCCLayoutBuilder |
java | processing__processing4 | app/src/processing/app/syntax/JEditTextArea.java | {
"start": 68424,
"end": 73748
} | class ____ extends MouseAdapter {
public void mousePressed(MouseEvent event) {
// try {
// requestFocus();
// // Focus events not fired sometimes?
// setCaretVisible(true);
// focusedComponent = JEditTextArea.this;
// Here be dragons: for release 0195, this fixes a problem where the
// line segment data from the previous window was being used for
// selections, causing an exception when the window you're clicking to
// was not full of text. Simply ignoring clicks when not focused fixes
// the problem, though it's not clear why the wrong Document data was
// being using regardless of the focusedComponent.
// if (focusedComponent != JEditTextArea.this) return;
if (!hasFocus()) {
// System.out.println("requesting focus in window");
// The following condition check fixes #3649 [manindra, 08/20/15]
if(!requestFocusInWindow()) {
return;
}
}
// isPopupTrigger() is handled differently across platforms,
// so it may fire during release, or during the press.
// http://docs.oracle.com/javase/7/docs/api/java/awt/event/MouseEvent.html#isPopupTrigger()
// However, we have to exit out of this method if it's a right-click
// anyway, because otherwise it'll de-select the current word.
// As a result, better to just check for BUTTON3 now, indicating that
// isPopupTrigger() is going to fire on the release anyway.
boolean windowsRightClick =
Platform.isWindows() && (event.getButton() == MouseEvent.BUTTON3);
if ((event.isPopupTrigger() || windowsRightClick) && (popup != null)) {
// // Windows fires the popup trigger on release (see mouseReleased() below)(
// if (!Base.isWindows()) {
// if (event.isPopupTrigger() && (popup != null)) {
// If user right-clicked inside the selection, preserve it;
// move caret to click offset otherwise
int offset = xyToOffset(event.getX(), event.getY());
int selectionStart = getSelectionStart();
int selectionStop = getSelectionStop();
if (offset < selectionStart || offset >= selectionStop) {
select(offset, offset);
}
popup.show(painter, event.getX(), event.getY());
return;
// }
}
int line = yToLine(event.getY());
int offset = xToOffset(line, event.getX());
int dot = getLineStartOffset(line) + offset;
selectLine = false;
selectWord = false;
switch (event.getClickCount()) {
case 1:
doSingleClick(event,line,offset,dot);
break;
case 2:
// It uses the bracket matching stuff, so it can throw a BLE
try {
doDoubleClick(event, line, offset, dot);
} catch (BadLocationException bl) {
bl.printStackTrace();
}
break;
case 3:
doTripleClick(event,line,offset,dot);
break;
}
// } catch (ArrayIndexOutOfBoundsException aioobe) {
// aioobe.printStackTrace();
// int line = yToLine(evt.getY());
// System.out.println("line is " + line + ", line count is " + getLineCount());
// }
}
/*
// Because isPopupTrigger() is handled differently across platforms,
// it may fire during release, or during the press.
// http://docs.oracle.com/javase/7/docs/api/java/awt/event/MouseEvent.html#isPopupTrigger()
public void mouseReleased(MouseEvent event) {
if (event.isPopupTrigger() && (popup != null)) {
popup.show(painter, event.getX(), event.getY());
}
}
*/
private void doSingleClick(MouseEvent evt, int line, int offset, int dot) {
if (evt.isShiftDown()) {
select(getMarkPosition(),dot);
} else {
setCaretPosition(dot);
}
}
private void doDoubleClick(MouseEvent evt, int line, int offset,
int dot) throws BadLocationException {
// Ignore empty lines
if (getLineLength(line) != 0) {
try {
String text = document.getText(0, document.getLength());
int bracket = bracketHelper.findMatchingBracket(text, Math.max(0, dot - 1));
if (bracket != -1) {
int mark = getMarkPosition();
// Hack
if (bracket > mark) {
bracket++;
mark--;
}
select(mark,bracket);
return;
}
} catch(BadLocationException bl) {
bl.printStackTrace();
}
setNewSelectionWord( line, offset );
select(newSelectionStart,newSelectionEnd);
selectWord = true;
selectionAncorStart = selectionStart;
selectionAncorEnd = selectionEnd;
/*
String lineText = getLineText(line);
String noWordSep = (String)document.getProperty("noWordSep");
int wordStart = TextUtilities.findWordStart(lineText,offset,noWordSep);
int wordEnd = TextUtilities.findWordEnd(lineText,offset,noWordSep);
int lineStart = getLineStartOffset(line);
select(lineStart + wordStart,lineStart + wordEnd);
*/
}
}
private void doTripleClick(MouseEvent evt, int line, int offset, int dot) {
selectLine(line);
}
}
| MouseHandler |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/superclass/auditoverride/AuditOverrideAuditJoinTableTest.java | {
"start": 9937,
"end": 10400
} | class ____ extends NonAuditedSuperClass {
@Column(name = "val")
private String value;
@NotAudited
private String notAuditedValue;
public String getValue() {
return value;
}
public void setValue(String value) {
this.value = value;
}
public String getNotAuditedValue() {
return notAuditedValue;
}
public void setNotAuditedValue(String notAuditedValue) {
this.notAuditedValue = notAuditedValue;
}
}
}
| OtherAuditParentsAuditEntity |
java | micronaut-projects__micronaut-core | http-client-core/src/main/java/io/micronaut/http/client/RawHttpClient.java | {
"start": 1310,
"end": 3693
} | interface ____ extends Closeable {
/**
* Send a raw request.
*
* @param request The request metadata (method, URI, headers). The
* {@link HttpRequest#getBody() body} of this object is ignored
* @param requestBody The request body bytes. {@code null} is equivalent to an empty body.
* The ownership of the body immediately transfers to the client, i.e. the
* client will always call {@link CloseableByteBody#close()} on the body
* even if there is an error before the request is sent.
* @param blockedThread The thread that is blocked waiting for this request. This is used for
* deadlock detection. Optional parameter.
* @return A mono that will contain the response to this request. This response will
* <i>usually</i> be a {@link ByteBodyHttpResponse}, unless a filter replaced it.
*/
@NonNull
@SingleResult
Publisher<? extends HttpResponse<?>> exchange(@NonNull HttpRequest<?> request, @Nullable CloseableByteBody requestBody, @Nullable Thread blockedThread);
/**
* Create a new {@link RawHttpClient}.
* Note that this method should only be used outside the context of a Micronaut application.
* The returned {@link RawHttpClient} is not subject to dependency injection.
* The creator is responsible for closing the client to avoid leaking connections.
* Within a Micronaut application use {@link jakarta.inject.Inject} to inject a client instead.
*
* @param url The base URL
* @return The client
*/
static RawHttpClient create(@Nullable URI url) {
return RawHttpClientFactoryResolver.getFactory().createRawClient(url);
}
/**
* Create a new {@link RawHttpClient} with the specified configuration. Note that this method should only be used
* outside the context of an application. Within Micronaut use {@link jakarta.inject.Inject} to inject a client instead
*
* @param url The base URL
* @param configuration the client configuration
* @return The client
*/
static RawHttpClient create(@Nullable URI url, @NonNull HttpClientConfiguration configuration) {
return RawHttpClientFactoryResolver.getFactory().createRawClient(url, configuration);
}
}
| RawHttpClient |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/StaticAssignmentOfThrowableTest.java | {
"start": 4657,
"end": 5367
} | class ____ {
static Throwable foo;
public Test(int a) {}
void foo(int a) {
java.util.Arrays.asList().stream()
.map(
x -> {
// BUG: Diagnostic contains: [StaticAssignmentOfThrowable]
foo = new NullPointerException("assign");
return a;
})
.count();
}
}
""")
.doTest();
}
@Test
public void staticWithThrowableInLambdaInLambdaInMethod_error() {
helper
.addSourceLines(
"Test.java",
"""
| Test |
java | apache__camel | components/camel-google/camel-google-mail/src/generated/java/org/apache/camel/component/google/mail/GmailUsersHistoryEndpointConfiguration.java | {
"start": 851,
"end": 3216
} | class ____ extends GoogleMailConfiguration {
@UriParam
@ApiParam(optional = true, apiMethods = {@ApiMethod(methodName = "list", description="History types to be returned by the function")})
private java.util.List historyTypes;
@UriParam
@ApiParam(optional = true, apiMethods = {@ApiMethod(methodName = "list", description="Only return messages with a label matching the ID")})
private java.lang.String labelId;
@UriParam
@ApiParam(optional = true, apiMethods = {@ApiMethod(methodName = "list", description="Maximum number of history records to return")})
private java.lang.Long maxResults;
@UriParam
@ApiParam(optional = true, apiMethods = {@ApiMethod(methodName = "list", description="Page token to retrieve a specific page of results in the list")})
private java.lang.String pageToken;
@UriParam
@ApiParam(optional = true, apiMethods = {@ApiMethod(methodName = "list", description="Required")})
private java.math.BigInteger startHistoryId;
@UriParam
@ApiParam(optional = false, apiMethods = {@ApiMethod(methodName = "list", description="The user's email address. The special value me can be used to indicate the authenticated user. default: me")})
private String userId;
public java.util.List getHistoryTypes() {
return historyTypes;
}
public void setHistoryTypes(java.util.List historyTypes) {
this.historyTypes = historyTypes;
}
public java.lang.String getLabelId() {
return labelId;
}
public void setLabelId(java.lang.String labelId) {
this.labelId = labelId;
}
public java.lang.Long getMaxResults() {
return maxResults;
}
public void setMaxResults(java.lang.Long maxResults) {
this.maxResults = maxResults;
}
public java.lang.String getPageToken() {
return pageToken;
}
public void setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
}
public java.math.BigInteger getStartHistoryId() {
return startHistoryId;
}
public void setStartHistoryId(java.math.BigInteger startHistoryId) {
this.startHistoryId = startHistoryId;
}
public String getUserId() {
return userId;
}
public void setUserId(String userId) {
this.userId = userId;
}
}
| GmailUsersHistoryEndpointConfiguration |
java | elastic__elasticsearch | x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/CurrentExecutions.java | {
"start": 2813,
"end": 4582
} | class ____ accepting new executions and throws and exception instead.
* In addition it waits for a certain amount of time for current executions to finish before returning
*
* @param maxStopTimeout The maximum wait time to wait to current executions to finish
* @param stoppedListener The listener that will set Watcher state to: {@link WatcherState#STOPPED}, may be a no-op assuming the
* {@link WatcherState#STOPPED} is set elsewhere or not needed to be set.
*/
void sealAndAwaitEmpty(TimeValue maxStopTimeout, Runnable stoppedListener) {
assert stoppedListener != null;
lock.lock();
// We may have current executions still going on.
// We should try to wait for the current executions to have completed.
// Otherwise we can run into a situation where we didn't delete the watch from the .triggered_watches index,
// but did insert into the history index. Upon start this can lead to DocumentAlreadyExistsException,
// because we already stored the history record during shutdown...
// (we always first store the watch record and then remove the triggered watch)
try {
seal.set(true);
while (currentExecutions.size() > 0) {
empty.await(maxStopTimeout.millis(), TimeUnit.MILLISECONDS);
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
} finally {
// fully stop Watcher after all executions are finished
stoppedListener.run();
lock.unlock();
}
}
@Override
public Iterator<ExecutionService.WatchExecution> iterator() {
return currentExecutions.values().iterator();
}
}
| stop |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/CompileTimeConstantCheckerTest.java | {
"start": 26679,
"end": 27280
} | class ____ {
@CompileTimeConstant
final String s =
// BUG: Diagnostic contains:
switch (1) {
case 1 -> "a";
case 2 -> toString();
default -> "c";
};
}
""")
.doTest();
}
@Test
public void switchExpression_onlyConsiderReturningBranches() {
compilationHelper
.addSourceLines(
"Test.java",
"""
import com.google.errorprone.annotations.CompileTimeConstant;
public | Test |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/SoftAssertions_wasSuccess_Test.java | {
"start": 869,
"end": 3647
} | class ____ extends BaseAssertionsTest {
private SoftAssertions softly;
@BeforeEach
void setup() {
Assertions.setRemoveAssertJRelatedElementsFromStackTrace(false);
softly = new SoftAssertions();
}
@Test
void should_return_success_of_last_assertion() {
softly.assertThat(true).isFalse();
softly.assertThat(true).isEqualTo(true);
assertThat(softly.wasSuccess()).isTrue();
}
@Test
void should_return_success_of_last_assertion_with_nested_calls() {
softly.assertThat(true).isFalse();
softly.assertThat(true).isTrue(); // isTrue() calls isEqualTo(true)
assertThat(softly.wasSuccess()).isTrue();
}
@Test
void should_return_failure_of_last_assertion() {
softly.assertThat(true).isTrue();
softly.assertThat(true).isEqualTo(false);
assertThat(softly.wasSuccess()).isFalse();
}
@Test
void should_return_failure_of_last_assertion_with_multilple_nested_calls() {
softly.assertThat(true).isTrue();
softly.assertThat(true).isFalse(); // isFalse() calls isEqualTo(false)
assertThat(softly.wasSuccess()).isFalse();
}
@Test
void should_return_failure_of_last_assertion_with_nested_calls() {
// scenario to avoid:
// -- softly.assertThat(true).isFalse()
// ----- proxied isFalse() -> calls isEqualTo(false) which is proxied
// ------- proxied isEqualTo(false) : catch AssertionError => wasSuccess = false, back to outer call
// ---- proxied isFalse() : no AssertionError caught => last result success = true
softly.assertThat(true).isFalse();
assertThat(softly.wasSuccess()).isFalse();
}
@Test
void should_return_failure_after_fail() {
// GIVEN
String failureMessage = "Should not reach here";
// WHEN
softly.fail(failureMessage);
// THEN
assertThat(softly.wasSuccess()).isFalse();
assertThat(softly.errorsCollected()).hasSize(1);
assertThat(softly.errorsCollected().get(0)).hasMessageStartingWith(failureMessage);
}
@Test
void should_return_failure_after_fail_with_parameters() {
// GIVEN
String failureMessage = "Should not reach %s or %s";
// WHEN
softly.fail(failureMessage, "here", "here");
// THEN
assertThat(softly.wasSuccess()).isFalse();
}
@Test
void should_return_failure_after_fail_with_throwable() {
// GIVEN
String failureMessage = "Should not reach here";
IllegalStateException realCause = new IllegalStateException();
// WHEN
softly.fail(failureMessage, realCause);
// THEN
assertThat(softly.wasSuccess()).isFalse();
}
@Test
void should_return_failure_after_shouldHaveThrown() {
// WHEN
softly.shouldHaveThrown(IllegalArgumentException.class);
// THEN
assertThat(softly.wasSuccess()).isFalse();
}
}
| SoftAssertions_wasSuccess_Test |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionOutputStream.java | {
"start": 1308,
"end": 3262
} | class ____ extends OutputStream
implements IOStatisticsSource {
/**
* The output stream to be compressed.
*/
protected final OutputStream out;
/**
* If non-null, this is the Compressor object that we should call
* CodecPool#returnCompressor on when this stream is closed.
*/
private Compressor trackedCompressor;
/**
* Create a compression output stream that writes
* the compressed bytes to the given stream.
* @param out out.
*/
protected CompressionOutputStream(OutputStream out) {
this.out = out;
}
void setTrackedCompressor(Compressor compressor) {
trackedCompressor = compressor;
}
@Override
public void close() throws IOException {
try {
finish();
} finally {
try {
out.close();
} finally {
if (trackedCompressor != null) {
CodecPool.returnCompressor(trackedCompressor);
trackedCompressor = null;
}
}
}
}
@Override
public void flush() throws IOException {
out.flush();
}
/**
* Write compressed bytes to the stream.
* Made abstract to prevent leakage to underlying stream.
*/
@Override
public abstract void write(byte[] b, int off, int len) throws IOException;
/**
* Finishes writing compressed data to the output stream
* without closing the underlying stream.
* @throws IOException raised on errors performing I/O.
*/
public abstract void finish() throws IOException;
/**
* Reset the compression to the initial state.
* Does not reset the underlying stream.
* @throws IOException raised on errors performing I/O.
*/
public abstract void resetState() throws IOException;
/**
* Return any IOStatistics provided by the underlying stream.
* @return IO stats from the inner stream.
*/
@Override
public IOStatistics getIOStatistics() {
return IOStatisticsSupport.retrieveIOStatistics(out);
}
}
| CompressionOutputStream |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestApplicationLifetimeMonitor.java | {
"start": 3779,
"end": 25717
} | class ____ {
private final long maxLifetime = 30L;
private static final QueuePath ROOT = new QueuePath(CapacitySchedulerConfiguration.ROOT);
private static final String CQ1 = "child1";
private static final QueuePath CQ1_QUEUE_PATH = ROOT.createNewLeaf(CQ1);
private static final QueuePath DEFAULT_QUEUE_PATH = ROOT.createNewLeaf("default");
private YarnConfiguration conf;
public static Collection<Object[]> data() {
Collection<Object[]> params = new ArrayList<Object[]>();
params.add(new Object[]{CapacityScheduler.class});
params.add(new Object[]{FairScheduler.class});
return params;
}
private Class scheduler;
private void initTestApplicationLifetimeMonitor(Class schedulerParameter)
throws IOException {
scheduler = schedulerParameter;
setup();
}
public void setup() throws IOException {
if (scheduler.equals(CapacityScheduler.class)) {
// Since there is limited lifetime monitoring support in fair scheduler
// it does not need queue setup
long defaultLifetime = 15L;
Configuration capacitySchedulerConfiguration =
setUpCSQueue(maxLifetime, defaultLifetime);
conf = new YarnConfiguration(capacitySchedulerConfiguration);
} else {
conf = new YarnConfiguration();
}
// Always run for CS, since other scheduler do not support this.
conf.setClass(YarnConfiguration.RM_SCHEDULER,
scheduler, ResourceScheduler.class);
GenericTestUtils.setRootLogLevel(Level.DEBUG);
UserGroupInformation.setConfiguration(conf);
conf.setLong(YarnConfiguration.RM_APPLICATION_MONITOR_INTERVAL_MS,
3000L);
}
@Timeout(value = 60)
@ParameterizedTest
@MethodSource("data")
public void testApplicationLifetimeMonitor(Class schedulerParameter)
throws Exception {
initTestApplicationLifetimeMonitor(schedulerParameter);
MockRM rm = null;
try {
rm = new MockRM(conf);
rm.start();
Priority appPriority = Priority.newInstance(0);
MockNM nm1 = rm.registerNode("127.0.0.1:1234", 16 * 1024);
Map<ApplicationTimeoutType, Long> timeouts =
new HashMap<ApplicationTimeoutType, Long>();
timeouts.put(ApplicationTimeoutType.LIFETIME, 10L);
RMApp app1 = MockRMAppSubmitter.submit(rm,
MockRMAppSubmissionData.Builder.createWithMemory(1024, rm)
.withAppPriority(appPriority)
.withApplicationTimeouts(timeouts)
.build());
// 20L seconds
timeouts.put(ApplicationTimeoutType.LIFETIME, 20L);
RMApp app2 = MockRMAppSubmitter.submit(rm,
MockRMAppSubmissionData.Builder.createWithMemory(1024, rm)
.withAppPriority(appPriority)
.withApplicationTimeouts(timeouts)
.build());
// user not set lifetime, so queue max lifetime will be considered.
RMApp app3 = MockRMAppSubmitter.submit(rm,
MockRMAppSubmissionData.Builder.createWithMemory(1024, rm)
.withAppPriority(appPriority)
.withApplicationTimeouts(Collections.emptyMap())
.build());
// asc lifetime exceeds queue max lifetime
timeouts.put(ApplicationTimeoutType.LIFETIME, 40L);
RMApp app4 = MockRMAppSubmitter.submit(rm,
MockRMAppSubmissionData.Builder.createWithMemory(1024, rm)
.withAppPriority(appPriority)
.withApplicationTimeouts(timeouts)
.build());
nm1.nodeHeartbeat(true);
// Send launch Event
MockAM am1 =
rm.sendAMLaunched(app1.getCurrentAppAttempt().getAppAttemptId());
am1.registerAppAttempt();
rm.waitForState(app1.getApplicationId(), RMAppState.KILLED);
assertTrue((System.currentTimeMillis() - app1.getSubmitTime()) > 10000,
"Application killed before lifetime value");
Map<ApplicationTimeoutType, String> updateTimeout =
new HashMap<ApplicationTimeoutType, String>();
long newLifetime = 40L;
// update 30L seconds more to timeout which is greater than queue max
// lifetime
String formatISO8601 =
Times.formatISO8601(System.currentTimeMillis() + newLifetime * 1000);
updateTimeout.put(ApplicationTimeoutType.LIFETIME, formatISO8601);
UpdateApplicationTimeoutsRequest request =
UpdateApplicationTimeoutsRequest.newInstance(app2.getApplicationId(),
updateTimeout);
Map<ApplicationTimeoutType, Long> applicationTimeouts =
app2.getApplicationTimeouts();
// has old timeout time
long beforeUpdate =
applicationTimeouts.get(ApplicationTimeoutType.LIFETIME);
// update app2 lifetime to new time i.e now + timeout
rm.getRMContext().getClientRMService().updateApplicationTimeouts(request);
applicationTimeouts =
app2.getApplicationTimeouts();
long afterUpdate =
applicationTimeouts.get(ApplicationTimeoutType.LIFETIME);
assertTrue(afterUpdate > beforeUpdate,
"Application lifetime value not updated");
// verify for application report.
RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(null);
GetApplicationReportRequest appRequest =
recordFactory.newRecordInstance(GetApplicationReportRequest.class);
appRequest.setApplicationId(app2.getApplicationId());
Map<ApplicationTimeoutType, ApplicationTimeout> appTimeouts = rm
.getRMContext().getClientRMService().getApplicationReport(appRequest)
.getApplicationReport().getApplicationTimeouts();
assertTrue(!appTimeouts.isEmpty(), "Application Timeout are empty.");
ApplicationTimeout timeout =
appTimeouts.get(ApplicationTimeoutType.LIFETIME);
assertTrue(timeout.getRemainingTime() > 0,
"Application remaining time is incorrect");
rm.waitForState(app2.getApplicationId(), RMAppState.KILLED);
// verify for app killed with updated lifetime
assertTrue(app2.getFinishTime() > afterUpdate,
"Application killed before lifetime value");
if (scheduler.equals(CapacityScheduler.class)) {
// Supported only on capacity scheduler
rm.waitForState(app3.getApplicationId(), RMAppState.KILLED);
// app4 submitted exceeding queue max lifetime,
// so killed after queue max lifetime.
rm.waitForState(app4.getApplicationId(), RMAppState.KILLED);
long totalTimeRun = app4.getFinishTime() - app4.getSubmitTime();
assertTrue(totalTimeRun > (maxLifetime * 1000),
"Application killed before lifetime value");
assertTrue(totalTimeRun < ((maxLifetime + 10L) * 1000),
"Application killed before lifetime value " + totalTimeRun);
}
} finally {
stopRM(rm);
}
}
@Timeout(value = 180)
@ParameterizedTest
@MethodSource("data")
public void testApplicationLifetimeOnRMRestart(Class schedulerParameter) throws Exception {
initTestApplicationLifetimeMonitor(schedulerParameter);
conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED,
true);
conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
MockRM rm1 = new MockRM(conf);
MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
rm1.start();
MockNM nm1 =
new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
nm1.registerNode();
nm1.nodeHeartbeat(true);
long appLifetime = 30L;
Map<ApplicationTimeoutType, Long> timeouts =
new HashMap<ApplicationTimeoutType, Long>();
timeouts.put(ApplicationTimeoutType.LIFETIME, appLifetime);
RMApp app1 = MockRMAppSubmitter.submit(rm1,
MockRMAppSubmissionData.Builder.createWithMemory(200, rm1)
.withAppPriority(Priority.newInstance(0))
.withApplicationTimeouts(timeouts)
.build());
MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
// Re-start RM
MockRM rm2 = new MockRM(conf, memStore);
// make sure app has been unregistered with old RM else both will trigger
// Expire event
rm1.getRMContext().getRMAppLifetimeMonitor().unregisterApp(
app1.getApplicationId(), ApplicationTimeoutType.LIFETIME);
rm2.start();
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
// recover app
RMApp recoveredApp1 =
rm2.getRMContext().getRMApps().get(app1.getApplicationId());
NMContainerStatus amContainer = TestRMRestart.createNMContainerStatus(
am1.getApplicationAttemptId(), 1, ContainerState.RUNNING);
NMContainerStatus runningContainer = TestRMRestart.createNMContainerStatus(
am1.getApplicationAttemptId(), 2, ContainerState.RUNNING);
nm1.registerNode(Arrays.asList(amContainer, runningContainer), null);
// Wait for RM to settle down on recovering containers;
TestWorkPreservingRMRestart.waitForNumContainersToRecover(2, rm2,
am1.getApplicationAttemptId());
Set<ContainerId> launchedContainers =
((RMNodeImpl) rm2.getRMContext().getRMNodes().get(nm1.getNodeId()))
.getLaunchedContainers();
assertTrue(launchedContainers.contains(amContainer.getContainerId()));
assertTrue(launchedContainers.contains(runningContainer.getContainerId()));
// check RMContainers are re-recreated and the container state is correct.
rm2.waitForState(nm1, amContainer.getContainerId(),
RMContainerState.RUNNING);
rm2.waitForState(nm1, runningContainer.getContainerId(),
RMContainerState.RUNNING);
// re register attempt to rm2
rm2.waitForState(recoveredApp1.getApplicationId(), RMAppState.ACCEPTED);
am1.setAMRMProtocol(rm2.getApplicationMasterService(), rm2.getRMContext());
am1.registerAppAttempt();
rm2.waitForState(recoveredApp1.getApplicationId(), RMAppState.RUNNING);
// wait for app life time and application to be in killed state.
rm2.waitForState(recoveredApp1.getApplicationId(), RMAppState.KILLED);
assertTrue(recoveredApp1.getFinishTime() > (recoveredApp1.getSubmitTime()
+ appLifetime * 1000), "Application killed before lifetime value");
}
@Timeout(value = 60)
@ParameterizedTest
@MethodSource("data")
public void testUpdateApplicationTimeoutForStateStoreUpdateFail(Class schedulerParameter)
throws Exception {
initTestApplicationLifetimeMonitor(schedulerParameter);
MockRM rm1 = null;
try {
MemoryRMStateStore memStore = new MemoryRMStateStore() {
private int count = 0;
@Override
public synchronized void updateApplicationStateInternal(
ApplicationId appId, ApplicationStateData appState)
throws Exception {
// fail only 1 time.
if (count++ == 0) {
throw new Exception("State-store update failed");
}
super.updateApplicationStateInternal(appId, appState);
}
};
memStore.init(conf);
rm1 = new MockRM(conf, memStore);
rm1.start();
MockNM nm1 =
new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
nm1.registerNode();
nm1.nodeHeartbeat(true);
long appLifetime = 30L;
Map<ApplicationTimeoutType, Long> timeouts =
new HashMap<ApplicationTimeoutType, Long>();
timeouts.put(ApplicationTimeoutType.LIFETIME, appLifetime);
RMApp app1 = MockRMAppSubmitter.submit(rm1,
MockRMAppSubmissionData.Builder.createWithMemory(200, rm1)
.withAppPriority(Priority.newInstance(0))
.withApplicationTimeouts(timeouts)
.build());
Map<ApplicationTimeoutType, String> updateTimeout =
new HashMap<ApplicationTimeoutType, String>();
long newLifetime = 10L;
// update 10L seconds more to timeout i.e 30L seconds overall
updateTimeout.put(ApplicationTimeoutType.LIFETIME,
Times.formatISO8601(System.currentTimeMillis() + newLifetime * 1000));
UpdateApplicationTimeoutsRequest request =
UpdateApplicationTimeoutsRequest.newInstance(app1.getApplicationId(),
updateTimeout);
Map<ApplicationTimeoutType, Long> applicationTimeouts =
app1.getApplicationTimeouts();
// has old timeout time
long beforeUpdate =
applicationTimeouts.get(ApplicationTimeoutType.LIFETIME);
try {
// update app2 lifetime to new time i.e now + timeout
rm1.getRMContext().getClientRMService()
.updateApplicationTimeouts(request);
fail("Update application should fail.");
} catch (YarnException e) {
// expected
assertTrue(e.getMessage().contains(app1.getApplicationId().toString()),
"State-store exception does not containe appId");
}
applicationTimeouts = app1.getApplicationTimeouts();
// has old timeout time
long afterUpdate =
applicationTimeouts.get(ApplicationTimeoutType.LIFETIME);
assertEquals(beforeUpdate, afterUpdate,
"Application timeout is updated");
rm1.waitForState(app1.getApplicationId(), RMAppState.KILLED);
// verify for app killed with updated lifetime
assertTrue(app1.getFinishTime() > afterUpdate,
"Application killed before lifetime value");
} finally {
stopRM(rm1);
}
}
@Timeout(value = 120)
@ParameterizedTest
@MethodSource("data")
public void testInheritAppLifetimeFromParentQueue(Class schedulerParameter) throws Exception {
initTestApplicationLifetimeMonitor(schedulerParameter);
YarnConfiguration yarnConf = conf;
long maxRootLifetime = 20L;
long defaultRootLifetime = 10L;
if (scheduler.equals(CapacityScheduler.class)) {
CapacitySchedulerConfiguration csConf =
new CapacitySchedulerConfiguration();
csConf.setQueues(ROOT, new String[] {CQ1});
csConf.setCapacity(CQ1_QUEUE_PATH, 100);
csConf.setMaximumLifetimePerQueue(ROOT, maxRootLifetime);
csConf.setDefaultLifetimePerQueue(ROOT, defaultRootLifetime);
yarnConf = new YarnConfiguration(csConf);
}
MockRM rm = null;
try {
rm = new MockRM(yarnConf);
rm.start();
Priority appPriority = Priority.newInstance(0);
MockNM nm1 = rm.registerNode("127.0.0.1:1234", 16 * 1024);
// user not set lifetime, so queue max lifetime will be considered.
RMApp app1 = MockRMAppSubmitter.submit(rm,
MockRMAppSubmissionData.Builder.createWithMemory(1024, rm)
.withAppPriority(appPriority)
.withApplicationTimeouts(Collections.emptyMap())
.withQueue(CQ1)
.build());
nm1.nodeHeartbeat(true);
if (scheduler.equals(CapacityScheduler.class)) {
// Supported only on capacity scheduler
CapacityScheduler csched =
(CapacityScheduler) rm.getResourceScheduler();
rm.waitForState(app1.getApplicationId(), RMAppState.KILLED);
long totalTimeRun = app1.getFinishTime() - app1.getSubmitTime();
// Child queue should have inherited parent max and default lifetimes.
assertEquals(maxRootLifetime,
csched.getQueue(CQ1).getMaximumApplicationLifetime(),
"Child queue max lifetime should have overridden"
+ " parent value");
assertEquals(defaultRootLifetime,
csched.getQueue(CQ1).getDefaultApplicationLifetime(),
"Child queue default lifetime should have"
+ " overridden parent value");
// app1 (run in the 'child1' queue) should have run longer than the
// default lifetime but less than the max lifetime.
assertTrue(totalTimeRun > (defaultRootLifetime * 1000),
"Application killed before default lifetime value");
assertTrue(totalTimeRun < (maxRootLifetime * 1000),
"Application killed after max lifetime value " + totalTimeRun);
}
} finally {
stopRM(rm);
}
}
@Timeout(value = 120)
@ParameterizedTest
@MethodSource("data")
public void testOverrideParentQueueMaxAppLifetime(Class schedulerParameter) throws Exception {
initTestApplicationLifetimeMonitor(schedulerParameter);
YarnConfiguration yarnConf = conf;
long maxRootLifetime = 20L;
long maxChildLifetime = 40L;
long defaultRootLifetime = 10L;
if (scheduler.equals(CapacityScheduler.class)) {
CapacitySchedulerConfiguration csConf =
new CapacitySchedulerConfiguration();
csConf.setQueues(ROOT, new String[] {CQ1});
csConf.setCapacity(CQ1_QUEUE_PATH, 100);
csConf.setMaximumLifetimePerQueue(ROOT, maxRootLifetime);
csConf.setMaximumLifetimePerQueue(CQ1_QUEUE_PATH, maxChildLifetime);
csConf.setDefaultLifetimePerQueue(ROOT, defaultRootLifetime);
csConf.setDefaultLifetimePerQueue(CQ1_QUEUE_PATH, maxChildLifetime);
yarnConf = new YarnConfiguration(csConf);
}
MockRM rm = null;
try {
rm = new MockRM(yarnConf);
rm.start();
Priority appPriority = Priority.newInstance(0);
MockNM nm1 = rm.registerNode("127.0.0.1:1234", 16 * 1024);
// user not set lifetime, so queue max lifetime will be considered.
RMApp app1 = MockRMAppSubmitter.submit(rm,
MockRMAppSubmissionData.Builder.createWithMemory(1024, rm)
.withAppPriority(appPriority)
.withApplicationTimeouts(Collections.emptyMap())
.withQueue(CQ1)
.build());
nm1.nodeHeartbeat(true);
if (scheduler.equals(CapacityScheduler.class)) {
// Supported only on capacity scheduler
CapacityScheduler csched =
(CapacityScheduler) rm.getResourceScheduler();
rm.waitForState(app1.getApplicationId(), RMAppState.KILLED);
long totalTimeRun = app1.getFinishTime() - app1.getSubmitTime();
// Child queue's max lifetime can override parent's and be larger.
assertTrue((maxRootLifetime < maxChildLifetime) &&
(totalTimeRun > (maxChildLifetime * 1000)),
"Application killed before default lifetime value");
assertEquals(maxRootLifetime, csched.getRootQueue().getMaximumApplicationLifetime(),
"Root queue max lifetime property set incorrectly");
assertEquals(maxChildLifetime, csched.getQueue(CQ1).getMaximumApplicationLifetime(),
"Child queue max lifetime should have overridden"
+ " parent value");
}
} finally {
stopRM(rm);
}
}
@Timeout(value = 120)
@ParameterizedTest
@MethodSource("data")
public void testOverrideParentQueueDefaultAppLifetime(
Class schedulerParameter) throws Exception {
initTestApplicationLifetimeMonitor(schedulerParameter);
YarnConfiguration yarnConf = conf;
long maxRootLifetime = -1L;
long maxChildLifetime = -1L;
long defaultChildLifetime = 10L;
if (scheduler.equals(CapacityScheduler.class)) {
CapacitySchedulerConfiguration csConf =
new CapacitySchedulerConfiguration();
csConf.setQueues(ROOT, new String[] {CQ1});
csConf.setCapacity(CQ1_QUEUE_PATH, 100);
csConf.setMaximumLifetimePerQueue(ROOT, maxRootLifetime);
csConf.setMaximumLifetimePerQueue(CQ1_QUEUE_PATH, maxChildLifetime);
csConf.setDefaultLifetimePerQueue(CQ1_QUEUE_PATH, defaultChildLifetime);
yarnConf = new YarnConfiguration(csConf);
}
MockRM rm = null;
try {
rm = new MockRM(yarnConf);
rm.start();
Priority appPriority = Priority.newInstance(0);
MockNM nm1 = rm.registerNode("127.0.0.1:1234", 16 * 1024);
// user not set lifetime, so queue max lifetime will be considered.
RMApp app1 = MockRMAppSubmitter.submit(rm,
MockRMAppSubmissionData.Builder.createWithMemory(1024, rm)
.withAppPriority(appPriority)
.withApplicationTimeouts(Collections.emptyMap())
.withQueue(CQ1)
.build());
nm1.nodeHeartbeat(true);
if (scheduler.equals(CapacityScheduler.class)) {
// Supported only on capacity scheduler
CapacityScheduler csched =
(CapacityScheduler) rm.getResourceScheduler();
rm.waitForState(app1.getApplicationId(), RMAppState.KILLED);
long totalTimeRun = app1.getFinishTime() - app1.getSubmitTime();
// app1 (run in 'child1' queue) should have overridden the parent's
// default lifetime.
assertTrue(totalTimeRun > (defaultChildLifetime * 1000),
"Application killed before default lifetime value");
// Root and child queue's max lifetime should be -1.
assertEquals(maxRootLifetime, csched.getRootQueue().getMaximumApplicationLifetime(),
"Root queue max lifetime property set incorrectly");
assertEquals(maxChildLifetime, csched.getQueue(CQ1).getMaximumApplicationLifetime(),
"Child queue max lifetime property set incorrectly");
// 'child1' queue's default lifetime should have overridden parent's.
assertEquals(defaultChildLifetime, csched.getQueue(CQ1).getDefaultApplicationLifetime(),
"Child queue default lifetime should have"
+ " overridden parent value");
}
} finally {
stopRM(rm);
}
}
private CapacitySchedulerConfiguration setUpCSQueue(long maxLifetime,
long defaultLifetime) {
CapacitySchedulerConfiguration csConf =
new CapacitySchedulerConfiguration();
csConf.setQueues(ROOT,
new String[] {"default"});
csConf.setCapacity(DEFAULT_QUEUE_PATH, 100);
csConf.setMaximumLifetimePerQueue(DEFAULT_QUEUE_PATH, maxLifetime);
csConf.setDefaultLifetimePerQueue(DEFAULT_QUEUE_PATH, defaultLifetime);
return csConf;
}
private void stopRM(MockRM rm) {
if (rm != null) {
rm.stop();
}
}
}
| TestApplicationLifetimeMonitor |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/OperatorSubtaskStateTest.java | {
"start": 4647,
"end": 5122
} | class ____ extends ByteStreamStateHandle {
private static final long serialVersionUID = 1L;
private boolean discarded = false;
DiscardOnceStreamStateHandle() {
super("test", new byte[0]);
}
@Override
public void discardState() {
super.discardState();
assertThat(discarded).as("state was discarded twice").isFalse();
discarded = true;
}
}
}
| DiscardOnceStreamStateHandle |
java | spring-projects__spring-boot | module/spring-boot-security/src/test/java/org/springframework/boot/security/autoconfigure/web/reactive/ReactiveWebSecurityAutoConfigurationTests.java | {
"start": 1818,
"end": 4034
} | class ____ {
private final ReactiveWebApplicationContextRunner contextRunner = new ReactiveWebApplicationContextRunner()
.withConfiguration(AutoConfigurations.of(ReactiveWebSecurityAutoConfiguration.class));
@Test
void backsOffWhenWebFilterChainProxyBeanPresent() {
this.contextRunner.withUserConfiguration(WebFilterChainProxyConfiguration.class)
.run((context) -> assertThat(context).hasSingleBean(WebFilterChainProxy.class));
}
@Test
void autoConfiguresDenyAllReactiveAuthenticationManagerWhenNoAlternativeIsAvailable() {
this.contextRunner
.run((context) -> assertThat(context).hasSingleBean(ReactiveWebSecurityAutoConfiguration.class)
.hasBean("denyAllAuthenticationManager"));
}
@Test
void enablesWebFluxSecurityWhenUserDetailsServiceIsPresent() {
this.contextRunner.withUserConfiguration(UserDetailsServiceConfiguration.class).run((context) -> {
assertThat(context).hasSingleBean(WebFilterChainProxy.class);
assertThat(context).doesNotHaveBean("denyAllAuthenticationManager");
});
}
@Test
void enablesWebFluxSecurityWhenReactiveAuthenticationManagerIsPresent() {
this.contextRunner
.withBean(ReactiveAuthenticationManager.class, () -> mock(ReactiveAuthenticationManager.class))
.run((context) -> {
assertThat(context).hasSingleBean(WebFilterChainProxy.class);
assertThat(context).doesNotHaveBean("denyAllAuthenticationManager");
});
}
@Test
void enablesWebFluxSecurityWhenSecurityWebFilterChainIsPresent() {
this.contextRunner.withBean(SecurityWebFilterChain.class, () -> mock(SecurityWebFilterChain.class))
.run((context) -> {
assertThat(context).hasSingleBean(WebFilterChainProxy.class);
assertThat(context).doesNotHaveBean("denyAllAuthenticationManager");
});
}
@Test
void autoConfigurationIsConditionalOnClass() {
this.contextRunner
.withClassLoader(new FilteredClassLoader(Flux.class, EnableWebFluxSecurity.class, WebFilterChainProxy.class,
WebFluxConfigurer.class))
.withUserConfiguration(UserDetailsServiceConfiguration.class)
.run((context) -> assertThat(context).doesNotHaveBean(WebFilterChainProxy.class));
}
@Configuration(proxyBeanMethods = false)
static | ReactiveWebSecurityAutoConfigurationTests |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/metrics/groups/AbstractMetricGroupTest.java | {
"start": 2251,
"end": 16147
} | class ____ {
/**
* Verifies that no {@link NullPointerException} is thrown when {@link
* AbstractMetricGroup#getAllVariables()} is called and the parent is null.
*/
@Test
void testGetAllVariables() throws Exception {
MetricRegistryImpl registry =
new MetricRegistryImpl(
MetricRegistryTestUtils.defaultMetricRegistryConfiguration());
AbstractMetricGroup<?> group =
new AbstractMetricGroup<AbstractMetricGroup<?>>(registry, new String[0], null) {
@Override
protected QueryScopeInfo createQueryServiceMetricInfo(CharacterFilter filter) {
return null;
}
@Override
protected String getGroupName(CharacterFilter filter) {
return "";
}
};
assertThat(group.getAllVariables()).isEmpty();
registry.closeAsync().get();
}
@Test
void testGetAllVariablesWithOutExclusions() {
MetricRegistry registry = NoOpMetricRegistry.INSTANCE;
AbstractMetricGroup<?> group = new ProcessMetricGroup(registry, "host");
assertThat(group.getAllVariables()).containsKey(ScopeFormat.SCOPE_HOST);
}
@Test
void testGetAllVariablesWithExclusions() {
MetricRegistry registry = NoOpMetricRegistry.INSTANCE;
AbstractMetricGroup<?> group = new ProcessMetricGroup(registry, "host");
assertThat(group.getAllVariables(-1, Collections.singleton(ScopeFormat.SCOPE_HOST)))
.isEmpty();
}
@Test
void testGetAllVariablesWithExclusionsForReporters() {
MetricRegistry registry = TestingMetricRegistry.builder().setNumberReporters(2).build();
AbstractMetricGroup<?> group =
new GenericMetricGroup(registry, null, "test") {
@Override
protected void putVariables(Map<String, String> variables) {
variables.put("k1", "v1");
variables.put("k2", "v2");
}
};
group.getAllVariables(-1, Collections.emptySet());
assertThat(group.getAllVariables(0, Collections.singleton("k1"))).doesNotContainKey("k1");
assertThat(group.getAllVariables(0, Collections.singleton("k1"))).containsKey("k2");
assertThat(group.getAllVariables(1, Collections.singleton("k2"))).containsKey("k1");
assertThat(group.getAllVariables(1, Collections.singleton("k2"))).doesNotContainKey("k2");
}
// ========================================================================
// Scope Caching
// ========================================================================
private static final CharacterFilter FILTER_C =
new CharacterFilter() {
@Override
public String filterCharacters(String input) {
return input.replace("C", "X");
}
};
private static final CharacterFilter FILTER_B =
new CharacterFilter() {
@Override
public String filterCharacters(String input) {
return input.replace("B", "X");
}
};
@Test
void testScopeCachingForMultipleReporters() throws Exception {
String counterName = "1";
Configuration config = new Configuration();
config.set(MetricOptions.SCOPE_NAMING_TM, "A.B.C.D");
MetricConfig metricConfig1 = new MetricConfig();
metricConfig1.setProperty(MetricOptions.REPORTER_SCOPE_DELIMITER.key(), "-");
MetricConfig metricConfig2 = new MetricConfig();
metricConfig2.setProperty(MetricOptions.REPORTER_SCOPE_DELIMITER.key(), "!");
config.setString(
ConfigConstants.METRICS_REPORTER_PREFIX
+ "test1."
+ MetricOptions.REPORTER_SCOPE_DELIMITER.key(),
"-");
config.setString(
ConfigConstants.METRICS_REPORTER_PREFIX
+ "test2."
+ MetricOptions.REPORTER_SCOPE_DELIMITER.key(),
"!");
CollectingMetricsReporter reporter1 = new CollectingMetricsReporter(FILTER_B);
CollectingMetricsReporter reporter2 = new CollectingMetricsReporter(FILTER_C);
MetricRegistryImpl testRegistry =
new MetricRegistryImpl(
MetricRegistryTestUtils.fromConfiguration(config),
Arrays.asList(
ReporterSetupBuilder.METRIC_SETUP_BUILDER.forReporter(
"test1", metricConfig1, reporter1),
ReporterSetupBuilder.METRIC_SETUP_BUILDER.forReporter(
"test2", metricConfig2, reporter2)));
try {
MetricGroup tmGroup =
TaskManagerMetricGroup.createTaskManagerMetricGroup(
testRegistry, "host", new ResourceID("id"));
tmGroup.counter(counterName);
assertThat(testRegistry.getReporters())
.withFailMessage("Reporters were not properly instantiated")
.hasSize(2);
{
// verify reporter1
MetricGroupAndName nameAndGroup =
reporter1.getAddedMetrics().stream()
.filter(nag -> nag.name.equals(counterName))
.findAny()
.get();
String metricName = nameAndGroup.name;
MetricGroup group = nameAndGroup.group;
// the first call determines which filter is applied to all future
// calls; in
// this case
// no filter is used at all
assertThat(group.getMetricIdentifier(metricName)).isEqualTo("A-B-C-D-1");
// from now on the scope string is cached and should not be reliant
// on the
// given filter
assertThat(group.getMetricIdentifier(metricName, FILTER_C)).isEqualTo("A-B-C-D-1");
assertThat(group.getMetricIdentifier(metricName, reporter1)).isEqualTo("A-B-C-D-1");
// the metric name however is still affected by the filter as it is
// not
// cached
assertThat(
group.getMetricIdentifier(
metricName,
input -> input.replace("B", "X").replace(counterName, "4")))
.isEqualTo("A-B-C-D-4");
}
{
// verify reporter2
MetricGroupAndName nameAndGroup =
reporter2.getAddedMetrics().stream()
.filter(nag -> nag.name.equals(counterName))
.findAny()
.get();
String metricName = nameAndGroup.name;
MetricGroup group = nameAndGroup.group;
// the first call determines which filter is applied to all future calls
assertThat(group.getMetricIdentifier(metricName, reporter2)).isEqualTo("A!B!X!D!1");
// from now on the scope string is cached and should not be reliant on the given
// filter
assertThat(group.getMetricIdentifier(metricName)).isEqualTo("A!B!X!D!1");
assertThat(group.getMetricIdentifier(metricName, FILTER_C)).isEqualTo("A!B!X!D!1");
// the metric name however is still affected by the filter as it is not cached
assertThat(
group.getMetricIdentifier(
metricName,
input -> input.replace("A", "X").replace(counterName, "3")))
.isEqualTo("A!B!X!D!3");
}
} finally {
testRegistry.closeAsync().get();
}
}
@Test
@SuppressWarnings("unchecked")
void testLogicalScopeCachingForMultipleReporters() throws Exception {
String counterName = "1";
CollectingMetricsReporter reporter1 = new CollectingMetricsReporter(FILTER_B);
CollectingMetricsReporter reporter2 = new CollectingMetricsReporter(FILTER_C);
MetricRegistryImpl testRegistry =
new MetricRegistryImpl(
MetricRegistryTestUtils.defaultMetricRegistryConfiguration(),
Arrays.asList(
ReporterSetupBuilder.METRIC_SETUP_BUILDER.forReporter(
"test1", reporter1),
ReporterSetupBuilder.METRIC_SETUP_BUILDER.forReporter(
"test2", reporter2)));
try {
MetricGroup tmGroup =
TaskManagerMetricGroup.createTaskManagerMetricGroup(
testRegistry, "host", new ResourceID("id"))
.addGroup("B")
.addGroup("C");
tmGroup.counter(counterName);
assertThat(testRegistry.getReporters())
.withFailMessage("Reporters were not properly instantiated")
.hasSize(2);
assertThat(
LogicalScopeProvider.castFrom(reporter1.findAdded(counterName).group)
.getLogicalScope(reporter1, '-'))
.isEqualTo("taskmanager-X-C");
assertThat(
LogicalScopeProvider.castFrom(reporter2.findAdded(counterName).group)
.getLogicalScope(reporter2, ','))
.isEqualTo("taskmanager,B,X");
} finally {
testRegistry.closeAsync().get();
}
}
@Test
void testScopeGenerationWithoutReporters() throws Exception {
Configuration config = new Configuration();
config.set(MetricOptions.SCOPE_NAMING_TM, "A.B.C.D");
MetricRegistryImpl testRegistry =
new MetricRegistryImpl(MetricRegistryTestUtils.fromConfiguration(config));
try {
TaskManagerMetricGroup group =
TaskManagerMetricGroup.createTaskManagerMetricGroup(
testRegistry, "host", new ResourceID("id"));
assertThat(testRegistry.getReporters())
.withFailMessage("MetricReporters list should be empty")
.isEmpty();
// default delimiter should be used
assertThat(group.getMetricIdentifier("1", FILTER_C)).isEqualTo("A.B.X.D.1");
// no caching should occur
assertThat(group.getMetricIdentifier("1", FILTER_B)).isEqualTo("A.X.C.D.1");
// invalid reporter indices do not throw errors
assertThat(group.getMetricIdentifier("1", FILTER_B, -1, '.')).isEqualTo("A.X.C.D.1");
assertThat(group.getMetricIdentifier("1", FILTER_B, 2, '.')).isEqualTo("A.X.C.D.1");
} finally {
testRegistry.closeAsync().get();
}
}
@Test
void testGetAllVariablesDoesNotDeadlock() throws InterruptedException {
final BlockerSync parentSync = new BlockerSync();
final BlockerSync childSync = new BlockerSync();
AtomicReference<BlockerSync> syncRef = new AtomicReference<>();
final MetricRegistry registry =
TestingMetricRegistry.builder()
.setRegisterConsumer(
(metric, metricName, group) -> {
syncRef.get().blockNonInterruptible();
group.getAllVariables();
})
.build();
final MetricGroup parent =
new GenericMetricGroup(
registry,
UnregisteredMetricGroups.createUnregisteredTaskManagerMetricGroup(),
"parent");
final MetricGroup child = parent.addGroup("child");
final Thread parentRegisteringThread = new Thread(() -> parent.counter("parent_counter"));
final Thread childRegisteringThread = new Thread(() -> child.counter("child_counter"));
try {
// start both threads and have them block in the registry, so they acquire the lock of
// their respective group
syncRef.set(childSync);
childRegisteringThread.start();
childSync.awaitBlocker();
syncRef.set(parentSync);
parentRegisteringThread.start();
parentSync.awaitBlocker();
// the parent thread remains blocked to simulate the child thread holding some lock in
// the registry/reporter
// the child thread continues execution and calls getAllVariables()
// in the past this would block indefinitely since the method acquires the locks of all
// parent groups
childSync.releaseBlocker();
// wait with a timeout to ensure the finally block is executed _at some point_,
// un-blocking the parent
childRegisteringThread.join(1000 * 10);
parentSync.releaseBlocker();
parentRegisteringThread.join();
} finally {
parentSync.releaseBlocker();
childSync.releaseBlocker();
parentRegisteringThread.join();
childRegisteringThread.join();
}
}
}
| AbstractMetricGroupTest |
java | spring-projects__spring-framework | spring-beans/src/main/java/org/springframework/beans/factory/Aware.java | {
"start": 1383,
"end": 1482
} | interface ____.
*
* @author Chris Beams
* @author Juergen Hoeller
* @since 3.1
*/
public | callbacks |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetFeatureUsageResponse.java | {
"start": 848,
"end": 950
} | class ____ extends ActionResponse implements ToXContentObject {
public static | GetFeatureUsageResponse |
java | reactor__reactor-core | reactor-core/src/test/java/reactor/core/SwapDisposableTest.java | {
"start": 1065,
"end": 5568
} | class ____ {
private Disposables.SwapDisposable sequentialDisposable;
@BeforeEach
public void setUp() {
sequentialDisposable = new Disposables.SwapDisposable();
}
@Test
public void unsubscribingWithoutUnderlyingDoesNothing() {
sequentialDisposable.dispose();
}
@Test
public void getDisposableShouldReturnset() {
final Disposable underlying = mock(Disposable.class);
sequentialDisposable.update(underlying);
assertThat(sequentialDisposable.get()).isSameAs(underlying);
final Disposable another = mock(Disposable.class);
sequentialDisposable.update(another);
assertThat(sequentialDisposable.get()).isSameAs(another);
}
@Test
public void notDisposedWhenReplaced() {
final Disposable underlying = mock(Disposable.class);
sequentialDisposable.update(underlying);
sequentialDisposable.replace(() -> {});
sequentialDisposable.dispose();
verify(underlying, never()).dispose();
}
@Test
public void unsubscribingTwiceDoesUnsubscribeOnce() {
Disposable underlying = mock(Disposable.class);
sequentialDisposable.update(underlying);
sequentialDisposable.dispose();
verify(underlying).dispose();
sequentialDisposable.dispose();
verifyNoMoreInteractions(underlying);
}
@Test
public void settingSameDisposableTwiceDoesUnsubscribeIt() {
Disposable underlying = mock(Disposable.class);
sequentialDisposable.update(underlying);
verifyNoInteractions(underlying);
sequentialDisposable.update(underlying);
verify(underlying).dispose();
}
@Test
public void unsubscribingWithSingleUnderlyingUnsubscribes() {
Disposable underlying = mock(Disposable.class);
sequentialDisposable.update(underlying);
underlying.dispose();
verify(underlying).dispose();
}
@Test
public void replacingFirstUnderlyingCausesUnsubscription() {
Disposable first = mock(Disposable.class);
sequentialDisposable.update(first);
Disposable second = mock(Disposable.class);
sequentialDisposable.update(second);
verify(first).dispose();
}
@Test
public void whenUnsubscribingSecondUnderlyingUnsubscribed() {
Disposable first = mock(Disposable.class);
sequentialDisposable.update(first);
Disposable second = mock(Disposable.class);
sequentialDisposable.update(second);
sequentialDisposable.dispose();
verify(second).dispose();
}
@Test
public void settingUnderlyingWhenUnsubscribedCausesImmediateUnsubscription() {
sequentialDisposable.dispose();
Disposable underlying = mock(Disposable.class);
sequentialDisposable.update(underlying);
verify(underlying).dispose();
}
@Test
@Timeout(1)
public void settingUnderlyingWhenUnsubscribedCausesImmediateUnsubscriptionConcurrently()
throws InterruptedException {
final Disposable firstSet = mock(Disposable.class);
sequentialDisposable.update(firstSet);
final CountDownLatch start = new CountDownLatch(1);
final int count = 10;
final CountDownLatch end = new CountDownLatch(count);
final List<Thread> threads = new ArrayList<>();
for (int i = 0; i < count; i++) {
final Thread t = new Thread(() -> {
try {
start.await();
sequentialDisposable.dispose();
} catch (InterruptedException e) {
fail(e.getMessage());
} finally {
end.countDown();
}
});
t.start();
threads.add(t);
}
final Disposable underlying = mock(Disposable.class);
start.countDown();
sequentialDisposable.update(underlying);
end.await();
verify(firstSet).dispose();
verify(underlying).dispose();
for (final Thread t : threads) {
t.join();
}
}
@Test
public void concurrentSetDisposableShouldNotInterleave()
throws InterruptedException {
final int count = 10;
final List<Disposable> subscriptions = new ArrayList<>();
final CountDownLatch start = new CountDownLatch(1);
final CountDownLatch end = new CountDownLatch(count);
final List<Thread> threads = new ArrayList<>();
for (int i = 0; i < count; i++) {
final Disposable subscription = mock(Disposable.class);
subscriptions.add(subscription);
final Thread t = new Thread(() -> {
try {
start.await();
sequentialDisposable.update(subscription);
} catch (InterruptedException e) {
fail(e.getMessage());
} finally {
end.countDown();
}
});
t.start();
threads.add(t);
}
start.countDown();
end.await();
sequentialDisposable.dispose();
for (final Disposable subscription : subscriptions) {
verify(subscription).dispose();
}
for (final Thread t : threads) {
t.join();
}
}
}
| SwapDisposableTest |
java | elastic__elasticsearch | libs/gpu-codec/src/test/java/org/elasticsearch/gpu/codec/CuVSResourceManagerTests.java | {
"start": 1201,
"end": 8900
} | class ____ extends ESTestCase {
private static final Logger log = LogManager.getLogger(CuVSResourceManagerTests.class);
public static final long TOTAL_DEVICE_MEMORY_IN_BYTES = 256L * 1024 * 1024;
private static void testBasic(CagraIndexParams params) throws InterruptedException {
var mgr = new MockPoolingCuVSResourceManager(2);
var res1 = mgr.acquire(0, 0, CuVSMatrix.DataType.FLOAT, params);
var res2 = mgr.acquire(0, 0, CuVSMatrix.DataType.FLOAT, params);
assertThat(res1.toString(), containsString("id=0"));
assertThat(res2.toString(), containsString("id=1"));
mgr.release(res1);
mgr.release(res2);
res1 = mgr.acquire(0, 0, CuVSMatrix.DataType.FLOAT, params);
res2 = mgr.acquire(0, 0, CuVSMatrix.DataType.FLOAT, params);
assertThat(res1.toString(), containsString("id=0"));
assertThat(res2.toString(), containsString("id=1"));
mgr.release(res1);
mgr.release(res2);
mgr.shutdown();
}
public void testBasicWithNNDescent() throws InterruptedException {
testBasic(createNnDescentParams());
}
public void testBasicWithIvfPq() throws InterruptedException {
testBasic(createIvfPqParams());
}
public void testMultipleAcquireRelease() throws InterruptedException {
var mgr = new MockPoolingCuVSResourceManager(2);
var res1 = mgr.acquire(16 * 1024, 1024, CuVSMatrix.DataType.FLOAT, createNnDescentParams());
var res2 = mgr.acquire(16 * 1024, 1024, CuVSMatrix.DataType.FLOAT, createIvfPqParams());
assertThat(res1.toString(), containsString("id=0"));
assertThat(res2.toString(), containsString("id=1"));
assertThat(mgr.availableMemory(), lessThan(TOTAL_DEVICE_MEMORY_IN_BYTES / 2));
mgr.release(res1);
mgr.release(res2);
assertThat(mgr.availableMemory(), equalTo(TOTAL_DEVICE_MEMORY_IN_BYTES));
res1 = mgr.acquire(16 * 1024, 1024, CuVSMatrix.DataType.FLOAT, createNnDescentParams());
res2 = mgr.acquire(16 * 1024, 1024, CuVSMatrix.DataType.FLOAT, createIvfPqParams());
assertThat(res1.toString(), containsString("id=0"));
assertThat(res2.toString(), containsString("id=1"));
assertThat(mgr.availableMemory(), lessThan(TOTAL_DEVICE_MEMORY_IN_BYTES / 2));
mgr.release(res1);
mgr.release(res2);
assertThat(mgr.availableMemory(), equalTo(TOTAL_DEVICE_MEMORY_IN_BYTES));
mgr.shutdown();
}
private static void testBlocking(CagraIndexParams params) throws Exception {
var mgr = new MockPoolingCuVSResourceManager(2);
var res1 = mgr.acquire(0, 0, CuVSMatrix.DataType.FLOAT, params);
var res2 = mgr.acquire(0, 0, CuVSMatrix.DataType.FLOAT, params);
AtomicReference<CuVSResources> holder = new AtomicReference<>();
Thread t = new Thread(() -> {
try {
var res3 = mgr.acquire(0, 0, CuVSMatrix.DataType.FLOAT, params);
holder.set(res3);
} catch (InterruptedException e) {
throw new AssertionError(e);
}
});
t.start();
Thread.sleep(1_000);
assertNull(holder.get());
mgr.release(randomFrom(res1, res2));
t.join();
assertThat(holder.get().toString(), anyOf(containsString("id=0"), containsString("id=1")));
mgr.shutdown();
}
public void testBlockingWithNNDescent() throws Exception {
testBlocking(createNnDescentParams());
}
public void testBlockingWithIvfPq() throws Exception {
testBlocking(createIvfPqParams());
}
private static void testBlockingOnInsufficientMemory(CagraIndexParams params, CuVSResourceManager mgr) throws Exception {
var res1 = mgr.acquire(16 * 1024, 1024, CuVSMatrix.DataType.FLOAT, params);
AtomicReference<CuVSResources> holder = new AtomicReference<>();
Thread t = new Thread(() -> {
try {
var res2 = mgr.acquire((16 * 1024) + 1, 1024, CuVSMatrix.DataType.FLOAT, params);
holder.set(res2);
} catch (InterruptedException e) {
throw new AssertionError(e);
}
});
t.start();
Thread.sleep(1_000);
assertNull(holder.get());
mgr.release(res1);
t.join();
assertThat(holder.get().toString(), anyOf(containsString("id=0"), containsString("id=1")));
mgr.shutdown();
}
public void testBlockingOnInsufficientMemoryNnDescent() throws Exception {
var mgr = new MockPoolingCuVSResourceManager(2);
testBlockingOnInsufficientMemory(createNnDescentParams(), mgr);
}
public void testBlockingOnInsufficientMemoryIvfPq() throws Exception {
var mgr = new MockPoolingCuVSResourceManager(2, 32L * 1024 * 1024);
testBlockingOnInsufficientMemory(createIvfPqParams(), mgr);
}
private static void testNotBlockingOnSufficientMemory(CagraIndexParams params, CuVSResourceManager mgr) throws Exception {
var res1 = mgr.acquire(16 * 1024, 1024, CuVSMatrix.DataType.FLOAT, params);
AtomicReference<CuVSResources> holder = new AtomicReference<>();
Thread t = new Thread(() -> {
try {
var res2 = mgr.acquire((16 * 1024) - 1000, 1024, CuVSMatrix.DataType.FLOAT, params);
holder.set(res2);
} catch (InterruptedException e) {
throw new AssertionError(e);
}
});
t.start();
t.join(5_000);
assertNotNull(holder.get());
assertThat(holder.get().toString(), not(equalTo(res1.toString())));
mgr.shutdown();
}
public void testNotBlockingOnSufficientMemoryNnDescent() throws Exception {
var mgr = new MockPoolingCuVSResourceManager(2);
testNotBlockingOnSufficientMemory(createNnDescentParams(), mgr);
}
public void testNotBlockingOnSufficientMemoryIvfPq() throws Exception {
var mgr = new MockPoolingCuVSResourceManager(2, 32L * 1024 * 1024);
testNotBlockingOnSufficientMemory(createIvfPqParams(), mgr);
}
public void testManagedResIsNotClosable() throws Exception {
var mgr = new MockPoolingCuVSResourceManager(1);
var res = mgr.acquire(0, 0, CuVSMatrix.DataType.FLOAT, createNnDescentParams());
assertThrows(UnsupportedOperationException.class, res::close);
mgr.release(res);
mgr.shutdown();
}
public void testDoubleRelease() throws InterruptedException {
var mgr = new MockPoolingCuVSResourceManager(2);
var res1 = mgr.acquire(0, 0, CuVSMatrix.DataType.FLOAT, createNnDescentParams());
var res2 = mgr.acquire(0, 0, CuVSMatrix.DataType.FLOAT, createNnDescentParams());
mgr.release(res1);
mgr.release(res2);
assertThrows(AssertionError.class, () -> mgr.release(randomFrom(res1, res2)));
mgr.shutdown();
}
private static CagraIndexParams createNnDescentParams() {
return new CagraIndexParams.Builder().withCagraGraphBuildAlgo(CagraIndexParams.CagraGraphBuildAlgo.NN_DESCENT)
.withNNDescentNumIterations(5)
.build();
}
private static CagraIndexParams createIvfPqParams() {
return new CagraIndexParams.Builder().withCagraGraphBuildAlgo(CagraIndexParams.CagraGraphBuildAlgo.IVF_PQ)
.withCuVSIvfPqParams(
new CuVSIvfPqParams.Builder().withCuVSIvfPqIndexParams(
new CuVSIvfPqIndexParams.Builder().withPqBits(4).withPqDim(1024).build()
).build()
)
.build();
}
static | CuVSResourceManagerTests |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/lucene/util/automaton/MinimizationOperations.java | {
"start": 11263,
"end": 13119
} | class ____ {
final int q;
StateListNode next, prev;
final StateList sl;
StateListNode(int q, StateList sl) {
this.q = q;
this.sl = sl;
if (sl.size++ == 0) sl.first = sl.last = this;
else {
sl.last.next = this;
prev = sl.last;
sl.last = this;
}
}
void remove() {
sl.size--;
if (sl.first == this) sl.first = next;
else prev.next = next;
if (sl.last == this) sl.last = prev;
else next.prev = prev;
}
}
static Automaton totalize(Automaton a) {
Automaton result = new Automaton();
int numStates = a.getNumStates();
for (int i = 0; i < numStates; i++) {
result.createState();
result.setAccept(i, a.isAccept(i));
}
int deadState = result.createState();
result.addTransition(deadState, deadState, Character.MIN_CODE_POINT, Character.MAX_CODE_POINT);
Transition t = new Transition();
for (int i = 0; i < numStates; i++) {
int maxi = Character.MIN_CODE_POINT;
int count = a.initTransition(i, t);
for (int j = 0; j < count; j++) {
a.getNextTransition(t);
result.addTransition(i, t.dest, t.min, t.max);
if (t.min > maxi) {
result.addTransition(i, deadState, maxi, t.min - 1);
}
if (t.max + 1 > maxi) {
maxi = t.max + 1;
}
}
if (maxi <= Character.MAX_CODE_POINT) {
result.addTransition(i, deadState, maxi, Character.MAX_CODE_POINT);
}
}
result.finishState();
return result;
}
}
| StateListNode |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/validation/beanvalidation/SpringValidatorAdapter.java | {
"start": 2083,
"end": 2169
} | interface ____.
*
* <p>Can be used as a programmatic wrapper. Also serves as base | itself |
java | spring-cloud__spring-cloud-gateway | spring-cloud-gateway-server-webflux/src/main/java/org/springframework/cloud/gateway/config/GatewayReactiveLoadBalancerClientAutoConfiguration.java | {
"start": 2167,
"end": 3028
} | class ____ {
@Bean
@ConditionalOnBean(LoadBalancerClientFactory.class)
@ConditionalOnMissingBean(ReactiveLoadBalancerClientFilter.class)
@ConditionalOnEnabledGlobalFilter
public ReactiveLoadBalancerClientFilter gatewayLoadBalancerClientFilter(LoadBalancerClientFactory clientFactory,
GatewayLoadBalancerProperties properties) {
return new ReactiveLoadBalancerClientFilter(clientFactory, properties);
}
@Bean
@ConditionalOnBean({ ReactiveLoadBalancerClientFilter.class, LoadBalancerClientFactory.class })
@ConditionalOnMissingBean
@ConditionalOnEnabledGlobalFilter
public LoadBalancerServiceInstanceCookieFilter loadBalancerServiceInstanceCookieFilter(
LoadBalancerClientFactory loadBalancerClientFactory) {
return new LoadBalancerServiceInstanceCookieFilter(loadBalancerClientFactory);
}
}
| GatewayReactiveLoadBalancerClientAutoConfiguration |
java | junit-team__junit5 | documentation/src/test/java/example/MethodSourceParameterResolutionDemo.java | {
"start": 1441,
"end": 1907
} | class ____ implements ParameterResolver {
@Override
public boolean supportsParameter(ParameterContext parameterContext,
ExtensionContext extensionContext) {
return parameterContext.getParameter().getType() == int.class;
}
@Override
public Object resolveParameter(ParameterContext parameterContext,
ExtensionContext extensionContext) {
return 2;
}
}
// end::parameter_resolution_MethodSource_example[]
// @formatter:on
}
| IntegerResolver |
java | apache__logging-log4j2 | log4j-1.2-api/src/main/java/org/apache/log4j/spi/NOPLoggerRepository.java | {
"start": 1157,
"end": 1211
} | class ____.
*
* @since 1.2.15
*/
public final | reloading |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/doc/XmlParser.java | {
"start": 954,
"end": 1509
} | class ____ implements AutoCloseable {
private InputStream xml;
public XmlParser(InputStream xml) {
this.xml = xml;
}
public XmlNode parse() {
try {
DocumentBuilderFactory dbFactory = DocumentBuilderFactory.newInstance();
DocumentBuilder dBuilder = dbFactory.newDocumentBuilder();
return new XmlNode(dBuilder.parse(this.xml));
}
catch (IOException | ParserConfigurationException | SAXException ex) {
throw new IllegalStateException(ex);
}
}
@Override
public void close() throws IOException {
this.xml.close();
}
}
| XmlParser |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/LockNotBeforeTryTest.java | {
"start": 6133,
"end": 6643
} | class ____ {
private void test(ReentrantLock lock) {
lock.lock();
try {
System.out.println("hi");
} finally {
lock.unlock();
}
}
}
""")
.doTest();
}
@Test
public void twoRegions() {
refactoringHelper
.addInputLines(
"Test.java",
"""
import java.util.concurrent.locks.ReentrantLock;
| Test |
java | redisson__redisson | redisson/src/main/java/org/redisson/RedissonSetMultimap.java | {
"start": 1560,
"end": 14228
} | class ____<K, V> extends RedissonMultimap<K, V> implements RSetMultimap<K, V> {
private static final RedisStrictCommand<Boolean> SCARD_VALUE = new RedisStrictCommand<Boolean>("SCARD", new BooleanAmountReplayConvertor());
private static final RedisCommand<Boolean> SISMEMBER_VALUE = new RedisCommand<Boolean>("SISMEMBER", new BooleanReplayConvertor());
public RedissonSetMultimap(CommandAsyncExecutor connectionManager, String name) {
super(connectionManager, name);
}
public RedissonSetMultimap(Codec codec, CommandAsyncExecutor connectionManager, String name) {
super(codec, connectionManager, name);
}
@Override
public RFuture<Integer> sizeAsync() {
return commandExecutor.evalReadAsync(getRawName(), codec, RedisCommands.EVAL_INTEGER,
"local keys = redis.call('hgetall', KEYS[1]); " +
"local size = 0; " +
"for i, v in ipairs(keys) do " +
"if i % 2 == 0 then " +
"local name = ARGV[1] .. v; " +
"size = size + redis.call('scard', name); " +
"end;" +
"end; " +
"return size; ",
Arrays.<Object>asList(getRawName()),
prefix);
}
@Override
public RFuture<Long> fastRemoveValueAsync(V... values) {
List<Object> args = new ArrayList<>(values.length + 1);
args.add(prefix);
encodeMapValues(args, Arrays.asList(values));
return commandExecutor.evalWriteAsync(getRawName(), codec, RedisCommands.EVAL_INTEGER,
"local keys = redis.call('hgetall', KEYS[1]); " +
"local size = 0; " +
"for i, v in ipairs(keys) do " +
"if i % 2 == 0 then " +
"local name = ARGV[1] .. v; " +
"for j = 2, #ARGV, 1 do " +
"size = size + redis.call('srem', name, ARGV[j]); " +
"end; " +
"if redis.call('exists', name) == 0 then " +
"redis.call('hdel', KEYS[1], keys[i-1]); " +
"end; " +
"end;" +
"end; " +
"return 0; ",
Arrays.asList(getRawName()),
args.toArray());
}
@Override
public RFuture<Boolean> containsKeyAsync(Object key) {
String keyHash = keyHash(key);
String setName = getValuesName(keyHash);
return commandExecutor.readAsync(getRawName(), codec, SCARD_VALUE, setName);
}
@Override
public RFuture<Boolean> containsValueAsync(Object value) {
ByteBuf valueState = encodeMapValue(value);
return commandExecutor.evalReadAsync(getRawName(), codec, RedisCommands.EVAL_BOOLEAN,
"local keys = redis.call('hgetall', KEYS[1]); " +
"for i, v in ipairs(keys) do " +
"if i % 2 == 0 then " +
"local name = ARGV[2] .. v; " +
"if redis.call('sismember', name, ARGV[1]) == 1 then "
+ "return 1; " +
"end;" +
"end;" +
"end; " +
"return 0; ",
Arrays.<Object>asList(getRawName()),
valueState, prefix);
}
@Override
public RFuture<Boolean> containsEntryAsync(Object key, Object value) {
String keyHash = keyHash(key);
ByteBuf valueState = encodeMapValue(value);
String setName = getValuesName(keyHash);
return commandExecutor.readAsync(getRawName(), codec, SISMEMBER_VALUE, setName, valueState);
}
@Override
public RFuture<Boolean> putAsync(K key, V value) {
ByteBuf keyState = encodeMapKey(key);
String keyHash = hash(keyState);
ByteBuf valueState = encodeMapValue(value);
String setName = getValuesName(keyHash);
return commandExecutor.evalWriteAsync(getRawName(), codec, RedisCommands.EVAL_BOOLEAN,
"redis.call('hsetnx', KEYS[1], ARGV[1], ARGV[2]); " +
"return redis.call('sadd', KEYS[2], ARGV[3]); ",
Arrays.<Object>asList(getRawName(), setName), keyState, keyHash, valueState);
}
@Override
public RFuture<Boolean> removeAsync(Object key, Object value) {
ByteBuf keyState = encodeMapKey(key);
String keyHash = hash(keyState);
ByteBuf valueState = encodeMapValue(value);
String setName = getValuesName(keyHash);
return commandExecutor.evalWriteAsync(getRawName(), codec, RedisCommands.EVAL_BOOLEAN,
"local res = redis.call('srem', KEYS[2], ARGV[2]); "
+ "if res == 1 and redis.call('scard', KEYS[2]) == 0 then "
+ "redis.call('hdel', KEYS[1], ARGV[1]); "
+ "end; "
+ "return res; ",
Arrays.<Object>asList(getRawName(), setName), keyState, valueState);
}
@Override
public RFuture<Boolean> putAllAsync(K key, Iterable<? extends V> values) {
List<Object> params = new ArrayList<Object>();
ByteBuf keyState = encodeMapKey(key);
params.add(keyState);
String keyHash = hash(keyState);
params.add(keyHash);
for (Object value : values) {
ByteBuf valueState = encodeMapValue(value);
params.add(valueState);
}
String setName = getValuesName(keyHash);
return commandExecutor.evalWriteAsync(getRawName(), codec, RedisCommands.EVAL_BOOLEAN_AMOUNT,
"redis.call('hset', KEYS[1], ARGV[1], ARGV[2]); " +
"local n = 0; " +
"for i=3, #ARGV, 5000 do " +
"n = n + redis.call('sadd', KEYS[2], unpack(ARGV, i, math.min(i+4999, table.getn(ARGV)))) " +
"end; " +
"return n; ",
Arrays.<Object>asList(getRawName(), setName), params.toArray());
}
@Override
public RSet<V> get(final K key) {
String keyHash = keyHash(key);
final String setName = getValuesName(keyHash);
return new InnerSet(setName, key);
}
@Override
public Set<V> getAll(K key) {
return (Set<V>) super.getAll(key);
}
@Override
public RFuture<Collection<V>> getAllAsync(K key) {
String keyHash = keyHash(key);
String setName = getValuesName(keyHash);
return commandExecutor.readAsync(getRawName(), codec, RedisCommands.SMEMBERS, setName);
}
@Override
public Set<V> removeAll(Object key) {
return (Set<V>) get(removeAllAsync(key));
}
@Override
public RFuture<Collection<V>> removeAllAsync(Object key) {
ByteBuf keyState = encodeMapKey(key);
String keyHash = hash(keyState);
String setName = getValuesName(keyHash);
return commandExecutor.evalWriteAsync(getRawName(), codec, RedisCommands.EVAL_SET,
"redis.call('hdel', KEYS[1], ARGV[1]); " +
"local members = redis.call('smembers', KEYS[2]); " +
"redis.call('del', KEYS[2]); " +
"return members; ",
Arrays.<Object>asList(getRawName(), setName), keyState);
}
@Override
public Set<Entry<K, V>> entries() {
return (Set<Entry<K, V>>) super.entries();
}
@Override
public Set<V> replaceValues(K key, Iterable<? extends V> values) {
return (Set<V>) get(replaceValuesAsync(key, values));
}
@Override
public void fastReplaceValues(final K key, final Iterable<? extends V> values) {
get(fastReplaceValuesAsync(key, values));
}
@Override
Iterator<V> valuesIterator() {
return new RedissonSetMultimapIterator<K, V, V>(RedissonSetMultimap.this, commandExecutor, codec) {
@Override
V getValue(V entry) {
return (V) entry;
}
};
}
@Override
RedissonSetMultimapIterator<K, V, Entry<K, V>> entryIterator() {
return new RedissonSetMultimapIterator<>(RedissonSetMultimap.this, commandExecutor, codec);
}
@Override
public RFuture<Collection<V>> replaceValuesAsync(K key, Iterable<? extends V> values) {
List<Object> params = new ArrayList<Object>();
ByteBuf keyState = encodeMapKey(key);
params.add(keyState);
String keyHash = hash(keyState);
params.add(keyHash);
for (Object value : values) {
ByteBuf valueState = encodeMapValue(value);
params.add(valueState);
}
String setName = getValuesName(keyHash);
return commandExecutor.evalWriteAsync(getRawName(), codec, RedisCommands.EVAL_SET,
"redis.call('hset', KEYS[1], ARGV[1], ARGV[2]); " +
"local members = redis.call('smembers', KEYS[2]); " +
"redis.call('del', KEYS[2]); " +
"if #ARGV > 2 then " +
"for i=3, #ARGV, 5000 do "
+ "redis.call('sadd', KEYS[2], unpack(ARGV, i, math.min(i+4999, table.getn(ARGV)))) "
+ "end; " +
"end; " +
"return members; ",
Arrays.<Object>asList(getRawName(), setName), params.toArray());
}
@Override
public RFuture<Void> fastReplaceValuesAsync(K key, Iterable<? extends V> values) {
List<Object> params = new ArrayList<Object>();
ByteBuf keyState = encodeMapKey(key);
params.add(keyState);
String keyHash = hash(keyState);
params.add(keyHash);
for (Object value : values) {
ByteBuf valueState = encodeMapValue(value);
params.add(valueState);
}
String setName = getValuesName(keyHash);
return commandExecutor.evalWriteAsync(getRawName(), codec, RedisCommands.EVAL_VOID,
"redis.call('hset', KEYS[1], ARGV[1], ARGV[2]); " +
"redis.call('del', KEYS[2]); " +
"if #ARGV > 2 then " +
"for i=3, #ARGV, 5000 do "
+ "redis.call('sadd', KEYS[2], unpack(ARGV, i, math.min(i+4999, table.getn(ARGV)))) "
+ "end; " +
"end; ",
Arrays.<Object>asList(getRawName(), setName), params.toArray());
}
@Override
protected <T extends ObjectListener> int addListener(String name, T listener, BiConsumer<T, String> consumer) {
if (listener instanceof SetAddListener
|| listener instanceof SetRemoveListener) {
String prefix = getValuesName("");
return addListener(name, listener, consumer, m -> m.startsWith(prefix));
}
return super.addListener(name, listener, consumer);
}
@Override
protected <T extends ObjectListener> RFuture<Integer> addListenerAsync(String name, T listener, BiConsumer<T, String> consumer) {
if (listener instanceof SetAddListener
|| listener instanceof SetRemoveListener) {
String prefix = getValuesName("");
return addListenerAsync(name, listener, consumer, m -> m.startsWith(prefix));
}
return super.addListenerAsync(name, listener, consumer);
}
@Override
public int addListener(ObjectListener listener) {
if (listener instanceof SetAddListener) {
return addListener("__keyevent@*:sadd", (SetAddListener) listener, SetAddListener::onAdd);
}
if (listener instanceof SetRemoveListener) {
return addListener("__keyevent@*:srem", (SetRemoveListener) listener, SetRemoveListener::onRemove);
}
return super.addListener(listener);
}
@Override
public RFuture<Integer> addListenerAsync(ObjectListener listener) {
if (listener instanceof SetAddListener) {
return addListenerAsync("__keyevent@*:sadd", (SetAddListener) listener, SetAddListener::onAdd);
}
if (listener instanceof SetRemoveListener) {
return addListenerAsync("__keyevent@*:srem", (SetRemoveListener) listener, SetRemoveListener::onRemove);
}
return super.addListenerAsync(listener);
}
@Override
public void removeListener(int listenerId) {
removeListener(listenerId, "__keyevent@*:sadd", "__keyevent@*:srem");
super.removeListener(listenerId);
}
@Override
public RFuture<Void> removeListenerAsync(int listenerId) {
return removeListenerAsync(listenerId, "__keyevent@*:sadd", "__keyevent@*:srem");
}
protected | RedissonSetMultimap |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestWritable.java | {
"start": 2565,
"end": 4188
} | class ____ extends SimpleWritable
implements WritableComparable<SimpleWritableComparable>, Configurable {
private Configuration conf;
public SimpleWritableComparable() {}
public void setConf(Configuration conf) {
this.conf = conf;
}
public Configuration getConf() {
return this.conf;
}
public int compareTo(SimpleWritableComparable o) {
return this.state - o.state;
}
}
/** Test 1: Check that SimpleWritable. */
@Test
public void testSimpleWritable() throws Exception {
testWritable(new SimpleWritable());
}
@Test
public void testByteWritable() throws Exception {
testWritable(new ByteWritable((byte)128));
}
@Test
public void testShortWritable() throws Exception {
testWritable(new ShortWritable((byte)256));
}
@Test
public void testDoubleWritable() throws Exception {
testWritable(new DoubleWritable(1.0));
}
/** Utility method for testing writables. */
public static Writable testWritable(Writable before)
throws Exception {
return testWritable(before, null);
}
/** Utility method for testing writables. */
public static Writable testWritable(Writable before
, Configuration conf) throws Exception {
DataOutputBuffer dob = new DataOutputBuffer();
before.write(dob);
DataInputBuffer dib = new DataInputBuffer();
dib.reset(dob.getData(), dob.getLength());
Writable after = (Writable)ReflectionUtils.newInstance(
before.getClass(), conf);
after.readFields(dib);
assertEquals(before, after);
return after;
}
private static | SimpleWritableComparable |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.