language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
spring-projects__spring-security
|
webauthn/src/main/java/org/springframework/security/web/webauthn/jackson/DurationJackson2Serializer.java
|
{
"start": 1198,
"end": 1532
}
|
class ____ extends StdSerializer<Duration> {
/**
* Creates an instance.
*/
DurationJackson2Serializer() {
super(Duration.class);
}
@Override
public void serialize(Duration duration, JsonGenerator jgen, SerializerProvider provider) throws IOException {
jgen.writeNumber(duration.toMillis());
}
}
|
DurationJackson2Serializer
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/CyberArkVaultEndpointBuilderFactory.java
|
{
"start": 1574,
"end": 8868
}
|
interface ____
extends
EndpointProducerBuilder {
default AdvancedCyberArkVaultEndpointBuilder advanced() {
return (AdvancedCyberArkVaultEndpointBuilder) this;
}
/**
* The CyberArk Conjur account name.
*
* The option is a: <code>java.lang.String</code> type.
*
* Required: true
* Group: producer
*
* @param account the value to set
* @return the dsl builder
*/
default CyberArkVaultEndpointBuilder account(String account) {
doSetProperty("account", account);
return this;
}
/**
* Path to the SSL certificate for verification.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param certificatePath the value to set
* @return the dsl builder
*/
default CyberArkVaultEndpointBuilder certificatePath(String certificatePath) {
doSetProperty("certificatePath", certificatePath);
return this;
}
/**
* Reference to a ConjurClient instance in the registry.
*
* The option is a:
* <code>org.apache.camel.component.cyberark.vault.client.ConjurClient</code> type.
*
* Group: producer
*
* @param conjurClient the value to set
* @return the dsl builder
*/
default CyberArkVaultEndpointBuilder conjurClient(org.apache.camel.component.cyberark.vault.client.ConjurClient conjurClient) {
doSetProperty("conjurClient", conjurClient);
return this;
}
/**
* Reference to a ConjurClient instance in the registry.
*
* The option will be converted to a
* <code>org.apache.camel.component.cyberark.vault.client.ConjurClient</code> type.
*
* Group: producer
*
* @param conjurClient the value to set
* @return the dsl builder
*/
default CyberArkVaultEndpointBuilder conjurClient(String conjurClient) {
doSetProperty("conjurClient", conjurClient);
return this;
}
/**
* The operation to perform. It can be getSecret or createSecret.
*
* The option is a:
* <code>org.apache.camel.component.cyberark.vault.CyberArkVaultOperations</code> type.
*
* Default: getSecret
* Group: producer
*
* @param operation the value to set
* @return the dsl builder
*/
default CyberArkVaultEndpointBuilder operation(org.apache.camel.component.cyberark.vault.CyberArkVaultOperations operation) {
doSetProperty("operation", operation);
return this;
}
/**
* The operation to perform. It can be getSecret or createSecret.
*
* The option will be converted to a
* <code>org.apache.camel.component.cyberark.vault.CyberArkVaultOperations</code> type.
*
* Default: getSecret
* Group: producer
*
* @param operation the value to set
* @return the dsl builder
*/
default CyberArkVaultEndpointBuilder operation(String operation) {
doSetProperty("operation", operation);
return this;
}
/**
* The secret ID to retrieve from CyberArk Conjur.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param secretId the value to set
* @return the dsl builder
*/
default CyberArkVaultEndpointBuilder secretId(String secretId) {
doSetProperty("secretId", secretId);
return this;
}
/**
* The CyberArk Conjur instance URL.
*
* The option is a: <code>java.lang.String</code> type.
*
* Required: true
* Group: producer
*
* @param url the value to set
* @return the dsl builder
*/
default CyberArkVaultEndpointBuilder url(String url) {
doSetProperty("url", url);
return this;
}
/**
* Whether to verify SSL certificates when connecting to CyberArk
* Conjur.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: producer
*
* @param verifySsl the value to set
* @return the dsl builder
*/
default CyberArkVaultEndpointBuilder verifySsl(boolean verifySsl) {
doSetProperty("verifySsl", verifySsl);
return this;
}
/**
* Whether to verify SSL certificates when connecting to CyberArk
* Conjur.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: producer
*
* @param verifySsl the value to set
* @return the dsl builder
*/
default CyberArkVaultEndpointBuilder verifySsl(String verifySsl) {
doSetProperty("verifySsl", verifySsl);
return this;
}
/**
* The API key for authentication with CyberArk Conjur.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param apiKey the value to set
* @return the dsl builder
*/
default CyberArkVaultEndpointBuilder apiKey(String apiKey) {
doSetProperty("apiKey", apiKey);
return this;
}
/**
* Pre-authenticated token to use for CyberArk Conjur.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param authToken the value to set
* @return the dsl builder
*/
default CyberArkVaultEndpointBuilder authToken(String authToken) {
doSetProperty("authToken", authToken);
return this;
}
/**
* The password for authentication with CyberArk Conjur.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param password the value to set
* @return the dsl builder
*/
default CyberArkVaultEndpointBuilder password(String password) {
doSetProperty("password", password);
return this;
}
/**
* The username for authentication with CyberArk Conjur.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param username the value to set
* @return the dsl builder
*/
default CyberArkVaultEndpointBuilder username(String username) {
doSetProperty("username", username);
return this;
}
}
/**
* Advanced builder for endpoint for the CyberArk Vault component.
*/
public
|
CyberArkVaultEndpointBuilder
|
java
|
apache__spark
|
common/unsafe/src/main/java/org/apache/spark/unsafe/memory/HeapMemoryAllocator.java
|
{
"start": 1159,
"end": 4346
}
|
class ____ implements MemoryAllocator {
@GuardedBy("this")
private final Map<Long, LinkedList<WeakReference<long[]>>> bufferPoolsBySize = new HashMap<>();
private static final int POOLING_THRESHOLD_BYTES = 1024 * 1024;
/**
* Returns true if allocations of the given size should go through the pooling mechanism and
* false otherwise.
*/
private boolean shouldPool(long size) {
// Very small allocations are less likely to benefit from pooling.
return size >= POOLING_THRESHOLD_BYTES;
}
@Override
public MemoryBlock allocate(long size) throws OutOfMemoryError {
int numWords = (int) ((size + 7) / 8);
long alignedSize = numWords * 8L;
assert (alignedSize >= size);
if (shouldPool(alignedSize)) {
synchronized (this) {
final LinkedList<WeakReference<long[]>> pool = bufferPoolsBySize.get(alignedSize);
if (pool != null) {
while (!pool.isEmpty()) {
final WeakReference<long[]> arrayReference = pool.pop();
final long[] array = arrayReference.get();
if (array != null) {
assert (array.length * 8L >= size);
MemoryBlock memory = new MemoryBlock(array, Platform.LONG_ARRAY_OFFSET, size);
if (MemoryAllocator.MEMORY_DEBUG_FILL_ENABLED) {
memory.fill(MemoryAllocator.MEMORY_DEBUG_FILL_CLEAN_VALUE);
}
return memory;
}
}
bufferPoolsBySize.remove(alignedSize);
}
}
}
long[] array = new long[numWords];
MemoryBlock memory = new MemoryBlock(array, Platform.LONG_ARRAY_OFFSET, size);
if (MemoryAllocator.MEMORY_DEBUG_FILL_ENABLED) {
memory.fill(MemoryAllocator.MEMORY_DEBUG_FILL_CLEAN_VALUE);
}
return memory;
}
@Override
public void free(MemoryBlock memory) {
assert (memory.obj != null) :
"baseObject was null; are you trying to use the on-heap allocator to free off-heap memory?";
assert (memory.pageNumber != MemoryBlock.FREED_IN_ALLOCATOR_PAGE_NUMBER) :
"page has already been freed";
assert ((memory.pageNumber == MemoryBlock.NO_PAGE_NUMBER)
|| (memory.pageNumber == MemoryBlock.FREED_IN_TMM_PAGE_NUMBER)) :
"TMM-allocated pages must first be freed via TMM.freePage(), not directly in allocator " +
"free()";
final long size = memory.size();
if (MemoryAllocator.MEMORY_DEBUG_FILL_ENABLED) {
memory.fill(MemoryAllocator.MEMORY_DEBUG_FILL_FREED_VALUE);
}
// Mark the page as freed (so we can detect double-frees).
memory.pageNumber = MemoryBlock.FREED_IN_ALLOCATOR_PAGE_NUMBER;
// As an additional layer of defense against use-after-free bugs, we mutate the
// MemoryBlock to null out its reference to the long[] array.
long[] array = (long[]) memory.obj;
memory.setObjAndOffset(null, 0);
long alignedSize = ((size + 7) / 8) * 8;
if (shouldPool(alignedSize)) {
synchronized (this) {
LinkedList<WeakReference<long[]>> pool =
bufferPoolsBySize.computeIfAbsent(alignedSize, k -> new LinkedList<>());
pool.add(new WeakReference<>(array));
}
}
}
}
|
HeapMemoryAllocator
|
java
|
elastic__elasticsearch
|
x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/custom/response/BaseCustomResponseParserTests.java
|
{
"start": 1222,
"end": 4801
}
|
class ____ extends ESTestCase {
public void testValidateNonNull_ThrowsException_WhenPassedNull() {
var exception = expectThrows(NullPointerException.class, () -> BaseCustomResponseParser.validateNonNull(null, "field"));
assertThat(exception.getMessage(), is("Failed to parse field [field], extracted field was null"));
}
public void testValidateList_ThrowsException_WhenPassedAnObjectThatIsNotAList() {
var exception = expectThrows(IllegalArgumentException.class, () -> validateList(new Object(), "field"));
assertThat(exception.getMessage(), is("Extracted field [field] is an invalid type, expected a list but received [Object]"));
}
public void testValidateList_ReturnsList() {
Object obj = List.of("abc", "123");
assertThat(validateList(obj, "field"), is(List.of("abc", "123")));
}
public void testConvertToListOfFloats_ThrowsException_WhenAnItemInTheListIsNotANumber() {
var list = List.of(1, "hello");
var exception = expectThrows(IllegalStateException.class, () -> convertToListOfFloats(list, "field"));
assertThat(
exception.getMessage(),
is("Failed to parse list entry [1], error: Unable to convert field [field] of type [String] to Number")
);
}
public void testConvertToListOfFloats_ReturnsList() {
var list = List.of(1, 1.1f, -2.0d, new AtomicInteger(1));
assertThat(convertToListOfFloats(list, "field"), is(List.of(1f, 1.1f, -2f, 1f)));
}
public void testCastList() {
var list = List.of("abc", "123", 1, 2.2d);
assertThat(castList(list, (obj, fieldName) -> obj.toString(), "field"), is(List.of("abc", "123", "1", "2.2")));
}
public void testCastList_ThrowsException() {
var list = List.of("abc");
var exception = expectThrows(IllegalStateException.class, () -> castList(list, (obj, fieldName) -> {
throw new IllegalArgumentException("failed");
}, "field"));
assertThat(exception.getMessage(), is("Failed to parse list entry [0], error: failed"));
}
public void testValidateMap() {
assertThat(validateMap(Map.of("abc", 123), "field"), is(Map.of("abc", 123)));
}
public void testValidateMap_ThrowsException_WhenObjectIsNotAMap() {
var exception = expectThrows(IllegalArgumentException.class, () -> validateMap("hello", "field"));
assertThat(exception.getMessage(), is("Extracted field [field] is an invalid type, expected a map but received [String]"));
}
public void testValidateMap_ThrowsException_WhenKeysAreNotStrings() {
var exception = expectThrows(IllegalStateException.class, () -> validateMap(Map.of("key", "value", 1, "abc"), "field"));
assertThat(
exception.getMessage(),
is("Extracted field [field] map has an invalid key type. Expected a string but received [Integer]")
);
}
public void testToFloat() {
assertThat(toFloat(1, "field"), is(1f));
}
public void testToFloat_AtomicLong() {
assertThat(toFloat(new AtomicLong(100), "field"), is(100f));
}
public void testToFloat_Double() {
assertThat(toFloat(1.123d, "field"), is(1.123f));
}
public void testToType() {
Object obj = "hello";
assertThat(toType(obj, String.class, "field"), is("hello"));
}
public void testToType_List() {
Object obj = List.of(123, 456);
assertThat(toType(obj, List.class, "field"), is(List.of(123, 456)));
}
}
|
BaseCustomResponseParserTests
|
java
|
apache__camel
|
components/camel-google/camel-google-bigquery/src/main/java/org/apache/camel/component/google/bigquery/GoogleBigQueryConstants.java
|
{
"start": 908,
"end": 3251
}
|
class ____ {
// All the schemes
public static final String SCHEME_BIGQUERY_SQL = "google-bigquery-sql";
public static final String SCHEME_BIGQUERY = "google-bigquery";
@Metadata(description = "Table suffix to use when inserting data", javaType = "String", applicableFor = SCHEME_BIGQUERY)
public static final String TABLE_SUFFIX = "CamelGoogleBigQueryTableSuffix";
@Metadata(description = "Table id where data will be submitted. If specified will override endpoint configuration",
javaType = "String", applicableFor = SCHEME_BIGQUERY)
public static final String TABLE_ID = "CamelGoogleBigQueryTableId";
@Metadata(description = "InsertId to use when inserting data", javaType = "String", applicableFor = SCHEME_BIGQUERY)
public static final String INSERT_ID = "CamelGoogleBigQueryInsertId";
@Metadata(description = "Partition decorator to indicate partition to use when inserting data", javaType = "String",
applicableFor = SCHEME_BIGQUERY)
public static final String PARTITION_DECORATOR = "CamelGoogleBigQueryPartitionDecorator";
@Metadata(description = "Preprocessed query text", javaType = "String", applicableFor = SCHEME_BIGQUERY_SQL)
public static final String TRANSLATED_QUERY = "CamelGoogleBigQueryTranslatedQuery";
@Metadata(description = "A custom `JobId` to use", javaType = "com.google.cloud.bigquery.JobId",
applicableFor = SCHEME_BIGQUERY_SQL)
public static final String JOB_ID = "CamelGoogleBigQueryJobId";
@Metadata(description = "The page token to retrieve a specific page of results from BigQuery. " +
"If not set, the first page is returned",
javaType = "String", applicableFor = SCHEME_BIGQUERY_SQL)
public static final String PAGE_TOKEN = "CamelGoogleBigQueryPageToken";
@Metadata(description = "The next page token returned by BigQuery. Use this token in the " +
"`CamelGoogleBigQueryPageToken` header of a subsequent request to retrieve the next page of results",
javaType = "String", applicableFor = SCHEME_BIGQUERY_SQL)
public static final String NEXT_PAGE_TOKEN = "CamelGoogleBigQueryNextPageToken";
/**
* Prevent instantiation.
*/
private GoogleBigQueryConstants() {
}
}
|
GoogleBigQueryConstants
|
java
|
apache__avro
|
lang/java/avro/src/main/java/org/apache/avro/file/DataFileStream.java
|
{
"start": 1937,
"end": 10655
}
|
class ____ {
Schema schema;
Map<String, byte[]> meta = new HashMap<>();
private transient List<String> metaKeyList = new ArrayList<>();
byte[] sync = new byte[DataFileConstants.SYNC_SIZE];
private Header() {
}
}
private final DatumReader<D> reader;
private long blockSize;
private boolean availableBlock = false;
private Header header;
/** Decoder on raw input stream. (Used for metadata.) */
BinaryDecoder vin;
/**
* Secondary decoder, for datums. (Different than vin for block segments.)
*/
BinaryDecoder datumIn = null;
ByteBuffer blockBuffer;
long blockCount; // # entries in block
long blockRemaining; // # entries remaining in block
byte[] syncBuffer = new byte[DataFileConstants.SYNC_SIZE];
private Codec codec;
/**
* Construct a reader for an input stream. For file-based input, use
* {@link DataFileReader}. This will buffer, wrapping with a
* {@link java.io.BufferedInputStream} is not necessary.
*/
public DataFileStream(InputStream in, DatumReader<D> reader) throws IOException {
this.reader = reader;
initialize(in, null);
}
/**
* create an uninitialized DataFileStream
*/
protected DataFileStream(DatumReader<D> reader) {
this.reader = reader;
}
byte[] readMagic() throws IOException {
if (this.vin == null) {
throw new IOException("InputStream is not initialized");
}
byte[] magic = new byte[DataFileConstants.MAGIC.length];
try {
vin.readFixed(magic); // read magic
} catch (IOException e) {
throw new IOException("Not an Avro data file.", e);
}
return magic;
}
void validateMagic(byte[] magic) throws InvalidAvroMagicException {
if (!Arrays.equals(DataFileConstants.MAGIC, magic))
throw new InvalidAvroMagicException("Not an Avro data file.");
}
/** Initialize the stream by reading from its head. */
void initialize(InputStream in, byte[] magic) throws IOException {
this.header = new Header();
this.vin = DecoderFactory.get().binaryDecoder(in, vin);
magic = (magic == null) ? readMagic() : magic;
validateMagic(magic);
long l = vin.readMapStart(); // read meta data
if (l > 0) {
do {
for (long i = 0; i < l; i++) {
String key = vin.readString(null).toString();
ByteBuffer value = vin.readBytes(null);
byte[] bb = new byte[value.remaining()];
value.get(bb);
header.meta.put(key, bb);
header.metaKeyList.add(key);
}
} while ((l = vin.mapNext()) != 0);
}
vin.readFixed(header.sync); // read sync
// finalize the header
header.metaKeyList = Collections.unmodifiableList(header.metaKeyList);
header.schema = new Schema.Parser(NameValidator.NO_VALIDATION).setValidateDefaults(false)
.parse(getMetaString(DataFileConstants.SCHEMA));
this.codec = resolveCodec();
reader.setSchema(header.schema);
}
/** Initialize the stream without reading from it. */
void initialize(Header header) {
this.header = header;
this.codec = resolveCodec();
reader.setSchema(header.schema);
}
Codec resolveCodec() {
String codecStr = getMetaString(DataFileConstants.CODEC);
if (codecStr != null) {
return CodecFactory.fromString(codecStr).createInstance();
} else {
return CodecFactory.nullCodec().createInstance();
}
}
/**
* A handle that can be used to reopen this stream without rereading the head.
*/
public Header getHeader() {
return header;
}
/** Return the schema used in this file. */
public Schema getSchema() {
return header.schema;
}
/** Return the list of keys in the metadata */
public List<String> getMetaKeys() {
return header.metaKeyList;
}
/** Return the value of a metadata property. */
public byte[] getMeta(String key) {
return header.meta.get(key);
}
/** Return the value of a metadata property. */
public String getMetaString(String key) {
byte[] value = getMeta(key);
if (value == null) {
return null;
}
return new String(value, StandardCharsets.UTF_8);
}
/** Return the value of a metadata property. */
public long getMetaLong(String key) {
return Long.parseLong(getMetaString(key));
}
/**
* Returns an iterator over entries in this file. Note that this iterator is
* shared with other users of the file: it does not contain a separate pointer
* into the file.
*/
@Override
public Iterator<D> iterator() {
return this;
}
private DataBlock block = null;
/** True if more entries remain in this file. */
@Override
public boolean hasNext() {
try {
if (blockRemaining == 0) {
// check that the previous block was finished
if (null != datumIn) {
boolean atEnd = datumIn.isEnd();
if (!atEnd) {
throw new IOException("Block read partially, the data may be corrupt");
}
}
if (hasNextBlock()) {
block = nextRawBlock(block);
block.decompressUsing(codec);
blockBuffer = block.getAsByteBuffer();
datumIn = DecoderFactory.get().binaryDecoder(blockBuffer.array(),
blockBuffer.arrayOffset() + blockBuffer.position(), blockBuffer.remaining(), datumIn);
}
}
return blockRemaining != 0;
} catch (EOFException e) { // at EOF
return false;
} catch (IOException e) {
throw new AvroRuntimeException(e);
}
}
/**
* Read the next datum in the file.
*
* @throws NoSuchElementException if no more remain in the file.
*/
@Override
public D next() {
try {
return next(null);
} catch (IOException e) {
throw new AvroRuntimeException(e);
}
}
/**
* Read the next datum from the file.
*
* @param reuse an instance to reuse.
* @throws NoSuchElementException if no more remain in the file.
*/
public D next(D reuse) throws IOException {
if (!hasNext())
throw new NoSuchElementException();
D result = reader.read(reuse, datumIn);
if (0 == --blockRemaining) {
blockFinished();
}
return result;
}
/** Expert: Return the next block in the file, as binary-encoded data. */
public ByteBuffer nextBlock() throws IOException {
if (!hasNext())
throw new NoSuchElementException();
if (blockRemaining != blockCount)
throw new IllegalStateException("Not at block start.");
blockRemaining = 0;
blockFinished();
datumIn = null;
return blockBuffer;
}
/** Expert: Return the count of items in the current block. */
public long getBlockCount() {
return blockCount;
}
/** Expert: Return the size in bytes (uncompressed) of the current block. */
public long getBlockSize() {
return blockSize;
}
protected void blockFinished() throws IOException {
// nothing for the stream impl
}
boolean hasNextBlock() {
try {
if (availableBlock)
return true;
if (vin.isEnd())
return false;
blockRemaining = vin.readLong(); // read block count
blockSize = vin.readLong(); // read block size
if (blockSize > Integer.MAX_VALUE || blockSize < 0) {
throw new IOException("Block size invalid or too large for this implementation: " + blockSize);
}
blockCount = blockRemaining;
availableBlock = true;
return true;
} catch (EOFException eof) {
return false;
} catch (IOException e) {
throw new AvroRuntimeException(e);
}
}
DataBlock nextRawBlock(DataBlock reuse) throws IOException {
if (!hasNextBlock()) {
throw new NoSuchElementException();
}
if (reuse == null || reuse.data.length < (int) blockSize) {
reuse = new DataBlock(blockRemaining, (int) blockSize);
} else {
reuse.numEntries = blockRemaining;
reuse.blockSize = (int) blockSize;
}
// throws if it can't read the size requested
vin.readFixed(reuse.data, 0, reuse.blockSize);
vin.readFixed(syncBuffer);
availableBlock = false;
if (!Arrays.equals(syncBuffer, header.sync))
throw new IOException("Invalid sync marker! The sync marker in the data block doesn't match the "
+ "file header's sync marker. This likely indicates data corruption, truncated file, "
+ "or incorrectly concatenated Avro files. Verify file integrity and ensure proper "
+ "file transmission or creation.");
return reuse;
}
/** Not supported. */
@Override
public void remove() {
throw new UnsupportedOperationException();
}
/** Close this reader. */
@Override
public void close() throws IOException {
vin.inputStream().close();
}
static
|
Header
|
java
|
alibaba__nacos
|
plugin-default-impl/nacos-default-auth-plugin/src/main/java/com/alibaba/nacos/plugin/auth/impl/persistence/PermissionInfo.java
|
{
"start": 783,
"end": 1563
}
|
class ____ implements Serializable {
private static final long serialVersionUID = 388813573388837395L;
/**
* Role name.
*/
private String role;
/**
* Resource.
*/
private String resource;
/**
* Action on resource.
*/
private String action;
public String getRole() {
return role;
}
public void setRole(String role) {
this.role = role;
}
public String getResource() {
return resource;
}
public void setResource(String resource) {
this.resource = resource;
}
public String getAction() {
return action;
}
public void setAction(String action) {
this.action = action;
}
}
|
PermissionInfo
|
java
|
apache__rocketmq
|
remoting/src/main/java/org/apache/rocketmq/remoting/protocol/statictopic/TopicQueueMappingUtils.java
|
{
"start": 1380,
"end": 32932
}
|
class ____ {
Map<String, Integer> brokerNumMap = new HashMap<>();
Map<Integer, String> idToBroker = new HashMap<>();
//used for remapping
Map<String, Integer> brokerNumMapBeforeRemapping;
int currentIndex = 0;
List<String> leastBrokers = new ArrayList<>();
private MappingAllocator(Map<Integer, String> idToBroker, Map<String, Integer> brokerNumMap, Map<String, Integer> brokerNumMapBeforeRemapping) {
this.idToBroker.putAll(idToBroker);
this.brokerNumMap.putAll(brokerNumMap);
this.brokerNumMapBeforeRemapping = brokerNumMapBeforeRemapping;
}
private void freshState() {
int minNum = Integer.MAX_VALUE;
for (Map.Entry<String, Integer> entry : brokerNumMap.entrySet()) {
if (entry.getValue() < minNum) {
leastBrokers.clear();
leastBrokers.add(entry.getKey());
minNum = entry.getValue();
} else if (entry.getValue() == minNum) {
leastBrokers.add(entry.getKey());
}
}
//reduce the remapping
if (brokerNumMapBeforeRemapping != null
&& !brokerNumMapBeforeRemapping.isEmpty()) {
leastBrokers.sort((o1, o2) -> {
int i1 = 0, i2 = 0;
if (brokerNumMapBeforeRemapping.containsKey(o1)) {
i1 = brokerNumMapBeforeRemapping.get(o1);
}
if (brokerNumMapBeforeRemapping.containsKey(o2)) {
i2 = brokerNumMapBeforeRemapping.get(o2);
}
return i1 - i2;
});
} else {
//reduce the imbalance
Collections.shuffle(leastBrokers);
}
currentIndex = leastBrokers.size() - 1;
}
private String nextBroker() {
if (leastBrokers.isEmpty()) {
freshState();
}
int tmpIndex = currentIndex % leastBrokers.size();
return leastBrokers.remove(tmpIndex);
}
public Map<String, Integer> getBrokerNumMap() {
return brokerNumMap;
}
public void upToNum(int maxQueueNum) {
int currSize = idToBroker.size();
if (maxQueueNum <= currSize) {
return;
}
for (int i = currSize; i < maxQueueNum; i++) {
String nextBroker = nextBroker();
if (brokerNumMap.containsKey(nextBroker)) {
brokerNumMap.put(nextBroker, brokerNumMap.get(nextBroker) + 1);
} else {
brokerNumMap.put(nextBroker, 1);
}
idToBroker.put(i, nextBroker);
}
}
public Map<Integer, String> getIdToBroker() {
return idToBroker;
}
}
public static MappingAllocator buildMappingAllocator(Map<Integer, String> idToBroker, Map<String, Integer> brokerNumMap, Map<String, Integer> brokerNumMapBeforeRemapping) {
return new MappingAllocator(idToBroker, brokerNumMap, brokerNumMapBeforeRemapping);
}
public static Map.Entry<Long, Integer> findMaxEpochAndQueueNum(List<TopicQueueMappingDetail> mappingDetailList) {
long epoch = -1;
int queueNum = 0;
for (TopicQueueMappingDetail mappingDetail : mappingDetailList) {
if (mappingDetail.getEpoch() > epoch) {
epoch = mappingDetail.getEpoch();
}
if (mappingDetail.getTotalQueues() > queueNum) {
queueNum = mappingDetail.getTotalQueues();
}
}
return new AbstractMap.SimpleImmutableEntry<>(epoch, queueNum);
}
public static List<TopicQueueMappingDetail> getMappingDetailFromConfig(Collection<TopicConfigAndQueueMapping> configs) {
List<TopicQueueMappingDetail> detailList = new ArrayList<>();
for (TopicConfigAndQueueMapping configMapping : configs) {
if (configMapping.getMappingDetail() != null) {
detailList.add(configMapping.getMappingDetail());
}
}
return detailList;
}
public static Map.Entry<Long, Integer> checkNameEpochNumConsistence(String topic, Map<String, TopicConfigAndQueueMapping> brokerConfigMap) {
if (brokerConfigMap == null
|| brokerConfigMap.isEmpty()) {
return null;
}
//make sure it is not null
long maxEpoch = -1;
int maxNum = -1;
String scope = null;
for (Map.Entry<String, TopicConfigAndQueueMapping> entry : brokerConfigMap.entrySet()) {
String broker = entry.getKey();
TopicConfigAndQueueMapping configMapping = entry.getValue();
if (configMapping.getMappingDetail() == null) {
throw new RuntimeException("Mapping info should not be null in broker " + broker);
}
TopicQueueMappingDetail mappingDetail = configMapping.getMappingDetail();
if (!broker.equals(mappingDetail.getBname())) {
throw new RuntimeException(String.format("The broker name is not equal %s != %s ", broker, mappingDetail.getBname()));
}
if (mappingDetail.isDirty()) {
throw new RuntimeException("The mapping info is dirty in broker " + broker);
}
if (!configMapping.getTopicName().equals(mappingDetail.getTopic())) {
throw new RuntimeException("The topic name is inconsistent in broker " + broker);
}
if (topic != null
&& !topic.equals(mappingDetail.getTopic())) {
throw new RuntimeException("The topic name is not match for broker " + broker);
}
if (scope != null
&& !scope.equals(mappingDetail.getScope())) {
throw new RuntimeException(String.format("scope does not match %s != %s in %s", mappingDetail.getScope(), scope, broker));
} else {
scope = mappingDetail.getScope();
}
if (maxEpoch != -1
&& maxEpoch != mappingDetail.getEpoch()) {
throw new RuntimeException(String.format("epoch does not match %d != %d in %s", maxEpoch, mappingDetail.getEpoch(), mappingDetail.getBname()));
} else {
maxEpoch = mappingDetail.getEpoch();
}
if (maxNum != -1
&& maxNum != mappingDetail.getTotalQueues()) {
throw new RuntimeException(String.format("total queue number does not match %d != %d in %s", maxNum, mappingDetail.getTotalQueues(), mappingDetail.getBname()));
} else {
maxNum = mappingDetail.getTotalQueues();
}
}
return new AbstractMap.SimpleEntry<>(maxEpoch, maxNum);
}
public static String getMockBrokerName(String scope) {
assert scope != null;
if (scope.equals(MixAll.METADATA_SCOPE_GLOBAL)) {
return MixAll.LOGICAL_QUEUE_MOCK_BROKER_PREFIX + scope.substring(2);
} else {
return MixAll.LOGICAL_QUEUE_MOCK_BROKER_PREFIX + scope;
}
}
public static void makeSureLogicQueueMappingItemImmutable(List<LogicQueueMappingItem> oldItems, List<LogicQueueMappingItem> newItems, boolean epochEqual, boolean isCLean) {
if (oldItems == null || oldItems.isEmpty()) {
return;
}
if (newItems == null || newItems.isEmpty()) {
throw new RuntimeException("The new item list is null or empty");
}
int iold = 0, inew = 0;
while (iold < oldItems.size() && inew < newItems.size()) {
LogicQueueMappingItem newItem = newItems.get(inew);
LogicQueueMappingItem oldItem = oldItems.get(iold);
if (newItem.getGen() < oldItem.getGen()) {
//the earliest item may have been deleted concurrently
inew++;
} else if (oldItem.getGen() < newItem.getGen()) {
//in the following cases, the new item-list has fewer items than old item-list
//1. the queue is mapped back to a broker which hold the logic queue before
//2. The earliest item is deleted by TopicQueueMappingCleanService
iold++;
} else {
assert oldItem.getBname().equals(newItem.getBname());
assert oldItem.getQueueId() == newItem.getQueueId();
assert oldItem.getStartOffset() == newItem.getStartOffset();
if (oldItem.getLogicOffset() != -1) {
assert oldItem.getLogicOffset() == newItem.getLogicOffset();
}
iold++;
inew++;
}
}
if (epochEqual) {
LogicQueueMappingItem oldLeader = oldItems.get(oldItems.size() - 1);
LogicQueueMappingItem newLeader = newItems.get(newItems.size() - 1);
if (newLeader.getGen() != oldLeader.getGen()
|| !newLeader.getBname().equals(oldLeader.getBname())
|| newLeader.getQueueId() != oldLeader.getQueueId()
|| newLeader.getStartOffset() != oldLeader.getStartOffset()) {
throw new RuntimeException("The new leader is different but epoch equal");
}
}
}
public static void checkLogicQueueMappingItemOffset(List<LogicQueueMappingItem> items) {
if (items == null
|| items.isEmpty()) {
return;
}
int lastGen = -1;
long lastOffset = -1;
for (int i = items.size() - 1; i >= 0 ; i--) {
LogicQueueMappingItem item = items.get(i);
if (item.getStartOffset() < 0
|| item.getGen() < 0
|| item.getQueueId() < 0) {
throw new RuntimeException("The field is illegal, should not be negative");
}
if (items.size() >= 2
&& i <= items.size() - 2
&& items.get(i).getLogicOffset() < 0) {
throw new RuntimeException("The non-latest item has negative logic offset");
}
if (lastGen != -1 && item.getGen() >= lastGen) {
throw new RuntimeException("The gen does not increase monotonically");
}
if (item.getEndOffset() != -1
&& item.getEndOffset() < item.getStartOffset()) {
throw new RuntimeException("The endOffset is smaller than the start offset");
}
if (lastOffset != -1 && item.getLogicOffset() != -1) {
if (item.getLogicOffset() >= lastOffset) {
throw new RuntimeException("The base logic offset does not increase monotonically");
}
if (item.computeMaxStaticQueueOffset() >= lastOffset) {
throw new RuntimeException("The max logic offset does not increase monotonically");
}
}
lastGen = item.getGen();
lastOffset = item.getLogicOffset();
}
}
/**
 * Ensure that no physical queue (broker name + queue id) is referenced by more than
 * one global logic queue; every physical queue may back at most one mapping.
 *
 * @param mappingOnes the per-global-id mappings to scan
 * @throws RuntimeException when two global ids share the same physical queue
 */
public static void checkIfReusePhysicalQueue(Collection<TopicQueueMappingOne> mappingOnes) {
    Map<String, TopicQueueMappingOne> seenPhysicalQueues = new HashMap<>();
    for (TopicQueueMappingOne mappingOne : mappingOnes) {
        for (LogicQueueMappingItem item : mappingOne.items) {
            // key uniquely identifies one physical queue on one broker
            String physicalQueueId = item.getBname() + "-" + item.getQueueId();
            TopicQueueMappingOne previous = seenPhysicalQueues.putIfAbsent(physicalQueueId, mappingOne);
            if (previous != null) {
                throw new RuntimeException(String.format("Topic %s global queue id %d and %d shared the same physical queue %s",
                    mappingOne.topic, mappingOne.globalId, previous.globalId, physicalQueueId));
            }
        }
    }
}
/**
 * Verify every queue's leader broker is one of the target brokers.
 *
 * @param mappingOnes the per-global-id mappings to check
 * @param targetBrokers the allowed set of leader brokers
 * @throws RuntimeException when a leader lies outside the target set
 */
public static void checkLeaderInTargetBrokers(Collection<TopicQueueMappingOne> mappingOnes, Set<String> targetBrokers) {
    for (TopicQueueMappingOne mappingOne : mappingOnes) {
        boolean hostedByTarget = targetBrokers.contains(mappingOne.bname);
        if (!hostedByTarget) {
            throw new RuntimeException("The leader broker does not in target broker");
        }
    }
}
/**
 * Check each broker's physical queues are self-consistent: read queue count is at
 * least the write queue count, every mapping item starts at offset 0, references a
 * broker present in the map, and its queue id fits within that broker's write queues.
 *
 * @param brokerConfigMap broker name -> topic config and queue mapping
 * @throws RuntimeException on the first inconsistency found
 */
public static void checkPhysicalQueueConsistence(Map<String, TopicConfigAndQueueMapping> brokerConfigMap) {
    for (Map.Entry<String, TopicConfigAndQueueMapping> entry : brokerConfigMap.entrySet()) {
        TopicConfigAndQueueMapping configMapping = entry.getValue();
        assert configMapping != null;
        assert configMapping.getMappingDetail() != null;
        if (configMapping.getReadQueueNums() < configMapping.getWriteQueueNums()) {
            throw new RuntimeException("Read queues is smaller than write queues");
        }
        for (List<LogicQueueMappingItem> items: configMapping.getMappingDetail().getHostedQueues().values()) {
            for (LogicQueueMappingItem item: items) {
                // a physical queue always hosts its data from offset 0
                if (item.getStartOffset() != 0) {
                    throw new RuntimeException("The start offset does not begin from 0");
                }
                TopicConfig topicConfig = brokerConfigMap.get(item.getBname());
                if (topicConfig == null) {
                    throw new RuntimeException("The broker of item does not exist");
                }
                // queue ids are 0-based, so a valid id must be < writeQueueNums
                if (item.getQueueId() >= topicConfig.getWriteQueueNums()) {
                    throw new RuntimeException("The physical queue id is overflow the write queues");
                }
            }
        }
    }
}
/**
 * Merge per-broker mapping details into a global-queue-id -> leader-mapping view.
 * Details are processed newest-epoch first; a detail contributes a queue only when
 * its broker is that queue's current leader (the last mapping item).
 *
 * @param mappingDetailList details gathered from all brokers; sorted in place by epoch desc
 * @param replace if false, a second leader claim for an already-seen global id is an error
 * @param checkConsistence if true, verify the ids form a gapless 0..totalQueues-1 range
 * @return map of global queue id to its leader's mapping
 * @throws RuntimeException on duplicated ids, missing ids, or reused physical queues
 */
public static Map<Integer, TopicQueueMappingOne> checkAndBuildMappingItems(List<TopicQueueMappingDetail> mappingDetailList, boolean replace, boolean checkConsistence) {
    // Sort by epoch descending. Use Long.compare instead of the previous
    // (int) (o2.getEpoch() - o1.getEpoch()): that narrowing cast can flip the sign
    // when the epoch gap exceeds Integer.MAX_VALUE ms (~24.8 days), producing a
    // wrong order and letting a stale detail win.
    mappingDetailList.sort((o1, o2) -> Long.compare(o2.getEpoch(), o1.getEpoch()));
    int maxNum = 0;
    Map<Integer, TopicQueueMappingOne> globalIdMap = new HashMap<>();
    for (TopicQueueMappingDetail mappingDetail : mappingDetailList) {
        if (mappingDetail.totalQueues > maxNum) {
            maxNum = mappingDetail.totalQueues;
        }
        for (Map.Entry<Integer, List<LogicQueueMappingItem>> entry : mappingDetail.getHostedQueues().entrySet()) {
            Integer globalId = entry.getKey();
            checkLogicQueueMappingItemOffset(entry.getValue());
            String leaderBrokerName = getLeaderBroker(entry.getValue());
            if (!leaderBrokerName.equals(mappingDetail.getBname())) {
                //not the leader
                continue;
            }
            if (globalIdMap.containsKey(globalId)) {
                if (!replace) {
                    throw new RuntimeException(String.format("The queue id is duplicated in broker %s %s", leaderBrokerName, mappingDetail.getBname()));
                }
                // replace == true: keep the entry from the newer epoch already stored
            } else {
                globalIdMap.put(globalId, new TopicQueueMappingOne(mappingDetail, mappingDetail.topic, mappingDetail.bname, globalId, entry.getValue()));
            }
        }
    }
    if (checkConsistence) {
        if (maxNum != globalIdMap.size()) {
            throw new RuntimeException(String.format("The total queue number in config does not match the real hosted queues %d != %d", maxNum, globalIdMap.size()));
        }
        // ids must be exactly 0..maxNum-1 with no gaps
        for (int i = 0; i < maxNum; i++) {
            if (!globalIdMap.containsKey(i)) {
                throw new RuntimeException(String.format("The queue number %s is not in globalIdMap", i));
            }
        }
    }
    checkIfReusePhysicalQueue(globalIdMap.values());
    return globalIdMap;
}
/**
 * Return the broker name of the leader item (the last item) of a logic queue.
 *
 * @param items non-empty mapping item list, oldest first
 * @return the leader's broker name
 */
public static String getLeaderBroker(List<LogicQueueMappingItem> items) {
    LogicQueueMappingItem leader = getLeaderItem(items);
    return leader.getBname();
}
/**
 * Return the leader item of a logic queue, i.e. the newest (last) mapping item.
 *
 * @param items non-empty mapping item list, oldest first
 * @return the last item in the list
 */
public static LogicQueueMappingItem getLeaderItem(List<LogicQueueMappingItem> items) {
    assert !items.isEmpty();
    int lastIndex = items.size() - 1;
    return items.get(lastIndex);
}
/**
 * Dump the remapping wrapper as JSON into the system temp directory, using a
 * "-before"/"-after" style suffix so both snapshots of one operation can coexist.
 *
 * @param wrapper the remapping detail to persist
 * @param after true to use the after-suffix, false for the before-suffix
 * @return the absolute path of the written file
 * @throws RuntimeException (wrapping the cause) when the write fails
 */
public static String writeToTemp(TopicRemappingDetailWrapper wrapper, boolean after) {
    String topic = wrapper.getTopic();
    String data = wrapper.toJson();
    String suffix = after ? TopicRemappingDetailWrapper.SUFFIX_AFTER : TopicRemappingDetailWrapper.SUFFIX_BEFORE;
    String fileName = System.getProperty("java.io.tmpdir") + File.separator + topic + "-" + wrapper.getEpoch() + suffix;
    try {
        MixAll.string2File(data, fileName);
        return fileName;
    } catch (Exception e) {
        throw new RuntimeException("write file failed " + fileName, e);
    }
}
/**
 * Round an offset up to a block boundary, leaving at least half a block of headroom:
 * offsets in the lower half of a block round to the next boundary, offsets in the
 * upper half skip one extra block.
 *
 * @param offset the offset to round (assumed non-negative)
 * @param blockSeqSize the block size (assumed positive)
 * @return the chosen block-aligned offset strictly greater than {@code offset}
 */
public static long blockSeqRoundUp(long offset, long blockSeqSize) {
    long quotient = offset / blockSeqSize;
    long remainder = offset % blockSeqSize;
    // lower half of the block -> one block up; upper half -> two blocks up
    long blocksUp = remainder < blockSeqSize / 2 ? 1 : 2;
    return (quotient + blocksUp) * blockSeqSize;
}
/**
 * Ensure every broker that currently hosts queues for the topic is included in the
 * target broker set; brokers with no hosted queues are ignored.
 *
 * @param targetBrokers the brokers the caller intends to operate on
 * @param brokerConfigMap broker name -> current config and queue mapping
 * @throws RuntimeException when a hosting broker is missing from the targets
 */
public static void checkTargetBrokersComplete(Set<String> targetBrokers, Map<String, TopicConfigAndQueueMapping> brokerConfigMap) {
    for (Map.Entry<String, TopicConfigAndQueueMapping> entry : brokerConfigMap.entrySet()) {
        String broker = entry.getKey();
        boolean hostsNothing = entry.getValue().getMappingDetail().getHostedQueues().isEmpty();
        if (hostsNothing) {
            continue;
        }
        if (!targetBrokers.contains(broker)) {
            throw new RuntimeException("The existed broker " + broker + " does not in target brokers ");
        }
    }
}
/**
 * Ensure the target and non-target broker sets are disjoint.
 *
 * @param targetBrokers brokers selected for the operation
 * @param nonTargetBrokers brokers explicitly excluded from the operation
 * @throws RuntimeException when any broker appears in both sets
 */
public static void checkNonTargetBrokers(Set<String> targetBrokers, Set<String> nonTargetBrokers) {
    for (String nonTargetBroker : nonTargetBrokers) {
        boolean overlaps = targetBrokers.contains(nonTargetBroker);
        if (overlaps) {
            throw new RuntimeException("The non-target broker exist in target broker");
        }
    }
}
/**
 * Create or expand the queue mapping of a static topic to {@code queueNum} queues,
 * allocating the newly added global ids across {@code targetBrokers}. Existing queues
 * are left untouched; only new ids get fresh gen-0 mapping items.
 *
 * @param topic the static topic name
 * @param queueNum desired total queue number; must exceed the current number
 * @param targetBrokers brokers allowed to host queues
 * @param brokerConfigMap broker name -> current config/mapping; mutated in place
 * @return wrapper carrying the new epoch and the updated broker configs
 * @throws RuntimeException when the request shrinks/keeps the queue number or a
 *         consistency check fails
 */
public static TopicRemappingDetailWrapper createTopicConfigMapping(String topic, int queueNum, Set<String> targetBrokers, Map<String, TopicConfigAndQueueMapping> brokerConfigMap) {
    checkTargetBrokersComplete(targetBrokers, brokerConfigMap);
    Map<Integer, TopicQueueMappingOne> globalIdMap = new HashMap<>();
    Map.Entry<Long, Integer> maxEpochAndNum = new AbstractMap.SimpleImmutableEntry<>(System.currentTimeMillis(), queueNum);
    if (!brokerConfigMap.isEmpty()) {
        // topic already exists somewhere: validate the current state before growing it
        maxEpochAndNum = TopicQueueMappingUtils.checkNameEpochNumConsistence(topic, brokerConfigMap);
        globalIdMap = TopicQueueMappingUtils.checkAndBuildMappingItems(new ArrayList<>(TopicQueueMappingUtils.getMappingDetailFromConfig(brokerConfigMap.values())), false, true);
        checkIfReusePhysicalQueue(globalIdMap.values());
        checkPhysicalQueueConsistence(brokerConfigMap);
    }
    if (queueNum < globalIdMap.size()) {
        throw new RuntimeException(String.format("Cannot decrease the queue num for static topic %d < %d", queueNum, globalIdMap.size()));
    }
    //check the queue number
    if (queueNum == globalIdMap.size()) {
        throw new RuntimeException("The topic queue num is equal the existed queue num, do nothing");
    }
    //the check is ok, now do the mapping allocation
    // seed the allocator: targets start at 0, then count the queues each leader already hosts
    Map<String, Integer> brokerNumMap = new HashMap<>();
    for (String broker: targetBrokers) {
        brokerNumMap.put(broker, 0);
    }
    final Map<Integer, String> oldIdToBroker = new HashMap<>();
    for (Map.Entry<Integer, TopicQueueMappingOne> entry : globalIdMap.entrySet()) {
        String leaderbroker = entry.getValue().getBname();
        oldIdToBroker.put(entry.getKey(), leaderbroker);
        if (!brokerNumMap.containsKey(leaderbroker)) {
            brokerNumMap.put(leaderbroker, 1);
        } else {
            brokerNumMap.put(leaderbroker, brokerNumMap.get(leaderbroker) + 1);
        }
    }
    TopicQueueMappingUtils.MappingAllocator allocator = TopicQueueMappingUtils.buildMappingAllocator(oldIdToBroker, brokerNumMap, null);
    allocator.upToNum(queueNum);
    Map<Integer, String> newIdToBroker = allocator.getIdToBroker();
    //construct the topic configAndMapping
    // bump the epoch well beyond the old one so every broker accepts the update
    long newEpoch = Math.max(maxEpochAndNum.getKey() + 1000, System.currentTimeMillis());
    for (Map.Entry<Integer, String> e : newIdToBroker.entrySet()) {
        Integer queueId = e.getKey();
        String broker = e.getValue();
        if (globalIdMap.containsKey(queueId)) {
            //ignore the exited
            continue;
        }
        TopicConfigAndQueueMapping configMapping;
        if (!brokerConfigMap.containsKey(broker)) {
            // first queue hosted on this broker: create a fresh config with one queue
            configMapping = new TopicConfigAndQueueMapping(new TopicConfig(topic), new TopicQueueMappingDetail(topic, 0, broker, System.currentTimeMillis()));
            configMapping.setWriteQueueNums(1);
            configMapping.setReadQueueNums(1);
            brokerConfigMap.put(broker, configMapping);
        } else {
            configMapping = brokerConfigMap.get(broker);
            configMapping.setWriteQueueNums(configMapping.getWriteQueueNums() + 1);
            configMapping.setReadQueueNums(configMapping.getReadQueueNums() + 1);
        }
        // brand-new queue: gen 0, maps to the broker's newest physical queue, offsets 0
        LogicQueueMappingItem mappingItem = new LogicQueueMappingItem(0, configMapping.getWriteQueueNums() - 1, broker, 0, 0, -1, -1, -1);
        TopicQueueMappingDetail.putMappingInfo(configMapping.getMappingDetail(), queueId, new ArrayList<>(Collections.singletonList(mappingItem)));
    }
    // set the topic config
    for (Map.Entry<String, TopicConfigAndQueueMapping> entry : brokerConfigMap.entrySet()) {
        TopicConfigAndQueueMapping configMapping = entry.getValue();
        configMapping.getMappingDetail().setEpoch(newEpoch);
        configMapping.getMappingDetail().setTotalQueues(queueNum);
    }
    //double check the config
    {
        TopicQueueMappingUtils.checkNameEpochNumConsistence(topic, brokerConfigMap);
        globalIdMap = TopicQueueMappingUtils.checkAndBuildMappingItems(getMappingDetailFromConfig(brokerConfigMap.values()), false, true);
        checkIfReusePhysicalQueue(globalIdMap.values());
        checkPhysicalQueueConsistence(brokerConfigMap);
    }
    return new TopicRemappingDetailWrapper(topic, TopicRemappingDetailWrapper.TYPE_CREATE_OR_UPDATE, newEpoch, brokerConfigMap, new HashSet<>(), new HashSet<>());
}
/**
 * Re-balance the queues of a static topic onto {@code targetBrokers} without changing
 * the total queue number. Queues whose leader keeps quota stay put; the rest get a
 * new-generation mapping item appended so the map-in broker takes over leadership.
 *
 * @param topic the static topic name
 * @param brokerConfigMap broker name -> current config/mapping; mutated in place
 * @param targetBrokers the brokers that should host all queues after remapping
 * @return wrapper with the new epoch, updated configs, and the map-in/map-out broker sets
 * @throws RuntimeException when any consistency check fails
 */
public static TopicRemappingDetailWrapper remappingStaticTopic(String topic, Map<String, TopicConfigAndQueueMapping> brokerConfigMap, Set<String> targetBrokers) {
    Map.Entry<Long, Integer> maxEpochAndNum = TopicQueueMappingUtils.checkNameEpochNumConsistence(topic, brokerConfigMap);
    Map<Integer, TopicQueueMappingOne> globalIdMap = TopicQueueMappingUtils.checkAndBuildMappingItems(getMappingDetailFromConfig(brokerConfigMap.values()), false, true);
    TopicQueueMappingUtils.checkPhysicalQueueConsistence(brokerConfigMap);
    TopicQueueMappingUtils.checkIfReusePhysicalQueue(globalIdMap.values());
    //the check is ok, now do the mapping allocation
    int maxNum = maxEpochAndNum.getValue();
    // target brokers start with zero quota; the allocator distributes maxNum among them
    Map<String, Integer> brokerNumMap = new HashMap<>();
    for (String broker: targetBrokers) {
        brokerNumMap.put(broker, 0);
    }
    // how many queues each broker currently leads, used to bias the allocation
    Map<String, Integer> brokerNumMapBeforeRemapping = new HashMap<>();
    for (TopicQueueMappingOne mappingOne: globalIdMap.values()) {
        if (brokerNumMapBeforeRemapping.containsKey(mappingOne.bname)) {
            brokerNumMapBeforeRemapping.put(mappingOne.bname, brokerNumMapBeforeRemapping.get(mappingOne.bname) + 1);
        } else {
            brokerNumMapBeforeRemapping.put(mappingOne.bname, 1);
        }
    }
    TopicQueueMappingUtils.MappingAllocator allocator = TopicQueueMappingUtils.buildMappingAllocator(new HashMap<>(), brokerNumMap, brokerNumMapBeforeRemapping);
    allocator.upToNum(maxNum);
    Map<String, Integer> expectedBrokerNumMap = allocator.getBrokerNumMap();
    Queue<Integer> waitAssignQueues = new ArrayDeque<>();
    //cannot directly use the idBrokerMap from allocator, for the number of globalId maybe not in the natural order
    Map<Integer, String> expectedIdToBroker = new HashMap<>();
    //the following logic will make sure that, for one broker, either "map in" or "map out"
    //It can't both, map in some queues but also map out some queues.
    for (Map.Entry<Integer, TopicQueueMappingOne> entry : globalIdMap.entrySet()) {
        Integer queueId = entry.getKey();
        TopicQueueMappingOne mappingOne = entry.getValue();
        String leaderBroker = mappingOne.getBname();
        if (expectedBrokerNumMap.containsKey(leaderBroker)) {
            if (expectedBrokerNumMap.get(leaderBroker) > 0) {
                // current leader still has quota: keep the queue where it is
                expectedIdToBroker.put(queueId, leaderBroker);
                expectedBrokerNumMap.put(leaderBroker, expectedBrokerNumMap.get(leaderBroker) - 1);
            } else {
                // quota exhausted: queue must move, and this broker becomes map-out only
                waitAssignQueues.add(queueId);
                expectedBrokerNumMap.remove(leaderBroker);
            }
        } else {
            waitAssignQueues.add(queueId);
        }
    }
    // hand the queues that must move to brokers with remaining quota
    for (Map.Entry<String, Integer> entry: expectedBrokerNumMap.entrySet()) {
        String broker = entry.getKey();
        Integer queueNum = entry.getValue();
        for (int i = 0; i < queueNum; i++) {
            Integer queueId = waitAssignQueues.poll();
            assert queueId != null;
            expectedIdToBroker.put(queueId, broker);
        }
    }
    // bump the epoch well beyond the old one so every broker accepts the update
    long newEpoch = Math.max(maxEpochAndNum.getKey() + 1000, System.currentTimeMillis());
    //Now construct the remapping info
    Set<String> brokersToMapOut = new HashSet<>();
    Set<String> brokersToMapIn = new HashSet<>();
    for (Map.Entry<Integer, String> mapEntry : expectedIdToBroker.entrySet()) {
        Integer queueId = mapEntry.getKey();
        String broker = mapEntry.getValue();
        TopicQueueMappingOne topicQueueMappingOne = globalIdMap.get(queueId);
        assert topicQueueMappingOne != null;
        if (topicQueueMappingOne.getBname().equals(broker)) {
            // queue stays on its current leader: nothing to remap
            continue;
        }
        //remapping
        final String mapInBroker = broker;
        final String mapOutBroker = topicQueueMappingOne.getBname();
        brokersToMapIn.add(mapInBroker);
        brokersToMapOut.add(mapOutBroker);
        TopicConfigAndQueueMapping mapInConfig = brokerConfigMap.get(mapInBroker);
        TopicConfigAndQueueMapping mapOutConfig = brokerConfigMap.get(mapOutBroker);
        if (mapInConfig == null) {
            // receiving broker has no config for this topic yet: create an empty one
            mapInConfig = new TopicConfigAndQueueMapping(new TopicConfig(topic, 0, 0), new TopicQueueMappingDetail(topic, maxNum, mapInBroker, newEpoch));
            brokerConfigMap.put(mapInBroker, mapInConfig);
        }
        mapInConfig.setWriteQueueNums(mapInConfig.getWriteQueueNums() + 1);
        mapInConfig.setReadQueueNums(mapInConfig.getReadQueueNums() + 1);
        List<LogicQueueMappingItem> items = new ArrayList<>(topicQueueMappingOne.getItems());
        LogicQueueMappingItem last = items.get(items.size() - 1);
        // append a new-generation item on the map-in broker; logic offset (-1) is settled later
        items.add(new LogicQueueMappingItem(last.getGen() + 1, mapInConfig.getWriteQueueNums() - 1, mapInBroker, -1, 0, -1, -1, -1));
        //Use the same object
        TopicQueueMappingDetail.putMappingInfo(mapInConfig.getMappingDetail(), queueId, items);
        TopicQueueMappingDetail.putMappingInfo(mapOutConfig.getMappingDetail(), queueId, items);
    }
    for (Map.Entry<String, TopicConfigAndQueueMapping> entry : brokerConfigMap.entrySet()) {
        TopicConfigAndQueueMapping configMapping = entry.getValue();
        configMapping.getMappingDetail().setEpoch(newEpoch);
        configMapping.getMappingDetail().setTotalQueues(maxNum);
    }
    //double check
    {
        TopicQueueMappingUtils.checkNameEpochNumConsistence(topic, brokerConfigMap);
        globalIdMap = TopicQueueMappingUtils.checkAndBuildMappingItems(getMappingDetailFromConfig(brokerConfigMap.values()), false, true);
        TopicQueueMappingUtils.checkPhysicalQueueConsistence(brokerConfigMap);
        TopicQueueMappingUtils.checkIfReusePhysicalQueue(globalIdMap.values());
        TopicQueueMappingUtils.checkLeaderInTargetBrokers(globalIdMap.values(), targetBrokers);
    }
    return new TopicRemappingDetailWrapper(topic, TopicRemappingDetailWrapper.TYPE_REMAPPING, newEpoch, brokerConfigMap, brokersToMapIn, brokersToMapOut);
}
/**
 * Locate the mapping item that covers {@code logicOffset}: scanning newest to oldest,
 * return the first item whose base logic offset is <= the requested offset. When the
 * offset lies below every item, fall back to the earliest usable item.
 *
 * @param mappingItems mapping items ordered oldest first; may be null/empty
 * @param logicOffset the logic offset to resolve
 * @param ignoreNegative when true, items with a negative logic offset are skipped
 * @return the covering item, the earliest usable item, or null when none qualifies
 */
public static LogicQueueMappingItem findLogicQueueMappingItem(List<LogicQueueMappingItem> mappingItems, long logicOffset, boolean ignoreNegative) {
    if (mappingItems == null || mappingItems.isEmpty()) {
        return null;
    }
    //Could use bi-search to polish performance
    for (int i = mappingItems.size() - 1; i >= 0; i--) {
        LogicQueueMappingItem candidate = mappingItems.get(i);
        if (ignoreNegative && candidate.getLogicOffset() < 0) {
            continue;
        }
        if (candidate.getLogicOffset() <= logicOffset) {
            return candidate;
        }
    }
    //if not found, maybe out of range, return the first one
    for (LogicQueueMappingItem candidate : mappingItems) {
        if (ignoreNegative && candidate.getLogicOffset() < 0) {
            continue;
        }
        return candidate;
    }
    return null;
}
/**
 * Find the mapping item that follows {@code currentItem} (matched by generation) in
 * the list.
 *
 * @param items mapping items ordered oldest first; may be null
 * @param currentItem the item whose successor is wanted; may be null
 * @param ignoreNegative when true, skip negative-offset items while matching, and
 *        return null instead of a negative-offset successor
 * @return the next item, or null when the current item is last, unmatched, or the
 *         successor is filtered out
 */
public static LogicQueueMappingItem findNext(List<LogicQueueMappingItem> items, LogicQueueMappingItem currentItem, boolean ignoreNegative) {
    if (items == null || currentItem == null) {
        return null;
    }
    for (int i = 0; i < items.size(); i++) {
        LogicQueueMappingItem candidate = items.get(i);
        if (ignoreNegative && candidate.getLogicOffset() < 0) {
            continue;
        }
        if (candidate.getGen() != currentItem.getGen()) {
            continue;
        }
        // matched the current item by generation; try to hand back its successor
        if (i >= items.size() - 1) {
            return null;
        }
        LogicQueueMappingItem successor = items.get(i + 1);
        if (ignoreNegative && successor.getLogicOffset() < 0) {
            return null;
        }
        return successor;
    }
    return null;
}
/**
 * Tell whether the broker described by {@code mappingDetail} is the leader of the
 * queue, i.e. the broker of the last mapping item.
 *
 * @param items mapping items ordered oldest first; may be null/empty
 * @param mappingDetail the broker's mapping detail; may be null
 * @return true only when both args are usable and the last item's broker matches
 */
public static boolean checkIfLeader(List<LogicQueueMappingItem> items, TopicQueueMappingDetail mappingDetail) {
    if (items == null || mappingDetail == null || items.isEmpty()) {
        return false;
    }
    LogicQueueMappingItem leader = items.get(items.size() - 1);
    return leader.getBname().equals(mappingDetail.getBname());
}
}
|
MappingAllocator
|
java
|
quarkusio__quarkus
|
integration-tests/test-extension/tests/src/test/java/io/quarkus/it/extension/AbstractQuarkusTestMetaAnnotationTest.java
|
{
"start": 211,
"end": 432
}
|
class ____ {
@Inject
MyTestBean bean;
@Test
void testInjectedBean() {
Assertions.assertEquals("foo", bean.foo());
}
@ApplicationScoped
public static
|
AbstractQuarkusTestMetaAnnotationTest
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java
|
{
"start": 1209,
"end": 1269
}
|
class ____ also go to that client class.
*/
public final
|
should
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/FunctionalInterfaceMethodChangedTest.java
|
{
"start": 2033,
"end": 2542
}
|
interface ____ extends SuperFI, OtherSuperFI {
void subSam();
@Override
default void superSam() {
subSam();
}
@Override
// BUG: Diagnostic contains:
default void otherSuperSam() {
subSam();
System.out.println("do something else");
}
}
@FunctionalInterface
|
MultipleInheritanceSubFIOneBad
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsTailLatencyTracker.java
|
{
"start": 1250,
"end": 1457
}
|
class ____ the latency of various operations like read, write etc for a single account.
* It maintains a sliding window histogram for each operation type to analyze latency patterns over time.
*/
public
|
tracks
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcess.java
|
{
"start": 1789,
"end": 5561
}
|
class ____ extends AbstractNativeProcess implements AutodetectProcess {
private static final Logger LOGGER = LogManager.getLogger(NativeAutodetectProcess.class);
private static final String NAME = "autodetect";
private final ProcessResultsParser<AutodetectResult> resultsParser;
NativeAutodetectProcess(
String jobId,
NativeController nativeController,
ProcessPipes processPipes,
int numberOfFields,
List<Path> filesToDelete,
ProcessResultsParser<AutodetectResult> resultsParser,
Consumer<String> onProcessCrash
) {
super(jobId, nativeController, processPipes, numberOfFields, filesToDelete, onProcessCrash);
this.resultsParser = resultsParser;
}
@Override
public String getName() {
return NAME;
}
@Override
public void restoreState(StateStreamer stateStreamer, ModelSnapshot modelSnapshot) {
if (modelSnapshot != null) {
try (OutputStream r = processRestoreStream()) {
stateStreamer.restoreStateToStream(jobId(), modelSnapshot, r);
} catch (Exception e) {
// TODO: should we fail to start?
if (isProcessKilled() == false) {
LOGGER.error("Error restoring model state for job " + jobId(), e);
}
}
}
setReady();
}
@Override
public void writeResetBucketsControlMessage(DataLoadParams params) throws IOException {
newMessageWriter().writeResetBucketsMessage(params);
}
@Override
public void writeUpdateModelPlotMessage(ModelPlotConfig modelPlotConfig) throws IOException {
newMessageWriter().writeUpdateModelPlotMessage(modelPlotConfig);
}
@Override
public void writeUpdatePerPartitionCategorizationMessage(PerPartitionCategorizationConfig perPartitionCategorizationConfig)
throws IOException {
newMessageWriter().writeCategorizationStopOnWarnMessage(perPartitionCategorizationConfig.isStopOnWarn());
}
@Override
public void writeUpdateDetectorRulesMessage(int detectorIndex, List<DetectionRule> rules) throws IOException {
newMessageWriter().writeUpdateDetectorRulesMessage(detectorIndex, rules);
}
@Override
public void writeUpdateFiltersMessage(List<MlFilter> filters) throws IOException {
newMessageWriter().writeUpdateFiltersMessage(filters);
}
@Override
public void writeUpdateScheduledEventsMessage(List<ScheduledEvent> events, TimeValue bucketSpan) throws IOException {
newMessageWriter().writeUpdateScheduledEventsMessage(events, bucketSpan);
}
@Override
public String flushJob(FlushJobParams params) throws IOException {
AutodetectControlMsgWriter writer = newMessageWriter();
writer.writeFlushControlMessage(params);
return writer.writeFlushMessage();
}
@Override
public void forecastJob(ForecastParams params) throws IOException {
newMessageWriter().writeForecastMessage(params);
}
@Override
public void persistState() throws IOException {
newMessageWriter().writeStartBackgroundPersistMessage();
}
@Override
public Iterator<AutodetectResult> readAutodetectResults() {
return resultsParser.parseResults(processOutStream());
}
private AutodetectControlMsgWriter newMessageWriter() {
return new AutodetectControlMsgWriter(recordWriter(), numberOfFields());
}
@Override
public void persistState(long snapshotTimestamp, String snapshotId, String snapshotDescription) throws IOException {
newMessageWriter().writeStartBackgroundPersistMessage(snapshotTimestamp, snapshotId, snapshotDescription);
}
}
|
NativeAutodetectProcess
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/common/xcontent/support/XContentHelperTests.java
|
{
"start": 1538,
"end": 19278
}
|
class ____ extends ESTestCase {
Map<String, Object> getMap(Object... keyValues) {
Map<String, Object> map = new HashMap<>();
for (int i = 0; i < keyValues.length; i++) {
map.put((String) keyValues[i], keyValues[++i]);
}
return map;
}
Map<String, Object> getNamedMap(String name, Object... keyValues) {
Map<String, Object> map = getMap(keyValues);
Map<String, Object> namedMap = Maps.newMapWithExpectedSize(1);
namedMap.put(name, map);
return namedMap;
}
List<Object> getList(Object... values) {
return Arrays.asList(values);
}
public void testMergingListValuesAreMapsOfOne() {
Map<String, Object> defaults = getMap("test", getList(getNamedMap("name1", "t1", "1"), getNamedMap("name2", "t2", "2")));
Map<String, Object> content = getMap("test", getList(getNamedMap("name2", "t3", "3"), getNamedMap("name4", "t4", "4")));
Map<String, Object> expected = getMap(
"test",
getList(getNamedMap("name2", "t2", "2", "t3", "3"), getNamedMap("name4", "t4", "4"), getNamedMap("name1", "t1", "1"))
);
XContentHelper.mergeDefaults(content, defaults);
assertThat(content, equalTo(expected));
}
public void testMergingDefaults() {
Map<String, Object> base = getMap("key1", "old", "key3", "old", "map", getMap("key1", "old", "key3", "old"));
Map<String, Object> toMerge = getMap("key2", "new", "key3", "new", "map", getMap("key2", "new", "key3", "new"));
XContentHelper.mergeDefaults(base, toMerge);
Map<String, Object> expected = getMap(
"key1",
"old",
"key2",
"new",
"key3",
"old",
"map",
Map.of("key1", "old", "key2", "new", "key3", "old")
);
assertThat(base, equalTo(expected));
}
public void testMergingWithCustomMerge() {
Map<String, Object> base = getMap("key1", "old", "key3", "old", "key4", "old");
Map<String, Object> toMerge = getMap("key2", "new", "key3", "new", "key4", "new");
XContentHelper.merge(base, toMerge, (parent, key, oldValue, newValue) -> "key3".equals(key) ? newValue : oldValue);
Map<String, Object> expected = getMap("key1", "old", "key2", "new", "key3", "new", "key4", "old");
assertThat(base, equalTo(expected));
}
public void testMergingWithCustomMapReplacement() {
Map<String, Object> base = getMap(
"key1",
"old",
"key3",
"old",
"key4",
"old",
"map",
Map.of("key1", "old", "key3", "old", "key4", "old")
);
Map<String, Object> toMerge = getMap(
"key2",
"new",
"key3",
"new",
"key4",
"new",
"map",
Map.of("key2", "new", "key3", "new", "key4", "new")
);
XContentHelper.merge(
base,
toMerge,
(parent, key, oldValue, newValue) -> "key3".equals(key) || "map".equals(key) ? newValue : oldValue
);
Map<String, Object> expected = getMap(
"key1",
"old",
"key2",
"new",
"key3",
"new",
"key4",
"old",
"map",
Map.of("key2", "new", "key3", "new", "key4", "new")
);
assertThat(base, equalTo(expected));
}
public void testMergingWithCustomMapMerge() {
Map<String, Object> base = getMap(
"key1",
"old",
"key3",
"old",
"key4",
"old",
"map",
new HashMap<>(Map.of("key1", "old", "key3", "old", "key4", "old"))
);
Map<String, Object> toMerge = getMap(
"key2",
"new",
"key3",
"new",
"key4",
"new",
"map",
Map.of("key2", "new", "key3", "new", "key4", "new")
);
XContentHelper.merge(base, toMerge, (parent, key, oldValue, newValue) -> "key3".equals(key) ? oldValue : null);
Map<String, Object> expected = getMap(
"key1",
"old",
"key2",
"new",
"key3",
"old",
"key4",
"old",
"map",
Map.of("key1", "old", "key2", "new", "key3", "old", "key4", "old")
);
assertThat(base, equalTo(expected));
}
public void testMergingListValueWithCustomMapReplacement() {
Map<String, Object> base = getMap(
"key",
List.of("value1", "value3", "value4"),
"list",
List.of(new HashMap<>(Map.of("map", new HashMap<>(Map.of("key1", "old", "key3", "old", "key4", "old")))))
);
Map<String, Object> toMerge = getMap(
"key",
List.of("value1", "value2", "value4"),
"list",
List.of(Map.of("map", Map.of("key2", "new", "key3", "new", "key4", "new")))
);
XContentHelper.merge(
base,
toMerge,
(parent, key, oldValue, newValue) -> "key3".equals(key) || "map".equals(key) ? newValue : oldValue
);
Map<String, Object> expected = getMap(
"key",
List.of("value1", "value2", "value4", "value3"),
"list",
List.of(Map.of("map", Map.of("key2", "new", "key3", "new", "key4", "new")))
);
assertThat(base, equalTo(expected));
}
public void testMergingListValueWithCustomMapMerge() {
Map<String, Object> base = getMap(
"key",
List.of("value1", "value3", "value4"),
"list",
List.of(new HashMap<>(Map.of("map", new HashMap<>(Map.of("key1", "old", "key3", "old", "key4", "old")))))
);
Map<String, Object> toMerge = getMap(
"key",
List.of("value1", "value2", "value4"),
"list",
List.of(Map.of("map", Map.of("key2", "new", "key3", "new", "key4", "new")))
);
XContentHelper.merge(base, toMerge, (parent, key, oldValue, newValue) -> "key3".equals(key) ? newValue : null);
Map<String, Object> expected = getMap(
"key",
List.of("value1", "value2", "value4", "value3"),
"list",
List.of(Map.of("map", Map.of("key1", "old", "key2", "new", "key3", "new", "key4", "old")))
);
assertThat(base, equalTo(expected));
}
public void testMergingWithCustomMergeWithException() {
final Map<String, Object> base = getMap("key1", "old", "key3", "old", "key4", "old");
final Map<String, Object> toMerge = getMap("key2", "new", "key3", "new", "key4", "new");
final XContentHelper.CustomMerge customMerge = (parent, key, oldValue, newValue) -> {
if ("key3".equals(key)) {
throw new IllegalArgumentException(key + " is not allowed");
}
return oldValue;
};
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> XContentHelper.merge(base, toMerge, customMerge));
assertThat(e.getMessage(), containsString("key3 is not allowed"));
}
public void testToXContent() throws IOException {
final XContentType xContentType = randomFrom(XContentType.values());
final ToXContent toXContent;
final boolean error;
if (randomBoolean()) {
if (randomBoolean()) {
error = false;
toXContent = (builder, params) -> builder.field("field", "value");
} else {
error = true;
toXContent = (builder, params) -> builder.startObject().field("field", "value").endObject();
}
} else {
if (randomBoolean()) {
error = false;
toXContent = (ToXContentObject) (builder, params) -> builder.startObject().field("field", "value").endObject();
} else {
error = true;
toXContent = (ToXContentObject) (builder, params) -> builder.field("field", "value");
}
}
if (error) {
expectThrows(IOException.class, () -> XContentHelper.toXContent(toXContent, xContentType, randomBoolean()));
} else {
BytesReference bytes = XContentHelper.toXContent(toXContent, xContentType, randomBoolean());
try (XContentParser parser = xContentType.xContent().createParser(XContentParserConfiguration.EMPTY, bytes.streamInput())) {
assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken());
assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken());
assertTrue(parser.nextToken().isValue());
assertEquals("value", parser.text());
assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken());
assertNull(parser.nextToken());
}
}
}
public void testChildBytes() throws IOException {
for (XContentType xContentType : XContentType.values()) {
XContentBuilder builder = XContentBuilder.builder(xContentType.xContent());
builder.startObject().startObject("level1");
builder.startObject("level2")
.startObject("object")
.field("text", "string")
.field("number", 10)
.endObject()
.startObject("object2")
.field("boolean", true)
.nullField("null")
.startArray("array_of_strings")
.value("string1")
.value("string2")
.endArray()
.endObject()
.endObject();
builder.field("field", "value");
builder.endObject().endObject();
BytesReference input = BytesReference.bytes(builder);
BytesReference bytes;
try (XContentParser parser = xContentType.xContent().createParser(XContentParserConfiguration.EMPTY, input.streamInput())) {
assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken());
assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken());
assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken());
assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken());
assertEquals("level2", parser.currentName());
// Extract everything under 'level2' as a bytestream
bytes = XContentHelper.childBytes(parser);
assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken());
assertEquals("field", parser.currentName());
}
// now parse the contents of 'level2'
try (XContentParser parser = xContentType.xContent().createParser(XContentParserConfiguration.EMPTY, bytes.streamInput())) {
assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken());
assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken());
assertEquals("object", parser.currentName());
assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken());
assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken());
assertEquals("text", parser.currentName());
assertEquals(XContentParser.Token.VALUE_STRING, parser.nextToken());
assertEquals("string", parser.text());
assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken());
assertEquals("number", parser.currentName());
assertEquals(XContentParser.Token.VALUE_NUMBER, parser.nextToken());
assertEquals(10, parser.numberValue());
assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken());
assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken());
assertEquals("object2", parser.currentName());
assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken());
assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken());
assertEquals("boolean", parser.currentName());
assertEquals(XContentParser.Token.VALUE_BOOLEAN, parser.nextToken());
assertTrue(parser.booleanValue());
assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken());
assertEquals("null", parser.currentName());
assertEquals(XContentParser.Token.VALUE_NULL, parser.nextToken());
assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken());
assertEquals("array_of_strings", parser.currentName());
assertEquals(XContentParser.Token.START_ARRAY, parser.nextToken());
assertEquals(XContentParser.Token.VALUE_STRING, parser.nextToken());
assertEquals("string1", parser.text());
assertEquals(XContentParser.Token.VALUE_STRING, parser.nextToken());
assertEquals("string2", parser.text());
assertEquals(XContentParser.Token.END_ARRAY, parser.nextToken());
assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken());
assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken());
assertNull(parser.nextToken());
}
}
}
public void testEmbeddedObject() throws IOException {
// Need to test this separately as XContentType.JSON never produces VALUE_EMBEDDED_OBJECT
XContentBuilder builder = XContentBuilder.builder(XContentType.CBOR.xContent());
builder.startObject().startObject("root");
CompressedXContent embedded = new CompressedXContent("{\"field\":\"value\"}");
builder.field("bytes", embedded.compressed());
builder.endObject().endObject();
BytesReference bytes = BytesReference.bytes(builder);
BytesReference inner;
try (XContentParser parser = XContentType.CBOR.xContent().createParser(XContentParserConfiguration.EMPTY, bytes.streamInput())) {
assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken());
assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken());
inner = XContentHelper.childBytes(parser);
assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken());
assertNull(parser.nextToken());
}
try (XContentParser parser = XContentType.CBOR.xContent().createParser(XContentParserConfiguration.EMPTY, inner.streamInput())) {
assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken());
assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken());
assertEquals("bytes", parser.currentName());
assertEquals(XContentParser.Token.VALUE_EMBEDDED_OBJECT, parser.nextToken());
assertEquals(embedded, new CompressedXContent(parser.binaryValue()));
assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken());
assertNull(parser.nextToken());
}
}
public void testEmptyChildBytes() throws IOException {
String inputJson = "{ \"mappings\" : {} }";
try (XContentParser parser = XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, inputJson)) {
assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken());
assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken());
BytesReference bytes = XContentHelper.childBytes(parser);
assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken());
assertNull(parser.nextToken());
assertEquals("{}", bytes.utf8ToString());
}
}
public void testParseToType() throws IOException {
String json = """
{ "a": "b", "c": "d"}
""";
Set<String> names = XContentHelper.parseToType(parser -> {
Set<String> fields = new HashSet<>();
XContentParser.Token token = parser.currentToken();
if (token == null) {
token = parser.nextToken();
}
if (token == XContentParser.Token.START_OBJECT) {
fields.add(parser.nextFieldName());
}
for (token = parser.nextToken(); token != null; token = parser.nextToken()) {
if (token == XContentParser.Token.FIELD_NAME) {
fields.add(parser.currentName());
}
}
return fields;
}, new BytesArray(json), XContentType.JSON, null).v2();
assertThat(names, equalTo(Set.of("a", "c")));
}
public void testGetParserWithInvalidInput() throws IOException {
assertThrows(
"Should detect bad JSON",
NotXContentException.class,
() -> XContentHelper.createParser(XContentParserConfiguration.EMPTY, new BytesArray("not actually XContent"))
);
XContentParser parser = XContentHelper.createParser(
XContentParserConfiguration.EMPTY,
new BytesArray("not actually XContent"),
XContentType.JSON
);
assertNotNull("Should not detect bad JSON", parser); // This is more like assertNotThrows
assertThrows("Should detect bad JSON at parse time", XContentParseException.class, parser::numberValue);
}
}
|
XContentHelperTests
|
java
|
grpc__grpc-java
|
interop-testing/src/generated/main/grpc/io/grpc/testing/integration/XdsUpdateClientConfigureServiceGrpc.java
|
{
"start": 14383,
"end": 15606
}
|
class ____
extends XdsUpdateClientConfigureServiceBaseDescriptorSupplier
implements io.grpc.protobuf.ProtoMethodDescriptorSupplier {
private final java.lang.String methodName;
XdsUpdateClientConfigureServiceMethodDescriptorSupplier(java.lang.String methodName) {
this.methodName = methodName;
}
@java.lang.Override
public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() {
return getServiceDescriptor().findMethodByName(methodName);
}
}
private static volatile io.grpc.ServiceDescriptor serviceDescriptor;
public static io.grpc.ServiceDescriptor getServiceDescriptor() {
io.grpc.ServiceDescriptor result = serviceDescriptor;
if (result == null) {
synchronized (XdsUpdateClientConfigureServiceGrpc.class) {
result = serviceDescriptor;
if (result == null) {
serviceDescriptor = result = io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME)
.setSchemaDescriptor(new XdsUpdateClientConfigureServiceFileDescriptorSupplier())
.addMethod(getConfigureMethod())
.build();
}
}
}
return result;
}
}
|
XdsUpdateClientConfigureServiceMethodDescriptorSupplier
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/EnableNameserviceResponsePBImpl.java
|
{
"start": 1600,
"end": 2665
}
|
class ____ extends EnableNameserviceResponse
implements PBRecord {
private FederationProtocolPBTranslator<EnableNameserviceResponseProto,
Builder, EnableNameserviceResponseProtoOrBuilder> translator =
new FederationProtocolPBTranslator<>(
EnableNameserviceResponseProto.class);
public EnableNameserviceResponsePBImpl() {
}
public EnableNameserviceResponsePBImpl(EnableNameserviceResponseProto proto) {
this.translator.setProto(proto);
}
@Override
public EnableNameserviceResponseProto getProto() {
return translator.build();
}
@Override
public void setProto(Message proto) {
this.translator.setProto(proto);
}
@Override
public void readInstance(String base64String) throws IOException {
this.translator.readInstance(base64String);
}
@Override
public boolean getStatus() {
return this.translator.getProtoOrBuilder().getStatus();
}
@Override
public void setStatus(boolean status) {
this.translator.getBuilder().setStatus(status);
}
}
|
EnableNameserviceResponsePBImpl
|
java
|
apache__rocketmq
|
broker/src/main/java/org/apache/rocketmq/broker/offset/BroadcastOffsetManager.java
|
{
"start": 9120,
"end": 9768
}
|
class ____ {
/**
* the timeStamp of last update occurred
*/
private volatile long timestamp;
/**
* mark the offset of this client is updated by proxy or not
*/
private volatile boolean fromProxy;
/**
* the pulled offset of each queue
*/
private final BroadcastOffsetStore offsetStore;
public BroadcastTimedOffsetStore(boolean fromProxy) {
this.timestamp = System.currentTimeMillis();
this.fromProxy = fromProxy;
this.offsetStore = new BroadcastOffsetStore();
}
}
}
|
BroadcastTimedOffsetStore
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/action/PrimaryMissingActionException.java
|
{
"start": 645,
"end": 910
}
|
class ____ extends ElasticsearchException {
public PrimaryMissingActionException(String message) {
super(message);
}
public PrimaryMissingActionException(StreamInput in) throws IOException {
super(in);
}
}
|
PrimaryMissingActionException
|
java
|
spring-projects__spring-security
|
cas/src/main/java/org/springframework/security/cas/jackson/AssertionImplMixin.java
|
{
"start": 1598,
"end": 2385
}
|
class ____ {
/**
* Mixin Constructor helps in deserialize
* {@link org.apereo.cas.client.validation.AssertionImpl}
* @param principal the Principal to associate with the Assertion.
* @param validFromDate when the assertion is valid from.
* @param validUntilDate when the assertion is valid to.
* @param authenticationDate when the assertion is authenticated.
* @param attributes the key/value pairs for this attribute.
*/
@JsonCreator
AssertionImplMixin(@JsonProperty("principal") AttributePrincipal principal,
@JsonProperty("validFromDate") Date validFromDate, @JsonProperty("validUntilDate") Date validUntilDate,
@JsonProperty("authenticationDate") Date authenticationDate,
@JsonProperty("attributes") Map<String, Object> attributes) {
}
}
|
AssertionImplMixin
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/boot/InvalidMappingException.java
|
{
"start": 283,
"end": 923
}
|
class ____ extends org.hibernate.InvalidMappingException {
private final Origin origin;
public InvalidMappingException(Origin origin) {
super(
String.format( "Could not parse mapping document: %s (%s)", origin.getName(), origin.getType() ),
origin
);
this.origin = origin;
}
public InvalidMappingException(Origin origin, Throwable e) {
super(
String.format( "Could not parse mapping document: %s (%s)", origin.getName(), origin.getType() ),
origin.getType().getLegacyTypeText(),
origin.getName(),
e
);
this.origin = origin;
}
public Origin getOrigin() {
return origin;
}
}
|
InvalidMappingException
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3ADeleteOnExit.java
|
{
"start": 1555,
"end": 1621
}
|
class ____ extends AbstractS3AMockTest {
static
|
TestS3ADeleteOnExit
|
java
|
apache__flink
|
flink-end-to-end-tests/flink-queryable-state-test/src/main/java/org/apache/flink/streaming/tests/queryablestate/Email.java
|
{
"start": 992,
"end": 2136
}
|
class ____ {
private EmailId emailId;
private Instant timestamp;
private String foo;
private LabelSurrogate label;
public Email(EmailId emailId, Instant timestamp, String foo, LabelSurrogate label) {
this.emailId = emailId;
this.timestamp = timestamp;
this.foo = foo;
this.label = label;
}
public EmailId getEmailId() {
return emailId;
}
public void setEmailId(EmailId emailId) {
this.emailId = emailId;
}
public Instant getTimestamp() {
return timestamp;
}
public void setTimestamp(Instant timestamp) {
this.timestamp = timestamp;
}
public String getFoo() {
return foo;
}
public void setFoo(String foo) {
this.foo = foo;
}
public LabelSurrogate getLabel() {
return label;
}
public void setLabel(LabelSurrogate label) {
this.label = label;
}
public String getDate() {
DateTimeFormatter formatter =
DateTimeFormatter.ofPattern("yyyy-MM-dd").withZone(ZoneId.of("UTC"));
return formatter.format(timestamp);
}
}
|
Email
|
java
|
apache__dubbo
|
dubbo-plugin/dubbo-qos/src/main/java/org/apache/dubbo/qos/command/impl/SerializeCheckStatus.java
|
{
"start": 1305,
"end": 2752
}
|
class ____ implements BaseCommand {
private final SerializeCheckUtils serializeCheckUtils;
public SerializeCheckStatus(FrameworkModel frameworkModel) {
serializeCheckUtils = frameworkModel.getBeanFactory().getBean(SerializeCheckUtils.class);
}
@Override
public String execute(CommandContext commandContext, String[] args) {
if (commandContext.isHttp()) {
Map<String, Object> result = new HashMap<>();
result.put("checkStatus", serializeCheckUtils.getStatus());
result.put("checkSerializable", serializeCheckUtils.isCheckSerializable());
result.put("allowedPrefix", serializeCheckUtils.getAllowedList());
result.put("disAllowedPrefix", serializeCheckUtils.getDisAllowedList());
return JsonUtils.toJson(result);
} else {
return "CheckStatus: " + serializeCheckUtils.getStatus() + "\n\n" + "CheckSerializable: "
+ serializeCheckUtils.isCheckSerializable() + "\n\n" + "AllowedPrefix:"
+ "\n"
+ serializeCheckUtils.getAllowedList().stream().sorted().collect(Collectors.joining("\n"))
+ "\n\n"
+ "DisAllowedPrefix:"
+ "\n"
+ serializeCheckUtils.getDisAllowedList().stream().sorted().collect(Collectors.joining("\n"))
+ "\n\n";
}
}
}
|
SerializeCheckStatus
|
java
|
apache__flink
|
flink-state-backends/flink-statebackend-forst/src/test/java/org/apache/flink/state/forst/datatransfer/DataTransferStrategyTest.java
|
{
"start": 13195,
"end": 15063
}
|
class ____ {
Map<String, Tuple2<Path, HandleAndLocalPath>> dbSnapshotFiles;
DBFilesSnapshot() {
dbSnapshotFiles = new HashMap<>();
}
void add(String fileName, Path dbFilePath, HandleAndLocalPath handleAndLocalPath) {
dbSnapshotFiles.put(fileName, new Tuple2<>(dbFilePath, handleAndLocalPath));
}
List<HandleAndLocalPath> getStateHandles() {
List<HandleAndLocalPath> handles = new ArrayList<>();
dbSnapshotFiles
.values()
.forEach(
tuple -> {
handles.add(tuple.f1);
});
return handles;
}
List<String> getDbFiles() {
return new ArrayList<>(dbSnapshotFiles.keySet());
}
void checkAllFilesExist() throws IOException {
checkFilesExist(true, true);
checkFilesExist(false, true);
}
// check whether the snapshots for local/remote files exist
void checkFilesExist(boolean shouldBeLocalFile, boolean shouldExist) throws IOException {
for (Tuple2<Path, HandleAndLocalPath> tuple : dbSnapshotFiles.values()) {
Path dbFilePath = tuple.f0;
StreamStateHandle handle = tuple.f1.getHandle();
if (!(handle instanceof FileStateHandle)
|| FileOwnershipDecider.shouldAlwaysBeLocal(dbFilePath)
!= shouldBeLocalFile) {
continue;
}
Path realFilePath = ((FileStateHandle) handle).getFilePath();
boolean exist = realFilePath.getFileSystem().exists(realFilePath);
assertThat(exist).isEqualTo(shouldExist);
}
}
}
static
|
DBFilesSnapshot
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/execution/librarycache/BlobLibraryCacheManagerTest.java
|
{
"start": 9215,
"end": 25591
}
|
class ____ leases for a
* single job a closed.
*/
@Test
public void testLibraryCacheManagerCleanup() throws Exception {
JobID jobId = new JobID();
List<PermanentBlobKey> keys = new ArrayList<>();
BlobServer server = null;
PermanentBlobCache cache = null;
BlobLibraryCacheManager libCache = null;
final byte[] buf = new byte[128];
try {
Configuration config = new Configuration();
config.set(BlobServerOptions.CLEANUP_INTERVAL, 1L);
server = new BlobServer(config, temporaryFolder.newFolder(), new VoidBlobStore());
server.start();
InetSocketAddress serverAddress = new InetSocketAddress("localhost", server.getPort());
cache =
new PermanentBlobCache(
config,
temporaryFolder.newFolder(),
new VoidBlobStore(),
serverAddress);
keys.add(server.putPermanent(jobId, buf));
buf[0] += 1;
keys.add(server.putPermanent(jobId, buf));
libCache = createBlobLibraryCacheManager(cache);
cache.registerJob(jobId);
assertEquals(0, libCache.getNumberOfManagedJobs());
assertEquals(0, libCache.getNumberOfReferenceHolders(jobId));
checkFileCountForJob(2, jobId, server);
checkFileCountForJob(0, jobId, cache);
final LibraryCacheManager.ClassLoaderLease classLoaderLease1 =
libCache.registerClassLoaderLease(jobId);
UserCodeClassLoader classLoader1 =
classLoaderLease1.getOrResolveClassLoader(keys, Collections.emptyList());
assertEquals(1, libCache.getNumberOfManagedJobs());
assertEquals(1, libCache.getNumberOfReferenceHolders(jobId));
assertEquals(2, checkFilesExist(jobId, keys, cache, true));
checkFileCountForJob(2, jobId, server);
checkFileCountForJob(2, jobId, cache);
final LibraryCacheManager.ClassLoaderLease classLoaderLease2 =
libCache.registerClassLoaderLease(jobId);
final UserCodeClassLoader classLoader2 =
classLoaderLease2.getOrResolveClassLoader(keys, Collections.emptyList());
assertThat(classLoader1, sameInstance(classLoader2));
try {
classLoaderLease1.getOrResolveClassLoader(
Collections.emptyList(), Collections.emptyList());
fail("Should fail with an IllegalStateException");
} catch (IllegalStateException e) {
// that's what we want
}
try {
classLoaderLease1.getOrResolveClassLoader(
keys, Collections.singletonList(new URL("file:///tmp/does-not-exist")));
fail("Should fail with an IllegalStateException");
} catch (IllegalStateException e) {
// that's what we want
}
assertEquals(1, libCache.getNumberOfManagedJobs());
assertEquals(2, libCache.getNumberOfReferenceHolders(jobId));
assertEquals(2, checkFilesExist(jobId, keys, cache, true));
checkFileCountForJob(2, jobId, server);
checkFileCountForJob(2, jobId, cache);
classLoaderLease1.release();
assertEquals(1, libCache.getNumberOfManagedJobs());
assertEquals(1, libCache.getNumberOfReferenceHolders(jobId));
assertEquals(2, checkFilesExist(jobId, keys, cache, true));
checkFileCountForJob(2, jobId, server);
checkFileCountForJob(2, jobId, cache);
classLoaderLease2.release();
assertEquals(0, libCache.getNumberOfManagedJobs());
assertEquals(0, libCache.getNumberOfReferenceHolders(jobId));
assertEquals(2, checkFilesExist(jobId, keys, cache, true));
checkFileCountForJob(2, jobId, server);
checkFileCountForJob(2, jobId, cache);
// only PermanentBlobCache#releaseJob() calls clean up files (tested in
// BlobCacheCleanupTest etc.
} finally {
if (libCache != null) {
libCache.shutdown();
}
// should have been closed by the libraryCacheManager, but just in case
if (cache != null) {
cache.close();
}
if (server != null) {
server.close();
}
}
}
@Test
@Category(FailsInGHAContainerWithRootUser.class)
public void testRegisterAndDownload() throws IOException {
assumeTrue(!OperatingSystem.isWindows()); // setWritable doesn't work on Windows.
JobID jobId = new JobID();
BlobServer server = null;
PermanentBlobCache cache = null;
BlobLibraryCacheManager libCache = null;
File cacheDir = null;
try {
// create the blob transfer services
Configuration config = new Configuration();
config.set(BlobServerOptions.CLEANUP_INTERVAL, 1_000_000L);
server = new BlobServer(config, temporaryFolder.newFolder(), new VoidBlobStore());
server.start();
InetSocketAddress serverAddress = new InetSocketAddress("localhost", server.getPort());
cache =
new PermanentBlobCache(
config,
temporaryFolder.newFolder(),
new VoidBlobStore(),
serverAddress);
// upload some meaningless data to the server
PermanentBlobKey dataKey1 =
server.putPermanent(jobId, new byte[] {1, 2, 3, 4, 5, 6, 7, 8});
PermanentBlobKey dataKey2 =
server.putPermanent(jobId, new byte[] {11, 12, 13, 14, 15, 16, 17, 18});
libCache = createBlobLibraryCacheManager(cache);
assertEquals(0, libCache.getNumberOfManagedJobs());
checkFileCountForJob(2, jobId, server);
checkFileCountForJob(0, jobId, cache);
// first try to access a non-existing entry
assertEquals(0, libCache.getNumberOfReferenceHolders(new JobID()));
// register some BLOBs as libraries
{
Collection<PermanentBlobKey> keys = Collections.singleton(dataKey1);
cache.registerJob(jobId);
final LibraryCacheManager.ClassLoaderLease classLoaderLease1 =
libCache.registerClassLoaderLease(jobId);
final UserCodeClassLoader classLoader1 =
classLoaderLease1.getOrResolveClassLoader(keys, Collections.emptyList());
assertEquals(1, libCache.getNumberOfManagedJobs());
assertEquals(1, libCache.getNumberOfReferenceHolders(jobId));
assertEquals(1, checkFilesExist(jobId, keys, cache, true));
checkFileCountForJob(2, jobId, server);
checkFileCountForJob(1, jobId, cache);
final LibraryCacheManager.ClassLoaderLease classLoaderLease2 =
libCache.registerClassLoaderLease(jobId);
final UserCodeClassLoader classLoader2 =
classLoaderLease2.getOrResolveClassLoader(keys, Collections.emptyList());
assertThat(classLoader1, sameInstance(classLoader2));
assertEquals(1, libCache.getNumberOfManagedJobs());
assertEquals(2, libCache.getNumberOfReferenceHolders(jobId));
assertEquals(1, checkFilesExist(jobId, keys, cache, true));
checkFileCountForJob(2, jobId, server);
checkFileCountForJob(1, jobId, cache);
// un-register the job
classLoaderLease1.release();
// still one task
assertEquals(1, libCache.getNumberOfManagedJobs());
assertEquals(1, libCache.getNumberOfReferenceHolders(jobId));
assertEquals(1, checkFilesExist(jobId, keys, cache, true));
checkFileCountForJob(2, jobId, server);
checkFileCountForJob(1, jobId, cache);
// unregister the task registration
classLoaderLease2.release();
assertEquals(0, libCache.getNumberOfManagedJobs());
assertEquals(0, libCache.getNumberOfReferenceHolders(jobId));
// changing the libCache registration does not influence the BLOB stores...
checkFileCountForJob(2, jobId, server);
checkFileCountForJob(1, jobId, cache);
cache.releaseJob(jobId);
// library is still cached (but not associated with job any more)
checkFileCountForJob(2, jobId, server);
checkFileCountForJob(1, jobId, cache);
}
// see BlobUtils for the directory layout
cacheDir = cache.getStorageLocation(jobId, new PermanentBlobKey()).getParentFile();
assertTrue(cacheDir.exists());
// make sure no further blobs can be downloaded by removing the write
// permissions from the directory
assertTrue(
"Could not remove write permissions from cache directory",
cacheDir.setWritable(false, false));
// since we cannot download this library any more, this call should fail
try {
cache.registerJob(jobId);
final LibraryCacheManager.ClassLoaderLease classLoaderLease =
libCache.registerClassLoaderLease(jobId);
classLoaderLease.getOrResolveClassLoader(
Collections.singleton(dataKey2), Collections.emptyList());
fail("This should fail with an IOException");
} catch (IOException e) {
// splendid!
cache.releaseJob(jobId);
}
} finally {
if (cacheDir != null) {
if (!cacheDir.setWritable(true, false)) {
System.err.println("Could not re-add write permissions to cache directory.");
}
}
if (cache != null) {
cache.close();
}
if (libCache != null) {
libCache.shutdown();
}
if (server != null) {
server.close();
}
}
}
@Test(expected = IOException.class)
public void getOrResolveClassLoader_missingBlobKey_shouldFail() throws IOException {
final PermanentBlobKey missingKey = new PermanentBlobKey();
final BlobLibraryCacheManager libraryCacheManager = createSimpleBlobLibraryCacheManager();
final LibraryCacheManager.ClassLoaderLease classLoaderLease =
libraryCacheManager.registerClassLoaderLease(new JobID());
classLoaderLease.getOrResolveClassLoader(
Collections.singletonList(missingKey), Collections.emptyList());
}
@Test(expected = IllegalStateException.class)
public void getOrResolveClassLoader_closedLease_shouldFail() throws IOException {
final BlobLibraryCacheManager libraryCacheManager = createSimpleBlobLibraryCacheManager();
final LibraryCacheManager.ClassLoaderLease classLoaderLease =
libraryCacheManager.registerClassLoaderLease(new JobID());
classLoaderLease.release();
classLoaderLease.getOrResolveClassLoader(Collections.emptyList(), Collections.emptyList());
}
@Test
public void closingAllLeases_willReleaseUserCodeClassLoader() throws IOException {
final TestingClassLoader classLoader = new TestingClassLoader();
final BlobLibraryCacheManager libraryCacheManager =
new TestingBlobLibraryCacheManagerBuilder()
.setClassLoaderFactory(ignored -> classLoader)
.build();
final JobID jobId = new JobID();
final LibraryCacheManager.ClassLoaderLease classLoaderLease1 =
libraryCacheManager.registerClassLoaderLease(jobId);
final LibraryCacheManager.ClassLoaderLease classLoaderLease2 =
libraryCacheManager.registerClassLoaderLease(jobId);
UserCodeClassLoader userCodeClassLoader =
classLoaderLease1.getOrResolveClassLoader(
Collections.emptyList(), Collections.emptyList());
classLoaderLease1.release();
assertFalse(classLoader.isClosed());
classLoaderLease2.release();
if (wrapsSystemClassLoader) {
assertEquals(userCodeClassLoader.asClassLoader(), ClassLoader.getSystemClassLoader());
assertFalse(classLoader.isClosed());
} else {
assertTrue(classLoader.isClosed());
}
}
@Test
public void differentLeasesForSameJob_returnSameClassLoader() throws IOException {
final BlobLibraryCacheManager libraryCacheManager = createSimpleBlobLibraryCacheManager();
final JobID jobId = new JobID();
final LibraryCacheManager.ClassLoaderLease classLoaderLease1 =
libraryCacheManager.registerClassLoaderLease(jobId);
final LibraryCacheManager.ClassLoaderLease classLoaderLease2 =
libraryCacheManager.registerClassLoaderLease(jobId);
final UserCodeClassLoader classLoader1 =
classLoaderLease1.getOrResolveClassLoader(
Collections.emptyList(), Collections.emptyList());
final UserCodeClassLoader classLoader2 =
classLoaderLease2.getOrResolveClassLoader(
Collections.emptyList(), Collections.emptyList());
assertThat(classLoader1, sameInstance(classLoader2));
}
@Test(expected = IllegalStateException.class)
public void closingLibraryCacheManager_invalidatesAllOpenLeases() throws IOException {
final BlobLibraryCacheManager libraryCacheManager = createSimpleBlobLibraryCacheManager();
final LibraryCacheManager.ClassLoaderLease classLoaderLease =
libraryCacheManager.registerClassLoaderLease(new JobID());
libraryCacheManager.shutdown();
classLoaderLease.getOrResolveClassLoader(Collections.emptyList(), Collections.emptyList());
}
@Test
public void closingLibraryCacheManager_closesClassLoader() throws IOException {
final TestingClassLoader classLoader = new TestingClassLoader();
final BlobLibraryCacheManager libraryCacheManager =
new TestingBlobLibraryCacheManagerBuilder()
.setClassLoaderFactory(ignored -> classLoader)
.build();
final LibraryCacheManager.ClassLoaderLease classLoaderLease =
libraryCacheManager.registerClassLoaderLease(new JobID());
UserCodeClassLoader userCodeClassLoader =
classLoaderLease.getOrResolveClassLoader(
Collections.emptyList(), Collections.emptyList());
libraryCacheManager.shutdown();
if (wrapsSystemClassLoader) {
assertEquals(userCodeClassLoader.asClassLoader(), ClassLoader.getSystemClassLoader());
assertFalse(classLoader.isClosed());
} else {
assertTrue(classLoader.isClosed());
}
}
@Test
public void releaseUserCodeClassLoader_willRunReleaseHooks()
throws IOException, InterruptedException {
final BlobLibraryCacheManager libraryCacheManager =
new TestingBlobLibraryCacheManagerBuilder().build();
final LibraryCacheManager.ClassLoaderLease classLoaderLease =
libraryCacheManager.registerClassLoaderLease(new JobID());
final UserCodeClassLoader userCodeClassLoader =
classLoaderLease.getOrResolveClassLoader(
Collections.emptyList(), Collections.emptyList());
final OneShotLatch releaseHookLatch = new OneShotLatch();
userCodeClassLoader.registerReleaseHookIfAbsent("test", releaseHookLatch::trigger);
// this should trigger the release of the
|
loader
|
java
|
apache__maven
|
compat/maven-builder-support/src/test/java/org/apache/maven/building/StringSourceTest.java
|
{
"start": 997,
"end": 1889
}
|
class ____ {
@Test
void testGetInputStream() throws Exception {
StringSource source = new StringSource("Hello World!");
try (InputStream is = source.getInputStream();
Scanner scanner = new Scanner(is)) {
assertEquals("Hello World!", scanner.nextLine());
}
}
@Test
void testGetLocation() {
StringSource source = new StringSource("Hello World!");
assertEquals("(memory)", source.getLocation());
source = new StringSource("Hello World!", "LOCATION");
assertEquals("LOCATION", source.getLocation());
}
@Test
void testGetContent() {
StringSource source = new StringSource(null);
assertEquals("", source.getContent());
source = new StringSource("Hello World!", "LOCATION");
assertEquals("Hello World!", source.getContent());
}
}
|
StringSourceTest
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpper.java
|
{
"start": 1078,
"end": 3070
}
|
class ____ extends ChangeCase {
public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "ToUpper", ToUpper::new);
private static final TransportVersion ESQL_SERIALIZE_SOURCE_FUNCTIONS_WARNINGS = TransportVersion.fromName(
"esql_serialize_source_functions_warnings"
);
@FunctionInfo(
returnType = { "keyword" },
description = "Returns a new string representing the input string converted to upper case.",
examples = @Example(file = "string", tag = "to_upper")
)
public ToUpper(Source source, @Param(name = "str", type = { "keyword", "text" }, description = """
String expression. If `null`, the function returns `null`. The input can be a single-valued column or expression,
or a multi-valued column or expression {applies_to}`stack: ga 9.1.0`.
""") Expression field, Configuration configuration) {
super(source, field, configuration, Case.UPPER);
}
private ToUpper(StreamInput in) throws IOException {
this(
in.getTransportVersion().supports(ESQL_SERIALIZE_SOURCE_FUNCTIONS_WARNINGS)
? Source.readFrom((PlanStreamInput) in)
: Source.EMPTY,
in.readNamedWriteable(Expression.class),
((PlanStreamInput) in).configuration()
);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
if (out.getTransportVersion().supports(ESQL_SERIALIZE_SOURCE_FUNCTIONS_WARNINGS)) {
source().writeTo(out);
}
out.writeNamedWriteable(field());
}
@Override
public String getWriteableName() {
return ENTRY.name;
}
public ToUpper replaceChild(Expression child) {
return new ToUpper(source(), child, configuration());
}
@Override
protected NodeInfo<? extends Expression> info() {
return NodeInfo.create(this, ToUpper::new, field(), configuration());
}
}
|
ToUpper
|
java
|
spring-projects__spring-boot
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/logging/ConditionEvaluationReportMessage.java
|
{
"start": 1542,
"end": 7748
}
|
class ____ {
private final StringBuilder message;
public ConditionEvaluationReportMessage(ConditionEvaluationReport report) {
this(report, "CONDITIONS EVALUATION REPORT");
}
public ConditionEvaluationReportMessage(ConditionEvaluationReport report, String title) {
this.message = getLogMessage(report, title);
}
private StringBuilder getLogMessage(ConditionEvaluationReport report, String title) {
String separator = "=".repeat(title.length());
StringBuilder message = new StringBuilder();
message.append(String.format("%n%n%n"));
message.append(String.format("%s%n", separator));
message.append(String.format("%s%n", title));
message.append(String.format("%s%n%n%n", separator));
Map<String, ConditionAndOutcomes> shortOutcomes = orderByName(report.getConditionAndOutcomesBySource());
logPositiveMatches(message, shortOutcomes);
logNegativeMatches(message, shortOutcomes);
logExclusions(report, message);
logUnconditionalClasses(report, message);
message.append(String.format("%n%n"));
return message;
}
private void logPositiveMatches(StringBuilder message, Map<String, ConditionAndOutcomes> shortOutcomes) {
message.append(String.format("Positive matches:%n"));
message.append(String.format("-----------------%n"));
List<Entry<String, ConditionAndOutcomes>> matched = shortOutcomes.entrySet()
.stream()
.filter((entry) -> entry.getValue().isFullMatch())
.toList();
if (matched.isEmpty()) {
message.append(String.format("%n None%n"));
}
else {
matched.forEach((entry) -> addMatchLogMessage(message, entry.getKey(), entry.getValue()));
}
message.append(String.format("%n%n"));
}
private void logNegativeMatches(StringBuilder message, Map<String, ConditionAndOutcomes> shortOutcomes) {
message.append(String.format("Negative matches:%n"));
message.append(String.format("-----------------%n"));
List<Entry<String, ConditionAndOutcomes>> nonMatched = shortOutcomes.entrySet()
.stream()
.filter((entry) -> !entry.getValue().isFullMatch())
.toList();
if (nonMatched.isEmpty()) {
message.append(String.format("%n None%n"));
}
else {
nonMatched.forEach((entry) -> addNonMatchLogMessage(message, entry.getKey(), entry.getValue()));
}
message.append(String.format("%n%n"));
}
private void logExclusions(ConditionEvaluationReport report, StringBuilder message) {
message.append(String.format("Exclusions:%n"));
message.append(String.format("-----------%n"));
if (report.getExclusions().isEmpty()) {
message.append(String.format("%n None%n"));
}
else {
for (String exclusion : report.getExclusions()) {
message.append(String.format("%n %s%n", exclusion));
}
}
message.append(String.format("%n%n"));
}
private void logUnconditionalClasses(ConditionEvaluationReport report, StringBuilder message) {
message.append(String.format("Unconditional classes:%n"));
message.append(String.format("----------------------%n"));
if (report.getUnconditionalClasses().isEmpty()) {
message.append(String.format("%n None%n"));
}
else {
for (String unconditionalClass : report.getUnconditionalClasses()) {
message.append(String.format("%n %s%n", unconditionalClass));
}
}
}
private Map<String, ConditionAndOutcomes> orderByName(Map<String, ConditionAndOutcomes> outcomes) {
MultiValueMap<String, String> map = mapToFullyQualifiedNames(outcomes.keySet());
List<String> shortNames = new ArrayList<>(map.keySet());
Collections.sort(shortNames);
Map<String, ConditionAndOutcomes> result = new LinkedHashMap<>();
for (String shortName : shortNames) {
List<String> fullyQualifiedNames = map.get(shortName);
Assert.state(fullyQualifiedNames != null, "'fullyQualifiedNames' must not be null");
if (fullyQualifiedNames.size() > 1) {
fullyQualifiedNames
.forEach((fullyQualifiedName) -> result.put(fullyQualifiedName, outcomes.get(fullyQualifiedName)));
}
else {
result.put(shortName, outcomes.get(fullyQualifiedNames.get(0)));
}
}
return result;
}
private MultiValueMap<String, String> mapToFullyQualifiedNames(Set<String> keySet) {
LinkedMultiValueMap<String, String> map = new LinkedMultiValueMap<>();
keySet
.forEach((fullyQualifiedName) -> map.add(ClassUtils.getShortName(fullyQualifiedName), fullyQualifiedName));
return map;
}
private void addMatchLogMessage(StringBuilder message, String source, ConditionAndOutcomes matches) {
message.append(String.format("%n %s matched:%n", source));
for (ConditionAndOutcome match : matches) {
logConditionAndOutcome(message, " ", match);
}
}
private void addNonMatchLogMessage(StringBuilder message, String source,
ConditionAndOutcomes conditionAndOutcomes) {
message.append(String.format("%n %s:%n", source));
List<ConditionAndOutcome> matches = new ArrayList<>();
List<ConditionAndOutcome> nonMatches = new ArrayList<>();
for (ConditionAndOutcome conditionAndOutcome : conditionAndOutcomes) {
if (conditionAndOutcome.getOutcome().isMatch()) {
matches.add(conditionAndOutcome);
}
else {
nonMatches.add(conditionAndOutcome);
}
}
message.append(String.format(" Did not match:%n"));
for (ConditionAndOutcome nonMatch : nonMatches) {
logConditionAndOutcome(message, " ", nonMatch);
}
if (!matches.isEmpty()) {
message.append(String.format(" Matched:%n"));
for (ConditionAndOutcome match : matches) {
logConditionAndOutcome(message, " ", match);
}
}
}
private void logConditionAndOutcome(StringBuilder message, String indent, ConditionAndOutcome conditionAndOutcome) {
message.append(String.format("%s- ", indent));
String outcomeMessage = conditionAndOutcome.getOutcome().getMessage();
if (StringUtils.hasLength(outcomeMessage)) {
message.append(outcomeMessage);
}
else {
message.append(conditionAndOutcome.getOutcome().isMatch() ? "matched" : "did not match");
}
message.append(" (");
message.append(ClassUtils.getShortName(conditionAndOutcome.getCondition().getClass()));
message.append(String.format(")%n"));
}
@Override
public String toString() {
return this.message.toString();
}
}
|
ConditionEvaluationReportMessage
|
java
|
google__gson
|
gson/src/test/java/com/google/gson/functional/CustomDeserializerTest.java
|
{
"start": 4575,
"end": 4648
}
|
class ____ extends MyBase {
String field1;
}
private static
|
SubType1
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/filter/wall/WallSelectLimitTest.java
|
{
"start": 1099,
"end": 3188
}
|
class ____ extends TestCase {
private String sql = "select * from t";
private WallConfig config = new WallConfig();
protected void setUp() throws Exception {
config.setSelectLimit(1000);
}
public void testMySql() throws Exception {
WallProvider provider = new MySqlWallProvider(config);
WallCheckResult checkResult = provider.check(sql);
String resultSql = checkResult.getSql();
System.out.println(resultSql);
assertEquals("SELECT *\n" +
"FROM t\n" +
"LIMIT 1000", resultSql);
}
public void testDB2() throws Exception {
WallProvider provider = new DB2WallProvider(config);
WallCheckResult checkResult = provider.check(sql);
String resultSql = checkResult.getSql();
System.out.println(resultSql);
assertEquals("SELECT *\n" +
"FROM t\n" +
"FETCH FIRST 1000 ROWS ONLY", resultSql);
}
public void testSQLServer() throws Exception {
WallProvider provider = new SQLServerWallProvider(config);
WallCheckResult checkResult = provider.check(sql);
String resultSql = checkResult.getSql();
System.out.println(resultSql);
assertEquals("SELECT TOP 1000 *\n" +
"FROM t", resultSql);
}
public void testOracle() throws Exception {
WallProvider provider = new OracleWallProvider(config);
WallCheckResult checkResult = provider.check(sql);
String resultSql = checkResult.getSql();
System.out.println(resultSql);
assertEquals("SELECT *\n" +
"FROM t\n" +
"WHERE ROWNUM <= 1000", resultSql);
}
public void testPG() throws Exception {
WallProvider provider = new PGWallProvider(config);
WallCheckResult checkResult = provider.check(sql);
String resultSql = checkResult.getSql();
System.out.println(resultSql);
assertEquals("SELECT *\n" +
"FROM t\n" +
"LIMIT 1000", resultSql);
}
}
|
WallSelectLimitTest
|
java
|
spring-projects__spring-boot
|
module/spring-boot-jdbc/src/main/java/org/springframework/boot/jdbc/autoconfigure/health/DataSourceHealthIndicatorProperties.java
|
{
"start": 1017,
"end": 1445
}
|
class ____ {
/**
* Whether to ignore AbstractRoutingDataSources when creating database health
* indicators.
*/
private boolean ignoreRoutingDataSources;
public boolean isIgnoreRoutingDataSources() {
return this.ignoreRoutingDataSources;
}
public void setIgnoreRoutingDataSources(boolean ignoreRoutingDataSources) {
this.ignoreRoutingDataSources = ignoreRoutingDataSources;
}
}
|
DataSourceHealthIndicatorProperties
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/selection/jaxb/test2/OrderShippingDetailsType.java
|
{
"start": 576,
"end": 1411
}
|
class ____ {
@XmlElementRef(name = "orderShippedFrom",
namespace = "http://www.mapstruct.org/ap/test/jaxb/selection/test2", type = JAXBElement.class)
private JAXBElement<String> orderShippedFrom;
@XmlElementRef(name = "orderShippedTo",
namespace = "http://www.mapstruct.org/ap/test/jaxb/selection/test2", type = JAXBElement.class)
private JAXBElement<String> orderShippedTo;
public JAXBElement<String> getOrderShippedFrom() {
return orderShippedFrom;
}
public void setOrderShippedFrom(JAXBElement<String> value) {
this.orderShippedFrom = value;
}
public JAXBElement<String> getOrderShippedTo() {
return orderShippedTo;
}
public void setOrderShippedTo(JAXBElement<String> value) {
this.orderShippedTo = value;
}
}
|
OrderShippingDetailsType
|
java
|
apache__camel
|
core/camel-base-engine/src/main/java/org/apache/camel/impl/engine/DefaultHealthCheckResolver.java
|
{
"start": 1320,
"end": 5217
}
|
class ____ implements HealthCheckResolver, CamelContextAware {
public static final String HEALTH_CHECK_RESOURCE_PATH = "META-INF/services/org/apache/camel/health-check/";
protected FactoryFinder healthCheckFactory;
private CamelContext camelContext;
@Override
public CamelContext getCamelContext() {
return camelContext;
}
@Override
public void setCamelContext(CamelContext camelContext) {
this.camelContext = camelContext;
}
@Override
public HealthCheck resolveHealthCheck(String id) {
// lookup in registry first
HealthCheck answer = camelContext.getRegistry().lookupByNameAndType(id + "-health-check", HealthCheck.class);
if (answer == null) {
answer = camelContext.getRegistry().lookupByNameAndType(id, HealthCheck.class);
}
if (answer != null) {
return answer;
}
Class<?> type = null;
try {
type = findHealthCheck(id, camelContext);
} catch (NoFactoryAvailableException e) {
// ignore
} catch (Exception e) {
throw new IllegalArgumentException("Invalid URI, no HealthCheck registered for id: " + id, e);
}
if (type != null) {
if (HealthCheck.class.isAssignableFrom(type)) {
return (HealthCheck) camelContext.getInjector().newInstance(type, false);
} else {
throw new IllegalArgumentException(
"Resolving health-check: " + id + " detected type conflict: Not a HealthCheck implementation. Found: "
+ type.getName());
}
}
return null;
}
@Override
public HealthCheckRepository resolveHealthCheckRepository(String id) {
// lookup in registry first
HealthCheckRepository answer
= camelContext.getRegistry().lookupByNameAndType(id + "-health-check-repository", HealthCheckRepository.class);
if (answer == null) {
answer = camelContext.getRegistry().lookupByNameAndType(id, HealthCheckRepository.class);
}
if (answer != null) {
return answer;
}
Class<?> type = null;
try {
type = findHealthCheckRepository(id, camelContext);
} catch (NoFactoryAvailableException e) {
// ignore
} catch (Exception e) {
throw new IllegalArgumentException("Invalid URI, no HealthCheckRepository registered for id: " + id, e);
}
if (type != null) {
if (HealthCheckRepository.class.isAssignableFrom(type)) {
return (HealthCheckRepository) camelContext.getInjector().newInstance(type, false);
} else {
throw new IllegalArgumentException(
"Resolving health-check-repository: " + id
+ " detected type conflict: Not a HealthCheckRepository implementation. Found: "
+ type.getName());
}
}
return null;
}
protected Class<?> findHealthCheck(String name, CamelContext context) throws Exception {
if (healthCheckFactory == null) {
healthCheckFactory = context.getCamelContextExtension().getFactoryFinder(HEALTH_CHECK_RESOURCE_PATH);
}
return healthCheckFactory.findOptionalClass(name + "-check").orElse(null);
}
protected Class<?> findHealthCheckRepository(String name, CamelContext context) throws Exception {
if (healthCheckFactory == null) {
healthCheckFactory = context.getCamelContextExtension().getFactoryFinder(HEALTH_CHECK_RESOURCE_PATH);
}
return healthCheckFactory.findOptionalClass(name + "-repository").orElse(null);
}
}
|
DefaultHealthCheckResolver
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/boot/models/annotations/internal/CollectionIdJavaTypeAnnotation.java
|
{
"start": 478,
"end": 1675
}
|
class ____ implements CollectionIdJavaType {
private java.lang.Class<? extends org.hibernate.type.descriptor.java.BasicJavaType<?>> value;
/**
* Used in creating dynamic annotation instances (e.g. from XML)
*/
public CollectionIdJavaTypeAnnotation(ModelsContext modelContext) {
}
/**
* Used in creating annotation instances from JDK variant
*/
public CollectionIdJavaTypeAnnotation(CollectionIdJavaType annotation, ModelsContext modelContext) {
this.value = annotation.value();
}
/**
* Used in creating annotation instances from Jandex variant
*/
public CollectionIdJavaTypeAnnotation(Map<String, Object> attributeValues, ModelsContext modelContext) {
this.value = (Class<? extends org.hibernate.type.descriptor.java.BasicJavaType<?>>) attributeValues.get( "value" );
}
@Override
public Class<? extends Annotation> annotationType() {
return CollectionIdJavaType.class;
}
@Override
public java.lang.Class<? extends org.hibernate.type.descriptor.java.BasicJavaType<?>> value() {
return value;
}
public void value(java.lang.Class<? extends org.hibernate.type.descriptor.java.BasicJavaType<?>> value) {
this.value = value;
}
}
|
CollectionIdJavaTypeAnnotation
|
java
|
lettuce-io__lettuce-core
|
src/main/java/io/lettuce/core/internal/HostAndPort.java
|
{
"start": 165,
"end": 7207
}
|
class ____ {
private static final int NO_PORT = -1;
public final String hostText;
public final int port;
/**
*
* @param hostText must not be empty or {@code null}.
* @param port
*/
private HostAndPort(String hostText, int port) {
LettuceAssert.notNull(hostText, "HostText must not be null");
this.hostText = hostText;
this.port = port;
}
/**
* Create a {@link HostAndPort} of {@code host} and {@code port}
*
* @param host the hostname
* @param port a valid port
* @return the {@link HostAndPort} of {@code host} and {@code port}
*/
public static HostAndPort of(String host, int port) {
LettuceAssert.isTrue(isValidPort(port), () -> String.format("Port out of range: %s", port));
HostAndPort parsedHost = parse(host);
LettuceAssert.isTrue(!parsedHost.hasPort(), () -> String.format("Host has a port: %s", host));
return new HostAndPort(host, port);
}
/**
* Parse a host and port string into a {@link HostAndPort}. The port is optional. Examples: {@code host:port} or
* {@code host}
*
* @param hostPortString
* @return
*/
public static HostAndPort parse(String hostPortString) {
LettuceAssert.notNull(hostPortString, "HostPortString must not be null");
String host;
String portString = null;
if (hostPortString.startsWith("[")) {
String[] hostAndPort = getHostAndPortFromBracketedHost(hostPortString);
host = hostAndPort[0];
portString = hostAndPort[1];
} else {
int colonPos = hostPortString.indexOf(':');
if (colonPos >= 0 && hostPortString.indexOf(':', colonPos + 1) == -1) {
// Exactly 1 colon. Split into host:port.
host = hostPortString.substring(0, colonPos);
portString = hostPortString.substring(colonPos + 1);
} else {
// 0 or 2+ colons. Bare hostname or IPv6 literal.
host = hostPortString;
}
}
int port = NO_PORT;
if (!LettuceStrings.isEmpty(portString)) {
// Try to parse the whole port string as a number.
// JDK7 accepts leading plus signs. We don't want to.
LettuceAssert.isTrue(!portString.startsWith("+"), () -> String.format("Cannot port number: %s", hostPortString));
try {
port = Integer.parseInt(portString);
} catch (NumberFormatException e) {
throw new IllegalArgumentException(String.format("Cannot parse port number: %s", hostPortString));
}
LettuceAssert.isTrue(isValidPort(port), () -> String.format("Port number out of range: %s", hostPortString));
}
return new HostAndPort(host, port);
}
/**
* Temporary workaround until Redis provides IPv6 addresses in bracket notation. Allows parsing of {@code 1.2.3.4:6479} and
* {@code dead:beef:dead:beef:affe::1:6379} into host and port. We assume the last item after the colon is a port.
*
* @param hostAndPortPart the string containing the host and port
* @return the parsed {@link HostAndPort}.
*/
public static HostAndPort parseCompat(String hostAndPortPart) {
int firstColonIndex = hostAndPortPart.indexOf(':');
int lastColonIndex = hostAndPortPart.lastIndexOf(':');
int bracketIndex = hostAndPortPart.lastIndexOf(']');
if (firstColonIndex != lastColonIndex && lastColonIndex != -1 && bracketIndex == -1) {
String hostPart = hostAndPortPart.substring(0, lastColonIndex);
String portPart = hostAndPortPart.substring(lastColonIndex + 1);
return HostAndPort.of(hostPart, Integer.parseInt(portPart));
}
return HostAndPort.parse(hostAndPortPart);
}
/**
*
* @return {@code true} if has a port.
*/
public boolean hasPort() {
return port != NO_PORT;
}
/**
*
* @return the host text.
*/
public String getHostText() {
return hostText;
}
/**
*
* @return the port.
*/
public int getPort() {
if (!hasPort()) {
throw new IllegalStateException("No port present.");
}
return port;
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (!(o instanceof HostAndPort))
return false;
HostAndPort that = (HostAndPort) o;
if (port != that.port)
return false;
return hostText != null ? hostText.equals(that.hostText) : that.hostText == null;
}
@Override
public int hashCode() {
int result = hostText != null ? hostText.hashCode() : 0;
result = 31 * result + port;
return result;
}
/**
* Parses a bracketed host-port string, throwing IllegalArgumentException if parsing fails.
*
* @param hostPortString the full bracketed host-port specification. Post might not be specified.
* @return an array with 2 strings: host and port, in that order.
* @throws IllegalArgumentException if parsing the bracketed host-port string fails.
*/
private static String[] getHostAndPortFromBracketedHost(String hostPortString) {
LettuceAssert.isTrue(hostPortString.charAt(0) == '[',
() -> String.format("Bracketed host-port string must start with a bracket: %s", hostPortString));
int colonIndex = hostPortString.indexOf(':');
int closeBracketIndex = hostPortString.lastIndexOf(']');
LettuceAssert.isTrue(colonIndex > -1 && closeBracketIndex > colonIndex,
() -> String.format("Invalid bracketed host/port: %s", hostPortString));
String host = hostPortString.substring(1, closeBracketIndex);
if (closeBracketIndex + 1 == hostPortString.length()) {
return new String[] { host, "" };
} else {
LettuceAssert.isTrue(hostPortString.charAt(closeBracketIndex + 1) == ':',
"Only a colon may follow a close bracket: " + hostPortString);
for (int i = closeBracketIndex + 2; i < hostPortString.length(); ++i) {
LettuceAssert.isTrue(Character.isDigit(hostPortString.charAt(i)),
() -> String.format("Port must be numeric: %s", hostPortString));
}
return new String[] { host, hostPortString.substring(closeBracketIndex + 2) };
}
}
/**
*
* @param port the port number
* @return {@code true} for valid port numbers.
*/
private static boolean isValidPort(int port) {
return port >= 0 && port <= 65535;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(hostText);
if (hasPort()) {
sb.append(':').append(port);
}
return sb.toString();
}
}
|
HostAndPort
|
java
|
apache__maven
|
its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng3943PluginExecutionInheritanceTest.java
|
{
"start": 1183,
"end": 2250
}
|
class ____ extends AbstractMavenIntegrationTestCase {
/**
* Test that plugin executions are properly merged during inheritance, even if the child uses a different
* plugin version than the parent.
*
* @throws Exception in case of failure
*/
@Test
public void testitMNG3943() throws Exception {
File testDir = extractResources("/mng-3943");
Verifier verifier = newVerifier(new File(testDir, "sub").getAbsolutePath());
verifier.setAutoclean(false);
verifier.deleteDirectory("target");
verifier.addCliArgument("validate");
verifier.execute();
verifier.verifyErrorFreeLog();
List<String> executions = verifier.loadLines("target/exec.log");
// NOTE: Ordering of executions is another issue (MNG-3887), so ignore/normalize order
Collections.sort(executions);
List<String> expected = Arrays.asList(new String[] {"child-1", "child-default", "parent-1"});
assertEquals(expected, executions);
}
}
|
MavenITmng3943PluginExecutionInheritanceTest
|
java
|
elastic__elasticsearch
|
x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/RestDeleteShutdownNodeAction.java
|
{
"start": 670,
"end": 1487
}
|
class ____ extends BaseRestHandler {
@Override
public String getName() {
return "delete_shutdown_node";
}
@Override
public List<Route> routes() {
return List.of(new Route(RestRequest.Method.DELETE, "/_nodes/{nodeId}/shutdown"));
}
@Override
public boolean canTripCircuitBreaker() {
return false;
}
@Override
protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) {
final var nodeId = request.param("nodeId");
final var parsedRequest = new DeleteShutdownNodeAction.Request(getMasterNodeTimeout(request), getAckTimeout(request), nodeId);
return channel -> client.execute(DeleteShutdownNodeAction.INSTANCE, parsedRequest, new RestToXContentListener<>(channel));
}
}
|
RestDeleteShutdownNodeAction
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/generated/sqldefault/PostgresUuidTest.java
|
{
"start": 1272,
"end": 1356
}
|
class ____ {
@Id @Generated
@ColumnDefault("gen_random_uuid()")
UUID uuid;
}
}
|
It
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/threadsafety/ImmutableCheckerTest.java
|
{
"start": 12493,
"end": 12669
}
|
class ____ {
List<Integer> xs = new ArrayList<>();
@Immutable
// BUG: Diagnostic contains: has mutable enclosing instance
|
A
|
java
|
apache__avro
|
lang/java/avro/src/test/java/org/apache/avro/TestCompare.java
|
{
"start": 1362,
"end": 7664
}
|
class ____ {
@Test
void testNull() throws Exception {
Schema schema = new Schema.Parser().parse("\"null\"");
byte[] b = render(null, schema, new GenericDatumWriter<>());
assertEquals(0, BinaryData.compare(b, 0, b, 0, schema));
}
@Test
void testBoolean() throws Exception {
check("\"boolean\"", Boolean.FALSE, Boolean.TRUE);
}
@Test
void string() throws Exception {
check("\"string\"", new Utf8(""), new Utf8("a"));
check("\"string\"", new Utf8("a"), new Utf8("b"));
check("\"string\"", new Utf8("a"), new Utf8("ab"));
check("\"string\"", new Utf8("ab"), new Utf8("b"));
}
@Test
void bytes() throws Exception {
check("\"bytes\"", ByteBuffer.wrap(new byte[] {}), ByteBuffer.wrap(new byte[] { 1 }));
check("\"bytes\"", ByteBuffer.wrap(new byte[] { 1 }), ByteBuffer.wrap(new byte[] { 2 }));
check("\"bytes\"", ByteBuffer.wrap(new byte[] { 1, 2 }), ByteBuffer.wrap(new byte[] { 2 }));
}
@Test
void testInt() throws Exception {
check("\"int\"", -1, 0);
check("\"int\"", 0, 1);
}
@Test
void testLong() throws Exception {
check("\"long\"", 11L, 12L);
check("\"long\"", (long) -1, 1L);
}
@Test
void testFloat() throws Exception {
check("\"float\"", 1.1f, 1.2f);
check("\"float\"", (float) -1.1, 1.0f);
}
@Test
void testDouble() throws Exception {
check("\"double\"", 1.2, 1.3);
check("\"double\"", -1.2, 1.3);
}
@Test
void array() throws Exception {
String json = "{\"type\":\"array\", \"items\": \"long\"}";
Schema schema = new Schema.Parser().parse(json);
GenericArray<Long> a1 = new GenericData.Array<>(1, schema);
a1.add(1L);
GenericArray<Long> a2 = new GenericData.Array<>(1, schema);
a2.add(1L);
a2.add(0L);
check(json, a1, a2);
}
@Test
void record() throws Exception {
String fields = " \"fields\":[" + "{\"name\":\"f\",\"type\":\"int\",\"order\":\"ignore\"},"
+ "{\"name\":\"g\",\"type\":\"int\",\"order\":\"descending\"}," + "{\"name\":\"h\",\"type\":\"int\"}]}";
String recordJson = "{\"type\":\"record\", \"name\":\"Test\"," + fields;
Schema schema = new Schema.Parser().parse(recordJson);
GenericData.Record r1 = new GenericData.Record(schema);
r1.put("f", 1);
r1.put("g", 13);
r1.put("h", 41);
GenericData.Record r2 = new GenericData.Record(schema);
r2.put("f", 0);
r2.put("g", 12);
r2.put("h", 41);
check(recordJson, r1, r2);
r2.put("f", 0);
r2.put("g", 13);
r2.put("h", 42);
check(recordJson, r1, r2);
String record2Json = "{\"type\":\"record\", \"name\":\"Test2\"," + fields;
Schema schema2 = new Schema.Parser().parse(record2Json);
GenericData.Record r3 = new GenericData.Record(schema2);
r3.put("f", 1);
r3.put("g", 13);
r3.put("h", 41);
assert (!r1.equals(r3)); // same fields, diff name
}
@Test
void testEnum() throws Exception {
String json = "{\"type\":\"enum\", \"name\":\"Test\",\"symbols\": [\"A\", \"B\"]}";
Schema schema = new Schema.Parser().parse(json);
check(json, new GenericData.EnumSymbol(schema, "A"), new GenericData.EnumSymbol(schema, "B"));
}
@Test
void fixed() throws Exception {
String json = "{\"type\": \"fixed\", \"name\":\"Test\", \"size\": 1}";
Schema schema = new Schema.Parser().parse(json);
check(json, new GenericData.Fixed(schema, new byte[] { (byte) 'a' }),
new GenericData.Fixed(schema, new byte[] { (byte) 'b' }));
}
@Test
void union() throws Exception {
check("[\"string\", \"long\"]", new Utf8("a"), new Utf8("b"), false);
check("[\"string\", \"long\"]", 1L, 2L, false);
check("[\"string\", \"long\"]", new Utf8("a"), 1L, false);
}
private static <T> void check(String schemaJson, T o1, T o2) throws Exception {
check(schemaJson, o1, o2, true);
}
private static <T> void check(String schemaJson, T o1, T o2, boolean comparable) throws Exception {
check(new Schema.Parser().parse(schemaJson), o1, o2, comparable, new GenericDatumWriter<>(), GenericData.get());
}
private static <T> void check(Schema schema, T o1, T o2, boolean comparable, DatumWriter<T> writer,
GenericData comparator) throws Exception {
byte[] b1 = render(o1, schema, writer);
byte[] b2 = render(o2, schema, writer);
assertEquals(-1, BinaryData.compare(b1, 0, b2, 0, schema));
assertEquals(1, BinaryData.compare(b2, 0, b1, 0, schema));
assertEquals(0, BinaryData.compare(b1, 0, b1, 0, schema));
assertEquals(0, BinaryData.compare(b2, 0, b2, 0, schema));
assertEquals(-1, compare(o1, o2, schema, comparable, comparator));
assertEquals(1, compare(o2, o1, schema, comparable, comparator));
assertEquals(0, compare(o1, o1, schema, comparable, comparator));
assertEquals(0, compare(o2, o2, schema, comparable, comparator));
assert (o1.equals(o1));
assert (o2.equals(o2));
assert (!o1.equals(o2));
assert (!o2.equals(o1));
assert (!o1.equals(new Object()));
assert (!o2.equals(new Object()));
assert (!o1.equals(null));
assert (!o2.equals(null));
assert (o1.hashCode() != o2.hashCode());
// check BinaryData.hashCode against Object.hashCode
if (schema.getType() != Schema.Type.ENUM) {
assertEquals(o1.hashCode(), BinaryData.hashCode(b1, 0, b1.length, schema));
assertEquals(o2.hashCode(), BinaryData.hashCode(b2, 0, b2.length, schema));
}
// check BinaryData.hashCode against GenericData.hashCode
assertEquals(comparator.hashCode(o1, schema), BinaryData.hashCode(b1, 0, b1.length, schema));
assertEquals(comparator.hashCode(o2, schema), BinaryData.hashCode(b2, 0, b2.length, schema));
}
@SuppressWarnings(value = "unchecked")
private static int compare(Object o1, Object o2, Schema schema, boolean comparable, GenericData comparator) {
return comparable ? ((Comparable<Object>) o1).compareTo(o2) : comparator.compare(o1, o2, schema);
}
private static <T> byte[] render(T datum, Schema schema, DatumWriter<T> writer) throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
writer.setSchema(schema);
Encoder enc = new EncoderFactory().directBinaryEncoder(out, null);
writer.write(datum, enc);
enc.flush();
return out.toByteArray();
}
}
|
TestCompare
|
java
|
apache__rocketmq
|
store/src/main/java/org/apache/rocketmq/store/queue/DispatchEntry.java
|
{
"start": 1026,
"end": 1800
}
|
class ____ {
public byte[] topic;
public int queueId;
public long queueOffset;
public long commitLogOffset;
public int messageSize;
public long tagCode;
public long storeTimestamp;
public static DispatchEntry from(@Nonnull DispatchRequest request) {
DispatchEntry entry = new DispatchEntry();
entry.topic = request.getTopic().getBytes(StandardCharsets.UTF_8);
entry.queueId = request.getQueueId();
entry.queueOffset = request.getConsumeQueueOffset();
entry.commitLogOffset = request.getCommitLogOffset();
entry.messageSize = request.getMsgSize();
entry.tagCode = request.getTagsCode();
entry.storeTimestamp = request.getStoreTimestamp();
return entry;
}
}
|
DispatchEntry
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/joinedsubclass/JoinedSubclassWithImplicitDiscriminatorTest.java
|
{
"start": 3901,
"end": 4051
}
|
class ____ extends Animal {
public Cat() {
super();
}
public Cat(Integer id) {
super( id );
}
}
@Entity(name = "Dog")
public static
|
Cat
|
java
|
eclipse-vertx__vert.x
|
vertx-core/src/main/java/io/vertx/core/net/ServerSSLOptions.java
|
{
"start": 823,
"end": 4946
}
|
class ____ extends SSLOptions {
/**
* Default value of whether client auth is required (SSL/TLS) = No
*/
public static final ClientAuth DEFAULT_CLIENT_AUTH = ClientAuth.NONE;
/**
* Default value of whether the server supports SNI = false
*/
public static final boolean DEFAULT_SNI = false;
private ClientAuth clientAuth;
private boolean sni;
/**
* Default constructor
*/
public ServerSSLOptions() {
super();
}
/**
* Copy constructor
*
* @param other the options to copy
*/
public ServerSSLOptions(ServerSSLOptions other) {
super(other);
clientAuth = other.clientAuth;
sni = other.sni;
}
/**
* Create options from JSON
*
* @param json the JSON
*/
public ServerSSLOptions(JsonObject json) {
super(json);
ServerSSLOptionsConverter.fromJson(json, this);
}
@Override
protected void init() {
super.init();
this.clientAuth = DEFAULT_CLIENT_AUTH;
this.sni = DEFAULT_SNI;
}
public ServerSSLOptions copy() {
return new ServerSSLOptions(this);
}
public ClientAuth getClientAuth() {
return clientAuth;
}
/**
* Set whether client auth is required
*
* @param clientAuth One of "NONE, REQUEST, REQUIRED". If it's set to "REQUIRED" then server will require the
* SSL cert to be presented otherwise it won't accept the request. If it's set to "REQUEST" then
* it won't mandate the certificate to be presented, basically make it optional.
* @return a reference to this, so the API can be used fluently
*/
public ServerSSLOptions setClientAuth(ClientAuth clientAuth) {
this.clientAuth = clientAuth;
return this;
}
/**
* @return whether the server supports Server Name Indication
*/
public boolean isSni() {
return sni;
}
/**
* Set whether the server supports Server Name Indiciation
*
* @return a reference to this, so the API can be used fluently
*/
public ServerSSLOptions setSni(boolean sni) {
this.sni = sni;
return this;
}
@Override
public ServerSSLOptions setKeyCertOptions(KeyCertOptions options) {
return (ServerSSLOptions) super.setKeyCertOptions(options);
}
@Override
public ServerSSLOptions setTrustOptions(TrustOptions options) {
return (ServerSSLOptions) super.setTrustOptions(options);
}
@Override
public ServerSSLOptions setUseAlpn(boolean useAlpn) {
return (ServerSSLOptions) super.setUseAlpn(useAlpn);
}
@Override
public ServerSSLOptions setSslHandshakeTimeout(long sslHandshakeTimeout) {
return (ServerSSLOptions) super.setSslHandshakeTimeout(sslHandshakeTimeout);
}
@Override
public ServerSSLOptions setSslHandshakeTimeoutUnit(TimeUnit sslHandshakeTimeoutUnit) {
return (ServerSSLOptions) super.setSslHandshakeTimeoutUnit(sslHandshakeTimeoutUnit);
}
@Override
public ServerSSLOptions setEnabledSecureTransportProtocols(Set<String> enabledSecureTransportProtocols) {
return (ServerSSLOptions) super.setEnabledSecureTransportProtocols(enabledSecureTransportProtocols);
}
@Override
public ServerSSLOptions setApplicationLayerProtocols(List<String> protocols) {
return (ServerSSLOptions) super.setApplicationLayerProtocols(protocols);
}
@Override
public ServerSSLOptions addEnabledCipherSuite(String suite) {
return (ServerSSLOptions) super.addEnabledCipherSuite(suite);
}
@Override
public ServerSSLOptions addCrlPath(String crlPath) throws NullPointerException {
return (ServerSSLOptions) super.addCrlPath(crlPath);
}
@Override
public ServerSSLOptions addCrlValue(Buffer crlValue) throws NullPointerException {
return (ServerSSLOptions) super.addCrlValue(crlValue);
}
@Override
public ServerSSLOptions addEnabledSecureTransportProtocol(String protocol) {
return (ServerSSLOptions) super.addEnabledSecureTransportProtocol(protocol);
}
/**
* Convert to JSON
*
* @return the JSON
*/
public JsonObject toJson() {
JsonObject json = super.toJson();
ServerSSLOptionsConverter.toJson(this, json);
return json;
}
}
|
ServerSSLOptions
|
java
|
quarkusio__quarkus
|
extensions/funqy/funqy-google-cloud-functions/runtime/src/main/java/io/quarkus/funqy/gcp/functions/event/StorageEvent.java
|
{
"start": 1244,
"end": 1345
}
|
class ____ {
public String entity;
public String entityId;
}
public static
|
Owner
|
java
|
apache__flink
|
flink-core/src/main/java/org/apache/flink/api/common/io/ReplicatingInputFormat.java
|
{
"start": 3530,
"end": 6051
}
|
class ____<OT, S extends InputSplit> extends RichInputFormat<OT, S> {
private static final long serialVersionUID = 1L;
private InputFormat<OT, S> replicatedIF;
public ReplicatingInputFormat(InputFormat<OT, S> wrappedIF) {
this.replicatedIF = wrappedIF;
}
public InputFormat<OT, S> getReplicatedInputFormat() {
return this.replicatedIF;
}
@Override
public void configure(Configuration parameters) {
this.replicatedIF.configure(parameters);
}
@Override
public BaseStatistics getStatistics(BaseStatistics cachedStatistics) throws IOException {
return this.replicatedIF.getStatistics(cachedStatistics);
}
@Override
public S[] createInputSplits(int minNumSplits) throws IOException {
return this.replicatedIF.createInputSplits(minNumSplits);
}
@Override
public InputSplitAssigner getInputSplitAssigner(S[] inputSplits) {
return new ReplicatingInputSplitAssigner(inputSplits);
}
@Override
public void open(S split) throws IOException {
this.replicatedIF.open(split);
}
@Override
public boolean reachedEnd() throws IOException {
return this.replicatedIF.reachedEnd();
}
@Override
public OT nextRecord(OT reuse) throws IOException {
return this.replicatedIF.nextRecord(reuse);
}
@Override
public void close() throws IOException {
this.replicatedIF.close();
}
@Override
public void setRuntimeContext(RuntimeContext context) {
if (this.replicatedIF instanceof RichInputFormat) {
((RichInputFormat) this.replicatedIF).setRuntimeContext(context);
}
}
@Override
public RuntimeContext getRuntimeContext() {
if (this.replicatedIF instanceof RichInputFormat) {
return ((RichInputFormat) this.replicatedIF).getRuntimeContext();
} else {
throw new RuntimeException(
"The underlying input format to this ReplicatingInputFormat isn't context aware");
}
}
@Override
public void openInputFormat() throws IOException {
if (this.replicatedIF instanceof RichInputFormat) {
((RichInputFormat) this.replicatedIF).openInputFormat();
}
}
@Override
public void closeInputFormat() throws IOException {
if (this.replicatedIF instanceof RichInputFormat) {
((RichInputFormat) this.replicatedIF).closeInputFormat();
}
}
}
|
ReplicatingInputFormat
|
java
|
spring-projects__spring-boot
|
module/spring-boot-servlet/src/main/java/org/springframework/boot/servlet/filter/OrderedFormContentFilter.java
|
{
"start": 934,
"end": 1425
}
|
class ____ extends FormContentFilter implements OrderedFilter {
/**
* Higher order to ensure the filter is applied before Spring Security.
*/
public static final int DEFAULT_ORDER = REQUEST_WRAPPER_FILTER_MAX_ORDER - 9900;
private int order = DEFAULT_ORDER;
@Override
public int getOrder() {
return this.order;
}
/**
* Set the order for this filter.
* @param order the order to set
*/
public void setOrder(int order) {
this.order = order;
}
}
|
OrderedFormContentFilter
|
java
|
apache__flink
|
flink-table/flink-table-runtime/src/test/java/org/apache/flink/table/runtime/operators/join/lookup/keyordered/TableAsyncExecutionControllerTest.java
|
{
"start": 2814,
"end": 15369
}
|
class ____ {
private static final KeySelector<Integer, Integer> keySelector = input -> input;
private final Queue<Integer> outputQueue = new LinkedList<>();
private final Queue<Watermark> outputWatermark = new LinkedList<>();
private final Queue<StreamRecord<Integer>> outputProcessedRecords = new LinkedList<>();
private final Queue<Integer> outputProcessedInputIndexes = new LinkedList<>();
private TestAsyncExecutionController asyncExecutionController;
private MailboxExecutor mailboxExecutor;
private TestLazyAsyncFunction asyncFunction;
@BeforeEach
public void before() throws Exception {
TaskMailbox mailbox = new TaskMailboxImpl();
MailboxProcessor mailboxProcessor =
new MailboxProcessor(controller -> {}, mailbox, StreamTaskActionExecutor.IMMEDIATE);
mailboxExecutor =
new MailboxExecutorImpl(
mailbox, 0, StreamTaskActionExecutor.IMMEDIATE, mailboxProcessor);
asyncFunction = new TestLazyAsyncFunction();
asyncFunction.open(DefaultOpenContext.INSTANCE);
asyncExecutionController =
new TestAsyncExecutionController(
element ->
asyncFunction.asyncInvoke(
element.getRecord().getValue(),
new Handler(
element,
new TestStreamElementQueueEntry(
element.getRecord(),
element.getInputIndex(),
outputProcessedRecords,
outputProcessedInputIndexes,
outputQueue),
mailboxExecutor,
asyncExecutionController)),
outputWatermark::add);
}
@AfterEach
public void after() throws Exception {
asyncFunction.close();
asyncExecutionController.close();
outputQueue.clear();
outputProcessedRecords.clear();
outputProcessedInputIndexes.clear();
outputWatermark.clear();
}
@Test
public void testPendingRecords() throws Exception {
asyncExecutionController.submitRecord(new StreamRecord<>(1, 1), null, 0);
asyncExecutionController.submitRecord(new StreamRecord<>(2, 2), null, 0);
asyncExecutionController.submitRecord(new StreamRecord<>(2, 3), null, 0);
asyncExecutionController.submitRecord(new StreamRecord<>(3, 4), null, 0);
asyncExecutionController.submitRecord(new StreamRecord<>(3, 5), null, 0);
asyncExecutionController.submitRecord(new StreamRecord<>(4, 6), null, 0);
Map<Integer, Deque<AecRecord<Integer, Integer>>> actualPending =
asyncExecutionController.pendingElements();
Epoch<Integer> epoch = new Epoch<>(new Watermark(Long.MIN_VALUE));
IntStream.range(0, 6).forEach(i -> epoch.incrementCount());
assertThat(actualPending.get(1))
.containsExactlyInAnyOrder(new AecRecord<>(new StreamRecord<>(1, 1), epoch, 0));
assertThat(actualPending.get(2))
.containsExactly(
new AecRecord<>(new StreamRecord<>(2, 2), epoch, 0),
new AecRecord<>(new StreamRecord<>(2, 3), epoch, 0));
assertThat(actualPending.get(3))
.containsExactly(
new AecRecord<>(new StreamRecord<>(3, 4), epoch, 0),
new AecRecord<>(new StreamRecord<>(3, 5), epoch, 0));
assertThat(actualPending.get(4))
.containsExactlyInAnyOrder(new AecRecord<>(new StreamRecord<>(4, 6), epoch, 0));
}
@Test
public void testDifferentKeyWithoutWatermark() throws Exception {
asyncExecutionController.submitRecord(new StreamRecord<>(1, 1), null, 0);
asyncExecutionController.submitRecord(new StreamRecord<>(2, 2), null, 0);
asyncExecutionController.submitRecord(new StreamRecord<>(3, 3), null, 0);
asyncExecutionController.submitRecord(new StreamRecord<>(4, 4), null, 0);
asyncExecutionController.submitRecord(new StreamRecord<>(5, 5), null, 0);
asyncExecutionController.submitRecord(new StreamRecord<>(6, 6), null, 0);
assertThat(asyncExecutionController.processedSize()).isEqualTo(6);
waitComplete();
Queue<Integer> expectedOutput = new LinkedList<>(Arrays.asList(1, 2, 3, 4, 5, 6));
assertThat(outputQueue.stream().sorted()).isEqualTo(expectedOutput);
Epoch<Integer> expectedEpoch = new Epoch<>(new Watermark(Long.MIN_VALUE));
assertThat(asyncExecutionController.getActiveEpoch()).isEqualTo(expectedEpoch);
}
@Test
public void testDifferentKeyWithWatermark() throws Exception {
asyncExecutionController.submitRecord(new StreamRecord<>(1, 1), null, 0);
asyncExecutionController.submitRecord(new StreamRecord<>(2, 2), null, 0);
asyncExecutionController.submitWatermark(new Watermark(3));
asyncExecutionController.submitRecord(new StreamRecord<>(3, 4), null, 0);
asyncExecutionController.submitRecord(new StreamRecord<>(4, 5), null, 0);
asyncExecutionController.submitWatermark(new Watermark(6));
asyncExecutionController.submitRecord(new StreamRecord<>(5, 7), null, 0);
asyncExecutionController.submitRecord(new StreamRecord<>(6, 8), null, 0);
assertThat(asyncExecutionController.processedSize()).isEqualTo(6);
waitComplete();
Queue<Integer> expectedOutput = new LinkedList<>(Arrays.asList(1, 2, 3, 4, 5, 6));
assertThat(outputQueue.stream().sorted()).isEqualTo(expectedOutput);
Queue<Watermark> expectedWatermark =
new LinkedList<>(Arrays.asList(new Watermark(3), new Watermark(6)));
assertThat(outputWatermark).isEqualTo(expectedWatermark);
Epoch<Integer> expectedEpoch = new Epoch<>(new Watermark(6));
assertThat(asyncExecutionController.getActiveEpoch()).isEqualTo(expectedEpoch);
}
@Test
public void testSameKeyWithWatermark() throws Exception {
asyncExecutionController.submitRecord(new StreamRecord<>(1, 1), null, 0);
asyncExecutionController.submitRecord(new StreamRecord<>(1, 2), null, 0);
asyncExecutionController.submitRecord(new StreamRecord<>(1, 3), null, 0);
asyncExecutionController.submitRecord(new StreamRecord<>(1, 4), null, 0);
asyncExecutionController.submitWatermark(new Watermark(5));
asyncExecutionController.submitWatermark(new Watermark(6));
asyncExecutionController.submitRecord(new StreamRecord<>(1, 7), null, 0);
asyncExecutionController.submitRecord(new StreamRecord<>(1, 8), null, 0);
asyncExecutionController.submitWatermark(new Watermark(9));
assertThat(asyncExecutionController.processedSize()).isEqualTo(6);
waitComplete();
Queue<Integer> expectedOutput = new LinkedList<>(Arrays.asList(1, 1, 1, 1, 1, 1));
assertThat(outputQueue).isEqualTo(expectedOutput);
Queue<StreamRecord<Integer>> expectedProcessed =
new LinkedList<>(
Arrays.asList(
new StreamRecord<>(1, 1),
new StreamRecord<>(1, 2),
new StreamRecord<>(1, 3),
new StreamRecord<>(1, 4),
new StreamRecord<>(1, 7),
new StreamRecord<>(1, 8)));
assertThat(outputProcessedRecords).isEqualTo(expectedProcessed);
Queue<Watermark> expectedWatermark =
new LinkedList<>(
Arrays.asList(new Watermark(5), new Watermark(6), new Watermark(9)));
assertThat(outputWatermark).isEqualTo(expectedWatermark);
Epoch<Integer> expectedEpoch = new Epoch<>(new Watermark(9));
assertThat(asyncExecutionController.getActiveEpoch()).isEqualTo(expectedEpoch);
}
@Test
public void testMixKeyWithWatermark() throws Exception {
asyncExecutionController.submitRecord(new StreamRecord<>(1, 1), null, 0);
asyncExecutionController.submitRecord(new StreamRecord<>(3, 2), null, 0);
asyncExecutionController.submitRecord(new StreamRecord<>(4, 3), null, 0);
asyncExecutionController.submitRecord(new StreamRecord<>(3, 4), null, 0);
asyncExecutionController.submitWatermark(new Watermark(5));
asyncExecutionController.submitWatermark(new Watermark(6));
asyncExecutionController.submitRecord(new StreamRecord<>(4, 7), null, 0);
asyncExecutionController.submitRecord(new StreamRecord<>(1, 8), null, 0);
asyncExecutionController.submitWatermark(new Watermark(9));
assertThat(asyncExecutionController.processedSize()).isEqualTo(6);
waitComplete();
Queue<Integer> expectedOutput = new LinkedList<>(Arrays.asList(1, 1, 3, 3, 4, 4));
assertThat(outputQueue.stream().sorted()).isEqualTo(expectedOutput);
Queue<StreamRecord<Integer>> expectedProcessed =
new LinkedList<>(Arrays.asList(new StreamRecord<>(1, 1), new StreamRecord<>(1, 8)));
assertKeyOrdered(outputProcessedRecords, expectedProcessed);
expectedProcessed =
new LinkedList<>(Arrays.asList(new StreamRecord<>(3, 2), new StreamRecord<>(3, 4)));
assertKeyOrdered(outputProcessedRecords, expectedProcessed);
expectedProcessed =
new LinkedList<>(Arrays.asList(new StreamRecord<>(4, 3), new StreamRecord<>(4, 7)));
assertKeyOrdered(outputProcessedRecords, expectedProcessed);
Queue<Watermark> expectedWatermark =
new LinkedList<>(
Arrays.asList(new Watermark(5), new Watermark(6), new Watermark(9)));
assertThat(outputWatermark).isEqualTo(expectedWatermark);
Epoch<Integer> expectedEpoch = new Epoch<>(new Watermark(9));
assertThat(asyncExecutionController.getActiveEpoch()).isEqualTo(expectedEpoch);
}
@Test
public void testProcessWithMultiInputs() throws Exception {
asyncExecutionController.submitRecord(new StreamRecord<>(1, 1), null, 1);
asyncExecutionController.submitRecord(new StreamRecord<>(1, 2), null, 2);
asyncExecutionController.submitRecord(new StreamRecord<>(1, 3), null, 1);
asyncExecutionController.submitRecord(new StreamRecord<>(1, 4), null, 3);
asyncExecutionController.submitWatermark(new Watermark(5));
asyncExecutionController.submitRecord(new StreamRecord<>(1, 6), null, 4);
assertThat(asyncExecutionController.processedSize()).isEqualTo(5);
waitComplete();
Queue<Integer> expectedOutput = new LinkedList<>(Arrays.asList(1, 1, 1, 1, 1));
assertThat(outputQueue).isEqualTo(expectedOutput);
Queue<StreamRecord<Integer>> expectedProcessed =
new LinkedList<>(
Arrays.asList(
new StreamRecord<>(1, 1),
new StreamRecord<>(1, 2),
new StreamRecord<>(1, 3),
new StreamRecord<>(1, 4),
new StreamRecord<>(1, 6)));
assertThat(outputProcessedRecords).isEqualTo(expectedProcessed);
Queue<Watermark> expectedWatermark =
new LinkedList<>(Collections.singletonList(new Watermark(5)));
assertThat(outputWatermark).isEqualTo(expectedWatermark);
Epoch<Integer> expectedEpoch = new Epoch<>(new Watermark(5));
assertThat(asyncExecutionController.getActiveEpoch()).isEqualTo(expectedEpoch);
Queue<Integer> expectedProcessedInputIndexes =
new LinkedList<>(Arrays.asList(1, 2, 1, 3, 4));
assertThat(outputProcessedInputIndexes).isEqualTo(expectedProcessedInputIndexes);
}
private void waitComplete() {
long now = System.currentTimeMillis();
while (mailboxExecutor.tryYield()) {
if (System.currentTimeMillis() - now > 3000) {
fail("Execution timeout");
}
}
}
private static
|
TableAsyncExecutionControllerTest
|
java
|
square__retrofit
|
retrofit/java-test/src/test/java/retrofit2/RequestFactoryTest.java
|
{
"start": 101311,
"end": 101667
}
|
class ____ {
@GET("/")
Call<ResponseBody> method(@Tag List<String> tag) {
return null;
}
}
List<String> strings = asList("tag", "value");
Request request = buildRequest(Example.class, strings);
assertThat(request.tag(List.class)).isSameInstanceAs(strings);
}
@Test
public void tagDuplicateFails() {
|
Example
|
java
|
spring-projects__spring-framework
|
spring-web/src/main/java/org/springframework/web/bind/annotation/ControllerAdvice.java
|
{
"start": 3920,
"end": 5299
}
|
interface ____ {
/**
* Alias for {@link Component#value}.
* @since 6.1
*/
@AliasFor(annotation = Component.class, attribute = "value")
String name() default "";
/**
* Alias for the {@link #basePackages} attribute.
* <p>Allows for more concise annotation declarations — for example,
* {@code @ControllerAdvice("org.my.pkg")} is equivalent to
* {@code @ControllerAdvice(basePackages = "org.my.pkg")}.
* @since 4.0
* @see #basePackages
*/
@AliasFor("basePackages")
String[] value() default {};
/**
* Array of base packages.
* <p>Controllers that belong to those base packages or sub-packages thereof
* will be included — for example,
* {@code @ControllerAdvice(basePackages = "org.my.pkg")} or
* {@code @ControllerAdvice(basePackages = {"org.my.pkg", "org.my.other.pkg"})}.
* <p>{@link #value} is an alias for this attribute, simply allowing for
* more concise use of the annotation.
* <p>Also consider using {@link #basePackageClasses} as a type-safe
* alternative to String-based package names.
* @since 4.0
*/
@AliasFor("value")
String[] basePackages() default {};
/**
* Type-safe alternative to {@link #basePackages} for specifying the packages
* in which to select controllers to be advised by the {@code @ControllerAdvice}
* annotated class.
* <p>Consider creating a special no-op marker
|
ControllerAdvice
|
java
|
spring-projects__spring-security
|
config/src/test/java/org/springframework/security/config/annotation/web/configuration/HttpSecurityConfigurationTests.java
|
{
"start": 31509,
"end": 32206
}
|
class ____ {
@Bean
SecurityFilterChain filterChain(HttpSecurity http) throws Exception {
// @formatter:off
return http
.authorizeHttpRequests((authorize) -> authorize
.anyRequest().authenticated()
)
.formLogin((form) -> form
.failureHandler((request, response, exception) -> {
if (exception instanceof CompromisedPasswordException) {
response.sendRedirect("/reset-password");
return;
}
response.sendRedirect("/login?error");
})
)
.build();
// @formatter:on
}
}
@Configuration(proxyBeanMethods = false)
@EnableWebSecurity
@EnableWebMvc
static
|
SecurityEnabledRedirectIfPasswordExceptionConfig
|
java
|
apache__camel
|
core/camel-core-model/src/main/java/org/apache/camel/model/rest/RestDefinition.java
|
{
"start": 48704,
"end": 48927
}
|
class ____ data type
param(verb).name(RestParamType.body.name()).type(RestParamType.body).dataType(bodyType).endParam();
} else {
// must be body type and set the model
|
as
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/deser/inject/JacksonInject4218Test.java
|
{
"start": 711,
"end": 1731
}
|
class ____ extends InjectableValues.Std
{
private static final long serialVersionUID = 1L;
int nextId = 1; // count up if injected
@Override
public Object findInjectableValue(
DeserializationContext ctxt,
Object valueId,
BeanProperty forProperty,
Object beanInstance,
Boolean optional, Boolean useInput) {
if (valueId.equals("id")) {
return "id" + nextId++;
} else {
return super.findInjectableValue(ctxt, valueId, forProperty, beanInstance, optional, useInput);
}
}
}
// [databind#4218]
@Test
void injectNoDups4218() throws Exception
{
ObjectReader reader = newJsonMapper()
.readerFor(Dto.class)
.with(new MyInjectableValues());
Dto dto = reader.readValue("{}");
String actual = dto.id;
assertEquals("id1", actual);
}
}
|
MyInjectableValues
|
java
|
spring-projects__spring-boot
|
smoke-test/spring-boot-smoke-test-web-secure/src/test/java/smoketest/web/secure/AbstractErrorPageTests.java
|
{
"start": 1353,
"end": 1514
}
|
class ____ tests to ensure that the error page is accessible only to
* authorized users.
*
* @author Madhura Bhave
*/
@AutoConfigureTestRestTemplate
abstract
|
for
|
java
|
apache__camel
|
core/camel-api/src/main/java/org/apache/camel/CamelAuthorizationException.java
|
{
"start": 1058,
"end": 1696
}
|
class ____ extends CamelExchangeException {
private final String policyId;
public CamelAuthorizationException(String message, Exchange exchange) {
super(message, exchange);
policyId = exchange.getIn().getHeader(Exchange.AUTHENTICATION_FAILURE_POLICY_ID, String.class);
}
public CamelAuthorizationException(String message, Exchange exchange, Throwable cause) {
super(message, exchange, cause);
policyId = exchange.getIn().getHeader(Exchange.AUTHENTICATION_FAILURE_POLICY_ID, String.class);
}
public String getPolicyId() {
return policyId;
}
}
|
CamelAuthorizationException
|
java
|
junit-team__junit5
|
jupiter-tests/src/test/java/org/junit/jupiter/engine/discovery/predicates/TestClassPredicatesTests.java
|
{
"start": 2955,
"end": 4714
}
|
class ____ {
@SuppressWarnings("unused")
@Test
void test() {
}
}
var candidate = LocalClass.class;
assertTrue(predicates.looksLikeIntendedTestClass(candidate));
assertFalse(predicates.isValidStandaloneTestClass(candidate));
var issue = DiscoveryIssue.builder(Severity.WARNING,
"Test class '%s' must not be a local class. It will not be executed.".formatted(candidate.getName())) //
.source(ClassSource.from(candidate)) //
.build();
assertThat(discoveryIssues).containsExactly(issue);
}
@Test
void anonymousClassEvaluatesToFalse() {
Object object = new Object() {
@SuppressWarnings("unused")
@Test
void test() {
}
};
Class<?> candidate = object.getClass();
assertTrue(predicates.looksLikeIntendedTestClass(candidate));
assertFalse(predicates.isValidStandaloneTestClass(candidate));
var issue = DiscoveryIssue.builder(Severity.WARNING,
"Test class '%s' must not be anonymous. It will not be executed.".formatted(candidate.getName())) //
.source(ClassSource.from(candidate)) //
.build();
assertThat(discoveryIssues).containsExactly(issue);
}
@Test
void privateClassWithTestMethodEvaluatesToFalse() {
var candidate = TestCases.PrivateClassWithTestMethod.class;
assertTrue(predicates.looksLikeIntendedTestClass(candidate));
assertFalse(predicates.isValidStandaloneTestClass(candidate));
var notPrivateIssue = DiscoveryIssue.builder(Severity.WARNING,
"Test class '%s' must not be private. It will not be executed.".formatted(candidate.getName())) //
.source(ClassSource.from(candidate)) //
.build();
var notInnerClassIssue = DiscoveryIssue.builder(Severity.WARNING,
"Test class '%s' must not be an inner
|
LocalClass
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/UpdateMountTableEntryResponse.java
|
{
"start": 1240,
"end": 1580
}
|
class ____ {
public static UpdateMountTableEntryResponse newInstance() throws IOException {
return StateStoreSerializer.newRecord(UpdateMountTableEntryResponse.class);
}
@Public
@Unstable
public abstract boolean getStatus();
@Public
@Unstable
public abstract void setStatus(boolean result);
}
|
UpdateMountTableEntryResponse
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/bugs/_1797/Issue1797Test.java
|
{
"start": 561,
"end": 908
}
|
class ____ {
@ProcessorTest
public void shouldCorrectlyMapEnumSetToEnumSet() {
Customer customer = new Customer( EnumSet.of( Customer.Type.ONE ) );
CustomerDto customerDto = Issue1797Mapper.INSTANCE.map( customer );
assertThat( customerDto.getTypes() ).containsExactly( CustomerDto.Type.ONE );
}
}
|
Issue1797Test
|
java
|
spring-projects__spring-security
|
oauth2/oauth2-authorization-server/src/main/java/org/springframework/security/oauth2/server/authorization/client/JdbcRegisteredClientRepository.java
|
{
"start": 12723,
"end": 12884
}
|
class ____ that
* {@link RegisteredClientRowMapper} can be deprecated in favor of
* {@link JsonMapperRegisteredClientRowMapper}.
*/
private abstract static
|
so
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/type/TypeAliasesTest.java
|
{
"start": 440,
"end": 531
}
|
class ____<T> {
public T inconsequential = null;
}
public static abstract
|
Base
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/StaticQualifiedUsingExpressionTest.java
|
{
"start": 10812,
"end": 11017
}
|
interface ____ {
int CONST = 42;
I id();
}
""")
.expectUnchanged()
.addInputLines(
"in/Test.java",
"""
|
I
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhancement/lazy/proxy/crosspackage/base/BaseEntity.java
|
{
"start": 368,
"end": 761
}
|
class ____ {
@Id
@GeneratedValue
private Long id;
@Embedded
protected EmbeddableType embeddedField;
public Long getId() {
return id;
}
public void setId(final Long id) {
this.id = id;
}
public EmbeddableType getEmbeddedField() {
return embeddedField;
}
public void setEmbeddedField(final EmbeddableType embeddedField) {
this.embeddedField = embeddedField;
}
}
|
BaseEntity
|
java
|
alibaba__nacos
|
core/src/main/java/com/alibaba/nacos/core/context/remote/HttpRequestContextConfig.java
|
{
"start": 1016,
"end": 1700
}
|
class ____ {
@Bean
public FilterRegistrationBean<HttpRequestContextFilter> requestContextFilterRegistration(
HttpRequestContextFilter requestContextFilter) {
FilterRegistrationBean<HttpRequestContextFilter> registration = new FilterRegistrationBean<>();
registration.setFilter(requestContextFilter);
registration.addUrlPatterns("/*");
registration.setName("nacosRequestContextFilter");
registration.setOrder(Integer.MIN_VALUE);
return registration;
}
@Bean
public HttpRequestContextFilter nacosRequestContextFilter() {
return new HttpRequestContextFilter();
}
}
|
HttpRequestContextConfig
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/jsontype/PolymorphicDeserSubtypeCheck5016Test.java
|
{
"start": 1467,
"end": 1674
}
|
class ____ {
@JsonTypeInfo(use = JsonTypeInfo.Id.CLASS,
include = JsonTypeInfo.As.PROPERTY,
property = "@class")
public Plant thisType;
}
static
|
PlantInfo
|
java
|
spring-projects__spring-security
|
web/src/main/java/org/springframework/security/web/server/ServerWebExchangeThreadLocalAccessor.java
|
{
"start": 920,
"end": 1478
}
|
class ____ the existing Reactor Context attribute
* {@code ServerWebExchange.class} to the {@link ThreadLocalAccessor} contract to allow
* Micrometer Context Propagation to automatically propagate a {@link ServerWebExchange}
* in Reactive applications. It is automatically registered with the
* {@link io.micrometer.context.ContextRegistry} through the
* {@link java.util.ServiceLoader} mechanism when context-propagation is on the classpath.
*
* @author Steve Riesenberg
* @since 6.5
* @see io.micrometer.context.ContextRegistry
*/
public final
|
adapts
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/annotations/Type.java
|
{
"start": 1694,
"end": 2046
}
|
class ____ implements {@link UserType}.
*/
Class<? extends UserType<?>> value();
/**
* Parameters to be injected into the custom type after it is
* instantiated. The {@link UserType} implementation must implement
* {@link org.hibernate.usertype.ParameterizedType} to receive the
* parameters.
*/
Parameter[] parameters() default {};
}
|
which
|
java
|
google__auto
|
value/src/test/java/com/google/auto/value/processor/AutoAnnotationCompilationTest.java
|
{
"start": 13047,
"end": 13364
}
|
interface ____ {",
" int[] value();",
" MyEnum[] enums() default {};",
"}");
JavaFileObject myEnumJavaFile =
JavaFileObjects.forSourceLines(
"com.example.enums.MyEnum",
"package com.example.enums;",
"",
"public
|
MyAnnotation
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/bugs/_1590/BookShelfMapper.java
|
{
"start": 523,
"end": 670
}
|
interface ____ {
BookShelfMapper INSTANCE = Mappers.getMapper( BookShelfMapper.class );
BookShelf map(BookShelf bookShelf);
}
|
BookShelfMapper
|
java
|
eclipse-vertx__vert.x
|
vertx-core/src/test/java/io/vertx/tests/http/Http1xClientResponseParserTest.java
|
{
"start": 454,
"end": 532
}
|
class ____ extends HttpClientResponseParserTest {
}
|
Http1xClientResponseParserTest
|
java
|
apache__flink
|
flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/ParquetVectorizedInputFormat.java
|
{
"start": 21318,
"end": 22335
}
|
class ____<T> {
private final WritableColumnVector[] writableVectors;
protected final VectorizedColumnBatch columnarBatch;
private final Pool.Recycler<ParquetReaderBatch<T>> recycler;
protected ParquetReaderBatch(
WritableColumnVector[] writableVectors,
VectorizedColumnBatch columnarBatch,
Pool.Recycler<ParquetReaderBatch<T>> recycler) {
this.writableVectors = writableVectors;
this.columnarBatch = columnarBatch;
this.recycler = recycler;
}
public void recycle() {
recycler.recycle(this);
}
/**
* Provides reading iterator after the records are written to the {@link #columnarBatch}.
*
* @param rowsReturned The number of rows that have been returned before this batch.
*/
public abstract RecordIterator<T> convertAndGetIterator(long rowsReturned)
throws IOException;
}
}
|
ParquetReaderBatch
|
java
|
spring-projects__spring-framework
|
spring-webmvc/src/test/java/org/springframework/web/servlet/mvc/method/annotation/ServletAnnotationControllerHandlerMethodTests.java
|
{
"start": 123786,
"end": 124575
}
|
class ____ {
@RequestMapping("*/myHandle") // was **/myHandle
public void myHandle(HttpServletResponse response) throws IOException {
response.getWriter().write("myView");
}
@RequestMapping("/*/*Other") // was /**/*Other
public void myOtherHandle(HttpServletResponse response) throws IOException {
response.getWriter().write("myOtherView");
}
@RequestMapping("*/myLang") // was **/myLang
public void myLangHandle(HttpServletResponse response) throws IOException {
response.getWriter().write("myLangView");
}
@RequestMapping("/*/surprise") // was /**/surprise
public void mySurpriseHandle(HttpServletResponse response) throws IOException {
response.getWriter().write("mySurpriseView");
}
}
@Controller
static
|
MyRelativeMethodPathDispatchingController
|
java
|
quarkusio__quarkus
|
extensions/websockets-next/deployment/src/test/java/io/quarkus/websockets/next/test/errors/RuntimeErrorTest.java
|
{
"start": 1829,
"end": 3009
}
|
class ____ {
@Inject
WebSocketConnection connection;
@Inject
RequestBean requestBean;
@OnBinaryMessage
void process(WebSocketConnection connection, Buffer message) {
requestBean.setState("ok");
throw new IllegalStateException("Something went wrong");
}
@OnError
String encodingError(BinaryEncodeException e) {
return "Problem encoding: " + e.getEncodedObject().toString();
}
@OnError
String decodingError(BinaryDecodeException e) {
return "Problem decoding: " + e.getBytes().toString();
}
@OnError
Uni<Void> runtimeProblem(RuntimeException e, WebSocketConnection connection) {
assertTrue(Context.isOnEventLoopThread());
assertEquals(connection.id(), this.connection.id());
// A new request context is used
assertEquals("nok", requestBean.getState());
return connection.sendText(e.getMessage());
}
@OnError
String catchAll(Throwable e) {
return "Ooops!";
}
}
@RequestScoped
public static
|
Echo
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManager.java
|
{
"start": 6341,
"end": 6616
}
|
class ____ configured");
} catch (Exception e) {
fail("Exception caught");
e.printStackTrace();
}
}
/**
* Test whether NodeManager passes user-provided conf to
* UserGroupInformation class. If it reads this (incorrect)
* AuthenticationMethod
|
is
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/boot/model/source/internal/hbm/ModelBinder.java
|
{
"start": 120806,
"end": 122895
}
|
class ____ extends AbstractPluralAttributeSecondPass {
public PluralAttributeMapSecondPass(
MappingDocument sourceDocument,
IndexedPluralAttributeSource attributeSource,
org.hibernate.mapping.Map collectionBinding) {
super( sourceDocument, attributeSource, collectionBinding );
}
@Override
public IndexedPluralAttributeSource getPluralAttributeSource() {
return (IndexedPluralAttributeSource) super.getPluralAttributeSource();
}
@Override
public org.hibernate.mapping.Map getCollectionBinding() {
return (org.hibernate.mapping.Map) super.getCollectionBinding();
}
@Override
protected void bindCollectionIndex() {
bindMapKey(
getMappingDocument(),
getPluralAttributeSource(),
getCollectionBinding()
);
}
@Override
protected void createBackReferences() {
super.createBackReferences();
final var collectionBinding = getCollectionBinding();
boolean indexIsFormula = false;
for ( var selectable: collectionBinding.getIndex().getSelectables() ) {
if ( selectable.isFormula() ) {
indexIsFormula = true;
break;
}
}
if ( collectionBinding.isOneToMany()
&& !collectionBinding.getKey().isNullable()
&& !collectionBinding.isInverse()
&& !indexIsFormula ) {
final var oneToMany = (OneToMany) collectionBinding.getElement();
final String entityName = oneToMany.getReferencedEntityName();
final var referenced =
getMappingDocument().getMetadataCollector()
.getEntityBinding( entityName );
final var backref = new IndexBackref();
backref.setName( '_' + collectionBinding.getOwnerEntityName()
+ "." + getPluralAttributeSource().getName() + "IndexBackref" );
backref.setOptional( true );
backref.setUpdatable( false );
backref.setSelectable( false );
backref.setCollectionRole( collectionBinding.getRole() );
backref.setEntityName( collectionBinding.getOwner().getEntityName() );
backref.setValue( collectionBinding.getIndex() );
referenced.addProperty( backref );
}
}
}
private
|
PluralAttributeMapSecondPass
|
java
|
netty__netty
|
codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2FrameReader.java
|
{
"start": 2965,
"end": 26951
}
|
class ____ implements Http2FrameReader, Http2FrameSizePolicy, Configuration {
private final Http2HeadersDecoder headersDecoder;
/**
* {@code true} = reading headers, {@code false} = reading payload.
*/
private boolean readingHeaders = true;
/**
* Once set to {@code true} the value will never change. This is set to {@code true} if an unrecoverable error which
* renders the connection unusable.
*/
private boolean readError;
private byte frameType;
private int streamId;
private Http2Flags flags;
private int payloadLength;
private HeadersContinuation headersContinuation;
private int maxFrameSize;
/**
* Create a new instance.
* <p>
* Header names will be validated.
*/
public DefaultHttp2FrameReader() {
this(true);
}
/**
* Create a new instance.
* @param validateHeaders {@code true} to validate headers. {@code false} to not validate headers.
* @see DefaultHttp2HeadersDecoder(boolean)
*/
public DefaultHttp2FrameReader(boolean validateHeaders) {
this(new DefaultHttp2HeadersDecoder(validateHeaders));
}
public DefaultHttp2FrameReader(Http2HeadersDecoder headersDecoder) {
this.headersDecoder = headersDecoder;
maxFrameSize = DEFAULT_MAX_FRAME_SIZE;
}
@Override
public Http2HeadersDecoder.Configuration headersConfiguration() {
return headersDecoder.configuration();
}
@Override
public Configuration configuration() {
return this;
}
@Override
public Http2FrameSizePolicy frameSizePolicy() {
return this;
}
@Override
public void maxFrameSize(int max) throws Http2Exception {
if (!isMaxFrameSizeValid(max)) {
// SETTINGS frames affect the entire connection state and thus errors must be connection errors.
// See https://datatracker.ietf.org/doc/html/rfc9113#section-4.2 for details.
throw connectionError(FRAME_SIZE_ERROR, "Invalid MAX_FRAME_SIZE specified in sent settings: %d", max);
}
maxFrameSize = max;
}
@Override
public int maxFrameSize() {
return maxFrameSize;
}
@Override
public void close() {
closeHeadersContinuation();
}
private void closeHeadersContinuation() {
if (headersContinuation != null) {
headersContinuation.close();
headersContinuation = null;
}
}
@Override
public void readFrame(ChannelHandlerContext ctx, ByteBuf input, Http2FrameListener listener)
throws Http2Exception {
if (readError) {
input.skipBytes(input.readableBytes());
return;
}
try {
do {
if (readingHeaders && !preProcessFrame(input)) {
return;
}
// The header is complete, fall into the next case to process the payload.
// This is to ensure the proper handling of zero-length payloads. In this
// case, we don't want to loop around because there may be no more data
// available, causing us to exit the loop. Instead, we just want to perform
// the first pass at payload processing now.
// Wait until the entire payload has been read.
if (input.readableBytes() < payloadLength) {
return;
}
// Slice to work only on the frame being read
ByteBuf framePayload = input.readSlice(payloadLength);
// We have consumed the data for this frame, next time we read,
// we will be expecting to read a new frame header.
readingHeaders = true;
verifyFrameState();
processPayloadState(ctx, framePayload, listener);
} while (input.isReadable());
} catch (Http2Exception e) {
readError = !Http2Exception.isStreamError(e);
throw e;
} catch (RuntimeException e) {
readError = true;
throw e;
} catch (Throwable cause) {
readError = true;
PlatformDependent.throwException(cause);
}
}
/**
 * Reads the fixed-size frame header common to all frame types: the 24-bit payload
 * length, the frame type, the flags byte and the 31-bit stream identifier.
 *
 * @return {@code true} if a complete header was read (the reader then transitions to
 *         payload-reading state), {@code false} if more bytes are needed
 * @throws Http2Exception as a connection error if the advertised payload length
 *         exceeds the configured {@code maxFrameSize}
 */
private boolean preProcessFrame(ByteBuf in) throws Http2Exception {
    // Start pre-processing the frame by reading the necessary data
    // in common between all frame types
    if (in.readableBytes() < FRAME_HEADER_LENGTH) {
        // Wait until the entire framing section has been read.
        return false;
    }
    payloadLength = in.readUnsignedMedium();
    if (payloadLength > maxFrameSize) {
        throw connectionError(FRAME_SIZE_ERROR, "Frame length: %d exceeds maximum: %d", payloadLength,
            maxFrameSize);
    }
    frameType = in.readByte();
    flags = new Http2Flags(in.readUnsignedByte());
    streamId = readUnsignedInt(in);
    // Header consumed; the next readFrame() pass expects payload bytes.
    readingHeaders = false;
    return true;
}
/**
 * Validates the just-read frame header against the per-type rules (stream id
 * constraints, payload-length constraints, and whether a header block is currently in
 * progress) before any payload bytes are interpreted.
 *
 * @throws Http2Exception if the header violates the rules for its frame type
 */
private void verifyFrameState() throws Http2Exception {
    switch (frameType) {
        case DATA:
            verifyDataFrame();
            break;
        case HEADERS:
            verifyHeadersFrame();
            break;
        case PRIORITY:
            verifyPriorityFrame();
            break;
        case RST_STREAM:
            verifyRstStreamFrame();
            break;
        case SETTINGS:
            verifySettingsFrame();
            break;
        case PUSH_PROMISE:
            verifyPushPromiseFrame();
            break;
        case PING:
            verifyPingFrame();
            break;
        case GO_AWAY:
            verifyGoAwayFrame();
            break;
        case WINDOW_UPDATE:
            verifyWindowUpdateFrame();
            break;
        case CONTINUATION:
            verifyContinuationFrame();
            break;
        default:
            // Unknown frame type, could be an extension.
            verifyUnknownFrame();
            break;
    }
}
/**
 * Dispatches the complete payload slice of the current frame to the type-specific
 * reader, which parses it and notifies the {@code listener}.
 *
 * @param in a slice containing exactly {@code payloadLength} readable bytes
 * @throws Http2Exception if the payload is malformed for its frame type
 */
private void processPayloadState(ChannelHandlerContext ctx, ByteBuf in, Http2FrameListener listener)
        throws Http2Exception {
    // When this method is called, we ensure that the payload buffer passed in
    // matches what we expect to be reading for payloadLength
    assert in.readableBytes() == payloadLength;
    // Read the payload and fire the frame event to the listener.
    switch (frameType) {
        case DATA:
            readDataFrame(ctx, in, listener);
            break;
        case HEADERS:
            readHeadersFrame(ctx, in, listener);
            break;
        case PRIORITY:
            readPriorityFrame(ctx, in, listener);
            break;
        case RST_STREAM:
            readRstStreamFrame(ctx, in, listener);
            break;
        case SETTINGS:
            readSettingsFrame(ctx, in, listener);
            break;
        case PUSH_PROMISE:
            readPushPromiseFrame(ctx, in, listener);
            break;
        case PING:
            readPingFrame(ctx, in.readLong(), listener);
            break;
        case GO_AWAY:
            readGoAwayFrame(ctx, in, listener);
            break;
        case WINDOW_UPDATE:
            readWindowUpdateFrame(ctx, in, listener);
            break;
        case CONTINUATION:
            readContinuationFrame(in, listener);
            break;
        default:
            readUnknownFrame(ctx, in, listener);
            break;
    }
}
/**
 * Verifies a DATA frame header: it must target a stream, must not interrupt an
 * in-progress header block, and its payload must at least hold the optional
 * pad-length field.
 */
private void verifyDataFrame() throws Http2Exception {
    verifyAssociatedWithAStream();
    verifyNotProcessingHeaders();
    final int minLength = flags.getPaddingPresenceFieldLength();
    if (payloadLength < minLength) {
        throw streamError(streamId, FRAME_SIZE_ERROR,
                "Frame length %d too small.", payloadLength);
    }
}
/**
 * Verifies a HEADERS frame header: it must target a stream, must not interrupt an
 * in-progress header block, and its payload must at least hold the optional
 * pad-length and priority fields indicated by the flags.
 */
private void verifyHeadersFrame() throws Http2Exception {
    verifyAssociatedWithAStream();
    verifyNotProcessingHeaders();
    final int minLength = flags.getPaddingPresenceFieldLength() + flags.getNumPriorityBytes();
    if (payloadLength < minLength) {
        // HEADER frames carry a field_block and thus failure to process them results
        // in HPACK corruption and renders the connection unusable.
        // See https://datatracker.ietf.org/doc/html/rfc9113#section-4.2 for details.
        throw connectionError(FRAME_SIZE_ERROR,
                "Frame length %d too small for HEADERS frame with stream %d.", payloadLength, streamId);
    }
}
/**
 * Verifies a PRIORITY frame header: it must target a stream, must not interrupt an
 * in-progress header block, and its payload must be exactly
 * {@code PRIORITY_ENTRY_LENGTH} bytes.
 */
private void verifyPriorityFrame() throws Http2Exception {
    verifyAssociatedWithAStream();
    verifyNotProcessingHeaders();
    if (payloadLength != PRIORITY_ENTRY_LENGTH) {
        throw streamError(streamId, FRAME_SIZE_ERROR,
            "Invalid frame length %d.", payloadLength);
    }
}
/**
 * Verifies an RST_STREAM frame header: it must target a stream, must not interrupt an
 * in-progress header block, and its payload must be exactly {@code INT_FIELD_LENGTH}
 * bytes (the error code).
 */
private void verifyRstStreamFrame() throws Http2Exception {
    verifyAssociatedWithAStream();
    verifyNotProcessingHeaders();
    if (payloadLength != INT_FIELD_LENGTH) {
        throw connectionError(FRAME_SIZE_ERROR, "Invalid frame length %d.", payloadLength);
    }
}
/**
 * Verifies a SETTINGS frame header: it must be on the connection stream (id 0), an ACK
 * must carry no payload, and a non-ACK payload must be a whole multiple of
 * {@code SETTING_ENTRY_LENGTH}.
 */
private void verifySettingsFrame() throws Http2Exception {
    verifyNotProcessingHeaders();
    if (streamId != 0) {
        throw connectionError(PROTOCOL_ERROR, "A stream ID must be zero.");
    }
    if (flags.ack() && payloadLength > 0) {
        throw connectionError(FRAME_SIZE_ERROR, "Ack settings frame must have an empty payload.");
    }
    if (payloadLength % SETTING_ENTRY_LENGTH > 0) {
        throw connectionError(FRAME_SIZE_ERROR, "Frame length %d invalid.", payloadLength);
    }
}
/**
 * Verifies a PUSH_PROMISE frame header: it must not interrupt an in-progress header
 * block, and its payload must at least hold the optional pad-length field plus the
 * 4-byte promised stream id.
 */
private void verifyPushPromiseFrame() throws Http2Exception {
    verifyNotProcessingHeaders();
    final int requiredLength = flags.getPaddingPresenceFieldLength() + INT_FIELD_LENGTH;
    if (payloadLength < requiredLength) {
        // PUSH_PROMISE frames carry a field_block and thus failure to process them results
        // in HPACK corruption and renders the connection unusable.
        // See https://datatracker.ietf.org/doc/html/rfc9113#section-4.2 for details.
        throw connectionError(FRAME_SIZE_ERROR,
                "Frame length %d too small for PUSH_PROMISE frame with stream id %d.", payloadLength, streamId);
    }
}
/**
 * Verifies a PING frame header: it must be on the connection stream (id 0) and its
 * payload must be exactly {@code PING_FRAME_PAYLOAD_LENGTH} bytes.
 */
private void verifyPingFrame() throws Http2Exception {
    verifyNotProcessingHeaders();
    if (streamId != 0) {
        throw connectionError(PROTOCOL_ERROR, "A stream ID must be zero.");
    }
    if (payloadLength != PING_FRAME_PAYLOAD_LENGTH) {
        throw connectionError(FRAME_SIZE_ERROR,
            "Frame length %d incorrect size for ping.", payloadLength);
    }
}
/**
 * Verifies a GO_AWAY frame header: it must be on the connection stream (id 0) and its
 * payload must be at least 8 bytes (last-stream-id plus error code).
 */
private void verifyGoAwayFrame() throws Http2Exception {
    verifyNotProcessingHeaders();
    if (streamId != 0) {
        throw connectionError(PROTOCOL_ERROR, "A stream ID must be zero.");
    }
    if (payloadLength < 8) {
        throw connectionError(FRAME_SIZE_ERROR, "Frame length %d too small.", payloadLength);
    }
}
/**
 * Verifies a WINDOW_UPDATE frame header: the stream id may be 0 (connection) or a
 * valid stream id, and the payload must be exactly {@code INT_FIELD_LENGTH} bytes.
 */
private void verifyWindowUpdateFrame() throws Http2Exception {
    verifyNotProcessingHeaders();
    verifyStreamOrConnectionId(streamId, "Stream ID");
    if (payloadLength != INT_FIELD_LENGTH) {
        throw connectionError(FRAME_SIZE_ERROR, "Invalid frame length %d.", payloadLength);
    }
}
/**
 * Verifies a CONTINUATION frame header: a header block must currently be in progress
 * and the frame's stream id must match the stream of the pending header block.
 */
private void verifyContinuationFrame() throws Http2Exception {
    verifyAssociatedWithAStream();
    if (headersContinuation == null) {
        throw connectionError(PROTOCOL_ERROR, "Received %s frame but not currently processing headers.",
            frameType);
    }
    if (streamId != headersContinuation.getStreamId()) {
        throw connectionError(PROTOCOL_ERROR, "Continuation stream ID does not match pending headers. "
            + "Expected %d, but received %d.", headersContinuation.getStreamId(), streamId);
    }
}
/**
 * Verifies an unknown/extension frame header: the only constraint is that it must not
 * interrupt an in-progress header block.
 */
private void verifyUnknownFrame() throws Http2Exception {
    verifyNotProcessingHeaders();
}
/**
 * Parses a DATA frame payload: strips the optional pad-length field and trailing
 * padding, then delivers the remaining data bytes to the listener.
 */
private void readDataFrame(ChannelHandlerContext ctx, ByteBuf payload,
        Http2FrameListener listener) throws Http2Exception {
    final int padLength = readPadding(payload);
    // Shrink the readable window so trailing padding bytes are not handed to the listener.
    final int contentLength = lengthWithoutTrailingPadding(payload.readableBytes(), padLength);
    payload.writerIndex(payload.readerIndex() + contentLength);
    listener.onDataRead(ctx, streamId, payload, padLength, flags.endOfStream());
}
/**
 * Parses a HEADERS frame payload and starts header-block accumulation. The initial
 * fragment is fed into a {@code HeadersContinuation}; if END_HEADERS is set the
 * listener is notified immediately, otherwise subsequent CONTINUATION frames append
 * to the same block. Two continuation variants exist depending on whether the
 * PRIORITY flag is set (the priority-aware listener callback carries stream
 * dependency, weight and exclusivity).
 */
private void readHeadersFrame(final ChannelHandlerContext ctx, ByteBuf payload,
        Http2FrameListener listener) throws Http2Exception {
    // Capture mutable reader state in finals so the continuation sees this frame's values.
    final int headersStreamId = streamId;
    final Http2Flags headersFlags = flags;
    final int padding = readPadding(payload);
    // The callback that is invoked is different depending on whether priority information
    // is present in the headers frame.
    if (flags.priorityPresent()) {
        // 4 bytes: exclusive bit (MSB) plus 31-bit stream dependency.
        long word1 = payload.readUnsignedInt();
        final boolean exclusive = (word1 & 0x80000000L) != 0;
        final int streamDependency = (int) (word1 & 0x7FFFFFFFL);
        if (streamDependency == streamId) {
            // Stream dependencies are deprecated in RFC 9113 but this behavior is defined in
            // https://datatracker.ietf.org/doc/html/rfc7540#section-5.3.1 which says this must be treated as a
            // stream error of type PROTOCOL_ERROR. However, because we will not process the payload, a stream
            // error would result in HPACK corruption. Therefor, it is elevated to a connection error.
            throw connectionError(
                    PROTOCOL_ERROR, "HEADERS frame for stream %d cannot depend on itself.", streamId);
        }
        // Weight is transmitted as value-1 (wire 0..255 maps to 1..256).
        final short weight = (short) (payload.readUnsignedByte() + 1);
        final int lenToRead = lengthWithoutTrailingPadding(payload.readableBytes(), padding);
        // Create a handler that invokes the listener when the header block is complete.
        headersContinuation = new HeadersContinuation() {
            @Override
            public int getStreamId() {
                return headersStreamId;
            }
            @Override
            public void processFragment(boolean endOfHeaders, ByteBuf fragment, int len,
                    Http2FrameListener listener) throws Http2Exception {
                final HeadersBlockBuilder hdrBlockBuilder = headersBlockBuilder();
                hdrBlockBuilder.addFragment(fragment, len, ctx.alloc(), endOfHeaders);
                if (endOfHeaders) {
                    listener.onHeadersRead(ctx, headersStreamId, hdrBlockBuilder.headers(), streamDependency,
                            weight, exclusive, padding, headersFlags.endOfStream());
                }
            }
        };
        // Process the initial fragment, invoking the listener's callback if end of headers.
        headersContinuation.processFragment(flags.endOfHeaders(), payload, lenToRead, listener);
        resetHeadersContinuationIfEnd(flags.endOfHeaders());
        return;
    }
    // The priority fields are not present in the frame. Prepare a continuation that invokes
    // the listener callback without priority information.
    headersContinuation = new HeadersContinuation() {
        @Override
        public int getStreamId() {
            return headersStreamId;
        }
        @Override
        public void processFragment(boolean endOfHeaders, ByteBuf fragment, int len,
                Http2FrameListener listener) throws Http2Exception {
            final HeadersBlockBuilder hdrBlockBuilder = headersBlockBuilder();
            hdrBlockBuilder.addFragment(fragment, len, ctx.alloc(), endOfHeaders);
            if (endOfHeaders) {
                listener.onHeadersRead(ctx, headersStreamId, hdrBlockBuilder.headers(), padding,
                        headersFlags.endOfStream());
            }
        }
    };
    // Process the initial fragment, invoking the listener's callback if end of headers.
    int len = lengthWithoutTrailingPadding(payload.readableBytes(), padding);
    headersContinuation.processFragment(flags.endOfHeaders(), payload, len, listener);
    resetHeadersContinuationIfEnd(flags.endOfHeaders());
}
/**
 * Tears down the header-block accumulation once the END_HEADERS flag has been seen;
 * a no-op while more CONTINUATION frames are still expected.
 */
private void resetHeadersContinuationIfEnd(boolean endOfHeaders) {
    if (!endOfHeaders) {
        return;
    }
    closeHeadersContinuation();
}
/**
 * Parses a PRIORITY frame payload (exclusive bit, 31-bit stream dependency, weight)
 * and notifies the listener. A stream depending on itself is a stream error.
 */
private void readPriorityFrame(ChannelHandlerContext ctx, ByteBuf payload,
        Http2FrameListener listener) throws Http2Exception {
    // First 4 bytes: exclusive bit (MSB) plus the 31-bit stream dependency.
    final long dependencyWord = payload.readUnsignedInt();
    final boolean exclusive = (dependencyWord & 0x80000000L) != 0;
    final int streamDependency = (int) (dependencyWord & 0x7FFFFFFFL);
    if (streamDependency == streamId) {
        throw streamError(streamId, PROTOCOL_ERROR, "A stream cannot depend on itself.");
    }
    // Weight is transmitted as value-1 (wire 0..255 maps to 1..256).
    final short weight = (short) (payload.readUnsignedByte() + 1);
    listener.onPriorityRead(ctx, streamId, streamDependency, weight, exclusive);
}
/**
 * Parses an RST_STREAM frame payload (a single 4-byte error code) and notifies the
 * listener.
 */
private void readRstStreamFrame(ChannelHandlerContext ctx, ByteBuf payload,
        Http2FrameListener listener) throws Http2Exception {
    listener.onRstStreamRead(ctx, streamId, payload.readUnsignedInt());
}
/**
 * Parses a SETTINGS frame payload. An ACK (empty payload, enforced earlier in
 * {@code verifySettingsFrame}) is reported via {@code onSettingsAckRead}; otherwise
 * each {@code SETTING_ENTRY_LENGTH}-byte entry (2-byte id, 4-byte value) is collected
 * into an {@code Http2Settings} before notifying the listener.
 */
private void readSettingsFrame(ChannelHandlerContext ctx, ByteBuf payload,
        Http2FrameListener listener) throws Http2Exception {
    if (flags.ack()) {
        listener.onSettingsAckRead(ctx);
    } else {
        int numSettings = payloadLength / SETTING_ENTRY_LENGTH;
        Http2Settings settings = new Http2Settings();
        for (int index = 0; index < numSettings; ++index) {
            char id = (char) payload.readUnsignedShort();
            long value = payload.readUnsignedInt();
            try {
                settings.put(id, Long.valueOf(value));
            } catch (IllegalArgumentException e) {
                // An out-of-range INITIAL_WINDOW_SIZE maps to FLOW_CONTROL_ERROR;
                // any other rejected setting value is a PROTOCOL_ERROR.
                if (id == SETTINGS_INITIAL_WINDOW_SIZE) {
                    throw connectionError(FLOW_CONTROL_ERROR, e,
                            "Failed setting initial window size: %s", e.getMessage());
                }
                throw connectionError(PROTOCOL_ERROR, e, "Protocol error: %s", e.getMessage());
            }
        }
        listener.onSettingsRead(ctx, settings);
    }
}
/**
 * Parses a PUSH_PROMISE frame payload: reads the optional padding and the promised
 * stream id, then starts header-block accumulation. The listener is notified once the
 * entire header block (possibly spanning CONTINUATION frames) is complete.
 */
private void readPushPromiseFrame(final ChannelHandlerContext ctx, ByteBuf payload,
        Http2FrameListener listener) throws Http2Exception {
    // Capture mutable reader state in finals so the continuation sees this frame's values.
    final int pushPromiseStreamId = streamId;
    final int padding = readPadding(payload);
    final int promisedStreamId = readUnsignedInt(payload);
    // Create a handler that invokes the listener when the header block is complete.
    headersContinuation = new HeadersContinuation() {
        @Override
        public int getStreamId() {
            return pushPromiseStreamId;
        }
        @Override
        public void processFragment(boolean endOfHeaders, ByteBuf fragment, int len,
                Http2FrameListener listener) throws Http2Exception {
            headersBlockBuilder().addFragment(fragment, len, ctx.alloc(), endOfHeaders);
            if (endOfHeaders) {
                listener.onPushPromiseRead(ctx, pushPromiseStreamId, promisedStreamId,
                        headersBlockBuilder().headers(), padding);
            }
        }
    };
    // Process the initial fragment, invoking the listener's callback if end of headers.
    int len = lengthWithoutTrailingPadding(payload.readableBytes(), padding);
    headersContinuation.processFragment(flags.endOfHeaders(), payload, len, listener);
    resetHeadersContinuationIfEnd(flags.endOfHeaders());
}
/**
 * Delivers a PING frame's 8-byte opaque payload to the listener; the ACK flag
 * distinguishes a ping response from a ping request.
 */
private void readPingFrame(ChannelHandlerContext ctx, long data,
        Http2FrameListener listener) throws Http2Exception {
    if (!flags.ack()) {
        listener.onPingRead(ctx, data);
    } else {
        listener.onPingAckRead(ctx, data);
    }
}
/**
 * Parses a GO_AWAY frame payload: the 31-bit last-stream-id, the 32-bit error code,
 * and whatever remains in the buffer as opaque debug data.
 */
private void readGoAwayFrame(ChannelHandlerContext ctx, ByteBuf payload,
        Http2FrameListener listener) throws Http2Exception {
    final int lastKnownStream = readUnsignedInt(payload);
    final long goAwayError = payload.readUnsignedInt();
    listener.onGoAwayRead(ctx, lastKnownStream, goAwayError, payload);
}
/**
 * Parses a WINDOW_UPDATE frame payload (a single 31-bit window-size increment) and
 * notifies the listener. A zero increment is rejected: as a connection error on the
 * connection stream, otherwise as a stream error.
 */
private void readWindowUpdateFrame(ChannelHandlerContext ctx, ByteBuf payload,
        Http2FrameListener listener) throws Http2Exception {
    int windowSizeIncrement = readUnsignedInt(payload);
    if (windowSizeIncrement == 0) {
        // On the connection stream this must be a connection error but for request streams it is a stream error.
        // See https://datatracker.ietf.org/doc/html/rfc9113#section-6.9 for details.
        if (streamId == CONNECTION_STREAM_ID) {
            throw connectionError(PROTOCOL_ERROR,
                    "Received WINDOW_UPDATE with delta 0 for connection stream");
        } else {
            throw streamError(streamId, PROTOCOL_ERROR,
                    "Received WINDOW_UPDATE with delta 0 for stream: %d", streamId);
        }
    }
    listener.onWindowUpdateRead(ctx, streamId, windowSizeIncrement);
}
/**
 * Appends a CONTINUATION frame's payload to the in-progress header block; the
 * complete block is delivered to the listener once END_HEADERS is set.
 * {@code headersContinuation} is guaranteed non-null by {@code verifyContinuationFrame}.
 */
private void readContinuationFrame(ByteBuf payload, Http2FrameListener listener)
        throws Http2Exception {
    // Process the initial fragment, invoking the listener's callback if end of headers.
    headersContinuation.processFragment(flags.endOfHeaders(), payload,
            payloadLength, listener);
    resetHeadersContinuationIfEnd(flags.endOfHeaders());
}
/**
 * Forwards a frame of an unrecognized type to the listener unparsed, allowing
 * extension frame types to be handled elsewhere.
 */
private void readUnknownFrame(ChannelHandlerContext ctx, ByteBuf payload,
        Http2FrameListener listener) throws Http2Exception {
    listener.onUnknownFrame(ctx, frameType, streamId, flags, payload);
}
/**
 * Returns the total padding for the current frame: the trailing padding bytes plus
 * the one-byte pad-length field itself, or zero when the PADDED flag is not set.
 * Consumes the pad-length byte from {@code payload} when present.
 */
private int readPadding(ByteBuf payload) {
    return flags.paddingPresent() ? payload.readUnsignedByte() + 1 : 0;
}
/**
 * Computes how many of {@code readableBytes} are actual content once the trailing
 * padding is excluded. {@code padding} includes the one-byte pad-length field (already
 * consumed), so only {@code padding - 1} trailing bytes are subtracted.
 *
 * @throws Http2Exception as a connection error if the payload is smaller than its
 *         declared padding
 */
private static int lengthWithoutTrailingPadding(int readableBytes, int padding) throws Http2Exception {
    if (padding == 0) {
        return readableBytes;
    }
    final int remaining = readableBytes - (padding - 1);
    if (remaining < 0) {
        throw connectionError(PROTOCOL_ERROR, "Frame payload too small for padding.");
    }
    return remaining;
}
/**
* Base
|
DefaultHttp2FrameReader
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/conditional/expression/ErroneousConditionExpressionMapper.java
|
{
"start": 428,
"end": 900
}
|
interface ____ {
@Mapping(target = "name", conditionExpression = "!employee.getName().isEmpty()")
BasicEmployee map(EmployeeDto employee);
@Mapping(target = "name", conditionExpression = "java(true)", constant = "test")
BasicEmployee mapConstant(EmployeeDto employee);
@Mapping(target = "name", conditionExpression = "java(true)", expression = "java(\"test\")")
BasicEmployee mapExpression(EmployeeDto employee);
}
|
ErroneousConditionExpressionMapper
|
java
|
elastic__elasticsearch
|
test/framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java
|
{
"start": 14232,
"end": 14401
}
|
class ____ to create a JUnit suite description that doesn't have the {@link TestIssueLogging} annotation, but its test methods
* have it.
*/
public static
|
used
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/CheckedExceptionNotThrownTest.java
|
{
"start": 5162,
"end": 5457
}
|
class ____ {
Test() throws Exception {}
int f = test();
static int test() throws Exception {
Thread.sleep(1);
return 1;
}
}
""")
.expectUnchanged()
.doTest();
}
}
|
Test
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationSupplier.java
|
{
"start": 1091,
"end": 1667
}
|
interface ____ {
Aggregator build(
String name,
AggregatorFactories factories,
Rounding rounding,
BucketOrder order,
boolean keyed,
long minDocCount,
boolean downsampledResultsOffset,
@Nullable LongBounds extendedBounds,
@Nullable LongBounds hardBounds,
ValuesSourceConfig valuesSourceConfig,
AggregationContext context,
Aggregator parent,
CardinalityUpperBound cardinality,
Map<String, Object> metadata
) throws IOException;
}
|
DateHistogramAggregationSupplier
|
java
|
junit-team__junit5
|
jupiter-tests/src/test/java/org/junit/jupiter/params/ParameterInfoIntegrationTests.java
|
{
"start": 1745,
"end": 4189
}
|
class ____
implements BeforeClassTemplateInvocationCallback, BeforeEachCallback {
@Override
public void beforeClassTemplateInvocation(ExtensionContext parameterizedClassInvocationContext) {
if (TestCase.Inner.class.equals(parameterizedClassInvocationContext.getRequiredTestClass())) {
assertParameterInfo(parameterizedClassInvocationContext, "j", 2);
var nestedParameterizedClassContext = parameterizedClassInvocationContext.getParent().orElseThrow();
assertParameterInfo(nestedParameterizedClassContext, "i", 1);
parameterizedClassInvocationContext = nestedParameterizedClassContext.getParent().orElseThrow();
}
assertParameterInfo(parameterizedClassInvocationContext, "i", 1);
var outerParameterizedClassContext = parameterizedClassInvocationContext.getParent().orElseThrow();
assertNull(ParameterInfo.get(outerParameterizedClassContext));
}
@Override
public void beforeEach(ExtensionContext parameterizedTestInvocationContext) {
assertParameterInfo(parameterizedTestInvocationContext, "k", 3);
var parameterizedTestContext = parameterizedTestInvocationContext.getParent().orElseThrow();
assertParameterInfo(parameterizedTestContext, "j", 2);
var nestedParameterizedClassInvocationContext = parameterizedTestContext.getParent().orElseThrow();
assertParameterInfo(nestedParameterizedClassInvocationContext, "j", 2);
var nestedParameterizedClassContext = nestedParameterizedClassInvocationContext.getParent().orElseThrow();
assertParameterInfo(nestedParameterizedClassContext, "i", 1);
var outerParameterizedClassInvocationContext = nestedParameterizedClassContext.getParent().orElseThrow();
assertParameterInfo(outerParameterizedClassInvocationContext, "i", 1);
var outerParameterizedClassContext = outerParameterizedClassInvocationContext.getParent().orElseThrow();
assertNull(ParameterInfo.get(outerParameterizedClassContext));
}
private static void assertParameterInfo(ExtensionContext context, String parameterName, int argumentValue) {
var parameterInfo = ParameterInfo.get(context);
assertNotNull(parameterInfo);
var declaration = parameterInfo.getDeclarations().get(0).orElseThrow();
assertEquals(parameterName, declaration.getParameterName().orElseThrow());
assertEquals(int.class, declaration.getParameterType());
assertEquals(argumentValue, parameterInfo.getArguments().getInteger(0));
}
}
}
|
ParameterInfoConsumingExtension
|
java
|
apache__flink
|
flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/TableStreamOperator.java
|
{
"start": 1705,
"end": 3365
}
|
class ____<OUT> extends AbstractStreamOperator<OUT> {
/** We listen to this ourselves because we don't have an {@link InternalTimerService}. */
protected long currentWatermark = Long.MIN_VALUE;
protected transient ContextImpl ctx;
public TableStreamOperator() {
this(null);
}
public TableStreamOperator(StreamOperatorParameters<OUT> parameters) {
super(parameters);
}
@Override
public void open() throws Exception {
super.open();
this.ctx = new ContextImpl(getProcessingTimeService());
}
@Override
public boolean useInterruptibleTimers() {
return true;
}
/** Compute memory size from memory faction. */
public long computeMemorySize() {
final Environment environment = getContainingTask().getEnvironment();
return environment
.getMemoryManager()
.computeMemorySize(
getOperatorConfig()
.getManagedMemoryFractionOperatorUseCaseOfSlot(
ManagedMemoryUseCase.OPERATOR,
environment.getJobConfiguration(),
environment.getTaskManagerInfo().getConfiguration(),
environment.getUserCodeClassLoader().asClassLoader()));
}
@Override
public void processWatermark(Watermark mark) throws Exception {
currentWatermark = mark.getTimestamp();
super.processWatermark(mark);
}
/** Information available in an invocation of processElement. */
protected
|
TableStreamOperator
|
java
|
elastic__elasticsearch
|
x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/action/TransportGetStatusAction.java
|
{
"start": 4137,
"end": 5422
}
|
class ____ implements ClusterStateObserver.Listener {
private final ActionListener<GetStatusAction.Response> listener;
private final DiscoveryNode localNode;
private final ClusterService clusterService;
private final StatusResolver resolver;
private StatusListener(
ActionListener<GetStatusAction.Response> listener,
DiscoveryNode localNode,
ClusterService clusterService,
StatusResolver resolver
) {
this.listener = listener;
this.localNode = localNode;
this.clusterService = clusterService;
this.resolver = resolver;
}
@Override
public void onNewClusterState(ClusterState state) {
resolver.execute(state, listener);
}
@Override
public void onClusterServiceClose() {
listener.onFailure(new NodeClosedException(localNode));
}
@Override
public void onTimeout(TimeValue timeout) {
resolver.execute(clusterService.state(), ActionListener.wrap(response -> {
response.setTimedOut(true);
listener.onResponse(response);
}, listener::onFailure));
}
}
private static
|
StatusListener
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/usertype/CompositeUserType.java
|
{
"start": 351,
"end": 1226
}
|
interface ____ be implemented by user-defined custom types
* that have persistent attributes and can be thought of as something
* more like an {@linkplain jakarta.persistence.Embeddable embeddable}
* object. However, these persistent "attributes" need not necessarily
* correspond directly to Java fields or properties.
* <p>
* A value type managed by a {@code CompositeUserType} may be used in
* almost every way that a regular embeddable type may be used. It may
* even contain {@linkplain jakarta.persistence.ManyToOne many to one}
* associations.
* <p>
* To "map" the attributes of a composite custom type, each
* {@code CompositeUserType} provides a {@linkplain #embeddable()
* regular embeddable class} with the same logical structure as the
* {@linkplain #returnedClass() value type managed by the custom type}.
* <p>
* Properties of this embeddable
|
should
|
java
|
apache__spark
|
streaming/src/main/java/org/apache/spark/streaming/util/WriteAheadLog.java
|
{
"start": 1359,
"end": 1656
}
|
class ____ {
/**
* Write the record to the log and return a record handle, which contains all the information
* necessary to read back the written record. The time is used to the index the record,
* such that it can be cleaned later. Note that implementations of this abstract
|
WriteAheadLog
|
java
|
assertj__assertj-core
|
assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/internal/objectarrays/ObjectArrays_assertNotEmpty_Test.java
|
{
"start": 1158,
"end": 1761
}
|
class ____ extends ObjectArraysBaseTest {
@Test
void should_fail_if_actual_is_null() {
// WHEN
var error = expectAssertionError(() -> arrays.assertNotEmpty(INFO, null));
// THEN
then(error).hasMessage(actualIsNull());
}
@Test
void should_fail_if_actual_is_empty() {
// WHEN
var error = expectAssertionError(() -> arrays.assertNotEmpty(INFO, emptyArray()));
// THEN
then(error).hasMessage(shouldNotBeEmpty().create());
}
@Test
void should_pass_if_actual_is_not_empty() {
arrays.assertNotEmpty(INFO, array("Yoda"));
}
}
|
ObjectArrays_assertNotEmpty_Test
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java
|
{
"start": 24531,
"end": 24757
}
|
class ____ extends DefaultSpeculator {
public CustomSpeculator(Configuration conf, AppContext context) {
super(conf, context);
verifyClassLoader(getClass());
}
/**
* Verifies that the
|
CustomSpeculator
|
java
|
junit-team__junit5
|
jupiter-tests/src/test/java/org/junit/jupiter/engine/extension/ExtensionRegistrationViaParametersAndFieldsTests.java
|
{
"start": 26968,
"end": 27957
}
|
class ____ {
@RegisterExtension
Extension extension = new InstanceParameterResolver<>(this);
@RepeatedTest(100)
void test(InitializationPerInstanceTestCase outerInstance, Wrapper innerInstance) {
assertSame(InitializationPerInstanceTestCase.this, outerInstance);
assertSame(Wrapper.this, innerInstance);
}
}
private record InstanceParameterResolver<T>(T instance) implements ParameterResolver {
@Override
public boolean supportsParameter(ParameterContext parameterContext, ExtensionContext extensionContext)
throws ParameterResolutionException {
return instance.getClass().equals(parameterContext.getParameter().getType());
}
@Override
public Object resolveParameter(ParameterContext parameterContext, ExtensionContext extensionContext)
throws ParameterResolutionException {
return instance;
}
}
}
}
@Target(ElementType.PARAMETER)
@Retention(RetentionPolicy.RUNTIME)
@ExtendWith(MagicParameter.Extension.class)
@
|
Wrapper
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/query/KvStateEntry.java
|
{
"start": 1441,
"end": 2902
}
|
class ____<K, N, V> {
private final InternalKvState<K, N, V> state;
private final KvStateInfo<K, N, V> stateInfo;
private final boolean areSerializersStateless;
private final ConcurrentMap<Thread, KvStateInfo<K, N, V>> serializerCache;
private final ClassLoader userClassLoader;
public KvStateEntry(final InternalKvState<K, N, V> state, ClassLoader userClassLoader) {
this.state = Preconditions.checkNotNull(state);
this.stateInfo =
new KvStateInfo<>(
state.getKeySerializer(),
state.getNamespaceSerializer(),
state.getValueSerializer());
this.serializerCache = new ConcurrentHashMap<>();
this.userClassLoader = userClassLoader;
this.areSerializersStateless = stateInfo.duplicate() == stateInfo;
}
public InternalKvState<K, N, V> getState() {
return state;
}
public ClassLoader getUserClassLoader() {
return userClassLoader;
}
public KvStateInfo<K, N, V> getInfoForCurrentThread() {
return areSerializersStateless
? stateInfo
: serializerCache.computeIfAbsent(
Thread.currentThread(), t -> stateInfo.duplicate());
}
public void clear() {
serializerCache.clear();
}
@VisibleForTesting
public int getCacheSize() {
return serializerCache.size();
}
}
|
KvStateEntry
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/NativeMemoryCalculator.java
|
{
"start": 1529,
"end": 15975
}
|
class ____ {
// Maximum permitted JVM heap size when auto-configured.
// Must match the value used in MachineDependentHeap.MachineNodeRole.ML_ONLY.
public static final long STATIC_JVM_UPPER_THRESHOLD = ByteSizeValue.ofGb(31).getBytes();
public static final long MINIMUM_AUTOMATIC_NODE_SIZE = ByteSizeValue.ofMb(512).getBytes();
private static final long OS_OVERHEAD = ByteSizeValue.ofMb(200).getBytes();
// Memory size beyond which the JVM is given 10% of memory instead of 40%.
// Must match the value used in MachineDependentHeap.MachineNodeRole.ML_ONLY.
public static final long JVM_SIZE_KNOT_POINT = ByteSizeValue.ofGb(16).getBytes();
private static final long BYTES_IN_4MB = ByteSizeValue.ofMb(4).getBytes();
// The minimum automatic node size implicitly defines a minimum JVM size
private static final long MINIMUM_AUTOMATIC_JVM_SIZE = dynamicallyCalculateJvmSizeFromNodeSize(MINIMUM_AUTOMATIC_NODE_SIZE);
private NativeMemoryCalculator() {}
public static OptionalLong allowedBytesForMl(DiscoveryNode node, Settings settings) {
if (node.getRoles().contains(DiscoveryNodeRole.ML_ROLE) == false) {
return OptionalLong.empty();
}
return allowedBytesForMl(
node.getAttributes().get(MACHINE_MEMORY_NODE_ATTR),
node.getAttributes().get(MAX_JVM_SIZE_NODE_ATTR),
MAX_MACHINE_MEMORY_PERCENT.get(settings),
USE_AUTO_MACHINE_MEMORY_PERCENT.get(settings)
);
}
public static OptionalLong allowedBytesForMl(DiscoveryNode node, ClusterSettings settings) {
if (node.getRoles().contains(DiscoveryNodeRole.ML_ROLE) == false) {
return OptionalLong.empty();
}
return allowedBytesForMl(
node.getAttributes().get(MACHINE_MEMORY_NODE_ATTR),
node.getAttributes().get(MAX_JVM_SIZE_NODE_ATTR),
settings.get(MAX_MACHINE_MEMORY_PERCENT),
settings.get(USE_AUTO_MACHINE_MEMORY_PERCENT)
);
}
public static OptionalLong allowedBytesForMl(DiscoveryNode node, int maxMemoryPercent, boolean useAutoPercent) {
if (node.getRoles().contains(DiscoveryNodeRole.ML_ROLE) == false) {
return OptionalLong.empty();
}
return allowedBytesForMl(
node.getAttributes().get(MACHINE_MEMORY_NODE_ATTR),
node.getAttributes().get(MAX_JVM_SIZE_NODE_ATTR),
maxMemoryPercent,
useAutoPercent
);
}
private static OptionalLong allowedBytesForMl(String nodeBytes, String jvmBytes, int maxMemoryPercent, boolean useAuto) {
assert nodeBytes != null
: "This private method should only be called for ML nodes, and all ML nodes should have the ml.machine_memory node attribute";
if (nodeBytes == null) {
return OptionalLong.empty();
}
final long machineMemory;
try {
machineMemory = Long.parseLong(nodeBytes);
} catch (NumberFormatException e) {
assert e == null : "ml.machine_memory should parse because we set it internally: invalid value was " + nodeBytes;
return OptionalLong.empty();
}
assert jvmBytes != null
: "This private method should only be called for ML nodes, and all ML nodes should have the ml.max_jvm_size node attribute";
if (jvmBytes == null) {
return OptionalLong.empty();
}
long jvmMemory;
try {
jvmMemory = Long.parseLong(jvmBytes);
} catch (NumberFormatException e) {
assert e == null : "ml.max_jvm_size should parse because we set it internally: invalid value was " + jvmBytes;
return OptionalLong.empty();
}
return OptionalLong.of(allowedBytesForMl(machineMemory, jvmMemory, maxMemoryPercent, useAuto));
}
public static long calculateApproxNecessaryNodeSize(
long mlNativeMemoryRequirement,
Long jvmSize,
int maxMemoryPercent,
boolean useAuto
) {
if (mlNativeMemoryRequirement == 0) {
return 0;
}
if (useAuto) {
jvmSize = jvmSize == null ? dynamicallyCalculateJvmSizeFromMlNativeMemorySize(mlNativeMemoryRequirement) : jvmSize;
return Math.max(mlNativeMemoryRequirement + jvmSize + OS_OVERHEAD, MINIMUM_AUTOMATIC_NODE_SIZE);
}
// Round up here, to ensure enough ML memory when the formula is reversed
return (long) Math.ceil((100.0 / maxMemoryPercent) * mlNativeMemoryRequirement);
}
static long allowedBytesForMl(long machineMemory, long jvmSize, int maxMemoryPercent, boolean useAuto) {
// machineMemory can get set to -1 if the OS probe that determines memory fails
if (machineMemory <= 0) {
return 0L;
}
if (useAuto) {
// It is conceivable that there is a machine smaller than 200MB.
// If the administrator wants to use the auto configuration, the node should be larger.
if (machineMemory - jvmSize <= OS_OVERHEAD) {
return machineMemory / 100;
}
// This calculation is dynamic and designed to maximally take advantage of the underlying machine for machine learning.
// We allow for the JVM and 200MB for the operating system, and then use the remaining space for ML native memory subject
// to a maximum of 90% of the node size.
return Math.min(machineMemory - jvmSize - OS_OVERHEAD, machineMemory * 9 / 10);
}
// Round down here, so we don't permit a model that's 1 byte too big after rounding
return machineMemory * maxMemoryPercent / 100;
}
public static long allowedBytesForMl(long machineMemory, int maxMemoryPercent, boolean useAuto) {
return allowedBytesForMl(
machineMemory,
useAuto ? dynamicallyCalculateJvmSizeFromNodeSize(machineMemory) : Math.min(machineMemory / 2, STATIC_JVM_UPPER_THRESHOLD),
maxMemoryPercent,
useAuto
);
}
public static long dynamicallyCalculateJvmSizeFromNodeSize(long nodeSize) {
// This must match the logic in MachineDependentHeap.MachineNodeRole.ML_ONLY,
// including rounding down to the next lower multiple of 4 megabytes.
if (nodeSize <= JVM_SIZE_KNOT_POINT) {
return ((long) (nodeSize * 0.4) / BYTES_IN_4MB) * BYTES_IN_4MB;
}
return Math.min(
((long) (JVM_SIZE_KNOT_POINT * 0.4 + (nodeSize - JVM_SIZE_KNOT_POINT) * 0.1) / BYTES_IN_4MB) * BYTES_IN_4MB,
STATIC_JVM_UPPER_THRESHOLD
);
}
public static long dynamicallyCalculateJvmSizeFromMlNativeMemorySize(long mlNativeMemorySize) {
// For <= 16GB node, the JVM is 0.4 * total_node_size. This means the rest is 0.6 the node size.
// So, nativeAndOverhead = 0.6 * total_node_size => total_node_size = (nativeAndOverhead / 0.6)
// Consequently jvmSize = (nativeAndOverhead / 0.6) * 0.4 = nativeAndOverhead * 2 / 3
//
// For > 16GB node, the JVM is 0.4 * 16GB + 0.1 * (total_node_size - 16GB).
// nativeAndOverheadAbove16GB = 0.9 * total_node_size_above_16GB
// Also, nativeAndOverheadAbove16GB = nativeAndOverhead - nativeAndOverheadBelow16GB = nativeAndOverhead - 0.6 * 16GB
// Consequently jvmSize = 0.4 * 16GB + (nativeAndOverheadAbove16GB / 0.9) * 0.1
//
// In both cases JVM size is rounded down to the next lower multiple of 4 megabytes to match
// MachineDependentHeap.MachineNodeRole.ML_ONLY.
long nativeAndOverhead = mlNativeMemorySize + OS_OVERHEAD;
long higherAnswer;
if (nativeAndOverhead <= (JVM_SIZE_KNOT_POINT - dynamicallyCalculateJvmSizeFromNodeSize(JVM_SIZE_KNOT_POINT))) {
higherAnswer = (nativeAndOverhead * 2 / 3 / BYTES_IN_4MB) * BYTES_IN_4MB;
} else {
double nativeAndOverheadAbove16GB = nativeAndOverhead - JVM_SIZE_KNOT_POINT * 0.6;
higherAnswer = ((long) (JVM_SIZE_KNOT_POINT * 0.4 + nativeAndOverheadAbove16GB / 0.9 * 0.1) / BYTES_IN_4MB) * BYTES_IN_4MB;
}
// Because we're rounding JVM size to a multiple of 4MB there will be a range of node sizes that can satisfy the required
// amount of ML memory. It's better to choose the lower size, because it avoids waste and avoids the possibility of a
// scale up followed by a scale down. For example, suppose we asked for a 2049MB node when the job would also fit on a
// 2048MB node. Then Cloud will give us a 4096MB node (because it can only give us certain fixed sizes). That immediately
// shows up as wasteful in the ML overview in the UI where the user can visually see that half the ML memory is unused.
// And an hour later the downscale will realise that the job could fit on a smaller node and will downscale to a 2048MB
// node. So it's better all round to choose the slightly lower size from the start if everything will still fit.
if (higherAnswer > BYTES_IN_4MB) {
long lowerAnswer = higherAnswer - BYTES_IN_4MB;
long nodeSizeImpliedByLowerAnswer = nativeAndOverhead + lowerAnswer;
if (dynamicallyCalculateJvmSizeFromNodeSize(nodeSizeImpliedByLowerAnswer) == lowerAnswer) {
return Math.max(MINIMUM_AUTOMATIC_JVM_SIZE, Math.min(lowerAnswer, STATIC_JVM_UPPER_THRESHOLD));
}
}
return Math.max(MINIMUM_AUTOMATIC_JVM_SIZE, Math.min(higherAnswer, STATIC_JVM_UPPER_THRESHOLD));
}
/**
* Calculates the highest model memory limit that a job could be
* given and still stand a chance of being assigned in the cluster.
* The calculation takes into account the possibility of autoscaling,
* i.e. if lazy nodes are available then the maximum possible node
* size is considered as well as the sizes of nodes in the current
* cluster.
*/
public static ByteSizeValue calculateMaxModelMemoryLimitToFit(ClusterSettings clusterSettings, DiscoveryNodes nodes) {
long maxMlMemory = 0;
for (DiscoveryNode node : nodes) {
OptionalLong limit = allowedBytesForMl(node, clusterSettings);
if (limit.isEmpty()) {
continue;
}
maxMlMemory = Math.max(maxMlMemory, limit.getAsLong());
}
// It is possible that there is scope for more ML nodes to be added
// to the cluster, in which case take those into account too
long maxMlNodeSize = clusterSettings.get(MAX_ML_NODE_SIZE).getBytes();
int maxLazyNodes = clusterSettings.get(MAX_LAZY_ML_NODES);
// Even if all the lazy nodes have been added to the cluster, we make
// the assumption that if any were configured they'll be able to grow
// to the maximum ML node size. (We are assuming that lazy nodes always
// behave like they do with Elastic Cloud autoscaling, where vertical
// scaling is possible.)
if (maxMlNodeSize > 0 && maxLazyNodes > 0) {
maxMlMemory = Math.max(
maxMlMemory,
allowedBytesForMl(
maxMlNodeSize,
clusterSettings.get(MAX_MACHINE_MEMORY_PERCENT),
clusterSettings.get(USE_AUTO_MACHINE_MEMORY_PERCENT)
)
);
}
if (maxMlMemory == 0L) {
// This implies there are currently no ML nodes in the cluster, and
// no automatic mechanism for adding one, so we have no idea what
// the effective limit would be if one were added
return null;
}
maxMlMemory -= Math.max(Job.PROCESS_MEMORY_OVERHEAD.getBytes(), DataFrameAnalyticsConfig.PROCESS_MEMORY_OVERHEAD.getBytes());
maxMlMemory -= MachineLearning.NATIVE_EXECUTABLE_CODE_OVERHEAD.getBytes();
return ByteSizeValue.ofMb(ByteSizeUnit.BYTES.toMB(Math.max(0L, maxMlMemory)));
}
public static ByteSizeValue calculateTotalMlMemory(ClusterSettings clusterSettings, DiscoveryNodes nodes) {
long totalMlMemory = 0;
for (DiscoveryNode node : nodes) {
OptionalLong limit = allowedBytesForMl(node, clusterSettings);
if (limit.isEmpty()) {
continue;
}
totalMlMemory += limit.getAsLong();
}
// Round down to a whole number of megabytes, since we generally deal with model
// memory limits in whole megabytes
return ByteSizeValue.ofMb(ByteSizeUnit.BYTES.toMB(totalMlMemory));
}
/**
* Get the maximum value of model memory limit that a user may set in a job config.
* If the xpack.ml.max_model_memory_limit setting is set then the value comes from that.
* Otherwise, if xpack.ml.use_auto_machine_memory_percent is set then the maximum model
* memory limit is considered to be the largest model memory limit that could fit into
* the cluster (on the assumption that configured lazy nodes will be added and other
* jobs stopped to make space).
* @return The maximum model memory limit calculated from the current cluster settings,
* or {@link ByteSizeValue#ZERO} if there is no limit.
*/
public static ByteSizeValue getMaxModelMemoryLimit(ClusterService clusterService) {
ClusterSettings clusterSettings = clusterService.getClusterSettings();
ByteSizeValue maxModelMemoryLimit = clusterSettings.get(MachineLearningField.MAX_MODEL_MEMORY_LIMIT);
if (maxModelMemoryLimit != null && maxModelMemoryLimit.getBytes() > 0) {
return maxModelMemoryLimit;
}
// When the ML memory percent is being set automatically and no explicit max model memory limit is set,
// max model memory limit is considered to be the max model memory limit that will fit in the cluster
Boolean autoMemory = clusterSettings.get(MachineLearningField.USE_AUTO_MACHINE_MEMORY_PERCENT);
if (autoMemory) {
DiscoveryNodes nodes = clusterService.state().getNodes();
ByteSizeValue modelMemoryLimitToFit = calculateMaxModelMemoryLimitToFit(clusterSettings, nodes);
if (modelMemoryLimitToFit != null) {
return modelMemoryLimitToFit;
}
}
return ByteSizeValue.ZERO;
}
}
|
NativeMemoryCalculator
|
java
|
mapstruct__mapstruct
|
integrationtest/src/test/resources/faultyAstModifyingAnnotationProcessorTest/generator/src/main/java/org/mapstruct/itest/faultyAstModifyingProcessor/FaultyStaticAstModifyingProcessor.java
|
{
"start": 351,
"end": 657
}
|
class ____ implements AstModifyingAnnotationProcessor {
static {
if ( true ) {
throw new RuntimeException( "Failed to initialize class" );
}
}
@Override
public boolean isTypeComplete(TypeMirror type) {
return true;
}
}
|
FaultyStaticAstModifyingProcessor
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/context/annotation/NestedConfigurationClassTests.java
|
{
"start": 11200,
"end": 11244
}
|
class ____ {
}
}
}
static
|
L2ConfigEmpty
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/Jt400EndpointBuilderFactory.java
|
{
"start": 56010,
"end": 59753
}
|
interface ____ {
/**
* JT400 (camel-jt400)
* Exchanges messages with an IBM i system using data queues, message
* queues, or program call. IBM i is the replacement for AS/400 and
* iSeries servers.
*
* Category: messaging
* Since: 1.5
* Maven coordinates: org.apache.camel:camel-jt400
*
* @return the dsl builder for the headers' name.
*/
default Jt400HeaderNameBuilder jt400() {
return Jt400HeaderNameBuilder.INSTANCE;
}
/**
* JT400 (camel-jt400)
* Exchanges messages with an IBM i system using data queues, message
* queues, or program call. IBM i is the replacement for AS/400 and
* iSeries servers.
*
* Category: messaging
* Since: 1.5
* Maven coordinates: org.apache.camel:camel-jt400
*
* Syntax:
* <code>jt400:userID:password@systemName/QSYS.LIB/objectPath.type</code>
*
* Path parameter: userID (required)
* Returns the ID of the IBM i user.
*
* Path parameter: password (required)
* Returns the password of the IBM i user.
*
* Path parameter: systemName (required)
* Returns the name of the IBM i system.
*
* Path parameter: objectPath (required)
* Returns the fully qualified integrated file system path name of the
* target object of this endpoint.
*
* Path parameter: type (required)
* Whether to work with data queues or remote program call
* There are 4 enums and the value can be one of: DTAQ, PGM, SRVPGM,
* MSGQ
*
* @param path userID:password@systemName/QSYS.LIB/objectPath.type
* @return the dsl builder
*/
default Jt400EndpointBuilder jt400(String path) {
return Jt400EndpointBuilderFactory.endpointBuilder("jt400", path);
}
/**
* JT400 (camel-jt400)
* Exchanges messages with an IBM i system using data queues, message
* queues, or program call. IBM i is the replacement for AS/400 and
* iSeries servers.
*
* Category: messaging
* Since: 1.5
* Maven coordinates: org.apache.camel:camel-jt400
*
* Syntax:
* <code>jt400:userID:password@systemName/QSYS.LIB/objectPath.type</code>
*
* Path parameter: userID (required)
* Returns the ID of the IBM i user.
*
* Path parameter: password (required)
* Returns the password of the IBM i user.
*
* Path parameter: systemName (required)
* Returns the name of the IBM i system.
*
* Path parameter: objectPath (required)
* Returns the fully qualified integrated file system path name of the
* target object of this endpoint.
*
* Path parameter: type (required)
* Whether to work with data queues or remote program call
* There are 4 enums and the value can be one of: DTAQ, PGM, SRVPGM,
* MSGQ
*
* @param componentName to use a custom component name for the endpoint
* instead of the default name
* @param path userID:password@systemName/QSYS.LIB/objectPath.type
* @return the dsl builder
*/
default Jt400EndpointBuilder jt400(String componentName, String path) {
return Jt400EndpointBuilderFactory.endpointBuilder(componentName, path);
}
}
/**
* The builder of headers' name for the JT400 component.
*/
public static
|
Jt400Builders
|
java
|
apache__logging-log4j2
|
log4j-core/src/main/java/org/apache/logging/log4j/core/pattern/RootThrowablePatternConverter.java
|
{
"start": 1338,
"end": 2192
}
|
class ____ extends ThrowablePatternConverter {
private RootThrowablePatternConverter(@Nullable final Configuration config, @Nullable final String[] options) {
super(
"RootThrowable",
"throwable",
options,
config,
ThrowableInvertedPropertyRendererFactory.INSTANCE,
ThrowableInvertedStackTraceRendererFactory.INSTANCE);
}
/**
* Creates an instance of the class.
*
* @param config a configuration
* @param options the pattern options
* @return a new instance
*/
public static RootThrowablePatternConverter newInstance(
@Nullable final Configuration config, @Nullable final String[] options) {
return new RootThrowablePatternConverter(config, options);
}
}
|
RootThrowablePatternConverter
|
java
|
junit-team__junit5
|
junit-jupiter-engine/src/main/java/org/junit/jupiter/engine/extension/TimeoutDurationParser.java
|
{
"start": 1117,
"end": 2079
}
|
class ____ {
private static final Pattern PATTERN = Pattern.compile("([1-9]\\d*) ?((?:[nμm]?s)|m|h|d)?",
CASE_INSENSITIVE | UNICODE_CASE);
private static final Map<String, TimeUnit> UNITS_BY_ABBREVIATION = Map.of( //
"ns", NANOSECONDS, //
"μs", MICROSECONDS, //
"ms", MILLISECONDS, //
"s", SECONDS, //
"m", MINUTES, //
"h", HOURS, //
"d", DAYS //
);
TimeoutDuration parse(CharSequence text) throws DateTimeParseException {
Matcher matcher = PATTERN.matcher(text);
if (matcher.matches()) {
long value = Long.parseLong(matcher.group(1));
String unitAbbreviation = matcher.group(2);
TimeUnit unit = unitAbbreviation == null ? SECONDS
: requireNonNull(UNITS_BY_ABBREVIATION.get(unitAbbreviation.toLowerCase(Locale.ENGLISH)));
return new TimeoutDuration(value, unit);
}
throw new DateTimeParseException("Timeout duration is not in the expected format (<number> [ns|μs|ms|s|m|h|d])",
text, 0);
}
}
|
TimeoutDurationParser
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/EqualsGetClassTest.java
|
{
"start": 7158,
"end": 7709
}
|
class ____ {
private int a;
@Override
public boolean equals(Object o) {
if (o == null) {
return false;
}
if (o.getClass() != getClass()) {
return false;
}
return ((Test) o).a == a;
}
}
""")
.doTest();
}
@Test
public void negative_anonymous() {
helper
.addSourceLines(
"Test.java",
"""
final
|
Test
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatApiScope.java
|
{
"start": 12527,
"end": 12704
}
|
class ____ {
private Result prepareResult = Result.SKIP;
private Result cleanupResult = Result.SKIP;
private Result methodResult = Result.SKIP;
}
private
|
CaseResult
|
java
|
apache__flink
|
flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/vector/reader/ByteColumnReader.java
|
{
"start": 1320,
"end": 3728
}
|
class ____ extends AbstractColumnReader<WritableByteVector> {
public ByteColumnReader(ColumnDescriptor descriptor, PageReader pageReader) throws IOException {
super(descriptor, pageReader);
checkTypeName(PrimitiveType.PrimitiveTypeName.INT32);
}
@Override
protected void readBatch(int rowId, int num, WritableByteVector column) {
int left = num;
while (left > 0) {
if (runLenDecoder.currentCount == 0) {
runLenDecoder.readNextGroup();
}
int n = Math.min(left, runLenDecoder.currentCount);
switch (runLenDecoder.mode) {
case RLE:
if (runLenDecoder.currentValue == maxDefLevel) {
readBytes(n, column, rowId);
} else {
column.setNulls(rowId, n);
}
break;
case PACKED:
for (int i = 0; i < n; ++i) {
if (runLenDecoder.currentBuffer[runLenDecoder.currentBufferIdx++]
== maxDefLevel) {
column.setByte(rowId + i, readByte());
} else {
column.setNullAt(rowId + i);
}
}
break;
}
rowId += n;
left -= n;
runLenDecoder.currentCount -= n;
}
}
@Override
protected void readBatchFromDictionaryIds(
int rowId, int num, WritableByteVector column, WritableIntVector dictionaryIds) {
for (int i = rowId; i < rowId + num; ++i) {
if (!column.isNullAt(i)) {
column.setByte(i, (byte) dictionary.decodeToInt(dictionaryIds.getInt(i)));
}
}
}
private byte readByte() {
return (byte) readDataBuffer(4).getInt();
}
private void readBytes(int total, WritableByteVector c, int rowId) {
// Bytes are stored as a 4-byte little endian int. Just read the first byte.
int requiredBytes = total * 4;
ByteBuffer buffer = readDataBuffer(requiredBytes);
for (int i = 0; i < total; i += 1) {
c.setByte(rowId + i, buffer.get());
// skip the next 3 bytes
buffer.position(buffer.position() + 3);
}
}
}
|
ByteColumnReader
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/leaderelection/TestingListener.java
|
{
"start": 1148,
"end": 1937
}
|
class ____ extends TestingRetrievalBase implements LeaderRetrievalListener {
private static final Logger LOG = LoggerFactory.getLogger(TestingListener.class);
@Override
public void notifyLeaderAddress(String leaderAddress, UUID leaderSessionID) {
LOG.debug(
"Notified about new leader address {} with session ID {}.",
leaderAddress,
leaderSessionID);
if (leaderAddress == null && leaderSessionID == null) {
offerToLeaderQueue(LeaderInformation.empty());
} else {
offerToLeaderQueue(LeaderInformation.known(leaderSessionID, leaderAddress));
}
}
@Override
public void handleError(Exception exception) {
super.handleError(exception);
}
}
|
TestingListener
|
java
|
bumptech__glide
|
library/src/main/java/com/bumptech/glide/manager/DefaultConnectivityMonitor.java
|
{
"start": 393,
"end": 1154
}
|
class ____ implements ConnectivityMonitor {
private final Context context;
@SuppressWarnings("WeakerAccess")
@Synthetic
final ConnectivityListener listener;
DefaultConnectivityMonitor(@NonNull Context context, @NonNull ConnectivityListener listener) {
this.context = context.getApplicationContext();
this.listener = listener;
}
private void register() {
SingletonConnectivityReceiver.get(context).register(listener);
}
private void unregister() {
SingletonConnectivityReceiver.get(context).unregister(listener);
}
@Override
public void onStart() {
register();
}
@Override
public void onStop() {
unregister();
}
@Override
public void onDestroy() {
// Do nothing.
}
}
|
DefaultConnectivityMonitor
|
java
|
ReactiveX__RxJava
|
src/test/java/io/reactivex/rxjava3/internal/operators/observable/ObservableRefCountTest.java
|
{
"start": 28554,
"end": 30079
}
|
class ____ extends ConnectableObservable<Object> {
@Override
public void connect(Consumer<? super Disposable> connection) {
throw new TestException("connect");
}
@Override
public void reset() {
// nothing to do in this test
}
@Override
protected void subscribeActual(Observer<? super Object> observer) {
observer.onSubscribe(Disposable.empty());
}
}
@Test
@SuppressUndeliverable
public void badSourceSubscribe() {
BadObservableSubscribe bo = new BadObservableSubscribe();
try {
bo.refCount()
.test();
fail("Should have thrown");
} catch (NullPointerException ex) {
assertTrue(ex.getCause() instanceof TestException);
}
}
@Test
public void badSourceDispose() {
BadObservableDispose bo = new BadObservableDispose();
try {
bo.refCount()
.test()
.dispose();
fail("Should have thrown");
} catch (TestException expected) {
}
}
@Test
@SuppressUndeliverable
public void badSourceConnect() {
BadObservableConnect bo = new BadObservableConnect();
try {
bo.refCount()
.test();
fail("Should have thrown");
} catch (NullPointerException ex) {
assertTrue(ex.getCause() instanceof TestException);
}
}
static final
|
BadObservableConnect
|
java
|
grpc__grpc-java
|
testing/src/main/java/io/grpc/testing/GrpcCleanupRule.java
|
{
"start": 4899,
"end": 6784
}
|
class ____ ExternalResource so it can be used in JUnit 5. But JUnit 5 will only call
// before() and after(), thus code cannot assume this method will be called.
@Override
public Statement apply(final Statement base, Description description) {
return super.apply(new Statement() {
@Override
public void evaluate() throws Throwable {
abruptShutdown = false;
try {
base.evaluate();
} catch (Throwable t) {
abruptShutdown = true;
throw t;
}
}
}, description);
}
/**
* Releases all the registered resources.
*/
@Override
protected void after() {
stopwatch.reset();
stopwatch.start();
InterruptedException interrupted = null;
if (!abruptShutdown) {
for (Resource resource : Lists.reverse(resources)) {
resource.cleanUp();
}
for (int i = resources.size() - 1; i >= 0; i--) {
try {
boolean released = resources.get(i).awaitReleased(
timeoutNanos - stopwatch.elapsed(TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS);
if (released) {
resources.remove(i);
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
interrupted = e;
break;
}
}
}
if (!resources.isEmpty()) {
for (Resource resource : Lists.reverse(resources)) {
resource.forceCleanUp();
}
try {
if (interrupted != null) {
throw new AssertionError(
"Thread interrupted before resources gracefully released", interrupted);
} else if (!abruptShutdown) {
throw new AssertionError(
"Resources could not be released in time at the end of test: " + resources);
}
} finally {
resources.clear();
}
}
}
@VisibleForTesting
|
extends
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.