language stringclasses 1
value | repo stringclasses 60
values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | dropwizard__dropwizard | dropwizard-db/src/test/java/io/dropwizard/db/CustomConnectionValidator.java | {
"start": 110,
"end": 402
} | class ____ implements Validator {
// It's used only once, so static access should be fine
static volatile boolean loaded;
@Override
public boolean validate(Connection connection, int validateAction) {
loaded = true;
return true;
}
}
| CustomConnectionValidator |
java | google__guava | android/guava-tests/benchmark/com/google/common/util/concurrent/ExecutionListBenchmark.java | {
"start": 1931,
"end": 2153
} | interface ____ {
void add(Runnable runnable, Executor executor);
void execute();
/** Returns the underlying implementation, useful for the Footprint benchmark. */
Object getImpl();
}
| ExecutionListWrapper |
java | apache__maven | impl/maven-impl/src/test/java/org/apache/maven/impl/model/profile/FileProfileActivatorTest.java | {
"start": 1526,
"end": 5738
} | class ____ extends AbstractProfileActivatorTest<FileProfileActivator> {
@TempDir
Path tempDir;
private final DefaultProfileActivationContext context = newContext();
@BeforeEach
@Override
void setUp() throws Exception {
activator = new FileProfileActivator();
context.setModel(Model.newBuilder().pomFile(tempDir.resolve("pom.xml")).build());
File file = new File(tempDir.resolve("file.txt").toString());
if (!file.createNewFile()) {
throw new IOException("Can't create " + file);
}
}
@Test
void testRootDirectoryWithNull() {
context.setModel(Model.newInstance());
NullPointerException e = assertThrows(
NullPointerException.class,
() -> assertActivation(false, newExistsProfile("${project.rootDirectory}"), context));
assertEquals(RootLocator.UNABLE_TO_FIND_ROOT_PROJECT_MESSAGE, e.getMessage());
}
@Test
void testRootDirectory() {
assertActivation(false, newExistsProfile("${project.rootDirectory}/someFile.txt"), context);
assertActivation(true, newMissingProfile("${project.rootDirectory}/someFile.txt"), context);
assertActivation(true, newExistsProfile("${project.rootDirectory}"), context);
assertActivation(true, newExistsProfile("${project.rootDirectory}/" + "file.txt"), context);
assertActivation(false, newMissingProfile("${project.rootDirectory}"), context);
assertActivation(false, newMissingProfile("${project.rootDirectory}/" + "file.txt"), context);
}
@Test
void testIsActiveNoFileWithShortBasedir() {
assertActivation(false, newExistsProfile(null), context);
assertActivation(false, newExistsProfile("someFile.txt"), context);
assertActivation(false, newExistsProfile("${basedir}/someFile.txt"), context);
assertActivation(false, newMissingProfile(null), context);
assertActivation(true, newMissingProfile("someFile.txt"), context);
assertActivation(true, newMissingProfile("${basedir}/someFile.txt"), context);
}
@Test
void testIsActiveNoFile() {
assertActivation(false, newExistsProfile(null), context);
assertActivation(false, newExistsProfile("someFile.txt"), context);
assertActivation(false, newExistsProfile("${project.basedir}/someFile.txt"), context);
assertActivation(false, newMissingProfile(null), context);
assertActivation(true, newMissingProfile("someFile.txt"), context);
assertActivation(true, newMissingProfile("${project.basedir}/someFile.txt"), context);
}
@Test
void testIsActiveExistsFileExists() {
assertActivation(true, newExistsProfile("file.txt"), context);
assertActivation(true, newExistsProfile("${project.basedir}"), context);
assertActivation(true, newExistsProfile("${project.basedir}/" + "file.txt"), context);
assertActivation(false, newMissingProfile("file.txt"), context);
assertActivation(false, newMissingProfile("${project.basedir}"), context);
assertActivation(false, newMissingProfile("${project.basedir}/" + "file.txt"), context);
}
@Test
void testIsActiveExistsLeavesFileUnchanged() {
Profile profile = newExistsProfile("file.txt");
assertEquals("file.txt", profile.getActivation().getFile().getExists());
assertActivation(true, profile, context);
assertEquals("file.txt", profile.getActivation().getFile().getExists());
}
private Profile newExistsProfile(String filePath) {
ActivationFile activationFile =
ActivationFile.newBuilder().exists(filePath).build();
return newProfile(activationFile);
}
private Profile newMissingProfile(String filePath) {
ActivationFile activationFile =
ActivationFile.newBuilder().missing(filePath).build();
return newProfile(activationFile);
}
private Profile newProfile(ActivationFile activationFile) {
Activation activation = Activation.newBuilder().file(activationFile).build();
return Profile.newBuilder().activation(activation).build();
}
}
| FileProfileActivatorTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/embeddables/Investment.java | {
"start": 276,
"end": 809
} | class ____ {
private DollarValue amount;
private String description;
@Column(name = "`date`")
private MyDate date;
public DollarValue getAmount() {
return amount;
}
public void setAmount(DollarValue amount) {
this.amount = amount;
}
@Column(length = 500)
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public MyDate getDate() {
return date;
}
public void setDate(MyDate date) {
this.date = date;
}
}
| Investment |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java | {
"start": 63783,
"end": 63957
} | class ____ construct is taken from the configuration.
* @param uri Uri to construct
* @return The constructed journal manager
* @throws IllegalArgumentException if no | to |
java | google__dagger | javatests/dagger/spi/SpiPluginTest.java | {
"start": 9200,
"end": 9544
} | interface ____ {",
" TestSubcomponent sub();",
"}");
JavaFileObject subcomponent =
JavaFileObjects.forSourceLines(
"test.TestSubcomponent",
"package test;",
"",
"import dagger.Subcomponent;",
"",
"@Subcomponent",
" | TestComponent |
java | mockito__mockito | mockito-core/src/main/java/org/mockito/Mockito.java | {
"start": 75520,
"end": 76311
} | class ____.
* </li>
* <li>{@link MockitoSessionBuilder#name(String)} allows to pass a name from the testing framework to the
* {@link MockitoSession} that will be used for printing warnings when {@link Strictness#WARN} is used.
* </li>
* <li>{@link MockitoSessionBuilder#logger(MockitoSessionLogger)} makes it possible to customize the logger used
* for hints/warnings produced when finishing mocking (useful for testing and to connect reporting capabilities
* provided by testing frameworks such as JUnit Jupiter).
* </li>
* <li>{@link MockitoSession#setStrictness(Strictness)} allows to change the strictness of a {@link MockitoSession}
* for one-off scenarios, e.g. it enables configuring a default strictness for all tests in a | instances |
java | apache__hadoop | hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractSeek.java | {
"start": 2083,
"end": 12666
} | class ____ extends AbstractContractSeekTest{
private final boolean isSecure;
private final ABFSContractTestBinding binding;
private static final byte[] BLOCK = dataset(100 * 1024, 0, 255);
public ITestAbfsFileSystemContractSeek() throws Exception {
binding = new ABFSContractTestBinding();
this.isSecure = binding.isSecureMode();
}
@BeforeEach
@Override
public void setup() throws Exception {
binding.setup();
super.setup();
}
@Override
protected Configuration createConfiguration() {
return binding.getRawConfiguration();
}
@Override
protected AbstractFSContract createContract(final Configuration conf) {
conf.setInt(AZURE_READ_AHEAD_RANGE, MIN_BUFFER_SIZE);
conf.setInt(AZURE_READ_BUFFER_SIZE, MIN_BUFFER_SIZE);
disableFilesystemCaching(conf);
return new AbfsFileSystemContract(conf, isSecure);
}
/**
* Test verifies if the data is read correctly
* when {@code ConfigurationKeys#AZURE_READ_AHEAD_RANGE} is set.
* Reason for not breaking this test into smaller parts is we
* really want to simulate lot of forward and backward seeks
* similar to real production use case.
*/
@Test
public void testSeekAndReadWithReadAhead() throws IOException {
describe(" Testing seek and read with read ahead "
+ "enabled for random reads");
Path testSeekFile = path(getMethodName() + "bigseekfile.txt");
createDataSet(testSeekFile);
try (FSDataInputStream in = getFileSystem().open(testSeekFile)) {
AbfsInputStream inStream = ((AbfsInputStream) in.getWrappedStream());
AbfsInputStreamStatisticsImpl streamStatistics =
(AbfsInputStreamStatisticsImpl) inStream.getStreamStatistics();
assertEquals(MIN_BUFFER_SIZE, inStream.getReadAheadRange(),
String.format("Value of %s is not set correctly", AZURE_READ_AHEAD_RANGE));
long remoteReadOperationsOldVal = streamStatistics.getRemoteReadOperations();
Assertions.assertThat(remoteReadOperationsOldVal)
.describedAs("Number of remote read ops should be 0 "
+ "before any read call is made")
.isEqualTo(0);
// Test read at first position. Remote read.
Assertions.assertThat(inStream.getPos())
.describedAs("First call to getPos() should return 0")
.isEqualTo(0);
assertDataAtPos(0, (byte) in.read());
assertSeekBufferStats(0, streamStatistics.getSeekInBuffer());
long remoteReadOperationsNewVal = streamStatistics.getRemoteReadOperations();
assertIncrementInRemoteReadOps(remoteReadOperationsOldVal,
remoteReadOperationsNewVal);
remoteReadOperationsOldVal = remoteReadOperationsNewVal;
// Seeking just before read ahead range. Read from buffer.
int newSeek = inStream.getReadAheadRange() - 1;
in.seek(newSeek);
assertGetPosition(newSeek, in.getPos());
assertDataAtPos(newSeek, (byte) in.read());
assertSeekBufferStats(1, streamStatistics.getSeekInBuffer());
remoteReadOperationsNewVal = streamStatistics.getRemoteReadOperations();
assertNoIncrementInRemoteReadOps(remoteReadOperationsOldVal,
remoteReadOperationsNewVal);
remoteReadOperationsOldVal = remoteReadOperationsNewVal;
// Seeking boundary of read ahead range. Read from buffer manager.
newSeek = inStream.getReadAheadRange();
inStream.seek(newSeek);
assertGetPosition(newSeek, in.getPos());
assertDataAtPos(newSeek, (byte) in.read());
assertSeekBufferStats(1, streamStatistics.getSeekInBuffer());
remoteReadOperationsNewVal = streamStatistics.getRemoteReadOperations();
assertNoIncrementInRemoteReadOps(remoteReadOperationsOldVal,
remoteReadOperationsNewVal);
remoteReadOperationsOldVal = remoteReadOperationsNewVal;
// Seeking just after read ahead range. Read from buffer.
newSeek = inStream.getReadAheadRange() + 1;
in.seek(newSeek);
assertGetPosition(newSeek, in.getPos());
assertDataAtPos(newSeek, (byte) in.read());
assertSeekBufferStats(2, streamStatistics.getSeekInBuffer());
remoteReadOperationsNewVal = streamStatistics.getRemoteReadOperations();
assertNoIncrementInRemoteReadOps(remoteReadOperationsOldVal,
remoteReadOperationsNewVal);
remoteReadOperationsOldVal = remoteReadOperationsNewVal;
// Seeking just 10 more bytes such that data is read from buffer.
newSeek += 10;
in.seek(newSeek);
assertGetPosition(newSeek, in.getPos());
assertDataAtPos(newSeek, (byte) in.read());
assertSeekBufferStats(3, streamStatistics.getSeekInBuffer());
remoteReadOperationsNewVal = streamStatistics.getRemoteReadOperations();
assertNoIncrementInRemoteReadOps(remoteReadOperationsOldVal,
remoteReadOperationsNewVal);
remoteReadOperationsOldVal = remoteReadOperationsNewVal;
// Seek backward such that data is read from remote.
newSeek -= 106;
in.seek(newSeek);
assertGetPosition(newSeek, in.getPos());
assertDataAtPos(newSeek, (byte) in.read());
assertSeekBufferStats(3, streamStatistics.getSeekInBuffer());
remoteReadOperationsNewVal = streamStatistics.getRemoteReadOperations();
assertIncrementInRemoteReadOps(remoteReadOperationsOldVal,
remoteReadOperationsNewVal);
remoteReadOperationsOldVal = remoteReadOperationsNewVal;
// Seeking just 10 more bytes such that data is read from buffer.
newSeek += 10;
in.seek(newSeek);
assertGetPosition(newSeek, in.getPos());
assertDataAtPos(newSeek, (byte) in.read());
assertSeekBufferStats(4, streamStatistics.getSeekInBuffer());
remoteReadOperationsNewVal = streamStatistics.getRemoteReadOperations();
assertNoIncrementInRemoteReadOps(remoteReadOperationsOldVal,
remoteReadOperationsNewVal);
remoteReadOperationsOldVal = remoteReadOperationsNewVal;
// Read multiple bytes across read ahead range. Remote read.
long oldSeek = newSeek;
newSeek = 2*inStream.getReadAheadRange() -1;
byte[] bytes = new byte[5];
in.readFully(newSeek, bytes);
// With readFully getPos should return oldSeek pos.
// Adding one as one byte is already read
// after the last seek is done.
assertGetPosition(oldSeek + 1, in.getPos());
assertSeekBufferStats(4, streamStatistics.getSeekInBuffer());
assertDatasetEquals(newSeek, "Read across read ahead ",
bytes, bytes.length);
remoteReadOperationsNewVal = streamStatistics.getRemoteReadOperations();
assertIncrementInRemoteReadOps(remoteReadOperationsOldVal,
remoteReadOperationsNewVal);
}
}
/**
* Test to validate the getPos() when a seek is done
* post {@code AbfsInputStream#unbuffer} call is made.
* Also using optimised builder api to open file.
*/
@Test
public void testSeekAfterUnbuffer() throws IOException {
describe("Test to make sure that seeking in AbfsInputStream after "
+ "unbuffer() call is not doing anyIO.");
Path testFile = path(getMethodName() + ".txt");
createDataSet(testFile);
final CompletableFuture<FSDataInputStream> future =
getFileSystem().openFile(testFile)
.build();
try (FSDataInputStream inputStream = awaitFuture(future)) {
AbfsInputStream abfsInputStream = (AbfsInputStream) inputStream.getWrappedStream();
AbfsInputStreamStatisticsImpl streamStatistics =
(AbfsInputStreamStatisticsImpl) abfsInputStream.getStreamStatistics();
int readAheadRange = abfsInputStream.getReadAheadRange();
long seekPos = readAheadRange;
inputStream.seek(seekPos);
assertDataAtPos(readAheadRange, (byte) inputStream.read());
long currentRemoteReadOps = streamStatistics.getRemoteReadOperations();
assertIncrementInRemoteReadOps(0, currentRemoteReadOps);
inputStream.unbuffer();
seekPos -= 10;
inputStream.seek(seekPos);
// Seek backwards shouldn't do any IO
assertNoIncrementInRemoteReadOps(currentRemoteReadOps, streamStatistics.getRemoteReadOperations());
assertGetPosition(seekPos, inputStream.getPos());
}
}
private void createDataSet(Path path) throws IOException {
createFile(getFileSystem(), path, true, BLOCK);
}
private void assertGetPosition(long expected, long actual) {
final String seekPosErrorMsg = "getPos() should return %s";
Assertions.assertThat(actual)
.describedAs(seekPosErrorMsg, expected)
.isEqualTo(actual);
}
private void assertDataAtPos(int pos, byte actualData) {
final String dataErrorMsg = "Mismatch in data@%s";
Assertions.assertThat(actualData)
.describedAs(dataErrorMsg, pos)
.isEqualTo(BLOCK[pos]);
}
private void assertSeekBufferStats(long expected, long actual) {
final String statsErrorMsg = "Mismatch in seekInBuffer counts";
Assertions.assertThat(actual)
.describedAs(statsErrorMsg)
.isEqualTo(expected);
}
private void assertNoIncrementInRemoteReadOps(long oldVal, long newVal) {
final String incrementErrorMsg = "Number of remote read ops shouldn't increase";
Assertions.assertThat(newVal)
.describedAs(incrementErrorMsg)
.isEqualTo(oldVal);
}
private void assertIncrementInRemoteReadOps(long oldVal, long newVal) {
final String incrementErrorMsg = "Number of remote read ops should increase";
Assertions.assertThat(newVal)
.describedAs(incrementErrorMsg)
.isGreaterThan(oldVal);
}
/**
* Assert that the data read matches the dataset at the given offset.
* This helps verify that the seek process is moving the read pointer
* to the correct location in the file.
* @param readOffset the offset in the file where the read began.
* @param operation operation name for the assertion.
* @param data data read in.
* @param length length of data to check.
*/
private void assertDatasetEquals(
final int readOffset,
final String operation,
final byte[] data,
int length) {
for (int i = 0; i < length; i++) {
int o = readOffset + i;
Assertions.assertThat(data[i])
.describedAs(operation + "with read offset " + readOffset
+ ": data[" + i + "] != actualData[" + o + "]")
.isEqualTo(BLOCK[o]);
}
}
}
| ITestAbfsFileSystemContractSeek |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/TranslateTimeSeriesAggregate.java | {
"start": 3161,
"end": 6886
} | class ____ the aggregates in the time-series aggregations to standard aggregates.
* This approach helps avoid introducing new plans and operators for time-series aggregations specially.
* <p>
* Examples:
* <pre>
* TS k8s | STATS max(rate(request))
*
* becomes
*
* TS k8s
* | STATS rate_$1 = rate(request) BY _tsid
* | STATS max(rate_$1)
*
* TS k8s | STATS max(rate(request)) BY host
*
* becomes
*
* TS k8s
* | STATS rate_$1=rate(request), DIMENSION_VALUES(host) BY _tsid
* | STATS max(rate_$1) BY host=`DIMENSION_VALUES(host)`
*
* TS k8s | STATS avg(rate(request)) BY host
*
* becomes
*
* TS k8s
* | STATS rate_$1=rate(request), DIMENSION_VALUES(host) BY _tsid
* | STATS sum(rate_$1), count(rate_$1) BY host=`DIMENSION_VALUES(host)`
* | EVAL `avg(rate(request))` = `sum(rate_$1)` / `count(rate_$1)`
* | KEEP `avg(rate(request))`, host
*
* TS k8s | STATS avg(rate(request)) BY host, bucket(@timestamp, 1minute)
*
* becomes
*
* TS k8s
* | EVAL `bucket(@timestamp, 1minute)`=datetrunc(@timestamp, 1minute)
* | STATS rate_$1=rate(request), DIMENSION_VALUES(host) BY _tsid,`bucket(@timestamp, 1minute)`
* | STATS sum=sum(rate_$1), count(rate_$1) BY host=`DIMENSION_VALUES(host)`, `bucket(@timestamp, 1minute)`
* | EVAL `avg(rate(request))` = `sum(rate_$1)` / `count(rate_$1)`
* | KEEP `avg(rate(request))`, host, `bucket(@timestamp, 1minute)`
* </pre>
*
* Non time-series aggregates will be rewritten with last_over_time used in the first pass aggregation.
* Here, we don't have the staleness interval, but allow any value within the bucket (_tsid and optionally time-bucket).
*
* <pre>
* TS k8s | STATS max(rate(request)), max(memory_used) becomes:
*
* TS k8s
* | STATS rate_$1=rate(request), $last_m1=last_over_time(memory_used) BY _tsid
* | STATS max(rate_$1), `max(memory_used)` = max($last_m1)
*
* TS k8s | STATS max(rate(request)) avg(memory_used) BY host
*
* becomes
*
* TS k8s
* | STATS rate_$1=rate(request), $p1=last_over_time(memory_used), VALUES(host) BY _tsid
* | STATS max(rate_$1), $sum=sum($p1), $count=count($p1) BY host=`VALUES(host)`
* | EVAL `avg(memory_used)` = $sum / $count
* | KEEP `max(rate(request))`, `avg(memory_used)`, host
*
* TS k8s | STATS min(memory_used) sum(rate(request)) BY pod, bucket(@timestamp, 5m)
*
* becomes
*
* TS k8s
* | EVAL `bucket(@timestamp, 5m)` = datetrunc(@timestamp, '5m')
* | STATS rate_$1=rate(request), $p1=last_over_time(memory_used)), VALUES(pod) BY _tsid, `bucket(@timestamp, 5m)`
* | STATS sum(rate_$1), `min(memory_used)` = min($p1) BY pod=`VALUES(pod)`, `bucket(@timestamp, 5m)`
* | KEEP `min(memory_used)`, `sum(rate_$1)`, pod, `bucket(@timestamp, 5m)`
*
* {agg}_over_time time-series aggregation will be rewritten in the similar way
*
* TS k8s | STATS sum(max_over_time(memory_usage)) BY host, bucket(@timestamp, 1minute)
*
* becomes
*
* FROM k8s
* | STATS max_over_time_$1 = max(memory_usage), host_values=VALUES(host) BY _tsid, time_bucket=bucket(@timestamp, 1minute)
* | STATS sum(max_over_time_$1) BY host_values, time_bucket
*
*
* TS k8s | STATS sum(avg_over_time(memory_usage)) BY host, bucket(@timestamp, 1minute)
*
* becomes
*
* FROM k8s
* | STATS avg_over_time_$1 = avg(memory_usage), host_values=VALUES(host) BY _tsid, time_bucket=bucket(@timestamp, 1minute)
* | STATS sum(avg_over_time_$1) BY host_values, time_bucket
*
* TS k8s | STATS max(rate(post_requests) + rate(get_requests)) BY host, bucket(@timestamp, 1minute)
*
* becomes
*
* FROM k8s
* | STATS rate_$1=rate(post_requests), rate_$2=rate(post_requests) BY _tsid, time_bucket=bucket(@timestamp, 1minute)
* | STATS max(rate_$1 + rate_$2) BY host_values, time_bucket
* </pre>
*/
public final | translates |
java | spring-projects__spring-boot | integration-test/spring-boot-actuator-integration-tests/src/test/java/org/springframework/boot/actuate/endpoint/web/reactive/ControllerEndpointHandlerMappingIntegrationTests.java | {
"start": 7529,
"end": 8602
} | class ____ {
@Bean
NettyReactiveWebServerFactory netty() {
return new NettyReactiveWebServerFactory(0);
}
@Bean
HttpHandler httpHandler(ApplicationContext applicationContext) {
return WebHttpHandlerBuilder.applicationContext(applicationContext).build();
}
@Bean
ControllerEndpointDiscoverer webEndpointDiscoverer(ApplicationContext applicationContext) {
return new ControllerEndpointDiscoverer(applicationContext, null, Collections.emptyList());
}
@Bean
ControllerEndpointHandlerMapping webEndpointHandlerMapping(ControllerEndpointsSupplier endpointsSupplier,
EndpointAccessResolver endpointAccessResolver) {
return new ControllerEndpointHandlerMapping(new EndpointMapping("actuator"),
endpointsSupplier.getEndpoints(), null, endpointAccessResolver);
}
@Bean
EndpointAccessResolver endpointAccessResolver(Environment environment) {
return (id, defaultAccess) -> environment.getProperty("endpoint-access", Access.class, Access.UNRESTRICTED);
}
}
@RestControllerEndpoint(id = "example")
static | EndpointConfiguration |
java | qos-ch__slf4j | log4j-over-slf4j/src/main/java/org/apache/log4j/Log4jLoggerFactory.java | {
"start": 796,
"end": 1095
} | class ____ a factory that creates and maintains org.apache.log4j.Loggers
* wrapping org.slf4j.Loggers.
*
* It keeps a hashtable of all created org.apache.log4j.Logger instances so that
* all newly created instances are not duplicates of existing loggers.
*
* @author Sébastien Pennec
*/
| is |
java | quarkusio__quarkus | integration-tests/spring-data-jpa/src/test/java/io/quarkus/it/spring/data/jpa/CustomerResourceTest.java | {
"start": 279,
"end": 1863
} | class ____ {
@Test
void testAll() {
when().get("/customer").then()
.statusCode(200)
.body("size()", is(3))
.body(containsString("Jason"))
.body(containsString("Homer"))
.body(containsString("Peter"));
}
@Test
void testAllActiveUsers() {
when().get("/customer/active").then()
.statusCode(200)
.body(containsString("Simpson"))
.body(containsString("Homer"));
}
@Test
void testAllInactiveUsers() {
when().get("/customer/inactive").then()
.statusCode(200)
.body(containsString("Peter"))
.body(containsString("Quin"));
}
@Test
void testFindById() {
when().get("/customer/1").then()
.statusCode(200)
.body(containsString("Jason"))
.body(containsString("Bourne"));
}
@Test
void testDeleteThenCustomerIsDisabled() {
when().get("/customer/active").then()
.statusCode(200)
.body("size()", is(2));
when().delete("/customer/1").then()
.statusCode(204);
when().get("/customer/active").then()
.statusCode(200)
.body("size()", is(1));
when().get("/customer/inactive").then()
.statusCode(200)
.body("size()", is(2))
.body(containsString("Jason"))
.body(containsString("Bourne"));
}
}
| CustomerResourceTest |
java | apache__camel | core/camel-util/src/test/java/org/apache/camel/util/MimeTypeHelperTest.java | {
"start": 992,
"end": 3472
} | class ____ {
@Test
void testSanitizeMimeTypes() {
assertNull(MimeTypeHelper.sanitizeMimeType(null));
assertEquals("application/json", MimeTypeHelper.sanitizeMimeType("application/json"));
assertEquals("application/xslt+xml", MimeTypeHelper.sanitizeMimeType("application/xslt xml"));
assertEquals("application/xslt+xml,application/xml",
MimeTypeHelper.sanitizeMimeType("application/xslt xml , application/xml"));
}
@Test
void testMimeType() {
assertEquals("application/xslt+xml", MimeTypeHelper.probeMimeType("xslt"));
assertEquals("application/json", MimeTypeHelper.probeMimeType("json"));
assertEquals("application/xml", MimeTypeHelper.probeMimeType("xml"));
assertEquals("text/plain", MimeTypeHelper.probeMimeType("txt"));
assertEquals("application/metalink4+xml", MimeTypeHelper.probeMimeType("meta4"));
assertEquals("application/xslt+xml", MimeTypeHelper.probeMimeType("foo.xslt"));
assertEquals("application/json", MimeTypeHelper.probeMimeType("foo.json"));
assertEquals("application/xml", MimeTypeHelper.probeMimeType("foo.xml"));
assertEquals("text/plain", MimeTypeHelper.probeMimeType("foo.txt"));
assertEquals("application/metalink4+xml", MimeTypeHelper.probeMimeType("foo.meta4"));
assertEquals("application/xslt+xml", MimeTypeHelper.probeMimeType("foo.bar.xslt"));
assertEquals("application/json", MimeTypeHelper.probeMimeType("foo.bar.json"));
assertEquals("application/xml", MimeTypeHelper.probeMimeType("foo.bar.xml"));
assertEquals("text/plain", MimeTypeHelper.probeMimeType("foo.bar.txt"));
assertEquals("application/metalink4+xml", MimeTypeHelper.probeMimeType("foo.meta4"));
assertEquals("application/xslt+xml", MimeTypeHelper.probeMimeType("FOO.BAR.XSLT"));
assertEquals("application/json", MimeTypeHelper.probeMimeType("FOO.BAR.JSON"));
assertEquals("application/xml", MimeTypeHelper.probeMimeType("foo.BAR.XmL"));
assertEquals("text/plain", MimeTypeHelper.probeMimeType("foo.bAr.TxT"));
assertEquals("application/metalink4+xml", MimeTypeHelper.probeMimeType("foo.meta4"));
// extra
assertEquals("text/yaml", MimeTypeHelper.probeMimeType("yaml"));
assertEquals("text/yaml", MimeTypeHelper.probeMimeType("yml"));
assertNull(null, MimeTypeHelper.probeMimeType("unknown"));
}
}
| MimeTypeHelperTest |
java | apache__flink | flink-python/src/main/java/org/apache/flink/table/runtime/typeutils/PythonTypeUtils.java | {
"start": 28785,
"end": 30254
} | class ____
extends DataConverter<MapData, Map<?, ?>, Map<?, ?>> {
private final DataConverter keyConverter;
private final DataConverter valueConverter;
MapDataConverter(
DataConverter keyConverter,
DataConverter valueConverter,
DataFormatConverters.DataFormatConverter<MapData, Map<?, ?>> dataFormatConverter) {
super(dataFormatConverter);
this.keyConverter = keyConverter;
this.valueConverter = valueConverter;
}
@SuppressWarnings("unchecked")
@Override
Map toInternalImpl(Map<?, ?> value) {
Map<Object, Object> map = new HashMap<>();
for (Map.Entry<?, ?> entry : value.entrySet()) {
map.put(
keyConverter.toInternalImpl(entry.getKey()),
valueConverter.toInternalImpl(entry.getValue()));
}
return map;
}
@SuppressWarnings("unchecked")
@Override
Map<?, ?> toExternalImpl(Map<?, ?> value) {
Map<Object, Object> map = new HashMap<>();
for (Map.Entry<?, ?> entry : value.entrySet()) {
map.put(
keyConverter.toExternalImpl(entry.getKey()),
valueConverter.toExternalImpl(entry.getValue()));
}
return map;
}
}
private static final | MapDataConverter |
java | grpc__grpc-java | xds/src/test/java/io/grpc/xds/ClusterImplLoadBalancerTest.java | {
"start": 4491,
"end": 59531
} | class ____ {
@Rule public final MockitoRule mocks = MockitoJUnit.rule();
private static final double TOLERANCE = 1.0e-10;
private static final String AUTHORITY = "api.google.com";
private static final String CLUSTER = "cluster-foo.googleapis.com";
private static final String EDS_SERVICE_NAME = "service.googleapis.com";
private static final ServerInfo LRS_SERVER_INFO =
ServerInfo.create("api.google.com", InsecureChannelCredentials.create());
private static final Metadata.Key<OrcaLoadReport> ORCA_ENDPOINT_LOAD_METRICS_KEY =
Metadata.Key.of(
"endpoint-load-metrics-bin",
ProtoUtils.metadataMarshaller(OrcaLoadReport.getDefaultInstance()));
private final SynchronizationContext syncContext = new SynchronizationContext(
new Thread.UncaughtExceptionHandler() {
@Override
public void uncaughtException(Thread t, Throwable e) {
throw new AssertionError(e);
}
});
private final FakeClock fakeClock = new FakeClock();
private final Locality locality =
Locality.create("test-region", "test-zone", "test-subzone");
private final Object roundRobin = GracefulSwitchLoadBalancer.createLoadBalancingPolicyConfig(
new FakeLoadBalancerProvider("round_robin"), null);
private final List<FakeLoadBalancer> downstreamBalancers = new ArrayList<>();
private final FakeTlsContextManager tlsContextManager = new FakeTlsContextManager();
private final LoadStatsManager2 loadStatsManager =
new LoadStatsManager2(fakeClock.getStopwatchSupplier());
private final FakeXdsClient xdsClient = new FakeXdsClient();
private final CallCounterProvider callCounterProvider = new CallCounterProvider() {
@Override
public AtomicLong getOrCreate(String cluster, @Nullable String edsServiceName) {
return new AtomicLong();
}
};
private final FakeLbHelper helper = new FakeLbHelper();
private PickSubchannelArgs pickSubchannelArgs = new PickSubchannelArgsImpl(
TestMethodDescriptors.voidMethod(), new Metadata(), CallOptions.DEFAULT,
new PickDetailsConsumer() {});
@Mock
private ThreadSafeRandom mockRandom;
private int xdsClientRefs;
private ConnectivityState currentState;
private SubchannelPicker currentPicker;
private ClusterImplLoadBalancer loadBalancer;
@Before
public void setUp() {
loadBalancer = new ClusterImplLoadBalancer(helper, mockRandom);
}
@After
public void tearDown() {
if (loadBalancer != null) {
loadBalancer.shutdown();
}
assertThat(xdsClientRefs).isEqualTo(0);
assertThat(downstreamBalancers).isEmpty();
}
@Test
public void handleResolvedAddresses_propagateToChildPolicy() {
FakeLoadBalancerProvider weightedTargetProvider =
new FakeLoadBalancerProvider(XdsLbPolicies.WEIGHTED_TARGET_POLICY_NAME);
Object weightedTargetConfig = new Object();
ClusterImplConfig config = new ClusterImplConfig(CLUSTER, EDS_SERVICE_NAME, LRS_SERVER_INFO,
null, Collections.<DropOverload>emptyList(),
GracefulSwitchLoadBalancer.createLoadBalancingPolicyConfig(
weightedTargetProvider, weightedTargetConfig),
null, Collections.emptyMap(), null);
EquivalentAddressGroup endpoint = makeAddress("endpoint-addr", locality);
deliverAddressesAndConfig(Collections.singletonList(endpoint), config);
FakeLoadBalancer childBalancer = Iterables.getOnlyElement(downstreamBalancers);
assertThat(Iterables.getOnlyElement(childBalancer.addresses)).isEqualTo(endpoint);
assertThat(childBalancer.config).isSameInstanceAs(weightedTargetConfig);
assertThat(childBalancer.attributes.get(io.grpc.xds.XdsAttributes.XDS_CLIENT))
.isSameInstanceAs(xdsClient);
assertThat(childBalancer.attributes.get(NameResolver.ATTR_BACKEND_SERVICE)).isEqualTo(CLUSTER);
}
/**
* If the control plane switches from using the legacy lb_policy field in the xDS Cluster proto
* to the newer load_balancing_policy then the child policy can switch from weighted_target to
* xds_wrr_locality (this could happen the opposite way as well). This test assures that this
* results in the child LB changing if this were to happen. If this is not done correctly the new
* configuration would be given to the old LB implementation which would cause a channel panic.
*/
@Test
public void handleResolvedAddresses_childPolicyChanges() {
FakeLoadBalancerProvider weightedTargetProvider =
new FakeLoadBalancerProvider(XdsLbPolicies.WEIGHTED_TARGET_POLICY_NAME);
Object weightedTargetConfig = new Object();
ClusterImplConfig configWithWeightedTarget = new ClusterImplConfig(CLUSTER, EDS_SERVICE_NAME,
LRS_SERVER_INFO,
null, Collections.<DropOverload>emptyList(),
GracefulSwitchLoadBalancer.createLoadBalancingPolicyConfig(
weightedTargetProvider, weightedTargetConfig),
null, Collections.emptyMap(), null);
EquivalentAddressGroup endpoint = makeAddress("endpoint-addr", locality);
deliverAddressesAndConfig(Collections.singletonList(endpoint), configWithWeightedTarget);
FakeLoadBalancer childBalancer = Iterables.getOnlyElement(downstreamBalancers);
assertThat(childBalancer.name).isEqualTo(XdsLbPolicies.WEIGHTED_TARGET_POLICY_NAME);
assertThat(childBalancer.config).isSameInstanceAs(weightedTargetConfig);
FakeLoadBalancerProvider wrrLocalityProvider =
new FakeLoadBalancerProvider(XdsLbPolicies.WRR_LOCALITY_POLICY_NAME);
Object wrrLocalityConfig = new Object();
ClusterImplConfig configWithWrrLocality = new ClusterImplConfig(CLUSTER, EDS_SERVICE_NAME,
LRS_SERVER_INFO,
null, Collections.<DropOverload>emptyList(),
GracefulSwitchLoadBalancer.createLoadBalancingPolicyConfig(
wrrLocalityProvider, wrrLocalityConfig),
null, Collections.emptyMap(), null);
deliverAddressesAndConfig(Collections.singletonList(endpoint), configWithWrrLocality);
childBalancer = Iterables.getOnlyElement(downstreamBalancers);
assertThat(childBalancer.name).isEqualTo(XdsLbPolicies.WRR_LOCALITY_POLICY_NAME);
assertThat(childBalancer.config).isSameInstanceAs(wrrLocalityConfig);
}
@Test
public void nameResolutionError_beforeChildPolicyInstantiated_returnErrorPickerToUpstream() {
loadBalancer.handleNameResolutionError(Status.UNIMPLEMENTED.withDescription("not found"));
assertThat(currentState).isEqualTo(ConnectivityState.TRANSIENT_FAILURE);
PickResult result = currentPicker.pickSubchannel(pickSubchannelArgs);
assertThat(result.getStatus().isOk()).isFalse();
assertThat(result.getStatus().getCode()).isEqualTo(Code.UNIMPLEMENTED);
assertThat(result.getStatus().getDescription()).isEqualTo("not found");
}
@Test
public void nameResolutionError_afterChildPolicyInstantiated_propagateToDownstream() {
FakeLoadBalancerProvider weightedTargetProvider =
new FakeLoadBalancerProvider(XdsLbPolicies.WEIGHTED_TARGET_POLICY_NAME);
Object weightedTargetConfig = new Object();
ClusterImplConfig config = new ClusterImplConfig(CLUSTER, EDS_SERVICE_NAME, LRS_SERVER_INFO,
null, Collections.<DropOverload>emptyList(),
GracefulSwitchLoadBalancer.createLoadBalancingPolicyConfig(
weightedTargetProvider, weightedTargetConfig),
null, Collections.emptyMap(), null);
EquivalentAddressGroup endpoint = makeAddress("endpoint-addr", locality);
deliverAddressesAndConfig(Collections.singletonList(endpoint), config);
FakeLoadBalancer childBalancer = Iterables.getOnlyElement(downstreamBalancers);
loadBalancer.handleNameResolutionError(
Status.UNAVAILABLE.withDescription("cannot reach server"));
assertThat(childBalancer.upstreamError.getCode()).isEqualTo(Code.UNAVAILABLE);
assertThat(childBalancer.upstreamError.getDescription())
.isEqualTo("cannot reach server");
}
@Test
public void pick_addsOptionalLabels() {
LoadBalancerProvider weightedTargetProvider = new WeightedTargetLoadBalancerProvider();
WeightedTargetConfig weightedTargetConfig =
buildWeightedTargetConfig(ImmutableMap.of(locality, 10));
ClusterImplConfig config = new ClusterImplConfig(CLUSTER, EDS_SERVICE_NAME, LRS_SERVER_INFO,
null, Collections.<DropOverload>emptyList(),
GracefulSwitchLoadBalancer.createLoadBalancingPolicyConfig(
weightedTargetProvider, weightedTargetConfig),
null, Collections.emptyMap(), null);
EquivalentAddressGroup endpoint = makeAddress("endpoint-addr", locality);
deliverAddressesAndConfig(Collections.singletonList(endpoint), config);
FakeLoadBalancer leafBalancer = Iterables.getOnlyElement(downstreamBalancers);
leafBalancer.createSubChannel();
FakeSubchannel fakeSubchannel = helper.subchannels.poll();
fakeSubchannel.updateState(ConnectivityStateInfo.forNonError(ConnectivityState.CONNECTING));
fakeSubchannel.setConnectedEagIndex(0);
fakeSubchannel.updateState(ConnectivityStateInfo.forNonError(ConnectivityState.READY));
assertThat(currentState).isEqualTo(ConnectivityState.READY);
PickDetailsConsumer detailsConsumer = mock(PickDetailsConsumer.class);
pickSubchannelArgs = new PickSubchannelArgsImpl(
TestMethodDescriptors.voidMethod(), new Metadata(), CallOptions.DEFAULT, detailsConsumer);
PickResult result = currentPicker.pickSubchannel(pickSubchannelArgs);
assertThat(result.getStatus().isOk()).isTrue();
// The value will be determined by the parent policy, so can be different than the value used in
// makeAddress() for the test.
verify(detailsConsumer).addOptionalLabel("grpc.lb.locality", locality.toString());
verify(detailsConsumer).addOptionalLabel("grpc.lb.backend_service", CLUSTER);
}
@Test
public void pick_noResult_addsClusterLabel() {
LoadBalancerProvider weightedTargetProvider = new WeightedTargetLoadBalancerProvider();
WeightedTargetConfig weightedTargetConfig =
buildWeightedTargetConfig(ImmutableMap.of(locality, 10));
ClusterImplConfig config = new ClusterImplConfig(CLUSTER, EDS_SERVICE_NAME, LRS_SERVER_INFO,
null, Collections.<DropOverload>emptyList(),
GracefulSwitchLoadBalancer.createLoadBalancingPolicyConfig(
weightedTargetProvider, weightedTargetConfig),
null, Collections.emptyMap(), null);
EquivalentAddressGroup endpoint = makeAddress("endpoint-addr", locality);
deliverAddressesAndConfig(Collections.singletonList(endpoint), config);
FakeLoadBalancer leafBalancer = Iterables.getOnlyElement(downstreamBalancers);
leafBalancer.deliverSubchannelState(PickResult.withNoResult(), ConnectivityState.CONNECTING);
assertThat(currentState).isEqualTo(ConnectivityState.CONNECTING);
PickDetailsConsumer detailsConsumer = mock(PickDetailsConsumer.class);
pickSubchannelArgs = new PickSubchannelArgsImpl(
TestMethodDescriptors.voidMethod(), new Metadata(), CallOptions.DEFAULT, detailsConsumer);
PickResult result = currentPicker.pickSubchannel(pickSubchannelArgs);
assertThat(result.getStatus().isOk()).isTrue();
verify(detailsConsumer).addOptionalLabel("grpc.lb.backend_service", CLUSTER);
}
  @Test
  public void recordLoadStats() {
    // Set up a cluster with one locality/endpoint and bring its subchannel to READY.
    LoadBalancerProvider weightedTargetProvider = new WeightedTargetLoadBalancerProvider();
    WeightedTargetConfig weightedTargetConfig =
        buildWeightedTargetConfig(ImmutableMap.of(locality, 10));
    ClusterImplConfig config = new ClusterImplConfig(CLUSTER, EDS_SERVICE_NAME, LRS_SERVER_INFO,
        null, Collections.<DropOverload>emptyList(),
        GracefulSwitchLoadBalancer.createLoadBalancingPolicyConfig(
            weightedTargetProvider, weightedTargetConfig),
        null, Collections.emptyMap(), null);
    EquivalentAddressGroup endpoint = makeAddress("endpoint-addr", locality);
    deliverAddressesAndConfig(Collections.singletonList(endpoint), config);
    FakeLoadBalancer leafBalancer = Iterables.getOnlyElement(downstreamBalancers);
    Subchannel subchannel = leafBalancer.createSubChannel();
    FakeSubchannel fakeSubchannel = helper.subchannels.poll();
    fakeSubchannel.updateState(ConnectivityStateInfo.forNonError(ConnectivityState.CONNECTING));
    fakeSubchannel.setConnectedEagIndex(0);
    fakeSubchannel.updateState(ConnectivityStateInfo.forNonError(ConnectivityState.READY));
    assertThat(currentState).isEqualTo(ConnectivityState.READY);
    PickResult result = currentPicker.pickSubchannel(pickSubchannelArgs);
    assertThat(result.getStatus().isOk()).isTrue();
    // Three RPCs are "issued" by creating three stream tracers from the pick result.
    ClientStreamTracer streamTracer1 = result.getStreamTracerFactory().newClientStreamTracer(
        ClientStreamTracer.StreamInfo.newBuilder().build(), new Metadata()); // first RPC call
    ClientStreamTracer streamTracer2 = result.getStreamTracerFactory().newClientStreamTracer(
        ClientStreamTracer.StreamInfo.newBuilder().build(), new Metadata()); // second RPC call
    ClientStreamTracer streamTracer3 = result.getStreamTracerFactory().newClientStreamTracer(
        ClientStreamTracer.StreamInfo.newBuilder().build(), new Metadata()); // third RPC call
    // When the trailer contains an ORCA report, the listener callback will be invoked.
    // First RPC: closes OK and carries named metrics named1/named2.
    Metadata trailersWithOrcaLoadReport1 = new Metadata();
    trailersWithOrcaLoadReport1.put(ORCA_ENDPOINT_LOAD_METRICS_KEY,
        OrcaLoadReport.newBuilder().setApplicationUtilization(1.414).setMemUtilization(0.034)
            .setRpsFractional(1.414).putNamedMetrics("named1", 3.14159)
            .putNamedMetrics("named2", -1.618).build());
    streamTracer1.inboundTrailers(trailersWithOrcaLoadReport1);
    streamTracer1.streamClosed(Status.OK);
    // Second RPC: closes UNAVAILABLE (counted as an error) with named1/named2/named3.
    Metadata trailersWithOrcaLoadReport2 = new Metadata();
    trailersWithOrcaLoadReport2.put(ORCA_ENDPOINT_LOAD_METRICS_KEY,
        OrcaLoadReport.newBuilder().setApplicationUtilization(0.99).setMemUtilization(0.123)
            .setRpsFractional(0.905).putNamedMetrics("named1", 2.718)
            .putNamedMetrics("named2", 1.414)
            .putNamedMetrics("named3", 0.009).build());
    streamTracer2.inboundTrailers(trailersWithOrcaLoadReport2);
    streamTracer2.streamClosed(Status.UNAVAILABLE);
    // First report: 3 issued, 1 OK, 1 error, 1 still in progress; named metrics are
    // accumulated per key across the two finished RPCs.
    ClusterStats clusterStats =
        Iterables.getOnlyElement(loadStatsManager.getClusterStatsReports(CLUSTER));
    UpstreamLocalityStats localityStats =
        Iterables.getOnlyElement(clusterStats.upstreamLocalityStatsList());
    assertThat(localityStats.locality()).isEqualTo(locality)
;
    assertThat(localityStats.totalIssuedRequests()).isEqualTo(3L);
    assertThat(localityStats.totalSuccessfulRequests()).isEqualTo(1L);
    assertThat(localityStats.totalErrorRequests()).isEqualTo(1L);
    assertThat(localityStats.totalRequestsInProgress()).isEqualTo(1L);
    assertThat(localityStats.loadMetricStatsMap().containsKey("named1")).isTrue();
    assertThat(
        localityStats.loadMetricStatsMap().get("named1").numRequestsFinishedWithMetric()).isEqualTo(
        2L);
    assertThat(localityStats.loadMetricStatsMap().get("named1").totalMetricValue()).isWithin(
        TOLERANCE).of(3.14159 + 2.718);
    assertThat(localityStats.loadMetricStatsMap().containsKey("named2")).isTrue();
    assertThat(
        localityStats.loadMetricStatsMap().get("named2").numRequestsFinishedWithMetric()).isEqualTo(
        2L);
    assertThat(localityStats.loadMetricStatsMap().get("named2").totalMetricValue()).isWithin(
        TOLERANCE).of(-1.618 + 1.414);
    assertThat(localityStats.loadMetricStatsMap().containsKey("named3")).isTrue();
    assertThat(
        localityStats.loadMetricStatsMap().get("named3").numRequestsFinishedWithMetric()).isEqualTo(
        1L);
    assertThat(localityStats.loadMetricStatsMap().get("named3").totalMetricValue()).isWithin(
        TOLERANCE).of(0.009);
    streamTracer3.streamClosed(Status.OK);
    subchannel.shutdown();  // stats recorder released
    clusterStats = Iterables.getOnlyElement(loadStatsManager.getClusterStatsReports(CLUSTER));
    // Locality load is reported for one last time in case of loads occurred since the previous
    // load report.
    localityStats = Iterables.getOnlyElement(clusterStats.upstreamLocalityStatsList());
    assertThat(localityStats.locality()).isEqualTo(locality);
    assertThat(localityStats.totalIssuedRequests()).isEqualTo(0L);
    assertThat(localityStats.totalSuccessfulRequests()).isEqualTo(1L);
    assertThat(localityStats.totalErrorRequests()).isEqualTo(0L);
    assertThat(localityStats.totalRequestsInProgress()).isEqualTo(0L);
    assertThat(localityStats.loadMetricStatsMap().isEmpty()).isTrue();
    // After the final flush, the locality is no longer present in subsequent reports.
    clusterStats = Iterables.getOnlyElement(loadStatsManager.getClusterStatsReports(CLUSTER));
    assertThat(clusterStats.upstreamLocalityStatsList()).isEmpty();  // no longer reported
  }
@Test
public void recordLoadStats_orcaLrsPropagationEnabled() {
boolean originalVal = LoadStatsManager2.isEnabledOrcaLrsPropagation;
LoadStatsManager2.isEnabledOrcaLrsPropagation = true;
BackendMetricPropagation backendMetricPropagation = BackendMetricPropagation.fromMetricSpecs(
Arrays.asList("application_utilization", "cpu_utilization", "named_metrics.named1"));
LoadBalancerProvider weightedTargetProvider = new WeightedTargetLoadBalancerProvider();
WeightedTargetConfig weightedTargetConfig =
buildWeightedTargetConfig(ImmutableMap.of(locality, 10));
ClusterImplConfig config = new ClusterImplConfig(CLUSTER, EDS_SERVICE_NAME, LRS_SERVER_INFO,
null, Collections.<DropOverload>emptyList(),
GracefulSwitchLoadBalancer.createLoadBalancingPolicyConfig(
weightedTargetProvider, weightedTargetConfig),
null, Collections.emptyMap(), backendMetricPropagation);
EquivalentAddressGroup endpoint = makeAddress("endpoint-addr", locality);
deliverAddressesAndConfig(Collections.singletonList(endpoint), config);
FakeLoadBalancer leafBalancer = Iterables.getOnlyElement(downstreamBalancers);
Subchannel subchannel = leafBalancer.createSubChannel();
FakeSubchannel fakeSubchannel = helper.subchannels.poll();
fakeSubchannel.updateState(ConnectivityStateInfo.forNonError(ConnectivityState.CONNECTING));
fakeSubchannel.setConnectedEagIndex(0);
fakeSubchannel.updateState(ConnectivityStateInfo.forNonError(ConnectivityState.READY));
assertThat(currentState).isEqualTo(ConnectivityState.READY);
PickResult result = currentPicker.pickSubchannel(pickSubchannelArgs);
assertThat(result.getStatus().isOk()).isTrue();
ClientStreamTracer streamTracer = result.getStreamTracerFactory().newClientStreamTracer(
ClientStreamTracer.StreamInfo.newBuilder().build(), new Metadata());
Metadata trailersWithOrcaLoadReport = new Metadata();
trailersWithOrcaLoadReport.put(ORCA_ENDPOINT_LOAD_METRICS_KEY,
OrcaLoadReport.newBuilder()
.setApplicationUtilization(1.414)
.setCpuUtilization(0.5)
.setMemUtilization(0.034)
.putNamedMetrics("named1", 3.14159)
.putNamedMetrics("named2", -1.618).build());
streamTracer.inboundTrailers(trailersWithOrcaLoadReport);
streamTracer.streamClosed(Status.OK);
ClusterStats clusterStats =
Iterables.getOnlyElement(loadStatsManager.getClusterStatsReports(CLUSTER));
UpstreamLocalityStats localityStats =
Iterables.getOnlyElement(clusterStats.upstreamLocalityStatsList());
assertThat(localityStats.loadMetricStatsMap()).containsKey("application_utilization");
assertThat(localityStats.loadMetricStatsMap().get("application_utilization").totalMetricValue())
.isWithin(TOLERANCE).of(1.414);
assertThat(localityStats.loadMetricStatsMap()).containsKey("cpu_utilization");
assertThat(localityStats.loadMetricStatsMap().get("cpu_utilization").totalMetricValue())
.isWithin(TOLERANCE).of(0.5);
assertThat(localityStats.loadMetricStatsMap()).doesNotContainKey("mem_utilization");
assertThat(localityStats.loadMetricStatsMap()).containsKey("named_metrics.named1");
assertThat(localityStats.loadMetricStatsMap().get("named_metrics.named1").totalMetricValue())
.isWithin(TOLERANCE).of(3.14159);
assertThat(localityStats.loadMetricStatsMap()).doesNotContainKey("named_metrics.named2");
subchannel.shutdown();
LoadStatsManager2.isEnabledOrcaLrsPropagation = originalVal;
}
@Test
public void recordLoadStats_orcaLrsPropagationDisabled() {
boolean originalVal = LoadStatsManager2.isEnabledOrcaLrsPropagation;
LoadStatsManager2.isEnabledOrcaLrsPropagation = false;
BackendMetricPropagation backendMetricPropagation = BackendMetricPropagation.fromMetricSpecs(
Arrays.asList("application_utilization", "cpu_utilization", "named_metrics.named1"));
LoadBalancerProvider weightedTargetProvider = new WeightedTargetLoadBalancerProvider();
WeightedTargetConfig weightedTargetConfig =
buildWeightedTargetConfig(ImmutableMap.of(locality, 10));
ClusterImplConfig config = new ClusterImplConfig(CLUSTER, EDS_SERVICE_NAME, LRS_SERVER_INFO,
null, Collections.<DropOverload>emptyList(),
GracefulSwitchLoadBalancer.createLoadBalancingPolicyConfig(
weightedTargetProvider, weightedTargetConfig),
null, Collections.emptyMap(), backendMetricPropagation);
EquivalentAddressGroup endpoint = makeAddress("endpoint-addr", locality);
deliverAddressesAndConfig(Collections.singletonList(endpoint), config);
FakeLoadBalancer leafBalancer = Iterables.getOnlyElement(downstreamBalancers);
Subchannel subchannel = leafBalancer.createSubChannel();
FakeSubchannel fakeSubchannel = helper.subchannels.poll();
fakeSubchannel.updateState(ConnectivityStateInfo.forNonError(ConnectivityState.CONNECTING));
fakeSubchannel.setConnectedEagIndex(0);
fakeSubchannel.updateState(ConnectivityStateInfo.forNonError(ConnectivityState.READY));
assertThat(currentState).isEqualTo(ConnectivityState.READY);
PickResult result = currentPicker.pickSubchannel(pickSubchannelArgs);
assertThat(result.getStatus().isOk()).isTrue();
ClientStreamTracer streamTracer = result.getStreamTracerFactory().newClientStreamTracer(
ClientStreamTracer.StreamInfo.newBuilder().build(), new Metadata());
Metadata trailersWithOrcaLoadReport = new Metadata();
trailersWithOrcaLoadReport.put(ORCA_ENDPOINT_LOAD_METRICS_KEY,
OrcaLoadReport.newBuilder()
.setApplicationUtilization(1.414)
.setCpuUtilization(0.5)
.setMemUtilization(0.034)
.putNamedMetrics("named1", 3.14159)
.putNamedMetrics("named2", -1.618).build());
streamTracer.inboundTrailers(trailersWithOrcaLoadReport);
streamTracer.streamClosed(Status.OK);
ClusterStats clusterStats =
Iterables.getOnlyElement(loadStatsManager.getClusterStatsReports(CLUSTER));
UpstreamLocalityStats localityStats =
Iterables.getOnlyElement(clusterStats.upstreamLocalityStatsList());
assertThat(localityStats.loadMetricStatsMap()).doesNotContainKey("application_utilization");
assertThat(localityStats.loadMetricStatsMap()).doesNotContainKey("cpu_utilization");
assertThat(localityStats.loadMetricStatsMap()).doesNotContainKey("mem_utilization");
assertThat(localityStats.loadMetricStatsMap()).doesNotContainKey("named_metrics.named1");
assertThat(localityStats.loadMetricStatsMap()).doesNotContainKey("named_metrics.named2");
assertThat(localityStats.loadMetricStatsMap().containsKey("named1")).isTrue();
assertThat(localityStats.loadMetricStatsMap().containsKey("named2")).isTrue();
subchannel.shutdown();
LoadStatsManager2.isEnabledOrcaLrsPropagation = originalVal;
}
  // Verifies https://github.com/grpc/grpc-java/issues/11434: per-locality load must
  // follow the subchannel when pick_first reconnects to an address in a different
  // locality.
  @Test
  public void pickFirstLoadReport_onUpdateAddress() {
    Locality locality1 =
        Locality.create("test-region", "test-zone", "test-subzone");
    Locality locality2 =
        Locality.create("other-region", "other-zone", "other-subzone");
    // Use the real pick_first provider with its default (empty) config.
    LoadBalancerProvider pickFirstProvider = LoadBalancerRegistry
        .getDefaultRegistry().getProvider("pick_first");
    Object pickFirstConfig = pickFirstProvider.parseLoadBalancingPolicyConfig(new HashMap<>())
        .getConfig();
    ClusterImplConfig config = new ClusterImplConfig(CLUSTER, EDS_SERVICE_NAME, LRS_SERVER_INFO,
        null, Collections.<DropOverload>emptyList(),
        GracefulSwitchLoadBalancer.createLoadBalancingPolicyConfig(pickFirstProvider,
            pickFirstConfig),
        null, Collections.emptyMap(), null);
    // Two endpoints in two different localities.
    EquivalentAddressGroup endpoint1 = makeAddress("endpoint-addr1", locality1);
    EquivalentAddressGroup endpoint2 = makeAddress("endpoint-addr2", locality2);
    deliverAddressesAndConfig(Arrays.asList(endpoint1, endpoint2), config);
    // Leaf balancer is created by Pick First. Get FakeSubchannel created to update attributes.
    // A real subchannel would get these attributes from the connected address's EAG locality.
    FakeSubchannel fakeSubchannel = helper.subchannels.poll();
    fakeSubchannel.updateState(ConnectivityStateInfo.forNonError(ConnectivityState.CONNECTING));
    fakeSubchannel.setConnectedEagIndex(0);
    fakeSubchannel.updateState(ConnectivityStateInfo.forNonError(ConnectivityState.READY));
    assertThat(currentState).isEqualTo(ConnectivityState.READY);
    PickResult result = currentPicker.pickSubchannel(pickSubchannelArgs);
    assertThat(result.getStatus().isOk()).isTrue();
    // First RPC completes OK while connected to locality1.
    ClientStreamTracer streamTracer1 = result.getStreamTracerFactory().newClientStreamTracer(
        ClientStreamTracer.StreamInfo.newBuilder().build(), new Metadata()); // first RPC call
    streamTracer1.streamClosed(Status.OK);
    ClusterStats clusterStats = Iterables.getOnlyElement(
        loadStatsManager.getClusterStatsReports(CLUSTER));
    UpstreamLocalityStats localityStats = Iterables.getOnlyElement(
        clusterStats.upstreamLocalityStatsList());
    assertThat(localityStats.locality()).isEqualTo(locality1);
    assertThat(localityStats.totalIssuedRequests()).isEqualTo(1L);
    assertThat(localityStats.totalSuccessfulRequests()).isEqualTo(1L);
    assertThat(localityStats.totalErrorRequests()).isEqualTo(0L);
    // Drop to IDLE and reconnect; the subchannel ends up on the second address.
    fakeSubchannel.updateState(ConnectivityStateInfo.forNonError(ConnectivityState.IDLE));
    loadBalancer.requestConnection();
    fakeSubchannel.updateState(ConnectivityStateInfo.forNonError(ConnectivityState.CONNECTING));
    // FakeSubchannel mimics an address update and now reports a different locality.
    // The two branches reflect old vs. new pick_first: the new implementation uses a
    // fresh subchannel per address, the old one switches the connected EAG in place.
    if (PickFirstLoadBalancerProvider.isEnabledNewPickFirst()) {
      fakeSubchannel.updateState(ConnectivityStateInfo.forTransientFailure(
          Status.UNAVAILABLE.withDescription("Try second address instead")));
      fakeSubchannel = helper.subchannels.poll();
      fakeSubchannel.updateState(ConnectivityStateInfo.forNonError(ConnectivityState.CONNECTING));
      fakeSubchannel.setConnectedEagIndex(0);
      fakeSubchannel.updateState(ConnectivityStateInfo.forNonError(ConnectivityState.READY));
    } else {
      fakeSubchannel.setConnectedEagIndex(1);
      fakeSubchannel.updateState(ConnectivityStateInfo.forNonError(ConnectivityState.READY));
    }
    result = currentPicker.pickSubchannel(pickSubchannelArgs);
    assertThat(result.getStatus().isOk()).isTrue();
    // Second RPC fails while connected to locality2; its load must be attributed there.
    ClientStreamTracer streamTracer2 = result.getStreamTracerFactory().newClientStreamTracer(
        ClientStreamTracer.StreamInfo.newBuilder().build(), new Metadata()); // second RPC call
    streamTracer2.streamClosed(Status.UNAVAILABLE);
    clusterStats = Iterables.getOnlyElement(loadStatsManager.getClusterStatsReports(CLUSTER));
    List<UpstreamLocalityStats> upstreamLocalityStatsList =
        clusterStats.upstreamLocalityStatsList();
    UpstreamLocalityStats localityStats1 = Iterables.find(upstreamLocalityStatsList,
        upstreamLocalityStats -> upstreamLocalityStats.locality().equals(locality1));
    assertThat(localityStats1.totalIssuedRequests()).isEqualTo(0L);
    assertThat(localityStats1.totalSuccessfulRequests()).isEqualTo(0L);
    assertThat(localityStats1.totalErrorRequests()).isEqualTo(0L);
    UpstreamLocalityStats localityStats2 = Iterables.find(upstreamLocalityStatsList,
        upstreamLocalityStats -> upstreamLocalityStats.locality().equals(locality2));
    assertThat(localityStats2.totalIssuedRequests()).isEqualTo(1L);
    assertThat(localityStats2.totalSuccessfulRequests()).isEqualTo(0L);
    assertThat(localityStats2.totalErrorRequests()).isEqualTo(1L);
    loadBalancer.shutdown();
    loadBalancer = null;
    // No more references are held for localityStats1 hence dropped.
    // Locality load is reported for one last time in case of loads occurred since the previous
    // load report.
    clusterStats = Iterables.getOnlyElement(loadStatsManager.getClusterStatsReports(CLUSTER));
    localityStats2 = Iterables.getOnlyElement(clusterStats.upstreamLocalityStatsList());
    assertThat(localityStats2.locality()).isEqualTo(locality2);
    assertThat(localityStats2.totalIssuedRequests()).isEqualTo(0L);
    assertThat(localityStats2.totalSuccessfulRequests()).isEqualTo(0L);
    assertThat(localityStats2.totalErrorRequests()).isEqualTo(0L);
    assertThat(localityStats2.totalRequestsInProgress()).isEqualTo(0L);
    // After the final flush no reports remain for the cluster.
    assertThat(loadStatsManager.getClusterStatsReports(CLUSTER)).isEmpty();
  }
  @Test
  public void dropRpcsWithRespectToLbConfigDropCategories() {
    // Configure one drop category "throttle" at 50% (500_000 out of 1_000_000).
    LoadBalancerProvider weightedTargetProvider = new WeightedTargetLoadBalancerProvider();
    WeightedTargetConfig weightedTargetConfig =
        buildWeightedTargetConfig(ImmutableMap.of(locality, 10));
    ClusterImplConfig config = new ClusterImplConfig(CLUSTER, EDS_SERVICE_NAME, LRS_SERVER_INFO,
        null, Collections.singletonList(DropOverload.create("throttle", 500_000)),
        GracefulSwitchLoadBalancer.createLoadBalancingPolicyConfig(
            weightedTargetProvider, weightedTargetConfig),
        null, Collections.emptyMap(), null);
    EquivalentAddressGroup endpoint = makeAddress("endpoint-addr", locality);
    deliverAddressesAndConfig(Collections.singletonList(endpoint), config);
    // Scripted random draws: 499_999 (< 500_000, dropped), then 999_999 and 1_000_000
    // for the subsequent picks after the config update.
    when(mockRandom.nextInt(anyInt())).thenReturn(499_999, 999_999, 1_000_000);
    assertThat(downstreamBalancers).hasSize(1);  // one leaf balancer
    FakeLoadBalancer leafBalancer = Iterables.getOnlyElement(downstreamBalancers);
    assertThat(leafBalancer.name).isEqualTo("round_robin");
    assertThat(Iterables.getOnlyElement(leafBalancer.addresses).getAddresses())
        .isEqualTo(endpoint.getAddresses());
    leafBalancer.createSubChannel();
    FakeSubchannel fakeSubchannel = helper.subchannels.poll();
    fakeSubchannel.updateState(ConnectivityStateInfo.forNonError(ConnectivityState.CONNECTING));
    fakeSubchannel.setConnectedEagIndex(0);
    fakeSubchannel.updateState(ConnectivityStateInfo.forNonError(ConnectivityState.READY));
    assertThat(currentState).isEqualTo(ConnectivityState.READY);
    // First pick draws 499_999 < 500_000 -> dropped under "throttle".
    PickResult result = currentPicker.pickSubchannel(pickSubchannelArgs);
    assertThat(result.getStatus().isOk()).isFalse();
    assertThat(result.getStatus().getCode()).isEqualTo(Code.UNAVAILABLE);
    assertThat(result.getStatus().getDescription()).isEqualTo("Dropped: throttle");
    ClusterStats clusterStats =
        Iterables.getOnlyElement(loadStatsManager.getClusterStatsReports(CLUSTER));
    assertThat(clusterStats.clusterServiceName()).isEqualTo(EDS_SERVICE_NAME);
    assertThat(Iterables.getOnlyElement(clusterStats.droppedRequestsList()).category())
        .isEqualTo("throttle");
    assertThat(Iterables.getOnlyElement(clusterStats.droppedRequestsList()).droppedCount())
        .isEqualTo(1L);
    assertThat(clusterStats.totalDroppedRequests()).isEqualTo(1L);
    // Config update updates drop policies: replace "throttle" with "lb" at 100%.
    config = new ClusterImplConfig(CLUSTER, EDS_SERVICE_NAME, LRS_SERVER_INFO, null,
        Collections.singletonList(DropOverload.create("lb", 1_000_000)),
        GracefulSwitchLoadBalancer.createLoadBalancingPolicyConfig(
            weightedTargetProvider, weightedTargetConfig),
        null, Collections.emptyMap(), null);
    loadBalancer.acceptResolvedAddresses(
        ResolvedAddresses.newBuilder()
            .setAddresses(Collections.singletonList(endpoint))
            .setAttributes(
                Attributes.newBuilder()
                    .set(io.grpc.xds.XdsAttributes.XDS_CLIENT, xdsClient)
                    .build())
            .setLoadBalancingPolicyConfig(config)
            .build());
    // Second pick draws 999_999 < 1_000_000 -> dropped under "lb".
    result = currentPicker.pickSubchannel(pickSubchannelArgs);
    assertThat(result.getStatus().isOk()).isFalse();
    assertThat(result.getStatus().getCode()).isEqualTo(Code.UNAVAILABLE);
    assertThat(result.getStatus().getDescription()).isEqualTo("Dropped: lb");
    clusterStats =
        Iterables.getOnlyElement(loadStatsManager.getClusterStatsReports(CLUSTER));
    assertThat(clusterStats.clusterServiceName()).isEqualTo(EDS_SERVICE_NAME);
    assertThat(Iterables.getOnlyElement(clusterStats.droppedRequestsList()).category())
        .isEqualTo("lb");
    assertThat(Iterables.getOnlyElement(clusterStats.droppedRequestsList()).droppedCount())
        .isEqualTo(1L);
    assertThat(clusterStats.totalDroppedRequests()).isEqualTo(1L);
    // Third pick draws 1_000_000, which is NOT < 1_000_000 -> not dropped.
    result = currentPicker.pickSubchannel(pickSubchannelArgs);
    assertThat(result.getStatus().isOk()).isTrue();
  }
@Test
public void maxConcurrentRequests_appliedByLbConfig_disableCircuitBreaking() {
boolean originalEnableCircuitBreaking = ClusterImplLoadBalancer.enableCircuitBreaking;
ClusterImplLoadBalancer.enableCircuitBreaking = false;
subtest_maxConcurrentRequests_appliedByLbConfig(false);
ClusterImplLoadBalancer.enableCircuitBreaking = originalEnableCircuitBreaking;
}
  @Test
  public void maxConcurrentRequests_appliedByLbConfig_circuitBreakingEnabledByDefault() {
    // Circuit breaking is on by default, so no static-flag manipulation is needed.
    subtest_maxConcurrentRequests_appliedByLbConfig(true);
  }
private void subtest_maxConcurrentRequests_appliedByLbConfig(boolean enableCircuitBreaking) {
long maxConcurrentRequests = 100L;
LoadBalancerProvider weightedTargetProvider = new WeightedTargetLoadBalancerProvider();
WeightedTargetConfig weightedTargetConfig =
buildWeightedTargetConfig(ImmutableMap.of(locality, 10));
ClusterImplConfig config = new ClusterImplConfig(CLUSTER, EDS_SERVICE_NAME, LRS_SERVER_INFO,
maxConcurrentRequests, Collections.<DropOverload>emptyList(),
GracefulSwitchLoadBalancer.createLoadBalancingPolicyConfig(
weightedTargetProvider, weightedTargetConfig),
null, Collections.emptyMap(), null);
EquivalentAddressGroup endpoint = makeAddress("endpoint-addr", locality);
deliverAddressesAndConfig(Collections.singletonList(endpoint), config);
assertThat(downstreamBalancers).hasSize(1); // one leaf balancer
FakeLoadBalancer leafBalancer = Iterables.getOnlyElement(downstreamBalancers);
assertThat(leafBalancer.name).isEqualTo("round_robin");
assertThat(Iterables.getOnlyElement(leafBalancer.addresses).getAddresses())
.isEqualTo(endpoint.getAddresses());
leafBalancer.createSubChannel();
FakeSubchannel fakeSubchannel = helper.subchannels.poll();
fakeSubchannel.updateState(ConnectivityStateInfo.forNonError(ConnectivityState.CONNECTING));
fakeSubchannel.setConnectedEagIndex(0);
fakeSubchannel.updateState(ConnectivityStateInfo.forNonError(ConnectivityState.READY));
assertThat(currentState).isEqualTo(ConnectivityState.READY);
assertThat(currentState).isEqualTo(ConnectivityState.READY);
for (int i = 0; i < maxConcurrentRequests; i++) {
PickResult result = currentPicker.pickSubchannel(pickSubchannelArgs);
assertThat(result.getStatus().isOk()).isTrue();
ClientStreamTracer.Factory streamTracerFactory = result.getStreamTracerFactory();
streamTracerFactory.newClientStreamTracer(
ClientStreamTracer.StreamInfo.newBuilder().build(), new Metadata());
}
ClusterStats clusterStats =
Iterables.getOnlyElement(loadStatsManager.getClusterStatsReports(CLUSTER));
assertThat(clusterStats.clusterServiceName()).isEqualTo(EDS_SERVICE_NAME);
assertThat(clusterStats.totalDroppedRequests()).isEqualTo(0L);
PickResult result = currentPicker.pickSubchannel(pickSubchannelArgs);
clusterStats = Iterables.getOnlyElement(loadStatsManager.getClusterStatsReports(CLUSTER));
assertThat(clusterStats.clusterServiceName()).isEqualTo(EDS_SERVICE_NAME);
if (enableCircuitBreaking) {
assertThat(result.getStatus().isOk()).isFalse();
assertThat(result.getStatus().getCode()).isEqualTo(Code.UNAVAILABLE);
assertThat(result.getStatus().getDescription())
.isEqualTo("Cluster max concurrent requests limit of 100 exceeded");
assertThat(clusterStats.totalDroppedRequests()).isEqualTo(1L);
} else {
assertThat(result.getStatus().isOk()).isTrue();
assertThat(clusterStats.totalDroppedRequests()).isEqualTo(0L);
}
// Config update increments circuit breakers max_concurrent_requests threshold.
maxConcurrentRequests = 101L;
config = new ClusterImplConfig(CLUSTER, EDS_SERVICE_NAME, LRS_SERVER_INFO,
maxConcurrentRequests, Collections.<DropOverload>emptyList(),
GracefulSwitchLoadBalancer.createLoadBalancingPolicyConfig(
weightedTargetProvider, weightedTargetConfig),
null, Collections.emptyMap(), null);
deliverAddressesAndConfig(Collections.singletonList(endpoint), config);
result = currentPicker.pickSubchannel(pickSubchannelArgs);
assertThat(result.getStatus().isOk()).isTrue();
result.getStreamTracerFactory().newClientStreamTracer(
ClientStreamTracer.StreamInfo.newBuilder().build(), new Metadata()); // 101th request
clusterStats = Iterables.getOnlyElement(loadStatsManager.getClusterStatsReports(CLUSTER));
assertThat(clusterStats.clusterServiceName()).isEqualTo(EDS_SERVICE_NAME);
assertThat(clusterStats.totalDroppedRequests()).isEqualTo(0L);
result = currentPicker.pickSubchannel(pickSubchannelArgs); // 102th request
clusterStats = Iterables.getOnlyElement(loadStatsManager.getClusterStatsReports(CLUSTER));
assertThat(clusterStats.clusterServiceName()).isEqualTo(EDS_SERVICE_NAME);
if (enableCircuitBreaking) {
assertThat(result.getStatus().isOk()).isFalse();
assertThat(result.getStatus().getCode()).isEqualTo(Code.UNAVAILABLE);
assertThat(result.getStatus().getDescription())
.isEqualTo("Cluster max concurrent requests limit of 101 exceeded");
assertThat(clusterStats.totalDroppedRequests()).isEqualTo(1L);
} else {
assertThat(result.getStatus().isOk()).isTrue();
assertThat(clusterStats.totalDroppedRequests()).isEqualTo(0L);
}
}
@Test
public void maxConcurrentRequests_appliedWithDefaultValue_disableCircuitBreaking() {
boolean originalEnableCircuitBreaking = ClusterImplLoadBalancer.enableCircuitBreaking;
ClusterImplLoadBalancer.enableCircuitBreaking = false;
subtest_maxConcurrentRequests_appliedWithDefaultValue(false);
ClusterImplLoadBalancer.enableCircuitBreaking = originalEnableCircuitBreaking;
}
  @Test
  public void maxConcurrentRequests_appliedWithDefaultValue_circuitBreakingEnabledByDefault() {
    // Circuit breaking is on by default, so no static-flag manipulation is needed.
    subtest_maxConcurrentRequests_appliedWithDefaultValue(true);
  }
private void subtest_maxConcurrentRequests_appliedWithDefaultValue(
boolean enableCircuitBreaking) {
LoadBalancerProvider weightedTargetProvider = new WeightedTargetLoadBalancerProvider();
WeightedTargetConfig weightedTargetConfig =
buildWeightedTargetConfig(ImmutableMap.of(locality, 10));
ClusterImplConfig config = new ClusterImplConfig(CLUSTER, EDS_SERVICE_NAME, LRS_SERVER_INFO,
null, Collections.<DropOverload>emptyList(),
GracefulSwitchLoadBalancer.createLoadBalancingPolicyConfig(
weightedTargetProvider, weightedTargetConfig),
null, Collections.emptyMap(), null);
EquivalentAddressGroup endpoint = makeAddress("endpoint-addr", locality);
deliverAddressesAndConfig(Collections.singletonList(endpoint), config);
assertThat(downstreamBalancers).hasSize(1); // one leaf balancer
FakeLoadBalancer leafBalancer = Iterables.getOnlyElement(downstreamBalancers);
assertThat(leafBalancer.name).isEqualTo("round_robin");
assertThat(Iterables.getOnlyElement(leafBalancer.addresses).getAddresses())
.isEqualTo(endpoint.getAddresses());
leafBalancer.createSubChannel();
FakeSubchannel fakeSubchannel = helper.subchannels.poll();
fakeSubchannel.updateState(ConnectivityStateInfo.forNonError(ConnectivityState.CONNECTING));
fakeSubchannel.setConnectedEagIndex(0);
fakeSubchannel.updateState(ConnectivityStateInfo.forNonError(ConnectivityState.READY));
assertThat(currentState).isEqualTo(ConnectivityState.READY);
assertThat(currentState).isEqualTo(ConnectivityState.READY);
for (int i = 0; i < ClusterImplLoadBalancer.DEFAULT_PER_CLUSTER_MAX_CONCURRENT_REQUESTS; i++) {
PickResult result = currentPicker.pickSubchannel(pickSubchannelArgs);
assertThat(result.getStatus().isOk()).isTrue();
ClientStreamTracer.Factory streamTracerFactory = result.getStreamTracerFactory();
streamTracerFactory.newClientStreamTracer(
ClientStreamTracer.StreamInfo.newBuilder().build(), new Metadata());
}
ClusterStats clusterStats =
Iterables.getOnlyElement(loadStatsManager.getClusterStatsReports(CLUSTER));
assertThat(clusterStats.clusterServiceName()).isEqualTo(EDS_SERVICE_NAME);
assertThat(clusterStats.totalDroppedRequests()).isEqualTo(0L);
PickResult result = currentPicker.pickSubchannel(pickSubchannelArgs);
clusterStats = Iterables.getOnlyElement(loadStatsManager.getClusterStatsReports(CLUSTER));
assertThat(clusterStats.clusterServiceName()).isEqualTo(EDS_SERVICE_NAME);
if (enableCircuitBreaking) {
assertThat(result.getStatus().isOk()).isFalse();
assertThat(result.getStatus().getCode()).isEqualTo(Code.UNAVAILABLE);
assertThat(result.getStatus().getDescription())
.isEqualTo("Cluster max concurrent requests limit of 1024 exceeded");
assertThat(clusterStats.totalDroppedRequests()).isEqualTo(1L);
} else {
assertThat(result.getStatus().isOk()).isTrue();
assertThat(clusterStats.totalDroppedRequests()).isEqualTo(0L);
}
}
@Test
public void endpointAddressesAttachedWithClusterName() {
LoadBalancerProvider weightedTargetProvider = new WeightedTargetLoadBalancerProvider();
WeightedTargetConfig weightedTargetConfig =
buildWeightedTargetConfig(ImmutableMap.of(locality, 10));
ClusterImplConfig config = new ClusterImplConfig(CLUSTER, EDS_SERVICE_NAME, LRS_SERVER_INFO,
null, Collections.<DropOverload>emptyList(),
GracefulSwitchLoadBalancer.createLoadBalancingPolicyConfig(
weightedTargetProvider, weightedTargetConfig),
null, Collections.emptyMap(), null);
// One locality with two endpoints.
EquivalentAddressGroup endpoint1 = makeAddress("endpoint-addr1", locality);
EquivalentAddressGroup endpoint2 = makeAddress("endpoint-addr2", locality);
deliverAddressesAndConfig(Arrays.asList(endpoint1, endpoint2), config);
assertThat(downstreamBalancers).hasSize(1); // one leaf balancer
FakeLoadBalancer leafBalancer = Iterables.getOnlyElement(downstreamBalancers);
assertThat(leafBalancer.name).isEqualTo("round_robin");
// Simulates leaf load balancer creating subchannels.
CreateSubchannelArgs args =
CreateSubchannelArgs.newBuilder()
.setAddresses(leafBalancer.addresses)
.build();
Subchannel subchannel = leafBalancer.helper.createSubchannel(args);
for (EquivalentAddressGroup eag : subchannel.getAllAddresses()) {
assertThat(eag.getAttributes().get(io.grpc.xds.XdsAttributes.ATTR_CLUSTER_NAME))
.isEqualTo(CLUSTER);
}
// An address update should also retain the cluster attribute.
subchannel.updateAddresses(leafBalancer.addresses);
for (EquivalentAddressGroup eag : subchannel.getAllAddresses()) {
assertThat(eag.getAttributes().get(io.grpc.xds.XdsAttributes.ATTR_CLUSTER_NAME))
.isEqualTo(CLUSTER);
}
}
  /**
   * Verifies that with the experimental authority-rewrite flag set and auto host rewrite
   * enabled on the call options, the pick result carries the endpoint's authority hostname
   * as an authority override.
   */
  @Test
  public void
      endpointsWithAuthorityHostname_autoHostRewriteEnabled_pickResultHasAuthorityHostname() {
    // The rewrite behavior is gated behind this system property; it is cleared in the
    // finally block so the flag cannot leak into other tests.
    System.setProperty("GRPC_EXPERIMENTAL_XDS_AUTHORITY_REWRITE", "true");
    try {
      LoadBalancerProvider weightedTargetProvider = new WeightedTargetLoadBalancerProvider();
      WeightedTargetConfig weightedTargetConfig =
          buildWeightedTargetConfig(ImmutableMap.of(locality, 10));
      ClusterImplConfig config = new ClusterImplConfig(CLUSTER, EDS_SERVICE_NAME, LRS_SERVER_INFO,
          null, Collections.<DropOverload>emptyList(),
          GracefulSwitchLoadBalancer.createLoadBalancingPolicyConfig(
              weightedTargetProvider, weightedTargetConfig),
          null, Collections.emptyMap(), null);
      // Endpoint labeled with an authority hostname.
      EquivalentAddressGroup endpoint1 = makeAddress("endpoint-addr1", locality,
          "authority-host-name");
      deliverAddressesAndConfig(Arrays.asList(endpoint1), config);
      assertThat(downstreamBalancers).hasSize(1); // one leaf balancer
      FakeLoadBalancer leafBalancer = Iterables.getOnlyElement(downstreamBalancers);
      assertThat(leafBalancer.name).isEqualTo("round_robin");
      // Simulates leaf load balancer creating subchannels.
      CreateSubchannelArgs args =
          CreateSubchannelArgs.newBuilder()
              .setAddresses(leafBalancer.addresses)
              .build();
      Subchannel subchannel = leafBalancer.helper.createSubchannel(args);
      // On READY, publish a picker that always returns this subchannel.
      subchannel.start(infoObject -> {
        if (infoObject.getState() == ConnectivityState.READY) {
          helper.updateBalancingState(
              ConnectivityState.READY,
              new FixedResultPicker(PickResult.withSubchannel(subchannel)));
        }
      });
      // Both the subchannel wrapper and each address group carry the hostname attribute.
      assertThat(subchannel.getAttributes().get(ATTR_SUBCHANNEL_ADDRESS_NAME)).isEqualTo(
          "authority-host-name");
      for (EquivalentAddressGroup eag : subchannel.getAllAddresses()) {
        assertThat(eag.getAttributes().get(XdsInternalAttributes.ATTR_ADDRESS_NAME))
            .isEqualTo("authority-host-name");
      }
      leafBalancer.deliverSubchannelState(subchannel, ConnectivityState.READY);
      assertThat(currentState).isEqualTo(ConnectivityState.READY);
      // A pick made with AUTO_HOST_REWRITE_KEY=true must yield the authority override.
      PickDetailsConsumer detailsConsumer = mock(PickDetailsConsumer.class);
      pickSubchannelArgs = new PickSubchannelArgsImpl(
          TestMethodDescriptors.voidMethod(), new Metadata(),
          CallOptions.DEFAULT.withOption(AUTO_HOST_REWRITE_KEY, true), detailsConsumer);
      PickResult result = currentPicker.pickSubchannel(pickSubchannelArgs);
      assertThat(result.getAuthorityOverride()).isEqualTo("authority-host-name");
    } finally {
      System.clearProperty("GRPC_EXPERIMENTAL_XDS_AUTHORITY_REWRITE");
    }
  }
@Test
public void
endpointWithAuthorityHostname_autoHostRewriteNotEnabled_pickResultNoAuthorityHostname() {
LoadBalancerProvider weightedTargetProvider = new WeightedTargetLoadBalancerProvider();
WeightedTargetConfig weightedTargetConfig =
buildWeightedTargetConfig(ImmutableMap.of(locality, 10));
ClusterImplConfig config = new ClusterImplConfig(CLUSTER, EDS_SERVICE_NAME, LRS_SERVER_INFO,
null, Collections.<DropOverload>emptyList(),
GracefulSwitchLoadBalancer.createLoadBalancingPolicyConfig(
weightedTargetProvider, weightedTargetConfig),
null, Collections.emptyMap(), null);
EquivalentAddressGroup endpoint1 = makeAddress("endpoint-addr1", locality,
"authority-host-name");
deliverAddressesAndConfig(Arrays.asList(endpoint1), config);
assertThat(downstreamBalancers).hasSize(1); // one leaf balancer
FakeLoadBalancer leafBalancer = Iterables.getOnlyElement(downstreamBalancers);
assertThat(leafBalancer.name).isEqualTo("round_robin");
// Simulates leaf load balancer creating subchannels.
CreateSubchannelArgs args =
CreateSubchannelArgs.newBuilder()
.setAddresses(leafBalancer.addresses)
.build();
Subchannel subchannel = leafBalancer.helper.createSubchannel(args);
subchannel.start(infoObject -> {
if (infoObject.getState() == ConnectivityState.READY) {
helper.updateBalancingState(
ConnectivityState.READY,
new FixedResultPicker(PickResult.withSubchannel(subchannel)));
}
});
// Sub Channel wrapper args won't have the address name although addresses will.
assertThat(subchannel.getAttributes().get(ATTR_SUBCHANNEL_ADDRESS_NAME)).isNull();
for (EquivalentAddressGroup eag : subchannel.getAllAddresses()) {
assertThat(eag.getAttributes().get(XdsInternalAttributes.ATTR_ADDRESS_NAME))
.isEqualTo("authority-host-name");
}
leafBalancer.deliverSubchannelState(subchannel, ConnectivityState.READY);
assertThat(currentState).isEqualTo(ConnectivityState.READY);
PickDetailsConsumer detailsConsumer = mock(PickDetailsConsumer.class);
pickSubchannelArgs = new PickSubchannelArgsImpl(
TestMethodDescriptors.voidMethod(), new Metadata(), CallOptions.DEFAULT, detailsConsumer);
PickResult result = currentPicker.pickSubchannel(pickSubchannelArgs);
assertThat(result.getAuthorityOverride()).isNull();
}
  /**
   * Verifies the lifecycle of per-address TLS attributes: with an UpstreamTlsContext
   * configured, every subchannel address carries an SslContextProviderSupplier for that
   * context; removing the context strips the attribute; replacing the context installs a
   * fresh, live supplier; and shutting the balancer down shuts the suppliers down too.
   */
  @Test
  public void endpointAddressesAttachedWithTlsConfig_securityEnabledByDefault() {
    UpstreamTlsContext upstreamTlsContext =
        CommonTlsContextTestsUtil.buildUpstreamTlsContext(
            "google_cloud_private_spiffe", true);
    LoadBalancerProvider weightedTargetProvider = new WeightedTargetLoadBalancerProvider();
    WeightedTargetConfig weightedTargetConfig =
        buildWeightedTargetConfig(ImmutableMap.of(locality, 10));
    ClusterImplConfig config = new ClusterImplConfig(CLUSTER, EDS_SERVICE_NAME, LRS_SERVER_INFO,
        null, Collections.<DropOverload>emptyList(),
        GracefulSwitchLoadBalancer.createLoadBalancingPolicyConfig(
            weightedTargetProvider, weightedTargetConfig),
        upstreamTlsContext, Collections.emptyMap(), null);
    // One locality with two endpoints.
    EquivalentAddressGroup endpoint1 = makeAddress("endpoint-addr1", locality);
    EquivalentAddressGroup endpoint2 = makeAddress("endpoint-addr2", locality);
    deliverAddressesAndConfig(Arrays.asList(endpoint1, endpoint2), config);
    assertThat(downstreamBalancers).hasSize(1); // one leaf balancer
    FakeLoadBalancer leafBalancer = Iterables.getOnlyElement(downstreamBalancers);
    assertThat(leafBalancer.name).isEqualTo("round_robin");
    // Simulates leaf load balancer creating subchannels.
    CreateSubchannelArgs args =
        CreateSubchannelArgs.newBuilder()
            .setAddresses(leafBalancer.addresses)
            .build();
    Subchannel subchannel = leafBalancer.helper.createSubchannel(args);
    // Phase 1: with a TLS context configured, every address carries a supplier
    // for exactly that context.
    for (EquivalentAddressGroup eag : subchannel.getAllAddresses()) {
      SslContextProviderSupplier supplier =
          eag.getAttributes().get(SecurityProtocolNegotiators.ATTR_SSL_CONTEXT_PROVIDER_SUPPLIER);
      assertThat(supplier.getTlsContext()).isEqualTo(upstreamTlsContext);
    }
    // Removes UpstreamTlsContext from the config.
    config = new ClusterImplConfig(CLUSTER, EDS_SERVICE_NAME, LRS_SERVER_INFO,
        null, Collections.<DropOverload>emptyList(),
        GracefulSwitchLoadBalancer.createLoadBalancingPolicyConfig(
            weightedTargetProvider, weightedTargetConfig),
        null, Collections.emptyMap(), null);
    deliverAddressesAndConfig(Arrays.asList(endpoint1, endpoint2), config);
    // The same leaf balancer is reused; only the security attributes change.
    assertThat(Iterables.getOnlyElement(downstreamBalancers)).isSameInstanceAs(leafBalancer);
    subchannel = leafBalancer.helper.createSubchannel(args); // creates new connections
    // Phase 2: with no TLS context, no supplier attribute is attached.
    for (EquivalentAddressGroup eag : subchannel.getAllAddresses()) {
      assertThat(
          eag.getAttributes().get(SecurityProtocolNegotiators.ATTR_SSL_CONTEXT_PROVIDER_SUPPLIER))
          .isNull();
    }
    // Config with a new UpstreamTlsContext.
    upstreamTlsContext = CommonTlsContextTestsUtil.buildUpstreamTlsContext(
        "google_cloud_private_spiffe1", true);
    config = new ClusterImplConfig(CLUSTER, EDS_SERVICE_NAME, LRS_SERVER_INFO,
        null, Collections.<DropOverload>emptyList(),
        GracefulSwitchLoadBalancer.createLoadBalancingPolicyConfig(
            weightedTargetProvider, weightedTargetConfig),
        upstreamTlsContext, Collections.emptyMap(), null);
    deliverAddressesAndConfig(Arrays.asList(endpoint1, endpoint2), config);
    assertThat(Iterables.getOnlyElement(downstreamBalancers)).isSameInstanceAs(leafBalancer);
    subchannel = leafBalancer.helper.createSubchannel(args); // creates new connections
    // Phase 3: the replacement context yields live suppliers for the new context.
    for (EquivalentAddressGroup eag : subchannel.getAllAddresses()) {
      SslContextProviderSupplier supplier =
          eag.getAttributes().get(SecurityProtocolNegotiators.ATTR_SSL_CONTEXT_PROVIDER_SUPPLIER);
      assertThat(supplier.isShutdown()).isFalse();
      assertThat(supplier.getTlsContext()).isEqualTo(upstreamTlsContext);
    }
    loadBalancer.shutdown();
    // Shutting down the balancer must also shut down the per-address suppliers.
    for (EquivalentAddressGroup eag : subchannel.getAllAddresses()) {
      SslContextProviderSupplier supplier =
          eag.getAttributes().get(SecurityProtocolNegotiators.ATTR_SSL_CONTEXT_PROVIDER_SUPPLIER);
      assertThat(supplier.isShutdown()).isTrue();
    }
    // NOTE(review): presumably cleared so test teardown skips a second shutdown — confirm.
    loadBalancer = null;
  }
private void deliverAddressesAndConfig(List<EquivalentAddressGroup> addresses,
ClusterImplConfig config) {
loadBalancer.acceptResolvedAddresses(
ResolvedAddresses.newBuilder()
.setAddresses(addresses)
.setAttributes(
Attributes.newBuilder()
.set(io.grpc.xds.XdsAttributes.XDS_CLIENT, xdsClient)
.set(io.grpc.xds.XdsAttributes.CALL_COUNTER_PROVIDER, callCounterProvider)
.build())
.setLoadBalancingPolicyConfig(config)
.build());
}
private WeightedTargetConfig buildWeightedTargetConfig(Map<Locality, Integer> localityWeights) {
Map<String, WeightedPolicySelection> targets = new HashMap<>();
for (Locality locality : localityWeights.keySet()) {
int weight = localityWeights.get(locality);
WeightedPolicySelection weightedLocalityLbPolicy =
new WeightedPolicySelection(weight, roundRobin);
targets.put(locality.toString(), weightedLocalityLbPolicy);
}
return new WeightedTargetConfig(Collections.unmodifiableMap(targets));
}
  /**
   * Creates a locality-labeled address with no authority hostname attached.
   *
   * @param name fake address name identifying the endpoint
   * @param locality the locality the endpoint belongs to
   */
  private static EquivalentAddressGroup makeAddress(final String name, Locality locality) {
    // Delegates to the three-argument factory with a null authority hostname.
    return makeAddress(name, locality, null);
  }
private static EquivalentAddressGroup makeAddress(final String name, Locality locality,
String authorityHostname) {
| ClusterImplLoadBalancerTest |
java | elastic__elasticsearch | x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizationTestCase.java | {
"start": 689,
"end": 1222
} | class ____ extends ESTestCase {
protected CategorizationBytesRefHash bytesRefHash;
@Before
public void createHash() {
bytesRefHash = new CategorizationBytesRefHash(new BytesRefHash(2048, BigArrays.NON_RECYCLING_INSTANCE));
}
@After
public void destroyHash() {
bytesRefHash.close();
}
protected TokenAndWeight tw(String token, int weight) {
return new TokenAndWeight(bytesRefHash.put(new BytesRef(token.getBytes(StandardCharsets.UTF_8))), weight);
}
}
| CategorizationTestCase |
java | apache__flink | flink-core-api/src/main/java/org/apache/flink/api/java/tuple/Tuple22.java | {
"start": 1892,
"end": 2929
} | class ____ extends Tuple22", then don't use
* instances of Foo in a DataStream<Tuple22> / DataSet<Tuple22>, but declare it as
* DataStream<Foo> / DataSet<Foo>.)
* </ul>
*
* @see Tuple
* @param <T0> The type of field 0
* @param <T1> The type of field 1
* @param <T2> The type of field 2
* @param <T3> The type of field 3
* @param <T4> The type of field 4
* @param <T5> The type of field 5
* @param <T6> The type of field 6
* @param <T7> The type of field 7
* @param <T8> The type of field 8
* @param <T9> The type of field 9
* @param <T10> The type of field 10
* @param <T11> The type of field 11
* @param <T12> The type of field 12
* @param <T13> The type of field 13
* @param <T14> The type of field 14
* @param <T15> The type of field 15
* @param <T16> The type of field 16
* @param <T17> The type of field 17
* @param <T18> The type of field 18
* @param <T19> The type of field 19
* @param <T20> The type of field 20
* @param <T21> The type of field 21
*/
@Public
public | Foo |
java | apache__camel | components/camel-telemetry/src/test/java/org/apache/camel/telemetry/decorators/JdbcSpanDecoratorTest.java | {
"start": 1238,
"end": 2090
} | class ____ {
private static final String SQL_STATEMENT = "select * from customer";
@Test
public void testPre() {
Endpoint endpoint = Mockito.mock(Endpoint.class);
Exchange exchange = Mockito.mock(Exchange.class);
Message message = Mockito.mock(Message.class);
Mockito.when(endpoint.getEndpointUri()).thenReturn("test");
Mockito.when(exchange.getIn()).thenReturn(message);
Mockito.when(message.getBody()).thenReturn(SQL_STATEMENT);
SpanDecorator decorator = new JdbcSpanDecorator();
MockSpanAdapter span = new MockSpanAdapter();
decorator.beforeTracingEvent(span, exchange, endpoint);
assertEquals("sql", span.tags().get(TagConstants.DB_SYSTEM));
assertEquals(SQL_STATEMENT, span.tags().get(TagConstants.DB_STATEMENT));
}
}
| JdbcSpanDecoratorTest |
java | apache__maven | compat/maven-compat/src/main/java/org/apache/maven/project/DefaultProjectBuilderConfiguration.java | {
"start": 1075,
"end": 2984
} | class ____ implements ProjectBuilderConfiguration {
private ProfileManager globalProfileManager;
private ArtifactRepository localRepository;
private Properties userProperties;
private Properties executionProperties = System.getProperties();
private Date buildStartTime;
public DefaultProjectBuilderConfiguration() {}
@Override
public ProjectBuilderConfiguration setGlobalProfileManager(ProfileManager globalProfileManager) {
this.globalProfileManager = globalProfileManager;
return this;
}
@Override
public ProfileManager getGlobalProfileManager() {
return globalProfileManager;
}
@Override
public ProjectBuilderConfiguration setLocalRepository(ArtifactRepository localRepository) {
this.localRepository = localRepository;
return this;
}
@Override
public ArtifactRepository getLocalRepository() {
return localRepository;
}
@Override
public ProjectBuilderConfiguration setUserProperties(Properties userProperties) {
this.userProperties = userProperties;
return this;
}
@Override
public Properties getUserProperties() {
if (userProperties == null) {
userProperties = new Properties();
}
return userProperties;
}
@Override
public Properties getExecutionProperties() {
return executionProperties;
}
@Override
public ProjectBuilderConfiguration setExecutionProperties(Properties executionProperties) {
this.executionProperties = executionProperties;
return this;
}
@Override
public Date getBuildStartTime() {
return buildStartTime;
}
@Override
public ProjectBuilderConfiguration setBuildStartTime(Date buildStartTime) {
this.buildStartTime = buildStartTime;
return this;
}
}
| DefaultProjectBuilderConfiguration |
java | resilience4j__resilience4j | resilience4j-spring-boot3/src/main/java/io/github/resilience4j/springboot3/micrometer/autoconfigure/TimerAutoConfiguration.java | {
"start": 1984,
"end": 2469
} | class ____ {
@Bean
@ConditionalOnAvailableEndpoint
public TimerEndpoint timerEndpoint(TimerRegistry timerRegistry) {
return new TimerEndpoint(timerRegistry);
}
@Bean
@ConditionalOnAvailableEndpoint
public TimerEventsEndpoint timerEventsEndpoint(EventConsumerRegistry<TimerEvent> eventConsumerRegistry) {
return new TimerEventsEndpoint(eventConsumerRegistry);
}
}
}
| TimerAutoEndpointConfiguration |
java | spring-projects__spring-framework | spring-orm/src/test/java/org/springframework/orm/jpa/support/PersistenceInjectionTests.java | {
"start": 36573,
"end": 36720
} | interface ____ extends EntityManagerFactory, EntityManagerFactoryInfo {
}
@SuppressWarnings("serial")
private static | EntityManagerFactoryWithInfo |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/bug/Bug_for_issue_430.java | {
"start": 254,
"end": 1680
} | class ____ extends TestCase {
protected void setUp() throws Exception {
ParserConfig.global.addAccept("com.alibaba.json.bvt.bug.Bug_for_issue_430");
}
public void test_for_issue() throws Exception {
String text = "[{\"@type\": \"com.alibaba.json.bvt.bug.Bug_for_issue_430$FooModel\", \"fooCollection\": null}, {\"@type\": \"com.alibaba.json.bvt.bug.Bug_for_issue_430$FooModel\", \"fooCollection\": null}]";
JSONArray array = JSON.parseArray(text);
Assert.assertEquals(FooModel.class, array.get(0).getClass());
Assert.assertEquals(FooModel.class, array.get(1).getClass());
Assert.assertNull(((FooModel)array.get(0)).fooCollection);
Assert.assertNull(((FooModel)array.get(1)).fooCollection);
}
public void test_for_issue_1() throws Exception {
String text = "[{\"@type\": \"com.alibaba.json.bvt.bug.Bug_for_issue_430$FooModel\", \"fooCollection\": null}, {\"@type\": \"com.alibaba.json.bvt.bug.Bug_for_issue_430$FooModel\", \"fooCollection\": null}]";
JSONArray array = (JSONArray) JSON.parse(text);
Assert.assertEquals(FooModel.class, array.get(0).getClass());
Assert.assertEquals(FooModel.class, array.get(1).getClass());
Assert.assertNull(((FooModel)array.get(0)).fooCollection);
Assert.assertNull(((FooModel)array.get(1)).fooCollection);
}
public static | Bug_for_issue_430 |
java | playframework__playframework | web/play-java-forms/src/main/java/play/data/validation/Constraints.java | {
"start": 1578,
"end": 2452
} | class ____<T> {
/**
* @param object the value to test.
* @param payload the payload providing validation context information.
* @return {@code true} if this value is valid.
*/
public abstract boolean isValid(T object, ValidationPayload payload);
/**
* @param object the object to check
* @param constraintContext The JSR-303 validation context.
* @return {@code true} if this value is valid for the given constraint.
*/
public boolean isValid(T object, ConstraintValidatorContext constraintContext) {
return isValid(
object,
constraintContext
.unwrap(HibernateConstraintValidatorContext.class)
.getConstraintValidatorPayload(ValidationPayload.class));
}
public abstract Tuple<String, Object[]> getErrorMessageKey();
}
public static | ValidatorWithPayload |
java | apache__camel | core/camel-main/src/test/java/org/apache/camel/main/scan/MyScanConfiguration.java | {
"start": 971,
"end": 1227
} | class ____ implements CamelConfiguration {
@Override
public void configure(CamelContext camelContext) {
Assertions.assertNotNull(camelContext);
camelContext.getGlobalOptions().put("scanConfigured", "true");
}
}
| MyScanConfiguration |
java | spring-projects__spring-security | core/src/test/java/org/springframework/security/jackson2/SecurityJackson2ModulesTests.java | {
"start": 4355,
"end": 4549
} | class ____ {
private String property = "bar";
String getProperty() {
return this.property;
}
void setProperty(String property) {
}
}
@JsonIgnoreType(false)
static | NotAllowlisted |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/index/engine/SoftDeletesPolicyTests.java | {
"start": 1290,
"end": 10549
} | class ____ extends ESTestCase {
/**
* Makes sure we won't advance the retained seq# if the retention lock is held
*/
public void testSoftDeletesRetentionLock() {
long retainedOps = between(0, 10000);
AtomicLong globalCheckpoint = new AtomicLong(NO_OPS_PERFORMED);
final AtomicLong[] retainingSequenceNumbers = new AtomicLong[randomIntBetween(0, 8)];
for (int i = 0; i < retainingSequenceNumbers.length; i++) {
retainingSequenceNumbers[i] = new AtomicLong();
}
final Supplier<RetentionLeases> retentionLeasesSupplier = () -> {
final List<RetentionLease> leases = new ArrayList<>(retainingSequenceNumbers.length);
for (int i = 0; i < retainingSequenceNumbers.length; i++) {
leases.add(new RetentionLease(Integer.toString(i), retainingSequenceNumbers[i].get(), 0L, "test"));
}
return new RetentionLeases(1, 1, leases);
};
long safeCommitCheckpoint = globalCheckpoint.get();
SoftDeletesPolicy policy = new SoftDeletesPolicy(globalCheckpoint::get, between(1, 10000), retainedOps, retentionLeasesSupplier);
long minRetainedSeqNo = policy.getMinRetainedSeqNo();
List<Releasable> locks = new ArrayList<>();
int iters = scaledRandomIntBetween(10, 1000);
for (int i = 0; i < iters; i++) {
if (randomBoolean()) {
locks.add(policy.acquireRetentionLock());
}
// Advances the global checkpoint and the local checkpoint of a safe commit
globalCheckpoint.addAndGet(between(0, 1000));
for (final AtomicLong retainingSequenceNumber : retainingSequenceNumbers) {
retainingSequenceNumber.set(randomLongBetween(retainingSequenceNumber.get(), Math.max(globalCheckpoint.get(), 0L)));
}
safeCommitCheckpoint = randomLongBetween(safeCommitCheckpoint, globalCheckpoint.get());
policy.setLocalCheckpointOfSafeCommit(safeCommitCheckpoint);
if (rarely()) {
retainedOps = between(0, 10000);
policy.setRetentionOperations(retainedOps);
}
// Release some locks
List<Releasable> releasingLocks = randomSubsetOf(locks);
locks.removeAll(releasingLocks);
releasingLocks.forEach(Releasable::close);
// getting the query has side effects, updating the internal state of the policy
final Query query = policy.getRetentionQuery(SeqNoFieldMapper.SeqNoIndexOptions.POINTS_AND_DOC_VALUES);
assertThat(query, instanceOf(PointRangeQuery.class));
final PointRangeQuery retentionQuery = (PointRangeQuery) query;
// we only expose the minimum sequence number to the merge policy if the retention lock is not held
if (locks.isEmpty()) {
final long minimumRetainingSequenceNumber = Arrays.stream(retainingSequenceNumbers)
.mapToLong(AtomicLong::get)
.min()
.orElse(Long.MAX_VALUE);
long retainedSeqNo = Math.min(
1 + safeCommitCheckpoint,
Math.min(minimumRetainingSequenceNumber, 1 + globalCheckpoint.get() - retainedOps)
);
minRetainedSeqNo = Math.max(minRetainedSeqNo, retainedSeqNo);
}
assertThat(retentionQuery.getNumDims(), equalTo(1));
assertThat(LongPoint.decodeDimension(retentionQuery.getLowerPoint(), 0), equalTo(minRetainedSeqNo));
assertThat(LongPoint.decodeDimension(retentionQuery.getUpperPoint(), 0), equalTo(Long.MAX_VALUE));
assertThat(policy.getMinRetainedSeqNo(), equalTo(minRetainedSeqNo));
}
locks.forEach(Releasable::close);
final long minimumRetainingSequenceNumber = Arrays.stream(retainingSequenceNumbers)
.mapToLong(AtomicLong::get)
.min()
.orElse(Long.MAX_VALUE);
long retainedSeqNo = Math.min(
1 + safeCommitCheckpoint,
Math.min(minimumRetainingSequenceNumber, 1 + globalCheckpoint.get() - retainedOps)
);
minRetainedSeqNo = Math.max(minRetainedSeqNo, retainedSeqNo);
assertThat(policy.getMinRetainedSeqNo(), equalTo(minRetainedSeqNo));
}
public void testWhenGlobalCheckpointDictatesThePolicy() {
final int retentionOperations = randomIntBetween(0, 1024);
final AtomicLong globalCheckpoint = new AtomicLong(randomLongBetween(0, Long.MAX_VALUE - 2));
final Collection<RetentionLease> leases = new ArrayList<>();
final int numberOfLeases = randomIntBetween(0, 16);
for (int i = 0; i < numberOfLeases; i++) {
// setup leases where the minimum retained sequence number is more than the policy dictated by the global checkpoint
leases.add(
new RetentionLease(
Integer.toString(i),
randomLongBetween(1 + globalCheckpoint.get() - retentionOperations + 1, Long.MAX_VALUE),
randomNonNegativeLong(),
"test"
)
);
}
final long primaryTerm = randomNonNegativeLong();
final long version = randomNonNegativeLong();
final Supplier<RetentionLeases> leasesSupplier = () -> new RetentionLeases(primaryTerm, version, List.copyOf(leases));
final SoftDeletesPolicy policy = new SoftDeletesPolicy(globalCheckpoint::get, 0, retentionOperations, leasesSupplier);
// set the local checkpoint of the safe commit to more than the policy dicated by the global checkpoint
final long localCheckpointOfSafeCommit = randomLongBetween(1 + globalCheckpoint.get() - retentionOperations + 1, Long.MAX_VALUE);
policy.setLocalCheckpointOfSafeCommit(localCheckpointOfSafeCommit);
assertThat(policy.getMinRetainedSeqNo(), equalTo(1 + globalCheckpoint.get() - retentionOperations));
}
public void testWhenLocalCheckpointOfSafeCommitDictatesThePolicy() {
final int retentionOperations = randomIntBetween(0, 1024);
final long localCheckpointOfSafeCommit = randomLongBetween(-1, Long.MAX_VALUE - retentionOperations - 1);
final AtomicLong globalCheckpoint = new AtomicLong(
randomLongBetween(Math.max(0, localCheckpointOfSafeCommit + retentionOperations), Long.MAX_VALUE - 1)
);
final Collection<RetentionLease> leases = new ArrayList<>();
final int numberOfLeases = randomIntBetween(0, 16);
for (int i = 0; i < numberOfLeases; i++) {
leases.add(
new RetentionLease(
Integer.toString(i),
randomLongBetween(1 + localCheckpointOfSafeCommit + 1, Long.MAX_VALUE), // leases are for more than the local checkpoint
randomNonNegativeLong(),
"test"
)
);
}
final long primaryTerm = randomNonNegativeLong();
final long version = randomNonNegativeLong();
final Supplier<RetentionLeases> leasesSupplier = () -> new RetentionLeases(primaryTerm, version, List.copyOf(leases));
final SoftDeletesPolicy policy = new SoftDeletesPolicy(globalCheckpoint::get, 0, retentionOperations, leasesSupplier);
policy.setLocalCheckpointOfSafeCommit(localCheckpointOfSafeCommit);
assertThat(policy.getMinRetainedSeqNo(), equalTo(1 + localCheckpointOfSafeCommit));
}
public void testWhenRetentionLeasesDictateThePolicy() {
final int retentionOperations = randomIntBetween(0, 1024);
final Collection<RetentionLease> leases = new ArrayList<>();
final int numberOfLeases = randomIntBetween(1, 16);
for (int i = 0; i < numberOfLeases; i++) {
leases.add(
new RetentionLease(
Integer.toString(i),
randomLongBetween(0, Long.MAX_VALUE - retentionOperations - 1),
randomNonNegativeLong(),
"test"
)
);
}
final OptionalLong minimumRetainingSequenceNumber = leases.stream().mapToLong(RetentionLease::retainingSequenceNumber).min();
assert minimumRetainingSequenceNumber.isPresent() : leases;
final long localCheckpointOfSafeCommit = randomLongBetween(minimumRetainingSequenceNumber.getAsLong(), Long.MAX_VALUE - 1);
final AtomicLong globalCheckpoint = new AtomicLong(
randomLongBetween(minimumRetainingSequenceNumber.getAsLong() + retentionOperations, Long.MAX_VALUE - 1)
);
final long primaryTerm = randomNonNegativeLong();
final long version = randomNonNegativeLong();
final Supplier<RetentionLeases> leasesSupplier = () -> new RetentionLeases(primaryTerm, version, List.copyOf(leases));
final SoftDeletesPolicy policy = new SoftDeletesPolicy(globalCheckpoint::get, 0, retentionOperations, leasesSupplier);
policy.setLocalCheckpointOfSafeCommit(localCheckpointOfSafeCommit);
assertThat(policy.getMinRetainedSeqNo(), equalTo(minimumRetainingSequenceNumber.getAsLong()));
}
}
| SoftDeletesPolicyTests |
java | google__auto | value/src/test/java/com/google/auto/value/processor/AutoValueCompilationTest.java | {
"start": 76002,
"end": 76092
} | class ____ an interface")
.inFile(javaFileObject)
.onLineContaining("public | or |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/NitriteEndpointBuilderFactory.java | {
"start": 5108,
"end": 10927
} | interface ____
extends
EndpointConsumerBuilder {
default NitriteEndpointConsumerBuilder basic() {
return (NitriteEndpointConsumerBuilder) this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedNitriteEndpointConsumerBuilder bridgeErrorHandler(boolean bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedNitriteEndpointConsumerBuilder bridgeErrorHandler(String bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option is a: <code>org.apache.camel.spi.ExceptionHandler</code>
* type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedNitriteEndpointConsumerBuilder exceptionHandler(org.apache.camel.spi.ExceptionHandler exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option will be converted to a
* <code>org.apache.camel.spi.ExceptionHandler</code> type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedNitriteEndpointConsumerBuilder exceptionHandler(String exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option is a: <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedNitriteEndpointConsumerBuilder exchangePattern(org.apache.camel.ExchangePattern exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option will be converted to a
* <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedNitriteEndpointConsumerBuilder exchangePattern(String exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
}
/**
* Builder for endpoint producers for the Nitrite component.
*/
public | AdvancedNitriteEndpointConsumerBuilder |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/DelegatingInputFormat.java | {
"start": 1775,
"end": 5057
} | class ____<K, V> implements InputFormat<K, V> {
public InputSplit[] getSplits(JobConf conf, int numSplits) throws IOException {
JobConf confCopy = new JobConf(conf);
List<InputSplit> splits = new ArrayList<InputSplit>();
Map<Path, InputFormat> formatMap = MultipleInputs.getInputFormatMap(conf);
Map<Path, Class<? extends Mapper>> mapperMap = MultipleInputs
.getMapperTypeMap(conf);
Map<Class<? extends InputFormat>, List<Path>> formatPaths
= new HashMap<Class<? extends InputFormat>, List<Path>>();
// First, build a map of InputFormats to Paths
for (Entry<Path, InputFormat> entry : formatMap.entrySet()) {
if (!formatPaths.containsKey(entry.getValue().getClass())) {
formatPaths.put(entry.getValue().getClass(), new LinkedList<Path>());
}
formatPaths.get(entry.getValue().getClass()).add(entry.getKey());
}
for (Entry<Class<? extends InputFormat>, List<Path>> formatEntry :
formatPaths.entrySet()) {
Class<? extends InputFormat> formatClass = formatEntry.getKey();
InputFormat format = (InputFormat) ReflectionUtils.newInstance(
formatClass, conf);
List<Path> paths = formatEntry.getValue();
Map<Class<? extends Mapper>, List<Path>> mapperPaths
= new HashMap<Class<? extends Mapper>, List<Path>>();
// Now, for each set of paths that have a common InputFormat, build
// a map of Mappers to the paths they're used for
for (Path path : paths) {
Class<? extends Mapper> mapperClass = mapperMap.get(path);
if (!mapperPaths.containsKey(mapperClass)) {
mapperPaths.put(mapperClass, new LinkedList<Path>());
}
mapperPaths.get(mapperClass).add(path);
}
// Now each set of paths that has a common InputFormat and Mapper can
// be added to the same job, and split together.
for (Entry<Class<? extends Mapper>, List<Path>> mapEntry : mapperPaths
.entrySet()) {
paths = mapEntry.getValue();
Class<? extends Mapper> mapperClass = mapEntry.getKey();
if (mapperClass == null) {
mapperClass = conf.getMapperClass();
}
FileInputFormat.setInputPaths(confCopy, paths.toArray(new Path[paths
.size()]));
// Get splits for each input path and tag with InputFormat
// and Mapper types by wrapping in a TaggedInputSplit.
InputSplit[] pathSplits = format.getSplits(confCopy, numSplits);
for (InputSplit pathSplit : pathSplits) {
splits.add(new TaggedInputSplit(pathSplit, conf, format.getClass(),
mapperClass));
}
}
}
return splits.toArray(new InputSplit[splits.size()]);
}
@SuppressWarnings("unchecked")
public RecordReader<K, V> getRecordReader(InputSplit split, JobConf conf,
Reporter reporter) throws IOException {
// Find the InputFormat and then the RecordReader from the
// TaggedInputSplit.
TaggedInputSplit taggedInputSplit = (TaggedInputSplit) split;
InputFormat<K, V> inputFormat = (InputFormat<K, V>) ReflectionUtils
.newInstance(taggedInputSplit.getInputFormatClass(), conf);
return inputFormat.getRecordReader(taggedInputSplit.getInputSplit(), conf,
reporter);
}
}
| DelegatingInputFormat |
java | spring-projects__spring-boot | module/spring-boot-kotlinx-serialization-json/src/test/java/org/springframework/boot/kotlinx/serialization/json/autoconfigure/KotlinxSerializationJsonPropertiesTests.java | {
"start": 3728,
"end": 3833
} | interface ____<S, @Nullable P extends @Nullable Object> {
@Nullable P get(S source);
}
}
}
| Accessor |
java | quarkusio__quarkus | extensions/smallrye-reactive-messaging-kafka/runtime/src/main/java/io/quarkus/smallrye/reactivemessaging/kafka/RedisStateStore.java | {
"start": 2052,
"end": 6042
} | class ____ implements CheckpointStateStore.Factory {
@Inject
@Any
Instance<ReactiveRedisDataSource> redisDataSource;
@Inject
Instance<ProcessingStateCodec.Factory> stateCodecFactory;
@Override
public CheckpointStateStore create(KafkaConnectorIncomingConfiguration config, Vertx vertx,
KafkaConsumer<?, ?> consumer, Class<?> stateType) {
String consumerGroupId = (String) consumer.configuration().get(ConsumerConfig.GROUP_ID_CONFIG);
String clientName = config.config().getOptionalValue(KafkaCommitHandler.Strategy.CHECKPOINT + "." +
REDIS_STATE_STORE + ".client-name", String.class)
.orElse(null);
ReactiveRedisDataSource rds = clientName != null
? redisDataSource.select(RedisClientName.Literal.of(clientName)).get()
: redisDataSource.select(Default.Literal.INSTANCE).get();
ProcessingStateCodec stateCodec = CDIUtils.getInstanceById(stateCodecFactory, config.getChannel(), () -> {
if (stateCodecFactory.isUnsatisfied()) {
return VertxJsonProcessingStateCodec.FACTORY;
} else {
return stateCodecFactory.get();
}
}).create(stateType);
return new RedisStateStore(rds, consumerGroupId, stateCodec);
}
}
@Override
public void close() {
closed.set(true);
}
@Override
public Uni<Map<TopicPartition, ProcessingState<?>>> fetchProcessingState(Collection<TopicPartition> partitions) {
if (partitions.isEmpty() || closed.get()) {
return Uni.createFrom().item(Collections.emptyMap());
}
List<Tuple2<TopicPartition, String>> tps = partitions.stream()
.map(tp -> Tuple2.of(tp, getKey(tp)))
.collect(Collectors.toList());
return redis.value(byte[].class).mget(tps.stream().map(Tuple2::getItem2).toArray(String[]::new))
.map(response -> response.entrySet().stream()
.filter(e -> e.getValue() != null)
.collect(Collectors.toMap(e -> getTpFromKey(e.getKey()),
e -> ProcessingState.getOrEmpty(stateCodec.decode(e.getValue())))));
}
private String getKey(TopicPartition partition) {
return consumerGroupId + ":" + partition.topic() + ":" + partition.partition();
}
private TopicPartition getTpFromKey(String key) {
String[] parts = key.split(":");
return new TopicPartition(parts[1], Integer.parseInt(parts[2]));
}
@Override
public Uni<Void> persistProcessingState(Map<TopicPartition, ProcessingState<?>> states) {
if (states.isEmpty() || closed.get()) {
return Uni.createFrom().voidItem();
}
String[] keys = states.keySet().stream().map(this::getKey).toArray(String[]::new);
return redis.withTransaction(r -> r.value(byte[].class).mget(keys), (current, r) -> {
Map<String, byte[]> map = states.entrySet().stream().filter(toPersist -> {
String key = getKey(toPersist.getKey());
ProcessingState<?> newState = toPersist.getValue();
if (!current.containsKey(key)) {
return true;
}
ProcessingState<?> currentState = stateCodec.decode(current.get(key));
return ProcessingState.isEmptyOrNull(currentState) ||
(!ProcessingState.isEmptyOrNull(newState) && newState.getOffset() >= currentState.getOffset());
}).collect(Collectors.toMap(e -> getKey(e.getKey()), e -> stateCodec.encode(e.getValue())));
if (map.isEmpty()) {
return Uni.createFrom().voidItem();
} else {
return r.value(byte[].class).mset(map);
}
}, keys).replaceWithVoid();
}
}
| Factory |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/enums/EnumMapDeserializationTest.java | {
"start": 1326,
"end": 1587
} | class ____ extends EnumMap<TestEnum,String> {
@JsonCreator
public FromDelegateEnumMap(Map<Object,Object> stuff) {
super(TestEnum.class);
put(TestEnum.OK, String.valueOf(stuff));
}
}
static | FromDelegateEnumMap |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/common/eventtime/WatermarkOutputMultiplexer.java | {
"start": 2302,
"end": 2435
} | class ____ {
/** A callback for propagating changes to split based watermarks. */
@Internal
public | WatermarkOutputMultiplexer |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_3100/Issue3131.java | {
"start": 1515,
"end": 2215
} | class ____ extends Org implements Serializable{
private String name;
private String idcard;
public UserOrg() {
}
public UserOrg(String name, String idcard)
{
super (name);
this.name = name;
this.idcard = idcard;
}
public String getName()
{
return name;
}
public void setName(String name)
{
this.name = name;
}
public String getIdcard()
{
return idcard;
}
public void setIdcard(String idcard)
{
this.idcard = idcard;
}
}
public static abstract | UserOrg |
java | netty__netty | codec-smtp/src/main/java/io/netty/handler/codec/smtp/DefaultSmtpResponse.java | {
"start": 854,
"end": 2589
} | class ____ implements SmtpResponse {
private final int code;
private final List<CharSequence> details;
/**
* Creates a new instance with the given smtp code and no details.
*/
public DefaultSmtpResponse(int code) {
this(code, (List<CharSequence>) null);
}
/**
* Creates a new instance with the given smtp code and details.
*/
public DefaultSmtpResponse(int code, CharSequence... details) {
this(code, SmtpUtils.toUnmodifiableList(details));
}
DefaultSmtpResponse(int code, List<CharSequence> details) {
if (code < 100 || code > 599) {
throw new IllegalArgumentException("code must be 100 <= code <= 599");
}
this.code = code;
if (details == null) {
this.details = Collections.emptyList();
} else {
this.details = Collections.unmodifiableList(details);
}
}
@Override
public int code() {
return code;
}
@Override
public List<CharSequence> details() {
return details;
}
@Override
public int hashCode() {
return code * 31 + details.hashCode();
}
@Override
public boolean equals(Object o) {
if (!(o instanceof DefaultSmtpResponse)) {
return false;
}
if (o == this) {
return true;
}
DefaultSmtpResponse other = (DefaultSmtpResponse) o;
return code() == other.code() &&
details().equals(other.details());
}
@Override
public String toString() {
return "DefaultSmtpResponse{" +
"code=" + code +
", details=" + details +
'}';
}
}
| DefaultSmtpResponse |
java | apache__camel | components/camel-quickfix/src/main/java/org/apache/camel/component/quickfixj/QuickfixjEventListener.java | {
"start": 1008,
"end": 1156
} | interface ____ {
void onEvent(QuickfixjEventCategory eventCategory, SessionID sessionID, Message message) throws Exception;
}
| QuickfixjEventListener |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/web/filter/HiddenHttpMethodFilter.java | {
"start": 2350,
"end": 3792
} | class ____ extends OncePerRequestFilter {
private static final List<String> ALLOWED_METHODS =
List.of(HttpMethod.PUT.name(), HttpMethod.DELETE.name(), HttpMethod.PATCH.name());
/** Default method parameter: {@code _method}. */
public static final String DEFAULT_METHOD_PARAM = "_method";
private String methodParam = DEFAULT_METHOD_PARAM;
/**
* Set the parameter name to look for HTTP methods.
* @see #DEFAULT_METHOD_PARAM
*/
public void setMethodParam(String methodParam) {
Assert.hasText(methodParam, "'methodParam' must not be empty");
this.methodParam = methodParam;
}
@Override
protected void doFilterInternal(HttpServletRequest request, HttpServletResponse response, FilterChain filterChain)
throws ServletException, IOException {
HttpServletRequest requestToUse = request;
if ("POST".equals(request.getMethod()) && request.getAttribute(WebUtils.ERROR_EXCEPTION_ATTRIBUTE) == null) {
String paramValue = request.getParameter(this.methodParam);
if (StringUtils.hasLength(paramValue)) {
String method = paramValue.toUpperCase(Locale.ROOT);
if (ALLOWED_METHODS.contains(method)) {
requestToUse = new HttpMethodRequestWrapper(request, method);
}
}
}
filterChain.doFilter(requestToUse, response);
}
/**
* Simple {@link HttpServletRequest} wrapper that returns the supplied method for
* {@link HttpServletRequest#getMethod()}.
*/
private static | HiddenHttpMethodFilter |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/onetoone/bidirectional/BiRefingOptionalEntity.java | {
"start": 489,
"end": 1428
} | class ____ {
@Id
@GeneratedValue
private Integer id;
@OneToOne(optional = true)
@JoinTable(name = "A_B", joinColumns = @JoinColumn(name = "a_id", unique = true), inverseJoinColumns = @JoinColumn(name = "b_id") )
private BiRefedOptionalEntity reference;
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public BiRefedOptionalEntity getReference() {
return reference;
}
public void setReference(BiRefedOptionalEntity reference) {
this.reference = reference;
}
@Override
public int hashCode() {
return ( id != null ? id.hashCode() : 0 );
}
@Override
public boolean equals(Object object) {
if ( object == this ) {
return true;
}
if ( !( object instanceof BiRefingOptionalEntity ) ) {
return false;
}
BiRefingOptionalEntity that = (BiRefingOptionalEntity) object;
return !( id != null ? !id.equals( that.id ) : that.id != null );
}
}
| BiRefingOptionalEntity |
java | redisson__redisson | redisson/src/test/java/org/redisson/RedissonBucketReactiveTest.java | {
"start": 264,
"end": 4299
} | class ____ extends BaseReactiveTest {
@Test
public void testExpire() throws InterruptedException {
RBucketReactive<String> bucket = redisson.getBucket("test1");
sync(bucket.set("someValue", 1, TimeUnit.SECONDS));
Assertions.assertNotNull(sync(bucket.get()));
Thread.sleep(1100);
Assertions.assertNull(sync(bucket.get()));
}
@Test
public void testRenamenx() {
RBucketReactive<String> bucket = redisson.getBucket("test");
sync(bucket.set("someValue"));
RBucketReactive<String> bucket2 = redisson.getBucket("test2");
sync(bucket2.set("someValue2"));
Assertions.assertTrue(sync(bucket.renamenx("test1")));
RBucketReactive<String> oldBucket = redisson.getBucket("test");
Assertions.assertNull(sync(oldBucket.get()));
RBucketReactive<String> newBucket = redisson.getBucket("test1");
Assertions.assertEquals("someValue", sync(newBucket.get()));
Assertions.assertFalse(sync(newBucket.renamenx("test2")));
}
@Test
public void testRename() {
RBucketReactive<String> bucket = redisson.getBucket("test");
sync(bucket.set("someValue"));
sync(bucket.rename("test1"));
RBucketReactive<String> oldBucket = redisson.getBucket("test");
Assertions.assertNull(sync(oldBucket.get()));
RBucketReactive<String> newBucket = redisson.getBucket("test1");
Assertions.assertEquals("someValue", sync(newBucket.get()));
}
@Test
public void testSetGet() {
RBucketReactive<String> bucket = redisson.getBucket("test");
Assertions.assertNull(sync(bucket.get()));
String value = "somevalue";
sync(bucket.set(value));
Assertions.assertEquals(value, sync(bucket.get()));
}
@Test
public void testSetDelete() {
RBucketReactive<String> bucket = redisson.getBucket("test");
String value = "somevalue";
sync(bucket.set(value));
Assertions.assertEquals(value, sync(bucket.get()));
Assertions.assertTrue(sync(bucket.delete()));
Assertions.assertNull(sync(bucket.get()));
Assertions.assertFalse(sync(bucket.delete()));
}
@Test
public void testSetExist() {
RBucketReactive<String> bucket = redisson.getBucket("test");
Assertions.assertNull(sync(bucket.get()));
String value = "somevalue";
sync(bucket.set(value));
Assertions.assertEquals(value, sync(bucket.get()));
Assertions.assertTrue(sync(bucket.isExists()));
}
@Test
public void testSetDeleteNotExist() {
RBucketReactive<String> bucket = redisson.getBucket("test");
Assertions.assertNull(sync(bucket.get()));
String value = "somevalue";
sync(bucket.set(value));
Assertions.assertEquals(value, sync(bucket.get()));
Assertions.assertTrue(sync(bucket.isExists()));
sync(bucket.delete());
Assertions.assertFalse(sync(bucket.isExists()));
}
@Test
public void testFindPattern() {
Collection<String> names = Arrays.asList("test:testGetPattern:one", "test:testGetPattern:two");
Collection<String> vals = Arrays.asList("one-val", "two-val");
sync(redisson.getBucket("test:three").set("three-val"));
sync(redisson.getBucket("test:testGetPattern:one").set("one-val"));
sync(redisson.getBucket("test:testGetPattern:two").set("two-val"));
List<RBucketReactive<String>> buckets = redisson.findBuckets("test:testGetPattern:*");
Assertions.assertEquals(2, buckets.size());
Assertions.assertTrue(names.contains(buckets.get(0).getName()));
Assertions.assertTrue(names.contains(buckets.get(1).getName()));
Assertions.assertTrue(vals.contains(sync(buckets.get(0).get())));
Assertions.assertTrue(vals.contains(sync(buckets.get(1).get())));
for (RBucketReactive<String> bucket : buckets) {
bucket.delete();
}
}
}
| RedissonBucketReactiveTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/dataflow/nullnesspropagation/NullnessPropagationTest.java | {
"start": 71460,
"end": 72026
} | interface ____<T extends @NonNull Object> {
T get();
}
}
""")
.doTest();
}
@Test
public void annotatedMethodTypeParams() {
compilationHelper
.addSourceLines(
"AnnotatedMethodTypeParamsTest.java",
"""
package com.google.errorprone.dataflow.nullnesspropagation;
import static com.google.errorprone.dataflow.nullnesspropagation.NullnessPropagationTest.triggerNullnessChecker;
import org.checkerframework.checker.nullness.qual.Nullable;
import org.checkerframework.checker.nullness.qual.NonNull;
public | NonNullElementCollection |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nec/VEDeviceDiscoverer.java | {
"start": 1541,
"end": 5006
} | class ____ {
private static final String STATE_TERMINATING = "TERMINATING";
private static final String STATE_INITIALIZING = "INITIALIZING";
private static final String STATE_OFFLINE = "OFFLINE";
private static final String STATE_ONLINE = "ONLINE";
private static final Logger LOG =
LoggerFactory.getLogger(VEDeviceDiscoverer.class);
private static final String[] DEVICE_STATE = {STATE_ONLINE, STATE_OFFLINE,
STATE_INITIALIZING, STATE_TERMINATING};
private UdevUtil udev;
private Function<String[], CommandExecutor>
commandExecutorProvider = this::createCommandExecutor;
VEDeviceDiscoverer(UdevUtil udevUtil) {
udev = udevUtil;
}
public Set<Device> getDevicesFromPath(String path) throws IOException {
MutableInt counter = new MutableInt(0);
try (Stream<Path> stream = Files.walk(Paths.get(path), 1)) {
return stream.filter(p -> p.toFile().getName().startsWith("veslot"))
.map(p -> toDevice(p, counter))
.collect(Collectors.toSet());
}
}
private Device toDevice(Path p, MutableInt counter) {
CommandExecutor executor =
commandExecutorProvider.apply(
new String[]{"stat", "-L", "-c", "%t:%T:%F", p.toString()});
try {
LOG.info("Checking device file: {}", p);
executor.execute();
String statOutput = executor.getOutput();
String[] stat = statOutput.trim().split(":");
int major = Integer.parseInt(stat[0], 16);
int minor = Integer.parseInt(stat[1], 16);
char devType = getDevType(p, stat[2]);
int deviceNumber = makeDev(major, minor);
LOG.info("Device: major: {}, minor: {}, devNo: {}, type: {}",
major, minor, deviceNumber, devType);
String sysPath = udev.getSysPath(deviceNumber, devType);
LOG.info("Device syspath: {}", sysPath);
String deviceState = getDeviceState(sysPath);
Device.Builder builder = Device.Builder.newInstance();
builder.setId(counter.getAndIncrement())
.setMajorNumber(major)
.setMinorNumber(minor)
.setHealthy(STATE_ONLINE.equalsIgnoreCase(deviceState))
.setStatus(deviceState)
.setDevPath(p.toAbsolutePath().toString());
return builder.build();
} catch (IOException e) {
throw new UncheckedIOException("Cannot execute stat command", e);
}
}
private int makeDev(int major, int minor) {
return major * 256 + minor;
}
private char getDevType(Path p, String fromStat) {
if (fromStat.contains("character")) {
return 'c';
} else if (fromStat.contains("block")) {
return 'b';
} else {
throw new IllegalArgumentException(
"File is neither a char nor block device: " + p);
}
}
private String getDeviceState(String sysPath) throws IOException {
Path statePath = Paths.get(sysPath, "os_state");
try (FileInputStream fis =
new FileInputStream(statePath.toString())) {
byte state = (byte) fis.read();
if (state < 0 || DEVICE_STATE.length <= state) {
return String.format("Unknown (%d)", state);
} else {
return DEVICE_STATE[state];
}
}
}
private CommandExecutor createCommandExecutor(String[] command) {
return new Shell.ShellCommandExecutor(
command);
}
@VisibleForTesting
void setCommandExecutorProvider(
Function<String[], CommandExecutor> provider) {
this.commandExecutorProvider = provider;
}
}
| VEDeviceDiscoverer |
java | apache__kafka | clients/src/test/java/org/apache/kafka/common/message/NullableStructMessageTest.java | {
"start": 1275,
"end": 5791
} | class ____ {
@Test
public void testDefaultValues() {
NullableStructMessageData message = new NullableStructMessageData();
assertNull(message.nullableStruct);
assertEquals(new NullableStructMessageData.MyStruct2(), message.nullableStruct2);
assertNull(message.nullableStruct3);
assertEquals(new NullableStructMessageData.MyStruct4(), message.nullableStruct4);
message = roundTrip(message, (short) 2);
assertNull(message.nullableStruct);
assertEquals(new NullableStructMessageData.MyStruct2(), message.nullableStruct2);
assertNull(message.nullableStruct3);
assertEquals(new NullableStructMessageData.MyStruct4(), message.nullableStruct4);
}
@Test
public void testRoundTrip() {
NullableStructMessageData message = new NullableStructMessageData()
.setNullableStruct(new NullableStructMessageData.MyStruct()
.setMyInt(1)
.setMyString("1"))
.setNullableStruct2(new NullableStructMessageData.MyStruct2()
.setMyInt(2)
.setMyString("2"))
.setNullableStruct3(new NullableStructMessageData.MyStruct3()
.setMyInt(3)
.setMyString("3"))
.setNullableStruct4(new NullableStructMessageData.MyStruct4()
.setMyInt(4)
.setMyString("4"));
NullableStructMessageData newMessage = roundTrip(message, (short) 2);
assertEquals(message, newMessage);
}
@Test
public void testNullForAllFields() {
NullableStructMessageData message = new NullableStructMessageData()
.setNullableStruct(null)
.setNullableStruct2(null)
.setNullableStruct3(null)
.setNullableStruct4(null);
message = roundTrip(message, (short) 2);
assertNull(message.nullableStruct);
assertNull(message.nullableStruct2);
assertNull(message.nullableStruct3);
assertNull(message.nullableStruct4);
}
@Test
public void testNullableStruct2CanNotBeNullInVersion0() {
NullableStructMessageData message = new NullableStructMessageData()
.setNullableStruct2(null);
assertThrows(NullPointerException.class, () -> roundTrip(message, (short) 0));
}
@Test
public void testToStringWithNullStructs() {
NullableStructMessageData message = new NullableStructMessageData()
.setNullableStruct(null)
.setNullableStruct2(null)
.setNullableStruct3(null)
.setNullableStruct4(null);
message.toString();
}
/**
* Regression test for KAFKA-18199. Tests that the size of the varint encoding a tagged nullable
* struct's size is calculated correctly.
*/
@Test
public void testTaggedStructSize() {
NullableStructMessageData message = new NullableStructMessageData()
.setNullableStruct(null)
.setNullableStruct2(null)
.setNullableStruct3(null)
.setNullableStruct4(new NullableStructMessageData.MyStruct4()
.setMyInt(4)
.setMyString(new String(new char[121])));
// We want the struct to be 127 bytes long, so that the varint encoding of its size is one
// short of overflowing into a two-byte representation. An extra byte is added to the
// nullable struct size to account for the is-not-null flag.
assertEquals(127, message.nullableStruct4().size(new ObjectSerializationCache(), (short) 2));
NullableStructMessageData newMessage = roundTrip(message, (short) 2);
assertEquals(message, newMessage);
}
private NullableStructMessageData deserialize(ByteBuffer buf, short version) {
NullableStructMessageData message = new NullableStructMessageData();
message.read(new ByteBufferAccessor(buf.duplicate()), version);
return message;
}
private ByteBuffer serialize(NullableStructMessageData message, short version) {
return MessageUtil.toByteBufferAccessor(message, version).buffer();
}
private NullableStructMessageData roundTrip(NullableStructMessageData message, short version) {
ByteBuffer buffer = serialize(message, version);
// Check size calculation
assertEquals(buffer.remaining(), message.size(new ObjectSerializationCache(), version));
return deserialize(buffer.duplicate(), version);
}
}
| NullableStructMessageTest |
java | bumptech__glide | library/src/main/java/com/bumptech/glide/request/target/ViewTarget.java | {
"start": 12027,
"end": 20512
} | class ____ {
// Some negative sizes (Target.SIZE_ORIGINAL) are valid, 0 is never valid.
private static final int PENDING_SIZE = 0;
@VisibleForTesting @Nullable static Integer maxDisplayLength;
private final View view;
private final List<SizeReadyCallback> cbs = new ArrayList<>();
@Synthetic boolean waitForLayout;
@Nullable private SizeDeterminerLayoutListener layoutListener;
SizeDeterminer(@NonNull View view) {
this.view = view;
}
// Use the maximum to avoid depending on the device's current orientation.
private static int getMaxDisplayLength(@NonNull Context context) {
if (maxDisplayLength == null) {
WindowManager windowManager =
(WindowManager) context.getSystemService(Context.WINDOW_SERVICE);
Display display = Preconditions.checkNotNull(windowManager).getDefaultDisplay();
Point displayDimensions = new Point();
display.getSize(displayDimensions);
maxDisplayLength = Math.max(displayDimensions.x, displayDimensions.y);
}
return maxDisplayLength;
}
private void notifyCbs(int width, int height) {
// One or more callbacks may trigger the removal of one or more additional callbacks, so we
// need a copy of the list to avoid a concurrent modification exception. One place this
// happens is when a full request completes from the in memory cache while its thumbnail is
// still being loaded asynchronously. See #2237.
for (SizeReadyCallback cb : new ArrayList<>(cbs)) {
cb.onSizeReady(width, height);
}
}
@Synthetic
void checkCurrentDimens() {
if (cbs.isEmpty()) {
return;
}
int currentWidth = getTargetWidth();
int currentHeight = getTargetHeight();
if (!isViewStateAndSizeValid(currentWidth, currentHeight)) {
return;
}
notifyCbs(currentWidth, currentHeight);
clearCallbacksAndListener();
}
void getSize(@NonNull SizeReadyCallback cb) {
int currentWidth = getTargetWidth();
int currentHeight = getTargetHeight();
if (isViewStateAndSizeValid(currentWidth, currentHeight)) {
cb.onSizeReady(currentWidth, currentHeight);
return;
}
// We want to notify callbacks in the order they were added and we only expect one or two
// callbacks to be added a time, so a List is a reasonable choice.
if (!cbs.contains(cb)) {
cbs.add(cb);
}
if (layoutListener == null) {
ViewTreeObserver observer = view.getViewTreeObserver();
layoutListener = new SizeDeterminerLayoutListener(this);
observer.addOnPreDrawListener(layoutListener);
}
}
/**
* The callback may be called anyway if it is removed by another {@link SizeReadyCallback} or
* otherwise removed while we're notifying the list of callbacks.
*
* <p>See #2237.
*/
void removeCallback(@NonNull SizeReadyCallback cb) {
cbs.remove(cb);
}
void clearCallbacksAndListener() {
// Keep a reference to the layout attachStateListener and remove it here
// rather than having the observer remove itself because the observer
// we add the attachStateListener to will be almost immediately merged into
// another observer and will therefore never be alive. If we instead
// keep a reference to the attachStateListener and remove it here, we get the
// current view tree observer and should succeed.
ViewTreeObserver observer = view.getViewTreeObserver();
if (observer.isAlive()) {
observer.removeOnPreDrawListener(layoutListener);
}
layoutListener = null;
cbs.clear();
}
private boolean isViewStateAndSizeValid(int width, int height) {
return isDimensionValid(width) && isDimensionValid(height);
}
private int getTargetHeight() {
int verticalPadding = view.getPaddingTop() + view.getPaddingBottom();
LayoutParams layoutParams = view.getLayoutParams();
int layoutParamSize = layoutParams != null ? layoutParams.height : PENDING_SIZE;
return getTargetDimen(view.getHeight(), layoutParamSize, verticalPadding);
}
private int getTargetWidth() {
int horizontalPadding = view.getPaddingLeft() + view.getPaddingRight();
LayoutParams layoutParams = view.getLayoutParams();
int layoutParamSize = layoutParams != null ? layoutParams.width : PENDING_SIZE;
return getTargetDimen(view.getWidth(), layoutParamSize, horizontalPadding);
}
private int getTargetDimen(int viewSize, int paramSize, int paddingSize) {
// We consider the View state as valid if the View has non-null layout params and a non-zero
// layout params width and height. This is imperfect. We're making an assumption that View
// parents will obey their child's layout parameters, which isn't always the case.
int adjustedParamSize = paramSize - paddingSize;
if (adjustedParamSize > 0) {
return adjustedParamSize;
}
// Since we always prefer layout parameters with fixed sizes, even if waitForLayout is true,
// we might as well ignore it and just return the layout parameters above if we have them.
// Otherwise we should wait for a layout pass before checking the View's dimensions.
if (waitForLayout && view.isLayoutRequested()) {
return PENDING_SIZE;
}
// We also consider the View state valid if the View has a non-zero width and height. This
// means that the View has gone through at least one layout pass. It does not mean the Views
// width and height are from the current layout pass. For example, if a View is re-used in
// RecyclerView or ListView, this width/height may be from an old position. In some cases
// the dimensions of the View at the old position may be different than the dimensions of the
// View in the new position because the LayoutManager/ViewParent can arbitrarily decide to
// change them. Nevertheless, in most cases this should be a reasonable choice.
int adjustedViewSize = viewSize - paddingSize;
if (adjustedViewSize > 0) {
return adjustedViewSize;
}
// Finally we consider the view valid if the layout parameter size is set to wrap_content.
// It's difficult for Glide to figure out what to do here. Although Target.SIZE_ORIGINAL is a
// coherent choice, it's extremely dangerous because original images may be much too large to
// fit in memory or so large that only a couple can fit in memory, causing OOMs. If users want
// the original image, they can always use .override(Target.SIZE_ORIGINAL). Since wrap_content
// may never resolve to a real size unless we load something, we aim for a square whose length
// is the largest screen size. That way we're loading something and that something has some
// hope of being downsampled to a size that the device can support. We also log a warning that
// tries to explain what Glide is doing and why some alternatives are preferable.
// Since WRAP_CONTENT is sometimes used as a default layout parameter, we always wait for
// layout to complete before using this fallback parameter (ConstraintLayout among others).
if (!view.isLayoutRequested() && paramSize == LayoutParams.WRAP_CONTENT) {
if (Log.isLoggable(TAG, Log.INFO)) {
Log.i(
TAG,
"Glide treats LayoutParams.WRAP_CONTENT as a request for an image the size of this"
+ " device's screen dimensions. If you want to load the original image and are"
+ " ok with the corresponding memory cost and OOMs (depending on the input size),"
+ " use override(Target.SIZE_ORIGINAL). Otherwise, use LayoutParams.MATCH_PARENT,"
+ " set layout_width and layout_height to fixed dimension, or use .override()"
+ " with fixed dimensions.");
}
return getMaxDisplayLength(view.getContext());
}
// If the layout parameters are < padding, the view size is < padding, or the layout
// parameters are set to match_parent or wrap_content and no layout has occurred, we should
// wait for layout and repeat.
return PENDING_SIZE;
}
private boolean isDimensionValid(int size) {
return size > 0 || size == SIZE_ORIGINAL;
}
private static final | SizeDeterminer |
java | apache__rocketmq | filter/src/main/java/org/apache/rocketmq/filter/expression/ComparisonExpression.java | {
"start": 3095,
"end": 4161
} | class ____ extends UnaryExpression implements BooleanExpression {
String search;
public ContainsExpression(Expression right, String search) {
super(right);
this.search = search;
}
public String getExpressionSymbol() {
return "CONTAINS";
}
public Object evaluate(EvaluationContext message) throws Exception {
if (search == null || search.length() == 0) {
return Boolean.FALSE;
}
Object rv = this.getRight().evaluate(message);
if (rv == null) {
return Boolean.FALSE;
}
if (!(rv instanceof String)) {
return Boolean.FALSE;
}
return ((String)rv).contains(search) ? Boolean.TRUE : Boolean.FALSE;
}
public boolean matches(EvaluationContext message) throws Exception {
Object object = evaluate(message);
return object != null && object == Boolean.TRUE;
}
}
static | ContainsExpression |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/annotation/AnnotationUtilsTests.java | {
"start": 47668,
"end": 47743
} | interface ____ {
}
@MetaMeta
@Retention(RetentionPolicy.RUNTIME)
@ | MetaMeta |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/threadsafety/GuardedByCheckerTest.java | {
"start": 53489,
"end": 53584
} | class ____ {
final Object mu = new Object();
private static final | Test |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/intg/AdditionalMappingContributorTests.java | {
"start": 6124,
"end": 6604
} | class ____ {
@Id
private Integer id;
@Basic
private String name;
@SuppressWarnings("unused")
protected Entity1() {
// for use by Hibernate
}
public Entity1(Integer id, String name) {
this.id = id;
this.name = name;
}
public Integer getId() {
return id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
@Entity(name = "Entity2")
@Table(name = "Entity2")
public static | Entity1 |
java | spring-projects__spring-boot | module/spring-boot-actuator/src/test/java/org/springframework/boot/actuate/endpoint/annotation/EndpointDiscovererTests.java | {
"start": 20459,
"end": 20633
} | class ____ extends SpecializedTestEndpoint {
@ReadOperation
@Nullable Object getSpecialOne(@Selector String id) {
return null;
}
}
static | SubSpecializedTestEndpoint |
java | alibaba__nacos | api/src/test/java/com/alibaba/nacos/api/exception/runtime/NacosLoadExceptionTest.java | {
"start": 824,
"end": 1073
} | class ____ {
@Test
void testConstructor() {
NacosLoadException exception = new NacosLoadException("test");
assertEquals("test", exception.getMessage());
assertNull(exception.getCause());
}
} | NacosLoadExceptionTest |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/type/AbstractMethodMetadataTests.java | {
"start": 1211,
"end": 7146
} | class ____ {
@Test
void verifyEquals() {
MethodMetadata withMethod1 = getTagged(WithMethod.class);
MethodMetadata withMethod2 = getTagged(WithMethod.class);
MethodMetadata withMethodWithTwoArguments1 = getTagged(WithMethodWithTwoArguments.class);
MethodMetadata withMethodWithTwoArguments2 = getTagged(WithMethodWithTwoArguments.class);
assertThat(withMethod1).isNotEqualTo(null);
assertThat(withMethod1).isEqualTo(withMethod1);
assertThat(withMethod2).isEqualTo(withMethod2);
assertThat(withMethod1).isEqualTo(withMethod2);
assertThat(withMethod2).isEqualTo(withMethod1);
assertThat(withMethodWithTwoArguments1).isEqualTo(withMethodWithTwoArguments1);
assertThat(withMethodWithTwoArguments2).isEqualTo(withMethodWithTwoArguments2);
assertThat(withMethodWithTwoArguments1).isEqualTo(withMethodWithTwoArguments2);
assertThat(withMethodWithTwoArguments2).isEqualTo(withMethodWithTwoArguments1);
assertThat(withMethod1).isNotEqualTo(withMethodWithTwoArguments1);
assertThat(withMethodWithTwoArguments1).isNotEqualTo(withMethod1);
}
@Test
void verifyHashCode() {
MethodMetadata withMethod1 = getTagged(WithMethod.class);
MethodMetadata withMethod2 = getTagged(WithMethod.class);
MethodMetadata withMethodWithTwoArguments1 = getTagged(WithMethodWithTwoArguments.class);
MethodMetadata withMethodWithTwoArguments2 = getTagged(WithMethodWithTwoArguments.class);
assertThat(withMethod1).hasSameHashCodeAs(withMethod2);
assertThat(withMethodWithTwoArguments1).hasSameHashCodeAs(withMethodWithTwoArguments2);
assertThat(withMethod1).doesNotHaveSameHashCodeAs(withMethodWithTwoArguments1);
}
@Test
void verifyToString() {
assertThat(getTagged(WithMethod.class).toString())
.endsWith(WithMethod.class.getName() + ".test()");
assertThat(getTagged(WithMethodWithOneArgument.class).toString())
.endsWith(WithMethodWithOneArgument.class.getName() + ".test(java.lang.String)");
assertThat(getTagged(WithMethodWithTwoArguments.class).toString())
.endsWith(WithMethodWithTwoArguments.class.getName() + ".test(java.lang.String,java.lang.Integer)");
}
@Test
void getMethodNameReturnsMethodName() {
assertThat(getTagged(WithMethod.class).getMethodName()).isEqualTo("test");
}
@Test
void getDeclaringClassReturnsDeclaringClass() {
assertThat(getTagged(WithMethod.class).getDeclaringClassName()).isEqualTo(
WithMethod.class.getName());
}
@Test
void getReturnTypeReturnsReturnType() {
assertThat(getTagged(WithMethod.class).getReturnTypeName()).isEqualTo(
String.class.getName());
}
@Test
void isAbstractWhenAbstractReturnsTrue() {
assertThat(getTagged(WithAbstractMethod.class).isAbstract()).isTrue();
}
@Test
void isAbstractWhenNotAbstractReturnsFalse() {
assertThat(getTagged(WithMethod.class).isAbstract()).isFalse();
}
@Test
void isStatusWhenStaticReturnsTrue() {
assertThat(getTagged(WithStaticMethod.class).isStatic()).isTrue();
}
@Test
void isStaticWhenNotStaticReturnsFalse() {
assertThat(getTagged(WithMethod.class).isStatic()).isFalse();
}
@Test
void isFinalWhenFinalReturnsTrue() {
assertThat(getTagged(WithFinalMethod.class).isFinal()).isTrue();
}
@Test
void isFinalWhenNonFinalReturnsFalse() {
assertThat(getTagged(WithMethod.class).isFinal()).isFalse();
}
@Test
void isOverridableWhenOverridableReturnsTrue() {
assertThat(getTagged(WithMethod.class).isOverridable()).isTrue();
}
@Test
void isOverridableWhenNonOverridableReturnsFalse() {
assertThat(getTagged(WithStaticMethod.class).isOverridable()).isFalse();
assertThat(getTagged(WithFinalMethod.class).isOverridable()).isFalse();
assertThat(getTagged(WithPrivateMethod.class).isOverridable()).isFalse();
}
@Test
void getAnnotationsReturnsDirectAnnotations() {
MethodMetadata metadata = getTagged(WithDirectAnnotation.class);
assertThat(metadata.getAnnotations().stream().filter(
MergedAnnotation::isDirectlyPresent).map(
a -> a.getType().getName())).containsExactlyInAnyOrder(
Tag.class.getName(),
DirectAnnotation.class.getName());
}
@Test
void isAnnotatedWhenMatchesDirectAnnotationReturnsTrue() {
assertThat(getTagged(WithDirectAnnotation.class).isAnnotated(
DirectAnnotation.class.getName())).isTrue();
}
@Test
void isAnnotatedWhenMatchesMetaAnnotationReturnsTrue() {
assertThat(getTagged(WithMetaAnnotation.class).isAnnotated(
DirectAnnotation.class.getName())).isTrue();
}
@Test
void isAnnotatedWhenDoesNotMatchDirectOrMetaAnnotationReturnsFalse() {
assertThat(getTagged(WithMethod.class).isAnnotated(
DirectAnnotation.class.getName())).isFalse();
}
@Test
void getAnnotationAttributesReturnsAttributes() {
assertThat(getTagged(WithAnnotationAttributes.class).getAnnotationAttributes(
AnnotationAttributes.class.getName())).containsOnly(entry("name", "test"),
entry("size", 1));
}
@Test
void getAllAnnotationAttributesReturnsAllAttributes() {
MultiValueMap<String, Object> attributes = getTagged(WithMetaAnnotationAttributes.class)
.getAllAnnotationAttributes(AnnotationAttributes.class.getName());
assertThat(attributes).containsOnlyKeys("name", "size");
assertThat(attributes.get("name")).containsExactlyInAnyOrder("m1", "m2");
assertThat(attributes.get("size")).containsExactlyInAnyOrder(1, 2);
}
@Test // gh-24375
public void metadataLoadsForNestedAnnotations() {
AnnotationMetadata annotationMetadata = get(AnnotatedComponent.class);
assertThat(annotationMetadata.getAnnotationTypes()).containsExactly(EnclosingAnnotation.class.getName());
}
protected MethodMetadata getTagged(Class<?> source) {
return get(source, Tag.class.getName());
}
protected MethodMetadata get(Class<?> source, String annotationName) {
return get(source).getAnnotatedMethods(annotationName).iterator().next();
}
protected abstract AnnotationMetadata get(Class<?> source);
@Retention(RetentionPolicy.RUNTIME)
@ | AbstractMethodMetadataTests |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/legacy/Company.java | {
"start": 170,
"end": 427
} | class ____ extends Party {
String id;
String president;
String getId() {
return id;
}
void setId(String newValue) {
id = newValue;
}
String getPresident() {
return president;
}
void setPresident(String newValue) {
president = newValue;
}
}
| Company |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/factories/JobMasterServiceProcessFactory.java | {
"start": 1210,
"end": 2326
} | interface ____ {
/**
* Create a new {@link JobMasterServiceProcess} for the given leaderSessionId.
*
* @param leaderSessionId leaderSessionId for which to create a {@link JobMasterServiceProcess}
* @return the newly created {@link JobMasterServiceProcess}
*/
JobMasterServiceProcess create(UUID leaderSessionId);
/**
* Gets the {@link JobID} of the job for which this factory creates {@link
* JobMasterServiceProcess}.
*/
JobID getJobId();
/**
* Creates an {@link ArchivedExecutionGraph} for the job for which this factory creates {@link
* JobMasterServiceProcess} with the given jobStatus and failure cause.
*
* @param jobStatus jobStatus which the {@link ArchivedExecutionGraph} should have
* @param cause cause which the {@link ArchivedExecutionGraph} should be initialized with; null
* iff no failure cause
* @return created {@link ArchivedExecutionGraph}
*/
ArchivedExecutionGraph createArchivedExecutionGraph(
JobStatus jobStatus, @Nullable Throwable cause);
}
| JobMasterServiceProcessFactory |
java | spring-projects__spring-boot | core/spring-boot/src/main/java/org/springframework/boot/origin/Origin.java | {
"start": 1324,
"end": 3215
} | interface ____ {
/**
* Return the parent origin for this instance if there is one. The parent origin
* provides the origin of the item that created this one.
* @return the parent origin or {@code null}
* @since 2.4.0
* @see Origin#parentsFrom(Object)
*/
default @Nullable Origin getParent() {
return null;
}
/**
* Find the {@link Origin} that an object originated from. Checks if the source object
* is an {@link Origin} or {@link OriginProvider} and also searches exception stacks.
* @param source the source object or {@code null}
* @return an {@link Origin} or {@code null}
*/
static @Nullable Origin from(@Nullable Object source) {
if (source instanceof Origin origin) {
return origin;
}
Origin origin = null;
if (source instanceof OriginProvider originProvider) {
origin = originProvider.getOrigin();
}
if (origin == null && source instanceof Throwable throwable) {
return from(throwable.getCause());
}
return origin;
}
/**
* Find the parents of the {@link Origin} that an object originated from. Checks if
* the source object is an {@link Origin} or {@link OriginProvider} and also searches
* exception stacks. Provides a list of all parents up to root {@link Origin},
* starting with the most immediate parent.
* @param source the source object or {@code null}
* @return a list of parents or an empty list if the source is {@code null}, has no
* origin, or no parent
* @since 2.4.0
*/
static List<Origin> parentsFrom(@Nullable Object source) {
Origin origin = from(source);
if (origin == null) {
return Collections.emptyList();
}
Set<Origin> parents = new LinkedHashSet<>();
origin = origin.getParent();
while (origin != null && !parents.contains(origin)) {
parents.add(origin);
origin = origin.getParent();
}
return Collections.unmodifiableList(new ArrayList<>(parents));
}
}
| Origin |
java | quarkusio__quarkus | extensions/vertx-http/deployment/src/test/java/io/quarkus/vertx/http/devmode/management/LiveReloadManagementHandler.java | {
"start": 156,
"end": 466
} | class ____ implements Handler<RoutingContext> {
@Override
public void handle(RoutingContext event) {
LiveReloadManagementBean managementBean = Arc.container().instance(LiveReloadManagementBean.class).get();
event.response().end(managementBean.string());
}
}
| LiveReloadManagementHandler |
java | google__dagger | dagger-compiler/main/java/dagger/internal/codegen/kotlin/KotlinMetadata.java | {
"start": 1400,
"end": 5286
} | class ____ {
// Kotlin suffix for fields that are for a delegated property.
// See:
// https://github.com/JetBrains/kotlin/blob/master/core/compiler.common.jvm/src/org/jetbrains/kotlin/load/java/JvmAbi.kt#L32
private static final String DELEGATED_PROPERTY_NAME_SUFFIX = "$delegate";
// Map that associates field elements with its Kotlin synthetic method for annotations.
private final Map<XFieldElement, Optional<MethodForAnnotations>> elementFieldAnnotationMethodMap =
new HashMap<>();
abstract XTypeElement typeElement();
abstract ClassMetadata classMetadata();
@Memoized
ImmutableMap<String, XMethodElement> methodDescriptors() {
return typeElement().getDeclaredMethods().stream()
.collect(toImmutableMap(XMethodElement::getJvmDescriptor, Function.identity()));
}
/** Gets the synthetic method for annotations of a given field element. */
Optional<XMethodElement> getSyntheticAnnotationMethod(XFieldElement fieldElement) {
return getAnnotationMethod(fieldElement)
.map(
methodForAnnotations -> {
if (methodForAnnotations == MethodForAnnotations.MISSING) {
throw new IllegalStateException(
"Method for annotations is missing for " + fieldElement);
}
return methodForAnnotations.method();
});
}
/**
* Returns true if the synthetic method for annotations is missing. This can occur when inspecting
* the Kotlin metadata of a property from another compilation unit.
*/
boolean isMissingSyntheticAnnotationMethod(XFieldElement fieldElement) {
return getAnnotationMethod(fieldElement)
.map(methodForAnnotations -> methodForAnnotations == MethodForAnnotations.MISSING)
// This can be missing if there was no property annotation at all (e.g. no annotations or
// the qualifier is already properly attached to the field). For these cases, it isn't
// considered missing since there was no method to look for in the first place.
.orElse(false);
}
private Optional<MethodForAnnotations> getAnnotationMethod(XFieldElement fieldElement) {
return elementFieldAnnotationMethodMap.computeIfAbsent(
fieldElement, this::getAnnotationMethodUncached);
}
private Optional<MethodForAnnotations> getAnnotationMethodUncached(XFieldElement fieldElement) {
return Optional.ofNullable(findProperty(fieldElement).getMethodForAnnotationsSignature())
.map(
signature ->
Optional.ofNullable(methodDescriptors().get(signature))
.map(MethodForAnnotations::create)
// The method may be missing across different compilations.
// See https://youtrack.jetbrains.com/issue/KT-34684
.orElse(MethodForAnnotations.MISSING));
}
private PropertyMetadata findProperty(XFieldElement field) {
String fieldDescriptor = field.getJvmDescriptor();
if (classMetadata().getPropertiesBySignature().containsKey(fieldDescriptor)) {
return classMetadata().getPropertiesBySignature().get(fieldDescriptor);
} else {
// Fallback to finding property by name, see: https://youtrack.jetbrains.com/issue/KT-35124
final String propertyName = getPropertyNameFromField(field);
return classMetadata().getPropertiesBySignature().values().stream()
.filter(property -> propertyName.contentEquals(property.getName())) // SUPPRESS_GET_NAME_CHECK
.collect(DaggerCollectors.onlyElement());
}
}
private static String getPropertyNameFromField(XFieldElement field) {
String name = getSimpleName(field);
if (name.endsWith(DELEGATED_PROPERTY_NAME_SUFFIX)) {
return name.substring(0, name.length() - DELEGATED_PROPERTY_NAME_SUFFIX.length());
} else {
return name;
}
}
/** Parse Kotlin | KotlinMetadata |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/operations/converters/table/SchemaBuilderUtil.java | {
"start": 2645,
"end": 12460
} | class ____ {
final SqlValidator sqlValidator;
final Function<SqlNode, String> escapeExpressions;
final DataTypeFactory dataTypeFactory;
protected Map<String, Schema.UnresolvedColumn> columns = new LinkedHashMap<>();
Map<String, UnresolvedWatermarkSpec> watermarkSpecs = new HashMap<>();
UnresolvedPrimaryKey primaryKey = null;
protected SchemaBuilderUtil(
SqlValidator sqlValidator,
Function<SqlNode, String> escapeExpressions,
DataTypeFactory dataTypeFactory) {
this.sqlValidator = sqlValidator;
this.escapeExpressions = escapeExpressions;
this.dataTypeFactory = dataTypeFactory;
}
/** Sets the primary key for the schema. */
void setPrimaryKey(SqlTableConstraint primaryKeyConstraint) {
if (primaryKey != null) {
throw new ValidationException(
"There already exists a primary key constraint in the table.");
}
for (SqlNode primaryKeyNode : primaryKeyConstraint.getColumns()) {
String primaryKey = ((SqlIdentifier) primaryKeyNode).getSimple();
if (!columns.containsKey(primaryKey)) {
throw new ValidationException(
String.format(
"Primary key column '%s' is not defined in the schema at %s",
primaryKey, primaryKeyNode.getParserPosition()));
}
if (!(columns.get(primaryKey) instanceof UnresolvedPhysicalColumn)) {
throw new ValidationException(
String.format(
"Could not create a PRIMARY KEY with column '%s' at %s.\n"
+ "A PRIMARY KEY constraint must be declared on physical columns.",
primaryKey, primaryKeyNode.getParserPosition()));
}
}
primaryKey = toUnresolvedPrimaryKey(primaryKeyConstraint);
}
void addWatermarks(
List<SqlWatermark> derivedWatermarkSpecs,
Map<String, RelDataType> allFieldsTypes,
boolean overwriteWatermark) {
for (SqlWatermark derivedWatermarkSpec : derivedWatermarkSpecs) {
SqlIdentifier eventTimeColumnName = derivedWatermarkSpec.getEventTimeColumnName();
String rowtimeAttribute = eventTimeColumnName.toString();
if (!overwriteWatermark && watermarkSpecs.containsKey(rowtimeAttribute)) {
throw new ValidationException(
String.format(
"There already exists a watermark on column '%s'.",
rowtimeAttribute));
}
verifyRowtimeAttribute(derivedWatermarkSpec, allFieldsTypes);
watermarkSpecs.put(
rowtimeAttribute,
toUnresolvedWatermarkSpec(derivedWatermarkSpec, allFieldsTypes));
}
}
/**
* Builds and returns a {@link Schema} from the columns, watermark specs, and primary key
* specified in the builder.
*/
public Schema build() {
Schema.Builder resultBuilder = Schema.newBuilder();
resultBuilder.fromColumns(new ArrayList<>(columns.values()));
for (UnresolvedWatermarkSpec watermarkSpec : watermarkSpecs.values()) {
resultBuilder.watermark(
watermarkSpec.getColumnName(), watermarkSpec.getWatermarkExpression());
}
if (primaryKey != null) {
resultBuilder.primaryKeyNamed(
primaryKey.getConstraintName(),
primaryKey.getColumnNames().toArray(new String[0]));
}
return resultBuilder.build();
}
/**
* Verify the watermark rowtime attribute is part of the table schema specified in the {@code
* allFieldsTypes}.
*
* @param sqlWatermark The watermark with the rowtime attribute to verify.
* @param allFieldsTypes The table schema to verify the rowtime attribute against.
*/
static void verifyRowtimeAttribute(
SqlWatermark sqlWatermark, Map<String, RelDataType> allFieldsTypes) {
SqlIdentifier eventTimeColumnName = sqlWatermark.getEventTimeColumnName();
String fullRowtimeExpression = eventTimeColumnName.toString();
List<String> components = eventTimeColumnName.names;
if (!allFieldsTypes.containsKey(components.get(0))) {
throw new ValidationException(
String.format(
"The rowtime attribute field '%s' is not defined in the table schema, at %s\n"
+ "Available fields: [%s]",
fullRowtimeExpression,
eventTimeColumnName.getParserPosition(),
allFieldsTypes.keySet().stream()
.collect(Collectors.joining("', '", "'", "'"))));
}
if (components.size() > 1) {
RelDataType componentType = allFieldsTypes.get(components.get(0));
for (int i = 1; i < components.size(); i++) {
RelDataTypeField field = componentType.getField(components.get(i), true, false);
if (field == null) {
throw new ValidationException(
String.format(
"The rowtime attribute field '%s' is not defined in the table schema, at %s\n"
+ "Nested field '%s' was not found in a composite type: %s.",
fullRowtimeExpression,
eventTimeColumnName.getComponent(i).getParserPosition(),
components.get(i),
FlinkTypeFactory.toLogicalType(
allFieldsTypes.get(components.get(0)))));
}
componentType = field.getType();
}
}
}
/** Converts a {@link SqlRegularColumn} to an {@link UnresolvedPhysicalColumn} object. */
protected UnresolvedPhysicalColumn toUnresolvedPhysicalColumn(SqlRegularColumn column) {
final String name = column.getName().getSimple();
final String comment = column.getComment();
final LogicalType logicalType = toLogicalType(toRelDataType(column.getType()));
return new UnresolvedPhysicalColumn(name, fromLogicalToDataType(logicalType), comment);
}
/** Converts a {@link SqlComputedColumn} to an {@link UnresolvedComputedColumn} object. */
UnresolvedComputedColumn toUnresolvedComputedColumn(
SqlComputedColumn column, SqlNode validatedExpression) {
final String name = column.getName().getSimple();
final String comment = column.getComment();
return new UnresolvedComputedColumn(
name, new SqlCallExpression(escapeExpressions.apply(validatedExpression)), comment);
}
/** Converts a {@link SqlMetadataColumn} to an {@link UnresolvedMetadataColumn} object. */
UnresolvedMetadataColumn toUnresolvedMetadataColumn(SqlMetadataColumn column) {
final String name = column.getName().getSimple();
final String comment = column.getComment();
final LogicalType logicalType = toLogicalType(toRelDataType(column.getType()));
return new UnresolvedMetadataColumn(
name,
fromLogicalToDataType(logicalType),
column.getMetadataAlias().orElse(null),
column.isVirtual(),
comment);
}
/** Converts a {@link SqlWatermark} to an {@link UnresolvedWatermarkSpec} object. */
UnresolvedWatermarkSpec toUnresolvedWatermarkSpec(
SqlWatermark watermark, Map<String, RelDataType> accessibleFieldNamesToTypes) {
// this will validate and expand function identifiers.
SqlNode validated =
sqlValidator.validateParameterizedExpression(
watermark.getWatermarkStrategy(), accessibleFieldNamesToTypes);
return new UnresolvedWatermarkSpec(
watermark.getEventTimeColumnName().toString(),
new SqlCallExpression(escapeExpressions.apply(validated)));
}
/** Converts a {@link SqlTableConstraint} to an {@link UnresolvedPrimaryKey} object. */
public UnresolvedPrimaryKey toUnresolvedPrimaryKey(SqlTableConstraint primaryKey) {
List<String> columnNames = List.of(primaryKey.getColumnNames());
String constraintName =
primaryKey
.getConstraintName()
.orElseGet(
() ->
columnNames.stream()
.collect(Collectors.joining("_", "PK_", "")));
return new UnresolvedPrimaryKey(constraintName, columnNames);
}
/**
* Gets the column data type of {@link UnresolvedPhysicalColumn} column and convert it to a
* {@link LogicalType}.
*/
LogicalType getLogicalType(UnresolvedPhysicalColumn column) {
return dataTypeFactory.createDataType(column.getDataType()).getLogicalType();
}
/**
* Gets the column data type of {@link UnresolvedMetadataColumn} column and convert it to a
* {@link LogicalType}.
*/
LogicalType getLogicalType(UnresolvedMetadataColumn column) {
return dataTypeFactory.createDataType(column.getDataType()).getLogicalType();
}
RelDataType toRelDataType(SqlDataTypeSpec type) {
boolean nullable = type.getNullable() == null || type.getNullable();
return type.deriveType(sqlValidator, nullable);
}
}
| SchemaBuilderUtil |
java | apache__kafka | tools/src/main/java/org/apache/kafka/tools/filter/TopicPartitionFilter.java | {
"start": 2155,
"end": 2777
} | class ____ implements TopicPartitionFilter {
private final List<TopicPartitionFilter> filters;
public CompositeTopicPartitionFilter(List<TopicPartitionFilter> filters) {
this.filters = filters;
}
@Override
public boolean isTopicAllowed(String topic) {
return filters.stream().anyMatch(tp -> tp.isTopicAllowed(topic));
}
@Override
public boolean isTopicPartitionAllowed(TopicPartition partition) {
return filters.stream().anyMatch(tp -> tp.isTopicPartitionAllowed(partition));
}
}
}
| CompositeTopicPartitionFilter |
java | quarkusio__quarkus | integration-tests/maven/src/test/java/io/quarkus/maven/it/CreateProjectCodestartMojoIT.java | {
"start": 5282,
"end": 9559
} | class ____ extends BonjourResourceTest");
}
private Path generateProject(String buildtool, String language, String extensions, Map<String, String> options)
throws Exception {
final StringBuilder name = new StringBuilder();
name.append("project-").append(buildtool).append('-').append(language);
if (extensions.isEmpty()) {
name.append("-commandmode");
} else {
name.append('-');
for (int i = 0; i < extensions.length(); ++i) {
char c = extensions.charAt(i);
if (c == ',') {
c = '-';
} else if (c == ':') {
c = '-';
}
name.append(c);
}
}
if (!options.isEmpty()) {
name.append("-custom");
}
testDir = prepareTestDir(name.toString());
LOG.info("creating project in " + testDir.toPath().toString());
return runCreateCommand(buildtool, extensions + (!Objects.equals(language, "java") ? "," + language : ""), options);
}
private static File prepareTestDir(String name) {
File tc = new File("target/codestart-test/" + name);
if (tc.isDirectory()) {
try {
FileUtils.deleteDirectory(tc);
} catch (IOException e) {
throw new RuntimeException("Cannot delete directory: " + tc, e);
}
}
boolean mkdirs = tc.mkdirs();
LOG.log(Level.FINE, "codestart-test created? %s", mkdirs);
return tc;
}
private Path runCreateCommand(String buildTool, String extensions, Map<String, String> options)
throws MavenInvocationException, FileNotFoundException, UnsupportedEncodingException {
// Scaffold the new project
assertThat(testDir).isDirectory();
Properties properties = new Properties();
properties.put("projectGroupId", "org.test");
properties.put("projectArtifactId", "my-test-app");
properties.put("codestartsEnabled", "true");
properties.put("buildTool", buildTool);
properties.put("extensions", extensions);
properties.putAll(options);
InvocationResult result = executeCreate(properties);
assertThat(result.getExitCode()).isZero();
return testDir.toPath().resolve("my-test-app");
}
private InvocationResult executeCreate(Properties params)
throws MavenInvocationException, FileNotFoundException, UnsupportedEncodingException {
Invoker invoker = initInvoker(testDir);
params.setProperty("platformGroupId", ToolsConstants.IO_QUARKUS);
params.setProperty("platformArtifactId", "quarkus-bom");
params.setProperty("platformVersion", getQuarkusCoreVersion());
InvocationRequest request = new DefaultInvocationRequest();
request.setBatchMode(true);
request.setGoals(Collections.singletonList(
getMavenPluginGroupId() + ":" + getMavenPluginArtifactId() + ":" + getMavenPluginVersion() + ":create"));
request.setDebug(false);
request.setShowErrors(true);
request.setProperties(params);
PrintStreamLogger logger = getPrintStreamLogger("create-codestart.log");
invoker.setLogger(logger);
return invoker.execute(request);
}
private PrintStreamLogger getPrintStreamLogger(String s) throws UnsupportedEncodingException, FileNotFoundException {
File log = new File(testDir, s);
return new PrintStreamLogger(new PrintStream(new FileOutputStream(log), false, "UTF-8"),
InvokerLogger.DEBUG);
}
private void checkContent(final Path resource, final String... contentsToFind) {
assertThat(resource).isRegularFile();
Stream.of(contentsToFind)
.forEach(c -> {
try {
assertThat(FileUtils.readFileToString(resource.toFile(), "UTF-8")).contains(c);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
});
}
private void checkDir(final Path dir) throws IOException {
assertThat(dir).isDirectory();
}
}
| BonjourResourceIT |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/time/TimeInStaticInitializerTest.java | {
"start": 835,
"end": 1166
} | class ____ {
private final CompilationTestHelper helper =
CompilationTestHelper.newInstance(TimeInStaticInitializer.class, getClass());
@Test
public void positive() {
helper
.addSourceLines(
"Test.java",
"""
import java.time.Instant;
| TimeInStaticInitializerTest |
java | apache__spark | launcher/src/main/java/org/apache/spark/launcher/InProcessLauncher.java | {
"start": 1767,
"end": 3567
} | class ____ extends AbstractLauncher<InProcessLauncher> {
private static final Logger LOG = Logger.getLogger(InProcessLauncher.class.getName());
/**
* Starts a Spark application.
*
* @see AbstractLauncher#startApplication(SparkAppHandle.Listener...)
* @param listeners Listeners to add to the handle before the app is launched.
* @return A handle for the launched application.
*/
@Override
public SparkAppHandle startApplication(SparkAppHandle.Listener... listeners) throws IOException {
if (builder.isClientMode(builder.getEffectiveConfig())) {
LOG.warning("It's not recommended to run client-mode applications using InProcessLauncher.");
}
Method main = findSparkSubmit();
LauncherServer server = LauncherServer.getOrCreateServer();
InProcessAppHandle handle = new InProcessAppHandle(server);
for (SparkAppHandle.Listener l : listeners) {
handle.addListener(l);
}
String secret = server.registerHandle(handle);
setConf(LauncherProtocol.CONF_LAUNCHER_PORT, String.valueOf(server.getPort()));
setConf(LauncherProtocol.CONF_LAUNCHER_SECRET, secret);
List<String> sparkArgs = builder.buildSparkSubmitArgs();
String[] argv = sparkArgs.toArray(new String[sparkArgs.size()]);
String appName = CommandBuilderUtils.firstNonEmpty(builder.appName, builder.mainClass,
"<unknown>");
handle.start(appName, main, argv);
return handle;
}
@Override
InProcessLauncher self() {
return this;
}
// Visible for testing.
Method findSparkSubmit() throws IOException {
ClassLoader cl = Thread.currentThread().getContextClassLoader();
if (cl == null) {
cl = getClass().getClassLoader();
}
Class<?> sparkSubmit;
// SPARK-22941: first try the new SparkSubmit | InProcessLauncher |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/converted/converter/IgnoreAutoAppliedConverterForIdAndVersionTest.java | {
"start": 937,
"end": 1937
} | class ____ {
public final String sql = "select code from SampleEntity where id = :id";
@Test
public void test(SessionFactoryScope scope) {
scope.inTransaction(
(session) -> {
final SampleEntity sampleEntity = new SampleEntity();
sampleEntity.setId( 1 );
sampleEntity.setCode( 123 );
session.persist( sampleEntity );
}
);
scope.inTransaction(
(session) -> {
final Integer code = session.createNativeQuery( sql, Integer.class )
.setParameter( "id", 1L )
.getSingleResult();
assertEquals( -123, (int) code );
final SampleEntity sampleEntity = session.find( SampleEntity.class, 1 );
assertEquals( 0, (int) sampleEntity.getVersion() );
assertEquals( 123, (int) sampleEntity.getCode() );
}
);
}
@AfterEach
public void dropTestData(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
}
@Entity(name = "SampleEntity")
public static | IgnoreAutoAppliedConverterForIdAndVersionTest |
java | alibaba__nacos | config/src/main/java/com/alibaba/nacos/config/server/utils/AccumulateStatCount.java | {
"start": 774,
"end": 1172
} | class ____ {
final AtomicLong total = new AtomicLong(0);
long lastStatValue = 0;
public long increase() {
return total.incrementAndGet();
}
/**
* accumulate stat.
*
* @return stat.
*/
public long stat() {
long tmp = total.get() - lastStatValue;
lastStatValue += tmp;
return tmp;
}
}
| AccumulateStatCount |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/inheritance/TransientOverrideAsPersistentWithEmbeddableTests.java | {
"start": 2066,
"end": 8851
} | class ____ {
@Test
public void testFindByRootClass(SessionFactoryScope scope) {
scope.inTransaction( session -> {
var editor = session.find( Employee.class, "Jane Smith" );
assertNotNull( editor );
assertEquals( "Senior Editor", editor.getTitle() );
var writer = session.find( Employee.class, "John Smith" );
assertThat( writer, instanceOf( Writer.class ) );
assertEquals( "Writing", writer.getTitle() );
assertNotNull( ( (Writer) writer ).getWriterEmbeddable().getGroup() );
var group = ( (Writer) writer ).getWriterEmbeddable().getGroup();
assertEquals( writer.getTitle(), group.getName() );
var jobEditor = session.find( Job.class, "Edit" );
assertSame( editor, jobEditor.getEmployee() );
var jobWriter = session.find( Job.class, "Write" );
assertSame( writer, jobWriter.getEmployee() );
} );
}
@Test
public void testFindBySubclass(SessionFactoryScope scope) {
scope.inTransaction( session -> {
var editor = session.find( Editor.class, "Jane Smith" );
assertNotNull( editor );
assertEquals( "Senior Editor", editor.getTitle() );
var writer = session.find( Writer.class, "John Smith" );
assertEquals( "Writing", writer.getTitle() );
assertNotNull( writer.getWriterEmbeddable().getGroup() );
var group = writer.getWriterEmbeddable().getGroup();
assertEquals( writer.getTitle(), group.getName() );
var jobEditor = session.find( Job.class, "Edit" );
assertSame( editor, jobEditor.getEmployee() );
var jobWriter = session.find( Job.class, "Write" );
assertSame( writer, jobWriter.getEmployee() );
} );
}
@Test
public void testQueryByRootClass(SessionFactoryScope scope) {
scope.inTransaction( session -> {
//noinspection removal
var employees = session
.createQuery( "from Employee", Employee.class )
.getResultList();
assertEquals( 2, employees.size() );
assertThat( employees.get( 0 ), instanceOf( Editor.class ) );
assertThat( employees.get( 1 ), instanceOf( Writer.class ) );
var editor = (Editor) employees.get( 0 );
assertEquals( "Senior Editor", editor.getTitle() );
var writer = (Writer) employees.get( 1 );
assertEquals( "Writing", writer.getTitle() );
assertNotNull( writer.getWriterEmbeddable().getGroup() );
var group = writer.getWriterEmbeddable().getGroup();
assertEquals( writer.getTitle(), group.getName() );
} );
}
@Test
public void testQueryByRootClassAndOverridenProperty(SessionFactoryScope scope) {
scope.inTransaction( session -> {
//noinspection removal
var editor = session.createQuery( "from Employee where title=:title", Employee.class )
.setParameter( "title", "Senior Editor" )
.getSingleResult();
assertThat( editor, instanceOf( Editor.class ) );
//noinspection removal
var writer = session.createQuery( "from Employee where title=:title", Employee.class )
.setParameter( "title", "Writing" )
.getSingleResult();
assertThat( writer, instanceOf( Writer.class ) );
assertNotNull( ( (Writer) writer ).getWriterEmbeddable().getGroup() );
assertEquals( writer.getTitle(), ( (Writer) writer ).getWriterEmbeddable().getGroup().getName() );
} );
}
@Test
public void testQueryByRootClassAndOverridenPropertyTreat(SessionFactoryScope scope) {
scope.inTransaction( session -> {
//noinspection removal
var editor = session.createQuery(
"from Employee e where treat( e as Editor ).title=:title",
Employee.class
)
.setParameter( "title", "Senior Editor" )
.getSingleResult();
assertThat( editor, instanceOf( Editor.class ) );
//noinspection removal
var writer = session.createQuery(
"from Employee e where treat( e as Writer).title=:title",
Employee.class
)
.setParameter( "title", "Writing" )
.getSingleResult();
assertThat( writer, instanceOf( Writer.class ) );
assertNotNull( ( (Writer) writer ).getWriterEmbeddable().getGroup() );
assertEquals( writer.getTitle(), ( (Writer) writer ).getWriterEmbeddable().getGroup().getName() );
} );
}
@Test
public void testQueryBySublassAndOverridenProperty(SessionFactoryScope scope) {
scope.inTransaction( session -> {
//noinspection removal
var editor = session.createQuery( "from Editor where title=:title", Editor.class )
.setParameter( "title", "Senior Editor" )
.getSingleResult();
assertThat( editor, instanceOf( Editor.class ) );
//noinspection removal
var writer = session.createQuery( "from Writer where title=:title", Writer.class )
.setParameter( "title", "Writing" )
.getSingleResult();
assertNotNull( writer.getWriterEmbeddable().getGroup() );
assertEquals( writer.getTitle(), writer.getWriterEmbeddable().getGroup().getName() );
} );
}
@Test
public void testCriteriaQueryByRootClassAndOverridenProperty(SessionFactoryScope scope) {
scope.inTransaction( session -> {
var builder = session.getCriteriaBuilder();
var query = builder.createQuery( Employee.class );
var root = query.from( Employee.class );
var parameter = builder.parameter( String.class, "title" );
var predicateEditor = builder.equal(
builder.treat( root, Editor.class ).get( "title" ),
parameter
);
query.where( predicateEditor );
//noinspection removal
var editor = session.createQuery( query )
.setParameter( "title", "Senior Editor" )
.getSingleResult();
assertThat( editor, instanceOf( Editor.class ) );
var predicateWriter = builder.equal(
builder.treat( root, Writer.class ).get( "title" ),
parameter
);
query.where( predicateWriter );
//noinspection removal
var writer = session.createQuery( query )
.setParameter( "title", "Writing" )
.getSingleResult();
assertThat( writer, instanceOf( Writer.class ) );
assertNotNull( ( (Writer) writer ).getWriterEmbeddable().getGroup() );
assertEquals( writer.getTitle(), ( (Writer) writer ).getWriterEmbeddable().getGroup().getName() );
} );
}
@BeforeEach
public void setupData(SessionFactoryScope scope) {
scope.inTransaction( session -> {
var jobEditor = new Job( "Edit" );
jobEditor.setEmployee( new Editor( "Jane Smith", "Senior Editor" ) );
var jobWriter = new Job( "Write" );
jobWriter.setEmployee( new Writer( "John Smith", new Group( "Writing" ) ) );
var editor = jobEditor.getEmployee();
var writer = jobWriter.getEmployee();
var group = ((Writer) writer).getWriterEmbeddable().getGroup();
session.persist( editor );
session.persist( group );
session.persist( writer );
session.persist( jobEditor );
session.persist( jobWriter );
} );
}
@AfterEach
public void cleanupData(SessionFactoryScope scope) {
scope.dropData();
}
@MappedSuperclass
public static | TransientOverrideAsPersistentWithEmbeddableTests |
java | alibaba__nacos | maintainer-client/src/main/java/com/alibaba/nacos/maintainer/client/core/AbstractCoreMaintainerService.java | {
"start": 1748,
"end": 14827
} | class ____ implements CoreMaintainerService {
private final ClientHttpProxy clientHttpProxy;
protected AbstractCoreMaintainerService(Properties properties) throws NacosException {
this.clientHttpProxy = new ClientHttpProxy(properties);
ParamUtil.initSerialization();
}
@Override
public Map<String, String> getServerState() throws NacosException {
HttpRequest httpRequest = new HttpRequest.Builder().setHttpMethod(HttpMethod.GET)
.setPath(Constants.AdminApiPath.CORE_STATE_ADMIN_PATH).build();
HttpRestResult<String> httpRestResult = clientHttpProxy.executeSyncHttpRequest(httpRequest);
Result<Map<String, String>> result = JacksonUtils.toObj(httpRestResult.getData(),
new TypeReference<Result<Map<String, String>>>() {
});
return result.getData();
}
@Override
public Boolean liveness() throws NacosException {
HttpRequest httpRequest = new HttpRequest.Builder().setHttpMethod(HttpMethod.GET)
.setPath(Constants.AdminApiPath.CORE_STATE_ADMIN_PATH + "/liveness").build();
HttpRestResult<String> httpRestResult = clientHttpProxy.executeSyncHttpRequest(httpRequest);
return httpRestResult.ok();
}
@Override
public Boolean readiness() throws NacosException {
HttpRequest httpRequest = new HttpRequest.Builder().setHttpMethod(HttpMethod.GET)
.setPath(Constants.AdminApiPath.CORE_OPS_ADMIN_PATH + "/readiness").build();
HttpRestResult<String> httpRestResult = clientHttpProxy.executeSyncHttpRequest(httpRequest);
return httpRestResult.ok();
}
@Override
public String raftOps(String command, String value, String groupId) throws NacosException {
Map<String, String> params = new HashMap<>(8);
params.put("command", command);
params.put("value", value);
params.put("groupId", groupId);
HttpRequest httpRequest = new HttpRequest.Builder().setHttpMethod(HttpMethod.POST)
.setPath(Constants.AdminApiPath.CORE_OPS_ADMIN_PATH + "/raft").setParamValue(params).build();
HttpRestResult<String> httpRestResult = clientHttpProxy.executeSyncHttpRequest(httpRequest);
Result<String> result = JacksonUtils.toObj(httpRestResult.getData(), new TypeReference<Result<String>>() {
});
return result.getData();
}
@Override
public List<IdGeneratorInfo> getIdGenerators() throws NacosException {
HttpRequest httpRequest = new HttpRequest.Builder().setHttpMethod(HttpMethod.GET)
.setPath(Constants.AdminApiPath.CORE_OPS_ADMIN_PATH + "/ids").build();
HttpRestResult<String> httpRestResult = clientHttpProxy.executeSyncHttpRequest(httpRequest);
Result<List<IdGeneratorInfo>> result = JacksonUtils.toObj(httpRestResult.getData(),
new TypeReference<Result<List<IdGeneratorInfo>>>() {
});
return result.getData();
}
@Override
public void updateLogLevel(String logName, String logLevel) throws NacosException {
Map<String, String> params = new HashMap<>(8);
params.put("logName", logName);
params.put("logLevel", logLevel);
HttpRequest httpRequest = new HttpRequest.Builder().setHttpMethod(HttpMethod.PUT)
.setPath(Constants.AdminApiPath.CORE_OPS_ADMIN_PATH + "/log").setParamValue(params).build();
clientHttpProxy.executeSyncHttpRequest(httpRequest);
}
@Override
public Collection<NacosMember> listClusterNodes(String address, String state) throws NacosException {
Map<String, String> params = new HashMap<>(8);
params.put("address", address);
params.put("state", state);
HttpRequest httpRequest = new HttpRequest.Builder().setHttpMethod(HttpMethod.GET)
.setPath(Constants.AdminApiPath.CORE_CLUSTER_ADMIN_PATH + "/node/list").setParamValue(params).build();
HttpRestResult<String> httpRestResult = clientHttpProxy.executeSyncHttpRequest(httpRequest);
Result<Collection<NacosMember>> result = JacksonUtils.toObj(httpRestResult.getData(),
new TypeReference<Result<Collection<NacosMember>>>() {
});
return result.getData();
}
@Override
public Boolean updateLookupMode(String type) throws NacosException {
Map<String, String> params = new HashMap<>(8);
params.put("type", type);
HttpRequest httpRequest = new HttpRequest.Builder().setHttpMethod(HttpMethod.PUT)
.setPath(Constants.AdminApiPath.CORE_CLUSTER_ADMIN_PATH + "/lookup").setParamValue(params).build();
HttpRestResult<String> httpRestResult = clientHttpProxy.executeSyncHttpRequest(httpRequest);
Result<Boolean> result = JacksonUtils.toObj(httpRestResult.getData(), new TypeReference<Result<Boolean>>() {
});
return result.getData();
}
@Override
public Map<String, ConnectionInfo> getCurrentClients() throws NacosException {
HttpRequest httpRequest = new HttpRequest.Builder().setHttpMethod(HttpMethod.GET)
.setPath(Constants.AdminApiPath.CORE_LOADER_ADMIN_PATH + "/current").build();
HttpRestResult<String> httpRestResult = clientHttpProxy.executeSyncHttpRequest(httpRequest);
Result<Map<String, ConnectionInfo>> result = JacksonUtils.toObj(httpRestResult.getData(),
new TypeReference<Result<Map<String, ConnectionInfo>>>() {
});
return result.getData();
}
@Override
public String reloadConnectionCount(Integer count, String redirectAddress) throws NacosException {
Map<String, String> params = new HashMap<>(8);
params.put("count", String.valueOf(count));
params.put("redirectAddress", redirectAddress);
HttpRequest httpRequest = new HttpRequest.Builder().setHttpMethod(HttpMethod.POST)
.setPath(Constants.AdminApiPath.CORE_LOADER_ADMIN_PATH + "/reloadCurrent").setParamValue(params)
.build();
HttpRestResult<String> httpRestResult = clientHttpProxy.executeSyncHttpRequest(httpRequest);
Result<String> result = JacksonUtils.toObj(httpRestResult.getData(), new TypeReference<Result<String>>() {
});
return result.getData();
}
@Override
public String smartReloadCluster(String loaderFactorStr) throws NacosException {
Map<String, String> params = new HashMap<>(8);
params.put("loaderFactorStr", loaderFactorStr);
HttpRequest httpRequest = new HttpRequest.Builder().setHttpMethod(HttpMethod.POST)
.setPath(Constants.AdminApiPath.CORE_LOADER_ADMIN_PATH + "/smartReloadCluster").setParamValue(params)
.build();
HttpRestResult<String> httpRestResult = clientHttpProxy.executeSyncHttpRequest(httpRequest);
Result<String> result = JacksonUtils.toObj(httpRestResult.getData(), new TypeReference<Result<String>>() {
});
return result.getData();
}
@Override
public String reloadSingleClient(String connectionId, String redirectAddress) throws NacosException {
Map<String, String> params = new HashMap<>(8);
params.put("connectionId", connectionId);
params.put("redirectAddress", redirectAddress);
HttpRequest httpRequest = new HttpRequest.Builder().setHttpMethod(HttpMethod.POST)
.setPath(Constants.AdminApiPath.CORE_LOADER_ADMIN_PATH + "/reloadClient").setParamValue(params).build();
HttpRestResult<String> httpRestResult = clientHttpProxy.executeSyncHttpRequest(httpRequest);
Result<String> result = JacksonUtils.toObj(httpRestResult.getData(), new TypeReference<Result<String>>() {
});
return result.getData();
}
@Override
public ServerLoaderMetrics getClusterLoaderMetrics() throws NacosException {
HttpRequest httpRequest = new HttpRequest.Builder().setHttpMethod(HttpMethod.GET)
.setPath(Constants.AdminApiPath.CORE_LOADER_ADMIN_PATH + "/cluster").build();
HttpRestResult<String> httpRestResult = clientHttpProxy.executeSyncHttpRequest(httpRequest);
Result<ServerLoaderMetrics> result = JacksonUtils.toObj(httpRestResult.getData(),
new TypeReference<Result<ServerLoaderMetrics>>() {
});
return result.getData();
}
@Override
public List<Namespace> getNamespaceList() throws NacosException {
HttpRequest httpRequest = new HttpRequest.Builder().setHttpMethod(HttpMethod.GET)
.setPath(Constants.AdminApiPath.CORE_NAMESPACE_ADMIN_PATH + "/list").build();
HttpRestResult<String> httpRestResult = clientHttpProxy.executeSyncHttpRequest(httpRequest);
Result<List<Namespace>> result = JacksonUtils.toObj(httpRestResult.getData(),
new TypeReference<Result<List<Namespace>>>() {
});
return result.getData();
}
@Override
public Namespace getNamespace(String namespaceId) throws NacosException {
Map<String, String> params = new HashMap<>(8);
params.put("namespaceId", namespaceId);
HttpRequest httpRequest = new HttpRequest.Builder().setHttpMethod(HttpMethod.GET)
.setPath(Constants.AdminApiPath.CORE_NAMESPACE_ADMIN_PATH).setParamValue(params).build();
HttpRestResult<String> httpRestResult = clientHttpProxy.executeSyncHttpRequest(httpRequest);
Result<Namespace> result = JacksonUtils.toObj(httpRestResult.getData(), new TypeReference<Result<Namespace>>() {
});
return result.getData();
}
@Override
public Boolean createNamespace(String namespaceId, String namespaceName, String namespaceDesc)
throws NacosException {
Map<String, String> params = new HashMap<>(8);
params.put("namespaceId", namespaceId);
params.put("namespaceName", namespaceName);
params.put("namespaceDesc", namespaceDesc);
HttpRequest httpRequest = new HttpRequest.Builder().setHttpMethod(HttpMethod.POST)
.setPath(Constants.AdminApiPath.CORE_NAMESPACE_ADMIN_PATH).setParamValue(params).build();
HttpRestResult<String> httpRestResult = clientHttpProxy.executeSyncHttpRequest(httpRequest);
Result<Boolean> result = JacksonUtils.toObj(httpRestResult.getData(), new TypeReference<Result<Boolean>>() {
});
return result.getData();
}
@Override
public Boolean updateNamespace(String namespaceId, String namespaceName, String namespaceDesc)
throws NacosException {
Map<String, String> params = new HashMap<>(8);
params.put("namespaceId", namespaceId);
params.put("namespaceName", namespaceName);
params.put("namespaceDesc", namespaceDesc);
HttpRequest httpRequest = new HttpRequest.Builder().setHttpMethod(HttpMethod.PUT)
.setPath(Constants.AdminApiPath.CORE_NAMESPACE_ADMIN_PATH).setParamValue(params).build();
HttpRestResult<String> httpRestResult = clientHttpProxy.executeSyncHttpRequest(httpRequest);
Result<Boolean> result = JacksonUtils.toObj(httpRestResult.getData(), new TypeReference<Result<Boolean>>() {
});
return result.getData();
}
@Override
public Boolean deleteNamespace(String namespaceId) throws NacosException {
Map<String, String> params = new HashMap<>(8);
params.put("namespaceId", namespaceId);
HttpRequest httpRequest = new HttpRequest.Builder().setHttpMethod(HttpMethod.DELETE)
.setPath(Constants.AdminApiPath.CORE_NAMESPACE_ADMIN_PATH).setParamValue(params).build();
HttpRestResult<String> httpRestResult = clientHttpProxy.executeSyncHttpRequest(httpRequest);
Result<Boolean> result = JacksonUtils.toObj(httpRestResult.getData(), new TypeReference<Result<Boolean>>() {
});
return result.getData();
}
@Override
public Boolean checkNamespaceIdExist(String namespaceId) throws NacosException {
Map<String, String> params = new HashMap<>(8);
params.put("namespaceId", namespaceId);
HttpRequest httpRequest = new HttpRequest.Builder().setHttpMethod(HttpMethod.GET)
.setPath(Constants.AdminApiPath.CORE_NAMESPACE_ADMIN_PATH + "/check").setParamValue(params).build();
HttpRestResult<String> httpRestResult = clientHttpProxy.executeSyncHttpRequest(httpRequest);
Result<Integer> result = JacksonUtils.toObj(httpRestResult.getData(), new TypeReference<Result<Integer>>() {
});
return result.getData() > 0;
}
protected ClientHttpProxy getClientHttpProxy() {
return this.clientHttpProxy;
}
@Override
public void shutdown() throws NacosException {
clientHttpProxy.shutdown();
}
}
| AbstractCoreMaintainerService |
java | apache__maven | its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng0836PluginParentResolutionTest.java | {
"start": 1041,
"end": 2515
} | class ____ extends AbstractMavenIntegrationTestCase {
/**
* Test that parent POMs referenced by a plugin POM can be resolved from ordinary repos, i.e. non-plugin repos.
* As a motivation for this, imagine the plugin repository hosts only snapshots while the ordinary repository
* hosts releases and a snapshot plugin might easily use a released parent.
*
* @throws Exception in case of failure
*/
@Test
public void testitMNG836() throws Exception {
File testDir = extractResources("/mng-0836");
Verifier verifier = newVerifier(testDir.getAbsolutePath());
verifier.setAutoclean(false);
verifier.deleteDirectory("target");
verifier.deleteArtifacts("org.apache.maven.its.mng836");
verifier.filterFile("settings-template.xml", "settings.xml");
verifier.addCliArgument("--settings");
verifier.addCliArgument("settings.xml");
// Maven 3.x aims to separate plugins and project dependencies (MNG-4191)
// Inline version check: (,3.0-alpha-1),(3.0-alpha-1,3.0-alpha-7) - current Maven version doesn't match
try {
verifier.addCliArgument("validate");
verifier.execute();
verifier.verifyErrorFreeLog();
fail("Plugin parent POM was erroneously resolved from non-plugin repository.");
} catch (VerificationException e) {
// expected
}
}
}
| MavenITmng0836PluginParentResolutionTest |
java | elastic__elasticsearch | x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/parser/EqlBaseVisitor.java | {
"start": 363,
"end": 11487
} | interface ____<T> extends ParseTreeVisitor<T> {
/**
* Visit a parse tree produced by {@link EqlBaseParser#singleStatement}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitSingleStatement(EqlBaseParser.SingleStatementContext ctx);
/**
* Visit a parse tree produced by {@link EqlBaseParser#singleExpression}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitSingleExpression(EqlBaseParser.SingleExpressionContext ctx);
/**
* Visit a parse tree produced by {@link EqlBaseParser#statement}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitStatement(EqlBaseParser.StatementContext ctx);
/**
* Visit a parse tree produced by {@link EqlBaseParser#query}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitQuery(EqlBaseParser.QueryContext ctx);
/**
* Visit a parse tree produced by {@link EqlBaseParser#sequenceParams}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitSequenceParams(EqlBaseParser.SequenceParamsContext ctx);
/**
* Visit a parse tree produced by {@link EqlBaseParser#sequence}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitSequence(EqlBaseParser.SequenceContext ctx);
/**
* Visit a parse tree produced by {@link EqlBaseParser#sample}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitSample(EqlBaseParser.SampleContext ctx);
/**
* Visit a parse tree produced by {@link EqlBaseParser#join}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitJoin(EqlBaseParser.JoinContext ctx);
/**
* Visit a parse tree produced by {@link EqlBaseParser#pipe}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitPipe(EqlBaseParser.PipeContext ctx);
/**
* Visit a parse tree produced by {@link EqlBaseParser#joinKeys}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitJoinKeys(EqlBaseParser.JoinKeysContext ctx);
/**
* Visit a parse tree produced by {@link EqlBaseParser#joinTerm}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitJoinTerm(EqlBaseParser.JoinTermContext ctx);
/**
* Visit a parse tree produced by {@link EqlBaseParser#sequenceTerm}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitSequenceTerm(EqlBaseParser.SequenceTermContext ctx);
/**
* Visit a parse tree produced by {@link EqlBaseParser#subquery}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitSubquery(EqlBaseParser.SubqueryContext ctx);
/**
* Visit a parse tree produced by {@link EqlBaseParser#eventQuery}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitEventQuery(EqlBaseParser.EventQueryContext ctx);
/**
* Visit a parse tree produced by {@link EqlBaseParser#eventFilter}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitEventFilter(EqlBaseParser.EventFilterContext ctx);
/**
* Visit a parse tree produced by {@link EqlBaseParser#expression}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitExpression(EqlBaseParser.ExpressionContext ctx);
/**
* Visit a parse tree produced by the {@code logicalNot}
* labeled alternative in {@link EqlBaseParser#booleanExpression}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitLogicalNot(EqlBaseParser.LogicalNotContext ctx);
/**
* Visit a parse tree produced by the {@code booleanDefault}
* labeled alternative in {@link EqlBaseParser#booleanExpression}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitBooleanDefault(EqlBaseParser.BooleanDefaultContext ctx);
/**
* Visit a parse tree produced by the {@code processCheck}
* labeled alternative in {@link EqlBaseParser#booleanExpression}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitProcessCheck(EqlBaseParser.ProcessCheckContext ctx);
/**
* Visit a parse tree produced by the {@code logicalBinary}
* labeled alternative in {@link EqlBaseParser#booleanExpression}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitLogicalBinary(EqlBaseParser.LogicalBinaryContext ctx);
/**
* Visit a parse tree produced by the {@code valueExpressionDefault}
* labeled alternative in {@link EqlBaseParser#valueExpression}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitValueExpressionDefault(EqlBaseParser.ValueExpressionDefaultContext ctx);
/**
* Visit a parse tree produced by the {@code comparison}
* labeled alternative in {@link EqlBaseParser#valueExpression}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitComparison(EqlBaseParser.ComparisonContext ctx);
/**
* Visit a parse tree produced by the {@code operatorExpressionDefault}
* labeled alternative in {@link EqlBaseParser#operatorExpression}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitOperatorExpressionDefault(EqlBaseParser.OperatorExpressionDefaultContext ctx);
/**
* Visit a parse tree produced by the {@code arithmeticBinary}
* labeled alternative in {@link EqlBaseParser#operatorExpression}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitArithmeticBinary(EqlBaseParser.ArithmeticBinaryContext ctx);
/**
* Visit a parse tree produced by the {@code arithmeticUnary}
* labeled alternative in {@link EqlBaseParser#operatorExpression}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitArithmeticUnary(EqlBaseParser.ArithmeticUnaryContext ctx);
/**
* Visit a parse tree produced by {@link EqlBaseParser#predicate}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitPredicate(EqlBaseParser.PredicateContext ctx);
/**
* Visit a parse tree produced by the {@code constantDefault}
* labeled alternative in {@link EqlBaseParser#primaryExpression}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitConstantDefault(EqlBaseParser.ConstantDefaultContext ctx);
/**
* Visit a parse tree produced by the {@code function}
* labeled alternative in {@link EqlBaseParser#primaryExpression}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitFunction(EqlBaseParser.FunctionContext ctx);
/**
* Visit a parse tree produced by the {@code dereference}
* labeled alternative in {@link EqlBaseParser#primaryExpression}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitDereference(EqlBaseParser.DereferenceContext ctx);
/**
* Visit a parse tree produced by the {@code parenthesizedExpression}
* labeled alternative in {@link EqlBaseParser#primaryExpression}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitParenthesizedExpression(EqlBaseParser.ParenthesizedExpressionContext ctx);
/**
* Visit a parse tree produced by {@link EqlBaseParser#functionExpression}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitFunctionExpression(EqlBaseParser.FunctionExpressionContext ctx);
/**
* Visit a parse tree produced by {@link EqlBaseParser#functionName}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitFunctionName(EqlBaseParser.FunctionNameContext ctx);
/**
* Visit a parse tree produced by the {@code nullLiteral}
* labeled alternative in {@link EqlBaseParser#constant}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitNullLiteral(EqlBaseParser.NullLiteralContext ctx);
/**
* Visit a parse tree produced by the {@code numericLiteral}
* labeled alternative in {@link EqlBaseParser#constant}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitNumericLiteral(EqlBaseParser.NumericLiteralContext ctx);
/**
* Visit a parse tree produced by the {@code booleanLiteral}
* labeled alternative in {@link EqlBaseParser#constant}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitBooleanLiteral(EqlBaseParser.BooleanLiteralContext ctx);
/**
* Visit a parse tree produced by the {@code stringLiteral}
* labeled alternative in {@link EqlBaseParser#constant}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitStringLiteral(EqlBaseParser.StringLiteralContext ctx);
/**
* Visit a parse tree produced by {@link EqlBaseParser#comparisonOperator}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitComparisonOperator(EqlBaseParser.ComparisonOperatorContext ctx);
/**
* Visit a parse tree produced by {@link EqlBaseParser#booleanValue}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitBooleanValue(EqlBaseParser.BooleanValueContext ctx);
/**
* Visit a parse tree produced by {@link EqlBaseParser#qualifiedName}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitQualifiedName(EqlBaseParser.QualifiedNameContext ctx);
/**
* Visit a parse tree produced by {@link EqlBaseParser#identifier}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitIdentifier(EqlBaseParser.IdentifierContext ctx);
/**
* Visit a parse tree produced by {@link EqlBaseParser#timeUnit}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitTimeUnit(EqlBaseParser.TimeUnitContext ctx);
/**
* Visit a parse tree produced by the {@code decimalLiteral}
* labeled alternative in {@link EqlBaseParser#number}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitDecimalLiteral(EqlBaseParser.DecimalLiteralContext ctx);
/**
* Visit a parse tree produced by the {@code integerLiteral}
* labeled alternative in {@link EqlBaseParser#number}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitIntegerLiteral(EqlBaseParser.IntegerLiteralContext ctx);
/**
* Visit a parse tree produced by {@link EqlBaseParser#string}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitString(EqlBaseParser.StringContext ctx);
/**
* Visit a parse tree produced by {@link EqlBaseParser#eventValue}.
* @param ctx the parse tree
* @return the visitor result
*/
T visitEventValue(EqlBaseParser.EventValueContext ctx);
}
| EqlBaseVisitor |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/builders/NamespaceHttpTests.java | {
"start": 19243,
"end": 19674
} | class ____ {
@Bean
SecurityFilterChain filterChain(HttpSecurity http) throws Exception {
// @formatter:off
http
.authorizeHttpRequests((requests) -> requests
.anyRequest().anonymous())
.sessionManagement((management) -> management
.sessionCreationPolicy(SessionCreationPolicy.NEVER));
return http.build();
// @formatter:on
}
}
@Configuration
@EnableWebSecurity
static | CreateSessionNeverConfig |
java | elastic__elasticsearch | x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/mapper/BinaryDvConfirmedQuery.java | {
"start": 13407,
"end": 14981
} | interface ____ {
Automaton getAutomaton(String field);
}
private record PatternAutomatonProvider(String matchPattern, boolean caseInsensitive) implements AutomatonProvider {
@Override
public Automaton getAutomaton(String field) {
return caseInsensitive
? AutomatonQueries.toCaseInsensitiveWildcardAutomaton(new Term(field, matchPattern))
: WildcardQuery.toAutomaton(new Term(field, matchPattern), Operations.DEFAULT_DETERMINIZE_WORK_LIMIT);
}
}
private record RegexAutomatonProvider(String value, int syntaxFlags, int matchFlags, int maxDeterminizedStates)
implements
AutomatonProvider {
@Override
public Automaton getAutomaton(String field) {
RegExp regex = new RegExp(value, syntaxFlags, matchFlags);
return Operations.determinize(regex.toAutomaton(), maxDeterminizedStates);
}
}
private record RangeAutomatonProvider(BytesRef lower, BytesRef upper, boolean includeLower, boolean includeUpper)
implements
AutomatonProvider {
@Override
public Automaton getAutomaton(String field) {
return TermRangeQuery.toAutomaton(lower, upper, includeLower, includeUpper);
}
}
private record FuzzyQueryAutomatonProvider(String searchTerm, FuzzyQuery fuzzyQuery) implements AutomatonProvider {
@Override
public Automaton getAutomaton(String field) {
return fuzzyQuery.getAutomata().automaton;
}
}
}
| AutomatonProvider |
java | apache__camel | components/camel-kafka/src/test/java/org/apache/camel/component/kafka/integration/batching/KafkaBatchingProcessingAutoCommitIT.java | {
"start": 1227,
"end": 3179
} | class ____ extends BatchingProcessingITSupport {
private static final Logger LOG = LoggerFactory.getLogger(KafkaBatchingProcessingManualCommitIT.class);
public static final String TOPIC = "testBatchingProcessingAutoCommit";
private volatile boolean invalidExchangeFormat = false;
@AfterEach
public void after() {
cleanupKafka(TOPIC);
}
@Override
protected RouteBuilder createRouteBuilder() {
// allowManualCommit=true&autoOffsetReset=earliest
String from = "kafka:" + TOPIC
+ "?groupId=KafkaBatchingProcessingIT&pollTimeoutMs=1000&batching=true&maxPollRecords=10&autoOffsetReset=earliest";
return new RouteBuilder() {
@Override
public void configure() {
from(from).routeId("batching").process(e -> {
// The received records are stored as exchanges in a list. This gets the list of those exchanges
final List<?> exchanges = e.getMessage().getBody(List.class);
// Ensure we are actually receiving what we are asking for
if (exchanges == null || exchanges.isEmpty()) {
return;
}
// The records from the batch are stored in a list of exchanges in the original exchange.
for (Object o : exchanges) {
if (o instanceof Exchange exchange) {
LOG.info("Processing exchange with body {}", exchange.getMessage().getBody(String.class));
}
}
}).to(KafkaTestUtil.MOCK_RESULT);
}
};
}
@Test
public void kafkaAutoCommit() throws Exception {
kafkaManualCommitTest(TOPIC);
Assertions.assertFalse(invalidExchangeFormat, "The exchange list should be composed of exchanges");
}
}
| KafkaBatchingProcessingAutoCommitIT |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/server/runtime/src/main/java/org/jboss/resteasy/reactive/server/core/request/ServerDrivenNegotiation.java | {
"start": 374,
"end": 15232
} | class ____ {
private Map<MediaType, QualityValue> requestedMediaTypes = null;
private Map<String, QualityValue> requestedCharacterSets = null;
private Map<String, QualityValue> requestedEncodings = null;
private Map<Locale, QualityValue> requestedLanguages = null;
private int mediaRadix = 1;
public ServerDrivenNegotiation() {
}
public void setAcceptHeaders(List<String> headerValues) {
requestedMediaTypes = null;
if (headerValues == null)
return;
Map<MediaType, QualityValue> requested = null;
for (String headerValue : headerValues) {
Map<MediaType, QualityValue> mapping = AcceptHeaders.getMediaTypeQualityValues(headerValue);
if (mapping == null)
return;
if (requested == null)
requested = mapping;
else
requested.putAll(mapping);
}
requestedMediaTypes = requested;
for (Iterator<MediaType> it = requested.keySet().iterator(); it.hasNext();) {
mediaRadix = Math.max(mediaRadix, it.next().getParameters().size());
}
}
public void setAcceptCharsetHeaders(List<String> headerValues) {
requestedCharacterSets = null;
if (headerValues == null)
return;
Map<String, QualityValue> requested = null;
for (String headerValue : headerValues) {
Map<String, QualityValue> mapping = AcceptHeaders.getStringQualityValues(headerValue);
if (mapping == null)
return;
if (requested == null)
requested = mapping;
else
requested.putAll(mapping);
}
requestedCharacterSets = requested;
}
public void setAcceptEncodingHeaders(List<String> headerValues) {
requestedEncodings = null;
if (headerValues == null)
return;
Map<String, QualityValue> requested = null;
for (String headerValue : headerValues) {
Map<String, QualityValue> mapping = AcceptHeaders.getStringQualityValues(headerValue);
if (mapping == null)
return;
if (requested == null)
requested = mapping;
else
requested.putAll(mapping);
}
requestedEncodings = requested;
}
public void setAcceptLanguageHeaders(List<String> headerValues) {
requestedLanguages = null;
if (headerValues == null)
return;
Map<Locale, QualityValue> requested = null;
for (String headerValue : headerValues) {
Map<Locale, QualityValue> mapping = AcceptHeaders.getLocaleQualityValues(headerValue);
if (mapping == null)
return;
if (requested == null)
requested = mapping;
else
requested.putAll(mapping);
}
requestedLanguages = requested;
}
public Variant getBestMatch(List<Variant> available) {
// BigDecimal bestQuality = BigDecimal.ZERO;
VariantQuality bestQuality = null;
Variant bestOption = null;
for (Variant option : available) {
VariantQuality quality = new VariantQuality();
if (!applyMediaType(option, quality))
continue;
if (!applyCharacterSet(option, quality))
continue;
if (!applyEncoding(option, quality))
continue;
if (!applyLanguage(option, quality))
continue;
// BigDecimal optionQuality = quality.getOverallQuality();
// if (isBetterOption(bestQuality, bestOption, optionQuality, option))
if (isBetterOption(bestQuality, bestOption, quality, option)) {
// bestQuality = optionQuality;
bestQuality = quality;
bestOption = option;
}
}
return bestOption;
}
/**
* Tests whether {@code option} is preferable over the current {@code bestOption}.
*/
// private static boolean isBetterOption(BigDecimal bestQuality, Variant best,
// BigDecimal optionQuality, Variant option)
private static boolean isBetterOption(VariantQuality bestQuality, Variant best,
VariantQuality optionQuality, Variant option) {
if (best == null)
return true;
// Compare overall quality.
int signum = bestQuality.getOverallQuality().compareTo(optionQuality.getOverallQuality());
if (signum != 0)
return signum < 0;
// Overall quality is the same.
// Assuming the request has an Accept header, a VariantQuality has a non-null
// requestMediaType if and only if it the corresponding Variant has a non-null mediaType.
// If bestQuality and optionQuality both have a non-null requestMediaType, we compare them
// for specificity.
MediaType bestRequestMediaType = bestQuality.getRequestMediaType();
MediaType optionRequestMediaType = optionQuality.getRequestMediaType();
if (bestRequestMediaType != null && optionRequestMediaType != null) {
if (bestRequestMediaType.getType().equals(optionRequestMediaType.getType())) {
if (bestRequestMediaType.getSubtype().equals(optionRequestMediaType.getSubtype())) {
int bestCount = bestRequestMediaType.getParameters().size();
int optionCount = optionRequestMediaType.getParameters().size();
if (optionCount > bestCount) {
return true; // more matching parameters
} else if (optionCount < bestCount) {
return false; // less matching parameters
}
} else if (bestRequestMediaType.getSubtype().equals("*")) {
return true;
} else if (optionRequestMediaType.getSubtype().equals("*")) {
return false;
}
} else if (bestRequestMediaType.getType().equals("*")) {
return true;
} else if (optionRequestMediaType.getType().equals("*")) {
return false;
}
}
// Compare variant media types for specificity.
MediaType bestType = best.getMediaType();
MediaType optionType = option.getMediaType();
if (bestType != null && optionType != null) {
if (bestType.getType().equals(optionType.getType())) {
// Same type
if (bestType.getSubtype().equals(optionType.getSubtype())) {
// Same subtype
int bestCount = bestType.getParameters().size();
int optionCount = optionType.getParameters().size();
if (optionCount > bestCount)
return true; // more matching parameters
else if (optionCount < bestCount)
return false; // less matching parameters
} else if ("*".equals(bestType.getSubtype())) {
return true; // more specific subtype
} else if ("*".equals(optionType.getSubtype())) {
return false; // less specific subtype
}
} else if ("*".equals(bestType.getType())) {
return true; // more specific type
} else if ("*".equals(optionType.getType())) {
return false; // less specific type;
}
}
// Finally, compare specificity of the variants.
return getExplicitness(best) < getExplicitness(option);
}
private static int getExplicitness(Variant variant) {
int explicitness = 0;
if (variant.getMediaType() != null) {
++explicitness;
}
if (variant.getEncoding() != null) {
++explicitness;
}
if (variant.getLanguage() != null) {
++explicitness;
}
return explicitness;
}
private boolean applyMediaType(Variant option, VariantQuality quality) {
if (requestedMediaTypes == null)
return true;
MediaType mediaType = option.getMediaType();
if (mediaType == null)
return true;
String type = mediaType.getType();
if ("*".equals(type)) {
type = null;
}
String subtype = mediaType.getSubtype();
if ("*".equals(subtype)) {
subtype = null;
}
Map<String, String> parameters = mediaType.getParameters();
if (parameters.isEmpty())
parameters = null;
QualityValue bestQuality = QualityValue.NOT_ACCEPTABLE;
int bestMatchCount = -1;
MediaType bestRequestMediaType = null;
for (MediaType requested : requestedMediaTypes.keySet()) {
int matchCount = 0;
if (type != null) {
String requestedType = requested.getType();
if (requestedType.equals(type)) {
matchCount += mediaRadix * 100;
} else if (!"*".equals(requestedType)) {
continue;
}
}
if (subtype != null) {
String requestedSubtype = requested.getSubtype();
if (requestedSubtype.equals(subtype)) {
matchCount += mediaRadix * 10;
} else if (!"*".equals(requestedSubtype)) {
continue;
}
}
Map<String, String> requestedParameters = requested.getParameters();
if (requestedParameters != null && requestedParameters.size() > 0) {
if (!hasRequiredParameters(requestedParameters, parameters))
continue;
matchCount += requestedParameters.size();
}
if (matchCount > bestMatchCount) {
bestMatchCount = matchCount;
bestQuality = requestedMediaTypes.get(requested);
bestRequestMediaType = requested;
} else if (matchCount == bestMatchCount) {
QualityValue qualityValue = requestedMediaTypes.get(requested);
if (bestQuality.compareTo(qualityValue) < 0) {
bestQuality = qualityValue;
bestRequestMediaType = requested;
}
}
}
if (!bestQuality.isAcceptable()) {
return false;
}
quality.setMediaTypeQualityValue(bestQuality)
.setRequestMediaType(bestRequestMediaType);
return true;
}
private boolean hasRequiredParameters(Map<String, String> required, Map<String, String> available) {
if (available == null) {
return false;
}
for (Map.Entry<String, String> requiredEntry : required.entrySet()) {
String name = requiredEntry.getKey();
String value = requiredEntry.getValue();
String availableValue = available.get(name);
if (availableValue == null && "charset".equals(name)) {
if (requestedCharacterSets != null
&& !requestedCharacterSets.containsKey(null)
&& !requestedCharacterSets.containsKey(value)) {
return false;
}
} else if (!value.equals(availableValue)) {
return false;
}
}
return true;
}
private boolean applyCharacterSet(Variant option, VariantQuality quality) {
if (requestedCharacterSets == null)
return true;
MediaType mediaType = option.getMediaType();
if (mediaType == null) {
return true;
}
String charsetParameter = mediaType.getParameters().get("charset");
if (charsetParameter == null) {
return true;
}
QualityValue value = requestedCharacterSets.get(charsetParameter);
if (value == null) { // try wildcard
value = requestedCharacterSets.get(null);
}
if (value == null) { // no match
return false;
}
if (!value.isAcceptable()) {
return false;
}
quality.setCharacterSetQualityValue(value);
return true;
}
private boolean applyEncoding(Variant option, VariantQuality quality) {
if (requestedEncodings == null)
return true;
String encoding = option.getEncoding();
if (encoding == null)
return true;
QualityValue value = requestedEncodings.get(encoding);
if (value == null) { // try wildcard
value = requestedEncodings.get(null);
}
if (value == null) { // no match
return false;
}
if (!value.isAcceptable()) {
return false;
}
quality.setEncodingQualityValue(value);
return true;
}
private boolean hasCountry(Locale locale) {
return locale.getCountry() != null && !"".equals(locale.getCountry().trim());
}
private boolean applyLanguage(Variant option, VariantQuality quality) {
if (requestedLanguages == null) {
return true;
}
Locale language = option.getLanguage();
if (language == null) {
return true;
}
QualityValue value = null;
for (Map.Entry<Locale, QualityValue> entry : requestedLanguages.entrySet()) {
Locale locale = entry.getKey();
QualityValue qualityValue = entry.getValue();
if (locale == null) {
continue;
}
if (locale.getLanguage().equalsIgnoreCase(language.getLanguage())) {
if (hasCountry(locale) && hasCountry(language)) {
if (locale.getCountry().equalsIgnoreCase(language.getCountry())) {
value = qualityValue;
break;
} else {
continue;
}
} else if (hasCountry(locale) == hasCountry(language)) {
value = qualityValue;
break;
} else {
value = qualityValue; // might be a better match so re-loop
}
}
}
if (value == null) {// try wildcard
value = requestedLanguages.get(null);
}
if (value == null) {// no match
return false;
}
if (!value.isAcceptable()) {
return false;
}
quality.setLanguageQualityValue(value);
return true;
}
}
| ServerDrivenNegotiation |
java | netty__netty | codec-classes-quic/src/main/java/io/netty/handler/codec/quic/QuicClientCodecBuilder.java | {
"start": 989,
"end": 1855
} | class ____ extends QuicCodecBuilder<QuicClientCodecBuilder> {
/**
* Creates a new instance.
*/
public QuicClientCodecBuilder() {
super(false);
}
private QuicClientCodecBuilder(QuicCodecBuilder<QuicClientCodecBuilder> builder) {
super(builder);
}
@Override
public QuicClientCodecBuilder clone() {
return new QuicClientCodecBuilder(this);
}
@Override
ChannelHandler build(QuicheConfig config,
Function<QuicChannel, ? extends QuicSslEngine> sslEngineProvider,
Executor sslTaskExecutor,
int localConnIdLength, FlushStrategy flushStrategy) {
return new QuicheQuicClientCodec(config, sslEngineProvider, sslTaskExecutor, localConnIdLength, flushStrategy);
}
}
| QuicClientCodecBuilder |
java | netty__netty | pkitesting/src/main/java/io/netty/pkitesting/RevocationServer.java | {
"start": 1738,
"end": 7664
} | class ____ {
private static volatile RevocationServer instance;
private final HttpServer crlServer;
private final String crlBaseAddress;
private final AtomicInteger issuerCounter;
private final ConcurrentMap<X509Certificate, CrlInfo> issuers;
private final ConcurrentMap<String, CrlInfo> paths;
/**
* Get the shared revocation server instance.
* This will start the server, if it isn't already running, and bind it to a random port on the loopback address.
* @return The revocation server instance.
* @throws Exception If the server failed to start.
*/
public static RevocationServer getInstance() throws Exception {
if (instance != null) {
return instance;
}
synchronized (RevocationServer.class) {
RevocationServer server = instance;
if (server == null) {
server = new RevocationServer();
server.start();
instance = server;
}
return server;
}
}
private RevocationServer() throws Exception {
// Use the JDK built-in HttpServer to avoid any circular dependencies with Netty itself.
crlServer = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0);
crlBaseAddress = "http://localhost:" + crlServer.getAddress().getPort();
issuerCounter = new AtomicInteger();
issuers = new ConcurrentHashMap<>();
paths = new ConcurrentHashMap<>();
crlServer.createContext("/", exchange -> {
if ("GET".equals(exchange.getRequestMethod())) {
String path = exchange.getRequestURI().getPath();
CrlInfo info = paths.get(path);
if (info == null) {
exchange.sendResponseHeaders(404, 0);
exchange.close();
return;
}
byte[] crl = generateCrl(info);
exchange.getResponseHeaders().put("Content-Type", Collections.singletonList("application/pkix-crl"));
exchange.sendResponseHeaders(200, crl.length);
try (OutputStream out = exchange.getResponseBody()) {
out.write(crl);
out.flush();
}
} else {
exchange.sendResponseHeaders(405, 0);
}
exchange.close();
});
}
private void start() {
if (Thread.currentThread().isDaemon()) {
crlServer.start();
} else {
// It's important the CRL server creates a daemon thread,
// because it's a singleton and won't be stopped except by terminating the JVM.
// Threads in the ForkJoin common pool are always daemon, and JUnit 5 initializes
// it anyway, so we can let it call start() for us.
ForkJoinPool.commonPool().execute(crlServer::start);
}
}
/**
* Register an issuer with the revocation server.
* This must be done before CRLs can be served for that issuer, and before any of its certificates can be revoked.
* @param issuer The issuer to register.
*/
public void register(X509Bundle issuer) {
register(issuer, null);
}
/**
* Register an issuer with the revocation server.
* This must be done before CRLs can be served for that issuer, and before any of its certificates can be revoked.
* @param issuer The issuer to register.
* @param provider The {@code Provider} to use (or {@code null} to fallback to default)
*/
public void register(X509Bundle issuer, Provider provider) {
issuers.computeIfAbsent(issuer.getCertificate(), bundle -> {
String path = "/crl/" + issuerCounter.incrementAndGet() + ".crl";
URI uri = URI.create(crlBaseAddress + path);
CrlInfo info = new CrlInfo(issuer, uri, provider);
paths.put(path, info);
return info;
});
}
/**
* Revoke the given certificate with the given revocation time.
* <p>
* The issuer of the given certificate must be {@linkplain #register(X509Bundle) registered} before its certifiactes
* can be revoked.
* @param cert The certificate to revoke.
* @param time The time of revocation.
*/
public void revoke(X509Bundle cert, Instant time) {
X509Certificate[] certPath = cert.getCertificatePathWithRoot();
X509Certificate issuer = certPath.length == 1 ? certPath[0] : certPath[1];
CrlInfo info = issuers.get(issuer);
if (info != null) {
info.revokedCerts.put(cert.getCertificate().getSerialNumber(), time);
} else {
throw new IllegalArgumentException("Not a registered issuer: " + issuer.getSubjectX500Principal());
}
}
/**
* Get the URI of the Certificate Revocation List for the given issuer.
* @param issuer The issuer to get the CRL for.
* @return The URI to the CRL for the given issuer,
* or {@code null} if the issuer is not {@linkplain #register(X509Bundle) registered}.
*/
public URI getCrlUri(X509Bundle issuer) {
CrlInfo info = issuers.get(issuer.getCertificate());
if (info != null) {
return info.uri;
}
return null;
}
private static byte[] generateCrl(CrlInfo info) {
X509Bundle issuer = info.issuer;
Map<BigInteger, Instant> certs = info.revokedCerts;
Instant now = Instant.now();
CertificateList list = new CertificateList(issuer, now, now, certs.entrySet());
try {
Signed signed = new Signed(list.getEncoded(), issuer);
return signed.getEncoded(info.provider);
} catch (Exception e) {
throw new IllegalStateException("Failed to sign CRL", e);
}
}
private static final | RevocationServer |
java | processing__processing4 | app/src/processing/app/tools/Tool.java | {
"start": 79,
"end": 1069
} | interface ____ for the Processing tools menu
Part of the Processing project - http://processing.org
Copyright (c) 2008 Ben Fry and Casey Reas
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software Foundation,
Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
package processing.app.tools;
import processing.app.Base;
/**
* Interface for items to be shown in the Tools menu.
*/
public | implementation |
java | google__auto | value/src/test/java/com/google/auto/value/processor/AutoAnnotationErrorsTest.java | {
"start": 8420,
"end": 9483
} | class ____ {",
" @AutoAnnotation static TestAnnotation newTestAnnotation("
+ wrongType
+ " value) {",
" return new AutoAnnotation_Test_newTestAnnotation(value);",
" }",
"}");
Compilation compilation =
javac().withProcessors(new AutoAnnotationProcessor()).compile(testSource, testAnnotation);
assertThat(compilation)
.hadErrorContaining(
"method parameter 'value' has type "
+ wrongType
+ " but com.example.TestAnnotation.value has type int[]")
.inFile(testSource)
.onLineContaining("TestAnnotation newTestAnnotation(");
}
}
@Test
public void testExtraParameters() {
JavaFileObject testSource =
JavaFileObjects.forSourceLines(
"com.foo.Test",
"package com.foo;",
"",
"import com.example.TestAnnotation;",
"import com.google.auto.value.AutoAnnotation;",
"",
" | Test |
java | quarkusio__quarkus | devtools/gradle/gradle-application-plugin/src/main/java/io/quarkus/gradle/dsl/CompilerOptions.java | {
"start": 91,
"end": 489
} | class ____ {
private final List<CompilerOption> compilerOptions = new ArrayList<>(1);
public CompilerOption compiler(String name) {
CompilerOption compilerOption = new CompilerOption(name);
compilerOptions.add(compilerOption);
return compilerOption;
}
public List<CompilerOption> getCompilerOptions() {
return compilerOptions;
}
}
| CompilerOptions |
java | google__guice | extensions/throwingproviders/test/com/google/inject/throwingproviders/CheckedProviderTest.java | {
"start": 27997,
"end": 30245
} | class ____<T> implements RemoteProvider<T> {
Exception nextToThrow;
T nextToReturn;
public void throwOnNextGet(Exception nextToThrow) {
this.nextToThrow = nextToThrow;
}
public void setNextToReturn(T nextToReturn) {
this.nextToReturn = nextToReturn;
}
@Override
public T get() throws RemoteException, BindException {
if (nextToThrow instanceof RemoteException) {
throw (RemoteException) nextToThrow;
} else if (nextToThrow instanceof BindException) {
throw (BindException) nextToThrow;
} else if (nextToThrow instanceof RuntimeException) {
throw (RuntimeException) nextToThrow;
} else if (nextToThrow == null) {
return nextToReturn;
} else {
throw new AssertionError("nextToThrow must be a runtime or remote exception");
}
}
}
public void testBindingToInterfaceWithBoundValueType_Bind() throws RemoteException {
bindInjector =
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
ThrowingProviderBinder.create(binder())
.bind(StringRemoteProvider.class, String.class)
.to(
new StringRemoteProvider() {
@Override
public String get() {
return "A";
}
});
}
});
assertEquals("A", bindInjector.getInstance(StringRemoteProvider.class).get());
}
public void testBindingToInterfaceWithBoundValueType_Provides() throws RemoteException {
providesInjector =
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
install(ThrowingProviderBinder.forModule(this));
}
@SuppressWarnings("unused")
@CheckedProvides(StringRemoteProvider.class)
String foo() throws RemoteException {
return "A";
}
});
assertEquals("A", providesInjector.getInstance(StringRemoteProvider.class).get());
}
| MockRemoteProvider |
java | spring-projects__spring-boot | build-plugin/spring-boot-maven-plugin/src/intTest/projects/start-stop/src/main/java/org/test/SampleApplication.java | {
"start": 1590,
"end": 1692
} | interface ____ {
boolean isReady();
void shutdown();
}
static final | SpringApplicationAdminMXBean |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PruneRedundantSortClauses.java | {
"start": 601,
"end": 1108
} | class ____ extends OptimizerRules.OptimizerRule<OrderBy> {
@Override
protected LogicalPlan rule(OrderBy plan) {
var referencedAttributes = new ExpressionSet<Order>();
var order = new ArrayList<Order>();
for (Order o : plan.order()) {
if (referencedAttributes.add(o)) {
order.add(o);
}
}
return plan.order().size() == order.size() ? plan : new OrderBy(plan.source(), plan.child(), order);
}
}
| PruneRedundantSortClauses |
java | spring-projects__spring-boot | core/spring-boot-testcontainers/src/dockerTest/java/org/springframework/boot/testcontainers/ImportTestcontainersTests.java | {
"start": 5589,
"end": 5698
} | class ____ {
static @Nullable PostgreSQLContainer container;
}
@ImportTestcontainers
static | NullContainer |
java | spring-projects__spring-framework | spring-jdbc/src/test/java/org/springframework/jdbc/core/DataClassRowMapperTests.java | {
"start": 1137,
"end": 3457
} | class ____ extends AbstractRowMapperTests {
@Test
void staticQueryWithDataClass() throws Exception {
Mock mock = new Mock();
ConstructorPerson person = mock.getJdbcTemplate().queryForObject(
"select name, age, birth_date, balance from people",
new DataClassRowMapper<>(ConstructorPerson.class));
verifyPerson(person);
mock.verifyClosed();
}
@Test
void staticQueryWithDataClassAndGenerics() throws Exception {
Mock mock = new Mock();
ConstructorPersonWithGenerics person = mock.getJdbcTemplate().queryForObject(
"select name, age, birth_date, balance from people",
new DataClassRowMapper<>(ConstructorPersonWithGenerics.class));
assertThat(person.name()).isEqualTo("Bubba");
assertThat(person.age()).isEqualTo(22L);
assertThat(person.birthDate()).usingComparator(Date::compareTo).isEqualTo(new Date(1221222L));
assertThat(person.balance()).containsExactly(new BigDecimal("1234.56"));
mock.verifyClosed();
}
@Test
void staticQueryWithDataClassAndSetters() throws Exception {
Mock mock = new Mock(MockType.FOUR);
ConstructorPersonWithSetters person = mock.getJdbcTemplate().queryForObject(
"select name, age, birthdate, balance from people",
new DataClassRowMapper<>(ConstructorPersonWithSetters.class));
assertThat(person.name()).isEqualTo("BUBBA");
assertThat(person.age()).isEqualTo(22L);
assertThat(person.birthDate()).usingComparator(Date::compareTo).isEqualTo(new Date(1221222L));
assertThat(person.balance()).isEqualTo(new BigDecimal("1234.56"));
mock.verifyClosed();
}
@Test
void staticQueryWithDataRecord() throws Exception {
Mock mock = new Mock();
RecordPerson person = mock.getJdbcTemplate().queryForObject(
"select name, age, birth_date, balance from people",
new DataClassRowMapper<>(RecordPerson.class));
verifyPerson(person);
mock.verifyClosed();
}
protected void verifyPerson(RecordPerson person) {
assertThat(person.name()).isEqualTo("Bubba");
assertThat(person.age()).isEqualTo(22L);
assertThat(person.birth_date()).usingComparator(Date::compareTo).isEqualTo(new Date(1221222L));
assertThat(person.balance()).isEqualTo(new BigDecimal("1234.56"));
verifyPersonViaBeanWrapper(person);
}
record RecordPerson(String name, long age, Date birth_date, BigDecimal balance) {
}
}
| DataClassRowMapperTests |
java | apache__camel | components/camel-twilio/src/generated/java/org/apache/camel/component/twilio/UsageRecordTodayEndpointConfigurationConfigurer.java | {
"start": 739,
"end": 3412
} | class ____ extends org.apache.camel.support.component.PropertyConfigurerSupport implements GeneratedPropertyConfigurer, ExtendedPropertyConfigurerGetter {
private static final Map<String, Object> ALL_OPTIONS;
static {
Map<String, Object> map = new CaseInsensitiveMap();
map.put("ApiName", org.apache.camel.component.twilio.internal.TwilioApiName.class);
map.put("MethodName", java.lang.String.class);
map.put("PathAccountSid", java.lang.String.class);
ALL_OPTIONS = map;
}
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
org.apache.camel.component.twilio.UsageRecordTodayEndpointConfiguration target = (org.apache.camel.component.twilio.UsageRecordTodayEndpointConfiguration) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "apiname":
case "apiName": target.setApiName(property(camelContext, org.apache.camel.component.twilio.internal.TwilioApiName.class, value)); return true;
case "methodname":
case "methodName": target.setMethodName(property(camelContext, java.lang.String.class, value)); return true;
case "pathaccountsid":
case "pathAccountSid": target.setPathAccountSid(property(camelContext, java.lang.String.class, value)); return true;
default: return false;
}
}
@Override
public Map<String, Object> getAllOptions(Object target) {
return ALL_OPTIONS;
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "apiname":
case "apiName": return org.apache.camel.component.twilio.internal.TwilioApiName.class;
case "methodname":
case "methodName": return java.lang.String.class;
case "pathaccountsid":
case "pathAccountSid": return java.lang.String.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
org.apache.camel.component.twilio.UsageRecordTodayEndpointConfiguration target = (org.apache.camel.component.twilio.UsageRecordTodayEndpointConfiguration) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "apiname":
case "apiName": return target.getApiName();
case "methodname":
case "methodName": return target.getMethodName();
case "pathaccountsid":
case "pathAccountSid": return target.getPathAccountSid();
default: return null;
}
}
}
| UsageRecordTodayEndpointConfigurationConfigurer |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/superbuilder/MuscleCar.java | {
"start": 200,
"end": 591
} | class ____ extends Car {
private final float horsePower;
protected MuscleCar(MuscleCarBuilder<?, ?> b) {
super( b );
this.horsePower = b.horsePower;
}
public static MuscleCarBuilder<?, ?> builder() {
return new MuscleCarBuilderImpl();
}
public float getHorsePower() {
return this.horsePower;
}
public abstract static | MuscleCar |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/data/binary/NestedRowData.java | {
"start": 2020,
"end": 12316
} | class ____ extends BinarySection implements RowData, TypedSetters {
private final int arity;
private final int nullBitsSizeInBytes;
public NestedRowData(int arity) {
checkArgument(arity >= 0);
this.arity = arity;
this.nullBitsSizeInBytes = calculateBitSetWidthInBytes(arity);
}
private int getFieldOffset(int pos) {
return offset + nullBitsSizeInBytes + pos * 8;
}
private void assertIndexIsValid(int index) {
assert index >= 0 : "index (" + index + ") should >= 0";
assert index < arity : "index (" + index + ") should < " + arity;
}
@Override
public int getArity() {
return arity;
}
@Override
public RowKind getRowKind() {
byte kindValue = BinarySegmentUtils.getByte(segments, offset);
return RowKind.fromByteValue(kindValue);
}
@Override
public void setRowKind(RowKind kind) {
BinarySegmentUtils.setByte(segments, offset, kind.toByteValue());
}
private void setNotNullAt(int i) {
assertIndexIsValid(i);
BinarySegmentUtils.bitUnSet(segments, offset, i + 8);
}
/** See {@link BinaryRowData#setNullAt(int)}. */
@Override
public void setNullAt(int i) {
assertIndexIsValid(i);
BinarySegmentUtils.bitSet(segments, offset, i + 8);
BinarySegmentUtils.setLong(segments, getFieldOffset(i), 0);
}
@Override
public void setInt(int pos, int value) {
assertIndexIsValid(pos);
setNotNullAt(pos);
BinarySegmentUtils.setInt(segments, getFieldOffset(pos), value);
}
@Override
public void setLong(int pos, long value) {
assertIndexIsValid(pos);
setNotNullAt(pos);
BinarySegmentUtils.setLong(segments, getFieldOffset(pos), value);
}
@Override
public void setDouble(int pos, double value) {
assertIndexIsValid(pos);
setNotNullAt(pos);
BinarySegmentUtils.setDouble(segments, getFieldOffset(pos), value);
}
@Override
public void setDecimal(int pos, DecimalData value, int precision) {
assertIndexIsValid(pos);
if (DecimalData.isCompact(precision)) {
// compact format
setLong(pos, value.toUnscaledLong());
} else {
int fieldOffset = getFieldOffset(pos);
int cursor = (int) (BinarySegmentUtils.getLong(segments, fieldOffset) >>> 32);
assert cursor > 0 : "invalid cursor " + cursor;
// zero-out the bytes
BinarySegmentUtils.setLong(segments, offset + cursor, 0L);
BinarySegmentUtils.setLong(segments, offset + cursor + 8, 0L);
if (value == null) {
setNullAt(pos);
// keep the offset for future update
BinarySegmentUtils.setLong(segments, fieldOffset, ((long) cursor) << 32);
} else {
byte[] bytes = value.toUnscaledBytes();
assert (bytes.length <= 16);
// Write the bytes to the variable length portion.
BinarySegmentUtils.copyFromBytes(segments, offset + cursor, bytes, 0, bytes.length);
setLong(pos, ((long) cursor << 32) | ((long) bytes.length));
}
}
}
@Override
public void setTimestamp(int pos, TimestampData value, int precision) {
assertIndexIsValid(pos);
if (TimestampData.isCompact(precision)) {
setLong(pos, value.getMillisecond());
} else {
int fieldOffset = getFieldOffset(pos);
int cursor = (int) (BinarySegmentUtils.getLong(segments, fieldOffset) >>> 32);
assert cursor > 0 : "invalid cursor " + cursor;
if (value == null) {
setNullAt(pos);
// zero-out the bytes
BinarySegmentUtils.setLong(segments, offset + cursor, 0L);
BinarySegmentUtils.setLong(segments, fieldOffset, ((long) cursor) << 32);
} else {
// write millisecond to variable length portion.
BinarySegmentUtils.setLong(segments, offset + cursor, value.getMillisecond());
// write nanoOfMillisecond to fixed-length portion.
setLong(pos, ((long) cursor << 32) | (long) value.getNanoOfMillisecond());
}
}
}
@Override
public void setBoolean(int pos, boolean value) {
assertIndexIsValid(pos);
setNotNullAt(pos);
BinarySegmentUtils.setBoolean(segments, getFieldOffset(pos), value);
}
@Override
public void setShort(int pos, short value) {
assertIndexIsValid(pos);
setNotNullAt(pos);
BinarySegmentUtils.setShort(segments, getFieldOffset(pos), value);
}
@Override
public void setByte(int pos, byte value) {
assertIndexIsValid(pos);
setNotNullAt(pos);
BinarySegmentUtils.setByte(segments, getFieldOffset(pos), value);
}
@Override
public void setFloat(int pos, float value) {
assertIndexIsValid(pos);
setNotNullAt(pos);
BinarySegmentUtils.setFloat(segments, getFieldOffset(pos), value);
}
@Override
public boolean isNullAt(int pos) {
assertIndexIsValid(pos);
return BinarySegmentUtils.bitGet(segments, offset, pos + 8);
}
@Override
public boolean getBoolean(int pos) {
assertIndexIsValid(pos);
return BinarySegmentUtils.getBoolean(segments, getFieldOffset(pos));
}
@Override
public byte getByte(int pos) {
assertIndexIsValid(pos);
return BinarySegmentUtils.getByte(segments, getFieldOffset(pos));
}
@Override
public short getShort(int pos) {
assertIndexIsValid(pos);
return BinarySegmentUtils.getShort(segments, getFieldOffset(pos));
}
@Override
public int getInt(int pos) {
assertIndexIsValid(pos);
return BinarySegmentUtils.getInt(segments, getFieldOffset(pos));
}
@Override
public long getLong(int pos) {
assertIndexIsValid(pos);
return BinarySegmentUtils.getLong(segments, getFieldOffset(pos));
}
@Override
public float getFloat(int pos) {
assertIndexIsValid(pos);
return BinarySegmentUtils.getFloat(segments, getFieldOffset(pos));
}
@Override
public double getDouble(int pos) {
assertIndexIsValid(pos);
return BinarySegmentUtils.getDouble(segments, getFieldOffset(pos));
}
@Override
public StringData getString(int pos) {
assertIndexIsValid(pos);
int fieldOffset = getFieldOffset(pos);
final long offsetAndLen = BinarySegmentUtils.getLong(segments, fieldOffset);
return BinarySegmentUtils.readStringData(segments, offset, fieldOffset, offsetAndLen);
}
@Override
public DecimalData getDecimal(int pos, int precision, int scale) {
assertIndexIsValid(pos);
if (DecimalData.isCompact(precision)) {
return DecimalData.fromUnscaledLong(
BinarySegmentUtils.getLong(segments, getFieldOffset(pos)), precision, scale);
}
int fieldOffset = getFieldOffset(pos);
final long offsetAndSize = BinarySegmentUtils.getLong(segments, fieldOffset);
return BinarySegmentUtils.readDecimalData(
segments, offset, offsetAndSize, precision, scale);
}
@Override
public TimestampData getTimestamp(int pos, int precision) {
assertIndexIsValid(pos);
if (TimestampData.isCompact(precision)) {
return TimestampData.fromEpochMillis(
BinarySegmentUtils.getLong(segments, getFieldOffset(pos)));
}
int fieldOffset = getFieldOffset(pos);
final long offsetAndNanoOfMilli = BinarySegmentUtils.getLong(segments, fieldOffset);
return BinarySegmentUtils.readTimestampData(segments, offset, offsetAndNanoOfMilli);
}
@Override
public <T> RawValueData<T> getRawValue(int pos) {
assertIndexIsValid(pos);
return BinarySegmentUtils.readRawValueData(segments, offset, getLong(pos));
}
@Override
public byte[] getBinary(int pos) {
assertIndexIsValid(pos);
int fieldOffset = getFieldOffset(pos);
final long offsetAndLen = BinarySegmentUtils.getLong(segments, fieldOffset);
return BinarySegmentUtils.readBinary(segments, offset, fieldOffset, offsetAndLen);
}
@Override
public RowData getRow(int pos, int numFields) {
assertIndexIsValid(pos);
return BinarySegmentUtils.readRowData(segments, numFields, offset, getLong(pos));
}
@Override
public Variant getVariant(int pos) {
assertIndexIsValid(pos);
return BinarySegmentUtils.readVariant(segments, offset, getLong(pos));
}
@Override
public ArrayData getArray(int pos) {
assertIndexIsValid(pos);
return BinarySegmentUtils.readArrayData(segments, offset, getLong(pos));
}
@Override
public MapData getMap(int pos) {
assertIndexIsValid(pos);
return BinarySegmentUtils.readMapData(segments, offset, getLong(pos));
}
public NestedRowData copy() {
return copy(new NestedRowData(arity));
}
public NestedRowData copy(RowData reuse) {
return copyInternal((NestedRowData) reuse);
}
private NestedRowData copyInternal(NestedRowData reuse) {
byte[] bytes = BinarySegmentUtils.copyToBytes(segments, offset, sizeInBytes);
reuse.pointTo(MemorySegmentFactory.wrap(bytes), 0, sizeInBytes);
return reuse;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
// both BinaryRowData and NestedRowData have the same memory format
if (!(o instanceof NestedRowData || o instanceof BinaryRowData)) {
return false;
}
final BinarySection that = (BinarySection) o;
return sizeInBytes == that.sizeInBytes
&& BinarySegmentUtils.equals(
segments, offset, that.segments, that.offset, sizeInBytes);
}
@Override
public int hashCode() {
return BinarySegmentUtils.hashByWords(segments, offset, sizeInBytes);
}
}
| NestedRowData |
java | netty__netty | transport/src/test/java/io/netty/channel/DefaultChannelPipelineTest.java | {
"start": 75795,
"end": 76876
} | class ____ extends ChannelInboundHandlerAdapter {
private final Throwable expected;
private final Promise<Void> promise;
CheckExceptionHandler(Throwable expected, Promise<Void> promise) {
this.expected = expected;
this.promise = promise;
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
if (cause instanceof ChannelPipelineException && cause.getCause() == expected) {
promise.setSuccess(null);
} else {
promise.setFailure(new AssertionError("cause not the expected instance"));
}
}
}
private static void assertHandler(CheckOrderHandler actual, CheckOrderHandler... handlers) throws Throwable {
for (CheckOrderHandler h : handlers) {
if (h == actual) {
actual.checkError();
return;
}
}
fail("handler was not one of the expected handlers");
}
private static final | CheckExceptionHandler |
java | quarkusio__quarkus | extensions/cache/runtime/src/main/java/io/quarkus/cache/CacheManagerInfo.java | {
"start": 93,
"end": 217
} | interface ____ {
boolean supports(Context context);
Supplier<CacheManager> get(Context context);
| CacheManagerInfo |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/builder/BuilderTest2_private.java | {
"start": 596,
"end": 875
} | class ____ {
private int id;
private String name;
public int getId() {
return id;
}
public String getName() {
return name;
}
}
@JSONPOJOBuilder(buildMethod="xxx")
private static | VO |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-jackson/deployment/src/test/java/io/quarkus/resteasy/reactive/jackson/deployment/test/AbstractPet.java | {
"start": 79,
"end": 562
} | class ____ implements SecuredPersonInterface {
private String publicName;
private Veterinarian veterinarian;
public String getPublicName() {
return publicName;
}
public void setPublicName(String publicName) {
this.publicName = publicName;
}
public Veterinarian getVeterinarian() {
return veterinarian;
}
public void setVeterinarian(Veterinarian veterinarian) {
this.veterinarian = veterinarian;
}
}
| AbstractPet |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/cid/EmbeddedIdLazyOneToOneCriteriaQueryTest.java | {
"start": 2444,
"end": 2697
} | class ____ {
@Id
private Integer id;
@OneToOne(mappedBy = "id.entityA", fetch = FetchType.LAZY)
private EntityB entityB;
public EntityA() {
}
public EntityA(Integer id) {
this.id = id;
}
}
@Entity(name = "EntityB")
static | EntityA |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/id/custom/CustomGeneratorTests.java | {
"start": 817,
"end": 1600
} | class ____ {
@Test
public void verifyModel(DomainModelScope scope) {
scope.withHierarchy( TheEntity.class, (descriptor) -> {
final Property idProperty = descriptor.getIdentifierProperty();
final BasicValue value = (BasicValue) idProperty.getValue();
assertThat( value.getCustomIdGeneratorCreator() ).isNotNull();
// final String strategy = value.getIdentifierGeneratorStrategy();
// assertThat( strategy ).isEqualTo( "assigned" );
} );
}
@Test
public void basicUseTest(SessionFactoryScope scope) {
assertThat( CustomSequenceGenerator.generationCount ).isEqualTo( 0 );
scope.inTransaction( (session) -> {
session.persist( new TheEntity( "steve" ) );
} );
assertThat( CustomSequenceGenerator.generationCount ).isEqualTo( 1 );
}
}
| CustomGeneratorTests |
java | reactor__reactor-core | reactor-core/src/test/java/reactor/core/scheduler/SingleWorkerSchedulerTest.java | {
"start": 846,
"end": 2728
} | class ____ extends AbstractSchedulerTest {
@Override
protected Scheduler scheduler() {
return Schedulers.single(Schedulers.immediate());
}
@Override
protected Scheduler freshScheduler() {
return Schedulers.single(Schedulers.immediate());
}
@Override
protected boolean shouldCheckInit() {
return false;
}
@Override
protected boolean shouldCheckDisposeTask() {
return false;
}
@Override
protected boolean shouldCheckDirectTimeScheduling() {
return false;
}
@Override
protected boolean shouldCheckWorkerTimeScheduling() {
return false;
}
@Override
protected boolean shouldCheckSupportRestart() {
return false;
}
@Test
public void scanName() {
Scheduler withNamedFactory = Schedulers.single(Schedulers.newSingle("scanName"));
Scheduler withBasicFactory = Schedulers.single(Schedulers.newParallel(9, Thread::new));
Scheduler.Worker workerWithNamedFactory = withNamedFactory.createWorker();
Scheduler.Worker workerWithBasicFactory = withBasicFactory.createWorker();
try {
assertThat(Scannable.from(withNamedFactory).scan(Scannable.Attr.NAME))
.as("withNamedFactory")
.isEqualTo("singleWorker(ExecutorServiceWorker)");
assertThat(Scannable.from(withBasicFactory).scan(Scannable.Attr.NAME))
.as("withBasicFactory")
.isEqualTo("singleWorker(ExecutorServiceWorker)");
assertThat(Scannable.from(workerWithNamedFactory).scan(Scannable.Attr.NAME))
.as("workerWithNamedFactory")
.isEqualTo("singleWorker(ExecutorServiceWorker).worker");
assertThat(Scannable.from(workerWithBasicFactory).scan(Scannable.Attr.NAME))
.as("workerWithBasicFactory")
.isEqualTo("singleWorker(ExecutorServiceWorker).worker");
}
finally {
withNamedFactory.dispose();
withBasicFactory.dispose();
workerWithNamedFactory.dispose();
workerWithBasicFactory.dispose();
}
}
}
| SingleWorkerSchedulerTest |
java | google__dagger | javatests/dagger/internal/codegen/DaggerSuperficialValidationTest.java | {
"start": 25275,
"end": 25434
} | class ____ implements MissingType {}"),
CompilerTests.kotlinSource(
"test.Foo.kt",
"package test",
"",
" | Foo |
java | apache__camel | components/camel-quartz/src/test/java/org/apache/camel/routepolicy/quartz/DateFactory.java | {
"start": 886,
"end": 1061
} | class ____ {
private DateFactory() {
}
public static Date createDate(int future) {
return new Date(System.currentTimeMillis() + future);
}
}
| DateFactory |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/inheritance/QualifiersInheritanceTest.java | {
"start": 648,
"end": 1497
} | class ____ {
@RegisterExtension
public ArcTestContainer container = ArcTestContainer.builder().removeUnusedBeans(false)
.beanClasses(SuperBean.class, Alpha.class, Bravo.class, InheritedQualifier.class, NonInheritedQualifier.class)
.build();
@SuppressWarnings("serial")
@Test
public void testInheritance() {
ArcContainer container = Arc.container();
// Bravo is not eligible because it has @InheritedQualifier("bravo")
assertTrue(container.select(SuperBean.class, new InheritedQualifier.Literal("super")).isResolvable());
// @NonInheritedQualifier is not inherited
assertFalse(container.select(SuperBean.class, new NonInheritedQualifier.Literal()).isResolvable());
}
@InheritedQualifier("super")
@NonInheritedQualifier
static | QualifiersInheritanceTest |
java | hibernate__hibernate-orm | hibernate-testing/src/main/java/org/hibernate/testing/orm/junit/DialectFeatureChecks.java | {
"start": 35358,
"end": 35540
} | class ____ implements DialectFeatureCheck {
public boolean apply(Dialect dialect) {
return definesFunction( dialect, "array_position" );
}
}
public static | SupportsArrayPosition |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/operators/single/SingleInternalHelper.java | {
"start": 1893,
"end": 2505
} | class ____<T> implements Iterator<Flowable<T>> {
private final Iterator<? extends SingleSource<? extends T>> sit;
ToFlowableIterator(Iterator<? extends SingleSource<? extends T>> sit) {
this.sit = sit;
}
@Override
public boolean hasNext() {
return sit.hasNext();
}
@Override
public Flowable<T> next() {
return new SingleToFlowable<>(sit.next());
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
}
static final | ToFlowableIterator |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/tracing/BraveTracing.java | {
"start": 9594,
"end": 11181
} | class ____ extends Tracer {
private final brave.Tracing tracing;
private final BraveTracingOptions tracingOptions;
private final boolean includeCommandArgsInSpanTags;
BraveTracer(brave.Tracing tracing, BraveTracingOptions tracingOptions, boolean includeCommandArgsInSpanTags) {
this.tracing = tracing;
this.tracingOptions = tracingOptions;
this.includeCommandArgsInSpanTags = includeCommandArgsInSpanTags;
}
@Override
public Span nextSpan() {
return postProcessSpan(tracing.tracer().nextSpan());
}
@Override
public Span nextSpan(TraceContext traceContext) {
if (!(traceContext instanceof BraveTraceContext)) {
return nextSpan();
}
BraveTraceContext braveTraceContext = BraveTraceContext.class.cast(traceContext);
if (braveTraceContext.traceContext == null) {
return nextSpan();
}
return postProcessSpan(
tracing.tracer().nextSpan(TraceContextOrSamplingFlags.create(braveTraceContext.traceContext)));
}
private Span postProcessSpan(brave.Span span) {
if (span == null || span.isNoop()) {
return NoOpTracing.NoOpSpan.INSTANCE;
}
return new BraveSpan(span.kind(brave.Span.Kind.CLIENT), this.tracingOptions, includeCommandArgsInSpanTags);
}
}
/**
* Brave-specific {@link io.lettuce.core.tracing.Tracer.Span}.
*/
static | BraveTracer |
java | apache__kafka | tools/src/test/java/org/apache/kafka/tools/ConnectPluginPathTest.java | {
"start": 27772,
"end": 31192
} | class ____ {
public CommandResult(int returnCode, String out, String err, PluginScanResult reflective, PluginScanResult serviceLoading) {
this.returnCode = returnCode;
this.out = out;
this.err = err;
this.reflective = reflective;
this.serviceLoading = serviceLoading;
}
int returnCode;
String out;
String err;
PluginScanResult reflective;
PluginScanResult serviceLoading;
}
private static CommandResult runCommand(Object... args) {
ByteArrayOutputStream out = new ByteArrayOutputStream();
ByteArrayOutputStream err = new ByteArrayOutputStream();
int returnCode = ConnectPluginPath.mainNoExit(
Arrays.stream(args)
.map(Object::toString)
.toList()
.toArray(new String[]{}),
new PrintStream(out, true, StandardCharsets.UTF_8),
new PrintStream(err, true, StandardCharsets.UTF_8));
Set<Path> pluginLocations = getPluginLocations(args);
ClassLoader parent = ConnectPluginPath.class.getClassLoader();
ClassLoaderFactory factory = new ClassLoaderFactory();
try (DelegatingClassLoader delegatingClassLoader = factory.newDelegatingClassLoader(parent)) {
Set<PluginSource> sources = PluginUtils.pluginSources(pluginLocations, delegatingClassLoader, factory);
String stdout = out.toString(StandardCharsets.UTF_8);
String stderr = err.toString(StandardCharsets.UTF_8);
log.info("STDOUT:\n{}", stdout);
log.info("STDERR:\n{}", stderr);
return new CommandResult(
returnCode,
stdout,
stderr,
new ReflectionScanner().discoverPlugins(sources),
new ServiceLoaderScanner().discoverPlugins(sources)
);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
private static Set<Path> getPluginLocations(Object[] args) {
return Arrays.stream(args)
.flatMap(obj -> {
if (obj instanceof WorkerConfig) {
return ((WorkerConfig) obj).pluginPathElements.stream();
} else {
return Stream.of(obj);
}
})
.flatMap(obj -> {
if (obj instanceof PluginPathElement) {
return ((PluginPathElement) obj).locations.stream();
} else {
return Stream.of(obj);
}
})
.map(obj -> {
if (obj instanceof PluginLocation) {
return ((PluginLocation) obj).path;
} else {
return null;
}
})
.filter(Objects::nonNull)
.collect(Collectors.toSet());
}
/**
* Parse the main table of the list command.
* <p>Map is keyed on the plugin name, with a list of rows which referred to that name if there are multiple.
* Each row is pre-split into columns.
* @param listOutput An executed list command
* @return A parsed form of the table grouped by plugin | CommandResult |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/UnicodeInCodeTest.java | {
"start": 2233,
"end": 2487
} | class ____ {
static final String pi = "\u03c0";
}
""")
.doTest();
}
@Test
public void negativeInCharLiteral() {
helper
.addSourceLines(
"Test.java",
"""
| Test |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/engine/extension/TestWatcherTests.java | {
"start": 13998,
"end": 14790
} | class ____ implements BeforeTestExecutionCallback, TestWatcher {
private static final Namespace NAMESPACE = Namespace.create(DataRetrievingTestWatcher.class);
private static final String KEY = "key";
private static final Map<String, @Nullable String> results = new HashMap<>();
@Override
public void beforeTestExecution(ExtensionContext context) throws Exception {
getStore(context).put(KEY, "enigma");
}
@Override
public void testSuccessful(ExtensionContext context) {
results.put(KEY, getStore(context).get(KEY, String.class));
}
private static Store getStore(ExtensionContext context) {
return context.getStore(NAMESPACE);
}
}
@SuppressWarnings("JUnitMalformedDeclaration")
@ExtendWith(DataRetrievingTestWatcher.class)
static | DataRetrievingTestWatcher |
java | apache__camel | components/camel-twilio/src/generated/java/org/apache/camel/component/twilio/SipIpAccessControlListIpAddressEndpointConfiguration.java | {
"start": 2503,
"end": 4727
} | class ____ extends TwilioConfiguration {
@UriParam
@ApiParam(optional = false, apiMethods = {@ApiMethod(methodName = "creator")})
private String friendlyName;
@UriParam
@ApiParam(optional = false, apiMethods = {@ApiMethod(methodName = "creator")})
private String ipAddress;
@UriParam
@ApiParam(optional = false, apiMethods = {@ApiMethod(methodName = "creator"), @ApiMethod(methodName = "deleter"), @ApiMethod(methodName = "fetcher"), @ApiMethod(methodName = "reader"), @ApiMethod(methodName = "updater")})
private String pathAccountSid;
@UriParam
@ApiParam(optional = false, apiMethods = {@ApiMethod(methodName = "creator"), @ApiMethod(methodName = "creator"), @ApiMethod(methodName = "deleter"), @ApiMethod(methodName = "deleter"), @ApiMethod(methodName = "fetcher"), @ApiMethod(methodName = "fetcher"), @ApiMethod(methodName = "reader"), @ApiMethod(methodName = "reader"), @ApiMethod(methodName = "updater"), @ApiMethod(methodName = "updater")})
private String pathIpAccessControlListSid;
@UriParam
@ApiParam(optional = false, apiMethods = {@ApiMethod(methodName = "deleter"), @ApiMethod(methodName = "fetcher"), @ApiMethod(methodName = "updater")})
private String pathSid;
public String getFriendlyName() {
return friendlyName;
}
public void setFriendlyName(String friendlyName) {
this.friendlyName = friendlyName;
}
public String getIpAddress() {
return ipAddress;
}
public void setIpAddress(String ipAddress) {
this.ipAddress = ipAddress;
}
public String getPathAccountSid() {
return pathAccountSid;
}
public void setPathAccountSid(String pathAccountSid) {
this.pathAccountSid = pathAccountSid;
}
public String getPathIpAccessControlListSid() {
return pathIpAccessControlListSid;
}
public void setPathIpAccessControlListSid(String pathIpAccessControlListSid) {
this.pathIpAccessControlListSid = pathIpAccessControlListSid;
}
public String getPathSid() {
return pathSid;
}
public void setPathSid(String pathSid) {
this.pathSid = pathSid;
}
}
| SipIpAccessControlListIpAddressEndpointConfiguration |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.