language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/SpringWebserviceEndpointBuilderFactory.java
|
{
"start": 1465,
"end": 1619
}
|
interface ____ {
/**
* Builder for endpoint consumers for the Spring WebService component.
*/
public
|
SpringWebserviceEndpointBuilderFactory
|
java
|
quarkusio__quarkus
|
integration-tests/kubernetes/quarkus-standard-way/src/test/java/io/quarkus/it/kubernetes/KubernetesWithCustomOutputDirTest.java
|
{
"start": 780,
"end": 2608
}
|
class ____ {
private static final String APP_NAME = "kubernetes-with-custom-output-dir";
@RegisterExtension
static final QuarkusProdModeTest config = new QuarkusProdModeTest()
.withApplicationRoot((jar) -> jar.addClasses(GreetingResource.class))
.setApplicationName(APP_NAME)
.setApplicationVersion("0.1-SNAPSHOT")
.addBuildChainCustomizerEntries(
new QuarkusProdModeTest.BuildChainCustomizerEntry(CustomProjectRootBuildItemProducerProdMode.class,
List.of(CustomProjectRootBuildItem.class, CustomKubernetesOutputDirBuildItem.class),
Collections.emptyList()));
@ProdBuildResults
private ProdModeTestResults prodModeTestResults;
@Test
public void assertGeneratedResources() throws IOException {
final Path kubernetesDir = prodModeTestResults.getBuildDir().getParent().resolve("custom-sources")
.resolve(".kubernetes");
assertThat(kubernetesDir)
.isDirectoryContaining(p -> p.getFileName().endsWith("kubernetes.json"))
.isDirectoryContaining(p -> p.getFileName().endsWith("kubernetes.yml"))
.satisfies(p -> assertThat(p.toFile().listFiles()).hasSize(2));
List<HasMetadata> kubernetesList = DeserializationUtil.deserializeAsList(kubernetesDir.resolve("kubernetes.yml"));
assertThat(kubernetesList).filteredOn(h -> h.getMetadata().getName().equals(APP_NAME))
.filteredOn(e -> e instanceof Deployment).singleElement().satisfies(d -> {
assertThat(d.getMetadata()).satisfies(m -> {
assertThat(m.getName()).isEqualTo(APP_NAME);
});
});
}
public static
|
KubernetesWithCustomOutputDirTest
|
java
|
quarkusio__quarkus
|
integration-tests/devtools/src/test/java/io/quarkus/devtools/codestarts/quarkus/KotlinSerializationCodestartTest.java
|
{
"start": 481,
"end": 2530
}
|
class ____ {
@RegisterExtension
public static QuarkusCodestartTest codestartMavenTest = QuarkusCodestartTest.builder()
.extension(ArtifactKey.fromString("io.quarkus:quarkus-rest-kotlin-serialization"))
.languages(KOTLIN)
.buildTool(BuildTool.MAVEN)
.build();
@RegisterExtension
public static QuarkusCodestartTest codestartGradleTest = QuarkusCodestartTest.builder()
.extension(ArtifactKey.fromString("io.quarkus:quarkus-rest-kotlin-serialization"))
.languages(KOTLIN)
.buildTool(BuildTool.GRADLE)
.build();
@RegisterExtension
public static QuarkusCodestartTest codestartGradleKotlinTest = QuarkusCodestartTest.builder()
.extension(ArtifactKey.fromString("io.quarkus:quarkus-rest-kotlin-serialization"))
.languages(KOTLIN)
.buildTool(BuildTool.GRADLE_KOTLIN_DSL)
.build();
@Test
void testMavenContent() throws Throwable {
codestartMavenTest.assertThatGeneratedFileMatchSnapshot(KOTLIN, "pom.xml")
.satisfies(checkContains("<plugin>kotlinx-serialization</plugin>"))
.satisfies(checkContains("<artifactId>kotlin-maven-serialization</artifactId>"));
}
@Test
void testGradleContent() throws Throwable {
codestartGradleTest.assertThatGeneratedFileMatchSnapshot(KOTLIN, "build.gradle")
.satisfies(checkContains("id 'org.jetbrains.kotlin.plugin.serialization' version "));
}
@Test
void testGradleKotlinContent() throws Throwable {
codestartGradleKotlinTest.assertThatGeneratedFileMatchSnapshot(KOTLIN, "build.gradle.kts")
.satisfies(checkContains("kotlin(\"plugin.serialization\") version "));
}
@Test
void buildAllProjectsMaven() throws Throwable {
codestartMavenTest.buildAllProjects();
}
@Test
void buildAllProjectsGradle() throws Throwable {
codestartGradleTest.buildAllProjects();
}
}
|
KotlinSerializationCodestartTest
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/common/serialization/TestResourceSerDe.java
|
{
"start": 1257,
"end": 2061
}
|
class ____ {
/**
* Testing variables.
*/
private Gson gson;
private Resource resource;
@BeforeEach
public final void setup() {
resource = Resource.newInstance(1024 * 100, 100);
gson = new GsonBuilder()
.registerTypeAdapter(Resource.class, new ResourceSerDe()).create();
}
@Test
public final void testSerialization() {
final String json = gson.toJson(resource, new TypeToken<Resource>() {
}.getType());
final Resource resourceDe = gson.fromJson(json, new TypeToken<Resource>() {
}.getType());
assertEquals(resource.getMemorySize(), resourceDe.getMemorySize());
assertEquals(resource.getVirtualCores(), resourceDe.getVirtualCores());
}
@AfterEach
public final void cleanUp() {
resource = null;
gson = null;
}
}
|
TestResourceSerDe
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/component/xslt/XsltOutputFileTest.java
|
{
"start": 1317,
"end": 2680
}
|
class ____ extends ContextTestSupport {
@Test
public void testXsltOutput() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("<?xml version=\"1.0\" encoding=\"UTF-8\"?><goodbye>world!</goodbye>");
mock.expectedFileExists(testFile("xsltme.xml"));
mock.message(0).body().isInstanceOf(File.class);
template.sendBodyAndHeader("direct:start", "<hello>world!</hello>", Exchange.XSLT_FILE_NAME,
testFile("xsltme.xml").toString());
assertMockEndpointsSatisfied();
}
@Test
public void testXsltOutputFileMissingHeader() {
CamelExecutionException e = assertThrows(CamelExecutionException.class,
() -> template.sendBody("direct:start", "<hello>world!</hello>"),
"Should thrown exception");
NoSuchHeaderException nshe = assertIsInstanceOf(NoSuchHeaderException.class, e.getCause());
assertEquals(Exchange.XSLT_FILE_NAME, nshe.getHeaderName());
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start").to("xslt:org/apache/camel/component/xslt/example.xsl?output=file").to("mock:result");
}
};
}
}
|
XsltOutputFileTest
|
java
|
apache__camel
|
core/camel-main/src/generated/java/org/apache/camel/main/OtelConfigurationPropertiesConfigurer.java
|
{
"start": 705,
"end": 3784
}
|
class ____ extends org.apache.camel.support.component.PropertyConfigurerSupport implements GeneratedPropertyConfigurer, ExtendedPropertyConfigurerGetter {
private static final Map<String, Object> ALL_OPTIONS;
static {
Map<String, Object> map = new CaseInsensitiveMap();
map.put("Enabled", boolean.class);
map.put("Encoding", boolean.class);
map.put("ExcludePatterns", java.lang.String.class);
map.put("InstrumentationName", java.lang.String.class);
map.put("TraceProcessors", boolean.class);
ALL_OPTIONS = map;
}
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
org.apache.camel.main.OtelConfigurationProperties target = (org.apache.camel.main.OtelConfigurationProperties) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "enabled": target.setEnabled(property(camelContext, boolean.class, value)); return true;
case "encoding": target.setEncoding(property(camelContext, boolean.class, value)); return true;
case "excludepatterns":
case "excludePatterns": target.setExcludePatterns(property(camelContext, java.lang.String.class, value)); return true;
case "instrumentationname":
case "instrumentationName": target.setInstrumentationName(property(camelContext, java.lang.String.class, value)); return true;
case "traceprocessors":
case "traceProcessors": target.setTraceProcessors(property(camelContext, boolean.class, value)); return true;
default: return false;
}
}
@Override
public Map<String, Object> getAllOptions(Object target) {
return ALL_OPTIONS;
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "enabled": return boolean.class;
case "encoding": return boolean.class;
case "excludepatterns":
case "excludePatterns": return java.lang.String.class;
case "instrumentationname":
case "instrumentationName": return java.lang.String.class;
case "traceprocessors":
case "traceProcessors": return boolean.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
org.apache.camel.main.OtelConfigurationProperties target = (org.apache.camel.main.OtelConfigurationProperties) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "enabled": return target.isEnabled();
case "encoding": return target.isEncoding();
case "excludepatterns":
case "excludePatterns": return target.getExcludePatterns();
case "instrumentationname":
case "instrumentationName": return target.getInstrumentationName();
case "traceprocessors":
case "traceProcessors": return target.isTraceProcessors();
default: return null;
}
}
}
|
OtelConfigurationPropertiesConfigurer
|
java
|
spring-projects__spring-security
|
messaging/src/main/java/org/springframework/security/messaging/handler/invocation/reactive/AuthenticationPrincipalArgumentResolver.java
|
{
"start": 2607,
"end": 3464
}
|
class ____ {
* @MessageMapping("/im")
* public void im(@AuthenticationPrincipal CustomUser customUser) {
* // do something with CustomUser
* }
* }
* </pre>
*
* <p>
* Will resolve the CustomUser argument using {@link Authentication#getPrincipal()} from
* the {@link ReactiveSecurityContextHolder}. If the {@link Authentication} or
* {@link Authentication#getPrincipal()} is null, it will return null. If the types do not
* match, null will be returned unless
* {@link AuthenticationPrincipal#errorOnInvalidType()} is true in which case a
* {@link ClassCastException} will be thrown.
*
* <p>
* Alternatively, users can create a custom meta annotation as shown below:
*
* <pre>
* @Target({ ElementType.PARAMETER })
* @Retention(RetentionPolicy.RUNTIME)
* @AuthenticationPrincipal
* public @
|
MyController
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/serializer/stream/StreamWriterTest_writeLongAndChar.java
|
{
"start": 203,
"end": 720
}
|
class ____ extends TestCase {
public void test_0() throws Exception {
StringWriter out = new StringWriter();
SerializeWriter writer = new SerializeWriter(out, 10);
Assert.assertEquals(10, writer.getBufferLength());
writer.write("abcde");
writer.writeLong(12345678L);
writer.write(',');
writer.close();
String text = out.toString();
Assert.assertEquals("abcde12345678,", text);
}
}
|
StreamWriterTest_writeLongAndChar
|
java
|
elastic__elasticsearch
|
x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RepositoryAnalyzeAction.java
|
{
"start": 20854,
"end": 36124
}
|
class ____ implements ActionListener<Void> {
@Override
public void onResponse(Void unused) {
// task complete, nothing to do
}
@Override
public void onFailure(Exception e) {
assert e instanceof ElasticsearchTimeoutException : e;
if (isRunning()) {
// if this CAS fails then we're already failing for some other reason, nbd
setFirstFailure(analysisTimedOutException);
}
}
}
public void run() {
assert queue.isEmpty() : "must only run action once";
assert failure.get() == null : "must only run action once";
logger.info("running analysis of repository [{}] using path [{}]", request.getRepositoryName(), blobPath);
cancellationListener.addTimeout(request.getTimeout(), repository.threadPool(), EsExecutors.DIRECT_EXECUTOR_SERVICE);
cancellationListener.addListener(new CheckForCancelListener());
task.addListener(() -> setFirstFailure(analysisCancelledException));
final Random random = new Random(request.getSeed());
final List<DiscoveryNode> nodes = getSnapshotNodes(discoveryNodes);
if (minClusterTransportVersion.onOrAfter(TransportVersions.V_8_8_0)) {
final String contendedRegisterName = CONTENDED_REGISTER_NAME_PREFIX + UUIDs.randomBase64UUID(random);
final AtomicBoolean contendedRegisterAnalysisComplete = new AtomicBoolean();
final int registerOperations = Math.max(nodes.size(), request.getRegisterOperationCount());
try (
var registerRefs = new RefCountingRunnable(
finalRegisterValueVerifier(
contendedRegisterName,
registerOperations,
random,
Releasables.wrap(requestRefs.acquire(), () -> contendedRegisterAnalysisComplete.set(true))
)
)
) {
for (int i = 0; i < registerOperations; i++) {
final ContendedRegisterAnalyzeAction.Request registerAnalyzeRequest = new ContendedRegisterAnalyzeAction.Request(
request.getRepositoryName(),
blobPath,
contendedRegisterName,
registerOperations,
random.nextInt((registerOperations + 1) * 2)
);
final DiscoveryNode node = nodes.get(i < nodes.size() ? i : random.nextInt(nodes.size()));
final Releasable registerRef = registerRefs.acquire();
queue.add(ref -> runContendedRegisterAnalysis(Releasables.wrap(registerRef, ref), registerAnalyzeRequest, node));
}
}
if (minClusterTransportVersion.onOrAfter(TransportVersions.V_8_12_0)) {
new UncontendedRegisterAnalysis(new Random(random.nextLong()), nodes, contendedRegisterAnalysisComplete).run();
}
}
final List<Long> blobSizes = getBlobSizes(request);
Collections.shuffle(blobSizes, random);
int blobCount = request.getBlobCount();
for (int i = 0; i < blobCount; i++) {
final long targetLength = blobSizes.get(i);
final boolean smallBlob = targetLength <= MAX_ATOMIC_WRITE_SIZE; // avoid the atomic API for larger blobs
final boolean abortWrite = smallBlob && request.isAbortWritePermitted() && rarely(random);
final boolean doCopy = minClusterTransportVersion.supports(REPO_ANALYSIS_COPY_BLOB) && rarely(random) && i > 0;
final String blobName = "test-blob-" + i + "-" + UUIDs.randomBase64UUID(random);
String copyBlobName = null;
if (doCopy) {
copyBlobName = blobName + "-copy";
blobCount--;
if (i >= blobCount) {
break;
}
}
final BlobAnalyzeAction.Request blobAnalyzeRequest = new BlobAnalyzeAction.Request(
request.getRepositoryName(),
blobPath,
blobName,
targetLength,
random.nextLong(),
nodes,
request.getReadNodeCount(),
request.getEarlyReadNodeCount(),
smallBlob && rarely(random),
repository.supportURLRepo() && repository.hasAtomicOverwrites() && smallBlob && rarely(random) && abortWrite == false,
abortWrite,
copyBlobName
);
final DiscoveryNode node = nodes.get(random.nextInt(nodes.size()));
queue.add(ref -> runBlobAnalysis(ref, blobAnalyzeRequest, node));
}
ThrottledIterator.run(getQueueIterator(), (ref, task) -> task.accept(ref), request.getConcurrency(), requestRefs::close);
}
private boolean rarely(Random random) {
return random.nextDouble() < request.getRareActionProbability();
}
private Iterator<Consumer<Releasable>> getQueueIterator() {
return new Iterator<>() {
Consumer<Releasable> nextItem = queue.poll();
@Override
public boolean hasNext() {
return nextItem != null;
}
@Override
public Consumer<Releasable> next() {
assert nextItem != null;
final var currentItem = nextItem;
nextItem = queue.poll();
return currentItem;
}
};
}
private void runBlobAnalysis(Releasable ref, final BlobAnalyzeAction.Request request, DiscoveryNode node) {
if (isRunning()) {
logger.trace("processing [{}] on [{}]", request, node);
// NB although all this is on the SAME thread, the per-blob verification runs on a SNAPSHOT thread so we don't have to worry
// about local requests resulting in a stack overflow here
transportService.sendChildRequest(
node,
BlobAnalyzeAction.NAME,
request,
task,
TransportRequestOptions.EMPTY,
new ActionListenerResponseHandler<>(ActionListener.releaseAfter(new ActionListener<>() {
@Override
public void onResponse(BlobAnalyzeAction.Response response) {
logger.trace("finished [{}] on [{}]", request, node);
if (request.getAbortWrite() == false) {
expectedBlobs.add(request.getBlobName()); // each task cleans up its own mess on failure
}
if (AsyncAction.this.request.detailed) {
synchronized (responses) {
responses.add(response);
}
}
summary.add(response);
}
@Override
public void onFailure(Exception exp) {
logger.debug(() -> "failed [" + request + "] on [" + node + "]", exp);
fail(exp);
}
}, ref), BlobAnalyzeAction.Response::new, TransportResponseHandler.TRANSPORT_WORKER)
);
} else {
ref.close();
}
}
private BlobContainer getBlobContainer() {
return repository.blobStore().blobContainer(repository.basePath().add(blobPath));
}
private void runContendedRegisterAnalysis(Releasable ref, ContendedRegisterAnalyzeAction.Request request, DiscoveryNode node) {
if (isRunning()) {
transportService.sendChildRequest(
node,
ContendedRegisterAnalyzeAction.NAME,
request,
task,
TransportRequestOptions.EMPTY,
new ActionListenerResponseHandler<>(ActionListener.releaseAfter(new ActionListener<>() {
@Override
public void onResponse(ActionResponse.Empty response) {}
@Override
public void onFailure(Exception exp) {
logger.debug(() -> "failed [" + request + "] on [" + node + "]", exp);
fail(exp);
}
}, ref), in -> ActionResponse.Empty.INSTANCE, TransportResponseHandler.TRANSPORT_WORKER)
);
} else {
ref.close();
}
}
private Runnable finalRegisterValueVerifier(String registerName, int expectedFinalRegisterValue, Random random, Releasable ref) {
return new Runnable() {
final CheckedConsumer<ActionListener<OptionalBytesReference>, Exception> finalValueReader = switch (random.nextInt(3)) {
case 0 -> new CheckedConsumer<ActionListener<OptionalBytesReference>, Exception>() {
@Override
public void accept(ActionListener<OptionalBytesReference> listener) {
// All register operations have completed by this point so getRegister is safe
getBlobContainer().getRegister(OperationPurpose.REPOSITORY_ANALYSIS, registerName, listener);
}
@Override
public String toString() {
return "getRegister";
}
};
case 1 -> new CheckedConsumer<ActionListener<OptionalBytesReference>, Exception>() {
@Override
public void accept(ActionListener<OptionalBytesReference> listener) {
getBlobContainer().compareAndExchangeRegister(
OperationPurpose.REPOSITORY_ANALYSIS,
registerName,
bytesFromLong(expectedFinalRegisterValue),
new BytesArray(new byte[] { (byte) 0xff }),
listener
);
}
@Override
public String toString() {
return "compareAndExchangeRegister";
}
};
case 2 -> new CheckedConsumer<ActionListener<OptionalBytesReference>, Exception>() {
@Override
public void accept(ActionListener<OptionalBytesReference> listener) {
getBlobContainer().compareAndSetRegister(
OperationPurpose.REPOSITORY_ANALYSIS,
registerName,
bytesFromLong(expectedFinalRegisterValue),
new BytesArray(new byte[] { (byte) 0xff }),
listener.map(
b -> b
? OptionalBytesReference.of(bytesFromLong(expectedFinalRegisterValue))
: OptionalBytesReference.MISSING
)
);
}
@Override
public String toString() {
return "compareAndSetRegister";
}
};
default -> {
assert false;
throw new IllegalStateException();
}
};
@Override
public void run() {
if (isRunning()) {
transportService.getThreadPool()
.executor(ThreadPool.Names.SNAPSHOT)
.execute(ActionRunnable.wrap(ActionListener.releaseAfter(new ActionListener<>() {
@Override
public void onResponse(OptionalBytesReference actualFinalRegisterValue) {
if (actualFinalRegisterValue.isPresent() == false
|| longFromBytes(actualFinalRegisterValue.bytesReference()) != expectedFinalRegisterValue) {
fail(
new RepositoryVerificationException(
request.getRepositoryName(),
Strings.format(
"""
Successfully completed all [%d] atomic increments of register [%s] so its expected \
value is [%s], but reading its value with [%s] unexpectedly yielded [%s]. This \
anomaly may indicate an atomicity failure amongst concurrent compare-and-exchange \
operations on registers in this repository.""",
expectedFinalRegisterValue,
registerName,
OptionalBytesReference.of(bytesFromLong(expectedFinalRegisterValue)),
finalValueReader.toString(),
actualFinalRegisterValue
)
)
);
}
}
@Override
public void onFailure(Exception exp) {
// Registers are not supported on all repository types, and that's ok.
if (exp instanceof UnsupportedOperationException == false) {
fail(exp);
}
}
}, ref), finalValueReader));
} else {
ref.close();
}
}
};
}
private
|
CheckForCancelListener
|
java
|
apache__camel
|
components/camel-platform-http-vertx/src/test/java/org/apache/camel/component/platform/http/vertx/VertxPlatformHttpEngineWithTypeConverterTest.java
|
{
"start": 4281,
"end": 5364
}
|
class ____ implements TypeConverter {
@Override
public boolean allowNull() {
return false;
}
@Override
public <T> T convertTo(Class<T> type, Object value) throws TypeConversionException {
return null;
}
@Override
public <T> T convertTo(Class<T> type, Exchange exchange, Object value) throws TypeConversionException {
return null;
}
@Override
public <T> T mandatoryConvertTo(Class<T> type, Object value)
throws TypeConversionException {
return null;
}
@Override
public <T> T mandatoryConvertTo(Class<T> type, Exchange exchange, Object value)
throws TypeConversionException {
return null;
}
@Override
public <T> T tryConvertTo(Class<T> type, Object value) {
return null;
}
@Override
public <T> T tryConvertTo(Class<T> type, Exchange exchange, Object value) {
return null;
}
}
}
|
MockTypeConverter
|
java
|
apache__camel
|
dsl/camel-yaml-dsl/camel-yaml-dsl-deserializers/src/generated/java/org/apache/camel/dsl/yaml/deserializers/ModelDeserializers.java
|
{
"start": 62777,
"end": 64857
}
|
class ____ extends YamlDeserializerBase<BatchResequencerConfig> {
public BatchResequencerConfigDeserializer() {
super(BatchResequencerConfig.class);
}
@Override
protected BatchResequencerConfig newInstance() {
return new BatchResequencerConfig();
}
@Override
protected boolean setProperty(BatchResequencerConfig target, String propertyKey,
String propertyName, Node node) {
propertyKey = org.apache.camel.util.StringHelper.dashToCamelCase(propertyKey);
switch(propertyKey) {
case "allowDuplicates": {
String val = asText(node);
target.setAllowDuplicates(val);
break;
}
case "batchSize": {
String val = asText(node);
target.setBatchSize(val);
break;
}
case "batchTimeout": {
String val = asText(node);
target.setBatchTimeout(val);
break;
}
case "ignoreInvalidExchanges": {
String val = asText(node);
target.setIgnoreInvalidExchanges(val);
break;
}
case "reverse": {
String val = asText(node);
target.setReverse(val);
break;
}
default: {
return false;
}
}
return true;
}
}
@YamlType(
types = org.apache.camel.model.app.BeanConstructorDefinition.class,
order = org.apache.camel.dsl.yaml.common.YamlDeserializerResolver.ORDER_LOWEST - 1,
properties = {
@YamlProperty(name = "index", type = "number"),
@YamlProperty(name = "value", type = "string", required = true)
}
)
public static
|
BatchResequencerConfigDeserializer
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/jsontype/TestSubtypes.java
|
{
"start": 770,
"end": 845
}
|
class ____ extends SuperType {
public int c = 2;
}
static
|
SubC
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualTests.java
|
{
"start": 1055,
"end": 6930
}
|
class ____ extends AbstractScalarFunctionTestCase {
public GreaterThanOrEqualTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
this.testCase = testCaseSupplier.get();
}
@ParametersFactory
public static Iterable<Object[]> parameters() {
List<TestCaseSupplier> suppliers = new ArrayList<>();
suppliers.addAll(
TestCaseSupplier.forBinaryComparisonWithWidening(
new TestCaseSupplier.NumericTypeTestConfigs<>(
new TestCaseSupplier.NumericTypeTestConfig<>(
(Integer.MIN_VALUE >> 1) - 1,
(Integer.MAX_VALUE >> 1) - 1,
(l, r) -> l.intValue() >= r.intValue(),
"GreaterThanOrEqualIntsEvaluator"
),
new TestCaseSupplier.NumericTypeTestConfig<>(
(Long.MIN_VALUE >> 1) - 1,
(Long.MAX_VALUE >> 1) - 1,
(l, r) -> l.longValue() >= r.longValue(),
"GreaterThanOrEqualLongsEvaluator"
),
new TestCaseSupplier.NumericTypeTestConfig<>(
Double.NEGATIVE_INFINITY,
Double.POSITIVE_INFINITY,
// NB: this has different behavior than Double::equals
(l, r) -> l.doubleValue() >= r.doubleValue(),
"GreaterThanOrEqualDoublesEvaluator"
)
),
"lhs",
"rhs",
(lhs, rhs) -> List.of(),
false
)
);
// Unsigned Long cases
// TODO: These should be integrated into the type cross product above, but are currently broken
// see https://github.com/elastic/elasticsearch/issues/102935
suppliers.addAll(
TestCaseSupplier.forBinaryNotCasting(
"GreaterThanOrEqualLongsEvaluator",
"lhs",
"rhs",
(l, r) -> ((BigInteger) l).compareTo((BigInteger) r) >= 0,
DataType.BOOLEAN,
TestCaseSupplier.ulongCases(BigInteger.ZERO, NumericUtils.UNSIGNED_LONG_MAX, true),
TestCaseSupplier.ulongCases(BigInteger.ZERO, NumericUtils.UNSIGNED_LONG_MAX, true),
List.of(),
false
)
);
suppliers.addAll(
TestCaseSupplier.forBinaryNotCasting(
"GreaterThanOrEqualKeywordsEvaluator",
"lhs",
"rhs",
(l, r) -> ((BytesRef) l).compareTo((BytesRef) r) >= 0,
DataType.BOOLEAN,
TestCaseSupplier.ipCases(),
TestCaseSupplier.ipCases(),
List.of(),
false
)
);
suppliers.addAll(
TestCaseSupplier.forBinaryNotCasting(
"GreaterThanOrEqualKeywordsEvaluator",
"lhs",
"rhs",
(l, r) -> ((BytesRef) l).compareTo((BytesRef) r) >= 0,
DataType.BOOLEAN,
TestCaseSupplier.versionCases(""),
TestCaseSupplier.versionCases(""),
List.of(),
false
)
);
// Datetime
suppliers.addAll(TestCaseSupplier.forBinaryNotCasting("GreaterThanOrEqualLongsEvaluator", "lhs", "rhs", (lhs, rhs) -> {
if (lhs instanceof Instant l && rhs instanceof Instant r) {
return l.isAfter(r) || l.equals(r);
}
throw new UnsupportedOperationException("Got some weird types");
}, DataType.BOOLEAN, TestCaseSupplier.dateCases(), TestCaseSupplier.dateCases(), List.of(), false));
suppliers.addAll(TestCaseSupplier.forBinaryNotCasting("GreaterThanOrEqualLongsEvaluator", "lhs", "rhs", (lhs, rhs) -> {
if (lhs instanceof Instant l && rhs instanceof Instant r) {
return l.isAfter(r) || l.equals(r);
}
throw new UnsupportedOperationException("Got some weird types");
}, DataType.BOOLEAN, TestCaseSupplier.dateNanosCases(), TestCaseSupplier.dateNanosCases(), List.of(), false));
suppliers.addAll(
TestCaseSupplier.forBinaryNotCasting(
"GreaterThanOrEqualNanosMillisEvaluator",
"lhs",
"rhs",
(lhs, rhs) -> (((Instant) lhs).isAfter((Instant) rhs) || lhs.equals(rhs)),
DataType.BOOLEAN,
TestCaseSupplier.dateNanosCases(),
TestCaseSupplier.dateCases(),
List.of(),
false
)
);
suppliers.addAll(
TestCaseSupplier.forBinaryNotCasting(
"GreaterThanOrEqualMillisNanosEvaluator",
"lhs",
"rhs",
(lhs, rhs) -> (((Instant) lhs).isAfter((Instant) rhs) || lhs.equals(rhs)),
DataType.BOOLEAN,
TestCaseSupplier.dateCases(),
TestCaseSupplier.dateNanosCases(),
List.of(),
false
)
);
suppliers.addAll(
TestCaseSupplier.stringCases(
(l, r) -> ((BytesRef) l).compareTo((BytesRef) r) >= 0,
(lhsType, rhsType) -> "GreaterThanOrEqualKeywordsEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]",
List.of(),
DataType.BOOLEAN
)
);
return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers);
}
@Override
protected Expression build(Source source, List<Expression> args) {
return new GreaterThanOrEqual(source, args.get(0), args.get(1));
}
}
|
GreaterThanOrEqualTests
|
java
|
mockito__mockito
|
mockito-core/src/test/java/org/mockitousage/strictness/ProductionCode.java
|
{
"start": 393,
"end": 750
}
|
class ____ {
public static void simpleMethod(IMethods mock, String argument) {
mock.simpleMethod(argument);
}
public static void simpleMethod(IMethods mock, int argument) {
mock.simpleMethod(argument);
}
public static void forInteger(IMethods mock, int argument) {
mock.forInteger(argument);
}
}
|
ProductionCode
|
java
|
apache__flink
|
flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/AvroWriters.java
|
{
"start": 1741,
"end": 3816
}
|
class ____ the type to write.
*/
public static <T extends SpecificRecordBase> AvroWriterFactory<T> forSpecificRecord(
Class<T> type) {
String schemaString = SpecificData.get().getSchema(type).toString();
AvroBuilder<T> builder =
(out) -> createAvroDataFileWriter(schemaString, SpecificDatumWriter::new, out);
return new AvroWriterFactory<>(builder);
}
/**
* Creates an {@link AvroWriterFactory} that accepts and writes Avro generic types. The Avro
* writers will use the given schema to build and write the records.
*
* @param schema The schema of the generic type.
*/
public static AvroWriterFactory<GenericRecord> forGenericRecord(Schema schema) {
String schemaString = schema.toString();
// Must override the lambda representation because of a bug in shading lambda
// serialization, see similar issue FLINK-28043 for more details.
AvroBuilder<GenericRecord> builder =
new AvroBuilder<GenericRecord>() {
@Override
public DataFileWriter<GenericRecord> createWriter(OutputStream outputStream)
throws IOException {
return createAvroDataFileWriter(
schemaString,
new Function<Schema, DatumWriter<GenericRecord>>() {
@Override
public DatumWriter<GenericRecord> apply(Schema schema) {
return new GenericDatumWriter<>(schema);
}
},
outputStream);
}
};
return new AvroWriterFactory<>(builder);
}
/**
* Creates an {@link AvroWriterFactory} for the given type. The Avro writers will use reflection
* to create the schema for the type and use that schema to write the records.
*
* @param type The
|
of
|
java
|
micronaut-projects__micronaut-core
|
http/src/main/java/io/micronaut/http/body/ByteBody.java
|
{
"start": 1139,
"end": 2335
}
|
class ____ a stream of bytes from an HTTP connection. These bytes may be streamed or
* fully in memory, depending on implementation.
* <p>Each {@link ByteBody} may only be used once for a "primary" operation (such as
* {@link #toInputStream()}). However, <i>before</i> that primary operation, it may be
* {@link #split() split} multiple times. Splitting returns a new {@link ByteBody} that is
* independent. That means if you want to do two primary operations on the same
* {@link ByteBody}, you can instead split it once and then do one of the primary operations
* on the body returned by {@link #split()}.
* <p>To ensure resource cleanup, {@link #split()} returns a {@link CloseableByteBody}. This
* body must be closed if no terminal operation is performed, otherwise there may be a memory leak
* or stalled connection!
* <p>An individual {@link ByteBody} is <i>not</i> thread-safe: You may not call
* {@link #split()} concurrently from multiple threads for example. However, the new
* {@link ByteBody} returned from {@link #split()} is independent, so you may use it on a
* different thread as this one.
*
* @author Jonas Konrad
* @since 4.5.0
*/
@Experimental
public
|
represents
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
|
{
"start": 1945,
"end": 35569
}
|
class ____ extends BaseReplicationPolicyTest {
public TestReplicationPolicyWithNodeGroup() {
this.blockPlacementPolicy = BlockPlacementPolicyWithNodeGroup.class.getName();
}
@Override
DatanodeDescriptor[] getDatanodeDescriptors(Configuration conf) {
// default is true, in this case this test will against DFSNetworkTopology
// but it run on NetworkTopologyWithNodeGroup, so set to false.
conf.setBoolean(DFS_USE_DFS_NETWORK_TOPOLOGY_KEY, false);
conf.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY,
NetworkTopologyWithNodeGroup.class.getName());
final String[] racks = {
"/d1/r1/n1",
"/d1/r1/n1",
"/d1/r1/n2",
"/d1/r2/n3",
"/d1/r2/n3",
"/d1/r2/n4",
"/d2/r3/n5",
"/d2/r3/n6"
};
storages = DFSTestUtil.createDatanodeStorageInfos(racks);
return DFSTestUtil.toDatanodeDescriptor(storages);
}
private static final DatanodeStorageInfo[] storagesInBoundaryCase;
private static final DatanodeDescriptor[] dataNodesInBoundaryCase;
static {
final String[] racksInBoundaryCase = {
"/d1/r1/n1",
"/d1/r1/n1",
"/d1/r1/n1",
"/d1/r1/n2",
"/d1/r2/n3",
"/d1/r2/n3"
};
storagesInBoundaryCase = DFSTestUtil.createDatanodeStorageInfos(racksInBoundaryCase);
dataNodesInBoundaryCase = DFSTestUtil.toDatanodeDescriptor(storagesInBoundaryCase);
}
private static final DatanodeStorageInfo[] storagesInMoreTargetsCase;
private final static DatanodeDescriptor[] dataNodesInMoreTargetsCase;
static {
final String[] racksInMoreTargetsCase = {
"/r1/n1",
"/r1/n1",
"/r1/n2",
"/r1/n2",
"/r1/n3",
"/r1/n3",
"/r2/n4",
"/r2/n4",
"/r2/n5",
"/r2/n5",
"/r2/n6",
"/r2/n6"
};
storagesInMoreTargetsCase = DFSTestUtil.createDatanodeStorageInfos(racksInMoreTargetsCase);
dataNodesInMoreTargetsCase = DFSTestUtil.toDatanodeDescriptor(storagesInMoreTargetsCase);
};
private final static DatanodeDescriptor NODE =
DFSTestUtil.getDatanodeDescriptor("9.9.9.9", "/d2/r4/n7");
private static final DatanodeStorageInfo[] storagesForDependencies;
private static final DatanodeDescriptor[] dataNodesForDependencies;
static {
final String[] racksForDependencies = {
"/d1/r1/n1",
"/d1/r1/n1",
"/d1/r1/n2",
"/d1/r1/n2",
"/d1/r1/n3",
"/d1/r1/n4"
};
final String[] hostNamesForDependencies = {
"h1",
"h2",
"h3",
"h4",
"h5",
"h6"
};
storagesForDependencies = DFSTestUtil.createDatanodeStorageInfos(
racksForDependencies, hostNamesForDependencies);
dataNodesForDependencies = DFSTestUtil.toDatanodeDescriptor(storagesForDependencies);
};
/**
* Test block placement verification.
* @throws Exception
*/
@Test
public void testVerifyBlockPlacement() throws Exception {
LocatedBlock locatedBlock;
BlockPlacementStatus status;
ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
List<DatanodeStorageInfo> set = new ArrayList<>();
// 2 node groups (not enough), 2 racks (enough)
set.clear();
set.add(storages[0]);
set.add(storages[1]);
set.add(storages[4]);
locatedBlock = BlockManager.newLocatedBlock(b,
set.toArray(new DatanodeStorageInfo[set.size()]), 0, false);
status = replicator.verifyBlockPlacement(locatedBlock.getLocations(),
set.size());
assertFalse(status.isPlacementPolicySatisfied());
// 3 node groups (enough), 2 racks (enough)
set.clear();
set.add(storages[0]);
set.add(storages[2]);
set.add(storages[5]);
locatedBlock = BlockManager.newLocatedBlock(b,
set.toArray(new DatanodeStorageInfo[set.size()]), 0, false);
status = replicator.verifyBlockPlacement(locatedBlock.getLocations(),
set.size());
assertTrue(status.isPlacementPolicySatisfied());
// 2 node groups (not enough), 1 rack (not enough)
set.clear();
set.add(storages[0]);
set.add(storages[1]);
set.add(storages[2]);
locatedBlock = BlockManager.newLocatedBlock(b,
set.toArray(new DatanodeStorageInfo[set.size()]), 0, false);
status = replicator.verifyBlockPlacement(locatedBlock.getLocations(),
set.size());
assertFalse(status.isPlacementPolicySatisfied());
assertTrue(status.getErrorDescription().contains("node group"));
assertTrue(status.getErrorDescription().contains("more rack(s)"));
// 3 node groups (enough), 3 racks (enough)
set.clear();
set.add(storages[0]);
set.add(storages[5]);
set.add(storages[7]);
locatedBlock = BlockManager.newLocatedBlock(b,
set.toArray(new DatanodeStorageInfo[set.size()]), 0, false);
status = replicator.verifyBlockPlacement(locatedBlock.getLocations(),
set.size());
assertTrue(status.isPlacementPolicySatisfied());
// 3 node groups (not enough), 3 racks (enough), 4 replicas
set.clear();
set.add(storages[0]);
set.add(storages[1]);
set.add(storages[5]);
set.add(storages[7]);
locatedBlock = BlockManager.newLocatedBlock(b,
set.toArray(new DatanodeStorageInfo[set.size()]), 0, false);
status = replicator.verifyBlockPlacement(locatedBlock.getLocations(),
set.size());
assertFalse(status.isPlacementPolicySatisfied());
assertTrue(status.getErrorDescription().contains("node group"));
assertFalse(status.getErrorDescription().contains("more rack(s)"));
// 2 node groups (not enough), 1 rack (not enough)
set.clear();
set.add(storages[0]);
set.add(storages[1]);
set.add(storages[2]);
locatedBlock = BlockManager.newLocatedBlock(b,
set.toArray(new DatanodeStorageInfo[set.size()]), 0, false);
status = replicator.verifyBlockPlacement(locatedBlock.getLocations(),
set.size());
assertFalse(status.isPlacementPolicySatisfied());
assertTrue(status.getErrorDescription().contains("node group"));
assertTrue(status.getErrorDescription().contains("more rack(s)"));
// 1 node group (not enough), 1 rack (not enough)
set.clear();
set.add(storages[0]);
set.add(storages[1]);
locatedBlock = BlockManager.newLocatedBlock(b,
set.toArray(new DatanodeStorageInfo[set.size()]), 0, false);
status = replicator.verifyBlockPlacement(locatedBlock.getLocations(),
set.size());
assertFalse(status.isPlacementPolicySatisfied());
assertTrue(status.getErrorDescription().contains("node group"));
assertTrue(status.getErrorDescription().contains("more rack(s)"));
}
/**
* Scan the targets list: all targets should be on different NodeGroups.
* Return false if two targets are found on the same NodeGroup.
*/
private static boolean checkTargetsOnDifferentNodeGroup(
DatanodeStorageInfo[] targets) {
if(targets.length == 0)
return true;
Set<String> targetSet = new HashSet<>();
for(DatanodeStorageInfo storage:targets) {
final DatanodeDescriptor node = storage.getDatanodeDescriptor();
String nodeGroup = NetworkTopology.getLastHalf(node.getNetworkLocation());
if(targetSet.contains(nodeGroup)) {
return false;
} else {
targetSet.add(nodeGroup);
}
}
return true;
}
private boolean isOnSameRack(DatanodeDescriptor left, DatanodeStorageInfo right) {
return cluster.isOnSameRack(left, right.getDatanodeDescriptor());
}
private boolean isOnSameNodeGroup(DatanodeStorageInfo left, DatanodeStorageInfo right) {
return isOnSameNodeGroup(left.getDatanodeDescriptor(), right);
}
private boolean isOnSameNodeGroup(DatanodeDescriptor left, DatanodeStorageInfo right) {
return cluster.isOnSameNodeGroup(left, right.getDatanodeDescriptor());
}
private DatanodeStorageInfo[] chooseTarget(
int numOfReplicas,
DatanodeDescriptor writer,
Set<Node> excludedNodes,
List<DatanodeDescriptor> favoredNodes) {
return replicator.chooseTarget(filename, numOfReplicas, writer,
excludedNodes, BLOCK_SIZE, favoredNodes,
TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY, null);
}
/**
* In this testcase, client is dataNodes[0]. So the 1st replica should be
* placed on dataNodes[0], the 2nd replica should be placed on
* different rack and third should be placed on different node (and node group)
* of rack chosen for 2nd node.
* The only excpetion is when the <i>numOfReplicas</i> is 2,
* the 1st is on dataNodes[0] and the 2nd is on a different rack.
* @throws Exception
*/
@Test
public void testChooseTarget1() throws Exception {
updateHeartbeatWithUsage(dataNodes[0],
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
0L, 0L, 4, 0); // overloaded
DatanodeStorageInfo[] targets;
targets = chooseTarget(0);
assertEquals(targets.length, 0);
targets = chooseTarget(1);
assertEquals(targets.length, 1);
assertEquals(storages[0], targets[0]);
targets = chooseTarget(2);
assertEquals(targets.length, 2);
assertEquals(storages[0], targets[0]);
assertFalse(isOnSameRack(targets[0], targets[1]));
targets = chooseTarget(3);
assertEquals(targets.length, 3);
assertEquals(storages[0], targets[0]);
assertFalse(isOnSameRack(targets[0], targets[1]));
assertTrue(isOnSameRack(targets[1], targets[2]));
assertFalse(isOnSameNodeGroup(targets[1], targets[2]));
targets = chooseTarget(4);
assertEquals(targets.length, 4);
assertEquals(storages[0], targets[0]);
assertTrue(isOnSameRack(targets[1], targets[2]) ||
isOnSameRack(targets[2], targets[3]));
assertFalse(isOnSameRack(targets[0], targets[2]));
// Make sure no more than one replicas are on the same nodegroup
verifyNoTwoTargetsOnSameNodeGroup(targets);
updateHeartbeatWithUsage(dataNodes[0],
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}
private void verifyNoTwoTargetsOnSameNodeGroup(DatanodeStorageInfo[] targets) {
Set<String> nodeGroupSet = new HashSet<>();
for (DatanodeStorageInfo target: targets) {
nodeGroupSet.add(target.getDatanodeDescriptor().getNetworkLocation());
}
assertEquals(nodeGroupSet.size(), targets.length);
}
/**
* In this testcase, client is dataNodes[0], but the dataNodes[1] is
* not allowed to be chosen. So the 1st replica should be
* placed on dataNodes[0], the 2nd replica should be placed on a different
* rack, the 3rd should be on same rack as the 2nd replica but in different
* node group, and the rest should be placed on a third rack.
* @throws Exception
*/
@Test
public void testChooseTarget2() throws Exception {
DatanodeStorageInfo[] targets;
BlockPlacementPolicyDefault repl = (BlockPlacementPolicyDefault)replicator;
List<DatanodeStorageInfo> chosenNodes = new ArrayList<>();
Set<Node> excludedNodes = new HashSet<>();
excludedNodes.add(dataNodes[1]);
targets = repl.chooseTarget(filename, 4, dataNodes[0], chosenNodes, false,
excludedNodes, BLOCK_SIZE, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY,
null);
assertEquals(targets.length, 4);
assertEquals(storages[0], targets[0]);
assertTrue(cluster.isNodeGroupAware());
// Make sure no replicas are on the same nodegroup
for (int i=1;i<4;i++) {
assertFalse(isOnSameNodeGroup(targets[0], targets[i]));
}
assertTrue(isOnSameRack(targets[1], targets[2]) ||
isOnSameRack(targets[2], targets[3]));
assertFalse(isOnSameRack(targets[1], targets[3]));
excludedNodes.clear();
chosenNodes.clear();
excludedNodes.add(dataNodes[1]);
chosenNodes.add(storages[2]);
targets = repl.chooseTarget(filename, 1, dataNodes[0], chosenNodes, true,
excludedNodes, BLOCK_SIZE, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY,
null);
System.out.println("targets=" + Arrays.asList(targets));
assertEquals(2, targets.length);
//make sure that the chosen node is in the target.
int i = 0;
for(; i < targets.length && !storages[2].equals(targets[i]); i++);
assertTrue(i < targets.length);
}
/**
* In this testcase, client is dataNodes[0], but dataNodes[0] is not qualified
* to be chosen. So the 1st replica should be placed on dataNodes[1],
* the 2nd replica should be placed on a different rack,
* the 3rd replica should be placed on the same rack as the 2nd replica but in different nodegroup,
* and the rest should be placed on the third rack.
* @throws Exception
*/
@Test
public void testChooseTarget3() throws Exception {
// make data node 0 to be not qualified to choose
updateHeartbeatWithUsage(dataNodes[0],
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
(HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L,
0L, 0L, 0, 0); // no space
DatanodeStorageInfo[] targets;
targets = chooseTarget(0);
assertEquals(targets.length, 0);
targets = chooseTarget(1);
assertEquals(targets.length, 1);
assertEquals(storages[1], targets[0]);
targets = chooseTarget(2);
assertEquals(targets.length, 2);
assertEquals(storages[1], targets[0]);
assertFalse(isOnSameRack(targets[0], targets[1]));
targets = chooseTarget(3);
assertEquals(targets.length, 3);
assertEquals(storages[1], targets[0]);
assertTrue(isOnSameRack(targets[1], targets[2]));
assertFalse(isOnSameRack(targets[0], targets[1]));
targets = chooseTarget(4);
assertEquals(targets.length, 4);
assertEquals(storages[1], targets[0]);
assertTrue(cluster.isNodeGroupAware());
verifyNoTwoTargetsOnSameNodeGroup(targets);
assertTrue(isOnSameRack(targets[1], targets[2]) ||
isOnSameRack(targets[2], targets[3]));
updateHeartbeatWithUsage(dataNodes[0],
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}
/**
* In this testcase, client is dataNodes[0], but none of the nodes on rack 1
* is qualified to be chosen. So the 1st replica should be placed on either
* rack 2 or rack 3.
* the 2nd replica should be placed on a different rack,
* the 3rd replica should be placed on the same rack as the 1st replica, but
* in different node group.
* @throws Exception
*/
@Test
public void testChooseTarget4() throws Exception {
// make data node 0-2 to be not qualified to choose: not enough disk space
for(int i=0; i<3; i++) {
updateHeartbeatWithUsage(dataNodes[i],
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
(HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}
DatanodeStorageInfo[] targets;
targets = chooseTarget(0);
assertEquals(targets.length, 0);
targets = chooseTarget(1);
assertEquals(targets.length, 1);
assertFalse(isOnSameRack(dataNodes[0], targets[0]));
targets = chooseTarget(2);
assertEquals(targets.length, 2);
assertFalse(isOnSameRack(dataNodes[0], targets[0]));
assertFalse(isOnSameRack(targets[0], targets[1]));
targets = chooseTarget(3);
assertEquals(targets.length, 3);
for(int i=0; i<3; i++) {
assertFalse(isOnSameRack(dataNodes[0], targets[i]));
}
verifyNoTwoTargetsOnSameNodeGroup(targets);
assertTrue(isOnSameRack(targets[0], targets[1]) ||
isOnSameRack(targets[1], targets[2]));
assertFalse(isOnSameRack(targets[0], targets[2]));
}
/**
* In this testcase, client is is a node outside of file system.
* So the 1st replica can be placed on any node.
* the 2nd replica should be placed on a different rack,
* the 3rd replica should be placed on the same rack as the 2nd replica,
* @throws Exception
*/
@Test
public void testChooseTarget5() throws Exception {
updateHeartbeatWithUsage();
DatanodeStorageInfo[] targets;
targets = chooseTarget(0, NODE);
assertEquals(targets.length, 0);
targets = chooseTarget(1, NODE);
assertEquals(targets.length, 1);
targets = chooseTarget(2, NODE);
assertEquals(targets.length, 2);
assertFalse(isOnSameRack(targets[0], targets[1]));
targets = chooseTarget(3, NODE);
assertEquals(targets.length, 3);
assertTrue(isOnSameRack(targets[1], targets[2]));
assertFalse(isOnSameRack(targets[0], targets[1]));
verifyNoTwoTargetsOnSameNodeGroup(targets);
}
/**
* In this testcase, client is dataNodes[7], but it is not qualified
* to be chosen. And there is no other node available on client Node group.
* So the 1st replica should be placed on client local rack dataNodes[6]
* @throws Exception
*/
@Test
public void testChooseTargetForLocalStorage() throws Exception {
updateHeartbeatWithUsage(dataNodes[7],
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
(HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L,
0L, 0L, 0, 0); // no space
DatanodeStorageInfo[] targets;
targets = chooseTarget(1, dataNodes[7]);
assertEquals(targets.length, 1);
assertTrue(targets[0].getDatanodeDescriptor().equals(dataNodes[6]));
}
/**
* This testcase tests re-replication, when dataNodes[0] is already chosen.
* So the 1st replica can be placed on random rack.
* the 2nd replica should be placed on different node and nodegroup by same rack as
* the 1st replica. The 3rd replica can be placed randomly.
* @throws Exception
*/
@Test
public void testRereplicate1() throws Exception {
updateHeartbeatWithUsage();
List<DatanodeStorageInfo> chosenNodes = new ArrayList<>();
chosenNodes.add(storages[0]);
DatanodeStorageInfo[] targets;
targets = chooseTarget(0, chosenNodes);
assertEquals(targets.length, 0);
targets = chooseTarget(1, chosenNodes);
assertEquals(targets.length, 1);
assertFalse(isOnSameRack(dataNodes[0], targets[0]));
targets = chooseTarget(2, chosenNodes);
assertEquals(targets.length, 2);
assertTrue(isOnSameRack(dataNodes[0], targets[0]));
assertFalse(isOnSameRack(targets[0], targets[1]));
targets = chooseTarget(3, chosenNodes);
assertEquals(targets.length, 3);
assertTrue(isOnSameRack(dataNodes[0], targets[0]));
assertFalse(isOnSameNodeGroup(dataNodes[0], targets[0]));
assertFalse(isOnSameRack(targets[0], targets[2]));
}
/**
* This testcase tests re-replication,
* when dataNodes[0] and dataNodes[1] are already chosen.
* So the 1st replica should be placed on a different rack of rack 1.
* the rest replicas can be placed randomly,
* @throws Exception
*/
@Test
public void testRereplicate2() throws Exception {
updateHeartbeatWithUsage();
List<DatanodeStorageInfo> chosenNodes = new ArrayList<>();
chosenNodes.add(storages[0]);
chosenNodes.add(storages[1]);
DatanodeStorageInfo[] targets;
targets = chooseTarget(0, chosenNodes);
assertEquals(targets.length, 0);
targets = chooseTarget(1, chosenNodes);
assertEquals(targets.length, 1);
assertFalse(isOnSameRack(dataNodes[0], targets[0]));
targets = chooseTarget(2, chosenNodes);
assertEquals(targets.length, 2);
assertFalse(isOnSameRack(dataNodes[0], targets[0]) &&
isOnSameRack(dataNodes[0], targets[1]));
}
/**
* This testcase tests re-replication,
* when dataNodes[0] and dataNodes[3] are already chosen.
* So the 1st replica should be placed on the rack that the writer resides.
* the rest replicas can be placed randomly,
* @throws Exception
*/
@Test
public void testRereplicate3() throws Exception {
updateHeartbeatWithUsage();
List<DatanodeStorageInfo> chosenNodes = new ArrayList<>();
chosenNodes.add(storages[0]);
chosenNodes.add(storages[3]);
DatanodeStorageInfo[] targets;
targets = chooseTarget(0, chosenNodes);
assertEquals(targets.length, 0);
targets = chooseTarget(1, chosenNodes);
assertEquals(targets.length, 1);
assertTrue(isOnSameRack(dataNodes[0], targets[0]));
assertFalse(isOnSameRack(dataNodes[3], targets[0]));
targets = chooseTarget(1, dataNodes[3], chosenNodes);
assertEquals(targets.length, 1);
assertTrue(isOnSameRack(dataNodes[3], targets[0]));
assertFalse(isOnSameNodeGroup(dataNodes[3], targets[0]));
assertFalse(isOnSameRack(dataNodes[0], targets[0]));
targets = chooseTarget(2, chosenNodes);
assertEquals(targets.length, 2);
assertTrue(isOnSameRack(dataNodes[0], targets[0]));
assertFalse(isOnSameNodeGroup(dataNodes[0], targets[0]));
targets = chooseTarget(2, dataNodes[3], chosenNodes);
assertEquals(targets.length, 2);
assertTrue(isOnSameRack(dataNodes[3], targets[0]));
}
/**
* Test for the chooseReplicaToDelete are processed based on
* block locality and free space
*/
@Test
public void testChooseReplicaToDelete() throws Exception {
List<DatanodeStorageInfo> replicaList = new ArrayList<>();
final Map<String, List<DatanodeStorageInfo>> rackMap = new HashMap<>();
storages[0].setRemainingForTests(4*1024*1024);
dataNodes[0].setRemaining(calculateRemaining(dataNodes[0]));
replicaList.add(storages[0]);
storages[1].setRemainingForTests(3*1024*1024);
dataNodes[1].setRemaining(calculateRemaining(dataNodes[1]));
replicaList.add(storages[1]);
storages[2].setRemainingForTests(2*1024*1024);
dataNodes[2].setRemaining(calculateRemaining(dataNodes[2]));
replicaList.add(storages[2]);
storages[4].setRemainingForTests(100 * 1024 * 1024);
storages[5].setRemainingForTests(512 * 1024);
dataNodes[5].setRemaining(calculateRemaining(dataNodes[5]));
replicaList.add(storages[5]);
List<DatanodeStorageInfo> first = new ArrayList<>();
List<DatanodeStorageInfo> second = new ArrayList<>();
replicator.splitNodesWithRack(replicaList,
replicaList, rackMap, first, second);
assertEquals(3, first.size());
assertEquals(1, second.size());
List<StorageType> excessTypes = new ArrayList<>();
excessTypes.add(StorageType.DEFAULT);
DatanodeStorageInfo chosen = ((BlockPlacementPolicyDefault) replicator)
.chooseReplicaToDelete(first, second, excessTypes, rackMap);
// Within first set {dataNodes[0], dataNodes[1], dataNodes[2]},
// dataNodes[0] and dataNodes[1] are in the same nodegroup,
// but dataNodes[1] is chosen as less free space
assertEquals(chosen, storages[1]);
replicator.adjustSetsWithChosenReplica(rackMap, first, second, chosen);
assertEquals(2, first.size());
assertEquals(1, second.size());
// Within first set {dataNodes[0], dataNodes[2]}, dataNodes[2] is chosen
// as less free space
excessTypes.add(StorageType.DEFAULT);
chosen = ((BlockPlacementPolicyDefault) replicator).chooseReplicaToDelete(
first, second, excessTypes, rackMap);
assertEquals(chosen, storages[2]);
replicator.adjustSetsWithChosenReplica(rackMap, first, second, chosen);
assertEquals(0, first.size());
assertEquals(2, second.size());
// Within second set, dataNodes[5] with less free space
excessTypes.add(StorageType.DEFAULT);
chosen = ((BlockPlacementPolicyDefault) replicator).chooseReplicaToDelete(
first, second, excessTypes, rackMap);
assertEquals(chosen, storages[5]);
}
private long calculateRemaining(DatanodeDescriptor dataNode) {
long sum = 0;
for (DatanodeStorageInfo storageInfo: dataNode.getStorageInfos()){
sum += storageInfo.getRemaining();
}
return sum;
}
/**
* Test replica placement policy in case of boundary topology.
* Rack 2 has only 1 node group & can't be placed with two replicas
* The 1st replica will be placed on writer.
* The 2nd replica should be placed on a different rack
* The 3rd replica should be placed on the same rack with writer, but on a
* different node group.
*/
@Test
public void testChooseTargetsOnBoundaryTopology() throws Exception {
for(int i=0; i<dataNodes.length; i++) {
cluster.remove(dataNodes[i]);
}
for(int i=0; i<dataNodesInBoundaryCase.length; i++) {
cluster.add(dataNodesInBoundaryCase[i]);
}
for(int i=0; i<dataNodesInBoundaryCase.length; i++) {
updateHeartbeatWithUsage(dataNodesInBoundaryCase[i],
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}
DatanodeStorageInfo[] targets;
targets = chooseTarget(0, dataNodesInBoundaryCase[0]);
assertEquals(targets.length, 0);
targets = chooseTarget(1, dataNodesInBoundaryCase[0]);
assertEquals(targets.length, 1);
targets = chooseTarget(2, dataNodesInBoundaryCase[0]);
assertEquals(targets.length, 2);
assertFalse(isOnSameRack(targets[0], targets[1]));
targets = chooseTarget(3, dataNodesInBoundaryCase[0]);
assertEquals(targets.length, 3);
assertTrue(checkTargetsOnDifferentNodeGroup(targets));
}
/**
* Test re-replication policy in boundary case.
* Rack 2 has only one node group & the node in this node group is chosen
* Rack 1 has two nodegroups & one of them is chosen.
* Replica policy should choose the node from node group of Rack1 but not the
* same nodegroup with chosen nodes.
*/
@Test
public void testRereplicateOnBoundaryTopology() throws Exception {
for(int i=0; i<dataNodesInBoundaryCase.length; i++) {
updateHeartbeatWithUsage(dataNodesInBoundaryCase[i],
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}
List<DatanodeStorageInfo> chosenNodes = new ArrayList<>();
chosenNodes.add(storagesInBoundaryCase[0]);
chosenNodes.add(storagesInBoundaryCase[5]);
DatanodeStorageInfo[] targets;
targets = chooseTarget(1, dataNodesInBoundaryCase[0], chosenNodes);
assertFalse(isOnSameNodeGroup(dataNodesInBoundaryCase[0], targets[0]));
assertFalse(isOnSameNodeGroup(dataNodesInBoundaryCase[5], targets[0]));
assertTrue(checkTargetsOnDifferentNodeGroup(targets));
}
/**
* Test replica placement policy in case of targets more than number of
* NodeGroups.
* The 12-nodes cluster only has 6 NodeGroups, but in some cases, like:
* placing submitted job file, there is requirement to choose more (10)
* targets for placing replica. We should test it can return 6 targets.
*/
@Test
public void testChooseMoreTargetsThanNodeGroups() throws Exception {
for(int i=0; i<dataNodes.length; i++) {
cluster.remove(dataNodes[i]);
}
for(int i=0; i<dataNodesInBoundaryCase.length; i++) {
DatanodeDescriptor node = dataNodesInBoundaryCase[i];
if (cluster.contains(node)) {
cluster.remove(node);
}
}
for(int i=0; i<dataNodesInMoreTargetsCase.length; i++) {
cluster.add(dataNodesInMoreTargetsCase[i]);
}
for(int i=0; i<dataNodesInMoreTargetsCase.length; i++) {
updateHeartbeatWithUsage(dataNodesInMoreTargetsCase[i],
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}
DatanodeStorageInfo[] targets;
// Test normal case -- 3 replicas
targets = chooseTarget(3, dataNodesInMoreTargetsCase[0]);
assertEquals(targets.length, 3);
assertTrue(checkTargetsOnDifferentNodeGroup(targets));
// Test special case -- replica number over node groups.
targets = chooseTarget(10, dataNodesInMoreTargetsCase[0]);
assertTrue(checkTargetsOnDifferentNodeGroup(targets));
// Verify it only can find 6 targets for placing replicas.
assertEquals(targets.length, 6);
}
@Test
public void testChooseTargetWithDependencies() throws Exception {
for(int i=0; i<dataNodes.length; i++) {
cluster.remove(dataNodes[i]);
}
for(int i=0; i<dataNodesInMoreTargetsCase.length; i++) {
DatanodeDescriptor node = dataNodesInMoreTargetsCase[i];
if (cluster.contains(node)) {
cluster.remove(node);
}
}
Host2NodesMap host2DatanodeMap = namenode.getNamesystem()
.getBlockManager()
.getDatanodeManager().getHost2DatanodeMap();
for(int i=0; i<dataNodesForDependencies.length; i++) {
cluster.add(dataNodesForDependencies[i]);
host2DatanodeMap.add(dataNodesForDependencies[i]);
}
//add dependencies (node1 <-> node2, and node3<->node4)
dataNodesForDependencies[1].addDependentHostName(
dataNodesForDependencies[2].getHostName());
dataNodesForDependencies[2].addDependentHostName(
dataNodesForDependencies[1].getHostName());
dataNodesForDependencies[3].addDependentHostName(
dataNodesForDependencies[4].getHostName());
dataNodesForDependencies[4].addDependentHostName(
dataNodesForDependencies[3].getHostName());
//Update heartbeat
for(int i=0; i<dataNodesForDependencies.length; i++) {
updateHeartbeatWithUsage(dataNodesForDependencies[i],
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}
List<DatanodeStorageInfo> chosenNodes = new ArrayList<>();
DatanodeStorageInfo[] targets;
Set<Node> excludedNodes = new HashSet<>();
excludedNodes.add(dataNodesForDependencies[5]);
//try to select three targets as there are three node groups
targets = chooseTarget(3, dataNodesForDependencies[1], chosenNodes, excludedNodes);
//Even there are three node groups, verify that
//only two targets are selected due to dependencies
assertEquals(targets.length, 2);
assertEquals(targets[0], storagesForDependencies[1]);
assertTrue(targets[1].equals(storagesForDependencies[3]) || targets[1].equals(storagesForDependencies[4]));
//verify that all data nodes are in the excluded list
assertEquals(excludedNodes.size(), dataNodesForDependencies.length);
for(int i=0; i<dataNodesForDependencies.length; i++) {
assertTrue(excludedNodes.contains(dataNodesForDependencies[i]));
}
}
/**
* In this testcase, favored node is dataNodes[6].
* 1st replica should be placed on favored node.
* @throws Exception
*/
@Test
public void testChooseTargetAsFavouredNodes() throws Exception {
DatanodeStorageInfo[] targets;
List<DatanodeDescriptor> favoredNodes =
new ArrayList<DatanodeDescriptor>();
favoredNodes.add(dataNodes[6]);
favoredNodes.add(dataNodes[0]);
favoredNodes.add(dataNodes[1]);
targets = chooseTarget(1, dataNodes[7], null, favoredNodes);
assertEquals(targets.length, 1);
assertTrue(favoredNodes.contains(targets[0].getDatanodeDescriptor()));
}
/**
* In this testcase, passed 2 favored nodes
* dataNodes[0](Good Node), dataNodes[3](Bad node).
* 1st replica should be placed on good favored node dataNodes[0].
* 2nd replica should be on bad favored node's nodegroup dataNodes[4].
* @throws Exception
*/
@Test
public void testChooseFavoredNodesNodeGroup() throws Exception {
updateHeartbeatWithUsage(dataNodes[3],
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
(HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L,
0L, 0L, 0, 0); // no space
DatanodeStorageInfo[] targets;
List<DatanodeDescriptor> expectedTargets =
new ArrayList<DatanodeDescriptor>();
expectedTargets.add(dataNodes[0]);
expectedTargets.add(dataNodes[4]);
List<DatanodeDescriptor> favouredNodes =
new ArrayList<DatanodeDescriptor>();
favouredNodes.add(dataNodes[3]);
favouredNodes.add(dataNodes[0]);
targets = chooseTarget(2, dataNodes[7], null, favouredNodes);
assertTrue(expectedTargets.contains(targets[0].getDatanodeDescriptor()),
"1st Replica is incorrect");
assertTrue(expectedTargets.contains(targets[1].getDatanodeDescriptor()),
"2nd Replica is incorrect");
}
/**
* In this testcase, passed 3 favored nodes
* dataNodes[0],dataNodes[1],dataNodes[2]
*
* Favored nodes on different nodegroup should be selected. Remaining replica
* should go through BlockPlacementPolicy.
*
* @throws Exception
*/
@Test
public void testChooseRemainingReplicasApartFromFavoredNodes()
throws Exception {
DatanodeStorageInfo[] targets;
List<DatanodeDescriptor> expectedTargets =
new ArrayList<DatanodeDescriptor>();
expectedTargets.add(dataNodes[0]);
expectedTargets.add(dataNodes[2]);
expectedTargets.add(dataNodes[3]);
expectedTargets.add(dataNodes[6]);
expectedTargets.add(dataNodes[7]);
List<DatanodeDescriptor> favouredNodes =
new ArrayList<DatanodeDescriptor>();
favouredNodes.add(dataNodes[0]);
favouredNodes.add(dataNodes[1]);
favouredNodes.add(dataNodes[2]);
targets = chooseTarget(3, dataNodes[3], null, favouredNodes);
for (int i = 0; i < targets.length; i++) {
assertTrue(expectedTargets.contains(targets[i].getDatanodeDescriptor()),
"Target should be a part of Expected Targets");
}
}
}
|
TestReplicationPolicyWithNodeGroup
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/nullness/RedundantNullCheckTest.java
|
{
"start": 15575,
"end": 15954
}
|
class ____ {
public static @NonNull String getString() {
return "non-null";
}
}
""")
.addSourceLines(
"Test.java",
"""
import org.jspecify.annotations.NullMarked;
import mylib.NonAnnotatedLibNonNull;
@NullMarked
|
NonAnnotatedLibNonNull
|
java
|
mybatis__mybatis-3
|
src/test/java/org/apache/ibatis/submitted/associationtype/AssociationTypeTest.java
|
{
"start": 1243,
"end": 2675
}
|
class ____ {
private static SqlSessionFactory sqlSessionFactory;
@BeforeAll
static void setUp() throws Exception {
// create a SqlSessionFactory
try (Reader reader = Resources
.getResourceAsReader("org/apache/ibatis/submitted/associationtype/mybatis-config.xml")) {
sqlSessionFactory = new SqlSessionFactoryBuilder().build(reader);
}
// populate in-memory database
BaseDataTest.runScript(sqlSessionFactory.getConfiguration().getEnvironment().getDataSource(),
"org/apache/ibatis/submitted/associationtype/CreateDB.sql");
}
@ParameterizedTest
@EnumSource
void shouldGetAUser(LocalCacheScope localCacheScope) {
sqlSessionFactory.getConfiguration().setLocalCacheScope(localCacheScope);
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
List<Map<String, ?>> results = sqlSession.selectList("getUser");
for (Map<String, ?> r : results) {
Object a1 = r.get("a1");
Object a2 = r.get("a2");
Assertions.assertEquals(String.class, a1.getClass());
Assertions.assertEquals(String.class, a2.getClass());
Assertions.assertSame(a1, a2,
"The result should be put into local cache regardless of localCacheScope setting.");
}
} finally {
// Reset the scope for other tests
sqlSessionFactory.getConfiguration().setLocalCacheScope(LocalCacheScope.SESSION);
}
}
}
|
AssociationTypeTest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/metamodel/mapping/PropertyBasedMapping.java
|
{
"start": 290,
"end": 363
}
|
interface ____ {
PropertyAccess getPropertyAccess();
}
|
PropertyBasedMapping
|
java
|
google__auto
|
value/src/test/java/com/google/auto/value/processor/AutoBuilderCompilationTest.java
|
{
"start": 20075,
"end": 20250
}
|
class ____ {",
" Baz of() {",
" return this;",
" }",
"",
" @AutoBuilder(callMethod = \"of\")",
"
|
Baz
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyCompressor.java
|
{
"start": 1238,
"end": 8276
}
|
class ____ implements Compressor {
  private static final Logger LOG =
      LoggerFactory.getLogger(SnappyCompressor.class.getName());
  private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64 * 1024;

  private int directBufferSize;
  // Direct buffer holding snappy's output. Whenever it holds no pending
  // compressed data it is left fully "drained" (position == limit), so
  // remaining() doubles as a "have compressed bytes" flag.
  private Buffer compressedDirectBuf = null;
  private int uncompressedDirectBufLen;
  // Direct buffer that stages user input before it is handed to snappy.
  private Buffer uncompressedDirectBuf = null;
  // Overflow input that did not fit into uncompressedDirectBuf; it is fed
  // to snappy in directBufferSize-sized slices by setInputFromSavedData().
  private byte[] userBuf = null;
  private int userBufOff = 0, userBufLen = 0;
  private boolean finish, finished;
  private long bytesRead = 0L;
  private long bytesWritten = 0L;

  /**
   * Creates a new compressor.
   *
   * @param directBufferSize size of the direct buffer to be used.
   */
  public SnappyCompressor(int directBufferSize) {
    this.directBufferSize = directBufferSize;
    uncompressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
    compressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
    // Start with an "empty" output buffer (position == limit == capacity).
    compressedDirectBuf.position(directBufferSize);
  }

  /**
   * Creates a new compressor with the default buffer size.
   */
  public SnappyCompressor() {
    this(DEFAULT_DIRECT_BUFFER_SIZE);
  }

  /**
   * Sets input data for compression.
   * This should be called whenever #needsInput() returns
   * <code>true</code> indicating that more input data is required.
   *
   * @param b Input data
   * @param off Start offset
   * @param len Length
   */
  @Override
  public void setInput(byte[] b, int off, int len) {
    if (b == null) {
      throw new NullPointerException();
    }
    if (off < 0 || len < 0 || off > b.length - len) {
      throw new ArrayIndexOutOfBoundsException();
    }
    finished = false;
    if (len > uncompressedDirectBuf.remaining()) {
      // save data; now !needsInput
      this.userBuf = b;
      this.userBufOff = off;
      this.userBufLen = len;
    } else {
      ((ByteBuffer) uncompressedDirectBuf).put(b, off, len);
      uncompressedDirectBufLen = uncompressedDirectBuf.position();
    }
    bytesRead += len;
  }

  /**
   * If a write would exceed the capacity of the direct buffers, it is set
   * aside to be loaded by this function while the compressed data are
   * consumed.
   */
  void setInputFromSavedData() {
    if (0 >= userBufLen) {
      return;
    }
    finished = false;
    // Copy at most one direct-buffer's worth of the saved user data.
    uncompressedDirectBufLen = Math.min(userBufLen, directBufferSize);
    ((ByteBuffer) uncompressedDirectBuf).put(userBuf, userBufOff,
        uncompressedDirectBufLen);
    // Note how much data is being fed to snappy
    userBufOff += uncompressedDirectBufLen;
    userBufLen -= uncompressedDirectBufLen;
  }

  /**
   * Does nothing. Snappy has no preset-dictionary support.
   */
  @Override
  public void setDictionary(byte[] b, int off, int len) {
    // do nothing
  }

  /**
   * Returns true if the input data buffer is empty and
   * #setInput() should be called to provide more input.
   *
   * @return <code>true</code> if the input data buffer is empty and
   *         #setInput() should be called in order to provide more input.
   */
  @Override
  public boolean needsInput() {
    // Input is needed only when there is no pending compressed output,
    // the staging buffer still has room, and no saved user data remains.
    return !(compressedDirectBuf.remaining() > 0
        || uncompressedDirectBuf.remaining() == 0 || userBufLen > 0);
  }

  /**
   * When called, indicates that compression should end
   * with the current contents of the input buffer.
   */
  @Override
  public void finish() {
    finish = true;
  }

  /**
   * Returns true if the end of the compressed
   * data output stream has been reached.
   *
   * @return <code>true</code> if the end of the compressed
   *         data output stream has been reached.
   */
  @Override
  public boolean finished() {
    // Check if all uncompressed data has been consumed
    return (finish && finished && compressedDirectBuf.remaining() == 0);
  }

  /**
   * Fills specified buffer with compressed data. Returns actual number
   * of bytes of compressed data. A return value of 0 indicates that
   * needsInput() should be called in order to determine if more input
   * data is required.
   *
   * @param b Buffer for the compressed data
   * @param off Start offset of the data
   * @param len Size of the buffer
   * @return The actual number of bytes of compressed data.
   */
  @Override
  public int compress(byte[] b, int off, int len)
      throws IOException {
    if (b == null) {
      throw new NullPointerException();
    }
    if (off < 0 || len < 0 || off > b.length - len) {
      throw new ArrayIndexOutOfBoundsException();
    }
    // Check if there is compressed data left over from a previous call;
    // drain it before compressing anything new.
    int n = compressedDirectBuf.remaining();
    if (n > 0) {
      n = Math.min(n, len);
      ((ByteBuffer) compressedDirectBuf).get(b, off, n);
      bytesWritten += n;
      return n;
    }
    // Re-initialize the snappy's output direct-buffer
    compressedDirectBuf.clear();
    compressedDirectBuf.limit(0);
    if (0 == uncompressedDirectBuf.position()) {
      // No compressed data, so we should have !needsInput or !finished
      setInputFromSavedData();
      if (0 == uncompressedDirectBuf.position()) {
        // Called without data; write nothing
        finished = true;
        return 0;
      }
    }
    // Compress data
    n = compressDirectBuf();
    compressedDirectBuf.limit(n);
    uncompressedDirectBuf.clear(); // snappy consumes all buffer input
    // Set 'finished' if snappy has consumed all user-data
    if (0 == userBufLen) {
      finished = true;
    }
    // Get at most 'len' bytes
    n = Math.min(n, len);
    bytesWritten += n;
    ((ByteBuffer) compressedDirectBuf).get(b, off, n);
    return n;
  }

  /**
   * Resets compressor so that a new set of input data can be processed.
   */
  @Override
  public void reset() {
    finish = false;
    finished = false;
    uncompressedDirectBuf.clear();
    uncompressedDirectBufLen = 0;
    compressedDirectBuf.clear();
    compressedDirectBuf.limit(0);
    userBufOff = userBufLen = 0;
    bytesRead = bytesWritten = 0L;
  }

  /**
   * Prepare the compressor to be used in a new stream with settings defined in
   * the given Configuration
   *
   * @param conf Configuration from which new setting are fetched
   */
  @Override
  public void reinit(Configuration conf) {
    // No snappy-specific settings to read; a plain reset suffices.
    reset();
  }

  /**
   * Return number of bytes given to this compressor since last reset.
   */
  @Override
  public long getBytesRead() {
    return bytesRead;
  }

  /**
   * Return number of bytes consumed by callers of compress since last reset.
   */
  @Override
  public long getBytesWritten() {
    return bytesWritten;
  }

  /**
   * Closes the compressor and discards any unprocessed input.
   */
  @Override
  public void end() {
    // Direct buffers are reclaimed by GC; nothing to release explicitly.
  }

  // Runs snappy over the staged input and returns the compressed size.
  private int compressDirectBuf() throws IOException {
    if (uncompressedDirectBufLen == 0) {
      return 0;
    } else {
      // Set the position and limit of `uncompressedDirectBuf` for reading
      uncompressedDirectBuf.limit(uncompressedDirectBufLen).position(0);
      int size = Snappy.compress((ByteBuffer) uncompressedDirectBuf,
          (ByteBuffer) compressedDirectBuf);
      uncompressedDirectBufLen = 0;
      return size;
    }
  }
}
|
SnappyCompressor
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/DequeUtilsTests.java
|
{
"start": 603,
"end": 2152
}
|
class ____ extends ESTestCase {

    /** Two deques wrapping the very same mock instance must compare equal. */
    public void testEqualsAndHashCodeWithSameObject() {
        var shared = mock();
        var left = DequeUtils.of(shared);
        var right = DequeUtils.of(shared);
        assertTrue(DequeUtils.dequeEquals(left, right));
        assertEquals(DequeUtils.dequeHashCode(left), DequeUtils.dequeHashCode(right));
    }

    /** Distinct but equal elements also yield equal deques and hash codes. */
    public void testEqualsAndHashCodeWithEqualsObject() {
        var left = DequeUtils.of("the same string");
        var right = DequeUtils.of("the same string");
        assertTrue(DequeUtils.dequeEquals(left, right));
        assertEquals(DequeUtils.dequeHashCode(left), DequeUtils.dequeHashCode(right));
    }

    /** Unrelated mock elements are neither equal nor hash-equal. */
    public void testNotEqualsAndHashCode() {
        var left = DequeUtils.of(mock());
        var right = DequeUtils.of(mock());
        assertFalse(DequeUtils.dequeEquals(left, right));
        assertNotEquals(DequeUtils.dequeHashCode(left), DequeUtils.dequeHashCode(right));
    }

    /** Round-trips a deque through the stream wire format. */
    public void testReadFromStream() throws IOException {
        var original = DequeUtils.of("this is a string");
        var output = new BytesStreamOutput();
        output.writeStringCollection(original);
        var input = new ByteArrayStreamInput(output.bytes().array());
        var restored = DequeUtils.readDeque(input, StreamInput::readString);
        assertTrue(DequeUtils.dequeEquals(original, restored));
        assertEquals(DequeUtils.dequeHashCode(original), DequeUtils.dequeHashCode(restored));
    }
}
|
DequeUtilsTests
|
java
|
quarkusio__quarkus
|
extensions/smallrye-reactive-messaging-pulsar/deployment/src/test/java/io/quarkus/smallrye/reactivemessaging/pulsar/deployment/DefaultSchemaConfigTest.java
|
{
"start": 48390,
"end": 55555
}
|
// Fixture enumerating the messaging signature shapes checked by this test:
// plain @Outgoing producers, plain @Incoming consumers, combined
// @Incoming+@Outgoing processors, and stream-manipulation variants. Every
// incoming payload is PulsarMessage<UUID> and every outgoing payload is
// OutgoingMessage<ByteBuffer>; the channel numbers line up with the
// expectation table in the test method below.
class ____ {
    // @Outgoing
    @Outgoing("channel1")
    Publisher<OutgoingMessage<java.nio.ByteBuffer>> method1() {
        return null;
    }
    @Outgoing("channel3")
    PublisherBuilder<OutgoingMessage<java.nio.ByteBuffer>> method3() {
        return null;
    }
    @Outgoing("channel5")
    Multi<OutgoingMessage<java.nio.ByteBuffer>> method5() {
        return null;
    }
    @Outgoing("channel7")
    OutgoingMessage<java.nio.ByteBuffer> method7() {
        return null;
    }
    @Outgoing("channel9")
    CompletionStage<OutgoingMessage<java.nio.ByteBuffer>> method9() {
        return null;
    }
    @Outgoing("channel11")
    Uni<OutgoingMessage<java.nio.ByteBuffer>> method11() {
        return null;
    }
    // @Incoming
    @Incoming("channel13")
    Subscriber<PulsarMessage<java.util.UUID>> method13() {
        return null;
    }
    @Incoming("channel15")
    SubscriberBuilder<PulsarMessage<java.util.UUID>, Void> method15() {
        return null;
    }
    @Incoming("channel18")
    CompletionStage<?> method18(PulsarMessage<java.util.UUID> msg) {
        return null;
    }
    @Incoming("channel20")
    Uni<?> method20(PulsarMessage<java.util.UUID> msg) {
        return null;
    }
    // @Incoming @Outgoing
    @Incoming("channel22")
    @Outgoing("channel23")
    Processor<PulsarMessage<java.util.UUID>, OutgoingMessage<java.nio.ByteBuffer>> method22() {
        return null;
    }
    @Incoming("channel26")
    @Outgoing("channel27")
    ProcessorBuilder<PulsarMessage<java.util.UUID>, OutgoingMessage<java.nio.ByteBuffer>> method24() {
        return null;
    }
    @Incoming("channel30")
    @Outgoing("channel31")
    Publisher<OutgoingMessage<java.nio.ByteBuffer>> method26(PulsarMessage<java.util.UUID> msg) {
        return null;
    }
    @Incoming("channel34")
    @Outgoing("channel35")
    PublisherBuilder<OutgoingMessage<java.nio.ByteBuffer>> method28(PulsarMessage<UUID> msg) {
        return null;
    }
    @Incoming("channel38")
    @Outgoing("channel39")
    Multi<OutgoingMessage<java.nio.ByteBuffer>> method30(PulsarMessage<java.util.UUID> msg) {
        return null;
    }
    @Incoming("channel42")
    @Outgoing("channel43")
    OutgoingMessage<java.nio.ByteBuffer> method32(PulsarMessage<java.util.UUID> msg) {
        return null;
    }
    @Incoming("channel46")
    @Outgoing("channel47")
    CompletionStage<OutgoingMessage<ByteBuffer>> method34(PulsarMessage<java.util.UUID> msg) {
        return null;
    }
    @Incoming("channel50")
    @Outgoing("channel51")
    Uni<OutgoingMessage<java.nio.ByteBuffer>> method36(PulsarMessage<java.util.UUID> msg) {
        return null;
    }
    // @Incoming @Outgoing stream manipulation
    @Incoming("channel54")
    @Outgoing("channel55")
    Publisher<OutgoingMessage<java.nio.ByteBuffer>> method38(Publisher<PulsarMessage<java.util.UUID>> msg) {
        return null;
    }
    @Incoming("channel58")
    @Outgoing("channel59")
    PublisherBuilder<OutgoingMessage<java.nio.ByteBuffer>> method40(
            PublisherBuilder<PulsarMessage<java.util.UUID>> msg) {
        return null;
    }
    @Incoming("channel62")
    @Outgoing("channel63")
    Multi<OutgoingMessage<java.nio.ByteBuffer>> method42(Multi<PulsarMessage<java.util.UUID>> msg) {
        return null;
    }
}
// ---
@Test
public void consumerRecordIntUuidInProducerRecordDoubleByteBufferOut() {
// @formatter:off
Tuple[] expectations = {
tuple("mp.messaging.outgoing.channel1.schema", "ByteBufferBYTE_BUFFERSchema"),
tuple("mp.messaging.outgoing.channel3.schema", "ByteBufferBYTE_BUFFERSchema"),
tuple("mp.messaging.outgoing.channel5.schema", "ByteBufferBYTE_BUFFERSchema"),
tuple("mp.messaging.outgoing.channel7.schema", "ByteBufferBYTE_BUFFERSchema"),
tuple("mp.messaging.outgoing.channel9.schema", "ByteBufferBYTE_BUFFERSchema"),
tuple("mp.messaging.outgoing.channel11.schema", "ByteBufferBYTE_BUFFERSchema"),
tuple("mp.messaging.incoming.channel13.schema", "UUIDJSONSchema"),
tuple("mp.messaging.incoming.channel15.schema", "UUIDJSONSchema"),
tuple("mp.messaging.incoming.channel18.schema", "UUIDJSONSchema"),
tuple("mp.messaging.incoming.channel20.schema", "UUIDJSONSchema"),
tuple("mp.messaging.incoming.channel22.schema", "UUIDJSONSchema"),
tuple("mp.messaging.outgoing.channel23.schema", "ByteBufferBYTE_BUFFERSchema"),
tuple("mp.messaging.incoming.channel26.schema", "UUIDJSONSchema"),
tuple("mp.messaging.outgoing.channel27.schema", "ByteBufferBYTE_BUFFERSchema"),
tuple("mp.messaging.incoming.channel30.schema", "UUIDJSONSchema"),
tuple("mp.messaging.outgoing.channel31.schema", "ByteBufferBYTE_BUFFERSchema"),
tuple("mp.messaging.incoming.channel34.schema", "UUIDJSONSchema"),
tuple("mp.messaging.outgoing.channel35.schema", "ByteBufferBYTE_BUFFERSchema"),
tuple("mp.messaging.incoming.channel38.schema", "UUIDJSONSchema"),
tuple("mp.messaging.outgoing.channel39.schema", "ByteBufferBYTE_BUFFERSchema"),
tuple("mp.messaging.incoming.channel42.schema", "UUIDJSONSchema"),
tuple("mp.messaging.outgoing.channel43.schema", "ByteBufferBYTE_BUFFERSchema"),
tuple("mp.messaging.incoming.channel46.schema", "UUIDJSONSchema"),
tuple("mp.messaging.outgoing.channel47.schema", "ByteBufferBYTE_BUFFERSchema"),
tuple("mp.messaging.incoming.channel50.schema", "UUIDJSONSchema"),
tuple("mp.messaging.outgoing.channel51.schema", "ByteBufferBYTE_BUFFERSchema"),
tuple("mp.messaging.incoming.channel54.schema", "UUIDJSONSchema"),
tuple("mp.messaging.outgoing.channel55.schema", "ByteBufferBYTE_BUFFERSchema"),
tuple("mp.messaging.incoming.channel58.schema", "UUIDJSONSchema"),
tuple("mp.messaging.outgoing.channel59.schema", "ByteBufferBYTE_BUFFERSchema"),
tuple("mp.messaging.incoming.channel62.schema", "UUIDJSONSchema"),
tuple("mp.messaging.outgoing.channel63.schema", "ByteBufferBYTE_BUFFERSchema"),
};
var generatedSchemas = Map.of(
"java.nio.ByteBuffer", "ByteBufferBYTE_BUFFERSchema",
"java.util.UUID", "UUIDJSONSchema"
);
// @formatter:on
doTest(expectations, generatedSchemas, ConsumerRecordIntUuidInProducerRecordDoubleByteBufferOut.class);
}
private static
|
KafkaRecordIntUuidInRecordDoubleByteBufferOut
|
java
|
quarkusio__quarkus
|
extensions/redis-client/runtime/src/main/java/io/quarkus/redis/datasource/sortedset/ZAggregateArgs.java
|
{
"start": 244,
"end": 3320
}
|
enum ____ {
    // Score of an element is summed across the inputs where it exists (the default).
    SUM,
    // Resulting score is the minimum across the inputs where the element exists.
    MIN,
    // Resulting score is the maximum across the inputs where the element exists.
    MAX
}
private final List<Double> weights = new ArrayList<>();
private Aggregate aggregate;
/**
* Using the WEIGHTS option, it is possible to specify a multiplication factor for each input sorted set.
* This means that the score of every element in every input sorted set is multiplied by this factor before being
* passed to the aggregation function. When WEIGHTS is not given, the multiplication factors default to 1.
*
* @param weights the weight values
* @return the current {@code ZAggregateArgs}
**/
public ZAggregateArgs weights(double... weights) {
if (weights == null) {
throw new IllegalArgumentException("`weights` cannot be `null`");
}
for (double weight : weights) {
this.weights.add(weight);
}
return this;
}
/**
* With the AGGREGATE option, it is possible to specify how the results of the union are aggregated.
* This option defaults to SUM, where the score of an element is summed across the inputs where it exists.
* When this option is set to either MIN or MAX, the resulting set will contain the minimum or maximum score of
* an element across the inputs where it exists.
*
* @param aggregate the aggregate value
* @return the current {@code ZAggregateArgs}
**/
public ZAggregateArgs aggregate(Aggregate aggregate) {
this.aggregate = aggregate;
return this;
}
/**
* Configure the {@code aggregate} function to be {@code SUM}.
*
* @return the current {@code ZAggregateArgs}
*/
public ZAggregateArgs sum() {
this.aggregate = Aggregate.SUM;
return this;
}
/**
* Configure the {@code aggregate} function to be {@code MIN}.
*
* @return the current {@code ZAggregateArgs}
*/
public ZAggregateArgs min() {
this.aggregate = Aggregate.MIN;
return this;
}
/**
* Configure the {@code aggregate} function to be {@code MAX}.
*
* @return the current {@code ZAggregateArgs}
*/
public ZAggregateArgs max() {
this.aggregate = Aggregate.MAX;
return this;
}
@Override
public List<Object> toArgs() {
List<Object> args = new ArrayList<>();
if (!weights.isEmpty()) {
args.add("WEIGHTS");
for (double w : weights) {
args.add(Double.toString(w));
}
}
if (aggregate != null) {
args.add("AGGREGATE");
switch (aggregate) {
case SUM:
args.add("SUM");
break;
case MIN:
args.add("MIN");
break;
case MAX:
args.add("MAX");
break;
default:
throw new IllegalArgumentException("Aggregation " + aggregate + " not supported");
}
}
return args;
}
}
|
Aggregate
|
java
|
elastic__elasticsearch
|
build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/packer/CacheCacheableTestFixtures.java
|
{
"start": 1474,
"end": 2043
}
|
class ____ extends DefaultTask {
@CompileClasspath
public abstract ConfigurableFileCollection getClasspath();
@Inject
public abstract WorkerExecutor getWorkerExecutor();
/**
* Executes the forbidden apis task.
*/
@TaskAction
public void checkForbidden() {
WorkQueue workQueue = getWorkerExecutor().classLoaderIsolation(spec -> spec.getClasspath().from(getClasspath()));
workQueue.submit(CacheTestFixtureWorkAction.class, params -> params.getClasspath().setFrom(getClasspath()));
}
|
CacheCacheableTestFixtures
|
java
|
apache__flink
|
flink-core/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
|
{
"start": 104478,
"end": 108235
}
|
interface
____ (typeClass.getName().equals(HADOOP_WRITABLE_CLASS)) {
return false;
}
final HashSet<Class<?>> alreadySeen = new HashSet<>();
alreadySeen.add(typeClass);
return hasHadoopWritableInterface(typeClass, alreadySeen);
}
private static boolean hasHadoopWritableInterface(
Class<?> clazz, HashSet<Class<?>> alreadySeen) {
Class<?>[] interfaces = clazz.getInterfaces();
for (Class<?> c : interfaces) {
if (c.getName().equals(HADOOP_WRITABLE_CLASS)) {
return true;
} else if (alreadySeen.add(c) && hasHadoopWritableInterface(c, alreadySeen)) {
return true;
}
}
Class<?> superclass = clazz.getSuperclass();
return superclass != null
&& alreadySeen.add(superclass)
&& hasHadoopWritableInterface(superclass, alreadySeen);
}
// visible for testing
public static <T> TypeInformation<T> createHadoopWritableTypeInfo(Class<T> clazz) {
checkNotNull(clazz);
Class<?> typeInfoClass;
try {
typeInfoClass =
Class.forName(
HADOOP_WRITABLE_TYPEINFO_CLASS,
false,
Thread.currentThread().getContextClassLoader());
} catch (ClassNotFoundException e) {
throw new RuntimeException(
"Could not load the TypeInformation for the class '"
+ HADOOP_WRITABLE_CLASS
+ "'. You may be missing the 'flink-hadoop-compatibility' dependency.");
}
try {
Constructor<?> constr = typeInfoClass.getConstructor(Class.class);
@SuppressWarnings("unchecked")
TypeInformation<T> typeInfo = (TypeInformation<T>) constr.newInstance(clazz);
return typeInfo;
} catch (NoSuchMethodException | IllegalAccessException | InstantiationException e) {
throw new RuntimeException(
"Incompatible versions of the Hadoop Compatibility classes found.");
} catch (InvocationTargetException e) {
throw new RuntimeException(
"Cannot create Hadoop WritableTypeInfo.", e.getTargetException());
}
}
// visible for testing
static void validateIfWritable(TypeInformation<?> typeInfo, Type type) {
try {
// try to load the writable type info
Class<?> writableTypeInfoClass =
Class.forName(
HADOOP_WRITABLE_TYPEINFO_CLASS,
false,
typeInfo.getClass().getClassLoader());
if (writableTypeInfoClass.isAssignableFrom(typeInfo.getClass())) {
// this is actually a writable type info
// check if the type is a writable
if (!(type instanceof Class && isHadoopWritable((Class<?>) type))) {
throw new InvalidTypesException(HADOOP_WRITABLE_CLASS + " type expected.");
}
// check writable type contents
Class<?> clazz = (Class<?>) type;
if (typeInfo.getTypeClass() != clazz) {
throw new InvalidTypesException(
"Writable type '"
+ typeInfo.getTypeClass().getCanonicalName()
+ "' expected but was '"
+ clazz.getCanonicalName()
+ "'.");
}
}
} catch (ClassNotFoundException e) {
//
|
if
|
java
|
apache__flink
|
flink-datastream/src/main/java/org/apache/flink/datastream/impl/operators/KeyedTwoInputBroadcastProcessOperator.java
|
{
"start": 2237,
"end": 5350
}
|
// Keyed variant of the two-input broadcast process operator: adds timer
// support (via Triggerable) and tracks the keys seen so far so the
// non-partitioned context can iterate over them.
class ____<KEY, IN1, IN2, OUT>
        extends TwoInputBroadcastProcessOperator<IN1, IN2, OUT>
        implements Triggerable<KEY, VoidNamespace> {
    private transient InternalTimerService<VoidNamespace> timerService;
    // TODO Restore this keySet when task initialized from checkpoint.
    private transient Set<Object> keySet;
    // Optional selector used to verify that emitted records map back to the
    // current key; when null, output is emitted unchecked.
    @Nullable private final KeySelector<OUT, KEY> outKeySelector;

    public KeyedTwoInputBroadcastProcessOperator(
            TwoInputBroadcastStreamProcessFunction<IN1, IN2, OUT> userFunction) {
        this(userFunction, null);
    }

    public KeyedTwoInputBroadcastProcessOperator(
            TwoInputBroadcastStreamProcessFunction<IN1, IN2, OUT> userFunction,
            @Nullable KeySelector<OUT, KEY> outKeySelector) {
        super(userFunction);
        this.outKeySelector = outKeySelector;
    }

    @Override
    public void open() throws Exception {
        // Timer service must exist before super.open() runs user setup.
        this.timerService =
                getInternalTimerService("processing timer", VoidNamespaceSerializer.INSTANCE, this);
        this.keySet = new HashSet<>();
        super.open();
    }

    @Override
    protected TimestampCollector<OUT> getOutputCollector() {
        // Wrap the collector with a key check only when a selector was given.
        return outKeySelector == null
                ? new OutputCollector<>(output)
                : new KeyCheckedOutputCollector<>(
                        new OutputCollector<>(output), outKeySelector, () -> (KEY) getCurrentKey());
    }

    @Override
    protected Object currentKey() {
        return getCurrentKey();
    }

    protected ProcessingTimeManager getProcessingTimeManager() {
        return new DefaultProcessingTimeManager(timerService);
    }

    @Override
    public void onEventTime(InternalTimer<KEY, VoidNamespace> timer) throws Exception {
        // Event-time callbacks are only forwarded to functions wrapped for
        // event-time support; other functions simply ignore them.
        if (userFunction instanceof EventTimeWrappedTwoInputBroadcastStreamProcessFunction) {
            ((EventTimeWrappedTwoInputBroadcastStreamProcessFunction<IN1, IN2, OUT>) userFunction)
                    .onEventTime(timer.getTimestamp(), getOutputCollector(), partitionedContext);
        }
    }

    @Override
    public void onProcessingTime(InternalTimer<KEY, VoidNamespace> timer) throws Exception {
        userFunction.onProcessingTimer(
                timer.getTimestamp(), getOutputCollector(), partitionedContext);
    }

    @Override
    protected NonPartitionedContext<OUT> getNonPartitionedContext() {
        return new DefaultNonPartitionedContext<>(
                context,
                partitionedContext,
                collector,
                true,
                keySet,
                output,
                watermarkDeclarationMap);
    }

    @Override
    public void newKeySelected(Object newKey) {
        // Remember every key observed so far (see TODO about checkpoints).
        keySet.add(newKey);
    }

    @Override
    public boolean isAsyncKeyOrderedProcessingEnabled() {
        return true;
    }

    @Override
    protected InternalTimerService<VoidNamespace> getTimerService() {
        return timerService;
    }

    @Override
    protected Supplier<Long> getEventTimeSupplier() {
        return () -> timerService.currentWatermark();
    }
}
|
KeyedTwoInputBroadcastProcessOperator
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/sql/mysql/MySqlError_test.java
|
{
"start": 778,
"end": 1355
}
|
class ____ extends MysqlTest {

    /** A statement truncated after "score=" must fail parsing with an EOF error. */
    public void test_0() throws Exception {
        String sql = "insert into userdetectitem26 (nick,volume) values(?,?) " +
                "ON DUPLICATE KEY UPDATE title = ?,picURL = ?,scoreExceed = ?,score=";
        Exception caught = null;
        try {
            new MySqlStatementParser(sql).parseStatementList();
        } catch (Exception e) {
            caught = e;
        }
        assertNotNull(caught);
        assertEquals("EOF, score=", caught.getMessage());
    }
}
|
MySqlError_test
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/test/dubbo/EnumTest.java
|
{
"start": 45,
"end": 90
}
|
// Sample animal constants used by the test; values carry no semantics beyond
// being distinct enum members.
enum ____ {
    Pig,
    Dog,
    Cat
}
|
EnumTest
|
java
|
apache__camel
|
components/camel-cxf/camel-cxf-rest/src/main/java/org/apache/camel/component/cxf/jaxrs/CxfRsHeaderFilterStrategy.java
|
{
"start": 986,
"end": 1519
}
|
// Header filter strategy for CXF-RS endpoints: strips the Camel operation-name
// header and Content-Type from outgoing messages (matched case-insensitively),
// plus any header matching the CAMEL_FILTER_STARTS_WITH prefixes.
class ____ extends DefaultHeaderFilterStrategy {
    public CxfRsHeaderFilterStrategy() {
        initialize();
    }

    protected void initialize() {
        getOutFilter().add(CxfConstants.OPERATION_NAME.toLowerCase());
        getOutFilter().add("Content-Type".toLowerCase());
        // Support to filter the Content-Type case insensitive
        setLowerCase(true);
        // filter headers begin with "Camel" or "org.apache.camel"
        setOutFilterStartsWith(CAMEL_FILTER_STARTS_WITH);
    }
}
|
CxfRsHeaderFilterStrategy
|
java
|
dropwizard__dropwizard
|
dropwizard-logging/src/main/java/io/dropwizard/logging/common/layout/DropwizardLayoutFactory.java
|
{
"start": 343,
"end": 587
}
|
// Factory producing the default Dropwizard pattern layout for logging events.
class ____ implements LayoutFactory<ILoggingEvent> {

    /**
     * Builds a new {@code DropwizardLayout} bound to the given context.
     *
     * @param context  the logger context the layout belongs to
     * @param timeZone the time zone used when rendering timestamps
     * @return a freshly constructed layout (not yet started)
     */
    @Override
    public PatternLayoutBase<ILoggingEvent> build(LoggerContext context, TimeZone timeZone) {
        return new DropwizardLayout(context, timeZone);
    }
}
|
DropwizardLayoutFactory
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/api/connector/sink/lib/OutputFormatSink.java
|
{
"start": 1707,
"end": 2304
}
|
class ____<IN> implements Sink<IN> {
private final OutputFormat<IN> format;
public OutputFormatSink(OutputFormat<IN> format) {
this.format = format;
}
@Override
public SinkWriter<IN> createWriter(WriterInitContext writerContext) throws IOException {
RuntimeContext runtimeContext = null;
if (writerContext instanceof InitContextBase) {
runtimeContext = ((InitContextBase) writerContext).getRuntimeContext();
}
return new InputFormatSinkWriter<>(writerContext, format, runtimeContext);
}
private static
|
OutputFormatSink
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/security/AuthenticationRedirectExceptionHeaderTest.java
|
{
"start": 1331,
"end": 2817
}
|
class ____ {
private static final String APP_PROPS = "" +
"quarkus.http.auth.permission.default.paths=/*\n" +
"quarkus.http.auth.permission.default.policy=authenticated";
@RegisterExtension
static QuarkusUnitTest runner = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addAsResource(new StringAsset(APP_PROPS), "application.properties"));
@Test
public void testHeaders() {
// case-insensitive test that Pragma, cache-control and location headers are only present once
// there were duplicate headers when both default auth failure handler and auth ex mapper set headers
var response = RestAssured
.given()
.redirects()
.follow(false)
.when()
.get("/secured-route");
response.then().statusCode(FOUND);
assertEquals(1, getHeaderCount(response, LOCATION.toString()));
assertEquals(1, getHeaderCount(response, CACHE_CONTROL.toString()));
assertEquals(1, getHeaderCount(response, "Pragma"));
}
private static int getHeaderCount(Response response, String headerName) {
headerName = headerName.toLowerCase();
return (int) response.headers().asList().stream().map(Header::getName).map(String::toLowerCase)
.filter(headerName::equals).count();
}
@ApplicationScoped
public static
|
AuthenticationRedirectExceptionHeaderTest
|
java
|
spring-projects__spring-security
|
web/src/test/java/org/springframework/security/web/aot/hint/WebMvcSecurityRuntimeHintsTests.java
|
{
"start": 1380,
"end": 2674
}
|
// Verifies that the AOT runtime hints registered via META-INF/spring/aot.factories
// cover the reflection and resource entries Spring Security's web support needs.
class ____ {
    private final RuntimeHints hints = new RuntimeHints();

    @BeforeEach
    void setup() {
        // Load every RuntimeHintsRegistrar declared in aot.factories and let it
        // populate the shared hints instance under test.
        SpringFactoriesLoader.forResourceLocation("META-INF/spring/aot.factories")
            .load(RuntimeHintsRegistrar.class)
            .forEach((registrar) -> registrar.registerHints(this.hints, ClassUtils.getDefaultClassLoader()));
    }

    // Reflection hints for the SpEL expression root used in web security expressions.
    @Test
    void webSecurityExpressionRootHasHints() {
        assertThat(RuntimeHintsPredicates.reflection()
            .onType(WebSecurityExpressionRoot.class)
            .withMemberCategories(MemberCategory.INVOKE_DECLARED_METHODS, MemberCategory.ACCESS_DECLARED_FIELDS))
            .accepts(this.hints);
    }

    // SupplierCsrfToken is package-private, so it is referenced by name.
    @Test
    void supplierCsrfTokenHasHints() {
        assertThat(RuntimeHintsPredicates.reflection()
            .onType(TypeReference
                .of("org.springframework.security.web.csrf.CsrfTokenRequestAttributeHandler$SupplierCsrfToken"))
            .withMemberCategories(MemberCategory.INVOKE_DECLARED_METHODS)).accepts(this.hints);
    }

    // Static resources served by the default login/webauthn pages.
    @Test
    void cssHasHints() {
        assertThat(RuntimeHintsPredicates.resource().forResource("org/springframework/security/default-ui.css"))
            .accepts(this.hints);
    }

    @Test
    void webauthnJavascriptHasHints() {
        assertThat(RuntimeHintsPredicates.resource()
            .forResource("org/springframework/security/spring-security-webauthn.js")).accepts(this.hints);
    }
}
|
WebMvcSecurityRuntimeHintsTests
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/caching/mocked/ReadWriteCacheTest.java
|
{
"start": 8640,
"end": 9024
}
|
// Simple entity used by the read-write cache test: an identifier plus a title.
class ____ {

    /** Primary key. */
    @Id
    private Long id;

    /** Human-readable title of the book. */
    private String title;

    public Long getId() {
        return id;
    }

    public void setId(Long id) {
        this.id = id;
    }

    public String getTitle() {
        return title;
    }

    public void setTitle(String title) {
        this.title = title;
    }

    // Fixed: annotated with @Override — toString() overrides Object.toString(),
    // and the annotation lets the compiler catch signature drift.
    @Override
    public String toString() {
        return "Book[id=" + id + ",title=" + title + "]";
    }
}
public static
|
Book
|
java
|
apache__flink
|
flink-python/src/main/java/org/apache/flink/python/util/HashMapWrapper.java
|
{
"start": 1203,
"end": 2717
}
|
class ____ {
private final Class<?> keyCls;
private final Class<?> valueCls;
private final HashMap underlyingMap;
public HashMapWrapper(Class<?> keyCls, Class<?> valueCls) {
this.keyCls = keyCls;
this.valueCls = valueCls;
this.underlyingMap = new HashMap();
}
public void put(Object key, Object value) throws Exception {
final Object typedKey;
final Object typedValue;
if (keyCls != null) {
typedKey = convert(keyCls, key);
} else {
typedKey = key;
}
if (valueCls != null) {
typedValue = convert(valueCls, value);
} else {
typedValue = value;
}
underlyingMap.put(typedKey, typedValue);
}
public HashMap asMap() {
return underlyingMap;
}
private static Object convert(Class<?> cls, Object data) {
final Number typedData = (Number) data;
if (cls == Integer.class) {
return typedData.intValue();
} else if (cls == Long.class) {
return typedData.longValue();
} else if (cls == Float.class) {
return typedData.floatValue();
} else if (cls == Double.class) {
return typedData.doubleValue();
} else if (cls == Byte.class) {
return typedData.byteValue();
} else if (cls == Short.class) {
return typedData.shortValue();
} else {
throw new RuntimeException("Unexpected
|
HashMapWrapper
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/internal/util/collections/InstanceIdentityMap.java
|
{
"start": 9054,
"end": 9217
}
|
class ____ extends PagedArrayIterator<Map.Entry<K, V>> {
@Override
public Map.Entry<K, V> next() {
return new Entry( nextIndex() );
}
private
|
EntryIterator
|
java
|
quarkusio__quarkus
|
independent-projects/tools/registry-client/src/main/java/io/quarkus/registry/config/RegistryPlatformsConfig.java
|
{
"start": 935,
"end": 1586
}
|
// Mutable builder view of the platforms config. The overrides below narrow
// the return type to the mutable variant so setter calls can be chained.
interface ____ extends RegistryPlatformsConfig, RegistryArtifactConfig.Mutable {
    @Override
    RegistryPlatformsConfig.Mutable setArtifact(ArtifactCoords artifact);

    @Override
    RegistryPlatformsConfig.Mutable setDisabled(boolean disabled);

    RegistryPlatformsConfig.Mutable setExtensionCatalogsIncluded(Boolean extensionCatalogsIncluded);

    /** @return an immutable copy of this config */
    @Override
    RegistryPlatformsConfig build();
}
/**
* @return a new mutable instance
*/
static Mutable builder() {
return new RegistryPlatformsConfigImpl.Builder();
}
}
|
Mutable
|
java
|
google__error-prone
|
check_api/src/main/java/com/google/errorprone/util/ErrorProneComment.java
|
{
"start": 1003,
"end": 2281
}
|
class ____ {
private final int pos;
private final int endPos;
private final int offset;
private final Supplier<String> text;
private final ErrorProneCommentStyle style;
ErrorProneComment(
int pos, int endPos, int offset, Supplier<String> text, ErrorProneCommentStyle style) {
this.pos = pos;
this.endPos = endPos;
this.offset = offset;
this.text = Suppliers.memoize(text);
this.style = style;
}
public ErrorProneComment withOffset(int offset) {
return new ErrorProneComment(pos, endPos, offset, text, style);
}
public int getPos() {
return pos + offset;
}
public int getEndPos() {
return endPos + offset;
}
/**
* Returns the source position of the character at index {@code index} in the comment text.
*
* <p>The handling of javadoc comments in javac has more logic to skip over leading whitespace and
* '*' characters when indexing into doc comments, but we don't need any of that.
*/
public int getSourcePos(int index) {
checkArgument(
0 <= index && index < (endPos - pos),
"Expected %s in the range [0, %s)",
index,
endPos - pos);
return pos + index + offset;
}
/** A compatibility wrapper for {@link CommentStyle}. */
public
|
ErrorProneComment
|
java
|
apache__camel
|
components/camel-bindy/src/test/java/org/apache/camel/dataformat/bindy/model/fix/sorted/header/Header.java
|
{
"start": 1096,
"end": 2984
}
|
// Bindy key-value-pair model of a FIX message header. Each field is bound to
// its FIX tag number and to the position it occupies in the rendered output.
class ____ {
    @KeyValuePairField(tag = 8, position = 1)
    // FIX tag 8: BeginString — protocol/version identifier that opens the message
    private String beginString;

    @KeyValuePairField(tag = 9, position = 2)
    // FIX tag 9: BodyLength (the original "Checksum" note was wrong — the field
    // name and tag both denote the body length; checksum is tag 10)
    private int bodyLength;

    @KeyValuePairField(tag = 34, position = 4)
    // FIX tag 34: MsgSeqNum — message sequence number
    private int msgSeqNum;

    @KeyValuePairField(tag = 35, position = 3)
    // FIX tag 35: MsgType — message type
    private String msgType;

    @KeyValuePairField(tag = 49, position = 5)
    // FIX tag 49: SenderCompID — sending company id
    private String sendCompId;

    @KeyValuePairField(tag = 56, position = 6)
    // FIX tag 56: TargetCompID — target company id
    private String targetCompId;

    public String getBeginString() {
        return beginString;
    }

    public void setBeginString(String beginString) {
        this.beginString = beginString;
    }

    public int getBodyLength() {
        return bodyLength;
    }

    public void setBodyLength(int bodyLength) {
        this.bodyLength = bodyLength;
    }

    public int getMsgSeqNum() {
        return msgSeqNum;
    }

    public void setMsgSeqNum(int msgSeqNum) {
        this.msgSeqNum = msgSeqNum;
    }

    public String getMsgType() {
        return msgType;
    }

    public void setMsgType(String msgType) {
        this.msgType = msgType;
    }

    public String getSendCompId() {
        return sendCompId;
    }

    public void setSendCompId(String sendCompId) {
        this.sendCompId = sendCompId;
    }

    public String getTargetCompId() {
        return targetCompId;
    }

    public void setTargetCompId(String targetCompId) {
        this.targetCompId = targetCompId;
    }

    @Override
    public String toString() {
        return Header.class.getName() + " --> 8: " + this.beginString + ", 9: " + this.bodyLength + ", 34: " + this.msgSeqNum
                + " , 35: " + this.msgType + ", 49: "
                + this.sendCompId + ", 56: " + this.targetCompId;
    }
}
|
Header
|
java
|
quarkusio__quarkus
|
independent-projects/bootstrap/core/src/main/java/io/quarkus/bootstrap/app/BootstrapProfile.java
|
{
"start": 226,
"end": 2087
}
|
class ____ {
public static final String QUARKUS_PROFILE_ENV = "QUARKUS_PROFILE";
public static final String QUARKUS_PROFILE_PROP = "quarkus.profile";
public static final String QUARKUS_TEST_PROFILE_PROP = "quarkus.test.profile";
private static final String BACKWARD_COMPATIBLE_QUARKUS_PROFILE_PROP = "quarkus-profile";
public static final String DEV = "dev";
public static final String PROD = "prod";
public static final String TEST = "test";
private static String runtimeDefaultProfile = null;
public static void setRuntimeDefaultProfile(final String profile) {
runtimeDefaultProfile = profile;
}
public static String getActiveProfile(QuarkusBootstrap.Mode mode) {
if (mode == QuarkusBootstrap.Mode.TEST) {
String profile = System.getProperty(QUARKUS_TEST_PROFILE_PROP);
if (profile != null) {
return profile;
}
return "test";
}
String profile = System.getProperty(QUARKUS_PROFILE_PROP);
if (profile != null) {
return profile;
}
profile = System.getProperty(BACKWARD_COMPATIBLE_QUARKUS_PROFILE_PROP);
if (profile != null) {
return profile;
}
profile = System.getenv(QUARKUS_PROFILE_ENV);
if (profile != null) {
return profile;
}
profile = runtimeDefaultProfile;
if (profile != null) {
return profile;
}
switch (mode) {
case REMOTE_DEV_SERVER:
case DEV:
case CONTINUOUS_TEST:
return DEV;
case REMOTE_DEV_CLIENT:
case PROD:
case RUN:
return PROD;
default:
throw new RuntimeException("unknown mode:" + mode);
}
}
}
|
BootstrapProfile
|
java
|
spring-projects__spring-framework
|
integration-tests/src/test/java/org/springframework/scheduling/annotation/ScheduledAndTransactionalAnnotationIntegrationTests.java
|
{
"start": 6858,
"end": 7385
}
|
class ____ implements MyRepositoryWithScheduledMethod {
private final AtomicInteger count = new AtomicInteger();
@Autowired(required = false)
private MyAspect myAspect;
@Override
@Transactional
@Scheduled(fixedDelay = 5)
public void scheduled() {
this.count.incrementAndGet();
}
@Override
public int getInvocationCount() {
if (this.myAspect != null) {
assertThat(this.myAspect.count.get()).isEqualTo(this.count.get());
}
return this.count.get();
}
}
}
|
MyRepositoryWithScheduledMethodImpl
|
java
|
reactor__reactor-core
|
reactor-core/src/test/java/reactor/core/publisher/SinksTest.java
|
{
"start": 8299,
"end": 8616
}
|
class ____ {
final Supplier<Sinks.Many<Integer>> supplier = () -> Sinks.many().multicast().directBestEffort();
@TestFactory
Stream<DynamicContainer> checkSemantics() {
return Stream.of(
expectMulticast(supplier, 0, true),
expectReplay(supplier, NONE)
);
}
}
@Nested
|
MulticastDirectBestEffort
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
|
{
"start": 57393,
"end": 57638
}
|
interface ____ performs the actual {@code ReplicaRequest} on the replica
* shards. It also encapsulates the logic required for failing the replica
* if deemed necessary as well as marking it as stale when needed.
*/
protected
|
that
|
java
|
apache__camel
|
components/camel-mock/src/main/java/org/apache/camel/component/mock/AssertionTask.java
|
{
"start": 967,
"end": 1162
}
|
interface ____ extends Runnable {
/**
* Asserts on the n'th received message
*
* @param index the n'th received message
*/
void assertOnIndex(int index);
}
|
AssertionTask
|
java
|
apache__camel
|
components/camel-bindy/src/test/java/org/apache/camel/dataformat/bindy/csv/BindyCsvSkipFieldTest.java
|
{
"start": 2222,
"end": 3591
}
|
class ____ extends RouteBuilder {
BindyCsvDataFormat camelDataFormat = new BindyCsvDataFormat(CsvSkipField.class);
@Override
public void configure() {
from(URI_DIRECT_START).unmarshal(camelDataFormat)
.process(new Processor() {
@Override
public void process(Exchange exchange) {
CsvSkipField csvSkipField = (CsvSkipField) exchange.getIn().getBody();
ObjectHelper.equal("VOA", csvSkipField.getAttention());
ObjectHelper.equal("12 abc street", csvSkipField.getAddressLine1());
ObjectHelper.equal("Melbourne", csvSkipField.getCity());
ObjectHelper.equal("VIC", csvSkipField.getState());
ObjectHelper.equal("3000", csvSkipField.getZip());
ObjectHelper.equal("Australia", csvSkipField.getCountry());
ObjectHelper.equal("end of record", csvSkipField.getDummy2());
}
})
.marshal(camelDataFormat)
.convertBodyTo(String.class)
.to(URI_MOCK_RESULT);
}
}
@CsvRecord(separator = ",", skipField = true)
public static
|
ContextConfig
|
java
|
grpc__grpc-java
|
api/src/main/java/io/grpc/ConfiguratorRegistry.java
|
{
"start": 832,
"end": 1010
}
|
class ____ responsible for maintaining a list of configurators and providing access to
* them. The default registry can be obtained using {@link #getDefaultRegistry()}.
*/
final
|
is
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/issues/AdviceWithWeaveByToUriPollEnrichTest.java
|
{
"start": 1117,
"end": 2911
}
|
class ____ extends ContextTestSupport {
@Test
public void testAdvicePollEnrichToString() throws Exception {
RouteDefinition route = context.getRouteDefinitions().get(0);
AdviceWith.adviceWith(route, context, new AdviceWithRouteBuilder() {
@Override
public void configure() throws Exception {
weaveByToString("pollEnrich*").replace().to("mock:foo");
mockEndpointsAndSkip("direct:foo*");
}
});
getMockEndpoint("mock:result").expectedBodiesReceived("Hello World");
getMockEndpoint("mock:foo").expectedBodiesReceived("Hello World");
template.sendBody("direct:start", "Hello World");
assertMockEndpointsSatisfied();
}
@Test
public void testAdvicePollEnrichToUri() throws Exception {
RouteDefinition route = context.getRouteDefinitions().get(0);
AdviceWith.adviceWith(route, context, new AdviceWithRouteBuilder() {
@Override
public void configure() throws Exception {
weaveByToUri("seda:bar").replace().to("mock:foo");
mockEndpointsAndSkip("direct:foo*");
}
});
getMockEndpoint("mock:result").expectedBodiesReceived("Hello World");
getMockEndpoint("mock:foo").expectedBodiesReceived("Hello World");
template.sendBody("direct:start", "Hello World");
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start")
.pollEnrich("seda:bar", 1000).to("mock:result");
}
};
}
}
|
AdviceWithWeaveByToUriPollEnrichTest
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/context/jdbc/MultipleDataSourcesAndTransactionManagersTransactionalSqlScriptsTests.java
|
{
"start": 1853,
"end": 2768
}
|
class ____ {
@Autowired
DataSource dataSource1;
@Autowired
DataSource dataSource2;
@Test
@Sql("data-add-dogbert.sql")
void database1() {
assertUsers(new JdbcTemplate(dataSource1), "Dilbert", "Dogbert");
}
@Test
@Transactional(transactionManager = "txMgr2")
@Sql(scripts = "data-add-catbert.sql", config = @SqlConfig(dataSource = "dataSource2", transactionManager = "txMgr2"))
void database2() {
assertUsers(new JdbcTemplate(dataSource2), "Dilbert", "Catbert");
}
private void assertUsers(JdbcTemplate jdbcTemplate, String... users) {
List<String> expected = Arrays.asList(users);
Collections.sort(expected);
List<String> actual = jdbcTemplate.queryForList("select name from user", String.class);
Collections.sort(actual);
assertThat(actual).as("Users in database;").isEqualTo(expected);
}
@Configuration
static
|
MultipleDataSourcesAndTransactionManagersTransactionalSqlScriptsTests
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/convert/ScalarConversionTest.java
|
{
"start": 236,
"end": 1855
}
|
class ____
{
// 08-Jan-2025, tatu: Need to allow null-to-int coercion here
private final ObjectMapper MAPPER = jsonMapperBuilder()
.disable(DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES)
.build();
// [databind#1433]
@Test
public void testConvertValueNullPrimitive() throws Exception
{
assertEquals(Byte.valueOf((byte) 0), MAPPER.convertValue(null, Byte.TYPE));
assertEquals(Short.valueOf((short) 0), MAPPER.convertValue(null, Short.TYPE));
assertEquals(Integer.valueOf(0), MAPPER.convertValue(null, Integer.TYPE));
assertEquals(Long.valueOf(0L), MAPPER.convertValue(null, Long.TYPE));
assertEquals(Float.valueOf(0f), MAPPER.convertValue(null, Float.TYPE));
assertEquals(Double.valueOf(0d), MAPPER.convertValue(null, Double.TYPE));
assertEquals(Character.valueOf('\0'), MAPPER.convertValue(null, Character.TYPE));
assertEquals(Boolean.FALSE, MAPPER.convertValue(null, Boolean.TYPE));
}
// [databind#1433]
@Test
public void testConvertValueNullBoxed() throws Exception
{
assertNull(MAPPER.convertValue(null, Byte.class));
assertNull(MAPPER.convertValue(null, Short.class));
assertNull(MAPPER.convertValue(null, Integer.class));
assertNull(MAPPER.convertValue(null, Long.class));
assertNull(MAPPER.convertValue(null, Float.class));
assertNull(MAPPER.convertValue(null, Double.class));
assertNull(MAPPER.convertValue(null, Character.class));
assertNull(MAPPER.convertValue(null, Boolean.class));
}
}
|
ScalarConversionTest
|
java
|
quarkusio__quarkus
|
devtools/gradle/gradle-extension-plugin/src/test/java/io/quarkus/extension/gradle/tasks/ExtensionDescriptorTaskTest.java
|
{
"start": 593,
"end": 11134
}
|
class ____ {
@TempDir
File testProjectDir;
private File buildFile;
@BeforeEach
public void setupProject() throws IOException {
buildFile = new File(testProjectDir, "build.gradle");
File settingFile = new File(testProjectDir, "settings.gradle");
String settingsContent = "rootProject.name = 'test'";
TestUtils.writeFile(settingFile, settingsContent);
}
@Test
public void shouldCreateFilesWithDefaultValues() throws IOException {
TestUtils.writeFile(buildFile, TestUtils.getDefaultGradleBuildFileContent(true, Collections.emptyList(), ""));
TestUtils.runExtensionDescriptorTask(testProjectDir);
File extensionPropertiesFile = new File(testProjectDir, "build/resources/main/META-INF/quarkus-extension.properties");
assertThat(extensionPropertiesFile).exists();
Properties extensionProperty = TestUtils.readPropertyFile(extensionPropertiesFile.toPath());
assertThat(extensionProperty).containsEntry("deployment-artifact", "org.acme:test-deployment:1.0.0");
File extensionDescriptorFile = new File(testProjectDir, "build/resources/main/META-INF/quarkus-extension.yaml");
assertThat(extensionDescriptorFile).exists();
ObjectNode extensionDescriptor = TestUtils.readExtensionFile(extensionDescriptorFile.toPath());
assertThat(extensionDescriptor.has("name")).isTrue();
assertThat(extensionDescriptor.has("artifact")).isTrue();
assertThat(extensionDescriptor.get("name").asText()).isEqualTo("test");
assertThat(extensionDescriptor.get("artifact").asText()).isEqualTo("org.acme:test::jar:1.0.0");
assertThat(extensionDescriptor.has("description")).isFalse();
// Assert metadata node
assertThat(extensionDescriptor.has("metadata")).isTrue();
JsonNode metadata = extensionDescriptor.get("metadata");
assertThat(metadata.has("built-with-quarkus-core")).isTrue();
assertThat(metadata.get("built-with-quarkus-core").asText()).isEqualTo(TestUtils.getCurrentQuarkusVersion());
assertThat(metadata.has("extension-dependencies")).isTrue();
assertThat(metadata.get("extension-dependencies").isArray()).isTrue();
ArrayNode extensionNodes = (ArrayNode) metadata.get("extension-dependencies");
List<String> extensions = new ArrayList<>();
for (JsonNode extension : extensionNodes) {
extensions.add(extension.asText());
}
assertThat(extensions).hasSize(2);
assertThat(extensions).contains("io.quarkus:quarkus-core", "io.quarkus:quarkus-arc");
}
@Test
public void shouldUseCustomDeploymentArtifactName() throws IOException {
String buildFileContent = TestUtils.getDefaultGradleBuildFileContent(true, Collections.emptyList(),
"deploymentArtifact = 'custom.group:custom-deployment-artifact:0.1.0'");
TestUtils.writeFile(buildFile, buildFileContent);
TestUtils.runExtensionDescriptorTask(testProjectDir);
File extensionPropertiesFile = new File(testProjectDir, "build/resources/main/META-INF/quarkus-extension.properties");
assertThat(extensionPropertiesFile).exists();
Properties extensionProperty = TestUtils.readPropertyFile(extensionPropertiesFile.toPath());
assertThat(extensionProperty).containsEntry("deployment-artifact", "custom.group:custom-deployment-artifact:0.1.0");
}
@Test
public void shouldContainsConditionalDependencies() throws IOException {
String buildFileContent = TestUtils.getDefaultGradleBuildFileContent(true, Collections.emptyList(),
"conditionalDependencies= ['org.acme:ext-a:0.1.0', 'org.acme:ext-b:0.1.0']");
TestUtils.writeFile(buildFile, buildFileContent);
TestUtils.runExtensionDescriptorTask(testProjectDir);
File extensionPropertiesFile = new File(testProjectDir, "build/resources/main/META-INF/quarkus-extension.properties");
assertThat(extensionPropertiesFile).exists();
Properties extensionProperty = TestUtils.readPropertyFile(extensionPropertiesFile.toPath());
assertThat(extensionProperty).containsEntry("deployment-artifact", "org.acme:test-deployment:1.0.0");
assertThat(extensionProperty).containsEntry("conditional-dependencies",
"org.acme:ext-a::jar:0.1.0 org.acme:ext-b::jar:0.1.0");
}
@Test
public void shouldContainsParentFirstArtifacts() throws IOException {
String buildFileContent = TestUtils.getDefaultGradleBuildFileContent(true, Collections.emptyList(),
"parentFirstArtifacts = ['org.acme:ext-a:0.1.0', 'org.acme:ext-b:0.1.0']");
TestUtils.writeFile(buildFile, buildFileContent);
TestUtils.runExtensionDescriptorTask(testProjectDir);
File extensionPropertiesFile = new File(testProjectDir, "build/resources/main/META-INF/quarkus-extension.properties");
assertThat(extensionPropertiesFile).exists();
Properties extensionProperty = TestUtils.readPropertyFile(extensionPropertiesFile.toPath());
assertThat(extensionProperty).containsEntry("deployment-artifact", "org.acme:test-deployment:1.0.0");
assertThat(extensionProperty).containsEntry("parent-first-artifacts", "org.acme:ext-a:0.1.0,org.acme:ext-b:0.1.0");
}
@Test
public void shouldContainsRemoveResources() throws IOException {
String buildFileContent = TestUtils.getDefaultGradleBuildFileContent(true, Collections.emptyList(),
"removedResources { \n" +
"artifact('org.acme:acme-resources').resource('META-INF/a') \n" +
"artifact('org.acme:acme-resources-two').resource('META-INF/b').resource('META-INF/c') \n" +
"}\n");
TestUtils.writeFile(buildFile, buildFileContent);
TestUtils.runExtensionDescriptorTask(testProjectDir);
File extensionPropertiesFile = new File(testProjectDir, "build/resources/main/META-INF/quarkus-extension.properties");
assertThat(extensionPropertiesFile).exists();
Properties extensionProperty = TestUtils.readPropertyFile(extensionPropertiesFile.toPath());
assertThat(extensionProperty).containsEntry("deployment-artifact", "org.acme:test-deployment:1.0.0");
assertThat(extensionProperty).containsEntry("removed-resources.org.acme:acme-resources::jar", "META-INF/a");
assertThat(extensionProperty).containsEntry("removed-resources.org.acme:acme-resources-two::jar",
"META-INF/b,META-INF/c");
}
@Test
public void shouldGenerateDescriptorBasedOnExistingFile() throws IOException {
TestUtils.writeFile(buildFile, TestUtils.getDefaultGradleBuildFileContent(true, Collections.emptyList(), ""));
File metaInfDir = new File(testProjectDir, "src/main/resources/META-INF");
metaInfDir.mkdirs();
String description = "name: extension-name\n" +
"description: this is a sample extension\n";
TestUtils.writeFile(new File(metaInfDir, "quarkus-extension.yaml"), description);
TestUtils.runExtensionDescriptorTask(testProjectDir);
File extensionDescriptorFile = new File(testProjectDir, "build/resources/main/META-INF/quarkus-extension.yaml");
assertThat(extensionDescriptorFile).exists();
ObjectNode extensionDescriptor = TestUtils.readExtensionFile(extensionDescriptorFile.toPath());
assertThat(extensionDescriptor.has("name")).isTrue();
assertThat(extensionDescriptor.get("name").asText()).isEqualTo("extension-name");
assertThat(extensionDescriptor.has("description")).isTrue();
assertThat(extensionDescriptor.get("description").asText()).isEqualTo("this is a sample extension");
}
@Test
public void shouldGenerateDescriptorWithCapabilities() throws IOException {
String buildFileContent = TestUtils.getDefaultGradleBuildFileContent(true, Collections.emptyList(),
"capabilities { \n" +
" provides 'org.acme:ext-a:0.1.0' \n" +
" provides 'org.acme:ext-b:0.1.0' onlyIf(['org.acme:ext-b:0.1.0']) onlyIfNot(['org.acme:ext-c:0.1.0']) \n"
+
" requires 'sunshine' onlyIf(['org.acme:ext-b:0.1.0']) \n" +
"}\n");
TestUtils.writeFile(buildFile, buildFileContent);
TestUtils.runExtensionDescriptorTask(testProjectDir);
File extensionPropertiesFile = new File(testProjectDir, "build/resources/main/META-INF/quarkus-extension.properties");
assertThat(extensionPropertiesFile).exists();
Properties extensionProperty = TestUtils.readPropertyFile(extensionPropertiesFile.toPath());
assertThat(extensionProperty).containsEntry("provides-capabilities",
"org.acme:ext-a:0.1.0,org.acme:ext-b:0.1.0?org.acme:ext-b:0.1.0?!org.acme:ext-c:0.1.0");
assertThat(extensionProperty).containsEntry("requires-capabilities",
"sunshine?org.acme:ext-b:0.1.0");
}
/*
* This test will fail if run in an IDE without extra config - it needs an environment variable, and
* that is increasingly hard to do on Java 17+; see https://github.com/junit-pioneer/junit-pioneer/issues/509
*/
@Test
public void shouldGenerateScmInformation() throws IOException {
TestUtils.writeFile(buildFile, TestUtils.getDefaultGradleBuildFileContent(true, Collections.emptyList(), ""));
File metaInfDir = new File(testProjectDir, "src/main/resources/META-INF");
metaInfDir.mkdirs();
String description = "name: extension-name\n" +
"description: this is a sample extension\n";
TestUtils.writeFile(new File(metaInfDir, "quarkus-extension.yaml"), description);
TestUtils.runExtensionDescriptorTask(testProjectDir);
File extensionDescriptorFile = new File(testProjectDir, "build/resources/main/META-INF/quarkus-extension.yaml");
assertThat(extensionDescriptorFile).exists();
ObjectNode extensionDescriptor = TestUtils.readExtensionFile(extensionDescriptorFile.toPath());
assertThat(extensionDescriptor.get("metadata").get("scm-url")).isNotNull();
assertThat(extensionDescriptor.get("metadata").get("scm-url").asText())
.as("Check source location %s", extensionDescriptor.get("scm-url"))
.isEqualTo("https://github.com/some/repo");
}
}
|
ExtensionDescriptorTaskTest
|
java
|
google__guava
|
guava/src/com/google/common/collect/Synchronized.java
|
{
"start": 10135,
"end": 12622
}
|
class ____<E extends @Nullable Object>
extends SynchronizedCollection<E> implements List<E> {
SynchronizedList(List<E> delegate, @Nullable Object mutex) {
super(delegate, mutex);
}
@Override
List<E> delegate() {
return (List<E>) super.delegate();
}
@Override
public void add(int index, E element) {
synchronized (mutex) {
delegate().add(index, element);
}
}
@Override
public boolean addAll(int index, Collection<? extends E> c) {
synchronized (mutex) {
return delegate().addAll(index, c);
}
}
@Override
public E get(int index) {
synchronized (mutex) {
return delegate().get(index);
}
}
@Override
public int indexOf(@Nullable Object o) {
synchronized (mutex) {
return delegate().indexOf(o);
}
}
@Override
public int lastIndexOf(@Nullable Object o) {
synchronized (mutex) {
return delegate().lastIndexOf(o);
}
}
@Override
public ListIterator<E> listIterator() {
return delegate().listIterator(); // manually synchronized
}
@Override
public ListIterator<E> listIterator(int index) {
return delegate().listIterator(index); // manually synchronized
}
@Override
public E remove(int index) {
synchronized (mutex) {
return delegate().remove(index);
}
}
@Override
public E set(int index, E element) {
synchronized (mutex) {
return delegate().set(index, element);
}
}
@Override
public void replaceAll(UnaryOperator<E> operator) {
synchronized (mutex) {
delegate().replaceAll(operator);
}
}
@Override
public void sort(@Nullable Comparator<? super E> c) {
synchronized (mutex) {
delegate().sort(c);
}
}
@Override
public List<E> subList(int fromIndex, int toIndex) {
synchronized (mutex) {
return list(delegate().subList(fromIndex, toIndex), mutex);
}
}
@Override
public boolean equals(@Nullable Object o) {
if (o == this) {
return true;
}
synchronized (mutex) {
return delegate().equals(o);
}
}
@Override
public int hashCode() {
synchronized (mutex) {
return delegate().hashCode();
}
}
@GwtIncompatible @J2ktIncompatible private static final long serialVersionUID = 0;
}
static final
|
SynchronizedList
|
java
|
mybatis__mybatis-3
|
src/test/java/org/apache/ibatis/submitted/foreach_map/StringStringMapEntry.java
|
{
"start": 707,
"end": 1800
}
|
class ____ {
public StringStringMapEntry() {
}
public StringStringMapEntry(String key, String value) {
this.key = key;
this.value = value;
}
public Object getKey() {
return key;
}
public void setKey(String key) {
this.key = key;
}
public String getValue() {
return value;
}
public void setValue(String value) {
this.value = value;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
StringStringMapEntry mapEntry = (StringStringMapEntry) o;
return key != null ? key.equals(mapEntry.key)
: mapEntry.key == null && value != null ? value.equals(mapEntry.value) : mapEntry.value == null;
}
@Override
public int hashCode() {
int result = key != null ? key.hashCode() : 0;
return 31 * result + (value != null ? value.hashCode() : 0);
}
@Override
public String toString() {
return '{' + key + '=' + value + '}';
}
private String key;
private String value;
}
|
StringStringMapEntry
|
java
|
spring-projects__spring-boot
|
module/spring-boot-jdbc/src/main/java/org/springframework/boot/jdbc/health/DataSourceHealthIndicator.java
|
{
"start": 5516,
"end": 5993
}
|
class ____ implements RowMapper<Object> {
@Override
public Object mapRow(ResultSet rs, int rowNum) throws SQLException {
ResultSetMetaData metaData = rs.getMetaData();
int columns = metaData.getColumnCount();
if (columns != 1) {
throw new IncorrectResultSetColumnCountException(1, columns);
}
Object result = JdbcUtils.getResultSetValue(rs, 1);
Assert.state(result != null, "'result' must not be null");
return result;
}
}
}
|
SingleColumnRowMapper
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/UngroupedOverloadsTest.java
|
{
"start": 13431,
"end": 13584
}
|
class ____ {
public void foo() {}
public void foo(int x) {}
private static
|
UngroupedOverloadsRefactoringMultiple
|
java
|
google__guava
|
android/guava-testlib/src/com/google/common/collect/testing/google/MultimapPutAllMultimapTester.java
|
{
"start": 1700,
"end": 4323
}
|
class ____<K, V>
extends AbstractMultimapTester<K, V, Multimap<K, V>> {
@MapFeature.Require(absent = SUPPORTS_PUT)
public void testPutUnsupported() {
assertThrows(
UnsupportedOperationException.class,
() -> multimap().putAll(getSubjectGenerator().create(mapEntry(k3(), v3()))));
}
@MapFeature.Require(SUPPORTS_PUT)
// Empty multimaps *do* have defined equals semantics.
@SuppressWarnings("UndefinedEquals")
public void testPutAllIntoEmpty() {
Multimap<K, V> target = getSubjectGenerator().create();
assertEquals(!multimap().isEmpty(), target.putAll(multimap()));
assertEquals(multimap(), target);
}
@MapFeature.Require(SUPPORTS_PUT)
public void testPutAll() {
Multimap<K, V> source =
getSubjectGenerator().create(mapEntry(k0(), v3()), mapEntry(k3(), v3()));
assertTrue(multimap().putAll(source));
assertTrue(multimap().containsEntry(k0(), v3()));
assertTrue(multimap().containsEntry(k3(), v3()));
}
@MapFeature.Require({SUPPORTS_PUT, ALLOWS_NULL_VALUES})
public void testPutAllWithNullValue() {
Multimap<K, V> source = getSubjectGenerator().create(mapEntry(k0(), null));
assertTrue(multimap().putAll(source));
assertTrue(multimap().containsEntry(k0(), null));
}
@MapFeature.Require({SUPPORTS_PUT, ALLOWS_NULL_KEYS})
public void testPutAllWithNullKey() {
Multimap<K, V> source = getSubjectGenerator().create(mapEntry(null, v0()));
assertTrue(multimap().putAll(source));
assertTrue(multimap().containsEntry(null, v0()));
}
@MapFeature.Require(value = SUPPORTS_PUT, absent = ALLOWS_NULL_VALUES)
public void testPutAllRejectsNullValue() {
Multimap<K, V> source = getSubjectGenerator().create(mapEntry(k0(), null));
assertThrows(NullPointerException.class, () -> multimap().putAll(source));
expectUnchanged();
}
@MapFeature.Require(value = SUPPORTS_PUT, absent = ALLOWS_NULL_KEYS)
public void testPutAllRejectsNullKey() {
Multimap<K, V> source = getSubjectGenerator().create(mapEntry(null, v0()));
assertThrows(NullPointerException.class, () -> multimap().putAll(source));
expectUnchanged();
}
@MapFeature.Require(SUPPORTS_PUT)
public void testPutAllPropagatesToGet() {
Multimap<K, V> source =
getSubjectGenerator().create(mapEntry(k0(), v3()), mapEntry(k3(), v3()));
Collection<V> getCollection = multimap().get(k0());
int getCollectionSize = getCollection.size();
assertTrue(multimap().putAll(source));
assertEquals(getCollectionSize + 1, getCollection.size());
assertContains(getCollection, v3());
}
}
|
MultimapPutAllMultimapTester
|
java
|
mockito__mockito
|
mockito-core/src/main/java/org/mockito/internal/configuration/plugins/DefaultMockitoPlugins.java
|
{
"start": 1456,
"end": 5200
}
|
class ____
DEFAULT_PLUGINS.put(PluginSwitch.class.getName(), DefaultPluginSwitch.class.getName());
DEFAULT_PLUGINS.put(
MockMaker.class.getName(),
"org.mockito.internal.creation.bytebuddy.InlineByteBuddyMockMaker");
DEFAULT_PLUGINS.put(
StackTraceCleanerProvider.class.getName(),
"org.mockito.internal.exceptions.stacktrace.DefaultStackTraceCleanerProvider");
DEFAULT_PLUGINS.put(
InstantiatorProvider2.class.getName(),
"org.mockito.internal.creation.instance.DefaultInstantiatorProvider");
DEFAULT_PLUGINS.put(
AnnotationEngine.class.getName(),
"org.mockito.internal.configuration.InjectingAnnotationEngine");
DEFAULT_PLUGINS.put(
INLINE_ALIAS, "org.mockito.internal.creation.bytebuddy.InlineByteBuddyMockMaker");
DEFAULT_PLUGINS.put(PROXY_ALIAS, "org.mockito.internal.creation.proxy.ProxyMockMaker");
DEFAULT_PLUGINS.put(
SUBCLASS_ALIAS, "org.mockito.internal.creation.bytebuddy.ByteBuddyMockMaker");
DEFAULT_PLUGINS.put(
MockitoLogger.class.getName(), "org.mockito.internal.util.ConsoleMockitoLogger");
DEFAULT_PLUGINS.put(
MemberAccessor.class.getName(),
"org.mockito.internal.util.reflection.ModuleMemberAccessor");
DEFAULT_PLUGINS.put(
MODULE_ALIAS, "org.mockito.internal.util.reflection.ModuleMemberAccessor");
DEFAULT_PLUGINS.put(
REFLECTION_ALIAS, "org.mockito.internal.util.reflection.ReflectionMemberAccessor");
DEFAULT_PLUGINS.put(
DoNotMockEnforcerWithType.class.getName(),
"org.mockito.internal.configuration.DefaultDoNotMockEnforcer");
MOCK_MAKER_ALIASES.add(INLINE_ALIAS);
MOCK_MAKER_ALIASES.add(PROXY_ALIAS);
MOCK_MAKER_ALIASES.add(SUBCLASS_ALIAS);
MEMBER_ACCESSOR_ALIASES.add(MODULE_ALIAS);
MEMBER_ACCESSOR_ALIASES.add(REFLECTION_ALIAS);
}
@Override
public <T> T getDefaultPlugin(Class<T> pluginType) {
String className = DEFAULT_PLUGINS.get(pluginType.getName());
return create(pluginType, className);
}
public static String getDefaultPluginClass(String classOrAlias) {
return DEFAULT_PLUGINS.get(classOrAlias);
}
/**
* Creates an instance of given plugin type, using specific implementation class.
*/
private <T> T create(Class<T> pluginType, String className) {
if (className == null) {
throw new IllegalStateException(
"No default implementation for requested Mockito plugin type: "
+ pluginType.getName()
+ "\n"
+ "Is this a valid Mockito plugin type? If yes, please report this problem to Mockito team.\n"
+ "Otherwise, please check if you are passing valid plugin type.\n"
+ "Examples of valid plugin types: MockMaker, StackTraceCleanerProvider.");
}
try {
// Default implementation. Use our own ClassLoader instead of the context
// ClassLoader, as the default implementation is assumed to be part of
// Mockito and may not be available via the context ClassLoader.
return pluginType.cast(Class.forName(className).getDeclaredConstructor().newInstance());
} catch (Exception e) {
throw new IllegalStateException(
"Internal problem occurred, please report it. "
+ "Mockito is unable to load the default implementation of
|
name
|
java
|
spring-projects__spring-boot
|
module/spring-boot-security/src/test/java/org/springframework/boot/security/autoconfigure/UserDetailsServiceAutoConfigurationTests.java
|
{
"start": 3878,
"end": 12010
}
|
class ____ {
private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner()
.withUserConfiguration(TestSecurityConfiguration.class)
.withConfiguration(AutoConfigurations.of(UserDetailsServiceAutoConfiguration.class));
@Test
void shouldSupplyUserDetailsServiceInServletApp() {
this.contextRunner.with(AlternativeFormOfAuthentication.nonPresent())
.run((context) -> assertThat(context).hasSingleBean(UserDetailsService.class));
}
@Test
void shouldNotSupplyUserDetailsServiceInReactiveApp() {
new ReactiveWebApplicationContextRunner().withUserConfiguration(TestSecurityConfiguration.class)
.withConfiguration(AutoConfigurations.of(UserDetailsServiceAutoConfiguration.class))
.with(AlternativeFormOfAuthentication.nonPresent())
.run((context) -> assertThat(context).doesNotHaveBean(UserDetailsService.class));
}
@Test
void shouldNotSupplyUserDetailsServiceInNonWebApp() {
new ApplicationContextRunner().withUserConfiguration(TestSecurityConfiguration.class)
.withConfiguration(AutoConfigurations.of(UserDetailsServiceAutoConfiguration.class))
.with(AlternativeFormOfAuthentication.nonPresent())
.run((context) -> assertThat(context).doesNotHaveBean(UserDetailsService.class));
}
@Test
void testDefaultUsernamePassword(CapturedOutput output) {
this.contextRunner.with(AlternativeFormOfAuthentication.nonPresent()).run((context) -> {
assertThat(outcomeOfMissingAlternativeCondition(context).isMatch()).isTrue();
UserDetailsService manager = context.getBean(UserDetailsService.class);
assertThat(output).contains("Using generated security password:");
assertThat(manager.loadUserByUsername("user")).isNotNull();
});
}
@Test
void defaultUserNotCreatedIfAuthenticationManagerBeanPresent(CapturedOutput output) {
this.contextRunner.with(AlternativeFormOfAuthentication.nonPresent())
.withUserConfiguration(TestAuthenticationManagerConfiguration.class)
.run((context) -> {
assertThat(outcomeOfMissingAlternativeCondition(context).isMatch()).isTrue();
AuthenticationManager manager = context.getBean(AuthenticationManager.class);
assertThat(manager)
.isEqualTo(context.getBean(TestAuthenticationManagerConfiguration.class).authenticationManager);
assertThat(output).doesNotContain("Using generated security password: ");
TestingAuthenticationToken token = new TestingAuthenticationToken("foo", "bar");
assertThat(manager.authenticate(token)).isNotNull();
});
}
@Test
void defaultUserNotCreatedIfAuthenticationManagerResolverBeanPresent(CapturedOutput output) {
this.contextRunner.with(AlternativeFormOfAuthentication.nonPresent())
.withUserConfiguration(TestAuthenticationManagerResolverConfiguration.class)
.run((context) -> {
assertThat(outcomeOfMissingAlternativeCondition(context).isMatch()).isTrue();
assertThat(output).doesNotContain("Using generated security password: ");
});
}
@Test
void defaultUserNotCreatedIfUserDetailsServiceBeanPresent(CapturedOutput output) {
this.contextRunner.with(AlternativeFormOfAuthentication.nonPresent())
.withUserConfiguration(TestUserDetailsServiceConfiguration.class)
.run((context) -> {
assertThat(outcomeOfMissingAlternativeCondition(context).isMatch()).isTrue();
UserDetailsService userDetailsService = context.getBean(UserDetailsService.class);
assertThat(output).doesNotContain("Using generated security password: ");
assertThat(userDetailsService.loadUserByUsername("foo")).isNotNull();
});
}
@Test
void defaultUserNotCreatedIfAuthenticationProviderBeanPresent(CapturedOutput output) {
this.contextRunner.with(AlternativeFormOfAuthentication.nonPresent())
.withUserConfiguration(TestAuthenticationProviderConfiguration.class)
.run((context) -> {
assertThat(outcomeOfMissingAlternativeCondition(context).isMatch()).isTrue();
AuthenticationProvider provider = context.getBean(AuthenticationProvider.class);
assertThat(output).doesNotContain("Using generated security password: ");
TestingAuthenticationToken token = new TestingAuthenticationToken("foo", "bar");
assertThat(provider.authenticate(token)).isNotNull();
});
}
@Test
void defaultUserNotCreatedIfJwtDecoderBeanPresent() {
this.contextRunner.with(AlternativeFormOfAuthentication.nonPresent())
.withUserConfiguration(TestConfigWithJwtDecoder.class)
.run((context) -> {
assertThat(outcomeOfMissingAlternativeCondition(context).isMatch()).isTrue();
assertThat(context).hasSingleBean(JwtDecoder.class);
assertThat(context).doesNotHaveBean(UserDetailsService.class);
});
}
@Test
void userDetailsServiceWhenPasswordEncoderAbsentAndDefaultPassword() {
this.contextRunner.with(AlternativeFormOfAuthentication.nonPresent())
.withUserConfiguration(TestSecurityConfiguration.class)
.run(((context) -> {
InMemoryUserDetailsManager userDetailsService = context.getBean(InMemoryUserDetailsManager.class);
String password = userDetailsService.loadUserByUsername("user").getPassword();
assertThat(password).startsWith("{noop}");
}));
}
@Test
void userDetailsServiceWhenPasswordEncoderAbsentAndRawPassword() {
testPasswordEncoding(TestSecurityConfiguration.class, "secret", "{noop}secret");
}
@Test
void userDetailsServiceWhenPasswordEncoderAbsentAndEncodedPassword() {
String password = "{bcrypt}$2a$10$sCBi9fy9814vUPf2ZRbtp.fR5/VgRk2iBFZ.ypu5IyZ28bZgxrVDa";
testPasswordEncoding(TestSecurityConfiguration.class, password, password);
}
@Test
void userDetailsServiceWhenPasswordEncoderBeanPresent() {
testPasswordEncoding(TestConfigWithPasswordEncoder.class, "secret", "secret");
}
@ParameterizedTest
@EnumSource
void whenClassOfAlternativeIsPresentUserDetailsServiceBacksOff(AlternativeFormOfAuthentication alternative) {
this.contextRunner.with(alternative.present())
.run((context) -> assertThat(context).doesNotHaveBean(InMemoryUserDetailsManager.class));
}
@ParameterizedTest
@EnumSource
void whenAlternativeIsPresentAndUsernameIsConfiguredThenUserDetailsServiceIsAutoConfigured(
AlternativeFormOfAuthentication alternative) {
this.contextRunner.with(alternative.present())
.withPropertyValues("spring.security.user.name=alice")
.run(((context) -> assertThat(context).hasSingleBean(InMemoryUserDetailsManager.class)));
}
@ParameterizedTest
@EnumSource
void whenAlternativeIsPresentAndPasswordIsConfiguredThenUserDetailsServiceIsAutoConfigured(
AlternativeFormOfAuthentication alternative) {
this.contextRunner.with(alternative.present())
.withPropertyValues("spring.security.user.password=secret")
.run(((context) -> assertThat(context).hasSingleBean(InMemoryUserDetailsManager.class)));
}
private void testPasswordEncoding(Class<?> configClass, String providedPassword, String expectedPassword) {
this.contextRunner.with(AlternativeFormOfAuthentication.nonPresent())
.withUserConfiguration(configClass)
.withPropertyValues("spring.security.user.password=" + providedPassword)
.run(((context) -> {
InMemoryUserDetailsManager userDetailsService = context.getBean(InMemoryUserDetailsManager.class);
String password = userDetailsService.loadUserByUsername("user").getPassword();
assertThat(password).isEqualTo(expectedPassword);
}));
}
private ConditionOutcome outcomeOfMissingAlternativeCondition(ConfigurableApplicationContext context) {
ConditionAndOutcomes conditionAndOutcomes = ConditionEvaluationReport.get(context.getBeanFactory())
.getConditionAndOutcomesBySource()
.get(UserDetailsServiceAutoConfiguration.class.getName());
assertThat(conditionAndOutcomes).isNotNull();
for (ConditionAndOutcome conditionAndOutcome : conditionAndOutcomes) {
if (conditionAndOutcome
.getCondition() instanceof MissingAlternativeUserDetailsManagerOrUserPropertiesConfigured) {
return conditionAndOutcome.getOutcome();
}
}
fail("No outcome for MissingAlternativeUserDetailsManagerOrUserPropertiesConfigured found");
throw new AssertionError("Should not be reached");
}
@Configuration(proxyBeanMethods = false)
static
|
UserDetailsServiceAutoConfigurationTests
|
java
|
elastic__elasticsearch
|
x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/SemanticTextInferenceFieldsIT.java
|
{
"start": 13524,
"end": 13637
}
|
enum ____ {
NONE,
INFERENCE_FIELDS_EXCLUDED,
INFERENCE_FIELDS_INCLUDED
}
}
|
ExpectedSource
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/parser/error/ParseErrorTest_12.java
|
{
"start": 189,
"end": 490
}
|
class ____ extends TestCase {
public void test_for_error() throws Exception {
Exception error = null;
try {
JSON.parse("new \"Date\"");
} catch (JSONException ex) {
error = ex;
}
Assert.assertNotNull(error);
}
}
|
ParseErrorTest_12
|
java
|
spring-projects__spring-security
|
config/src/test/java/org/springframework/security/config/method/PreAuthorizeServiceImpl.java
|
{
"start": 718,
"end": 894
}
|
class ____ {
@PreAuthorizeAdminRole
public void preAuthorizeAdminRole() {
}
@ContactPermission
public void contactPermission(Contact contact) {
}
}
|
PreAuthorizeServiceImpl
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/jsontype/TestPolymorphicDeduction.java
|
{
"start": 1045,
"end": 1224
}
|
interface ____ {}
@JsonTypeInfo(use = DEDUCTION)
@JsonSubTypes( {@Type(LiveCat.class), @Type(DeadCat.class)})
// A supertype containing common properties
public static
|
Feline
|
java
|
apache__spark
|
sql/catalyst/src/main/java/org/apache/spark/sql/connector/read/VariantAccessInfo.java
|
{
"start": 1631,
"end": 3922
}
|
class ____ implements Serializable {
private final String columnName;
private final StructType extractedSchema;
/**
* Creates variant access information for a variant column.
*
* @param columnName The name of the variant column
* @param extractedSchema The schema representing extracted fields from the variant.
* Each field represents one variant field access, with field names
* typically being ordinals (e.g., "0", "1", "2") and metadata
* containing variant-specific information like JSON path.
*/
public VariantAccessInfo(String columnName, StructType extractedSchema) {
this.columnName = Objects.requireNonNull(columnName, "columnName cannot be null");
this.extractedSchema =
Objects.requireNonNull(extractedSchema, "extractedSchema cannot be null");
}
/**
* Returns the name of the variant column.
*/
public String columnName() {
return columnName;
}
/**
* Returns the schema representing fields extracted from the variant column.
* <p>
* The schema structure is:
* <ul>
* <li>Field names: Typically ordinals ("0", "1", "2", ...) representing access order</li>
* <li>Field types: The target data type for each field extraction</li>
* <li>Field metadata: Contains variant-specific information such as JSON path,
* timezone, and error handling mode</li>
* </ul>
* <p>
* Data sources should use this schema to determine what fields to extract from the variant
* and what types they should be converted to.
*/
public StructType extractedSchema() {
return extractedSchema;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
VariantAccessInfo that = (VariantAccessInfo) o;
return columnName.equals(that.columnName) &&
extractedSchema.equals(that.extractedSchema);
}
@Override
public int hashCode() {
return Objects.hash(columnName, extractedSchema);
}
@Override
public String toString() {
return "VariantAccessInfo{" +
"columnName='" + columnName + '\'' +
", extractedSchema=" + extractedSchema +
'}';
}
}
|
VariantAccessInfo
|
java
|
apache__camel
|
core/camel-api/src/main/java/org/apache/camel/BinaryPredicate.java
|
{
"start": 1178,
"end": 2065
}
|
interface ____ extends Predicate {
/**
* Gets the operator
*
* @return the operator text
*/
String getOperator();
/**
* Gets the left hand side expression
*
* @return the left expression
*/
Expression getLeft();
/**
* Gets the right hand side expression
*
* @return the right expression
*/
Expression getRight();
/**
* Evaluates the predicate on the message exchange and returns <tt>null</tt> if this exchange matches the predicate.
* If it did <b>not</b> match, then a failure message is returned detailing the reason, which can be used by end
* users to understand the failure.
*
* @param exchange the message exchange
* @return <tt>null</tt> if the predicate matches.
*/
String matchesReturningFailureMessage(Exchange exchange);
}
|
BinaryPredicate
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/legacy/Role.java
|
{
"start": 240,
"end": 742
}
|
class ____ {
long id;
String name;
Set interventions = new HashSet();
private List bunchOfStrings;
long getId() {
return id;
}
void setId(long newValue) {
id = newValue;
}
String getName() {
return name;
}
void setName(String newValue) {
name = newValue;
}
public Set getInterventions() {
return interventions;
}
public void setInterventions(Set iv) {
interventions = iv;
}
List getBunchOfStrings() {
return bunchOfStrings;
}
void setBunchOfStrings(List s) {
bunchOfStrings = s;
}
}
|
Role
|
java
|
spring-projects__spring-data-jpa
|
spring-data-jpa/src/test/java/org/springframework/data/jpa/repository/support/JpaMetamodelEntityInformationIntegrationTests.java
|
{
"start": 1496,
"end": 10746
}
|
class ____ {
@PersistenceContext EntityManager em;
abstract String getMetadadataPersistenceUnitName();
@Test
void detectsIdTypeForEntity() {
JpaEntityInformation<User, ?> information = getEntityInformation(User.class, em);
assertThat(information.getIdType()).isAssignableFrom(Integer.class);
}
@Test // DATAJPA-141
void detectsIdTypeForMappedSuperclass() {
JpaEntityInformation<?, ?> information = getEntityInformation(AbstractPersistable.class, em);
assertThat(information.getIdType()).isEqualTo(Serializable.class);
}
@Test // DATAJPA-50
void detectsIdClass() {
EntityInformation<PersistableWithIdClass, ?> information = getEntityInformation(PersistableWithIdClass.class, em);
assertThat(information.getIdType()).isAssignableFrom(PersistableWithIdClassPK.class);
}
@Test // DATAJPA-50
void returnsIdOfPersistableInstanceCorrectly() {
PersistableWithIdClass entity = new PersistableWithIdClass(2L, 4L);
JpaEntityInformation<PersistableWithIdClass, ?> information = getEntityInformation(PersistableWithIdClass.class,
em);
Object id = information.getId(entity);
assertThat(id).isEqualTo(new PersistableWithIdClassPK(2L, 4L));
}
@Test // GH-2330
void returnsIdOfSingleAttributeIdClassCorrectly() {
PersistableWithSingleIdClass entity = new PersistableWithSingleIdClass(2L);
JpaEntityInformation<PersistableWithSingleIdClass, ?> information = getEntityInformation(
PersistableWithSingleIdClass.class, em);
Object id = information.getId(entity);
assertThat(id).isEqualTo(new PersistableWithSingleIdClassPK(2L));
}
@Test // DATAJPA-413
void returnsIdOfEntityWithIdClassCorrectly() {
Item item = new Item(2, 1);
JpaEntityInformation<Item, ?> information = getEntityInformation(Item.class, em);
Object id = information.getId(item);
assertThat(id).isEqualTo(new ItemId(2, 1));
}
@Test // DATAJPA-413
void returnsDerivedIdOfEntityWithIdClassCorrectly() {
Item item = new Item(1, 2);
Site site = new Site(3);
ItemSite itemSite = new ItemSite(item, site);
JpaEntityInformation<ItemSite, ?> information = getEntityInformation(ItemSite.class, em);
Object id = information.getId(itemSite);
assertThat(id).isEqualTo(new ItemSiteId(new ItemId(1, 2), 3));
}
@Test // DATAJPA-413
void returnsPartialEmptyDerivedIdOfEntityWithIdClassCorrectly() {
Item item = new Item(1, null);
Site site = new Site(3);
ItemSite itemSite = new ItemSite(item, site);
JpaEntityInformation<ItemSite, ?> information = getEntityInformation(ItemSite.class, em);
Object id = information.getId(itemSite);
assertThat(id).isEqualTo(new ItemSiteId(new ItemId(1, null), 3));
}
@Test // DATAJPA-119
void favoursVersionAnnotationIfPresent() {
EntityInformation<VersionedUser, Long> information = new JpaMetamodelEntityInformation<>(VersionedUser.class,
em.getMetamodel(), em.getEntityManagerFactory().getPersistenceUnitUtil());
VersionedUser entity = new VersionedUser();
assertThat(information.isNew(entity)).isTrue();
entity.setId(1L);
assertThat(information.isNew(entity)).isTrue();
entity.setVersion(1L);
assertThat(information.isNew(entity)).isFalse();
entity.setId(null);
assertThat(information.isNew(entity)).isFalse();
}
@Test // DATAJPA-348
void findsIdClassOnMappedSuperclass() {
EntityManagerFactory emf = Persistence.createEntityManagerFactory(getMetadadataPersistenceUnitName());
EntityManager em = emf.createEntityManager();
EntityInformation<Sample, BaseIdClass> information = new JpaMetamodelEntityInformation<>(Sample.class,
em.getMetamodel(), em.getEntityManagerFactory().getPersistenceUnitUtil());
assertThat(information.getIdType()).isEqualTo(BaseIdClass.class);
}
@Test // DATACMNS-357
void detectsNewStateForEntityWithPrimitiveId() {
EntityInformation<SampleWithPrimitiveId, Long> information = new JpaMetamodelEntityInformation<>(
SampleWithPrimitiveId.class, em.getMetamodel(), em.getEntityManagerFactory().getPersistenceUnitUtil());
SampleWithPrimitiveId sample = new SampleWithPrimitiveId();
assertThat(information.isNew(sample)).isTrue();
sample.setId(5L);
assertThat(information.isNew(sample)).isFalse();
}
@Test // DATAJPA-509
void jpaMetamodelEntityInformationShouldRespectExplicitlyConfiguredEntityNameFromOrmXml() {
JpaEntityInformation<Role, Integer> info = new JpaMetamodelEntityInformation<>(Role.class, em.getMetamodel(),
em.getEntityManagerFactory().getPersistenceUnitUtil());
assertThat(info.getEntityName()).isEqualTo("ROLE");
}
@Test // DATAJPA-561
void considersEntityWithPrimitiveVersionPropertySetToDefaultNew() {
EntityInformation<PrimitiveVersionProperty, Serializable> information = new JpaMetamodelEntityInformation<>(
PrimitiveVersionProperty.class, em.getMetamodel(), em.getEntityManagerFactory().getPersistenceUnitUtil());
assertThat(information.isNew(new PrimitiveVersionProperty())).isTrue();
}
@Test // DATAJPA-568
void considersEntityAsNotNewWhenHavingIdSetAndUsingPrimitiveTypeForVersionProperty() {
EntityInformation<PrimitiveVersionProperty, Serializable> information = new JpaMetamodelEntityInformation<>(
PrimitiveVersionProperty.class, em.getMetamodel(), em.getEntityManagerFactory().getPersistenceUnitUtil());
PrimitiveVersionProperty pvp = new PrimitiveVersionProperty();
pvp.id = 100L;
assertThat(information.isNew(pvp)).isFalse();
}
@Test // DATAJPA-568
void fallsBackToIdInspectionForAPrimitiveVersionProperty() {
EntityInformation<PrimitiveVersionProperty, Serializable> information = new JpaMetamodelEntityInformation<>(
PrimitiveVersionProperty.class, em.getMetamodel(), em.getEntityManagerFactory().getPersistenceUnitUtil());
PrimitiveVersionProperty pvp = new PrimitiveVersionProperty();
pvp.version = 1L;
assertThat(information.isNew(pvp)).isTrue();
pvp.id = 1L;
assertThat(information.isNew(pvp)).isFalse();
}
@Test // DATAJPA-582
// @Disabled
void considersEntityWithUnsetCompoundIdNew() {
EntityInformation<SampleWithIdClass, ?> information = getEntityInformation(SampleWithIdClass.class, em);
assertThat(information.isNew(new SampleWithIdClass())).isTrue();
}
@Test // DATAJPA-582
void considersEntityWithSetTimestampVersionNotNew() {
EntityInformation<SampleWithTimestampVersion, ?> information = getEntityInformation(
SampleWithTimestampVersion.class, em);
SampleWithTimestampVersion entity = new SampleWithTimestampVersion();
entity.version = new Timestamp(new Date().getTime());
assertThat(information.isNew(entity)).isFalse();
}
@Test // DATAJPA-582, DATAJPA-581
void considersEntityWithNonPrimitiveNonNullIdTypeNotNew() {
EntityInformation<User, ?> information = getEntityInformation(User.class, em);
User user = new User();
assertThat(information.isNew(user)).isTrue();
user.setId(0);
assertThat(information.isNew(user)).isFalse();
}
@Test // DATAJPA-820
void detectsVersionPropertyOnMappedSuperClass() {
EntityInformation<ConcreteType1, ?> information = getEntityInformation(ConcreteType1.class, em);
assertThat(ReflectionTestUtils.getField(information, "versionAttribute")).isNotNull();
}
@Test // DATAJPA-1105
void correctlyDeterminesIdValueForNestedIdClassesWithNonPrimitiveNonManagedType() {
EntityManagerFactory emf = Persistence.createEntityManagerFactory(getMetadadataPersistenceUnitName());
EntityManager em = emf.createEntityManager();
JpaEntityInformation<EntityWithNestedIdClass, ?> information = getEntityInformation(EntityWithNestedIdClass.class,
em);
EntityWithNestedIdClass entity = new EntityWithNestedIdClass();
entity.id = 23L;
entity.reference = new EntityWithIdClass();
entity.reference.id1 = "one";
entity.reference.id2 = "two";
Object id = information.getId(entity);
assertThat(id).isNotNull();
}
@Test // DATAJPA-1416
@Disabled
void proxiedIdClassElement() {
JpaEntityInformation<SampleWithIdClassIncludingEntity, ?> information = getEntityInformation(
SampleWithIdClassIncludingEntity.class, em);
SampleWithIdClassIncludingEntity entity = new SampleWithIdClassIncludingEntity();
entity.setFirst(23L);
SampleWithIdClassIncludingEntity.OtherEntity$$PsudoProxy inner = new SampleWithIdClassIncludingEntity.OtherEntity$$PsudoProxy();
inner.setOtherId(42L);
entity.setSecond(inner);
Object id = information.getId(entity);
assertThat(id).isInstanceOf(SampleWithIdClassIncludingEntity.SampleWithIdClassPK.class);
SampleWithIdClassIncludingEntity.SampleWithIdClassPK pk = (SampleWithIdClassIncludingEntity.SampleWithIdClassPK) id;
assertThat(pk.getFirst()).isEqualTo(23L);
assertThat(pk.getSecond()).isEqualTo(42L);
}
@Test // DATAJPA-1576
void prefersPrivateGetterOverFieldAccess() {
EntityManagerFactory emf = Persistence.createEntityManagerFactory(getMetadadataPersistenceUnitName());
EntityManager em = emf.createEntityManager();
JpaEntityInformation<EntityWithPrivateIdGetter, ?> information = getEntityInformation(
EntityWithPrivateIdGetter.class, em);
EntityWithPrivateIdGetter entity = new EntityWithPrivateIdGetter();
Object id = information.getId(entity);
assertThat(id).isEqualTo(42L);
}
@SuppressWarnings("serial")
private static
|
JpaMetamodelEntityInformationIntegrationTests
|
java
|
spring-projects__spring-framework
|
spring-webflux/src/test/java/org/springframework/web/reactive/result/method/annotation/RequestMappingMessageConversionIntegrationTests.java
|
{
"start": 20978,
"end": 22699
}
|
class ____ {
@GetMapping("/person")
Person getPerson() {
return new Person("Robert");
}
@GetMapping("/completable-future")
CompletableFuture<Person> getCompletableFuture() {
return CompletableFuture.completedFuture(new Person("Robert"));
}
@GetMapping("/mono")
Mono<Person> getMono() {
return Mono.just(new Person("Robert"));
}
@GetMapping("/mono-empty")
Mono<Person> getMonoEmpty() {
return Mono.empty();
}
@GetMapping("/mono-declared-as-object")
Object getMonoDeclaredAsObject() {
return Mono.just(new Person("Robert"));
}
@GetMapping("/single")
Single<Person> getSingle() {
return Single.just(new Person("Robert"));
}
@GetMapping("/mono-response-entity")
ResponseEntity<Mono<Person>> getMonoResponseEntity() {
Mono<Person> body = Mono.just(new Person("Robert"));
return ResponseEntity.ok(body);
}
@GetMapping("/mono-response-entity-xml")
ResponseEntity<Mono<Person>> getMonoResponseEntityXml() {
Mono<Person> body = Mono.just(new Person("Robert"));
return ResponseEntity.ok().contentType(MediaType.APPLICATION_XML).body(body);
}
@GetMapping("/list")
List<Person> getList() {
return asList(new Person("Robert"), new Person("Marie"));
}
@GetMapping("/publisher")
Publisher<Person> getPublisher() {
return Flux.just(new Person("Robert"), new Person("Marie"));
}
@GetMapping("/flux")
Flux<Person> getFlux() {
return Flux.just(new Person("Robert"), new Person("Marie"));
}
@GetMapping("/observable")
Observable<Person> getObservable() {
return Observable.just(new Person("Robert"), new Person("Marie"));
}
}
@RestController
@SuppressWarnings("unused")
private static
|
PersonResponseBodyController
|
java
|
apache__camel
|
components/camel-google/camel-google-mail/src/generated/java/org/apache/camel/component/google/mail/internal/GoogleMailApiCollection.java
|
{
"start": 1172,
"end": 4888
}
|
class ____ extends ApiCollection<GoogleMailApiName, GoogleMailConfiguration> {
private GoogleMailApiCollection() {
final Map<String, String> aliases = new HashMap<>();
final Map<GoogleMailApiName, ApiMethodHelper<? extends ApiMethod>> apiHelpers = new EnumMap<>(GoogleMailApiName.class);
final Map<Class<? extends ApiMethod>, GoogleMailApiName> apiMethods = new HashMap<>();
List<String> nullableArgs;
aliases.clear();
nullableArgs = Arrays.asList();
apiHelpers.put(GoogleMailApiName.THREADS, new ApiMethodHelper<>(GmailUsersThreadsApiMethod.class, aliases, nullableArgs));
apiMethods.put(GmailUsersThreadsApiMethod.class, GoogleMailApiName.THREADS);
aliases.clear();
nullableArgs = Arrays.asList();
apiHelpers.put(GoogleMailApiName.MESSAGES, new ApiMethodHelper<>(GmailUsersMessagesApiMethod.class, aliases, nullableArgs));
apiMethods.put(GmailUsersMessagesApiMethod.class, GoogleMailApiName.MESSAGES);
aliases.clear();
nullableArgs = Arrays.asList();
apiHelpers.put(GoogleMailApiName.ATTACHMENTS, new ApiMethodHelper<>(GmailUsersMessagesAttachmentsApiMethod.class, aliases, nullableArgs));
apiMethods.put(GmailUsersMessagesAttachmentsApiMethod.class, GoogleMailApiName.ATTACHMENTS);
aliases.clear();
nullableArgs = Arrays.asList();
apiHelpers.put(GoogleMailApiName.LABELS, new ApiMethodHelper<>(GmailUsersLabelsApiMethod.class, aliases, nullableArgs));
apiMethods.put(GmailUsersLabelsApiMethod.class, GoogleMailApiName.LABELS);
aliases.clear();
nullableArgs = Arrays.asList();
apiHelpers.put(GoogleMailApiName.HISTORY, new ApiMethodHelper<>(GmailUsersHistoryApiMethod.class, aliases, nullableArgs));
apiMethods.put(GmailUsersHistoryApiMethod.class, GoogleMailApiName.HISTORY);
aliases.clear();
nullableArgs = Arrays.asList();
apiHelpers.put(GoogleMailApiName.DRAFTS, new ApiMethodHelper<>(GmailUsersDraftsApiMethod.class, aliases, nullableArgs));
apiMethods.put(GmailUsersDraftsApiMethod.class, GoogleMailApiName.DRAFTS);
aliases.clear();
nullableArgs = Arrays.asList();
apiHelpers.put(GoogleMailApiName.USERS, new ApiMethodHelper<>(GmailUsersApiMethod.class, aliases, nullableArgs));
apiMethods.put(GmailUsersApiMethod.class, GoogleMailApiName.USERS);
setApiHelpers(apiHelpers);
setApiMethods(apiMethods);
}
public GoogleMailConfiguration getEndpointConfiguration(GoogleMailApiName apiName) {
GoogleMailConfiguration result = null;
switch (apiName) {
case THREADS:
result = new GmailUsersThreadsEndpointConfiguration();
break;
case MESSAGES:
result = new GmailUsersMessagesEndpointConfiguration();
break;
case ATTACHMENTS:
result = new GmailUsersMessagesAttachmentsEndpointConfiguration();
break;
case LABELS:
result = new GmailUsersLabelsEndpointConfiguration();
break;
case HISTORY:
result = new GmailUsersHistoryEndpointConfiguration();
break;
case DRAFTS:
result = new GmailUsersDraftsEndpointConfiguration();
break;
case USERS:
result = new GmailUsersEndpointConfiguration();
break;
}
return result;
}
public static GoogleMailApiCollection getCollection() {
return GoogleMailApiCollectionHolder.INSTANCE;
}
private static final
|
GoogleMailApiCollection
|
java
|
elastic__elasticsearch
|
qa/system-indices/src/main/java/org/elasticsearch/system/indices/SystemIndicesQA.java
|
{
"start": 6101,
"end": 6908
}
|
class ____ extends BaseRestHandler {
@Override
public String getName() {
return "create net new system index for qa";
}
@Override
public List<Route> routes() {
return List.of(new Route(Method.PUT, "/_net_new_sys_index/_create"));
}
@Override
protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
return channel -> client.admin()
.indices()
.create(new CreateIndexRequest(".net-new-system-index-primary"), new RestToXContentListener<>(channel));
}
@Override
public boolean allowSystemIndexAccessByDefault() {
return true;
}
}
private static
|
CreateNetNewSystemIndexHandler
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/internal/iterables/Iterables_assertAreAtLeast_Test.java
|
{
"start": 1532,
"end": 2891
}
|
class ____ extends IterablesWithConditionsBaseTest {
@Test
void should_pass_if_satisfies_at_least_times_condition() {
actual = newArrayList("Yoda", "Luke", "Leia");
iterables.assertAreAtLeast(someInfo(), actual, 2, jedi);
verify(conditions).assertIsNotNull(jedi);
}
@Test
void should_pass_if_all_satisfies_condition_() {
actual = newArrayList("Yoda", "Luke", "Obiwan");
iterables.assertAreAtLeast(someInfo(), actual, 2, jedi);
verify(conditions).assertIsNotNull(jedi);
}
@Test
void should_throw_error_if_condition_is_null() {
assertThatNullPointerException().isThrownBy(() -> {
actual = newArrayList("Yoda", "Luke");
iterables.assertAreAtLeast(someInfo(), actual, 2, null);
}).withMessage("The condition to evaluate should not be null");
verify(conditions).assertIsNotNull(null);
}
@Test
void should_fail_if_condition_is_not_met_enough() {
testCondition.shouldMatch(false);
AssertionInfo info = someInfo();
actual = newArrayList("Yoda", "Solo", "Leia");
Throwable error = catchThrowable(() -> iterables.assertAreAtLeast(someInfo(), actual, 2, jedi));
assertThat(error).isInstanceOf(AssertionError.class);
verify(conditions).assertIsNotNull(jedi);
verify(failures).failure(info, elementsShouldBeAtLeast(actual, 2, jedi));
}
}
|
Iterables_assertAreAtLeast_Test
|
java
|
micronaut-projects__micronaut-core
|
http-server-tck/src/main/java/io/micronaut/http/server/tck/tests/OctetTest.java
|
{
"start": 2528,
"end": 3231
}
|
class ____ {
static final byte[] BODY_BYTES = IntStream.iterate(1, i -> i + 1)
.limit(256)
.map(i -> (byte) i)
.collect(ByteArrayOutputStream::new, ByteArrayOutputStream::write, (a, b) -> a.write(b.toByteArray(), 0, b.size()))
.toByteArray();
@Get(produces = MediaType.APPLICATION_OCTET_STREAM)
HttpResponse<byte[]> byteArray() {
return HttpResponse.ok(BODY_BYTES);
}
@Get(value = "/byteBody", produces = MediaType.APPLICATION_OCTET_STREAM)
ByteBody byteBody(ServerHttpRequest<?> request) {
return request.byteBodyFactory().adapt(BODY_BYTES.clone());
}
}
}
|
OctetController
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
|
{
"start": 3453,
"end": 13621
}
|
class ____ {
// These are a few internal configuration keys used for unit tests.
// They can't be set unless the static boolean allowUnitTestSettings has
// been set to true.
@VisibleForTesting
static final String INTERNAL_DFS_DATANODE_SCAN_PERIOD_MS =
"internal.dfs.datanode.scan.period.ms.key";
@VisibleForTesting
static final String INTERNAL_VOLUME_SCANNER_SCAN_RESULT_HANDLER =
"internal.volume.scanner.scan.result.handler";
@VisibleForTesting
static final String INTERNAL_DFS_BLOCK_SCANNER_MAX_STALENESS_MS =
"internal.dfs.block.scanner.max_staleness.ms";
@VisibleForTesting
static final long INTERNAL_DFS_BLOCK_SCANNER_MAX_STALENESS_MS_DEFAULT =
TimeUnit.MILLISECONDS.convert(15, TimeUnit.MINUTES);
@VisibleForTesting
static final String INTERNAL_DFS_BLOCK_SCANNER_CURSOR_SAVE_INTERVAL_MS =
"dfs.block.scanner.cursor.save.interval.ms";
@VisibleForTesting
static final long
INTERNAL_DFS_BLOCK_SCANNER_CURSOR_SAVE_INTERVAL_MS_DEFAULT =
TimeUnit.MILLISECONDS.convert(10, TimeUnit.MINUTES);
static boolean allowUnitTestSettings = false;
final long targetBytesPerSec;
final long maxStalenessMs;
final long scanPeriodMs;
final long cursorSaveMs;
final boolean skipRecentAccessed;
final Class<? extends ScanResultHandler> resultHandler;
private static long getUnitTestLong(Configuration conf, String key,
long defVal) {
if (allowUnitTestSettings) {
return conf.getLong(key, defVal);
} else {
return defVal;
}
}
/**
* Determine the configured block scanner interval.
*
* For compatibility with prior releases of HDFS, if the
* configured value is zero then the scan period is
* set to 3 weeks.
*
* If the configured value is less than zero then the scanner
* is disabled.
*
* @param conf Configuration object.
* @return block scan period in milliseconds.
*/
private static long getConfiguredScanPeriodMs(Configuration conf) {
long tempScanPeriodMs = getUnitTestLong(
conf, INTERNAL_DFS_DATANODE_SCAN_PERIOD_MS,
TimeUnit.MILLISECONDS.convert(conf.getLong(
DFS_DATANODE_SCAN_PERIOD_HOURS_KEY,
DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT), TimeUnit.HOURS));
if (tempScanPeriodMs == 0) {
tempScanPeriodMs = TimeUnit.MILLISECONDS.convert(
DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT, TimeUnit.HOURS);
}
return tempScanPeriodMs;
}
@SuppressWarnings("unchecked")
Conf(Configuration conf) {
this.targetBytesPerSec = Math.max(0L, conf.getLong(
DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND,
DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND_DEFAULT));
this.maxStalenessMs = Math.max(0L, getUnitTestLong(conf,
INTERNAL_DFS_BLOCK_SCANNER_MAX_STALENESS_MS,
INTERNAL_DFS_BLOCK_SCANNER_MAX_STALENESS_MS_DEFAULT));
this.scanPeriodMs = getConfiguredScanPeriodMs(conf);
this.cursorSaveMs = Math.max(0L, getUnitTestLong(conf,
INTERNAL_DFS_BLOCK_SCANNER_CURSOR_SAVE_INTERVAL_MS,
INTERNAL_DFS_BLOCK_SCANNER_CURSOR_SAVE_INTERVAL_MS_DEFAULT));
this.skipRecentAccessed = conf.getBoolean(
DFS_BLOCK_SCANNER_SKIP_RECENT_ACCESSED,
DFS_BLOCK_SCANNER_SKIP_RECENT_ACCESSED_DEFAULT);
if (allowUnitTestSettings) {
this.resultHandler = (Class<? extends ScanResultHandler>)
conf.getClass(INTERNAL_VOLUME_SCANNER_SCAN_RESULT_HANDLER,
ScanResultHandler.class);
} else {
this.resultHandler = ScanResultHandler.class;
}
}
}
public BlockScanner(DataNode datanode) {
this(datanode, datanode.getConf());
}
public BlockScanner(DataNode datanode, Configuration conf) {
this.datanode = datanode;
setJoinVolumeScannersTimeOutMs(
conf.getLong(DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY,
DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_DEFAULT));
this.conf = new Conf(conf);
if (isEnabled()) {
LOG.info("Initialized block scanner with targetBytesPerSec {}",
this.conf.targetBytesPerSec);
} else {
LOG.info("Disabled block scanner.");
}
}
/**
* Returns true if the block scanner is enabled.
*
* If the block scanner is disabled, no volume scanners will be created, and
* no threads will start.
*/
public boolean isEnabled() {
return (conf.scanPeriodMs > 0) && (conf.targetBytesPerSec > 0);
}
/**
* Returns true if there is any scanner thread registered.
*/
public synchronized boolean hasAnyRegisteredScanner() {
return !scanners.isEmpty();
}
/**
* Set up a scanner for the given block pool and volume.
*
* @param ref A reference to the volume.
*/
public synchronized void addVolumeScanner(FsVolumeReference ref) {
boolean success = false;
try {
FsVolumeSpi volume = ref.getVolume();
if (!isEnabled()) {
LOG.debug("Not adding volume scanner for {}, because the block " +
"scanner is disabled.", volume);
return;
}
VolumeScanner scanner = scanners.get(volume.getStorageID());
if (scanner != null) {
LOG.error("Already have a scanner for volume {}.",
volume);
return;
}
LOG.debug("Adding scanner for volume {} (StorageID {})",
volume, volume.getStorageID());
scanner = new VolumeScanner(conf, datanode, ref);
scanner.start();
scanners.put(volume.getStorageID(), scanner);
success = true;
} finally {
if (!success) {
// If we didn't create a new VolumeScanner object, we don't
// need this reference to the volume.
IOUtils.cleanupWithLogger(null, ref);
}
}
}
/**
* Stops and removes a volume scanner.
*
* This function will block until the volume scanner has stopped.
*
* @param volume The volume to remove.
*/
public synchronized void removeVolumeScanner(FsVolumeSpi volume) {
if (!isEnabled()) {
LOG.debug("Not removing volume scanner for {}, because the block " +
"scanner is disabled.", volume.getStorageID());
return;
}
VolumeScanner scanner = scanners.get(volume.getStorageID());
if (scanner == null) {
LOG.warn("No scanner found to remove for volumeId {}",
volume.getStorageID());
return;
}
LOG.info("Removing scanner for volume {} (StorageID {})",
volume, volume.getStorageID());
scanner.shutdown();
scanners.remove(volume.getStorageID());
Uninterruptibles.joinUninterruptibly(scanner, 5, TimeUnit.MINUTES);
}
/**
* Stops and removes all volume scanners.
*
* This function is called on shutdown. It will return even if some of
* the scanners don't terminate in time. Since the scanners are daemon
* threads and do not alter the block content, it is safe to ignore
* such conditions on shutdown.
*/
public synchronized void removeAllVolumeScanners() {
for (Entry<String, VolumeScanner> entry : scanners.entrySet()) {
entry.getValue().shutdown();
}
for (Entry<String, VolumeScanner> entry : scanners.entrySet()) {
Uninterruptibles.joinUninterruptibly(entry.getValue(),
getJoinVolumeScannersTimeOutMs(), TimeUnit.MILLISECONDS);
}
scanners.clear();
}
/**
* Enable scanning a given block pool id.
*
* @param bpid The block pool id to enable scanning for.
*/
synchronized void enableBlockPoolId(String bpid) {
Preconditions.checkNotNull(bpid);
for (VolumeScanner scanner : scanners.values()) {
scanner.enableBlockPoolId(bpid);
}
}
/**
* Disable scanning a given block pool id.
*
* @param bpid The block pool id to disable scanning for.
*/
synchronized void disableBlockPoolId(String bpid) {
Preconditions.checkNotNull(bpid);
for (VolumeScanner scanner : scanners.values()) {
scanner.disableBlockPoolId(bpid);
}
}
@VisibleForTesting
synchronized VolumeScanner.Statistics getVolumeStats(String volumeId) {
VolumeScanner scanner = scanners.get(volumeId);
if (scanner == null) {
return null;
}
return scanner.getStatistics();
}
synchronized void printStats(StringBuilder p) {
// print out all bpids that we're scanning ?
for (Entry<String, VolumeScanner> entry : scanners.entrySet()) {
entry.getValue().printStats(p);
}
}
/**
* Mark a block as "suspect."
*
* This means that we should try to rescan it soon. Note that the
* VolumeScanner keeps a list of recently suspicious blocks, which
* it uses to avoid rescanning the same block over and over in a short
* time frame.
*
* @param storageId The ID of the storage where the block replica
* is being stored.
* @param block The block's ID and block pool id.
*/
synchronized void markSuspectBlock(String storageId, ExtendedBlock block) {
if (!isEnabled()) {
LOG.debug("Not scanning suspicious block {} on {}, because the block " +
"scanner is disabled.", block, storageId);
return;
}
VolumeScanner scanner = scanners.get(storageId);
if (scanner == null) {
// This could happen if the volume is in the process of being removed.
// The removal process shuts down the VolumeScanner, but the volume
// object stays around as long as there are references to it (which
// should not be that long.)
LOG.info("Not scanning suspicious block {} on {}, because there is no " +
"volume scanner for that storageId.", block, storageId);
return;
}
scanner.markSuspectBlock(block);
}
public long getJoinVolumeScannersTimeOutMs() {
return joinVolumeScannersTimeOutMs;
}
public void setJoinVolumeScannersTimeOutMs(long joinScannersTimeOutMs) {
this.joinVolumeScannersTimeOutMs = joinScannersTimeOutMs;
}
@InterfaceAudience.Private
public static
|
Conf
|
java
|
apache__camel
|
components/camel-jms/src/test/java/org/apache/camel/component/jms/issues/JmsInOutParallelTest.java
|
{
"start": 1541,
"end": 3750
}
|
class ____ extends AbstractJMSTest {
@Order(2)
@RegisterExtension
public static CamelContextExtension camelContextExtension = new DefaultCamelContextExtension();
protected CamelContext context;
protected ProducerTemplate template;
protected ConsumerTemplate consumer;
@Test
public void testInOutParallel() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:received");
mock.setAssertPeriod(2000);
mock.expectedMessageCount(5);
String outPayload = template.requestBody("direct:test", "test", String.class);
assertEquals("Fully done", outPayload);
mock.assertIsSatisfied();
}
@Override
protected String getComponentName() {
return "activemq";
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from("direct:test")
.setBody(constant("1,2,3,4,5"))
.to(ExchangePattern.InOut, "activemq:queue:JmsInOutParallelTest.1?requestTimeout=2000")
.split().tokenize(",").parallelProcessing()
.to(ExchangePattern.InOut, "activemq:queue:JmsInOutParallelTest.2?requestTimeout=2000")
.to("mock:received")
.end()
.setBody(constant("Fully done"))
.log("Finished");
from("activemq:queue:JmsInOutParallelTest.1")
.log("Received on queue test1");
from("activemq:queue:JmsInOutParallelTest.2")
.log("Received on queue test2")
.setBody(constant("Some reply"))
.delay(constant(100));
}
};
}
@Override
public CamelContextExtension getCamelContextExtension() {
return camelContextExtension;
}
@BeforeEach
void setUpRequirements() {
context = camelContextExtension.getContext();
template = camelContextExtension.getProducerTemplate();
consumer = camelContextExtension.getConsumerTemplate();
}
}
|
JmsInOutParallelTest
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GcTimeMonitor.java
|
{
"start": 8338,
"end": 8506
}
|
class ____ this interface
* when initializing a GcTimeMonitor to receive alerts when GC time
* percentage exceeds the specified threshold.
*/
public
|
implementing
|
java
|
apache__flink
|
flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/generated/GeneratedNamespaceTableAggsHandleFunction.java
|
{
"start": 1098,
"end": 1688
}
|
class ____<N>
extends GeneratedClass<NamespaceTableAggsHandleFunction<N>> {
private static final long serialVersionUID = 2L;
@VisibleForTesting
public GeneratedNamespaceTableAggsHandleFunction(
String className, String code, Object[] references) {
super(className, code, references, new Configuration());
}
public GeneratedNamespaceTableAggsHandleFunction(
String className, String code, Object[] references, ReadableConfig conf) {
super(className, code, references, conf);
}
}
|
GeneratedNamespaceTableAggsHandleFunction
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/format/EnumFormatShapeTest.java
|
{
"start": 1158,
"end": 1237
}
|
enum ____
{
A, B;
}
// for [databind#572]
static
|
PoAsArray
|
java
|
apache__camel
|
components/camel-zip-deflater/src/test/java/org/apache/camel/dataformat/deflater/GzipDataFormatFileDeleteTest.java
|
{
"start": 1341,
"end": 2739
}
|
class ____ extends CamelTestSupport {
@Override
public void doPreSetup() {
deleteDirectory("target/data/gzip");
}
@Test
public void testGzipFileDelete() throws Exception {
NotifyBuilder oneExchangeDone = new NotifyBuilder(context).whenDone(1).create();
getMockEndpoint("mock:result").expectedMessageCount(1);
template.sendBodyAndHeader("file:target/data/gzip", "Hello World", Exchange.FILE_NAME, "hello.txt");
MockEndpoint.assertIsSatisfied(context);
// wait till the exchange is done which means the file should then have been deleted
oneExchangeDone.matchesWaitTime();
File in = new File("target/data/gzip/hello.txt");
assertFalse(in.exists(), "Should have been deleted " + in);
File out = new File("target/data/gzip/out/hello.txt.gz");
assertTrue(out.exists(), "Should have been created " + out);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("file:target/data/gzip?initialDelay=0&delay=10&delete=true")
.marshal().gzipDeflater()
.to("file:target/data/gzip/out?fileName=${file:name}.gz")
.to("mock:result");
}
};
}
}
|
GzipDataFormatFileDeleteTest
|
java
|
square__retrofit
|
retrofit/java-test/src/test/java/retrofit2/RetrofitTest.java
|
{
"start": 2477,
"end": 3162
}
|
interface ____ {
@GET("/")
Call<String> disallowed();
@POST("/")
Call<ResponseBody> disallowed(@Body String body);
@GET("/")
Call<retrofit2.Response> badType1();
@GET("/")
Call<okhttp3.Response> badType2();
@GET("/")
Call<ResponseBody> getResponseBody();
@SkipCallbackExecutor
@GET("/")
Call<ResponseBody> getResponseBodySkippedExecutor();
@GET("/")
Call<Void> getVoid();
@POST("/")
Call<ResponseBody> postRequestBody(@Body RequestBody body);
@GET("/")
Call<ResponseBody> queryString(@Query("foo") String foo);
@GET("/")
Call<ResponseBody> queryObject(@Query("foo") Object foo);
}
|
CallMethod
|
java
|
apache__dubbo
|
dubbo-plugin/dubbo-mutiny/src/main/java/org/apache/dubbo/mutiny/calls/MutinyClientCalls.java
|
{
"start": 1445,
"end": 6341
}
|
class ____ {
private MutinyClientCalls() {}
/**
* Implements a unary -> unary call as Uni -> Uni
*
* @param invoker invoker
* @param uniRequest the uni with request
* @param methodDescriptor the method descriptor
* @return the uni with response
*/
public static <TRequest, TResponse, TInvoker> Uni<TResponse> oneToOne(
Invoker<TInvoker> invoker, Uni<TRequest> uniRequest, StubMethodDescriptor methodDescriptor) {
try {
return uniRequest.onItem().transformToUni(request -> Uni.createFrom()
.emitter((UniEmitter<? super TResponse> emitter) -> {
StubInvocationUtil.unaryCall(
invoker, methodDescriptor, request, new StreamObserver<TResponse>() {
@Override
public void onNext(TResponse value) {
emitter.complete(value);
}
@Override
public void onError(Throwable t) {
emitter.fail(t);
}
@Override
public void onCompleted() {
// No-op
}
});
}));
} catch (Throwable throwable) {
return Uni.createFrom().failure(throwable);
}
}
/**
* Implements a unary -> stream call as Uni -> Multi
*
* @param invoker invoker
* @param uniRequest the uni with request
* @param methodDescriptor the method descriptor
* @return the multi with response
*/
public static <TRequest, TResponse, TInvoker> Multi<TResponse> oneToMany(
Invoker<TInvoker> invoker, Uni<TRequest> uniRequest, StubMethodDescriptor methodDescriptor) {
try {
return uniRequest.onItem().transformToMulti(request -> {
ClientTripleMutinyPublisher<TResponse> clientPublisher = new ClientTripleMutinyPublisher<>();
StubInvocationUtil.serverStreamCall(invoker, methodDescriptor, request, clientPublisher);
return clientPublisher;
});
} catch (Throwable throwable) {
return Multi.createFrom().failure(throwable);
}
}
/**
* Implements a stream -> unary call as Multi -> Uni
*
* @param invoker invoker
* @param multiRequest the multi with request
* @param methodDescriptor the method descriptor
* @return the uni with response
*/
public static <TRequest, TResponse, TInvoker> Uni<TResponse> manyToOne(
Invoker<TInvoker> invoker, Multi<TRequest> multiRequest, StubMethodDescriptor methodDescriptor) {
try {
ClientTripleMutinySubscriber<TRequest> clientSubscriber =
multiRequest.subscribe().withSubscriber(new ClientTripleMutinySubscriber<>());
ClientTripleMutinyPublisher<TResponse> clientPublisher = new ClientTripleMutinyPublisher<>(
s -> clientSubscriber.subscribe((CallStreamObserver<TRequest>) s), clientSubscriber::cancel);
return Uni.createFrom()
.publisher(clientPublisher)
.onSubscription()
.invoke(() -> StubInvocationUtil.biOrClientStreamCall(invoker, methodDescriptor, clientPublisher));
} catch (Throwable err) {
return Uni.createFrom().failure(err);
}
}
/**
* Implements a stream -> stream call as Multi -> Multi
*
* @param invoker invoker
* @param multiRequest the multi with request
* @param methodDescriptor the method descriptor
* @return the multi with response
*/
public static <TRequest, TResponse, TInvoker> Multi<TResponse> manyToMany(
Invoker<TInvoker> invoker, Multi<TRequest> multiRequest, StubMethodDescriptor methodDescriptor) {
try {
ClientTripleMutinySubscriber<TRequest> clientSubscriber =
multiRequest.subscribe().withSubscriber(new ClientTripleMutinySubscriber<>());
ClientTripleMutinyPublisher<TResponse> clientPublisher = new ClientTripleMutinyPublisher<>(
s -> clientSubscriber.subscribe((CallStreamObserver<TRequest>) s), clientSubscriber::cancel);
return Multi.createFrom()
.publisher(clientPublisher)
.onSubscription()
.invoke(() -> StubInvocationUtil.biOrClientStreamCall(invoker, methodDescriptor, clientPublisher));
} catch (Throwable err) {
return Multi.createFrom().failure(err);
}
}
}
|
MutinyClientCalls
|
java
|
lettuce-io__lettuce-core
|
src/main/java/io/lettuce/core/AclSetuserArgs.java
|
{
"start": 24235,
"end": 24501
}
|
class ____ extends StringArgument {
AddPassword(String password) {
super(">" + password);
}
@Override
public String toString() {
return getClass().getSimpleName();
}
}
private static
|
AddPassword
|
java
|
netty__netty
|
example/src/main/java/io/netty/example/udt/echo/bytes/ByteEchoServer.java
|
{
"start": 1357,
"end": 2895
}
|
class ____ {
static final int PORT = Integer.parseInt(System.getProperty("port", "8007"));
public static void main(String[] args) throws Exception {
final ThreadFactory factory = new DefaultThreadFactory("udt");
final EventLoopGroup group = new MultiThreadIoEventLoopGroup(
1, factory, NioIoHandler.newFactory(NioUdtProvider.BYTE_PROVIDER));
// Configure the server.
try {
final ServerBootstrap boot = new ServerBootstrap();
boot.group(group)
.channelFactory(NioUdtProvider.BYTE_ACCEPTOR)
.option(ChannelOption.SO_BACKLOG, 10)
.handler(new LoggingHandler(LogLevel.INFO))
.childHandler(new ChannelInitializer<UdtChannel>() {
@Override
public void initChannel(final UdtChannel ch)
throws Exception {
ch.pipeline().addLast(
new LoggingHandler(LogLevel.INFO),
new ByteEchoServerHandler());
}
});
// Start the server.
final ChannelFuture future = boot.bind(PORT).sync();
// Wait until the server socket is closed.
future.channel().closeFuture().sync();
} finally {
// Shut down all event loops to terminate all threads.
group.shutdownGracefully();
}
}
}
|
ByteEchoServer
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java
|
{
"start": 1062,
"end": 3153
}
|
class ____ extends BroadcastShardRequest {
private final QueryBuilder query;
private final boolean explain;
private final boolean rewrite;
private final long nowInMillis;
private final AliasFilter filteringAliases;
public ShardValidateQueryRequest(StreamInput in) throws IOException {
super(in);
query = in.readNamedWriteable(QueryBuilder.class);
if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) {
int typesSize = in.readVInt();
if (typesSize > 0) {
for (int i = 0; i < typesSize; i++) {
in.readString();
}
}
}
filteringAliases = AliasFilter.readFrom(in);
explain = in.readBoolean();
rewrite = in.readBoolean();
nowInMillis = in.readVLong();
}
public ShardValidateQueryRequest(ShardId shardId, AliasFilter filteringAliases, ValidateQueryRequest request) {
super(shardId, request);
this.query = request.query();
this.explain = request.explain();
this.rewrite = request.rewrite();
this.filteringAliases = Objects.requireNonNull(filteringAliases, "filteringAliases must not be null");
this.nowInMillis = request.nowInMillis;
}
public QueryBuilder query() {
return query;
}
public boolean explain() {
return this.explain;
}
public boolean rewrite() {
return this.rewrite;
}
public AliasFilter filteringAliases() {
return filteringAliases;
}
public long nowInMillis() {
return this.nowInMillis;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeNamedWriteable(query);
if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) {
out.writeVInt(0); // no types to filter
}
filteringAliases.writeTo(out);
out.writeBoolean(explain);
out.writeBoolean(rewrite);
out.writeVLong(nowInMillis);
}
}
|
ShardValidateQueryRequest
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestBuildExternalComponents.java
|
{
"start": 1817,
"end": 4242
}
|
class ____ {
protected Configuration conf = new YarnConfiguration();
private File basedir;
// Check component names match with expected
private static void checkComponentNames(List<Component> components,
Set<String> expectedComponents) {
assertEquals(expectedComponents.size(), components.size());
for (Component comp : components) {
assertTrue(expectedComponents.contains(comp.getName()));
}
}
// 1. Build the def file and store on fs
// 2. check component names
private void buildAndCheckComponents(String appName, String appDef,
SliderFileSystem sfs, Set<String> names) throws Throwable {
AppAdminClient client = AppAdminClient.createAppAdminClient(AppAdminClient
.UNIT_TEST_TYPE, conf);
client.actionSave(ExampleAppJson.resourceName(appDef), null, null,
null);
// verify generated conf
List<Component> components =
ServiceApiUtil.getComponents(sfs, appName);
checkComponentNames(components, names);
}
@BeforeEach
public void setup() throws IOException {
basedir = new File("target", "apps");
if (basedir.exists()) {
FileUtils.deleteDirectory(basedir);
} else {
basedir.mkdirs();
}
conf.set(YARN_SERVICE_BASE_PATH, basedir.getAbsolutePath());
}
@AfterEach
public void tearDown() throws IOException {
if (basedir != null) {
FileUtils.deleteDirectory(basedir);
}
}
// Test applications defining external components(SERVICE type)
// can be resolved correctly
@Test
public void testExternalComponentBuild() throws Throwable {
SliderFileSystem sfs = new SliderFileSystem(conf);
Set<String> nameSet = new HashSet<>();
nameSet.add("simple");
nameSet.add("master");
nameSet.add("worker");
// app-1 has 3 components: simple, master, worker
buildAndCheckComponents("app-1", ExampleAppJson.APP_JSON, sfs, nameSet);
buildAndCheckComponents("external-0", ExampleAppJson.EXTERNAL_JSON_0, sfs,
nameSet);
nameSet.add("other");
// external1 has 3 components: simple(SERVICE - app1), master and other
buildAndCheckComponents("external-1", ExampleAppJson.EXTERNAL_JSON_1, sfs,
nameSet);
nameSet.add("another");
// external2 has 2 components: ext(SERVICE - external1), another
buildAndCheckComponents("external-2", ExampleAppJson.EXTERNAL_JSON_2, sfs,
nameSet);
}
}
|
TestBuildExternalComponents
|
java
|
resilience4j__resilience4j
|
resilience4j-spring6/src/test/java/io/github/resilience4j/spring6/ratelimiter/configure/RateLimiterInitializationInAspectTest.java
|
{
"start": 1235,
"end": 3540
}
|
class ____ {
@Bean
public RateLimiterRegistry rateLimiterRegistry() {
RateLimiterConfig backendRateLimiterConfig = RateLimiterConfig.custom()
.limitForPeriod(1)
.limitRefreshPeriod(Duration.ofSeconds(10))
.timeoutDuration(Duration.ofMillis(1))
.build();
return RateLimiterRegistry.custom()
.withRateLimiterConfig(RateLimiterConfig.ofDefaults())
.addRateLimiterConfig(BACKEND, backendRateLimiterConfig)
.build();
}
}
@Autowired
@Qualifier("rateLimiterDummyService")
TestDummyService testDummyService;
@Autowired
RateLimiterRegistry registry;
@Before
public void setUp() {
// ensure no rate limiters are initialized
assertThat(registry.getAllRateLimiters()).isEmpty();
}
@After
public void tearDown() {
registry.getAllRateLimiters().stream().map(RateLimiter::getName).forEach(registry::remove);
}
@Test
public void testCorrectConfigIsUsedInAspect() {
// one successful call within 10s
assertThat(testDummyService.syncSuccess()).isEqualTo("ok");
assertThat(testDummyService.syncSuccess()).isEqualTo("recovered");
}
@Test
public void testDefaultConfigurationIsUsedIfNoConfigurationAspect() {
assertThat(testDummyService.spelSyncNoCfg("foo")).isEqualTo("foo");
assertThat(testDummyService.spelSyncNoCfg("foo")).isEqualTo("foo");
assertThat(registry.getAllRateLimiters()).hasSize(1)
.allMatch(limiter -> limiter.getName().equals("foo"))
.allMatch(limiter -> limiter.getRateLimiterConfig() == registry.getDefaultConfig());
}
@Test
public void testSpecifiedConfigurationIsUsedIfConfigurationAspect() {
assertThat(testDummyService.spelSyncWithCfg("foo")).isEqualTo("foo");
assertThat(testDummyService.spelSyncWithCfg("foo")).isEqualTo("recovered");
assertThat(registry.getAllRateLimiters()).hasSize(1)
.allMatch(limiter -> limiter.getName().equals("foo"))
.allMatch(limiter -> limiter.getRateLimiterConfig() == registry.getConfiguration(BACKEND).orElse(null));
}
}
|
TestConfig
|
java
|
apache__camel
|
components/camel-mail/src/test/java/org/apache/camel/component/mail/MailFetchSizeTest.java
|
{
"start": 1457,
"end": 4135
}
|
class ____ extends CamelTestSupport {
private static final MailboxUser jones = Mailbox.getOrCreateUser("jones", "secret");
@Override
public void doPreSetup() throws Exception {
prepareMailbox();
}
@Test
public void testFetchSize() throws Exception {
assertEquals(5, jones.getInbox().getMessageCount());
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMessageCount(2);
mock.expectedBodiesReceived("Message 0\r\n", "Message 1\r\n");
// should be done within 2 seconds as no delay when started
mock.setResultWaitTime(2000L);
mock.assertIsSatisfied();
Awaitility.await().atMost(500, TimeUnit.MILLISECONDS)
.untilAsserted(() -> assertEquals(3, jones.getInbox().getMessageCount()));
// reset mock to assert the next batch of 2 messages polled
mock.reset();
mock.expectedMessageCount(2);
mock.expectedBodiesReceived("Message 2\r\n", "Message 3\r\n");
// should be done within 2 (delay) + 1 seconds (polling)
mock.setResultWaitTime(3000L);
mock.assertIsSatisfied();
Awaitility.await().atMost(500, TimeUnit.MILLISECONDS)
.untilAsserted(() -> assertEquals(1, jones.getInbox().getMessageCount()));
// reset mock to assert the last message polled
mock.reset();
mock.expectedMessageCount(1);
mock.expectedBodiesReceived("Message 4\r\n");
mock.assertIsSatisfied();
}
private void prepareMailbox() throws Exception {
// connect to mailbox
Mailbox.clearAll();
JavaMailSender sender = new DefaultJavaMailSender();
Store store = sender.getSession().getStore("imap");
store.connect("localhost", Mailbox.getPort(Protocol.imap), jones.getLogin(), jones.getPassword());
Folder folder = store.getFolder("INBOX");
folder.open(Folder.READ_WRITE);
folder.expunge();
// inserts 5 new messages
Message[] messages = new Message[5];
for (int i = 0; i < 5; i++) {
messages[i] = new MimeMessage(sender.getSession());
messages[i].setHeader("Message-ID", Integer.toString(i));
messages[i].setText("Message " + i);
}
folder.appendMessages(messages);
folder.close(true);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from(jones.uriPrefix(Protocol.pop3) + "&fetchSize=2&delay=2000"
+ "&delete=true").to("mock:result");
}
};
}
}
|
MailFetchSizeTest
|
java
|
apache__flink
|
flink-table/flink-table-api-java-bridge/src/main/java/org/apache/flink/connector/datagen/table/types/RowDataGenerator.java
|
{
"start": 1429,
"end": 3087
}
|
class ____ implements DataGenerator<RowData> {
private static final long serialVersionUID = 1L;
private final DataGenerator<?>[] fieldGenerators;
private final List<String> fieldNames;
private final float nullRate;
public RowDataGenerator(
DataGenerator<?>[] fieldGenerators, List<String> fieldNames, float nullRate) {
this.fieldGenerators = fieldGenerators;
this.fieldNames = fieldNames;
this.nullRate = nullRate;
}
@Override
public void open(
String name, FunctionInitializationContext context, RuntimeContext runtimeContext)
throws Exception {
for (int i = 0; i < fieldGenerators.length; i++) {
fieldGenerators[i].open(fieldNames.get(i), context, runtimeContext);
}
}
@Override
public void snapshotState(FunctionSnapshotContext context) throws Exception {
for (DataGenerator<?> generator : fieldGenerators) {
generator.snapshotState(context);
}
}
@Override
public boolean hasNext() {
for (DataGenerator<?> generator : fieldGenerators) {
if (!generator.hasNext()) {
return false;
}
}
return true;
}
@Override
public RowData next() {
if (nullRate == 0f || ThreadLocalRandom.current().nextFloat() > nullRate) {
GenericRowData row = new GenericRowData(fieldNames.size());
for (int i = 0; i < fieldGenerators.length; i++) {
row.setField(i, fieldGenerators[i].next());
}
return row;
}
return null;
}
}
|
RowDataGenerator
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinatorGateway.java
|
{
"start": 1242,
"end": 2033
}
|
interface ____ extends RpcGateway {
void acknowledgeCheckpoint(
final JobID jobID,
final ExecutionAttemptID executionAttemptID,
final long checkpointId,
final CheckpointMetrics checkpointMetrics,
@Nullable final SerializedValue<TaskStateSnapshot> subtaskState);
void declineCheckpoint(DeclineCheckpoint declineCheckpoint);
void reportCheckpointMetrics(
JobID jobID,
ExecutionAttemptID executionAttemptID,
long checkpointId,
CheckpointMetrics checkpointMetrics);
void reportInitializationMetrics(
JobID jobId,
ExecutionAttemptID executionAttemptId,
SubTaskInitializationMetrics initializationMetrics);
}
|
CheckpointCoordinatorGateway
|
java
|
apache__flink
|
flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/operations/DeletePushDownUtils.java
|
{
"start": 8179,
"end": 12257
}
|
class ____
extends ReduceExpressionsRule<ReduceExpressionsRule.Config> {
private static final ReduceExpressionsRule.Config config =
FilterReduceExpressionsRule.FilterReduceExpressionsRuleConfig.DEFAULT;
private static final ReduceExpressionsRuleProxy INSTANCE = new ReduceExpressionsRuleProxy();
public ReduceExpressionsRuleProxy() {
super(config);
}
@Override
public void onMatch(RelOptRuleCall relOptRuleCall) {
throw new UnsupportedOperationException("This shouldn't be called");
}
private boolean reduce(RelNode rel, List<RexNode> expList) {
return reduceExpressions(
rel,
expList,
RelOptPredicateList.EMPTY,
true,
config.matchNullability(),
config.treatDynamicCallsAsConstant());
}
}
/** Return the ResolvedExpression according to Filter. */
private static List<ResolvedExpression> resolveFilter(FlinkContext context, Filter filter) {
Tuple2<RexNode[], RexNode[]> extractedPredicates =
FlinkRexUtil.extractPredicates(
filter.getInput().getRowType().getFieldNames().toArray(new String[0]),
filter.getCondition(),
filter,
filter.getCluster().getRexBuilder());
RexNode[] convertiblePredicates = extractedPredicates._1;
RexNode[] unconvertedPredicates = extractedPredicates._2;
if (unconvertedPredicates.length != 0) {
// if contain any unconverted condition, return null
return null;
}
RexNodeToExpressionConverter converter =
new RexNodeToExpressionConverter(
filter.getCluster().getRexBuilder(),
filter.getInput().getRowType().getFieldNames().toArray(new String[0]),
context.getFunctionCatalog(),
context.getCatalogManager());
List<Expression> filters =
Arrays.stream(convertiblePredicates)
.map(
p -> {
Option<ResolvedExpression> expr = p.accept(converter);
if (expr.isDefined()) {
return expr.get();
} else {
throw new TableException(
String.format(
"%s can not be converted to Expression",
p));
}
})
.collect(Collectors.toList());
ExpressionResolver resolver =
ExpressionResolver.resolverFor(
context.getTableConfig(),
context.getClassLoader(),
name -> Optional.empty(),
context.getFunctionCatalog()
.asLookup(
str -> {
throw new TableException(
"We should not need to lookup any expressions at this point");
}),
context.getCatalogManager().getDataTypeFactory(),
(sqlExpression, inputRowType, outputType) -> {
throw new TableException(
"SQL expression parsing is not supported at this location.");
})
.build();
return resolver.resolve(filters);
}
}
|
ReduceExpressionsRuleProxy
|
java
|
apache__dubbo
|
dubbo-plugin/dubbo-native/src/main/java/org/apache/dubbo/aot/generate/JarScanner.java
|
{
"start": 1258,
"end": 4805
}
|
class ____ {
private static final String PACKAGE_NAME_PREFIX = "org/apache/dubbo";
private final Map<String, String> classNameCache;
private Map<String, Class<?>> classesCache;
private final List<String> resourcePathCache;
protected Map<String, Class<?>> getClasses() {
if (classesCache == null || classesCache.size() == 0) {
this.classesCache = forNames(classNameCache.values());
}
return classesCache;
}
public JarScanner() {
classNameCache = new HashMap<>();
resourcePathCache = new ArrayList<>();
scanURL(PACKAGE_NAME_PREFIX);
}
protected Map<String, Class<?>> forNames(Collection<String> classNames) {
Map<String, Class<?>> classes = new HashMap<>();
classNames.forEach((it) -> {
try {
Class<?> c = Class.forName(it);
classes.put(it, c);
} catch (Throwable ignored) {
}
});
return classes;
}
private void scanURL(String prefixName) {
try {
ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
Enumeration<URL> resources = classLoader.getResources(prefixName);
while (resources.hasMoreElements()) {
URL resource = resources.nextElement();
String protocol = resource.getProtocol();
if ("file".equals(protocol)) {
scanFile(resource.getPath());
} else if ("jar".equals(protocol)) {
try (JarFile jar = ((JarURLConnection) resource.openConnection()).getJarFile()) {
scanJar(jar);
}
}
}
} catch (Throwable ex) {
throw new RuntimeException(ex);
}
}
private void scanFile(String resource) {
File directory = new File(resource);
File[] listFiles = directory.listFiles();
if (listFiles != null) {
for (File file : listFiles) {
if (file.isDirectory()) {
scanFile(file.getPath());
} else {
String path = file.getPath();
if (matchedDubboClasses(path)) {
classNameCache.put(path, toClassName(path));
}
}
}
}
}
private void scanJar(JarFile jar) {
Enumeration<JarEntry> entry = jar.entries();
JarEntry jarEntry;
String name;
while (entry.hasMoreElements()) {
jarEntry = entry.nextElement();
name = jarEntry.getName();
if (name.charAt(0) == '/') {
name = name.substring(1);
}
if (jarEntry.isDirectory()) {
continue;
}
if (matchedDubboClasses(name)) {
classNameCache.put(name, toClassName(name));
} else {
resourcePathCache.add(name);
}
}
}
protected List<String> getResourcePath() {
return resourcePathCache;
}
private boolean matchedDubboClasses(String path) {
return path.startsWith(PACKAGE_NAME_PREFIX) && path.endsWith(".class");
}
private String toClassName(String path) {
return path.contains(File.separator)
? path.substring(0, path.length() - 6).replace(File.separator, ".")
: path.substring(0, path.length() - 6).replace("/", ".");
}
}
|
JarScanner
|
java
|
hibernate__hibernate-orm
|
tooling/metamodel-generator/src/test/java/org/hibernate/processor/test/collectionbasictype/Like.java
|
{
"start": 414,
"end": 526
}
|
class ____<T extends Like.I1 & Like.I2> {
@Id
private Long id;
public abstract Reference<T> getObject();
|
Like
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/ser/AnyGetterOrdering4388Test.java
|
{
"start": 2350,
"end": 2547
}
|
class ____ {
public int a = 1;
@JsonIgnore
public int b = 2;
@JsonAnyGetter
public Map<String, Object> map = new HashMap<>();
}
static
|
IgnoreOnFieldPojo
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/streaming/runtime/operators/TimestampsAndWatermarksOperator.java
|
{
"start": 2581,
"end": 8001
}
|
class ____<T> extends AbstractStreamOperator<T>
implements OneInputStreamOperator<T, T>, ProcessingTimeCallback {
private static final long serialVersionUID = 1L;
private final WatermarkStrategy<T> watermarkStrategy;
/** The timestamp assigner. */
private transient TimestampAssigner<T> timestampAssigner;
/** The watermark generator, initialized during runtime. */
private transient WatermarkGenerator<T> watermarkGenerator;
/** The watermark output gateway, initialized during runtime. */
private transient WatermarkOutput wmOutput;
/** The interval (in milliseconds) for periodic watermark probes. Initialized during runtime. */
private transient long watermarkInterval;
/** Whether to emit intermediate watermarks or only one final watermark at the end of input. */
private final boolean emitProgressiveWatermarks;
/** {@link PausableRelativeClock} that will be paused in case of backpressure. */
private transient PausableRelativeClock inputActivityClock;
public TimestampsAndWatermarksOperator(
StreamOperatorParameters<T> parameters,
WatermarkStrategy<T> watermarkStrategy,
boolean emitProgressiveWatermarks) {
super(parameters);
this.watermarkStrategy = checkNotNull(watermarkStrategy);
this.emitProgressiveWatermarks = emitProgressiveWatermarks;
}
@Override
public void open() throws Exception {
super.open();
inputActivityClock = new PausableRelativeClock(getProcessingTimeService().getClock());
getContainingTask()
.getEnvironment()
.getMetricGroup()
.getIOMetricGroup()
.registerBackPressureListener(inputActivityClock);
timestampAssigner = watermarkStrategy.createTimestampAssigner(this::getMetricGroup);
watermarkGenerator =
emitProgressiveWatermarks
? watermarkStrategy.createWatermarkGenerator(
new WatermarkGeneratorSupplier.Context() {
@Override
public MetricGroup getMetricGroup() {
return TimestampsAndWatermarksOperator.this
.getMetricGroup();
}
@Override
public RelativeClock getInputActivityClock() {
return inputActivityClock;
}
})
: new NoWatermarksGenerator<>();
wmOutput = new WatermarkEmitter(output);
watermarkInterval = getExecutionConfig().getAutoWatermarkInterval();
if (watermarkInterval > 0 && emitProgressiveWatermarks) {
final long now = getProcessingTimeService().getCurrentProcessingTime();
getProcessingTimeService().registerTimer(now + watermarkInterval, this);
}
}
@Override
public void close() throws Exception {
getContainingTask()
.getEnvironment()
.getMetricGroup()
.getIOMetricGroup()
.unregisterBackPressureListener(inputActivityClock);
super.close();
}
@Override
public void processElement(final StreamRecord<T> element) throws Exception {
final T event = element.getValue();
final long previousTimestamp =
element.hasTimestamp() ? element.getTimestamp() : Long.MIN_VALUE;
final long newTimestamp = timestampAssigner.extractTimestamp(event, previousTimestamp);
element.setTimestamp(newTimestamp);
output.collect(element);
watermarkGenerator.onEvent(event, newTimestamp, wmOutput);
}
@Override
public void onProcessingTime(long timestamp) throws Exception {
watermarkGenerator.onPeriodicEmit(wmOutput);
final long now = getProcessingTimeService().getCurrentProcessingTime();
getProcessingTimeService().registerTimer(now + watermarkInterval, this);
}
/**
* Override the base implementation to completely ignore watermarks propagated from upstream,
* except for the "end of time" watermark.
*/
@Override
public void processWatermark(org.apache.flink.streaming.api.watermark.Watermark mark)
throws Exception {
// if we receive a Long.MAX_VALUE watermark we forward it since it is used
// to signal the end of input and to not block watermark progress downstream
if (mark.getTimestamp() == Long.MAX_VALUE) {
wmOutput.emitWatermark(Watermark.MAX_WATERMARK);
}
}
/** Override the base implementation to completely ignore statuses propagated from upstream. */
@Override
public void processWatermarkStatus(WatermarkStatus watermarkStatus) throws Exception {}
@Override
public void finish() throws Exception {
super.finish();
watermarkGenerator.onPeriodicEmit(wmOutput);
}
// ------------------------------------------------------------------------
/**
* Implementation of the {@code WatermarkEmitter}, based on the components that are available
* inside a stream operator.
*/
public static final
|
TimestampsAndWatermarksOperator
|
java
|
square__javapoet
|
src/test/java/com/squareup/javapoet/TypeSpecTest.java
|
{
"start": 52963,
"end": 53626
}
|
class ____ {\n"
+ " void loopForever() {\n"
+ " do {\n"
+ " System.out.println(\"hello\");\n"
+ " } while (5 < 6);\n"
+ " }\n"
+ "}\n");
}
@Test public void inlineIndent() throws Exception {
TypeSpec taco = TypeSpec.classBuilder("Taco")
.addMethod(MethodSpec.methodBuilder("inlineIndent")
.addCode("if (3 < 4) {\n$>$T.out.println($S);\n$<}\n", System.class, "hello")
.build())
.build();
assertThat(toString(taco)).isEqualTo(""
+ "package com.squareup.tacos;\n"
+ "\n"
+ "import java.lang.System;\n"
+ "\n"
+ "
|
Taco
|
java
|
hibernate__hibernate-orm
|
hibernate-envers/src/test/java/org/hibernate/orm/test/envers/entities/components/Component4.java
|
{
"start": 404,
"end": 1875
}
|
class ____ {
@Column(name = "the_key")
private String key;
@Column(name = "val")
private String value;
@NotAudited
private String description;
public Component4() {
}
public Component4(String key, String value, String description) {
this.key = key;
this.value = value;
this.description = description;
}
public String getKey() {
return key;
}
public void setKey(String key) {
this.key = key;
}
public String getValue() {
return value;
}
public void setValue(String value) {
this.value = value;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((key == null) ? 0 : key.hashCode());
result = prime * result + ((value == null) ? 0 : value.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if ( this == obj ) {
return true;
}
if ( !(obj instanceof Component4) ) {
return false;
}
Component4 other = (Component4) obj;
if ( key != null ? !key.equals( other.key ) : other.key != null ) {
return false;
}
if ( value != null ? !value.equals( other.value ) : other.value != null ) {
return false;
}
return true;
}
@Override
public String toString() {
return "Component4[key = " + key + ", value = " + value + ", description = " + description + "]";
}
}
|
Component4
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/onetoone/OneToOneMapsIdChangeParentTest.java
|
{
"start": 1042,
"end": 2063
}
|
class ____ {
@RegisterExtension
public LoggerInspectionExtension logInspection =
LoggerInspectionExtension.builder().setLogger( CORE_LOGGER ).build();
private final Triggerable triggerable = logInspection.watchForLogMessages( "HHH000502:" );
@Test
public void test(EntityManagerFactoryScope scope) {
Child _child = scope.fromTransaction( entityManager -> {
Parent firstParent = new Parent();
firstParent.setId( 1L );
entityManager.persist( firstParent );
Child child = new Child();
child.setParent( firstParent );
entityManager.persist( child );
return child;
} );
triggerable.reset();
assertFalse( triggerable.wasTriggered() );
scope.inTransaction( entityManager -> {
Parent secondParent = new Parent();
secondParent.setId( 2L );
entityManager.persist( secondParent );
_child.setParent( secondParent );
entityManager.merge( _child );
} );
assertTrue( triggerable.wasTriggered() );
}
@Entity(name = "Parent")
public static
|
OneToOneMapsIdChangeParentTest
|
java
|
spring-cloud__spring-cloud-gateway
|
spring-cloud-gateway-server-webmvc/src/test/java/org/springframework/cloud/gateway/server/mvc/test/TestLoadBalancerConfig.java
|
{
"start": 1401,
"end": 1712
}
|
class ____ {
@LocalServerPort
protected int port = 0;
@Bean
public ServiceInstanceListSupplier staticServiceInstanceListSupplier() {
return ServiceInstanceListSuppliers.from("testservice",
new DefaultServiceInstance("testservice" + "-1", "testservice", "localhost", port, false));
}
}
}
|
Local
|
java
|
quarkusio__quarkus
|
independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/injection/constructornoinject/MultiInjectConstructorFailureTest.java
|
{
"start": 496,
"end": 1042
}
|
class ____ {
@RegisterExtension
public ArcTestContainer container = ArcTestContainer.builder()
.beanClasses(CombineHarvester.class, Head.class)
.shouldFail()
.build();
@Test
public void testInjection() {
Throwable error = container.getFailure();
assertNotNull(error);
assertTrue(error instanceof DefinitionException);
assertTrue(error.getMessage().contains("Multiple @Inject constructors found"));
}
@Dependent
static
|
MultiInjectConstructorFailureTest
|
java
|
quarkusio__quarkus
|
extensions/micrometer/runtime/src/main/java/io/quarkus/micrometer/runtime/config/VertxConfigGroup.java
|
{
"start": 210,
"end": 660
}
|
interface ____ extends MicrometerConfig.CapabilityEnabled {
/**
* Vert.x metrics support.
* <p>
* Support for Vert.x metrics will be enabled if Micrometer
* support is enabled, Vert.x MetricsOptions is on the classpath
* and either this value is true, or this value is unset and
* {@code quarkus.micrometer.binder-enabled-default} is true.
*
*/
@Override
Optional<Boolean> enabled();
}
|
VertxConfigGroup
|
java
|
apache__camel
|
core/camel-xml-jaxp/src/main/java/org/apache/camel/converter/jaxp/XmlConverter.java
|
{
"start": 14382,
"end": 14859
}
|
class ____ add new kinds of conversion).
*/
@Converter(order = 22)
public SAXSource toSAXSource(Path file, Exchange exchange) throws IOException, SAXException, TransformerException {
InputStream is = IOHelper.buffered(Files.newInputStream(file));
return toSAXSource(is, exchange);
}
/**
* Converts the source instance to a {@link StAXSource} or returns null if the conversion is not supported (making
* it easy to derive from this
|
to
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.