language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | elastic__elasticsearch | libs/entitlement/asm-provider/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterTests.java | {
"start": 14753,
"end": 15966
} | class
____ (isInstance) {
++extraArgs;
}
Class<?>[] targetParameterTypes = targetMethod.getParameterTypes();
Class<?>[] checkParameterTypes = new Class<?>[targetParameterTypes.length + extraArgs];
checkParameterTypes[0] = Class.class;
if (isInstance) {
checkParameterTypes[1] = Testable.class;
}
System.arraycopy(targetParameterTypes, 0, checkParameterTypes, extraArgs, targetParameterTypes.length);
var checkMethod = MockEntitlementChecker.class.getMethod(methodName, checkParameterTypes);
return new CheckMethod(
Type.getInternalName(MockEntitlementChecker.class),
checkMethod.getName(),
Arrays.stream(Type.getArgumentTypes(checkMethod)).map(Type::getDescriptor).toList()
);
}
private static void unwrapInvocationException(InvocationTargetException e) {
Throwable cause = e.getCause();
if (cause instanceof TestException n) {
// Sometimes we're expecting this one!
throw n;
} else {
throw new AssertionError(cause);
}
}
/**
* Calling a static method of a dynamically loaded | if |
java | elastic__elasticsearch | x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java | {
"start": 11380,
"end": 11503
} | class ____ provides functionality common to both the auto and
* interactive setup modes.
*/
private abstract | that |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/MissingOverrideTest.java | {
"start": 1733,
"end": 1896
} | class ____ {
abstract void f();
}
""")
.addSourceLines(
"Test.java",
"""
public | Super |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/ExtendedLogMetaRequest.java | {
"start": 2397,
"end": 4475
} | class ____ {
private String user;
private String appId;
private String containerId;
private MatchExpression nodeId = new MatchExpression(null);
private MatchExpression fileName = new MatchExpression(null);
private ComparisonCollection fileSize = new ComparisonCollection(null);
private ComparisonCollection modificationTime =
new ComparisonCollection(null);
public ExtendedLogMetaRequestBuilder setUser(String userName) {
this.user = userName;
return this;
}
public ExtendedLogMetaRequestBuilder setAppId(String applicationId) {
this.appId = applicationId;
return this;
}
public ExtendedLogMetaRequestBuilder setContainerId(String container) {
this.containerId = container;
return this;
}
public ExtendedLogMetaRequestBuilder setNodeId(String node) {
try {
this.nodeId = new MatchExpression(node);
} catch (PatternSyntaxException e) {
throw new IllegalArgumentException("Node Id expression is invalid", e);
}
return this;
}
public ExtendedLogMetaRequestBuilder setFileName(String file) {
try {
this.fileName = new MatchExpression(file);
} catch (PatternSyntaxException e) {
throw new IllegalArgumentException("Filename expression is invalid", e);
}
return this;
}
public ExtendedLogMetaRequestBuilder setFileSize(Set<String> fileSizes) {
this.fileSize = new ComparisonCollection(fileSizes);
return this;
}
public ExtendedLogMetaRequestBuilder setModificationTime(
Set<String> modificationTimes) {
this.modificationTime = new ComparisonCollection(modificationTimes);
return this;
}
public boolean isUserSet() {
return user != null;
}
public ExtendedLogMetaRequest build() {
return new ExtendedLogMetaRequest(user, appId, containerId, nodeId,
fileName, fileSize, modificationTime);
}
}
/**
* A collection of {@code ComparisonExpression}.
*/
public static | ExtendedLogMetaRequestBuilder |
java | spring-projects__spring-security | messaging/src/main/java/org/springframework/security/messaging/access/intercept/MessageMatcherDelegatingAuthorizationManager.java | {
"start": 8872,
"end": 13351
} | class ____ {
private final List<? extends MessageMatcher<?>> messageMatchers;
/**
* Creates a new instance
* @param messageMatchers the {@link MessageMatcher} instances to map to this
* constraint
*/
private Constraint(List<? extends MessageMatcher<?>> messageMatchers) {
Assert.notEmpty(messageMatchers, "messageMatchers cannot be null or empty");
this.messageMatchers = messageMatchers;
}
/**
* Shortcut for specifying {@link Message} instances require a particular
* role. If you do not want to have "ROLE_" automatically inserted see
* {@link #hasAuthority(String)}.
* @param role the role to require (i.e. USER, ADMIN, etc). Note, it should
* not start with "ROLE_" as this is automatically inserted.
* @return the {@link Builder} for further customization
*/
public Builder hasRole(String role) {
return access(AuthorityAuthorizationManager.hasRole(role));
}
/**
* Shortcut for specifying {@link Message} instances require any of a number
* of roles. If you do not want to have "ROLE_" automatically inserted see
* {@link #hasAnyAuthority(String...)}
* @param roles the roles to require (i.e. USER, ADMIN, etc). Note, it should
* not start with "ROLE_" as this is automatically inserted.
* @return the {@link Builder} for further customization
*/
public Builder hasAnyRole(String... roles) {
return access(AuthorityAuthorizationManager.hasAnyRole(roles));
}
/**
* Specify that {@link Message} instances require a particular authority.
* @param authority the authority to require (i.e. ROLE_USER, ROLE_ADMIN,
* etc).
* @return the {@link Builder} for further customization
*/
public Builder hasAuthority(String authority) {
return access(AuthorityAuthorizationManager.hasAuthority(authority));
}
/**
* Specify that {@link Message} instances requires any of a number
* authorities.
* @param authorities the requests require at least one of the authorities
* (i.e. "ROLE_USER","ROLE_ADMIN" would mean either "ROLE_USER" or
* "ROLE_ADMIN" is required).
* @return the {@link Builder} for further customization
*/
public Builder hasAnyAuthority(String... authorities) {
return access(AuthorityAuthorizationManager.hasAnyAuthority(authorities));
}
/**
* Specify that Messages are allowed by anyone.
* @return the {@link Builder} for further customization
*/
public Builder permitAll() {
return access(SingleResultAuthorizationManager.permitAll());
}
/**
* Specify that Messages are not allowed by anyone.
* @return the {@link Builder} for further customization
*/
public Builder denyAll() {
return access(SingleResultAuthorizationManager.denyAll());
}
/**
* Specify that Messages are allowed by any authenticated user.
* @return the {@link Builder} for further customization
*/
public Builder authenticated() {
return access(AuthenticatedAuthorizationManager.authenticated());
}
/**
* Specify that Messages are allowed by users who have authenticated and were
* not "remembered".
* @return the {@link Builder} for further customization
* @since 5.8
*/
public Builder fullyAuthenticated() {
return access(AuthenticatedAuthorizationManager.fullyAuthenticated());
}
/**
* Specify that Messages are allowed by users that have been remembered.
* @return the {@link Builder} for further customization
* @since 5.8
*/
public Builder rememberMe() {
return access(AuthenticatedAuthorizationManager.rememberMe());
}
/**
* Specify that Messages are allowed by anonymous users.
* @return the {@link Builder} for further customization
* @since 5.8
*/
public Builder anonymous() {
return access(AuthenticatedAuthorizationManager.anonymous());
}
/**
* Allows specifying that Messages are secured by an arbitrary expression
* @param authorizationManager the {@link AuthorizationManager} to secure the
* destinations
* @return the {@link Builder} for further customization
*/
public Builder access(AuthorizationManager<MessageAuthorizationContext<?>> authorizationManager) {
for (MessageMatcher<?> messageMatcher : this.messageMatchers) {
Builder.this.mappings.add(new Entry<>(messageMatcher, authorizationManager));
}
return Builder.this;
}
}
}
private static final | Constraint |
java | quarkusio__quarkus | integration-tests/bouncycastle/src/test/java/io/quarkus/it/bouncycastle/BouncyCastleITCase.java | {
"start": 122,
"end": 180
} | class ____ extends BouncyCastleTestCase {
}
| BouncyCastleITCase |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/BulkPutRoleRequestBuilderFactory.java | {
"start": 371,
"end": 474
} | interface ____ {
BulkPutRoleRequestBuilder create(Client client);
| BulkPutRoleRequestBuilderFactory |
java | apache__flink | flink-streaming-java/src/test/java/org/apache/flink/streaming/api/functions/sink/filesystem/BucketStateGenerator.java | {
"start": 1645,
"end": 7824
} | class ____ {
private final String bucketId;
private final String inProgressContent;
private final String pendingContent;
private final BucketStatePathResolver pathResolver;
public BucketStateGenerator(
String bucketId,
String inProgressContent,
String pendingContent,
java.nio.file.Path basePath,
int currentVersion) {
this.bucketId = bucketId;
this.inProgressContent = inProgressContent;
this.pendingContent = pendingContent;
this.pathResolver = new BucketStatePathResolver(basePath, currentVersion);
}
public void prepareDeserializationEmpty() throws IOException {
final String scenarioName = "empty";
final java.nio.file.Path scenarioPath = pathResolver.getResourcePath(scenarioName);
FileUtils.deleteDirectory(scenarioPath.toFile());
Files.createDirectories(scenarioPath);
final java.nio.file.Path outputPath = pathResolver.getOutputPath(scenarioName);
final Path testBucketPath = new Path(outputPath.resolve(bucketId).toString());
final Bucket<String, String> bucket = createNewBucket(testBucketPath);
final BucketState<String> bucketState = bucket.onReceptionOfCheckpoint(0);
byte[] bytes =
SimpleVersionedSerialization.writeVersionAndSerialize(
bucketStateSerializer(), bucketState);
Files.write(pathResolver.getSnapshotPath(scenarioName), bytes);
}
public void prepareDeserializationOnlyInProgress() throws IOException {
final String scenarioName = "only-in-progress";
final java.nio.file.Path scenarioPath = pathResolver.getResourcePath(scenarioName);
FileUtils.deleteDirectory(scenarioPath.toFile());
Files.createDirectories(scenarioPath);
final java.nio.file.Path outputPath = pathResolver.getOutputPath(scenarioName);
final Path testBucketPath = new Path(outputPath.resolve(bucketId).toString());
final Bucket<String, String> bucket = createNewBucket(testBucketPath);
bucket.write(inProgressContent, System.currentTimeMillis());
final BucketState<String> bucketState = bucket.onReceptionOfCheckpoint(0);
final byte[] bytes =
SimpleVersionedSerialization.writeVersionAndSerialize(
bucketStateSerializer(), bucketState);
Files.write(pathResolver.getSnapshotPath(scenarioName), bytes);
}
public void prepareDeserializationFull() throws IOException {
prepareDeserializationFull(true, "full");
}
public void prepareDeserializationNullInProgress() throws IOException {
prepareDeserializationFull(false, "full-no-in-progress");
}
private void prepareDeserializationFull(final boolean withInProgress, final String scenarioName)
throws IOException {
final java.nio.file.Path scenarioPath = pathResolver.getResourcePath(scenarioName);
FileUtils.deleteDirectory(Paths.get(scenarioPath.toString() + "-template").toFile());
Files.createDirectories(scenarioPath);
final int noOfPendingCheckpoints = 5;
final java.nio.file.Path outputPath = pathResolver.getOutputPath(scenarioName);
final Path testBucketPath = new Path(outputPath.resolve(bucketId).toString());
final Bucket<String, String> bucket = createNewBucket(testBucketPath);
BucketState<String> bucketState = null;
// pending for checkpoints
for (int i = 0; i < noOfPendingCheckpoints; i++) {
// write 10 bytes to the in progress file
bucket.write(pendingContent, System.currentTimeMillis());
bucket.write(pendingContent, System.currentTimeMillis());
// every checkpoint would produce a pending file
bucketState = bucket.onReceptionOfCheckpoint(i);
}
if (withInProgress) {
// create a in progress file
bucket.write(inProgressContent, System.currentTimeMillis());
// 5 pending files and 1 in progress file
bucketState = bucket.onReceptionOfCheckpoint(noOfPendingCheckpoints);
}
final byte[] bytes =
SimpleVersionedSerialization.writeVersionAndSerialize(
bucketStateSerializer(), bucketState);
Files.write(pathResolver.getSnapshotPath(scenarioName), bytes);
// copy the scenario file to a template directory.
// it is because that the test `testSerializationFull` would change the in progress file to
// pending files.
moveToTemplateDirectory(scenarioPath);
}
private static RowWiseBucketWriter<String, String> createBucketWriter() throws IOException {
return new RowWiseBucketWriter<>(
FileSystem.getLocalFileSystem().createRecoverableWriter(),
new SimpleStringEncoder<>());
}
private static SimpleVersionedSerializer<BucketState<String>> bucketStateSerializer()
throws IOException {
final RowWiseBucketWriter bucketWriter = createBucketWriter();
return new BucketStateSerializer<>(
bucketWriter.getProperties().getInProgressFileRecoverableSerializer(),
bucketWriter.getProperties().getPendingFileRecoverableSerializer(),
SimpleVersionedStringSerializer.INSTANCE);
}
private Bucket<String, String> createNewBucket(Path bucketPath) throws IOException {
return Bucket.getNew(
0,
bucketId,
bucketPath,
0,
createBucketWriter(),
DefaultRollingPolicy.builder().withMaxPartSize(new MemorySize(10)).build(),
null,
OutputFileConfig.builder().build());
}
private void moveToTemplateDirectory(java.nio.file.Path scenarioPath) throws IOException {
FileUtils.copy(
new Path(scenarioPath.toString()),
new Path(scenarioPath.toString() + "-template"),
false);
FileUtils.deleteDirectory(scenarioPath.toFile());
}
}
| BucketStateGenerator |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/monitor/jvm/HotThreadsTests.java | {
"start": 1429,
"end": 8151
} | class ____ extends ESTestCase {
private long nanosHelper(long millis) {
return millis * 1_000_000;
}
public void testSupportedThreadsReportType() {
for (String type : new String[] { "unsupported", "", null, "CPU", "WAIT", "BLOCK", "MEM" }) {
expectThrows(IllegalArgumentException.class, () -> new HotThreads().type(HotThreads.ReportType.of(type)));
}
for (String type : new String[] { "cpu", "wait", "block", "mem" }) {
try {
new HotThreads().type(HotThreads.ReportType.of(type));
} catch (IllegalArgumentException e) {
fail(Strings.format("IllegalArgumentException called when creating HotThreads for supported type [%s]", type));
}
}
}
private void assertIdleThreadHelper(ThreadInfo threadInfo, List<StackTraceElement> stack) {
when(threadInfo.getStackTrace()).thenReturn(stack.toArray(new StackTraceElement[0]));
assertTrue(HotThreads.isIdleThread(threadInfo));
}
private List<StackTraceElement> makeThreadStackHelper(List<String[]> names) {
return names.stream().map(e -> {
// Cannot mock StackTraceElement because it's final
return new StackTraceElement(e[0], e[1], "Some_File", 1);
}).toList();
}
public void testIdleThreadsDetection() {
for (String threadName : new String[] {
"Signal Dispatcher",
"Finalizer",
"Reference Handler",
"Notification Thread",
"Common-Cleaner",
"process reaper",
"DestroyJavaVM" }) {
ThreadInfo mockedThreadInfo = mock(ThreadInfo.class);
when(mockedThreadInfo.getThreadName()).thenReturn(threadName);
assertTrue(HotThreads.isKnownJDKThread(mockedThreadInfo));
assertTrue(HotThreads.isIdleThread(mockedThreadInfo));
}
for (String threadName : new String[] { "Text", "", null, "Finalizer".toLowerCase(Locale.ROOT) }) {
ThreadInfo mockedThreadInfo = mock(ThreadInfo.class);
when(mockedThreadInfo.getThreadName()).thenReturn(threadName);
when(mockedThreadInfo.getStackTrace()).thenReturn(new StackTraceElement[0]);
assertFalse(HotThreads.isKnownJDKThread(mockedThreadInfo));
assertFalse(HotThreads.isIdleThread(mockedThreadInfo));
}
List<StackTraceElement> testJvmStack = makeThreadStackHelper(
List.of(
new String[] { "org.elasticsearch.monitor.test", "methodOne" },
new String[] { "org.elasticsearch.monitor.testOther", "methodTwo" },
new String[] { "org.elasticsearch.monitor.test", "methodThree" },
new String[] { "org.elasticsearch.monitor.testOther", "methodFour" }
)
);
for (StackTraceElement stackFrame : testJvmStack) {
assertFalse(HotThreads.isKnownIdleStackFrame(stackFrame.getClassName(), stackFrame.getMethodName()));
}
ThreadInfo notIdleThread = mock(ThreadInfo.class);
when(notIdleThread.getThreadName()).thenReturn("Not Idle Thread");
when(notIdleThread.getStackTrace()).thenReturn(testJvmStack.toArray(new StackTraceElement[0]));
assertFalse(HotThreads.isIdleThread(notIdleThread));
List<StackTraceElement> idleThreadStackElements = makeThreadStackHelper(
List.of(
new String[] { "java.util.concurrent.ThreadPoolExecutor", "getTask" },
new String[] { "sun.nio.ch.SelectorImpl", "select" },
new String[] { "org.elasticsearch.threadpool.ThreadPool$CachedTimeThread", "run" },
new String[] { "org.elasticsearch.indices.ttl.IndicesTTLService$Notifier", "await" },
new String[] { "java.util.concurrent.LinkedTransferQueue", "poll" },
new String[] { "com.sun.jmx.remote.internal.ServerCommunicatorAdmin$Timeout", "run" }
)
);
for (StackTraceElement extraFrame : idleThreadStackElements) {
ThreadInfo idleThread = mock(ThreadInfo.class);
when(idleThread.getThreadName()).thenReturn("Idle Thread");
when(idleThread.getStackTrace()).thenReturn(new StackTraceElement[] { extraFrame });
assertTrue(HotThreads.isKnownIdleStackFrame(extraFrame.getClassName(), extraFrame.getMethodName()));
assertTrue(HotThreads.isIdleThread(idleThread));
List<StackTraceElement> topOfStack = new ArrayList<>(testJvmStack);
topOfStack.add(0, extraFrame);
assertIdleThreadHelper(idleThread, topOfStack);
List<StackTraceElement> bottomOfStack = new ArrayList<>(testJvmStack);
bottomOfStack.add(extraFrame);
assertIdleThreadHelper(idleThread, bottomOfStack);
if (testJvmStack.size() > 1) {
List<StackTraceElement> middleOfStack = new ArrayList<>(testJvmStack);
middleOfStack.add(between(Math.min(1, testJvmStack.size()), Math.max(0, testJvmStack.size() - 1)), extraFrame);
assertIdleThreadHelper(idleThread, middleOfStack);
}
}
}
public void testSimilarity() {
StackTraceElement[] stackOne = makeThreadStackHelper(
List.of(
new String[] { "org.elasticsearch.monitor.test", "methodOne" },
new String[] { "org.elasticsearch.monitor.testOther", "methodTwo" }
)
).toArray(new StackTraceElement[0]);
StackTraceElement[] stackTwo = makeThreadStackHelper(
List.of(
new String[] { "org.elasticsearch.monitor.test1", "methodOne" },
new String[] { "org.elasticsearch.monitor.testOther", "methodTwo" }
)
).toArray(new StackTraceElement[0]);
StackTraceElement[] stackThree = makeThreadStackHelper(
List.of(
new String[] { "org.elasticsearch.monitor.testOther", "methodTwo" },
new String[] { "org.elasticsearch.monitor.test", "methodOne" }
)
).toArray(new StackTraceElement[0]);
StackTraceElement[] stackFour = makeThreadStackHelper(
List.of(
new String[] { "org.elasticsearch.monitor.testPrior", "methodOther" },
new String[] { "org.elasticsearch.monitor.test", "methodOne" },
new String[] { "org.elasticsearch.monitor.testOther", "methodTwo" }
)
).toArray(new StackTraceElement[0]);
HotThreads hotThreads = new HotThreads();
// We can simplify this with records when the toolchain is upgraded
| HotThreadsTests |
java | apache__camel | core/camel-api/src/main/java/org/apache/camel/Produce.java | {
"start": 1661,
"end": 1985
} | interface ____ {
/**
* The uri to produce to
*/
String value() default "";
/**
* Use the field or getter on the bean to provide the uri to produce to
*/
String property() default "";
/**
* Whether to use bean parameter binding
*/
boolean binding() default true;
}
| Produce |
java | apache__camel | components/camel-jms/src/test/java/org/apache/camel/component/jms/issues/JmsInOutIssueTest.java | {
"start": 1559,
"end": 4111
} | class ____ extends AbstractJMSTest {
@Order(2)
@RegisterExtension
public static CamelContextExtension camelContextExtension = new DefaultCamelContextExtension();
protected CamelContext context;
protected ProducerTemplate template;
protected ConsumerTemplate consumer;
@Test
public void testInOutWithRequestBody() {
String reply = template.requestBody("activemq:queue:inJmsInOutIssueTest", "Hello World", String.class);
assertEquals("Bye World", reply);
}
@Test
public void testInOutTwoTimes() {
String reply = template.requestBody("activemq:queue:inJmsInOutIssueTest", "Hello World", String.class);
assertEquals("Bye World", reply);
reply = template.requestBody("activemq:queue:inJmsInOutIssueTest", "Hello Camel", String.class);
assertEquals("Bye World", reply);
}
@Test
public void testInOutWithAsyncRequestBody() throws Exception {
Future<String> reply = template.asyncRequestBody("activemq:queue:inJmsInOutIssueTest", "Hello World", String.class);
assertEquals("Bye World", reply.get());
}
@Test
public void testInOutWithSendExchange() {
Exchange out = template.send("activemq:queue:inJmsInOutIssueTest", ExchangePattern.InOut,
exchange -> exchange.getIn().setBody("Hello World"));
assertEquals("Bye World", out.getMessage().getBody());
}
@Test
public void testInOutWithAsyncSendExchange() throws Exception {
Future<Exchange> out = template.asyncSend("activemq:queue:inJmsInOutIssueTest", exchange -> {
exchange.setPattern(ExchangePattern.InOut);
exchange.getIn().setBody("Hello World");
});
assertEquals("Bye World", out.get().getMessage().getBody());
}
@Override
protected String getComponentName() {
return "activemq";
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from("activemq:queue:inJmsInOutIssueTest").process(exchange -> exchange.getMessage().setBody("Bye World"));
}
};
}
@Override
public CamelContextExtension getCamelContextExtension() {
return camelContextExtension;
}
@BeforeEach
void setUpRequirements() {
context = camelContextExtension.getContext();
template = camelContextExtension.getProducerTemplate();
consumer = camelContextExtension.getConsumerTemplate();
}
}
| JmsInOutIssueTest |
java | apache__camel | components/camel-salesforce/camel-salesforce-component/src/test/java/org/apache/camel/component/salesforce/dto/generated/User.java | {
"start": 280,
"end": 635
} | class ____ extends AbstractDescribedSObjectBase {
private String Username;
@JsonProperty("Username")
public String getUsername() {
return Username;
}
public void setUsername(String username) {
Username = username;
}
@Override
public final SObjectDescription description() {
return null;
}
}
| User |
java | quarkusio__quarkus | extensions/quartz/deployment/src/test/java/io/quarkus/quartz/test/DuplicatedContextTest.java | {
"start": 1269,
"end": 2451
} | class ____ {
private final AtomicInteger blockingCalled = new AtomicInteger();
private final AtomicInteger nonBlockingCalled = new AtomicInteger();
@Scheduled(every = "1m")
public void blocking() {
Context context = Vertx.currentContext();
Assertions.assertNotNull(context);
Assertions.assertTrue(VertxContext.isDuplicatedContext(context));
Assertions.assertTrue(VertxContext.isOnDuplicatedContext());
blockingCalled.incrementAndGet();
}
@Scheduled(every = "1m")
public Uni<Void> nonblocking() {
Context context = Vertx.currentContext();
Assertions.assertNotNull(context);
Assertions.assertTrue(VertxContext.isDuplicatedContext(context));
Assertions.assertTrue(VertxContext.isOnDuplicatedContext());
nonBlockingCalled.incrementAndGet();
return Uni.createFrom().voidItem();
}
public int blockingCalled() {
return blockingCalled.get();
}
public int nonBlockingCalled() {
return nonBlockingCalled.get();
}
}
}
| MyScheduledClass |
java | elastic__elasticsearch | plugins/repository-hdfs/src/javaRestTest/java/org/elasticsearch/repositories/hdfs/SecureHaHdfsFailoverTestSuiteIT.java | {
"start": 937,
"end": 2418
} | class ____ extends AbstractHaHdfsFailoverTestSuiteIT {
public static Krb5kDcContainer krb5Fixture = new Krb5kDcContainer();
public static HdfsFixture hdfsFixture = new HdfsFixture().withHAService("ha-hdfs")
.withKerberos(() -> krb5Fixture.getPrincipal(), () -> krb5Fixture.getKeytab());
public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
.distribution(DistributionType.DEFAULT)
.plugin("repository-hdfs")
.setting("xpack.license.self_generated.type", "trial")
.setting("xpack.security.enabled", "false")
.systemProperty("java.security.krb5.conf", () -> krb5Fixture.getConfPath().toString())
.configFile("repository-hdfs/krb5.conf", Resource.fromString(() -> krb5Fixture.getConf()))
.configFile("repository-hdfs/krb5.keytab", Resource.fromFile(() -> krb5Fixture.getEsKeytab()))
.build();
@ClassRule
public static TestRule ruleChain = RuleChain.outerRule(krb5Fixture).around(hdfsFixture).around(cluster);
@Override
HdfsFixture getHdfsFixture() {
return hdfsFixture;
}
@Override
protected String getTestRestCluster() {
return cluster.getHttpAddresses();
}
protected String securityCredentials() {
return String.format(java.util.Locale.ROOT, """
"security.principal": "%s","conf.dfs.data.transfer.protection": "authentication",""", krb5Fixture.getEsPrincipal());
}
}
| SecureHaHdfsFailoverTestSuiteIT |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/error/ShouldNotBeExactlyInstance_create_Test.java | {
"start": 1274,
"end": 2439
} | class ____ {
@Test
void should_create_error_message() {
// GIVEN
ErrorMessageFactory factory = shouldNotBeExactlyInstance("Yoda", String.class);
// WHEN
String message = factory.create(new TestDescription("Test"), new StandardRepresentation());
// THEN
then(message).isEqualTo(format("[Test] %n" +
"Expecting%n" +
" \"Yoda\"%n" +
"not to be of exact type:%n" +
" java.lang.String"));
}
@Test
void should_create_error_message_with_stack_trace_for_throwable() {
// GIVEN
IllegalArgumentException throwable = new IllegalArgumentException();
// WHEN
String message = shouldNotBeExactlyInstance(throwable, IllegalArgumentException.class).create();
// THEN
then(message).isEqualTo(format("%nExpecting%n" +
" \"" + getStackTrace(throwable) + "\"%n" +
"not to be of exact type:%n" +
" java.lang.IllegalArgumentException"));
}
}
| ShouldNotBeExactlyInstance_create_Test |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java | {
"start": 1174,
"end": 5152
} | class ____ extends AcknowledgedRequest<DeleteIndexRequest> implements IndicesRequest.Replaceable {
public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.builder()
.concreteTargetOptions(IndicesOptions.ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS)
.wildcardOptions(
IndicesOptions.WildcardOptions.builder()
.matchOpen(true)
.matchClosed(true)
.allowEmptyExpressions(true)
.resolveAliases(false)
.build()
)
.gatekeeperOptions(
IndicesOptions.GatekeeperOptions.builder()
.allowAliasToMultipleIndices(false)
.allowClosedIndices(true)
.ignoreThrottled(false)
.allowSelectors(false)
.build()
)
.build();
private String[] indices;
// Delete index should work by default on both open and closed indices.
private IndicesOptions indicesOptions = DEFAULT_INDICES_OPTIONS;
public DeleteIndexRequest(StreamInput in) throws IOException {
super(in);
indices = in.readStringArray();
indicesOptions = IndicesOptions.readIndicesOptions(in);
}
public DeleteIndexRequest() {
super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT);
}
/**
* Constructs a new delete index request for the specified index.
*
* @param index The index to delete. Use "_all" to delete all indices.
*/
public DeleteIndexRequest(String index) {
super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT);
this.indices = new String[] { index };
}
/**
* Constructs a new delete index request for the specified indices.
*
* @param indices The indices to delete. Use "_all" to delete all indices.
*/
public DeleteIndexRequest(String... indices) {
super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT);
this.indices = indices;
}
@Override
public IndicesOptions indicesOptions() {
return indicesOptions;
}
public DeleteIndexRequest indicesOptions(IndicesOptions indicesOptions) {
this.indicesOptions = indicesOptions;
return this;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (CollectionUtils.isEmpty(indices)) {
validationException = addValidationError("index / indices is missing", validationException);
}
return validationException;
}
@Override
public DeleteIndexRequest indices(String... indices) {
this.indices = indices;
return this;
}
/**
* The index to delete.
*/
@Override
public String[] indices() {
return indices;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeStringArray(indices);
indicesOptions.writeIndicesOptions(out);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
DeleteIndexRequest that = (DeleteIndexRequest) o;
return Arrays.equals(indices, that.indices) && Objects.equals(indicesOptions, that.indicesOptions);
}
@Override
public int hashCode() {
int result = Objects.hash(indicesOptions);
result = 31 * result + Arrays.hashCode(indices);
return result;
}
@Override
public String getDescription() {
final var stringBuilder = new StringBuilder("indices[");
Strings.collectionToDelimitedStringWithLimit(Arrays.asList(indices), ",", 1024, stringBuilder);
stringBuilder.append("]");
return stringBuilder.toString();
}
}
| DeleteIndexRequest |
java | quarkusio__quarkus | independent-projects/tools/devtools-common/src/main/java/io/quarkus/devtools/commands/handlers/RemoveExtensionsCommandHandler.java | {
"start": 1073,
"end": 2755
} | class ____ implements QuarkusCommandHandler {
@Override
public QuarkusCommandOutcome execute(QuarkusCommandInvocation invocation) throws QuarkusCommandException {
final Set<String> extensionsQuery = invocation.getValue(RemoveExtensions.EXTENSIONS, Collections.emptySet());
if (extensionsQuery.isEmpty()) {
return QuarkusCommandOutcome.success().setValue(RemoveExtensions.OUTCOME_UPDATED, false);
}
final List<ArtifactCoords> extensionsToRemove = computeCoordsFromQuery(invocation, extensionsQuery);
if (extensionsToRemove == null) {
return QuarkusCommandOutcome.failure("no extensions to remove").setValue(RemoveExtensions.OUTCOME_UPDATED, false);
}
final ExtensionManager extensionManager = invocation.getValue(EXTENSION_MANAGER,
invocation.getQuarkusProject().getExtensionManager());
try {
final Set<ArtifactKey> keys = extensionsToRemove.stream().map(ArtifactCoords::getKey)
.collect(Collectors.toSet());
final UninstallResult result = extensionManager.uninstall(keys);
result.getUninstalled()
.forEach(a -> invocation.log()
.info(MessageIcons.SUCCESS_ICON + " Extension " + a.getGroupId() + ":" + a.getArtifactId()
+ " has been uninstalled"));
return QuarkusCommandOutcome.success().setValue(RemoveExtensions.OUTCOME_UPDATED, result.isSourceUpdated());
} catch (IOException e) {
throw new QuarkusCommandException("Failed to remove extensions", e);
}
}
}
| RemoveExtensionsCommandHandler |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/injection/guice/multibindings/Multibinder.java | {
"start": 2319,
"end": 3464
} | class ____ {
* {@literal @}Inject
* public SnackMachine(Set<Snack> snacks) { ... }
* }</code></pre>
* <p>
* Create multibindings from different modules is supported. For example, it
* is okay to have both {@code CandyModule} and {@code ChipsModule} to both
* create their own {@code Multibinder<Snack>}, and to each contribute bindings
* to the set of snacks. When that set is injected, it will contain elements
* from both modules.
* <p>
* Elements are resolved at set injection time. If an element is bound to a
* provider, that provider's get method will be called each time the set is
* injected (unless the binding is also scoped).
* <p>
* Annotations are be used to create different sets of the same element
* type. Each distinct annotation gets its own independent collection of
* elements.
* <p>
* <strong>Elements must be distinct.</strong> If multiple bound elements
* have the same value, set injection will fail.
* <p>
* <strong>Elements must be non-null.</strong> If any set element is null,
* set injection will fail.
*
* @author jessewilson@google.com (Jesse Wilson)
*/
public abstract | SnackMachine |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/javadoc/UrlInSeeTest.java | {
"start": 1341,
"end": 1502
} | class ____ {}
""")
.addOutputLines(
"Test.java",
"""
/** See https://foo for more details */
| Test |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/web/client/UnorderedRequestExpectationManagerTests.java | {
"start": 1681,
"end": 4725
} | class ____ {
private final UnorderedRequestExpectationManager manager = new UnorderedRequestExpectationManager();
@Test
void unexpectedRequest() {
assertThatExceptionOfType(AssertionError.class)
.isThrownBy(() -> this.manager.validateRequest(createRequest(GET, "/foo")))
.withMessage("""
No further requests expected: HTTP GET /foo
0 request(s) executed.
""");
}
@Test
void zeroExpectedRequests() {
this.manager.verify();
}
@Test
void multipleRequests() throws Exception {
this.manager.expectRequest(once(), requestTo("/foo")).andExpect(method(GET)).andRespond(withSuccess());
this.manager.expectRequest(once(), requestTo("/bar")).andExpect(method(GET)).andRespond(withSuccess());
this.manager.validateRequest(createRequest(GET, "/bar"));
this.manager.validateRequest(createRequest(GET, "/foo"));
this.manager.verify();
}
@Test
void repeatedRequests() throws Exception {
this.manager.expectRequest(twice(), requestTo("/foo")).andExpect(method(GET)).andRespond(withSuccess());
this.manager.expectRequest(twice(), requestTo("/bar")).andExpect(method(GET)).andRespond(withSuccess());
this.manager.validateRequest(createRequest(GET, "/bar"));
this.manager.validateRequest(createRequest(GET, "/foo"));
this.manager.validateRequest(createRequest(GET, "/foo"));
this.manager.validateRequest(createRequest(GET, "/bar"));
this.manager.verify();
}
@Test
void repeatedRequestsTooMany() throws Exception {
this.manager.expectRequest(max(2), requestTo("/foo")).andExpect(method(GET)).andRespond(withSuccess());
this.manager.expectRequest(max(2), requestTo("/bar")).andExpect(method(GET)).andRespond(withSuccess());
this.manager.validateRequest(createRequest(GET, "/bar"));
this.manager.validateRequest(createRequest(GET, "/foo"));
this.manager.validateRequest(createRequest(GET, "/bar"));
this.manager.validateRequest(createRequest(GET, "/foo"));
assertThatExceptionOfType(AssertionError.class)
.isThrownBy(() -> this.manager.validateRequest(createRequest(GET, "/foo")))
.withMessage("""
No further requests expected: HTTP GET /foo
4 request(s) executed:
GET /bar
GET /foo
GET /bar
GET /foo
""");
}
@Test
void repeatedRequestsTooFew() throws Exception {
this.manager.expectRequest(min(2), requestTo("/foo")).andExpect(method(GET)).andRespond(withSuccess());
this.manager.expectRequest(min(2), requestTo("/bar")).andExpect(method(GET)).andRespond(withSuccess());
this.manager.validateRequest(createRequest(GET, "/bar"));
this.manager.validateRequest(createRequest(GET, "/foo"));
this.manager.validateRequest(createRequest(GET, "/foo"));
assertThatExceptionOfType(AssertionError.class)
.isThrownBy(this.manager::verify)
.withMessageContaining("""
3 request(s) executed:
GET /bar
GET /foo
GET /foo
""");
}
private ClientHttpRequest createRequest(HttpMethod method, String url) {
return new MockClientHttpRequest(method, URI.create(url));
}
}
| UnorderedRequestExpectationManagerTests |
java | google__dagger | javatests/dagger/functional/builder/BuildMethodCovariantReturnInheritedTest.java | {
"start": 928,
"end": 1013
} | interface ____ {
Object build();
}
@Component.Builder
| BuilderSupertype |
java | quarkusio__quarkus | extensions/oidc-common/runtime/src/main/java/io/quarkus/oidc/common/runtime/config/OidcCommonConfig.java | {
"start": 383,
"end": 3309
} | interface ____ {
/**
* The base URL of the OpenID Connect (OIDC) server, for example, `https://host:port/auth`.
* Do not set this property if you use 'quarkus-oidc' and the public key verification ({@link #publicKey})
* or certificate chain verification only ({@link #certificateChain}) is required.
* The OIDC discovery endpoint is called by default by appending a `.well-known/openid-configuration` path to this URL.
* For Keycloak, use `https://host:port/realms/{realm}`, replacing `{realm}` with the Keycloak realm name.
*/
Optional<String> authServerUrl();
/**
* Discovery of the OIDC endpoints.
* If not enabled, you must configure the OIDC endpoint URLs individually.
*/
@ConfigDocDefault("true")
Optional<Boolean> discoveryEnabled();
/**
* The relative path or absolute URL of the OIDC dynamic client registration endpoint.
* Set if {@link #discoveryEnabled} is `false` or a discovered token endpoint path must be customized.
*/
Optional<String> registrationPath();
/**
* The duration to attempt the initial connection to an OIDC server.
* For example, setting the duration to `20S` allows 10 retries, each 2 seconds apart.
* This property is only effective when the initial OIDC connection is created.
* For dropped connections, use the `connection-retry-count` property instead.
*/
Optional<Duration> connectionDelay();
/**
* The number of times to retry re-establishing an existing OIDC connection if it is temporarily lost.
* Different from `connection-delay`, which applies only to initial connection attempts.
* For instance, if a request to the OIDC token endpoint fails due to a connection issue, it will be retried as per this
* setting.
*/
@WithDefault("3")
int connectionRetryCount();
/**
* The number of seconds after which the current OIDC connection request times out.
*/
@WithDefault("10s")
Duration connectionTimeout();
/**
* Whether DNS lookup should be performed on the worker thread.
* Use this option when you can see logged warnings about blocked Vert.x event loop by HTTP requests to OIDC server.
*/
@WithDefault("false")
boolean useBlockingDnsLookup();
/**
* The maximum size of the connection pool used by the WebClient.
*/
OptionalInt maxPoolSize();
/**
* Follow redirects automatically when WebClient gets HTTP 302.
* When this property is disabled only a single redirect to exactly the same original URI
* is allowed but only if one or more cookies were set during the redirect request.
*/
@WithDefault("true")
boolean followRedirects();
/**
* HTTP proxy configuration.
*/
@ConfigDocSection
Proxy proxy();
/**
* TLS configuration.
*/
@ConfigDocSection
Tls tls();
| OidcCommonConfig |
java | spring-projects__spring-framework | spring-tx/src/test/java/org/springframework/transaction/annotation/AnnotationTransactionNamespaceHandlerTests.java | {
"start": 1755,
"end": 4588
} | class ____ {
private final ConfigurableApplicationContext context = new ClassPathXmlApplicationContext(
"org/springframework/transaction/annotation/annotationTransactionNamespaceHandlerTests.xml");
@AfterEach
void tearDown() {
this.context.close();
}
@Test
void isProxy() {
TransactionalTestBean bean = getTestBean();
assertThat(AopUtils.isAopProxy(bean)).as("testBean is not a proxy").isTrue();
Map<String, Object> services = this.context.getBeansWithAnnotation(Service.class);
assertThat(services.containsKey("testBean")).as("Stereotype annotation not visible").isTrue();
}
@Test
void invokeTransactional() {
TransactionalTestBean testBean = getTestBean();
CallCountingTransactionManager ptm = (CallCountingTransactionManager) context.getBean("transactionManager");
// try with transactional
assertThat(ptm.begun).as("Should not have any started transactions").isEqualTo(0);
testBean.findAllFoos();
assertThat(ptm.begun).as("Should have 1 started transaction").isEqualTo(1);
assertThat(ptm.commits).as("Should have 1 committed transaction").isEqualTo(1);
// try with non-transaction
testBean.doSomething();
assertThat(ptm.begun).as("Should not have started another transaction").isEqualTo(1);
// try with exceptional
assertThatExceptionOfType(Throwable.class).isThrownBy(() ->
testBean.exceptional(new IllegalArgumentException("foo")))
.satisfies(ex -> {
assertThat(ptm.begun).as("Should have another started transaction").isEqualTo(2);
assertThat(ptm.rollbacks).as("Should have 1 rolled back transaction").isEqualTo(1);
});
}
@Test
void nonPublicMethodsNotAdvised() {
TransactionalTestBean testBean = getTestBean();
CallCountingTransactionManager ptm = (CallCountingTransactionManager) context.getBean("transactionManager");
assertThat(ptm.begun).as("Should not have any started transactions").isEqualTo(0);
testBean.annotationsOnProtectedAreIgnored();
assertThat(ptm.begun).as("Should not have any started transactions").isEqualTo(0);
}
@Test
void mBeanExportAlsoWorks() throws Exception {
MBeanServer server = ManagementFactory.getPlatformMBeanServer();
Object actual = server.invoke(ObjectName.getInstance("test:type=TestBean"), "doSomething", new Object[0], new String[0]);
assertThat(actual).isEqualTo("done");
}
@Test
void transactionalEventListenerRegisteredProperly() {
assertThat(this.context.containsBean(TransactionManagementConfigUtils
.TRANSACTIONAL_EVENT_LISTENER_FACTORY_BEAN_NAME)).isTrue();
assertThat(this.context.getBeansOfType(TransactionalEventListenerFactory.class)).hasSize(1);
}
private TransactionalTestBean getTestBean() {
return (TransactionalTestBean) context.getBean("testBean");
}
@Service
@ManagedResource("test:type=TestBean")
public static | AnnotationTransactionNamespaceHandlerTests |
java | netty__netty | codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpDataTest.java | {
"start": 1635,
"end": 5714
} | interface ____ {
}
static HttpData[] data() {
return new HttpData[]{
new MemoryAttribute("test", 10),
new MemoryFileUpload("test", "", "text/plain", null, CharsetUtil.UTF_8, 10),
new MixedAttribute("test", 10, -1),
new MixedFileUpload("test", "", "text/plain", null, CharsetUtil.UTF_8, 10, -1),
new DiskAttribute("test", 10),
new DiskFileUpload("test", "", "text/plain", null, CharsetUtil.UTF_8, 10)
};
}
@BeforeAll
static void setUp() {
Random rndm = new Random();
rndm.nextBytes(BYTES);
}
@ParameterizedHttpDataTest
void testAddContentEmptyBuffer(HttpData httpData) throws IOException {
ByteBuf content = PooledByteBufAllocator.DEFAULT.buffer();
httpData.addContent(content, false);
assertThat(content.refCnt()).isEqualTo(0);
}
@ParameterizedHttpDataTest
void testCompletedFlagPreservedAfterRetainDuplicate(HttpData httpData) throws IOException {
httpData.addContent(Unpooled.wrappedBuffer("foo".getBytes(CharsetUtil.UTF_8)), false);
assertThat(httpData.isCompleted()).isFalse();
HttpData duplicate = httpData.retainedDuplicate();
assertThat(duplicate.isCompleted()).isFalse();
assertThat(duplicate.release()).isTrue();
httpData.addContent(Unpooled.wrappedBuffer("bar".getBytes(CharsetUtil.UTF_8)), true);
assertThat(httpData.isCompleted()).isTrue();
duplicate = httpData.retainedDuplicate();
assertThat(duplicate.isCompleted()).isTrue();
assertThat(duplicate.release()).isTrue();
}
@Test
void testAddContentExceedsDefinedSizeDiskFileUpload() {
doTestAddContentExceedsSize(
new DiskFileUpload("test", "", "application/json", null, CharsetUtil.UTF_8, 10),
"Out of size: 64 > 10");
}
@Test
void testAddContentExceedsDefinedSizeMemoryFileUpload() {
doTestAddContentExceedsSize(
new MemoryFileUpload("test", "", "application/json", null, CharsetUtil.UTF_8, 10),
"Out of size: 64 > 10");
}
@ParameterizedHttpDataTest
void testAddContentExceedsMaxSize(final HttpData httpData) {
httpData.setMaxSize(10);
doTestAddContentExceedsSize(httpData, "Size exceed allowed maximum capacity");
}
@ParameterizedHttpDataTest
void testSetContentExceedsDefinedSize(final HttpData httpData) {
doTestSetContentExceedsSize(httpData, "Out of size: 64 > 10");
}
@ParameterizedHttpDataTest
void testSetContentExceedsMaxSize(final HttpData httpData) {
httpData.setMaxSize(10);
doTestSetContentExceedsSize(httpData, "Size exceed allowed maximum capacity");
}
private static void doTestAddContentExceedsSize(final HttpData httpData, String expectedMessage) {
final ByteBuf content = PooledByteBufAllocator.DEFAULT.buffer();
content.writeBytes(BYTES);
assertThatExceptionOfType(IOException.class)
.isThrownBy(new ThrowableAssert.ThrowingCallable() {
@Override
public void call() throws Throwable {
httpData.addContent(content, false);
}
})
.withMessage(expectedMessage);
assertThat(content.refCnt()).isEqualTo(0);
}
private static void doTestSetContentExceedsSize(final HttpData httpData, String expectedMessage) {
final ByteBuf content = PooledByteBufAllocator.DEFAULT.buffer();
content.writeBytes(BYTES);
assertThatExceptionOfType(IOException.class)
.isThrownBy(new ThrowableAssert.ThrowingCallable() {
@Override
public void call() throws Throwable {
httpData.setContent(content);
}
})
.withMessage(expectedMessage);
assertThat(content.refCnt()).isEqualTo(0);
}
}
| ParameterizedHttpDataTest |
java | spring-projects__spring-framework | spring-webflux/src/main/java/org/springframework/web/reactive/function/server/RequestPredicates.java | {
"start": 42962,
"end": 44270
} | class ____ extends DelegatingServerRequest {
private final Map<String, Object> attributes;
public ExtendedAttributesServerRequestWrapper(ServerRequest delegate, Map<String, Object> newAttributes) {
super(delegate);
Assert.notNull(newAttributes, "NewAttributes must not be null");
Map<String, Object> oldAttributes = delegate.attributes();
this.attributes = CollectionUtils.compositeMap(newAttributes, oldAttributes, newAttributes::put,
newAttributes::putAll);
}
@Override
public Optional<Object> attribute(String name) {
return Optional.ofNullable(this.attributes.get(name));
}
@Override
public Map<String, Object> attributes() {
return this.attributes;
}
@Override
public String pathVariable(String name) {
Map<String, String> pathVariables = pathVariables();
if (pathVariables.containsKey(name)) {
return pathVariables.get(name);
}
else {
throw new IllegalArgumentException("No path variable with name \"" + name + "\" available");
}
}
@Override
@SuppressWarnings("unchecked")
public Map<String, String> pathVariables() {
return (Map<String, String>) this.attributes.getOrDefault(
RouterFunctions.URI_TEMPLATE_VARIABLES_ATTRIBUTE, Collections.emptyMap());
}
}
private static | ExtendedAttributesServerRequestWrapper |
java | spring-projects__spring-framework | spring-jdbc/src/test/java/org/springframework/jdbc/object/StoredProcedureTests.java | {
"start": 2526,
"end": 6518
} | class ____ {
private Connection connection = mock();
private DataSource dataSource = mock();
private CallableStatement callableStatement = mock();
private boolean verifyClosedAfter = true;
@BeforeEach
void setup() throws Exception {
given(dataSource.getConnection()).willReturn(connection);
given(callableStatement.getConnection()).willReturn(connection);
}
@AfterEach
void verifyClosed() throws Exception {
if (verifyClosedAfter) {
verify(callableStatement).close();
verify(connection, atLeastOnce()).close();
}
}
@Test
void testNoSuchStoredProcedure() throws Exception {
SQLException sqlException = new SQLException(
"Syntax error or access violation exception", "42000");
given(callableStatement.execute()).willThrow(sqlException);
given(connection.prepareCall("{call " + NoSuchStoredProcedure.SQL + "()}")).willReturn(
callableStatement);
NoSuchStoredProcedure sproc = new NoSuchStoredProcedure(dataSource);
assertThatExceptionOfType(BadSqlGrammarException.class).isThrownBy(
sproc::execute);
}
private void testAddInvoice(final int amount, final int custid) {
AddInvoice adder = new AddInvoice(dataSource);
int id = adder.execute(amount, custid);
assertThat(id).isEqualTo(4);
}
private void testAddInvoiceUsingObjectArray(final int amount, final int custid) {
AddInvoiceUsingObjectArray adder = new AddInvoiceUsingObjectArray(dataSource);
int id = adder.execute(amount, custid);
assertThat(id).isEqualTo(5);
}
@Test
void testAddInvoices() throws Exception {
given(callableStatement.execute()).willReturn(false);
given(callableStatement.getUpdateCount()).willReturn(-1);
given(callableStatement.getObject(3)).willReturn(4);
given(connection.prepareCall("{call " + AddInvoice.SQL + "(?, ?, ?)}")
).willReturn(callableStatement);
testAddInvoice(1106, 3);
verify(callableStatement).setObject(1, 1106, Types.INTEGER);
verify(callableStatement).setObject(2, 3, Types.INTEGER);
verify(callableStatement).registerOutParameter(3, Types.INTEGER);
}
@Test
void testAddInvoicesUsingObjectArray() throws Exception {
given(callableStatement.execute()).willReturn(false);
given(callableStatement.getUpdateCount()).willReturn(-1);
given(callableStatement.getObject(3)).willReturn(5);
given(connection.prepareCall("{call " + AddInvoice.SQL + "(?, ?, ?)}")
).willReturn(callableStatement);
testAddInvoiceUsingObjectArray(1106, 4);
verify(callableStatement).setObject(1, 1106, Types.INTEGER);
verify(callableStatement).setObject(2, 4, Types.INTEGER);
verify(callableStatement).registerOutParameter(3, Types.INTEGER);
}
@Test
void testAddInvoicesWithinTransaction() throws Exception {
given(callableStatement.execute()).willReturn(false);
given(callableStatement.getUpdateCount()).willReturn(-1);
given(callableStatement.getObject(3)).willReturn(4);
given(connection.prepareCall("{call " + AddInvoice.SQL + "(?, ?, ?)}")).willReturn(callableStatement);
TransactionSynchronizationManager.bindResource(dataSource, new ConnectionHolder(connection));
try {
testAddInvoice(1106, 3);
verify(callableStatement).setObject(1, 1106, Types.INTEGER);
verify(callableStatement).setObject(2, 3, Types.INTEGER);
verify(callableStatement).registerOutParameter(3, Types.INTEGER);
verify(connection, never()).close();
}
finally {
TransactionSynchronizationManager.unbindResource(dataSource);
connection.close();
}
}
/**
* Confirm no connection was used to get metadata. Does not use superclass replay
* mechanism.
*/
@Test
void testStoredProcedureConfiguredViaJdbcTemplateWithCustomExceptionTranslator()
throws Exception {
given(callableStatement.execute()).willReturn(false);
given(callableStatement.getUpdateCount()).willReturn(-1);
given(callableStatement.getObject(2)).willReturn(5);
given(connection.prepareCall("{call " + StoredProcedureConfiguredViaJdbcTemplate.SQL + "(?, ?)}")).willReturn(callableStatement);
| StoredProcedureTests |
java | elastic__elasticsearch | x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexOperatorTests.java | {
"start": 5027,
"end": 24247
} | class ____ extends AsyncOperatorTestCase {
private static final int LOOKUP_SIZE = 1000;
private static final int LESS_THAN_VALUE = 40;
private final ThreadPool threadPool = threadPool();
private final Directory lookupIndexDirectory = newDirectory();
private final List<Releasable> releasables = new ArrayList<>();
private final boolean applyRightFilterAsJoinOnFilter;
private int numberOfJoinColumns; // we only allow 1 or 2 columns due to simpleInput() implementation
private EsqlBinaryComparison.BinaryComparisonOperation operation;
@ParametersFactory
public static Iterable<Object[]> parametersFactory() {
List<Object[]> operations = new ArrayList<>();
operations.add(new Object[] { null });
if (EsqlCapabilities.Cap.LOOKUP_JOIN_ON_BOOLEAN_EXPRESSION.isEnabled()) {
for (EsqlBinaryComparison.BinaryComparisonOperation operation : EsqlBinaryComparison.BinaryComparisonOperation.values()) {
// we skip NEQ because there are too many matches and the test can timeout
if (operation != EsqlBinaryComparison.BinaryComparisonOperation.NEQ) {
operations.add(new Object[] { operation });
}
}
}
return operations;
}
public LookupFromIndexOperatorTests(EsqlBinaryComparison.BinaryComparisonOperation operation) {
super();
this.operation = operation;
this.applyRightFilterAsJoinOnFilter = randomBoolean();
}
@Before
public void buildLookupIndex() throws IOException {
numberOfJoinColumns = 1 + randomInt(1); // 1 or 2 join columns
try (RandomIndexWriter writer = new RandomIndexWriter(random(), lookupIndexDirectory)) {
String suffix = (operation == null) ? "" : ("_" + "right");
for (int i = 0; i < LOOKUP_SIZE; i++) {
List<IndexableField> fields = new ArrayList<>();
fields.add(new LongField("match0" + suffix, i, Field.Store.NO));
if (numberOfJoinColumns == 2) {
fields.add(new LongField("match1" + suffix, i + 1, Field.Store.NO));
}
fields.add(new KeywordFieldMapper.KeywordField("lkwd", new BytesRef("l" + i), KeywordFieldMapper.Defaults.FIELD_TYPE));
fields.add(new IntField("lint", i, Field.Store.NO));
writer.addDocument(fields);
}
}
}
@Override
protected SourceOperator simpleInput(BlockFactory blockFactory, int size) {
if (numberOfJoinColumns == 1) {
return new SequenceLongBlockSourceOperator(blockFactory, LongStream.range(0, size).map(l -> l % LOOKUP_SIZE));
} else if (numberOfJoinColumns == 2) {
return new TupleLongLongBlockSourceOperator(
blockFactory,
LongStream.range(0, size).mapToObj(l -> Tuple.tuple(l % LOOKUP_SIZE, l % LOOKUP_SIZE + 1))
);
} else {
throw new IllegalStateException("numberOfJoinColumns must be 1 or 2, got: " + numberOfJoinColumns);
}
}
@Override
protected void assertSimpleOutput(List<Page> input, List<Page> results) {
/*
* We've configured there to be just a single result per input so the total
* row count is the same. But lookup cuts into pages of length 256 so the
* result is going to have more pages usually.
*/
int inputCount = input.stream().mapToInt(Page::getPositionCount).sum();
int outputCount = results.stream().mapToInt(Page::getPositionCount).sum();
if (operation == null || operation.equals(EsqlBinaryComparison.BinaryComparisonOperation.EQ)) {
assertThat(outputCount, equalTo(input.stream().mapToInt(Page::getPositionCount).sum()));
} else {
// For lookup join on non-equality, output count should be >= input count
assertThat("Output count should be >= input count for left outer join", outputCount, greaterThanOrEqualTo(inputCount));
}
for (Page r : results) {
assertThat(r.getBlockCount(), equalTo(numberOfJoinColumns + 2));
LongVector match = r.<LongBlock>getBlock(0).asVector();
BytesRefBlock lkwdBlock = r.getBlock(numberOfJoinColumns);
IntBlock lintBlock = r.getBlock(numberOfJoinColumns + 1);
for (int p = 0; p < r.getPositionCount(); p++) {
long m = match.getLong(p);
if (lkwdBlock.isNull(p) || lintBlock.isNull(p)) {
// If the joined values are null, this means no match was found (or it was filtered out)
// verify that both the columns are null
assertTrue("at " + p, lkwdBlock.isNull(p));
assertTrue("at " + p, lintBlock.isNull(p));
} else {
String joinedLkwd = lkwdBlock.getBytesRef(lkwdBlock.getFirstValueIndex(p), new BytesRef()).utf8ToString();
// there was a match, verify that the join on condition was satisfied
int joinedLint = lintBlock.getInt(lintBlock.getFirstValueIndex(p));
boolean conditionSatisfied = compare(m, joinedLint, operation);
assertTrue("Join condition not satisfied: " + m + " " + operation + " " + joinedLint, conditionSatisfied);
// Verify that the joined lkwd matches the lint value
assertThat(joinedLkwd, equalTo("l" + joinedLint));
}
}
}
}
/**
* Compares two values using the specified binary comparison operation.
*
* @param left the left operand
* @param right the right operand
* @param op the binary comparison operation (null means equality join)
* @return true if the comparison condition is satisfied, false otherwise
*/
private boolean compare(long left, long right, EsqlBinaryComparison.BinaryComparisonOperation op) {
if (op == null) {
// field based join is the same as equals comparison
op = EsqlBinaryComparison.BinaryComparisonOperation.EQ;
}
// Use the operator's fold method for comparison
Literal leftLiteral = new Literal(Source.EMPTY, left, DataType.LONG);
Literal rightLiteral = new Literal(Source.EMPTY, right, DataType.LONG);
EsqlBinaryComparison operatorInstance = op.buildNewInstance(Source.EMPTY, leftLiteral, rightLiteral);
Object result = operatorInstance.fold(FoldContext.small());
if (result instanceof Boolean) {
return (Boolean) result;
}
throw new IllegalArgumentException("Operator fold did not return a boolean");
}
@Override
protected Operator.OperatorFactory simple(SimpleOptions options) {
String sessionId = "test";
CancellableTask parentTask = new CancellableTask(0, "test", "test", "test", TaskId.EMPTY_TASK_ID, Map.of());
int maxOutstandingRequests = 1;
DataType inputDataType = DataType.LONG;
String lookupIndex = "idx";
List<NamedExpression> loadFields = List.of(
new ReferenceAttribute(Source.EMPTY, "lkwd", DataType.KEYWORD),
new ReferenceAttribute(Source.EMPTY, "lint", DataType.INTEGER)
);
List<MatchConfig> matchFields = new ArrayList<>();
String suffix = (operation == null) ? "" : ("_left");
for (int i = 0; i < numberOfJoinColumns; i++) {
String matchField = "match" + i + suffix;
matchFields.add(new MatchConfig(matchField, i, inputDataType));
}
Expression joinOnExpression = null;
FragmentExec rightPlanWithOptionalPreJoinFilter = buildLessThanFilter(LESS_THAN_VALUE);
if (operation != null) {
List<Expression> conditions = new ArrayList<>();
for (int i = 0; i < numberOfJoinColumns; i++) {
String matchFieldLeft = "match" + i + "_left";
String matchFieldRight = "match" + i + "_right";
FieldAttribute left = new FieldAttribute(
Source.EMPTY,
matchFieldLeft,
new EsField(matchFieldLeft, inputDataType, Map.of(), true, EsField.TimeSeriesFieldType.NONE)
);
FieldAttribute right = new FieldAttribute(
Source.EMPTY,
matchFieldRight,
new EsField(matchFieldRight.replace("left", "right"), inputDataType, Map.of(), true, EsField.TimeSeriesFieldType.NONE)
);
conditions.add(operation.buildNewInstance(Source.EMPTY, left, right));
}
if (applyRightFilterAsJoinOnFilter) {
if (rightPlanWithOptionalPreJoinFilter instanceof FragmentExec fragmentExec
&& fragmentExec.fragment() instanceof Filter filterPlan) {
conditions.add(filterPlan.condition());
rightPlanWithOptionalPreJoinFilter = null;
}
}
joinOnExpression = Predicates.combineAnd(conditions);
}
return new LookupFromIndexOperator.Factory(
matchFields,
sessionId,
parentTask,
maxOutstandingRequests,
this::lookupService,
lookupIndex,
lookupIndex,
loadFields,
Source.EMPTY,
rightPlanWithOptionalPreJoinFilter,
joinOnExpression
);
}
private FragmentExec buildLessThanFilter(int value) {
FieldAttribute filterAttribute = new FieldAttribute(
Source.EMPTY,
"lint",
new EsField("lint", DataType.INTEGER, Collections.emptyMap(), true, EsField.TimeSeriesFieldType.NONE)
);
Expression lessThan = new LessThan(Source.EMPTY, filterAttribute, new Literal(Source.EMPTY, value, DataType.INTEGER));
EsRelation esRelation = new EsRelation(Source.EMPTY, "test", IndexMode.LOOKUP, Map.of(), Map.of(), Map.of(), List.of());
Filter filter = new Filter(Source.EMPTY, esRelation, lessThan);
return new FragmentExec(filter);
}
@Override
protected Matcher<String> expectedDescriptionOfSimple() {
return expectedToStringOfSimple();
}
@Override
public void testSimpleDescription() {
Operator.OperatorFactory factory = simple();
String description = factory.describe();
assertThat(description, expectedDescriptionOfSimple());
try (Operator op = factory.get(driverContext())) {
// we use a special pattern here because the description can contain new lines for the right_pre_join_plan
String pattern = "^\\w*\\[[\\s\\S]*\\]$";
assertThat(description, matchesPattern(pattern));
}
}
@Override
protected Matcher<String> expectedToStringOfSimple() {
StringBuilder sb = new StringBuilder();
String suffix = (operation == null) ? "" : ("_left");
sb.append("LookupOperator\\[index=idx load_fields=\\[lkwd\\{r}#\\d+, lint\\{r}#\\d+] ");
for (int i = 0; i < numberOfJoinColumns; i++) {
// match_field=match<i>_left (index first, then suffix)
sb.append("input_type=LONG match_field=match").append(i).append(suffix).append(" inputChannel=").append(i).append(" ");
}
if (applyRightFilterAsJoinOnFilter && operation != null) {
// When applyRightFilterAsJoinOnFilter is true, right_pre_join_plan should be null
sb.append("right_pre_join_plan=null");
} else {
// Accept either the legacy physical plan rendering (FilterExec/EsQueryExec) or the new FragmentExec rendering
sb.append("right_pre_join_plan=(?:");
// Legacy pattern
sb.append("FilterExec\\[lint\\{f}#\\d+ < ")
.append(LESS_THAN_VALUE)
.append(
"\\[INTEGER]]\\n\\\\_EsQueryExec\\[test], indexMode\\[lookup],\\s*(?:query\\[\\]|\\[\\])?,?\\s*"
+ "limit\\[\\],?\\s*sort\\[(?:\\[\\])?\\]\\s*estimatedRowSize\\[null\\]\\s*queryBuilderAndTags \\[(?:\\[\\]\\])\\]"
);
sb.append("|");
// New FragmentExec pattern - match the actual output format
sb.append("FragmentExec\\[filter=null, estimatedRowSize=\\d+, reducer=\\[\\], fragment=\\[<>\\n")
.append("Filter\\[lint\\{f}#\\d+ < ")
.append(LESS_THAN_VALUE)
.append("\\[INTEGER]]\\n")
.append("\\\\_EsRelation\\[test]\\[LOOKUP]\\[\\]<>\\]\\]");
sb.append(")");
}
// Accept join_on_expression=null or a valid join predicate
if (applyRightFilterAsJoinOnFilter && operation != null) {
// When applyRightFilterAsJoinOnFilter is true and operation is not null, the join expression includes the filter condition
sb.append(
" join_on_expression=(match\\d+left [=!<>]+ match\\d+right( "
+ "AND match\\d+left [=!<>]+ match\\d+right)* AND lint\\{f}#\\d+ < "
).append(LESS_THAN_VALUE).append("\\[INTEGER]|)\\]");
} else {
// Standard pattern for other cases
sb.append(" join_on_expression=(null|match\\d+left [=!<>]+ match\\d+right( AND match\\d+left [=!<>]+ match\\d+right)*|)\\]");
}
return matchesPattern(sb.toString());
}
private LookupFromIndexService lookupService(DriverContext mainContext) {
boolean beCranky = mainContext.bigArrays().breakerService() instanceof CrankyCircuitBreakerService;
DiscoveryNode localNode = DiscoveryNodeUtils.create("node", "node");
var builtInClusterSettings = new HashSet<>(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
builtInClusterSettings.addAll(EsqlFlags.ALL_ESQL_FLAGS_SETTINGS);
ClusterService clusterService = ClusterServiceUtils.createClusterService(
threadPool,
localNode,
Settings.builder()
// Reserve 0 bytes in the sub-driver so we are more likely to hit the cranky breaker in it.
.put(BlockFactory.LOCAL_BREAKER_OVER_RESERVED_SIZE_SETTING, ByteSizeValue.ofKb(0))
.put(BlockFactory.LOCAL_BREAKER_OVER_RESERVED_MAX_SIZE_SETTING, ByteSizeValue.ofKb(0))
.build(),
new ClusterSettings(Settings.EMPTY, builtInClusterSettings)
);
IndicesService indicesService = mock(IndicesService.class);
IndexNameExpressionResolver indexNameExpressionResolver = TestIndexNameExpressionResolver.newInstance();
releasables.add(clusterService::stop);
final var projectId = randomProjectIdOrDefault();
ClusterServiceUtils.setState(clusterService, ClusterStateCreationUtils.state(projectId, "idx", 1, 1));
if (beCranky) {
logger.info("building a cranky lookup");
}
DriverContext ctx = beCranky ? crankyDriverContext() : driverContext();
BigArrays bigArrays = ctx.bigArrays();
BlockFactory blockFactory = ctx.blockFactory();
return new LookupFromIndexService(
clusterService,
indicesService,
lookupShardContextFactory(),
transportService(clusterService),
indexNameExpressionResolver,
bigArrays,
blockFactory,
TestProjectResolvers.singleProject(projectId)
);
}
private ThreadPool threadPool() {
return new TestThreadPool(
getTestClass().getSimpleName(),
new FixedExecutorBuilder(
Settings.EMPTY,
EsqlPlugin.ESQL_WORKER_THREAD_POOL_NAME,
1,
1024,
"esql",
EsExecutors.TaskTrackingConfig.DEFAULT
)
);
}
private TransportService transportService(ClusterService clusterService) {
MockTransport mockTransport = new MockTransport();
releasables.add(mockTransport);
TransportService transportService = mockTransport.createTransportService(
Settings.EMPTY,
threadPool,
TransportService.NOOP_TRANSPORT_INTERCEPTOR,
boundAddress -> clusterService.localNode(),
clusterService.getClusterSettings(),
Set.of()
);
releasables.add(transportService);
transportService.start();
transportService.acceptIncomingRequests();
return transportService;
}
private AbstractLookupService.LookupShardContextFactory lookupShardContextFactory() {
return shardId -> {
MapperServiceTestCase mapperHelper = new MapperServiceTestCase() {
};
String suffix = (operation == null) ? "" : ("_right");
StringBuilder props = new StringBuilder();
props.append(String.format(Locale.ROOT, "\"match0%s\": { \"type\": \"long\" }", suffix));
if (numberOfJoinColumns == 2) {
props.append(String.format(Locale.ROOT, ", \"match1%s\": { \"type\": \"long\" }", suffix));
}
props.append(", \"lkwd\": { \"type\": \"keyword\" }, \"lint\": { \"type\": \"integer\" }");
String mapping = String.format(Locale.ROOT, "{\n \"doc\": { \"properties\": { %s } }\n}", props.toString());
MapperService mapperService = mapperHelper.createMapperService(mapping);
DirectoryReader reader = DirectoryReader.open(lookupIndexDirectory);
SearchExecutionContext executionCtx = mapperHelper.createSearchExecutionContext(mapperService, newSearcher(reader));
var ctx = new EsPhysicalOperationProviders.DefaultShardContext(0, new NoOpReleasable(), executionCtx, AliasFilter.EMPTY);
return new AbstractLookupService.LookupShardContext(ctx, executionCtx, () -> {
try {
IOUtils.close(reader, mapperService);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
});
};
}
@After
public void closeIndex() throws IOException {
IOUtils.close(lookupIndexDirectory);
}
@After
public void release() {
Releasables.close(Releasables.wrap(releasables.reversed()), () -> terminate(threadPool));
}
@Override
protected MapMatcher extendStatusMatcher(MapMatcher mapMatcher, List<Page> input, List<Page> output) {
var totalInputRows = input.stream().mapToInt(Page::getPositionCount).sum();
var totalOutputRows = output.stream().mapToInt(Page::getPositionCount).sum();
return mapMatcher.entry("total_rows", totalInputRows).entry("pages_emitted", output.size()).entry("rows_emitted", totalOutputRows);
}
@Override
public void testSimpleCircuitBreaking() {
// only test field based join and EQ to prevents timeouts in Ci
if (operation == null || operation.equals(EsqlBinaryComparison.BinaryComparisonOperation.EQ)) {
super.testSimpleCircuitBreaking();
}
}
}
| LookupFromIndexOperatorTests |
java | apache__camel | components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/spring/processor/SpringSetBodyTest.java | {
"start": 1117,
"end": 1669
} | class ____ extends ContextTestSupport {
@Test
public void testSendAMessageWhosInBodyIsTransformed() throws Exception {
MockEndpoint resultEndpoint = getMockEndpoint("mock:end");
resultEndpoint.expectedBodiesReceived("Hello World!");
sendBody("direct:start", "Hello");
resultEndpoint.assertIsSatisfied();
}
@Override
protected CamelContext createCamelContext() throws Exception {
return createSpringCamelContext(this, "org/apache/camel/spring/processor/setBody.xml");
}
}
| SpringSetBodyTest |
java | quarkusio__quarkus | core/builder/src/main/java/io/quarkus/builder/BuildStepBuilder.java | {
"start": 514,
"end": 11043
} | class ____ {
private static final StackTraceElement[] EMPTY_STACK_TRACE = new StackTraceElement[0];
private final BuildChainBuilder buildChainBuilder;
private final Map<ItemId, Consume> consumes = new HashMap<>();
private final Map<ItemId, Produce> produces = new HashMap<>();
private BuildStep buildStep;
BuildStepBuilder(final BuildChainBuilder buildChainBuilder) {
this.buildChainBuilder = buildChainBuilder;
}
/**
* Set the build step for this builder. If no build step is specified, then this step will be excluded from
* the final chain.
*
* @param buildStep the build step
* @return this builder
*/
public BuildStepBuilder setBuildStep(final BuildStep buildStep) {
this.buildStep = buildStep;
return this;
}
/**
* This build step should complete before any build steps which consume the given item {@code type} are initiated.
* If no such build steps exist, no ordering constraint is enacted.
*
* @param type the item type (must not be {@code null})
* @return this builder
*/
public BuildStepBuilder beforeConsume(Class<? extends BuildItem> type) {
Assert.checkNotNullParam("type", type);
addProduces(new ItemId(type), Constraint.ORDER_ONLY, ProduceFlags.NONE);
return this;
}
/**
* This build step should complete before any build steps which consume the given item {@code type} are initiated.
* If no such build steps exist, no ordering constraint is enacted.
*
* @param type the item type (must not be {@code null})
* @param flag the producer flag to apply (must not be {@code null})
* @return this builder
*/
public BuildStepBuilder beforeConsume(Class<? extends BuildItem> type, ProduceFlag flag) {
Assert.checkNotNullParam("type", type);
Assert.checkNotNullParam("flag", flag);
addProduces(new ItemId(type), Constraint.ORDER_ONLY, ProduceFlags.of(flag));
return this;
}
/**
* This build step should be initiated after any build steps which produce the given item {@code type} are completed.
* If no such build steps exist, no ordering constraint is enacted.
*
* @param type the item type (must not be {@code null})
* @return this builder
*/
public BuildStepBuilder afterProduce(Class<? extends BuildItem> type) {
Assert.checkNotNullParam("type", type);
addConsumes(new ItemId(type), Constraint.ORDER_ONLY, ConsumeFlags.of(ConsumeFlag.OPTIONAL));
return this;
}
/**
* Similarly to {@link #beforeConsume(Class)}, establish that this build step must come before the consumer(s) of the
* given item {@code type}; however, only one {@code producer} may exist for the given item. In addition, the
* build step may produce an actual value for this item, which will be shared to all consumers during deployment.
*
* @param type the item type (must not be {@code null})
* @return this builder
*/
public BuildStepBuilder produces(Class<? extends BuildItem> type) {
Assert.checkNotNullParam("type", type);
checkType(type);
addProduces(new ItemId(type), Constraint.REAL, ProduceFlags.NONE);
return this;
}
/**
* Similarly to {@link #beforeConsume(Class)}, establish that this build step must come before the consumer(s) of the
* given item {@code type}; however, only one {@code producer} may exist for the given item. In addition, the
* build step may produce an actual value for this item, which will be shared to all consumers during deployment.
*
* @param type the item type (must not be {@code null})
* @param flag the producer flag to apply (must not be {@code null})
* @return this builder
*/
public BuildStepBuilder produces(Class<? extends BuildItem> type, ProduceFlag flag) {
Assert.checkNotNullParam("type", type);
Assert.checkNotNullParam("flag", flag);
checkType(type);
addProduces(new ItemId(type), Constraint.REAL, ProduceFlags.of(flag));
return this;
}
/**
* Similarly to {@link #beforeConsume(Class)}, establish that this build step must come before the consumer(s) of the
* given item {@code type}; however, only one {@code producer} may exist for the given item. In addition, the
* build step may produce an actual value for this item, which will be shared to all consumers during deployment.
*
* @param type the item type (must not be {@code null})
* @param flag1 the first producer flag to apply (must not be {@code null})
* @param flag2 the second producer flag to apply (must not be {@code null})
* @return this builder
*/
public BuildStepBuilder produces(Class<? extends BuildItem> type, ProduceFlag flag1, ProduceFlag flag2) {
Assert.checkNotNullParam("type", type);
Assert.checkNotNullParam("flag", flag1);
checkType(type);
addProduces(new ItemId(type), Constraint.REAL, ProduceFlags.of(flag1).with(flag2));
return this;
}
/**
* Similarly to {@link #beforeConsume(Class)}, establish that this build step must come before the consumer(s) of the
* given item {@code type}; however, only one {@code producer} may exist for the given item. In addition, the
* build step may produce an actual value for this item, which will be shared to all consumers during deployment.
*
* @param type the item type (must not be {@code null})
* @param flags the producer flag to apply (must not be {@code null})
* @return this builder
*/
public BuildStepBuilder produces(Class<? extends BuildItem> type, ProduceFlags flags) {
Assert.checkNotNullParam("type", type);
Assert.checkNotNullParam("flag", flags);
checkType(type);
addProduces(new ItemId(type), Constraint.REAL, flags);
return this;
}
/**
* This build step consumes the given produced item. The item must be produced somewhere in the chain. If
* no such producer exists, the chain will not be constructed; instead, an error will be raised.
*
* @param type the item type (must not be {@code null})
* @return this builder
*/
public BuildStepBuilder consumes(Class<? extends BuildItem> type) {
Assert.checkNotNullParam("type", type);
checkType(type);
addConsumes(new ItemId(type), Constraint.REAL, ConsumeFlags.NONE);
return this;
}
/**
* This build step consumes the given produced item. The item must be produced somewhere in the chain. If
* no such producer exists, the chain will not be constructed; instead, an error will be raised.
*
* @param type the item type (must not be {@code null})
* @param flags a set of flags which modify the consume operation (must not be {@code null})
* @return this builder
*/
public BuildStepBuilder consumes(Class<? extends BuildItem> type, ConsumeFlags flags) {
Assert.checkNotNullParam("type", type);
checkType(type);
addConsumes(new ItemId(type), Constraint.REAL, flags);
return this;
}
/**
* Build this step into the chain.
*
* @return the chain builder that this step was added to
*/
public BuildChainBuilder build() {
final BuildChainBuilder chainBuilder = this.buildChainBuilder;
if (produces.isEmpty()) {
throw new IllegalArgumentException(
"Build step '" + buildStep.getId()
+ "' does not produce any build item and thus will never get executed."
+ " Either change the return type of the method to a build item type,"
+ " add a parameter of type BuildProducer<[some build item type]>/Consumer<[some build item type]>,"
+ " or annotate the method with @Produces."
+ " Use @Produce(ArtifactResultBuildItem.class) if you want to always execute this step.");
}
if (BuildChainBuilder.LOG_CONFLICT_CAUSING) {
chainBuilder.addStep(this, new Exception().getStackTrace());
} else {
chainBuilder.addStep(this, EMPTY_STACK_TRACE);
}
return chainBuilder;
}
/**
* Build this step into the chain if the supplier returns {@code true}.
*
* @param supp the {@code boolean} supplier (must not be {@code null})
* @return the chain builder that this step was added to, or {@code null} if it was not added
*/
public BuildChainBuilder buildIf(BooleanSupplier supp) {
return supp.getAsBoolean() ? build() : null;
}
// -- //
BuildStep getBuildStep() {
return buildStep;
}
private void addConsumes(final ItemId itemId, final Constraint constraint, final ConsumeFlags flags) {
Assert.checkNotNullParam("flags", flags);
consumes.compute(itemId,
(id, c) -> c == null ? new Consume(this, itemId, constraint, flags) : c.combine(constraint, flags));
}
private void addProduces(final ItemId itemId, final Constraint constraint, final ProduceFlags flags) {
produces.compute(itemId,
(id, p) -> p == null ? new Produce(this, itemId, constraint, flags) : p.combine(constraint, flags));
}
Map<ItemId, Consume> getConsumes() {
return consumes;
}
Map<ItemId, Produce> getProduces() {
return produces;
}
Set<ItemId> getRealConsumes() {
final HashMap<ItemId, Consume> map = new HashMap<>(consumes);
map.entrySet().removeIf(e -> e.getValue().constraint() == Constraint.ORDER_ONLY);
return map.keySet();
}
Set<ItemId> getRealProduces() {
final HashMap<ItemId, Produce> map = new HashMap<>(produces);
map.entrySet().removeIf(e -> e.getValue().getConstraint() == Constraint.ORDER_ONLY);
return map.keySet();
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("BuildStep [");
builder.append(buildStep);
builder.append("]");
return builder.toString();
}
private void checkType(Class<?> type) {
int modifiers = type.getModifiers();
if (Modifier.isInterface(modifiers) || Modifier.isAbstract(modifiers)) {
throw new IllegalArgumentException("Cannot consume/produce | BuildStepBuilder |
java | apache__camel | components/camel-twilio/src/generated/java/org/apache/camel/component/twilio/UsageRecordDailyEndpointConfigurationConfigurer.java | {
"start": 739,
"end": 3412
} | class ____ extends org.apache.camel.support.component.PropertyConfigurerSupport implements GeneratedPropertyConfigurer, ExtendedPropertyConfigurerGetter {
private static final Map<String, Object> ALL_OPTIONS;
static {
Map<String, Object> map = new CaseInsensitiveMap();
map.put("ApiName", org.apache.camel.component.twilio.internal.TwilioApiName.class);
map.put("MethodName", java.lang.String.class);
map.put("PathAccountSid", java.lang.String.class);
ALL_OPTIONS = map;
}
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
org.apache.camel.component.twilio.UsageRecordDailyEndpointConfiguration target = (org.apache.camel.component.twilio.UsageRecordDailyEndpointConfiguration) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "apiname":
case "apiName": target.setApiName(property(camelContext, org.apache.camel.component.twilio.internal.TwilioApiName.class, value)); return true;
case "methodname":
case "methodName": target.setMethodName(property(camelContext, java.lang.String.class, value)); return true;
case "pathaccountsid":
case "pathAccountSid": target.setPathAccountSid(property(camelContext, java.lang.String.class, value)); return true;
default: return false;
}
}
@Override
public Map<String, Object> getAllOptions(Object target) {
return ALL_OPTIONS;
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "apiname":
case "apiName": return org.apache.camel.component.twilio.internal.TwilioApiName.class;
case "methodname":
case "methodName": return java.lang.String.class;
case "pathaccountsid":
case "pathAccountSid": return java.lang.String.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
org.apache.camel.component.twilio.UsageRecordDailyEndpointConfiguration target = (org.apache.camel.component.twilio.UsageRecordDailyEndpointConfiguration) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "apiname":
case "apiName": return target.getApiName();
case "methodname":
case "methodName": return target.getMethodName();
case "pathaccountsid":
case "pathAccountSid": return target.getPathAccountSid();
default: return null;
}
}
}
| UsageRecordDailyEndpointConfigurationConfigurer |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/physical/stream/FlinkDuplicateChangesTraitInitProgram.java | {
"start": 1518,
"end": 2243
} | class ____
implements FlinkOptimizeProgram<StreamOptimizeContext> {
@Override
public RelNode optimize(RelNode root, StreamOptimizeContext context) {
DuplicateChangesTrait trait;
if (isSink(root)) {
trait = DuplicateChangesTrait.NONE;
} else if (context.isAllowDuplicateChanges()) {
trait = DuplicateChangesTrait.ALLOW;
} else {
trait = DuplicateChangesTrait.DISALLOW;
}
return root.copy(root.getTraitSet().plus(trait), root.getInputs());
}
private boolean isSink(RelNode root) {
return root instanceof StreamPhysicalSink || root instanceof StreamPhysicalLegacySink;
}
}
| FlinkDuplicateChangesTraitInitProgram |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/NatsEndpointBuilderFactory.java | {
"start": 23078,
"end": 37042
} | interface ____
extends
EndpointConsumerBuilder {
default NatsEndpointConsumerBuilder basic() {
return (NatsEndpointConsumerBuilder) this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedNatsEndpointConsumerBuilder bridgeErrorHandler(boolean bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedNatsEndpointConsumerBuilder bridgeErrorHandler(String bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option is a: <code>org.apache.camel.spi.ExceptionHandler</code>
* type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedNatsEndpointConsumerBuilder exceptionHandler(org.apache.camel.spi.ExceptionHandler exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option will be converted to a
* <code>org.apache.camel.spi.ExceptionHandler</code> type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedNatsEndpointConsumerBuilder exceptionHandler(String exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option is a: <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedNatsEndpointConsumerBuilder exchangePattern(org.apache.camel.ExchangePattern exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option will be converted to a
* <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedNatsEndpointConsumerBuilder exchangePattern(String exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
/**
* Reference an already instantiated connection to Nats server.
*
* The option is a: <code>io.nats.client.Connection</code> type.
*
* Group: advanced
*
* @param connection the value to set
* @return the dsl builder
*/
default AdvancedNatsEndpointConsumerBuilder connection(io.nats.client.Connection connection) {
doSetProperty("connection", connection);
return this;
}
/**
* Reference an already instantiated connection to Nats server.
*
* The option will be converted to a
* <code>io.nats.client.Connection</code> type.
*
* Group: advanced
*
* @param connection the value to set
* @return the dsl builder
*/
default AdvancedNatsEndpointConsumerBuilder connection(String connection) {
doSetProperty("connection", connection);
return this;
}
/**
* Sets a custom ConsumerConfiguration object for the JetStream
* consumer. This is an advanced option typically used when you need to
* configure properties not exposed as simple Camel URI parameters. When
* set, this object will be used to build the final consumer
* subscription options.
*
* The option is a:
* <code>io.nats.client.api.ConsumerConfiguration</code> type.
*
* Group: advanced
*
* @param consumerConfiguration the value to set
* @return the dsl builder
*/
default AdvancedNatsEndpointConsumerBuilder consumerConfiguration(io.nats.client.api.ConsumerConfiguration consumerConfiguration) {
doSetProperty("consumerConfiguration", consumerConfiguration);
return this;
}
/**
* Sets a custom ConsumerConfiguration object for the JetStream
* consumer. This is an advanced option typically used when you need to
* configure properties not exposed as simple Camel URI parameters. When
* set, this object will be used to build the final consumer
* subscription options.
*
* The option will be converted to a
* <code>io.nats.client.api.ConsumerConfiguration</code> type.
*
* Group: advanced
*
* @param consumerConfiguration the value to set
* @return the dsl builder
*/
default AdvancedNatsEndpointConsumerBuilder consumerConfiguration(String consumerConfiguration) {
doSetProperty("consumerConfiguration", consumerConfiguration);
return this;
}
/**
* Sets the name to assign to the JetStream durable consumer. Setting
* this value makes the consumer durable. The value is used to set the
* durable() field in the underlying NATS ConsumerConfiguration.Builder.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: advanced
*
* @param durableName the value to set
* @return the dsl builder
*/
default AdvancedNatsEndpointConsumerBuilder durableName(String durableName) {
doSetProperty("durableName", durableName);
return this;
}
/**
* To use a custom header filter strategy.
*
* The option is a:
* <code>org.apache.camel.spi.HeaderFilterStrategy</code> type.
*
* Group: advanced
*
* @param headerFilterStrategy the value to set
* @return the dsl builder
*/
default AdvancedNatsEndpointConsumerBuilder headerFilterStrategy(org.apache.camel.spi.HeaderFilterStrategy headerFilterStrategy) {
doSetProperty("headerFilterStrategy", headerFilterStrategy);
return this;
}
/**
* To use a custom header filter strategy.
*
* The option will be converted to a
* <code>org.apache.camel.spi.HeaderFilterStrategy</code> type.
*
* Group: advanced
*
* @param headerFilterStrategy the value to set
* @return the dsl builder
*/
default AdvancedNatsEndpointConsumerBuilder headerFilterStrategy(String headerFilterStrategy) {
doSetProperty("headerFilterStrategy", headerFilterStrategy);
return this;
}
/**
* Sets whether to operate JetStream requests asynchronously.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param jetstreamAsync the value to set
* @return the dsl builder
*/
default AdvancedNatsEndpointConsumerBuilder jetstreamAsync(boolean jetstreamAsync) {
doSetProperty("jetstreamAsync", jetstreamAsync);
return this;
}
/**
* Sets whether to operate JetStream requests asynchronously.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param jetstreamAsync the value to set
* @return the dsl builder
*/
default AdvancedNatsEndpointConsumerBuilder jetstreamAsync(String jetstreamAsync) {
doSetProperty("jetstreamAsync", jetstreamAsync);
return this;
}
/**
* Sets the consumer subscription type for JetStream. Set to true to use
* a Pull Subscription (consumer explicitly requests messages). Set to
* false to use a Push Subscription (messages are automatically
* delivered).
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param pullSubscription the value to set
* @return the dsl builder
*/
default AdvancedNatsEndpointConsumerBuilder pullSubscription(boolean pullSubscription) {
doSetProperty("pullSubscription", pullSubscription);
return this;
}
/**
* Sets the consumer subscription type for JetStream. Set to true to use
* a Pull Subscription (consumer explicitly requests messages). Set to
* false to use a Push Subscription (messages are automatically
* delivered).
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param pullSubscription the value to set
* @return the dsl builder
*/
default AdvancedNatsEndpointConsumerBuilder pullSubscription(String pullSubscription) {
doSetProperty("pullSubscription", pullSubscription);
return this;
}
/**
* Whether or not connection trace messages should be printed to
* standard out for fine grained debugging of connection issues.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param traceConnection the value to set
* @return the dsl builder
*/
default AdvancedNatsEndpointConsumerBuilder traceConnection(boolean traceConnection) {
doSetProperty("traceConnection", traceConnection);
return this;
}
/**
* Whether or not connection trace messages should be printed to
* standard out for fine grained debugging of connection issues.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param traceConnection the value to set
* @return the dsl builder
*/
default AdvancedNatsEndpointConsumerBuilder traceConnection(String traceConnection) {
doSetProperty("traceConnection", traceConnection);
return this;
}
}
/**
* Builder for endpoint producers for the Nats component.
*/
public | AdvancedNatsEndpointConsumerBuilder |
java | apache__spark | core/src/test/java/org/apache/spark/memory/TestPartialSpillingMemoryConsumer.java | {
"start": 1024,
"end": 1666
} | class ____ extends TestMemoryConsumer {
private long spilledBytes = 0L;
public TestPartialSpillingMemoryConsumer(TaskMemoryManager memoryManager, MemoryMode mode) {
super(memoryManager, mode);
}
public TestPartialSpillingMemoryConsumer(TaskMemoryManager memoryManager) {
super(memoryManager);
}
@Override
public long spill(long size, MemoryConsumer trigger) throws IOException {
long used = getUsed();
long released = Math.min(used, size);
free(released);
spilledBytes += released;
return released;
}
public long getSpilledBytes() {
return spilledBytes;
}
}
| TestPartialSpillingMemoryConsumer |
java | lettuce-io__lettuce-core | src/test/java/io/lettuce/core/TestRedisPublisher.java | {
"start": 251,
"end": 790
} | class ____<K, V, T> extends RedisPublisher<K, V, T> {
public TestRedisPublisher(RedisCommand<K, V, T> staticCommand, StatefulConnection<K, V> connection, boolean dissolve) {
super(staticCommand, connection, dissolve, ImmediateEventExecutor.INSTANCE);
}
public TestRedisPublisher(Supplier<RedisCommand<K, V, T>> redisCommandSupplier, StatefulConnection<K, V> connection,
boolean dissolve) {
super(redisCommandSupplier, connection, dissolve, ImmediateEventExecutor.INSTANCE);
}
}
| TestRedisPublisher |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/onetoone/flush/DirtyFlushTest.java | {
"start": 2216,
"end": 2366
} | class ____ {
@Id
int id;
@Version
int version;
@OneToOne // internally Hibernate will use `@ManyToOne` for this field
User user;
}
}
| Profile |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/serializer/filters/ValueFilterTest_field_long.java | {
"start": 364,
"end": 3652
} | class ____ extends TestCase {
public void test_valuefilter() throws Exception {
ValueFilter filter = new ValueFilter() {
public Object process(Object source, String name, Object value) {
if (name.equals("id")) {
return "AAA";
}
return value;
}
};
SerializeWriter out = new SerializeWriter();
JSONSerializer serializer = new JSONSerializer(out);
serializer.getValueFilters().add(filter);
Bean a = new Bean();
serializer.write(a);
String text = out.toString();
Assert.assertEquals("{\"id\":\"AAA\"}", text);
}
public void test_toJSONString() throws Exception {
ValueFilter filter = new ValueFilter() {
public Object process(Object source, String name, Object value) {
if (name.equals("id")) {
return "AAA";
}
return value;
}
};
Assert.assertEquals("{\"id\":\"AAA\"}", JSON.toJSONString(new Bean(), filter));
}
public void test_valuefilter_1() throws Exception {
ValueFilter filter = new ValueFilter() {
public Object process(Object source, String name, Object value) {
if (name.equals("name")) {
return "AAA";
}
return value;
}
};
SerializeWriter out = new SerializeWriter();
JSONSerializer serializer = new JSONSerializer(out);
serializer.getValueFilters().add(filter);
Bean a = new Bean();
serializer.write(a);
String text = out.toString();
Assert.assertEquals("{\"id\":0,\"name\":\"AAA\"}", text);
}
public void test_valuefilter_2() throws Exception {
ValueFilter filter = new ValueFilter() {
public Object process(Object source, String name, Object value) {
if (name.equals("name")) {
return "AAA";
}
return value;
}
};
SerializeWriter out = new SerializeWriter();
JSONSerializer serializer = new JSONSerializer(out);
serializer.getValueFilters().add(filter);
Map<String, Object> map = new HashMap<String, Object>();
map.put("name", null);
serializer.write(map);
String text = out.toString();
Assert.assertEquals("{\"name\":\"AAA\"}", text);
}
public void test_valuefilter_3() throws Exception {
ValueFilter filter = new ValueFilter() {
public Object process(Object source, String name, Object value) {
if (name.equals("name")) {
return null;
}
return value;
}
};
SerializeWriter out = new SerializeWriter();
JSONSerializer serializer = new JSONSerializer(out);
serializer.getValueFilters().add(filter);
Map<String, Object> map = new HashMap<String, Object>();
map.put("name", "AA");
serializer.write(map);
String text = out.toString();
Assert.assertEquals("{}", text);
}
public static | ValueFilterTest_field_long |
java | apache__camel | components/camel-box/camel-box-component/src/test/java/org/apache/camel/component/box/BoxFoldersManagerIT.java | {
"start": 1837,
"end": 13172
} | class ____ extends AbstractBoxITSupport {
private static final Logger LOG = LoggerFactory.getLogger(BoxFoldersManagerIT.class);
private static final String PATH_PREFIX = BoxApiCollection.getCollection()
.getApiName(BoxFoldersManagerApiMethod.class).getName();
private static final String CAMEL_TEST_FOLDER = "CamelTestFolder";
private static final String CAMEL_TEST_FOLDER_DESCRIPTION = "This is a description of CamelTestFolder";
private static final String CAMEL_TEST_COPY_FOLDER = BoxFoldersManagerIT.CAMEL_TEST_FOLDER + "_Copy";
private static final String CAMEL_TEST_MOVE_FOLDER = BoxFoldersManagerIT.CAMEL_TEST_FOLDER + "_Move";
private static final String CAMEL_TEST_RENAME_FOLDER = BoxFoldersManagerIT.CAMEL_TEST_FOLDER
+ "_Rename";
private static final String CAMEL_TEST_ROOT_FOLDER_ID = "0";
private static final String CAMEL_TEST_DESTINATION_FOLDER_ID = "0";
@Test
public void testCreateFolder() {
// delete folder created in test setup.
deleteTestFolder();
final Map<String, Object> headers = new HashMap<>();
// parameter type is String
headers.put("CamelBox.parentFolderId", "0");
// parameter type is String
headers.put("CamelBox.folderName", CAMEL_TEST_FOLDER);
testFolder = requestBodyAndHeaders("direct://CREATEFOLDER", null, headers);
assertNotNull(testFolder, "createFolder result");
assertEquals(CAMEL_TEST_FOLDER, testFolder.getInfo().getName(), "createFolder folder name");
LOG.debug("createFolder: {}", testFolder);
}
@Test
public void testCreateFolderByPath() {
// delete folder created in test setup.
deleteTestFolder();
final Map<String, Object> headers = new HashMap<>();
// parameter type is String
headers.put("CamelBox.parentFolderId", "0");
// parameter type is String[]
headers.put("CamelBox.path", new String[] { CAMEL_TEST_FOLDER });
testFolder = requestBodyAndHeaders("direct://CREATEFOLDER", null, headers);
assertNotNull(testFolder, "createFolder result");
assertEquals(CAMEL_TEST_FOLDER, testFolder.getInfo().getName(), "createFolder folder name");
LOG.debug("createFolder: {}", testFolder);
}
@Test
public void testDeleteFolder() {
// using String message body for single parameter "folderId"
requestBody("direct://DELETEFOLDER", testFolder.getID());
}
/** Copies the test folder to the root under a new name, then deletes the copy. */
@Test
public void testCopyFolder() {
    com.box.sdk.BoxFolder result = null;
    try {
        final Map<String, Object> headers = new HashMap<>();
        // parameter type is String
        headers.put("CamelBox.folderId", testFolder.getID());
        // parameter type is String
        headers.put("CamelBox.destinationFolderId", CAMEL_TEST_DESTINATION_FOLDER_ID);
        // parameter type is String
        headers.put("CamelBox.newName", CAMEL_TEST_COPY_FOLDER);
        result = requestBodyAndHeaders("direct://COPYFOLDER", null, headers);
        assertNotNull(result, "copyFolder result");
        assertEquals(CAMEL_TEST_COPY_FOLDER, result.getInfo().getName(), "copyFolder folder name");
        LOG.debug("copyFolder: {}", result);
    } finally {
        if (result != null) {
            try {
                result.delete(true);
            } catch (Exception t) {
                // best-effort cleanup of the copied folder; failures here must not fail the test
            }
        }
    }
}
/** Creates a collaborators-only shared link on the test folder with default permissions. */
@Test
public void testCreateSharedLink() {
    final Map<String, Object> headers = new HashMap<>();
    // parameter type is String
    headers.put("CamelBox.folderId", testFolder.getID());
    // parameter type is com.box.sdk.BoxSharedLink.Access
    headers.put("CamelBox.access", BoxSharedLink.Access.COLLABORATORS);
    // parameter type is java.util.Date; null means the link never expires
    headers.put("CamelBox.unshareDate", null);
    // parameter type is com.box.sdk.BoxSharedLink.Permissions
    headers.put("CamelBox.permissions", new BoxSharedLink.Permissions());
    final com.box.sdk.BoxSharedLink result = requestBodyAndHeaders("direct://CREATEFOLDERSHAREDLINK", null,
            headers);
    assertNotNull(result, "createFolderSharedLink result");
    LOG.debug("createFolderSharedLink: {}", result);
}
/** Looks up the test folder by path and verifies it resolves to the same folder id. */
@Test
public void testGetFolder() {
    // using String[] message body for single parameter "path"
    final com.box.sdk.BoxFolder result = requestBody("direct://GETFOLDER", new String[] { CAMEL_TEST_FOLDER });
    assertNotNull(result, "getFolder result");
    assertEquals(testFolder.getID(), result.getID(), "getFolder folder id");
    LOG.debug("getFolder: {}", result);
}
/** Fetches folder info restricted to the "name" field and verifies the name. */
@Test
public void testGetFolderInfo() {
    final Map<String, Object> headers = new HashMap<>();
    // parameter type is String
    headers.put("CamelBox.folderId", testFolder.getID());
    // parameter type is String[]; only the "name" field is requested
    headers.put("CamelBox.fields", new String[] { "name" });
    final com.box.sdk.BoxFolder.Info result = requestBodyAndHeaders("direct://GETFOLDERINFO", null, headers);
    assertNotNull(result, "getFolderInfo result");
    assertNotNull(result.getName(), "getFolderInfo result.getName()");
    assertEquals(CAMEL_TEST_FOLDER, result.getName(), "getFolderInfo info name");
    LOG.debug("getFolderInfo: {}", result);
}
/** Lists the items of the root folder with no paging or field restrictions. */
@Test
public void testGetFolderItems() {
    final Map<String, Object> headers = new HashMap<>();
    // parameter type is String
    headers.put("CamelBox.folderId", CAMEL_TEST_ROOT_FOLDER_ID);
    // parameter type is Long; null means no paging offset
    headers.put("CamelBox.offset", null);
    // parameter type is Long; null means no paging limit
    headers.put("CamelBox.limit", null);
    // parameter type is String[]; null means all fields
    headers.put("CamelBox.fields", null);
    @SuppressWarnings("rawtypes")
    final java.util.Collection result = requestBodyAndHeaders("direct://GETFOLDERITEMS", null, headers);
    assertNotNull(result, "getFolderItems result");
    LOG.debug("getFolderItems: {}", result);
}
/** Retrieves the account's root folder; the operation takes no parameters. */
@Test
public void testGetRootFolder() {
    final com.box.sdk.BoxFolder result = requestBody("direct://GETROOTFOLDER", null);
    assertNotNull(result, "getRootFolder result");
    LOG.debug("getRootFolder: {}", result);
}
/** Moves the test folder to the destination folder under a new name and verifies it. */
@Test
public void testMoveFolder() {
    final Map<String, Object> headers = new HashMap<>();
    // parameter type is String
    headers.put("CamelBox.folderId", testFolder.getID());
    // parameter type is String
    headers.put("CamelBox.destinationFolderId", CAMEL_TEST_DESTINATION_FOLDER_ID);
    // parameter type is String
    headers.put("CamelBox.newName", CAMEL_TEST_MOVE_FOLDER);
    final com.box.sdk.BoxFolder result = requestBodyAndHeaders("direct://MOVEFOLDER", null, headers);
    assertNotNull(result, "moveFolder result");
    assertEquals(CAMEL_TEST_MOVE_FOLDER, result.getInfo().getName(), "moveFolder folder name");
    LOG.debug("moveFolder: {}", result);
}
/** Renames the test folder and verifies the new name is applied. */
@Test
public void testRenameFolder() {
    final Map<String, Object> headers = new HashMap<>();
    // parameter type is String
    headers.put("CamelBox.folderId", testFolder.getID());
    // parameter type is String
    headers.put("CamelBox.newFolderName", CAMEL_TEST_RENAME_FOLDER);
    final com.box.sdk.BoxFolder result = requestBodyAndHeaders("direct://RENAMEFOLDER", null, headers);
    assertNotNull(result, "renameFolder result");
    // fixed copy-paste: the assertion message previously said "moveFolder folder name"
    assertEquals(CAMEL_TEST_RENAME_FOLDER, result.getInfo().getName(), "renameFolder folder name");
    LOG.debug("renameFolder: {}", result);
}
/** Updates the folder's description via its Info object and verifies the change. */
@Test
public void testUpdateInfo() {
    final BoxFolder.Info testFolderInfo = testFolder.getInfo();
    final Map<String, Object> headers = new HashMap<>();
    // parameter type is String
    headers.put("CamelBox.folderId", testFolder.getID());
    // parameter type is com.box.sdk.BoxFolder.Info
    testFolderInfo.setDescription(CAMEL_TEST_FOLDER_DESCRIPTION);
    headers.put("CamelBox.info", testFolderInfo);
    final com.box.sdk.BoxFolder result = requestBodyAndHeaders("direct://UPDATEFOLDERINFO", null, headers);
    assertNotNull(result, "updateInfo result");
    assertEquals(CAMEL_TEST_FOLDER_DESCRIPTION, result.getInfo().getDescription(), "update folder info description");
    LOG.debug("updateInfo: {}", result);
}
/**
 * Wires one direct: endpoint per Box folders-manager operation under test.
 * Single-parameter operations use {@code inBody} so the message body supplies the argument.
 */
@Override
protected RouteBuilder createRouteBuilder() {
    return new RouteBuilder() {
        public void configure() {
            // test route for copyFolder
            from("direct://COPYFOLDER").to("box://" + PATH_PREFIX + "/copyFolder");
            // test route for createFolder
            from("direct://CREATEFOLDER").to("box://" + PATH_PREFIX + "/createFolder");
            // test route for createFolderSharedLink
            from("direct://CREATEFOLDERSHAREDLINK").to("box://" + PATH_PREFIX + "/createFolderSharedLink");
            // test route for deleteFolder
            from("direct://DELETEFOLDER").to("box://" + PATH_PREFIX + "/deleteFolder?inBody=folderId");
            // test route for getFolder
            from("direct://GETFOLDER").to("box://" + PATH_PREFIX + "/getFolder?inBody=path");
            // test route for getFolderInfo
            from("direct://GETFOLDERINFO").to("box://" + PATH_PREFIX + "/getFolderInfo");
            // test route for getFolderItems
            from("direct://GETFOLDERITEMS").to("box://" + PATH_PREFIX + "/getFolderItems");
            // test route for getRootFolder
            from("direct://GETROOTFOLDER").to("box://" + PATH_PREFIX + "/getRootFolder");
            // test route for moveFolder
            from("direct://MOVEFOLDER").to("box://" + PATH_PREFIX + "/moveFolder");
            // test route for renameFolder
            from("direct://RENAMEFOLDER").to("box://" + PATH_PREFIX + "/renameFolder");
            // test route for updateFolderInfo
            from("direct://UPDATEFOLDERINFO").to("box://" + PATH_PREFIX + "/updateFolderInfo");
        }
    };
}
/** Creates the shared test folder before each test. */
@BeforeEach
public void setupTest() {
    createTestFolder();
}
/** Removes the shared test folder after each test. */
@AfterEach
public void teardownTest() {
    deleteTestFolder();
}
/** Returns the Box API connection of any configured endpoint (all share one connection). */
public BoxAPIConnection getConnection() {
    BoxEndpoint endpoint = (BoxEndpoint) context().getEndpoint("box://" + PATH_PREFIX + "/copyFolder");
    return endpoint.getBoxConnection();
}
/** Creates {@code CamelTestFolder} under the root folder and stores it in {@code testFolder}. */
private void createTestFolder() {
    BoxFolder rootFolder = BoxFolder.getRootFolder(getConnection());
    testFolder = rootFolder.createFolder(CAMEL_TEST_FOLDER).getResource();
}
/**
 * Counts the elements of an iterable.
 * Collections report their size directly; anything else is walked and counted.
 */
private int sizeOfIterable(Iterable<?> it) {
    if (it instanceof Collection) {
        // Fast path: no iteration required.
        return ((Collection<?>) it).size();
    }
    int count = 0;
    java.util.Iterator<?> iterator = it.iterator();
    while (iterator.hasNext()) {
        iterator.next();
        count++;
    }
    return count;
}
}
| BoxFoldersManagerIT |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/text/translate/OctalUnescaper.java | {
"start": 1579,
"end": 3238
} | class ____ extends CharSequenceTranslator {
/**
* Constructs a new instance.
*/
public OctalUnescaper() {
// empty
}
/**
* Checks if the given char is the character representation of one of the digit from 0 to 3.
* @param ch the char to check.
* @return true if the given char is the character representation of one of the digits from 0 to 3.
*/
private boolean isZeroToThree(final char ch) {
return ch >= '0' && ch <= '3';
}
/**
* {@inheritDoc}
*/
@Override
public int translate(final CharSequence input, final int index, final Writer out) throws IOException {
final int remaining = input.length() - index - 1; // how many characters left, ignoring the first \
final StringBuilder builder = new StringBuilder();
if (input.charAt(index) == '\\' && remaining > 0 && CharUtils.isOctal(input.charAt(index + 1))) {
final int next = index + 1;
final int next2 = index + 2;
final int next3 = index + 3;
// we know this is good as we checked it in the if block above
builder.append(input.charAt(next));
if (remaining > 1 && CharUtils.isOctal(input.charAt(next2))) {
builder.append(input.charAt(next2));
if (remaining > 2 && isZeroToThree(input.charAt(next)) && CharUtils.isOctal(input.charAt(next3))) {
builder.append(input.charAt(next3));
}
}
out.write(Integer.parseInt(builder.toString(), 8));
return 1 + builder.length();
}
return 0;
}
}
| OctalUnescaper |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/jpa/spi/NativeQueryConstructorTransformer.java | {
"start": 1903,
"end": 2709
} | class ____ have a single constructor with exactly "
+ elements.length + " parameters", resultClass );
}
}
return constructor;
}
/**
 * Creates a transformer that instantiates {@code resultClass} from each result tuple.
 * @param resultClass the class whose constructor receives the tuple elements
 */
public NativeQueryConstructorTransformer(Class<T> resultClass) {
    this.resultClass = resultClass;
}
/**
 * Instantiates the result class by passing the tuple elements to its matching constructor.
 * Any reflective failure is wrapped in an InstantiationException carrying the result class.
 */
@Override
public T transformTuple(Object[] tuple, String[] aliases) {
    try {
        return constructor( tuple ).newInstance( tuple );
    }
    catch (Exception e) {
        throw new InstantiationException( "Cannot instantiate query result type", resultClass, e );
    }
}
/** Two transformers are equal when they target the same result class. */
@Override
public boolean equals(Object obj) {
    return obj instanceof NativeQueryConstructorTransformer<?> that
        && this.resultClass == that.resultClass;
    // should be safe to ignore the cached constructor here
}
/** Hash is derived from the result class only, consistent with {@link #equals}. */
@Override
public int hashCode() {
    return resultClass.hashCode();
}
}
| must |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/core/annotation/AnnotationTypeMapping.java | {
"start": 20608,
"end": 22492
} | class ____ {
private int size;
private final int[] indexes = new int[attributes.size()];
void update() {
this.size = 0;
Arrays.fill(this.indexes, -1);
for (int i = 0; i < MirrorSets.this.assigned.length; i++) {
if (MirrorSets.this.assigned[i] == this) {
this.indexes[this.size] = i;
this.size++;
}
}
}
<A> int resolve(@Nullable Object source, @Nullable A annotation, ValueExtractor valueExtractor) {
int result = -1;
Object lastValue = null;
for (int i = 0; i < this.size; i++) {
Method attribute = attributes.get(this.indexes[i]);
Object value = valueExtractor.extract(attribute, annotation);
boolean isDefaultValue = (value == null ||
isEquivalentToDefaultValue(attribute, value, valueExtractor));
if (isDefaultValue || ObjectUtils.nullSafeEquals(lastValue, value)) {
if (result == -1) {
result = this.indexes[i];
}
continue;
}
if (lastValue != null && !ObjectUtils.nullSafeEquals(lastValue, value)) {
String on = (source != null) ? " declared on " + source : "";
throw new AnnotationConfigurationException(String.format(
"Different @AliasFor mirror values for annotation [%s]%s; attribute '%s' " +
"and its alias '%s' are declared with values of [%s] and [%s].",
getAnnotationType().getName(), on,
attributes.get(result).getName(),
attribute.getName(),
ObjectUtils.nullSafeToString(lastValue),
ObjectUtils.nullSafeToString(value)));
}
result = this.indexes[i];
lastValue = value;
}
return result;
}
int size() {
return this.size;
}
Method get(int index) {
int attributeIndex = this.indexes[index];
return attributes.get(attributeIndex);
}
int getAttributeIndex(int index) {
return this.indexes[index];
}
}
}
}
| MirrorSet |
java | quarkusio__quarkus | core/runtime/src/main/java/io/quarkus/runtime/graal/Target_io_smallrye_common_net_Inet.java | {
"start": 676,
"end": 1248
} | class ____ {
@Alias
@InjectAccessors(Inet4AnyAccessor.class)
public static Inet4Address INET4_ANY;
@Alias
@InjectAccessors(Inet4LoopbackAccessor.class)
public static Inet4Address INET4_LOOPBACK;
@Alias
@InjectAccessors(Inet4BroadcastAccessor.class)
public static Inet4Address INET4_BROADCAST;
@Alias
@InjectAccessors(Inet6AnyAccessor.class)
public static Inet6Address INET6_ANY;
@Alias
@InjectAccessors(Inet6LoopbackAccessor.class)
public static Inet6Address INET6_LOOPBACK;
}
| Target_io_smallrye_common_net_Inet |
java | google__guava | android/guava/src/com/google/common/util/concurrent/ForwardingListenableFuture.java | {
"start": 1985,
"end": 2395
} | class ____<V extends @Nullable Object>
extends ForwardingListenableFuture<V> {
private final ListenableFuture<V> delegate;
protected SimpleForwardingListenableFuture(ListenableFuture<V> delegate) {
this.delegate = Preconditions.checkNotNull(delegate);
}
@Override
protected final ListenableFuture<V> delegate() {
return delegate;
}
}
}
| SimpleForwardingListenableFuture |
java | elastic__elasticsearch | x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderServiceTests.java | {
"start": 3738,
"end": 37652
} | class ____ extends AutoscalingTestCase {
private static final List<String> SOME_ALLOCATION_DECIDERS = Arrays.asList(
SameShardAllocationDecider.NAME,
AwarenessAllocationDecider.NAME,
EnableAllocationDecider.NAME
);
public void testIsDiskOnlyDecision() {
Decision.Multi decision = new Decision.Multi();
if (randomBoolean()) {
decision.add(randomFrom(Decision.YES, Decision.ALWAYS, Decision.THROTTLE));
}
decision.add(new Decision.Single(Decision.Type.NO, DiskThresholdDecider.NAME, "test"));
randomSubsetOf(SOME_ALLOCATION_DECIDERS).stream()
.map(
label -> new Decision.Single(
randomValueOtherThan(Decision.Type.NO, () -> randomFrom(Decision.Type.values())),
label,
"test " + label
)
)
.forEach(decision::add);
assertThat(ReactiveStorageDeciderService.isDiskOnlyNoDecision(decision), is(true));
}
public void testIsNotDiskOnlyDecision() {
Decision.Multi decision = new Decision.Multi();
if (randomBoolean()) {
decision.add(randomFrom(Decision.YES, Decision.ALWAYS, Decision.THROTTLE, Decision.NO));
}
if (randomBoolean()) {
decision.add(new Decision.Single(Decision.Type.NO, DiskThresholdDecider.NAME, "test"));
if (randomBoolean()) {
decision.add(Decision.NO);
} else {
decision.add(new Decision.Single(Decision.Type.NO, randomFrom(SOME_ALLOCATION_DECIDERS), "test"));
}
} else if (randomBoolean()) {
decision.add(new Decision.Single(Decision.Type.YES, DiskThresholdDecider.NAME, "test"));
}
randomSubsetOf(SOME_ALLOCATION_DECIDERS).stream()
.map(label -> new Decision.Single(randomFrom(Decision.Type.values()), label, "test " + label))
.forEach(decision::add);
assertThat(ReactiveStorageDeciderService.isDiskOnlyNoDecision(decision), is(false));
}
/**
 * A decision that is NO only because of the filter-allocation decider, on an index
 * whose include filter looks like a tier attribute ("data": "hot"), is tier-only.
 */
public void testIsFilterTierOnlyDecision() {
    Decision.Multi decision = new Decision.Multi();
    if (randomBoolean()) {
        decision.add(randomFrom(Decision.YES, Decision.ALWAYS, Decision.THROTTLE));
    }
    decision.add(new Decision.Single(Decision.Type.NO, FilterAllocationDecider.NAME, "test"));
    randomSubsetOf(SOME_ALLOCATION_DECIDERS).stream()
        .map(
            label -> new Decision.Single(
                randomValueOtherThan(Decision.Type.NO, () -> randomFrom(Decision.Type.values())),
                label,
                "test " + label
            )
        )
        .forEach(decision::add);
    // index filtered to the "hot" data attribute, i.e. a tier-like filter
    IndexMetadata indexMetadata = IndexMetadata.builder("test")
        .settings(settings(IndexVersion.current()).put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_PREFIX + ".data", "hot"))
        .numberOfShards(randomIntBetween(1, 10))
        .numberOfReplicas(randomIntBetween(1, 10))
        .build();
    assertThat(ReactiveStorageDeciderService.isFilterTierOnlyDecision(decision, indexMetadata), is(true));
}
/**
 * Negative cases for tier-only classification: another decider also says NO,
 * the index filter uses a node-identity attribute (e.g. _id), the data-tier
 * decider itself says NO, or the filter decider is YES.
 */
public void testIsNotTierOnlyDecision() {
    Decision.Multi decision = new Decision.Multi();
    if (randomBoolean()) {
        decision.add(randomFrom(Decision.YES, Decision.ALWAYS, Decision.THROTTLE, Decision.NO));
    }
    Settings.Builder settings = settings(IndexVersion.current());
    if (randomBoolean()) {
        decision.add(new Decision.Single(Decision.Type.NO, FilterAllocationDecider.NAME, "test"));
        if (randomBoolean()) {
            // a bare NO from an unnamed decider disqualifies tier-only
            decision.add(Decision.NO);
        } else if (randomBoolean()) {
            if (randomBoolean()) {
                // node-identity filters (e.g. _id) are not tier-like
                settings.put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_PREFIX + "._id", randomAlphaOfLength(5));
            } else {
                decision.add(new Decision.Single(Decision.Type.NO, DataTierAllocationDecider.NAME, "test"));
            }
        } else {
            decision.add(
                new Decision.Single(
                    Decision.Type.NO,
                    randomValueOtherThan(SameShardAllocationDecider.NAME, () -> randomFrom(SOME_ALLOCATION_DECIDERS)),
                    "test"
                )
            );
        }
    } else if (randomBoolean()) {
        // filter decider says YES, so it cannot be the cause of a NO
        decision.add(new Decision.Single(Decision.Type.YES, FilterAllocationDecider.NAME, "test"));
    }
    randomSubsetOf(SOME_ALLOCATION_DECIDERS).stream()
        .map(label -> new Decision.Single(randomFrom(Decision.Type.values()), label, "test " + label))
        .forEach(decision::add);
    assertThat(ReactiveStorageDeciderService.isFilterTierOnlyDecision(decision, metaWithSettings(settings)), is(false));
}
/**
 * Filters on arbitrary custom attributes look like tier filters; adding any
 * node-identity attribute (_ip, _id, name, ...) makes the filter non-tier-like.
 */
public void testFilterLooksLikeTier() {
    Settings.Builder settings = settings(IndexVersion.current());
    for (int i = 0; i < between(0, 10); ++i) {
        // random attribute names, avoiding the reserved node-identity names
        String key = randomValueOtherThanMany(name -> name.startsWith("_") || name.equals("name"), () -> randomAlphaOfLength(5));
        settings.put(
            randomFrom(
                IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_PREFIX,
                IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_PREFIX,
                IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_PREFIX
            ) + "." + key,
            randomAlphaOfLength(5)
        );
    }
    assertThat(ReactiveStorageDeciderService.filterLooksLikeTier(metaWithSettings(settings)), is(true));
    // any node-identity attribute disqualifies the filter as a tier filter
    settings.put(
        randomFrom(
            IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_PREFIX,
            IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_PREFIX,
            IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_PREFIX
        ) + "." + randomFrom("_ip", "_host_ip", "_publish_ip", "host", "_id", "_name", "name"),
        "1.2.3.4"
    );
    assertThat(ReactiveStorageDeciderService.filterLooksLikeTier(metaWithSettings(settings)), is(false));
}
/** Builds test index metadata with the given settings and random shard/replica counts. */
private IndexMetadata metaWithSettings(Settings.Builder settings) {
    return IndexMetadata.builder("test")
        .settings(settings)
        .numberOfShards(randomIntBetween(1, 10))
        .numberOfReplicas(randomIntBetween(0, 10))
        .build();
}
/**
 * sizeOf must return the registered shard size for a (possibly replica) shard,
 * falling back to 1 KB when no size is known, and maxNodeLockedSize is 0 when
 * no index filters lock shards to nodes.
 */
public void testSizeOf() {
    ClusterState.Builder stateBuilder = ClusterState.builder(ClusterName.DEFAULT);
    Metadata.Builder metaBuilder = Metadata.builder();
    IndexMetadata indexMetadata = IndexMetadata.builder(randomAlphaOfLength(5))
        .settings(settings(IndexVersion.current()))
        .numberOfShards(randomIntBetween(1, 10))
        .numberOfReplicas(randomIntBetween(1, 10))
        .build();
    metaBuilder.put(indexMetadata, true);
    stateBuilder.metadata(metaBuilder);
    stateBuilder.routingTable(RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY).addAsNew(indexMetadata).build());
    addNode(stateBuilder);
    addNode(stateBuilder);
    ClusterState initialClusterState = stateBuilder.build();
    int shardId = randomInt(indexMetadata.getNumberOfShards() - 1);
    IndexShardRoutingTable subjectRoutings = initialClusterState.routingTable()
        .shardRoutingTable(indexMetadata.getIndex().getName(), shardId);
    RoutingAllocation allocation = new RoutingAllocation(
        new AllocationDeciders(List.of()),
        initialClusterState.mutableRoutingNodes(),
        initialClusterState,
        null,
        null,
        System.nanoTime()
    );
    ShardRouting primaryShard = subjectRoutings.primaryShard();
    ShardRouting replicaShard = subjectRoutings.replicaShards().get(0);
    DiscoveryNode[] nodes = initialClusterState.nodes().getAllNodes().toArray(DiscoveryNode[]::new);
    boolean useReplica = randomBoolean();
    if (useReplica || randomBoolean()) {
        // a replica can only be tested once its primary has started
        startShard(allocation, primaryShard, nodes[0].getId());
        if (randomBoolean()) {
            startShard(allocation, replicaShard, nodes[1].getId());
        }
    }
    final ClusterState clusterState = updateClusterState(initialClusterState, allocation);
    // seed noise sizes for unrelated shards, then a known size for the subject
    Map<String, Long> shardSize = new HashMap<>();
    IntStream.range(0, randomInt(10))
        .mapToObj(i -> randomFrom(clusterState.routingTable().allShards().toList()))
        .filter(s -> s.shardId().getId() != shardId)
        .forEach(s -> shardSize.put(shardIdentifier(s), randomNonNegativeLong()));
    long expected = randomLongBetween(1, Long.MAX_VALUE);
    if (useReplica == false || randomBoolean()) {
        shardSize.put(shardIdentifier(primaryShard), expected);
    } else {
        shardSize.put(shardIdentifier(replicaShard), expected);
    }
    ShardRouting subjectShard = useReplica ? replicaShard : primaryShard;
    validateSizeOf(clusterState, subjectShard, shardSize, expected);
    // with no size info at all, sizeOf falls back to 1 KB
    validateSizeOf(clusterState, subjectShard, Map.of(), ByteSizeUnit.KB.toBytes(1));
    assertThat(createAllocationState(shardSize, clusterState).maxNodeLockedSize(), equalTo(0L));
}
/**
 * With a node-locking attribute filter, maxNodeLockedSize sums the primary shard
 * sizes of the index; a resize-source setting doubles that amount.
 */
public void testMaxNodeLockedSizeUsingAttributes() {
    ClusterState.Builder stateBuilder = ClusterState.builder(ClusterName.DEFAULT);
    Metadata.Builder metaBuilder = Metadata.builder();
    int numberOfShards = randomIntBetween(1, 10);
    int numberOfReplicas = randomIntBetween(1, 10);
    IndexMetadata indexMetadata = IndexMetadata.builder(randomAlphaOfLength(5))
        .settings(addRandomNodeLockUsingAttributes(settings(IndexVersion.current())))
        .numberOfShards(numberOfShards)
        .numberOfReplicas(numberOfReplicas)
        .build();
    metaBuilder.put(indexMetadata, true);
    stateBuilder.metadata(metaBuilder);
    stateBuilder.routingTable(RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY).addAsNew(indexMetadata).build());
    ClusterState clusterState = stateBuilder.build();
    long baseSize = between(1, 10);
    // primaries get deterministic sizes (shardId + baseSize); replicas get noise
    Map<String, Long> shardSizes = IntStream.range(0, numberOfShards)
        .mapToObj(s -> clusterState.getRoutingTable().index(indexMetadata.getIndex()).shard(s))
        .flatMap(irt -> Stream.of(irt.primaryShard(), irt.replicaShards().get(0)))
        .collect(
            Collectors.toMap(
                ClusterInfo::shardIdentifierFromRouting,
                s -> s.primary() ? s.shardId().getId() + baseSize : between(1, 100)
            )
        );
    // keep the calculation in 2x until the end to avoid rounding.
    long nodeLockedSize = (baseSize * 2 + numberOfShards - 1) * numberOfShards / 2;
    assertThat(createAllocationState(shardSizes, clusterState).maxNodeLockedSize(), equalTo(nodeLockedSize));
    // marking the index as a resize target doubles the locked size estimate
    ClusterState withResizeSource = ClusterState.builder(clusterState)
        .metadata(
            Metadata.builder(clusterState.metadata())
                .put(
                    IndexMetadata.builder(indexMetadata)
                        .settings(
                            Settings.builder()
                                .put(indexMetadata.getSettings())
                                .put(IndexMetadata.INDEX_RESIZE_SOURCE_UUID_KEY, randomAlphaOfLength(9))
                        )
                )
        )
        .build();
    assertThat(createAllocationState(shardSizes, withResizeSource).maxNodeLockedSize(), equalTo(nodeLockedSize * 2));
}
/**
 * A clone/split target index locks twice the size of its resize-source primary,
 * since source and target must temporarily coexist on the same node.
 */
public void testNodeLockSplitClone() {
    ClusterState.Builder stateBuilder = ClusterState.builder(ClusterName.DEFAULT);
    Metadata.Builder metaBuilder = Metadata.builder();
    IndexMetadata sourceIndexMetadata = IndexMetadata.builder(randomAlphaOfLength(5))
        .settings(settings(IndexVersion.current()))
        .numberOfShards(1)
        .numberOfReplicas(between(1, 10))
        .build();
    int numberOfShards = randomIntBetween(1, 2);
    int numberOfReplicas = randomIntBetween(1, 10);
    // target index points back at the source via the resize settings
    IndexMetadata indexMetadata = IndexMetadata.builder(randomAlphaOfLength(5))
        .settings(
            settings(IndexVersion.current()).put(IndexMetadata.INDEX_RESIZE_SOURCE_UUID_KEY, sourceIndexMetadata.getIndexUUID())
                .put(IndexMetadata.INDEX_RESIZE_SOURCE_NAME_KEY, sourceIndexMetadata.getIndex().getName())
        )
        .numberOfShards(numberOfShards)
        .numberOfReplicas(numberOfReplicas)
        .build();
    metaBuilder.put(sourceIndexMetadata, true);
    metaBuilder.put(indexMetadata, true);
    stateBuilder.metadata(metaBuilder);
    stateBuilder.routingTable(
        RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY)
            .addAsNew(sourceIndexMetadata)
            .addAsNew(indexMetadata)
            .build()
    );
    ClusterState clusterState = stateBuilder.build();
    long sourceSize = between(1, 10);
    // only the source primary has a known size
    Map<String, Long> shardSizes = Map.of(
        ClusterInfo.shardIdentifierFromRouting(
            clusterState.getRoutingTable().index(sourceIndexMetadata.getIndex()).shard(0).primaryShard()
        ),
        sourceSize
    );
    assertThat(createAllocationState(shardSizes, clusterState).maxNodeLockedSize(), equalTo(sourceSize * 2));
}
/**
 * Adds a random require/include/initial-recovery filter on a single-node
 * identity attribute, which locks the index's shards to specific nodes.
 */
private Settings.Builder addRandomNodeLockUsingAttributes(Settings.Builder settings) {
    String setting = randomFrom(
        IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING,
        IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING,
        IndexMetadata.INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING
    ).getKey();
    String attribute = randomFrom(DiscoveryNodeFilters.SINGLE_NODE_NAMES);
    return settings.put(setting + attribute, randomAlphaOfLength(5));
}
/** Asserts that sizeOf for the given shard, under the given size map, equals {@code expected}. */
public void validateSizeOf(ClusterState clusterState, ShardRouting subjectShard, Map<String, Long> shardSize, long expected) {
    assertThat(createAllocationState(shardSize, clusterState).sizeOf(subjectShard), equalTo(expected));
}
/**
 * Builds an AllocationState over the given cluster state whose ClusterInfo
 * carries only the supplied shard sizes; all other collaborators are unset.
 */
private ReactiveStorageDeciderService.AllocationState createAllocationState(Map<String, Long> shardSize, ClusterState clusterState) {
    ClusterInfo info = ClusterInfo.builder().shardSizes(shardSize).build();
    return new ReactiveStorageDeciderService.AllocationState(
        clusterState,
        null,
        TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY,
        null,
        info,
        null,
        Set.of(),
        Set.of()
    );
}
/**
 * Finds the given unassigned shard in the allocation, initializes it on the
 * given node and marks it started; fails the test if the shard is not found.
 */
private void startShard(RoutingAllocation allocation, ShardRouting unassignedShard, String nodeId) {
    for (RoutingNodes.UnassignedShards.UnassignedIterator iterator = allocation.routingNodes().unassigned().iterator(); iterator
        .hasNext();) {
        ShardRouting candidate = iterator.next();
        // identity comparison: we want this exact routing entry, not an equal one
        if (candidate == unassignedShard) {
            ShardRouting initialized = iterator.initialize(
                nodeId,
                null,
                ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE,
                allocation.changes()
            );
            allocation.routingNodes().startShard(initialized, allocation.changes(), ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
            return;
        }
    }
    fail("must find shard: " + unassignedShard);
}
/**
 * For a shard restoring from snapshot, sizeOf must use the snapshot shard size
 * info, falling back to 1 KB when no snapshot size is known.
 */
public void testSizeOfSnapshot() {
    ClusterState.Builder stateBuilder = ClusterState.builder(ClusterName.DEFAULT);
    Metadata.Builder metaBuilder = Metadata.builder();
    RecoverySource.SnapshotRecoverySource recoverySource = new RecoverySource.SnapshotRecoverySource(
        UUIDs.randomBase64UUID(),
        new Snapshot(randomAlphaOfLength(5), new SnapshotId(randomAlphaOfLength(5), UUIDs.randomBase64UUID())),
        IndexVersion.current(),
        new IndexId(randomAlphaOfLength(5), UUIDs.randomBase64UUID())
    );
    IndexMetadata indexMetadata = IndexMetadata.builder(randomAlphaOfLength(5))
        .settings(settings(IndexVersion.current()))
        .numberOfShards(randomIntBetween(1, 10))
        .numberOfReplicas(randomIntBetween(0, 10))
        .build();
    metaBuilder.put(indexMetadata, true);
    stateBuilder.metadata(metaBuilder);
    stateBuilder.routingTable(
        RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY)
            .addAsNewRestore(indexMetadata, recoverySource, new HashSet<>())
            .build()
    );
    ClusterState clusterState = stateBuilder.build();
    int shardId = randomInt(indexMetadata.getNumberOfShards() - 1);
    ShardRouting primaryShard = clusterState.routingTable()
        .shardRoutingTable(indexMetadata.getIndex().getName(), shardId)
        .primaryShard();
    // noise sizes for unrelated shards, then a known size for the subject
    Map<InternalSnapshotsInfoService.SnapshotShard, Long> shardSizeBuilder = new HashMap<>();
    IntStream.range(0, randomInt(10))
        .mapToObj(i -> randomFrom(clusterState.routingTable().allShards().toList()))
        .filter(s -> s.shardId().getId() != shardId)
        .forEach(s -> shardSizeBuilder.put(snapshotShardSizeKey(recoverySource, s), randomNonNegativeLong()));
    long expected = randomLongBetween(1, Long.MAX_VALUE);
    shardSizeBuilder.put(snapshotShardSizeKey(recoverySource, primaryShard), expected);
    validateSizeOfSnapshotShard(clusterState, primaryShard, shardSizeBuilder, expected);
    // with no snapshot size info, sizeOf falls back to 1 KB
    validateSizeOfSnapshotShard(clusterState, primaryShard, Map.of(), ByteSizeUnit.KB.toBytes(1));
}
/** Asserts sizeOf for a snapshot-restoring shard, given the snapshot shard size map. */
private void validateSizeOfSnapshotShard(
    ClusterState clusterState,
    ShardRouting primaryShard,
    Map<InternalSnapshotsInfoService.SnapshotShard, Long> shardSizeBuilder,
    long expected
) {
    SnapshotShardSizeInfo shardSizeInfo = new SnapshotShardSizeInfo(shardSizeBuilder);
    ReactiveStorageDeciderService.AllocationState allocationState = new ReactiveStorageDeciderService.AllocationState(
        clusterState,
        null,
        TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY,
        null,
        null,
        shardSizeInfo,
        Set.of(),
        Set.of()
    );
    assertThat(allocationState.sizeOf(primaryShard), equalTo(expected));
}
/** Builds the snapshot-shard lookup key matching the given recovery source and shard. */
private InternalSnapshotsInfoService.SnapshotShard snapshotShardSizeKey(
    RecoverySource.SnapshotRecoverySource recoverySource,
    ShardRouting primaryShard
) {
    return new InternalSnapshotsInfoService.SnapshotShard(recoverySource.snapshot(), recoverySource.index(), primaryShard.shardId());
}
/** Adds a data node with a random id to the cluster state being built. */
static void addNode(ClusterState.Builder stateBuilder) {
    addNode(stateBuilder, DiscoveryNodeRole.DATA_ROLE);
}
/** Adds a node with the given single role and a random id to the cluster state being built. */
static void addNode(ClusterState.Builder stateBuilder, DiscoveryNodeRole role) {
    stateBuilder.nodes(
        DiscoveryNodes.builder(stateBuilder.nodes())
            .add(DiscoveryNodeUtils.builder(UUIDs.randomBase64UUID()).name("test").roles(Set.of(role)).build())
    );
}
/**
 * unmovableSize must be the smallest known shard size on the node, but never less
 * than the amount needed to bring the node back under the high disk watermark
 * (5 KB here: 100 KB disk, 5 KB free, 10 KB high-watermark headroom).
 */
public void testUnmovableSize() {
    Settings.Builder settingsBuilder = Settings.builder();
    if (randomBoolean()) {
        // disk is 100 kb. Default is 90 percent. 10KB free is equivalent to default.
        String tenKb = ByteSizeValue.ofKb(10).toString();
        settingsBuilder.put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), tenKb);
        // also set these to pass validation
        settingsBuilder.put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), tenKb);
        settingsBuilder.put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), tenKb);
    }
    Settings settings = settingsBuilder.build();
    DiskThresholdSettings thresholdSettings = new DiskThresholdSettings(
        settings,
        new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
    );
    String nodeId = randomAlphaOfLength(5);
    ClusterState.Builder stateBuilder = ClusterState.builder(ClusterName.DEFAULT);
    Metadata.Builder metaBuilder = Metadata.builder();
    IndexMetadata indexMetadata = IndexMetadata.builder(randomAlphaOfLength(5))
        .settings(settings(IndexVersion.current()))
        .numberOfShards(1)
        .numberOfReplicas(10)
        .build();
    metaBuilder.put(indexMetadata, true);
    final Metadata metadata = metaBuilder.build();
    stateBuilder.metadata(metadata);
    stateBuilder.routingTable(GlobalRoutingTableTestHelper.buildRoutingTable(metadata, RoutingTable.Builder::addAsNew));
    ClusterState clusterState = stateBuilder.build();
    // distinct started shards on the node, mixing primaries and replicas
    var shards = IntStream.range(0, between(1, 10))
        .mapToObj(i -> Tuple.tuple(new ShardId(indexMetadata.getIndex(), randomInt(10)), randomBoolean()))
        .distinct()
        .map(t -> TestShardRouting.newShardRouting(t.v1(), nodeId, t.v2(), ShardRoutingState.STARTED))
        .toList();
    long minShardSize = randomLongBetween(1, 10);
    // optionally leave one shard's size unknown
    ShardRouting missingShard = randomBoolean() ? randomFrom(shards) : null;
    Map<String, Long> shardSize = new HashMap<>();
    for (ShardRouting shard : shards) {
        if (shard != missingShard) {
            shardSize.put(shardIdentifier(shard), ByteSizeUnit.KB.toBytes(randomLongBetween(minShardSize, 100)));
        }
    }
    if (shardSize.isEmpty() == false) {
        // force at least one shard to have exactly the minimum size
        shardSize.put(randomFrom(shardSize.keySet()), ByteSizeUnit.KB.toBytes(minShardSize));
    }
    var diskUsages = Map.of(nodeId, new DiskUsage(nodeId, null, null, ByteSizeUnit.KB.toBytes(100), ByteSizeUnit.KB.toBytes(5)));
    ClusterInfo info = ClusterInfo.builder()
        .leastAvailableSpaceUsage(diskUsages)
        .mostAvailableSpaceUsage(diskUsages)
        .shardSizes(shardSize)
        .build();
    ReactiveStorageDeciderService.AllocationState allocationState = new ReactiveStorageDeciderService.AllocationState(
        clusterState,
        null,
        TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY,
        thresholdSettings,
        info,
        null,
        Set.of(),
        Set.of()
    );
    long result = allocationState.unmovableSize(nodeId, shards);
    // a replica's size may be known via either its own entry or the primary's
    Predicate<ShardRouting> shardSizeKnown = shard -> shard.primary()
        ? info.getShardSize(shard.shardId(), true) != null
        : info.getShardSize(shard.shardId(), true) != null || info.getShardSize(shard.shardId(), false) != null;
    if ((missingShard != null && shardSizeKnown.test(missingShard) == false) || minShardSize < 5) {
        // the diff between used and high watermark is 5 KB.
        assertThat(result, equalTo(ByteSizeUnit.KB.toBytes(5)));
    } else {
        assertThat(result, equalTo(ByteSizeUnit.KB.toBytes(minShardSize)));
    }
}
/**
 * With no nodes to move to, a shard "can remain" only if its index's tier
 * preference already names the highest tier first; a decider NO always wins.
 */
public void testCanRemainOnlyHighestTierPreference() {
    ClusterState.Builder stateBuilder = ClusterState.builder(ClusterName.DEFAULT);
    addNode(stateBuilder);
    Metadata.Builder metaBuilder = Metadata.builder();
    IndexMetadata indexMetadata = IndexMetadata.builder(randomAlphaOfLength(5))
        .settings(settings(IndexVersion.current()))
        .numberOfShards(10)
        .numberOfReplicas(1)
        .build();
    metaBuilder.put(indexMetadata, true);
    var metadata = metaBuilder.build();
    stateBuilder.metadata(metadata);
    stateBuilder.routingTable(GlobalRoutingTableTestHelper.buildRoutingTable(metadata, RoutingTable.Builder::addAsNew));
    ClusterState clusterState = stateBuilder.build();
    ShardRouting shardRouting = TestShardRouting.newShardRouting(
        indexMetadata.getIndex().getName(),
        randomInt(10),
        clusterState.nodes().iterator().next().getId(),
        randomBoolean(),
        ShardRoutingState.STARTED
    );
    // decider that unconditionally rejects remaining on the current node
    AllocationDecider no = new AllocationDecider() {
        @Override
        public Decision canRemain(
            IndexMetadata indexMetadata,
            ShardRouting shardRouting,
            RoutingNode node,
            RoutingAllocation allocation
        ) {
            return Decision.NO;
        }
    };
    // no preference: shard may remain unless a decider says NO
    assertTrue(canRemainWithNoNodes(clusterState, shardRouting));
    assertFalse(canRemainWithNoNodes(clusterState, shardRouting, no));
    // data_hot is the highest tier, so the shard may remain
    ClusterState clusterStateWithHotPreference = addPreference(indexMetadata, clusterState, "data_hot");
    assertTrue(canRemainWithNoNodes(clusterStateWithHotPreference, shardRouting));
    assertFalse(canRemainWithNoNodes(clusterStateWithHotPreference, shardRouting, no));
    // data_warm preferred over data_hot: shard should move, so it cannot remain
    ClusterState clusterStateWithWarmHotPreference = addPreference(indexMetadata, clusterState, "data_warm,data_hot");
    assertFalse(canRemainWithNoNodes(clusterStateWithWarmHotPreference, shardRouting));
    assertFalse(canRemainWithNoNodes(clusterStateWithWarmHotPreference, shardRouting, no));
}
public ClusterState addPreference(IndexMetadata indexMetadata, ClusterState clusterState, String preference) {
IndexMetadata indexMetadataWithPreference = IndexMetadata.builder(indexMetadata)
.settings(Settings.builder().put(indexMetadata.getSettings()).put(DataTier.TIER_PREFERENCE, preference))
.build();
return ClusterState.builder(clusterState)
.metadata(Metadata.builder(clusterState.metadata()).put(indexMetadataWithPreference, false))
.build();
}
public boolean canRemainWithNoNodes(ClusterState clusterState, ShardRouting shardRouting, AllocationDecider... deciders) {
AllocationDeciders allocationDeciders = new AllocationDeciders(Arrays.asList(deciders));
ReactiveStorageDeciderService.AllocationState allocationState = new ReactiveStorageDeciderService.AllocationState(
clusterState,
allocationDeciders,
TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY,
new DiskThresholdSettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)),
ClusterInfo.EMPTY,
null,
Set.of(),
Set.of(DiscoveryNodeRole.DATA_WARM_NODE_ROLE)
);
RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, clusterState, null, null, randomLong());
return allocationState.canRemainOnlyHighestTierPreference(shardRouting, allocation);
}
public void testNeedsThisTier() {
ClusterState.Builder stateBuilder = ClusterState.builder(ClusterName.DEFAULT);
addNode(stateBuilder, DiscoveryNodeRole.DATA_HOT_NODE_ROLE);
Metadata.Builder metaBuilder = Metadata.builder();
Settings.Builder settings = settings(IndexVersion.current());
if (randomBoolean()) {
settings.put(DataTier.TIER_PREFERENCE, randomBoolean() ? DataTier.DATA_HOT : "data_hot,data_warm");
}
IndexMetadata indexMetadata = IndexMetadata.builder(randomAlphaOfLength(5))
.settings(settings)
.numberOfShards(10)
.numberOfReplicas(1)
.build();
metaBuilder.put(indexMetadata, true);
stateBuilder.metadata(metaBuilder);
ClusterState clusterState = stateBuilder.build();
ShardRouting shardRouting = TestShardRouting.newShardRouting(
indexMetadata.getIndex().getName(),
randomInt(10),
clusterState.nodes().iterator().next().getId(),
randomBoolean(),
ShardRoutingState.STARTED
);
verifyNeedsWarmTier(clusterState, shardRouting, false);
verifyNeedsWarmTier(addPreference(indexMetadata, clusterState, DataTier.DATA_COLD), shardRouting, false);
verifyNeedsWarmTier(addPreference(indexMetadata, clusterState, DataTier.DATA_WARM), shardRouting, true);
verifyNeedsWarmTier(addPreference(indexMetadata, clusterState, "data_warm,data_hot"), shardRouting, true);
verifyNeedsWarmTier(addPreference(indexMetadata, clusterState, "data_warm,data_cold"), shardRouting, true);
}
public void testNeedsThisTierLegacy() {
ClusterState.Builder stateBuilder = ClusterState.builder(ClusterName.DEFAULT);
addNode(stateBuilder);
Metadata.Builder metaBuilder = Metadata.builder();
Settings.Builder settings = settings(IndexVersion.current());
if (randomBoolean()) {
settings.put(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_PREFIX + ".data", DataTier.DATA_HOT);
}
IndexMetadata indexMetadata = IndexMetadata.builder(randomAlphaOfLength(5))
.settings(settings)
.numberOfShards(10)
.numberOfReplicas(1)
.build();
metaBuilder.put(indexMetadata, true);
stateBuilder.metadata(metaBuilder);
ClusterState clusterState = stateBuilder.build();
boolean primary = randomBoolean();
ShardRouting shardRouting = TestShardRouting.newShardRouting(
indexMetadata.getIndex().getName(),
randomInt(10),
clusterState.nodes().iterator().next().getId(),
primary,
ShardRoutingState.STARTED
);
AllocationDecider noFilter = new AllocationDecider() {
@Override
public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
return Decision.single(Decision.Type.NO, FilterAllocationDecider.NAME, "test");
}
};
AllocationDecider noSameShard = new AllocationDecider() {
@Override
public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
return Decision.single(Decision.Type.NO, SameShardAllocationDecider.NAME, "test");
}
};
AllocationDecider no = new AllocationDecider() {
@Override
public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
return Decision.single(Decision.Type.NO, AwarenessAllocationDecider.NAME, "test");
}
};
verifyNeedsWarmTier(clusterState, shardRouting, false);
verifyNeedsWarmTier(clusterState, shardRouting, primary, noFilter);
verifyNeedsWarmTier(clusterState, shardRouting, primary, noFilter, noSameShard);
verifyNeedsWarmTier(clusterState, shardRouting, false, noFilter, no);
}
private void verifyNeedsWarmTier(
ClusterState clusterState,
ShardRouting shardRouting,
boolean expected,
AllocationDecider... deciders
) {
AllocationDeciders allocationDeciders = new AllocationDeciders(Arrays.asList(deciders));
ReactiveStorageDeciderService.AllocationState allocationState = new ReactiveStorageDeciderService.AllocationState(
clusterState,
allocationDeciders,
TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY,
new DiskThresholdSettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)),
ClusterInfo.EMPTY,
null,
Set.of(),
Set.of(DiscoveryNodeRole.DATA_WARM_NODE_ROLE)
);
RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, clusterState, null, null, randomLong());
assertThat(allocationState.needsThisTier(shardRouting, allocation), is(expected));
}
public void testMessage() {
assertThat(ReactiveStorageDeciderService.message(0, 0), equalTo("storage ok"));
assertThat(ReactiveStorageDeciderService.message(0, 1023), equalTo("not enough storage available, needs 1023b"));
assertThat(ReactiveStorageDeciderService.message(1024, 0), equalTo("not enough storage available, needs 1kb"));
assertThat(ReactiveStorageDeciderService.message(0, 1024), equalTo("not enough storage available, needs 1kb"));
assertThat(ReactiveStorageDeciderService.message(1023, 1), equalTo("not enough storage available, needs 1kb"));
}
private String shardIdentifier(ShardRouting s) {
return ClusterInfo.shardIdentifierFromRouting(s);
}
public static ClusterState updateClusterState(ClusterState oldState, RoutingAllocation allocation) {
assert allocation.metadata() == oldState.metadata();
if (allocation.routingNodesChanged() == false) {
return oldState;
}
final GlobalRoutingTable oldRoutingTable = oldState.globalRoutingTable();
final GlobalRoutingTable newRoutingTable = oldRoutingTable.rebuild(allocation.routingNodes(), allocation.metadata());
final Metadata newMetadata = allocation.updateMetadataWithRoutingChanges(newRoutingTable);
assert newRoutingTable.validate(newMetadata); // validates the routing table is coherent with the cluster state metadata
return ClusterState.builder(oldState).routingTable(newRoutingTable).metadata(newMetadata).build();
}
}
| ReactiveStorageDeciderServiceTests |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSequenceFile.java | {
"start": 30052,
"end": 31341
} | class
____.Writer writer = SequenceFile.createWriter(
config,
SequenceFile.Writer.file(path),
SequenceFile.Writer.keyClass(SimpleSerializable.class),
SequenceFile.Writer.valueClass(SimpleSerializable.class));
int max = 10;
try {
SimpleSerializable val = new SimpleSerializable();
val.setId(-1);
for (int i = 0; i < max; i++) {
SimpleSerializable key = new SimpleSerializable();
key.setId(i);
writer.append(key, val);
}
} finally {
writer.close();
}
// override name so it gets forced to the new serializable
WritableName.setName(AnotherSimpleSerializable.class, SimpleSerializable.class.getName());
// read and expect our new serializable, and all the correct values read
SequenceFile.Reader reader = new SequenceFile.Reader(
config,
SequenceFile.Reader.file(path));
AnotherSimpleSerializable key = new AnotherSimpleSerializable();
int count = 0;
while (true) {
key = (AnotherSimpleSerializable) reader.next(key);
if (key == null) {
// make sure we exhausted all the ints we wrote
assertEquals(count, max);
break;
}
assertEquals(count++, key.getId());
}
}
public static | SequenceFile |
java | apache__camel | components/camel-pqc/src/test/java/org/apache/camel/component/pqc/PQCMLKEMGenerateEncapsulationAESNoAutowiredTest.java | {
"start": 1495,
"end": 4036
} | class ____ extends CamelTestSupport {
@EndpointInject("mock:encapsulate")
protected MockEndpoint resultEncapsulate;
@Produce("direct:encapsulate")
protected ProducerTemplate templateEncapsulate;
@EndpointInject("mock:extract")
protected MockEndpoint resultExtract;
public PQCMLKEMGenerateEncapsulationAESNoAutowiredTest() throws NoSuchAlgorithmException {
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:encapsulate").to(
"pqc:keyenc?operation=generateSecretKeyEncapsulation&symmetricKeyAlgorithm=AES&keyEncapsulationAlgorithm=MLKEM")
.to("mock:encapsulate")
.to("pqc:keyenc?operation=extractSecretKeyEncapsulation&symmetricKeyAlgorithm=AES&keyEncapsulationAlgorithm=MLKEM")
.to("mock:extract");
}
};
}
@BeforeAll
public static void startup() throws Exception {
Security.addProvider(new BouncyCastleProvider());
Security.addProvider(new BouncyCastlePQCProvider());
}
@Test
void testSignAndVerify() throws Exception {
resultEncapsulate.expectedMessageCount(1);
resultExtract.expectedMessageCount(1);
templateEncapsulate.sendBody("Hello");
resultEncapsulate.assertIsSatisfied();
assertNotNull(resultEncapsulate.getExchanges().get(0).getMessage().getBody(SecretKeyWithEncapsulation.class));
assertEquals(PQCSymmetricAlgorithms.AES.getAlgorithm(),
resultEncapsulate.getExchanges().get(0).getMessage().getBody(SecretKeyWithEncapsulation.class).getAlgorithm());
SecretKeyWithEncapsulation secEncrypted
= resultEncapsulate.getExchanges().get(0).getMessage().getBody(SecretKeyWithEncapsulation.class);
assertNotNull(resultExtract.getExchanges().get(0).getMessage().getBody(SecretKeyWithEncapsulation.class));
assertEquals(PQCSymmetricAlgorithms.AES.getAlgorithm(),
resultExtract.getExchanges().get(0).getMessage().getBody(SecretKeyWithEncapsulation.class).getAlgorithm());
SecretKeyWithEncapsulation secEncryptedExtracted
= resultExtract.getExchanges().get(0).getMessage().getBody(SecretKeyWithEncapsulation.class);
assertTrue(Arrays.areEqual(secEncrypted.getEncoded(), secEncryptedExtracted.getEncoded()));
}
}
| PQCMLKEMGenerateEncapsulationAESNoAutowiredTest |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/plan/stats/TableStats.java | {
"start": 1089,
"end": 4327
} | class ____ {
/**
* Unknown definition for table stats: Unknown {@link #rowCount} is -1. Unknown {@link
* #colStats} is not exist in map.
*/
public static final TableStats UNKNOWN = new TableStats(-1, new HashMap<>());
/** cardinality of table. */
private final long rowCount;
/** colStats statistics of table columns. */
private final Map<String, ColumnStats> colStats;
public TableStats(long rowCount) {
this(rowCount, new HashMap<>());
}
public TableStats(long rowCount, Map<String, ColumnStats> colStats) {
this.rowCount = rowCount;
this.colStats = colStats;
}
public long getRowCount() {
return rowCount;
}
public Map<String, ColumnStats> getColumnStats() {
return colStats;
}
/**
* Create a deep copy of "this" instance.
*
* @return a deep copy
*/
public TableStats copy() {
TableStats copy = new TableStats(this.rowCount);
for (Map.Entry<String, ColumnStats> entry : this.colStats.entrySet()) {
copy.colStats.put(entry.getKey(), entry.getValue().copy());
}
return copy;
}
/**
* Merges two table stats. When the stats are unknown, whatever the other are, we need return
* unknown stats. See {@link #UNKNOWN}.
*
* @param other The other table stats to merge.
* @return The merged table stats.
*/
public TableStats merge(TableStats other, @Nullable Set<String> partitionKeys) {
if (this.rowCount < 0 || other.rowCount < 0) {
return TableStats.UNKNOWN;
}
long rowCount =
this.rowCount >= 0 && other.rowCount >= 0
? this.rowCount + other.rowCount
: UNKNOWN.rowCount;
return new TableStats(rowCount, mergeColumnStates(other, partitionKeys));
}
private Map<String, ColumnStats> mergeColumnStates(
TableStats other, @Nullable Set<String> partitionKeys) {
Map<String, ColumnStats> colStats = new HashMap<>();
for (Map.Entry<String, ColumnStats> entry : this.colStats.entrySet()) {
String col = entry.getKey();
ColumnStats stats = entry.getValue();
ColumnStats otherStats = other.colStats.get(col);
if (otherStats != null) {
if (partitionKeys != null) {
colStats.put(col, stats.merge(otherStats, partitionKeys.contains(col)));
} else {
colStats.put(col, stats.merge(otherStats, false));
}
}
}
return colStats;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
TableStats that = (TableStats) o;
return rowCount == that.rowCount && Objects.equals(colStats, that.colStats);
}
@Override
public int hashCode() {
return Objects.hash(rowCount, colStats);
}
@Override
public String toString() {
return "TableStats{" + "rowCount=" + rowCount + ", colStats=" + colStats + '}';
}
}
| TableStats |
java | lettuce-io__lettuce-core | src/test/java/io/lettuce/core/search/RediSearchGeospatialIntegrationTests.java | {
"start": 1959,
"end": 21675
} | class ____ {
// Index names
private static final String GEO_INDEX = "geo-idx";
private static final String GEOSHAPE_INDEX = "geoshape-idx";
private static final String CARTESIAN_INDEX = "cartesian-idx";
protected static RedisClient client;
protected static RedisCommands<String, String> redis;
public RediSearchGeospatialIntegrationTests() {
RedisURI redisURI = RedisURI.Builder.redis("127.0.0.1").withPort(16379).build();
client = RedisClient.create(redisURI);
client.setOptions(getOptions());
redis = client.connect().sync();
}
protected ClientOptions getOptions() {
return ClientOptions.builder().build();
}
@BeforeEach
public void prepare() {
redis.flushall();
}
/**
* Test basic GEO field functionality with longitude-latitude coordinates and radius queries. Based on Redis documentation
* examples for simple geospatial point storage and search.
*/
@Test
void testGeoFieldBasicFunctionality() {
// Create index with GEO field for location data
FieldArgs<String> locationField = GeoFieldArgs.<String> builder().name("location").build();
FieldArgs<String> nameField = TextFieldArgs.<String> builder().name("name").build();
FieldArgs<String> cityField = TextFieldArgs.<String> builder().name("city").build();
CreateArgs<String, String> createArgs = CreateArgs.<String, String> builder().withPrefix("store:")
.on(CreateArgs.TargetType.HASH).build();
String result = redis.ftCreate(GEO_INDEX, createArgs, Arrays.asList(locationField, nameField, cityField));
assertThat(result).isEqualTo("OK");
// Add stores with geographical coordinates (longitude, latitude)
Map<String, String> store1 = new HashMap<>();
store1.put("name", "Downtown Electronics");
store1.put("city", "Denver");
store1.put("location", "-104.991531, 39.742043"); // Denver coordinates
redis.hmset("store:1", store1);
Map<String, String> store2 = new HashMap<>();
store2.put("name", "Mountain Gear");
store2.put("city", "Boulder");
store2.put("location", "-105.2705456, 40.0149856"); // Boulder coordinates
redis.hmset("store:2", store2);
Map<String, String> store3 = new HashMap<>();
store3.put("name", "Tech Hub");
store3.put("city", "Colorado Springs");
store3.put("location", "-104.800644, 38.846127"); // Colorado Springs coordinates
redis.hmset("store:3", store3);
// Test 1: Find stores within 50 miles of Denver
SearchReply<String, String> results = redis.ftSearch(GEO_INDEX, "@location:[-104.991531 39.742043 50 mi]");
assertThat(results.getCount()).isEqualTo(2); // Denver and Boulder stores
assertThat(results.getResults()).hasSize(2);
// Test 2: Find stores within 100 miles of Colorado Springs
results = redis.ftSearch(GEO_INDEX, "@location:[-104.800644 38.846127 100 mi]");
assertThat(results.getCount()).isEqualTo(3); // All stores within 100 miles
assertThat(results.getResults()).hasSize(3);
// Test 3: Find stores within 20 miles of Denver (should only find Denver store)
results = redis.ftSearch(GEO_INDEX, "@location:[-104.991531 39.742043 20 mi]");
assertThat(results.getCount()).isEqualTo(1); // Only Denver store
assertThat(results.getResults()).hasSize(1);
assertThat(results.getResults().get(0).getFields().get("name")).isEqualTo("Downtown Electronics");
// Cleanup
redis.ftDropindex(GEO_INDEX);
}
/**
* Test GEO field with multiple locations per document using JSON array format. Demonstrates how a single document can have
* multiple geographical locations.
*/
@Test
void testGeoFieldMultipleLocations() {
// Create index for products with multiple store locations
FieldArgs<String> locationField = GeoFieldArgs.<String> builder().name("locations").build();
FieldArgs<String> productField = TextFieldArgs.<String> builder().name("product").build();
CreateArgs<String, String> createArgs = CreateArgs.<String, String> builder().withPrefix("product:")
.on(CreateArgs.TargetType.HASH).build();
redis.ftCreate(GEO_INDEX, createArgs, Arrays.asList(locationField, productField));
// Add product available at multiple locations
Map<String, String> product1 = new HashMap<>();
product1.put("product", "Laptop Pro");
// Multiple locations as comma-separated string (alternative format)
product1.put("locations", "-104.991531, 39.742043"); // Denver only for this test
redis.hmset("product:1", product1);
Map<String, String> product2 = new HashMap<>();
product2.put("product", "Wireless Headphones");
product2.put("locations", "-105.2705456, 40.0149856"); // Boulder
redis.hmset("product:2", product2);
// Test search for products available near Denver (use smaller radius to be more specific)
SearchReply<String, String> results = redis.ftSearch(GEO_INDEX, "@locations:[-104.991531 39.742043 10 mi]");
assertThat(results.getCount()).isEqualTo(1);
assertThat(results.getResults().get(0).getFields().get("product")).isEqualTo("Laptop Pro");
// Cleanup
redis.ftDropindex(GEO_INDEX);
}
/**
* Test GEOSHAPE field with POINT primitives using spherical coordinates. Demonstrates basic point storage and spatial
* queries using Well-Known Text format.
*/
@Test
void testGeoshapePointSphericalCoordinates() {
// Create index with GEOSHAPE field using spherical coordinates (default)
FieldArgs<String> geomField = GeoshapeFieldArgs.<String> builder().name("geom").spherical().build();
FieldArgs<String> nameField = TextFieldArgs.<String> builder().name("name").build();
CreateArgs<String, String> createArgs = CreateArgs.<String, String> builder().withPrefix("location:")
.on(CreateArgs.TargetType.HASH).build();
redis.ftCreate(GEOSHAPE_INDEX, createArgs, Arrays.asList(geomField, nameField));
// Add locations using WKT POINT format with geographical coordinates
Map<String, String> location1 = new HashMap<>();
location1.put("name", "Central Park");
location1.put("geom", "POINT (-73.965355 40.782865)"); // Central Park, NYC
redis.hmset("location:1", location1);
Map<String, String> location2 = new HashMap<>();
location2.put("name", "Times Square");
location2.put("geom", "POINT (-73.985130 40.758896)"); // Times Square, NYC
redis.hmset("location:2", location2);
Map<String, String> location3 = new HashMap<>();
location3.put("name", "Brooklyn Bridge");
location3.put("geom", "POINT (-73.996736 40.706086)"); // Brooklyn Bridge, NYC
redis.hmset("location:3", location3);
// Test 1: Find points within Manhattan area (rough polygon)
String manhattanPolygon = "POLYGON ((-74.047 40.680, -74.047 40.820, -73.910 40.820, -73.910 40.680, -74.047 40.680))";
SearchArgs<String, String> withinArgs = SearchArgs.<String, String> builder().param("area", manhattanPolygon).build();
SearchReply<String, String> results = redis.ftSearch(GEOSHAPE_INDEX, "@geom:[WITHIN $area]", withinArgs);
assertThat(results.getCount()).isEqualTo(3); // All locations are in Manhattan
assertThat(results.getResults()).hasSize(3);
// Cleanup
redis.ftDropindex(GEOSHAPE_INDEX);
}
/**
* Test GEOSHAPE field with POLYGON primitives and spatial relationship queries. Demonstrates advanced polygon storage and
* WITHIN, CONTAINS, INTERSECTS, DISJOINT operations.
*/
@Test
void testGeoshapePolygonSpatialRelationships() {
// 7.2 has a different behavior, but we do not want to test corner cases for old versions
assumeTrue(RedisConditions.of(redis).hasVersionGreaterOrEqualsTo("7.4"));
// Create index with GEOSHAPE field using Cartesian coordinates for easier testing
FieldArgs<String> geomField = GeoshapeFieldArgs.<String> builder().name("geom").flat().build();
FieldArgs<String> nameField = TextFieldArgs.<String> builder().name("name").build();
CreateArgs<String, String> createArgs = CreateArgs.<String, String> builder().withPrefix("shape:")
.on(CreateArgs.TargetType.HASH).build();
redis.ftCreate(CARTESIAN_INDEX, createArgs, Arrays.asList(geomField, nameField));
// Add shapes using WKT format with Cartesian coordinates
Map<String, String> shape1 = new HashMap<>();
shape1.put("name", "Large Square");
shape1.put("geom", "POLYGON ((0 0, 0 4, 4 4, 4 0, 0 0))"); // Large square
redis.hmset("shape:1", shape1);
Map<String, String> shape2 = new HashMap<>();
shape2.put("name", "Small Square");
shape2.put("geom", "POLYGON ((1 1, 1 2, 2 2, 2 1, 1 1))"); // Small square inside large square
redis.hmset("shape:2", shape2);
Map<String, String> shape3 = new HashMap<>();
shape3.put("name", "Overlapping Rectangle");
shape3.put("geom", "POLYGON ((3 1, 3 3, 5 3, 5 1, 3 1))"); // Rectangle overlapping large square
redis.hmset("shape:3", shape3);
Map<String, String> shape4 = new HashMap<>();
shape4.put("name", "Separate Triangle");
shape4.put("geom", "POLYGON ((6 6, 7 8, 8 6, 6 6))"); // Triangle separate from other shapes
redis.hmset("shape:4", shape4);
// Add a point for testing
Map<String, String> point1 = new HashMap<>();
point1.put("name", "Center Point");
point1.put("geom", "POINT (1.5 1.5)"); // Point inside small square
redis.hmset("shape:5", point1);
// Test 1: WITHIN - Find shapes within the large square
String largeSquare = "POLYGON ((0 0, 0 4, 4 4, 4 0, 0 0))";
SearchArgs<String, String> withinArgs = SearchArgs.<String, String> builder().param("container", largeSquare).build();
SearchReply<String, String> results = redis.ftSearch(CARTESIAN_INDEX, "@geom:[WITHIN $container]", withinArgs);
// Should find small square and center point (both entirely within large square)
assertThat(results.getCount()).isGreaterThanOrEqualTo(2);
// Test 2: CONTAINS - Find shapes that contain a specific point
String testPoint = "POINT (1.5 1.5)";
SearchArgs<String, String> containsArgs = SearchArgs.<String, String> builder().param("point", testPoint).build();
results = redis.ftSearch(CARTESIAN_INDEX, "@geom:[CONTAINS $point]", containsArgs);
// Should find large square and small square (both contain the point)
assertThat(results.getCount()).isGreaterThanOrEqualTo(2);
// Test 3: INTERSECTS - Find shapes that intersect with a test area
String testArea = "POLYGON ((2 0, 2 2, 4 2, 4 0, 2 0))";
SearchArgs<String, String> intersectsArgs = SearchArgs.<String, String> builder().param("area", testArea).build();
results = redis.ftSearch(CARTESIAN_INDEX, "@geom:[INTERSECTS $area]", intersectsArgs);
// Should find large square and overlapping rectangle
assertThat(results.getCount()).isGreaterThanOrEqualTo(2);
// Test 4: DISJOINT - Find shapes that don't overlap with a test area
SearchArgs<String, String> disjointArgs = SearchArgs.<String, String> builder().param("area", testArea).build();
results = redis.ftSearch(CARTESIAN_INDEX, "@geom:[DISJOINT $area]", disjointArgs);
// Should find separate triangle and possibly others
assertThat(results.getCount()).isGreaterThanOrEqualTo(1);
// Cleanup
redis.ftDropindex(CARTESIAN_INDEX);
}
/**
* Test complex geospatial queries combining GEO and GEOSHAPE fields with other field types. Demonstrates real-world
* scenarios with mixed field types and complex query conditions.
*/
@Test
void testComplexGeospatialQueries() {
// 7.2 has a different behavior, but we do not want to test corner cases for old versions
assumeTrue(RedisConditions.of(redis).hasVersionGreaterOrEqualsTo("7.4"));
// Create index with mixed field types including geospatial
FieldArgs<String> locationField = GeoFieldArgs.<String> builder().name("location").build();
FieldArgs<String> serviceAreaField = GeoshapeFieldArgs.<String> builder().name("service_area").spherical().build();
FieldArgs<String> nameField = TextFieldArgs.<String> builder().name("name").build();
FieldArgs<String> categoryField = TextFieldArgs.<String> builder().name("category").build();
FieldArgs<String> ratingField = TextFieldArgs.<String> builder().name("rating").build();
CreateArgs<String, String> createArgs = CreateArgs.<String, String> builder().withPrefix("business:")
.on(CreateArgs.TargetType.HASH).build();
redis.ftCreate(GEO_INDEX, createArgs,
Arrays.asList(locationField, serviceAreaField, nameField, categoryField, ratingField));
// Add businesses with both point locations and service areas
Map<String, String> business1 = new HashMap<>();
business1.put("name", "Downtown Pizza");
business1.put("category", "restaurant");
business1.put("rating", "4.5");
business1.put("location", "-104.991531, 39.742043"); // Denver
business1.put("service_area", "POLYGON ((-105.1 39.6, -105.1 39.9, -104.8 39.9, -104.8 39.6, -105.1 39.6))");
redis.hmset("business:1", business1);
Map<String, String> business2 = new HashMap<>();
business2.put("name", "Mountain Coffee");
business2.put("category", "cafe");
business2.put("rating", "4.8");
business2.put("location", "-105.2705456, 40.0149856"); // Boulder
business2.put("service_area", "POLYGON ((-105.4 39.9, -105.4 40.2, -105.1 40.2, -105.1 39.9, -105.4 39.9))");
redis.hmset("business:2", business2);
// Test 1: Find restaurants within 30 miles of a location
SearchReply<String, String> results = redis.ftSearch(GEO_INDEX,
"(@category:restaurant) (@location:[-104.991531 39.742043 30 mi])");
assertThat(results.getCount()).isEqualTo(1);
assertThat(results.getResults().get(0).getFields().get("name")).isEqualTo("Downtown Pizza");
// Test 2: Find businesses whose service area contains a specific point
String customerLocation = "POINT (-105.0 39.8)";
SearchArgs<String, String> serviceArgs = SearchArgs.<String, String> builder().param("customer", customerLocation)
.build();
results = redis.ftSearch(GEO_INDEX, "@service_area:[CONTAINS $customer]", serviceArgs);
assertThat(results.getCount()).isGreaterThanOrEqualTo(1);
// Test 3: Find high-rated cafes with service areas intersecting a region
String searchRegion = "POLYGON ((-105.3 40.0, -105.3 40.1, -105.2 40.1, -105.2 40.0, -105.3 40.0))";
SearchArgs<String, String> complexArgs = SearchArgs.<String, String> builder().param("region", searchRegion).build();
results = redis.ftSearch(GEO_INDEX, "(@category:cafe) (@service_area:[INTERSECTS $region])", complexArgs);
assertThat(results.getCount()).isGreaterThanOrEqualTo(0); // May or may not find results depending on exact coordinates
// Cleanup
redis.ftDropindex(GEO_INDEX);
}
/**
* Test geospatial queries with different distance units and coordinate systems. Demonstrates unit conversions and
* coordinate system differences.
*/
@Test
void testGeospatialUnitsAndCoordinateSystems() {
// Create index for testing different units
FieldArgs<String> locationField = GeoFieldArgs.<String> builder().name("location").build();
FieldArgs<String> nameField = TextFieldArgs.<String> builder().name("name").build();
CreateArgs<String, String> createArgs = CreateArgs.<String, String> builder().withPrefix("poi:")
.on(CreateArgs.TargetType.HASH).build();
redis.ftCreate(GEO_INDEX, createArgs, Arrays.asList(locationField, nameField));
// Add points of interest
Map<String, String> poi1 = new HashMap<>();
poi1.put("name", "City Center");
poi1.put("location", "0.0, 0.0"); // Origin point
redis.hmset("poi:1", poi1);
Map<String, String> poi2 = new HashMap<>();
poi2.put("name", "North Point");
poi2.put("location", "0.0, 0.01"); // ~1.1 km north
redis.hmset("poi:2", poi2);
Map<String, String> poi3 = new HashMap<>();
poi3.put("name", "East Point");
poi3.put("location", "0.01, 0.0"); // ~1.1 km east
redis.hmset("poi:3", poi3);
// Test 1: Search with kilometers
SearchReply<String, String> results = redis.ftSearch(GEO_INDEX, "@location:[0.0 0.0 2 km]");
assertThat(results.getCount()).isEqualTo(3); // All points within 2 km
// Test 2: Search with miles
results = redis.ftSearch(GEO_INDEX, "@location:[0.0 0.0 1 mi]");
assertThat(results.getCount()).isEqualTo(3); // All points within 1 mile
// Test 3: Search with meters
results = redis.ftSearch(GEO_INDEX, "@location:[0.0 0.0 500 m]");
assertThat(results.getCount()).isEqualTo(1); // Only center point within 500m
// Cleanup
redis.ftDropindex(GEO_INDEX);
}
/**
* Test error handling and edge cases for geospatial queries. Demonstrates proper handling of invalid coordinates, malformed
* WKT, and boundary conditions.
*/
@Test
void testGeospatialErrorHandling() {
// Create index for error testing
FieldArgs<String> locationField = GeoFieldArgs.<String> builder().name("location").build();
FieldArgs<String> geomField = GeoshapeFieldArgs.<String> builder().name("geom").build();
FieldArgs<String> nameField = TextFieldArgs.<String> builder().name("name").build();
CreateArgs<String, String> createArgs = CreateArgs.<String, String> builder().withPrefix("test:")
.on(CreateArgs.TargetType.HASH).build();
redis.ftCreate(GEO_INDEX, createArgs, Arrays.asList(locationField, geomField, nameField));
// Add valid test data
Map<String, String> validData = new HashMap<>();
validData.put("name", "Valid Location");
validData.put("location", "-104.991531, 39.742043");
validData.put("geom", "POINT (-104.991531 39.742043)");
redis.hmset("test:1", validData);
// Test 1: Valid query should work
SearchReply<String, String> results = redis.ftSearch(GEO_INDEX, "@location:[-104.991531 39.742043 10 mi]");
assertThat(results.getCount()).isEqualTo(1);
// Test 2: Query with no results should return empty
results = redis.ftSearch(GEO_INDEX, "@location:[0.0 0.0 1 m]");
assertThat(results.getCount()).isEqualTo(0);
// Test 3: Valid GEOSHAPE query
String validPolygon = "POLYGON ((-105 39, -105 40, -104 40, -104 39, -105 39))";
SearchArgs<String, String> validArgs = SearchArgs.<String, String> builder().param("area", validPolygon).build();
results = redis.ftSearch(GEO_INDEX, "@geom:[WITHIN $area]", validArgs);
assertThat(results.getCount()).isEqualTo(1);
// Cleanup
redis.ftDropindex(GEO_INDEX);
}
}
| RediSearchGeospatialIntegrationTests |
java | google__guava | android/guava-tests/test/com/google/common/io/CountingOutputStreamTest.java | {
"start": 871,
"end": 2011
} | class ____ extends IoTestCase {
public void testCount() throws Exception {
int written = 0;
ByteArrayOutputStream out = new ByteArrayOutputStream();
CountingOutputStream counter = new CountingOutputStream(out);
assertEquals(written, out.size());
assertEquals(written, counter.getCount());
counter.write(0);
written += 1;
assertEquals(written, out.size());
assertEquals(written, counter.getCount());
byte[] data = new byte[10];
counter.write(data);
written += 10;
assertEquals(written, out.size());
assertEquals(written, counter.getCount());
counter.write(data, 0, 5);
written += 5;
assertEquals(written, out.size());
assertEquals(written, counter.getCount());
counter.write(data, 2, 5);
written += 5;
assertEquals(written, out.size());
assertEquals(written, counter.getCount());
// Test that illegal arguments do not affect count
assertThrows(IndexOutOfBoundsException.class, () -> counter.write(data, 0, data.length + 1));
assertEquals(written, out.size());
assertEquals(written, counter.getCount());
}
}
| CountingOutputStreamTest |
java | apache__flink | flink-table/flink-table-code-splitter/src/main/java/org/apache/flink/table/codesplit/ReturnAndJumpCounter.java | {
"start": 1015,
"end": 1402
} | class ____ extends JavaParserBaseVisitor<Void> {
private int counter = 0;
@Override
public Void visitStatement(StatementContext ctx) {
if (ctx.RETURN() != null || ctx.BREAK() != null || ctx.CONTINUE() != null) {
counter++;
}
return visitChildren(ctx);
}
public int getCounter() {
return counter;
}
}
| ReturnAndJumpCounter |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/batch/SetAndBagCollectionTest.java | {
"start": 1214,
"end": 2879
} | class ____ {
@BeforeAll
public void setUp(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
Customer customer = new Customer( 1, "First" );
Order order1 = new Order( 1, "First Order" );
Order order2 = new Order( 2, "Second Order" );
customer.addOrder( order1 );
customer.addOrder( order2 );
Item item1 = new Item( 1, "first" );
Item item2 = new Item( 2, "second" );
Item item3 = new Item( 3, "third" );
order1.addItem( item1 );
order1.addItem( item2 );
order1.addItem( item3 );
order1.addItem( item3 );
Item item4 = new Item( 4, "fourth" );
Item item5 = new Item( 5, "fifth" );
order2.addItem( item4 );
order2.addItem( item5 );
session.persist( item1 );
session.persist( item2 );
session.persist( item3 );
session.persist( item4 );
session.persist( item5 );
session.persist( order1 );
session.persist( order2 );
session.persist( customer );
}
);
}
@Test
public void testThatRetrievedBagElementsAreofTheRightCardinality(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
Customer customer = session.get( Customer.class, 1 );
Set<Order> orders = customer.getOrders();
assertThat( orders.size() ).isEqualTo( 2 );
orders.forEach(
order -> {
Collection<Item> items = order.getItems();
if ( order.getId() == 1 ) {
assertThat( items.size() ).isEqualTo( 4 );
}
else {
assertThat( items.size() ).isEqualTo( 2 );
}
}
);
}
);
}
@Entity(name = "Customer")
public static | SetAndBagCollectionTest |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/DefaultJSONParserTest_error.java | {
"start": 251,
"end": 1609
} | class ____ extends TestCase {
public void test_error_1() {
String text = "{\"obj\":{}]}";
char[] chars = text.toCharArray();
DefaultJSONParser parser = new DefaultJSONParser(chars, chars.length, ParserConfig.getGlobalInstance(), 0);
JSONException error = null;
try {
parser.parseObject();
} catch (JSONException ex) {
error = ex;
}
Assert.assertNotNull(error);
}
public void test_error_2() {
String text = "{\"obj\":[]]}";
char[] chars = text.toCharArray();
DefaultJSONParser parser = new DefaultJSONParser(chars, chars.length, ParserConfig.getGlobalInstance(), 0);
JSONException error = null;
try {
parser.parseObject();
} catch (JSONException ex) {
error = ex;
}
Assert.assertNotNull(error);
}
public void test_error_3() {
String text = "{\"obj\":true]}";
char[] chars = text.toCharArray();
DefaultJSONParser parser = new DefaultJSONParser(chars, chars.length, ParserConfig.getGlobalInstance(), 0);
JSONException error = null;
try {
parser.parseObject();
} catch (JSONException ex) {
error = ex;
}
Assert.assertNotNull(error);
}
}
| DefaultJSONParserTest_error |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/http/codec/multipart/MultipartParser.java | {
"start": 17718,
"end": 18069
} | class ____ implements State {
public static final DisposedState INSTANCE = new DisposedState();
private DisposedState() {
}
@Override
public void onNext(DataBuffer buf) {
DataBufferUtils.release(buf);
}
@Override
public void onComplete() {
}
@Override
public String toString() {
return "DISPOSED";
}
}
}
| DisposedState |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/mappedBy/OneToManyMappedByTypeTest.java | {
"start": 4780,
"end": 4914
} | class ____ {
@Id
private Long id;
@ManyToOne
private EntityC parent;
}
@Entity( name = "EntityC" )
public static | EntityBWrong |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsBoolsEvaluator.java | {
"start": 4955,
"end": 5718
} | class ____ implements EvalOperator.ExpressionEvaluator.Factory {
private final Source source;
private final EvalOperator.ExpressionEvaluator.Factory lhs;
private final EvalOperator.ExpressionEvaluator.Factory rhs;
public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory lhs,
EvalOperator.ExpressionEvaluator.Factory rhs) {
this.source = source;
this.lhs = lhs;
this.rhs = rhs;
}
@Override
public NotEqualsBoolsEvaluator get(DriverContext context) {
return new NotEqualsBoolsEvaluator(source, lhs.get(context), rhs.get(context), context);
}
@Override
public String toString() {
return "NotEqualsBoolsEvaluator[" + "lhs=" + lhs + ", rhs=" + rhs + "]";
}
}
}
| Factory |
java | grpc__grpc-java | examples/src/main/java/io/grpc/examples/healthservice/HealthServiceServer.java | {
"start": 3174,
"end": 4686
} | class ____ extends GreeterGrpc.GreeterImplBase {
boolean isServing = true;
@Override
public void sayHello(HelloRequest req, StreamObserver<HelloReply> responseObserver) {
if (!isServing) {
responseObserver.onError(
Status.INTERNAL.withDescription("Not Serving right now").asRuntimeException());
return;
}
if (isNameLongEnough(req)) {
HelloReply reply = HelloReply.newBuilder().setMessage("Hello " + req.getName()).build();
responseObserver.onNext(reply);
responseObserver.onCompleted();
} else {
logger.warning("Tiny message received, throwing a temper tantrum");
health.setStatus("", ServingStatus.NOT_SERVING);
isServing = false;
// In 10 seconds set it back to serving
new Thread(new Runnable() {
@Override
public void run() {
try {
Thread.sleep(10000);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
return;
}
isServing = true;
health.setStatus("", ServingStatus.SERVING);
logger.info("tantrum complete");
}
}).start();
responseObserver.onError(
Status.INVALID_ARGUMENT.withDescription("Offended by short name").asRuntimeException());
}
}
private boolean isNameLongEnough(HelloRequest req) {
return isServing && req.getName().length() >= 5;
}
}
}
| GreeterImpl |
java | google__dagger | hilt-compiler/main/java/dagger/hilt/android/processor/internal/androidentrypoint/AndroidEntryPointMetadata.java | {
"start": 2844,
"end": 2946
} | class ____ with @AndroidEntryPoint. */
public abstract XTypeElement element();
/** The base | annotated |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/support/ContextLoaderUtilsContextHierarchyTests.java | {
"start": 24430,
"end": 24583
} | interface ____ {
}
@ContextHierarchy(@ContextConfiguration({ "B-one.xml", "B-two.xml" }))
@Retention(RetentionPolicy.RUNTIME)
private @ | ContextHierarchyA |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java | {
"start": 5330,
"end": 6250
} | interface ____ extends Closeable {
/**
* Associate an inode with its parent directory.
*/
public void putDirChild(long parentId, long childId) throws IOException;
/**
* Associate a directory with its inode Id.
*/
public void putDir(INode dir) throws IOException;
/** Get the full path of the parent directory for the given inode. */
public String getParentPath(long inode) throws IOException;
/** Synchronize metadata to persistent storage, if possible */
public void sync() throws IOException;
/** Returns the name of inode. */
String getName(long id) throws IOException;
/**
* Returns the id of the parent's inode, if mentioned in
* INodeDirectorySection, throws IgnoreSnapshotException otherwise.
*/
long getParentId(long id) throws IOException;
}
/**
* Maintain all the metadata in memory.
*/
private static | MetadataMap |
java | quarkusio__quarkus | extensions/grpc/runtime/src/main/java/io/quarkus/grpc/runtime/config/GrpcClientConfiguration.java | {
"start": 8830,
"end": 9129
} | interface ____ {
/**
* Path to the key file (PFX format).
*/
Optional<String> path();
/**
* Password of the key.
*/
Optional<String> password();
}
@ConfigGroup
| PfxConfiguration |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java | {
"start": 62721,
"end": 98606
} | class ____ implements Closeable {
private final String testName;
private final TemporarySocketDirectory sockDir;
private boolean closed = false;
private final boolean formerTcpReadsDisabled;
public ShortCircuitTestContext(String testName) {
this.testName = testName;
this.sockDir = new TemporarySocketDirectory();
DomainSocket.disableBindPathValidation();
formerTcpReadsDisabled = DFSInputStream.tcpReadsDisabledForTesting;
assumeTrue(DomainSocket.getLoadingFailureReason() == null);
}
public Configuration newConfiguration() {
Configuration conf = new Configuration();
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
new File(sockDir.getDir(),
testName + "._PORT.sock").getAbsolutePath());
return conf;
}
public String getTestName() {
return testName;
}
public void close() throws IOException {
if (closed) return;
closed = true;
DFSInputStream.tcpReadsDisabledForTesting = formerTcpReadsDisabled;
sockDir.close();
}
}
/**
* Verify that two files have the same contents.
*
* @param fs The file system containing the two files.
* @param p1 The path of the first file.
* @param p2 The path of the second file.
* @param len The length of the two files.
* @throws IOException
*/
public static void verifyFilesEqual(FileSystem fs, Path p1, Path p2, int len)
throws IOException {
try (FSDataInputStream in1 = fs.open(p1);
FSDataInputStream in2 = fs.open(p2)) {
for (int i = 0; i < len; i++) {
assertEquals(in1.read(), in2.read(), "Mismatch at byte " + i);
}
}
}
/**
* Verify that two files have different contents.
*
* @param fs The file system containing the two files.
* @param p1 The path of the first file.
* @param p2 The path of the second file.
* @param len The length of the two files.
* @throws IOException
*/
public static void verifyFilesNotEqual(FileSystem fs, Path p1, Path p2,
int len) throws IOException {
try (FSDataInputStream in1 = fs.open(p1);
FSDataInputStream in2 = fs.open(p2)) {
for (int i = 0; i < len; i++) {
if (in1.read() != in2.read()) {
return;
}
}
fail("files are equal, but should not be");
}
}
/**
* Helper function that verified blocks of a file are placed on the
* expected storage type.
*
* @param fs The file system containing the the file.
* @param client The DFS client used to access the file
* @param path name to the file to verify
* @param storageType expected storage type
* @returns true if file exists and its blocks are located on the expected
* storage type.
* false otherwise.
*/
public static boolean verifyFileReplicasOnStorageType(FileSystem fs,
DFSClient client, Path path, StorageType storageType) throws IOException {
if (!fs.exists(path)) {
LOG.info("verifyFileReplicasOnStorageType: file " + path + "does not exist");
return false;
}
long fileLength = client.getFileInfo(path.toString()).getLen();
LocatedBlocks locatedBlocks =
client.getLocatedBlocks(path.toString(), 0, fileLength);
for (LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) {
if (locatedBlock.getStorageTypes()[0] != storageType) {
LOG.info("verifyFileReplicasOnStorageType: for file " + path +
". Expect blk" + locatedBlock +
" on Type: " + storageType + ". Actual Type: " +
locatedBlock.getStorageTypes()[0]);
return false;
}
}
return true;
}
/**
* Verify the aggregated {@link ClientProtocol#getStats()} block counts equal
* the sum of {@link ClientProtocol#getReplicatedBlockStats()} and
* {@link ClientProtocol#getECBlockGroupStats()}.
* @throws Exception
*/
public static void verifyClientStats(Configuration conf,
MiniDFSCluster cluster) throws Exception {
ClientProtocol client = NameNodeProxies.createProxy(conf,
cluster.getFileSystem(0).getUri(),
ClientProtocol.class).getProxy();
long[] aggregatedStats = cluster.getNameNode().getRpcServer().getStats();
ReplicatedBlockStats replicatedBlockStats =
client.getReplicatedBlockStats();
ECBlockGroupStats ecBlockGroupStats = client.getECBlockGroupStats();
assertEquals(aggregatedStats[ClientProtocol.GET_STATS_LOW_REDUNDANCY_IDX],
aggregatedStats[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX],
"Under replicated stats not matching!");
assertEquals(aggregatedStats[ClientProtocol.GET_STATS_LOW_REDUNDANCY_IDX],
replicatedBlockStats.getLowRedundancyBlocks() +
ecBlockGroupStats.getLowRedundancyBlockGroups(),
"Low redundancy stats not matching!");
assertEquals(aggregatedStats[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX],
replicatedBlockStats.getCorruptBlocks() + ecBlockGroupStats.getCorruptBlockGroups(),
"Corrupt blocks stats not matching!");
assertEquals(aggregatedStats[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX],
replicatedBlockStats.getMissingReplicaBlocks() +
ecBlockGroupStats.getMissingBlockGroups(),
"Missing blocks stats not matching!");
assertEquals(aggregatedStats[ClientProtocol.GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX],
replicatedBlockStats.getMissingReplicationOneBlocks(),
"Missing blocks with replication factor one not matching!");
assertEquals(aggregatedStats[ClientProtocol.GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX],
replicatedBlockStats.getBytesInFutureBlocks() +
ecBlockGroupStats.getBytesInFutureBlockGroups(),
"Bytes in future blocks stats not matching!");
assertEquals(aggregatedStats[ClientProtocol.GET_STATS_PENDING_DELETION_BLOCKS_IDX],
replicatedBlockStats.getPendingDeletionBlocks() +
ecBlockGroupStats.getPendingDeletionBlocks(),
"Pending deletion blocks stats not matching!");
}
/**
* Helper function to create a key in the Key Provider. Defaults
* to the first indexed NameNode's Key Provider.
*
* @param keyName The name of the key to create
* @param cluster The cluster to create it in
* @param conf Configuration to use
*/
public static void createKey(String keyName, MiniDFSCluster cluster,
Configuration conf)
throws NoSuchAlgorithmException, IOException {
createKey(keyName, cluster, 0, conf);
}
/**
* Helper function to create a key in the Key Provider.
*
* @param keyName The name of the key to create
* @param cluster The cluster to create it in
* @param idx The NameNode index
* @param conf Configuration to use
*/
public static void createKey(String keyName, MiniDFSCluster cluster,
int idx, Configuration conf)
throws NoSuchAlgorithmException, IOException {
NameNode nn = cluster.getNameNode(idx);
KeyProvider provider = nn.getNamesystem().getProvider();
final KeyProvider.Options options = KeyProvider.options(conf);
options.setDescription(keyName);
options.setBitLength(128);
provider.createKey(keyName, options);
provider.flush();
}
/**
* Helper function to delete a key in the Key Provider. Defaults
* to the first indexed NameNode's Key Provider.
*
* @param keyName The name of the key to create
* @param cluster The cluster to create it in
*/
public static void deleteKey(String keyName, MiniDFSCluster cluster)
throws NoSuchAlgorithmException, IOException {
deleteKey(keyName, cluster, 0);
}
/**
* Helper function to delete a key in the Key Provider.
*
* @param keyName The name of the key to create
* @param cluster The cluster to create it in
* @param idx The NameNode index
*/
public static void deleteKey(String keyName, MiniDFSCluster cluster, int idx)
throws NoSuchAlgorithmException, IOException {
NameNode nn = cluster.getNameNode(idx);
KeyProvider provider = nn.getNamesystem().getProvider();
provider.deleteKey(keyName);
provider.flush();
}
/**
* @return the node which is expected to run the recovery of the
* given block, which is known to be under construction inside the
* given NameNOde.
*/
public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
ExtendedBlock blk) {
BlockManager bm0 = nn.getNamesystem().getBlockManager();
BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
assertTrue(!storedBlock.isComplete(),
"Block " + blk + " should be under construction, " + "got: " + storedBlock);
// We expect that the replica with the most recent heart beat will be
// the one to be in charge of the synchronization / recovery protocol.
final DatanodeStorageInfo[] storages = storedBlock
.getUnderConstructionFeature().getExpectedStorageLocations();
DatanodeStorageInfo expectedPrimary = storages[0];
long mostRecentLastUpdate = expectedPrimary.getDatanodeDescriptor()
.getLastUpdateMonotonic();
for (int i = 1; i < storages.length; i++) {
final long lastUpdate = storages[i].getDatanodeDescriptor()
.getLastUpdateMonotonic();
if (lastUpdate > mostRecentLastUpdate) {
expectedPrimary = storages[i];
mostRecentLastUpdate = lastUpdate;
}
}
return expectedPrimary.getDatanodeDescriptor();
}
public static void toolRun(Tool tool, String cmd, int retcode, String contain)
throws Exception {
String [] cmds = StringUtils.split(cmd, ' ');
System.out.flush();
System.err.flush();
PrintStream origOut = System.out;
PrintStream origErr = System.err;
String output = null;
int ret = 0;
try {
ByteArrayOutputStream bs = new ByteArrayOutputStream(1024);
try (PrintStream out = new PrintStream(bs)) {
System.setOut(out);
System.setErr(out);
ret = tool.run(cmds);
System.out.flush();
System.err.flush();
}
output = bs.toString();
} finally {
System.setOut(origOut);
System.setErr(origErr);
}
System.out.println("Output for command: " + cmd + " retcode: " + ret);
if (output != null) {
System.out.println(output);
}
assertEquals(retcode, ret);
if (contain != null) {
assertTrue(output.contains(contain),
"The real output is: " + output + ".\n It should contain: " + contain);
}
}
public static void FsShellRun(String cmd, int retcode, String contain,
Configuration conf) throws Exception {
FsShell shell = new FsShell(new Configuration(conf));
toolRun(shell, cmd, retcode, contain);
}
public static void DFSAdminRun(String cmd, int retcode, String contain,
Configuration conf) throws Exception {
DFSAdmin admin = new DFSAdmin(new Configuration(conf));
toolRun(admin, cmd, retcode, contain);
}
public static void FsShellRun(String cmd, Configuration conf)
throws Exception {
FsShellRun(cmd, 0, null, conf);
}
/**
* Wait for datanode to reach alive or dead state for waitTime given in
* milliseconds.
*/
public static void waitForDatanodeState(
final MiniDFSCluster cluster, final String nodeID,
final boolean alive, int waitTime)
throws TimeoutException, InterruptedException {
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
FSNamesystem namesystem = cluster.getNamesystem();
final DatanodeDescriptor dd = BlockManagerTestUtil.getDatanode(
namesystem, nodeID);
return (dd.isAlive() == alive);
}
}, 100, waitTime);
}
/**
* Change the length of a block at datanode dnIndex.
*/
public static boolean changeReplicaLength(MiniDFSCluster cluster,
ExtendedBlock blk, int dnIndex, int lenDelta) throws IOException {
File blockFile = cluster.getBlockFile(dnIndex, blk);
if (blockFile != null && blockFile.exists()) {
try (RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw")) {
raFile.setLength(raFile.length() + lenDelta);
}
return true;
}
LOG.info("failed to change length of block " + blk);
return false;
}
public static void setNameNodeLogLevel(Level level) {
GenericTestUtils.setLogLevel(FSNamesystem.LOG, level);
GenericTestUtils.setLogLevel(BlockManager.LOG, level);
GenericTestUtils.setLogLevel(LeaseManager.LOG, level);
GenericTestUtils.setLogLevel(NameNode.LOG, level);
GenericTestUtils.setLogLevel(NameNode.stateChangeLog, level);
GenericTestUtils.setLogLevel(NameNode.blockStateChangeLog, level);
}
/**
* Get the NamenodeProtocol RPC proxy for the NN associated with this
* DFSClient object
*
* @param nameNodeUri the URI of the NN to get a proxy for.
*
* @return the Namenode RPC proxy associated with this DFSClient object
*/
@VisibleForTesting
public static NamenodeProtocol getNamenodeProtocolProxy(Configuration conf,
URI nameNodeUri, UserGroupInformation ugi)
throws IOException {
return NameNodeProxies.createNonHAProxy(conf,
DFSUtilClient.getNNAddress(nameNodeUri), NamenodeProtocol.class, ugi,
false).getProxy();
}
/**
* Get the RefreshUserMappingsProtocol RPC proxy for the NN associated with
* this DFSClient object
*
* @param nnAddr the address of the NN to get a proxy for.
*
* @return the RefreshUserMappingsProtocol RPC proxy associated with this
* DFSClient object
*/
@VisibleForTesting
public static RefreshUserMappingsProtocol getRefreshUserMappingsProtocolProxy(
Configuration conf, InetSocketAddress nnAddr) throws IOException {
return NameNodeProxies.createNonHAProxy(
conf, nnAddr, RefreshUserMappingsProtocol.class,
UserGroupInformation.getCurrentUser(), false).getProxy();
}
/**
* Set the datanode dead
*/
public static void setDatanodeDead(DatanodeInfo dn) {
dn.setLastUpdate(0);
// Set this to a large negative value.
// On short-lived VMs, the monotonic time can be less than the heartbeat
// expiry time. Setting this to 0 will fail to immediately mark the DN as
// dead.
dn.setLastUpdateMonotonic(Long.MIN_VALUE/2);
}
/**
* Update lastUpdate and lastUpdateMonotonic with some offset.
*/
public static void resetLastUpdatesWithOffset(DatanodeInfo dn, long offset) {
dn.setLastUpdate(Time.now() + offset);
dn.setLastUpdateMonotonic(Time.monotonicNow() + offset);
}
/**
* This method takes a set of block locations and fills the provided buffer
* with expected bytes based on simulated content from
* {@link SimulatedFSDataset}.
*
* @param lbs The block locations of a file
* @param expected The buffer to be filled with expected bytes on the above
* locations.
*/
public static void fillExpectedBuf(LocatedBlocks lbs, byte[] expected) {
Block[] blks = new Block[lbs.getLocatedBlocks().size()];
for (int i = 0; i < lbs.getLocatedBlocks().size(); i++) {
blks[i] = lbs.getLocatedBlocks().get(i).getBlock().getLocalBlock();
}
int bufPos = 0;
for (Block b : blks) {
for (long blkPos = 0; blkPos < b.getNumBytes(); blkPos++) {
assert bufPos < expected.length;
expected[bufPos++] = SimulatedFSDataset.simulatedByte(b, blkPos);
}
}
}
public static StorageReceivedDeletedBlocks[] makeReportForReceivedBlock(
Block block, BlockStatus blockStatus, DatanodeStorage storage) {
ReceivedDeletedBlockInfo[] receivedBlocks = new ReceivedDeletedBlockInfo[1];
receivedBlocks[0] = new ReceivedDeletedBlockInfo(block, blockStatus, null);
StorageReceivedDeletedBlocks[] reports = new StorageReceivedDeletedBlocks[1];
reports[0] = new StorageReceivedDeletedBlocks(storage, receivedBlocks);
return reports;
}
/**
* Creates the metadata of a file in striped layout. This method only
* manipulates the NameNode state without injecting data to DataNode.
* You should disable periodical heartbeat before use this.
* @param file Path of the file to create
* @param dir Parent path of the file
* @param numBlocks Number of striped block groups to add to the file
* @param numStripesPerBlk Number of striped cells in each block
* @param toMkdir
*/
public static void createStripedFile(MiniDFSCluster cluster, Path file,
Path dir, int numBlocks, int numStripesPerBlk, boolean toMkdir)
throws Exception {
createStripedFile(cluster, file, dir, numBlocks, numStripesPerBlk,
toMkdir, StripedFileTestUtil.getDefaultECPolicy());
}
/**
* Creates the metadata of a file in striped layout. This method only
* manipulates the NameNode state without injecting data to DataNode.
* You should disable periodical heartbeat before use this.
* @param file Path of the file to create
* @param dir Parent path of the file
* @param numBlocks Number of striped block groups to add to the file
* @param numStripesPerBlk Number of striped cells in each block
* @param toMkdir
* @param ecPolicy erasure coding policy apply to created file. A null value
* means using default erasure coding policy.
*/
public static void createStripedFile(MiniDFSCluster cluster, Path file,
Path dir, int numBlocks, int numStripesPerBlk, boolean toMkdir,
ErasureCodingPolicy ecPolicy) throws Exception {
DistributedFileSystem dfs = cluster.getFileSystem();
// If outer test already set EC policy, dir should be left as null
if (toMkdir) {
assert dir != null;
dfs.mkdirs(dir);
try {
dfs.getClient()
.setErasureCodingPolicy(dir.toString(), ecPolicy.getName());
} catch (IOException e) {
if (!e.getMessage().contains("non-empty directory")) {
throw e;
}
}
}
cluster.getNameNodeRpc()
.create(file.toString(), new FsPermission((short)0755),
dfs.getClient().getClientName(),
new EnumSetWritable<>(EnumSet.of(CreateFlag.CREATE)),
false, (short) 1, 128 * 1024 * 1024L, null, null, null);
FSNamesystem ns = cluster.getNamesystem();
FSDirectory fsdir = ns.getFSDirectory();
INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
ExtendedBlock previous = null;
for (int i = 0; i < numBlocks; i++) {
Block newBlock = addBlockToFile(true, cluster.getDataNodes(), dfs, ns,
file.toString(), fileNode, dfs.getClient().getClientName(),
previous, numStripesPerBlk, 0);
previous = new ExtendedBlock(ns.getBlockPoolId(), newBlock);
}
dfs.getClient().namenode.complete(file.toString(),
dfs.getClient().getClientName(), previous, fileNode.getId());
}
/**
* Adds a block or a striped block group to a file.
* This method only manipulates NameNode
* states of the file and the block without injecting data to DataNode.
* It does mimic block reports.
* You should disable periodical heartbeat before use this.
* @param isStripedBlock a boolean tell if the block added a striped block
* @param dataNodes List DataNodes to host the striped block group
* @param previous Previous block in the file
* @param numStripes Number of stripes in each block group
* @param len block size for a non striped block added
* @return The added block or block group
*/
public static Block addBlockToFile(boolean isStripedBlock,
List<DataNode> dataNodes, DistributedFileSystem fs, FSNamesystem ns,
String file, INodeFile fileNode,
String clientName, ExtendedBlock previous, int numStripes, int len)
throws Exception {
fs.getClient().namenode.addBlock(file, clientName, previous, null,
fileNode.getId(), null, null);
final BlockInfo lastBlock = fileNode.getLastBlock();
final int groupSize = fileNode.getPreferredBlockReplication();
assert dataNodes.size() >= groupSize;
// 1. RECEIVING_BLOCK IBR
for (int i = 0; i < groupSize; i++) {
DataNode dn = dataNodes.get(i);
final Block block = new Block(lastBlock.getBlockId() + i, 0,
lastBlock.getGenerationStamp());
DatanodeStorage storage = new DatanodeStorage(UUID.randomUUID().toString());
StorageReceivedDeletedBlocks[] reports = DFSTestUtil
.makeReportForReceivedBlock(block,
ReceivedDeletedBlockInfo.BlockStatus.RECEIVING_BLOCK, storage);
for (StorageReceivedDeletedBlocks report : reports) {
ns.processIncrementalBlockReport(dn.getDatanodeId(), report);
}
}
final ErasureCodingPolicy ecPolicy =
fs.getErasureCodingPolicy(new Path(file));
// 2. RECEIVED_BLOCK IBR
long blockSize = isStripedBlock ?
numStripes * ecPolicy.getCellSize() : len;
for (int i = 0; i < groupSize; i++) {
DataNode dn = dataNodes.get(i);
final Block block = new Block(lastBlock.getBlockId() + i,
blockSize, lastBlock.getGenerationStamp());
DatanodeStorage storage = new DatanodeStorage(UUID.randomUUID().toString());
StorageReceivedDeletedBlocks[] reports = DFSTestUtil
.makeReportForReceivedBlock(block,
ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
for (StorageReceivedDeletedBlocks report : reports) {
ns.processIncrementalBlockReport(dn.getDatanodeId(), report);
}
}
long bytes = isStripedBlock ?
numStripes * ecPolicy.getCellSize() * ecPolicy.getNumDataUnits() : len;
lastBlock.setNumBytes(bytes);
return lastBlock;
}
/*
* Copy a block from sourceProxy to destination. If the block becomes
* over-replicated, preferably remove it from source.
* Return true if a block is successfully copied; otherwise false.
*/
public static boolean replaceBlock(ExtendedBlock block, DatanodeInfo source,
DatanodeInfo sourceProxy, DatanodeInfo destination) throws IOException {
return replaceBlock(block, source, sourceProxy, destination,
StorageType.DEFAULT, Status.SUCCESS);
}
/*
* Replace block
*/
public static boolean replaceBlock(ExtendedBlock block, DatanodeInfo source,
DatanodeInfo sourceProxy, DatanodeInfo destination,
StorageType targetStorageType, Status opStatus) throws IOException,
SocketException {
Socket sock = new Socket();
try {
sock.connect(NetUtils.createSocketAddr(destination.getXferAddr()),
HdfsConstants.READ_TIMEOUT);
sock.setKeepAlive(true);
// sendRequest
DataOutputStream out = new DataOutputStream(sock.getOutputStream());
new Sender(out).replaceBlock(block, targetStorageType,
BlockTokenSecretManager.DUMMY_TOKEN, source.getDatanodeUuid(),
sourceProxy, null);
out.flush();
// receiveResponse
DataInputStream reply = new DataInputStream(sock.getInputStream());
BlockOpResponseProto proto = BlockOpResponseProto.parseDelimitedFrom(
reply);
while (proto.getStatus() == Status.IN_PROGRESS) {
proto = BlockOpResponseProto.parseDelimitedFrom(reply);
}
return proto.getStatus() == opStatus;
} finally {
sock.close();
}
}
/**
* Because currently DFSStripedOutputStream does not support hflush/hsync,
* tests can use this method to flush all the buffered data to DataNodes.
*/
public static ExtendedBlock flushInternal(DFSStripedOutputStream out)
throws IOException {
out.flushAllInternals();
return out.getBlock();
}
public static ExtendedBlock flushBuffer(DFSStripedOutputStream out)
throws IOException {
out.flush();
return out.getBlock();
}
public static void waitForMetric(final JMXGet jmx, final String metricName, final int expectedValue)
throws TimeoutException, InterruptedException {
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
try {
final int currentValue = Integer.parseInt(jmx.getValue(metricName));
return currentValue == expectedValue;
} catch (Exception e) {
throw new RuntimeException(
"Test failed due to unexpected exception", e);
}
}
}, 50, 60000);
}
/**
* Close current file system and create a new instance as given
* {@link UserGroupInformation}.
*/
public static FileSystem login(final FileSystem fs,
final Configuration conf, final UserGroupInformation ugi)
throws IOException, InterruptedException {
if (fs != null) {
fs.close();
}
return DFSTestUtil.getFileSystemAs(ugi, conf);
}
/**
* Test if the given {@link FileStatus} user, group owner and its permission
* are expected, throw {@link AssertionError} if any value is not expected.
*/
public static void verifyFilePermission(FileStatus stat, String owner,
String group, FsAction u, FsAction g, FsAction o) {
if(stat != null) {
if(!Strings.isNullOrEmpty(owner)) {
assertEquals(owner, stat.getOwner());
}
if(!Strings.isNullOrEmpty(group)) {
assertEquals(group, stat.getGroup());
}
FsPermission permission = stat.getPermission();
if(u != null) {
assertEquals(u, permission.getUserAction());
}
if (g != null) {
assertEquals(g, permission.getGroupAction());
}
if (o != null) {
assertEquals(o, permission.getOtherAction());
}
}
}
/**
 * Delete {@code path} via the shell and verify trash behavior, deriving the
 * expected trash location from the shell's current trash directory.
 *
 * @param shell shell used to run the delete
 * @param fs file system holding the path
 * @param path path to delete
 * @param shouldExistInTrash whether the path must land in trash afterwards
 * @throws Exception if the delete or any verification fails
 */
public static void verifyDelete(FsShell shell, FileSystem fs, Path path,
    boolean shouldExistInTrash) throws Exception {
  Path trashPath = Path.mergePaths(shell.getCurrentTrashDir(path), path);
  verifyDelete(shell, fs, path, trashPath, shouldExistInTrash);
}
/**
 * Delete {@code path} with {@code -rm -r} via the shell and assert whether
 * it ends up at the given trash location. Also sanity-checks that
 * {@code trashPath} actually contains a ".Trash" path component.
 *
 * @param shell shell used to run the delete
 * @param fs file system holding the path
 * @param path path to delete; must exist
 * @param trashPath expected location of the path inside trash
 * @param shouldExistInTrash whether the path must land in trash afterwards
 * @throws Exception if the delete or any verification fails
 */
public static void verifyDelete(FsShell shell, FileSystem fs, Path path,
    Path trashPath, boolean shouldExistInTrash) throws Exception {
  assertTrue(fs.exists(path), path + " file does not exist");

  // Walk up from the expected trash location and require a ".Trash"
  // ancestor; without one the trashPath argument is bogus.
  Path ancestor = trashPath;
  while (!ancestor.isRoot() && !".Trash".equals(ancestor.getName())) {
    ancestor = ancestor.getParent();
  }
  assertEquals(".Trash", ancestor.getName(),
      "No .Trash component found in trash path " + trashPath);

  final String[] rmArgs = {"-rm", "-r", path.toString()};
  assertEquals(0, ToolRunner.run(shell, rmArgs), "rm failed");

  if (shouldExistInTrash) {
    assertTrue(fs.exists(trashPath), "File not in trash : " + trashPath);
  } else {
    assertFalse(fs.exists(trashPath), "File in trash : " + trashPath);
  }
}
/**
 * Create open (unclosed) files directly under the root path "/".
 * Convenience overload of
 * {@link #createOpenFiles(FileSystem, Path, String, int)}.
 *
 * @param fs the filesystem.
 * @param filePrefix the prefix of the files.
 * @param numFilesToCreate the number of files to create.
 * @return map from each created path to its still-open output stream
 */
public static Map<Path, FSDataOutputStream> createOpenFiles(FileSystem fs,
    String filePrefix, int numFilesToCreate) throws IOException {
  return createOpenFiles(fs, new Path("/"), filePrefix, numFilesToCreate);
}
/**
 * Create open (unclosed) files under {@code baseDir}, each named
 * {@code filePrefix-<i>} and filled with 1792 bytes of deterministic
 * pseudo-random data (fixed seed, so repeated runs produce identical
 * content).
 *
 * @param fs the filesystem.
 * @param baseDir the base path of the files.
 * @param filePrefix the prefix of the files.
 * @param numFilesToCreate the number of files to create.
 * @return map from each created path to its still-open output stream
 */
public static Map<Path, FSDataOutputStream> createOpenFiles(FileSystem fs,
    Path baseDir, String filePrefix, int numFilesToCreate)
    throws IOException {
  final Map<Path, FSDataOutputStream> openFiles = new HashMap<>();
  // 1.75 KB payload per file; fixed seed keeps the data reproducible.
  final byte[] payload = new byte[(int) (1024 * 1.75)];
  final Random random = new Random(0xFEED0BACL);
  for (int i = 0; i < numFilesToCreate; i++) {
    Path filePath = new Path(baseDir, filePrefix + "-" + i);
    FSDataOutputStream out =
        fs.create(filePath, true, 1024, (short) 1, 1024);
    random.nextBytes(payload);
    out.write(payload);
    openFiles.put(filePath, out);
  }
  return openFiles;
}
/**
 * Close up to {@code numFilesToClose} of the streams in the given map,
 * removing each closed entry from the map, and return the set of paths
 * that were closed.
 * <p>
 * Bug fix: the original decremented the counter only after closing an
 * entry and then compared {@code == 0}, so passing 0 (or a negative
 * value) closed every file instead of none. The count is now checked
 * before each close, so non-positive values close nothing; positive
 * values behave exactly as before.
 *
 * @param openFilesMap map of open files; closed entries are removed
 * @param numFilesToClose maximum number of files to close
 * @return the paths whose streams were closed
 * @throws IOException if closing a stream fails
 */
public static HashSet<Path> closeOpenFiles(
    HashMap<Path, FSDataOutputStream> openFilesMap,
    int numFilesToClose) throws IOException {
  HashSet<Path> closedFiles = new HashSet<>();
  for (Iterator<Entry<Path, FSDataOutputStream>> it =
      openFilesMap.entrySet().iterator();
      it.hasNext() && numFilesToClose > 0; numFilesToClose--) {
    Entry<Path, FSDataOutputStream> entry = it.next();
    LOG.info("Closing file: " + entry.getKey());
    entry.getValue().close();
    closedFiles.add(entry.getKey());
    it.remove();
  }
  return closedFiles;
}
/**
 * Set up a MiniDFSCluster with the desired number of DataNodes and racks,
 * where a specified number of racks host exactly one DataNode each and the
 * remaining DataNodes are spread evenly (round-robin) over the other racks.
 *
 * @param conf the conf object to start the cluster.
 * @param numDatanodes number of total Datanodes.
 * @param numRacks number of total racks
 * @param numSingleDnRacks number of racks that only has 1 DN
 * @return the started, active cluster
 * @throws Exception if cluster startup fails
 */
public static MiniDFSCluster setupCluster(final Configuration conf,
    final int numDatanodes,
    final int numRacks,
    final int numSingleDnRacks)
    throws Exception {
  assert numDatanodes > numRacks;
  assert numRacks > numSingleDnRacks;
  assert numSingleDnRacks >= 0;
  // First numSingleDnRacks nodes each get a dedicated rack; the rest
  // round-robin over the remaining racks.
  final String[] racks = new String[numDatanodes];
  final int sharedRacks = numRacks - numSingleDnRacks;
  for (int i = 0; i < numDatanodes; i++) {
    if (i < numSingleDnRacks) {
      racks[i] = "/rack" + i;
    } else {
      racks[i] = "/rack" + (numSingleDnRacks + (i % sharedRacks));
    }
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(numDatanodes)
      .racks(racks)
      .build();
  cluster.waitActive();
  return cluster;
}
/**
 * Check the correctness of a snapshotDiff report: every expected entry must
 * appear in the forward report, and its mirror image (CREATE &lt;-&gt; DELETE,
 * MODIFY unchanged) must appear in the reversed (to, from) report. Both
 * reports must contain exactly {@code entries.length} items.
 *
 * @param fs file system to query
 * @param dir snapshottable directory
 * @param from name of the "from" snapshot
 * @param to name of the "to" snapshot
 * @param entries the expected diff entries
 * @throws IOException if fetching a report fails
 */
public static void verifySnapshotDiffReport(DistributedFileSystem fs,
    Path dir, String from, String to,
    DiffReportEntry... entries) throws IOException {
  SnapshotDiffReport forward = fs.getSnapshotDiffReport(dir, from, to);
  // Swap from/to to get the inverse view of the same diff.
  SnapshotDiffReport inverse = fs.getSnapshotDiffReport(dir, to, from);
  LOG.info(forward.toString());
  LOG.info(inverse.toString() + "\n");

  assertEquals(entries.length, forward.getDiffList().size());
  assertEquals(entries.length, inverse.getDiffList().size());

  for (DiffReportEntry entry : entries) {
    switch (entry.getType()) {
    case MODIFY:
      // A modification is symmetric: it appears as-is in both directions.
      assertTrue(forward.getDiffList().contains(entry));
      assertTrue(inverse.getDiffList().contains(entry));
      break;
    case DELETE:
      // A delete in one direction is a create in the other.
      assertTrue(forward.getDiffList().contains(entry));
      assertTrue(inverse.getDiffList().contains(
          new DiffReportEntry(DiffType.CREATE, entry.getSourcePath())));
      break;
    case CREATE:
      // A create in one direction is a delete in the other.
      assertTrue(forward.getDiffList().contains(entry));
      assertTrue(inverse.getDiffList().contains(
          new DiffReportEntry(DiffType.DELETE, entry.getSourcePath())));
      break;
    default:
      // Other diff types are not verified, matching prior behavior.
      break;
    }
  }
}
/**
 * Wait until the first block of {@code fileName} has exactly
 * {@code expectedStorageCount} replicas on the given storage type,
 * polling every 500ms until {@code timeout} elapses.
 *
 * @param fileName file name.
 * @param expectedStorageType storage type.
 * @param expectedStorageCount expected number of replicas on that type.
 * @param timeout max wait time in milliseconds.
 * @param fs distributedFileSystem.
 * @throws Exception if the condition is not met within the timeout
 */
public static void waitExpectedStorageType(String fileName,
    final StorageType expectedStorageType, int expectedStorageCount,
    int timeout, DistributedFileSystem fs) throws Exception {
  GenericTestUtils.waitFor(() -> {
    final LocatedBlock lb;
    try {
      lb = fs.getClient().getLocatedBlocks(fileName, 0).get(0);
    } catch (IOException e) {
      // Treat a transient lookup failure as "not yet" and keep polling.
      LOG.error("Exception while getting located blocks", e);
      return false;
    }
    int matchingReplicas = 0;
    for (StorageType type : lb.getStorageTypes()) {
      if (expectedStorageType == type) {
        matchingReplicas++;
      }
    }
    LOG.info(
        expectedStorageType + " replica count, expected="
            + expectedStorageCount + " and actual=" + matchingReplicas);
    return expectedStorageCount == matchingReplicas;
  }, 500, timeout);
}
/**
 * Wait for removal of a specified Xattr on a specified file, polling the
 * inode's xattr list every 100ms until {@code timeout} elapses.
 *
 * @param srcPath file name.
 * @param xattr name of the extended attribute.
 * @param ns Namesystem
 * @param timeout max wait time in milliseconds
 * @throws TimeoutException if the xattr is still present after the timeout
 * @throws InterruptedException if the wait is interrupted
 */
public static void waitForXattrRemoved(String srcPath, String xattr,
    Namesystem ns, int timeout) throws TimeoutException, InterruptedException,
    UnresolvedLinkException, AccessControlException,
    ParentNotDirectoryException {
  final INode inode = ns.getFSDirectory().getINode(srcPath);
  final XAttr satisfyXAttr = XAttrHelper.buildXAttr(xattr);
  // Done once the inode's current xattrs no longer include the target.
  GenericTestUtils.waitFor(
      () -> !XAttrStorage.readINodeXAttrs(inode).contains(satisfyXAttr),
      100, timeout);
}
/**
 * Get namenode connector using the given configuration and file path.
 * <p>
 * NOTE(review): this retries forever on {@link IOException} — it never
 * gives up, so a permanently unreachable namenode will hang the caller.
 * Presumably acceptable in tests where a surrounding timeout exists;
 * confirm callers run under a test timeout.
 *
 * @param conf
 *          hdfs configuration
 * @param filePath
 *          file path
 * @param namenodeCount
 *          number of namenodes expected in the configuration
 * @param createMoverPath
 *          create move path flag to skip the path creation
 * @return Namenode connector (the first of the created connectors).
 * @throws IOException
 */
public static NameNodeConnector getNameNodeConnector(Configuration conf,
    Path filePath, int namenodeCount, boolean createMoverPath)
    throws IOException {
  final Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
  // Sanity-check that the configuration exposes the expected nameservices.
  assertEquals(namenodeCount, namenodes.size());
  NameNodeConnector.checkOtherInstanceRunning(createMoverPath);
  while (true) {
    try {
      final List<NameNodeConnector> nncs = NameNodeConnector
          .newNameNodeConnectors(namenodes,
              StoragePolicySatisfier.class.getSimpleName(),
              filePath, conf,
              NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS);
      return nncs.get(0);
    } catch (IOException e) {
      // Connection failures are logged and the attempt is repeated
      // immediately (no backoff).
      LOG.warn("Failed to connect with namenode", e);
      // Ignore
    }
  }
}
/**
 * Run the fsck command using the specified params and capture its output.
 *
 * @param conf HDFS configuration to use
 * @param expectedErrCode The error code expected to be returned by
 *                        the fsck command
 * @param checkErrorCode Should the error code be checked
 * @param path actual arguments to the fsck command
 * @return everything fsck printed to its output stream
 **/
public static String runFsck(Configuration conf, int expectedErrCode,
    boolean checkErrorCode, String... path)
    throws Exception {
  final ByteArrayOutputStream captured = new ByteArrayOutputStream();
  // autoflush=true so all fsck output is in the buffer before we read it.
  final PrintStream fsckOut = new PrintStream(captured, true);
  final int errCode = ToolRunner.run(new DFSck(conf, fsckOut), path);
  if (checkErrorCode) {
    assertEquals(expectedErrCode, errCode);
  }
  return captured.toString();
}
}
| ShortCircuitTestContext |
java | spring-projects__spring-framework | spring-messaging/src/main/java/org/springframework/messaging/simp/SimpAttributesContextHolder.java | {
"start": 825,
"end": 1038
} | class ____ expose SiMP attributes associated with a session (for example, WebSocket)
* in the form of a thread-bound {@link SimpAttributes} object.
*
* @author Rossen Stoyanchev
* @since 4.1
*/
public abstract | to |
java | elastic__elasticsearch | test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/DefaultDistributionDescriptor.java | {
"start": 617,
"end": 1777
} | class ____ implements DistributionDescriptor {
private final Version version;
private final boolean snapshot;
private final Path distributionDir;
private final DistributionType type;
public DefaultDistributionDescriptor(Version version, boolean snapshot, Path extractedDir, DistributionType type) {
this.version = version;
this.snapshot = snapshot;
this.distributionDir = extractedDir;
this.type = type;
}
public Version getVersion() {
return version;
}
public boolean isSnapshot() {
return snapshot;
}
public Path getDistributionDir() {
return distributionDir.resolve("elasticsearch-" + version + (snapshot ? "-SNAPSHOT" : ""));
}
public DistributionType getType() {
return type;
}
@Override
public String toString() {
return "DefaultDistributionDescriptor{"
+ "version="
+ version
+ ", snapshot="
+ snapshot
+ ", distributionDir="
+ distributionDir
+ ", type="
+ type
+ '}';
}
}
| DefaultDistributionDescriptor |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/sql/visitor/functions/Concat.java | {
"start": 955,
"end": 2115
} | class ____ implements Function {
public static final Concat instance = new Concat();
public Object eval(SQLEvalVisitor visitor, SQLMethodInvokeExpr x) {
StringBuilder buf = new StringBuilder();
for (SQLExpr item : x.getArguments()) {
item.accept(visitor);
Object itemValue = item.getAttribute(EVAL_VALUE);
if (itemValue == null) {
return null;
}
buf.append(itemValue.toString());
}
return buf.toString();
}
public Object eval(SQLMethodInvokeExpr x) {
StringBuilder buf = new StringBuilder();
for (SQLExpr param : x.getArguments()) {
if (param instanceof SQLValuableExpr) {
Object val = ((SQLValuableExpr) param).getValue();
if (val instanceof String) {
buf.append(val);
continue;
} else if (val instanceof Integer) {
buf.append(val);
continue;
}
}
return SQLEvalVisitor.EVAL_ERROR;
}
return buf.toString();
}
}
| Concat |
java | apache__camel | components/camel-cxf/camel-cxf-spring-transport/src/test/java/org/apache/camel/component/cxf/common/message/DefaultCxfMessageMapperTest.java | {
"start": 1843,
"end": 5281
} | class ____ {
@Test
public void testRequestUriAndPath() {
final String requestURI = "/path;a=b";
final String requestPath = "/path";
DefaultCxfMessageMapper mapper = new DefaultCxfMessageMapper();
Exchange camelExchange = setupCamelExchange(requestURI, requestPath, null);
Message cxfMessage = mapper.createCxfMessageFromCamelExchange(
camelExchange, mock(HeaderFilterStrategy.class));
assertEquals(requestURI, cxfMessage.get(Message.REQUEST_URI).toString());
assertEquals(requestPath, cxfMessage.get(Message.BASE_PATH).toString());
}
@Test
public void testSecurityContext() {
DefaultCxfMessageMapper mapper = new DefaultCxfMessageMapper();
HttpServletRequest request = mock(HttpServletRequest.class);
when(request.getUserPrincipal()).thenReturn(new SimplePrincipal("barry"));
when(request.isUserInRole("role1")).thenReturn(true);
when(request.isUserInRole("role2")).thenReturn(false);
Exchange camelExchange = setupCamelExchange("/", "/", request);
Message cxfMessage = mapper.createCxfMessageFromCamelExchange(
camelExchange, mock(HeaderFilterStrategy.class));
SecurityContext sc = cxfMessage.get(SecurityContext.class);
assertNotNull(sc);
assertEquals("barry", sc.getUserPrincipal().getName());
assertTrue(sc.isUserInRole("role1"));
assertFalse(sc.isUserInRole("role2"));
}
private Exchange setupCamelExchange(String requestURI, String requestPath, HttpServletRequest request) {
org.apache.camel.Message camelMessage = mock(org.apache.camel.Message.class);
org.apache.camel.http.common.HttpMessage camelHttpMessage = mock(org.apache.camel.http.common.HttpMessage.class);
Exchange camelExchange = mock(Exchange.class);
when(camelExchange.getProperty(CamelTransportConstants.CXF_EXCHANGE,
org.apache.cxf.message.Exchange.class)).thenReturn(new ExchangeImpl());
when(camelExchange.hasOut()).thenReturn(false);
when(camelExchange.getIn()).thenReturn(camelMessage);
when(camelMessage.getHeaders()).thenReturn(Collections.emptyMap());
when(camelMessage.getHeader(Exchange.CONTENT_TYPE, String.class)).thenReturn(null);
when(camelMessage.getHeader("Accept", String.class)).thenReturn(null);
when(camelMessage.getHeader(Exchange.HTTP_CHARACTER_ENCODING, String.class)).thenReturn(null);
when(camelMessage.getHeader(Exchange.CHARSET_NAME, String.class)).thenReturn(null);
when(camelMessage.getHeader(Exchange.HTTP_URI, String.class)).thenReturn(requestURI);
when(camelMessage.getHeader(Exchange.HTTP_PATH, String.class)).thenReturn(requestPath);
when(camelMessage.getHeader(Exchange.HTTP_BASE_URI, String.class)).thenReturn(requestPath);
when(camelMessage.getHeader(Exchange.HTTP_METHOD, String.class)).thenReturn("GET");
when(camelMessage.getHeader(Exchange.HTTP_QUERY, String.class)).thenReturn("");
when(camelExchange.getIn(HttpMessage.class)).thenReturn(camelHttpMessage);
when(camelHttpMessage.getRequest()).thenReturn(request);
when(camelHttpMessage.getResponse()).thenReturn(null);
when(camelMessage.getBody(InputStream.class)).thenReturn(new ByteArrayInputStream("".getBytes()));
return camelExchange;
}
}
| DefaultCxfMessageMapperTest |
java | apache__camel | components/camel-huawei/camel-huaweicloud-frs/src/test/java/org/apache/camel/component/huaweicloud/frs/TestConfiguration.java | {
"start": 991,
"end": 2108
} | class ____ {
private static final Logger LOGGER = LoggerFactory.getLogger(TestConfiguration.class.getName());
private static Map<String, String> propertyMap;
public TestConfiguration() {
initPropertyMap();
}
public void initPropertyMap() {
Properties properties = null;
if (propertyMap == null) {
propertyMap = new HashMap<>();
String propertyFileName = "test_configuration.properties";
try {
properties = TestSupport.loadExternalProperties(getClass().getClassLoader(), "test_configuration.properties");
for (String key : properties.stringPropertyNames()) {
propertyMap.put(key, properties.getProperty(key));
}
} catch (Exception e) {
LOGGER.error("Cannot load property file {}, reason {}", propertyFileName, e.getMessage());
}
}
}
public String getProperty(String key) {
if (propertyMap == null) {
initPropertyMap();
}
return propertyMap.get(key);
}
}
| TestConfiguration |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/NullableConstructorTest.java | {
"start": 2643,
"end": 2753
} | class ____ {
<@Nullable T> Test(T t) {}
}
""")
.doTest();
}
}
| Test |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointFaultInjector.java | {
"start": 921,
"end": 1012
} | class ____ faciliate some fault injection tests for the checkpointing
* process.
*/
public | to |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/xml/ejb3/Ejb3XmlManyToOneTest.java | {
"start": 1034,
"end": 14378
} | class ____ extends Ejb3XmlTestCase {
@Test
public void testNoJoins() {
final MemberDetails memberDetails = getAttributeMember( Entity1.class, "field1", "many-to-one.orm1.xml" );
assertThat( memberDetails.hasDirectAnnotationUsage( ManyToOne.class ) ).isTrue();
assertThat( memberDetails.hasDirectAnnotationUsage( JoinColumn.class ) ).isFalse();
assertThat( memberDetails.hasDirectAnnotationUsage( JoinColumns.class ) ).isFalse();
assertThat( memberDetails.hasDirectAnnotationUsage( JoinTable.class ) ).isFalse();
assertThat( memberDetails.hasDirectAnnotationUsage( Id.class ) ).isFalse();
assertThat( memberDetails.hasDirectAnnotationUsage( MapsId.class ) ).isFalse();
final ManyToOne manyToOneUsage = memberDetails.getDirectAnnotationUsage( ManyToOne.class );
assertThat( manyToOneUsage.cascade() ).isEmpty();
assertThat( manyToOneUsage.fetch() ).isEqualTo( FetchType.EAGER );
assertThat( manyToOneUsage.optional() ).isTrue();
assertThat( manyToOneUsage.targetEntity() ).isEqualTo( void.class );
}
/**
* When there's a single join column, we still wrap it with a JoinColumns
* annotation.
*/
@Test
public void testSingleJoinColumn() {
final MemberDetails memberDetails = getAttributeMember( Entity1.class, "field1", "many-to-one.orm2.xml" );
assertThat( memberDetails.hasDirectAnnotationUsage( ManyToOne.class ) ).isTrue();
assertThat( memberDetails.hasDirectAnnotationUsage( JoinColumnsOrFormulas.class ) ).isTrue();
assertThat( memberDetails.hasDirectAnnotationUsage( JoinColumnOrFormula.class ) ).isFalse();
assertThat( memberDetails.hasDirectAnnotationUsage( JoinColumns.class ) ).isFalse();
assertThat( memberDetails.hasDirectAnnotationUsage( JoinColumn.class ) ).isFalse();
assertThat( memberDetails.hasDirectAnnotationUsage( JoinFormula.class ) ).isFalse();
assertThat( memberDetails.hasDirectAnnotationUsage( JoinTable.class ) ).isFalse();
assertThat( memberDetails.hasDirectAnnotationUsage( Id.class ) ).isFalse();
assertThat( memberDetails.hasDirectAnnotationUsage( MapsId.class ) ).isFalse();
final JoinColumnsOrFormulas joinColumnsOrFormulasUsage = memberDetails.getDirectAnnotationUsage( JoinColumnsOrFormulas.class );
final JoinColumn joinColumnUsage = joinColumnsOrFormulasUsage.value()[0].column();
assertThat( joinColumnUsage.name() ).isEqualTo( "col1" );
assertThat( joinColumnUsage.referencedColumnName() ).isEqualTo( "col2" );
assertThat( joinColumnUsage.table() ).isEqualTo( "table1" );
}
@Test
public void testMultipleJoinColumns() {
final MemberDetails memberDetails = getAttributeMember( Entity1.class, "field1", "many-to-one.orm3.xml" );
assertThat( memberDetails.hasDirectAnnotationUsage( ManyToOne.class ) ).isTrue();
assertThat( memberDetails.hasDirectAnnotationUsage( JoinColumnsOrFormulas.class ) ).isTrue();
assertThat( memberDetails.hasDirectAnnotationUsage( JoinColumnOrFormula.class ) ).isFalse();
assertThat( memberDetails.hasDirectAnnotationUsage( JoinFormula.class ) ).isFalse();
assertThat( memberDetails.hasDirectAnnotationUsage( JoinColumn.class ) ).isFalse();
assertThat( memberDetails.hasDirectAnnotationUsage( JoinTable.class ) ).isFalse();
assertThat( memberDetails.hasDirectAnnotationUsage( Id.class ) ).isFalse();
assertThat( memberDetails.hasDirectAnnotationUsage( MapsId.class ) ).isFalse();
final JoinColumnsOrFormulas joinColumnsOrFormulasUsage = memberDetails.getDirectAnnotationUsage( JoinColumnsOrFormulas.class );
final JoinColumnOrFormula[] joinColumnOrFormulaUsages = joinColumnsOrFormulasUsage.value();
assertThat( joinColumnOrFormulaUsages ).hasSize( 2 );
final JoinColumn joinColumnUsage0 = joinColumnOrFormulaUsages[0].column();
assertThat( joinColumnUsage0.name() ).isEmpty();
assertThat( joinColumnUsage0.referencedColumnName() ).isEmpty();
assertThat( joinColumnUsage0.table() ).isEmpty();
assertThat( joinColumnUsage0.columnDefinition() ).isEmpty();
assertThat( joinColumnUsage0.insertable() ).isTrue();
assertThat( joinColumnUsage0.updatable() ).isTrue();
assertThat( joinColumnUsage0.nullable() ).isTrue();
assertThat( joinColumnUsage0.unique() ).isFalse();
final JoinColumn joinColumnUsage1 = joinColumnOrFormulaUsages[1].column();
assertThat( joinColumnUsage1.name() ).isEqualTo( "col1" );
assertThat( joinColumnUsage1.referencedColumnName() ).isEqualTo( "col2" );
assertThat( joinColumnUsage1.table() ).isEqualTo( "table1" );
assertThat( joinColumnUsage1.columnDefinition() ).isEqualTo( "int" );
assertThat( joinColumnUsage1.insertable() ).isFalse();
assertThat( joinColumnUsage1.updatable() ).isFalse();
assertThat( joinColumnUsage1.nullable() ).isFalse();
assertThat( joinColumnUsage1.unique() ).isTrue();
}
@Test
public void testJoinTableNoChildren() {
final MemberDetails memberDetails = getAttributeMember( Entity1.class, "field1", "many-to-one.orm4.xml" );
assertThat( memberDetails.hasDirectAnnotationUsage( ManyToOne.class ) ).isTrue();
assertThat( memberDetails.hasDirectAnnotationUsage( JoinTable.class ) ).isTrue();
assertThat( memberDetails.hasDirectAnnotationUsage( JoinColumns.class ) ).isFalse();
assertThat( memberDetails.hasDirectAnnotationUsage( JoinColumn.class ) ).isFalse();
assertThat( memberDetails.hasDirectAnnotationUsage( Id.class ) ).isFalse();
assertThat( memberDetails.hasDirectAnnotationUsage( MapsId.class ) ).isFalse();
final JoinTable joinTableUsage = memberDetails.getDirectAnnotationUsage( JoinTable.class );
assertThat( joinTableUsage.catalog() ).isEmpty();
assertThat( joinTableUsage.schema() ).isEmpty();
assertThat( joinTableUsage.name() ).isEmpty();
assertThat( joinTableUsage.joinColumns() ).isEmpty();
assertThat( joinTableUsage.inverseJoinColumns() ).isEmpty();
assertThat( joinTableUsage.uniqueConstraints() ).isEmpty();
}
@Test
public void testJoinTableAllChildren() {
final MemberDetails memberDetails = getAttributeMember( Entity1.class, "field1", "many-to-one.orm5.xml" );
assertThat( memberDetails.hasDirectAnnotationUsage( ManyToOne.class ) ).isTrue();
assertThat( memberDetails.hasDirectAnnotationUsage( JoinTable.class ) ).isTrue();
assertThat( memberDetails.hasDirectAnnotationUsage( JoinColumns.class ) ).isFalse();
assertThat( memberDetails.hasDirectAnnotationUsage( JoinColumn.class ) ).isFalse();
assertThat( memberDetails.hasDirectAnnotationUsage( Id.class ) ).isFalse();
assertThat( memberDetails.hasDirectAnnotationUsage( MapsId.class ) ).isFalse();
final JoinTable joinTableUsage = memberDetails.getDirectAnnotationUsage( JoinTable.class );
assertThat( joinTableUsage.catalog() ).isEqualTo( "cat1" );
assertThat( joinTableUsage.schema() ).isEqualTo( "schema1" );
assertThat( joinTableUsage.name() ).isEqualTo( "table1" );
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// JoinColumns
final JoinColumn[] joinColumnUsages = joinTableUsage.joinColumns();
assertThat( joinColumnUsages ).hasSize( 2 );
final JoinColumn joinColumnUsage0 = joinColumnUsages[0];
assertThat( joinColumnUsage0.name() ).isEmpty();
assertThat( joinColumnUsage0.referencedColumnName() ).isEmpty();
assertThat( joinColumnUsage0.table() ).isEmpty();
assertThat( joinColumnUsage0.columnDefinition() ).isEmpty();
assertThat( joinColumnUsage0.insertable() ).isTrue();
assertThat( joinColumnUsage0.updatable() ).isTrue();
assertThat( joinColumnUsage0.nullable() ).isTrue();
assertThat( joinColumnUsage0.unique() ).isFalse();
final JoinColumn joinColumnUsage1 = joinColumnUsages[1];
assertThat( joinColumnUsage1.name() ).isEqualTo( "col1" );
assertThat( joinColumnUsage1.referencedColumnName() ).isEqualTo( "col2" );
assertThat( joinColumnUsage1.table() ).isEqualTo( "table2" );
assertThat( joinColumnUsage1.columnDefinition() ).isEqualTo( "int" );
assertThat( joinColumnUsage1.insertable() ).isFalse();
assertThat( joinColumnUsage1.updatable() ).isFalse();
assertThat( joinColumnUsage1.nullable() ).isFalse();
assertThat( joinColumnUsage1.unique() ).isTrue();
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// InverseJoinColumns
final JoinColumn[] inverseJoinColumnUsages = joinTableUsage.inverseJoinColumns();
assertThat( inverseJoinColumnUsages ).hasSize( 2 );
final JoinColumn inverseJoinColumnUsage0 = inverseJoinColumnUsages[0];
assertThat( inverseJoinColumnUsage0.name() ).isEmpty();
assertThat( inverseJoinColumnUsage0.referencedColumnName() ).isEmpty();
assertThat( inverseJoinColumnUsage0.table() ).isEmpty();
assertThat( inverseJoinColumnUsage0.columnDefinition() ).isEmpty();
assertThat( inverseJoinColumnUsage0.insertable() ).isTrue();
assertThat( inverseJoinColumnUsage0.updatable() ).isTrue();
assertThat( inverseJoinColumnUsage0.nullable() ).isTrue();
assertThat( inverseJoinColumnUsage0.unique() ).isFalse();
final JoinColumn inverseJoinColumnUsage1 = inverseJoinColumnUsages[1];
assertThat( inverseJoinColumnUsage1.name() ).isEqualTo( "col3" );
assertThat( inverseJoinColumnUsage1.referencedColumnName() ).isEqualTo( "col4" );
assertThat( inverseJoinColumnUsage1.table() ).isEqualTo( "table3" );
assertThat( inverseJoinColumnUsage1.columnDefinition() ).isEqualTo( "int" );
assertThat( inverseJoinColumnUsage1.insertable() ).isFalse();
assertThat( inverseJoinColumnUsage1.updatable() ).isFalse();
assertThat( inverseJoinColumnUsage1.nullable() ).isFalse();
assertThat( inverseJoinColumnUsage1.unique() ).isTrue();
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// UniqueConstraints
final UniqueConstraint[] uniqueConstraintUsages = joinTableUsage.uniqueConstraints();
assertThat( uniqueConstraintUsages ).hasSize( 2 );
final UniqueConstraint uniqueConstraintUsage0 = uniqueConstraintUsages[0];
assertThat( uniqueConstraintUsage0.name() ).isEmpty();
assertThat( uniqueConstraintUsage0.columnNames() ).containsExactly( "col5" );
final UniqueConstraint uniqueConstraintUsage1 = uniqueConstraintUsages[1];
assertThat( uniqueConstraintUsage1.name() ).isEqualTo( "uq1" );
assertThat( uniqueConstraintUsage1.columnNames() ).containsExactly( "col6", "col7" );
}
@Test
public void testAllAttributes() {
final MemberDetails memberDetails = getAttributeMember( Entity1.class, "field1", "many-to-one.orm6.xml" );
assertThat( memberDetails.hasDirectAnnotationUsage( ManyToOne.class ) ).isTrue();
assertThat( memberDetails.hasDirectAnnotationUsage( Id.class ) ).isTrue();
assertThat( memberDetails.hasDirectAnnotationUsage( MapsId.class ) ).isTrue();
assertThat( memberDetails.hasDirectAnnotationUsage( Access.class ) ).isTrue();
final ManyToOne manyToOneUsage = memberDetails.getDirectAnnotationUsage( ManyToOne.class );
assertThat( manyToOneUsage.cascade() ).isEmpty();
assertThat( manyToOneUsage.fetch() ).isEqualTo( FetchType.LAZY );
assertThat( manyToOneUsage.optional() ).isFalse();
assertThat( manyToOneUsage.targetEntity() ).isEqualTo( void.class );
final Target targetUsage = memberDetails.getDirectAnnotationUsage( Target.class );
assertThat( targetUsage.value() ).isEqualTo( Entity3.class.getName() );
final MapsId mapsIdUsage = memberDetails.getDirectAnnotationUsage( MapsId.class );
assertThat( mapsIdUsage.value() ).isEqualTo( "col1" );
final Access accessUsage = memberDetails.getDirectAnnotationUsage( Access.class );
assertThat( accessUsage.value() ).isEqualTo( AccessType.PROPERTY );
}
@Test
public void testCascadeAll() {
final MemberDetails memberDetails = getAttributeMember( Entity1.class, "field1", "many-to-one.orm7.xml" );
assertThat( memberDetails.hasDirectAnnotationUsage( ManyToOne.class ) ).isTrue();
final ManyToOne manyToOneUsage = memberDetails.getDirectAnnotationUsage( ManyToOne.class );
assertThat( manyToOneUsage.cascade() ).isEmpty();
final Cascade cascadeUsage = memberDetails.getDirectAnnotationUsage( Cascade.class );
assertThat( cascadeUsage.value() ).containsExactly( CascadeType.ALL );
}
@Test
public void testCascadeSomeWithDefaultPersist() {
final MemberDetails memberDetails = getAttributeMember( Entity1.class, "field1", "many-to-one.orm8.xml" );
final ManyToOne manyToOneUsage = memberDetails.getDirectAnnotationUsage( ManyToOne.class );
assertThat( manyToOneUsage.cascade() ).isEmpty();
final Cascade cascadeUsage = memberDetails.getDirectAnnotationUsage( Cascade.class );
assertThat( cascadeUsage.value() ).containsOnly(
CascadeType.PERSIST,
CascadeType.REMOVE,
CascadeType.REFRESH,
CascadeType.DETACH
);
}
/**
* Make sure that it doesn't break the handler when {@link CascadeType#ALL}
* is specified in addition to a default cascade-persist or individual
* cascade settings.
*/
@Test
public void testCascadeAllPlusMore() {
final MemberDetails memberDetails = getAttributeMember( Entity1.class, "field1", "many-to-one.orm9.xml" );
final ManyToOne manyToOneUsage = memberDetails.getDirectAnnotationUsage( ManyToOne.class );
assertThat( manyToOneUsage.cascade() ).isEmpty();
final Cascade cascadeUsage = memberDetails.getDirectAnnotationUsage( Cascade.class );
assertThat( cascadeUsage.value() ).containsOnly(
CascadeType.ALL,
CascadeType.PERSIST,
CascadeType.MERGE,
CascadeType.REMOVE,
CascadeType.REFRESH,
CascadeType.DETACH
);
}
}
| Ejb3XmlManyToOneTest |
java | netty__netty | codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2LocalFlowController.java | {
"start": 13581,
"end": 14379
} | class ____ extends DefaultState {
AutoRefillState(Http2Stream stream, int initialWindowSize) {
super(stream, initialWindowSize);
}
@Override
public void receiveFlowControlledFrame(int dataLength) throws Http2Exception {
super.receiveFlowControlledFrame(dataLength);
// Need to call the super to consume the bytes, since this.consumeBytes does nothing.
super.consumeBytes(dataLength);
}
@Override
public boolean consumeBytes(int numBytes) throws Http2Exception {
// Do nothing, since the bytes are already consumed upon receiving the data.
return false;
}
}
/**
* Flow control window state for an individual stream.
*/
private | AutoRefillState |
java | apache__camel | tooling/camel-tooling-util/src/main/java/org/apache/camel/tooling/util/srcgen/JavaClass.java | {
"start": 1102,
"end": 23127
} | class ____ {
ClassLoader classLoader;
JavaClass parent;
String packageName;
String name;
String extendsName = "java.lang.Object";
List<String> implementNames = new ArrayList<>();
List<String> imports = new ArrayList<>();
List<Annotation> annotations = new ArrayList<>();
List<Property> properties = new ArrayList<>();
List<Field> fields = new ArrayList<>();
List<Method> methods = new ArrayList<>();
List<JavaClass> nested = new ArrayList<>();
List<String> values = new ArrayList<>();
Javadoc javadoc = new Javadoc();
boolean isStatic;
boolean isPublic = true;
boolean isPackagePrivate;
boolean isAbstract;
boolean isClass = true;
boolean isEnum;
int maxImportPerPackage = 10;
public JavaClass() {
}
public JavaClass(ClassLoader classLoader) {
this.classLoader = classLoader;
}
protected JavaClass(JavaClass parent) {
this.parent = parent;
}
protected ClassLoader getClassLoader() {
if (classLoader == null && parent != null) {
return parent.getClassLoader();
} else {
return classLoader;
}
}
public void setMaxImportPerPackage(int maxImportPerPackage) {
this.maxImportPerPackage = maxImportPerPackage;
}
public JavaClass setStatic(boolean aStatic) {
isStatic = aStatic;
return this;
}
public JavaClass setPackagePrivate() {
isPublic = false;
isPackagePrivate = true;
return this;
}
public JavaClass setPublic() {
isPublic = true;
isPackagePrivate = false;
return this;
}
public String getPackage() {
return packageName;
}
public JavaClass setPackage(String packageName) {
this.packageName = packageName;
return this;
}
public String getName() {
return name;
}
public JavaClass setName(String name) {
this.name = name;
return this;
}
public String getCanonicalName() {
if (parent != null) {
return parent.getCanonicalName() + "$" + name;
} else {
return packageName + "." + name;
}
}
public JavaClass extendSuperType(JavaClass extend) {
return extendSuperType(extend.getName());
}
public JavaClass extendSuperType(String extendsName) {
this.extendsName = extendsName;
return this;
}
public String getSuperType() {
return extendsName;
}
public JavaClass implementInterface(String implementName) {
this.implementNames.add(implementName);
return this;
}
public List<String> getImports() {
return imports;
}
public void addImport(Class<?> clazz) {
addImport(clazz.getName());
}
public void addImport(String importName) {
this.imports.add(importName);
}
public void removeImport(String importName) {
this.imports.remove(importName);
}
public void removeImport(JavaClass importName) {
removeImport(importName.getCanonicalName());
}
public Annotation addAnnotation(String type) {
try {
Class<?> cl = getClassLoader().loadClass(type);
return addAnnotation((Class<? extends java.lang.annotation.Annotation>) cl);
} catch (ClassNotFoundException e) {
throw new IllegalArgumentException("Unable to parse type", e);
}
}
public <A extends java.lang.annotation.Annotation> Annotation addAnnotation(Class<A> type) {
if (!java.lang.annotation.Annotation.class.isAssignableFrom(type)) {
throw new IllegalStateException("Not an annotation: " + type.getName());
}
Annotation ann = new Annotation(type);
annotations.add(ann);
return ann;
}
public Property addProperty(String type, String name) {
try {
return addProperty(GenericType.parse(type, getClassLoader()), name);
} catch (ClassNotFoundException e) {
throw new IllegalArgumentException("Unable to parse type " + type + " for property " + name, e);
}
}
public Property addProperty(GenericType type, String name) {
Property prop = new Property(type, name);
properties.add(prop);
return prop;
}
public Javadoc getJavaDoc() {
return javadoc;
}
public Field addField() {
Field field = new Field();
fields.add(field);
return field;
}
public Method addMethod() {
return addMethod(new Method());
}
public Method addMethod(Method method) {
methods.add(method);
return method;
}
public JavaClass addNestedType() {
JavaClass clazz = new JavaClass(this);
nested.add(clazz);
return clazz;
}
public void addValue(String value) {
values.add(value);
}
public boolean isClass() {
return isClass;
}
public JavaClass setClass(boolean isClass) {
this.isClass = isClass;
return this;
}
public boolean isAbstract() {
return isAbstract;
}
public JavaClass setAbstract(boolean isAbstract) {
this.isAbstract = isAbstract;
return this;
}
public boolean isEnum() {
return isEnum;
}
public JavaClass setEnum(boolean isEnum) {
this.isEnum = isEnum;
return this;
}
public List<Property> getProperties() {
return properties;
}
@Override
public String toString() {
return "JavaClass[" + getCanonicalName() + "]";
}
public String printClass() {
return printClass(true);
}
public String printClass(boolean innerClassesLast) {
StringBuilder sb = new StringBuilder(4096);
Set<String> imports = new TreeSet<>(Comparator.comparing(JavaClass::importOrder));
imports.addAll(this.imports);
addImports(imports);
nested.forEach(jc -> jc.addImports(imports));
imports.removeIf(f -> f.startsWith("java.lang.") || f.startsWith(packageName + "."));
imports.removeIf(GenericType::isPrimitive);
Map<String, List<String>> importsByPackages = new LinkedHashMap<>();
for (String imp : imports) {
String key = imp.substring(0, imp.lastIndexOf('.'));
importsByPackages.computeIfAbsent(key, k -> new ArrayList<>()).add(imp);
}
imports.clear();
for (Map.Entry<String, List<String>> e : importsByPackages.entrySet()) {
if (e.getValue().size() < maxImportPerPackage) {
imports.addAll(e.getValue());
} else {
imports.add(e.getKey() + ".*");
}
}
sb.append("package ").append(packageName).append(";\n");
sb.append("\n");
if (!imports.isEmpty()) {
for (String imp : imports) {
sb.append("import ").append(imp).append(";\n");
}
sb.append("\n");
}
printClass(innerClassesLast, sb, "");
return sb.toString();
}
private void printClass(boolean innerClassesLast, StringBuilder sb, String indent) {
printJavadoc(sb, indent, javadoc);
printAnnotations(sb, indent, annotations);
if (isEnum) {
sb.append(indent)
.append(isPublic ? "public " : "")
.append(isStatic ? "static " : "")
.append("enum ").append(name).append(" {\n")
.append(indent)
.append(" ")
.append(String.join(",\n" + indent + " ", values))
.append(";\n")
.append(indent)
.append("}");
return;
}
StringBuilder sb2 = new StringBuilder(4096);
sb2.append(indent);
if (isPublic) {
sb2.append("public ");
}
if (isStatic) {
sb2.append("static ");
}
sb2.append(isClass ? "class " : "interface ").append(name);
if (extendsName != null && !"java.lang.Object".equals(extendsName)) {
sb2.append(" extends ").append(extendsName);
}
if (!implementNames.isEmpty()) {
sb2.append(isClass ? " implements " : " extends ")
.append(String.join(", ", implementNames));
}
sb2.append(" {");
if (sb2.length() < 80) {
sb.append(sb2).append("\n");
} else {
sb.append(indent);
if (isPublic) {
sb.append("public ");
}
if (isStatic) {
sb.append("static ");
}
sb.append(isClass ? "class " : "interface ").append(name);
if (extendsName != null && !"java.lang.Object".equals(extendsName)) {
sb.append("\n");
sb.append(indent).append(" extends\n");
sb.append(indent).append(" ").append(extendsName);
}
if (!implementNames.isEmpty()) {
sb.append("\n");
sb.append(indent).append(isClass ? " implements\n" : " extends\n");
sb.append(
implementNames.stream().map(name -> indent + " " + name).collect(Collectors.joining(",\n")));
}
sb.append(" {\n");
}
if (parent == null) {
sb.append("\n");
}
for (Field field : fields) {
printField(sb, indent + " ", field);
}
for (Property property : properties) {
if (property.field != null) {
printField(sb, indent + " ", property.field);
}
}
if (!innerClassesLast) {
for (JavaClass nest : nested) {
sb.append("\n");
nest.printClass(innerClassesLast, sb, indent + " ");
sb.append("\n");
}
}
for (Method method : methods) {
printMethod(sb, indent + " ", method);
}
for (Property property : properties) {
if (property.accessor != null) {
printMethod(sb, indent + " ", property.accessor);
}
if (property.mutator != null) {
printMethod(sb, indent + " ", property.mutator);
}
}
if (innerClassesLast) {
for (JavaClass nest : nested) {
sb.append("\n");
nest.printClass(innerClassesLast, sb, indent + " ");
sb.append("\n");
}
}
sb.append(indent).append("}");
}
private void addImports(Set<String> imports) {
annotations.forEach(ann -> addImports(imports, ann));
fields.forEach(f -> addImports(imports, f));
methods.forEach(m -> addImports(imports, m));
properties.forEach(p -> addImports(imports, p));
}
private void addImports(Set<String> imports, Annotation annotation) {
addImports(imports, annotation.type);
}
private void addImports(Set<String> imports, Property property) {
addImports(imports, property.field);
addImports(imports, property.accessor);
addImports(imports, property.mutator);
}
private void addImports(Set<String> imports, Field field) {
if (field != null) {
field.annotations.forEach(a -> addImports(imports, a));
addImports(imports, field.type);
}
}
private void addImports(Set<String> imports, Method method) {
if (method != null) {
method.annotations.forEach(a -> addImports(imports, a));
addImports(imports, method.returnType);
method.parameters.forEach(p -> addImports(imports, p.type));
}
}
private void addImports(Set<String> imports, GenericType type) {
if (type != null) {
addImports(imports, type.getRawClass());
for (int i = 0; i < type.size(); i++) {
addImports(imports, type.getActualTypeArgument(i));
}
}
}
private void addImports(Set<String> imports, Class<?> clazz) {
if (clazz != null) {
if (clazz.isArray()) {
addImports(imports, clazz.getComponentType());
} else {
imports.add(clazz.getName().replace('$', '.'));
}
}
}
private void printMethod(StringBuilder sb, String indent, Method method) {
if (fields.size() + properties.size() > 0) {
sb.append("\n");
}
if (method.javadoc.text != null) {
printJavadoc(sb, indent, method.javadoc);
}
printAnnotations(sb, indent, method.annotations);
if (method.signature != null) {
sb.append(indent);
sb.append(method.signature);
if (!method.isAbstract) {
sb.append(" {");
}
} else {
StringBuilder sb2 = new StringBuilder(2048);
sb2.append(indent);
if (method.isPublic) {
sb2.append("public ");
} else if (method.isProtected) {
sb2.append("protected ");
} else if (method.isPrivate) {
sb2.append("private ");
}
if (method.isDefault) {
sb2.append("default ");
}
if (method.isStatic) {
sb2.append("static ");
}
if (!method.isConstructor) {
if (method.returnTypeLiteral != null) {
sb2.append(method.returnTypeLiteral);
} else if (method.returnType != null) {
sb2.append(shortName(method.returnType));
} else {
sb2.append("void");
}
sb2.append(" ");
}
sb2.append(method.name);
sb2.append("(");
sb2.append(method.parameters.stream()
.map(p -> p.vararg
? typeOf(p) + "... " + p.name
: typeOf(p) + " " + p.name)
.collect(Collectors.joining(", ")));
sb2.append(") ");
if (!method.exceptions.isEmpty()) {
sb2.append("throws ");
sb2.append(method.exceptions.stream().map(this::shortName).collect(Collectors.joining(", ", "", " ")));
}
if (!method.isAbstract) {
sb2.append("{");
}
if (sb2.length() < 84) {
sb.append(sb2);
} else {
sb.append(indent);
if (method.isPublic) {
sb.append("public ");
} else if (method.isProtected) {
sb.append("protected ");
} else if (method.isPrivate) {
sb.append("private ");
}
if (method.isStatic) {
sb.append("static ");
}
if (method.isDefault) {
sb.append("default ");
}
if (!method.isConstructor) {
if (method.returnTypeLiteral != null) {
sb.append(method.returnTypeLiteral);
} else if (method.returnType != null) {
sb.append(shortName(method.returnType));
} else {
sb.append("void");
}
sb.append(" ");
}
sb.append(method.name);
if (!method.parameters.isEmpty()) {
sb.append("(\n");
sb.append(method.parameters.stream()
.map(p -> p.vararg
? indent + " " + typeOf(p) + "... " + p.name
: indent + " " + typeOf(p) + " " + p.name)
.collect(Collectors.joining(",\n")));
sb.append(")");
} else {
sb.append("()");
}
if (!method.exceptions.isEmpty()) {
sb.append("\n throws");
sb.append(method.exceptions.stream().map(this::shortName).collect(Collectors.joining(", ", " ", "")));
}
if (!method.isAbstract) {
sb.append(" {");
}
}
}
if (!method.isAbstract) {
sb.append("\n");
for (String l : method.body.split("\n")) {
sb.append(indent);
sb.append(" ");
sb.append(l);
sb.append("\n");
}
sb.append(indent).append("}\n");
} else {
sb.append(";\n");
}
}
private void printField(StringBuilder sb, String indent, Field field) {
if (field.javadoc.text != null) {
printJavadoc(sb, indent, field.javadoc);
}
if (field.comment != null) {
printComment(sb, indent, field.comment);
}
printAnnotations(sb, indent, field.annotations);
sb.append(indent);
if (field.isPublic) {
sb.append("public ");
} else if (field.isPrivate) {
sb.append("private ");
}
if (field.isStatic) {
sb.append("static ");
}
if (field.isFinal) {
sb.append("final ");
}
sb.append(shortName(field.type));
sb.append(" ");
sb.append(field.name);
if (field.literalInit != null) {
sb.append(" = ");
sb.append(field.literalInit);
}
sb.append(";\n");
}
private void printJavadoc(StringBuilder sb, String indent, Javadoc doc) {
List<String> lines = formatJavadocOrCommentStringAsList(doc.text, indent);
if (!lines.isEmpty()) {
sb.append(indent).append("/**\n");
for (String line : lines) {
sb.append(indent).append(" * ").append(line).append("\n");
}
sb.append(indent).append(" */\n");
}
}
private void printComment(StringBuilder stringBuilder, String indent, String comment) {
List<String> lines = formatJavadocOrCommentStringAsList(comment, indent);
if (!lines.isEmpty()) {
for (String line : lines) {
stringBuilder.append(indent).append("// ").append(line).append("\n");
}
}
}
private List<String> formatJavadocOrCommentStringAsList(String text, String indent) {
List<String> lines = new ArrayList<>();
int len = 78 - indent.length();
String rem = text;
if (rem != null) {
while (!rem.isEmpty()) {
int idx = rem.length() >= len ? rem.substring(0, len).lastIndexOf(' ') : -1;
int idx2 = rem.indexOf('\n');
if (idx2 >= 0 && (idx < 0 || idx2 < idx || idx2 < len)) {
idx = idx2;
}
if (idx >= 0) {
String s = rem.substring(0, idx);
while (s.endsWith(" ")) {
s = s.substring(0, s.length() - 1);
}
String l = rem.substring(idx + 1);
while (l.startsWith(" ")) {
l = l.substring(1);
}
lines.add(s);
rem = l;
} else {
lines.add(rem);
rem = "";
}
}
}
return lines;
}
private void printAnnotations(StringBuilder sb, String indent, List<Annotation> anns) {
if (anns != null) {
for (Annotation ann : anns) {
sb.append(indent);
sb.append("@");
sb.append(shortName(ann.type.getName()));
if (!ann.values.isEmpty()) {
sb.append("(");
int i = 0;
for (Map.Entry<String, String> e : ann.values.entrySet()) {
if (i++ > 0) {
sb.append(", ");
}
if (Objects.equals(e.getKey(), "value") && ann.values.size() == 1) {
sb.append(e.getValue());
} else {
sb.append(e.getKey()).append(" = ").append(e.getValue());
}
}
sb.append(")");
}
sb.append("\n");
}
}
}
private String typeOf(Param p) {
return p.typeLiteral != null ? p.typeLiteral : shortName(p.type);
}
private String shortName(GenericType name) {
return shortName(name.toString());
}
private String shortName(String name) {
String s = name.replace('$', '.');
// int idx = s.lastIndexOf('.');
// return idx > 0 ? s.substring(idx + 1) : s;
s = s.replaceAll("([a-z][a-z0-9]+\\.([a-z][a-z0-9_]+\\.)*([A-Z][a-zA-Z0-9_]+\\.)?)([A-za-z]+)", "$4");
if (s.startsWith(this.name + ".")) {
s = s.substring(this.name.length() + 1);
}
return s;
}
private static String importOrder(String s1) {
// java comes first
if (s1.startsWith("java.")) {
s1 = "___" + s1;
}
// then javax comes next
if (s1.startsWith("javax.")) {
s1 = "__" + s1;
}
// org.w3c is for some odd reason also before others
if (s1.startsWith("org.w3c.")) {
s1 = "_" + s1;
}
return s1;
}
}
| JavaClass |
java | resilience4j__resilience4j | resilience4j-bulkhead/src/main/java/io/github/resilience4j/bulkhead/BulkheadRegistry.java | {
"start": 11133,
"end": 14250
} | class ____ {
private static final String DEFAULT_CONFIG = "default";
private RegistryStore<Bulkhead> registryStore;
private Map<String, BulkheadConfig> bulkheadConfigsMap;
private List<RegistryEventConsumer<Bulkhead>> registryEventConsumers;
private Map<String, String> tags;
public Builder() {
this.bulkheadConfigsMap = new java.util.HashMap<>();
this.registryEventConsumers = new ArrayList<>();
}
public Builder withRegistryStore(RegistryStore<Bulkhead> registryStore) {
this.registryStore = registryStore;
return this;
}
/**
* Configures a BulkheadRegistry with a custom default Bulkhead configuration.
*
* @param bulkheadConfig a custom default Bulkhead configuration
* @return a {@link BulkheadRegistry.Builder}
*/
public Builder withBulkheadConfig(BulkheadConfig bulkheadConfig) {
bulkheadConfigsMap.put(DEFAULT_CONFIG, bulkheadConfig);
return this;
}
/**
* Configures a BulkheadRegistry with a custom Bulkhead configuration.
*
* @param configName configName for a custom shared Bulkhead configuration
* @param configuration a custom shared Bulkhead configuration
* @return a {@link BulkheadRegistry.Builder}
* @throws IllegalArgumentException if {@code configName.equals("default")}
*/
public Builder addBulkheadConfig(String configName, BulkheadConfig configuration) {
if (configName.equals(DEFAULT_CONFIG)) {
throw new IllegalArgumentException(
"You cannot add another configuration with name 'default' as it is preserved for default configuration");
}
bulkheadConfigsMap.put(configName, configuration);
return this;
}
/**
* Configures a BulkheadRegistry with a Bulkhead registry event consumer.
*
* @param registryEventConsumer a Bulkhead registry event consumer.
* @return a {@link BulkheadRegistry.Builder}
*/
public Builder addRegistryEventConsumer(RegistryEventConsumer<Bulkhead> registryEventConsumer) {
this.registryEventConsumers.add(registryEventConsumer);
return this;
}
/**
* Configures a BulkheadRegistry with Tags.
* <p>
* Tags added to the registry will be added to every instance created by this registry.
*
* @param tags default tags to add to the registry.
* @return a {@link BulkheadRegistry.Builder}
*/
public Builder withTags(Map<String, String> tags) {
this.tags = tags;
return this;
}
/**
* Builds a BulkheadRegistry
*
* @return the BulkheadRegistry
*/
public BulkheadRegistry build() {
return new InMemoryBulkheadRegistry(bulkheadConfigsMap, registryEventConsumers, tags,
registryStore);
}
}
}
| Builder |
java | google__auto | value/src/test/java/com/google/auto/value/processor/AutoValueCompilationTest.java | {
"start": 91082,
"end": 91910
} | interface ____ {",
" Object blim(int x);",
" Baz build();",
" }",
"}");
Compilation compilation =
javac()
.withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor())
.compile(javaFileObject);
assertThat(compilation)
.hadErrorContaining("Setter methods must return foo.bar.Baz.Builder")
.inFile(javaFileObject)
.onLineContaining("Object blim(int x)");
}
@Test
public void autoValueBuilderWrongTypeGetter() {
JavaFileObject javaFileObject =
JavaFileObjects.forSourceLines(
"foo.bar.Baz",
"package foo.bar;",
"",
"import com.google.auto.value.AutoValue;",
"",
"@AutoValue",
"public abstract | Builder |
java | google__guava | android/guava/src/com/google/common/cache/LocalCache.java | {
"start": 137631,
"end": 141465
} | class ____<K, V> extends ForwardingCache<K, V>
implements Serializable {
@GwtIncompatible @J2ktIncompatible private static final long serialVersionUID = 1;
final Strength keyStrength;
final Strength valueStrength;
final Equivalence<Object> keyEquivalence;
final Equivalence<Object> valueEquivalence;
final long expireAfterWriteNanos;
final long expireAfterAccessNanos;
final long maxWeight;
final Weigher<K, V> weigher;
final int concurrencyLevel;
final RemovalListener<? super K, ? super V> removalListener;
final @Nullable Ticker ticker;
final CacheLoader<? super K, V> loader;
transient @Nullable Cache<K, V> delegate;
ManualSerializationProxy(LocalCache<K, V> cache) {
this(
cache.keyStrength,
cache.valueStrength,
cache.keyEquivalence,
cache.valueEquivalence,
cache.expireAfterWriteNanos,
cache.expireAfterAccessNanos,
cache.maxWeight,
cache.weigher,
cache.concurrencyLevel,
cache.removalListener,
cache.ticker,
cache.defaultLoader);
}
private ManualSerializationProxy(
Strength keyStrength,
Strength valueStrength,
Equivalence<Object> keyEquivalence,
Equivalence<Object> valueEquivalence,
long expireAfterWriteNanos,
long expireAfterAccessNanos,
long maxWeight,
Weigher<K, V> weigher,
int concurrencyLevel,
RemovalListener<? super K, ? super V> removalListener,
Ticker ticker,
CacheLoader<? super K, V> loader) {
this.keyStrength = keyStrength;
this.valueStrength = valueStrength;
this.keyEquivalence = keyEquivalence;
this.valueEquivalence = valueEquivalence;
this.expireAfterWriteNanos = expireAfterWriteNanos;
this.expireAfterAccessNanos = expireAfterAccessNanos;
this.maxWeight = maxWeight;
this.weigher = weigher;
this.concurrencyLevel = concurrencyLevel;
this.removalListener = removalListener;
this.ticker = (ticker == Ticker.systemTicker() || ticker == NULL_TICKER) ? null : ticker;
this.loader = loader;
}
CacheBuilder<K, V> recreateCacheBuilder() {
CacheBuilder<K, V> builder =
CacheBuilder.newBuilder()
.setKeyStrength(keyStrength)
.setValueStrength(valueStrength)
.keyEquivalence(keyEquivalence)
.valueEquivalence(valueEquivalence)
.concurrencyLevel(concurrencyLevel)
.removalListener(removalListener);
builder.strictParsing = false;
if (expireAfterWriteNanos > 0) {
builder.expireAfterWrite(expireAfterWriteNanos, NANOSECONDS);
}
if (expireAfterAccessNanos > 0) {
builder.expireAfterAccess(expireAfterAccessNanos, NANOSECONDS);
}
if (weigher != OneWeigher.INSTANCE) {
Object unused = builder.weigher(weigher);
if (maxWeight != UNSET_INT) {
builder.maximumWeight(maxWeight);
}
} else {
if (maxWeight != UNSET_INT) {
builder.maximumSize(maxWeight);
}
}
if (ticker != null) {
builder.ticker(ticker);
}
return builder;
}
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
in.defaultReadObject();
CacheBuilder<K, V> builder = recreateCacheBuilder();
this.delegate = builder.build();
}
private Object readResolve() {
return delegate;
}
@Override
protected Cache<K, V> delegate() {
return delegate;
}
}
/**
* Serializes the configuration of a LocalCache, reconstituting it as an LoadingCache using
* CacheBuilder upon deserialization. An instance of this | ManualSerializationProxy |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/stream/KeyToOneCollectionFetchScrollTest.java | {
"start": 1298,
"end": 2824
} | class ____ {
@BeforeAll
public void setUp(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final EntityA a1 = new EntityA("a1");
final EntityA a2 = new EntityA("a2");
final EntityB b1 = new EntityB("b1");
b1.a1 = a1;
b1.a2 = a2;
session.persist( a1 );
session.persist( a2 );
session.persist( b1 );
final EntityA a3 = new EntityA("a3");
final EntityA a4 = new EntityA("a4");
final EntityB b2 = new EntityB("b2");
b2.a1 = a3;
b2.a2 = a4;
session.persist( a3 );
session.persist( a4 );
session.persist( b2 );
} );
}
@AfterAll
public void tearDown(SessionFactoryScope scope) {
scope.inTransaction( session -> {
session.createMutationQuery( "delete from EntityC" ).executeUpdate();
session.createMutationQuery( "delete from EntityB" ).executeUpdate();
session.createMutationQuery( "delete from EntityA" ).executeUpdate();
} );
}
@Test
public void testScrollWithKeyToOne(SessionFactoryScope scope) {
scope.inTransaction( session -> {
try (final Stream<EntityB> stream = session.createQuery(
"select b from EntityB b join fetch b.a1 join fetch b.a2 left join fetch b.c c order by b.name",
EntityB.class
).getResultStream()) {
final List<EntityB> list = stream.collect( Collectors.toList() );
assertThat( list ).hasSize( 2 );
assertThat( list.get( 0 ).getA1() ).isNotNull();
assertThat( list.get( 0 ).getC() ).hasSize( 0 );
}
} );
}
@MappedSuperclass
public static abstract | KeyToOneCollectionFetchScrollTest |
java | apache__flink | flink-table/flink-sql-gateway/src/main/java/org/apache/flink/table/gateway/rest/message/session/ConfigureSessionRequestBody.java | {
"start": 1328,
"end": 2234
} | class ____ implements RequestBody {
private static final String FIELD_NAME_STATEMENT = "statement";
private static final String FIELD_NAME_EXECUTION_TIMEOUT = "executionTimeout";
@JsonProperty(FIELD_NAME_STATEMENT)
private final String statement;
@JsonProperty(FIELD_NAME_EXECUTION_TIMEOUT)
@Nullable
private final Long timeout;
public ConfigureSessionRequestBody(String statement) {
this(statement, null);
}
@JsonCreator
public ConfigureSessionRequestBody(
@JsonProperty(FIELD_NAME_STATEMENT) String statement,
@Nullable @JsonProperty(FIELD_NAME_EXECUTION_TIMEOUT) Long timeout) {
this.statement = statement;
this.timeout = timeout;
}
public String getStatement() {
return statement;
}
@Nullable
public Long getTimeout() {
return timeout;
}
}
| ConfigureSessionRequestBody |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/converted/converter/registrations/Thing2Converter.java | {
"start": 431,
"end": 677
} | class ____ implements AttributeConverter<Thing2,String> {
@Override
public String convertToDatabaseColumn(Thing2 attribute) {
return null;
}
@Override
public Thing2 convertToEntityAttribute(String dbData) {
return null;
}
}
| Thing2Converter |
java | google__dagger | javatests/dagger/internal/codegen/InjectConstructorFactoryGeneratorTest.java | {
"start": 41615,
"end": 42282
} | class ____ {",
" @Inject <T> void method();",
"}");
daggerCompiler(file)
.compile(
subject -> {
subject.hasErrorCount(1);
subject.hasErrorContaining("Methods with @Inject may not declare type parameters")
.onSource(file)
.onLine(6);
});
}
@Test public void multipleQualifiersOnInjectMethodParameter() {
Source file =
CompilerTests.javaSource(
"test.MultipleQualifierMethodParam",
"package test;",
"",
"import javax.inject.Inject;",
"",
" | AbstractInjectMethod |
java | spring-projects__spring-boot | core/spring-boot-autoconfigure/src/test/java/org/springframework/boot/autoconfigure/condition/ConditionalOnPropertyTests.java | {
"start": 12120,
"end": 12344
} | class ____ {
@Bean
String foo() {
return "foo";
}
}
@Configuration(proxyBeanMethods = false)
@ConditionalOnProperty(name = "simple.myProperty", havingValue = "bar", matchIfMissing = true)
static | SimpleValueConfig |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsNone.java | {
"start": 977,
"end": 1401
} | class ____ extends Credentials {
public CredentialsNone() {
super(AuthFlavor.AUTH_NONE);
mCredentialsLength = 0;
}
@Override
public void read(XDR xdr) {
mCredentialsLength = xdr.readInt();
Preconditions.checkState(mCredentialsLength == 0);
}
@Override
public void write(XDR xdr) {
Preconditions.checkState(mCredentialsLength == 0);
xdr.writeInt(mCredentialsLength);
}
}
| CredentialsNone |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ClusterPermission.java | {
"start": 3220,
"end": 7214
} | class ____ {
private final Set<ClusterPrivilege> clusterPrivileges = new HashSet<>();
private final List<Automaton> actionAutomatons = new ArrayList<>();
private final List<PermissionCheck> permissionChecks = new ArrayList<>();
private final RestrictedIndices restrictedIndices;
public Builder(RestrictedIndices restrictedIndices) {
this.restrictedIndices = restrictedIndices;
}
public Builder() {
this.restrictedIndices = null;
}
public Builder add(
final ClusterPrivilege clusterPrivilege,
final Set<String> allowedActionPatterns,
final Set<String> excludeActionPatterns
) {
this.clusterPrivileges.add(clusterPrivilege);
final Automaton actionAutomaton = createAutomaton(allowedActionPatterns, excludeActionPatterns);
this.actionAutomatons.add(actionAutomaton);
return this;
}
public Builder add(
final ClusterPrivilege clusterPrivilege,
final Set<String> allowedActionPatterns,
final Predicate<TransportRequest> requestPredicate
) {
final Automaton actionAutomaton = createAutomaton(allowedActionPatterns, Set.of());
return add(clusterPrivilege, new ActionRequestBasedPermissionCheck(clusterPrivilege, actionAutomaton, requestPredicate));
}
public Builder add(final ClusterPrivilege clusterPrivilege, final PermissionCheck permissionCheck) {
this.clusterPrivileges.add(clusterPrivilege);
this.permissionChecks.add(permissionCheck);
return this;
}
public Builder addWithPredicateSupplier(
final ClusterPrivilege clusterPrivilege,
final Set<String> allowedActionPatterns,
final Function<RestrictedIndices, Predicate<TransportRequest>> requestPredicateSupplier
) {
final Automaton actionAutomaton = createAutomaton(allowedActionPatterns, Set.of());
Predicate<TransportRequest> requestPredicate = requestPredicateSupplier.apply(restrictedIndices);
return add(clusterPrivilege, new ActionRequestBasedPermissionCheck(clusterPrivilege, actionAutomaton, requestPredicate));
}
public ClusterPermission build() {
if (clusterPrivileges.isEmpty()) {
return NONE;
}
List<PermissionCheck> checks = this.permissionChecks;
if (false == actionAutomatons.isEmpty()) {
final Automaton mergedAutomaton = Automatons.unionAndMinimize(this.actionAutomatons);
checks = new ArrayList<>(this.permissionChecks.size() + 1);
checks.add(new AutomatonPermissionCheck(mergedAutomaton));
checks.addAll(this.permissionChecks);
}
return new ClusterPermission(this.clusterPrivileges, checks);
}
private static Automaton createAutomaton(Set<String> allowedActionPatterns, Set<String> excludeActionPatterns) {
allowedActionPatterns = (allowedActionPatterns == null) ? Set.of() : allowedActionPatterns;
excludeActionPatterns = (excludeActionPatterns == null) ? Set.of() : excludeActionPatterns;
if (allowedActionPatterns.isEmpty()) {
return Automatons.EMPTY;
} else if (excludeActionPatterns.isEmpty()) {
return Automatons.patterns(allowedActionPatterns);
} else {
final Automaton allowedAutomaton = Automatons.patterns(allowedActionPatterns);
final Automaton excludedAutomaton = Automatons.patterns(excludeActionPatterns);
return Automatons.minusAndMinimize(allowedAutomaton, excludedAutomaton);
}
}
}
/**
* Evaluates whether the cluster actions (optionally for a given request)
* is permitted by this permission.
*/
public | Builder |
java | quarkusio__quarkus | extensions/redis-client/runtime/src/test/java/io/quarkus/redis/datasource/TransactionalBloomCommandsTest.java | {
"start": 743,
"end": 3515
} | class ____ extends DatasourceTestBase {
private RedisDataSource blocking;
private ReactiveRedisDataSource reactive;
@BeforeEach
void initialize() {
blocking = new BlockingRedisDataSourceImpl(vertx, redis, api, Duration.ofSeconds(60));
reactive = new ReactiveRedisDataSourceImpl(vertx, redis, api);
}
@AfterEach
public void clear() {
blocking.flushall();
}
@Test
public void bloomBlocking() {
TransactionResult result = blocking.withTransaction(tx -> {
TransactionalBloomCommands<String, String> bloom = tx.bloom(String.class);
assertThat(bloom.getDataSource()).isEqualTo(tx);
bloom.bfmadd(key, "a", "b", "c", "d", "a"); // 0 -> 4 true, 1 false
bloom.bfadd(key, "x"); // 1 -> true
bloom.bfexists(key, "a"); // 2 -> true
bloom.bfmexists(key, "a", "b", "z"); // 3 -> true, true, false
bloom.bfinsert(key, new BfInsertArgs(), "v", "w", "b"); // 4 -> true, true, false
});
assertThat(result.size()).isEqualTo(5);
assertThat(result.discarded()).isFalse();
assertThat((List<Boolean>) result.get(0)).containsExactly(true, true, true, true, false);
assertThat((boolean) result.get(1)).isTrue();
assertThat((boolean) result.get(2)).isTrue();
assertThat((List<Boolean>) result.get(3)).containsExactly(true, true, false);
assertThat((List<Boolean>) result.get(4)).containsExactly(true, true, false);
}
@Test
public void bloomReactive() {
TransactionResult result = reactive.withTransaction(tx -> {
ReactiveTransactionalBloomCommands<String, String> bloom = tx.bloom(String.class);
assertThat(bloom.getDataSource()).isEqualTo(tx);
return bloom.bfmadd(key, "a", "b", "c", "d", "a") // 0 -> 4 true, 1 false
.chain(() -> bloom.bfadd(key, "x")) // 1 -> true
.chain(() -> bloom.bfexists(key, "a")) // 2 -> true
.chain(() -> bloom.bfmexists(key, "a", "b", "z")) // 3 -> true, true, false
.chain(() -> bloom.bfinsert(key, new BfInsertArgs(), "v", "w", "b")); // 4 -> true, true, false
}).await().indefinitely();
assertThat(result.size()).isEqualTo(5);
assertThat(result.discarded()).isFalse();
assertThat((List<Boolean>) result.get(0)).containsExactly(true, true, true, true, false);
assertThat((boolean) result.get(1)).isTrue();
assertThat((boolean) result.get(2)).isTrue();
assertThat((List<Boolean>) result.get(3)).containsExactly(true, true, false);
assertThat((List<Boolean>) result.get(4)).containsExactly(true, true, false);
}
}
| TransactionalBloomCommandsTest |
java | apache__logging-log4j2 | log4j-layout-template-json/src/main/java/org/apache/logging/log4j/layout/template/json/resolver/MessageParameterResolverFactory.java | {
"start": 1157,
"end": 1809
} | class ____ implements EventResolverFactory {
private static final MessageParameterResolverFactory INSTANCE = new MessageParameterResolverFactory();
private MessageParameterResolverFactory() {}
@PluginFactory
public static MessageParameterResolverFactory getInstance() {
return INSTANCE;
}
@Override
public String getName() {
return MessageParameterResolver.getName();
}
@Override
public MessageParameterResolver create(final EventResolverContext context, final TemplateResolverConfig config) {
return new MessageParameterResolver(context, config);
}
}
| MessageParameterResolverFactory |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_1200/Issue1222.java | {
"start": 259,
"end": 577
} | class ____ extends TestCase {
public void test_for_issue() throws Exception {
Model model = new Model();
model.type = Type.A;
String text = JSON.toJSONString(model, SerializerFeature.WriteEnumUsingToString);
assertEquals("{\"type\":\"TypeA\"}", text);
}
public static | Issue1222 |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/pool/FullTest.java | {
"start": 154,
"end": 884
} | class ____ extends TestCase {
private DruidDataSource dataSource;
protected void setUp() throws Exception {
dataSource = new DruidDataSource();
dataSource.setUrl("jdbc:mock:xxx");
dataSource.setTestOnBorrow(false);
dataSource.init();
}
protected void tearDown() throws Exception {
dataSource.close();
}
public void test_restart() throws Exception {
assertEquals(false, dataSource.isFull());
dataSource.fill();
assertEquals(true, dataSource.isFull());
Connection conn = dataSource.getConnection();
assertEquals(true, dataSource.isFull());
conn.close();
assertEquals(true, dataSource.isFull());
}
}
| FullTest |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/jsontype/jdk/EnumTyping4733Test.java | {
"start": 1371,
"end": 1679
} | enum ____ implements InterName {
A1,
A2 {
@Override
public void yes() { }
};
}
// Failed before fix for [databind#4733]
@JsonTypeInfo(use = Id.SIMPLE_NAME)
@JsonSubTypes({
@JsonSubTypes.Type(value = A_SIMPLE_NAME.class),
})
| A_NAME |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java | {
"start": 78480,
"end": 88205
} | class ____ implements NamedDiff<Custom> {
private static final TransportVersion PROJECT_ID_IN_SNAPSHOT = TransportVersion.fromName("project_id_in_snapshot");
private final SnapshotsInProgress after;
private final DiffableUtils.MapDiff<ProjectRepo, ByRepo, Map<ProjectRepo, ByRepo>> mapDiff;
private final Set<String> nodeIdsForRemoval;
SnapshotInProgressDiff(SnapshotsInProgress before, SnapshotsInProgress after) {
this.mapDiff = DiffableUtils.diff(before.entries, after.entries, PROJECT_REPO_SERIALIZER);
this.nodeIdsForRemoval = after.nodesIdsForRemoval;
this.after = after;
}
SnapshotInProgressDiff(StreamInput in) throws IOException {
if (in.getTransportVersion().supports(PROJECT_ID_IN_SNAPSHOT) == false) {
final var oldMapDiff = DiffableUtils.readJdkMapDiff(
in,
DiffableUtils.getStringKeySerializer(),
i -> new ByRepo(i.readCollectionAsImmutableList(Entry::readFrom)),
i -> new ByRepo.ByRepoDiff(
DiffableUtils.readJdkMapDiff(i, DiffableUtils.getStringKeySerializer(), Entry::readFrom, EntryDiff::new),
DiffableUtils.readJdkMapDiff(i, DiffableUtils.getStringKeySerializer(), ByRepo.INT_DIFF_VALUE_SERIALIZER)
)
);
this.mapDiff = DiffableUtils.jdkMapDiffWithUpdatedKeys(
oldMapDiff,
repository -> new ProjectRepo(ProjectId.DEFAULT, repository),
PROJECT_REPO_SERIALIZER
);
} else {
this.mapDiff = DiffableUtils.readJdkMapDiff(
in,
PROJECT_REPO_SERIALIZER,
i -> new ByRepo(i.readCollectionAsImmutableList(Entry::readFrom)),
i -> new ByRepo.ByRepoDiff(
DiffableUtils.readJdkMapDiff(i, DiffableUtils.getStringKeySerializer(), Entry::readFrom, EntryDiff::new),
DiffableUtils.readJdkMapDiff(i, DiffableUtils.getStringKeySerializer(), ByRepo.INT_DIFF_VALUE_SERIALIZER)
)
);
}
this.nodeIdsForRemoval = readNodeIdsForRemoval(in);
this.after = null;
}
@Override
public SnapshotsInProgress apply(Custom part) {
final var snapshotsInProgress = (SnapshotsInProgress) part;
return new SnapshotsInProgress(mapDiff.apply(snapshotsInProgress.entries), this.nodeIdsForRemoval);
}
@Override
public TransportVersion getMinimalSupportedVersion() {
return TransportVersion.minimumCompatible();
}
@Override
public String getWriteableName() {
return TYPE;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
assert after != null : "should only write instances that were diffed from this node's state";
if (out.getTransportVersion().onOrAfter(DIFFABLE_VERSION)) {
if (out.getTransportVersion().supports(PROJECT_ID_IN_SNAPSHOT) == false) {
DiffableUtils.jdkMapDiffWithUpdatedKeys(mapDiff, projectRepo -> {
if (ProjectId.DEFAULT.equals(projectRepo.projectId()) == false) {
final var message = "Cannot write instance with non-default project id "
+ projectRepo.projectId()
+ " to version before "
+ PROJECT_ID_IN_SNAPSHOT;
assert false : message;
throw new IllegalArgumentException(message);
}
return projectRepo.name();
}, DiffableUtils.getStringKeySerializer()).writeTo(out);
} else {
mapDiff.writeTo(out);
}
} else {
new SimpleDiffable.CompleteDiff<>(after).writeTo(out);
}
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) {
out.writeStringCollection(nodeIdsForRemoval);
} else {
assert nodeIdsForRemoval.isEmpty() : nodeIdsForRemoval;
}
}
}
/**
* Wrapper for the list of snapshots per repository to allow for diffing changes in individual entries as well as position changes
* of entries in the list.
*
* @param entries all snapshots executing for a single repository
*/
private record ByRepo(List<Entry> entries) implements Diffable<ByRepo> {
static final ByRepo EMPTY = new ByRepo(List.of());
private static final DiffableUtils.NonDiffableValueSerializer<String, Integer> INT_DIFF_VALUE_SERIALIZER =
new DiffableUtils.NonDiffableValueSerializer<>() {
@Override
public void write(Integer value, StreamOutput out) throws IOException {
out.writeVInt(value);
}
@Override
public Integer read(StreamInput in, String key) throws IOException {
return in.readVInt();
}
};
private ByRepo(List<Entry> entries) {
this.entries = List.copyOf(entries);
}
/**
* Calculate summaries of how many shards and snapshots are in each shard/snapshot state
*
* @return a {@link Tuple} containing the snapshot and shard state summaries respectively
*/
public Tuple<Map<State, Integer>, Map<ShardState, Integer>> calculateStateSummaries() {
final int[] snapshotCounts = new int[State.values().length];
final int[] shardCounts = new int[ShardState.values().length];
for (Entry entry : entries) {
snapshotCounts[entry.state().ordinal()]++;
if (entry.isClone()) {
// Can't get shards for clone entry
continue;
}
for (ShardSnapshotStatus shardSnapshotStatus : entry.shards().values()) {
shardCounts[shardSnapshotStatus.state().ordinal()]++;
}
}
final Map<State, Integer> snapshotStates = Arrays.stream(State.values())
.collect(Collectors.toUnmodifiableMap(state -> state, state -> snapshotCounts[state.ordinal()]));
final Map<ShardState, Integer> shardStates = Arrays.stream(ShardState.values())
.collect(Collectors.toUnmodifiableMap(shardState -> shardState, state -> shardCounts[state.ordinal()]));
return Tuple.tuple(snapshotStates, shardStates);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeCollection(entries);
}
@Override
public Diff<ByRepo> diff(ByRepo previousState) {
return new ByRepoDiff(
DiffableUtils.diff(toMapByUUID(previousState), toMapByUUID(this), DiffableUtils.getStringKeySerializer()),
DiffableUtils.diff(
toPositionMap(previousState),
toPositionMap(this),
DiffableUtils.getStringKeySerializer(),
INT_DIFF_VALUE_SERIALIZER
)
);
}
public static Map<String, Integer> toPositionMap(ByRepo part) {
final Map<String, Integer> res = Maps.newMapWithExpectedSize(part.entries.size());
for (int i = 0; i < part.entries.size(); i++) {
final String snapshotUUID = part.entries.get(i).snapshot().getSnapshotId().getUUID();
assert res.containsKey(snapshotUUID) == false;
res.put(snapshotUUID, i);
}
return res;
}
public static Map<String, Entry> toMapByUUID(ByRepo part) {
final Map<String, Entry> res = Maps.newMapWithExpectedSize(part.entries.size());
for (Entry entry : part.entries) {
final String snapshotUUID = entry.snapshot().getSnapshotId().getUUID();
assert res.containsKey(snapshotUUID) == false;
res.put(snapshotUUID, entry);
}
return res;
}
/**
* @param diffBySnapshotUUID diff of a map of snapshot UUID to snapshot entry
* @param positionDiff diff of a map with snapshot UUID keys and positions in {@link ByRepo#entries} as values. Used to efficiently
* diff an entry moving to another index in the list
*/
private record ByRepoDiff(
DiffableUtils.MapDiff<String, Entry, Map<String, Entry>> diffBySnapshotUUID,
DiffableUtils.MapDiff<String, Integer, Map<String, Integer>> positionDiff
) implements Diff<ByRepo> {
@Override
public ByRepo apply(ByRepo part) {
final var updated = diffBySnapshotUUID.apply(toMapByUUID(part));
final var updatedPositions = positionDiff.apply(toPositionMap(part));
final Entry[] arr = new Entry[updated.size()];
updatedPositions.forEach((uuid, position) -> arr[position] = updated.get(uuid));
return new ByRepo(List.of(arr));
}
@Override
public void writeTo(StreamOutput out) throws IOException {
diffBySnapshotUUID.writeTo(out);
positionDiff.writeTo(out);
}
}
}
}
| SnapshotInProgressDiff |
java | alibaba__nacos | common/src/main/java/com/alibaba/nacos/common/utils/TlsTypeResolve.java | {
"start": 790,
"end": 1448
} | class ____ {
/**
* JDK SSL is very slower than OPENSSL, recommend use openSSl.
*
* @param provider name of ssl provider.
* @return SslProvider.
*/
public static SslProvider getSslProvider(String provider) {
if (SslProvider.OPENSSL.name().equalsIgnoreCase(provider)) {
return SslProvider.OPENSSL;
}
if (SslProvider.JDK.name().equalsIgnoreCase(provider)) {
return SslProvider.JDK;
}
if (SslProvider.OPENSSL_REFCNT.name().equalsIgnoreCase(provider)) {
return SslProvider.OPENSSL_REFCNT;
}
return SslProvider.OPENSSL;
}
}
| TlsTypeResolve |
java | apache__maven | its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng8400CanonicalMavenHomeTest.java | {
"start": 1241,
"end": 3011
} | class ____ extends AbstractMavenIntegrationTestCase {
/**
* Verify that properties are aligned (all use canonical maven home)
*/
@Test
void testIt() throws Exception {
Path basedir = extractResources("/mng-8400").getAbsoluteFile().toPath();
Path tempDir = basedir.resolve("tmp");
Files.createDirectories(tempDir);
Path linkedMavenHome = tempDir.resolve("linked-maven-home");
Path oldMavenHome = Paths.get(System.getProperty("maven.home"));
Files.deleteIfExists(linkedMavenHome);
Files.createSymbolicLink(linkedMavenHome, oldMavenHome);
System.setProperty("maven.home", linkedMavenHome.toString());
Verifier verifier = newVerifier(basedir.toString(), null);
verifier.addCliArgument("-DasProperties");
verifier.addCliArgument("-DtoFile=dump.properties");
verifier.addCliArgument("eu.maveniverse.maven.plugins:toolbox:" + verifier.getToolboxVersion() + ":gav-dump");
verifier.execute();
verifier.verifyErrorFreeLog();
String dump = Files.readString(basedir.resolve("dump.properties"), StandardCharsets.UTF_8);
Properties props = new Properties();
props.load(new ByteArrayInputStream(dump.getBytes(StandardCharsets.UTF_8)));
Path installationSettingsXml = Paths.get(props.getProperty("maven.settings"));
Path installationToolchainsXml = Paths.get(props.getProperty("maven.toolchains"));
Path mavenHome = Paths.get(props.getProperty("maven.home"));
assertEquals(mavenHome, installationSettingsXml.getParent().getParent()); // remove conf
assertEquals(mavenHome, installationToolchainsXml.getParent().getParent()); // remove conf
}
}
| MavenITmng8400CanonicalMavenHomeTest |
java | apache__flink | flink-docs/src/test/java/org/apache/flink/docs/configuration/ConfigOptionsDocsCompletenessITCase.java | {
"start": 18227,
"end": 19872
} | class ____ {
protected final String key;
protected final String defaultValue;
protected final String typeValue;
protected final String description;
private Option(String key, String defaultValue, String typeValue, String description) {
this.key = key;
this.defaultValue = defaultValue;
this.typeValue = typeValue;
this.description = description;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Option option = (Option) o;
return Objects.equals(key, option.key)
&& Objects.equals(defaultValue, option.defaultValue)
&& Objects.equals(typeValue, option.typeValue)
&& Objects.equals(description, option.description);
}
@Override
public int hashCode() {
return Objects.hash(key, defaultValue, typeValue, description);
}
@Override
public String toString() {
return "Option{"
+ "key='"
+ key
+ '\''
+ ", defaultValue='"
+ defaultValue
+ '\''
+ ", typeValue='"
+ typeValue
+ '\''
+ ", description='"
+ description
+ '\''
+ '}';
}
}
}
| Option |
java | mockito__mockito | mockito-extensions/mockito-junit-jupiter/src/test/java/org/mockitousage/GenericTypeMockTest.java | {
"start": 2533,
"end": 3338
} | class ____ {
Set<? extends Date> dateSet;
Set<? extends Number> numberSet;
}
@Mock Set<Time> timeSetMock; // java.sql.Time extends Date
@Mock Set<Integer> integerSetMock;
@InjectMocks UnderTestWithWildcard underTestWithWildcard = new UnderTestWithWildcard();
@Test
void testWildcard() {
assertNotNull(timeSetMock);
assertNotNull(integerSetMock);
// this also tests whether WildcardType.upperBounds() is evaluated,
// i.e. that we match <? extends Date> to <Time> type parameter
assertEquals(timeSetMock, underTestWithWildcard.dateSet);
assertEquals(integerSetMock, underTestWithWildcard.numberSet);
}
}
@Nested
public | UnderTestWithWildcard |
java | quarkusio__quarkus | core/runtime/src/main/java/io/quarkus/devservices/crossclassloader/runtime/RunningDevServicesRegistry.java | {
"start": 405,
"end": 593
} | class ____ only use language-level classes and classes defined in this same package.
* Other Quarkus classes might be in a different classloader.
* <p>
* Warning: The methods in this | should |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/mysql/grant/MySqlGrantTest_20.java | {
"start": 969,
"end": 2368
} | class ____ extends MysqlTest {
public void test_0() throws Exception {
String sql = "GRANT GRANT OPTION ON mydb.* TO 'someuser'@'somehost';";
MySqlStatementParser parser = new MySqlStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement stmt = statementList.get(0);
// print(statementList);
assertEquals(1, statementList.size());
MySqlSchemaStatVisitor visitor = new MySqlSchemaStatVisitor();
stmt.accept(visitor);
String output = SQLUtils.toMySqlString(stmt);
assertEquals("GRANT GRANT OPTION ON mydb.* TO 'someuser'@'somehost';", //
output);
//
// System.out.println("Tables : " + visitor.getTables());
// System.out.println("fields : " + visitor.getColumns());
// System.out.println("coditions : " + visitor.getConditions());
// System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(1, visitor.getTables().size());
assertEquals(0, visitor.getColumns().size());
assertEquals(0, visitor.getConditions().size());
// assertTrue(visitor.getTables().containsKey(new TableStat.Name("City")));
// assertTrue(visitor.getTables().containsKey(new TableStat.Name("t2")));
// assertTrue(visitor.getColumns().contains(new Column("t2", "id")));
}
}
| MySqlGrantTest_20 |
java | apache__camel | components/camel-debezium/camel-debezium-oracle/src/generated/java/org/apache/camel/component/debezium/oracle/configuration/OracleConnectorEmbeddedDebeziumConfiguration.java | {
"start": 465,
"end": 28505
} | class ____
extends
EmbeddedDebeziumConfiguration {
private static final String LABEL_NAME = "consumer,oracle";
@UriParam(label = LABEL_NAME, defaultValue = "shared")
private String snapshotLockingMode = "shared";
@UriParam(label = LABEL_NAME, defaultValue = "false")
private boolean logMiningBufferDropOnStop = false;
@UriParam(label = LABEL_NAME)
private String messageKeyColumns;
@UriParam(label = LABEL_NAME, defaultValue = "io.debezium.pipeline.txmetadata.DefaultTransactionMetadataFactory")
private String transactionMetadataFactory = "io.debezium.pipeline.txmetadata.DefaultTransactionMetadataFactory";
@UriParam(label = LABEL_NAME)
private String customMetricTags;
@UriParam(label = LABEL_NAME)
private String openlogreplicatorHost;
@UriParam(label = LABEL_NAME, defaultValue = "source")
private String signalEnabledChannels = "source";
@UriParam(label = LABEL_NAME, defaultValue = "true")
private boolean includeSchemaChanges = true;
@UriParam(label = LABEL_NAME, defaultValue = "false")
private boolean logMiningIncludeRedoSql = false;
@UriParam(label = LABEL_NAME)
private String signalDataCollection;
@UriParam(label = LABEL_NAME)
private String logMiningReadonlyHostname;
@UriParam(label = LABEL_NAME)
private String converters;
@UriParam(label = LABEL_NAME)
private int snapshotFetchSize;
@UriParam(label = LABEL_NAME)
private String openlineageIntegrationJobTags;
@UriParam(label = LABEL_NAME, defaultValue = "10s", javaType = "java.time.Duration")
private long snapshotLockTimeoutMs = 10000;
@UriParam(label = LABEL_NAME, defaultValue = "1000000")
private long logMiningScnGapDetectionGapSizeMin = 1000000;
@UriParam(label = LABEL_NAME)
private String databaseDbname;
@UriParam(label = LABEL_NAME, defaultValue = "disabled")
private String snapshotTablesOrderByRowCount = "disabled";
@UriParam(label = LABEL_NAME, defaultValue = "1s", javaType = "java.time.Duration")
private long logMiningSleepTimeDefaultMs = 1000;
@UriParam(label = LABEL_NAME)
private String snapshotSelectStatementOverrides;
@UriParam(label = LABEL_NAME, defaultValue = "20000")
private long logMiningBatchSizeIncrement = 20000;
@UriParam(label = LABEL_NAME, defaultValue = "10s", javaType = "java.time.Duration")
private long logMiningArchiveLogOnlyScnPollIntervalMs = 10000;
@UriParam(label = LABEL_NAME, defaultValue = "false")
private boolean logMiningRestartConnection = false;
@UriParam(label = LABEL_NAME, defaultValue = "false")
private boolean legacyDecimalHandlingStrategy = false;
@UriParam(label = LABEL_NAME)
private String tableExcludeList;
@UriParam(label = LABEL_NAME, defaultValue = "2048")
private int maxBatchSize = 2048;
@UriParam(label = LABEL_NAME)
private String logMiningBufferInfinispanCacheTransactions;
@UriParam(label = LABEL_NAME, defaultValue = "io.debezium.schema.SchemaTopicNamingStrategy")
private String topicNamingStrategy = "io.debezium.schema.SchemaTopicNamingStrategy";
@UriParam(label = LABEL_NAME, defaultValue = "initial")
private String snapshotMode = "initial";
@UriParam(label = LABEL_NAME, defaultValue = "false")
private boolean snapshotModeConfigurationBasedSnapshotData = false;
@UriParam(label = LABEL_NAME)
private String logMiningBufferEhcacheSchemachangesConfig;
@UriParam(label = LABEL_NAME)
private String openlineageIntegrationJobOwners;
@UriParam(label = LABEL_NAME, defaultValue = "./openlineage.yml")
private String openlineageIntegrationConfigFilePath = "./openlineage.yml";
@UriParam(label = LABEL_NAME, defaultValue = "10s", javaType = "java.time.Duration")
private long retriableRestartConnectorWaitMs = 10000;
@UriParam(label = LABEL_NAME, defaultValue = "0ms", javaType = "java.time.Duration")
private long snapshotDelayMs = 0;
@UriParam(label = LABEL_NAME, defaultValue = "online_catalog")
private String logMiningStrategy = "online_catalog";
@UriParam(label = LABEL_NAME, defaultValue = "4s", javaType = "java.time.Duration")
private long executorShutdownTimeoutMs = 4000;
@UriParam(label = LABEL_NAME, defaultValue = "false")
private boolean snapshotModeConfigurationBasedSnapshotOnDataError = false;
@UriParam(label = LABEL_NAME)
private String schemaHistoryInternalFileFilename;
@UriParam(label = LABEL_NAME, defaultValue = "false")
private boolean tombstonesOnDelete = false;
@UriParam(label = LABEL_NAME, defaultValue = "precise")
private String decimalHandlingMode = "precise";
@UriParam(label = LABEL_NAME, defaultValue = "bytes")
private String binaryHandlingMode = "bytes";
@UriParam(label = LABEL_NAME)
private String databaseOutServerName;
@UriParam(label = LABEL_NAME)
private String openlineageIntegrationDatasetKafkaBootstrapServers;
@UriParam(label = LABEL_NAME, defaultValue = "0")
private long archiveLogHours = 0;
@UriParam(label = LABEL_NAME)
private String snapshotIncludeCollectionList;
@UriParam(label = LABEL_NAME, defaultValue = "false")
private boolean snapshotModeConfigurationBasedStartStream = false;
@UriParam(label = LABEL_NAME)
private String databasePdbName;
@UriParam(label = LABEL_NAME, defaultValue = "LogMiner")
private String databaseConnectionAdapter = "LogMiner";
@UriParam(label = LABEL_NAME, defaultValue = "LOG_MINING_FLUSH")
private String logMiningFlushTableName = "LOG_MINING_FLUSH";
@UriParam(label = LABEL_NAME)
private String openlogreplicatorSource;
@UriParam(label = LABEL_NAME, defaultValue = "memory")
private String logMiningBufferType = "memory";
@UriParam(label = LABEL_NAME, defaultValue = "5s", javaType = "java.time.Duration")
private long signalPollIntervalMs = 5000;
@UriParam(label = LABEL_NAME)
private String notificationEnabledChannels;
@UriParam(label = LABEL_NAME, defaultValue = "fail")
private String eventProcessingFailureHandlingMode = "fail";
@UriParam(label = LABEL_NAME, defaultValue = "1")
private int snapshotMaxThreads = 1;
@UriParam(label = LABEL_NAME)
private String notificationSinkTopicName;
@UriParam(label = LABEL_NAME)
private String snapshotModeCustomName;
@UriParam(label = LABEL_NAME, defaultValue = "none")
private String logMiningQueryFilterMode = "none";
@UriParam(label = LABEL_NAME, defaultValue = "none")
private String schemaNameAdjustmentMode = "none";
@UriParam(label = LABEL_NAME, defaultValue = "20000")
private long logMiningBatchSizeDefault = 20000;
@UriParam(label = LABEL_NAME)
private String tableIncludeList;
@UriParam(label = LABEL_NAME)
private String logMiningBufferEhcacheProcessedtransactionsConfig;
@UriParam(label = LABEL_NAME, defaultValue = "0ms", javaType = "java.time.Duration")
private long streamingDelayMs = 0;
@UriParam(label = LABEL_NAME)
private String openlineageIntegrationJobNamespace;
@UriParam(label = LABEL_NAME, defaultValue = "10m", javaType = "java.time.Duration")
private int databaseQueryTimeoutMs = 600000;
@UriParam(label = LABEL_NAME, defaultValue = "10000")
private int queryFetchSize = 10000;
@UriParam(label = LABEL_NAME)
private String logMiningBufferEhcacheGlobalConfig;
@UriParam(label = LABEL_NAME, defaultValue = "0ms", javaType = "java.time.Duration")
private long logMiningSleepTimeMinMs = 0;
@UriParam(label = LABEL_NAME, defaultValue = "__debezium_unavailable_value")
private String unavailableValuePlaceholder = "__debezium_unavailable_value";
@UriParam(label = LABEL_NAME)
private String logMiningClientidIncludeList;
@UriParam(label = LABEL_NAME)
private String heartbeatActionQuery;
@UriParam(label = LABEL_NAME)
private String logMiningClientidExcludeList;
@UriParam(label = LABEL_NAME, defaultValue = "500ms", javaType = "java.time.Duration")
private long pollIntervalMs = 500;
@UriParam(label = LABEL_NAME, defaultValue = "0")
private int guardrailCollectionsMax = 0;
@UriParam(label = LABEL_NAME)
private String logMiningUsernameIncludeList;
@UriParam(label = LABEL_NAME, defaultValue = "false")
private boolean lobEnabled = false;
@UriParam(label = LABEL_NAME, defaultValue = "numeric")
private String intervalHandlingMode = "numeric";
@UriParam(label = LABEL_NAME, defaultValue = "__debezium-heartbeat")
private String heartbeatTopicsPrefix = "__debezium-heartbeat";
@UriParam(label = LABEL_NAME, defaultValue = "false")
private boolean logMiningArchiveLogOnlyMode = false;
@UriParam(label = LABEL_NAME)
private String logMiningPathDictionary;
@UriParam(label = LABEL_NAME)
private String logMiningBufferInfinispanCacheSchemaChanges;
@UriParam(label = LABEL_NAME, defaultValue = "3s", javaType = "java.time.Duration")
private long logMiningSleepTimeMaxMs = 3000;
@UriParam(label = LABEL_NAME)
private String databaseUser;
@UriParam(label = LABEL_NAME)
private String datatypePropagateSourceType;
@UriParam(label = LABEL_NAME, defaultValue = "INSERT_INSERT")
private String incrementalSnapshotWatermarkingStrategy = "INSERT_INSERT";
@UriParam(label = LABEL_NAME, defaultValue = "0ms", javaType = "java.time.Duration")
private int heartbeatIntervalMs = 0;
@UriParam(label = LABEL_NAME, defaultValue = "false")
private boolean snapshotModeConfigurationBasedSnapshotOnSchemaError = false;
@UriParam(label = LABEL_NAME, defaultValue = "false")
private boolean schemaHistoryInternalSkipUnparseableDdl = false;
@UriParam(label = LABEL_NAME)
private String columnIncludeList;
@UriParam(label = LABEL_NAME)
private String logMiningUsernameExcludeList;
@UriParam(label = LABEL_NAME)
private String columnPropagateSourceType;
@UriParam(label = LABEL_NAME)
private String logMiningBufferEhcacheTransactionsConfig;
@UriParam(label = LABEL_NAME)
private String logMiningBufferInfinispanCacheProcessedTransactions;
@UriParam(label = LABEL_NAME, defaultValue = "-1")
private int errorsMaxRetries = -1;
@UriParam(label = LABEL_NAME)
@Metadata(required = true)
private String databasePassword;
@UriParam(label = LABEL_NAME)
private String logMiningBufferInfinispanCacheEvents;
@UriParam(label = LABEL_NAME, defaultValue = "t")
private String skippedOperations = "t";
@UriParam(label = LABEL_NAME, defaultValue = "Debezium change data capture job")
private String openlineageIntegrationJobDescription = "Debezium change data capture job";
@UriParam(label = LABEL_NAME)
private String archiveDestinationName;
@UriParam(label = LABEL_NAME, defaultValue = "20s", javaType = "java.time.Duration")
private long logMiningScnGapDetectionTimeIntervalMaxMs = 20000;
@UriParam(label = LABEL_NAME, defaultValue = "true")
private boolean extendedHeadersEnabled = true;
@UriParam(label = LABEL_NAME, defaultValue = "8192")
private int maxQueueSize = 8192;
@UriParam(label = LABEL_NAME, defaultValue = "warn")
private String guardrailCollectionsLimitAction = "warn";
@UriParam(label = LABEL_NAME)
private String racNodes;
@UriParam(label = LABEL_NAME)
private String logMiningBufferInfinispanCacheGlobal;
@UriParam(label = LABEL_NAME, defaultValue = "0")
private long logMiningBufferTransactionEventsThreshold = 0;
@UriParam(label = LABEL_NAME, defaultValue = "0ms", javaType = "java.time.Duration")
private long logMiningTransactionRetentionMs = 0;
@UriParam(label = LABEL_NAME, defaultValue = "false")
private boolean provideTransactionMetadata = false;
@UriParam(label = LABEL_NAME, defaultValue = "false")
private boolean schemaHistoryInternalStoreOnlyCapturedTablesDdl = false;
@UriParam(label = LABEL_NAME, defaultValue = "false")
private boolean schemaHistoryInternalStoreOnlyCapturedDatabasesDdl = false;
@UriParam(label = LABEL_NAME, defaultValue = "0")
private int snapshotDatabaseErrorsMaxRetries = 0;
@UriParam(label = LABEL_NAME)
@Metadata(required = true)
private String topicPrefix;
@UriParam(label = LABEL_NAME, defaultValue = "false")
private boolean includeSchemaComments = false;
@UriParam(label = LABEL_NAME, defaultValue = "io.debezium.connector.oracle.OracleSourceInfoStructMaker")
private String sourceinfoStructMaker = "io.debezium.connector.oracle.OracleSourceInfoStructMaker";
@UriParam(label = LABEL_NAME, defaultValue = "false")
private boolean openlineageIntegrationEnabled = false;
@UriParam(label = LABEL_NAME)
private int openlogreplicatorPort;
@UriParam(label = LABEL_NAME)
private String logMiningBufferEhcacheEventsConfig;
@UriParam(label = LABEL_NAME, defaultValue = "100000")
private long logMiningBatchSizeMax = 100000;
@UriParam(label = LABEL_NAME, defaultValue = "0")
private long maxQueueSizeInBytes = 0;
@UriParam(label = LABEL_NAME)
private String databaseUrl;
@UriParam(label = LABEL_NAME, defaultValue = "false")
private boolean snapshotModeConfigurationBasedSnapshotSchema = false;
@UriParam(label = LABEL_NAME, defaultValue = "adaptive")
private String timePrecisionMode = "adaptive";
@UriParam(label = LABEL_NAME)
private String postProcessors;
@UriParam(label = LABEL_NAME, defaultValue = "1528")
private int databasePort = 1528;
@UriParam(label = LABEL_NAME, defaultValue = "200ms", javaType = "java.time.Duration")
private long logMiningSleepTimeIncrementMs = 200;
@UriParam(label = LABEL_NAME, defaultValue = "io.debezium.storage.kafka.history.KafkaSchemaHistory")
private String schemaHistoryInternal = "io.debezium.storage.kafka.history.KafkaSchemaHistory";
@UriParam(label = LABEL_NAME)
private String columnExcludeList;
@UriParam(label = LABEL_NAME, defaultValue = "0ms", javaType = "java.time.Duration")
private long logMiningSessionMaxMs = 0;
@UriParam(label = LABEL_NAME)
private String databaseHostname;
@UriParam(label = LABEL_NAME, defaultValue = "1000")
private long logMiningBatchSizeMin = 1000;
@UriParam(label = LABEL_NAME, defaultValue = "1m", javaType = "java.time.Duration")
private long connectionValidationTimeoutMs = 60000;
/**
* Controls how the connector holds locks on tables while performing the
* schema snapshot. The default is 'shared', which means the connector will
* hold a table lock that prevents exclusive table access for just the
* initial portion of the snapshot while the database schemas and other
* metadata are being read. The remaining work in a snapshot involves
* selecting all rows from each table, and this is done using a flashback
* query that requires no locks. However, in some cases it may be desirable
* to avoid locks entirely which can be done by specifying 'none'. This mode
* is only safe to use if no schema changes are happening while the snapshot
* is taken.
*/
public void setSnapshotLockingMode(String snapshotLockingMode) {
this.snapshotLockingMode = snapshotLockingMode;
}
public String getSnapshotLockingMode() {
return snapshotLockingMode;
}
/**
* When set to true the underlying buffer cache is not retained when the
* connector is stopped. When set to false (the default), the buffer cache
* is retained across restarts.
*/
public void setLogMiningBufferDropOnStop(boolean logMiningBufferDropOnStop) {
this.logMiningBufferDropOnStop = logMiningBufferDropOnStop;
}
public boolean isLogMiningBufferDropOnStop() {
return logMiningBufferDropOnStop;
}
/**
* A semicolon-separated list of expressions that match fully-qualified
* tables and column(s) to be used as message key. Each expression must
* match the pattern '<fully-qualified table name>:<key columns>', where the
* table names could be defined as (DB_NAME.TABLE_NAME) or
* (SCHEMA_NAME.TABLE_NAME), depending on the specific connector, and the
* key columns are a comma-separated list of columns representing the custom
* key. For any table without an explicit key configuration the table's
* primary key column(s) will be used as message key. Example:
* dbserver1.inventory.orderlines:orderId,orderLineId;dbserver1.inventory.orders:id
*/
public void setMessageKeyColumns(String messageKeyColumns) {
this.messageKeyColumns = messageKeyColumns;
}
public String getMessageKeyColumns() {
return messageKeyColumns;
}
/**
* Class to make transaction context & transaction struct/schemas
*/
public void setTransactionMetadataFactory(String transactionMetadataFactory) {
this.transactionMetadataFactory = transactionMetadataFactory;
}
public String getTransactionMetadataFactory() {
return transactionMetadataFactory;
}
/**
* The custom metric tags will accept key-value pairs to customize the MBean
* object name which should be appended the end of regular name, each key
* would represent a tag for the MBean object name, and the corresponding
* value would be the value of that tag the key is. For example: k1=v1,k2=v2
*/
public void setCustomMetricTags(String customMetricTags) {
this.customMetricTags = customMetricTags;
}
public String getCustomMetricTags() {
return customMetricTags;
}
/**
* The hostname of the OpenLogReplicator network service
*/
public void setOpenlogreplicatorHost(String openlogreplicatorHost) {
this.openlogreplicatorHost = openlogreplicatorHost;
}
public String getOpenlogreplicatorHost() {
return openlogreplicatorHost;
}
/**
* List of channels names that are enabled. Source channel is enabled by
* default
*/
public void setSignalEnabledChannels(String signalEnabledChannels) {
this.signalEnabledChannels = signalEnabledChannels;
}
public String getSignalEnabledChannels() {
return signalEnabledChannels;
}
/**
* Whether the connector should publish changes in the database schema to a
* Kafka topic with the same name as the database server ID. Each schema
* change will be recorded using a key that contains the database name and
* whose value include logical description of the new schema and optionally
* the DDL statement(s). The default is 'true'. This is independent of how
* the connector internally records database schema history.
*/
public void setIncludeSchemaChanges(boolean includeSchemaChanges) {
this.includeSchemaChanges = includeSchemaChanges;
}
public boolean isIncludeSchemaChanges() {
return includeSchemaChanges;
}
/**
* When enabled, the transaction log REDO SQL will be included in the source
* information block.
*/
public void setLogMiningIncludeRedoSql(boolean logMiningIncludeRedoSql) {
this.logMiningIncludeRedoSql = logMiningIncludeRedoSql;
}
public boolean isLogMiningIncludeRedoSql() {
return logMiningIncludeRedoSql;
}
/**
* The name of the data collection that is used to send signals/commands to
* Debezium. Signaling is disabled when not set.
*/
public void setSignalDataCollection(String signalDataCollection) {
this.signalDataCollection = signalDataCollection;
}
public String getSignalDataCollection() {
return signalDataCollection;
}
/**
* The hostname the connector will use to connect and perform read-only
* operations for the the replica.
*/
public void setLogMiningReadonlyHostname(String logMiningReadonlyHostname) {
this.logMiningReadonlyHostname = logMiningReadonlyHostname;
}
public String getLogMiningReadonlyHostname() {
return logMiningReadonlyHostname;
}
/**
* Optional list of custom converters that would be used instead of default
* ones. The converters are defined using '<converter.prefix>.type' config
* option and configured using options '<converter.prefix>.<option>'
*/
public void setConverters(String converters) {
this.converters = converters;
}
public String getConverters() {
return converters;
}
/**
* The maximum number of records that should be loaded into memory while
* performing a snapshot.
*/
public void setSnapshotFetchSize(int snapshotFetchSize) {
this.snapshotFetchSize = snapshotFetchSize;
}
public int getSnapshotFetchSize() {
return snapshotFetchSize;
}
/**
* The job's tags emitted by Debezium. A comma-separated list of key-value
* pairs.For example: k1=v1,k2=v2
*/
public void setOpenlineageIntegrationJobTags(
String openlineageIntegrationJobTags) {
this.openlineageIntegrationJobTags = openlineageIntegrationJobTags;
}
public String getOpenlineageIntegrationJobTags() {
return openlineageIntegrationJobTags;
}
/**
* The maximum number of millis to wait for table locks at the beginning of
* a snapshot. If locks cannot be acquired in this time frame, the snapshot
* will be aborted. Defaults to 10 seconds
*/
public void setSnapshotLockTimeoutMs(long snapshotLockTimeoutMs) {
this.snapshotLockTimeoutMs = snapshotLockTimeoutMs;
}
public long getSnapshotLockTimeoutMs() {
return snapshotLockTimeoutMs;
}
/**
* Used for SCN gap detection, if the difference between current SCN and
* previous end SCN is bigger than this value, and the time difference of
* current SCN and previous end SCN is smaller than
* log.mining.scn.gap.detection.time.interval.max.ms, consider it a SCN gap.
*/
public void setLogMiningScnGapDetectionGapSizeMin(
long logMiningScnGapDetectionGapSizeMin) {
this.logMiningScnGapDetectionGapSizeMin = logMiningScnGapDetectionGapSizeMin;
}
public long getLogMiningScnGapDetectionGapSizeMin() {
return logMiningScnGapDetectionGapSizeMin;
}
/**
* The name of the database from which the connector should capture changes
*/
public void setDatabaseDbname(String databaseDbname) {
this.databaseDbname = databaseDbname;
}
public String getDatabaseDbname() {
return databaseDbname;
}
/**
* Controls the order in which tables are processed in the initial snapshot.
* A `descending` value will order the tables by row count descending. A
* `ascending` value will order the tables by row count ascending. A value
* of `disabled` (the default) will disable ordering by row count.
*/
public void setSnapshotTablesOrderByRowCount(
String snapshotTablesOrderByRowCount) {
this.snapshotTablesOrderByRowCount = snapshotTablesOrderByRowCount;
}
public String getSnapshotTablesOrderByRowCount() {
return snapshotTablesOrderByRowCount;
}
/**
* The amount of time that the connector will sleep after reading data from
* redo/archive logs and before starting reading data again. Value is in
* milliseconds.
*/
public void setLogMiningSleepTimeDefaultMs(long logMiningSleepTimeDefaultMs) {
this.logMiningSleepTimeDefaultMs = logMiningSleepTimeDefaultMs;
}
public long getLogMiningSleepTimeDefaultMs() {
return logMiningSleepTimeDefaultMs;
}
/**
* This property contains a comma-separated list of fully-qualified tables
* (DB_NAME.TABLE_NAME) or (SCHEMA_NAME.TABLE_NAME), depending on the
* specific connectors. Select statements for the individual tables are
* specified in further configuration properties, one for each table,
* identified by the id
* 'snapshot.select.statement.overrides.[DB_NAME].[TABLE_NAME]' or
* 'snapshot.select.statement.overrides.[SCHEMA_NAME].[TABLE_NAME]',
* respectively. The value of those properties is the select statement to
* use when retrieving data from the specific table during snapshotting. A
* possible use case for large append-only tables is setting a specific
* point where to start (resume) snapshotting, in case a previous
* snapshotting was interrupted.
*/
public void setSnapshotSelectStatementOverrides(
String snapshotSelectStatementOverrides) {
this.snapshotSelectStatementOverrides = snapshotSelectStatementOverrides;
}
public String getSnapshotSelectStatementOverrides() {
return snapshotSelectStatementOverrides;
}
/**
* Active batch size will be also increased/decreased by this amount for
* tuning connector throughput when needed.
*/
public void setLogMiningBatchSizeIncrement(long logMiningBatchSizeIncrement) {
this.logMiningBatchSizeIncrement = logMiningBatchSizeIncrement;
}
public long getLogMiningBatchSizeIncrement() {
return logMiningBatchSizeIncrement;
}
/**
* The interval in milliseconds to wait between polls checking to see if the
* SCN is in the archive logs.
*/
public void setLogMiningArchiveLogOnlyScnPollIntervalMs(
long logMiningArchiveLogOnlyScnPollIntervalMs) {
this.logMiningArchiveLogOnlyScnPollIntervalMs = logMiningArchiveLogOnlyScnPollIntervalMs;
}
public long getLogMiningArchiveLogOnlyScnPollIntervalMs() {
return logMiningArchiveLogOnlyScnPollIntervalMs;
}
/**
* Debezium opens a database connection and keeps that connection open
* throughout the entire streaming phase. In some situations, this can lead
* to excessive SGA memory usage. By setting this option to 'true' (the
* default is 'false'), the connector will close and re-open a database
* connection after every detected log switch or if the
* log.mining.session.max.ms has been reached.
*/
public void setLogMiningRestartConnection(boolean logMiningRestartConnection) {
this.logMiningRestartConnection = logMiningRestartConnection;
}
public boolean isLogMiningRestartConnection() {
return logMiningRestartConnection;
}
/**
* Uses the legacy decimal handling behavior before DBZ-7882
*/
public void setLegacyDecimalHandlingStrategy(
boolean legacyDecimalHandlingStrategy) {
this.legacyDecimalHandlingStrategy = legacyDecimalHandlingStrategy;
}
public boolean isLegacyDecimalHandlingStrategy() {
return legacyDecimalHandlingStrategy;
}
/**
* A comma-separated list of regular expressions that match the
* fully-qualified names of tables to be excluded from monitoring
*/
public void setTableExcludeList(String tableExcludeList) {
this.tableExcludeList = tableExcludeList;
}
public String getTableExcludeList() {
return tableExcludeList;
}
/**
* Maximum size of each batch of source records. Defaults to 2048.
*/
public void setMaxBatchSize(int maxBatchSize) {
this.maxBatchSize = maxBatchSize;
}
public int getMaxBatchSize() {
return maxBatchSize;
}
/**
* Specifies the XML configuration for the Infinispan 'transactions' cache
*/
public void setLogMiningBufferInfinispanCacheTransactions(
String logMiningBufferInfinispanCacheTransactions) {
this.logMiningBufferInfinispanCacheTransactions = logMiningBufferInfinispanCacheTransactions;
}
public String getLogMiningBufferInfinispanCacheTransactions() {
return logMiningBufferInfinispanCacheTransactions;
}
/**
* The name of the TopicNamingStrategy | OracleConnectorEmbeddedDebeziumConfiguration |
java | bumptech__glide | library/src/main/java/com/bumptech/glide/load/data/HttpUrlFetcher.java | {
"start": 783,
"end": 8142
} | class ____ implements DataFetcher<InputStream> {
private static final String TAG = "HttpUrlFetcher";
private static final int MAXIMUM_REDIRECTS = 5;
@VisibleForTesting static final String REDIRECT_HEADER_FIELD = "Location";
@VisibleForTesting
static final HttpUrlConnectionFactory DEFAULT_CONNECTION_FACTORY =
new DefaultHttpUrlConnectionFactory();
/** Returned when a connection error prevented us from receiving an http error. */
@VisibleForTesting static final int INVALID_STATUS_CODE = -1;
private final GlideUrl glideUrl;
private final int timeout;
private final HttpUrlConnectionFactory connectionFactory;
private HttpURLConnection urlConnection;
private InputStream stream;
private volatile boolean isCancelled;
public HttpUrlFetcher(GlideUrl glideUrl, int timeout) {
this(glideUrl, timeout, DEFAULT_CONNECTION_FACTORY);
}
@VisibleForTesting
HttpUrlFetcher(GlideUrl glideUrl, int timeout, HttpUrlConnectionFactory connectionFactory) {
this.glideUrl = glideUrl;
this.timeout = timeout;
this.connectionFactory = connectionFactory;
}
@Override
public void loadData(
@NonNull Priority priority, @NonNull DataCallback<? super InputStream> callback) {
long startTime = LogTime.getLogTime();
try {
InputStream result = loadDataWithRedirects(glideUrl.toURL(), 0, null, glideUrl.getHeaders());
callback.onDataReady(result);
} catch (IOException e) {
if (Log.isLoggable(TAG, Log.DEBUG)) {
Log.d(TAG, "Failed to load data for url", e);
}
callback.onLoadFailed(e);
} finally {
if (Log.isLoggable(TAG, Log.VERBOSE)) {
Log.v(TAG, "Finished http url fetcher fetch in " + LogTime.getElapsedMillis(startTime));
}
}
}
private InputStream loadDataWithRedirects(
URL url, int redirects, URL lastUrl, Map<String, String> headers) throws HttpException {
if (redirects >= MAXIMUM_REDIRECTS) {
throw new HttpException(
"Too many (> " + MAXIMUM_REDIRECTS + ") redirects!", INVALID_STATUS_CODE);
} else {
// Comparing the URLs using .equals performs additional network I/O and is generally broken.
// See http://michaelscharf.blogspot.com/2006/11/javaneturlequals-and-hashcode-make.html.
try {
if (lastUrl != null && url.toURI().equals(lastUrl.toURI())) {
throw new HttpException("In re-direct loop", INVALID_STATUS_CODE);
}
} catch (URISyntaxException e) {
// Do nothing, this is best effort.
}
}
urlConnection = buildAndConfigureConnection(url, headers);
try {
// Connect explicitly to avoid errors in decoders if connection fails.
urlConnection.connect();
// Set the stream so that it's closed in cleanup to avoid resource leaks. See #2352.
stream = urlConnection.getInputStream();
} catch (IOException e) {
throw new HttpException(
"Failed to connect or obtain data", getHttpStatusCodeOrInvalid(urlConnection), e);
}
if (isCancelled) {
return null;
}
final int statusCode = getHttpStatusCodeOrInvalid(urlConnection);
if (isHttpOk(statusCode)) {
return getStreamForSuccessfulRequest(urlConnection);
} else if (isHttpRedirect(statusCode)) {
String redirectUrlString = urlConnection.getHeaderField(REDIRECT_HEADER_FIELD);
if (TextUtils.isEmpty(redirectUrlString)) {
throw new HttpException("Received empty or null redirect url", statusCode);
}
URL redirectUrl;
try {
redirectUrl = new URL(url, redirectUrlString);
} catch (MalformedURLException e) {
throw new HttpException("Bad redirect url: " + redirectUrlString, statusCode, e);
}
// Closing the stream specifically is required to avoid leaking ResponseBodys in addition
// to disconnecting the url connection below. See #2352.
cleanup();
return loadDataWithRedirects(redirectUrl, redirects + 1, url, headers);
} else if (statusCode == INVALID_STATUS_CODE) {
throw new HttpException(statusCode);
} else {
try {
throw new HttpException(urlConnection.getResponseMessage(), statusCode);
} catch (IOException e) {
throw new HttpException("Failed to get a response message", statusCode, e);
}
}
}
private static int getHttpStatusCodeOrInvalid(HttpURLConnection urlConnection) {
try {
return urlConnection.getResponseCode();
} catch (IOException e) {
if (Log.isLoggable(TAG, Log.DEBUG)) {
Log.d(TAG, "Failed to get a response code", e);
}
}
return INVALID_STATUS_CODE;
}
private HttpURLConnection buildAndConfigureConnection(URL url, Map<String, String> headers)
throws HttpException {
HttpURLConnection urlConnection;
try {
urlConnection = connectionFactory.build(url);
} catch (IOException e) {
throw new HttpException("URL.openConnection threw", /* statusCode= */ 0, e);
}
for (Map.Entry<String, String> headerEntry : headers.entrySet()) {
urlConnection.addRequestProperty(headerEntry.getKey(), headerEntry.getValue());
}
urlConnection.setConnectTimeout(timeout);
urlConnection.setReadTimeout(timeout);
urlConnection.setUseCaches(false);
urlConnection.setDoInput(true);
// Stop the urlConnection instance of HttpUrlConnection from following redirects so that
// redirects will be handled by recursive calls to this method, loadDataWithRedirects.
urlConnection.setInstanceFollowRedirects(false);
return urlConnection;
}
// Referencing constants is less clear than a simple static method.
private static boolean isHttpOk(int statusCode) {
return statusCode / 100 == 2;
}
// Referencing constants is less clear than a simple static method.
private static boolean isHttpRedirect(int statusCode) {
return statusCode / 100 == 3;
}
private InputStream getStreamForSuccessfulRequest(HttpURLConnection urlConnection)
throws HttpException {
try {
if (TextUtils.isEmpty(urlConnection.getContentEncoding())) {
int contentLength = urlConnection.getContentLength();
stream = ContentLengthInputStream.obtain(urlConnection.getInputStream(), contentLength);
} else {
if (Log.isLoggable(TAG, Log.DEBUG)) {
Log.d(TAG, "Got non empty content encoding: " + urlConnection.getContentEncoding());
}
stream = urlConnection.getInputStream();
}
} catch (IOException e) {
throw new HttpException(
"Failed to obtain InputStream", getHttpStatusCodeOrInvalid(urlConnection), e);
}
return stream;
}
@Override
public void cleanup() {
if (stream != null) {
try {
stream.close();
} catch (IOException e) {
// Ignore
}
}
if (urlConnection != null) {
urlConnection.disconnect();
}
urlConnection = null;
}
@Override
public void cancel() {
// TODO: we should consider disconnecting the url connection here, but we can't do so
// directly because cancel is often called on the main thread.
isCancelled = true;
}
@NonNull
@Override
public Class<InputStream> getDataClass() {
return InputStream.class;
}
@NonNull
@Override
public DataSource getDataSource() {
return DataSource.REMOTE;
}
| HttpUrlFetcher |
java | apache__avro | lang/java/avro/src/main/java/org/apache/avro/generic/GenericDatumWriter.java | {
"start": 1889,
"end": 4970
} | class ____<D> implements DatumWriter<D> {
private final GenericData data;
private Schema root;
public GenericDatumWriter() {
this(GenericData.get());
}
protected GenericDatumWriter(GenericData data) {
this.data = data;
}
public GenericDatumWriter(Schema root) {
this();
setSchema(root);
}
public GenericDatumWriter(Schema root, GenericData data) {
this(data);
setSchema(root);
}
/** Return the {@link GenericData} implementation. */
public GenericData getData() {
return data;
}
public void setSchema(Schema root) {
this.root = root;
}
public void write(D datum, Encoder out) throws IOException {
Objects.requireNonNull(out, "Encoder cannot be null");
try {
write(root, datum, out);
} catch (TracingNullPointException | TracingClassCastException | TracingAvroTypeException e) {
throw e.summarize(root);
}
}
/** Called to write data. */
protected void write(Schema schema, Object datum, Encoder out) throws IOException {
LogicalType logicalType = schema.getLogicalType();
if (datum != null && logicalType != null) {
Conversion<?> conversion = getData().getConversionByClass(datum.getClass(), logicalType);
writeWithoutConversion(schema, convert(schema, logicalType, conversion, datum), out);
} else {
writeWithoutConversion(schema, datum, out);
}
}
/**
* Convert a high level representation of a logical type (such as a BigDecimal)
* to its underlying representation object (such as a ByteBuffer).
*
* @throws IllegalArgumentException if a null schema or logicalType is passed in
* while datum and conversion are not null.
* Please be noticed that the exception type
* has changed. With version 1.8.0 and earlier,
* in above circumstance, the exception thrown
* out depends on the implementation of
* conversion (most likely a
* NullPointerException). Now, an
* IllegalArgumentException will be thrown out
* instead.
*/
protected <T> Object convert(Schema schema, LogicalType logicalType, Conversion<T> conversion, Object datum) {
try {
if (conversion == null) {
return datum;
} else {
return Conversions.convertToRawType(datum, schema, logicalType, conversion);
}
} catch (AvroRuntimeException e) {
Throwable cause = e.getCause();
if (cause != null && cause.getClass() == ClassCastException.class) {
// This is to keep backwards compatibility. The convert function here used to
// throw CCE. After being moved to Conversions, it throws AvroRuntimeException
// instead. To keep as much same behaviour as before, this function checks if
// the cause is a CCE. If yes, rethrow it in case any child | GenericDatumWriter |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/context/aot/ReflectiveProcessorAotContributionBuilder.java | {
"start": 2112,
"end": 2427
} | class ____ {
private static final ReflectiveRuntimeHintsRegistrar registrar = new ReflectiveRuntimeHintsRegistrar();
private final Set<Class<?>> classes = new LinkedHashSet<>();
/**
* Process the given classes by checking the ones that use {@link Reflective}.
* <p>A | ReflectiveProcessorAotContributionBuilder |
java | apache__logging-log4j2 | log4j-layout-template-json-test/src/test/java/org/apache/logging/log4j/layout/template/json/resolver/TimestampResolverTest.java | {
"start": 1432,
"end": 2838
} | class ____ {
/**
* Reproduces <a href="https://issues.apache.org/jira/browse/LOG4J2-3183">LOG4J2-3183</a>.
*/
@Test
void epoch_nanos_should_not_overlap() {
// Create the template.
final Object eventTemplate = asMap("$resolver", "timestamp", "epoch", asMap("unit", "nanos"));
// Create the logging context.
withContextFromTemplate("TimestampResolverTest", eventTemplate, (loggerContext, appender) -> {
// Log some.
final Logger logger = loggerContext.getLogger(TimestampResolverTest.class);
final int logEventCount = 5;
for (int logEventIndex = 0; logEventIndex < logEventCount; logEventIndex++) {
if (logEventIndex > 0) {
uncheckedSleep(1);
}
logger.info("message #{}", logEventIndex);
}
// Read logged events.
final List<Long> logEvents = appender.getData().stream()
.map(jsonBytes -> {
final String json = new String(jsonBytes, StandardCharsets.UTF_8);
return (long) readJson(json);
})
.collect(Collectors.toList());
// Verify logged events.
Assertions.assertThat(logEvents).hasSize(logEventCount).doesNotHaveDuplicates();
});
}
}
| TimestampResolverTest |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/JSONScannerTest_scanFieldBoolean.java | {
"start": 139,
"end": 5415
} | class ____ extends TestCase {
public void test_true() throws Exception {
String text = "{\"value\":true}";
VO obj = JSON.parseObject(text, VO.class);
Assert.assertEquals(true, obj.getValue());
}
public void test_false() throws Exception {
String text = "{\"value\":false}";
VO obj = JSON.parseObject(text, VO.class);
Assert.assertEquals(false, obj.getValue());
}
public void test_1() throws Exception {
String text = "{\"value\":\"true\"}";
VO obj = JSON.parseObject(text, VO.class);
Assert.assertEquals(true, obj.getValue());
}
public void test_2() throws Exception {
String text = "{\"value\":\"false\"}";
VO obj = JSON.parseObject(text, VO.class);
Assert.assertEquals(false, obj.getValue());
}
public void test_3() throws Exception {
String text = "{\"value\":\"1\"}";
VO obj = JSON.parseObject(text, VO.class);
Assert.assertEquals(true, obj.getValue());
}
public void test_5() throws Exception {
String text = "{\"value\":false}";
VO obj = JSON.parseObject(text, VO.class);
Assert.assertEquals(false, obj.getValue());
}
public void test_error_0() {
Exception error = null;
try {
String text = "{\"value\":true\\n\"";
JSON.parseObject(text, VO.class);
} catch (Exception ex) {
error = ex;
}
Assert.assertNotNull(error);
}
public void test_error_1() {
Exception error = null;
try {
String text = "{\"value\":a";
JSON.parseObject(text, VO.class);
} catch (Exception ex) {
error = ex;
}
Assert.assertNotNull(error);
}
public void test_error_2() {
Exception error = null;
try {
String text = "{\"value\":teue}";
JSON.parseObject(text, VO.class);
} catch (Exception ex) {
error = ex;
}
Assert.assertNotNull(error);
}
public void test_error_3() {
Exception error = null;
try {
String text = "{\"value\":tree}";
JSON.parseObject(text, VO.class);
} catch (Exception ex) {
error = ex;
}
Assert.assertNotNull(error);
}
public void test_error_4() {
Exception error = null;
try {
String text = "{\"value\":truu}";
JSON.parseObject(text, VO.class);
} catch (Exception ex) {
error = ex;
}
Assert.assertNotNull(error);
}
public void test_error_5() {
Exception error = null;
try {
String text = "{\"value\":fflse}";
JSON.parseObject(text, VO.class);
} catch (Exception ex) {
error = ex;
}
Assert.assertNotNull(error);
}
public void test_error_6() {
Exception error = null;
try {
String text = "{\"value\":fasse}";
JSON.parseObject(text, VO.class);
} catch (Exception ex) {
error = ex;
}
Assert.assertNotNull(error);
}
public void test_error_7() {
Exception error = null;
try {
String text = "{\"value\":falee}";
JSON.parseObject(text, VO.class);
} catch (Exception ex) {
error = ex;
}
Assert.assertNotNull(error);
}
public void test_error_8() {
Exception error = null;
try {
String text = "{\"value\":falss}";
JSON.parseObject(text, VO.class);
} catch (Exception ex) {
error = ex;
}
Assert.assertNotNull(error);
}
public void test_error_9() {
Exception error = null;
try {
String text = "{\"value\":false]";
JSON.parseObject(text, VO.class);
} catch (Exception ex) {
error = ex;
}
Assert.assertNotNull(error);
}
public void test_error_10() {
Exception error = null;
try {
String text = "{\"value\":false}{";
JSON.parseObject(text, VO.class);
} catch (Exception ex) {
error = ex;
}
Assert.assertNotNull(error);
}
public void test_error_11() {
Exception error = null;
try {
String text = "{\"value\":false}}";
JSON.parseObject(text, VO.class);
} catch (Exception ex) {
error = ex;
}
Assert.assertNotNull(error);
}
public void test_error_12() {
Exception error = null;
try {
String text = "{\"value\":false}]";
JSON.parseObject(text, VO.class);
} catch (Exception ex) {
error = ex;
}
Assert.assertNotNull(error);
}
public void test_error_13() {
Exception error = null;
try {
String text = "{\"value\":false},";
JSON.parseObject(text, VO.class);
} catch (Exception ex) {
error = ex;
}
Assert.assertNotNull(error);
}
public static | JSONScannerTest_scanFieldBoolean |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/testutil/DatabindTestUtil.java | {
"start": 8559,
"end": 8710
} | class ____ {
public int i;
public IntWrapper() { }
public IntWrapper(int value) { i = value; }
}
public static | IntWrapper |
java | spring-projects__spring-data-jpa | spring-data-jpa/src/main/java/org/springframework/data/jpa/repository/query/DeclaredQueries.java | {
"start": 734,
"end": 865
} | class ____ {@code DeclaredQuery} implementations.
*
* @author Christoph Strobl
* @author Mark Paluch
* @since 4.0
*/
| encapsulating |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.