language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | spring-projects__spring-framework | spring-messaging/src/main/java/org/springframework/messaging/simp/broker/OrderedMessageChannelDecorator.java | {
"start": 5855,
"end": 6410
} | class ____ implements Runnable {
private final Message<?> message;
private final @Nullable AtomicInteger handledCount;
private PostHandleTask(Message<?> message) {
this.message = message;
this.handledCount = (subscriberCount > 1 ? new AtomicInteger(0) : null);
}
@Override
public void run() {
if (this.handledCount == null || this.handledCount.addAndGet(1) == subscriberCount) {
if (OrderedMessageChannelDecorator.this.removeMessage(this.message)) {
sendNextMessage();
}
}
}
}
private static final | PostHandleTask |
java | alibaba__nacos | test/core-test/src/test/java/com/alibaba/nacos/test/common/NacosAsyncRestTemplateCoreITCase.java | {
"start": 2116,
"end": 2657
} | class ____ integration tests for NacosAsyncRestTemplate. These tests cover various HTTP methods such as
* POST, GET, PUT, and DELETE to ensure the correct functioning of asynchronous HTTP requests in the context of Nacos.
*
* @author mai.jh
*/
@SuppressWarnings("checkstyle:AbbreviationAsWordInName")
@TestMethodOrder(MethodName.class)
@ExtendWith(SpringExtension.class)
@SpringBootTest(classes = Nacos.class, properties = {
"server.servlet.context-path=/nacos"}, webEnvironment = SpringBootTest.WebEnvironment.DEFINED_PORT)
| provides |
java | apache__kafka | clients/src/test/java/org/apache/kafka/clients/consumer/internals/BaseHeartbeatThreadTest.java | {
"start": 1202,
"end": 2516
} | class ____ {
@Test
public void testIsEnabled() {
try (BaseHeartbeatThread baseHeartbeatThread = new BaseHeartbeatThread("test", true)) {
assertFalse(baseHeartbeatThread.isEnabled());
baseHeartbeatThread.enable();
assertTrue(baseHeartbeatThread.isEnabled());
baseHeartbeatThread.disable();
assertFalse(baseHeartbeatThread.isEnabled());
}
}
@Test
public void testIsFailed() {
try (BaseHeartbeatThread baseHeartbeatThread = new BaseHeartbeatThread("test", true)) {
assertFalse(baseHeartbeatThread.isFailed());
assertNull(baseHeartbeatThread.failureCause());
FencedInstanceIdException exception = new FencedInstanceIdException("test");
baseHeartbeatThread.setFailureCause(exception);
assertTrue(baseHeartbeatThread.isFailed());
assertEquals(exception, baseHeartbeatThread.failureCause());
}
}
@Test
public void testIsClosed() {
try (BaseHeartbeatThread baseHeartbeatThread = new BaseHeartbeatThread("test", true)) {
assertFalse(baseHeartbeatThread.isClosed());
baseHeartbeatThread.close();
assertTrue(baseHeartbeatThread.isClosed());
}
}
}
| BaseHeartbeatThreadTest |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/tools/picocli/CommandLine.java | {
"start": 81033,
"end": 81407
} | interface ____ an abstract class. For example, a field can
* be declared to have type {@code java.lang.Number}, and annotating {@code @Parameters(type=Short.class)}
* ensures that the positional parameter value is converted to a {@code Short} before setting the field value.
* </p><p>
* For array fields whose <em>component</em> type is an | or |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/selection/primitives/PrimitiveMapper.java | {
"start": 244,
"end": 630
} | class ____ {
private static boolean calledUpon;
public MyLong fromPrimitiveInt(int size) {
calledUpon = true;
return new MyLong( (long) size );
}
public static boolean isCalledUpon() {
return calledUpon;
}
public static void setCalledUpon( boolean calledUpon ) {
PrimitiveMapper.calledUpon = calledUpon;
}
}
| PrimitiveMapper |
java | grpc__grpc-java | examples/example-dualstack/src/main/java/io/grpc/examples/dualstack/ExampleDualStackNameResolverProvider.java | {
"start": 734,
"end": 1357
} | class ____ extends NameResolverProvider {
public static final String exampleScheme = "example";
@Override
public NameResolver newNameResolver(URI targetUri, NameResolver.Args args) {
return new ExampleDualStackNameResolver(targetUri);
}
@Override
protected boolean isAvailable() {
return true;
}
@Override
protected int priority() {
return 5;
}
@Override
// gRPC choose the first NameResolverProvider that supports the target URI scheme.
public String getDefaultScheme() {
return exampleScheme;
}
}
| ExampleDualStackNameResolverProvider |
java | lettuce-io__lettuce-core | src/test/java/io/lettuce/core/ConnectionFutureUnitTests.java | {
"start": 452,
"end": 3931
} | class ____ {
@Test
void shouldComposeTransformToError() {
CompletableFuture<String> foo = new CompletableFuture<>();
ConnectionFuture<Object> transformed = ConnectionFuture.from(null, foo).thenCompose((s, t) -> {
if (t != null) {
return Futures.failed(new IllegalStateException(t));
}
return Futures.failed(new IllegalStateException());
});
foo.complete("foo");
assertThat(transformed.toCompletableFuture()).isDone();
assertThat(transformed.toCompletableFuture()).isCompletedExceptionally();
assertThatThrownBy(transformed::join).hasRootCauseInstanceOf(IllegalStateException.class);
}
@Test
void composeTransformShouldFailWhileTransformation() {
CompletableFuture<String> foo = new CompletableFuture<>();
ConnectionFuture<Object> transformed = ConnectionFuture.from(null, foo).thenCompose((s, t) -> {
throw new IllegalStateException();
});
foo.complete("foo");
assertThat(transformed.toCompletableFuture()).isDone();
assertThat(transformed.toCompletableFuture()).isCompletedExceptionally();
assertThatThrownBy(transformed::join).hasRootCauseInstanceOf(IllegalStateException.class);
}
@Test
void composeTransformShouldFailWhileTransformationRetainOriginalException() {
CompletableFuture<String> foo = new CompletableFuture<>();
ConnectionFuture<Object> transformed = ConnectionFuture.from(null, foo).thenCompose((s, t) -> {
throw new IllegalStateException();
});
Throwable t = new Throwable();
foo.completeExceptionally(t);
assertThat(transformed.toCompletableFuture()).isDone();
assertThat(transformed.toCompletableFuture()).isCompletedExceptionally();
try {
transformed.join();
} catch (CompletionException e) {
assertThat(e).hasRootCauseInstanceOf(IllegalStateException.class);
assertThat(e.getCause()).hasSuppressedException(t);
}
}
@Test
void shouldComposeWithErrorFlow() {
CompletableFuture<String> foo = new CompletableFuture<>();
CompletableFuture<String> exceptional = new CompletableFuture<>();
ConnectionFuture<Object> transformed1 = ConnectionFuture.from(null, foo).thenCompose((s, t) -> {
if (t != null) {
return Futures.failed(new IllegalStateException(t));
}
return CompletableFuture.completedFuture(s);
});
ConnectionFuture<Object> transformed2 = ConnectionFuture.from(null, exceptional).thenCompose((s, t) -> {
if (t != null) {
return Futures.failed(new IllegalStateException(t));
}
return CompletableFuture.completedFuture(s);
});
foo.complete("foo");
exceptional.completeExceptionally(new IllegalArgumentException("foo"));
assertThat(transformed1.toCompletableFuture()).isDone();
assertThat(transformed1.toCompletableFuture()).isCompletedWithValue("foo");
assertThat(transformed2.toCompletableFuture()).isDone();
assertThat(transformed2.toCompletableFuture()).isCompletedExceptionally();
assertThatThrownBy(transformed2::join).hasCauseInstanceOf(IllegalStateException.class)
.hasRootCauseInstanceOf(IllegalArgumentException.class);
}
}
| ConnectionFutureUnitTests |
java | elastic__elasticsearch | x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/response/GoogleVertexAiErrorResponseEntityTests.java | {
"start": 733,
"end": 2572
} | class ____ extends ESTestCase {
private static HttpResult getMockResult(String jsonString) {
var response = mock(HttpResponse.class);
return new HttpResult(response, Strings.toUTF8Bytes(jsonString));
}
public void testErrorResponse_ExtractsError() {
var result = getMockResult("""
{
"error": {
"code": 400,
"message": "error message",
"status": "INVALID_ARGUMENT",
"details": [
{
"@type": "type.googleapis.com/google.rpc.BadRequest",
"fieldViolations": [
{
"description": "Invalid JSON payload received. Unknown name \\"abc\\": Cannot find field."
}
]
}
]
}
}
""");
var error = GoogleVertexAiErrorResponseEntity.fromResponse(result);
assertNotNull(error);
assertThat(error.getErrorMessage(), is("error message"));
}
public void testErrorResponse_ReturnsUndefinedObjectIfNoError() {
var result = getMockResult("""
{
"foo": "bar"
}
""");
var error = GoogleVertexAiErrorResponseEntity.fromResponse(result);
assertThat(error, sameInstance(ErrorResponse.UNDEFINED_ERROR));
}
public void testErrorResponse_ReturnsUndefinedObjectIfNotJson() {
var result = getMockResult("error message");
var error = GoogleVertexAiErrorResponseEntity.fromResponse(result);
assertThat(error, sameInstance(ErrorResponse.UNDEFINED_ERROR));
}
}
| GoogleVertexAiErrorResponseEntityTests |
java | bumptech__glide | library/src/main/java/com/bumptech/glide/request/transition/ViewPropertyTransition.java | {
"start": 1406,
"end": 1524
} | interface ____ allows an animation to be applied on or started from an {@link
* android.view.View}.
*/
public | that |
java | spring-projects__spring-boot | module/spring-boot-micrometer-metrics/src/main/java/org/springframework/boot/micrometer/metrics/docker/compose/otlp/OpenTelemetryMetricsDockerComposeConnectionDetailsFactory.java | {
"start": 2011,
"end": 2514
} | class ____ extends DockerComposeConnectionDetails
implements OtlpMetricsConnectionDetails {
private final String host;
private final int port;
private OpenTelemetryMetricsDockerComposeConnectionDetails(RunningService source) {
super(source);
this.host = source.host();
this.port = source.ports().get(OTLP_PORT);
}
@Override
public String getUrl() {
return "http://%s:%d/v1/metrics".formatted(this.host, this.port);
}
}
}
| OpenTelemetryMetricsDockerComposeConnectionDetails |
java | elastic__elasticsearch | qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/AbstractHttpSmokeTestIT.java | {
"start": 658,
"end": 946
} | class ____ extends ESRestTestCase {
@ClassRule
public static ElasticsearchCluster cluster = ElasticsearchCluster.local().module("rest-root").build();
@Override
protected String getTestRestCluster() {
return cluster.getHttpAddresses();
}
}
| AbstractHttpSmokeTestIT |
java | google__jimfs | jimfs/src/main/java/com/google/common/jimfs/Handler.java | {
"start": 1590,
"end": 2699
} | class ____ the {@code jimfs}
* (the name of the protocol) package of {@code com.google.common}.
*
* @throws SecurityException if the system property that needs to be set to register this handler
* can't be read or written.
*/
static void register() {
register(Handler.class);
}
/** Generic method that would allow registration of any properly placed {@code Handler} class. */
static void register(Class<? extends URLStreamHandler> handlerClass) {
checkArgument("Handler".equals(handlerClass.getSimpleName()));
String pkg = handlerClass.getPackage().getName();
int lastDot = pkg.lastIndexOf('.');
checkArgument(lastDot > 0, "package for Handler (%s) must have a parent package", pkg);
String parentPackage = pkg.substring(0, lastDot);
String packages = System.getProperty(JAVA_PROTOCOL_HANDLER_PACKAGES);
if (packages == null) {
packages = parentPackage;
} else {
packages += "|" + parentPackage;
}
System.setProperty(JAVA_PROTOCOL_HANDLER_PACKAGES, packages);
}
/** @deprecated Not intended to be called directly; this | in |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java | {
"start": 9664,
"end": 10384
} | class ____ extends TestSecurityMockRM {
public MyMockRM(Configuration conf, RMStateStore store) {
super(conf, store);
}
@Override
protected RMSecretManagerService createRMSecretManagerService() {
return new RMSecretManagerService(testConf, rmContext) {
@Override
protected RMDelegationTokenSecretManager
createRMDelegationTokenSecretManager(Configuration conf,
RMContext rmContext) {
// KeyUpdateInterval-> 1 seconds
// TokenMaxLifetime-> 2 seconds.
return new TestRMDelegationTokenSecretManager(1000, 1000, 2000, 1000,
rmContext);
}
};
}
}
public | MyMockRM |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/engine/jdbc/env/spi/SchemaNameResolver.java | {
"start": 679,
"end": 1046
} | interface ____ {
/**
* Given a JDBC {@link Connection}, resolve the name of the schema (if one) to which it connects.
*
* @param connection The JDBC connection
* @param dialect The {@link Dialect}
*
* @return The name of the schema; may be null.
*/
String resolveSchemaName(Connection connection, Dialect dialect) throws SQLException;
}
| SchemaNameResolver |
java | quarkusio__quarkus | integration-tests/hibernate-orm-panache/src/main/java/io/quarkus/it/panache/defaultpu/TestEndpoint.java | {
"start": 76649,
"end": 97161
} | class ____ multiple constructors but no parameters with @ProjectedFieldName annotation
SemanticException semanticException = Assertions.assertThrowsExactly(SemanticException.class,
() -> Person.findAll().project(PersonNameDoubleConstructor.class).firstResult());
Assertions.assertEquals("Could not interpret path expression 'fakeParameter'", semanticException.getMessage());
semanticException = Assertions.assertThrowsExactly(SemanticException.class,
() -> Person.find("name", "2").project(PersonNameDoubleConstructor.class).firstResult());
Assertions.assertEquals("Could not interpret path expression 'fakeParameter'", semanticException.getMessage());
semanticException = Assertions.assertThrowsExactly(SemanticException.class,
() -> Person.find("name = ?1", "2").project(PersonNameDoubleConstructor.class).firstResult());
Assertions.assertEquals("Could not interpret path expression 'fakeParameter'", semanticException.getMessage());
person = Person.find(String.format(
"select uniqueName, name%sfrom io.quarkus.it.panache.defaultpu.Person%swhere name = ?1",
LINE_SEPARATOR, LINE_SEPARATOR), "2")
.project(PersonNameDoubleConstructor.class)
.firstResult();
Assertions.assertEquals("2", person.name);
semanticException = Assertions.assertThrowsExactly(SemanticException.class, () -> Person
.find("name = :name", Parameters.with("name", "2")).project(PersonNameDoubleConstructor.class).firstResult());
Assertions.assertEquals("Could not interpret path expression 'fakeParameter'", semanticException.getMessage());
semanticException = Assertions.assertThrowsExactly(SemanticException.class,
() -> Person.find("#Person.getByName", Parameters.with("name", "2")).project(PersonNameDoubleConstructor.class)
.firstResult());
Assertions.assertEquals("Could not interpret path expression 'fakeParameter'", semanticException.getMessage());
final PanacheQuery<? extends PersonName> failQuery = Person.findAll().project(PersonNameDoubleConstructor.class).page(0,
2);
semanticException = Assertions.assertThrowsExactly(SemanticException.class, () -> failQuery.list().size());
Assertions.assertEquals("Could not interpret path expression 'fakeParameter'", semanticException.getMessage());
failQuery.nextPage();
semanticException = Assertions.assertThrowsExactly(SemanticException.class, () -> failQuery.list().size());
Assertions.assertEquals("Could not interpret path expression 'fakeParameter'", semanticException.getMessage());
semanticException = Assertions.assertThrowsExactly(SemanticException.class,
() -> Person.findAll().project(PersonNameDoubleConstructor.class).count());
Assertions.assertEquals("Could not interpret path expression 'fakeParameter'", semanticException.getMessage());
return "OK";
}
@GET
@Path("model3")
@Transactional
public String testModel3() {
Assertions.assertEquals(1, Person.count());
Person person = Person.findAll().firstResult();
Assertions.assertEquals("2", person.name);
Dog.deleteAll();
Person.deleteAll();
Address.deleteAll();
Assertions.assertEquals(0, Person.count());
return "OK";
}
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
@GET
@Path("ignored-properties")
public Person ignoredProperties() throws NoSuchMethodException, SecurityException {
Person.class.getMethod("$$_hibernate_read_id");
Person.class.getMethod("$$_hibernate_read_name");
try {
Person.class.getMethod("$$_hibernate_read_persistent");
Assertions.fail();
} catch (NoSuchMethodException e) {
}
// no need to persist it, we can fake it
Person person = new Person();
person.id = 666l;
person.name = "Eddie";
person.status = Status.DECEASED;
return person;
}
@Inject
Bug5274EntityRepository bug5274EntityRepository;
@GET
@Path("5274")
@Transactional
public String testBug5274() {
bug5274EntityRepository.count();
return "OK";
}
@Inject
Bug5885EntityRepository bug5885EntityRepository;
@GET
@Path("5885")
@Transactional
public String testBug5885() {
bug5885EntityRepository.findById(1L);
return "OK";
}
@GET
@Path("testJaxbAnnotationTransfer")
public String testJaxbAnnotationTransfer() throws Exception {
// Test for fix to this bug: https://github.com/quarkusio/quarkus/issues/6021
// Ensure that any JAX-B annotations are properly moved to generated getters
Method m = JAXBEntity.class.getMethod("getNamedAnnotatedProp");
XmlAttribute anno = m.getAnnotation(XmlAttribute.class);
assertNotNull(anno);
assertEquals("Named", anno.name());
assertNull(m.getAnnotation(XmlTransient.class));
m = JAXBEntity.class.getMethod("getDefaultAnnotatedProp");
anno = m.getAnnotation(XmlAttribute.class);
assertNotNull(anno);
assertEquals("##default", anno.name());
assertNull(m.getAnnotation(XmlTransient.class));
m = JAXBEntity.class.getMethod("getUnAnnotatedProp");
assertNull(m.getAnnotation(XmlAttribute.class));
assertNull(m.getAnnotation(XmlTransient.class));
m = JAXBEntity.class.getMethod("getTransientProp");
assertNull(m.getAnnotation(XmlAttribute.class));
assertNotNull(m.getAnnotation(XmlTransient.class));
m = JAXBEntity.class.getMethod("getArrayAnnotatedProp");
assertNull(m.getAnnotation(XmlTransient.class));
XmlElements elementsAnno = m.getAnnotation(XmlElements.class);
assertNotNull(elementsAnno);
assertNotNull(elementsAnno.value());
assertEquals(2, elementsAnno.value().length);
assertEquals("array1", elementsAnno.value()[0].name());
assertEquals("array2", elementsAnno.value()[1].name());
// Ensure that all original fields were labeled @XmlTransient and had their original JAX-B annotations removed
ensureFieldSanitized("namedAnnotatedProp");
ensureFieldSanitized("transientProp");
ensureFieldSanitized("defaultAnnotatedProp");
ensureFieldSanitized("unAnnotatedProp");
ensureFieldSanitized("arrayAnnotatedProp");
return "OK";
}
private void ensureFieldSanitized(String fieldName) throws Exception {
Field f = JAXBEntity.class.getDeclaredField(fieldName);
assertNull(f.getAnnotation(XmlAttribute.class));
assertNotNull(f.getAnnotation(XmlTransient.class));
}
@GET
@Path("composite")
@Transactional
public String testCompositeKey() {
ObjectWithCompositeId obj = new ObjectWithCompositeId();
obj.part1 = "part1";
obj.part2 = "part2";
obj.description = "description";
obj.persist();
ObjectWithCompositeId.ObjectKey key = new ObjectWithCompositeId.ObjectKey("part1", "part2");
ObjectWithCompositeId result = ObjectWithCompositeId.findById(key);
assertNotNull(result);
boolean deleted = ObjectWithCompositeId.deleteById(key);
assertTrue(deleted);
ObjectWithCompositeId.ObjectKey notExistingKey = new ObjectWithCompositeId.ObjectKey("notexist1", "notexist2");
deleted = ObjectWithCompositeId.deleteById(key);
assertFalse(deleted);
ObjectWithEmbeddableId.ObjectKey embeddedKey = new ObjectWithEmbeddableId.ObjectKey("part1", "part2");
ObjectWithEmbeddableId embeddable = new ObjectWithEmbeddableId();
embeddable.key = embeddedKey;
embeddable.description = "description";
embeddable.persist();
ObjectWithEmbeddableId embeddableResult = ObjectWithEmbeddableId.findById(embeddedKey);
assertNotNull(embeddableResult);
deleted = ObjectWithEmbeddableId.deleteById(embeddedKey);
assertTrue(deleted);
ObjectWithEmbeddableId.ObjectKey notExistingEmbeddedKey = new ObjectWithEmbeddableId.ObjectKey("notexist1",
"notexist2");
deleted = ObjectWithEmbeddableId.deleteById(embeddedKey);
assertFalse(deleted);
return "OK";
}
@GET
@Path("7721")
@Transactional
public String testBug7721() {
Bug7721Entity entity = new Bug7721Entity();
entity.persist();
entity.delete();
return "OK";
}
@GET
@Path("8254")
@Transactional
public String testBug8254() {
CatOwner owner = new CatOwner("8254");
owner.persist();
new Cat("Cat 1", owner).persist();
new Cat("Cat 2", owner).persist();
new Cat("Cat 3", owner).persist();
// This used to fail with an invalid query "SELECT COUNT(*) SELECT DISTINCT cat.owner FROM Cat cat WHERE cat.owner = ?1"
// Should now result in a valid query "SELECT COUNT(DISTINCT cat.owner) FROM Cat cat WHERE cat.owner = ?1"
assertEquals(1L, CatOwner.find("SELECT DISTINCT cat.owner FROM Cat cat WHERE cat.owner = ?1", owner).count());
// This used to fail with an invalid query "SELECT COUNT(*) SELECT cat.owner FROM Cat cat WHERE cat.owner = ?1"
// Should now result in a valid query "SELECT COUNT(cat.owner) FROM Cat cat WHERE cat.owner = ?1"
assertEquals(3L, CatOwner.find("SELECT cat.owner FROM Cat cat WHERE cat.owner = ?1", owner).count());
// This used to fail with an invalid query "SELECT COUNT(*) SELECT cat FROM Cat cat WHERE cat.owner = ?1"
// Should now result in a valid query "SELECT COUNT(cat) FROM Cat cat WHERE cat.owner = ?1"
assertEquals(3L, Cat.find("SELECT cat FROM Cat cat WHERE cat.owner = ?1", owner).count());
// This didn't use to fail. Make sure it still doesn't.
assertEquals(3L, Cat.find("FROM Cat WHERE owner = ?1", owner).count());
assertEquals(3L, Cat.find("owner", owner).count());
assertEquals(1L, CatOwner.find("name = ?1", "8254").count());
Cat.deleteAll();
CatOwner.deleteAll();
return "OK";
}
@GET
@Path("9025")
@Transactional
public String testBug9025() {
Fruit apple = new Fruit("apple", "red");
Fruit orange = new Fruit("orange", "orange");
Fruit banana = new Fruit("banana", "yellow");
Fruit.persist(apple, orange, banana);
PanacheQuery<Fruit> query = Fruit.find(
"select name, color from Fruit").page(Page.ofSize(1));
List<Fruit> results = query.list();
int pageCount = query.pageCount();
return "OK";
}
@GET
@Path("9036")
@Transactional
public String testBug9036() {
Person.deleteAll();
Person emptyPerson = new Person();
emptyPerson.persist();
Person deadPerson = new Person();
deadPerson.name = "Stef";
deadPerson.status = Status.DECEASED;
deadPerson.persist();
Person livePerson = new Person();
livePerson.name = "Stef";
livePerson.status = Status.LIVING;
livePerson.persist();
assertEquals(3, Person.count());
assertEquals(3, Person.listAll().size());
// should be filtered
PanacheQuery<Person> query = Person.findAll(Sort.by("id")).filter("Person.isAlive").filter("Person.hasName",
Parameters.with("name", "Stef"));
assertEquals(1, query.count());
assertEquals(1, query.list().size());
assertEquals(livePerson, query.list().get(0));
assertEquals(1, query.stream().count());
assertEquals(livePerson, query.firstResult());
assertEquals(livePerson, query.singleResult());
// these should be unaffected
assertEquals(3, Person.count());
assertEquals(3, Person.listAll().size());
Person.deleteAll();
return "OK";
}
@GET
@Path("testFilterWithCollections")
@Transactional
public String testFilterWithCollections() {
Person.deleteAll();
Person stefPerson = new Person();
stefPerson.name = "Stef";
stefPerson.persist();
Person josePerson = new Person();
josePerson.name = "Jose";
josePerson.persist();
Person victorPerson = new Person();
victorPerson.name = "Victor";
victorPerson.persist();
assertEquals(3, Person.count());
List<String> namesParameter = Arrays.asList("Jose", "Victor");
// Try with different collection types
List<Object> collectionsValues = Arrays.asList(
// Using directly a list:
namesParameter,
// Using another collection,
new HashSet<>(namesParameter),
// Using array
namesParameter.toArray(new String[namesParameter.size()]));
for (Object collectionValue : collectionsValues) {
// should be filtered
List<Person> found = Person.findAll(Sort.by("id")).filter("Person.name.in",
Parameters.with("names", collectionValue))
.list();
assertEquals(2, found.size(),
"Expected 2 entries when using parameter " + collectionValue.getClass());
assertTrue(found.stream().anyMatch(p -> p.name.contains("Jose")),
"Jose was not found when using parameter " + collectionValue.getClass());
assertTrue(found.stream().anyMatch(p -> p.name.contains("Victor")),
"Victor was not found when using parameter " + collectionValue.getClass());
}
Person.deleteAll();
return "OK";
}
@GET
@Path("testSortByNullPrecedence")
@Transactional
public String testSortByNullPrecedence() {
Person.deleteAll();
Person stefPerson = new Person();
stefPerson.name = "Stef";
stefPerson.persist();
Person josePerson = new Person();
josePerson.name = null;
josePerson.persist();
List<Person> persons = Person.findAll(Sort.by("name", Sort.NullPrecedence.NULLS_FIRST)).list();
assertEquals(josePerson.id, persons.get(0).id);
persons = Person.findAll(Sort.by("name", Sort.NullPrecedence.NULLS_LAST)).list();
assertEquals(josePerson.id, persons.get(persons.size() - 1).id);
Person.deleteAll();
return "OK";
}
@GET
@Path("testSortByEmbedded")
@Transactional
public String testSortByEmbedded() {
Person.deleteAll();
Person stefPerson = new Person();
stefPerson.name = "Stef";
stefPerson.description = new PersonDescription();
stefPerson.description.size = 0;
stefPerson.persist();
Person josePerson = new Person();
josePerson.name = "Jose";
josePerson.description = new PersonDescription();
josePerson.description.size = 100;
josePerson.persist();
List<Person> persons = Person.findAll(Sort.by("description.size", Sort.Direction.Descending)).list();
assertEquals(josePerson.id, persons.get(0).id);
persons = Person.findAll(Sort.by("description.size", Sort.Direction.Ascending)).list();
assertEquals(josePerson.id, persons.get(persons.size() - 1).id);
Person.deleteAll();
return "OK";
}
@GET
@Path("testEnhancement27184DeleteDetached")
// NOT @Transactional
public String testEnhancement27184DeleteDetached() {
QuarkusTransaction.begin();
Person.deleteAll();
QuarkusTransaction.commit();
QuarkusTransaction.begin();
Person person = new Person();
person.name = "Yoann";
person.persist();
QuarkusTransaction.commit();
QuarkusTransaction.begin();
assertTrue(Person.findByIdOptional(person.id).isPresent());
QuarkusTransaction.commit();
QuarkusTransaction.begin();
// 'person' is detached at this point,
// since the previous transaction and session were closed.
// We want .delete() to work regardless.
person.delete();
QuarkusTransaction.commit();
QuarkusTransaction.begin();
assertFalse(Person.findByIdOptional(person.id).isPresent());
QuarkusTransaction.commit();
QuarkusTransaction.begin();
Person.deleteAll();
QuarkusTransaction.commit();
return "OK";
}
@GET
@Path("26308")
@Transactional
public String testBug26308() {
testBug26308Query("from Person2 p left join fetch p.address");
// This cannot work, see https://docs.hibernate.org/orm/7.0/migration-guide/#create-query
//testBug26308Query("from Person2 p left join p.address");
// This must be used instead:
testBug26308Query("from Person2 this left join this.address");
testBug26308Query("select p from Person2 p left join fetch p.address");
testBug26308Query("select p from Person2 p left join p.address");
testBug26308Query("from Person2 p left join fetch p.address select p");
testBug26308Query("from Person2 p left join p.address select p");
return "OK";
}
private void testBug26308Query(String hql) {
PanacheQuery<Person> query = Person.find(hql);
Assertions.assertEquals(0, query.list().size());
Assertions.assertEquals(0, query.count());
}
@GET
@Path("36496")
@Transactional
public String testBug36496() {
PanacheQuery<Person> query = Person.find("WITH id AS (SELECT p.id AS pid FROM Person2 AS p) SELECT p FROM Person2 p");
Assertions.assertEquals(0, query.list().size());
Assertions.assertEquals(0, query.count());
Assertions.assertEquals(0,
Person.count("WITH id AS (SELECT p.id AS pid FROM Person2 AS p) SELECT count(*) FROM Person2 p"));
return "OK";
}
@GET
@Path("31117")
@Transactional
public String testBug31117() {
Person.deleteAll();
Person p = new Person();
p.name = "stef";
p.persist();
Assertions.assertEquals(1, Person.find("\r\n \n\nfrom\n Person2\nwhere\n\rname = ?1", "stef").list().size());
Assertions.assertEquals(1, Person.find("\r\n \n\nfrom\n Person2\nwhere\n\rname = ?1", "stef").count());
Assertions.assertEquals(1, Person.count("\r\n \n\nfrom\n Person2\nwhere\n\rname = ?1", "stef"));
Assertions.assertEquals(1, Person.update("\r\n \n\nupdate\n Person2\nset\n\rname='foo' where\n\rname = ?1", "stef"));
Assertions.assertEquals(1, Person.delete("\r\n \n\ndelete\nfrom\n Person2\nwhere\nname = ?1", "foo"));
return "OK";
}
@GET
@Path("42416")
public String testBug42416() {
createSomeEntities42416();
runSomeTests42416();
return "OK";
}
@Transactional
public void createSomeEntities42416() {
Fruit.deleteAll();
Fruit f = new Fruit("apple", "red");
f.persist();
Fruit f2 = new Fruit("apple", "yellow");
f2.persist();
}
@Transactional
public void runSomeTests42416() {
try {
Fruit.find("where name = ?1", "apple").singleResult();
} catch (jakarta.persistence.NonUniqueResultException e) {
// all good let's continue
}
try {
Fruit.find("where name = ?1", "not-a-fruit").singleResult();
} catch (jakarta.persistence.NoResultException e) {
// all good let's continue
}
try {
Fruit.find("where name = ?1", "apple").singleResultOptional();
} catch (jakarta.persistence.NonUniqueResultException e) {
// all good let's continue
}
}
@GET
@Path("40962")
@Transactional
public String testBug40962() {
// should not throw
Bug40962Entity.find("name = :name ORDER BY locate(location, :location) DESC",
Map.of("name", "Demo", "location", "something")).count();
Bug40962Entity.find("FROM Bug40962Entity WHERE name = :name ORDER BY locate(location, :location) DESC",
Map.of("name", "Demo", "location", "something")).count();
return "OK";
}
}
| with |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestWritableSerialization.java | {
"start": 1529,
"end": 3135
} | class ____ {
private static final Configuration conf = new Configuration();
@Test
public void testWritableSerialization() throws Exception {
Text before = new Text("test writable");
Text after = SerializationTestUtil.testSerialization(conf, before);
assertEquals(before, after);
}
@Test
public void testWritableConfigurable() throws Exception {
//set the configuration parameter
conf.set(CONF_TEST_KEY, CONF_TEST_VALUE);
//reuse TestGenericWritable inner classes to test
//writables that also implement Configurable.
FooGenericWritable generic = new FooGenericWritable();
generic.setConf(conf);
Baz baz = new Baz();
generic.set(baz);
Baz result = SerializationTestUtil.testSerialization(conf, baz);
assertEquals(baz, result);
assertNotNull(result.getConf());
}
@Test
@SuppressWarnings({"rawtypes", "unchecked"})
public void testWritableComparatorJavaSerialization() throws Exception {
Serialization ser = new JavaSerialization();
Serializer<TestWC> serializer = ser.getSerializer(TestWC.class);
DataOutputBuffer dob = new DataOutputBuffer();
serializer.open(dob);
TestWC orig = new TestWC(0);
serializer.serialize(orig);
serializer.close();
Deserializer<TestWC> deserializer = ser.getDeserializer(TestWC.class);
DataInputBuffer dib = new DataInputBuffer();
dib.reset(dob.getData(), 0, dob.getLength());
deserializer.open(dib);
TestWC deser = deserializer.deserialize(null);
deserializer.close();
assertEquals(orig, deser);
}
static | TestWritableSerialization |
java | google__dagger | dagger-runtime/main/java/dagger/Lazy.java | {
"start": 4011,
"end": 4638
} | class ____ {
* {@literal @Inject} LazyCounter counter1;
* {@literal @Inject} LazyCounter counter2;
*
* void print() {
* counter1.print();
* counter2.print();
* }
* }
* </code></pre>
*
* The output demonstrates that each {@code Lazy} works independently:
*
* <pre><code>
* printing...
* computing...
* 100
* 100
* 100
* printing...
* computing...
* 101
* 101
* 101
* </code></pre>
*
* Use {@link javax.inject.Singleton @Singleton} to share one instance among all clients, and {@code
* Lazy} for lazy computation in a single client.
*/
public | LazyCounters |
java | spring-projects__spring-boot | module/spring-boot-micrometer-metrics/src/test/java/org/springframework/boot/micrometer/metrics/autoconfigure/ssl/SslMeterBinderTests.java | {
"start": 1465,
"end": 5480
} | class ____ {
private static final Clock CLOCK = Clock.fixed(Instant.parse("2024-10-21T13:51:40Z"), ZoneId.of("UTC"));
@Test
void shouldRegisterChainExpiryMetrics() {
DefaultSslBundleRegistry sslBundleRegistry = createSslBundleRegistry("classpath:certificates/chains.p12");
MeterRegistry meterRegistry = bindToRegistry(sslBundleRegistry);
assertThat(Duration.ofSeconds(findExpiryGauge(meterRegistry, "ca", "419224ce190242b2c44069dd3c560192b3b669f3")))
.hasDays(1095);
assertThat(Duration
.ofSeconds(findExpiryGauge(meterRegistry, "intermediary", "60f79365fc46bf69149754d377680192b3b6bcf5")))
.hasDays(730);
assertThat(Duration
.ofSeconds(findExpiryGauge(meterRegistry, "server", "504c45129526ac050abb11459b1f0192b3b70fe9")))
.hasDays(365);
assertThat(Duration
.ofSeconds(findExpiryGauge(meterRegistry, "expired", "562bc5dcf4f26bb179abb13068180192b3bb53dc")))
.hasDays(-386);
assertThat(Duration
.ofSeconds(findExpiryGauge(meterRegistry, "not-yet-valid", "7df79335f274e2cfa7467fd5f9ce0192b3bcf4aa")))
.hasDays(36889);
}
@Test
void shouldWatchUpdatesForBundlesRegisteredAfterConstruction() {
DefaultSslBundleRegistry sslBundleRegistry = new DefaultSslBundleRegistry();
sslBundleRegistry.registerBundle("dummy",
SslBundle.of(createSslStoreBundle("classpath:certificates/chains2.p12")));
MeterRegistry meterRegistry = bindToRegistry(sslBundleRegistry);
sslBundleRegistry.registerBundle("test-0",
SslBundle.of(createSslStoreBundle("classpath:certificates/chains2.p12")));
sslBundleRegistry.updateBundle("test-0",
SslBundle.of(createSslStoreBundle("classpath:certificates/chains.p12")));
assertThat(Duration.ofSeconds(findExpiryGauge(meterRegistry, "ca", "419224ce190242b2c44069dd3c560192b3b669f3")))
.hasDays(1095);
assertThat(Duration
.ofSeconds(findExpiryGauge(meterRegistry, "intermediary", "60f79365fc46bf69149754d377680192b3b6bcf5")))
.hasDays(730);
assertThat(Duration
.ofSeconds(findExpiryGauge(meterRegistry, "server", "504c45129526ac050abb11459b1f0192b3b70fe9")))
.hasDays(365);
assertThat(Duration
.ofSeconds(findExpiryGauge(meterRegistry, "expired", "562bc5dcf4f26bb179abb13068180192b3bb53dc")))
.hasDays(-386);
assertThat(Duration
.ofSeconds(findExpiryGauge(meterRegistry, "not-yet-valid", "7df79335f274e2cfa7467fd5f9ce0192b3bcf4aa")))
.hasDays(36889);
}
@Test
void shouldRegisterMetricsIfNoBundleExistsAtBindTime() {
DefaultSslBundleRegistry sslBundleRegistry = new DefaultSslBundleRegistry();
MeterRegistry meterRegistry = bindToRegistry(sslBundleRegistry);
sslBundleRegistry.registerBundle("dummy",
SslBundle.of(createSslStoreBundle("classpath:certificates/chains.p12")));
assertThat(meterRegistry.getMeters()).isNotEmpty();
}
private long findExpiryGauge(MeterRegistry meterRegistry, String chain, String certificateSerialNumber) {
return (long) meterRegistry.get("ssl.chain.expiry")
.tag("bundle", "test-0")
.tag("chain", chain)
.tag("certificate", certificateSerialNumber)
.gauge()
.value();
}
private SimpleMeterRegistry bindToRegistry(SslBundles sslBundles) {
SslInfo sslInfo = new SslInfo(sslBundles);
SslMeterBinder binder = new SslMeterBinder(sslInfo, sslBundles, CLOCK);
SimpleMeterRegistry meterRegistry = new SimpleMeterRegistry();
binder.bindTo(meterRegistry);
return meterRegistry;
}
private SslStoreBundle createSslStoreBundle(String location) {
JksSslStoreDetails keyStoreDetails = JksSslStoreDetails.forLocation(location).withPassword("secret");
return new JksSslStoreBundle(keyStoreDetails, null);
}
private DefaultSslBundleRegistry createSslBundleRegistry(String... locations) {
DefaultSslBundleRegistry sslBundleRegistry = new DefaultSslBundleRegistry();
for (int i = 0; i < locations.length; i++) {
SslStoreBundle sslStoreBundle = createSslStoreBundle(locations[i]);
sslBundleRegistry.registerBundle("test-%d".formatted(i), SslBundle.of(sslStoreBundle));
}
return sslBundleRegistry;
}
}
| SslMeterBinderTests |
java | elastic__elasticsearch | x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceParameterizedTests.java | {
"start": 525,
"end": 783
} | class ____ extends AbstractInferenceServiceParameterizedTests {
public OpenAiServiceParameterizedTests(AbstractInferenceServiceParameterizedTests.TestCase testCase) {
super(createTestConfiguration(), testCase);
}
}
| OpenAiServiceParameterizedTests |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/orderupdates/OrderUpdateNestedEmbeddedIdTest.java | {
"start": 1471,
"end": 2342
} | class ____ {
@AfterAll
public void tearDown(SessionFactoryScope scope) {
scope.inTransaction( session -> {
session.createMutationQuery( "delete from Child2" ).executeUpdate();
session.createMutationQuery( "delete from Child1" ).executeUpdate();
session.createMutationQuery( "delete from Parent" ).executeUpdate();
} );
}
@Test
public void testParentPersist(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final ParentId parentId = new ParentId( "parent_1" );
final Child1Id child1Id1 = new Child1Id( parentId, 1 );
final Child1Id child1Id2 = new Child1Id( parentId, 2 );
final Parent parent = new Parent(
parentId,
List.of( new Child1( child1Id1, List.of() ), new Child1( child1Id2, List.of( new Child2() ) ) )
);
session.persist( parent );
} );
}
@Embeddable
public static | OrderUpdateNestedEmbeddedIdTest |
java | google__auto | factory/src/test/resources/bad/AnnotationsToApplyMultiple.java | {
"start": 684,
"end": 815
} | interface ____ {
SuppressWarnings suppressWarnings() default @SuppressWarnings("Immutable");
}
@AutoFactory.AnnotationsToApply
@ | This |
java | apache__rocketmq | tools/src/test/java/org/apache/rocketmq/tools/command/message/QueryMsgByUniqueKeySubCommandTest.java | {
"start": 3454,
"end": 12808
} | class ____ {
private static QueryMsgByUniqueKeySubCommand cmd = new QueryMsgByUniqueKeySubCommand();
private static DefaultMQAdminExt defaultMQAdminExt;
private static DefaultMQAdminExtImpl defaultMQAdminExtImpl;
private static MQClientInstance mqClientInstance = MQClientManager.getInstance().getOrCreateMQClientInstance(new ClientConfig());
private static MQClientAPIImpl mQClientAPIImpl;
private static MQAdminImpl mQAdminImpl;
@Before
public void before() throws NoSuchFieldException, IllegalAccessException, InterruptedException, RemotingException, MQClientException, MQBrokerException {
mQClientAPIImpl = mock(MQClientAPIImpl.class);
mQAdminImpl = mock(MQAdminImpl.class);
defaultMQAdminExt = new DefaultMQAdminExt();
defaultMQAdminExtImpl = new DefaultMQAdminExtImpl(defaultMQAdminExt, 1000);
Field field = DefaultMQAdminExtImpl.class.getDeclaredField("mqClientInstance");
field.setAccessible(true);
field.set(defaultMQAdminExtImpl, mqClientInstance);
field = MQClientInstance.class.getDeclaredField("mQClientAPIImpl");
field.setAccessible(true);
field.set(mqClientInstance, mQClientAPIImpl);
field = MQClientInstance.class.getDeclaredField("mQAdminImpl");
field.setAccessible(true);
field.set(mqClientInstance, mQAdminImpl);
field = DefaultMQAdminExt.class.getDeclaredField("defaultMQAdminExtImpl");
field.setAccessible(true);
field.set(defaultMQAdminExt, defaultMQAdminExtImpl);
ConsumeMessageDirectlyResult result = new ConsumeMessageDirectlyResult();
result.setConsumeResult(CMResult.CR_SUCCESS);
result.setRemark("customRemark_122333444");
when(mQClientAPIImpl.consumeMessageDirectly(anyString(), anyString(), anyString(), anyString(), anyString(), anyLong())).thenReturn(result);
MessageExt retMsgExt = new MessageExt();
retMsgExt.setMsgId("0A3A54F7BF7D18B4AAC28A3FA2CF0000");
retMsgExt.setBody("this is message ext body".getBytes());
retMsgExt.setTopic("testTopic");
retMsgExt.setTags("testTags");
retMsgExt.setStoreHost(new InetSocketAddress("127.0.0.1", 8899));
retMsgExt.setBornHost(new InetSocketAddress("127.0.0.1", 7788));
retMsgExt.setQueueId(1);
retMsgExt.setQueueOffset(12L);
retMsgExt.setCommitLogOffset(123);
retMsgExt.setReconsumeTimes(2);
retMsgExt.setBornTimestamp(System.currentTimeMillis());
retMsgExt.setStoreTimestamp(System.currentTimeMillis());
when(mQAdminImpl.viewMessage(anyString(), anyString())).thenReturn(retMsgExt);
when(mQAdminImpl.queryMessageByUniqKey(anyString(), anyString())).thenReturn(retMsgExt);
QueryResult queryResult = new QueryResult(0, Lists.newArrayList(retMsgExt));
when(mQAdminImpl.queryMessageByUniqKey(anyString(), anyString(), anyString(), anyInt(), anyLong(), anyLong())).thenReturn(queryResult);
TopicRouteData topicRouteData = new TopicRouteData();
List<BrokerData> brokerDataList = new ArrayList<>();
BrokerData brokerData = new BrokerData();
HashMap<Long, String> brokerAddrs = new HashMap<>();
brokerAddrs.put(MixAll.MASTER_ID, "127.0.0.1:9876");
brokerData.setBrokerAddrs(brokerAddrs);
brokerDataList.add(brokerData);
topicRouteData.setBrokerDatas(brokerDataList);
when(mQClientAPIImpl.getTopicRouteInfoFromNameServer(anyString(), anyLong())).thenReturn(topicRouteData);
GroupList groupList = new GroupList();
HashSet<String> groupSets = new HashSet<>();
groupSets.add("testGroup");
groupList.setGroupList(groupSets);
when(mQClientAPIImpl.queryTopicConsumeByWho(anyString(), anyString(), anyLong())).thenReturn(groupList);
ConsumeStats consumeStats = new ConsumeStats();
consumeStats.setConsumeTps(100 * 10000);
HashMap<MessageQueue, OffsetWrapper> offsetTable = new HashMap<>();
MessageQueue messageQueue = new MessageQueue();
messageQueue.setBrokerName("messageQueue BrokerName testing");
messageQueue.setTopic("messageQueue topic");
messageQueue.setQueueId(1);
OffsetWrapper offsetWrapper = new OffsetWrapper();
offsetWrapper.setBrokerOffset(100);
offsetWrapper.setConsumerOffset(200);
offsetWrapper.setLastTimestamp(System.currentTimeMillis());
offsetTable.put(messageQueue, offsetWrapper);
consumeStats.setOffsetTable(offsetTable);
when(mQClientAPIImpl.getConsumeStats(anyString(), anyString(), (String) isNull(), anyLong())).thenReturn(consumeStats);
ClusterInfo clusterInfo = new ClusterInfo();
HashMap<String, BrokerData> brokerAddrTable = new HashMap<>();
brokerAddrTable.put("key", brokerData);
clusterInfo.setBrokerAddrTable(brokerAddrTable);
HashMap<String, Set<String>> clusterAddrTable = new HashMap<>();
Set<String> addrSet = new HashSet<>();
addrSet.add("127.0.0.1:9876");
clusterAddrTable.put("key", addrSet);
clusterInfo.setClusterAddrTable(clusterAddrTable);
when(mQClientAPIImpl.getBrokerClusterInfo(anyLong())).thenReturn(clusterInfo);
field = QueryMsgByUniqueKeySubCommand.class.getDeclaredField("defaultMQAdminExt");
field.setAccessible(true);
field.set(cmd, defaultMQAdminExt);
}
@Test
public void testExecuteConsumeActively() throws SubCommandException, InterruptedException, MQBrokerException, RemotingTimeoutException, RemotingSendRequestException, RemotingConnectException {
ConsumerConnection consumerConnection = new ConsumerConnection();
consumerConnection.setConsumeType(ConsumeType.CONSUME_ACTIVELY);
HashSet<Connection> connectionSet = new HashSet<>();
Connection conn = new Connection();
conn.setClientId("clientIdTest");
conn.setClientAddr("clientAddrTest");
conn.setLanguage(LanguageCode.JAVA);
conn.setVersion(1);
connectionSet.add(conn);
consumerConnection.setConnectionSet(connectionSet);
when(mQClientAPIImpl.getConsumerConnectionList(anyString(), anyString(), anyLong())).thenReturn(consumerConnection);
Options options = ServerUtil.buildCommandlineOptions(new Options());
String[] args = new String[] {"-t myTopicTest", "-i msgId", "-c DefaultCluster"};
CommandLine commandLine = ServerUtil.parseCmdLine("mqadmin ", args,
cmd.buildCommandlineOptions(options), new DefaultParser());
cmd.execute(commandLine, options, null);
}
@Test
public void testExecuteConsumePassively() throws SubCommandException, InterruptedException, MQBrokerException, RemotingTimeoutException, RemotingSendRequestException, RemotingConnectException {
ConsumerConnection consumerConnection = new ConsumerConnection();
consumerConnection.setConsumeType(ConsumeType.CONSUME_PASSIVELY);
HashSet<Connection> connectionSet = new HashSet<>();
Connection conn = new Connection();
conn.setClientId("clientIdTestStr");
conn.setClientAddr("clientAddrTestStr");
conn.setLanguage(LanguageCode.JAVA);
conn.setVersion(2);
connectionSet.add(conn);
consumerConnection.setConnectionSet(connectionSet);
when(mQClientAPIImpl.getConsumerConnectionList(anyString(), anyString(), anyLong())).thenReturn(consumerConnection);
Options options = ServerUtil.buildCommandlineOptions(new Options());
String[] args = new String[] {"-t myTopicTest", "-i 7F000001000004D20000000000000066", "-c DefaultCluster"};
CommandLine commandLine = ServerUtil.parseCmdLine("mqadmin ", args,
cmd.buildCommandlineOptions(options), new DefaultParser());
cmd.execute(commandLine, options, null);
}
@Test
public void testExecuteWithConsumerGroupAndClientId() throws SubCommandException {
Options options = ServerUtil.buildCommandlineOptions(new Options());
String[] args = new String[] {"-t myTopicTest", "-i 0A3A54F7BF7D18B4AAC28A3FA2CF0000", "-g producerGroupName", "-d clientId", "-c DefaultCluster"};
CommandLine commandLine = ServerUtil.parseCmdLine("mqadmin ", args,
cmd.buildCommandlineOptions(options), new DefaultParser());
cmd.execute(commandLine, options, null);
}
@Test
public void testExecute() throws SubCommandException {
System.setProperty("rocketmq.namesrv.addr", "127.0.0.1:9876");
String[] args = new String[]{"-t myTopicTest", "-i 0A3A54F7BF7D18B4AAC28A3FA2CF0000", "-c DefaultCluster"};
Options options = ServerUtil.buildCommandlineOptions(new Options());
CommandLine commandLine = ServerUtil.parseCmdLine("mqadmin ", args,
cmd.buildCommandlineOptions(options), new DefaultParser());
cmd.execute(commandLine, options, null);
args = new String[] {"-t myTopicTest", "-i 0A3A54F7BF7D18B4AAC28A3FA2CF0000", "-g producerGroupName", "-d clientId", "-c DefaultCluster"};
commandLine = ServerUtil.parseCmdLine("mqadmin ", args, cmd.buildCommandlineOptions(options),
new DefaultParser());
cmd.execute(commandLine, options, null);
}
}
| QueryMsgByUniqueKeySubCommandTest |
java | elastic__elasticsearch | x-pack/plugin/old-lucene-versions/src/test/java/org/elasticsearch/xpack/lucene/bwc/codecs/lucene50/Lucene50ScoreSkipReader.java | {
"start": 1292,
"end": 5539
} | class ____ extends Lucene50SkipReader {
private final byte[][] impactData;
private final int[] impactDataLength;
private final ByteArrayDataInput badi = new ByteArrayDataInput();
private final Impacts impacts;
private int numLevels = 1;
private final MutableImpactList[] perLevelImpacts;
Lucene50ScoreSkipReader(
int version,
IndexInput skipStream,
int maxSkipLevels,
boolean hasPos,
boolean hasOffsets,
boolean hasPayloads
) {
super(version, skipStream, maxSkipLevels, hasPos, hasOffsets, hasPayloads);
if (version < BWCLucene50PostingsFormat.VERSION_IMPACT_SKIP_DATA) {
throw new IllegalStateException("Cannot skip based on scores if impacts are not indexed");
}
this.impactData = new byte[maxSkipLevels][];
Arrays.fill(impactData, new byte[0]);
this.impactDataLength = new int[maxSkipLevels];
this.perLevelImpacts = new MutableImpactList[maxSkipLevels];
for (int i = 0; i < perLevelImpacts.length; ++i) {
perLevelImpacts[i] = new MutableImpactList();
}
impacts = new Impacts() {
@Override
public int numLevels() {
return numLevels;
}
@Override
public int getDocIdUpTo(int level) {
return skipDoc[level];
}
@Override
public List<Impact> getImpacts(int level) {
assert level < numLevels;
if (impactDataLength[level] > 0) {
badi.reset(impactData[level], 0, impactDataLength[level]);
perLevelImpacts[level] = readImpacts(badi, perLevelImpacts[level]);
impactDataLength[level] = 0;
}
return perLevelImpacts[level];
}
};
}
@Override
public int skipTo(int target) throws IOException {
int result = super.skipTo(target);
if (numberOfSkipLevels > 0) {
numLevels = numberOfSkipLevels;
} else {
// End of postings don't have skip data anymore, so we fill with dummy data
// like SlowImpactsEnum.
numLevels = 1;
perLevelImpacts[0].length = 1;
perLevelImpacts[0].impacts[0].freq = Integer.MAX_VALUE;
perLevelImpacts[0].impacts[0].norm = 1L;
impactDataLength[0] = 0;
}
return result;
}
Impacts getImpacts() {
return impacts;
}
@Override
protected void readImpacts(int level, IndexInput skipStream) throws IOException {
int length = skipStream.readVInt();
if (impactData[level].length < length) {
impactData[level] = new byte[ArrayUtil.oversize(length, Byte.BYTES)];
}
skipStream.readBytes(impactData[level], 0, length);
impactDataLength[level] = length;
}
static MutableImpactList readImpacts(ByteArrayDataInput in, MutableImpactList reuse) {
int maxNumImpacts = in.length(); // at most one impact per byte
if (reuse.impacts.length < maxNumImpacts) {
int oldLength = reuse.impacts.length;
reuse.impacts = ArrayUtil.grow(reuse.impacts, maxNumImpacts);
for (int i = oldLength; i < reuse.impacts.length; ++i) {
reuse.impacts[i] = new Impact(Integer.MAX_VALUE, 1L);
}
}
int freq = 0;
long norm = 0;
int length = 0;
while (in.getPosition() < in.length()) {
int freqDelta = in.readVInt();
if ((freqDelta & 0x01) != 0) {
freq += 1 + (freqDelta >>> 1);
try {
norm += 1 + in.readZLong();
} catch (IOException e) {
throw new RuntimeException(e); // cannot happen on a BADI
}
} else {
freq += 1 + (freqDelta >>> 1);
norm++;
}
Impact impact = reuse.impacts[length];
impact.freq = freq;
impact.norm = norm;
length++;
}
reuse.length = length;
return reuse;
}
static | Lucene50ScoreSkipReader |
java | apache__maven | compat/maven-model-builder/src/main/java/org/apache/maven/model/building/ModelSource2.java | {
"start": 1036,
"end": 1612
} | interface ____ loading of parent POM(s) from the same backing store and allows
* construction of MavenProject instances without the need to have parent POM(s) available from local or remote
* repositories.
* <p>
* ModelSource2 instances are cached in {@link ModelBuildingRequest#getModelCache()}. Implementations must guarantee
* that the connection to the backing store remains active until request's {@link ModelCache} is discarded or flushed.
*
* @deprecated use {@code org.apache.maven.api.services.ModelBuilder} instead
*/
@Deprecated(since = "4.0.0")
public | supports |
java | quarkusio__quarkus | extensions/vertx/deployment/src/test/java/io/quarkus/vertx/deployment/MessageConsumerContextTest.java | {
"start": 853,
"end": 3289
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar.addClasses(MessageConsumers.class));
@Inject
MessageConsumers messageConsumers;
@Inject
EventBus eventBus;
@RepeatedTest(5)
public void testSend() throws InterruptedException {
MessageConsumers.MESSAGES.clear();
MessageConsumers.latch = new CountDownLatch(3);
eventBus.send("send", "foo");
eventBus.send("send", "bar");
eventBus.send("send", "baz");
assertTrue(MessageConsumers.latch.await(3, TimeUnit.SECONDS));
if (Runtime.getRuntime().availableProcessors() > 1) {
assertEquals(3, MessageConsumers.MESSAGES.size());
} else {
assertTrue(MessageConsumers.MESSAGES.size() >= 2);
}
}
@RepeatedTest(5)
public void testPublish() throws InterruptedException {
MessageConsumers.MESSAGES.clear();
MessageConsumers.latch = new CountDownLatch(9); // 3 messages x 3 consumers
eventBus.publish("pub", "foo");
eventBus.publish("pub", "bar");
eventBus.publish("pub", "baz");
assertTrue(MessageConsumers.latch.await(3, TimeUnit.SECONDS));
if (Runtime.getRuntime().availableProcessors() > 1) {
// The 2 event loops and additional worker contexts
assertTrue(MessageConsumers.MESSAGES.size() >= 3);
} else {
assertTrue(MessageConsumers.MESSAGES.size() >= 2);
}
}
@RepeatedTest(5)
public void testRequestReply() throws InterruptedException {
MessageConsumers.MESSAGES.clear();
Uni<String> uni1 = eventBus.<String> request("req", "foo").map(Message::body);
Uni<String> uni2 = eventBus.<String> request("req", "bar").map(Message::body);
Uni<String> uni3 = eventBus.<String> request("req", "baz").map(Message::body);
Uni.combine().all().unis(uni1, uni2, uni3).asTuple()
.map(tuple -> {
assertEquals("FOO", tuple.getItem1());
assertEquals("BAR", tuple.getItem2());
assertEquals("BAZ", tuple.getItem3());
return "done";
})
.await().atMost(Duration.ofSeconds(3));
assertTrue(MessageConsumers.MESSAGES.size() >= 2);
}
@ApplicationScoped
static | MessageConsumerContextTest |
java | elastic__elasticsearch | x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CCRInfoTransportAction.java | {
"start": 2036,
"end": 5149
} | class ____ extends XPackFeatureUsage {
private final int numberOfFollowerIndices;
private final int numberOfAutoFollowPatterns;
private final Long lastFollowTimeInMillis;
public Usage(
boolean available,
boolean enabled,
int numberOfFollowerIndices,
int numberOfAutoFollowPatterns,
Long lastFollowTimeInMillis
) {
super(XPackField.CCR, available, enabled);
this.numberOfFollowerIndices = numberOfFollowerIndices;
this.numberOfAutoFollowPatterns = numberOfAutoFollowPatterns;
this.lastFollowTimeInMillis = lastFollowTimeInMillis;
}
public Usage(StreamInput in) throws IOException {
super(in);
numberOfFollowerIndices = in.readVInt();
numberOfAutoFollowPatterns = in.readVInt();
if (in.readBoolean()) {
lastFollowTimeInMillis = in.readVLong();
} else {
lastFollowTimeInMillis = null;
}
}
@Override
public TransportVersion getMinimalSupportedVersion() {
return TransportVersion.zero();
}
public int getNumberOfFollowerIndices() {
return numberOfFollowerIndices;
}
public int getNumberOfAutoFollowPatterns() {
return numberOfAutoFollowPatterns;
}
public Long getLastFollowTimeInMillis() {
return lastFollowTimeInMillis;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(numberOfFollowerIndices);
out.writeVInt(numberOfAutoFollowPatterns);
if (lastFollowTimeInMillis != null) {
out.writeBoolean(true);
out.writeVLong(lastFollowTimeInMillis);
} else {
out.writeBoolean(false);
}
}
@Override
protected void innerXContent(XContentBuilder builder, Params params) throws IOException {
super.innerXContent(builder, params);
builder.field("follower_indices_count", numberOfFollowerIndices);
builder.field("auto_follow_patterns_count", numberOfAutoFollowPatterns);
if (lastFollowTimeInMillis != null) {
builder.field("last_follow_time_in_millis", lastFollowTimeInMillis);
}
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Usage usage = (Usage) o;
return numberOfFollowerIndices == usage.numberOfFollowerIndices
&& numberOfAutoFollowPatterns == usage.numberOfAutoFollowPatterns
&& Objects.equals(lastFollowTimeInMillis, usage.lastFollowTimeInMillis);
}
@Override
public int hashCode() {
return Objects.hash(numberOfFollowerIndices, numberOfAutoFollowPatterns, lastFollowTimeInMillis);
}
}
}
| Usage |
java | quarkusio__quarkus | integration-tests/injectmock/src/test/java/io/quarkus/it/mockbean/NestedTest.java | {
"start": 416,
"end": 1252
} | class ____ {
@InjectMock
SuffixService suffixService;
@Test
public void testGreet() {
Mockito.when(messageService.getMessage()).thenReturn("hi");
Mockito.when(suffixService.getSuffix()).thenReturn("!");
given()
.when().get("/greeting")
.then()
.statusCode(200)
.body(is("HI!"));
}
@Test
public void testGreetAgain() {
Mockito.when(messageService.getMessage()).thenReturn("yolo");
Mockito.when(suffixService.getSuffix()).thenReturn("!!!");
given()
.when().get("/greeting")
.then()
.statusCode(200)
.body(is("YOLO!!!"));
}
}
}
| ActualTest |
java | grpc__grpc-java | core/src/main/java/io/grpc/internal/KeepAliveEnforcer.java | {
"start": 3534,
"end": 3729
} | class ____ implements Ticker {
public static final SystemTicker INSTANCE = new SystemTicker();
@Override
public long nanoTime() {
return System.nanoTime();
}
}
}
| SystemTicker |
java | mockito__mockito | mockito-core/src/test/java/org/mockitousage/bugs/ArgumentCaptorDontCapturePreviouslyVerifiedTest.java | {
"start": 434,
"end": 1096
} | class ____ {
@Test
public void previous_verified_invocation_should_still_capture_args() {
IMethods mock = mock(IMethods.class);
mock.oneArg("first");
ArgumentCaptor<String> argument = ArgumentCaptor.forClass(String.class);
verify(mock, times(1)).oneArg(argument.capture());
assertThat(argument.getAllValues()).hasSize(1);
// additional interactions
mock.oneArg("second");
argument = ArgumentCaptor.forClass(String.class);
verify(mock, times(2)).oneArg(argument.capture());
assertThat(argument.getAllValues()).hasSize(2);
}
}
| ArgumentCaptorDontCapturePreviouslyVerifiedTest |
java | resilience4j__resilience4j | resilience4j-circuitbreaker/src/jmh/java/io/github/resilience4j/circuitbreaker/CircuitBreakerBenchmark.java | {
"start": 1153,
"end": 3967
} | class ____ {
private static final int ITERATION_COUNT = 10;
private static final int WARMUP_COUNT = 3;
private static final int THREAD_COUNT = 2;
private static final int FORK_COUNT = 2;
private Supplier<String> protectedSupplier;
private Supplier<String> protectedSupplierWithOneConsumer;
private Supplier<String> protectedSupplierWithDiffConsumer;
private Supplier<String> stringSupplier;
public static void main(String[] args) throws RunnerException {
Options options = new OptionsBuilder()
.include(CircuitBreakerBenchmark.class.getName())
.addProfiler(GCProfiler.class)
.build();
new Runner(options).run();
}
@Setup
public void setUp() {
stringSupplier = () -> {
Blackhole.consumeCPU(100);
return "Hello Benchmark";
};
CircuitBreaker circuitBreaker = CircuitBreaker.ofDefaults("testCircuitBreaker");
protectedSupplier = circuitBreaker.decorateSupplier(stringSupplier);
CircuitBreaker withOneConsumer = CircuitBreaker.ofDefaults("testCircuitBreakerWithSb");
withOneConsumer.getEventPublisher().onEvent(event -> {});
protectedSupplierWithOneConsumer = CircuitBreaker.decorateSupplier(withOneConsumer, stringSupplier);
CircuitBreaker withDiffConsumer = CircuitBreaker.ofDefaults("testCircuitBreakerWithDiffSb");
withDiffConsumer.getEventPublisher()
.onIgnoredError(event ->{})
.onCallNotPermitted(event ->{})
.onSuccess(event -> {})
.onError(event -> {});
protectedSupplierWithDiffConsumer = CircuitBreaker.decorateSupplier(withDiffConsumer, stringSupplier);
}
@Benchmark
@Fork(value = FORK_COUNT)
@Threads(value = THREAD_COUNT)
@Warmup(iterations = WARMUP_COUNT)
@Measurement(iterations = ITERATION_COUNT)
public String directSupplier() {
return stringSupplier.get();
}
@Benchmark
@Fork(value = FORK_COUNT)
@Threads(value = THREAD_COUNT)
@Warmup(iterations = WARMUP_COUNT)
@Measurement(iterations = ITERATION_COUNT)
public String protectedSupplier() {
return protectedSupplier.get();
}
@Benchmark
@Fork(value = FORK_COUNT)
@Threads(value = THREAD_COUNT)
@Warmup(iterations = WARMUP_COUNT)
@Measurement(iterations = ITERATION_COUNT)
public String protectedSupplierWithOneConsumer() {
return protectedSupplierWithOneConsumer.get();
}
@Benchmark
@Fork(value = FORK_COUNT)
@Threads(value = THREAD_COUNT)
@Warmup(iterations = WARMUP_COUNT)
@Measurement(iterations = ITERATION_COUNT)
public String protectedSupplierWithDiffConsumer() {
return protectedSupplierWithDiffConsumer.get();
}
} | CircuitBreakerBenchmark |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/capabilities/TelemetryAware.java | {
"start": 419,
"end": 572
} | interface ____ {
/**
* @return the label reported in the telemetry data. Only needs to be overwritten if the label doesn't match the | TelemetryAware |
java | apache__camel | components/camel-hazelcast/src/test/java/org/apache/camel/processor/aggregate/hazelcast/HazelcastAggregationRepositoryCamelTestSupport.java | {
"start": 1450,
"end": 2447
} | class ____ extends CamelTestSupport {
@RegisterExtension
public static HazelcastService hazelcastService = HazelcastServiceFactory.createService();
@RegisterExtension
public static TestEntityNameGenerator nameGenerator = new TestEntityNameGenerator();
private static HazelcastInstance hzOne;
private static HazelcastInstance hzTwo;
protected static HazelcastInstance getFirstInstance() {
return hzOne;
}
protected static HazelcastInstance getSecondInstance() {
return hzTwo;
}
@BeforeAll
public static void setUpHazelcastCluster() {
hzOne = Hazelcast.newHazelcastInstance(hazelcastService.createConfiguration(null, 0, "hzOne", "aggregation"));
hzTwo = Hazelcast.newHazelcastInstance(hazelcastService.createConfiguration(null, 0, "hzTwo", "aggregation"));
}
@AfterAll
public static void shutDownHazelcastCluster() {
Hazelcast.shutdownAll();
}
}
| HazelcastAggregationRepositoryCamelTestSupport |
java | apache__camel | components/camel-couchdb/src/main/java/org/apache/camel/component/couchdb/CouchDbException.java | {
"start": 938,
"end": 1286
} | class ____ extends CamelExchangeException {
private static final long serialVersionUID = 1L;
public CouchDbException(String message, Exchange exchange) {
super(message, exchange);
}
public CouchDbException(String message, Exchange exchange, Throwable cause) {
super(message, exchange, cause);
}
}
| CouchDbException |
java | hibernate__hibernate-orm | tooling/metamodel-generator/src/test/java/org/hibernate/processor/test/xmlonly/Period.java | {
"start": 172,
"end": 229
} | class ____ {
private Date start;
private Date end;
}
| Period |
java | google__guice | extensions/grapher/test/com/google/inject/grapher/AbstractInjectorGrapherTest.java | {
"start": 1372,
"end": 1495
} | class ____ extends TestCase {
private static final String TEST_STRING = "test";
private static | AbstractInjectorGrapherTest |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/WeatherEndpointBuilderFactory.java | {
"start": 56129,
"end": 61526
} | interface ____ extends EndpointProducerBuilder {
default WeatherEndpointProducerBuilder basic() {
return (WeatherEndpointProducerBuilder) this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedWeatherEndpointProducerBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedWeatherEndpointProducerBuilder lazyStartProducer(String lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* A custum geolocation provider to determine the longitude and latitude
* to use when no location information is set. The default implementaion
* uses the ipstack API and requires geolocationAccessKey and
* geolocationRequestHostIP.
*
* The option is a:
* <code>org.apache.camel.component.weather.geolocation.GeoLocationProvider</code> type.
*
* Group: advanced
*
* @param geoLocationProvider the value to set
* @return the dsl builder
*/
default AdvancedWeatherEndpointProducerBuilder geoLocationProvider(org.apache.camel.component.weather.geolocation.GeoLocationProvider geoLocationProvider) {
doSetProperty("geoLocationProvider", geoLocationProvider);
return this;
}
/**
* A custum geolocation provider to determine the longitude and latitude
* to use when no location information is set. The default implementaion
* uses the ipstack API and requires geolocationAccessKey and
* geolocationRequestHostIP.
*
* The option will be converted to a
* <code>org.apache.camel.component.weather.geolocation.GeoLocationProvider</code> type.
*
* Group: advanced
*
* @param geoLocationProvider the value to set
* @return the dsl builder
*/
default AdvancedWeatherEndpointProducerBuilder geoLocationProvider(String geoLocationProvider) {
doSetProperty("geoLocationProvider", geoLocationProvider);
return this;
}
/**
* To use an existing configured http client (for example with http
* proxy).
*
* The option is a:
* <code>org.apache.hc.client5.http.impl.classic.CloseableHttpClient</code> type.
*
* Group: advanced
*
* @param httpClient the value to set
* @return the dsl builder
*/
default AdvancedWeatherEndpointProducerBuilder httpClient(org.apache.hc.client5.http.impl.classic.CloseableHttpClient httpClient) {
doSetProperty("httpClient", httpClient);
return this;
}
/**
* To use an existing configured http client (for example with http
* proxy).
*
* The option will be converted to a
* <code>org.apache.hc.client5.http.impl.classic.CloseableHttpClient</code> type.
*
* Group: advanced
*
* @param httpClient the value to set
* @return the dsl builder
*/
default AdvancedWeatherEndpointProducerBuilder httpClient(String httpClient) {
doSetProperty("httpClient", httpClient);
return this;
}
}
/**
* Builder for endpoint for the Weather component.
*/
public | AdvancedWeatherEndpointProducerBuilder |
java | apache__dubbo | dubbo-common/src/main/java/org/apache/dubbo/config/annotation/DubboService.java | {
"start": 3946,
"end": 6014
} | interface ____ + Local if not set
*/
String stub() default "";
/**
* Cluster strategy, legal values include: failover, failfast, failsafe, failback, forking
* you can use {@link org.apache.dubbo.common.constants.ClusterRules#FAIL_FAST} ……
*/
String cluster() default ClusterRules.EMPTY;
/**
* How the proxy is generated, legal values include: jdk, javassist
*/
String proxy() default "";
/**
* Maximum connections service provider can accept, default value is -1 - connection is shared
*/
int connections() default -1;
/**
* The callback instance limit peer connection
* <p>
* see org.apache.dubbo.common.constants.CommonConstants.DEFAULT_CALLBACK_INSTANCES
*/
int callbacks() default -1;
/**
* Callback method name when connected, default value is empty string
*/
String onconnect() default "";
/**
* Callback method name when disconnected, default value is empty string
*/
String ondisconnect() default "";
/**
* Service owner, default value is empty string
*/
String owner() default "";
/**
* Service layer, default value is empty string
*/
String layer() default "";
/**
* Service invocation retry times
*
* @see org.apache.dubbo.common.constants.CommonConstants#DEFAULT_RETRIES
*/
int retries() default -1;
/**
* Load balance strategy, legal values include: random, roundrobin, leastactive
*
* you can use {@link org.apache.dubbo.common.constants.LoadbalanceRules#RANDOM} ……
*/
String loadbalance() default LoadbalanceRules.EMPTY;
/**
* Whether to enable async invocation, default value is false
*/
boolean async() default false;
/**
* Maximum active requests allowed, default value is -1
*/
int actives() default -1;
/**
* Whether the async request has already been sent, the default value is false
*/
boolean sent() default false;
/**
* Service mock name, use | name |
java | micronaut-projects__micronaut-core | http-server-netty/src/main/java/io/micronaut/http/server/netty/configuration/NettyHttpServerConfiguration.java | {
"start": 44912,
"end": 51080
} | class ____ implements EventLoopGroupConfiguration {
private int threads;
private double threadCoreRatio = DEFAULT_THREAD_CORE_RATIO;
private Integer ioRatio;
private String executor;
private boolean preferNativeTransport = false;
private List<String> transport;
private Duration shutdownQuietPeriod = Duration.ofSeconds(DEFAULT_SHUTDOWN_QUIET_PERIOD);
private Duration shutdownTimeout = Duration.ofSeconds(DEFAULT_SHUTDOWN_TIMEOUT);
private String name;
private boolean loomCarrier = false;
/**
* @param name The name;
*/
EventLoopConfig(String name) {
this.name = name;
}
@NonNull
@Override
public String getName() {
return name;
}
/**
* Sets the name to use.
* @param name The name
*/
public void setEventLoopGroup(String name) {
if (StringUtils.isNotEmpty(name)) {
this.name = name;
}
}
/**
* Sets the number of threads for the event loop group.
* @param threads The number of threads
*/
public void setThreads(int threads) {
this.threads = threads;
}
/**
* Sets the I/O ratio.
* @param ioRatio The I/O ratio
*/
public void setIoRatio(Integer ioRatio) {
this.ioRatio = ioRatio;
}
/**
* A named executor service to use for event loop threads
* (optional). This property is very specialized. In particular,
* it will <i>not</i> solve read timeouts or fix blocking
* operations on the event loop, in fact it may do the opposite.
* Don't use unless you really know what this does.
*
* @param executor The executor
*/
public void setExecutor(String executor) {
this.executor = executor;
}
/**
* @param preferNativeTransport Set whether to prefer the native transport if available
*/
public void setPreferNativeTransport(boolean preferNativeTransport) {
this.preferNativeTransport = preferNativeTransport;
}
/**
* @param shutdownQuietPeriod Set the shutdown quiet period
*/
public void setShutdownQuietPeriod(Duration shutdownQuietPeriod) {
if (shutdownQuietPeriod != null) {
this.shutdownQuietPeriod = shutdownQuietPeriod;
}
}
/**
* @param shutdownTimeout Set the shutdown timeout (must be >= shutdownQuietPeriod)
*/
public void setShutdownTimeout(Duration shutdownTimeout) {
if (shutdownTimeout != null) {
this.shutdownTimeout = shutdownTimeout;
}
}
/**
* @return The number of threads to use
*/
public int getNumOfThreads() {
return threads;
}
/**
* @return The I/O ratio to use
*/
@Override
public Optional<Integer> getIoRatio() {
if (ioRatio != null) {
return Optional.of(ioRatio);
}
return Optional.empty();
}
/**
* @return The name of the configured executor to use
*/
@Override
public Optional<String> getExecutorName() {
if (executor != null) {
return Optional.of(executor);
}
return Optional.empty();
}
@Override
public int getNumThreads() {
return threads;
}
@Override
public double getThreadCoreRatio() {
return threadCoreRatio;
}
/**
* The number of threads per core to use if {@link #getNumThreads()} is set to 0.
*
* @param threadCoreRatio The thread-to-core ratio
* @since 4.8.0
*/
public void setThreadCoreRatio(double threadCoreRatio) {
this.threadCoreRatio = threadCoreRatio;
}
@Override
public boolean isPreferNativeTransport() {
return preferNativeTransport;
}
@Override
public @NonNull List<String> getTransport() {
return transport == null ? EventLoopGroupConfiguration.super.getTransport() : transport;
}
/**
* The transports to use for this event loop, in order of preference. Supported values are
* {@code io_uring,epoll,kqueue,nio}. The first available transport out of those listed will
* be used (nio is always available). If no listed transport is available, an exception will be
* thrown.
* <p>By default, only {@code nio} is used, even if native transports are available. If the
* legacy {@link #isPreferNativeTransport() prefer-native-transport} property is set to
* {@code true}, this defaults to {@code io_uring,epoll,kqueue,nio}.
*
* @param transport The available transports, in order of preference
*/
public void setTransport(@NonNull List<String> transport) {
this.transport = transport;
}
@Override
public Duration getShutdownQuietPeriod() {
return shutdownQuietPeriod;
}
@Override
public Duration getShutdownTimeout() {
return shutdownTimeout;
}
@Override
public boolean isLoomCarrier() {
return loomCarrier;
}
/**
* @param loomCarrier When set to {@code true}, use a special <i>experimental</i> event
* loop that can also execute virtual threads, in order to improve
* virtual thread performance.
*/
public void setLoomCarrier(boolean loomCarrier) {
this.loomCarrier = loomCarrier;
}
}
/**
* Netty listener configuration.
*
* @author yawkat
* @since 3.5.0
*/
@EachProperty("listeners")
public static final | EventLoopConfig |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/state/internals/CachingWindowStore.java | {
"start": 2233,
"end": 19782
} | class ____
extends WrappedStateStore<WindowStore<Bytes, byte[]>, byte[], byte[]>
implements WindowStore<Bytes, byte[]>, CachedStateStore<byte[], byte[]> {
private static final Logger LOG = LoggerFactory.getLogger(CachingWindowStore.class);
private final long windowSize;
private final SegmentedCacheFunction cacheFunction;
private final SegmentedBytesStore.KeySchema keySchema = new WindowKeySchema();
private String cacheName;
private boolean sendOldValues;
private InternalProcessorContext<?, ?> internalContext;
private StateSerdes<Bytes, byte[]> bytesSerdes;
private CacheFlushListener<byte[], byte[]> flushListener;
private final AtomicLong maxObservedTimestamp;
CachingWindowStore(final WindowStore<Bytes, byte[]> underlying,
final long windowSize,
final long segmentInterval) {
super(underlying);
this.windowSize = windowSize;
this.cacheFunction = new SegmentedCacheFunction(keySchema, segmentInterval);
this.maxObservedTimestamp = new AtomicLong(RecordQueue.UNKNOWN);
}
@Override
public void init(final StateStoreContext stateStoreContext, final StateStore root) {
final String changelogTopic = ProcessorContextUtils.changelogFor(stateStoreContext, name(), Boolean.TRUE);
internalContext = asInternalProcessorContext(stateStoreContext);
bytesSerdes = new StateSerdes<>(
changelogTopic,
Serdes.Bytes(),
Serdes.ByteArray());
cacheName = internalContext.taskId() + "-" + name();
internalContext.registerCacheFlushListener(cacheName, entries -> {
for (final ThreadCache.DirtyEntry entry : entries) {
putAndMaybeForward(entry, internalContext);
}
});
super.init(stateStoreContext, root);
}
private void putAndMaybeForward(final ThreadCache.DirtyEntry entry,
final InternalProcessorContext<?, ?> context) {
final byte[] binaryWindowKey = cacheFunction.key(entry.key()).get();
final Windowed<Bytes> windowedKeyBytes = WindowKeySchema.fromStoreBytesKey(binaryWindowKey, windowSize);
final long windowStartTimestamp = windowedKeyBytes.window().start();
final Bytes binaryKey = windowedKeyBytes.key();
if (flushListener != null) {
final byte[] rawNewValue = entry.newValue();
final byte[] rawOldValue = rawNewValue == null || sendOldValues ?
wrapped().fetch(binaryKey, windowStartTimestamp) : null;
// this is an optimization: if this key did not exist in underlying store and also not in the cache,
// we can skip flushing to downstream as well as writing to underlying store
if (rawNewValue != null || rawOldValue != null) {
// we need to get the old values if needed, and then put to store, and then flush
final ProcessorRecordContext current = context.recordContext();
try {
context.setRecordContext(entry.entry().context());
wrapped().put(binaryKey, entry.newValue(), windowStartTimestamp);
flushListener.apply(
new Record<>(
binaryWindowKey,
new Change<>(rawNewValue, sendOldValues ? rawOldValue : null),
entry.entry().context().timestamp(),
entry.entry().context().headers()));
} finally {
context.setRecordContext(current);
}
}
} else {
final ProcessorRecordContext current = context.recordContext();
try {
context.setRecordContext(entry.entry().context());
wrapped().put(binaryKey, entry.newValue(), windowStartTimestamp);
} finally {
context.setRecordContext(current);
}
}
}
@Override
public boolean setFlushListener(final CacheFlushListener<byte[], byte[]> flushListener,
final boolean sendOldValues) {
this.flushListener = flushListener;
this.sendOldValues = sendOldValues;
return true;
}
@Override
public synchronized void put(final Bytes key,
final byte[] value,
final long windowStartTimestamp) {
// since this function may not access the underlying inner store, we need to validate
// if store is open outside as well.
validateStoreOpen();
final Bytes keyBytes = WindowKeySchema.toStoreKeyBinary(key, windowStartTimestamp, 0);
final LRUCacheEntry entry =
new LRUCacheEntry(
value,
internalContext.recordContext().headers(),
true,
internalContext.recordContext().offset(),
internalContext.recordContext().timestamp(),
internalContext.recordContext().partition(),
internalContext.recordContext().topic(),
internalContext.recordContext().sourceRawKey(),
internalContext.recordContext().sourceRawValue()
);
internalContext.cache().put(cacheName, cacheFunction.cacheKey(keyBytes), entry);
maxObservedTimestamp.set(Math.max(keySchema.segmentTimestamp(keyBytes), maxObservedTimestamp.get()));
}
@Override
public byte[] fetch(final Bytes key,
final long timestamp) {
validateStoreOpen();
final Bytes bytesKey = WindowKeySchema.toStoreKeyBinary(key, timestamp, 0);
final Bytes cacheKey = cacheFunction.cacheKey(bytesKey);
if (internalContext.cache() == null) {
return wrapped().fetch(key, timestamp);
}
final LRUCacheEntry entry = internalContext.cache().get(cacheName, cacheKey);
if (entry == null) {
return wrapped().fetch(key, timestamp);
} else {
return entry.value();
}
}
@Override
public synchronized WindowStoreIterator<byte[]> fetch(final Bytes key,
final long timeFrom,
final long timeTo) {
// since this function may not access the underlying inner store, we need to validate
// if store is open outside as well.
validateStoreOpen();
final WindowStoreIterator<byte[]> underlyingIterator = wrapped().fetch(key, timeFrom, timeTo);
if (internalContext.cache() == null) {
return underlyingIterator;
}
final PeekingKeyValueIterator<Bytes, LRUCacheEntry> cacheIterator = wrapped().persistent() ?
new CacheIteratorWrapper(key, timeFrom, timeTo, true) :
internalContext.cache().range(
cacheName,
cacheFunction.cacheKey(keySchema.lowerRangeFixedSize(key, timeFrom)),
cacheFunction.cacheKey(keySchema.upperRangeFixedSize(key, timeTo))
);
final HasNextCondition hasNextCondition = keySchema.hasNextCondition(key, key, timeFrom, timeTo, true);
final PeekingKeyValueIterator<Bytes, LRUCacheEntry> filteredCacheIterator =
new FilteredCacheIterator(cacheIterator, hasNextCondition, cacheFunction);
return new MergedSortedCacheWindowStoreIterator(filteredCacheIterator, underlyingIterator, true);
}
@Override
public synchronized WindowStoreIterator<byte[]> backwardFetch(final Bytes key,
final long timeFrom,
final long timeTo) {
// since this function may not access the underlying inner store, we need to validate
// if store is open outside as well.
validateStoreOpen();
final WindowStoreIterator<byte[]> underlyingIterator = wrapped().backwardFetch(key, timeFrom, timeTo);
if (internalContext.cache() == null) {
return underlyingIterator;
}
final PeekingKeyValueIterator<Bytes, LRUCacheEntry> cacheIterator = wrapped().persistent() ?
new CacheIteratorWrapper(key, timeFrom, timeTo, false) :
internalContext.cache().reverseRange(
cacheName,
cacheFunction.cacheKey(keySchema.lowerRangeFixedSize(key, timeFrom)),
cacheFunction.cacheKey(keySchema.upperRangeFixedSize(key, timeTo))
);
final HasNextCondition hasNextCondition = keySchema.hasNextCondition(key, key, timeFrom, timeTo, false);
final PeekingKeyValueIterator<Bytes, LRUCacheEntry> filteredCacheIterator =
new FilteredCacheIterator(cacheIterator, hasNextCondition, cacheFunction);
return new MergedSortedCacheWindowStoreIterator(filteredCacheIterator, underlyingIterator, false);
}
@Override
public KeyValueIterator<Windowed<Bytes>, byte[]> fetch(final Bytes keyFrom,
final Bytes keyTo,
final long timeFrom,
final long timeTo) {
if (keyFrom != null && keyTo != null && keyFrom.compareTo(keyTo) > 0) {
LOG.warn("Returning empty iterator for fetch with invalid key range: from > to. " +
"This may be due to range arguments set in the wrong order, " +
"or serdes that don't preserve ordering when lexicographically comparing the serialized bytes. " +
"Note that the built-in numerical serdes do not follow this for negative numbers");
return KeyValueIterators.emptyIterator();
}
// since this function may not access the underlying inner store, we need to validate
// if store is open outside as well.
validateStoreOpen();
final KeyValueIterator<Windowed<Bytes>, byte[]> underlyingIterator =
wrapped().fetch(keyFrom, keyTo, timeFrom, timeTo);
if (internalContext.cache() == null) {
return underlyingIterator;
}
final PeekingKeyValueIterator<Bytes, LRUCacheEntry> cacheIterator = wrapped().persistent() ?
new CacheIteratorWrapper(keyFrom, keyTo, timeFrom, timeTo, true) :
internalContext.cache().range(
cacheName,
keyFrom == null ? null : cacheFunction.cacheKey(keySchema.lowerRange(keyFrom, timeFrom)),
keyTo == null ? null : cacheFunction.cacheKey(keySchema.upperRange(keyTo, timeTo))
);
final HasNextCondition hasNextCondition = keySchema.hasNextCondition(keyFrom, keyTo, timeFrom, timeTo, true);
final PeekingKeyValueIterator<Bytes, LRUCacheEntry> filteredCacheIterator =
new FilteredCacheIterator(cacheIterator, hasNextCondition, cacheFunction);
return new MergedSortedCacheWindowStoreKeyValueIterator(
filteredCacheIterator,
underlyingIterator,
bytesSerdes,
windowSize,
cacheFunction,
true
);
}
@Override
public KeyValueIterator<Windowed<Bytes>, byte[]> backwardFetch(final Bytes keyFrom,
final Bytes keyTo,
final long timeFrom,
final long timeTo) {
if (keyFrom != null && keyTo != null && keyFrom.compareTo(keyTo) > 0) {
LOG.warn("Returning empty iterator for fetch with invalid key range: from > to. "
+ "This may be due to serdes that don't preserve ordering when lexicographically comparing the serialized bytes. " +
"Note that the built-in numerical serdes do not follow this for negative numbers");
return KeyValueIterators.emptyIterator();
}
// since this function may not access the underlying inner store, we need to validate
// if store is open outside as well.
validateStoreOpen();
final KeyValueIterator<Windowed<Bytes>, byte[]> underlyingIterator =
wrapped().backwardFetch(keyFrom, keyTo, timeFrom, timeTo);
if (internalContext.cache() == null) {
return underlyingIterator;
}
final PeekingKeyValueIterator<Bytes, LRUCacheEntry> cacheIterator = wrapped().persistent() ?
new CacheIteratorWrapper(keyFrom, keyTo, timeFrom, timeTo, false) :
internalContext.cache().reverseRange(
cacheName,
keyFrom == null ? null : cacheFunction.cacheKey(keySchema.lowerRange(keyFrom, timeFrom)),
keyTo == null ? null : cacheFunction.cacheKey(keySchema.upperRange(keyTo, timeTo))
);
final HasNextCondition hasNextCondition = keySchema.hasNextCondition(keyFrom, keyTo, timeFrom, timeTo, false);
final PeekingKeyValueIterator<Bytes, LRUCacheEntry> filteredCacheIterator =
new FilteredCacheIterator(cacheIterator, hasNextCondition, cacheFunction);
return new MergedSortedCacheWindowStoreKeyValueIterator(
filteredCacheIterator,
underlyingIterator,
bytesSerdes,
windowSize,
cacheFunction,
false
);
}
@Override
public KeyValueIterator<Windowed<Bytes>, byte[]> fetchAll(final long timeFrom,
final long timeTo) {
validateStoreOpen();
final KeyValueIterator<Windowed<Bytes>, byte[]> underlyingIterator = wrapped().fetchAll(timeFrom, timeTo);
final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = internalContext.cache().all(cacheName);
final HasNextCondition hasNextCondition = keySchema.hasNextCondition(null, null, timeFrom, timeTo, true);
final PeekingKeyValueIterator<Bytes, LRUCacheEntry> filteredCacheIterator =
new FilteredCacheIterator(cacheIterator, hasNextCondition, cacheFunction);
return new MergedSortedCacheWindowStoreKeyValueIterator(
filteredCacheIterator,
underlyingIterator,
bytesSerdes,
windowSize,
cacheFunction,
true
);
}
@Override
public KeyValueIterator<Windowed<Bytes>, byte[]> backwardFetchAll(final long timeFrom,
final long timeTo) {
validateStoreOpen();
final KeyValueIterator<Windowed<Bytes>, byte[]> underlyingIterator = wrapped().backwardFetchAll(timeFrom, timeTo);
final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = internalContext.cache().reverseAll(cacheName);
final HasNextCondition hasNextCondition = keySchema.hasNextCondition(null, null, timeFrom, timeTo, false);
final PeekingKeyValueIterator<Bytes, LRUCacheEntry> filteredCacheIterator =
new FilteredCacheIterator(cacheIterator, hasNextCondition, cacheFunction);
return new MergedSortedCacheWindowStoreKeyValueIterator(
filteredCacheIterator,
underlyingIterator,
bytesSerdes,
windowSize,
cacheFunction,
false
);
}
@Override
public KeyValueIterator<Windowed<Bytes>, byte[]> all() {
validateStoreOpen();
final KeyValueIterator<Windowed<Bytes>, byte[]> underlyingIterator = wrapped().all();
final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = internalContext.cache().all(cacheName);
return new MergedSortedCacheWindowStoreKeyValueIterator(
cacheIterator,
underlyingIterator,
bytesSerdes,
windowSize,
cacheFunction,
true
);
}
@Override
public KeyValueIterator<Windowed<Bytes>, byte[]> backwardAll() {
validateStoreOpen();
final KeyValueIterator<Windowed<Bytes>, byte[]> underlyingIterator = wrapped().backwardAll();
final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = internalContext.cache().reverseAll(cacheName);
return new MergedSortedCacheWindowStoreKeyValueIterator(
cacheIterator,
underlyingIterator,
bytesSerdes,
windowSize,
cacheFunction,
false
);
}
@Override
public synchronized void flush() {
internalContext.cache().flush(cacheName);
wrapped().flush();
}
@Override
public void flushCache() {
internalContext.cache().flush(cacheName);
}
@Override
public void clearCache() {
internalContext.cache().clear(cacheName);
}
@Override
public synchronized void close() {
final LinkedList<RuntimeException> suppressed = executeAll(
() -> internalContext.cache().flush(cacheName),
() -> internalContext.cache().close(cacheName),
wrapped()::close
);
if (!suppressed.isEmpty()) {
throwSuppressed("Caught an exception while closing caching window store for store " + name(),
suppressed);
}
}
private | CachingWindowStore |
java | junit-team__junit5 | junit-platform-console/src/main/java/org/junit/platform/console/options/SelectorConverter.java | {
"start": 2443,
"end": 2795
} | class ____ implements ITypeConverter<FileSelector> {
@Override
public FileSelector convert(String value) {
URI uri = URI.create(value);
String path = ResourceUtils.stripQueryComponent(uri).getPath();
FilePosition filePosition = FilePosition.fromQuery(uri.getQuery()).orElse(null);
return selectFile(path, filePosition);
}
}
static | File |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java | {
"start": 34227,
"end": 35779
} | class ____ be closed and the event dispatcher will be shutdown
* after this.
*
* @throws Exception error occur.
*/
protected abstract void closeInternal() throws Exception;
/**
* 1) Versioning scheme: major.minor. For e.g. 1.0, 1.1, 1.2...1.25, 2.0 etc.
* 2) Any incompatible change of state-store is a major upgrade, and any
* compatible change of state-store is a minor upgrade.
* 3) If theres's no version, treat it as CURRENT_VERSION_INFO.
* 4) Within a minor upgrade, say 1.1 to 1.2:
* overwrite the version info and proceed as normal.
* 5) Within a major upgrade, say 1.2 to 2.0:
* throw exception and indicate user to use a separate upgrade tool to
* upgrade RM state.
*
* @throws Exception error occur.
*/
public void checkVersion() throws Exception {
Version loadedVersion = loadVersion();
LOG.info("Loaded RM state version info " + loadedVersion);
if (loadedVersion != null && loadedVersion.equals(getCurrentVersion())) {
return;
}
// if there is no version info, treat it as CURRENT_VERSION_INFO;
if (loadedVersion == null) {
loadedVersion = getCurrentVersion();
}
if (loadedVersion.isCompatibleTo(getCurrentVersion())) {
LOG.info("Storing RM state version info " + getCurrentVersion());
storeVersion();
} else {
throw new RMStateVersionIncompatibleException(
"Expecting RM state version " + getCurrentVersion()
+ ", but loading version " + loadedVersion);
}
}
/**
* Derived | will |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/ext/sql/JavaSqlTimeSerializer.java | {
"start": 513,
"end": 1134
} | class ____
extends StdScalarSerializer<java.sql.Time>
{
public final static JavaSqlTimeSerializer instance = new JavaSqlTimeSerializer();
public JavaSqlTimeSerializer() { super(java.sql.Time.class); }
@Override
public void serialize(java.sql.Time value, JsonGenerator g, SerializationContext provider)
throws JacksonException
{
g.writeString(value.toString());
}
@Override
public void acceptJsonFormatVisitor(JsonFormatVisitorWrapper visitor, JavaType typeHint)
{
visitStringFormat(visitor, typeHint, JsonValueFormat.DATE_TIME);
}
}
| JavaSqlTimeSerializer |
java | spring-projects__spring-security | config/src/main/java/org/springframework/security/config/annotation/web/configurers/HeadersConfigurer.java | {
"start": 25527,
"end": 27244
} | class ____ {
private XFrameOptionsHeaderWriter writer;
private FrameOptionsConfig() {
enable();
}
/**
* Specify to DENY framing any content from this application.
* @return the {@link HeadersConfigurer} for additional customization.
*/
public HeadersConfigurer<H> deny() {
this.writer = new XFrameOptionsHeaderWriter(XFrameOptionsMode.DENY);
return HeadersConfigurer.this;
}
/**
* <p>
* Specify to allow any request that comes from the same origin to frame this
* application. For example, if the application was hosted on example.com, then
* example.com could frame the application, but evil.com could not frame the
* application.
* </p>
* @return the {@link HeadersConfigurer} for additional customization.
*/
public HeadersConfigurer<H> sameOrigin() {
this.writer = new XFrameOptionsHeaderWriter(XFrameOptionsMode.SAMEORIGIN);
return HeadersConfigurer.this;
}
/**
* Prevents the header from being added to the response.
* @return the {@link HeadersConfigurer} for additional configuration.
*/
public HeadersConfigurer<H> disable() {
this.writer = null;
return HeadersConfigurer.this;
}
/**
* Enables FrameOptionsConfig if it is not already enabled.
* @return the FrameOptionsConfig for additional customization.
*/
private FrameOptionsConfig enable() {
if (this.writer == null) {
this.writer = new XFrameOptionsHeaderWriter(XFrameOptionsMode.DENY);
}
return this;
}
}
/**
* @deprecated see <a href=
* "https://owasp.org/www-community/controls/Certificate_and_Public_Key_Pinning">Certificate
* and Public Key Pinning</a> for more context
*/
@Deprecated
public final | FrameOptionsConfig |
java | junit-team__junit5 | platform-tests/src/test/java/org/junit/platform/commons/support/AnnotationSupportTests.java | {
"start": 14546,
"end": 14770
} | class ____ {
@FieldMarker
static String staticField1 = "s1";
@FieldMarker
static String staticField2 = "s2";
@FieldMarker
String instanceField1 = "i1";
@FieldMarker
String instanceField2 = "i2";
}
}
| Fields |
java | apache__logging-log4j2 | log4j-jul/src/main/java/org/apache/logging/log4j/jul/LogManager.java | {
"start": 1857,
"end": 4244
} | class ____ extends java.util.logging.LogManager {
private static final org.apache.logging.log4j.Logger LOGGER = StatusLogger.getLogger();
private final AbstractLoggerAdapter loggerAdapter;
// Contains the set of logger names that are actively being requested using getLogger.
private final ThreadLocal<Set<String>> recursive = ThreadLocal.withInitial(HashSet::new);
public LogManager() {
AbstractLoggerAdapter adapter = null;
final String overrideAdaptorClassName =
PropertiesUtil.getProperties().getStringProperty(Constants.LOGGER_ADAPTOR_PROPERTY);
if (overrideAdaptorClassName != null) {
try {
LOGGER.info("Trying to use LoggerAdaptor [{}] specified by Log4j property.", overrideAdaptorClassName);
adapter = LoaderUtil.newCheckedInstanceOf(overrideAdaptorClassName, AbstractLoggerAdapter.class);
} catch (final Exception e) {
LOGGER.error("Specified LoggerAdapter [{}] is incompatible.", overrideAdaptorClassName, e);
}
}
if (adapter == null) {
// Use API by default
// See https://github.com/apache/logging-log4j2/issues/2353
adapter = new ApiLoggerAdapter();
}
loggerAdapter = adapter;
LOGGER.info("Registered Log4j as the java.util.logging.LogManager.");
}
@Override
public boolean addLogger(final Logger logger) {
// in order to prevent non-bridged loggers from being registered, we always return false to indicate that
// the named logger should be obtained through getLogger(name)
return false;
}
@Override
public Logger getLogger(final String name) {
LOGGER.trace("Call to LogManager.getLogger({})", name);
final Set<String> activeRequests = recursive.get();
if (activeRequests.add(name)) {
try {
return loggerAdapter.getLogger(name);
} finally {
activeRequests.remove(name);
}
}
LOGGER.warn("Recursive call to getLogger for {} ignored.", name);
return new NoOpLogger(name);
}
@Override
public Enumeration<String> getLoggerNames() {
return Collections.enumeration(
loggerAdapter.getLoggersInContext(loggerAdapter.getContext()).keySet());
}
}
| LogManager |
java | spring-projects__spring-framework | spring-tx/src/main/java/org/springframework/transaction/reactive/AbstractReactiveTransactionManager.java | {
"start": 3733,
"end": 45699
} | class ____
implements ReactiveTransactionManager, ConfigurableTransactionManager, Serializable {
// Logger available to subclasses; transient so the manager stays serializable.
protected transient Log logger = LogFactory.getLog(getClass());

// Callbacks notified around begin/commit/rollback of transactions managed here.
private Collection<TransactionExecutionListener> transactionExecutionListeners = new ArrayList<>();

/**
 * Register the listeners to notify on transaction lifecycle events.
 * <p>Note: the given collection is stored by reference, not copied.
 * @param listeners the listeners to use from now on
 */
@Override
public final void setTransactionExecutionListeners(Collection<TransactionExecutionListener> listeners) {
    this.transactionExecutionListeners = listeners;
}

/**
 * Return the currently registered transaction execution listeners
 * (the live collection, never {@code null}).
 */
@Override
public final Collection<TransactionExecutionListener> getTransactionExecutionListeners() {
    return this.transactionExecutionListeners;
}
//---------------------------------------------------------------------
// Implementation of ReactiveTransactionManager
//---------------------------------------------------------------------

/**
 * This implementation handles propagation behavior. Delegates to
 * {@code doGetTransaction}, {@code isExistingTransaction}
 * and {@code doBegin}.
 * @param definition the definition to apply, or {@code null} for defaults
 * @return a Mono emitting the started or joined transaction
 * @see #doGetTransaction
 * @see #isExistingTransaction
 * @see #doBegin
 */
@Override
public final Mono<ReactiveTransaction> getReactiveTransaction(@Nullable TransactionDefinition definition) {
    // Use defaults if no transaction definition given.
    TransactionDefinition def = (definition != null ? definition : TransactionDefinition.withDefaults());

    return TransactionSynchronizationManager.forCurrentTransaction().flatMap(synchronizationManager -> {
        Object transaction = doGetTransaction(synchronizationManager);

        // Cache debug flag to avoid repeated checks.
        boolean debugEnabled = logger.isDebugEnabled();

        if (isExistingTransaction(transaction)) {
            // Existing transaction found -> check propagation behavior to find out how to behave.
            return handleExistingTransaction(synchronizationManager, def, transaction, debugEnabled);
        }

        // Check definition settings for new transaction.
        if (def.getTimeout() < TransactionDefinition.TIMEOUT_DEFAULT) {
            return Mono.error(new InvalidTimeoutException("Invalid transaction timeout", def.getTimeout()));
        }

        // No existing transaction found -> check propagation behavior to find out how to proceed.
        if (def.getPropagationBehavior() == TransactionDefinition.PROPAGATION_MANDATORY) {
            return Mono.error(new IllegalTransactionStateException(
                    "No existing transaction found for transaction marked with propagation 'mandatory'"));
        }
        else if (def.getPropagationBehavior() == TransactionDefinition.PROPAGATION_REQUIRED ||
                def.getPropagationBehavior() == TransactionDefinition.PROPAGATION_REQUIRES_NEW ||
                def.getPropagationBehavior() == TransactionDefinition.PROPAGATION_NESTED) {
            // Start an actual new transaction in a fresh synchronization scope derived
            // from the current Reactor transaction context; any synchronizations that
            // were active without a transaction are suspended first.
            return TransactionContextManager.currentContext()
                    .map(TransactionSynchronizationManager::new)
                    .flatMap(nestedSynchronizationManager ->
                            suspend(nestedSynchronizationManager, null)
                                    .map(Optional::of)
                                    .defaultIfEmpty(Optional.empty())
                                    .flatMap(suspendedResources -> {
                                        if (debugEnabled) {
                                            logger.debug("Creating new transaction with name [" + def.getName() + "]: " + def);
                                        }
                                        return Mono.defer(() -> {
                                            GenericReactiveTransaction status = newReactiveTransaction(
                                                    nestedSynchronizationManager, def, transaction, true,
                                                    false, debugEnabled, suspendedResources.orElse(null));
                                            // Listener contract: beforeBegin, then doBegin, then afterBegin
                                            // with the begin exception on failure or null on success.
                                            this.transactionExecutionListeners.forEach(listener -> listener.beforeBegin(status));
                                            return doBegin(nestedSynchronizationManager, transaction, def)
                                                    .doOnSuccess(ignore -> prepareSynchronization(nestedSynchronizationManager, status, def))
                                                    .doOnError(ex -> this.transactionExecutionListeners.forEach(listener -> listener.afterBegin(status, ex)))
                                                    .thenReturn(status);
                                        }).doOnSuccess(status -> this.transactionExecutionListeners.forEach(listener -> listener.afterBegin(status, null)))
                                                .onErrorResume(ErrorPredicates.RUNTIME_OR_ERROR,
                                                        // Begin failed: restore whatever was suspended, then propagate.
                                                        ex -> resume(nestedSynchronizationManager, null, suspendedResources.orElse(null))
                                                                .then(Mono.error(ex)));
                                    }));
        }
        else {
            // Create "empty" transaction: no actual transaction, but potentially synchronization.
            if (def.getIsolationLevel() != TransactionDefinition.ISOLATION_DEFAULT && logger.isWarnEnabled()) {
                logger.warn("Custom isolation level specified but no actual transaction initiated; " +
                        "isolation level will effectively be ignored: " + def);
            }
            return Mono.just(prepareReactiveTransaction(synchronizationManager, def, null, true, debugEnabled, null));
        }
    });
}
/**
 * Create a ReactiveTransaction for an existing transaction, applying the
 * given definition's propagation behavior: NEVER fails fast, NOT_SUPPORTED
 * suspends, REQUIRES_NEW suspends and begins a fresh transaction, NESTED
 * begins within the current one, and everything else simply participates.
 */
private Mono<ReactiveTransaction> handleExistingTransaction(TransactionSynchronizationManager synchronizationManager,
        TransactionDefinition definition, Object transaction, boolean debugEnabled) {

    if (definition.getPropagationBehavior() == TransactionDefinition.PROPAGATION_NEVER) {
        return Mono.error(new IllegalTransactionStateException(
                "Existing transaction found for transaction marked with propagation 'never'"));
    }

    if (definition.getPropagationBehavior() == TransactionDefinition.PROPAGATION_NOT_SUPPORTED) {
        if (debugEnabled) {
            logger.debug("Suspending current transaction");
        }
        // Run non-transactionally: suspend, then build a status without a transaction,
        // carrying the suspended resources so completion can restore them.
        Mono<SuspendedResourcesHolder> suspend = suspend(synchronizationManager, transaction);
        return suspend.map(suspendedResources -> prepareReactiveTransaction(synchronizationManager,
                definition, null, false, debugEnabled, suspendedResources)) //
                .switchIfEmpty(Mono.fromSupplier(() -> prepareReactiveTransaction(synchronizationManager,
                        definition, null, false, debugEnabled, null)))
                .cast(ReactiveTransaction.class);
    }

    if (definition.getPropagationBehavior() == TransactionDefinition.PROPAGATION_REQUIRES_NEW) {
        if (debugEnabled) {
            logger.debug("Suspending current transaction, creating new transaction with name [" +
                    definition.getName() + "]");
        }
        Mono<SuspendedResourcesHolder> suspendedResources = suspend(synchronizationManager, transaction);
        return suspendedResources.flatMap(suspendedResourcesHolder -> {
            GenericReactiveTransaction status = newReactiveTransaction(synchronizationManager,
                    definition, transaction, true, false, debugEnabled, suspendedResourcesHolder);
            // Same listener contract as in getReactiveTransaction: beforeBegin,
            // doBegin, then afterBegin with the failure (if any) or null.
            this.transactionExecutionListeners.forEach(listener -> listener.beforeBegin(status));
            return doBegin(synchronizationManager, transaction, definition)
                    .doOnSuccess(ignore -> prepareSynchronization(synchronizationManager, status, definition))
                    .doOnError(ex -> this.transactionExecutionListeners.forEach(listener -> listener.afterBegin(status, ex)))
                    .thenReturn(status)
                    .doOnSuccess(ignore -> this.transactionExecutionListeners.forEach(listener -> listener.afterBegin(status, null)))
                    .onErrorResume(ErrorPredicates.RUNTIME_OR_ERROR, beginEx ->
                            resumeAfterBeginException(synchronizationManager, transaction, suspendedResourcesHolder, beginEx)
                                    .then(Mono.error(beginEx)));
        });
    }

    if (definition.getPropagationBehavior() == TransactionDefinition.PROPAGATION_NESTED) {
        if (debugEnabled) {
            logger.debug("Creating nested transaction with name [" + definition.getName() + "]");
        }
        // Nested transaction through nested begin and commit/rollback calls.
        // NOTE(review): unlike the REQUIRES_NEW branch above, no TransactionExecutionListener
        // callbacks are fired here - confirm this asymmetry is intended.
        GenericReactiveTransaction status = newReactiveTransaction(synchronizationManager,
                definition, transaction, true, true, debugEnabled, null);
        return doBegin(synchronizationManager, transaction, definition).doOnSuccess(ignore ->
                prepareSynchronization(synchronizationManager, status, definition)).thenReturn(status);
    }

    // PROPAGATION_REQUIRED, PROPAGATION_SUPPORTS, PROPAGATION_MANDATORY:
    // regular participation in existing transaction.
    if (debugEnabled) {
        logger.debug("Participating in existing transaction");
    }
    return Mono.just(prepareReactiveTransaction(
            synchronizationManager, definition, transaction, false, debugEnabled, null));
}
/**
 * Create a new ReactiveTransaction for the given arguments and immediately
 * initialize transaction synchronization for it, as appropriate.
 * @see #newReactiveTransaction
 * @see #prepareSynchronization
 */
private GenericReactiveTransaction prepareReactiveTransaction(
        TransactionSynchronizationManager synchronizationManager, TransactionDefinition definition,
        @Nullable Object transaction, boolean newTransaction, boolean debug, @Nullable Object suspendedResources) {

    GenericReactiveTransaction reactiveTransaction = newReactiveTransaction(synchronizationManager,
            definition, transaction, newTransaction, false, debug, suspendedResources);
    prepareSynchronization(synchronizationManager, reactiveTransaction, definition);
    return reactiveTransaction;
}
/**
 * Create a GenericReactiveTransaction instance for the given arguments.
 * A new synchronization scope is claimed only if none is active yet.
 */
private GenericReactiveTransaction newReactiveTransaction(
        TransactionSynchronizationManager synchronizationManager, TransactionDefinition definition,
        @Nullable Object transaction, boolean newTransaction, boolean nested, boolean debug,
        @Nullable Object suspendedResources) {

    boolean newSynchronization = !synchronizationManager.isSynchronizationActive();
    return new GenericReactiveTransaction(definition.getName(), transaction, newTransaction,
            newSynchronization, nested, definition.isReadOnly(), debug, suspendedResources);
}
/**
 * Initialize transaction synchronization for the given status, if this
 * status opened a new synchronization scope; no-op otherwise.
 */
private void prepareSynchronization(TransactionSynchronizationManager synchronizationManager,
        GenericReactiveTransaction status, TransactionDefinition definition) {

    if (!status.isNewSynchronization()) {
        // An outer scope already drives synchronization for this transaction.
        return;
    }
    synchronizationManager.setActualTransactionActive(status.hasTransaction());
    Integer isolationLevel = (definition.getIsolationLevel() != TransactionDefinition.ISOLATION_DEFAULT ?
            definition.getIsolationLevel() : null);
    synchronizationManager.setCurrentTransactionIsolationLevel(isolationLevel);
    synchronizationManager.setCurrentTransactionReadOnly(definition.isReadOnly());
    synchronizationManager.setCurrentTransactionName(definition.getName());
    synchronizationManager.initSynchronization();
}
/**
 * Suspend the given transaction. Suspends transaction synchronization first,
 * then delegates to the {@code doSuspend} template method.
 * @param synchronizationManager the synchronization manager bound to the current transaction
 * @param transaction the current transaction object
 * (or {@code null} to just suspend active synchronizations, if any)
 * @return an object that holds suspended resources
 * (or an empty Mono if neither transaction nor synchronization is active)
 * @see #doSuspend
 * @see #resume
 */
private Mono<SuspendedResourcesHolder> suspend(TransactionSynchronizationManager synchronizationManager,
        @Nullable Object transaction) {

    if (synchronizationManager.isSynchronizationActive()) {
        // Suspend all registered synchronizations first...
        Mono<List<TransactionSynchronization>> suspendedSynchronizations = doSuspendSynchronization(synchronizationManager);
        return suspendedSynchronizations.flatMap(synchronizations -> {
            // ...then suspend the actual transaction resources, if a transaction is present.
            Mono<Optional<Object>> suspendedResources = (transaction != null ?
                    doSuspend(synchronizationManager, transaction).map(Optional::of).defaultIfEmpty(Optional.empty()) :
                    Mono.just(Optional.empty()));
            return suspendedResources.map(it -> {
                // Capture and reset the per-transaction state so a fresh scope starts clean;
                // the captured values travel in the holder for later resume().
                String name = synchronizationManager.getCurrentTransactionName();
                synchronizationManager.setCurrentTransactionName(null);
                boolean readOnly = synchronizationManager.isCurrentTransactionReadOnly();
                synchronizationManager.setCurrentTransactionReadOnly(false);
                Integer isolationLevel = synchronizationManager.getCurrentTransactionIsolationLevel();
                synchronizationManager.setCurrentTransactionIsolationLevel(null);
                boolean wasActive = synchronizationManager.isActualTransactionActive();
                synchronizationManager.setActualTransactionActive(false);
                return new SuspendedResourcesHolder(
                        it.orElse(null), synchronizations, name, readOnly, isolationLevel, wasActive);
            }).onErrorResume(ErrorPredicates.RUNTIME_OR_ERROR,
                    // doSuspend failed: re-activate the synchronizations already suspended above.
                    ex -> doResumeSynchronization(synchronizationManager, synchronizations)
                            .cast(SuspendedResourcesHolder.class));
        });
    }
    else if (transaction != null) {
        // Transaction active but no synchronization active.
        Mono<Optional<Object>> suspendedResources =
                doSuspend(synchronizationManager, transaction).map(Optional::of).defaultIfEmpty(Optional.empty());
        return suspendedResources.map(it -> new SuspendedResourcesHolder(it.orElse(null)));
    }
    else {
        // Neither transaction nor synchronization active.
        return Mono.empty();
    }
}
/**
 * Resume the given transaction. Delegates to the {@code doResume}
 * template method first, then resuming transaction synchronization.
 * @param synchronizationManager the synchronization manager bound to the current transaction
 * @param transaction the current transaction object
 * @param resourcesHolder the object that holds suspended resources,
 * as returned by {@code suspend} (or {@code null} to just
 * resume synchronizations, if any)
 * @see #doResume
 * @see #suspend
 */
private Mono<Void> resume(TransactionSynchronizationManager synchronizationManager,
        @Nullable Object transaction, @Nullable SuspendedResourcesHolder resourcesHolder) {

    Mono<Void> resume = Mono.empty();
    if (resourcesHolder != null) {
        Object suspendedResources = resourcesHolder.suspendedResources;
        if (suspendedResources != null) {
            // Restore the actual transaction resources first...
            resume = doResume(synchronizationManager, transaction, suspendedResources);
        }
        List<TransactionSynchronization> suspendedSynchronizations = resourcesHolder.suspendedSynchronizations;
        if (suspendedSynchronizations != null) {
            // ...then restore the captured per-transaction state and
            // re-register the previously suspended synchronizations.
            synchronizationManager.setActualTransactionActive(resourcesHolder.wasActive);
            synchronizationManager.setCurrentTransactionIsolationLevel(resourcesHolder.isolationLevel);
            synchronizationManager.setCurrentTransactionReadOnly(resourcesHolder.readOnly);
            synchronizationManager.setCurrentTransactionName(resourcesHolder.name);
            return resume.then(doResumeSynchronization(synchronizationManager, suspendedSynchronizations));
        }
    }
    return resume;
}
/**
 * Resume the outer transaction after an inner transaction begin failed.
 * If the resume itself fails, the original begin exception is logged so
 * it is not silently lost behind the resume exception.
 */
private Mono<Void> resumeAfterBeginException(TransactionSynchronizationManager synchronizationManager,
        Object transaction, @Nullable SuspendedResourcesHolder suspendedResources, Throwable beginEx) {

    return resume(synchronizationManager, transaction, suspendedResources)
            .doOnError(ErrorPredicates.RUNTIME_OR_ERROR, ex -> logger.error(
                    "Inner transaction begin exception overridden by outer transaction resume exception", beginEx));
}
/**
 * Suspend all currently registered synchronizations and deactivate
 * transaction synchronization for the current transaction context.
 * @param synchronizationManager the synchronization manager bound to the current transaction
 * @return a Mono emitting the list of suspended TransactionSynchronization objects
 */
private Mono<List<TransactionSynchronization>> doSuspendSynchronization(
        TransactionSynchronizationManager synchronizationManager) {

    List<TransactionSynchronization> synchronizations = synchronizationManager.getSynchronizations();
    return Flux.fromIterable(synchronizations)
            .concatMap(TransactionSynchronization::suspend)
            .then(Mono.defer(() -> {
                // Only clear once every synchronization has completed its suspend step.
                synchronizationManager.clearSynchronization();
                return Mono.just(synchronizations);
            }));
}
/**
 * Reactivate transaction synchronization for the current transaction context
 * and resume all given synchronizations, re-registering each one as soon as
 * its own resume step has succeeded.
 * @param synchronizationManager the synchronization manager bound to the current transaction
 * @param suspendedSynchronizations a List of TransactionSynchronization objects
 */
private Mono<Void> doResumeSynchronization(TransactionSynchronizationManager synchronizationManager,
        List<TransactionSynchronization> suspendedSynchronizations) {

    synchronizationManager.initSynchronization();
    return Flux.fromIterable(suspendedSynchronizations)
            .concatMap(synchronization ->
                    synchronization.resume().doOnSuccess(unused ->
                            synchronizationManager.registerSynchronization(synchronization)))
            .then();
}
/**
 * This implementation of commit handles participating in existing
 * transactions and programmatic rollback requests.
 * Delegates to {@code isRollbackOnly}, {@code doCommit}
 * and {@code rollback}.
 * @see ReactiveTransaction#isRollbackOnly()
 * @see #doCommit
 * @see #rollback
 */
@Override
public final Mono<Void> commit(ReactiveTransaction transaction) {
    if (transaction.isCompleted()) {
        return Mono.error(new IllegalTransactionStateException(
                "Transaction is already completed - do not call commit or rollback more than once per transaction"));
    }

    return TransactionSynchronizationManager.forCurrentTransaction().flatMap(synchronizationManager -> {
        GenericReactiveTransaction reactiveTx = (GenericReactiveTransaction) transaction;
        if (!reactiveTx.isRollbackOnly()) {
            return processCommit(synchronizationManager, reactiveTx);
        }
        // Rollback-only was set programmatically: honor it instead of committing.
        if (reactiveTx.isDebug()) {
            logger.debug("Transactional code has requested rollback");
        }
        return processRollback(synchronizationManager, reactiveTx);
    });
}
/**
 * Process an actual commit.
 * Rollback-only flags have already been checked and applied.
 * @param synchronizationManager the synchronization manager bound to the current transaction
 * @param status object representing the transaction
 */
private Mono<Void> processCommit(TransactionSynchronizationManager synchronizationManager,
        GenericReactiveTransaction status) {

    // Tracks whether the beforeCompletion callbacks already ran, so the error
    // path does not invoke them a second time.
    AtomicBoolean beforeCompletionInvoked = new AtomicBoolean();

    Mono<Void> commit = prepareForCommit(synchronizationManager, status)
            .then(triggerBeforeCommit(synchronizationManager, status))
            .then(triggerBeforeCompletion(synchronizationManager, status))
            .then(Mono.defer(() -> {
                beforeCompletionInvoked.set(true);
                // Only the outermost (new) transaction performs the actual commit;
                // participants defer to the outer scope.
                if (status.isNewTransaction()) {
                    if (status.isDebug()) {
                        logger.debug("Initiating transaction commit");
                    }
                    this.transactionExecutionListeners.forEach(listener -> listener.beforeCommit(status));
                    return doCommit(synchronizationManager, status);
                }
                return Mono.empty();
            }))
            .onErrorResume(ex -> {
                Mono<Void> propagateException = Mono.error(ex);
                // Store result in a local variable in order to appease the
                // Eclipse compiler with regard to inferred generics.
                Mono<Void> result = propagateException;
                if (ErrorPredicates.UNEXPECTED_ROLLBACK.test(ex)) {
                    // Commit turned into a rollback (e.g. global rollback-only marker).
                    result = triggerAfterCompletion(synchronizationManager, status, TransactionSynchronization.STATUS_ROLLED_BACK)
                            .then(Mono.defer(() -> {
                                if (status.isNewTransaction()) {
                                    this.transactionExecutionListeners.forEach(listener -> listener.afterRollback(status, null));
                                }
                                return propagateException;
                            }));
                }
                else if (ErrorPredicates.TRANSACTION_EXCEPTION.test(ex)) {
                    // Commit outcome is unknown (infrastructure failure during commit).
                    result = triggerAfterCompletion(synchronizationManager, status, TransactionSynchronization.STATUS_UNKNOWN)
                            .then(Mono.defer(() -> {
                                if (status.isNewTransaction()) {
                                    this.transactionExecutionListeners.forEach(listener -> listener.afterCommit(status, ex));
                                }
                                return propagateException;
                            }));
                }
                else if (ErrorPredicates.RUNTIME_OR_ERROR.test(ex)) {
                    // Application-level failure before/during commit: roll back instead.
                    Mono<Void> mono = Mono.empty();
                    if (!beforeCompletionInvoked.get()) {
                        mono = triggerBeforeCompletion(synchronizationManager, status);
                    }
                    result = mono.then(doRollbackOnCommitException(synchronizationManager, status, ex))
                            .then(propagateException);
                }
                return result;
            })
            .then(Mono.defer(() -> triggerAfterCommit(synchronizationManager, status).onErrorResume(ex ->
                    triggerAfterCompletion(synchronizationManager, status, TransactionSynchronization.STATUS_COMMITTED).then(Mono.error(ex)))
                    .then(triggerAfterCompletion(synchronizationManager, status, TransactionSynchronization.STATUS_COMMITTED))
                    .then(Mono.defer(() -> {
                        if (status.isNewTransaction()) {
                            this.transactionExecutionListeners.forEach(listener -> listener.afterCommit(status, null));
                        }
                        return Mono.empty();
                    }))));

    // Cleanup runs on both the success and the failure path.
    return commit
            .onErrorResume(ex -> cleanupAfterCompletion(synchronizationManager, status).then(Mono.error(ex)))
            .then(cleanupAfterCompletion(synchronizationManager, status));
}
/**
 * This implementation of rollback handles participating in existing transactions.
 * Delegates to {@code doRollback} and {@code doSetRollbackOnly}.
 * @see #doRollback
 * @see #doSetRollbackOnly
 */
@Override
public final Mono<Void> rollback(ReactiveTransaction transaction) {
    if (transaction.isCompleted()) {
        return Mono.error(new IllegalTransactionStateException(
                "Transaction is already completed - do not call commit or rollback more than once per transaction"));
    }
    return TransactionSynchronizationManager.forCurrentTransaction().flatMap(synchronizationManager ->
            processRollback(synchronizationManager, (GenericReactiveTransaction) transaction));
}
/**
 * Process an actual rollback.
 * The completed flag has already been checked.
 * @param synchronizationManager the synchronization manager bound to the current transaction
 * @param status object representing the transaction
 */
private Mono<Void> processRollback(TransactionSynchronizationManager synchronizationManager,
        GenericReactiveTransaction status) {

    return triggerBeforeCompletion(synchronizationManager, status).then(Mono.defer(() -> {
        // Only the outermost (new) transaction performs the actual rollback.
        if (status.isNewTransaction()) {
            if (status.isDebug()) {
                logger.debug("Initiating transaction rollback");
            }
            this.transactionExecutionListeners.forEach(listener -> listener.beforeRollback(status));
            return doRollback(synchronizationManager, status);
        }
        else {
            Mono<Void> beforeCompletion = Mono.empty();
            // Participating in larger transaction
            if (status.hasTransaction()) {
                if (status.isDebug()) {
                    logger.debug("Participating transaction failed - marking existing transaction as rollback-only");
                }
                // Defer the actual rollback to the outer transaction's completion.
                beforeCompletion = doSetRollbackOnly(synchronizationManager, status);
            }
            else {
                logger.debug("Should roll back transaction but cannot - no transaction available");
            }
            return beforeCompletion;
        }
    })).onErrorResume(ErrorPredicates.RUNTIME_OR_ERROR, ex ->
            // Rollback itself failed: completion status is unknown.
            triggerAfterCompletion(synchronizationManager, status, TransactionSynchronization.STATUS_UNKNOWN)
                    .then(Mono.defer(() -> {
                        if (status.isNewTransaction()) {
                            this.transactionExecutionListeners.forEach(listener -> listener.afterRollback(status, ex));
                        }
                        return Mono.empty();
                    }))
                    .then(Mono.error(ex)))
            .then(Mono.defer(() -> triggerAfterCompletion(synchronizationManager, status, TransactionSynchronization.STATUS_ROLLED_BACK)))
            .then(Mono.defer(() -> {
                if (status.isNewTransaction()) {
                    this.transactionExecutionListeners.forEach(listener -> listener.afterRollback(status, null));
                }
                return Mono.empty();
            }))
            // Cleanup runs on both the success and the failure path.
            .onErrorResume(ex -> cleanupAfterCompletion(synchronizationManager, status).then(Mono.error(ex)))
            .then(cleanupAfterCompletion(synchronizationManager, status));
}
/**
 * Invoke {@code doRollback}, handling rollback exceptions properly.
 * @param synchronizationManager the synchronization manager bound to the current transaction
 * @param status object representing the transaction
 * @param ex the thrown application exception or error that triggered the rollback
 * @see #doRollback
 */
private Mono<Void> doRollbackOnCommitException(TransactionSynchronizationManager synchronizationManager,
        GenericReactiveTransaction status, Throwable ex) {

    return Mono.defer(() -> {
        if (status.isNewTransaction()) {
            if (status.isDebug()) {
                logger.debug("Initiating transaction rollback after commit exception", ex);
            }
            return doRollback(synchronizationManager, status);
        }
        else if (status.hasTransaction()) {
            if (status.isDebug()) {
                logger.debug("Marking existing transaction as rollback-only after commit exception", ex);
            }
            return doSetRollbackOnly(synchronizationManager, status);
        }
        return Mono.empty();
    }).onErrorResume(ErrorPredicates.RUNTIME_OR_ERROR, rbex -> {
        // Rollback failed too: log the original commit exception so it is not lost,
        // then propagate the rollback exception with completion status "unknown".
        logger.error("Commit exception overridden by rollback exception", ex);
        return triggerAfterCompletion(synchronizationManager, status, TransactionSynchronization.STATUS_UNKNOWN)
                .then(Mono.defer(() -> {
                    this.transactionExecutionListeners.forEach(listener -> listener.afterRollback(status, rbex));
                    return Mono.empty();
                }))
                .then(Mono.error(rbex));
    }).then(Mono.defer(() -> triggerAfterCompletion(synchronizationManager, status, TransactionSynchronization.STATUS_ROLLED_BACK)))
            .then(Mono.defer(() -> {
                this.transactionExecutionListeners.forEach(listener -> listener.afterRollback(status, null));
                return Mono.empty();
            }));
}
/**
 * Trigger {@code beforeCommit} callbacks on all registered synchronizations,
 * if this status owns the synchronization scope.
 * @param synchronizationManager the synchronization manager bound to the current transaction
 * @param status object representing the transaction
 */
private Mono<Void> triggerBeforeCommit(TransactionSynchronizationManager synchronizationManager,
        GenericReactiveTransaction status) {

    if (!status.isNewSynchronization()) {
        return Mono.empty();
    }
    return TransactionSynchronizationUtils.triggerBeforeCommit(
            synchronizationManager.getSynchronizations(), status.isReadOnly());
}
/**
 * Trigger {@code beforeCompletion} callbacks on all registered synchronizations,
 * if this status owns the synchronization scope.
 * @param synchronizationManager the synchronization manager bound to the current transaction
 * @param status object representing the transaction
 */
private Mono<Void> triggerBeforeCompletion(TransactionSynchronizationManager synchronizationManager,
        GenericReactiveTransaction status) {

    if (!status.isNewSynchronization()) {
        return Mono.empty();
    }
    return TransactionSynchronizationUtils.triggerBeforeCompletion(synchronizationManager.getSynchronizations());
}
/**
 * Trigger {@code afterCommit} callbacks on all registered synchronizations,
 * if this status owns the synchronization scope.
 * @param synchronizationManager the synchronization manager bound to the current transaction
 * @param status object representing the transaction
 */
private Mono<Void> triggerAfterCommit(TransactionSynchronizationManager synchronizationManager,
        GenericReactiveTransaction status) {

    if (!status.isNewSynchronization()) {
        return Mono.empty();
    }
    return TransactionSynchronizationUtils.invokeAfterCommit(synchronizationManager.getSynchronizations());
}
/**
 * Trigger {@code afterCompletion} callbacks, if this status owns the
 * synchronization scope.
 * @param synchronizationManager the synchronization manager bound to the current transaction
 * @param status object representing the transaction
 * @param completionStatus completion status according to TransactionSynchronization constants
 */
private Mono<Void> triggerAfterCompletion(TransactionSynchronizationManager synchronizationManager,
        GenericReactiveTransaction status, int completionStatus) {

    if (!status.isNewSynchronization()) {
        return Mono.empty();
    }
    List<TransactionSynchronization> synchronizations = synchronizationManager.getSynchronizations();
    synchronizationManager.clearSynchronization();
    if (!status.hasTransaction() || status.isNewTransaction()) {
        // No transaction or new transaction for the current scope ->
        // invoke the afterCompletion callbacks immediately
        return invokeAfterCompletion(synchronizationManager, synchronizations, completionStatus);
    }
    if (!synchronizations.isEmpty()) {
        // Existing transaction that we participate in, controlled outside
        // the scope of this Spring transaction manager -> try to register
        // an afterCompletion callback with the existing (JTA) transaction.
        return registerAfterCompletionWithExistingTransaction(
                synchronizationManager, status.getTransaction(), synchronizations);
    }
    return Mono.empty();
}
/**
 * Actually invoke the {@code afterCompletion} methods of the
 * given TransactionSynchronization objects.
 * <p>To be called by this abstract manager itself, or by special implementations
 * of the {@code registerAfterCompletionWithExistingTransaction} callback.
 * @param synchronizationManager the synchronization manager bound to the current transaction
 * @param synchronizations a List of TransactionSynchronization objects
 * @param completionStatus the completion status according to the
 * constants in the TransactionSynchronization interface
 * @see #registerAfterCompletionWithExistingTransaction(TransactionSynchronizationManager, Object, List)
 * @see TransactionSynchronization#STATUS_COMMITTED
 * @see TransactionSynchronization#STATUS_ROLLED_BACK
 * @see TransactionSynchronization#STATUS_UNKNOWN
 */
private Mono<Void> invokeAfterCompletion(TransactionSynchronizationManager synchronizationManager,
        List<TransactionSynchronization> synchronizations, int completionStatus) {

    // Pure delegation; kept as a hook point for the registerAfterCompletion... contract.
    return TransactionSynchronizationUtils.invokeAfterCompletion(synchronizations, completionStatus);
}
/**
 * Clean up after completion, clearing synchronization if necessary,
 * and invoking doCleanupAfterCompletion.
 * @param synchronizationManager the synchronization manager bound to the current transaction
 * @param status object representing the transaction
 * @see #doCleanupAfterCompletion
 */
private Mono<Void> cleanupAfterCompletion(TransactionSynchronizationManager synchronizationManager,
        GenericReactiveTransaction status) {

    return Mono.defer(() -> {
        // Mark completed first so any further commit/rollback call fails fast.
        status.setCompleted();
        if (status.isNewSynchronization()) {
            synchronizationManager.clear();
        }
        Mono<Void> cleanup = Mono.empty();
        if (status.isNewTransaction()) {
            cleanup = doCleanupAfterCompletion(synchronizationManager, status.getTransaction());
        }
        if (status.getSuspendedResources() != null) {
            if (status.isDebug()) {
                logger.debug("Resuming suspended transaction after completion of inner transaction");
            }
            // Resume the outer transaction that was suspended for this inner one.
            Object transaction = (status.hasTransaction() ? status.getTransaction() : null);
            return cleanup.then(resume(synchronizationManager, transaction,
                    (SuspendedResourcesHolder) status.getSuspendedResources()));
        }
        return cleanup;
    });
}
//---------------------------------------------------------------------
// Template methods to be implemented in subclasses
//---------------------------------------------------------------------

/**
 * Return a transaction object for the current transaction state.
 * <p>The returned object will usually be specific to the concrete transaction
 * manager implementation, carrying corresponding transaction state in a
 * modifiable fashion. This object will be passed into the other template
 * methods (for example, doBegin and doCommit), either directly or as part of a
 * DefaultReactiveTransactionStatus instance.
 * <p>The returned object should contain information about any existing
 * transaction, that is, a transaction that has already started before the
 * current {@code getTransaction} call on the transaction manager.
 * Consequently, a {@code doGetTransaction} implementation will usually
 * look for an existing transaction and store corresponding state in the
 * returned transaction object.
 * @param synchronizationManager the synchronization manager bound to the current transaction
 * @return the current transaction object (never {@code null})
 * @throws org.springframework.transaction.CannotCreateTransactionException
 * if transaction support is not available
 * @see #doBegin
 * @see #doCommit
 * @see #doRollback
 * @see GenericReactiveTransaction#getTransaction
 */
protected abstract Object doGetTransaction(TransactionSynchronizationManager synchronizationManager);
/**
 * Check if the given transaction object indicates an existing transaction
 * (that is, a transaction which has already started).
 * <p>The result will be evaluated according to the specified propagation
 * behavior for the new transaction. An existing transaction might get
 * suspended (in case of PROPAGATION_REQUIRES_NEW), or the new transaction
 * might participate in the existing one (in case of PROPAGATION_REQUIRED).
 * <p>The default implementation returns {@code false}, assuming that
 * participating in existing transactions is generally not supported.
 * Subclasses are of course encouraged to provide such support.
 * @param transaction the transaction object returned by doGetTransaction
 * @return if there is an existing transaction
 * @see #doGetTransaction
 */
protected boolean isExistingTransaction(Object transaction) {
    return false;
}
/**
 * Begin a new transaction with semantics according to the given transaction
 * definition. Does not have to care about applying the propagation behavior,
 * as this has already been handled by this abstract manager.
 * <p>This method gets called when the transaction manager has decided to actually
 * start a new transaction. Either there wasn't any transaction before, or the
 * previous transaction has been suspended.
 * <p>A special scenario is a nested transaction: This method will be called to
 * start a nested transaction when necessary. In such a context, there will be an
 * active transaction: The implementation of this method has to detect this and
 * start an appropriate nested transaction.
 * @param synchronizationManager the synchronization manager bound to the new transaction
 * @param transaction the transaction object returned by {@code doGetTransaction}
 * @param definition a TransactionDefinition instance, describing propagation
 * behavior, isolation level, read-only flag, timeout, and transaction name
 * @return a Mono signaling completion of the begin step
 * @throws org.springframework.transaction.NestedTransactionNotSupportedException
 * if the underlying transaction does not support nesting (for example, through savepoints)
 */
protected abstract Mono<Void> doBegin(TransactionSynchronizationManager synchronizationManager,
        Object transaction, TransactionDefinition definition);
/**
* Suspend the resources of the current transaction.
* Transaction synchronization will already have been suspended.
* <p>The default implementation throws a TransactionSuspensionNotSupportedException,
* assuming that transaction suspension is generally not supported.
* @param synchronizationManager the synchronization manager bound to the current transaction
* @param transaction the transaction object returned by {@code doGetTransaction}
* @return an object that holds suspended resources
* (will be kept unexamined for passing it into doResume)
* @throws org.springframework.transaction.TransactionSuspensionNotSupportedException
* if suspending is not supported by the transaction manager implementation
* @see #doResume
*/
protected Mono<Object> doSuspend(TransactionSynchronizationManager synchronizationManager,
Object transaction) {
throw new TransactionSuspensionNotSupportedException(
"Transaction manager [" + getClass().getName() + "] does not support transaction suspension");
}
/**
* Resume the resources of the current transaction.
* Transaction synchronization will be resumed afterwards.
* <p>The default implementation throws a TransactionSuspensionNotSupportedException,
* assuming that transaction suspension is generally not supported.
* @param synchronizationManager the synchronization manager bound to the current transaction
* @param transaction the transaction object returned by {@code doGetTransaction}
* @param suspendedResources the object that holds suspended resources,
* as returned by doSuspend
* @throws org.springframework.transaction.TransactionSuspensionNotSupportedException
* if suspending is not supported by the transaction manager implementation
* @see #doSuspend
*/
protected Mono<Void> doResume(TransactionSynchronizationManager synchronizationManager,
@Nullable Object transaction, Object suspendedResources) {
throw new TransactionSuspensionNotSupportedException(
"Transaction manager [" + getClass().getName() + "] does not support transaction suspension");
}
/**
* Make preparations for commit, to be performed before the
* {@code beforeCommit} synchronization callbacks occur.
* <p>Note that exceptions will get propagated to the commit caller
* and cause a rollback of the transaction.
* @param synchronizationManager the synchronization manager bound to the current transaction
* @param status the status representation of the transaction
* @throws RuntimeException in case of errors; will be <b>propagated to the caller</b>
* (note: do not throw TransactionException subclasses here!)
*/
protected Mono<Void> prepareForCommit(TransactionSynchronizationManager synchronizationManager,
GenericReactiveTransaction status) {
return Mono.empty();
}
/**
* Perform an actual commit of the given transaction.
* <p>An implementation does not need to check the "new transaction" flag
* or the rollback-only flag; this will already have been handled before.
* Usually, a straight commit will be performed on the transaction object
* contained in the passed-in status.
* @param synchronizationManager the synchronization manager bound to the current transaction
* @param status the status representation of the transaction
* @see GenericReactiveTransaction#getTransaction
*/
protected abstract Mono<Void> doCommit(TransactionSynchronizationManager synchronizationManager,
GenericReactiveTransaction status);
/**
* Perform an actual rollback of the given transaction.
* <p>An implementation does not need to check the "new transaction" flag;
* this will already have been handled before. Usually, a straight rollback
* will be performed on the transaction object contained in the passed-in status.
* @param synchronizationManager the synchronization manager bound to the current transaction
* @param status the status representation of the transaction
* @see GenericReactiveTransaction#getTransaction
*/
protected abstract Mono<Void> doRollback(TransactionSynchronizationManager synchronizationManager,
GenericReactiveTransaction status);
/**
* Set the given transaction rollback-only. Only called on rollback
* if the current transaction participates in an existing one.
* <p>The default implementation throws an IllegalTransactionStateException,
* assuming that participating in existing transactions is generally not
* supported. Subclasses are of course encouraged to provide such support.
* @param synchronizationManager the synchronization manager bound to the current transaction
* @param status the status representation of the transaction
*/
protected Mono<Void> doSetRollbackOnly(TransactionSynchronizationManager synchronizationManager,
GenericReactiveTransaction status) {
throw new IllegalTransactionStateException(
"Participating in existing transactions is not supported - when 'isExistingTransaction' " +
"returns true, appropriate 'doSetRollbackOnly' behavior must be provided");
}
/**
* Register the given list of transaction synchronizations with the existing transaction.
* <p>Invoked when the control of the Spring transaction manager and thus all Spring
* transaction synchronizations end, without the transaction being completed yet. This
* is for example the case when participating in an existing JTA or EJB CMT transaction.
* <p>The default implementation simply invokes the {@code afterCompletion} methods
* immediately, passing in "STATUS_UNKNOWN". This is the best we can do if there's no
* chance to determine the actual outcome of the outer transaction.
* @param synchronizationManager the synchronization manager bound to the current transaction
* @param transaction the transaction object returned by {@code doGetTransaction}
* @param synchronizations a List of TransactionSynchronization objects
* @see #invokeAfterCompletion(TransactionSynchronizationManager, List, int)
* @see TransactionSynchronization#afterCompletion(int)
* @see TransactionSynchronization#STATUS_UNKNOWN
*/
protected Mono<Void> registerAfterCompletionWithExistingTransaction(TransactionSynchronizationManager synchronizationManager,
Object transaction, List<TransactionSynchronization> synchronizations) {
logger.debug("Cannot register Spring after-completion synchronization with existing transaction - " +
"processing Spring after-completion callbacks immediately, with outcome status 'unknown'");
return invokeAfterCompletion(synchronizationManager, synchronizations, TransactionSynchronization.STATUS_UNKNOWN);
}
/**
* Cleanup resources after transaction completion.
* <p>Called after {@code doCommit} and {@code doRollback} execution,
* on any outcome. The default implementation does nothing.
* <p>Should not throw any exceptions but just issue warnings on errors.
* @param synchronizationManager the synchronization manager bound to the current transaction
* @param transaction the transaction object returned by {@code doGetTransaction}
*/
protected Mono<Void> doCleanupAfterCompletion(TransactionSynchronizationManager synchronizationManager,
Object transaction) {
return Mono.empty();
}
//---------------------------------------------------------------------
// Serialization support
//---------------------------------------------------------------------
private void readObject(ObjectInputStream ois) throws IOException, ClassNotFoundException {
// Rely on default serialization; just initialize state after deserialization.
ois.defaultReadObject();
// Initialize transient fields.
this.logger = LogFactory.getLog(getClass());
}
/**
* Holder for suspended resources.
* Used internally by {@code suspend} and {@code resume}.
*/
protected static final | AbstractReactiveTransactionManager |
java | bumptech__glide | annotation/compiler/src/main/java/com/bumptech/glide/annotation/compiler/RequestManagerGenerator.java | {
"start": 1535,
"end": 1984
} | class ____ extends RequestManager {
* GeneratedRequestManager(Glide glide, Lifecycle lifecycle, RequestManagerTreeNode treeNode) {
* super(glide, lifecycle, treeNode);
* }
*
* public RequestBuilder<GifDrawable> asGif() {
* RequestBuilder<GifDrawable> requestBuilder = this.as(GifDrawable.class);
* GifOptions.asGif(requestBuilder);
* return requestBuilder;
* }
* }
* </code>
* </pre>
*/
final | GeneratedRequestManager |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocolPB.java | {
"start": 1258,
"end": 1360
} | interface ____ extends ApplicationClientProtocolService.BlockingInterface {
}
| ApplicationClientProtocolPB |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/impl/OriginalMessageProcessor.java | {
"start": 945,
"end": 1612
} | class ____ implements Processor {
@Override
public void process(Exchange exchange) {
Message original = null;
if (exchange.getContext().isAllowUseOriginalMessage()) {
original = exchange.getUnitOfWork().getOriginalInMessage();
}
if (original == null) {
exchange.getIn().setHeader("HasOriginal", "false");
} else {
exchange.getIn().setHeader("HasOriginal", "true");
exchange.getIn().setHeader("OriginalBody", original.getBody());
exchange.getIn().setHeader("OriginalExchangeId", original.getExchange().getExchangeId());
}
}
}
| OriginalMessageProcessor |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/StreamCachingXPathRouteTest.java | {
"start": 1011,
"end": 2199
} | class ____ extends ContextTestSupport {
@Test
public void testByteArrayInputStream() throws Exception {
getMockEndpoint("mock:english").expectedBodiesReceived("<hello/>");
getMockEndpoint("mock:dutch").expectedBodiesReceived("<hallo/>");
getMockEndpoint("mock:german").expectedBodiesReceived("<hallo/>");
getMockEndpoint("mock:french").expectedBodiesReceived("<hellos/>");
template.sendBody("direct:a", new ByteArrayInputStream("<hello/>".getBytes()));
template.sendBody("direct:a", new ByteArrayInputStream("<hallo/>".getBytes()));
template.sendBody("direct:a", new ByteArrayInputStream("<hellos/>".getBytes()));
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
context.setStreamCaching(true);
from("direct:a").choice().when(xpath("//hello")).to("mock:english").when(xpath("//hallo"))
.to("mock:dutch", "mock:german").otherwise().to("mock:french");
}
};
}
}
| StreamCachingXPathRouteTest |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/TestExecutionListenersTests.java | {
"start": 11856,
"end": 11983
} | interface ____ {
}
@TestExecutionListeners(QuuxTestExecutionListener.class)
@Retention(RetentionPolicy.RUNTIME)
@ | MetaListeners |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/OperatorChain.java | {
"start": 21261,
"end": 29763
} | class ____ {
private final WatermarkGaugeExposingOutput<StreamRecord<?>> chainedSourceOutput;
private final StreamTaskSourceInput<?> sourceTaskInput;
public ChainedSource(
WatermarkGaugeExposingOutput<StreamRecord<?>> chainedSourceOutput,
StreamTaskSourceInput<?> sourceTaskInput) {
this.chainedSourceOutput = chainedSourceOutput;
this.sourceTaskInput = sourceTaskInput;
}
public WatermarkGaugeExposingOutput<StreamRecord<?>> getSourceOutput() {
return chainedSourceOutput;
}
public StreamTaskSourceInput<?> getSourceTaskInput() {
return sourceTaskInput;
}
}
// ------------------------------------------------------------------------
// initialization utilities
// ------------------------------------------------------------------------
private void createChainOutputs(
List<NonChainedOutput> outputsInOrder,
RecordWriterDelegate<SerializationDelegate<StreamRecord<OUT>>> recordWriterDelegate,
Map<Integer, StreamConfig> chainedConfigs,
StreamTask<OUT, OP> containingTask,
Map<IntermediateDataSetID, RecordWriterOutput<?>> recordWriterOutputs) {
for (int i = 0; i < outputsInOrder.size(); ++i) {
NonChainedOutput output = outputsInOrder.get(i);
RecordWriterOutput<?> recordWriterOutput =
createStreamOutput(
recordWriterDelegate.getRecordWriter(i),
output,
chainedConfigs.get(output.getSourceNodeId()),
containingTask.getEnvironment());
this.streamOutputs[i] = recordWriterOutput;
recordWriterOutputs.put(output.getDataSetId(), recordWriterOutput);
}
}
private RecordWriterOutput<OUT> createStreamOutput(
RecordWriter<SerializationDelegate<StreamRecord<OUT>>> recordWriter,
NonChainedOutput streamOutput,
StreamConfig upStreamConfig,
Environment taskEnvironment) {
OutputTag sideOutputTag =
streamOutput.getOutputTag(); // OutputTag, return null if not sideOutput
TypeSerializer outSerializer;
if (streamOutput.getOutputTag() != null) {
// side output
outSerializer =
upStreamConfig.getTypeSerializerSideOut(
streamOutput.getOutputTag(),
taskEnvironment.getUserCodeClassLoader().asClassLoader());
} else {
// main output
outSerializer =
upStreamConfig.getTypeSerializerOut(
taskEnvironment.getUserCodeClassLoader().asClassLoader());
}
return closer.register(
new RecordWriterOutput<OUT>(
recordWriter,
outSerializer,
sideOutputTag,
streamOutput.supportsUnalignedCheckpoints()));
}
@SuppressWarnings("rawtypes")
private Map<StreamConfig.SourceInputConfig, ChainedSource> createChainedSources(
StreamTask<OUT, OP> containingTask,
StreamConfig.InputConfig[] configuredInputs,
Map<Integer, StreamConfig> chainedConfigs,
ClassLoader userCodeClassloader,
List<StreamOperatorWrapper<?, ?>> allOpWrappers) {
if (Arrays.stream(configuredInputs)
.noneMatch(input -> input instanceof StreamConfig.SourceInputConfig)) {
return Collections.emptyMap();
}
checkState(
mainOperatorWrapper.getStreamOperator() instanceof MultipleInputStreamOperator,
"Creating chained input is only supported with MultipleInputStreamOperator and MultipleInputStreamTask");
Map<StreamConfig.SourceInputConfig, ChainedSource> chainedSourceInputs = new HashMap<>();
MultipleInputStreamOperator<?> multipleInputOperator =
(MultipleInputStreamOperator<?>) mainOperatorWrapper.getStreamOperator();
List<Input> operatorInputs = multipleInputOperator.getInputs();
int sourceInputGateIndex =
Arrays.stream(containingTask.getEnvironment().getAllInputGates())
.mapToInt(IndexedInputGate::getInputGateIndex)
.max()
.orElse(-1)
+ 1;
for (int inputId = 0; inputId < configuredInputs.length; inputId++) {
if (!(configuredInputs[inputId] instanceof StreamConfig.SourceInputConfig)) {
continue;
}
StreamConfig.SourceInputConfig sourceInput =
(StreamConfig.SourceInputConfig) configuredInputs[inputId];
int sourceEdgeId = sourceInput.getInputEdge().getSourceId();
StreamConfig sourceInputConfig = chainedConfigs.get(sourceEdgeId);
OutputTag outputTag = sourceInput.getInputEdge().getOutputTag();
WatermarkGaugeExposingOutput chainedSourceOutput =
createChainedSourceOutput(
containingTask,
sourceInputConfig,
userCodeClassloader,
getFinishedOnRestoreInputOrDefault(operatorInputs.get(inputId)),
multipleInputOperator.getMetricGroup(),
outputTag);
SourceOperator<?, ?> sourceOperator =
(SourceOperator<?, ?>)
createOperator(
containingTask,
sourceInputConfig,
userCodeClassloader,
(WatermarkGaugeExposingOutput<StreamRecord<OUT>>)
chainedSourceOutput,
allOpWrappers,
true);
chainedSourceInputs.put(
sourceInput,
new ChainedSource(
chainedSourceOutput,
this.isTaskDeployedAsFinished()
? new StreamTaskFinishedOnRestoreSourceInput<>(
sourceOperator, sourceInputGateIndex++, inputId)
: new StreamTaskSourceInput<>(
sourceOperator, sourceInputGateIndex++, inputId)));
}
return chainedSourceInputs;
}
/**
* Get the numRecordsOut counter for the operator represented by the given config. And re-use
* the operator-level counter for the task-level numRecordsOut counter if this operator is at
* the end of the operator chain.
*
* <p>Return null if we should not use the numRecordsOut counter to track the records emitted by
* this operator.
*/
@Nullable
private Counter getOperatorRecordsOutCounter(
StreamTask<?, ?> containingTask, StreamConfig operatorConfig) {
ClassLoader userCodeClassloader = containingTask.getUserCodeClassLoader();
Class<StreamOperatorFactory<?>> streamOperatorFactoryClass =
operatorConfig.getStreamOperatorFactoryClass(userCodeClassloader);
// Do not use the numRecordsOut counter on output if this operator is SinkWriterOperator.
//
// Metric "numRecordsOut" is defined as the total number of records written to the
// external system in FLIP-33, but this metric is occupied in AbstractStreamOperator as the
// number of records sent to downstream operators, which is number of Committable batches
// sent to SinkCommitter. So we skip registering this metric on output and leave this metric
// to sink writer implementations to report.
try {
Class<?> sinkWriterFactoryClass =
userCodeClassloader.loadClass(SinkWriterOperatorFactory.class.getName());
if (sinkWriterFactoryClass.isAssignableFrom(streamOperatorFactoryClass)) {
return null;
}
} catch (ClassNotFoundException e) {
throw new StreamTaskException(
"Could not load SinkWriterOperatorFactory | ChainedSource |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/config/plugins/validation/validators/NotBlankValidator.java | {
"start": 1318,
"end": 1875
} | class ____ implements ConstraintValidator<NotBlank> {
private static final Logger LOGGER = StatusLogger.getLogger();
private NotBlank annotation;
@Override
public void initialize(final NotBlank anAnnotation) {
this.annotation = anAnnotation;
}
@Override
public boolean isValid(final String name, final Object value) {
return Strings.isNotBlank(name) || err(name);
}
private boolean err(final String name) {
LOGGER.error(annotation.message(), name);
return false;
}
}
| NotBlankValidator |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/DebeziumOracleComponentBuilderFactory.java | {
"start": 8824,
"end": 79164
} | class ____ is responsible for persistence of
* connector offsets.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: org.apache.kafka.connect.storage.FileOffsetBackingStore
* Group: consumer
*
* @param offsetStorage the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder offsetStorage(java.lang.String offsetStorage) {
doSetProperty("offsetStorage", offsetStorage);
return this;
}
/**
* Path to file where offsets are to be stored. Required when
* offset.storage is set to the FileOffsetBackingStore.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: consumer
*
* @param offsetStorageFileName the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder offsetStorageFileName(java.lang.String offsetStorageFileName) {
doSetProperty("offsetStorageFileName", offsetStorageFileName);
return this;
}
/**
* The number of partitions used when creating the offset storage topic.
* Required when offset.storage is set to the 'KafkaOffsetBackingStore'.
*
* The option is a: <code>int</code> type.
*
* Group: consumer
*
* @param offsetStoragePartitions the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder offsetStoragePartitions(int offsetStoragePartitions) {
doSetProperty("offsetStoragePartitions", offsetStoragePartitions);
return this;
}
/**
* Replication factor used when creating the offset storage topic.
* Required when offset.storage is set to the KafkaOffsetBackingStore.
*
* The option is a: <code>int</code> type.
*
* Group: consumer
*
* @param offsetStorageReplicationFactor the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder offsetStorageReplicationFactor(int offsetStorageReplicationFactor) {
doSetProperty("offsetStorageReplicationFactor", offsetStorageReplicationFactor);
return this;
}
/**
* The name of the Kafka topic where offsets are to be stored. Required
* when offset.storage is set to the KafkaOffsetBackingStore.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: consumer
*
* @param offsetStorageTopic the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder offsetStorageTopic(java.lang.String offsetStorageTopic) {
doSetProperty("offsetStorageTopic", offsetStorageTopic);
return this;
}
/**
* Whether autowiring is enabled. This is used for automatic autowiring
* options (the option must be marked as autowired) by looking up in the
* registry to find if there is a single instance of matching type,
* which then gets configured on the component. This can be used for
* automatic configuring JDBC data sources, JMS connection factories,
* AWS Clients, etc.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param autowiredEnabled the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder autowiredEnabled(boolean autowiredEnabled) {
doSetProperty("autowiredEnabled", autowiredEnabled);
return this;
}
/**
* Sets the specific archive log destination as the source for reading
* archive logs.When not set, the connector will automatically select
* the first LOCAL and VALID destination.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: oracle
*
* @param archiveDestinationName the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder archiveDestinationName(java.lang.String archiveDestinationName) {
doSetProperty("archiveDestinationName", archiveDestinationName);
return this;
}
/**
* The number of hours in the past from SYSDATE to mine archive logs.
* Using 0 mines all available archive logs.
*
* The option is a: <code>long</code> type.
*
* Group: oracle
*
* @param archiveLogHours the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder archiveLogHours(long archiveLogHours) {
doSetProperty("archiveLogHours", archiveLogHours);
return this;
}
/**
* Specify how binary (blob, binary, etc.) columns should be represented
* in change events, including: 'bytes' represents binary data as byte
* array (default); 'base64' represents binary data as base64-encoded
* string; 'base64-url-safe' represents binary data as
* base64-url-safe-encoded string; 'hex' represents binary data as
* hex-encoded (base16) string.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: bytes
* Group: oracle
*
* @param binaryHandlingMode the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder binaryHandlingMode(java.lang.String binaryHandlingMode) {
doSetProperty("binaryHandlingMode", binaryHandlingMode);
return this;
}
/**
* Regular expressions matching columns to exclude from change events.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: oracle
*
* @param columnExcludeList the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder columnExcludeList(java.lang.String columnExcludeList) {
doSetProperty("columnExcludeList", columnExcludeList);
return this;
}
/**
* Regular expressions matching columns to include in change events.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: oracle
*
* @param columnIncludeList the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder columnIncludeList(java.lang.String columnIncludeList) {
doSetProperty("columnIncludeList", columnIncludeList);
return this;
}
/**
* A comma-separated list of regular expressions matching
* fully-qualified names of columns that adds the columns original type
* and original length as parameters to the corresponding field schemas
* in the emitted change records.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: oracle
*
* @param columnPropagateSourceType the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder columnPropagateSourceType(java.lang.String columnPropagateSourceType) {
doSetProperty("columnPropagateSourceType", columnPropagateSourceType);
return this;
}
/**
* The maximum time in milliseconds to wait for connection validation to
* complete. Defaults to 60 seconds.
*
* The option is a: <code>long</code> type.
*
* Default: 1m
* Group: oracle
*
* @param connectionValidationTimeoutMs the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder connectionValidationTimeoutMs(long connectionValidationTimeoutMs) {
doSetProperty("connectionValidationTimeoutMs", connectionValidationTimeoutMs);
return this;
}
/**
* Optional list of custom converters that would be used instead of
* default ones. The converters are defined using '.type' config option
* and configured using options '.'.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: oracle
*
* @param converters the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder converters(java.lang.String converters) {
doSetProperty("converters", converters);
return this;
}
/**
* The custom metric tags will accept key-value pairs to customize the
* MBean object name which should be appended the end of regular name,
* each key would represent a tag for the MBean object name, and the
* corresponding value would be the value of that tag the key is. For
* example: k1=v1,k2=v2.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: oracle
*
* @param customMetricTags the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder customMetricTags(java.lang.String customMetricTags) {
doSetProperty("customMetricTags", customMetricTags);
return this;
}
/**
* The adapter to use when capturing changes from the database. Options
* include: 'logminer': (the default) to capture changes using native
* Oracle LogMiner; 'xstream' to capture changes using Oracle XStreams.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: LogMiner
* Group: oracle
*
* @param databaseConnectionAdapter the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder databaseConnectionAdapter(java.lang.String databaseConnectionAdapter) {
doSetProperty("databaseConnectionAdapter", databaseConnectionAdapter);
return this;
}
/**
* The name of the database from which the connector should capture
* changes.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: oracle
*
* @param databaseDbname the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder databaseDbname(java.lang.String databaseDbname) {
doSetProperty("databaseDbname", databaseDbname);
return this;
}
/**
* Resolvable hostname or IP address of the database server.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: oracle
*
* @param databaseHostname the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder databaseHostname(java.lang.String databaseHostname) {
doSetProperty("databaseHostname", databaseHostname);
return this;
}
/**
* Name of the XStream Out server to connect to.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: oracle
*
* @param databaseOutServerName the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder databaseOutServerName(java.lang.String databaseOutServerName) {
doSetProperty("databaseOutServerName", databaseOutServerName);
return this;
}
/**
* Password of the database user to be used when connecting to the
* database.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: oracle
*
* @param databasePassword the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder databasePassword(java.lang.String databasePassword) {
doSetProperty("databasePassword", databasePassword);
return this;
}
/**
* Name of the pluggable database when working with a multi-tenant
* set-up. The CDB name must be given via database.dbname in this case.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: oracle
*
* @param databasePdbName the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder databasePdbName(java.lang.String databasePdbName) {
doSetProperty("databasePdbName", databasePdbName);
return this;
}
/**
* Port of the database server.
*
* The option is a: <code>int</code> type.
*
* Default: 1528
* Group: oracle
*
* @param databasePort the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder databasePort(int databasePort) {
doSetProperty("databasePort", databasePort);
return this;
}
/**
* Time to wait for a query to execute, given in milliseconds. Defaults
* to 600 seconds (600,000 ms); zero means there is no limit.
*
* The option is a: <code>int</code> type.
*
* Default: 10m
* Group: oracle
*
* @param databaseQueryTimeoutMs the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder databaseQueryTimeoutMs(int databaseQueryTimeoutMs) {
doSetProperty("databaseQueryTimeoutMs", databaseQueryTimeoutMs);
return this;
}
/**
* Complete JDBC URL as an alternative to specifying hostname, port and
* database provided as a way to support alternative connection
* scenarios.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: oracle
*
* @param databaseUrl the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder databaseUrl(java.lang.String databaseUrl) {
doSetProperty("databaseUrl", databaseUrl);
return this;
}
/**
* Name of the database user to be used when connecting to the database.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: oracle
*
* @param databaseUser the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder databaseUser(java.lang.String databaseUser) {
doSetProperty("databaseUser", databaseUser);
return this;
}
/**
* A comma-separated list of regular expressions matching the
* database-specific data type names that adds the data type's original
* type and original length as parameters to the corresponding field
* schemas in the emitted change records.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: oracle
*
* @param datatypePropagateSourceType the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder datatypePropagateSourceType(java.lang.String datatypePropagateSourceType) {
doSetProperty("datatypePropagateSourceType", datatypePropagateSourceType);
return this;
}
/**
* Specify how DECIMAL and NUMERIC columns should be represented in
* change events, including: 'precise' (the default) uses
* java.math.BigDecimal to represent values, which are encoded in the
* change events using a binary representation and Kafka Connect's
* 'org.apache.kafka.connect.data.Decimal' type; 'string' uses string to
* represent values; 'double' represents values using Java's 'double',
* which may not offer the precision but will be far easier to use in
* consumers.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: precise
* Group: oracle
*
* @param decimalHandlingMode the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder decimalHandlingMode(java.lang.String decimalHandlingMode) {
doSetProperty("decimalHandlingMode", decimalHandlingMode);
return this;
}
/**
* The maximum number of retries on connection errors before failing (-1
* = no limit, 0 = disabled, 0 = num of retries).
*
* The option is a: <code>int</code> type.
*
* Default: -1
* Group: oracle
*
* @param errorsMaxRetries the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder errorsMaxRetries(int errorsMaxRetries) {
doSetProperty("errorsMaxRetries", errorsMaxRetries);
return this;
}
/**
* Specify how failures during processing of events (i.e. when
* encountering a corrupted event) should be handled, including: 'fail'
* (the default) an exception indicating the problematic event and its
* position is raised, causing the connector to be stopped; 'warn' the
* problematic event and its position will be logged and the event will
* be skipped; 'ignore' the problematic event will be skipped.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: fail
* Group: oracle
*
* @param eventProcessingFailureHandlingMode the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder eventProcessingFailureHandlingMode(java.lang.String eventProcessingFailureHandlingMode) {
doSetProperty("eventProcessingFailureHandlingMode", eventProcessingFailureHandlingMode);
return this;
}
/**
* The maximum time in milliseconds to wait for task executor to shut
* down.
*
* The option is a: <code>long</code> type.
*
* Default: 4s
* Group: oracle
*
* @param executorShutdownTimeoutMs the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder executorShutdownTimeoutMs(long executorShutdownTimeoutMs) {
doSetProperty("executorShutdownTimeoutMs", executorShutdownTimeoutMs);
return this;
}
        /**
         * Enable/Disable Debezium context headers that provide essential
         * metadata for tracking and identifying the source of CDC events in
         * downstream processing systems.
         *
         * The option is a: <code>boolean</code> type.
         *
         * Default: true
         * Group: oracle
         *
         * @param extendedHeadersEnabled the value to set
         * @return the dsl builder
         */
        default DebeziumOracleComponentBuilder extendedHeadersEnabled(boolean extendedHeadersEnabled) {
            doSetProperty("extendedHeadersEnabled", extendedHeadersEnabled);
            return this;
        }
/**
* Specify the action to take when a guardrail collections limit is
* exceeded: 'warn' (the default) logs a warning message and continues
* processing; 'fail' stops the connector with an error.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: warn
* Group: oracle
*
* @param guardrailCollectionsLimitAction the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder guardrailCollectionsLimitAction(java.lang.String guardrailCollectionsLimitAction) {
doSetProperty("guardrailCollectionsLimitAction", guardrailCollectionsLimitAction);
return this;
}
/**
* The maximum number of collections or tables that can be captured by
* the connector. When this limit is exceeded, the action specified by
* 'guardrail.collections.limit.action' will be taken. Set to 0 to
* disable this guardrail.
*
* The option is a: <code>int</code> type.
*
* Group: oracle
*
* @param guardrailCollectionsMax the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder guardrailCollectionsMax(int guardrailCollectionsMax) {
doSetProperty("guardrailCollectionsMax", guardrailCollectionsMax);
return this;
}
/**
* The query executed with every heartbeat.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: oracle
*
* @param heartbeatActionQuery the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder heartbeatActionQuery(java.lang.String heartbeatActionQuery) {
doSetProperty("heartbeatActionQuery", heartbeatActionQuery);
return this;
}
        /**
         * Length of an interval in milliseconds in which the connector
         * periodically sends heartbeat messages to a heartbeat topic. Use 0 to
         * disable heartbeat messages. Disabled by default.
         *
         * The option is a: <code>int</code> type.
         *
         * Default: 0ms
         * Group: oracle
         *
         * @param heartbeatIntervalMs the value to set
         * @return the dsl builder
         */
        default DebeziumOracleComponentBuilder heartbeatIntervalMs(int heartbeatIntervalMs) {
            doSetProperty("heartbeatIntervalMs", heartbeatIntervalMs);
            return this;
        }
        /**
         * The prefix that is used to name heartbeat topics. Defaults to
         * __debezium-heartbeat.
         *
         * The option is a: <code>java.lang.String</code> type.
         *
         * Default: __debezium-heartbeat
         * Group: oracle
         *
         * @param heartbeatTopicsPrefix the value to set
         * @return the dsl builder
         */
        default DebeziumOracleComponentBuilder heartbeatTopicsPrefix(java.lang.String heartbeatTopicsPrefix) {
            doSetProperty("heartbeatTopicsPrefix", heartbeatTopicsPrefix);
            return this;
        }
/**
* Whether the connector should publish changes in the database schema
* to a Kafka topic with the same name as the database server ID. Each
* schema change will be recorded using a key that contains the database
* name and whose value include logical description of the new schema
* and optionally the DDL statement(s). The default is 'true'. This is
* independent of how the connector internally records database schema
* history.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: oracle
*
* @param includeSchemaChanges the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder includeSchemaChanges(boolean includeSchemaChanges) {
doSetProperty("includeSchemaChanges", includeSchemaChanges);
return this;
}
/**
* Whether the connector parse table and column's comment to metadata
* object. Note: Enable this option will bring the implications on
* memory usage. The number and size of ColumnImpl objects is what
* largely impacts how much memory is consumed by the Debezium
* connectors, and adding a String to each of them can potentially be
* quite heavy. The default is 'false'.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: oracle
*
* @param includeSchemaComments the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder includeSchemaComments(boolean includeSchemaComments) {
doSetProperty("includeSchemaComments", includeSchemaComments);
return this;
}
        /**
         * Specify the strategy used for watermarking during an incremental
         * snapshot: 'insert_insert' both open and close signal is written into
         * signal data collection (default); 'insert_delete' only open signal is
         * written on signal data collection, the close will delete the relative
         * open signal.
         *
         * The option is a: <code>java.lang.String</code> type.
         *
         * Default: INSERT_INSERT
         * Group: oracle
         *
         * @param incrementalSnapshotWatermarkingStrategy the value to set
         * @return the dsl builder
         */
        default DebeziumOracleComponentBuilder incrementalSnapshotWatermarkingStrategy(java.lang.String incrementalSnapshotWatermarkingStrategy) {
            doSetProperty("incrementalSnapshotWatermarkingStrategy", incrementalSnapshotWatermarkingStrategy);
            return this;
        }
/**
* Specify how INTERVAL columns should be represented in change events,
* including: 'string' represents values as an exact ISO formatted
* string; 'numeric' (default) represents values using the inexact
* conversion into microseconds.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: numeric
* Group: oracle
*
* @param intervalHandlingMode the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder intervalHandlingMode(java.lang.String intervalHandlingMode) {
doSetProperty("intervalHandlingMode", intervalHandlingMode);
return this;
}
/**
* Uses the legacy decimal handling behavior before DBZ-7882.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: oracle
*
* @param legacyDecimalHandlingStrategy the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder legacyDecimalHandlingStrategy(boolean legacyDecimalHandlingStrategy) {
doSetProperty("legacyDecimalHandlingStrategy", legacyDecimalHandlingStrategy);
return this;
}
/**
* When set to 'false', the default, LOB fields will not be captured nor
* emitted. When set to 'true', the connector will capture LOB fields
* and emit changes for those fields like any other column type.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: oracle
*
* @param lobEnabled the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder lobEnabled(boolean lobEnabled) {
doSetProperty("lobEnabled", lobEnabled);
return this;
}
        /**
         * When set to 'false', the default, the connector will mine both
         * archive log and redo logs to emit change events. When set to 'true',
         * the connector will only mine archive logs. There are circumstances
         * where it's advantageous to only mine archive logs and accept latency
         * in event emission due to frequent revolving redo logs.
         *
         * The option is a: <code>boolean</code> type.
         *
         * Default: false
         * Group: oracle
         *
         * @param logMiningArchiveLogOnlyMode the value to set
         * @return the dsl builder
         */
        default DebeziumOracleComponentBuilder logMiningArchiveLogOnlyMode(boolean logMiningArchiveLogOnlyMode) {
            doSetProperty("logMiningArchiveLogOnlyMode", logMiningArchiveLogOnlyMode);
            return this;
        }
/**
* The interval in milliseconds to wait between polls checking to see if
* the SCN is in the archive logs.
*
* The option is a: <code>long</code> type.
*
* Default: 10s
* Group: oracle
*
* @param logMiningArchiveLogOnlyScnPollIntervalMs the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder logMiningArchiveLogOnlyScnPollIntervalMs(long logMiningArchiveLogOnlyScnPollIntervalMs) {
doSetProperty("logMiningArchiveLogOnlyScnPollIntervalMs", logMiningArchiveLogOnlyScnPollIntervalMs);
return this;
}
/**
* The starting SCN interval size that the connector will use for
* reading data from redo/archive logs.
*
* The option is a: <code>long</code> type.
*
* Default: 20000
* Group: oracle
*
* @param logMiningBatchSizeDefault the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder logMiningBatchSizeDefault(long logMiningBatchSizeDefault) {
doSetProperty("logMiningBatchSizeDefault", logMiningBatchSizeDefault);
return this;
}
/**
* Active batch size will be also increased/decreased by this amount for
* tuning connector throughput when needed.
*
* The option is a: <code>long</code> type.
*
* Default: 20000
* Group: oracle
*
* @param logMiningBatchSizeIncrement the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder logMiningBatchSizeIncrement(long logMiningBatchSizeIncrement) {
doSetProperty("logMiningBatchSizeIncrement", logMiningBatchSizeIncrement);
return this;
}
/**
* The maximum SCN interval size that this connector will use when
* reading from redo/archive logs.
*
* The option is a: <code>long</code> type.
*
* Default: 100000
* Group: oracle
*
* @param logMiningBatchSizeMax the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder logMiningBatchSizeMax(long logMiningBatchSizeMax) {
doSetProperty("logMiningBatchSizeMax", logMiningBatchSizeMax);
return this;
}
/**
* The minimum SCN interval size that this connector will try to read
* from redo/archive logs.
*
* The option is a: <code>long</code> type.
*
* Default: 1000
* Group: oracle
*
* @param logMiningBatchSizeMin the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder logMiningBatchSizeMin(long logMiningBatchSizeMin) {
doSetProperty("logMiningBatchSizeMin", logMiningBatchSizeMin);
return this;
}
/**
* When set to true the underlying buffer cache is not retained when the
* connector is stopped. When set to false (the default), the buffer
* cache is retained across restarts.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: oracle
*
* @param logMiningBufferDropOnStop the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder logMiningBufferDropOnStop(boolean logMiningBufferDropOnStop) {
doSetProperty("logMiningBufferDropOnStop", logMiningBufferDropOnStop);
return this;
}
        /**
         * Specifies the inner body of the Ehcache configuration for the events
         * cache; it should not include the enclosing cache tag nor its
         * attributes, as these are managed by Debezium.
         *
         * The option is a: <code>java.lang.String</code> type.
         *
         * Group: oracle
         *
         * @param logMiningBufferEhcacheEventsConfig the value to set
         * @return the dsl builder
         */
        default DebeziumOracleComponentBuilder logMiningBufferEhcacheEventsConfig(java.lang.String logMiningBufferEhcacheEventsConfig) {
            doSetProperty("logMiningBufferEhcacheEventsConfig", logMiningBufferEhcacheEventsConfig);
            return this;
        }
        /**
         * Specifies any Ehcache global configurations such as services or
         * persistence. This cannot include the cache-definition tags, as these
         * are managed by Debezium.
         *
         * The option is a: <code>java.lang.String</code> type.
         *
         * Group: oracle
         *
         * @param logMiningBufferEhcacheGlobalConfig the value to set
         * @return the dsl builder
         */
        default DebeziumOracleComponentBuilder logMiningBufferEhcacheGlobalConfig(java.lang.String logMiningBufferEhcacheGlobalConfig) {
            doSetProperty("logMiningBufferEhcacheGlobalConfig", logMiningBufferEhcacheGlobalConfig);
            return this;
        }
        /**
         * Specifies the inner body of the Ehcache configuration for the
         * processed transaction cache; it should not include the enclosing
         * cache tag nor its attributes, as these are managed by Debezium.
         *
         * The option is a: <code>java.lang.String</code> type.
         *
         * Group: oracle
         *
         * @param logMiningBufferEhcacheProcessedtransactionsConfig the value to
         * set
         * @return the dsl builder
         */
        default DebeziumOracleComponentBuilder logMiningBufferEhcacheProcessedtransactionsConfig(java.lang.String logMiningBufferEhcacheProcessedtransactionsConfig) {
            doSetProperty("logMiningBufferEhcacheProcessedtransactionsConfig", logMiningBufferEhcacheProcessedtransactionsConfig);
            return this;
        }
        /**
         * Specifies the inner body of the Ehcache configuration for the schema
         * changes cache; it should not include the enclosing cache tag nor its
         * attributes, as these are managed by Debezium.
         *
         * The option is a: <code>java.lang.String</code> type.
         *
         * Group: oracle
         *
         * @param logMiningBufferEhcacheSchemachangesConfig the value to set
         * @return the dsl builder
         */
        default DebeziumOracleComponentBuilder logMiningBufferEhcacheSchemachangesConfig(java.lang.String logMiningBufferEhcacheSchemachangesConfig) {
            doSetProperty("logMiningBufferEhcacheSchemachangesConfig", logMiningBufferEhcacheSchemachangesConfig);
            return this;
        }
        /**
         * Specifies the inner body of the Ehcache configuration for the
         * transaction cache; it should not include the enclosing cache tag nor
         * its attributes, as these are managed by Debezium.
         *
         * The option is a: <code>java.lang.String</code> type.
         *
         * Group: oracle
         *
         * @param logMiningBufferEhcacheTransactionsConfig the value to set
         * @return the dsl builder
         */
        default DebeziumOracleComponentBuilder logMiningBufferEhcacheTransactionsConfig(java.lang.String logMiningBufferEhcacheTransactionsConfig) {
            doSetProperty("logMiningBufferEhcacheTransactionsConfig", logMiningBufferEhcacheTransactionsConfig);
            return this;
        }
/**
* Specifies the XML configuration for the Infinispan 'events' cache.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: oracle
*
* @param logMiningBufferInfinispanCacheEvents the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder logMiningBufferInfinispanCacheEvents(java.lang.String logMiningBufferInfinispanCacheEvents) {
doSetProperty("logMiningBufferInfinispanCacheEvents", logMiningBufferInfinispanCacheEvents);
return this;
}
/**
* Specifies the XML configuration for the Infinispan 'global'
* configuration.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: oracle
*
* @param logMiningBufferInfinispanCacheGlobal the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder logMiningBufferInfinispanCacheGlobal(java.lang.String logMiningBufferInfinispanCacheGlobal) {
doSetProperty("logMiningBufferInfinispanCacheGlobal", logMiningBufferInfinispanCacheGlobal);
return this;
}
/**
* Specifies the XML configuration for the Infinispan
* 'processed-transactions' cache.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: oracle
*
* @param logMiningBufferInfinispanCacheProcessedTransactions the value
* to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder logMiningBufferInfinispanCacheProcessedTransactions(java.lang.String logMiningBufferInfinispanCacheProcessedTransactions) {
doSetProperty("logMiningBufferInfinispanCacheProcessedTransactions", logMiningBufferInfinispanCacheProcessedTransactions);
return this;
}
/**
* Specifies the XML configuration for the Infinispan 'schema-changes'
* cache.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: oracle
*
* @param logMiningBufferInfinispanCacheSchemaChanges the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder logMiningBufferInfinispanCacheSchemaChanges(java.lang.String logMiningBufferInfinispanCacheSchemaChanges) {
doSetProperty("logMiningBufferInfinispanCacheSchemaChanges", logMiningBufferInfinispanCacheSchemaChanges);
return this;
}
/**
* Specifies the XML configuration for the Infinispan 'transactions'
* cache.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: oracle
*
* @param logMiningBufferInfinispanCacheTransactions the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder logMiningBufferInfinispanCacheTransactions(java.lang.String logMiningBufferInfinispanCacheTransactions) {
doSetProperty("logMiningBufferInfinispanCacheTransactions", logMiningBufferInfinispanCacheTransactions);
return this;
}
/**
* The number of events a transaction can include before the transaction
* is discarded. This is useful for managing buffer memory and/or space
* when dealing with very large transactions. Defaults to 0, meaning
* that no threshold is applied and transactions can have unlimited
* events.
*
* The option is a: <code>long</code> type.
*
* Group: oracle
*
* @param logMiningBufferTransactionEventsThreshold the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder logMiningBufferTransactionEventsThreshold(long logMiningBufferTransactionEventsThreshold) {
doSetProperty("logMiningBufferTransactionEventsThreshold", logMiningBufferTransactionEventsThreshold);
return this;
}
/**
* The buffer type controls how the connector manages buffering
* transaction data. memory - Uses the JVM process' heap to buffer all
* transaction data. infinispan_embedded - This option uses an embedded
* Infinispan cache to buffer transaction data and persist it to disk.
* infinispan_remote - This option uses a remote Infinispan cluster to
* buffer transaction data and persist it to disk. ehcache - Use ehcache
* in embedded mode to buffer transaction data and persist it to disk.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: memory
* Group: oracle
*
* @param logMiningBufferType the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder logMiningBufferType(java.lang.String logMiningBufferType) {
doSetProperty("logMiningBufferType", logMiningBufferType);
return this;
}
/**
* Comma separated list of client ids to exclude from LogMiner query.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: oracle
*
* @param logMiningClientidExcludeList the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder logMiningClientidExcludeList(java.lang.String logMiningClientidExcludeList) {
doSetProperty("logMiningClientidExcludeList", logMiningClientidExcludeList);
return this;
}
/**
* Comma separated list of client ids to include from LogMiner query.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: oracle
*
* @param logMiningClientidIncludeList the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder logMiningClientidIncludeList(java.lang.String logMiningClientidIncludeList) {
doSetProperty("logMiningClientidIncludeList", logMiningClientidIncludeList);
return this;
}
/**
* The name of the flush table used by the connector, defaults to
* LOG_MINING_FLUSH.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: LOG_MINING_FLUSH
* Group: oracle
*
* @param logMiningFlushTableName the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder logMiningFlushTableName(java.lang.String logMiningFlushTableName) {
doSetProperty("logMiningFlushTableName", logMiningFlushTableName);
return this;
}
/**
* When enabled, the transaction log REDO SQL will be included in the
* source information block.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: oracle
*
* @param logMiningIncludeRedoSql the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder logMiningIncludeRedoSql(boolean logMiningIncludeRedoSql) {
doSetProperty("logMiningIncludeRedoSql", logMiningIncludeRedoSql);
return this;
}
/**
* This is required when using the connector against a read-only
* database replica.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: oracle
*
* @param logMiningPathDictionary the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder logMiningPathDictionary(java.lang.String logMiningPathDictionary) {
doSetProperty("logMiningPathDictionary", logMiningPathDictionary);
return this;
}
/**
* Specifies how the filter configuration is applied to the LogMiner
* database query. none - The query does not apply any schema or table
* filters, all filtering is at runtime by the connector. in - The query
* uses SQL in-clause expressions to specify the schema or table
* filters. regex - The query uses Oracle REGEXP_LIKE expressions to
* specify the schema or table filters.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: none
* Group: oracle
*
* @param logMiningQueryFilterMode the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder logMiningQueryFilterMode(java.lang.String logMiningQueryFilterMode) {
doSetProperty("logMiningQueryFilterMode", logMiningQueryFilterMode);
return this;
}
        /**
         * The hostname the connector will use to connect and perform read-only
         * operations for the replica.
         *
         * The option is a: <code>java.lang.String</code> type.
         *
         * Group: oracle
         *
         * @param logMiningReadonlyHostname the value to set
         * @return the dsl builder
         */
        default DebeziumOracleComponentBuilder logMiningReadonlyHostname(java.lang.String logMiningReadonlyHostname) {
            doSetProperty("logMiningReadonlyHostname", logMiningReadonlyHostname);
            return this;
        }
/**
* Debezium opens a database connection and keeps that connection open
* throughout the entire streaming phase. In some situations, this can
* lead to excessive SGA memory usage. By setting this option to 'true'
* (the default is 'false'), the connector will close and re-open a
* database connection after every detected log switch or if the
* log.mining.session.max.ms has been reached.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: oracle
*
* @param logMiningRestartConnection the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder logMiningRestartConnection(boolean logMiningRestartConnection) {
doSetProperty("logMiningRestartConnection", logMiningRestartConnection);
return this;
}
/**
* Used for SCN gap detection, if the difference between current SCN and
* previous end SCN is bigger than this value, and the time difference
* of current SCN and previous end SCN is smaller than
* log.mining.scn.gap.detection.time.interval.max.ms, consider it a SCN
* gap.
*
* The option is a: <code>long</code> type.
*
* Default: 1000000
* Group: oracle
*
* @param logMiningScnGapDetectionGapSizeMin the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder logMiningScnGapDetectionGapSizeMin(long logMiningScnGapDetectionGapSizeMin) {
doSetProperty("logMiningScnGapDetectionGapSizeMin", logMiningScnGapDetectionGapSizeMin);
return this;
}
/**
* Used for SCN gap detection, if the difference between current SCN and
* previous end SCN is bigger than
* log.mining.scn.gap.detection.gap.size.min, and the time difference of
* current SCN and previous end SCN is smaller than this value, consider
* it a SCN gap.
*
* The option is a: <code>long</code> type.
*
* Default: 20s
* Group: oracle
*
* @param logMiningScnGapDetectionTimeIntervalMaxMs the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder logMiningScnGapDetectionTimeIntervalMaxMs(long logMiningScnGapDetectionTimeIntervalMaxMs) {
doSetProperty("logMiningScnGapDetectionTimeIntervalMaxMs", logMiningScnGapDetectionTimeIntervalMaxMs);
return this;
}
/**
* The maximum number of milliseconds that a LogMiner session lives for
* before being restarted. Defaults to 0 (indefinite until a log switch
* occurs).
*
* The option is a: <code>long</code> type.
*
* Default: 0ms
* Group: oracle
*
* @param logMiningSessionMaxMs the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder logMiningSessionMaxMs(long logMiningSessionMaxMs) {
doSetProperty("logMiningSessionMaxMs", logMiningSessionMaxMs);
return this;
}
/**
* The amount of time that the connector will sleep after reading data
* from redo/archive logs and before starting reading data again. Value
* is in milliseconds.
*
* The option is a: <code>long</code> type.
*
* Default: 1s
* Group: oracle
*
* @param logMiningSleepTimeDefaultMs the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder logMiningSleepTimeDefaultMs(long logMiningSleepTimeDefaultMs) {
doSetProperty("logMiningSleepTimeDefaultMs", logMiningSleepTimeDefaultMs);
return this;
}
/**
* The maximum amount of time that the connector will use to tune the
* optimal sleep time when reading data from LogMiner. Value is in
* milliseconds.
*
* The option is a: <code>long</code> type.
*
* Default: 200ms
* Group: oracle
*
* @param logMiningSleepTimeIncrementMs the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder logMiningSleepTimeIncrementMs(long logMiningSleepTimeIncrementMs) {
doSetProperty("logMiningSleepTimeIncrementMs", logMiningSleepTimeIncrementMs);
return this;
}
/**
* The maximum amount of time that the connector will sleep after
* reading data from redo/archive logs and before starting reading data
* again. Value is in milliseconds.
*
* The option is a: <code>long</code> type.
*
* Default: 3s
* Group: oracle
*
* @param logMiningSleepTimeMaxMs the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder logMiningSleepTimeMaxMs(long logMiningSleepTimeMaxMs) {
doSetProperty("logMiningSleepTimeMaxMs", logMiningSleepTimeMaxMs);
return this;
}
/**
* The minimum amount of time that the connector will sleep after
* reading data from redo/archive logs and before starting reading data
* again. Value is in milliseconds.
*
* The option is a: <code>long</code> type.
*
* Default: 0ms
* Group: oracle
*
* @param logMiningSleepTimeMinMs the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder logMiningSleepTimeMinMs(long logMiningSleepTimeMinMs) {
doSetProperty("logMiningSleepTimeMinMs", logMiningSleepTimeMinMs);
return this;
}
/**
* There are strategies: Online catalog with faster mining but no
* captured DDL. Another - with data dictionary loaded into REDO LOG
* files.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: online_catalog
* Group: oracle
*
* @param logMiningStrategy the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder logMiningStrategy(java.lang.String logMiningStrategy) {
doSetProperty("logMiningStrategy", logMiningStrategy);
return this;
}
/**
* Duration in milliseconds to keep long running transactions in
* transaction buffer between log mining sessions. By default, all
* transactions are retained.
*
* The option is a: <code>long</code> type.
*
* Default: 0ms
* Group: oracle
*
* @param logMiningTransactionRetentionMs the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder logMiningTransactionRetentionMs(long logMiningTransactionRetentionMs) {
doSetProperty("logMiningTransactionRetentionMs", logMiningTransactionRetentionMs);
return this;
}
/**
* Comma separated list of usernames to exclude from LogMiner query.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: oracle
*
* @param logMiningUsernameExcludeList the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder logMiningUsernameExcludeList(java.lang.String logMiningUsernameExcludeList) {
doSetProperty("logMiningUsernameExcludeList", logMiningUsernameExcludeList);
return this;
}
/**
* Comma separated list of usernames to include from LogMiner query.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: oracle
*
* @param logMiningUsernameIncludeList the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder logMiningUsernameIncludeList(java.lang.String logMiningUsernameIncludeList) {
doSetProperty("logMiningUsernameIncludeList", logMiningUsernameIncludeList);
return this;
}
/**
* Maximum size of each batch of source records. Defaults to 2048.
*
* The option is a: <code>int</code> type.
*
* Default: 2048
* Group: oracle
*
* @param maxBatchSize the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder maxBatchSize(int maxBatchSize) {
doSetProperty("maxBatchSize", maxBatchSize);
return this;
}
/**
* Maximum size of the queue for change events read from the database
* log but not yet recorded or forwarded. Defaults to 8192, and should
* always be larger than the maximum batch size.
*
* The option is a: <code>int</code> type.
*
* Default: 8192
* Group: oracle
*
* @param maxQueueSize the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder maxQueueSize(int maxQueueSize) {
doSetProperty("maxQueueSize", maxQueueSize);
return this;
}
        /**
         * Maximum size of the queue in bytes for change events read from the
         * database log but not yet recorded or forwarded. Defaults to 0,
         * meaning the feature is not enabled.
         *
         * The option is a: <code>long</code> type.
         *
         * Group: oracle
         *
         * @param maxQueueSizeInBytes the value to set
         * @return the dsl builder
         */
        default DebeziumOracleComponentBuilder maxQueueSizeInBytes(long maxQueueSizeInBytes) {
            doSetProperty("maxQueueSizeInBytes", maxQueueSizeInBytes);
            return this;
        }
        /**
         * A semicolon-separated list of expressions that match fully-qualified
         * tables and column(s) to be used as message key. Each expression must
         * match the pattern 'table-name:key-columns', where the table names
         * could be defined as (DB_NAME.TABLE_NAME) or (SCHEMA_NAME.TABLE_NAME),
         * depending on the specific connector, and the key columns are a
         * comma-separated list of columns representing the custom key. For any
         * table without an explicit key configuration the table's primary key
         * column(s) will be used as message key. Example:
         * dbserver1.inventory.orderlines:orderId,orderLineId;dbserver1.inventory.orders:id.
         *
         * The option is a: <code>java.lang.String</code> type.
         *
         * Group: oracle
         *
         * @param messageKeyColumns the value to set
         * @return the dsl builder
         */
        default DebeziumOracleComponentBuilder messageKeyColumns(java.lang.String messageKeyColumns) {
            doSetProperty("messageKeyColumns", messageKeyColumns);
            return this;
        }
/**
* List of notification channels names that are enabled.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: oracle
*
* @param notificationEnabledChannels the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder notificationEnabledChannels(java.lang.String notificationEnabledChannels) {
doSetProperty("notificationEnabledChannels", notificationEnabledChannels);
return this;
}
/**
* The name of the topic for the notifications. This is required in case
* 'sink' is in the list of enabled channels.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: oracle
*
* @param notificationSinkTopicName the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder notificationSinkTopicName(java.lang.String notificationSinkTopicName) {
doSetProperty("notificationSinkTopicName", notificationSinkTopicName);
return this;
}
/**
* Path to OpenLineage file configuration. See
* https://openlineage.io/docs/client/java/configuration.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: ./openlineage.yml
* Group: oracle
*
* @param openlineageIntegrationConfigFilePath the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder openlineageIntegrationConfigFilePath(java.lang.String openlineageIntegrationConfigFilePath) {
doSetProperty("openlineageIntegrationConfigFilePath", openlineageIntegrationConfigFilePath);
return this;
}
        /**
         * The Kafka bootstrap server address used as input/output namespace.
         *
         * The option is a: <code>java.lang.String</code> type.
         *
         * Group: oracle
         *
         * @param openlineageIntegrationDatasetKafkaBootstrapServers the value
         * to set
         * @return the dsl builder
         */
        default DebeziumOracleComponentBuilder openlineageIntegrationDatasetKafkaBootstrapServers(java.lang.String openlineageIntegrationDatasetKafkaBootstrapServers) {
            doSetProperty("openlineageIntegrationDatasetKafkaBootstrapServers", openlineageIntegrationDatasetKafkaBootstrapServers);
            return this;
        }
/**
* Enable Debezium to emit data lineage metadata through OpenLineage
* API.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: oracle
*
* @param openlineageIntegrationEnabled the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder openlineageIntegrationEnabled(boolean openlineageIntegrationEnabled) {
doSetProperty("openlineageIntegrationEnabled", openlineageIntegrationEnabled);
return this;
}
/**
* The job's description emitted by Debezium.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: Debezium change data capture job
* Group: oracle
*
* @param openlineageIntegrationJobDescription the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder openlineageIntegrationJobDescription(java.lang.String openlineageIntegrationJobDescription) {
doSetProperty("openlineageIntegrationJobDescription", openlineageIntegrationJobDescription);
return this;
}
/**
* The job's namespace emitted by Debezium.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: oracle
*
* @param openlineageIntegrationJobNamespace the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder openlineageIntegrationJobNamespace(java.lang.String openlineageIntegrationJobNamespace) {
doSetProperty("openlineageIntegrationJobNamespace", openlineageIntegrationJobNamespace);
return this;
}
/**
* The job's owners emitted by Debezium. A comma-separated list of
* key-value pairs.For example: k1=v1,k2=v2.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: oracle
*
* @param openlineageIntegrationJobOwners the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder openlineageIntegrationJobOwners(java.lang.String openlineageIntegrationJobOwners) {
doSetProperty("openlineageIntegrationJobOwners", openlineageIntegrationJobOwners);
return this;
}
/**
* The job's tags emitted by Debezium. A comma-separated list of
* key-value pairs.For example: k1=v1,k2=v2.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: oracle
*
* @param openlineageIntegrationJobTags the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder openlineageIntegrationJobTags(java.lang.String openlineageIntegrationJobTags) {
doSetProperty("openlineageIntegrationJobTags", openlineageIntegrationJobTags);
return this;
}
/**
* The hostname of the OpenLogReplicator network service.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: oracle
*
* @param openlogreplicatorHost the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder openlogreplicatorHost(java.lang.String openlogreplicatorHost) {
doSetProperty("openlogreplicatorHost", openlogreplicatorHost);
return this;
}
/**
* The port of the OpenLogReplicator network service.
*
* The option is a: <code>int</code> type.
*
* Group: oracle
*
* @param openlogreplicatorPort the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder openlogreplicatorPort(int openlogreplicatorPort) {
doSetProperty("openlogreplicatorPort", openlogreplicatorPort);
return this;
}
/**
* The configured logical source name in the OpenLogReplicator
* configuration that is to stream changes.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: oracle
*
* @param openlogreplicatorSource the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder openlogreplicatorSource(java.lang.String openlogreplicatorSource) {
doSetProperty("openlogreplicatorSource", openlogreplicatorSource);
return this;
}
/**
* Time to wait for new change events to appear after receiving no
* events, given in milliseconds. Defaults to 500 ms.
*
* The option is a: <code>long</code> type.
*
* Default: 500ms
* Group: oracle
*
* @param pollIntervalMs the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder pollIntervalMs(long pollIntervalMs) {
doSetProperty("pollIntervalMs", pollIntervalMs);
return this;
}
/**
* Optional list of post processors. The processors are defined using
* '.type' config option and configured using options ''.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: oracle
*
* @param postProcessors the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder postProcessors(java.lang.String postProcessors) {
doSetProperty("postProcessors", postProcessors);
return this;
}
/**
* Enables transaction metadata extraction together with event counting.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: oracle
*
* @param provideTransactionMetadata the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder provideTransactionMetadata(boolean provideTransactionMetadata) {
doSetProperty("provideTransactionMetadata", provideTransactionMetadata);
return this;
}
/**
* The maximum number of records that should be loaded into memory while
* streaming. A value of '0' uses the default JDBC fetch size, defaults
* to '2000'.
*
* The option is a: <code>int</code> type.
*
* Default: 10000
* Group: oracle
*
* @param queryFetchSize the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder queryFetchSize(int queryFetchSize) {
doSetProperty("queryFetchSize", queryFetchSize);
return this;
}
/**
* A comma-separated list of RAC node hostnames or ip addresses.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: oracle
*
* @param racNodes the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder racNodes(java.lang.String racNodes) {
doSetProperty("racNodes", racNodes);
return this;
}
/**
* Time to wait before restarting connector after retriable exception
* occurs. Defaults to 10000ms.
*
* The option is a: <code>long</code> type.
*
* Default: 10s
* Group: oracle
*
* @param retriableRestartConnectorWaitMs the value to set
* @return the dsl builder
*/
default DebeziumOracleComponentBuilder retriableRestartConnectorWaitMs(long retriableRestartConnectorWaitMs) {
doSetProperty("retriableRestartConnectorWaitMs", retriableRestartConnectorWaitMs);
return this;
}
/**
* The name of the SchemaHistory | that |
java | grpc__grpc-java | opentelemetry/src/main/java/io/grpc/opentelemetry/OpenTelemetryTracingModule.java | {
"start": 5437,
"end": 7168
} | class ____ extends ClientStreamTracer.Factory {
volatile int callEnded;
private final Span clientSpan;
private final String fullMethodName;
CallAttemptsTracerFactory(Span clientSpan, MethodDescriptor<?, ?> method) {
checkNotNull(method, "method");
this.fullMethodName = checkNotNull(method.getFullMethodName(), "fullMethodName");
this.clientSpan = checkNotNull(clientSpan, "clientSpan");
}
@Override
public ClientStreamTracer newClientStreamTracer(
ClientStreamTracer.StreamInfo info, Metadata headers) {
Span attemptSpan = otelTracer.spanBuilder(
"Attempt." + fullMethodName.replace('/', '.'))
.setParent(Context.current().with(clientSpan))
.startSpan();
attemptSpan.setAttribute(
"previous-rpc-attempts", info.getPreviousAttempts());
attemptSpan.setAttribute(
"transparent-retry",info.isTransparentRetry());
if (info.getCallOptions().getOption(NAME_RESOLUTION_DELAYED) != null) {
clientSpan.addEvent("Delayed name resolution complete");
}
return new ClientTracer(attemptSpan, clientSpan);
}
/**
* Record a finished call and mark the current time as the end time.
*
* <p>Can be called from any thread without synchronization. Calling it the second time or more
* is a no-op.
*/
void callEnded(io.grpc.Status status) {
if (callEndedUpdater != null) {
if (callEndedUpdater.getAndSet(this, 1) != 0) {
return;
}
} else {
if (callEnded != 0) {
return;
}
callEnded = 1;
}
endSpanWithStatus(clientSpan, status);
}
}
private final | CallAttemptsTracerFactory |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/mapper/Mapper.java | {
"start": 4991,
"end": 11806
} | class ____ {
// We use Integer.MAX_VALUE to represent a no-op, accepting all values.
public static final int IGNORE_ABOVE_DEFAULT_VALUE = Integer.MAX_VALUE;
public static final int IGNORE_ABOVE_DEFAULT_VALUE_FOR_LOGSDB_INDICES = 8191;
private final Integer value;
private final Integer defaultValue;
public IgnoreAbove(Integer value) {
this(Objects.requireNonNull(value), IndexMode.STANDARD, IndexVersion.current());
}
public IgnoreAbove(Integer value, IndexMode indexMode) {
this(value, indexMode, IndexVersion.current());
}
public IgnoreAbove(Integer value, IndexMode indexMode, IndexVersion indexCreatedVersion) {
if (value != null && value < 0) {
throw new IllegalArgumentException("[ignore_above] must be positive, got [" + value + "]");
}
this.value = value;
this.defaultValue = getIgnoreAboveDefaultValue(indexMode, indexCreatedVersion);
}
public int get() {
return value != null ? value : defaultValue;
}
/**
* Returns whether ignore_above is set; at field or index level.
*/
public boolean isSet() {
// if ignore_above equals default, its not considered to be set, even if it was explicitly set to the default value
return Integer.valueOf(get()).equals(defaultValue) == false;
}
/**
* Returns whether values are potentially ignored, either by an explicitly configured ignore_above or by the default value.
*/
public boolean valuesPotentiallyIgnored() {
// We use Integer.MAX_VALUE to represent accepting all values. If the value is anything else, then either we have an
// explicitly configured ignore_above, or we have a non no-op default.
return get() != Integer.MAX_VALUE;
}
/**
* Returns whether the given string will be ignored.
*/
public boolean isIgnored(final String s) {
if (s == null) return false;
return lengthExceedsIgnoreAbove(s.length());
}
public boolean isIgnored(final XContentString s) {
if (s == null) return false;
return lengthExceedsIgnoreAbove(s.stringLength());
}
private boolean lengthExceedsIgnoreAbove(int strLength) {
return strLength > get();
}
public static int getIgnoreAboveDefaultValue(final IndexMode indexMode, final IndexVersion indexCreatedVersion) {
if (diffIgnoreAboveDefaultForLogs(indexMode, indexCreatedVersion)) {
return IGNORE_ABOVE_DEFAULT_VALUE_FOR_LOGSDB_INDICES;
} else {
return IGNORE_ABOVE_DEFAULT_VALUE;
}
}
private static boolean diffIgnoreAboveDefaultForLogs(final IndexMode indexMode, final IndexVersion indexCreatedVersion) {
return indexMode == IndexMode.LOGSDB
&& (indexCreatedVersion != null && indexCreatedVersion.onOrAfter(IndexVersions.ENABLE_IGNORE_ABOVE_LOGSDB));
}
}
private final String leafName;
@SuppressWarnings("this-escape")
public Mapper(String leafName) {
Objects.requireNonNull(leafName);
this.leafName = internFieldName(leafName);
}
/**
* Returns the name of the field.
* When the field has a parent object, its leaf name won't include the entire path.
* When subobjects are disabled, its leaf name will be the same as {@link #fullPath()} in practice, because its parent is the root.
*/
public final String leafName() {
return leafName;
}
/** Returns the canonical name which uniquely identifies the mapper against other mappers in a type. */
public abstract String fullPath();
/**
* Returns a name representing the type of this mapper.
*/
public abstract String typeName();
/**
* Return the merge of {@code mergeWith} into this.
* Both {@code this} and {@code mergeWith} will be left unmodified.
*/
public abstract Mapper merge(Mapper mergeWith, MapperMergeContext mapperMergeContext);
/**
* Validate any cross-field references made by this mapper
* @param mappers a {@link MappingLookup} that can produce references to other mappers
*/
public abstract void validate(MappingLookup mappers);
@Override
public String toString() {
return Strings.toString(this);
}
private static final StringLiteralDeduplicator fieldNameStringDeduplicator = new StringLiteralDeduplicator();
/**
* Interns the given field name string through a {@link StringLiteralDeduplicator}.
* @param fieldName field name to intern
* @return interned field name string
*/
public static String internFieldName(String fieldName) {
return fieldNameStringDeduplicator.deduplicate(fieldName);
}
private static final Map<FieldType, FieldType> fieldTypeDeduplicator = new ConcurrentHashMap<>();
/**
* Freezes the given {@link FieldType} instances and tries to deduplicate it as long as the field does not return a non-empty value for
* {@link FieldType#getAttributes()}.
*
* @param fieldType field type to deduplicate
* @return deduplicated field type
*/
public static FieldType freezeAndDeduplicateFieldType(FieldType fieldType) {
fieldType.freeze();
var attributes = fieldType.getAttributes();
if ((attributes != null && attributes.isEmpty() == false) || fieldType.getClass() != FieldType.class) {
// don't deduplicate subclasses or types with non-empty attribute maps to avoid memory leaks
return fieldType;
}
if (fieldTypeDeduplicator.size() > 1000) {
// guard against the case where we run up too many combinations via (vector-)dimensions combinations
fieldTypeDeduplicator.clear();
}
return fieldTypeDeduplicator.computeIfAbsent(fieldType, Function.identity());
}
/**
* The total number of fields as defined in the mapping.
* Defines how this mapper counts towards {@link MapperService#INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING}.
*/
public abstract int getTotalFieldsCount();
/**
* @return whether this mapper supports storing leaf array elements natively when synthetic source is enabled.
*/
public final boolean supportStoringArrayOffsets() {
return getOffsetFieldName() != null;
}
/**
* @return the offset field name used to store offsets iff {@link #supportStoringArrayOffsets()} returns
* <code>true</code>.
*/
public String getOffsetFieldName() {
return null;
}
}
| IgnoreAbove |
java | apache__rocketmq | test/src/test/java/org/apache/rocketmq/test/client/consumer/broadcast/BaseBroadcast.java | {
"start": 1218,
"end": 2436
} | class ____ extends BaseConf {
private static Logger logger = LoggerFactory.getLogger(BaseBroadcast.class);
public static RMQBroadCastConsumer getBroadCastConsumer(String nsAddr, String topic,
String subExpression,
AbstractListener listener) {
String consumerGroup = initConsumerGroup();
return getBroadCastConsumer(nsAddr, consumerGroup, topic, subExpression, listener);
}
public static RMQBroadCastConsumer getBroadCastConsumer(String nsAddr, String consumerGroup,
String topic, String subExpression,
AbstractListener listener) {
RMQBroadCastConsumer consumer = ConsumerFactory.getRMQBroadCastConsumer(nsAddr,
consumerGroup, topic, subExpression, listener);
consumer.setDebug();
mqClients.add(consumer);
logger.info(String.format("consumer[%s] start,topic[%s],subExpression[%s]", consumerGroup,
topic, subExpression));
return consumer;
}
public void printSeparator() {
for (int i = 0; i < 3; i++) {
logger.info(
"<<<<<<<<================================================================================>>>>>>>>");
}
}
}
| BaseBroadcast |
java | apache__camel | components/camel-jms/src/test/java/org/apache/camel/component/jms/temp/JmsReconnectManualTest.java | {
"start": 1709,
"end": 1860
} | class ____ {
@RegisterExtension
public static ArtemisService service = ArtemisServiceFactory.createVMService();
public | JmsReconnectManualTest |
java | spring-projects__spring-framework | spring-webmvc/src/main/java/org/springframework/web/servlet/i18n/AbstractLocaleResolver.java | {
"start": 819,
"end": 1048
} | class ____ {@link LocaleResolver} implementations.
*
* <p>Provides support for a {@linkplain #setDefaultLocale(Locale) default locale}.
*
* @author Juergen Hoeller
* @since 1.2.9
* @see #setDefaultLocale
*/
public abstract | for |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/ProfileServlet.java | {
"start": 5202,
"end": 6225
} | enum ____ {
CPU("cpu"),
ALLOC("alloc"),
LOCK("lock"),
PAGE_FAULTS("page-faults"),
CONTEXT_SWITCHES("context-switches"),
CYCLES("cycles"),
INSTRUCTIONS("instructions"),
CACHE_REFERENCES("cache-references"),
CACHE_MISSES("cache-misses"),
BRANCHES("branches"),
BRANCH_MISSES("branch-misses"),
BUS_CYCLES("bus-cycles"),
L1_DCACHE_LOAD_MISSES("L1-dcache-load-misses"),
LLC_LOAD_MISSES("LLC-load-misses"),
DTLB_LOAD_MISSES("dTLB-load-misses"),
MEM_BREAKPOINT("mem:breakpoint"),
TRACE_TRACEPOINT("trace:tracepoint");
private final String internalName;
Event(final String internalName) {
this.internalName = internalName;
}
public String getInternalName() {
return internalName;
}
public static Event fromInternalName(final String name) {
for (Event event : values()) {
if (event.getInternalName().equalsIgnoreCase(name)) {
return event;
}
}
return null;
}
}
private | Event |
java | google__error-prone | core/src/test/java/com/google/errorprone/refaster/testdata/output/PlaceholderAllowsIdentityTemplateExample.java | {
"start": 854,
"end": 1555
} | class ____ {
public void positiveExample(List<Integer> list) {
Iterables.removeIf(list, new Predicate<Integer>(){
@Override
public boolean apply(Integer input) {
return input < 0;
}
});
}
public void positiveIdentityExample(List<Boolean> list) {
Iterables.removeIf(list, new Predicate<Boolean>(){
@Override
public boolean apply(Boolean input) {
return input;
}
});
}
public void refersToForbiddenVariable(List<Integer> list) {
Iterator<Integer> itr = list.iterator();
while (itr.hasNext()) {
if (itr.next() < list.size()) {
itr.remove();
}
}
}
}
| PlaceholderAllowsIdentityTemplateExample |
java | spring-projects__spring-framework | spring-webflux/src/test/java/org/springframework/web/reactive/result/condition/HeadersRequestConditionTests.java | {
"start": 1126,
"end": 6125
} | class ____ {
@Test
void headerEquals() {
assertThat(new HeadersRequestCondition("foo")).isEqualTo(new HeadersRequestCondition("foo"));
assertThat(new HeadersRequestCondition("FOO")).isEqualTo(new HeadersRequestCondition("foo"));
assertThat(new HeadersRequestCondition("bar")).isNotEqualTo(new HeadersRequestCondition("foo"));
assertThat(new HeadersRequestCondition("foo=bar")).isEqualTo(new HeadersRequestCondition("foo=bar"));
assertThat(new HeadersRequestCondition("FOO=bar")).isEqualTo(new HeadersRequestCondition("foo=bar"));
}
@Test
void headerPresent() {
MockServerWebExchange exchange = MockServerWebExchange.from(get("/").header("Accept", ""));
HeadersRequestCondition condition = new HeadersRequestCondition("accept");
assertThat(condition.getMatchingCondition(exchange)).isNotNull();
}
@Test
void headerPresentNoMatch() {
MockServerWebExchange exchange = MockServerWebExchange.from(get("/").header("bar", ""));
HeadersRequestCondition condition = new HeadersRequestCondition("foo");
assertThat(condition.getMatchingCondition(exchange)).isNull();
}
@Test
void headerNotPresent() {
MockServerWebExchange exchange = MockServerWebExchange.from(get("/"));
HeadersRequestCondition condition = new HeadersRequestCondition("!accept");
assertThat(condition.getMatchingCondition(exchange)).isNotNull();
}
@Test
void headerValueMatch() {
MockServerWebExchange exchange = MockServerWebExchange.from(get("/").header("foo", "bar"));
HeadersRequestCondition condition = new HeadersRequestCondition("foo=bar");
assertThat(condition.getMatchingCondition(exchange)).isNotNull();
}
@Test
void headerValueNoMatch() {
MockServerWebExchange exchange = MockServerWebExchange.from(get("/").header("foo", "bazz"));
HeadersRequestCondition condition = new HeadersRequestCondition("foo=bar");
assertThat(condition.getMatchingCondition(exchange)).isNull();
}
@Test
void headerCaseSensitiveValueMatch() {
MockServerWebExchange exchange = MockServerWebExchange.from(get("/").header("foo", "bar"));
HeadersRequestCondition condition = new HeadersRequestCondition("foo=Bar");
assertThat(condition.getMatchingCondition(exchange)).isNull();
}
@Test
void headerValueMatchNegated() {
MockServerWebExchange exchange = MockServerWebExchange.from(get("/").header("foo", "baz"));
HeadersRequestCondition condition = new HeadersRequestCondition("foo!=bar");
assertThat(condition.getMatchingCondition(exchange)).isNotNull();
}
@Test
void headerValueNoMatchNegated() {
MockServerWebExchange exchange = MockServerWebExchange.from(get("/").header("foo", "bar"));
HeadersRequestCondition condition = new HeadersRequestCondition("foo!=bar");
assertThat(condition.getMatchingCondition(exchange)).isNull();
}
@Test
void compareTo() {
MockServerWebExchange exchange = MockServerWebExchange.from(get("/"));
HeadersRequestCondition condition1 = new HeadersRequestCondition("foo", "bar", "baz");
HeadersRequestCondition condition2 = new HeadersRequestCondition("foo=a", "bar");
int result = condition1.compareTo(condition2, exchange);
assertThat(result).as("Invalid comparison result: " + result).isLessThan(0);
result = condition2.compareTo(condition1, exchange);
assertThat(result).as("Invalid comparison result: " + result).isGreaterThan(0);
}
@Test // SPR-16674
public void compareToWithMoreSpecificMatchByValue() {
ServerWebExchange exchange = MockServerWebExchange.from(get("/"));
HeadersRequestCondition condition1 = new HeadersRequestCondition("foo=a");
HeadersRequestCondition condition2 = new HeadersRequestCondition("foo");
int result = condition1.compareTo(condition2, exchange);
assertThat(result).as("Invalid comparison result: " + result).isLessThan(0);
}
@Test
void compareToWithNegatedMatch() {
ServerWebExchange exchange = MockServerWebExchange.from(get("/"));
HeadersRequestCondition condition1 = new HeadersRequestCondition("foo!=a");
HeadersRequestCondition condition2 = new HeadersRequestCondition("foo");
assertThat(condition1.compareTo(condition2, exchange)).as("Negated match should not count as more specific").isEqualTo(0);
}
@Test
void combine() {
HeadersRequestCondition condition1 = new HeadersRequestCondition("foo=bar");
HeadersRequestCondition condition2 = new HeadersRequestCondition("foo=baz");
HeadersRequestCondition result = condition1.combine(condition2);
Collection<?> conditions = result.getContent();
assertThat(conditions).hasSize(2);
}
@Test
void getMatchingCondition() {
MockServerWebExchange exchange = MockServerWebExchange.from(get("/").header("foo", "bar"));
HeadersRequestCondition condition = new HeadersRequestCondition("foo");
HeadersRequestCondition result = condition.getMatchingCondition(exchange);
assertThat(result).isEqualTo(condition);
condition = new HeadersRequestCondition("bar");
result = condition.getMatchingCondition(exchange);
assertThat(result).isNull();
}
}
| HeadersRequestConditionTests |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/idgen/GeneratorNotAppliedToIdEmbeddableFieldsShouldThrowAnExceptionTest.java | {
"start": 2154,
"end": 2269
} | class ____ {
@GeneratedValue(strategy = GenerationType.IDENTITY)
private Integer serialValue;
}
}
| TestEmbeddable |
java | elastic__elasticsearch | x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlLogoutResponseHandlerHttpRedirectTests.java | {
"start": 1087,
"end": 6618
} | class ____ extends SamlTestCase {
private static final String IDP_ENTITY_ID = "https://idp.test/";
private static final String LOGOUT_URL = "https://sp.test/saml/logout";
private Clock clock;
private SamlLogoutResponseHandler samlLogoutResponseHandler;
private static X509Credential credential;
@BeforeClass
public static void setupCredential() throws Exception {
credential = (X509Credential) buildOpenSamlCredential(readRandomKeyPair()).get(0);
}
@AfterClass
public static void clearCredential() {
credential = null;
}
@Before
public void setupHandler() throws Exception {
clock = Clock.systemUTC();
final IdpConfiguration idp = new IdpConfiguration(IDP_ENTITY_ID, () -> Collections.singletonList(credential));
final X509Credential spCredential = (X509Credential) buildOpenSamlCredential(readRandomKeyPair()).get(0);
final SigningConfiguration signingConfiguration = new SigningConfiguration(Collections.singleton("*"), spCredential);
final SpConfiguration sp = new SingleSamlSpConfiguration(
"https://sp.test/",
"https://sp.test/saml/asc",
LOGOUT_URL,
signingConfiguration,
List.of(spCredential),
Collections.emptyList()
);
samlLogoutResponseHandler = new SamlLogoutResponseHandler(clock, idp, sp, TimeValue.timeValueSeconds(1));
}
public void testHandlerWorks() throws URISyntaxException {
final String requestId = SamlUtils.generateSecureNCName(randomIntBetween(8, 30));
final SigningConfiguration signingConfiguration = new SigningConfiguration(Sets.newHashSet("*"), credential);
final LogoutResponse logoutResponse = SamlUtils.buildObject(LogoutResponse.class, LogoutResponse.DEFAULT_ELEMENT_NAME);
logoutResponse.setDestination(LOGOUT_URL);
logoutResponse.setIssueInstant(clock.instant());
logoutResponse.setID(SamlUtils.generateSecureNCName(randomIntBetween(8, 30)));
logoutResponse.setInResponseTo(requestId);
logoutResponse.setStatus(buildStatus(StatusCode.SUCCESS));
final Issuer issuer = SamlUtils.buildObject(Issuer.class, Issuer.DEFAULT_ELEMENT_NAME);
issuer.setValue(IDP_ENTITY_ID);
logoutResponse.setIssuer(issuer);
final String url = new SamlRedirect(logoutResponse, signingConfiguration).getRedirectUrl();
samlLogoutResponseHandler.handle(true, new URI(url).getRawQuery(), List.of(requestId));
}
public void testHandlerFailsIfStatusIsNotSuccess() {
final String requestId = SamlUtils.generateSecureNCName(randomIntBetween(8, 30));
final SigningConfiguration signingConfiguration = new SigningConfiguration(Sets.newHashSet("*"), credential);
final LogoutResponse logoutResponse = SamlUtils.buildObject(LogoutResponse.class, LogoutResponse.DEFAULT_ELEMENT_NAME);
logoutResponse.setDestination(LOGOUT_URL);
logoutResponse.setIssueInstant(clock.instant());
logoutResponse.setID(SamlUtils.generateSecureNCName(randomIntBetween(8, 30)));
logoutResponse.setInResponseTo(requestId);
logoutResponse.setStatus(buildStatus(randomFrom(StatusCode.REQUESTER, StatusCode.RESPONDER)));
final Issuer issuer = SamlUtils.buildObject(Issuer.class, Issuer.DEFAULT_ELEMENT_NAME);
issuer.setValue(IDP_ENTITY_ID);
logoutResponse.setIssuer(issuer);
final String url = new SamlRedirect(logoutResponse, signingConfiguration).getRedirectUrl();
final ElasticsearchSecurityException e = expectSamlException(
() -> samlLogoutResponseHandler.handle(true, new URI(url).getRawQuery(), List.of(requestId))
);
assertThat(e.getMessage(), containsString("is not a 'success' response"));
}
public void testHandlerWillFailWhenQueryStringNotSigned() {
final String requestId = SamlUtils.generateSecureNCName(randomIntBetween(8, 30));
final SigningConfiguration signingConfiguration = new SigningConfiguration(Sets.newHashSet("*"), null);
final LogoutResponse logoutResponse = SamlUtils.buildObject(LogoutResponse.class, LogoutResponse.DEFAULT_ELEMENT_NAME);
logoutResponse.setDestination(LOGOUT_URL);
logoutResponse.setIssueInstant(clock.instant());
logoutResponse.setID(SamlUtils.generateSecureNCName(randomIntBetween(8, 30)));
logoutResponse.setInResponseTo(requestId);
logoutResponse.setStatus(buildStatus(randomFrom(StatusCode.REQUESTER, StatusCode.RESPONDER)));
final Issuer issuer = SamlUtils.buildObject(Issuer.class, Issuer.DEFAULT_ELEMENT_NAME);
issuer.setValue(IDP_ENTITY_ID);
logoutResponse.setIssuer(issuer);
final String url = new SamlRedirect(logoutResponse, signingConfiguration).getRedirectUrl();
final ElasticsearchSecurityException e = expectSamlException(
() -> samlLogoutResponseHandler.handle(true, new URI(url).getRawQuery(), List.of(requestId))
);
assertThat(e.getMessage(), containsString("Query string is not signed, but is required for HTTP-Redirect binding"));
}
private Status buildStatus(String statusCodeValue) {
final Status status = new StatusBuilder().buildObject();
final StatusCode statusCode = new StatusCodeBuilder().buildObject();
statusCode.setValue(statusCodeValue);
status.setStatusCode(statusCode);
return status;
}
}
| SamlLogoutResponseHandlerHttpRedirectTests |
java | elastic__elasticsearch | server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java | {
"start": 1819,
"end": 8052
} | class ____ extends AbstractSnapshotIntegTestCase {
public void testMasterFailoverDuringCleanup() throws Exception {
final ActionFuture<CleanupRepositoryResponse> cleanupFuture = startBlockedCleanup("test-repo");
final int nodeCount = internalCluster().numDataAndMasterNodes();
logger.info("--> stopping master node");
internalCluster().stopCurrentMasterNode();
ensureStableCluster(nodeCount - 1);
logger.info("--> wait for cleanup to finish and disappear from cluster state");
awaitClusterState(state -> RepositoryCleanupInProgress.get(state).hasCleanupInProgress() == false);
try {
cleanupFuture.get();
} catch (ExecutionException e) {
// rare case where the master failure triggers a client retry that executes quicker than the removal of the initial
// cleanup in progress
final Throwable ise = ExceptionsHelper.unwrap(e, IllegalStateException.class);
assertThat(ise, instanceOf(IllegalStateException.class));
assertThat(ise.getMessage(), containsString(" a repository cleanup is already in-progress in "));
}
}
public void testRepeatCleanupsDontRemove() throws Exception {
final ActionFuture<CleanupRepositoryResponse> cleanupFuture = startBlockedCleanup("test-repo");
logger.info("--> sending another cleanup");
assertFutureThrows(
clusterAdmin().prepareCleanupRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo").execute(),
IllegalStateException.class
);
logger.info("--> ensure cleanup is still in progress");
final RepositoryCleanupInProgress cleanup = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT)
.get()
.getState()
.custom(RepositoryCleanupInProgress.TYPE);
assertTrue(cleanup.hasCleanupInProgress());
logger.info("--> unblocking master node");
unblockNode("test-repo", internalCluster().getMasterName());
logger.info("--> wait for cleanup to finish and disappear from cluster state");
awaitClusterState(state -> RepositoryCleanupInProgress.get(state).hasCleanupInProgress() == false);
final ExecutionException e = expectThrows(ExecutionException.class, cleanupFuture::get);
final Throwable ioe = ExceptionsHelper.unwrap(e, IOException.class);
assertThat(ioe, instanceOf(IOException.class));
assertThat(ioe.getMessage(), is("exception after block"));
}
private ActionFuture<CleanupRepositoryResponse> startBlockedCleanup(String repoName) throws Exception {
logger.info("--> starting two master nodes and one data node");
internalCluster().startMasterOnlyNodes(2);
internalCluster().startDataOnlyNodes(1);
createRepository(repoName, "mock");
logger.info("--> snapshot");
clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "test-snap").setWaitForCompletion(true).get();
final BlobStoreRepository repository = getRepositoryOnMaster(repoName);
logger.info("--> creating a garbage data blob");
final PlainActionFuture<Void> garbageFuture = new PlainActionFuture<>();
repository.threadPool()
.generic()
.execute(
ActionRunnable.run(
garbageFuture,
() -> repository.blobStore()
.blobContainer(repository.basePath())
.writeBlob(randomNonDataPurpose(), "snap-foo.dat", new BytesArray(new byte[1]), true)
)
);
garbageFuture.get();
blockMasterFromFinalizingSnapshotOnIndexFile(repoName);
logger.info("--> starting repository cleanup");
// running from a non-master client because shutting down a master while a request to it is pending might result in the future
// never completing
final ActionFuture<CleanupRepositoryResponse> future = internalCluster().nonMasterClient()
.admin()
.cluster()
.prepareCleanupRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName)
.execute();
final String masterNode = internalCluster().getMasterName();
waitForBlock(masterNode, repoName);
awaitClusterState(state -> RepositoryCleanupInProgress.get(state).hasCleanupInProgress());
return future;
}
public void testCleanupOldIndexN() throws ExecutionException, InterruptedException {
internalCluster().startNodes(Settings.EMPTY);
final String repoName = "test-repo";
createRepository(repoName, "fs");
logger.info("--> create three snapshots");
for (int i = 0; i < 3; ++i) {
CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(
TEST_REQUEST_TIMEOUT,
repoName,
"test-snap-" + i
).setWaitForCompletion(true).get();
assertThat(createSnapshotResponse.getSnapshotInfo().state(), is(SnapshotState.SUCCESS));
}
final BlobStoreRepository repository = getRepositoryOnMaster(repoName);
logger.info("--> write two outdated index-N blobs");
for (int i = 0; i < 2; ++i) {
final PlainActionFuture<Void> createOldIndexNFuture = new PlainActionFuture<>();
final int generation = i;
repository.threadPool()
.generic()
.execute(
ActionRunnable.run(
createOldIndexNFuture,
() -> repository.blobStore()
.blobContainer(repository.basePath())
.writeBlob(randomNonDataPurpose(), getRepositoryDataBlobName(generation), new BytesArray(new byte[1]), true)
)
);
createOldIndexNFuture.get();
}
logger.info("--> cleanup repository");
clusterAdmin().prepareCleanupRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName).get();
BlobStoreTestUtil.assertConsistency(repository);
}
}
| BlobStoreRepositoryCleanupIT |
java | apache__camel | components/camel-bindy/src/test/java/org/apache/camel/dataformat/bindy/format/factories/DefaultFactoryRegistryTest.java | {
"start": 1091,
"end": 1603
} | class ____ {
@Test
public void unregisterFormatFactory() {
FactoryRegistry reg = new DefaultFactoryRegistry();
FormattingOptions formattingOptions = new FormattingOptions().forClazz(String.class);
assertNotNull(reg.findForFormattingOptions(formattingOptions));
reg.unregister(StringFormatFactory.class);
assertThrows(IllegalArgumentException.class, () -> {
reg.findForFormattingOptions(formattingOptions);
});
}
}
| DefaultFactoryRegistryTest |
java | apache__rocketmq | proxy/src/main/java/org/apache/rocketmq/proxy/service/ServiceManager.java | {
"start": 1426,
"end": 1829
} | interface ____ extends StartAndShutdown {
MessageService getMessageService();
TopicRouteService getTopicRouteService();
ProducerManager getProducerManager();
ConsumerManager getConsumerManager();
TransactionService getTransactionService();
ProxyRelayService getProxyRelayService();
MetadataService getMetadataService();
AdminService getAdminService();
}
| ServiceManager |
java | grpc__grpc-java | core/src/main/java/io/grpc/internal/ManagedChannelImpl.java | {
"start": 32002,
"end": 36499
} | class ____ extends Channel {
// Reference to null if no config selector is available from resolution result
// Reference must be set() from syncContext
private final AtomicReference<InternalConfigSelector> configSelector =
new AtomicReference<>(INITIAL_PENDING_SELECTOR);
// Set when the NameResolver is initially created. When we create a new NameResolver for the
// same target, the new instance must have the same value.
private final String authority;
private final Channel clientCallImplChannel = new Channel() {
@Override
public <RequestT, ResponseT> ClientCall<RequestT, ResponseT> newCall(
MethodDescriptor<RequestT, ResponseT> method, CallOptions callOptions) {
return new ClientCallImpl<>(
method,
getCallExecutor(callOptions),
callOptions,
transportProvider,
terminated ? null : transportFactory.getScheduledExecutorService(),
channelCallTracer,
null)
.setFullStreamDecompression(fullStreamDecompression)
.setDecompressorRegistry(decompressorRegistry)
.setCompressorRegistry(compressorRegistry);
}
@Override
public String authority() {
return authority;
}
};
private RealChannel(String authority) {
this.authority = checkNotNull(authority, "authority");
}
@Override
public <ReqT, RespT> ClientCall<ReqT, RespT> newCall(
MethodDescriptor<ReqT, RespT> method, CallOptions callOptions) {
if (configSelector.get() != INITIAL_PENDING_SELECTOR) {
return newClientCall(method, callOptions);
}
syncContext.execute(new Runnable() {
@Override
public void run() {
exitIdleMode();
}
});
if (configSelector.get() != INITIAL_PENDING_SELECTOR) {
// This is an optimization for the case (typically with InProcessTransport) when name
// resolution result is immediately available at this point. Otherwise, some users'
// tests might observe slight behavior difference from earlier grpc versions.
return newClientCall(method, callOptions);
}
if (shutdown.get()) {
// Return a failing ClientCall.
return new ClientCall<ReqT, RespT>() {
@Override
public void start(Listener<RespT> responseListener, Metadata headers) {
responseListener.onClose(SHUTDOWN_STATUS, new Metadata());
}
@Override public void request(int numMessages) {}
@Override public void cancel(@Nullable String message, @Nullable Throwable cause) {}
@Override public void halfClose() {}
@Override public void sendMessage(ReqT message) {}
};
}
Context context = Context.current();
final PendingCall<ReqT, RespT> pendingCall = new PendingCall<>(context, method, callOptions);
syncContext.execute(new Runnable() {
@Override
public void run() {
if (configSelector.get() == INITIAL_PENDING_SELECTOR) {
if (pendingCalls == null) {
pendingCalls = new LinkedHashSet<>();
inUseStateAggregator.updateObjectInUse(pendingCallsInUseObject, true);
}
pendingCalls.add(pendingCall);
} else {
pendingCall.reprocess();
}
}
});
return pendingCall;
}
// Must run in SynchronizationContext.
void updateConfigSelector(@Nullable InternalConfigSelector config) {
InternalConfigSelector prevConfig = configSelector.get();
configSelector.set(config);
if (prevConfig == INITIAL_PENDING_SELECTOR && pendingCalls != null) {
for (RealChannel.PendingCall<?, ?> pendingCall : pendingCalls) {
pendingCall.reprocess();
}
}
}
// Must run in SynchronizationContext.
void onConfigError() {
if (configSelector.get() == INITIAL_PENDING_SELECTOR) {
// Apply Default Service Config if initial name resolution fails.
if (defaultServiceConfig != null) {
updateConfigSelector(defaultServiceConfig.getDefaultConfigSelector());
lastServiceConfig = defaultServiceConfig;
channelLogger.log(ChannelLogLevel.ERROR,
"Initial Name Resolution error, using default service config");
} else {
updateConfigSelector(null);
}
}
}
void shutdown() {
final | RealChannel |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/Issue213Test.java | {
"start": 552,
"end": 2138
} | class ____ implements Serializable {
private static final long serialVersionUID = 5515785177596600948L;
private String studyTargets;
private String applicableUsers;
private String intro;
private Date createDateTime;
private int createUserId;
private int liveStatus;
public String getStudyTargets() {
return studyTargets;
}
public void setStudyTargets(String studyTargets) {
this.studyTargets = studyTargets;
}
public String getApplicableUsers() {
return applicableUsers;
}
public void setApplicableUsers(String applicableUsers) {
this.applicableUsers = applicableUsers;
}
public String getIntro() {
return intro;
}
public void setIntro(String intro) {
this.intro = intro;
}
public int getCreateUserId() {
return createUserId;
}
public void setCreateUserId(int createUserId) {
this.createUserId = createUserId;
}
public int getLiveStatus() {
return liveStatus;
}
public void setLiveStatus(int liveStatus) {
this.liveStatus = liveStatus;
}
public Date getCreateDateTime() {
return createDateTime;
}
public void setCreateDateTime(Date createDateTime) {
this.createDateTime = createDateTime;
}
}
}
| Product |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/context/properties/ConfigurationPropertiesBeanTests.java | {
"start": 18547,
"end": 18792
} | class ____ {
@Bean
@ConfigurationProperties
NonAnnotatedGenericBean<String> nonAnnotatedGenericBean() {
return new NonAnnotatedGenericBean<>();
}
}
@Configuration(proxyBeanMethods = false)
static | NonAnnotatedGenericBeanConfiguration |
java | apache__flink | flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/sync/AbstractForStSyncAppendingState.java | {
"start": 1099,
"end": 2865
} | class ____<K, N, IN, SV, OUT>
extends AbstractForStSyncState<K, N, SV>
implements InternalAppendingState<K, N, IN, SV, OUT> {
/**
* Creates a new RocksDB backend appending state.
*
* @param columnFamily The RocksDB column family that this state is associated to.
* @param namespaceSerializer The serializer for the namespace.
* @param valueSerializer The serializer for the state.
* @param defaultValue The default value for the state.
* @param backend The backend for which this state is bind to.
*/
protected AbstractForStSyncAppendingState(
ColumnFamilyHandle columnFamily,
TypeSerializer<N> namespaceSerializer,
TypeSerializer<SV> valueSerializer,
SV defaultValue,
ForStSyncKeyedStateBackend<K> backend) {
super(columnFamily, namespaceSerializer, valueSerializer, defaultValue, backend);
}
@Override
public SV getInternal() throws IOException, RocksDBException {
return getInternal(getKeyBytes());
}
SV getInternal(byte[] key) throws IOException, RocksDBException {
byte[] valueBytes = backend.db.get(columnFamily, key);
if (valueBytes == null) {
return null;
}
dataInputView.setBuffer(valueBytes);
return valueSerializer.deserialize(dataInputView);
}
@Override
public void updateInternal(SV valueToStore) throws RocksDBException {
updateInternal(getKeyBytes(), valueToStore);
}
void updateInternal(byte[] key, SV valueToStore) throws RocksDBException {
// write the new value to RocksDB
backend.db.put(columnFamily, writeOptions, key, getValueBytes(valueToStore));
}
}
| AbstractForStSyncAppendingState |
java | apache__camel | components/camel-jsonpath/src/test/java/org/apache/camel/jsonpath/JsonPathPredicateJsonSmartTest.java | {
"start": 1174,
"end": 1750
} | class ____ extends CamelTestSupport {
@Override
public boolean isUseRouteBuilder() {
return false;
}
@Test
public void testPredicate() {
Exchange exchange = new DefaultExchange(context);
exchange.getIn().setBody(new File("src/test/resources/messages.json"));
Language lan = context.resolveLanguage("jsonpath");
Predicate pre = lan.createPredicate("$.messages[?(!@.bot_id)]");
boolean bot = pre.matches(exchange);
assertTrue(bot, "Should have message from bot");
}
}
| JsonPathPredicateJsonSmartTest |
java | alibaba__fastjson | src/main/java/com/alibaba/fastjson/serializer/CalendarCodec.java | {
"start": 775,
"end": 7005
} | class ____ extends ContextObjectDeserializer implements ObjectSerializer, ObjectDeserializer, ContextObjectSerializer {
public final static CalendarCodec instance = new CalendarCodec();
private DatatypeFactory dateFactory;
public void write(JSONSerializer serializer, Object object, BeanContext context) throws IOException {
SerializeWriter out = serializer.out;
String format = context.getFormat();
Calendar calendar = (Calendar) object;
if (format.equals("unixtime")) {
long seconds = calendar.getTimeInMillis() / 1000L;
out.writeInt((int) seconds);
return;
}
DateFormat dateFormat = new SimpleDateFormat(format);
if (dateFormat == null) {
dateFormat = new SimpleDateFormat(JSON.DEFFAULT_DATE_FORMAT, serializer.locale);
}
dateFormat.setTimeZone(serializer.timeZone);
String text = dateFormat.format(calendar.getTime());
out.writeString(text);
}
public void write(JSONSerializer serializer, Object object, Object fieldName, Type fieldType, int features)
throws IOException {
SerializeWriter out = serializer.out;
if (object == null) {
out.writeNull();
return;
}
Calendar calendar;
if (object instanceof XMLGregorianCalendar) {
calendar = ((XMLGregorianCalendar) object).toGregorianCalendar();
} else {
calendar = (Calendar) object;
}
if (out.isEnabled(SerializerFeature.UseISO8601DateFormat)) {
final char quote = out.isEnabled(SerializerFeature.UseSingleQuotes) //
? '\'' //
: '\"';
out.append(quote);
int year = calendar.get(Calendar.YEAR);
int month = calendar.get(Calendar.MONTH) + 1;
int day = calendar.get(Calendar.DAY_OF_MONTH);
int hour = calendar.get(Calendar.HOUR_OF_DAY);
int minute = calendar.get(Calendar.MINUTE);
int second = calendar.get(Calendar.SECOND);
int millis = calendar.get(Calendar.MILLISECOND);
char[] buf;
if (millis != 0) {
buf = "0000-00-00T00:00:00.000".toCharArray();
IOUtils.getChars(millis, 23, buf);
IOUtils.getChars(second, 19, buf);
IOUtils.getChars(minute, 16, buf);
IOUtils.getChars(hour, 13, buf);
IOUtils.getChars(day, 10, buf);
IOUtils.getChars(month, 7, buf);
IOUtils.getChars(year, 4, buf);
} else {
if (second == 0 && minute == 0 && hour == 0) {
buf = "0000-00-00".toCharArray();
IOUtils.getChars(day, 10, buf);
IOUtils.getChars(month, 7, buf);
IOUtils.getChars(year, 4, buf);
} else {
buf = "0000-00-00T00:00:00".toCharArray();
IOUtils.getChars(second, 19, buf);
IOUtils.getChars(minute, 16, buf);
IOUtils.getChars(hour, 13, buf);
IOUtils.getChars(day, 10, buf);
IOUtils.getChars(month, 7, buf);
IOUtils.getChars(year, 4, buf);
}
}
out.write(buf);
float timeZoneF = calendar.getTimeZone().getOffset(calendar.getTimeInMillis()) / (3600.0f * 1000);
int timeZone = (int)timeZoneF;
if (timeZone == 0.0) {
out.write('Z');
} else {
if (timeZone > 9) {
out.write('+');
out.writeInt(timeZone);
} else if (timeZone > 0) {
out.write('+');
out.write('0');
out.writeInt(timeZone);
} else if (timeZone < -9) {
out.write('-');
out.writeInt(timeZone);
} else if (timeZone < 0) {
out.write('-');
out.write('0');
out.writeInt(-timeZone);
}
out.write(':');
// handles uneven timeZones 30 mins, 45 mins
// this would always be less than 60
int offSet = (int)((timeZoneF - timeZone) * 60);
out.append(String.format("%02d", offSet));
}
out.append(quote);
} else {
Date date = calendar.getTime();
serializer.write(date);
}
}
public <T> T deserialze(DefaultJSONParser parser, Type clazz, Object fieldName) {
return deserialze(parser, clazz, fieldName, null, 0);
}
@Override
@SuppressWarnings("unchecked")
public <T> T deserialze(DefaultJSONParser parser, Type type, Object fieldName, String format, int features) {
Object value = DateCodec.instance.deserialze(parser, type, fieldName, format, features);
if (value instanceof Calendar) {
return (T) value;
}
Date date = (Date) value;
if (date == null) {
return null;
}
JSONLexer lexer = parser.lexer;
Calendar calendar = Calendar.getInstance(lexer.getTimeZone(), lexer.getLocale());
calendar.setTime(date);
if (type == XMLGregorianCalendar.class) {
return (T) createXMLGregorianCalendar((GregorianCalendar) calendar);
}
return (T) calendar;
}
public XMLGregorianCalendar createXMLGregorianCalendar(Calendar calendar) {
if (dateFactory == null) {
try {
dateFactory = DatatypeFactory.newInstance();
} catch (DatatypeConfigurationException e) {
throw new IllegalStateException("Could not obtain an instance of DatatypeFactory.", e);
}
}
return dateFactory.newXMLGregorianCalendar((GregorianCalendar) calendar);
}
public int getFastMatchToken() {
return JSONToken.LITERAL_INT;
}
}
| CalendarCodec |
java | apache__kafka | clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslServerProvider.java | {
"start": 1131,
"end": 1654
} | class ____ extends Provider {
private static final long serialVersionUID = 1L;
private OAuthBearerSaslServerProvider() {
super("SASL/OAUTHBEARER Server Provider", "1.0", "SASL/OAUTHBEARER Server Provider for Kafka");
put("SaslServerFactory." + OAuthBearerLoginModule.OAUTHBEARER_MECHANISM,
OAuthBearerSaslServerFactory.class.getName());
}
public static void initialize() {
Security.addProvider(new OAuthBearerSaslServerProvider());
}
}
| OAuthBearerSaslServerProvider |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/rest/messages/SubtasksAllAccumulatorsHeaders.java | {
"start": 1238,
"end": 2813
} | class ____
implements RuntimeMessageHeaders<
EmptyRequestBody, SubtasksAllAccumulatorsInfo, JobVertexMessageParameters> {
private static final SubtasksAllAccumulatorsHeaders INSTANCE =
new SubtasksAllAccumulatorsHeaders();
public static final String URL =
"/jobs"
+ "/:"
+ JobIDPathParameter.KEY
+ "/vertices"
+ "/:"
+ JobVertexIdPathParameter.KEY
+ "/subtasks/accumulators";
private SubtasksAllAccumulatorsHeaders() {}
@Override
public Class<EmptyRequestBody> getRequestClass() {
return EmptyRequestBody.class;
}
@Override
public Class<SubtasksAllAccumulatorsInfo> getResponseClass() {
return SubtasksAllAccumulatorsInfo.class;
}
@Override
public HttpResponseStatus getResponseStatusCode() {
return HttpResponseStatus.OK;
}
@Override
public JobVertexMessageParameters getUnresolvedMessageParameters() {
return new JobVertexMessageParameters();
}
@Override
public HttpMethodWrapper getHttpMethod() {
return HttpMethodWrapper.GET;
}
@Override
public String getTargetRestEndpointURL() {
return URL;
}
public static SubtasksAllAccumulatorsHeaders getInstance() {
return INSTANCE;
}
@Override
public String getDescription() {
return "Returns all user-defined accumulators for all subtasks of a task.";
}
}
| SubtasksAllAccumulatorsHeaders |
java | elastic__elasticsearch | modules/lang-painless/src/main/java/org/elasticsearch/painless/ir/DoWhileLoopNode.java | {
"start": 616,
"end": 1243
} | class ____ extends ConditionNode {
/* ---- begin visitor ---- */
@Override
public <Scope> void visit(IRTreeVisitor<Scope> irTreeVisitor, Scope scope) {
irTreeVisitor.visitDoWhileLoop(this, scope);
}
@Override
public <Scope> void visitChildren(IRTreeVisitor<Scope> irTreeVisitor, Scope scope) {
getBlockNode().visit(irTreeVisitor, scope);
if (getConditionNode() != null) {
getConditionNode().visit(irTreeVisitor, scope);
}
}
/* ---- end visitor ---- */
public DoWhileLoopNode(Location location) {
super(location);
}
}
| DoWhileLoopNode |
java | google__guice | core/test/com/google/inject/name/NamedEquivalanceTest.java | {
"start": 9345,
"end": 10405
} | class ____ implements jakarta.inject.Named, Serializable {
private final String value;
public JakartaNamed(String value) {
this.value = value;
}
@Override
public String value() {
return this.value;
}
@Override
public int hashCode() {
// This is specified in java.lang.Annotation.
return (127 * "value".hashCode()) ^ value.hashCode();
}
@Override
public boolean equals(Object o) {
if (!(o instanceof jakarta.inject.Named)) {
return false;
}
jakarta.inject.Named other = (jakarta.inject.Named) o;
return value.equals(other.value());
}
@Override
public String toString() {
return "@"
+ jakarta.inject.Named.class.getName()
+ "(value="
+ Annotations.memberValueString("value", value)
+ ")";
}
@Override
public Class<? extends Annotation> annotationType() {
return jakarta.inject.Named.class;
}
private static final long serialVersionUID = 0;
}
private static | JakartaNamed |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/IncludeWithDeserTest.java | {
"start": 1385,
"end": 1742
} | class ____
{
@JsonIncludeProperties({"y"})
//@JsonIgnoreProperties({"z"})
public List<OnlyYAndZ> onlyYs;
public IncludeForListValuesY()
{
onlyYs = Arrays.asList(new OnlyYAndZ());
}
}
@SuppressWarnings("serial")
@JsonIncludeProperties({"@class", "a"})
static | IncludeForListValuesY |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/metamodel/mapping/AttributeMappingsMap.java | {
"start": 1520,
"end": 2073
} | class ____ {
private Builder(){}
private LinkedHashMap<String,AttributeMapping> storage;
public void put(final String name, final AttributeMapping mapping) {
Objects.requireNonNull( name );
Objects.requireNonNull( mapping );
if ( storage == null ) {
storage = new LinkedHashMap<>();
}
storage.put( name, mapping );
}
public AttributeMappingsMap build() {
if ( storage == null ) {
return EmptyAttributeMappingsMap.INSTANCE;
}
else {
return new ImmutableAttributeMappingsMap( storage );
}
}
}
}
| Builder |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/IncompatibleModifiersCheckerTest.java | {
"start": 3703,
"end": 4208
} | class ____ {
// BUG: Diagnostic contains: The annotation '@NotPrivateOrFinal' has specified that it should not
// be used together with the following modifiers: [private]
@NotPrivateOrFinal
private void foo() {}
}
""")
.doTest();
}
@Test
public void annotationWithTwoIncompatibleModifiersFails() {
compilationHelper
.addSourceLines(
"test/IncompatibleModifiersTestCase.java",
"""
package test;
import test.NotPublicOrFinal;
public | IncompatibleModifiersTestCase |
java | redisson__redisson | redisson/src/main/java/org/redisson/connection/SingleConnectionManager.java | {
"start": 822,
"end": 5392
} | class ____ extends MasterSlaveConnectionManager {
SingleConnectionManager(SingleServerConfig cfg, Config configCopy) {
super(create(cfg), configCopy);
}
private static MasterSlaveServersConfig create(SingleServerConfig cfg) {
MasterSlaveServersConfig newconfig = new MasterSlaveServersConfig();
if (cfg.getUsername() != null) {
newconfig.setUsername(cfg.getUsername());
}
if (cfg.getPassword() != null) {
newconfig.setPassword(cfg.getPassword());
}
if (!(cfg.getNameMapper() instanceof DefaultNameMapper)) {
newconfig.setNameMapper(cfg.getNameMapper());
}
if (!(cfg.getCommandMapper() instanceof DefaultCommandMapper)) {
newconfig.setCommandMapper(cfg.getCommandMapper());
}
if (!(cfg.getCredentialsResolver() instanceof DefaultCredentialsResolver)) {
newconfig.setCredentialsResolver(cfg.getCredentialsResolver());
}
if (cfg.getSslVerificationMode() != SslVerificationMode.STRICT) {
newconfig.setSslVerificationMode(cfg.getSslVerificationMode());
}
if (cfg.getSslKeystoreType() != null) {
newconfig.setSslKeystoreType(cfg.getSslKeystoreType());
}
if (cfg.getSslProvider() != SslProvider.JDK) {
newconfig.setSslProvider(cfg.getSslProvider());
}
if (cfg.getSslTruststore() != null) {
newconfig.setSslTruststore(cfg.getSslTruststore());
}
if (cfg.getSslTruststorePassword() != null) {
newconfig.setSslTruststorePassword(cfg.getSslTruststorePassword());
}
if (cfg.getSslKeystore() != null) {
newconfig.setSslKeystore(cfg.getSslKeystore());
}
if (cfg.getSslKeystorePassword() != null) {
newconfig.setSslKeystorePassword(cfg.getSslKeystorePassword());
}
if (cfg.getSslProtocols() != null) {
newconfig.setSslProtocols(cfg.getSslProtocols());
}
if (cfg.getSslCiphers() != null) {
newconfig.setSslCiphers(cfg.getSslCiphers());
}
if (cfg.getSslKeyManagerFactory() != null) {
newconfig.setSslKeyManagerFactory(cfg.getSslKeyManagerFactory());
}
if (cfg.getSslTrustManagerFactory() != null) {
newconfig.setSslTrustManagerFactory(cfg.getSslTrustManagerFactory());
}
if (cfg.isKeepAlive()) {
newconfig.setKeepAlive(cfg.isKeepAlive());
}
if (cfg.getTcpKeepAliveCount() != 0) {
newconfig.setTcpKeepAliveCount(cfg.getTcpKeepAliveCount());
}
if (cfg.getTcpKeepAliveIdle() != 0) {
newconfig.setTcpKeepAliveIdle(cfg.getTcpKeepAliveIdle());
}
if (cfg.getTcpKeepAliveInterval() != 0) {
newconfig.setTcpKeepAliveInterval(cfg.getTcpKeepAliveInterval());
}
if (cfg.getTcpUserTimeout() != 0) {
newconfig.setTcpUserTimeout(cfg.getTcpUserTimeout());
}
if (!cfg.isTcpNoDelay()) {
newconfig.setTcpNoDelay(cfg.isTcpNoDelay());
}
newconfig.setPingConnectionInterval(cfg.getPingConnectionInterval());
newconfig.setRetryAttempts(cfg.getRetryAttempts());
newconfig.setRetryDelay(cfg.getRetryDelay());
newconfig.setReconnectionDelay(cfg.getReconnectionDelay());
newconfig.setTimeout(cfg.getTimeout());
newconfig.setDatabase(cfg.getDatabase());
newconfig.setClientName(cfg.getClientName());
newconfig.setMasterAddress(cfg.getAddress());
newconfig.setMasterConnectionPoolSize(cfg.getConnectionPoolSize());
newconfig.setSubscriptionsPerConnection(cfg.getSubscriptionsPerConnection());
newconfig.setSubscriptionConnectionPoolSize(cfg.getSubscriptionConnectionPoolSize());
newconfig.setConnectTimeout(cfg.getConnectTimeout());
newconfig.setIdleConnectionTimeout(cfg.getIdleConnectionTimeout());
newconfig.setDnsMonitoringInterval(cfg.getDnsMonitoringInterval());
newconfig.setMasterConnectionMinimumIdleSize(cfg.getConnectionMinimumIdleSize());
newconfig.setSubscriptionConnectionMinimumIdleSize(cfg.getSubscriptionConnectionMinimumIdleSize());
newconfig.setReadMode(ReadMode.MASTER);
newconfig.setSubscriptionMode(SubscriptionMode.MASTER);
newconfig.setSubscriptionTimeout(cfg.getSubscriptionTimeout());
return newconfig;
}
}
| SingleConnectionManager |
java | google__error-prone | core/src/test/java/com/google/errorprone/refaster/testdata/output/BinaryTemplateExample.java | {
"start": 786,
"end": 1303
} | class ____ {
public void example(int x, int y) {
// positive examples
System.out.println((0xFF + 5) >> 1);
System.out.println(((x + y) >> 1) + 20);
System.err.println((y + new Random().nextInt()) >> 1);
// negative examples
System.out.println((x + y /* signed division */) / 2 + 20);
System.out.println(x + y / 2);
System.out.println((x - y) / 2);
System.out.println((x * y) / 2);
System.out.println((x + y) / 3);
System.out.println((x + 5L) / 2);
}
}
| BinaryTemplateExample |
java | google__guava | android/guava-testlib/src/com/google/common/collect/testing/testers/CollectionIsEmptyTester.java | {
"start": 1434,
"end": 1798
} | class ____<E> extends AbstractCollectionTester<E> {
@CollectionSize.Require(ZERO)
public void testIsEmpty_yes() {
assertTrue("isEmpty() should return true", collection.isEmpty());
}
@CollectionSize.Require(absent = ZERO)
public void testIsEmpty_no() {
assertFalse("isEmpty() should return false", collection.isEmpty());
}
}
| CollectionIsEmptyTester |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksAggregatorFactory.java | {
"start": 1353,
"end": 3892
} | class ____ extends ValuesSourceAggregatorFactory {
private final PercentilesAggregatorSupplier aggregatorSupplier;
private final double[] percents;
private final PercentilesConfig percentilesConfig;
private final boolean keyed;
static void registerAggregators(ValuesSourceRegistry.Builder builder) {
builder.register(
PercentileRanksAggregationBuilder.REGISTRY_KEY,
List.of(
CoreValuesSourceType.NUMERIC,
CoreValuesSourceType.DATE,
CoreValuesSourceType.BOOLEAN,
TimeSeriesValuesSourceType.COUNTER
),
(name, config, context, parent, percents, percentilesConfig, keyed, formatter, metadata) -> percentilesConfig
.createPercentileRanksAggregator(name, config, context, parent, percents, keyed, formatter, metadata),
true
);
}
PercentileRanksAggregatorFactory(
String name,
ValuesSourceConfig config,
double[] percents,
PercentilesConfig percentilesConfig,
boolean keyed,
AggregationContext context,
AggregatorFactory parent,
AggregatorFactories.Builder subFactoriesBuilder,
Map<String, Object> metadata,
PercentilesAggregatorSupplier aggregatorSupplier
) throws IOException {
super(name, config, context, parent, subFactoriesBuilder, metadata);
this.percents = percents;
this.percentilesConfig = percentilesConfig;
this.keyed = keyed;
this.aggregatorSupplier = aggregatorSupplier;
}
@Override
protected Aggregator createUnmapped(Aggregator parent, Map<String, Object> metadata) throws IOException {
final InternalNumericMetricsAggregation.MultiValue empty = percentilesConfig.createEmptyPercentileRanksAggregator(
name,
percents,
keyed,
config.format(),
metadata
);
final Predicate<String> hasMetric = s -> PercentilesConfig.indexOfKey(percents, Double.parseDouble(s)) >= 0;
return new NonCollectingMultiMetricAggregator(name, context, parent, empty, hasMetric, metadata);
}
@Override
protected Aggregator doCreateInternal(Aggregator parent, CardinalityUpperBound bucketCardinality, Map<String, Object> metadata)
throws IOException {
return aggregatorSupplier.build(name, config, context, parent, percents, percentilesConfig, keyed, config.format(), metadata);
}
}
| PercentileRanksAggregatorFactory |
java | spring-projects__spring-security | access/src/test/java/org/springframework/security/messaging/access/expression/MessageExpressionVoterTests.java | {
"start": 1907,
"end": 5637
} | class ____ {
@Mock
Authentication authentication;
@Mock
Message<Object> message;
Collection<ConfigAttribute> attributes;
@Mock
Expression expression;
@Mock
MessageMatcher<?> matcher;
@Mock
SecurityExpressionHandler<Message> expressionHandler;
@Mock
EvaluationContext evaluationContext;
MessageExpressionVoter voter;
@BeforeEach
public void setup() {
this.attributes = Arrays
.<ConfigAttribute>asList(new MessageExpressionConfigAttribute(this.expression, this.matcher));
this.voter = new MessageExpressionVoter();
}
@Test
public void voteGranted() {
given(this.expression.getValue(any(EvaluationContext.class), eq(Boolean.class))).willReturn(true);
given(this.matcher.matcher(any())).willCallRealMethod();
assertThat(this.voter.vote(this.authentication, this.message, this.attributes))
.isEqualTo(AccessDecisionVoter.ACCESS_GRANTED);
}
@Test
public void voteDenied() {
given(this.expression.getValue(any(EvaluationContext.class), eq(Boolean.class))).willReturn(false);
given(this.matcher.matcher(any())).willCallRealMethod();
assertThat(this.voter.vote(this.authentication, this.message, this.attributes))
.isEqualTo(AccessDecisionVoter.ACCESS_DENIED);
}
@Test
public void voteAbstain() {
this.attributes = Arrays.<ConfigAttribute>asList(new SecurityConfig("ROLE_USER"));
assertThat(this.voter.vote(this.authentication, this.message, this.attributes))
.isEqualTo(AccessDecisionVoter.ACCESS_ABSTAIN);
}
@Test
public void supportsObjectClassFalse() {
assertThat(this.voter.supports(Object.class)).isFalse();
}
@Test
public void supportsMessageClassTrue() {
assertThat(this.voter.supports(Message.class)).isTrue();
}
@Test
public void supportsSecurityConfigFalse() {
assertThat(this.voter.supports(new SecurityConfig("ROLE_USER"))).isFalse();
}
@Test
public void supportsMessageExpressionConfigAttributeTrue() {
assertThat(this.voter.supports(new MessageExpressionConfigAttribute(this.expression, this.matcher))).isTrue();
}
@Test
public void setExpressionHandlerNull() {
assertThatIllegalArgumentException().isThrownBy(() -> this.voter.setExpressionHandler(null));
}
@Test
public void customExpressionHandler() {
this.voter.setExpressionHandler(this.expressionHandler);
given(this.expressionHandler.createEvaluationContext(this.authentication, this.message))
.willReturn(this.evaluationContext);
given(this.expression.getValue(this.evaluationContext, Boolean.class)).willReturn(true);
given(this.matcher.matcher(any())).willCallRealMethod();
assertThat(this.voter.vote(this.authentication, this.message, this.attributes))
.isEqualTo(AccessDecisionVoter.ACCESS_GRANTED);
verify(this.expressionHandler).createEvaluationContext(this.authentication, this.message);
}
@Test
public void postProcessEvaluationContext() {
final MessageExpressionConfigAttribute configAttribute = mock(MessageExpressionConfigAttribute.class);
this.voter.setExpressionHandler(this.expressionHandler);
given(this.expressionHandler.createEvaluationContext(this.authentication, this.message))
.willReturn(this.evaluationContext);
given(configAttribute.getAuthorizeExpression()).willReturn(this.expression);
this.attributes = Arrays.<ConfigAttribute>asList(configAttribute);
given(configAttribute.postProcess(this.evaluationContext, this.message)).willReturn(this.evaluationContext);
given(this.expression.getValue(any(EvaluationContext.class), eq(Boolean.class))).willReturn(true);
assertThat(this.voter.vote(this.authentication, this.message, this.attributes))
.isEqualTo(AccessDecisionVoter.ACCESS_GRANTED);
verify(configAttribute).postProcess(this.evaluationContext, this.message);
}
}
| MessageExpressionVoterTests |
java | google__guava | android/guava/src/com/google/common/reflect/ClassPath.java | {
"start": 4314,
"end": 4958
} | class ____ {
private static final Logger logger = Logger.getLogger(ClassPath.class.getName());
/** Separator for the Class-Path manifest attribute value in jar files. */
private static final Splitter CLASS_PATH_ATTRIBUTE_SEPARATOR =
Splitter.on(" ").omitEmptyStrings();
private static final String CLASS_FILE_NAME_EXTENSION = ".class";
private final ImmutableSet<ResourceInfo> resources;
private ClassPath(ImmutableSet<ResourceInfo> resources) {
this.resources = resources;
}
/**
* Returns a {@code ClassPath} representing all classes and resources loadable from {@code
* classloader} and its ancestor | ClassPath |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/DeduplicatedQueue.java | {
"start": 1055,
"end": 2011
} | class ____<T> {
/** The major component to store elements. */
private final Queue<T> queue = new LinkedList<>();
/**
* Saves the same elements as {@link #queue}, but helps to deduplicate elements at constant time
* complexity.
*/
private final Set<T> set = new HashSet<>();
/**
* @return true if this queue did not already contain the specified element
*/
public boolean add(T t) {
if (set.add(t)) {
queue.add(t);
return true;
}
return false;
}
public T peek() {
return queue.peek();
}
public T poll() {
T t = queue.poll();
set.remove(t);
return t;
}
public void remove(T t) {
if (set.remove(t)) {
queue.remove(t);
}
}
public Iterable<T> values() {
return queue;
}
public boolean isEmpty() {
return set.isEmpty();
}
}
| DeduplicatedQueue |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/operators/observable/ObservableTakeUntilPredicate.java | {
"start": 1464,
"end": 3332
} | class ____<T> implements Observer<T>, Disposable {
final Observer<? super T> downstream;
final Predicate<? super T> predicate;
Disposable upstream;
boolean done;
TakeUntilPredicateObserver(Observer<? super T> downstream, Predicate<? super T> predicate) {
this.downstream = downstream;
this.predicate = predicate;
}
@Override
public void onSubscribe(Disposable d) {
if (DisposableHelper.validate(this.upstream, d)) {
this.upstream = d;
downstream.onSubscribe(this);
}
}
@Override
public void dispose() {
upstream.dispose();
}
@Override
public boolean isDisposed() {
return upstream.isDisposed();
}
@Override
public void onNext(T t) {
if (!done) {
downstream.onNext(t);
boolean b;
try {
b = predicate.test(t);
} catch (Throwable e) {
Exceptions.throwIfFatal(e);
upstream.dispose();
onError(e);
return;
}
if (b) {
done = true;
upstream.dispose();
downstream.onComplete();
}
}
}
@Override
public void onError(Throwable t) {
if (!done) {
done = true;
downstream.onError(t);
} else {
RxJavaPlugins.onError(t);
}
}
@Override
public void onComplete() {
if (!done) {
done = true;
downstream.onComplete();
}
}
}
}
| TakeUntilPredicateObserver |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_2021/Issue2021Mapper.java | {
"start": 396,
"end": 451
} | class ____ implements Issue2021Mapper {
}
}
| Decorator |
java | qos-ch__slf4j | slf4j-jdk14/src/main/java/org/slf4j/jul/JDK14LoggerAdapter.java | {
"start": 1959,
"end": 2101
} | class ____ to those defined in the java.util.logging
* package.
*
* @author Ceki Gülcü
* @author Peter Royal
*/
public final | refer |
java | apache__camel | core/camel-base-engine/src/main/java/org/apache/camel/impl/engine/DefaultResourceLoader.java | {
"start": 1548,
"end": 6499
} | class ____ extends ServiceSupport implements ResourceLoader, StaticService {
/**
* Prefix to use for looking up existing {@link ResourceLoader} from the {@link org.apache.camel.spi.Registry}.
*/
public static final String RESOURCE_LOADER_KEY_PREFIX = "resource-loader-";
private final Map<String, ResourceResolver> resolvers;
private CamelContext camelContext;
private ResourceResolver fallbackResolver;
public DefaultResourceLoader() {
this(null);
}
public DefaultResourceLoader(CamelContext camelContext) {
this.camelContext = camelContext;
this.resolvers = new ConcurrentHashMap<>();
this.fallbackResolver = null;
if (camelContext != null) {
this.fallbackResolver = camelContext.getRegistry().lookupByNameAndType(ResourceResolver.FALLBACK_RESOURCE_RESOLVER,
ResourceResolver.class);
}
if (this.fallbackResolver == null) {
this.fallbackResolver = new DefaultFallbackResourceResolver(camelContext);
}
this.fallbackResolver.setCamelContext(camelContext);
}
@Override
public void doStart() throws Exception {
super.doStart();
ServiceHelper.startService(this.fallbackResolver);
}
@Override
public void doStop() throws Exception {
super.doStop();
ServiceHelper.stopService(resolvers.values(), this.fallbackResolver);
resolvers.clear();
}
public ResourceResolver getFallbackResolver() {
return fallbackResolver;
}
public void setFallbackResolver(ResourceResolver fallbackResolver) {
this.fallbackResolver = fallbackResolver;
}
@Override
public CamelContext getCamelContext() {
return camelContext;
}
@Override
public void setCamelContext(CamelContext camelContext) {
this.camelContext = camelContext;
this.fallbackResolver.setCamelContext(this.camelContext);
}
@Override
public Resource resolveResource(final String uri) {
ObjectHelper.notNull(uri, "Resource uri must not be null");
//
// If the scheme is not set, use a fallback resolver which by default uses the classpath
// resolver but a custom implementation can be provided. This is useful when as example
// resources need to be discovered on a set of location through a dedicated resolver.
//
String scheme = StringHelper.before(uri, ":");
if (scheme == null) {
return this.fallbackResolver.resolve(uri);
}
ResourceResolver rr = getResourceResolver(scheme);
if (rr == null) {
throw new IllegalArgumentException(
"Cannot find a ResourceResolver in classpath supporting the scheme: " + scheme);
}
return rr.resolve(uri);
}
/**
* Is there an existing resource resolver that can load from the given scheme
*
* @param scheme the scheme
*/
protected boolean hasResourceResolver(String scheme) {
ResourceResolver answer = getCamelContext().getRegistry().lookupByNameAndType(
RESOURCE_LOADER_KEY_PREFIX + scheme,
ResourceResolver.class);
if (answer == null) {
answer = resolvers.get(scheme);
}
return answer != null;
}
/**
* Looks up a {@link ResourceResolver} for the given scheme in the registry or fallback to a factory finder
* mechanism if none found.
*
* @param scheme the file extension for which a loader should be found.
* @return a {@link RoutesBuilderLoader} or <code>null</code> if none found.
*/
private ResourceResolver getResourceResolver(final String scheme) {
ResourceResolver answer = getCamelContext().getRegistry().lookupByNameAndType(
RESOURCE_LOADER_KEY_PREFIX + scheme,
ResourceResolver.class);
if (answer == null) {
answer = resolvers.computeIfAbsent(scheme, this::resolveService);
}
return answer;
}
/**
* Looks up a {@link ResourceResolver} for the given scheme with factory finder.
*
* @param scheme the file extension for which a loader should be found.
* @return a {@link RoutesBuilderLoader} or <code>null</code> if none found.
*/
private ResourceResolver resolveService(String scheme) {
final CamelContext context = getCamelContext();
final FactoryFinder finder
= context.getCamelContextExtension().getBootstrapFactoryFinder(ResourceResolver.FACTORY_PATH);
ResourceResolver rr = ResolverHelper.resolveService(context, finder, scheme, ResourceResolver.class).orElse(null);
if (rr != null) {
CamelContextAware.trySetCamelContext(rr, getCamelContext());
ServiceHelper.startService(rr);
}
return rr;
}
}
| DefaultResourceLoader |
java | apache__hadoop | hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java | {
"start": 2801,
"end": 16766
} | class ____ extends RetriableCommand {
private static Logger LOG = LoggerFactory.getLogger(RetriableFileCopyCommand.class);
private boolean skipCrc = false;
private boolean directWrite = false;
private FileAction action;
/**
* Constructor, taking a description of the action.
* @param description Verbose description of the copy operation.
*/
public RetriableFileCopyCommand(String description, FileAction action) {
super(description);
this.action = action;
}
/**
* Create a RetriableFileCopyCommand.
*
* @param skipCrc Whether to skip the crc check.
* @param description A verbose description of the copy operation.
* @param action We should overwrite the target file or append new data to it.
*/
public RetriableFileCopyCommand(boolean skipCrc, String description,
FileAction action) {
this(description, action);
this.skipCrc = skipCrc;
}
/**
* Create a RetriableFileCopyCommand.
*
* @param skipCrc Whether to skip the crc check.
* @param description A verbose description of the copy operation.
* @param action We should overwrite the target file or append new data to it.
* @param directWrite Whether to write directly to the target path, avoiding a
* temporary file rename.
*/
public RetriableFileCopyCommand(boolean skipCrc, String description,
FileAction action, boolean directWrite) {
this(skipCrc, description, action);
this.directWrite = directWrite;
}
/**
* Implementation of RetriableCommand::doExecute().
* This is the actual copy-implementation.
* @param arguments Argument-list to the command.
* @return Number of bytes copied.
* @throws Exception
*/
@SuppressWarnings("unchecked")
@Override
protected Object doExecute(Object... arguments) throws Exception {
assert arguments.length == 5 : "Unexpected argument list.";
CopyListingFileStatus source = (CopyListingFileStatus)arguments[0];
assert !source.isDirectory() : "Unexpected file-status. Expected file.";
Path target = (Path)arguments[1];
Mapper.Context context = (Mapper.Context)arguments[2];
EnumSet<FileAttribute> fileAttributes
= (EnumSet<FileAttribute>)arguments[3];
FileStatus sourceStatus = (FileStatus)arguments[4];
return doCopy(source, target, context, fileAttributes, sourceStatus);
}
private long doCopy(CopyListingFileStatus source, Path target,
Mapper.Context context, EnumSet<FileAttribute> fileAttributes,
FileStatus sourceStatus)
throws IOException {
LOG.info("Copying {} to {}", source.getPath(), target);
final boolean toAppend = action == FileAction.APPEND;
final boolean useTempTarget = !toAppend && !directWrite;
Path targetPath = useTempTarget ? getTempFile(target, context) : target;
LOG.info("Writing to {} target file path {}", useTempTarget ? "temporary"
: "direct", targetPath);
final Configuration configuration = context.getConfiguration();
FileSystem targetFS = target.getFileSystem(configuration);
try {
final Path sourcePath = source.getPath();
final FileSystem sourceFS = sourcePath.getFileSystem(configuration);
final FileChecksum sourceChecksum = fileAttributes
.contains(FileAttribute.CHECKSUMTYPE) ? sourceFS
.getFileChecksum(sourcePath) : null;
long offset = (action == FileAction.APPEND) ?
targetFS.getFileStatus(target).getLen() : source.getChunkOffset();
long bytesRead = copyToFile(targetPath, targetFS, source, offset, context,
fileAttributes, sourceChecksum, sourceStatus, sourceFS);
if (!source.isSplit()) {
DistCpUtils.compareFileLengthsAndChecksums(source.getLen(), sourceFS,
sourcePath, sourceChecksum, targetFS,
targetPath, skipCrc, offset + bytesRead);
}
// it's not append or direct write (preferred for s3a) case, thus we first
// write to a temporary file, then rename it to the target path.
if (useTempTarget) {
LOG.info("Renaming temporary target file path {} to {}", targetPath,
target);
promoteTmpToTarget(targetPath, target, targetFS);
}
LOG.info("Completed writing {} ({} bytes)", target, bytesRead);
return bytesRead;
} finally {
// note that for append case, it is possible that we append partial data
// and then fail. In that case, for the next retry, we either reuse the
// partial appended data if it is good or we overwrite the whole file
if (useTempTarget) {
targetFS.delete(targetPath, false);
}
}
}
/**
* @return the checksum spec of the source checksum if checksum type should be
* preserved
*/
private ChecksumOpt getChecksumOpt(EnumSet<FileAttribute> fileAttributes,
FileChecksum sourceChecksum) {
if (fileAttributes.contains(FileAttribute.CHECKSUMTYPE)
&& sourceChecksum != null) {
return sourceChecksum.getChecksumOpt();
}
return null;
}
@SuppressWarnings("checkstyle:parameternumber")
private long copyToFile(Path targetPath, FileSystem targetFS,
CopyListingFileStatus source, long sourceOffset, Mapper.Context context,
EnumSet<FileAttribute> fileAttributes, final FileChecksum sourceChecksum,
FileStatus sourceStatus,FileSystem sourceFS)
throws IOException {
FsPermission permission = FsPermission.getFileDefault().applyUMask(
FsPermission.getUMask(targetFS.getConf()));
int copyBufferSize = context.getConfiguration().getInt(
DistCpOptionSwitch.COPY_BUFFER_SIZE.getConfigLabel(),
DistCpConstants.COPY_BUFFER_SIZE_DEFAULT);
boolean preserveEC = getFileAttributeSettings(context)
.contains(DistCpOptions.FileAttribute.ERASURECODINGPOLICY);
String ecPolicyName = null;
if (preserveEC && sourceStatus.isErasureCoded()
&& checkFSSupportsEC(sourceFS, sourceStatus.getPath())
&& checkFSSupportsEC(targetFS, targetPath)) {
ecPolicyName = ((WithErasureCoding) sourceFS).getErasureCodingPolicyName(sourceStatus);
}
final OutputStream outStream;
if (action == FileAction.OVERWRITE) {
// If there is an erasure coding policy set on the target directory,
// files will be written to the target directory using the same EC policy.
// The replication factor of the source file is ignored and not preserved.
final short repl = getReplicationFactor(fileAttributes, source,
targetFS, targetPath);
final long blockSize = getBlockSize(fileAttributes, source,
targetFS, targetPath);
FSDataOutputStream out;
ChecksumOpt checksumOpt = getChecksumOpt(fileAttributes, sourceChecksum);
if (!preserveEC || ecPolicyName == null) {
out = targetFS.create(targetPath, permission,
EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE), copyBufferSize,
repl, blockSize, context, checksumOpt);
} else {
FSDataOutputStreamBuilder builder = targetFS.createFile(targetPath)
.permission(permission)
.overwrite(true)
.bufferSize(copyBufferSize)
.replication(repl)
.blockSize(blockSize)
.progress(context)
.recursive();
builder.opt(FS_OPTION_OPENFILE_EC_POLICY, ecPolicyName);
out = builder.build();
}
outStream = new BufferedOutputStream(out);
} else {
outStream = new BufferedOutputStream(targetFS.append(targetPath,
copyBufferSize));
}
return copyBytes(source, sourceOffset, outStream, copyBufferSize,
context);
}
//If target file exists and unable to delete target - fail
//If target doesn't exist and unable to create parent folder - fail
//If target is successfully deleted and parent exists, if rename fails - fail
private void promoteTmpToTarget(Path tmpTarget, Path target, FileSystem fs)
throws IOException {
if ((fs.exists(target) && !fs.delete(target, false))
|| (!fs.exists(target.getParent()) && !fs.mkdirs(target.getParent()))
|| !fs.rename(tmpTarget, target)) {
throw new IOException("Failed to promote tmp-file:" + tmpTarget
+ " to: " + target);
}
}
private Path getTempFile(Path target, Mapper.Context context) {
Path targetWorkPath = new Path(context.getConfiguration().
get(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH));
Path root = target.equals(targetWorkPath) ? targetWorkPath.getParent()
: targetWorkPath;
Path tempFile = new Path(root, ".distcp.tmp." +
context.getTaskAttemptID().toString() +
"." + String.valueOf(System.currentTimeMillis()));
LOG.info("Creating temp file: {}", tempFile);
return tempFile;
}
@VisibleForTesting
long copyBytes(CopyListingFileStatus source2, long sourceOffset,
OutputStream outStream, int bufferSize, Mapper.Context context)
throws IOException {
Path source = source2.getPath();
byte buf[] = new byte[bufferSize];
ThrottledInputStream inStream = null;
long totalBytesRead = 0;
long chunkLength = source2.getChunkLength();
boolean finished = false;
try {
inStream = getInputStream(source, context.getConfiguration());
long fileLength = source2.getLen();
int numBytesToRead = (int) getNumBytesToRead(fileLength, sourceOffset,
bufferSize);
seekIfRequired(inStream, sourceOffset);
int bytesRead = readBytes(inStream, buf, numBytesToRead);
while (bytesRead > 0) {
if (chunkLength > 0 &&
(totalBytesRead + bytesRead) >= chunkLength) {
bytesRead = (int)(chunkLength - totalBytesRead);
finished = true;
}
totalBytesRead += bytesRead;
sourceOffset += bytesRead;
outStream.write(buf, 0, bytesRead);
updateContextStatus(totalBytesRead, context, source2);
if (finished) {
break;
}
numBytesToRead = (int) getNumBytesToRead(fileLength, sourceOffset,
bufferSize);
bytesRead = readBytes(inStream, buf, numBytesToRead);
}
outStream.close();
outStream = null;
} finally {
IOUtils.cleanupWithLogger(LOG, outStream, inStream);
}
return totalBytesRead;
}
@VisibleForTesting
long getNumBytesToRead(long fileLength, long position, long bufLength) {
if (position + bufLength < fileLength) {
return bufLength;
} else {
return fileLength - position;
}
}
private void updateContextStatus(long totalBytesRead, Mapper.Context context,
CopyListingFileStatus source2) {
StringBuilder message = new StringBuilder(DistCpUtils.getFormatter()
.format(totalBytesRead * 100.0f / source2.getLen()));
message.append("% ")
.append(description).append(" [")
.append(DistCpUtils.getStringDescriptionFor(totalBytesRead))
.append('/')
.append(DistCpUtils.getStringDescriptionFor(source2.getLen()))
.append(']');
context.setStatus(message.toString());
}
private static int readBytes(ThrottledInputStream inStream, byte[] buf,
int numBytes)
throws IOException {
try {
return inStream.read(buf, 0, numBytes);
} catch (IOException e) {
throw new CopyReadException(e);
}
}
private static void seekIfRequired(ThrottledInputStream inStream,
long sourceOffset) throws IOException {
try {
if (sourceOffset != inStream.getPos()) {
inStream.seek(sourceOffset);
}
} catch (IOException e) {
throw new CopyReadException(e);
}
}
private static ThrottledInputStream getInputStream(Path path,
Configuration conf) throws IOException {
try {
FileSystem fs = path.getFileSystem(conf);
float bandwidthMB = conf.getFloat(DistCpConstants.CONF_LABEL_BANDWIDTH_MB,
DistCpConstants.DEFAULT_BANDWIDTH_MB);
// open with sequential read, but not whole-file
FSDataInputStream in = awaitFuture(fs.openFile(path)
.opt(FS_OPTION_OPENFILE_READ_POLICY,
FS_OPTION_OPENFILE_READ_POLICY_SEQUENTIAL)
.build());
return new ThrottledInputStream(in, bandwidthMB * 1024 * 1024);
}
catch (IOException e) {
throw new CopyReadException(e);
}
}
private static short getReplicationFactor(
EnumSet<FileAttribute> fileAttributes, CopyListingFileStatus source,
FileSystem targetFS, Path tmpTargetPath) {
if (source.isErasureCoded()) {
return targetFS.getDefaultReplication(tmpTargetPath);
}
return fileAttributes.contains(FileAttribute.REPLICATION)
? source.getReplication()
: targetFS.getDefaultReplication(tmpTargetPath);
}
/**
* @return the block size of the source file if we need to preserve either
* the block size or the checksum type. Otherwise the default block
* size of the target FS.
*/
private static long getBlockSize(
EnumSet<FileAttribute> fileAttributes, CopyListingFileStatus source,
FileSystem targetFS, Path tmpTargetPath) {
boolean preserve = fileAttributes.contains(FileAttribute.BLOCKSIZE)
|| fileAttributes.contains(FileAttribute.CHECKSUMTYPE);
return preserve ? source.getBlockSize() : targetFS
.getDefaultBlockSize(tmpTargetPath);
}
/**
* Special subclass of IOException. This is used to distinguish read-operation
* failures from other kinds of IOExceptions.
* The failure to read from source is dealt with specially, in the CopyMapper.
* Such failures may be skipped if the DistCpOptions indicate so.
* Write failures are intolerable, and amount to CopyMapper failure.
*/
@SuppressWarnings("serial")
public static | RetriableFileCopyCommand |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/ChannelTest.java | {
"start": 1095,
"end": 2174
} | class ____ extends ContextTestSupport {
private static int counter;
@Test
public void testChannel() throws Exception {
counter = 0;
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMessageCount(2);
template.sendBody("direct:start", "Hello World");
template.sendBody("direct:start", "Bye World");
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
errorHandler(deadLetterChannel("mock:dead").maximumRedeliveries(2).redeliveryDelay(0).logStackTrace(false));
from("direct:start").process(new Processor() {
public void process(Exchange exchange) {
if (counter++ < 1) {
throw new IllegalArgumentException("Damn");
}
}
}).delay(10).to("mock:result");
}
};
}
}
| ChannelTest |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/date/DateFieldTest3.java | {
"start": 429,
"end": 3995
} | class ____ extends TestCase {
protected void setUp() throws Exception {
JSON.defaultTimeZone = TimeZone.getTimeZone("Asia/Shanghai");
JSON.defaultLocale = Locale.CHINA;
}
public void test_codec() throws Exception {
SerializeConfig mapping = new SerializeConfig();
mapping.put(Date.class, new SimpleDateFormatSerializer("yyyy-MM-dd"));
V0 v = new V0();
v.setValue(new Date());
String text = JSON.toJSONString(v, mapping);
SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd", JSON.defaultLocale);
format.setTimeZone(JSON.defaultTimeZone);
Assert.assertEquals("{\"value\":" + JSON.toJSONString(format.format(v.getValue())) + "}", text);
}
public void test_codec_no_asm() throws Exception {
V0 v = new V0();
v.setValue(new Date());
SerializeConfig mapping = new SerializeConfig();
mapping.put(Date.class, new SimpleDateFormatSerializer("yyyy-MM-dd"));
mapping.setAsmEnable(false);
String text = JSON.toJSONString(v, mapping, SerializerFeature.WriteMapNullValue);
SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd", JSON.defaultLocale);
format.setTimeZone(JSON.defaultTimeZone);
Assert.assertEquals("{\"value\":" + JSON.toJSONString(format.format(v.getValue())) + "}", text);
}
public void test_codec_asm() throws Exception {
V0 v = new V0();
v.setValue(new Date());
SerializeConfig mapping = new SerializeConfig();
mapping.put(Date.class, new SimpleDateFormatSerializer("yyyy-MM-dd"));
mapping.setAsmEnable(true);
String text = JSON.toJSONString(v, mapping, SerializerFeature.WriteMapNullValue);
SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd", JSON.defaultLocale);
format.setTimeZone(JSON.defaultTimeZone);
Assert.assertEquals("{\"value\":" + JSON.toJSONString(format.format(v.getValue())) + "}", text);
}
public void test_codec_null_asm() throws Exception {
V0 v = new V0();
SerializeConfig mapping = new SerializeConfig();
mapping.setAsmEnable(true);
String text = JSON.toJSONString(v, mapping, SerializerFeature.WriteMapNullValue);
mapping.put(Date.class, new SimpleDateFormatSerializer("yyyy-MM-dd"));
Assert.assertEquals("{\"value\":null}", text);
V0 v1 = JSON.parseObject(text, V0.class);
Assert.assertEquals(v1.getValue(), v.getValue());
}
public void test_codec_null_no_asm() throws Exception {
V0 v = new V0();
SerializeConfig mapping = new SerializeConfig();
mapping.put(Date.class, new SimpleDateFormatSerializer("yyyy-MM-dd"));
mapping.setAsmEnable(false);
String text = JSON.toJSONString(v, mapping, SerializerFeature.WriteMapNullValue);
Assert.assertEquals("{\"value\":null}", text);
V0 v1 = JSON.parseObject(text, V0.class);
Assert.assertEquals(v1.getValue(), v.getValue());
}
public void test_codec_null_1() throws Exception {
V0 v = new V0();
SerializeConfig mapping = new SerializeConfig();
mapping.setAsmEnable(false);
String text = JSON.toJSONString(v, mapping, SerializerFeature.WriteMapNullValue, SerializerFeature.WriteNullNumberAsZero);
Assert.assertEquals("{\"value\":null}", text);
V0 v1 = JSON.parseObject(text, V0.class);
Assert.assertEquals(null, v1.getValue());
}
public static | DateFieldTest3 |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequestBuilder.java | {
"start": 612,
"end": 2223
} | class ____ extends ActionRequestBuilder<CreateTokenRequest, CreateTokenResponse> {
public CreateTokenRequestBuilder(ElasticsearchClient client) {
super(client, CreateTokenAction.INSTANCE, new CreateTokenRequest());
}
/**
* Specifies the grant type for this request. Currently only <code>password</code> is supported
*/
public CreateTokenRequestBuilder setGrantType(String grantType) {
request.setGrantType(grantType);
return this;
}
/**
* Set the username to be used for authentication with a password grant
*/
public CreateTokenRequestBuilder setUsername(@Nullable String username) {
request.setUsername(username);
return this;
}
/**
* Set the password credentials associated with the user. These credentials will be used for
* authentication and the resulting token will be for this user
*/
public CreateTokenRequestBuilder setPassword(@Nullable SecureString password) {
request.setPassword(password);
return this;
}
/**
* Set the scope of the access token. A <code>null</code> scope implies the default scope. If
* the requested scope differs from the scope of the token, the token's scope will be returned
* in the response
*/
public CreateTokenRequestBuilder setScope(@Nullable String scope) {
request.setScope(scope);
return this;
}
public CreateTokenRequestBuilder setRefreshToken(@Nullable String refreshToken) {
request.setRefreshToken(refreshToken);
return this;
}
}
| CreateTokenRequestBuilder |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/sql/exec/ExecutionException.java | {
"start": 256,
"end": 520
} | class ____ extends HibernateError {
public ExecutionException(String message) {
this( message, null );
}
public ExecutionException(String message, Throwable cause) {
super( "A problem occurred in the SQL executor : " + message, cause );
}
}
| ExecutionException |
java | google__guava | android/guava/src/com/google/common/collect/MapMakerInternalMap.java | {
"start": 31665,
"end": 33993
} | class ____<K, V>
implements InternalEntryHelper<
K, V, WeakKeyWeakValueEntry<K, V>, WeakKeyWeakValueSegment<K, V>> {
private static final Helper<?, ?> INSTANCE = new Helper<>();
@SuppressWarnings("unchecked")
static <K, V> Helper<K, V> instance() {
return (Helper<K, V>) INSTANCE;
}
@Override
public Strength keyStrength() {
return Strength.WEAK;
}
@Override
public Strength valueStrength() {
return Strength.WEAK;
}
@Override
public WeakKeyWeakValueSegment<K, V> newSegment(
MapMakerInternalMap<K, V, WeakKeyWeakValueEntry<K, V>, WeakKeyWeakValueSegment<K, V>> map,
int initialCapacity) {
return new WeakKeyWeakValueSegment<>(map, initialCapacity);
}
@Override
public @Nullable WeakKeyWeakValueEntry<K, V> copy(
WeakKeyWeakValueSegment<K, V> segment,
WeakKeyWeakValueEntry<K, V> entry,
@Nullable WeakKeyWeakValueEntry<K, V> newNext) {
K key = entry.getKey();
if (key == null) {
// key collected
return null;
}
if (Segment.isCollected(entry)) {
return null;
}
WeakKeyWeakValueEntry<K, V> newEntry = newEntry(segment, key, entry.hash, newNext);
newEntry.valueReference = entry.valueReference.copyFor(segment.queueForValues, newEntry);
return newEntry;
}
@Override
public void setValue(
WeakKeyWeakValueSegment<K, V> segment, WeakKeyWeakValueEntry<K, V> entry, V value) {
WeakValueReference<K, V, WeakKeyWeakValueEntry<K, V>> previous = entry.valueReference;
entry.valueReference = new WeakValueReferenceImpl<>(segment.queueForValues, value, entry);
previous.clear();
}
@Override
public WeakKeyWeakValueEntry<K, V> newEntry(
WeakKeyWeakValueSegment<K, V> segment,
K key,
int hash,
@Nullable WeakKeyWeakValueEntry<K, V> next) {
return next == null
? new WeakKeyWeakValueEntry<>(segment.queueForKeys, key, hash)
: new LinkedWeakKeyWeakValueEntry<>(segment.queueForKeys, key, hash, next);
}
}
}
/** A weakly referenced value that also has a reference to its containing entry. */
| Helper |
java | hibernate__hibernate-orm | hibernate-vector/src/main/java/org/hibernate/vector/internal/SQLServerTypeContributor.java | {
"start": 1153,
"end": 4539
} | class ____ implements TypeContributor {
@Override
public void contribute(TypeContributions typeContributions, ServiceRegistry serviceRegistry) {
final Dialect dialect = serviceRegistry.requireService( JdbcServices.class ).getDialect();
if ( dialect instanceof SQLServerDialect && dialect.getVersion().isSameOrAfter( 17 ) ) {
final boolean supportsDriverType = supportsDriverType( serviceRegistry );
final String vectorJdbcType = supportsDriverType ? "org.hibernate.vector.internal.SQLServerVectorJdbcType"
: "org.hibernate.vector.internal.SQLServerCastingVectorJdbcType";
final TypeConfiguration typeConfiguration = typeContributions.getTypeConfiguration();
final JavaTypeRegistry javaTypeRegistry = typeConfiguration.getJavaTypeRegistry();
final JdbcTypeRegistry jdbcTypeRegistry = typeConfiguration.getJdbcTypeRegistry();
final BasicTypeRegistry basicTypeRegistry = typeConfiguration.getBasicTypeRegistry();
final BasicType<Float> floatBasicType = basicTypeRegistry.resolve( StandardBasicTypes.FLOAT );
final JdbcType floatJdbcType = jdbcTypeRegistry.getDescriptor( SqlTypes.FLOAT );
final ArrayJdbcType genericVectorJdbcType = create(
serviceRegistry,
vectorJdbcType,
floatJdbcType,
SqlTypes.VECTOR
);
jdbcTypeRegistry.addDescriptor( SqlTypes.VECTOR, genericVectorJdbcType );
final ArrayJdbcType floatVectorJdbcType = create(
serviceRegistry,
vectorJdbcType,
floatJdbcType,
SqlTypes.VECTOR_FLOAT32
);
jdbcTypeRegistry.addDescriptor( SqlTypes.VECTOR_FLOAT32, floatVectorJdbcType );
basicTypeRegistry.register(
new BasicArrayType<>(
floatBasicType,
genericVectorJdbcType,
javaTypeRegistry.getDescriptor( float[].class )
),
StandardBasicTypes.VECTOR.getName()
);
basicTypeRegistry.register(
new BasicArrayType<>(
basicTypeRegistry.resolve( StandardBasicTypes.FLOAT ),
floatVectorJdbcType,
javaTypeRegistry.getDescriptor( float[].class )
),
StandardBasicTypes.VECTOR_FLOAT32.getName()
);
typeConfiguration.getDdlTypeRegistry().addDescriptor(
new VectorDdlType( SqlTypes.VECTOR, "vector($l)", "vector", dialect )
);
typeConfiguration.getDdlTypeRegistry().addDescriptor(
new VectorDdlType( SqlTypes.VECTOR_FLOAT32, "vector($l)", "vector", dialect )
);
}
}
private static boolean supportsDriverType(ServiceRegistry serviceRegistry) {
final ClassLoaderService classLoaderService = serviceRegistry.requireService( ClassLoaderService.class );
try {
classLoaderService.classForName( "microsoft.sql.Vector" );
return true;
}
catch (ClassLoadingException ex) {
return false;
}
}
private static <X> X create(ServiceRegistry serviceRegistry, String className, JdbcType elementType, int sqlType) {
final ClassLoaderService classLoaderService = serviceRegistry.requireService( ClassLoaderService.class );
try {
return classLoaderService.<X>classForName( className )
.getConstructor( JdbcType.class, int.class )
.newInstance( elementType, sqlType );
}
catch (NoSuchMethodException e) {
throw new HibernateError( "Class does not have an empty constructor", e );
}
catch (InstantiationException | IllegalAccessException | InvocationTargetException e) {
throw new HibernateError( "Could not construct JdbcType", e );
}
}
}
| SQLServerTypeContributor |
java | apache__kafka | metadata/src/main/java/org/apache/kafka/image/node/TopicImageNode.java | {
"start": 1009,
"end": 2238
} | class ____ implements MetadataNode {
/**
* The topic image.
*/
private final TopicImage image;
public TopicImageNode(TopicImage image) {
this.image = image;
}
@Override
public Collection<String> childNames() {
ArrayList<String> childNames = new ArrayList<>();
childNames.add("name");
childNames.add("id");
for (Integer partitionId : image.partitions().keySet()) {
childNames.add(partitionId.toString());
}
return childNames;
}
@Override
public MetadataNode child(String name) {
if (name.equals("name")) {
return new MetadataLeafNode(image.name());
} else if (name.equals("id")) {
return new MetadataLeafNode(image.id().toString());
} else {
int partitionId;
try {
partitionId = Integer.parseInt(name);
} catch (NumberFormatException e) {
return null;
}
PartitionRegistration registration = image.partitions().get(partitionId);
if (registration == null) return null;
return new MetadataLeafNode(registration.toString());
}
}
}
| TopicImageNode |
java | apache__camel | components/camel-github/src/test/java/org/apache/camel/component/github/services/MockPullRequestService.java | {
"start": 1384,
"end": 4996
} | class ____ extends PullRequestService {
protected static final Logger LOG = LoggerFactory.getLogger(MockPullRequestService.class);
private Map<Long, PullRequest> pullRequests = new HashMap<>();
private List<CommitComment> emptyComments = new ArrayList<>();
private AtomicInteger pullRequestNumber = new AtomicInteger(101);
private AtomicInteger commentId = new AtomicInteger(500);
private Map<Long, List<CommitComment>> allComments = new HashMap<>();
private Map<Integer, List<CommitFile>> files = new HashMap<>();
@Override
public List<CommitComment> getComments(IRepositoryIdProvider repository, int pullRequestId) {
Long id = Long.valueOf(pullRequestId);
if (allComments.containsKey(id)) {
List<CommitComment> comments = allComments.get(id);
return comments;
} else {
return emptyComments;
}
}
private User createAuthor() {
User author = new User();
author.setEmail("someguy@gmail.com");
author.setHtmlUrl("http://github/someguy");
author.setLogin("someguy");
return author;
}
public CommitComment addComment(Long pullRequestId, String bodyText) {
CommitComment commitComment = new CommitComment();
User author = createAuthor();
commitComment.setUser(author);
commitComment.setCommitId(Long.toString(pullRequestId));
commitComment.setId(commentId.getAndIncrement());
commitComment.setBody(bodyText);
commitComment.setBodyText(bodyText);
List<CommitComment> comments;
if (allComments.containsKey(pullRequestId)) {
comments = allComments.get(pullRequestId);
} else {
comments = new ArrayList<>();
}
comments.add(commitComment);
allComments.put(pullRequestId, comments);
return commitComment;
}
public PullRequest addPullRequest(String title) {
User author = createAuthor();
PullRequest pullRequest = new PullRequest();
pullRequest.setUser(author);
pullRequest.setHtmlUrl("https://github.com/someguy/somerepo/pull" + pullRequestNumber);
pullRequest.setTitle(title);
pullRequest.setNumber(pullRequestNumber.get());
pullRequest.setId(pullRequestNumber.get());
pullRequest.setState("open");
pullRequests.put(pullRequest.getId(), pullRequest);
pullRequestNumber.incrementAndGet();
return pullRequest;
}
@Override
public PullRequest getPullRequest(IRepositoryIdProvider repository, int id) {
PullRequest pullRequest = pullRequests.get((long) id);
return pullRequest;
}
@Override
public PullRequest editPullRequest(IRepositoryIdProvider repository, PullRequest request) {
pullRequests.put(request.getId(), request);
return request;
}
@Override
public synchronized List<PullRequest> getPullRequests(IRepositoryIdProvider repository, String state) {
List<PullRequest> result = new ArrayList<>();
for (PullRequest pr : pullRequests.values()) {
if (pr.getState().equals(state)) {
result.add(pr);
}
}
LOG.debug("Returning list of {} pull requests with state {}", result.size(), state);
return result;
}
public void setFiles(int id, List<CommitFile> commitFiles) {
files.put(id, commitFiles);
}
@Override
public List<CommitFile> getFiles(IRepositoryIdProvider repository, int id) {
return files.get(id);
}
}
| MockPullRequestService |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.