language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
apache__maven
|
its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng2843PluginConfigPropertiesInjectionTest.java
|
{
"start": 1134,
"end": 2317
}
|
class ____ extends AbstractMavenIntegrationTestCase {
/**
* Test that plugins can have the project properties injected via ${project.properties}.
*
* @throws Exception in case of failure
*/
@Test
public void testitMNG2843() throws Exception {
File testDir = extractResources("/mng-2843");
// First, build the test plugin
Verifier verifier = newVerifier(new File(testDir, "maven-it-plugin-uses-properties").getAbsolutePath());
verifier.setAutoclean(false);
verifier.deleteDirectory("target");
verifier.addCliArgument("install");
verifier.execute();
verifier.verifyErrorFreeLog();
// Then, run the test project that uses the plugin
verifier = newVerifier(testDir.getAbsolutePath());
verifier.setAutoclean(false);
verifier.deleteDirectory("target");
verifier.addCliArgument("validate");
verifier.execute();
verifier.verifyErrorFreeLog();
Properties props = verifier.loadProperties("target/project.properties");
assertEquals("PASSED", props.getProperty("key"));
}
}
|
MavenITmng2843PluginConfigPropertiesInjectionTest
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/sql/mysql/param/MySqlParameterizedOutputVisitorTest_13.java
|
{
"start": 246,
"end": 785
}
|
class ____ extends MySQLParameterizedTest {
public void test_for_parameterize() throws Exception {
final DbType dbType = JdbcConstants.MYSQL;
String sql = "select * from `user_0000` `user`";
String psql = ParameterizedOutputVisitorUtils.parameterize(sql, dbType);
String expected = "SELECT *\n" +
"FROM user `user`";
assertEquals(expected, psql);
paramaterizeAST(sql, "SELECT *\n" +
"FROM `user_0000` `user`");
}
}
|
MySqlParameterizedOutputVisitorTest_13
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/persister/entity/AbstractEntityPersister.java
|
{
"start": 162464,
"end": 163032
}
|
class ____ implements CacheEntryHelper {
private final EntityPersister persister;
private StandardCacheEntryHelper(EntityPersister persister) {
this.persister = persister;
}
@Override
public CacheEntryStructure getCacheEntryStructure() {
return UnstructuredCacheEntry.INSTANCE;
}
@Override
public CacheEntry buildCacheEntry(Object entity, Object[] state, Object version, SharedSessionContractImplementor session) {
return new StandardCacheEntryImpl( state, persister, version, session, entity );
}
}
private static
|
StandardCacheEntryHelper
|
java
|
elastic__elasticsearch
|
build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/header/InjectHeaderTests.java
|
{
"start": 1005,
"end": 4100
}
|
class ____ extends InjectFeatureTests {
private static final Map<String, String> headers = Map.of(
"Content-Type",
"application/vnd.elasticsearch+json;compatible-with=8",
"Accept",
"application/vnd.elasticsearch+json;compatible-with=8"
);
/**
* test file does not any headers defined
*/
@Test
public void testInjectHeadersNoPreExisting() throws Exception {
String testName = "/rest/transform/header/without_existing_headers.yml";
List<ObjectNode> tests = getTests(testName);
validateSetupDoesNotExist(tests);
List<ObjectNode> transformedTests = transformTests(tests);
printTest(testName, transformedTests);
validateSetupAndTearDown(transformedTests);
validateBodyHasHeaders(transformedTests, headers);
}
/**
* test file has preexisting headers
*/
@Test
public void testInjectHeadersWithPreExisting() throws Exception {
String testName = "/rest/transform/header/with_existing_headers.yml";
List<ObjectNode> tests = getTests(testName);
validateSetupExist(tests);
validateBodyHasHeaders(tests, Map.of("foo", "bar"));
List<ObjectNode> transformedTests = transformTests(tests);
printTest(testName, transformedTests);
validateSetupAndTearDown(transformedTests);
validateBodyHasHeaders(tests, Map.of("foo", "bar"));
validateBodyHasHeaders(transformedTests, headers);
}
@Test
public void testNotInjectingHeaders() throws Exception {
String testName = "/rest/transform/header/with_operation_to_skip_adding_headers.yml";
List<ObjectNode> tests = getTests(testName);
validateSetupExist(tests);
validateBodyHasHeaders(tests, Map.of("foo", "bar"));
List<RestTestTransform<?>> transformations = Collections.singletonList(
new InjectHeaders(headers, Set.of(InjectHeaderTests::applyCondition))
);
List<ObjectNode> transformedTests = transformTests(tests, transformations);
printTest(testName, transformedTests);
validateSetupAndTearDown(transformedTests);
validateBodyHasHeaders(tests, Map.of("foo", "bar"));
validateBodyHasHeaders(transformedTests, Map.of("foo", "bar"));
}
private static boolean applyCondition(ObjectNode doNodeValue) {
final Iterator<String> fieldNamesIterator = doNodeValue.fieldNames();
while (fieldNamesIterator.hasNext()) {
final String fieldName = fieldNamesIterator.next();
if (fieldName.startsWith("something_to_skip")) {
return false;
}
}
return true;
}
@Override
protected List<String> getKnownFeatures() {
return Collections.singletonList("headers");
}
@Override
protected List<RestTestTransform<?>> getTransformations() {
return Collections.singletonList(new InjectHeaders(headers, Collections.emptySet()));
}
@Override
protected boolean getHumanDebug() {
return false;
}
}
|
InjectHeaderTests
|
java
|
elastic__elasticsearch
|
x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/email/EmailAction.java
|
{
"start": 1523,
"end": 8579
}
|
class ____ implements Action {
public static final String TYPE = "email";
private final EmailTemplate email;
@Nullable
private final String account;
@Nullable
private final Authentication auth;
@Nullable
private final Profile profile;
@Nullable
private final DataAttachment dataAttachment;
@Nullable
private final EmailAttachments emailAttachments;
public EmailAction(
EmailTemplate email,
@Nullable String account,
@Nullable Authentication auth,
@Nullable Profile profile,
@Nullable DataAttachment dataAttachment,
@Nullable EmailAttachments emailAttachments
) {
this.email = email;
this.account = account;
this.auth = auth;
this.profile = profile;
this.dataAttachment = dataAttachment;
this.emailAttachments = emailAttachments;
}
public EmailTemplate getEmail() {
return email;
}
public String getAccount() {
return account;
}
public Authentication getAuth() {
return auth;
}
public Profile getProfile() {
return profile;
}
public DataAttachment getDataAttachment() {
return dataAttachment;
}
public EmailAttachments getAttachments() {
return emailAttachments;
}
@Override
public String type() {
return TYPE;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
EmailAction action = (EmailAction) o;
return Objects.equals(email, action.email)
&& Objects.equals(account, action.account)
&& Objects.equals(auth, action.auth)
&& Objects.equals(profile, action.profile)
&& Objects.equals(emailAttachments, action.emailAttachments)
&& Objects.equals(dataAttachment, action.dataAttachment);
}
@Override
public int hashCode() {
return Objects.hash(email, account, auth, profile, dataAttachment, emailAttachments);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
if (account != null) {
builder.field(Field.ACCOUNT.getPreferredName(), account);
}
if (auth != null) {
builder.field(Field.USER.getPreferredName(), auth.user());
if (WatcherParams.hideSecrets(params) && auth.password().value().startsWith(CryptoService.ENCRYPTED_TEXT_PREFIX) == false) {
builder.field(Field.PASSWORD.getPreferredName(), WatcherXContentParser.REDACTED_PASSWORD);
} else {
builder.field(Field.PASSWORD.getPreferredName(), auth.password().value());
}
}
if (profile != null) {
builder.field(Field.PROFILE.getPreferredName(), profile.name().toLowerCase(Locale.ROOT));
}
if (dataAttachment != null) {
builder.field(Field.ATTACH_DATA.getPreferredName(), dataAttachment, params);
}
if (emailAttachments != null) {
emailAttachments.toXContent(builder, params);
}
email.xContentBody(builder, params);
return builder.endObject();
}
public static EmailAction parse(String watchId, String actionId, XContentParser parser, EmailAttachmentsParser emailAttachmentsParser)
throws IOException {
EmailTemplate.Parser emailParser = new EmailTemplate.Parser();
String account = null;
String user = null;
Secret password = null;
Profile profile = Profile.STANDARD;
DataAttachment dataAttachment = null;
EmailAttachments attachments = EmailAttachments.EMPTY_ATTACHMENTS;
String currentFieldName = null;
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (Field.ATTACH_DATA.match(currentFieldName, parser.getDeprecationHandler())) {
try {
dataAttachment = DataAttachment.parse(parser);
} catch (IOException ioe) {
throw new ElasticsearchParseException(
"could not parse [{}] action [{}/{}]. failed to parse data attachment field " + "[{}]",
ioe,
TYPE,
watchId,
actionId,
currentFieldName
);
}
} else if (Field.ATTACHMENTS.match(currentFieldName, parser.getDeprecationHandler())) {
attachments = emailAttachmentsParser.parse(parser);
} else if (emailParser.handle(currentFieldName, parser) == false) {
if (token == XContentParser.Token.VALUE_STRING) {
if (Field.ACCOUNT.match(currentFieldName, parser.getDeprecationHandler())) {
account = parser.text();
} else if (Field.USER.match(currentFieldName, parser.getDeprecationHandler())) {
user = parser.text();
} else if (Field.PASSWORD.match(currentFieldName, parser.getDeprecationHandler())) {
password = WatcherXContentParser.secretOrNull(parser);
} else if (Field.PROFILE.match(currentFieldName, parser.getDeprecationHandler())) {
try {
profile = Profile.resolve(parser.text());
} catch (IllegalArgumentException iae) {
throw new ElasticsearchParseException("could not parse [{}] action [{}/{}]", TYPE, watchId, actionId, iae);
}
} else {
throw new ElasticsearchParseException(
"could not parse [{}] action [{}/{}]. unexpected string field [{}]",
TYPE,
watchId,
actionId,
currentFieldName
);
}
} else {
throw new ElasticsearchParseException(
"could not parse [{}] action [{}/{}]. unexpected token [{}]",
TYPE,
watchId,
actionId,
token
);
}
}
}
Authentication auth = null;
if (user != null) {
auth = new Authentication(user, password);
}
return new EmailAction(emailParser.parsedTemplate(), account, auth, profile, dataAttachment, attachments);
}
public static Builder builder(EmailTemplate email) {
return new Builder(email);
}
public abstract static
|
EmailAction
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
|
{
"start": 1542,
"end": 1997
}
|
class ____ a cluster of computer with a tree hierarchical
* network topology.
* For example, a cluster may be consists of many data centers filled
* with racks of computers.
* In a network topology, leaves represent data nodes (computers) and inner
* nodes represent switches/routers that manage traffic in/out of data centers
* or racks.
*
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Unstable
public
|
represents
|
java
|
quarkusio__quarkus
|
independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/interceptors/arcInvContext/ArcContextLifecycleInterceptorPrivate.java
|
{
"start": 367,
"end": 1310
}
|
class ____ {
static boolean PRE_DESTROY_INVOKED = false;
static boolean POST_CONSTRUCT_INVOKED = false;
@PostConstruct
private Object postConstruct(ArcInvocationContext ctx) throws Exception {
// just to test that bindings are accessible
Set<Annotation> bindings = ctx.getInterceptorBindings();
if (bindings == null) {
throw new IllegalArgumentException("No bindings found");
}
POST_CONSTRUCT_INVOKED = true;
return ctx.proceed();
}
@PreDestroy
private Object preDestroy(ArcInvocationContext ctx) throws Exception {
// just to test that bindings are accessible
Set<Annotation> bindings = ctx.getInterceptorBindings();
if (bindings == null) {
throw new IllegalArgumentException("No bindings found");
}
PRE_DESTROY_INVOKED = true;
return ctx.proceed();
}
}
|
ArcContextLifecycleInterceptorPrivate
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/deser/NullHandlingDeserTest.java
|
{
"start": 1006,
"end": 1347
}
|
class ____{
private Map<String,String> any = new HashMap<String,String>();
@JsonAnySetter
public void setAny(String name, String value){
this.any.put(name,value);
}
public Map<String,String> getAny(){
return this.any;
}
}
// [databind#1601]
static
|
AnySetter
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Syncable.java
|
{
"start": 1007,
"end": 1215
}
|
interface ____ flush/sync operations.
* Consult the Hadoop filesystem specification for the definition of the
* semantics of these operations.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public
|
for
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSafeMode.java
|
{
"start": 1166,
"end": 1196
}
|
interface ____ HDFS.
*/
public
|
on
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/id/sequence/SequenceGeneratorAndAutoFlushTest.java
|
{
"start": 1253,
"end": 3418
}
|
class ____ {
@AfterEach
public void tearDown(EntityManagerFactoryScope scope){
scope.getEntityManagerFactory().getSchemaManager().truncate();
}
@Test
public void testCriteriaAutoFlush(EntityManagerFactoryScope scope) {
scope.inTransaction(
entityManager -> {
final Account account = new Account( 1l );
entityManager.persist( account );
final Person person = new Person( account );
account.addChild( person );
CriteriaBuilder cb = entityManager.getCriteriaBuilder();
CriteriaQuery<Long> cq = cb.createQuery( Long.class );
Root<Ticket> ticket = cq.from( Ticket.class );
cq.select( cb.count( ticket ) ).where( ticket.get( "owner" ).in( List.of( person ) ) );
entityManager.createQuery( cq ).getSingleResult();
final Person person2 = new Person( account );
account.addChild( person2 );
cb = entityManager.getCriteriaBuilder();
cq = cb.createQuery( Long.class );
ticket = cq.from( Ticket.class );
cq.select( cb.count( ticket ) ).where( ticket.get( "owner" ).in( List.of( person2 ) ) );
entityManager.createQuery( cq ).getSingleResult();
}
);
}
@Test
public void testScrollAutoFlush(EntityManagerFactoryScope scope) {
scope.inTransaction(
entityManager -> {
final Account account = new Account( 1l );
entityManager.persist( account );
final Person person = new Person( account );
account.addChild( person );
(entityManager.unwrap( Session.class )).createQuery( "select t from Ticket t where t.owner in (:owners)" ).setParameter( "owners", List.of( person ) ).scroll();
}
);
}
@Test
public void testStreamAutoFlush(EntityManagerFactoryScope scope) {
scope.inTransaction(
entityManager -> {
final Account account = new Account( 1l );
entityManager.persist( account );
final Person person = new Person( account );
account.addChild( person );
(entityManager.unwrap( Session.class )).createQuery( "select t from Ticket t where t.owner in (:owners)" ).setParameter( "owners", List.of( person ) ).stream();
}
);
}
@Entity(name = "Account")
public static
|
SequenceGeneratorAndAutoFlushTest
|
java
|
mockito__mockito
|
mockito-extensions/mockito-errorprone/src/test/java/org/mockito/errorprone/bugpatterns/MockitoInternalUsageTest.java
|
{
"start": 2601,
"end": 2814
}
|
class ____<T extends org.mockito.internal.stubbing.InvocationContainerImpl>"
+ " {}",
" // BUG: Diagnostic contains:",
"
|
ExtendsGeneric
|
java
|
google__dagger
|
hilt-compiler/main/java/dagger/hilt/android/processor/internal/androidentrypoint/InjectorEntryPointGenerator.java
|
{
"start": 1621,
"end": 2943
}
|
interface ____ {
// void injectFoo(FooActivity foo);
// }
public void generate() throws IOException {
ClassName name = metadata.injectorClassName();
TypeSpec.Builder builder =
TypeSpec.interfaceBuilder(name.simpleName())
.addAnnotation(Processors.getOriginatingElementAnnotation(metadata.element()))
.addAnnotation(ClassNames.GENERATED_ENTRY_POINT)
.addAnnotation(metadata.injectorInstallInAnnotation())
.addModifiers(Modifier.PUBLIC)
.addMethod(
MethodSpec.methodBuilder(metadata.injectMethodName())
.addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT)
.addParameter(
metadata.elementClassName(),
Processors.upperToLowerCamel(metadata.elementClassName().simpleName()))
.build());
JavaPoetExtKt.addOriginatingElement(builder, metadata.element());
Processors.addGeneratedAnnotation(builder, env, getClass());
Generators.copyLintAnnotations(metadata.element(), builder);
Generators.copySuppressAnnotations(metadata.element(), builder);
env.getFiler()
.write(
JavaFile.builder(name.packageName(), builder.build()).build(), XFiler.Mode.Isolating);
}
}
|
FooActivity_GeneratedInjector
|
java
|
junit-team__junit5
|
junit-jupiter-params/src/main/java/org/junit/jupiter/params/provider/FieldSource.java
|
{
"start": 7073,
"end": 7621
}
|
class ____.
*
* @since 5.11
* @see MethodSource
* @see Arguments
* @see ArgumentsSource
* @see org.junit.jupiter.params.ParameterizedClass
* @see org.junit.jupiter.params.ParameterizedTest
* @see org.junit.jupiter.api.TestInstance
*/
@Target({ ElementType.ANNOTATION_TYPE, ElementType.METHOD, ElementType.TYPE })
@Retention(RetentionPolicy.RUNTIME)
@Documented
@Inherited
@Repeatable(FieldSources.class)
@API(status = MAINTAINED, since = "5.13.3")
@ArgumentsSource(FieldArgumentsProvider.class)
@SuppressWarnings("exports")
public @
|
hierarchies
|
java
|
apache__camel
|
core/camel-api/src/main/java/org/apache/camel/CamelException.java
|
{
"start": 842,
"end": 932
}
|
class ____ all Camel checked exceptions typically thrown by a {@link Processor}
*/
public
|
for
|
java
|
quarkusio__quarkus
|
extensions/vertx-http/deployment/src/test/java/io/quarkus/vertx/http/tls/letsencrypt/LetsEncryptFlowWithTlsConfigurationNameTest.java
|
{
"start": 1075,
"end": 4362
}
|
class ____ extends LetsEncryptFlowTestBase {
public static final File temp = new File("target/acme-certificates-" + UUID.randomUUID());
private static final String configuration = """
# Enable SSL, configure the key store using the self-signed certificate
quarkus.tls.http.key-store.pem.0.cert=%s/cert.pem
quarkus.tls.http.key-store.pem.0.key=%s/key.pem
quarkus.tls.lets-encrypt.enabled=true
quarkus.management.enabled=true
quarkus.http.insecure-requests=disabled
quarkus.http.tls-configuration-name=http
""".formatted(temp.getAbsolutePath(), temp.getAbsolutePath());
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(MyBean.class)
.addAsResource(new StringAsset((configuration)), "application.properties"))
.overrideRuntimeConfigKey("loc", temp.getAbsolutePath())
.setBeforeAllCustomizer(() -> {
try {
// Prepare a random directory to store the certificates.
temp.mkdirs();
Files.copy(SELF_SIGNED_CERT.toPath(),
new File(temp, "cert.pem").toPath());
Files.copy(SELF_SIGNED_KEY.toPath(),
new File(temp, "key.pem").toPath());
} catch (Exception e) {
throw new RuntimeException(e);
}
})
.setAfterAllCustomizer(() -> {
try {
Files.deleteIfExists(new File(temp, "cert.pem").toPath());
Files.deleteIfExists(new File(temp, "key.pem").toPath());
Files.deleteIfExists(temp.toPath());
} catch (Exception e) {
throw new RuntimeException(e);
}
});
@Inject
Vertx vertx;
@ConfigProperty(name = "loc")
File certs;
@TestHTTPResource(value = "/tls", tls = true)
String endpoint;
@TestHTTPResource(value = "/lets-encrypt/challenge", management = true)
String management;
@TestHTTPResource(value = "/lets-encrypt/certs", management = true)
String reload;
@TestHTTPResource(value = "/.well-known/acme-challenge", tls = true)
String challenge;
@Test
void testFlow() throws IOException {
initFlow(vertx, "http");
testLetsEncryptFlow();
}
@Override
void updateCerts() throws IOException {
// Replace the certs on disk
Files.copy(ACME_CERT.toPath(),
new File(certs, "cert.pem").toPath(), StandardCopyOption.REPLACE_EXISTING);
Files.copy(ACME_KEY.toPath(),
new File(certs, "key.pem").toPath(), StandardCopyOption.REPLACE_EXISTING);
}
@Override
String getApplicationEndpoint() {
return endpoint;
}
@Override
String getLetsEncryptManagementEndpoint() {
return management;
}
@Override
String getLetsEncryptCertsEndpoint() {
return reload;
}
@Override
String getChallengeEndpoint() {
return challenge;
}
}
|
LetsEncryptFlowWithTlsConfigurationNameTest
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMCriticalThreadUncaughtExceptionHandler.java
|
{
"start": 1466,
"end": 2291
}
|
class ____
implements UncaughtExceptionHandler {
private static final Logger LOG = LoggerFactory.getLogger(
RMCriticalThreadUncaughtExceptionHandler.class);
private final RMContext rmContext;
public RMCriticalThreadUncaughtExceptionHandler(RMContext rmContext) {
this.rmContext = rmContext;
}
@Override
public void uncaughtException(Thread t, Throwable e) {
Exception ex;
if (e instanceof Exception) {
ex = (Exception)e;
} else {
ex = new YarnException(e);
}
RMFatalEvent event =
new RMFatalEvent(RMFatalEventType.CRITICAL_THREAD_CRASH, ex,
String.format("a critical thread, %s, that exited unexpectedly",
t.getName()));
rmContext.getDispatcher().getEventHandler().handle(event);
}
}
|
RMCriticalThreadUncaughtExceptionHandler
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/bug/Issue779.java
|
{
"start": 866,
"end": 945
}
|
class ____ {
public String token;
public String sign;
}
}
|
Model
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/arrow/src/main/java/org/elasticsearch/xpack/esql/arrow/ValueConversions.java
|
{
"start": 701,
"end": 2703
}
|
class ____ {
/**
* Shorten ipv6-mapped ipv4 IP addresses to 4 bytes
*/
public static BytesRef shortenIpV4Addresses(BytesRef value, BytesRef scratch) {
// Same logic as sun.net.util.IPAddressUtil#isIPv4MappedAddress
// See https://datatracker.ietf.org/doc/html/rfc4291#section-2.5.5.2
if (value.length == 16) {
int pos = value.offset;
byte[] bytes = value.bytes;
boolean isIpV4 = bytes[pos++] == 0
&& bytes[pos++] == 0
&& bytes[pos++] == 0
&& bytes[pos++] == 0
&& bytes[pos++] == 0
&& bytes[pos++] == 0
&& bytes[pos++] == 0
&& bytes[pos++] == 0
&& bytes[pos++] == 0
&& bytes[pos++] == 0
&& bytes[pos++] == (byte) 0xFF
&& bytes[pos] == (byte) 0xFF;
if (isIpV4) {
scratch.bytes = value.bytes;
scratch.offset = value.offset + 12;
scratch.length = 4;
return scratch;
}
}
return value;
}
/**
* Convert binary-encoded versions to strings
*/
public static BytesRef versionToString(BytesRef value, BytesRef scratch) {
return new BytesRef(new Version(value).toString());
}
/**
* Convert any xcontent source to json
*/
public static BytesRef sourceToJson(BytesRef value, BytesRef scratch) {
try {
var valueArray = new BytesArray(value);
XContentType xContentType = XContentHelper.xContentType(valueArray);
if (xContentType == XContentType.JSON) {
return value;
} else {
String json = XContentHelper.convertToJson(valueArray, false, xContentType);
return new BytesRef(json);
}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
|
ValueConversions
|
java
|
playframework__playframework
|
core/play/src/main/java/play/libs/Files.java
|
{
"start": 10422,
"end": 10469
}
|
class ____ the scenes.
*/
public static
|
behind
|
java
|
qos-ch__slf4j
|
slf4j-api/src/main/java/org/slf4j/helpers/ThreadLocalMapOfStacks.java
|
{
"start": 294,
"end": 2214
}
|
class ____ {
// BEWARE: Keys or values placed in a ThreadLocal should not be of a type/class
// not included in the JDK. See also https://jira.qos.ch/browse/LOGBACK-450
final ThreadLocal<Map<String, Deque<String>>> tlMapOfStacks = new ThreadLocal<>();
public void pushByKey(String key, String value) {
if (key == null)
return;
Map<String, Deque<String>> map = tlMapOfStacks.get();
if (map == null) {
map = new HashMap<>();
tlMapOfStacks.set(map);
}
Deque<String> deque = map.get(key);
if (deque == null) {
deque = new ArrayDeque<>();
}
deque.push(value);
map.put(key, deque);
}
public String popByKey(String key) {
if (key == null)
return null;
Map<String, Deque<String>> map = tlMapOfStacks.get();
if (map == null)
return null;
Deque<String> deque = map.get(key);
if (deque == null)
return null;
return deque.pop();
}
public Deque<String> getCopyOfDequeByKey(String key) {
if (key == null)
return null;
Map<String, Deque<String>> map = tlMapOfStacks.get();
if (map == null)
return null;
Deque<String> deque = map.get(key);
if (deque == null)
return null;
return new ArrayDeque<String>(deque);
}
/**
* Clear the deque(stack) referenced by 'key'.
*
* @param key identifies the stack
*
* @since 2.0.0
*/
public void clearDequeByKey(String key) {
if (key == null)
return;
Map<String, Deque<String>> map = tlMapOfStacks.get();
if (map == null)
return;
Deque<String> deque = map.get(key);
if (deque == null)
return;
deque.clear();
}
}
|
ThreadLocalMapOfStacks
|
java
|
redisson__redisson
|
redisson-spring-data/redisson-spring-data-24/src/main/java/org/redisson/spring/data/connection/RedissonConnectionFactory.java
|
{
"start": 2116,
"end": 6597
}
|
class ____ implements RedisConnectionFactory,
ReactiveRedisConnectionFactory, InitializingBean, DisposableBean {
private final static Log log = LogFactory.getLog(RedissonConnectionFactory.class);
public static final ExceptionTranslationStrategy EXCEPTION_TRANSLATION =
new PassThroughExceptionTranslationStrategy(new RedissonExceptionConverter());
private Config config;
private RedissonClient redisson;
private boolean hasOwnRedisson;
private boolean filterOkResponses = false;
/**
* Creates factory with default Redisson configuration
*/
public RedissonConnectionFactory() {
this(Redisson.create());
hasOwnRedisson = true;
}
/**
* Creates factory with defined Redisson instance
*
* @param redisson - Redisson instance
*/
public RedissonConnectionFactory(RedissonClient redisson) {
this.redisson = redisson;
}
/**
* Creates factory with defined Redisson config
*
* @param config - Redisson config
*/
public RedissonConnectionFactory(Config config) {
super();
this.config = config;
hasOwnRedisson = true;
}
public boolean isFilterOkResponses() {
return filterOkResponses;
}
public void setFilterOkResponses(boolean filterOkResponses) {
this.filterOkResponses = filterOkResponses;
}
@Override
public DataAccessException translateExceptionIfPossible(RuntimeException ex) {
return EXCEPTION_TRANSLATION.translate(ex);
}
@Override
public void destroy() throws Exception {
if (hasOwnRedisson) {
redisson.shutdown();
}
}
@Override
public void afterPropertiesSet() throws Exception {
if (config != null) {
redisson = Redisson.create(config);
}
}
@Override
public RedisConnection getConnection() {
if (redisson.getConfig().isClusterConfig()) {
return new RedissonClusterConnection(redisson, filterOkResponses);
}
return new RedissonConnection(redisson, filterOkResponses);
}
@Override
public RedisClusterConnection getClusterConnection() {
if (!redisson.getConfig().isClusterConfig()) {
throw new InvalidDataAccessResourceUsageException("Redisson is not in Cluster mode");
}
return new RedissonClusterConnection(redisson, filterOkResponses);
}
@Override
public boolean getConvertPipelineAndTxResults() {
return true;
}
@Override
public RedisSentinelConnection getSentinelConnection() {
if (!redisson.getConfig().isSentinelConfig()) {
throw new InvalidDataAccessResourceUsageException("Redisson is not in Sentinel mode");
}
SentinelConnectionManager manager = (SentinelConnectionManager)(((Redisson)redisson).getCommandExecutor().getConnectionManager());
for (RedisClient client : manager.getSentinels()) {
org.redisson.client.RedisConnection connection = null;
try {
connection = client.connect();
String res = connection.sync(RedisCommands.PING);
if ("pong".equalsIgnoreCase(res)) {
return new RedissonSentinelConnection(connection);
}
} catch (Exception e) {
log.warn("Can't connect to " + client, e);
if (connection != null) {
connection.closeAsync();
}
}
}
throw new InvalidDataAccessResourceUsageException("Sentinels are offline");
}
@Override
public ReactiveRedisConnection getReactiveConnection() {
if (redisson.getConfig().isClusterConfig()) {
return new RedissonReactiveRedisClusterConnection(((RedissonReactive)redisson.reactive()).getCommandExecutor());
}
return new RedissonReactiveRedisConnection(((RedissonReactive)redisson.reactive()).getCommandExecutor());
}
@Override
public ReactiveRedisClusterConnection getReactiveClusterConnection() {
if (!redisson.getConfig().isClusterConfig()) {
throw new InvalidDataAccessResourceUsageException("Redisson is not in Cluster mode");
}
return new RedissonReactiveRedisClusterConnection(((RedissonReactive)redisson.reactive()).getCommandExecutor());
}
}
|
RedissonConnectionFactory
|
java
|
redisson__redisson
|
redisson-spring-data/redisson-spring-data-18/src/test/java/org/redisson/BaseTest.java
|
{
"start": 223,
"end": 2890
}
|
class ____ {
protected RedissonClient redisson;
protected static RedissonClient defaultRedisson;
@BeforeClass
public static void beforeClass() throws IOException, InterruptedException {
if (!RedissonRuntimeEnvironment.isTravis) {
RedisRunner.startDefaultRedisServerInstance();
defaultRedisson = createInstance();
Runtime.getRuntime().addShutdownHook(new Thread() {
@Override
public void run() {
defaultRedisson.shutdown();
try {
RedisRunner.shutDownDefaultRedisServerInstance();
} catch (InterruptedException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
});
}
}
@Before
public void before() throws IOException, InterruptedException {
if (RedissonRuntimeEnvironment.isTravis) {
RedisRunner.startDefaultRedisServerInstance();
redisson = createInstance();
} else {
if (redisson == null) {
redisson = defaultRedisson;
}
if (flushBetweenTests()) {
redisson.getKeys().flushall();
}
}
}
@After
public void after() throws InterruptedException {
if (RedissonRuntimeEnvironment.isTravis) {
redisson.shutdown();
RedisRunner.shutDownDefaultRedisServerInstance();
}
}
public static Config createConfig() {
// String redisAddress = System.getProperty("redisAddress");
// if (redisAddress == null) {
// redisAddress = "127.0.0.1:6379";
// }
Config config = new Config();
// config.setCodec(new MsgPackJacksonCodec());
// config.useSentinelServers().setMasterName("mymaster").addSentinelAddress("127.0.0.1:26379", "127.0.0.1:26389");
// config.useClusterServers().addNodeAddress("127.0.0.1:7004", "127.0.0.1:7001", "127.0.0.1:7000");
config.useSingleServer()
.setAddress(RedisRunner.getDefaultRedisServerBindAddressAndPort());
// .setPassword("mypass1");
// config.useMasterSlaveConnection()
// .setMasterAddress("127.0.0.1:6379")
// .addSlaveAddress("127.0.0.1:6399")
// .addSlaveAddress("127.0.0.1:6389");
return config;
}
public static RedissonClient createInstance() {
Config config = createConfig();
return Redisson.create(config);
}
protected boolean flushBetweenTests() {
return true;
}
}
|
BaseTest
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
|
{
"start": 12089,
"end": 14765
}
|
class ____ {
private long avgMapTime;
private long avgReduceTime;
private long avgShuffleTime;
private JobHistoryParser.TaskAttemptInfo [] mapTasks;
private JobHistoryParser.TaskAttemptInfo [] reduceTasks;
/** Get the average map time */
public long getAvgMapTime() { return avgMapTime; }
/** Get the average reduce time */
public long getAvgReduceTime() { return avgReduceTime; }
/** Get the average shuffle time */
public long getAvgShuffleTime() { return avgShuffleTime; }
/** Get the map tasks list */
public JobHistoryParser.TaskAttemptInfo [] getMapTasks() {
return mapTasks;
}
/** Get the reduce tasks list */
public JobHistoryParser.TaskAttemptInfo [] getReduceTasks() {
return reduceTasks;
}
/** Generate analysis information for the parsed job */
public AnalyzedJob (JobInfo job) {
Map<TaskID, JobHistoryParser.TaskInfo> tasks = job.getAllTasks();
int succeededMaps = (int) job.getSucceededMaps();
int succeededReduces = (int) job.getSucceededReduces();
mapTasks =
new JobHistoryParser.TaskAttemptInfo[succeededMaps];
reduceTasks =
new JobHistoryParser.TaskAttemptInfo[succeededReduces];
int mapIndex = 0 , reduceIndex=0;
avgMapTime = 0;
avgReduceTime = 0;
avgShuffleTime = 0;
for (JobHistoryParser.TaskInfo task : tasks.values()) {
Map<TaskAttemptID, JobHistoryParser.TaskAttemptInfo> attempts =
task.getAllTaskAttempts();
for (JobHistoryParser.TaskAttemptInfo attempt : attempts.values()) {
if (attempt.getTaskStatus().
equals(TaskStatus.State.SUCCEEDED.toString())) {
long avgFinishTime = (attempt.getFinishTime() -
attempt.getStartTime());
if (attempt.getTaskType().equals(TaskType.MAP)) {
mapTasks[mapIndex++] = attempt;
avgMapTime += avgFinishTime;
} else if (attempt.getTaskType().equals(TaskType.REDUCE)) {
reduceTasks[reduceIndex++] = attempt;
avgShuffleTime += (attempt.getShuffleFinishTime() -
attempt.getStartTime());
avgReduceTime += (attempt.getFinishTime() -
attempt.getShuffleFinishTime());
}
break;
}
}
}
if (succeededMaps > 0) {
avgMapTime /= succeededMaps;
}
if (succeededReduces > 0) {
avgReduceTime /= succeededReduces;
avgShuffleTime /= succeededReduces;
}
}
}
/**
* Utility to filter out events based on the task status
*/
public static
|
AnalyzedJob
|
java
|
micronaut-projects__micronaut-core
|
http-server-tck/src/main/java/io/micronaut/http/server/tck/tests/jsonview/JsonViewsTest.java
|
{
"start": 1193,
"end": 2411
}
|
class ____ {
public static final String SPEC_NAME = "JsonViewsTest";
private static final Map<String, Object> CONFIGURATION = Map.of("jackson.json-view.enabled", true);
@Test
void testJsonViewPojo() throws Exception {
assertPath("/views/pojo");
}
@Test
void testJsonViewList() throws Exception {
assertPath("/views/list");
}
@Test
void testJsonViewOptional() throws Exception {
assertPath("/views/optional");
}
@Test
void testJsonViewMono() throws Exception {
assertPath("/views/mono");
}
@Test
void testJsonViewFlux() throws Exception {
assertPath("/views/flux");
}
@Test
void testJsonViewFuture() throws Exception {
assertPath("/views/future");
}
private void assertPath(String path) throws IOException {
asserts(SPEC_NAME,
CONFIGURATION,
HttpRequest.GET(path),
(server, request) -> AssertionUtils.assertDoesNotThrow(server, request, HttpResponseAssertion.builder()
.body(BodyAssertion.builder().body("password").doesntContain())
.status(HttpStatus.OK)
.build()));
}
}
|
JsonViewsTest
|
java
|
apache__thrift
|
lib/java/src/main/java/org/apache/thrift/server/ServerContext.java
|
{
"start": 1458,
"end": 1609
}
|
interface ____ or returns false otherwise.
*
* @param iface a Class defining the underlying context
* @return true if this implements the
|
argument
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/boot/models/annotations/internal/OverriddenGeneratedColumnsAnnotation.java
|
{
"start": 660,
"end": 1896
}
|
class ____
implements DialectOverride.GeneratedColumns, RepeatableContainer<DialectOverride.GeneratedColumn> {
private DialectOverride.GeneratedColumn[] value;
/**
* Used in creating dynamic annotation instances (e.g. from XML)
*/
public OverriddenGeneratedColumnsAnnotation(ModelsContext modelContext) {
}
/**
* Used in creating annotation instances from JDK variant
*/
public OverriddenGeneratedColumnsAnnotation(
DialectOverride.GeneratedColumns annotation,
ModelsContext modelContext) {
this.value = extractJdkValue( annotation, DIALECT_OVERRIDE_GENERATED_COLUMNS, "value", modelContext );
}
/**
* Used in creating annotation instances from Jandex variant
*/
public OverriddenGeneratedColumnsAnnotation(
Map<String, Object> attributeValues,
ModelsContext modelContext) {
this.value = (DialectOverride.GeneratedColumn[]) attributeValues.get( "value" );
}
@Override
public DialectOverride.GeneratedColumn[] value() {
return value;
}
@Override
public void value(DialectOverride.GeneratedColumn[] value) {
this.value = value;
}
@Override
public Class<? extends Annotation> annotationType() {
return DialectOverride.GeneratedColumns.class;
}
}
|
OverriddenGeneratedColumnsAnnotation
|
java
|
quarkusio__quarkus
|
extensions/undertow/runtime/src/main/java/io/quarkus/undertow/runtime/UndertowHandlersConfServletExtension.java
|
{
"start": 635,
"end": 2435
}
|
class ____ implements ServletExtension {
public static final String META_INF_UNDERTOW_HANDLERS_CONF = "META-INF/undertow-handlers.conf";
@Override
public void handleDeployment(DeploymentInfo deploymentInfo, ServletContext servletContext) {
ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
try (InputStream handlers = classLoader.getResourceAsStream(META_INF_UNDERTOW_HANDLERS_CONF)) {
if (handlers != null) {
// From Stuart Douglas: Ideally these would be parsed at deployment time and passed into a recorder,
// however they are likely not bytecode serialisable. Even though this approach
// does not 100% align with the Quarkus ethos I think it is ok in this case as
// the gains would be marginal compared to the cost of attempting to make
// every predicate bytecode serialisable.
List<PredicatedHandler> handlerList = PredicatedHandlersParser.parse(handlers, classLoader);
if (!handlerList.isEmpty()) {
deploymentInfo.addOuterHandlerChainWrapper(new RewriteCorrectingHandlerWrappers.PostWrapper());
deploymentInfo.addOuterHandlerChainWrapper(new HandlerWrapper() {
@Override
public HttpHandler wrap(HttpHandler handler) {
return Handlers.predicates(handlerList, handler);
}
});
deploymentInfo.addOuterHandlerChainWrapper(new RewriteCorrectingHandlerWrappers.PreWrapper());
}
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
}
|
UndertowHandlersConfServletExtension
|
java
|
apache__camel
|
components/camel-pubnub/src/test/java/org/apache/camel/component/pubnub/PubNubTestBase.java
|
{
"start": 2644,
"end": 3094
}
|
class ____ extends PubNubForJavaImpl {
MockedTimePubNub(PNConfiguration initialConfig) {
super(initialConfig);
}
@Override
public int getTimestamp() {
return 1337;
}
@Override
public String getVersion() {
return "suchJava";
}
}
return new MockedTimePubNub(config);
}
}
|
MockedTimePubNub
|
java
|
elastic__elasticsearch
|
libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserException.java
|
{
"start": 648,
"end": 4056
}
|
class ____ extends RuntimeException {
public static PolicyParserException newPolicyParserException(XContentLocation location, String policyName, String message) {
return new PolicyParserException(
"[" + location.lineNumber() + ":" + location.columnNumber() + "] policy parsing error for [" + policyName + "]: " + message
);
}
public static PolicyParserException newPolicyParserException(
XContentLocation location,
String policyName,
String scopeName,
String message
) {
if (scopeName == null) {
return new PolicyParserException(
"[" + location.lineNumber() + ":" + location.columnNumber() + "] policy parsing error for [" + policyName + "]: " + message
);
} else {
return new PolicyParserException(
"["
+ location.lineNumber()
+ ":"
+ location.columnNumber()
+ "] policy parsing error for ["
+ policyName
+ "] in scope ["
+ scopeName
+ "]: "
+ message
);
}
}
public static PolicyParserException newPolicyParserException(
XContentLocation location,
String policyName,
String scopeName,
String entitlementType,
String message
) {
if (scopeName == null) {
return new PolicyParserException(
"["
+ location.lineNumber()
+ ":"
+ location.columnNumber()
+ "] policy parsing error for ["
+ policyName
+ "] for entitlement type ["
+ entitlementType
+ "]: "
+ message
);
} else {
return new PolicyParserException(
"["
+ location.lineNumber()
+ ":"
+ location.columnNumber()
+ "] policy parsing error for ["
+ policyName
+ "] in scope ["
+ scopeName
+ "] for entitlement type ["
+ entitlementType
+ "]: "
+ message
);
}
}
public static PolicyParserException newPolicyParserException(
XContentLocation location,
String policyName,
String scopeName,
String entitlementType,
PolicyValidationException cause
) {
assert (scopeName != null);
return new PolicyParserException(
"["
+ location.lineNumber()
+ ":"
+ location.columnNumber()
+ "] policy parsing error for ["
+ policyName
+ "] in scope ["
+ scopeName
+ "] for entitlement type ["
+ entitlementType
+ "]: "
+ cause.getMessage(),
cause
);
}
private PolicyParserException(String message) {
super(message);
}
private PolicyParserException(String message, PolicyValidationException cause) {
super(message, cause);
}
}
|
PolicyParserException
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/jsontype/TestSubtypesWithDefaultImpl.java
|
{
"start": 983,
"end": 3500
}
|
class ____ extends SuperTypeWithoutDefault {
public int a;
}
/*
/**********************************************************************
/* Unit tests
/**********************************************************************
*/
private final ObjectMapper MAPPER = new ObjectMapper();
@Test
public void testDefaultImpl() throws Exception
{
// first, test with no type information
SuperTypeWithDefault bean = MAPPER.readValue("{\"a\":13}", SuperTypeWithDefault.class);
assertEquals(DefaultImpl.class, bean.getClass());
assertEquals(13, ((DefaultImpl) bean).a);
// and then with unmapped info
bean = MAPPER.readValue("{\"a\":14,\"#type\":\"foobar\"}", SuperTypeWithDefault.class);
assertEquals(DefaultImpl.class, bean.getClass());
assertEquals(14, ((DefaultImpl) bean).a);
bean = MAPPER.readValue("{\"#type\":\"foobar\",\"a\":15}", SuperTypeWithDefault.class);
assertEquals(DefaultImpl.class, bean.getClass());
assertEquals(15, ((DefaultImpl) bean).a);
bean = MAPPER.readValue("{\"#type\":\"foobar\"}", SuperTypeWithDefault.class);
assertEquals(DefaultImpl.class, bean.getClass());
assertEquals(0, ((DefaultImpl) bean).a);
}
@Test
public void testDefaultImplViaModule() throws Exception
{
final String JSON = "{\"a\":123}";
// first: without registration etc, epic fail:
try {
MAPPER.readValue(JSON, SuperTypeWithoutDefault.class);
fail("Expected an exception");
} catch (InvalidTypeIdException e) {
verifyException(e, "missing type id property '#type'");
}
// but then succeed when we register default impl
SimpleModule module = new SimpleModule("test", Version.unknownVersion());
module.addAbstractTypeMapping(SuperTypeWithoutDefault.class, DefaultImpl505.class);
ObjectMapper mapper = jsonMapperBuilder()
.addModule(module)
.build();
SuperTypeWithoutDefault bean = mapper.readValue(JSON, SuperTypeWithoutDefault.class);
assertNotNull(bean);
assertEquals(DefaultImpl505.class, bean.getClass());
assertEquals(123, ((DefaultImpl505) bean).a);
bean = mapper.readValue("{\"#type\":\"foobar\"}", SuperTypeWithoutDefault.class);
assertEquals(DefaultImpl505.class, bean.getClass());
assertEquals(0, ((DefaultImpl505) bean).a);
}
}
|
DefaultImpl505
|
java
|
junit-team__junit5
|
jupiter-tests/src/test/java/org/junit/jupiter/engine/extension/AutoCloseTests.java
|
{
"start": 19774,
"end": 20298
}
|
class ____ {
@AutoClose
static AutoCloseable superStaticClosable;
@AutoClose
final AutoCloseable superClosable = new AutoCloseSpy("superClosable");
@BeforeAll
// WARNING: if this method is named setup() AND the @BeforeAll method in
// SubTestCase is also named setup(), the latter will "hide" the former.
static void superSetup() {
superStaticClosable = new AutoCloseSpy("superStaticClosable");
}
@Test
void superTest() {
}
}
@SuppressWarnings("JUnitMalformedDeclaration")
static
|
SuperTestCase
|
java
|
square__moshi
|
moshi-adapters/src/test/java/com/squareup/moshi/adapters/PolymorphicJsonAdapterFactoryTest.java
|
{
"start": 14852,
"end": 15293
}
|
class ____ implements Message {
final String value;
Success(String value) {
this.value = value;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof Success)) return false;
Success success = (Success) o;
return value.equals(success.value);
}
@Override
public int hashCode() {
return value.hashCode();
}
}
static final
|
Success
|
java
|
apache__flink
|
flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/functions/aggfunctions/DenseRankAggFunction.java
|
{
"start": 1796,
"end": 3922
}
|
class ____ extends RankLikeAggFunctionBase {
public DenseRankAggFunction(LogicalType[] orderKeyTypes) {
super(orderKeyTypes);
}
@Override
public UnresolvedReferenceExpression[] aggBufferAttributes() {
UnresolvedReferenceExpression[] aggBufferAttrs =
new UnresolvedReferenceExpression[1 + lastValues.length];
aggBufferAttrs[0] = sequence;
System.arraycopy(lastValues, 0, aggBufferAttrs, 1, lastValues.length);
return aggBufferAttrs;
}
@Override
public DataType[] getAggBufferTypes() {
DataType[] aggBufferTypes = new DataType[1 + orderKeyTypes.length];
aggBufferTypes[0] = DataTypes.BIGINT();
System.arraycopy(
Arrays.stream(orderKeyTypes)
.map(LogicalTypeDataTypeConverter::toDataType)
.toArray(DataType[]::new),
0,
aggBufferTypes,
1,
orderKeyTypes.length);
return aggBufferTypes;
}
@Override
public Expression[] initialValuesExpressions() {
Expression[] initExpressions = new Expression[1 + orderKeyTypes.length];
// sequence = 0L
initExpressions[0] = literal(0L);
for (int i = 0; i < orderKeyTypes.length; ++i) {
// lastValue_i = init value
initExpressions[i + 1] = generateInitLiteral(orderKeyTypes[i]);
}
return initExpressions;
}
@Override
public Expression[] accumulateExpressions() {
Expression[] accExpressions = new Expression[1 + operands().length];
// sequence = if (lastValues equalTo orderKeys and sequence != 0) sequence else sequence + 1
accExpressions[0] =
ifThenElse(
and(orderKeyEqualsExpression(), not(equalTo(sequence, literal(0L)))),
sequence,
plus(sequence, literal(1L)));
Expression[] operands = operands();
System.arraycopy(operands, 0, accExpressions, 1, operands.length);
return accExpressions;
}
}
|
DenseRankAggFunction
|
java
|
google__auto
|
value/src/test/java/com/google/auto/value/processor/GeneratedDoesNotExistTest.java
|
{
"start": 5625,
"end": 6190
}
|
class ____ extends OverridableInvocationHandler<Elements> {
private final Set<String> ignoredGenerated;
ElementsHandler(Elements original, Set<String> ignoredGenerated) {
super(original);
this.ignoredGenerated = ignoredGenerated;
}
@Keep
public TypeElement getTypeElement(CharSequence name) {
if (GENERATED_ANNOTATIONS.contains(name.toString())) {
ignoredGenerated.add(name.toString());
return null;
} else {
return original.getTypeElement(name);
}
}
}
private static
|
ElementsHandler
|
java
|
apache__logging-log4j2
|
log4j-1.2-api/src/test/java/org/apache/log4j/config/PropertiesConfigurationTest.java
|
{
"start": 2516,
"end": 15147
}
|
class ____ extends AbstractLog4j1ConfigurationTest {
private static final String TEST_KEY = "log4j.test.tmpdir";
private static final String SUFFIX = ".properties";
@Override
Configuration getConfiguration(final String configResourcePrefix) throws IOException {
final String configResource = configResourcePrefix + SUFFIX;
final InputStream inputStream = getResourceAsStream(configResource);
final ConfigurationSource source = new ConfigurationSource(inputStream);
final LoggerContext context = LoggerContext.getContext(false);
final Configuration configuration = new PropertiesConfigurationFactory().getConfiguration(context, source);
assertNotNull(configuration, "No configuration created");
configuration.initialize();
return configuration;
}
@Test
void testConfigureNullPointerException() throws Exception {
try (final LoggerContext loggerContext =
TestConfigurator.configure("target/test-classes/LOG4J2-3247.properties")) {
// [LOG4J2-3247] configure() should not throw an NPE.
final Configuration configuration = loggerContext.getConfiguration();
assertNotNull(configuration);
final Appender appender = configuration.getAppender("CONSOLE");
assertNotNull(appender);
}
}
@Test
void testConsoleAppenderFilter() throws Exception {
try (final LoggerContext loggerContext =
TestConfigurator.configure("target/test-classes/LOG4J2-3247.properties")) {
// LOG4J2-3281 PropertiesConfiguration.buildAppender not adding filters to appender
final Configuration configuration = loggerContext.getConfiguration();
assertNotNull(configuration);
final Appender appender = configuration.getAppender("CONSOLE");
assertNotNull(appender);
final Filterable filterable = (Filterable) appender;
final FilterAdapter filter = (FilterAdapter) filterable.getFilter();
assertNotNull(filter);
assertInstanceOf(NeutralFilterFixture.class, filter.getFilter());
}
}
@Test
void testCustomAppenderFilter() throws Exception {
try (final LoggerContext loggerContext =
TestConfigurator.configure("target/test-classes/LOG4J2-3281.properties")) {
// LOG4J2-3281 PropertiesConfiguration.buildAppender not adding filters to appender
final Configuration configuration = loggerContext.getConfiguration();
assertNotNull(configuration);
final Appender appender = configuration.getAppender("CUSTOM");
assertNotNull(appender);
final Filterable filterable = (Filterable) appender;
final FilterAdapter filter = (FilterAdapter) filterable.getFilter();
assertNotNull(filter);
assertInstanceOf(NeutralFilterFixture.class, filter.getFilter());
}
}
@Test
void testConsoleAppenderLevelRangeFilter() throws Exception {
PluginManager.addPackage("org.apache.log4j.builders.filter");
try (final LoggerContext loggerContext =
TestConfigurator.configure("target/test-classes/LOG4J2-3326.properties")) {
final Configuration configuration = loggerContext.getConfiguration();
assertNotNull(configuration);
final Appender appender = configuration.getAppender("CUSTOM");
assertNotNull(appender);
final Filterable filterable = (Filterable) appender;
final CompositeFilter filter = (CompositeFilter) filterable.getFilter();
final org.apache.logging.log4j.core.Filter[] filters = filter.getFiltersArray();
final LevelRangeFilter filter1 = (LevelRangeFilter) filters[0];
// XXX: LOG4J2-2315
assertEquals(Level.OFF, filter1.getMinLevel());
assertEquals(Level.ALL, filter1.getMaxLevel());
final LevelRangeFilter filter2 = (LevelRangeFilter) filters[1];
assertEquals(Level.ERROR, filter2.getMinLevel());
assertEquals(Level.INFO, filter2.getMaxLevel());
final LevelRangeFilter filter3 = (LevelRangeFilter) filters[2];
assertEquals(Level.OFF, filter3.getMinLevel());
assertEquals(Level.ALL, filter3.getMaxLevel());
final ListAppender legacyAppender = (ListAppender) ((AppenderAdapter.Adapter) appender).getAppender();
final Logger logger = LogManager.getLogger(PropertiesConfigurationTest.class);
// deny
logger.trace("TRACE");
assertEquals(0, legacyAppender.getEvents().size());
// deny
logger.debug("DEBUG");
assertEquals(0, legacyAppender.getEvents().size());
// accept
logger.info("INFO");
assertEquals(1, legacyAppender.getEvents().size());
// accept
logger.warn("WARN");
assertEquals(2, legacyAppender.getEvents().size());
// accept
logger.error("ERROR");
assertEquals(3, legacyAppender.getEvents().size());
// deny
logger.fatal("FATAL");
assertEquals(3, legacyAppender.getEvents().size());
}
}
@Test
void testConfigureAppenderDoesNotExist() throws Exception {
// Verify that we tolerate a logger which specifies an appender that does not exist.
try (final LoggerContext loggerContext =
TestConfigurator.configure("target/test-classes/LOG4J2-3407.properties")) {
final Configuration configuration = loggerContext.getConfiguration();
assertNotNull(configuration);
}
}
@Test
void testListAppender() throws Exception {
try (final LoggerContext loggerContext =
TestConfigurator.configure("target/test-classes/log4j1-list.properties")) {
final Logger logger = LogManager.getLogger("test");
logger.debug("This is a test of the root logger");
final Configuration configuration = loggerContext.getConfiguration();
final Map<String, Appender> appenders = configuration.getAppenders();
ListAppender eventAppender = null;
ListAppender messageAppender = null;
for (final Map.Entry<String, Appender> entry : appenders.entrySet()) {
if (entry.getKey().equals("list")) {
messageAppender = (ListAppender) ((AppenderAdapter.Adapter) entry.getValue()).getAppender();
} else if (entry.getKey().equals("events")) {
eventAppender = (ListAppender) ((AppenderAdapter.Adapter) entry.getValue()).getAppender();
}
}
assertNotNull(eventAppender, "No Event Appender");
assertNotNull(messageAppender, "No Message Appender");
final List<LoggingEvent> events = eventAppender.getEvents();
assertTrue(events != null && !events.isEmpty(), "No events");
final List<String> messages = messageAppender.getMessages();
assertTrue(messages != null && !messages.isEmpty(), "No messages");
}
}
@Test
void testProperties() throws Exception {
try (final LoggerContext loggerContext =
TestConfigurator.configure("target/test-classes/log4j1-file-1.properties")) {
final Logger logger = LogManager.getLogger("test");
logger.debug("This is a test of the root logger");
File file = new File("target/temp.A1");
assertTrue(file.exists(), "File A1 was not created");
assertTrue(file.length() > 0, "File A1 is empty");
file = new File("target/temp.A2");
assertTrue(file.exists(), "File A2 was not created");
assertTrue(file.length() > 0, "File A2 is empty");
}
}
@Test
void testSystemProperties() throws Exception {
final String testPathLocation = "target";
System.setProperty(TEST_KEY, testPathLocation);
try (final LoggerContext loggerContext =
TestConfigurator.configure("target/test-classes/config-1.2/log4j-FileAppender-with-props.properties")) {
// [LOG4J2-3312] Bridge does not convert properties.
final Configuration configuration = loggerContext.getConfiguration();
assertNotNull(configuration);
final String name = "FILE_APPENDER";
final Appender appender = configuration.getAppender(name);
assertNotNull(appender, name);
assertInstanceOf(FileAppender.class, appender, appender.getClass().getName());
final FileAppender fileAppender = (FileAppender) appender;
// Two slashes because that's how the config file is setup.
assertEquals(testPathLocation + "/hadoop.log", fileAppender.getFileName());
} finally {
System.clearProperty(TEST_KEY);
}
}
@Override
@Test
public void testConsoleEnhancedPatternLayout() throws Exception {
super.testConsoleEnhancedPatternLayout();
}
@Override
@Test
public void testConsoleHtmlLayout() throws Exception {
super.testConsoleHtmlLayout();
}
@Override
@Test
public void testConsolePatternLayout() throws Exception {
super.testConsolePatternLayout();
}
@Override
@Test
public void testConsoleSimpleLayout() throws Exception {
super.testConsoleSimpleLayout();
}
@Override
@Test
public void testFileSimpleLayout() throws Exception {
super.testFileSimpleLayout();
}
@Override
@Test
public void testNullAppender() throws Exception {
super.testNullAppender();
}
@Override
@Test
public void testConsoleCapitalization() throws Exception {
super.testConsoleCapitalization();
}
@Override
@Test
public void testConsoleTtccLayout() throws Exception {
super.testConsoleTtccLayout();
}
@Override
@Test
public void testRollingFileAppender() throws Exception {
super.testRollingFileAppender();
}
@Override
@Test
public void testDailyRollingFileAppender() throws Exception {
super.testDailyRollingFileAppender();
}
@Override
@Test
public void testRollingFileAppenderWithProperties() throws Exception {
super.testRollingFileAppenderWithProperties();
}
@Override
@Test
public void testSystemProperties1() throws Exception {
super.testSystemProperties1();
}
@Override
@Test
public void testSystemProperties2() throws Exception {
super.testSystemProperties2();
}
@Override
@Test
public void testDefaultValues() throws Exception {
super.testDefaultValues();
}
@Override
@Test
public void testMultipleFilters() throws Exception {
super.testMultipleFilters();
}
@Test
void testUntrimmedValues() throws Exception {
try {
final Configuration config = getConfiguration("config-1.2/log4j-untrimmed");
final LoggerConfig rootLogger = config.getRootLogger();
assertEquals(Level.DEBUG, rootLogger.getLevel());
final Appender appender = config.getAppender("Console");
assertInstanceOf(ConsoleAppender.class, appender);
final Layout<? extends Serializable> layout = appender.getLayout();
assertInstanceOf(PatternLayout.class, layout);
assertEquals("%v1Level - %m%n", ((PatternLayout) layout).getConversionPattern());
final Filter filter = ((Filterable) appender).getFilter();
assertInstanceOf(DenyAllFilter.class, filter);
config.start();
config.stop();
} catch (NoClassDefFoundError e) {
fail(e.getMessage());
}
}
@Override
@Test
public void testGlobalThreshold() throws Exception {
super.testGlobalThreshold();
}
@Test
void testEnhancedRollingFileAppender() throws Exception {
try (final LoggerContext ctx = configure("config-1.2/log4j-EnhancedRollingFileAppender")) {
final Configuration configuration = ctx.getConfiguration();
assertNotNull(configuration);
testEnhancedRollingFileAppender(configuration);
}
}
@Override
@Test
public void testLevelRangeFilter() throws Exception {
super.testLevelRangeFilter();
}
}
|
PropertiesConfigurationTest
|
java
|
google__error-prone
|
check_api/src/test/java/com/google/errorprone/util/MoreAnnotationsTest.java
|
{
"start": 1871,
"end": 3060
}
|
class ____ extends BugChecker
implements ClassTreeMatcher, MethodTreeMatcher, VariableTreeMatcher {
private Description process(Tree tree) {
Symbol sym = ASTHelpers.getSymbol(tree);
if (sym == null) {
return NO_MATCH;
}
if (sym.getKind() == ElementKind.ANNOTATION_TYPE) {
return NO_MATCH;
}
String annos =
getAnnotations(sym)
.map(c -> c.type.asElement().getSimpleName().toString())
.collect(joining(", "));
if (annos.isEmpty()) {
return NO_MATCH;
}
return buildDescription(tree).setMessage(annos).build();
}
protected abstract Stream<? extends Compound> getAnnotations(Symbol sym);
@Override
public Description matchClass(ClassTree tree, VisitorState state) {
return process(tree);
}
@Override
public Description matchMethod(MethodTree tree, VisitorState state) {
return process(tree);
}
@Override
public Description matchVariable(VariableTree tree, VisitorState state) {
return process(tree);
}
}
@BugPattern(summary = "A test checker.", severity = ERROR)
public static
|
MoreAnnotationsTester
|
java
|
lettuce-io__lettuce-core
|
src/main/java/io/lettuce/core/dynamic/intercept/MethodInterceptorChain.java
|
{
"start": 757,
"end": 3396
}
|
class ____ {
private final ThreadLocal<PooledMethodInvocation> pool = ThreadLocal.withInitial(PooledMethodInvocation::new);
final MethodInterceptorChain next;
MethodInterceptorChain(MethodInterceptorChain next) {
this.next = next;
}
/**
* Create a {@link MethodInterceptorChain} from {@link MethodInterceptor}s. Chain elements are created eagerly by
* stack-walking {@code interceptors}. Make sure the {@link Iterable} does not exhaust the stack size.
*
* @param interceptors must not be {@code null}.
* @return the {@link MethodInterceptorChain} that is an entry point for method invocations.
*/
public static Head from(Iterable<? extends MethodInterceptor> interceptors) {
return new Head(next(interceptors.iterator()));
}
private static MethodInterceptorChain next(Iterator<? extends MethodInterceptor> iterator) {
return iterator.hasNext() ? createContext(iterator, iterator.next()) : Tail.INSTANCE;
}
private static MethodInterceptorChain createContext(Iterator<? extends MethodInterceptor> iterator,
MethodInterceptor interceptor) {
return new MethodInterceptorContext(next(iterator), interceptor);
}
/**
* Invoke a {@link Method} with its {@code args}.
*
* @param target must not be {@code null}.
* @param method must not be {@code null}.
* @param args must not be {@code null}.
* @return
* @throws Throwable
*/
public Object invoke(Object target, Method method, Object[] args) throws Throwable {
PooledMethodInvocation invocation = getInvocation(target, method, args, next);
try {
// JIT hint
if (next instanceof MethodInterceptorContext) {
return next.proceed(invocation);
}
return next.proceed(invocation);
} finally {
invocation.clear();
}
}
private PooledMethodInvocation getInvocation(Object target, Method method, Object[] args, MethodInterceptorChain next) {
PooledMethodInvocation pooledMethodInvocation = pool.get();
pooledMethodInvocation.initialize(target, method, args, next);
return pooledMethodInvocation;
}
/**
* Proceed to the next {@link MethodInterceptorChain}.
*
* @param invocation must not be {@code null}.
* @return
* @throws Throwable
*/
abstract Object proceed(MethodInvocation invocation) throws Throwable;
/**
* {@link MethodInterceptorChain} using {@link MethodInterceptor} to handle invocations.
*/
static
|
MethodInterceptorChain
|
java
|
apache__dubbo
|
dubbo-config/dubbo-config-spring/src/main/java/org/apache/dubbo/config/spring/util/DubboAnnotationUtils.java
|
{
"start": 3888,
"end": 4947
}
|
class ____ purpose.
interfaceClass = null;
} else if (GenericService.class.isAssignableFrom(interfaceClass)) {
throw new IllegalStateException(
"@Service interfaceClass() cannot be GenericService :" + interfaceClass.getName());
}
// 3. get from annotation element type, ignore GenericService
if (interfaceClass == null
&& defaultInterfaceClass != null
&& !GenericService.class.isAssignableFrom(defaultInterfaceClass)) {
// Find all interfaces from the annotated class
// To resolve an issue : https://github.com/apache/dubbo/issues/3251
Class<?>[] allInterfaces = getAllInterfacesForClass(defaultInterfaceClass);
if (allInterfaces.length > 0) {
interfaceClass = allInterfaces[0];
} else {
interfaceClass = defaultInterfaceClass;
}
}
Assert.notNull(
interfaceClass, "@Service interfaceClass() or interfaceName() or
|
for
|
java
|
apache__logging-log4j2
|
log4j-api-java9/src/main/java/org/apache/logging/log4j/util/LoaderUtil.java
|
{
"start": 996,
"end": 1136
}
|
class ____ {
public static ClassLoader getThreadContextClassLoader() {
return LoaderUtil.class.getClassLoader();
}
}
|
LoaderUtil
|
java
|
spring-projects__spring-framework
|
spring-context/src/main/java/org/springframework/scheduling/annotation/EnableScheduling.java
|
{
"start": 3469,
"end": 3812
}
|
class ____ implement
* {@link SchedulingConfigurer}. This allows access to the underlying
* {@link ScheduledTaskRegistrar} instance. For example, the following example
* demonstrates how to customize the {@link Executor} used to execute scheduled
* tasks:
*
* <pre class="code">
* @Configuration
* @EnableScheduling
* public
|
may
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/boot/models/annotations/internal/AnyDiscriminatorValuesAnnotation.java
|
{
"start": 748,
"end": 1979
}
|
class ____
implements AnyDiscriminatorValues, RepeatableContainer<AnyDiscriminatorValue> {
private org.hibernate.annotations.AnyDiscriminatorValue[] value;
/**
* Used in creating dynamic annotation instances (e.g. from XML)
*/
public AnyDiscriminatorValuesAnnotation(ModelsContext modelContext) {
}
/**
* Used in creating annotation instances from JDK variant
*/
public AnyDiscriminatorValuesAnnotation(
AnyDiscriminatorValues annotation,
ModelsContext modelContext) {
this.value = extractJdkValue(
annotation,
HibernateAnnotations.ANY_DISCRIMINATOR_VALUES,
"value",
modelContext
);
}
/**
* Used in creating annotation instances from Jandex variant
*/
public AnyDiscriminatorValuesAnnotation(Map<String, Object> attributeValues, ModelsContext modelContext) {
this.value = (AnyDiscriminatorValue[]) attributeValues.get( "value" );
}
@Override
public Class<? extends Annotation> annotationType() {
return AnyDiscriminatorValues.class;
}
@Override
public org.hibernate.annotations.AnyDiscriminatorValue[] value() {
return value;
}
public void value(org.hibernate.annotations.AnyDiscriminatorValue[] value) {
this.value = value;
}
}
|
AnyDiscriminatorValuesAnnotation
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/annotations/SoftDelete.java
|
{
"start": 2130,
"end": 4372
}
|
interface ____ {
/**
* (Optional) The name of the column.
* <p/>
* Default depends on the {@linkplain #strategy() strategy} being used.
*
* @see SoftDeleteType#getDefaultColumnName()
*/
String columnName() default "";
/**
* (Optional) The SQL fragment that is used when
* generating the DDL for the column.
* <p>
* The DDL must be written in the native SQL dialect
* of the target database (it is not portable across databases).
*
* @since 7.0
*/
String options() default "";
/**
* (Optional) A comment to be applied to the column.
*
* @since 7.0
*/
String comment() default "";
/**
* The strategy to use for storing/reading values to/from the database.
* <p/>
* The strategy also affects the default {@linkplain #columnName() column name} - see
* {@linkplain SoftDeleteType#getDefaultColumnName}.
*/
SoftDeleteType strategy() default SoftDeleteType.DELETED;
/**
* (Optional) Conversion to apply to determine the appropriate value to
* store in the database. The "domain representation" can be: <dl>
* <dt>{@code true}</dt>
* <dd>Indicates that the row is considered deleted</dd>
*
* <dt>{@code false}</dt>
* <dd>Indicates that the row is considered NOT deleted</dd>
* </dl>
* <p/>
* By default, values are stored as booleans in the database according to
* the {@linkplain Dialect#getPreferredSqlTypeCodeForBoolean() dialect}
* and {@linkplain org.hibernate.cfg.MappingSettings#PREFERRED_BOOLEAN_JDBC_TYPE settings}
*
* @apiNote Only valid when {@linkplain #strategy} is {@linkplain SoftDeleteType#DELETED}
* or {@linkplain SoftDeleteType#ACTIVE}. Will lead to a {@linkplain UnsupportedMappingException}
* when combined with {@linkplain SoftDeleteType#TIMESTAMP}.
*
* @implSpec The specified converter should never return {@code null}
*/
Class<? extends AttributeConverter<Boolean,?>> converter() default UnspecifiedConversion.class;
/**
* Used as the default for {@linkplain SoftDelete#converter()}, indicating that
* {@linkplain Dialect#getPreferredSqlTypeCodeForBoolean() dialect} and
* {@linkplain org.hibernate.cfg.MappingSettings#PREFERRED_BOOLEAN_JDBC_TYPE settings}
* resolution should be used.
*/
|
SoftDelete
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyToLocal.java
|
{
"start": 5980,
"end": 7486
}
|
class ____ extends CopyToLocal {
public static final String NAME = "multiThreadCopy";
private final int expectedThreads;
private final int expectedQueuePoolSize;
private final int expectedCompletedTaskCount;
MultiThreadedCopy(int expectedThreads, int expectedQueuePoolSize,
int expectedCompletedTaskCount) {
this.expectedThreads = expectedThreads;
this.expectedQueuePoolSize = expectedQueuePoolSize;
this.expectedCompletedTaskCount = expectedCompletedTaskCount;
}
@Override
protected void processArguments(LinkedList<PathData> args)
throws IOException {
// Check if the number of threads are same as expected
assertEquals(expectedThreads, getThreadCount());
// Check if the queue pool size of executor is same as expected
assertEquals(expectedQueuePoolSize, getThreadPoolQueueSize());
super.processArguments(args);
if (isMultiThreadNecessary(args)) {
// Once the copy is complete, check following
// 1) number of completed tasks are same as expected
// 2) There are no active tasks in the executor
// 3) Executor has shutdown correctly
ThreadPoolExecutor executor = getExecutor();
assertEquals(expectedCompletedTaskCount,
executor.getCompletedTaskCount());
assertEquals(0, executor.getActiveCount());
assertTrue(executor.isTerminated());
} else {
assert getExecutor() == null;
}
}
}
}
|
MultiThreadedCopy
|
java
|
mapstruct__mapstruct
|
processor/src/main/java/org/mapstruct/ap/internal/processor/DefaultModelElementProcessorContext.java
|
{
"start": 4299,
"end": 6869
}
|
class ____ implements FormattingMessager {
private final Messager delegate;
private boolean isErroneous = false;
private final boolean verbose;
DelegatingMessager(Messager delegate, boolean verbose) {
this.delegate = delegate;
this.verbose = verbose;
}
@Override
public void printMessage(Message msg, Object... args) {
String message = String.format( msg.getDescription(), args );
delegate.printMessage( msg.getDiagnosticKind(), message );
if ( msg.getDiagnosticKind() == Kind.ERROR ) {
isErroneous = true;
}
}
@Override
public void printMessage(Element e, Message msg, Object... args) {
String message = String.format( msg.getDescription(), args );
delegate.printMessage( msg.getDiagnosticKind(), message, e );
if ( msg.getDiagnosticKind() == Kind.ERROR ) {
isErroneous = true;
}
}
@Override
public void printMessage(Element e, AnnotationMirror a, Message msg, Object... args) {
if ( a == null ) {
printMessage( e, msg, args );
}
else {
String message = String.format( msg.getDescription(), args );
delegate.printMessage( msg.getDiagnosticKind(), message, e, a );
if ( msg.getDiagnosticKind() == Kind.ERROR ) {
isErroneous = true;
}
}
}
@Override
public void printMessage(Element e, AnnotationMirror a, AnnotationValue v, Message msg,
Object... args) {
String message = String.format( msg.getDescription(), args );
delegate.printMessage( msg.getDiagnosticKind(), message, e, a, v );
if ( msg.getDiagnosticKind() == Kind.ERROR ) {
isErroneous = true;
}
}
public void note( int level, Message msg, Object... args ) {
if ( verbose ) {
StringBuilder builder = new StringBuilder();
IntStream.range( 0, level ).mapToObj( i -> "-" ).forEach( builder::append );
builder.append( " MapStruct: " ).append( String.format( msg.getDescription(), args ) );
delegate.printMessage( Kind.NOTE, builder.toString() );
}
}
@Override
public boolean isErroneous() {
return isErroneous;
}
}
}
|
DelegatingMessager
|
java
|
google__guice
|
core/test/com/google/inject/example/ClientServiceWithFactories.java
|
{
"start": 850,
"end": 966
}
|
class ____ implements Service {
@Override
public void go() {
// ...
}
}
public static
|
ServiceImpl
|
java
|
hibernate__hibernate-orm
|
hibernate-envers/src/main/java/org/hibernate/envers/internal/entities/mapper/relation/BasicCollectionMapper.java
|
{
"start": 1345,
"end": 8710
}
|
class ____<T extends Collection> extends AbstractCollectionMapper<T> implements PropertyMapper {
protected final MiddleComponentData elementComponentData;
public BasicCollectionMapper(
Configuration configuration,
CommonCollectionMapperData commonCollectionMapperData,
Class<? extends T> collectionClass,
Class<? extends T> proxyClass,
MiddleComponentData elementComponentData,
boolean ordinalInId,
boolean revisionTypeInId) {
super( configuration, commonCollectionMapperData, collectionClass, proxyClass, ordinalInId, revisionTypeInId );
this.elementComponentData = elementComponentData;
}
@Override
protected Initializor<T> getInitializor(
EnversService enversService,
AuditReaderImplementor versionsReader,
Object primaryKey,
Number revision,
boolean removed) {
return new BasicCollectionInitializor<>(
enversService,
versionsReader,
commonCollectionMapperData.getQueryGenerator(),
primaryKey,
revision,
removed,
collectionClass,
elementComponentData
);
}
@Override
protected Collection getNewCollectionContent(PersistentCollection newCollection) {
return (Collection) newCollection;
}
@Override
protected Collection getOldCollectionContent(Serializable oldCollection) {
if ( oldCollection == null ) {
return null;
}
else if ( oldCollection instanceof Map ) {
return ( (Map) oldCollection ).keySet();
}
else {
return (Collection) oldCollection;
}
}
@Override
protected void mapToMapFromObject(
SharedSessionContractImplementor session,
Map<String, Object> idData,
Map<String, Object> data,
Object changed) {
elementComponentData.getComponentMapper().mapToMapFromObject( session, idData, data, changed );
}
@Override
protected Set<Object> buildCollectionChangeSet(Object eventCollection, Collection collection) {
final Set<Object> changeSet = new HashSet<>();
if ( eventCollection != null ) {
for ( Object entry : collection ) {
if ( entry != null ) {
changeSet.add( entry );
}
}
}
return changeSet;
}
@Override
protected List<PersistentCollectionChangeData> mapCollectionChanges(
SharedSessionContractImplementor session,
PersistentCollection newColl,
Serializable oldColl,
Object id) {
final List<PersistentCollectionChangeData> collectionChanges = new ArrayList<>();
final CollectionPersister collectionPersister = resolveCollectionPersister( session, newColl );
// Comparing new and old collection content.
final Collection newCollection = getNewCollectionContent( newColl );
final Collection oldCollection = getOldCollectionContent( oldColl );
final Set<Object> addedElements = buildCollectionChangeSet( newColl, newCollection );
if ( oldColl != null ) {
for ( Object oldEntry : oldCollection ) {
for ( Iterator itor = addedElements.iterator(); itor.hasNext(); ) {
Object newEntry = itor.next();
if ( isCollectionElementSame( session, collectionPersister, oldEntry, newEntry ) ) {
itor.remove();
break;
}
}
}
}
final Set<Object> deleteElements = buildCollectionChangeSet( oldColl, oldCollection );
if ( newColl != null ) {
for ( Object newEntry : newCollection ) {
for ( Iterator itor = deleteElements.iterator(); itor.hasNext(); ) {
Object deletedEntry = itor.next();
if ( isCollectionElementSame( session, collectionPersister, deletedEntry, newEntry ) ) {
itor.remove();
break;
}
}
}
}
addCollectionChanges( session, collectionChanges, addedElements, RevisionType.ADD, id );
addCollectionChanges( session, collectionChanges, deleteElements, RevisionType.DEL, id );
return collectionChanges;
}
private boolean isCollectionElementSame(
SharedSessionContractImplementor session,
CollectionPersister collectionPersister,
Object lhs,
Object rhs) {
final Type elementType = collectionPersister.getElementType();
// If the collection element is an Entity association but the collection does not include the
// REVTYPE column as a part of the primary key, special care must be taken in order to assess
// whether the element actually changed.
//
// Previously we delegated to the element type, which for entity-based collections would be
// EntityType. The EntityType#isSame method results in only a reference equality check. This
// would result in both an ADD/DEL entry trying to be saved for the same entity identifier
// under certain circumstances. While we generally agree with this ORM assessment, this
// leads to HHH-13080 which ultimately is because REVTYPE is not part of the middle entity
// table's primary key.
//
// For 5.x, rather than impose schema changes mid-major release, we're going to explore this
// compromise for now where we're going to treat EntityType-based collections in a slightly
// different way by delegating the equality check to the entity identifier instead. This
// ultimately means that the equality check will leverage both reference and value equality
// since identifiers can be basic or composite types.
//
// In the end for 5.x, this means if an entity is removed from the collection and added
// back with the same identifier, we will treat it as a no-change for now to avoid the
// problem presented in HHH-13080.
//
// todo (6.0) - support REVTYPE as part of the primary key.
// What we actually want to do here is to introduce a legacy compat flag that we check
// when we generate the mapper that influences whether the revisionTypeInId value is
// true or false. When its set to true, we actually will treat all element types,
// regardless if they're entity, embeddables, or basic types equally.
//
// As an example, if a collection is cleared and instances are added back and it just
// so happens that those instances ahve the same entity identifier but aren't reference
// equal to the original collection elements, Envers will then actually treat that as
// a series of DEL followed by ADD operations for those elements, which ultimately is
// the right behavior. But that only works if REVTYPE is part of the primary key so
// that the tuple { owner_id, entity_id, rev, rev_type } differ for the two types of
// revision type operations.
//
// Currently the tuple is { owner_id, entity_id, rev } and so having this special
// treatment is critical to avoid HHH-13080.
//
if ( elementType instanceof EntityType && !revisionTypeInId ) {
// This is a short-circuit to check for reference equality only.
// There is no need to delegate to the identifier if the objects are reference equal.
if ( elementType.isSame( lhs, rhs ) ) {
return true;
}
final EntityPersister entityPersister = session.getFactory()
.getMappingMetamodel()
.getEntityDescriptor( ( (EntityType) elementType ).getAssociatedEntityName() );
final Object lhsId = entityPersister.getIdentifier( lhs, session );
final Object rhsId = entityPersister.getIdentifier( rhs, session );
// Since the two instances aren't reference equal, delegate to identifier now.
return entityPersister.getIdentifierType().isSame( lhsId, rhsId );
}
// for element types that aren't entities (aka embeddables/basic types), use legacy behavior.
return elementType.isSame( lhs, rhs );
}
}
|
BasicCollectionMapper
|
java
|
elastic__elasticsearch
|
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/ChangePasswordRequestBuilderTests.java
|
{
"start": 1238,
"end": 5464
}
|
class ____ extends ESTestCase {
public void testWithCleartextPassword() throws IOException {
final Hasher hasher = getFastStoredHashAlgoForTests();
final String json = """
{
"password": "superlongpassword"
}""";
ChangePasswordRequestBuilder builder = new ChangePasswordRequestBuilder(mock(Client.class));
ChangePasswordRequest request = builder.source(new BytesArray(json.getBytes(StandardCharsets.UTF_8)), XContentType.JSON, hasher)
.request();
assertThat(hasher.verify(new SecureString("superlongpassword".toCharArray()), request.passwordHash()), equalTo(true));
}
public void testWithHashedPassword() throws IOException {
final Hasher hasher = getFastStoredHashAlgoForTests();
final char[] hash = hasher.hash(new SecureString("superlongpassword".toCharArray()));
final String json = Strings.format("""
{
"password_hash": "%s"
}""", new String(hash));
ChangePasswordRequestBuilder builder = new ChangePasswordRequestBuilder(mock(Client.class));
ChangePasswordRequest request = builder.source(new BytesArray(json.getBytes(StandardCharsets.UTF_8)), XContentType.JSON, hasher)
.request();
assertThat(request.passwordHash(), equalTo(hash));
}
public void testWithHashedPasswordWithDifferentAlgo() throws IOException {
final Hasher systemHasher = getFastStoredHashAlgoForTests();
Hasher userHasher = getFastStoredHashAlgoForTests();
while (userHasher.name().equals(systemHasher.name())) {
userHasher = getFastStoredHashAlgoForTests();
}
final char[] hash = userHasher.hash(new SecureString("superlongpassword".toCharArray()));
final String json = Strings.format("""
{"password_hash": "%s"}
""", new String(hash));
ChangePasswordRequestBuilder builder = new ChangePasswordRequestBuilder(mock(Client.class));
final ChangePasswordRequest request = builder.source(
new BytesArray(json.getBytes(StandardCharsets.UTF_8)),
XContentType.JSON,
systemHasher
).request();
assertThat(request.passwordHash(), equalTo(hash));
}
public void testWithHashedPasswordNotHash() {
final Hasher systemHasher = Hasher.valueOf(randomFrom(Hasher.getAvailableAlgoStoredPasswordHash()).toUpperCase(Locale.ROOT));
final char[] hash = randomAlphaOfLength(20).toCharArray();
final String json = Strings.format("""
{
"password_hash": "%s"
}""", new String(hash));
ChangePasswordRequestBuilder builder = new ChangePasswordRequestBuilder(mock(Client.class));
final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> {
builder.source(new BytesArray(json.getBytes(StandardCharsets.UTF_8)), XContentType.JSON, systemHasher).request();
});
assertThat(
e.getMessage(),
containsString("The provided password hash is not a hash or it could not be resolved to a supported hash algorithm.")
);
}
public void testWithPasswordAndHash() throws IOException {
final Hasher hasher = getFastStoredHashAlgoForTests();
final String password = randomAlphaOfLength(14);
final char[] hash = hasher.hash(new SecureString(password.toCharArray()));
final LinkedHashMap<String, Object> fields = new LinkedHashMap<>();
fields.put("password", password);
fields.put("password_hash", new String(hash));
BytesReference json = BytesReference.bytes(
XContentBuilder.builder(XContentType.JSON.xContent()).map(shuffleMap(fields, Collections.emptySet()))
);
ChangePasswordRequestBuilder builder = new ChangePasswordRequestBuilder(mock(Client.class));
final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> {
builder.source(json, XContentType.JSON, hasher).request();
});
assertThat(e.getMessage(), containsString("password_hash has already been set"));
}
}
|
ChangePasswordRequestBuilderTests
|
java
|
apache__camel
|
tooling/maven/camel-package-maven-plugin/src/test/java/org/apache/camel/maven/packaging/MyConfiguration.java
|
{
"start": 958,
"end": 20311
}
|
class ____ {
private String name;
private int durationMaxSeconds;
private int durationMaxIdleSeconds;
private int durationMaxMessages;
private int shutdownTimeout = 300;
private boolean shutdownSuppressLoggingOnTimeout;
private boolean shutdownNowOnTimeout = true;
private boolean shutdownRoutesInReverseOrder = true;
private boolean shutdownLogInflightExchangesOnTimeout = true;
private String fileConfigurations;
private boolean jmxEnabled = true;
private int producerTemplateCacheSize = 1000;
private int consumerTemplateCacheSize = 1000;
private boolean loadTypeConverters = true;
private int logDebugMaxChars;
private boolean streamCachingEnabled;
private String streamCachingSpoolDirectory;
private String streamCachingSpoolCipher;
private long streamCachingSpoolThreshold;
private int streamCachingSpoolUsedHeapMemoryThreshold;
private String streamCachingSpoolUsedHeapMemoryLimit;
private boolean streamCachingAnySpoolRules;
private int streamCachingBufferSize;
private boolean streamCachingRemoveSpoolDirectoryWhenStopping = true;
private boolean streamCachingStatisticsEnabled;
private boolean tracing;
private boolean messageHistory = true;
private boolean logMask;
private boolean logExhaustedMessageBody;
private boolean autoStartup = true;
private boolean allowUseOriginalMessage;
private boolean endpointRuntimeStatisticsEnabled;
private boolean useDataType;
private boolean useBreadcrumb;
private String jmxManagementNamePattern = "#name#";
private boolean useMdcLogging;
private String threadNamePattern;
private String routeFilterIncludePattern;
private String routeFilterExcludePattern;
// getter and setters
// --------------------------------------------------------------
public String getName() {
return name;
}
/**
* Sets the name of the CamelContext.
*/
public void setName(String name) {
this.name = name;
}
public int getDurationMaxSeconds() {
return durationMaxSeconds;
}
/**
* To specify for how long time in seconds to keep running the JVM before automatic terminating the JVM. You can use
* this to run Camel for a short while.
*/
public void setDurationMaxSeconds(int durationMaxSeconds) {
this.durationMaxSeconds = durationMaxSeconds;
}
public int getDurationMaxIdleSeconds() {
return durationMaxIdleSeconds;
}
/**
* To specify for how long time in seconds Camel can be idle before automatic terminating the JVM. You can use this
* to run Camel for a short while.
*/
public void setDurationMaxIdleSeconds(int durationMaxIdleSeconds) {
this.durationMaxIdleSeconds = durationMaxIdleSeconds;
}
public int getDurationMaxMessages() {
return durationMaxMessages;
}
/**
* To specify how many messages to process by Camel before automatic terminating the JVM. You can use this to run
* Camel for a short while.
*/
public void setDurationMaxMessages(int durationMaxMessages) {
this.durationMaxMessages = durationMaxMessages;
}
public int getShutdownTimeout() {
return shutdownTimeout;
}
/**
* Timeout in seconds to graceful shutdown Camel.
*/
public void setShutdownTimeout(int shutdownTimeout) {
this.shutdownTimeout = shutdownTimeout;
}
public boolean isShutdownSuppressLoggingOnTimeout() {
return shutdownSuppressLoggingOnTimeout;
}
/**
* Whether Camel should try to suppress logging during shutdown and timeout was triggered, meaning forced shutdown
* is happening. And during forced shutdown we want to avoid logging errors/warnings et all in the logs as a
* side-effect of the forced timeout. Notice the suppress is a best effort as there may still be some logs coming
* from 3rd party libraries and whatnot, which Camel cannot control. This option is default false.
*/
public void setShutdownSuppressLoggingOnTimeout(boolean shutdownSuppressLoggingOnTimeout) {
this.shutdownSuppressLoggingOnTimeout = shutdownSuppressLoggingOnTimeout;
}
public boolean isShutdownNowOnTimeout() {
return shutdownNowOnTimeout;
}
/**
* Sets whether to force shutdown of all consumers when a timeout occurred and thus not all consumers was shutdown
* within that period. You should have good reasons to set this option to false as it means that the routes keep
* running and is halted abruptly when CamelContext has been shutdown.
*/
public void setShutdownNowOnTimeout(boolean shutdownNowOnTimeout) {
this.shutdownNowOnTimeout = shutdownNowOnTimeout;
}
public boolean isShutdownRoutesInReverseOrder() {
return shutdownRoutesInReverseOrder;
}
/**
* Sets whether routes should be shutdown in reverse or the same order as they where started.
*/
public void setShutdownRoutesInReverseOrder(boolean shutdownRoutesInReverseOrder) {
this.shutdownRoutesInReverseOrder = shutdownRoutesInReverseOrder;
}
public boolean isShutdownLogInflightExchangesOnTimeout() {
return shutdownLogInflightExchangesOnTimeout;
}
/**
* Sets whether to log information about the inflight Exchanges which are still running during a shutdown which
* didn't complete without the given timeout.
*/
public void setShutdownLogInflightExchangesOnTimeout(boolean shutdownLogInflightExchangesOnTimeout) {
this.shutdownLogInflightExchangesOnTimeout = shutdownLogInflightExchangesOnTimeout;
}
public String getFileConfigurations() {
return fileConfigurations;
}
/**
* Directory to load additional configuration files that contains configuration values that takes precedence over
* any other configuration. This can be used to refer to files that may have secret configuration that has been
* mounted on the file system for containers. You can specify a pattern to load from sub directories and a name
* pattern such as /var/app/secret/*.properties, multiple directories can be separated by comma.
*/
public void setFileConfigurations(String fileConfigurations) {
this.fileConfigurations = fileConfigurations;
}
public boolean isJmxEnabled() {
return jmxEnabled;
}
/**
* Enable JMX in your Camel application.
*/
public void setJmxEnabled(boolean jmxEnabled) {
this.jmxEnabled = jmxEnabled;
}
public int getProducerTemplateCacheSize() {
return producerTemplateCacheSize;
}
/**
* Producer template endpoints cache size.
*/
public void setProducerTemplateCacheSize(int producerTemplateCacheSize) {
this.producerTemplateCacheSize = producerTemplateCacheSize;
}
public int getConsumerTemplateCacheSize() {
return consumerTemplateCacheSize;
}
/**
* Consumer template endpoints cache size.
*/
public void setConsumerTemplateCacheSize(int consumerTemplateCacheSize) {
this.consumerTemplateCacheSize = consumerTemplateCacheSize;
}
public boolean isLoadTypeConverters() {
return loadTypeConverters;
}
/**
* Whether to load custom type converters by scanning classpath. This is used for backwards compatibility with Camel
* 2.x. Its recommended to migrate to use fast type converter loading by setting <tt>@Converter(loader = true)</tt>
* on your custom type converter classes.
*/
public void setLoadTypeConverters(boolean loadTypeConverters) {
this.loadTypeConverters = loadTypeConverters;
}
public int getLogDebugMaxChars() {
return logDebugMaxChars;
}
/**
* Is used to limit the maximum length of the logging Camel message bodies. If the message body is longer than the
* limit, the log message is clipped. Use -1 to have unlimited length. Use for example 1000 to log at most 1000
* characters.
*/
public void setLogDebugMaxChars(int logDebugMaxChars) {
this.logDebugMaxChars = logDebugMaxChars;
}
public boolean isStreamCachingEnabled() {
return streamCachingEnabled;
}
/**
* Sets whether stream caching is enabled or not. Default is false.
*/
public void setStreamCachingEnabled(boolean streamCachingEnabled) {
this.streamCachingEnabled = streamCachingEnabled;
}
public String getStreamCachingSpoolDirectory() {
return streamCachingSpoolDirectory;
}
/**
* Sets the stream caching spool (temporary) directory to use for overflow and spooling to disk. If no spool
* directory has been explicit configured, then a temporary directory is created in the java.io.tmpdir directory.
*/
public void setStreamCachingSpoolDirectory(String streamCachingSpoolDirectory) {
this.streamCachingSpoolDirectory = streamCachingSpoolDirectory;
}
public String getStreamCachingSpoolCipher() {
return streamCachingSpoolCipher;
}
/**
* Sets a stream caching cipher name to use when spooling to disk to write with encryption. By default the data is
* not encrypted.
*/
public void setStreamCachingSpoolCipher(String streamCachingSpoolCipher) {
this.streamCachingSpoolCipher = streamCachingSpoolCipher;
}
public long getStreamCachingSpoolThreshold() {
return streamCachingSpoolThreshold;
}
/**
* Stream caching threshold in bytes when overflow to disk is activated. The default threshold is 128kb. Use -1 to
* disable overflow to disk.
*/
public void setStreamCachingSpoolThreshold(long streamCachingSpoolThreshold) {
this.streamCachingSpoolThreshold = streamCachingSpoolThreshold;
}
public int getStreamCachingSpoolUsedHeapMemoryThreshold() {
return streamCachingSpoolUsedHeapMemoryThreshold;
}
/**
* Sets a percentage (1-99) of used heap memory threshold to activate stream caching spooling to disk.
*/
public void setStreamCachingSpoolUsedHeapMemoryThreshold(int streamCachingSpoolUsedHeapMemoryThreshold) {
this.streamCachingSpoolUsedHeapMemoryThreshold = streamCachingSpoolUsedHeapMemoryThreshold;
}
public String getStreamCachingSpoolUsedHeapMemoryLimit() {
return streamCachingSpoolUsedHeapMemoryLimit;
}
/**
* Sets what the upper bounds should be when streamCachingSpoolUsedHeapMemoryThreshold is in use.
*/
public void setStreamCachingSpoolUsedHeapMemoryLimit(String streamCachingSpoolUsedHeapMemoryLimit) {
this.streamCachingSpoolUsedHeapMemoryLimit = streamCachingSpoolUsedHeapMemoryLimit;
}
public boolean isStreamCachingAnySpoolRules() {
return streamCachingAnySpoolRules;
}
/**
* Sets whether if just any of the org.apache.camel.spi.StreamCachingStrategy.SpoolRule rules returns true then
* shouldSpoolCache(long) returns true, to allow spooling to disk. If this option is false, then all the
* org.apache.camel.spi.StreamCachingStrategy.SpoolRule must return true. The default value is false which means
* that all the rules must return true.
*/
public void setStreamCachingAnySpoolRules(boolean streamCachingAnySpoolRules) {
this.streamCachingAnySpoolRules = streamCachingAnySpoolRules;
}
public int getStreamCachingBufferSize() {
return streamCachingBufferSize;
}
/**
* Sets the stream caching buffer size to use when allocating in-memory buffers used for in-memory stream caches.
* The default size is 4096.
*/
public void setStreamCachingBufferSize(int streamCachingBufferSize) {
this.streamCachingBufferSize = streamCachingBufferSize;
}
public boolean isStreamCachingRemoveSpoolDirectoryWhenStopping() {
return streamCachingRemoveSpoolDirectoryWhenStopping;
}
/**
* Whether to remove stream caching temporary directory when stopping. This option is default true.
*/
public void setStreamCachingRemoveSpoolDirectoryWhenStopping(boolean streamCachingRemoveSpoolDirectoryWhenStopping) {
this.streamCachingRemoveSpoolDirectoryWhenStopping = streamCachingRemoveSpoolDirectoryWhenStopping;
}
public boolean isStreamCachingStatisticsEnabled() {
return streamCachingStatisticsEnabled;
}
/**
* Sets whether stream caching statistics is enabled.
*/
public void setStreamCachingStatisticsEnabled(boolean streamCachingStatisticsEnabled) {
this.streamCachingStatisticsEnabled = streamCachingStatisticsEnabled;
}
public boolean isTracing() {
return tracing;
}
/**
* Sets whether tracing is enabled or not. Default is false.
*/
@Deprecated
public void setTracing(boolean tracing) {
this.tracing = tracing;
}
public boolean isMessageHistory() {
return messageHistory;
}
/**
* Sets whether message history is enabled or not. Default is true.
*/
public void setMessageHistory(boolean messageHistory) {
this.messageHistory = messageHistory;
}
public boolean isLogMask() {
return logMask;
}
/**
* Sets whether log mask is enabled or not. Default is false.
*/
public void setLogMask(boolean logMask) {
this.logMask = logMask;
}
public boolean isLogExhaustedMessageBody() {
return logExhaustedMessageBody;
}
/**
* Sets whether to log exhausted message body with message history. Default is false.
*/
public void setLogExhaustedMessageBody(boolean logExhaustedMessageBody) {
this.logExhaustedMessageBody = logExhaustedMessageBody;
}
public boolean isAutoStartup() {
return autoStartup;
}
/**
* Sets whether the object should automatically start when Camel starts. Important: Currently only routes can be
* disabled, as CamelContext's are always started. Note: When setting auto startup false on CamelContext then that
* takes precedence and no routes is started. You would need to start CamelContext explicit using the
* org.apache.camel.CamelContext.start() method, to start the context, and then you would need to start the routes
* manually using CamelContext.getRouteController().startRoute(String). Default is true to always start up.
*/
public void setAutoStartup(boolean autoStartup) {
this.autoStartup = autoStartup;
}
public boolean isAllowUseOriginalMessage() {
return allowUseOriginalMessage;
}
/**
* Sets whether to allow access to the original message from Camel's error handler, or from
* org.apache.camel.spi.UnitOfWork.getOriginalInMessage(). Turning this off can optimize performance, as defensive
* copy of the original message is not needed. Default is false.
*/
public void setAllowUseOriginalMessage(boolean allowUseOriginalMessage) {
this.allowUseOriginalMessage = allowUseOriginalMessage;
}
public boolean isEndpointRuntimeStatisticsEnabled() {
return endpointRuntimeStatisticsEnabled;
}
/**
* Sets whether endpoint runtime statistics is enabled (gathers runtime usage of each incoming and outgoing
* endpoints). The default value is false.
*/
public void setEndpointRuntimeStatisticsEnabled(boolean endpointRuntimeStatisticsEnabled) {
this.endpointRuntimeStatisticsEnabled = endpointRuntimeStatisticsEnabled;
}
public boolean isUseDataType() {
return useDataType;
}
/**
* Whether to enable using data type on Camel messages. Data type are automatic turned on if one ore more routes has
* been explicit configured with input and output types. Otherwise data type is default off.
*/
public void setUseDataType(boolean useDataType) {
this.useDataType = useDataType;
}
public boolean isUseBreadcrumb() {
return useBreadcrumb;
}
/**
* Set whether breadcrumb is enabled. The default value is false.
*/
public void setUseBreadcrumb(boolean useBreadcrumb) {
this.useBreadcrumb = useBreadcrumb;
}
public String getJmxManagementNamePattern() {
return jmxManagementNamePattern;
}
/**
* The naming pattern for creating the CamelContext JMX management name. The default pattern is #name#
*/
public void setJmxManagementNamePattern(String jmxManagementNamePattern) {
this.jmxManagementNamePattern = jmxManagementNamePattern;
}
public boolean isUseMdcLogging() {
return useMdcLogging;
}
/**
* To turn on MDC logging
*/
public void setUseMdcLogging(boolean useMdcLogging) {
this.useMdcLogging = useMdcLogging;
}
public String getThreadNamePattern() {
return threadNamePattern;
}
    /**
     * Sets the thread name pattern used for creating the full thread name. The default pattern is: Camel (#camelId#)
     * thread ##counter# - #name# Where #camelId# is the name of the CamelContext, #counter# is a unique
     * incrementing counter, and #name# is the regular thread name. You can also use #longName# which is the long
     * thread name which can include endpoint parameters etc.
     */
    public void setThreadNamePattern(String threadNamePattern) {
        this.threadNamePattern = threadNamePattern;
    }
    /**
     * The include pattern used for filtering routes (see {@link #setRouteFilterIncludePattern(String)}).
     */
    public String getRouteFilterIncludePattern() {
        return routeFilterIncludePattern;
    }
    /**
     * Used for filtering routes matching the given pattern, which follows the following rules: - Match by route
     * id - Match by route input endpoint uri The matching is using exact match, by wildcard and regular expression as
     * documented by PatternHelper#matchPattern(String, String). For example to only include routes which starts with
     * foo in their route id's, use: include=foo* And to exclude routes which starts from JMS endpoints, use:
     * exclude=jms:* Multiple patterns can be separated by comma, for example to exclude both foo and bar routes,
     * use: exclude=foo*,bar* Exclude takes precedence over include.
     */
    public void setRouteFilterIncludePattern(String include) {
        this.routeFilterIncludePattern = include;
    }
    /**
     * The exclude pattern used for filtering routes (see {@link #setRouteFilterExcludePattern(String)}).
     */
    public String getRouteFilterExcludePattern() {
        return routeFilterExcludePattern;
    }
    /**
     * Used for filtering routes matching the given pattern, which follows the following rules: - Match by route
     * id - Match by route input endpoint uri The matching is using exact match, by wildcard and regular expression as
     * documented by PatternHelper#matchPattern(String, String). For example to only include routes which starts with
     * foo in their route id's, use: include=foo* And to exclude routes which starts from JMS endpoints, use:
     * exclude=jms:* Multiple patterns can be separated by comma, for example to exclude both foo and bar routes,
     * use: exclude=foo*,bar* Exclude takes precedence over include.
     */
    public void setRouteFilterExcludePattern(String exclude) {
        this.routeFilterExcludePattern = exclude;
    }
}
|
MyConfiguration
|
java
|
FasterXML__jackson-core
|
src/test/java/tools/jackson/core/unittest/read/loc/LocationDuringStreamParsingTest.java
|
{
"start": 3484,
"end": 7202
}
|
    // Test fixture: each constant pairs a JSON document with the sequence of
    // expected JsonLocation values (line, column, byte offset) that the parser
    // should report while consuming it. The final LocData of each constant is
    // the location at end of input.
    enum ____
    {
        SIMPLE_VALUE("42", at(1, 1, 0), at(1, 3, 2)),

        SIMPLE_VALUE_WITH_PADDING("   1337  ", at(1, 4, 3), at(1, 10, 9)),

        SIMPLE_VALUE_WITH_MULTIBYTE_CHARS("\"Правда\"",
                at(1, 1, 0),
                at(1, 15, 14) // one byte for each ", two for each Cyrillic char
        ),

        SIMPLE_VALUE_INCLUDING_SURROGATE_PAIR_CHARS("\"a П \uD83D\uDE01\"",
                at(1, 1, 0),
                at(1, 12, 11) // one byte for each ", a and space; two for П, four for smiley emoji
        ),

        ARRAY_IN_ONE_LINE("[\"hello\",42,true]",
                at(1, 1, 0), // [
                at(1, 2, 1), // "hello"
                at(1, 10, 9), // 42
                at(1, 13, 12), // true
                at(1, 17, 16), // ]
                at(1, 18, 17) // end of input
        ),

        ARRAY_IN_ONE_LINE_WITH_PADDING("  [  \"hello\" ,   42   ,   true   ]   ",
                at(1, 3, 2), // [
                at(1, 6, 5), // "hello"
                at(1, 17, 16), // 42
                at(1, 26, 25), // true
                at(1, 33, 32), // ]
                at(1, 37, 36) // end of input
        ),

        ARRAY_IN_MULTIPLE_LINES("[\n" + "    \"hello\",\n" + "    42,\n" + "    true\n" + "]",
                at(1, 1, 0), // [
                at(2, 5, 6), // "hello"
                at(3, 5, 19), // 42
                at(4, 5, 27), // true
                at(5, 1, 32), // ]
                at(5, 2, 33) // end of input
        ),

        ARRAY_IN_MULTIPLE_LINES_WITH_WEIRD_SPACING(" [\n" + "  \"hello\" ,  \n" + " 42   ,\n" + "      true\n" + " ]",
                at(1, 2, 1), // [
                at(2, 3, 5), // "hello"
                at(3, 2, 18), // 42
                at(4, 7, 31), // true
                at(5, 2, 37), // ]
                at(5, 3, 38) // end of input
        ),

        // Same document as ARRAY_IN_MULTIPLE_LINES but with CRLF line endings,
        // so byte offsets are shifted by one per preceding line.
        ARRAY_IN_MULTIPLE_LINES_CRLF("[\r\n" + "    \"hello\",\r\n" + "    42,\r\n" + "    true\r\n" + "]",
                at(1, 1, 0), // [
                at(2, 5, 7), // "hello"
                at(3, 5, 21), // 42
                at(4, 5, 30), // true
                at(5, 1, 36), // ]
                at(5, 2, 37) // end of input
        ),

        OBJECT_IN_ONE_LINE("{\"first\":\"hello\",\"second\":42}",
                at(1, 1, 0), // {
                at(1, 2, 1), // "first"
                at(1, 10, 9), // "hello"
                at(1, 18, 17), // "second"
                at(1, 27, 26), // 42
                at(1, 29, 28), // }
                at(1, 30, 29) // end of input
        ),

        OBJECT_IN_MULTIPLE_LINES("{\n" + "    \"first\":\"hello\",\n" + "    \"second\":42\n" + "}",
                at(1, 1, 0), // {
                at(2, 5, 6), // "first"
                at(2, 13, 14), // "hello"
                at(3, 5, 27), // "second"
                at(3, 14, 36), // 42
                at(4, 1, 39), // }
                at(4, 2, 40) // end of input
        ),

        // CRLF variant of OBJECT_IN_MULTIPLE_LINES; offsets shift accordingly.
        OBJECT_IN_MULTIPLE_LINES_CRLF("{\r\n" + "    \"first\":\"hello\",\r\n" + "    \"second\":42\r\n" + "}",
                at(1, 1, 0), // {
                at(2, 5, 7), // "first"
                at(2, 13, 15), // "hello"
                at(3, 5, 29), // "second"
                at(3, 14, 38), // 42
                at(4, 1, 42), // }
                at(4, 2, 43) // end of input
        ),
        ;

        // The JSON document to parse.
        final String json;
        // Expected locations, in parser-report order; last entry is end of input.
        final List<LocData> locations;

        LocationTestCase(String json, LocData... locations)
        {
            this.json = json;
            this.locations = Arrays.asList(locations);
        }

        // Location expected once the whole document has been consumed.
        LocData getFinalLocation()
        {
            return locations.get(locations.size() - 1);
        }
    }
}
|
LocationTestCase
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
|
{
"start": 8228,
"end": 8539
}
|
class ____ of the codec
* @return the codec object
*/
public CompressionCodec getCodecByClassName(String classname) {
if (codecsByClassName == null) {
return null;
}
return codecsByClassName.get(classname);
}
/**
* Find the relevant compression codec for the codec's canonical
|
name
|
java
|
apache__camel
|
core/camel-core-model/src/main/java/org/apache/camel/model/BeanFactoryDefinition.java
|
{
"start": 4083,
"end": 5567
}
|
class ____ (fully qualified) of the bean
*/
    public void setType(String type) {
        // Stores the fully qualified class name of the bean to create.
        this.type = type;
    }
    /**
     * The name of the custom initialization method, if any.
     */
    public String getInitMethod() {
        return initMethod;
    }
    /**
     * The name of the custom initialization method to invoke after setting bean properties. The method must have no
     * arguments, but may throw any exception.
     */
    public void setInitMethod(String initMethod) {
        this.initMethod = initMethod;
    }
    /**
     * The name of the custom destroy method, if any.
     */
    public String getDestroyMethod() {
        return destroyMethod;
    }
    /**
     * The name of the custom destroy method to invoke on bean shutdown, such as when Camel is shutting down. The
     * method must have no arguments, but may throw any exception.
     */
    public void setDestroyMethod(String destroyMethod) {
        this.destroyMethod = destroyMethod;
    }
    /**
     * The factory method name used when creating the bean, if any.
     */
    public String getFactoryMethod() {
        return factoryMethod;
    }
    /**
     * Name of the method to invoke when creating the bean via a factory bean.
     */
    public void setFactoryMethod(String factoryMethod) {
        this.factoryMethod = factoryMethod;
    }
    /**
     * The id of the factory bean used to create this bean, if any.
     */
    public String getFactoryBean() {
        return factoryBean;
    }
    /**
     * Name of the factory bean (bean id) to use for creating the bean.
     */
    public void setFactoryBean(String factoryBean) {
        this.factoryBean = factoryBean;
    }
    /**
     * The fully qualified builder class name, if the bean is created via a builder.
     */
    public String getBuilderClass() {
        return builderClass;
    }
/**
* Fully qualified
|
name
|
java
|
quarkusio__quarkus
|
independent-projects/arc/processor/src/main/java/io/quarkus/arc/processor/SubclassGenerator.java
|
{
"start": 2666,
"end": 26216
}
|
class ____ extends AbstractGenerator {
private static final DotName JAVA_LANG_THROWABLE = DotNames.create(Throwable.class);
private static final DotName JAVA_LANG_EXCEPTION = DotNames.create(Exception.class);
private static final DotName JAVA_LANG_RUNTIME_EXCEPTION = DotNames.create(RuntimeException.class);
static final String SUBCLASS_SUFFIX = "_Subclass";
static final String MARK_CONSTRUCTED_METHOD_NAME = "arc$markConstructed";
static final String DESTROY_METHOD_NAME = "arc$destroy";
protected static final String FIELD_NAME_PREDESTROYS = "arc$preDestroys";
protected static final String FIELD_NAME_CONSTRUCTED = "arc$constructed";
private final Predicate<DotName> applicationClassPredicate;
private final Set<String> existingClasses;
private final PrivateMembersCollector privateMembers;
private final AnnotationLiteralProcessor annotationLiterals;
    /**
     * Computes the name of the generated subclass: the provider type's package prefix
     * plus the base name plus the {@code _Subclass} suffix.
     */
    static String generatedName(DotName providerTypeName, String baseName) {
        return generatedNameFromTarget(DotNames.packagePrefix(providerTypeName), baseName, SUBCLASS_SUFFIX);
    }
    /**
     * @param annotationLiterals        shared cache for generated annotation literal classes
     * @param applicationClassPredicate decides whether a class belongs to the application (affects output location)
     * @param generateSources           whether to also emit textual sources for debugging
     * @param reflectionRegistration    collector for members that need runtime reflection registration
     * @param existingClasses           names of already-generated classes, used to avoid duplicate generation
     * @param privateMembers            collector of private members accessed via reflection fallback
     */
    SubclassGenerator(AnnotationLiteralProcessor annotationLiterals, Predicate<DotName> applicationClassPredicate,
            boolean generateSources, ReflectionRegistration reflectionRegistration,
            Set<String> existingClasses, PrivateMembersCollector privateMembers) {
        super(generateSources, reflectionRegistration);
        this.applicationClassPredicate = applicationClassPredicate;
        this.annotationLiterals = annotationLiterals;
        this.existingClasses = existingClasses;
        this.privateMembers = privateMembers;
    }
    /**
     * Generates the interceptor/decorator subclass for the given bean, unless a class with
     * the computed name was already generated (then an empty collection is returned).
     */
    Collection<Resource> generate(BeanInfo bean, String beanClassName) {
        Type providerType = bean.getProviderType();
        String baseName = getBeanBaseName(beanClassName);
        String generatedName = generatedName(providerType.name(), baseName);
        if (existingClasses.contains(generatedName)) {
            // Already emitted in a previous round - nothing to do.
            return Collections.emptyList();
        }
        // The subclass is an application class if the bean class is, or if any bound
        // decorator is an application class.
        boolean isApplicationClass = applicationClassPredicate.test(bean.getBeanClass())
                || bean.hasBoundDecoratorWhichIsApplicationClass(applicationClassPredicate);
        ResourceClassOutput classOutput = new ResourceClassOutput(isApplicationClass,
                name -> name.equals(generatedName) ? SpecialType.SUBCLASS : null,
                generateSources);
        Gizmo gizmo = gizmo(classOutput);
        createSubclass(gizmo, bean, generatedName, providerType);
        return classOutput.getResources();
    }
    /**
     * Emits the subclass bytecode: fields holding per-method interception metadata,
     * a constructor that instantiates interceptors/decorators and populates shared
     * chain/binding maps, the {@code arc$markConstructed} hook, and (when pre-destroy
     * interceptors exist) the {@code arc$destroy} method.
     */
    private void createSubclass(Gizmo gizmo, BeanInfo bean, String generatedName, Type providerType) {
        CodeGenInfo codeGenInfo = preprocess(bean);
        InterceptionInfo preDestroyInterception = bean.getLifecycleInterceptors(InterceptionType.PRE_DESTROY);
        // Foo_Subclass extends Foo implements Subclass
        gizmo.class_(generatedName, cc -> {
            cc.extends_(classDescOf(providerType));
            cc.implements_(Subclass.class);

            for (InterceptedDecoratedMethod interceptedDecoratedMethod : codeGenInfo.interceptedDecoratedMethods) {
                if (interceptedDecoratedMethod.interception() != null) {
                    // Each intercepted method has a corresponding InterceptedMethodMetadata field
                    cc.field("arc$" + interceptedDecoratedMethod.index, fc -> {
                        fc.private_();
                        fc.setType(InterceptedMethodMetadata.class);
                    });
                }
            }

            FieldDesc aroundInvokesField;
            if (bean.hasAroundInvokes()) {
                aroundInvokesField = cc.field("aroundInvokes", fc -> {
                    fc.private_();
                    fc.setType(List.class);
                });
            } else {
                aroundInvokesField = null;
            }

            FieldDesc preDestroys;
            if (!preDestroyInterception.isEmpty()) {
                // private final List<InvocationContextImpl.InterceptorInvocation> preDestroys
                preDestroys = cc.field(FIELD_NAME_PREDESTROYS, fc -> {
                    fc.private_();
                    fc.final_();
                    fc.setType(ArrayList.class);
                });
            } else {
                preDestroys = null;
            }

            // `volatile` is perhaps not best, this field is monotonic (once `true`, it never becomes `false` again),
            // so maybe making the `markConstructed` method `synchronized` would be enough (?)
            FieldDesc constructedField = cc.field(FIELD_NAME_CONSTRUCTED, fc -> {
                fc.private_();
                fc.volatile_();
                fc.setType(boolean.class);
            });

            // Initialize maps of shared interceptor chains and interceptor bindings
            Map<List<InterceptorInfo>, String> interceptorChainKeys = new HashMap<>();
            Map<Set<AnnotationInstanceEquivalenceProxy>, String> bindingKeys = new HashMap<>();

            Map<MethodDesc, MethodDesc> forwardingMethods = new HashMap<>();
            for (InterceptedDecoratedMethod interceptedDecoratedMethod : codeGenInfo.interceptedDecoratedMethods()) {
                MethodInfo method = interceptedDecoratedMethod.method();
                MethodDesc forwardDesc = createForwardingMethod(cc, classDescOf(providerType), method, false);
                forwardingMethods.put(methodDescOf(method), forwardDesc);
            }

            cc.constructor(mc -> {
                Optional<Injection> constructorInjection = bean.getConstructorInjection();
                List<ClassDesc> ipTypes = new ArrayList<>();
                List<ParamVar> ipParams = new ArrayList<>();
                if (constructorInjection.isPresent()) {
                    int idx = 0;
                    for (InjectionPointInfo injectionPoint : constructorInjection.get().injectionPoints) {
                        ClassDesc ipType = classDescOf(injectionPoint.getType());
                        ipTypes.add(ipType);
                        ipParams.add(mc.parameter("ip" + idx, ipType));
                        idx++;
                    }
                }
                ParamVar ccParam = mc.parameter("creationalContext", CreationalContext.class);
                List<ParamVar> interceptorParams = new ArrayList<>();
                for (int i = 0; i < codeGenInfo.boundInterceptors().size(); i++) {
                    interceptorParams.add(mc.parameter("interceptor" + i, InjectableInterceptor.class));
                }
                List<ParamVar> decoratorParams = new ArrayList<>();
                for (int i = 0; i < codeGenInfo.boundDecorators().size(); i++) {
                    decoratorParams.add(mc.parameter("decorator" + i, InjectableDecorator.class));
                }

                mc.body(bc -> {
                    // super(fooProvider)
                    bc.invokeSpecial(ConstructorDesc.of(classDescOf(providerType), ipTypes), cc.this_(), ipParams);

                    // First instantiate all interceptor instances, so that they can be shared
                    Map<String, ParamVar> interceptorBeanToParamVar = new HashMap<>();
                    Map<String, LocalVar> interceptorInstanceToLocalVar = new HashMap<>();
                    for (int i = 0; i < codeGenInfo.boundInterceptors().size(); i++) {
                        InterceptorInfo interceptorInfo = codeGenInfo.boundInterceptors().get(i);

                        String id = interceptorInfo.getIdentifier();
                        ParamVar interceptorBean = interceptorParams.get(i);
                        interceptorBeanToParamVar.put(id, interceptorBean);

                        // create instance of each interceptor -> InjectableInterceptor.get()
                        Expr ccChild = bc.invokeStatic(MethodDescs.CREATIONAL_CTX_CHILD, ccParam);
                        LocalVar interceptorInstance = bc.localVar("interceptorInstance_" + id, bc.invokeInterface(
                                MethodDescs.INJECTABLE_REF_PROVIDER_GET, interceptorBean, ccChild));
                        interceptorInstanceToLocalVar.put(id, interceptorInstance);
                    }

                    // If a decorator is associated:
                    // 1. Generate the delegate subclass
                    // 2. Instantiate the decorator instance
                    // 3. Create and set the corresponding field
                    if (!codeGenInfo.boundDecorators().isEmpty()) {
                        Map<String, LocalVar> decoratorToLocalVar = new HashMap<>();
                        for (int j = 0; j < codeGenInfo.boundDecorators().size(); j++) {
                            processDecorator(gizmo, cc, codeGenInfo.boundDecorators().get(j), bean, providerType, bc,
                                    decoratorParams.get(j), decoratorToLocalVar, ccParam, forwardingMethods);
                        }
                    }

                    // PreDestroy interceptors
                    if (preDestroys != null) {
                        LocalVar list = bc.localVar("preDestroysList", bc.new_(ArrayList.class));
                        for (InterceptorInfo interceptor : preDestroyInterception.interceptors) {
                            LocalVar interceptorInstance = interceptorInstanceToLocalVar.get(interceptor.getIdentifier());
                            bc.withList(list).add(bc.invokeStatic(MethodDescs.INTERCEPTOR_INVOCATION_PRE_DESTROY,
                                    interceptorBeanToParamVar.get(interceptor.getIdentifier()), interceptorInstance));
                        }
                        bc.set(cc.this_().field(preDestroys), list);
                    }

                    LocalVar interceptorChainMap = bc.localVar("interceptorChainMap", bc.new_(HashMap.class));
                    LocalVar bindingsMap = bc.localVar("bindingsMap", bc.new_(HashMap.class));

                    // Shared interceptor bindings literals
                    IntegerHolder chainIdx = new IntegerHolder();
                    IntegerHolder bindingIdx = new IntegerHolder();
                    Map<AnnotationInstanceEquivalenceProxy, Expr> bindingsLiterals = new HashMap<>();

                    var bindingsFun = SubclassGenerator.createBindingsFun(bindingIdx, bc, bindingsMap, bindingsLiterals,
                            bean, annotationLiterals);
                    var interceptorChainKeysFun = SubclassGenerator.createInterceptorChainKeysFun(chainIdx, bc,
                            interceptorChainMap, interceptorInstanceToLocalVar, interceptorBeanToParamVar);

                    for (InterceptedDecoratedMethod interceptedDecoratedMethod : codeGenInfo.interceptedDecoratedMethods) {
                        InterceptionInfo interception = interceptedDecoratedMethod.interception();
                        if (interception != null) {
                            // Shared chains/bindings are created lazily, keyed by the chain/bindings identity
                            interceptorChainKeys.computeIfAbsent(interception.interceptors, interceptorChainKeysFun);
                            bindingKeys.computeIfAbsent(interception.bindingsEquivalenceProxies(), bindingsFun);
                        }
                    }

                    // Initialize the "aroundInvokes" field if necessary
                    if (bean.hasAroundInvokes()) {
                        LocalVar aroundInvokes = bc.localVar("aroundInvokes", bc.new_(ArrayList.class));
                        for (MethodInfo method : bean.getAroundInvokes()) {
                            // BiFunction<Object,InvocationContext,Object>
                            Expr lambda = bc.lambda(BiFunction.class, lc -> {
                                ParamVar target = lc.parameter("target", 0);
                                ParamVar ctx = lc.parameter("ctx", 1);
                                lc.body(lbc -> {
                                    boolean isApplicationClass = applicationClassPredicate.test(bean.getBeanClass());
                                    // Check if interceptor method uses InvocationContext or ArcInvocationContext
                                    Class<?> invocationContextClass;
                                    if (method.parameterType(0).name().equals(DotNames.INVOCATION_CONTEXT)) {
                                        invocationContextClass = InvocationContext.class;
                                    } else {
                                        invocationContextClass = ArcInvocationContext.class;
                                    }
                                    if (Modifier.isPrivate(method.flags())) {
                                        // Use reflection fallback
                                        privateMembers.add(isApplicationClass, String.format("Interceptor method %s#%s()",
                                                method.declaringClass().name(), method.name()));
                                        reflectionRegistration.registerMethod(method);
                                        Expr paramTypes = lbc.newArray(Class.class, Const.of(invocationContextClass));
                                        Expr argValues = lbc.newArray(Object.class, ctx);
                                        lbc.return_(lbc.invokeStatic(MethodDescs.REFLECTIONS_INVOKE_METHOD,
                                                Const.of(classDescOf(method.declaringClass())), Const.of(method.name()),
                                                paramTypes, target, argValues));
                                    } else {
                                        lbc.return_(lbc.invokeVirtual(methodDescOf(method),
                                                lbc.cast(target, classDescOf(method.declaringClass())), ctx));
                                    }
                                });
                            });
                            bc.withList(aroundInvokes).add(lambda);
                        }
                        bc.set(cc.this_().field(aroundInvokesField), aroundInvokes);
                    }

                    // Split initialization of InterceptedMethodMetadata into multiple methods
                    for (MethodGroup group : codeGenInfo.methodGroups()) {
                        MethodDesc desc = ClassMethodDesc.of(cc.type(), "arc$initMetadata" + group.id(),
                                void.class, Map.class, Map.class);
                        bc.invokeVirtual(desc, cc.this_(), interceptorChainMap, bindingsMap);
                    }

                    bc.return_();
                });
            });

            for (MethodGroup group : codeGenInfo.methodGroups()) {
                generateInitMetadata(cc, bean, providerType, aroundInvokesField, constructedField, group,
                        forwardingMethods, interceptorChainKeys, bindingKeys);
            }

            cc.method(MARK_CONSTRUCTED_METHOD_NAME, mc -> {
                mc.body(bc -> {
                    bc.set(cc.this_().field(constructedField), Const.of(true));
                    bc.return_();
                });
            });

            if (preDestroys != null) {
                cc.method(DESTROY_METHOD_NAME, mc -> {
                    ParamVar forward = mc.parameter("forward", Runnable.class);
                    mc.body(b0 -> {
                        b0.try_(tc -> {
                            tc.body(b1 -> {
                                Expr bindings = b1.setOf(preDestroyInterception.bindings
                                        .stream()
                                        .map(binding -> {
                                            ClassInfo bindingClass = bean.getDeployment().getInterceptorBinding(binding.name());
                                            return annotationLiterals.create(b1, bindingClass, binding);
                                        })
                                        .toList());
                                Expr invocationContext = b1.invokeStatic(MethodDescs.INVOCATION_CONTEXTS_PRE_DESTROY,
                                        cc.this_(), cc.this_().field(preDestroys), bindings, forward);
                                b1.invokeInterface(MethodDescs.INVOCATION_CONTEXT_PROCEED, invocationContext);
                                b1.return_();
                            });
                            tc.catch_(Exception.class, "e", (b1, e) -> {
                                b1.throw_(b1.new_(RuntimeException.class, Const.of("Error destroying subclass"), e));
                            });
                        });
                    });
                });
            }
        });
    }
private void generateInitMetadata(ClassCreator cc, BeanInfo bean, Type providerType,
FieldDesc aroundInvokesField, FieldDesc constructedField, MethodGroup group,
Map<MethodDesc, MethodDesc> forwardingMethods, Map<List<InterceptorInfo>, String> interceptorChainKeys,
Map<Set<AnnotationInstanceEquivalenceProxy>, String> bindingKeys) {
cc.method("arc$initMetadata" + group.id(), mc -> {
mc.private_();
mc.returning(void.class);
ParamVar interceptorChainMapParam = mc.parameter("interceptorChainMap", Map.class);
ParamVar bindingsMapParam = mc.parameter("bindingsMap", Map.class);
mc.body(bc -> {
// to avoid repeatedly looking for the exact same thing in the maps
Map<String, LocalVar> chains = new HashMap<>();
Map<String, LocalVar> bindings = new HashMap<>();
for (InterceptedDecoratedMethod interceptedDecoratedMethod : group.interceptedDecoratedMethods()) {
MethodInfo method = interceptedDecoratedMethod.method();
MethodDesc methodDesc = methodDescOf(method);
InterceptionInfo interception = interceptedDecoratedMethod.interception();
DecorationInfo decoration = interceptedDecoratedMethod.decoration();
MethodDesc forwardDesc = forwardingMethods.get(methodDesc);
List<Type> parameters = method.parameterTypes();
if (interception != null) {
// 1. Interceptor chain
String interceptorChainKey = interceptorChainKeys.get(interception.interceptors);
LocalVar chainArg = chains.computeIfAbsent(interceptorChainKey, ignored -> {
return bc.localVar("interceptorChain", bc.withMap(interceptorChainMapParam)
.get(Const.of(interceptorChainKey)));
});
// 2. Method method = Reflections.findMethod(org.jboss.weld.arc.test.interceptors.SimpleBean.class,"foo",java.lang.String.class)
Expr[] args = new Expr[3];
args[0] = Const.of(classDescOf(providerType));
args[1] = Const.of(method.name());
if (!parameters.isEmpty()) {
LocalVar paramTypes = bc.localVar("paramTypes",
bc.newEmptyArray(Class.class, parameters.size()));
for (int i = 0; i < parameters.size(); i++) {
bc.set(paramTypes.elem(i), Const.of(classDescOf(parameters.get(i))));
}
args[2] = paramTypes;
} else {
args[2] = Expr.staticField(FieldDescs.ANNOTATION_LITERALS_EMPTY_CLASS_ARRAY);
}
Expr methodArg = bc.invokeStatic(MethodDescs.REFLECTIONS_FIND_METHOD, args);
// 3. Interceptor bindings
String bindingKey = bindingKeys.get(interception.bindingsEquivalenceProxies());
LocalVar bindingsArg = bindings.computeIfAbsent(bindingKey, ignored -> {
return bc.localVar("bindings", bc.withMap(bindingsMapParam)
.get(Const.of(bindingKey)));
});
DecoratorMethod decoratorMethod = decoration != null ? decoration.firstDecoratorMethod() : null;
FieldVar decorator;
if (decoratorMethod != null) {
decorator = cc.this_().field(FieldDesc.of(cc.type(),
decoratorMethod.decorator.getIdentifier(), Object.class));
} else {
decorator = null;
}
// Instantiate the forwarding function
// BiFunction<Object, InvocationContext, Object> forward = (target, ctx) -> target.foo$$superforward((java.lang.String)ctx.getParameters()[0])
LocalVar forwardFun = bc.localVar("forwardFun", bc.lambda(BiFunction.class, lc -> {
Var capturedDecorator = decorator != null ? lc.capture(decorator) : null;
ParamVar target = lc.parameter("target", 0);
ParamVar ctx = lc.parameter("ctx", 1);
lc.body(lbc -> {
MethodDesc desc;
Expr instance;
if (decoratorMethod == null) {
desc = forwardDesc;
instance = target;
} else {
// If a decorator is bound then invoke the method upon the decorator instance instead of the generated forwarding method
// We need to use the decorator method in order to support not visible or generic decorators
desc = methodDescOf(decoratorMethod.method);
instance = capturedDecorator;
}
Expr[] superArgs;
if (parameters.isEmpty()) {
superArgs = new Expr[0];
} else {
Expr ctxArgs = lbc.localVar("args", lbc.invokeInterface(
MethodDescs.INVOCATION_CONTEXT_GET_PARAMETERS, ctx));
superArgs = new Expr[parameters.size()];
for (int i = 0; i < parameters.size(); i++) {
superArgs[i] = ctxArgs.elem(i);
}
}
Expr superResult = decoratorMethod == null
? lbc.invokeVirtual(desc, instance, superArgs)
: lbc.invokeInterface(desc, instance, superArgs);
lbc.return_(superResult.isVoid() ? Const.ofNull(Object.class) : superResult);
});
}));
if (bean.hasAroundInvokes()) {
LocalVar finalForwardFun = forwardFun;
// Wrap the forwarding function with a function that calls around invoke methods declared in a hierarchy of the target
|
SubclassGenerator
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/issue_1300/Issue_for_zuojian.java
|
{
"start": 181,
"end": 870
}
|
class ____ extends TestCase {
public void test_for_issue() throws Exception {
JSON.DEFFAULT_DATE_FORMAT = "yyyyMMddHHmmssSSSZ";
String json = "{\"value\":\"20180131022733000-0800\"}";
JSON.parseObject(json, Model.class);
JSON.DEFFAULT_DATE_FORMAT = "yyyy-MM-dd HH:mm:ss";
}
public void test_for_issue_1() throws Exception {
JSON.DEFFAULT_DATE_FORMAT = "yyyyMMddHHmmssSSSZ";
String json = "{\"value\":\"20180131022733000-0800\"}";
JSONObject object = JSON.parseObject(json);
object.getObject("value", Date.class);
JSON.DEFFAULT_DATE_FORMAT = "yyyy-MM-dd HH:mm:ss";
}
public static
|
Issue_for_zuojian
|
java
|
apache__camel
|
dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/AzureStorageBlobComponentBuilderFactory.java
|
{
"start": 28774,
"end": 35552
}
|
    /**
     * Concrete builder implementation for the Azure Storage Blob component.
     * NOTE(review): this looks like generated code (component DSL) - prefer
     * regenerating over hand-editing; kept byte-identical here.
     */
    class ____
            extends AbstractComponentBuilder<BlobComponent>
            implements AzureStorageBlobComponentBuilder {
        @Override
        protected BlobComponent buildConcreteComponent() {
            return new BlobComponent();
        }
        // Lazily creates the nested configuration object so option setters
        // always have a target, even before the component is configured.
        private org.apache.camel.component.azure.storage.blob.BlobConfiguration getOrCreateConfiguration(BlobComponent component) {
            if (component.getConfiguration() == null) {
                component.setConfiguration(new org.apache.camel.component.azure.storage.blob.BlobConfiguration());
            }
            return component.getConfiguration();
        }
        // Maps a property name to the corresponding typed setter; returns false
        // for unknown names so the caller can report/ignore them.
        @Override
        protected boolean setPropertyOnComponent(
                Component component,
                String name,
                Object value) {
            switch (name) {
            case "blobName": getOrCreateConfiguration((BlobComponent) component).setBlobName((java.lang.String) value); return true;
            case "blobOffset": getOrCreateConfiguration((BlobComponent) component).setBlobOffset((long) value); return true;
            case "blobType": getOrCreateConfiguration((BlobComponent) component).setBlobType((org.apache.camel.component.azure.storage.blob.BlobType) value); return true;
            case "closeStreamAfterRead": getOrCreateConfiguration((BlobComponent) component).setCloseStreamAfterRead((boolean) value); return true;
            case "configuration": ((BlobComponent) component).setConfiguration((org.apache.camel.component.azure.storage.blob.BlobConfiguration) value); return true;
            case "credentials": getOrCreateConfiguration((BlobComponent) component).setCredentials((com.azure.storage.common.StorageSharedKeyCredential) value); return true;
            case "credentialType": getOrCreateConfiguration((BlobComponent) component).setCredentialType((org.apache.camel.component.azure.storage.blob.CredentialType) value); return true;
            case "dataCount": getOrCreateConfiguration((BlobComponent) component).setDataCount((java.lang.Long) value); return true;
            case "fileDir": getOrCreateConfiguration((BlobComponent) component).setFileDir((java.lang.String) value); return true;
            case "leaseBlob": getOrCreateConfiguration((BlobComponent) component).setLeaseBlob((boolean) value); return true;
            case "leaseDurationInSeconds": getOrCreateConfiguration((BlobComponent) component).setLeaseDurationInSeconds((java.lang.Integer) value); return true;
            case "maxResultsPerPage": getOrCreateConfiguration((BlobComponent) component).setMaxResultsPerPage((java.lang.Integer) value); return true;
            case "maxRetryRequests": getOrCreateConfiguration((BlobComponent) component).setMaxRetryRequests((int) value); return true;
            case "prefix": getOrCreateConfiguration((BlobComponent) component).setPrefix((java.lang.String) value); return true;
            case "regex": getOrCreateConfiguration((BlobComponent) component).setRegex((java.lang.String) value); return true;
            case "sasToken": getOrCreateConfiguration((BlobComponent) component).setSasToken((java.lang.String) value); return true;
            case "serviceClient": getOrCreateConfiguration((BlobComponent) component).setServiceClient((com.azure.storage.blob.BlobServiceClient) value); return true;
            case "timeout": getOrCreateConfiguration((BlobComponent) component).setTimeout((java.time.Duration) value); return true;
            case "bridgeErrorHandler": ((BlobComponent) component).setBridgeErrorHandler((boolean) value); return true;
            case "blobSequenceNumber": getOrCreateConfiguration((BlobComponent) component).setBlobSequenceNumber((java.lang.Long) value); return true;
            case "blockListType": getOrCreateConfiguration((BlobComponent) component).setBlockListType((com.azure.storage.blob.models.BlockListType) value); return true;
            case "changeFeedContext": getOrCreateConfiguration((BlobComponent) component).setChangeFeedContext((com.azure.core.util.Context) value); return true;
            case "changeFeedEndTime": getOrCreateConfiguration((BlobComponent) component).setChangeFeedEndTime((java.time.OffsetDateTime) value); return true;
            case "changeFeedStartTime": getOrCreateConfiguration((BlobComponent) component).setChangeFeedStartTime((java.time.OffsetDateTime) value); return true;
            case "closeStreamAfterWrite": getOrCreateConfiguration((BlobComponent) component).setCloseStreamAfterWrite((boolean) value); return true;
            case "commitBlockListLater": getOrCreateConfiguration((BlobComponent) component).setCommitBlockListLater((boolean) value); return true;
            case "createAppendBlob": getOrCreateConfiguration((BlobComponent) component).setCreateAppendBlob((boolean) value); return true;
            case "createPageBlob": getOrCreateConfiguration((BlobComponent) component).setCreatePageBlob((boolean) value); return true;
            case "downloadLinkExpiration": getOrCreateConfiguration((BlobComponent) component).setDownloadLinkExpiration((java.lang.Long) value); return true;
            case "lazyStartProducer": ((BlobComponent) component).setLazyStartProducer((boolean) value); return true;
            case "operation": getOrCreateConfiguration((BlobComponent) component).setOperation((org.apache.camel.component.azure.storage.blob.BlobOperationsDefinition) value); return true;
            case "pageBlobSize": getOrCreateConfiguration((BlobComponent) component).setPageBlobSize((java.lang.Long) value); return true;
            case "autowiredEnabled": ((BlobComponent) component).setAutowiredEnabled((boolean) value); return true;
            case "healthCheckConsumerEnabled": ((BlobComponent) component).setHealthCheckConsumerEnabled((boolean) value); return true;
            case "healthCheckProducerEnabled": ((BlobComponent) component).setHealthCheckProducerEnabled((boolean) value); return true;
            case "accessKey": getOrCreateConfiguration((BlobComponent) component).setAccessKey((java.lang.String) value); return true;
            case "azureClientId": getOrCreateConfiguration((BlobComponent) component).setAzureClientId((java.lang.String) value); return true;
            case "azureClientSecret": getOrCreateConfiguration((BlobComponent) component).setAzureClientSecret((java.lang.String) value); return true;
            case "azureTenantId": getOrCreateConfiguration((BlobComponent) component).setAzureTenantId((java.lang.String) value); return true;
            case "sourceBlobAccessKey": getOrCreateConfiguration((BlobComponent) component).setSourceBlobAccessKey((java.lang.String) value); return true;
            default: return false;
            }
        }
    }
}
|
AzureStorageBlobComponentBuilderImpl
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/context/TestExecutionListenersTests.java
|
{
"start": 12559,
"end": 12926
}
|
    interface ____ {

        // Overridable listener list; aliased onto @TestExecutionListeners.
        @AliasFor(annotation = TestExecutionListeners.class)
        Class<? extends TestExecutionListener>[] listeners() default QuuxTestExecutionListener.class;

        // Whether listeners from superclasses are inherited; aliased onto @TestExecutionListeners.
        @AliasFor(annotation = TestExecutionListeners.class)
        boolean inheritListeners() default true;
    }
@TestExecutionListeners
@Retention(RetentionPolicy.RUNTIME)
@
|
MetaInheritedListenersWithOverrides
|
java
|
spring-projects__spring-framework
|
spring-core/src/main/java/org/springframework/core/io/support/SpringFactoriesLoader.java
|
{
"start": 12811,
"end": 13393
}
|
class ____.
* @param classLoader the ClassLoader to use for loading resources; can be
* {@code null} to use the default
* @return a {@link SpringFactoriesLoader} instance
* @since 6.0
* @see #forDefaultResourceLocation()
*/
	public static SpringFactoriesLoader forDefaultResourceLocation(@Nullable ClassLoader classLoader) {
		// Delegates to the general-purpose factory using the default
		// FACTORIES_RESOURCE_LOCATION constant.
		return forResourceLocation(FACTORIES_RESOURCE_LOCATION, classLoader);
	}
/**
* Create a {@link SpringFactoriesLoader} instance that will load and
* instantiate the factory implementations from the given location,
* using the default
|
loader
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemConcurrencyLive.java
|
{
"start": 6937,
"end": 7249
}
|
class ____ extends FileSystemTask<Integer> {
ListTask(FileSystem fs, Path p) {
super(fs, p);
}
public Integer call() throws Exception {
FileSystem fs = getFileSystem();
Path p = getFilePath();
FileStatus[] files = fs.listStatus(p);
return files.length;
}
}
}
|
ListTask
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/inheritance/version/InheritanceImplicitVersionUpdateTest.java
|
{
"start": 4380,
"end": 4657
}
|
	// Base mapped superclass/entity carrying the generated id and the
	// optimistic-locking version column shared by the hierarchy.
	class ____ {
		@Id
		@GeneratedValue
		private Long id;

		// Optimistic-locking version; incremented implicitly by the provider.
		@Version
		private Long version;

		public Long getVersion() {
			return version;
		}

		public void setVersion(Long version) {
			this.version = version;
		}
	}
@Entity( name = "Employee" )
public static
|
ObjectWithUnid
|
java
|
google__dagger
|
dagger-android/main/java/dagger/android/DaggerService.java
|
{
"start": 783,
"end": 923
}
|
class ____ extends Service {

  @Override
  public void onCreate() {
    // Inject before calling super.onCreate() so injected fields are already
    // available to any lifecycle logic the superclass (or subclass) runs.
    AndroidInjection.inject(this);
    super.onCreate();
  }
}
|
DaggerService
|
java
|
apache__camel
|
core/camel-main/src/test/java/org/apache/camel/main/MainVetoTest.java
|
{
"start": 2051,
"end": 2344
}
|
    // Lifecycle strategy that always vetoes context startup; the `false` flag
    // indicates the context should not be attempted to start again (rethrow).
    class ____ extends LifecycleStrategySupport {

        @Override
        public void onContextStarting(CamelContext context) throws VetoCamelContextStartException {
            throw new VetoCamelContextStartException("We do not like this route", context, false);
        }
    }
}
|
MyVetoLifecycle
|
java
|
quarkusio__quarkus
|
independent-projects/qute/core/src/main/java/io/quarkus/qute/trace/BaseEvent.java
|
{
"start": 76,
"end": 220
}
|
class ____ trace events related to template rendering.
* <p>
* Captures the engine instance and tracks execution duration.
*/
public abstract
|
for
|
java
|
eclipse-vertx__vert.x
|
vertx-core/src/test/java/io/vertx/tests/http/Http2ServerTest.java
|
{
"start": 4857,
"end": 5564
}
|
class ____ {
public final Channel channel;
public final ChannelHandlerContext context;
public final Http2Connection connection;
public final Http2ConnectionEncoder encoder;
public final Http2ConnectionDecoder decoder;
public Connection(ChannelHandlerContext context, Http2Connection connection, Http2ConnectionEncoder encoder, Http2ConnectionDecoder decoder) {
this.channel = context.channel();
this.context = context;
this.connection = connection;
this.encoder = encoder;
this.decoder = decoder;
}
public int nextStreamId() {
return connection.local().incrementAndGetNextStreamId();
}
}
|
Connection
|
java
|
greenrobot__greendao
|
tests/DaoTestPerformance/src/androidTest/java/org/greenrobot/greendao/performance/target/LongHashMapJDBM.java
|
{
"start": 1942,
"end": 3021
}
|
class ____<V> {
Entry<V> next;
long key;
V value;
public boolean equals(Object object) {
if (this == object) {
return true;
}
if (object instanceof Entry) {
Entry<?> entry = (Entry) object;
return ( key == entry.key)
&& (value == null ? entry.value == null : value
.equals(entry.value));
}
return false;
}
public int hashCode() {
return (int)(key)
^ (value == null ? 0 : value.hashCode());
}
public String toString() {
return key + "=" + value;
}
Entry(long theKey) {
this.key = theKey;
this.value = null;
}
Entry(long theKey, V theValue) {
this.key = theKey;
this.value = theValue;
//origKeyHash = (int)(theKey ^ (theKey >>> 32));
}
}
|
Entry
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/util/profiler/ProfilingService.java
|
{
"start": 2007,
"end": 8242
}
|
class ____ implements Closeable {
protected static final Logger LOG = LoggerFactory.getLogger(ProfilingService.class);
private static final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd_HH_mm_ss");
private static volatile ProfilingService instance;
private final Map<String, ArrayDeque<ProfilingInfo>> profilingMap;
private final String profilingResultDir;
private final int historySizeLimit;
private final ScheduledExecutorService scheduledExecutor;
private ProfilingFuture profilingFuture;
private ProfilingService(Configuration configs) {
this.profilingMap = new HashMap<>();
this.historySizeLimit = configs.get(RestOptions.MAX_PROFILING_HISTORY_SIZE);
Preconditions.checkArgument(
historySizeLimit > 0,
String.format(
"Configured %s must be positive.",
RestOptions.MAX_PROFILING_HISTORY_SIZE.key()));
this.profilingResultDir = configs.get(RestOptions.PROFILING_RESULT_DIR);
this.scheduledExecutor =
Executors.newSingleThreadScheduledExecutor(
new ExecutorThreadFactory.Builder()
.setPoolName("flink-profiling-service")
.build());
}
public static ProfilingService getInstance(Configuration configs) {
if (instance == null) {
synchronized (ProfilingService.class) {
if (instance == null) {
instance = new ProfilingService(configs);
}
}
}
return instance;
}
public CompletableFuture<ProfilingInfo> requestProfiling(
String resourceID, long duration, ProfilingInfo.ProfilingMode mode) {
if (profilingFuture != null && !profilingFuture.isDone()) {
return FutureUtils.completedExceptionally(
new IllegalStateException(resourceID + " is still under profiling."));
}
ProfilingInfo profilingInfo = ProfilingInfo.create(duration, mode);
profilingMap.putIfAbsent(resourceID, new ArrayDeque<>());
profilingMap.get(resourceID).addFirst(profilingInfo);
AsyncProfiler profiler = AsyncProfiler.getInstance();
try {
String response =
profiler.execute(
ProfilerConstants.COMMAND_START.msg
+ profilingInfo.getProfilingMode().getCode());
if (StringUtils.isNullOrWhitespaceOnly(response)
|| !response.startsWith(ProfilerConstants.PROFILER_STARTED_SUCCESS.msg)) {
return CompletableFuture.completedFuture(
profilingInfo.fail("Start profiler failed. " + response));
}
} catch (Exception e) {
return CompletableFuture.completedFuture(
profilingInfo.fail("Start profiler failed. " + e));
}
this.profilingFuture = new ProfilingFuture(duration, () -> stopProfiling(resourceID));
return CompletableFuture.completedFuture(profilingInfo);
}
private void stopProfiling(String resourceID) {
AsyncProfiler profiler = AsyncProfiler.getInstance();
ArrayDeque<ProfilingInfo> profilingList = profilingMap.get(resourceID);
Preconditions.checkState(!CollectionUtil.isNullOrEmpty(profilingList));
ProfilingInfo info = profilingList.getFirst();
try {
String fileName = formatOutputFileName(resourceID, info);
String outputPath = new File(profilingResultDir, fileName).getPath();
String response = profiler.execute(ProfilerConstants.COMMAND_STOP.msg + outputPath);
if (!StringUtils.isNullOrWhitespaceOnly(response)
&& response.startsWith(ProfilerConstants.PROFILER_STOPPED_SUCCESS.msg)) {
info.success(fileName);
} else {
info.fail("Stop profiler failed. " + response);
}
rollingClearing(profilingList);
} catch (Throwable e) {
info.fail("Stop profiler failed. " + e);
}
}
private void rollingClearing(ArrayDeque<ProfilingInfo> profilingList) {
while (profilingList.size() > historySizeLimit) {
ProfilingInfo info = profilingList.pollLast();
String outputFile = info != null ? info.getOutputFile() : "";
if (StringUtils.isNullOrWhitespaceOnly(outputFile)) {
continue;
}
try {
Files.deleteIfExists(Paths.get(profilingResultDir, outputFile));
} catch (Exception e) {
LOG.error(String.format("Clearing file for %s failed. Skipped.", info), e);
}
}
}
private String formatOutputFileName(String resourceID, ProfilingInfo info) {
return String.format(
"%s_%s_%s.html", resourceID, info.getProfilingMode(), sdf.format(new Date()));
}
@Override
public void close() throws IOException {
try {
if (profilingFuture != null && !profilingFuture.isDone()) {
profilingFuture.cancel();
}
if (!scheduledExecutor.isShutdown()) {
scheduledExecutor.shutdownNow();
}
} catch (Exception e) {
LOG.error("Exception thrown during stopping profiling service. ", e);
} finally {
instance = null;
}
}
public CompletableFuture<Collection<ProfilingInfo>> getProfilingList(String resourceID) {
return CompletableFuture.completedFuture(
profilingMap.getOrDefault(resourceID, new ArrayDeque<>()));
}
public String getProfilingResultDir() {
return profilingResultDir;
}
@VisibleForTesting
ArrayDeque<ProfilingInfo> getProfilingListForTest(String resourceID) {
return profilingMap.getOrDefault(resourceID, new ArrayDeque<>());
}
@VisibleForTesting
int getHistorySizeLimit() {
return historySizeLimit;
}
@VisibleForTesting
ProfilingFuture getProfilingFuture() {
return profilingFuture;
}
|
ProfilingService
|
java
|
apache__dubbo
|
dubbo-common/src/test/java/org/apache/dubbo/rpc/model/ScopeModelUtilTest.java
|
{
"start": 4524,
"end": 4596
}
|
interface ____ {}
@SPI(scope = ExtensionScope.APPLICATION)
|
SPIDemo1
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/impl/CommitContext.java
|
{
"start": 3150,
"end": 13857
}
|
class ____ implements Closeable {
private static final Logger LOG = LoggerFactory.getLogger(
CommitContext.class);
/**
* The actual commit operations.
*/
private final CommitOperations commitOperations;
/**
* Job Context.
*/
private final JobContext jobContext;
/**
* Serializer pool.
*/
private final WeakReferenceThreadMap<JsonSerialization<PendingSet>>
pendingSetSerializer =
new WeakReferenceThreadMap<>((k) -> PendingSet.serializer(), null);
private final WeakReferenceThreadMap<JsonSerialization<SinglePendingCommit>>
singleCommitSerializer =
new WeakReferenceThreadMap<>((k) -> SinglePendingCommit.serializer(), null);
/**
* Submitter for per task operations, e.g loading manifests.
*/
private PoolSubmitter outerSubmitter;
/**
* Submitter for operations within the tasks,
* such as POSTing the final commit operations.
*/
private PoolSubmitter innerSubmitter;
/**
* Job Configuration.
*/
private final Configuration conf;
/**
* Job ID.
*/
private final String jobId;
/**
* Audit context; will be reset when this is closed.
*/
private final AuditContextUpdater auditContextUpdater;
/**
* Number of committer threads.
*/
private final int committerThreads;
/**
* Should IOStatistics be collected by the committer?
*/
private final boolean collectIOStatistics;
/**
* IOStatisticsContext to switch to in all threads
* taking part in the commit operation.
* This ensures that the IOStatistics collected in the
* worker threads will be aggregated into the total statistics
* of the thread calling the committer commit/abort methods.
*/
private final IOStatisticsContext ioStatisticsContext;
/**
* Create.
* @param commitOperations commit callbacks
* @param jobContext job context
* @param committerThreads number of commit threads
* @param ioStatisticsContext IOStatistics context of current thread
*/
public CommitContext(
final CommitOperations commitOperations,
final JobContext jobContext,
final int committerThreads,
final IOStatisticsContext ioStatisticsContext) {
this.commitOperations = commitOperations;
this.jobContext = jobContext;
this.conf = jobContext.getConfiguration();
JobID contextJobID = jobContext.getJobID();
// either the job ID or make one up as it will be
// used for the filename of any reports.
this.jobId = contextJobID != null
? contextJobID.toString()
: ("job-without-id-at-" + System.currentTimeMillis());
this.collectIOStatistics = conf.getBoolean(
S3A_COMMITTER_EXPERIMENTAL_COLLECT_IOSTATISTICS,
S3A_COMMITTER_EXPERIMENTAL_COLLECT_IOSTATISTICS_DEFAULT);
this.ioStatisticsContext = Objects.requireNonNull(ioStatisticsContext);
this.auditContextUpdater = new AuditContextUpdater(jobContext);
this.auditContextUpdater.updateCurrentAuditContext();
this.committerThreads = committerThreads;
buildSubmitters();
}
/**
* Create for testing.
* This has no job context; instead the values
* are set explicitly.
* @param commitOperations commit callbacks
* @param conf job conf
* @param jobId ID
* @param committerThreads number of commit threads
* @param ioStatisticsContext IOStatistics context of current thread
*/
public CommitContext(final CommitOperations commitOperations,
final Configuration conf,
final String jobId,
final int committerThreads,
final IOStatisticsContext ioStatisticsContext) {
this.commitOperations = commitOperations;
this.jobContext = null;
this.conf = conf;
this.jobId = jobId;
this.collectIOStatistics = false;
this.ioStatisticsContext = Objects.requireNonNull(ioStatisticsContext);
this.auditContextUpdater = new AuditContextUpdater(jobId);
this.auditContextUpdater.updateCurrentAuditContext();
this.committerThreads = committerThreads;
buildSubmitters();
}
/**
* Build the submitters and thread pools if the number of committerThreads
* is greater than zero.
* This should only be called in constructors; it is synchronized to keep
* SpotBugs happy.
*/
private synchronized void buildSubmitters() {
if (committerThreads != 0) {
outerSubmitter = new PoolSubmitter(buildThreadPool(committerThreads));
}
}
/**
* Returns an {@link ExecutorService} for parallel tasks. The number of
* threads in the thread-pool is set by fs.s3a.committer.threads.
* If num-threads is 0, this will raise an exception.
* The threads have a lifespan set by
* {@link InternalCommitterConstants#THREAD_KEEP_ALIVE_TIME}.
* When the thread pool is full, the caller runs
* policy takes over.
* @param numThreads thread count, may be negative.
* @return an {@link ExecutorService} for the number of threads
*/
private ExecutorService buildThreadPool(
int numThreads) {
if (numThreads < 0) {
// a negative number means "multiple of available processors"
numThreads = numThreads * -Runtime.getRuntime().availableProcessors();
}
Preconditions.checkArgument(numThreads > 0,
"Cannot create a thread pool with no threads");
LOG.debug("creating thread pool of size {}", numThreads);
final ThreadFactory factory = new ThreadFactoryBuilder()
.setDaemon(true)
.setNameFormat(THREAD_PREFIX + jobId + "-%d")
.build();
return new HadoopThreadPoolExecutor(numThreads, numThreads,
THREAD_KEEP_ALIVE_TIME,
TimeUnit.SECONDS,
new LinkedBlockingQueue<>(),
factory,
new ThreadPoolExecutor.CallerRunsPolicy());
}
/**
* Commit the operation, throwing an exception on any failure.
* See {@code CommitOperations#commitOrFail(SinglePendingCommit)}.
* @param commit commit to execute
* @throws IOException on a failure
*/
public void commitOrFail(SinglePendingCommit commit) throws IOException {
commitOperations.commitOrFail(commit);
}
/**
* Commit a single pending commit; exceptions are caught
* and converted to an outcome.
* See {@link CommitOperations#commit(SinglePendingCommit, String)}.
* @param commit entry to commit
* @param origin origin path/string for outcome text
* @return the outcome
*/
public CommitOperations.MaybeIOE commit(SinglePendingCommit commit,
String origin) {
return commitOperations.commit(commit, origin);
}
/**
* See {@link CommitOperations#abortSingleCommit(SinglePendingCommit)}.
* @param commit pending commit to abort
* @throws FileNotFoundException if the abort ID is unknown
* @throws IOException on any failure
*/
public void abortSingleCommit(final SinglePendingCommit commit)
throws IOException {
commitOperations.abortSingleCommit(commit);
}
/**
* See {@link CommitOperations#revertCommit(SinglePendingCommit)}.
* @param commit pending commit
* @throws IOException failure
*/
public void revertCommit(final SinglePendingCommit commit)
throws IOException {
commitOperations.revertCommit(commit);
}
/**
* See {@link CommitOperations#abortMultipartCommit(String, String)}..
* @param destKey destination key
* @param uploadId upload to cancel
* @throws FileNotFoundException if the abort ID is unknown
* @throws IOException on any failure
*/
public void abortMultipartCommit(
final String destKey,
final String uploadId)
throws IOException {
commitOperations.abortMultipartCommit(destKey, uploadId);
}
@Override
public synchronized void close() throws IOException {
destroyThreadPools();
auditContextUpdater.resetCurrentAuditContext();
}
@Override
public String toString() {
return "CommitContext{}";
}
/**
* Job Context.
* @return job context.
*/
public JobContext getJobContext() {
return jobContext;
}
/**
* Return a submitter.
* If created with 0 threads, this returns null so
* TaskPool knows to run it in the current thread.
* @return a submitter or null
*/
public synchronized TaskPool.Submitter getOuterSubmitter() {
return outerSubmitter;
}
/**
* Return a submitter. As this pool is used less often,
* create it on demand.
* If created with 0 threads, this returns null so
* TaskPool knows to run it in the current thread.
* @return a submitter or null
*/
public synchronized TaskPool.Submitter getInnerSubmitter() {
if (innerSubmitter == null && committerThreads > 0) {
innerSubmitter = new PoolSubmitter(buildThreadPool(committerThreads));
}
return innerSubmitter;
}
/**
* Get a serializer for .pending files.
* @return a serializer.
*/
public JsonSerialization<SinglePendingCommit> getSinglePendingFileSerializer() {
return singleCommitSerializer.getForCurrentThread();
}
/**
* Get a serializer for .pendingset files.
* @return a serializer.
*/
public JsonSerialization<PendingSet> getPendingSetSerializer() {
return pendingSetSerializer.getForCurrentThread();
}
/**
* Destroy any thread pools; wait for that to finish,
* but don't overreact if it doesn't finish in time.
*/
private synchronized void destroyThreadPools() {
try {
IOUtils.cleanupWithLogger(LOG, outerSubmitter, innerSubmitter);
} finally {
outerSubmitter = null;
innerSubmitter = null;
}
}
/**
* Job configuration.
* @return configuration (never null)
*/
public Configuration getConf() {
return conf;
}
/**
* Get the job ID.
* @return job ID.
*/
public String getJobId() {
return jobId;
}
/**
* Collecting thread level IO statistics?
* @return true if thread level IO stats should be collected.
*/
public boolean isCollectIOStatistics() {
return collectIOStatistics;
}
/**
* IOStatistics context of the created thread.
* @return the IOStatistics.
*/
public IOStatisticsContext getIOStatisticsContext() {
return ioStatisticsContext;
}
/**
* Switch to the context IOStatistics context,
* if needed.
*/
public void switchToIOStatisticsContext() {
IOStatisticsContext.setThreadIOStatisticsContext(ioStatisticsContext);
}
/**
* Reset the IOStatistics context if statistics are being
* collected.
* Logs at info.
*/
public void maybeResetIOStatisticsContext() {
if (collectIOStatistics) {
LOG.info("Resetting IO statistics context {}",
ioStatisticsContext.getID());
ioStatisticsContext.reset();
}
}
/**
* Submitter for a given thread pool.
*/
private final
|
CommitContext
|
java
|
alibaba__fastjson
|
src/test/java/com/derbysoft/spitfire/fastjson/dto/UniqueIDDTO.java
|
{
"start": 104,
"end": 885
}
|
class ____ extends AbstractDTO{
private String companyName;
private String code;
private UniqueIDType type;
public UniqueIDDTO() {
}
public UniqueIDDTO(String code, UniqueIDType type) {
this.code = code;
this.type = type;
}
@JSONField(name="CName")
public String getCompanyName() {
return companyName;
}
@JSONField(name="CName")
public void setCompanyName(String companyName) {
this.companyName = companyName;
}
public String getCode() {
return code;
}
public void setCode(String code) {
this.code = code;
}
public UniqueIDType getType() {
return type;
}
public void setType(UniqueIDType type) {
this.type = type;
}
}
|
UniqueIDDTO
|
java
|
apache__camel
|
core/camel-core-reifier/src/main/java/org/apache/camel/reifier/dataformat/GrokDataFormatReifier.java
|
{
"start": 1027,
"end": 1647
}
|
class ____ extends DataFormatReifier<GrokDataFormat> {
public GrokDataFormatReifier(CamelContext camelContext, DataFormatDefinition definition) {
super(camelContext, (GrokDataFormat) definition);
}
@Override
protected void prepareDataFormatConfig(Map<String, Object> properties) {
properties.put("pattern", definition.getPattern());
properties.put("flattened", definition.getFlattened());
properties.put("allowMultipleMatchesPerLine", definition.getAllowMultipleMatchesPerLine());
properties.put("namedOnly", definition.getNamedOnly());
}
}
|
GrokDataFormatReifier
|
java
|
alibaba__nacos
|
client/src/main/java/com/alibaba/nacos/client/config/impl/CacheData.java
|
{
"start": 2191,
"end": 11418
}
|
class ____ {
private static final Logger LOGGER = LogUtils.logger(CacheData.class);
private static final long DEFAULT_NOTIF_WARN_TIMEOUTS = 60000;
private static long notifyWarnTimeout = DEFAULT_NOTIF_WARN_TIMEOUTS;
static {
initNotifyWarnTimeout();
}
static long initNotifyWarnTimeout() {
String notifyTimeouts = System.getProperty("nacos.listener.notify.warn.timeout");
if (StringUtils.isNotBlank(notifyTimeouts) && NumberUtils.isDigits(notifyTimeouts)) {
notifyWarnTimeout = Long.valueOf(notifyTimeouts);
LOGGER.info("config listener notify warn timeout millis is set to {}", notifyWarnTimeout);
} else {
LOGGER.info("config listener notify warn timeout millis use default {} millis ",
DEFAULT_NOTIF_WARN_TIMEOUTS);
notifyWarnTimeout = DEFAULT_NOTIF_WARN_TIMEOUTS;
}
return notifyWarnTimeout;
}
/**
* double check lock initialization of scheduledExecutor.
*/
static volatile ScheduledThreadPoolExecutor scheduledExecutor;
static ScheduledThreadPoolExecutor getNotifyBlockMonitor() {
if (scheduledExecutor == null) {
synchronized (CacheData.class) {
if (scheduledExecutor == null) {
scheduledExecutor = new ScheduledThreadPoolExecutor(1,
new NameThreadFactory("com.alibaba.nacos.client.notify.block.monitor"),
new ThreadPoolExecutor.DiscardPolicy());
scheduledExecutor.setRemoveOnCancelPolicy(true);
// it will shut down when jvm exit.
ThreadUtils.addShutdownHook(CacheData::shutdownScheduledExecutor);
}
}
}
return scheduledExecutor;
}
/**
* shutdownScheduledExecutor.
*/
public static void shutdownScheduledExecutor() {
if (scheduledExecutor != null && !scheduledExecutor.isShutdown()) {
try {
scheduledExecutor.shutdown();
// help gc
scheduledExecutor = null;
} catch (Exception e) {
// ignore
}
}
}
static boolean initSnapshot;
static {
initSnapshot = NacosClientProperties.PROTOTYPE.getBoolean("nacos.cache.data.init.snapshot", true);
LOGGER.info("nacos.cache.data.init.snapshot = {} ", initSnapshot);
}
public final String envName;
private final ConfigFilterChainManager configFilterChainManager;
public final String dataId;
public final String group;
public final String tenant;
private final CopyOnWriteArrayList<ManagerListenerWrap> listeners;
private volatile String md5;
/**
* whether use local config.
*/
private volatile boolean isUseLocalConfig = false;
/**
* last modify time.
*/
private volatile long localConfigLastModified;
private volatile String content;
private volatile String encryptedDataKey;
/**
* local cache change timestamp.
*/
private final AtomicLong lastModifiedTs = new AtomicLong(0);
/**
* notify change flag,for notify&sync concurrent control. 1.reset to false if starting to sync with server. 2.update
* to true if receive config change notification.
*/
private final AtomicBoolean receiveNotifyChanged = new AtomicBoolean(false);
private int taskId;
private volatile boolean isInitializing = true;
/**
* if is cache data md5 sync with the server.
*/
private final AtomicBoolean isConsistentWithServer = new AtomicBoolean();
/**
* if is cache data is discard,need to remove.
*/
private volatile boolean isDiscard = false;
private String type;
public boolean isInitializing() {
return isInitializing;
}
public void setInitializing(boolean isInitializing) {
this.isInitializing = isInitializing;
}
public String getMd5() {
return md5;
}
public String getTenant() {
return tenant;
}
public String getContent() {
return content;
}
public void setContent(String content) {
this.content = content;
this.md5 = getMd5String(this.content);
}
public AtomicBoolean getReceiveNotifyChanged() {
return receiveNotifyChanged;
}
/**
* Getter method for property <tt>lastModifiedTs</tt>.
*
* @return property value of lastModifiedTs
*/
public AtomicLong getLastModifiedTs() {
return lastModifiedTs;
}
/**
* Setter method for property <tt>lastModifiedTs</tt>.
*
* @param lastModifiedTs value to be assigned to property lastModifiedTs
*/
public void setLastModifiedTs(long lastModifiedTs) {
this.lastModifiedTs.set(lastModifiedTs);
}
public String getType() {
return type;
}
public void setType(String type) {
this.type = type;
}
/**
* Add listener if CacheData already set new content, Listener should init lastCallMd5 by CacheData.md5
*
* @param listener listener
*/
public void addListener(Listener listener) throws NacosException {
if (null == listener) {
throw new IllegalArgumentException("listener is null");
}
ManagerListenerWrap wrap;
if (listener instanceof AbstractConfigChangeListener) {
ConfigResponse cr = new ConfigResponse();
cr.setDataId(dataId);
cr.setGroup(group);
cr.setContent(content);
cr.setEncryptedDataKey(encryptedDataKey);
configFilterChainManager.doFilter(null, cr);
String contentTmp = cr.getContent();
wrap = new ManagerListenerWrap(listener, md5, contentTmp);
} else {
wrap = new ManagerListenerWrap(listener, md5);
}
if (listeners.addIfAbsent(wrap)) {
LOGGER.info("[{}] [add-listener] ok, tenant={}, dataId={}, group={}, cnt={}", envName, tenant, dataId,
group, listeners.size());
}
}
/**
* Remove listener.
*
* @param listener listener
*/
public void removeListener(Listener listener) {
if (null == listener) {
throw new IllegalArgumentException("listener is null");
}
ManagerListenerWrap wrap = new ManagerListenerWrap(listener);
if (listeners.remove(wrap)) {
LOGGER.info("[{}] [remove-listener] ok, dataId={}, group={},tenant={}, cnt={}", envName, dataId, group,
tenant, listeners.size());
}
}
/**
* Returns the iterator on the listener list, read-only. It is guaranteed not to return NULL.
*/
public List<Listener> getListeners() {
List<Listener> result = new ArrayList<>();
for (ManagerListenerWrap wrap : listeners) {
result.add(wrap.listener);
}
return result;
}
public long getLocalConfigInfoVersion() {
return localConfigLastModified;
}
public void setLocalConfigInfoVersion(long localConfigLastModified) {
this.localConfigLastModified = localConfigLastModified;
}
public boolean isUseLocalConfigInfo() {
return isUseLocalConfig;
}
public void setUseLocalConfigInfo(boolean useLocalConfigInfo) {
this.isUseLocalConfig = useLocalConfigInfo;
if (!useLocalConfigInfo) {
localConfigLastModified = -1;
}
}
public int getTaskId() {
return taskId;
}
public void setTaskId(int taskId) {
this.taskId = taskId;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((dataId == null) ? 0 : dataId.hashCode());
result = prime * result + ((group == null) ? 0 : group.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (null == obj || obj.getClass() != getClass()) {
return false;
}
if (this == obj) {
return true;
}
CacheData other = (CacheData) obj;
return dataId.equals(other.dataId) && group.equals(other.group);
}
@Override
public String toString() {
return "CacheData [" + dataId + ", " + group + "]";
}
void checkListenerMd5() {
for (ManagerListenerWrap wrap : listeners) {
if (!md5.equals(wrap.lastCallMd5)) {
safeNotifyListener(dataId, group, content, type, md5, encryptedDataKey, wrap);
}
}
}
/**
* check if all listeners md5 is equal with cache data.
*/
public boolean checkListenersMd5Consistent() {
for (ManagerListenerWrap wrap : listeners) {
if (!md5.equals(wrap.lastCallMd5)) {
return false;
}
}
return true;
}
|
CacheData
|
java
|
google__guava
|
android/guava/src/com/google/common/math/BigIntegerMath.java
|
{
"start": 4599,
"end": 12710
}
|
class ____ time and linearly
* increasing memory consumption.
*/
@VisibleForTesting static final int SQRT2_PRECOMPUTE_THRESHOLD = 256;
@VisibleForTesting
static final BigInteger SQRT2_PRECOMPUTED_BITS =
new BigInteger("16a09e667f3bcc908b2fb1366ea957d3e3adec17512775099da2f590b0667322a", 16);
/**
* Returns the base-10 logarithm of {@code x}, rounded according to the specified rounding mode.
*
* @throws IllegalArgumentException if {@code x <= 0}
* @throws ArithmeticException if {@code mode} is {@link RoundingMode#UNNECESSARY} and {@code x}
* is not a power of ten
*/
@GwtIncompatible // TODO
@SuppressWarnings("fallthrough")
public static int log10(BigInteger x, RoundingMode mode) {
checkPositive("x", x);
if (fitsInLong(x)) {
return LongMath.log10(x.longValue(), mode);
}
int approxLog10 = (int) (log2(x, FLOOR) * LN_2 / LN_10);
BigInteger approxPow = BigInteger.TEN.pow(approxLog10);
int approxCmp = approxPow.compareTo(x);
/*
* We adjust approxLog10 and approxPow until they're equal to floor(log10(x)) and
* 10^floor(log10(x)).
*/
if (approxCmp > 0) {
/*
* The code is written so that even completely incorrect approximations will still yield the
* correct answer eventually, but in practice this branch should almost never be entered, and
* even then the loop should not run more than once.
*/
do {
approxLog10--;
approxPow = approxPow.divide(BigInteger.TEN);
approxCmp = approxPow.compareTo(x);
} while (approxCmp > 0);
} else {
BigInteger nextPow = BigInteger.TEN.multiply(approxPow);
int nextCmp = nextPow.compareTo(x);
while (nextCmp <= 0) {
approxLog10++;
approxPow = nextPow;
approxCmp = nextCmp;
nextPow = BigInteger.TEN.multiply(approxPow);
nextCmp = nextPow.compareTo(x);
}
}
int floorLog = approxLog10;
BigInteger floorPow = approxPow;
int floorCmp = approxCmp;
switch (mode) {
case UNNECESSARY:
checkRoundingUnnecessary(floorCmp == 0);
// fall through
case FLOOR:
case DOWN:
return floorLog;
case CEILING:
case UP:
return floorPow.equals(x) ? floorLog : floorLog + 1;
case HALF_DOWN:
case HALF_UP:
case HALF_EVEN:
// Since sqrt(10) is irrational, log10(x) - floorLog can never be exactly 0.5
BigInteger x2 = x.pow(2);
BigInteger halfPowerSquared = floorPow.pow(2).multiply(BigInteger.TEN);
return (x2.compareTo(halfPowerSquared) <= 0) ? floorLog : floorLog + 1;
}
throw new AssertionError();
}
private static final double LN_10 = Math.log(10);
private static final double LN_2 = Math.log(2);
/**
* Returns the square root of {@code x}, rounded with the specified rounding mode.
*
* @throws IllegalArgumentException if {@code x < 0}
* @throws ArithmeticException if {@code mode} is {@link RoundingMode#UNNECESSARY} and {@code
* sqrt(x)} is not an integer
*/
@GwtIncompatible // TODO
@SuppressWarnings("fallthrough")
public static BigInteger sqrt(BigInteger x, RoundingMode mode) {
checkNonNegative("x", x);
if (fitsInLong(x)) {
return BigInteger.valueOf(LongMath.sqrt(x.longValue(), mode));
}
BigInteger sqrtFloor = sqrtFloor(x);
switch (mode) {
case UNNECESSARY:
checkRoundingUnnecessary(sqrtFloor.pow(2).equals(x)); // fall through
case FLOOR:
case DOWN:
return sqrtFloor;
case CEILING:
case UP:
int sqrtFloorInt = sqrtFloor.intValue();
boolean sqrtFloorIsExact =
(sqrtFloorInt * sqrtFloorInt == x.intValue()) // fast check mod 2^32
&& sqrtFloor.pow(2).equals(x); // slow exact check
return sqrtFloorIsExact ? sqrtFloor : sqrtFloor.add(BigInteger.ONE);
case HALF_DOWN:
case HALF_UP:
case HALF_EVEN:
BigInteger halfSquare = sqrtFloor.pow(2).add(sqrtFloor);
/*
* We wish to test whether or not x <= (sqrtFloor + 0.5)^2 = halfSquare + 0.25. Since both x
* and halfSquare are integers, this is equivalent to testing whether or not x <=
* halfSquare.
*/
return (halfSquare.compareTo(x) >= 0) ? sqrtFloor : sqrtFloor.add(BigInteger.ONE);
}
throw new AssertionError();
}
@GwtIncompatible // TODO
private static BigInteger sqrtFloor(BigInteger x) {
/*
* Adapted from Hacker's Delight, Figure 11-1.
*
* Using DoubleUtils.bigToDouble, getting a double approximation of x is extremely fast, and
* then we can get a double approximation of the square root. Then, we iteratively improve this
* guess with an application of Newton's method, which sets guess := (guess + (x / guess)) / 2.
* This iteration has the following two properties:
*
* a) every iteration (except potentially the first) has guess >= floor(sqrt(x)). This is
* because guess' is the arithmetic mean of guess and x / guess, sqrt(x) is the geometric mean,
* and the arithmetic mean is always higher than the geometric mean.
*
* b) this iteration converges to floor(sqrt(x)). In fact, the number of correct digits doubles
* with each iteration, so this algorithm takes O(log(digits)) iterations.
*
* We start out with a double-precision approximation, which may be higher or lower than the
* true value. Therefore, we perform at least one Newton iteration to get a guess that's
* definitely >= floor(sqrt(x)), and then continue the iteration until we reach a fixed point.
*/
BigInteger sqrt0;
int log2 = log2(x, FLOOR);
if (log2 < Double.MAX_EXPONENT) {
sqrt0 = sqrtApproxWithDoubles(x);
} else {
int shift = (log2 - DoubleUtils.SIGNIFICAND_BITS) & ~1; // even!
/*
* We have that x / 2^shift < 2^54. Our initial approximation to sqrtFloor(x) will be
* 2^(shift/2) * sqrtApproxWithDoubles(x / 2^shift).
*/
sqrt0 = sqrtApproxWithDoubles(x.shiftRight(shift)).shiftLeft(shift >> 1);
}
BigInteger sqrt1 = sqrt0.add(x.divide(sqrt0)).shiftRight(1);
if (sqrt0.equals(sqrt1)) {
return sqrt0;
}
do {
sqrt0 = sqrt1;
sqrt1 = sqrt0.add(x.divide(sqrt0)).shiftRight(1);
} while (sqrt1.compareTo(sqrt0) < 0);
return sqrt0;
}
@GwtIncompatible // TODO
private static BigInteger sqrtApproxWithDoubles(BigInteger x) {
return DoubleMath.roundToBigInteger(Math.sqrt(DoubleUtils.bigToDouble(x)), HALF_EVEN);
}
  /**
   * Returns {@code x}, rounded to a {@code double} with the specified rounding mode. If {@code x}
   * is precisely representable as a {@code double}, its {@code double} value will be returned;
   * otherwise, the rounding will choose between the two nearest representable values with {@code
   * mode}.
   *
   * <p>For the case of {@link RoundingMode#HALF_DOWN}, {@code HALF_UP}, and {@code HALF_EVEN},
   * infinite {@code double} values are considered infinitely far away. For example, 2^2000 is not
   * representable as a double, but {@code roundToDouble(BigInteger.valueOf(2).pow(2000), HALF_UP)}
   * will return {@code Double.MAX_VALUE}, not {@code Double.POSITIVE_INFINITY}.
   *
   * <p>For the case of {@link RoundingMode#HALF_EVEN}, this implementation uses the IEEE 754
   * default rounding mode: if the two nearest representable values are equally near, the one with
   * the least significant bit zero is chosen. (In such cases, both of the nearest representable
   * values are even integers; this method returns the one that is a multiple of a greater power of
   * two.)
   *
   * @throws ArithmeticException if {@code mode} is {@link RoundingMode#UNNECESSARY} and {@code x}
   *     is not precisely representable as a {@code double}
   * @since 30.0
   */
  @GwtIncompatible
  public static double roundToDouble(BigInteger x, RoundingMode mode) {
    // Delegates to the shared BigIntegerToDoubleRounder singleton, which implements the
    // rounding-mode dispatch described above.
    return BigIntegerToDoubleRounder.INSTANCE.roundToDouble(x, mode);
  }
@GwtIncompatible
private static final
|
load
|
java
|
apache__kafka
|
raft/src/test/java/org/apache/kafka/raft/RaftEventSimulationTest.java
|
{
"start": 55217,
"end": 58199
}
|
// Simulated network for the Raft cluster: routes outbound requests/responses between nodes,
// applies per-node NetworkFilters to accept or drop traffic, and tracks in-flight requests by
// correlation id so responses can be routed back to their senders.
class ____ {
    // Pending requests keyed by correlation id; consulted when the matching response arrives.
    final Map<Integer, InflightRequest> inflight = new HashMap<>();
    // Per-node traffic filters; a message is delivered only if both endpoints' filters accept it.
    final Map<Integer, NetworkFilter> filters = new HashMap<>();
    final Cluster cluster;
    // Every node starts with a permit-all filter; tests swap in restrictive filters via filter().
    private MessageRouter(Cluster cluster) {
        this.cluster = cluster;
        for (int nodeId : cluster.nodes.keySet())
            filters.put(nodeId, new PermitAllTraffic());
    }
    // Delivers a request from senderId to its destination node, if both the sender's outbound
    // filter and the destination's inbound filter accept it and the destination is running.
    // The request's completion future is wired so the eventual response is routed back here.
    void deliver(int senderId, RaftRequest.Outbound outbound) {
        if (!filters.get(senderId).acceptOutbound(outbound))
            return;
        int correlationId = outbound.correlationId();
        Node destination = outbound.destination();
        // Rebuild the request as an inbound message stamped with the current simulated time.
        RaftRequest.Inbound inbound = cluster
            .nodeIfRunning(senderId)
            .map(node ->
                new RaftRequest.Inbound(
                    node.channel.listenerName(),
                    correlationId,
                    ApiMessageType
                        .fromApiKey(outbound.data().apiKey())
                        .highestSupportedVersion(true),
                    outbound.data(),
                    cluster.time.milliseconds()
                )
            )
            .get();
        if (!filters.get(destination.id()).acceptInbound(inbound))
            return;
        cluster.nodeIfRunning(destination.id()).ifPresent(node -> {
            inflight.put(correlationId, new InflightRequest(senderId, destination));
            inbound.completion.whenComplete((response, exception) -> {
                // Route the response back only if the destination's outbound filter accepts it.
                if (response != null && filters.get(destination.id()).acceptOutbound(response)) {
                    deliver(response);
                }
            });
            node.client.handle(inbound);
        });
    }
    // Delivers a response back to the original sender of the correlated request, subject to the
    // sender's inbound filter. Removes the in-flight bookkeeping entry.
    void deliver(RaftResponse.Outbound outbound) {
        int correlationId = outbound.correlationId();
        InflightRequest inflightRequest = inflight.remove(correlationId);
        RaftResponse.Inbound inbound = new RaftResponse.Inbound(
            correlationId,
            outbound.data(),
            // The source of the response is the destination of the request
            inflightRequest.destination
        );
        if (!filters.get(inflightRequest.sourceId).acceptInbound(inbound))
            return;
        cluster.nodeIfRunning(inflightRequest.sourceId).ifPresent(node ->
            node.channel.mockReceive(inbound)
        );
    }
    // Replaces the traffic filter for one node (e.g. to simulate a partition).
    void filter(int nodeId, NetworkFilter filter) {
        filters.put(nodeId, filter);
    }
    // Drains and delivers every message queued on one node's channel.
    void deliverTo(RaftNode node) {
        node.channel.drainSendQueue().forEach(msg -> deliver(node.nodeId, msg));
    }
    // Drains and delivers queued messages for all running nodes.
    void deliverAll() {
        for (RaftNode node : cluster.running()) {
            deliverTo(node);
        }
    }
}
private static
|
MessageRouter
|
java
|
apache__camel
|
components/camel-knative/camel-knative-component/src/generated/java/org/apache/camel/component/knative/KnativeEndpointConfigurer.java
|
{
"start": 734,
"end": 7670
}
|
// Generated-style property configurer for KnativeEndpoint: maps endpoint option names (both the
// camelCase and all-lowercase spellings) to their setters, getters, declared types, and
// collection value types. Unknown names fall through to false/null so callers can chain
// configurers.
class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
    // Sets a single named option on the endpoint, converting the raw value to the target type;
    // returns false when the name is not recognized.
    @Override
    public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
        KnativeEndpoint target = (KnativeEndpoint) obj;
        switch (ignoreCase ? name.toLowerCase() : name) {
        case "apiversion":
        case "apiVersion": target.getConfiguration().setApiVersion(property(camelContext, java.lang.String.class, value)); return true;
        case "bridgeerrorhandler":
        case "bridgeErrorHandler": target.setBridgeErrorHandler(property(camelContext, boolean.class, value)); return true;
        case "ceoverride":
        case "ceOverride": target.getConfiguration().setCeOverride(property(camelContext, java.util.Map.class, value)); return true;
        case "cloudeventsspecversion":
        case "cloudEventsSpecVersion": target.getConfiguration().setCloudEventsSpecVersion(property(camelContext, java.lang.String.class, value)); return true;
        case "cloudeventstype":
        case "cloudEventsType": target.getConfiguration().setCloudEventsType(property(camelContext, java.lang.String.class, value)); return true;
        case "environment": target.getConfiguration().setEnvironment(property(camelContext, org.apache.camel.component.knative.spi.KnativeEnvironment.class, value)); return true;
        case "exceptionhandler":
        case "exceptionHandler": target.setExceptionHandler(property(camelContext, org.apache.camel.spi.ExceptionHandler.class, value)); return true;
        case "exchangepattern":
        case "exchangePattern": target.setExchangePattern(property(camelContext, org.apache.camel.ExchangePattern.class, value)); return true;
        case "filters": target.getConfiguration().setFilters(property(camelContext, java.util.Map.class, value)); return true;
        case "kind": target.getConfiguration().setKind(property(camelContext, java.lang.String.class, value)); return true;
        case "lazystartproducer":
        case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
        case "name": target.getConfiguration().setName(property(camelContext, java.lang.String.class, value)); return true;
        case "reply": target.getConfiguration().setReply(property(camelContext, java.lang.Boolean.class, value)); return true;
        case "replywithcloudevent":
        case "replyWithCloudEvent": target.getConfiguration().setReplyWithCloudEvent(property(camelContext, boolean.class, value)); return true;
        case "sinkbinding":
        case "sinkBinding": target.getConfiguration().setSinkBinding(property(camelContext, org.apache.camel.component.knative.spi.KnativeSinkBinding.class, value)); return true;
        case "transportoptions":
        case "transportOptions": target.getConfiguration().setTransportOptions(property(camelContext, java.util.Map.class, value)); return true;
        default: return false;
        }
    }
    // Reports the declared Java type of a named option, or null if unknown.
    @Override
    public Class<?> getOptionType(String name, boolean ignoreCase) {
        switch (ignoreCase ? name.toLowerCase() : name) {
        case "apiversion":
        case "apiVersion": return java.lang.String.class;
        case "bridgeerrorhandler":
        case "bridgeErrorHandler": return boolean.class;
        case "ceoverride":
        case "ceOverride": return java.util.Map.class;
        case "cloudeventsspecversion":
        case "cloudEventsSpecVersion": return java.lang.String.class;
        case "cloudeventstype":
        case "cloudEventsType": return java.lang.String.class;
        case "environment": return org.apache.camel.component.knative.spi.KnativeEnvironment.class;
        case "exceptionhandler":
        case "exceptionHandler": return org.apache.camel.spi.ExceptionHandler.class;
        case "exchangepattern":
        case "exchangePattern": return org.apache.camel.ExchangePattern.class;
        case "filters": return java.util.Map.class;
        case "kind": return java.lang.String.class;
        case "lazystartproducer":
        case "lazyStartProducer": return boolean.class;
        case "name": return java.lang.String.class;
        case "reply": return java.lang.Boolean.class;
        case "replywithcloudevent":
        case "replyWithCloudEvent": return boolean.class;
        case "sinkbinding":
        case "sinkBinding": return org.apache.camel.component.knative.spi.KnativeSinkBinding.class;
        case "transportoptions":
        case "transportOptions": return java.util.Map.class;
        default: return null;
        }
    }
    // Reads the current value of a named option from the endpoint, or null if unknown.
    @Override
    public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
        KnativeEndpoint target = (KnativeEndpoint) obj;
        switch (ignoreCase ? name.toLowerCase() : name) {
        case "apiversion":
        case "apiVersion": return target.getConfiguration().getApiVersion();
        case "bridgeerrorhandler":
        case "bridgeErrorHandler": return target.isBridgeErrorHandler();
        case "ceoverride":
        case "ceOverride": return target.getConfiguration().getCeOverride();
        case "cloudeventsspecversion":
        case "cloudEventsSpecVersion": return target.getConfiguration().getCloudEventsSpecVersion();
        case "cloudeventstype":
        case "cloudEventsType": return target.getConfiguration().getCloudEventsType();
        case "environment": return target.getConfiguration().getEnvironment();
        case "exceptionhandler":
        case "exceptionHandler": return target.getExceptionHandler();
        case "exchangepattern":
        case "exchangePattern": return target.getExchangePattern();
        case "filters": return target.getConfiguration().getFilters();
        case "kind": return target.getConfiguration().getKind();
        case "lazystartproducer":
        case "lazyStartProducer": return target.isLazyStartProducer();
        case "name": return target.getConfiguration().getName();
        case "reply": return target.getConfiguration().getReply();
        case "replywithcloudevent":
        case "replyWithCloudEvent": return target.getConfiguration().isReplyWithCloudEvent();
        case "sinkbinding":
        case "sinkBinding": return target.getConfiguration().getSinkBinding();
        case "transportoptions":
        case "transportOptions": return target.getConfiguration().getTransportOptions();
        default: return null;
        }
    }
    // For Map-valued options, reports the value type of the map entries; null otherwise.
    @Override
    public Object getCollectionValueType(Object target, String name, boolean ignoreCase) {
        switch (ignoreCase ? name.toLowerCase() : name) {
        case "ceoverride":
        case "ceOverride": return java.lang.String.class;
        case "filters": return java.lang.String.class;
        case "transportoptions":
        case "transportOptions": return java.lang.Object.class;
        default: return null;
        }
    }
}
|
KnativeEndpointConfigurer
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/query/MappedSuperclassAttributeInMultipleSubtypesTest.java
|
{
"start": 3471,
"end": 3718
}
|
// Base entity holding the identifier shared by its subtypes in this test mapping.
class ____ {
	@Id
	private Long id;

	// No-arg constructor required by JPA.
	public BaseEntity() {
	}

	public BaseEntity(Long id) {
		this.id = id;
	}

	public Long getId() {
		return id;
	}
}
@MappedSuperclass
@SuppressWarnings( "unused" )
public static abstract
|
BaseEntity
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/adaptivebatch/SpeculativeExecutionTest.java
|
{
"start": 5075,
"end": 25538
}
|
class ____ {
    // Shared scheduled executor for the whole test class, managed by the JUnit 5 extension.
    @RegisterExtension
    private static final TestExecutorExtension<ScheduledExecutorService> EXECUTOR_RESOURCE =
            TestingUtils.defaultExecutorExtension();
    // Per-test fixtures, re-created in setUp():
    private ScheduledExecutorService futureExecutor;
    // Manually-triggered executor so tests control exactly when delayed restarts fire.
    private ManuallyTriggeredScheduledExecutor taskRestartExecutor;
    // Records deployed/canceled executions for assertions.
    private TestExecutionOperationsDecorator testExecutionOperations;
    // Records nodes blocked after slow-task detection.
    private TestBlocklistOperations testBlocklistOperations;
    private TestRestartBackoffTimeStrategy restartStrategy;
    private TestExecutionSlotAllocatorFactory testExecutionSlotAllocatorFactory;
    private TestExecutionSlotAllocator testExecutionSlotAllocator;
    // Builds fresh fixtures before each test so state never leaks between tests.
    @BeforeEach
    void setUp() {
        futureExecutor = new DirectScheduledExecutorService();
        taskRestartExecutor = new ManuallyTriggeredScheduledExecutor();
        testExecutionOperations =
                new TestExecutionOperationsDecorator(new DefaultExecutionOperations());
        testBlocklistOperations = new TestBlocklistOperations();
        // Restarts allowed by default with zero backoff.
        restartStrategy = new TestRestartBackoffTimeStrategy(true, 0);
        testExecutionSlotAllocatorFactory = new TestExecutionSlotAllocatorFactory();
        testExecutionSlotAllocator =
                testExecutionSlotAllocatorFactory.getTestExecutionSlotAllocator();
    }
    // Shuts down the per-test executor gracefully (bounded wait) after each test.
    @AfterEach
    void tearDown() {
        if (futureExecutor != null) {
            ExecutorUtils.gracefulShutdown(10, TimeUnit.SECONDS, futureExecutor);
        }
    }
    // Starting the scheduler on a single-vertex job deploys exactly one execution.
    @Test
    void testStartScheduling() {
        createSchedulerAndStartScheduling();
        final List<ExecutionAttemptID> deployedExecutions =
                testExecutionOperations.getDeployedExecutions();
        assertThat(deployedExecutions).hasSize(1);
    }
    // Reporting a task as slow triggers a speculative attempt and blocks the slow task's node.
    @Test
    void testNotifySlowTasks() {
        final AdaptiveBatchScheduler scheduler = createSchedulerAndStartScheduling();
        final ExecutionVertex ev = getOnlyExecutionVertex(scheduler);
        final Execution attempt1 = ev.getCurrentExecutionAttempt();
        assertThat(testExecutionOperations.getDeployedExecutions()).hasSize(1);
        final long timestamp = System.currentTimeMillis();
        notifySlowTask(scheduler, attempt1);
        // A second (speculative) attempt is deployed and the slow node is blocklisted.
        assertThat(testExecutionOperations.getDeployedExecutions()).hasSize(2);
        assertThat(testBlocklistOperations.getAllBlockedNodeIds())
                .containsExactly(attempt1.getAssignedResourceLocation().getNodeId());
        final Execution attempt2 = getExecution(ev, 1);
        assertThat(attempt2.getState()).isEqualTo(ExecutionState.DEPLOYING);
        // The speculative attempt must have been created after the slow-task notification.
        assertThat(attempt2.getStateTimestamp(ExecutionState.CREATED))
                .isGreaterThanOrEqualTo(timestamp);
    }
    // Duplicate slow-task notifications do not spawn extra speculative attempts while one is
    // already running; a new attempt is only created once a prior one has failed.
    @Test
    void testNotifyDuplicatedSlowTasks() {
        final AdaptiveBatchScheduler scheduler = createSchedulerAndStartScheduling();
        final ExecutionVertex ev = getOnlyExecutionVertex(scheduler);
        final Execution attempt1 = ev.getCurrentExecutionAttempt();
        notifySlowTask(scheduler, attempt1);
        assertThat(testExecutionOperations.getDeployedExecutions()).hasSize(2);
        // notify the execution as a slow task again
        notifySlowTask(scheduler, attempt1);
        assertThat(testExecutionOperations.getDeployedExecutions()).hasSize(2);
        // fail attempt2 to make room for a new speculative execution
        final Execution attempt2 = getExecution(ev, 1);
        scheduler.updateTaskExecutionState(createFailedTaskExecutionState(attempt2.getAttemptId()));
        // notify the execution as a slow task again
        notifySlowTask(scheduler, attempt1);
        assertThat(testExecutionOperations.getDeployedExecutions()).hasSize(3);
    }
    // The vertex is restarted only after every current attempt (original + speculative) fails.
    @Test
    void testRestartVertexIfAllSpeculativeExecutionFailed() {
        final AdaptiveBatchScheduler scheduler = createSchedulerAndStartScheduling();
        final ExecutionVertex ev = getOnlyExecutionVertex(scheduler);
        final Execution attempt1 = ev.getCurrentExecutionAttempt();
        notifySlowTask(scheduler, attempt1);
        assertThat(testExecutionOperations.getDeployedExecutions()).hasSize(2);
        final ExecutionAttemptID attemptId1 = attempt1.getAttemptId();
        final ExecutionAttemptID attemptId2 = getExecution(ev, 1).getAttemptId();
        scheduler.updateTaskExecutionState(createFailedTaskExecutionState(attemptId1));
        scheduler.updateTaskExecutionState(createFailedTaskExecutionState(attemptId2));
        // Fire the delayed restart; a third deployment proves the vertex was restarted.
        taskRestartExecutor.triggerScheduledTasks();
        assertThat(testExecutionOperations.getDeployedExecutions()).hasSize(3);
    }
    // No restart happens while at least one attempt of the vertex is still alive.
    @Test
    void testNoRestartIfNotAllSpeculativeExecutionFailed() {
        final AdaptiveBatchScheduler scheduler = createSchedulerAndStartScheduling();
        final ExecutionVertex ev = getOnlyExecutionVertex(scheduler);
        final Execution attempt1 = ev.getCurrentExecutionAttempt();
        notifySlowTask(scheduler, attempt1);
        scheduler.updateTaskExecutionState(createFailedTaskExecutionState(attempt1.getAttemptId()));
        taskRestartExecutor.triggerScheduledTasks();
        // Still only the original two deployments: no restart was scheduled.
        assertThat(testExecutionOperations.getDeployedExecutions()).hasSize(2);
    }
    // A PartitionNotFoundException on one attempt cancels the sibling attempt and restarts the
    // whole vertex once cancellation completes.
    @Test
    void testRestartVertexIfPartitionExceptionHappened() {
        final AdaptiveBatchScheduler scheduler = createSchedulerAndStartScheduling();
        final ExecutionVertex ev = getOnlyExecutionVertex(scheduler);
        final Execution attempt1 = ev.getCurrentExecutionAttempt();
        notifySlowTask(scheduler, attempt1);
        final Execution attempt2 = getExecution(ev, 1);
        scheduler.updateTaskExecutionState(
                createFailedTaskExecutionState(
                        attempt1.getAttemptId(),
                        new PartitionNotFoundException(new ResultPartitionID())));
        assertThat(attempt2.getState()).isEqualTo(ExecutionState.CANCELING);
        completeCancellingForAllVertices(scheduler.getExecutionGraph());
        taskRestartExecutor.triggerScheduledTasks();
        assertThat(testExecutionOperations.getDeployedExecutions()).hasSize(3);
    }
    // When one deployed attempt finishes, the other deployed attempt is canceled.
    @Test
    void testCancelOtherDeployedCurrentExecutionsWhenAnyExecutionFinished() {
        final AdaptiveBatchScheduler scheduler = createSchedulerAndStartScheduling();
        final ExecutionVertex ev = getOnlyExecutionVertex(scheduler);
        final Execution attempt1 = ev.getCurrentExecutionAttempt();
        notifySlowTask(scheduler, attempt1);
        final Execution attempt2 = getExecution(ev, 1);
        scheduler.updateTaskExecutionState(
                createFinishedTaskExecutionState(attempt1.getAttemptId()));
        assertThat(attempt2.getState()).isEqualTo(ExecutionState.CANCELING);
    }
    // An attempt that is still waiting for a slot (never deployed) goes straight to CANCELED
    // when a sibling attempt finishes.
    @Test
    void testCancelOtherScheduledCurrentExecutionsWhenAnyExecutionFinished() {
        // Keep slot requests pending so the speculative attempt stays in SCHEDULED.
        testExecutionSlotAllocator.disableAutoCompletePendingRequests();
        final AdaptiveBatchScheduler scheduler = createSchedulerAndStartScheduling();
        final ExecutionVertex ev = getOnlyExecutionVertex(scheduler);
        final Execution attempt1 = ev.getCurrentExecutionAttempt();
        testExecutionSlotAllocator.completePendingRequest(attempt1.getAttemptId());
        notifySlowTask(scheduler, attempt1);
        final Execution attempt2 = getExecution(ev, 1);
        scheduler.updateTaskExecutionState(
                createFinishedTaskExecutionState(attempt1.getAttemptId()));
        assertThat(attempt2.getState()).isEqualTo(ExecutionState.CANCELED);
    }
    // The exception history must attribute a vertex-restarting failure to the attempt that
    // actually failed, not to whichever attempt is current after the restart.
    @Test
    void testExceptionHistoryIfPartitionExceptionHappened() {
        final AdaptiveBatchScheduler scheduler = createSchedulerAndStartScheduling();
        final ExecutionVertex ev = getOnlyExecutionVertex(scheduler);
        final Execution attempt1 = ev.getCurrentExecutionAttempt();
        notifySlowTask(scheduler, attempt1);
        // A partition exception can result in a restart of the whole execution vertex.
        scheduler.updateTaskExecutionState(
                createFailedTaskExecutionState(
                        attempt1.getAttemptId(),
                        new PartitionNotFoundException(new ResultPartitionID())));
        completeCancellingForAllVertices(scheduler.getExecutionGraph());
        taskRestartExecutor.triggerScheduledTasks();
        assertThat(scheduler.getExceptionHistory()).hasSize(1);
        final RootExceptionHistoryEntry entry = scheduler.getExceptionHistory().iterator().next();
        // the current execution attempt before the restarting should be attempt2 but the failure
        // root exception should be attempt1
        assertThat(entry.getFailingTaskName()).isEqualTo(attempt1.getVertexWithAttempt());
    }
    // A failure of a single attempt (that does not restart the vertex) is still recorded in the
    // execution graph's failure info and in the exception history.
    @Test
    void testLocalExecutionAttemptFailureIsCorrectlyRecorded() {
        final AdaptiveBatchScheduler scheduler = createSchedulerAndStartScheduling();
        final ExecutionVertex ev = getOnlyExecutionVertex(scheduler);
        final Execution attempt1 = ev.getCurrentExecutionAttempt();
        notifySlowTask(scheduler, attempt1);
        // the execution vertex will not be restarted if we only fails attempt1, but it still should
        // be recorded in the execution graph and in exception history
        final TaskExecutionState failedState =
                createFailedTaskExecutionState(attempt1.getAttemptId());
        scheduler.updateTaskExecutionState(failedState);
        final ClassLoader classLoader = this.getClass().getClassLoader();
        assertThat(scheduler.getExecutionGraph().getFailureInfo()).isNotNull();
        assertThat(scheduler.getExecutionGraph().getFailureInfo().getExceptionAsString())
                .contains(failedState.getError(classLoader).getMessage());
        assertThat(scheduler.getExceptionHistory()).hasSize(1);
        final RootExceptionHistoryEntry entry = scheduler.getExceptionHistory().iterator().next();
        assertThat(entry.getFailingTaskName()).isEqualTo(attempt1.getVertexWithAttempt());
    }
    // An unrecoverable failure (SuppressRestartsException) on any attempt fails the whole job.
    @Test
    void testUnrecoverableLocalExecutionAttemptFailureWillFailJob() {
        final AdaptiveBatchScheduler scheduler = createSchedulerAndStartScheduling();
        final ExecutionVertex ev = getOnlyExecutionVertex(scheduler);
        final Execution attempt1 = ev.getCurrentExecutionAttempt();
        notifySlowTask(scheduler, attempt1);
        final TaskExecutionState failedState =
                createFailedTaskExecutionState(
                        attempt1.getAttemptId(),
                        new SuppressRestartsException(
                                new Exception("Forced failure for testing.")));
        scheduler.updateTaskExecutionState(failedState);
        assertThat(scheduler.getExecutionGraph().getState()).isEqualTo(JobStatus.FAILING);
    }
    // If the restart strategy forbids restarts, even a local attempt failure fails the job.
    @Test
    void testLocalExecutionAttemptFailureAndForbiddenRestartWillFailJob() {
        restartStrategy.setCanRestart(false);
        final AdaptiveBatchScheduler scheduler = createSchedulerAndStartScheduling();
        final ExecutionVertex ev = getOnlyExecutionVertex(scheduler);
        final Execution attempt1 = ev.getCurrentExecutionAttempt();
        notifySlowTask(scheduler, attempt1);
        final TaskExecutionState failedState =
                createFailedTaskExecutionState(attempt1.getAttemptId());
        scheduler.updateTaskExecutionState(failedState);
        assertThat(scheduler.getExecutionGraph().getState()).isEqualTo(JobStatus.FAILING);
    }
    // Argument source for the parameterized test below: the partition types that support
    // speculative execution combined with adaptive scheduling in this test.
    static Stream<ResultPartitionType> supportedResultPartitionType() {
        return Stream.of(
                ResultPartitionType.BLOCKING,
                ResultPartitionType.HYBRID_FULL,
                ResultPartitionType.HYBRID_SELECTIVE);
    }
    // Speculative attempts on the source must not interfere with adaptive parallelism decisions:
    // the sink's parallelism stays undecided (-1) until any source attempt finishes, after which
    // it is derived (here forced to 3) and the sink can itself speculate.
    @ParameterizedTest
    @MethodSource("supportedResultPartitionType")
    void testSpeculativeExecutionCombinedWithAdaptiveScheduling(
            ResultPartitionType resultPartitionType) throws Exception {
        final JobVertex source = createNoOpVertex("source", 1);
        // -1 parallelism: let the adaptive scheduler decide the sink's parallelism.
        final JobVertex sink = createNoOpVertex("sink", -1);
        connectNewDataSetAsInput(sink, source, DistributionPattern.ALL_TO_ALL, resultPartitionType);
        final JobGraph jobGraph = JobGraphTestUtils.batchJobGraph(source, sink);
        final ComponentMainThreadExecutor mainThreadExecutor =
                ComponentMainThreadExecutorServiceAdapter.forMainThread();
        final AdaptiveBatchScheduler scheduler =
                createSchedulerBuilder(jobGraph, mainThreadExecutor)
                        .setVertexParallelismAndInputInfosDecider(createCustomParallelismDecider(3))
                        .buildAdaptiveBatchJobScheduler(true);
        mainThreadExecutor.execute(scheduler::startScheduling);
        final DefaultExecutionGraph graph = (DefaultExecutionGraph) scheduler.getExecutionGraph();
        final ExecutionJobVertex sourceExecutionJobVertex = graph.getJobVertex(source.getID());
        final ExecutionJobVertex sinkExecutionJobVertex = graph.getJobVertex(sink.getID());
        final ExecutionVertex sourceExecutionVertex = sourceExecutionJobVertex.getTaskVertices()[0];
        assertThat(sourceExecutionVertex.getCurrentExecutions()).hasSize(1);
        // trigger source vertex speculation
        final Execution sourceAttempt1 = sourceExecutionVertex.getCurrentExecutionAttempt();
        notifySlowTask(scheduler, sourceAttempt1);
        assertThat(sourceExecutionVertex.getCurrentExecutions()).hasSize(2);
        assertThat(sinkExecutionJobVertex.getParallelism()).isEqualTo(-1);
        // Finishing any source execution attempt will finish the source execution vertex, and then
        // finish the job vertex.
        scheduler.updateTaskExecutionState(
                createFinishedTaskExecutionState(
                        sourceAttempt1.getAttemptId(),
                        createResultPartitionBytesForExecution(sourceAttempt1)));
        assertThat(sinkExecutionJobVertex.getParallelism()).isEqualTo(3);
        // trigger sink vertex speculation
        final ExecutionVertex sinkExecutionVertex = sinkExecutionJobVertex.getTaskVertices()[0];
        final Execution sinkAttempt1 = sinkExecutionVertex.getCurrentExecutionAttempt();
        notifySlowTask(scheduler, sinkAttempt1);
        assertThat(sinkExecutionVertex.getCurrentExecutions()).hasSize(2);
    }
    // The slow-vertex count metric counts vertices (not notifications) and drops back to zero
    // once the vertex is no longer reported slow.
    @Test
    void testNumSlowExecutionVerticesMetric() {
        final AdaptiveBatchScheduler scheduler = createSchedulerAndStartScheduling();
        final ExecutionVertex ev = getOnlyExecutionVertex(scheduler);
        final Execution attempt1 = ev.getCurrentExecutionAttempt();
        notifySlowTask(scheduler, attempt1);
        assertThat(getNumSlowExecutionVertices(scheduler)).isOne();
        // notify a slow vertex twice
        notifySlowTask(scheduler, attempt1);
        assertThat(getNumSlowExecutionVertices(scheduler)).isOne();
        // vertex no longer slow
        notifySlowTask(scheduler, Collections.emptyMap());
        assertThat(getNumSlowExecutionVertices(scheduler)).isZero();
    }
    // The effective-speculation metric counts only speculative attempts that finished before
    // their originals, and is reset when the vertex is reset by a global failure.
    @Test
    void testEffectiveSpeculativeExecutionsMetric() {
        final AdaptiveBatchScheduler scheduler = createSchedulerAndStartScheduling();
        final ExecutionVertex ev = getOnlyExecutionVertex(scheduler);
        final Execution attempt1 = ev.getCurrentExecutionAttempt();
        notifySlowTask(scheduler, attempt1);
        // numEffectiveSpeculativeExecutions will increase if a speculative execution attempt
        // finishes first
        final Execution attempt2 = getExecution(ev, 1);
        scheduler.updateTaskExecutionState(
                createFinishedTaskExecutionState(attempt2.getAttemptId()));
        assertThat(getNumEffectiveSpeculativeExecutions(scheduler)).isOne();
        // complete cancellation
        scheduler.updateTaskExecutionState(
                createCanceledTaskExecutionState(attempt1.getAttemptId()));
        // trigger a global failure to reset the vertex.
        // after that, no speculative execution finishes before its original execution and the
        // numEffectiveSpeculativeExecutions will be decreased accordingly.
        scheduler.handleGlobalFailure(new Exception());
        taskRestartExecutor.triggerScheduledTasks();
        assertThat(getNumEffectiveSpeculativeExecutions(scheduler)).isZero();
        final Execution attempt3 = getExecution(ev, 2);
        notifySlowTask(scheduler, attempt3);
        // numEffectiveSpeculativeExecutions will not increase if an original execution attempt
        // finishes first
        scheduler.updateTaskExecutionState(
                createFinishedTaskExecutionState(attempt3.getAttemptId()));
        assertThat(getNumEffectiveSpeculativeExecutions(scheduler)).isZero();
    }
private static Execution getExecution(ExecutionVertex executionVertex, int attemptNumber) {
return executionVertex.getCurrentExecutions().stream()
.filter(e -> e.getAttemptNumber() == attemptNumber)
.findFirst()
.get();
}
    // Convenience accessor for single-vertex test jobs; fails if the graph has != 1 vertex.
    private static ExecutionVertex getOnlyExecutionVertex(AdaptiveBatchScheduler scheduler) {
        return Iterables.getOnlyElement(scheduler.getExecutionGraph().getAllExecutionVertices());
    }
    // Builds and starts a scheduler for the default single-vertex batch job graph.
    private AdaptiveBatchScheduler createSchedulerAndStartScheduling() {
        return createSchedulerAndStartScheduling(singleNonParallelJobVertexJobGraphForBatch());
    }
    // Builds a scheduler for the given job graph and starts scheduling on the (synchronous)
    // main-thread executor; wraps checked construction failures as RuntimeException.
    private AdaptiveBatchScheduler createSchedulerAndStartScheduling(final JobGraph jobGraph) {
        final ComponentMainThreadExecutor mainThreadExecutor =
                ComponentMainThreadExecutorServiceAdapter.forMainThread();
        try {
            final AdaptiveBatchScheduler scheduler = createScheduler(jobGraph, mainThreadExecutor);
            mainThreadExecutor.execute(scheduler::startScheduling);
            return scheduler;
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
    // Builds an AdaptiveBatchScheduler with speculative execution enabled (the 'true' flag).
    private AdaptiveBatchScheduler createScheduler(
            final JobGraph jobGraph, final ComponentMainThreadExecutor mainThreadExecutor)
            throws Exception {
        return createSchedulerBuilder(jobGraph, mainThreadExecutor)
                .buildAdaptiveBatchJobScheduler(true);
    }
    // Common builder wiring for all tests: injects the recording fixtures and disables the
    // periodic slow-task detector so tests trigger slow-task notifications explicitly.
    private DefaultSchedulerBuilder createSchedulerBuilder(
            final JobGraph jobGraph, final ComponentMainThreadExecutor mainThreadExecutor) {
        // disable periodical slow task detection to avoid affecting the designed testing process
        final Configuration configuration = new Configuration();
        configuration.set(SlowTaskDetectorOptions.CHECK_INTERVAL, Duration.ofDays(1));
        return new DefaultSchedulerBuilder(
                        jobGraph, mainThreadExecutor, EXECUTOR_RESOURCE.getExecutor())
                .setBlocklistOperations(testBlocklistOperations)
                .setExecutionOperations(testExecutionOperations)
                .setFutureExecutor(futureExecutor)
                .setDelayExecutor(taskRestartExecutor)
                .setRestartBackoffTimeStrategy(restartStrategy)
                .setExecutionSlotAllocatorFactory(testExecutionSlotAllocatorFactory)
                .setJobMasterConfiguration(configuration);
    }
    // Reports a single attempt as slow to the speculative execution handler.
    private static void notifySlowTask(
            final AdaptiveBatchScheduler scheduler, final Execution slowTask) {
        ((DefaultSpeculativeExecutionHandler) scheduler.getSpeculativeExecutionHandler())
                .notifySlowTasks(
                        ImmutableMap.of(
                                slowTask.getVertex().getID(),
                                Collections.singleton(slowTask.getAttemptId())));
    }
    // Reports an arbitrary slow-task map (an empty map marks all vertices as no longer slow).
    private static void notifySlowTask(
            final AdaptiveBatchScheduler scheduler,
            final Map<ExecutionVertexID, Collection<ExecutionAttemptID>> slowTasks) {
        ((DefaultSpeculativeExecutionHandler) scheduler.getSpeculativeExecutionHandler())
                .notifySlowTasks(slowTasks);
    }
    // Reads the slow-vertex count metric from the speculative execution handler.
    private long getNumSlowExecutionVertices(AdaptiveBatchScheduler scheduler) {
        return ((DefaultSpeculativeExecutionHandler) scheduler.getSpeculativeExecutionHandler())
                .getNumSlowExecutionVertices();
    }
    // Reads the effective-speculation count metric from the speculative execution handler.
    private long getNumEffectiveSpeculativeExecutions(AdaptiveBatchScheduler scheduler) {
        return ((DefaultSpeculativeExecutionHandler) scheduler.getSpeculativeExecutionHandler())
                .getNumEffectiveSpeculativeExecutions();
    }
private static
|
SpeculativeExecutionTest
|
java
|
hibernate__hibernate-orm
|
hibernate-community-dialects/src/test/java/org/hibernate/community/dialect/DerbyStoredProcedureTest.java
|
{
"start": 7147,
"end": 7479
}
|
// Test entity with a DATE-mapped birth date column, used by the stored-procedure tests.
class ____ {
	@Id
	private Integer id;
	private String name;
	// Mapped as a plain SQL DATE (no time component).
	@Column(name = "DATE_OF_BIRTH")
	@Temporal(TemporalType.DATE)
	private Date dateOfBirth;

	// No-arg constructor required by JPA.
	public Person() {
	}

	public Person(Integer id, String name, Date dateOfBirth) {
		this.id = id;
		this.name = name;
		this.dateOfBirth = dateOfBirth;
	}
}
}
|
Person
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/query/order/OrderTest.java
|
{
"start": 1228,
"end": 18483
}
|
class ____ {
	// Seeds two books with distinct ISBNs/titles; the ordering tests rely on this exact data.
	@BeforeEach
	void createTestData(SessionFactoryScope scope) {
		scope.inTransaction( session -> {
			session.persist(new Book("9781932394153", "Hibernate in Action"));
			session.persist(new Book("9781617290459", "Java Persistence with Hibernate"));
		});
	}
	// Removes the seeded data after each test.
	@AfterEach
	void tearDown(SessionFactoryScope scope) {
		scope.dropData();
	}
	// Verifies asc/desc ordering by metamodel attribute, multi-key sorting (isbn then title),
	// and that resort() replaces an order-by clause already present in the HQL.
	@Test void testAscendingDescending(SessionFactoryScope scope) {
		EntityDomainType<Book> bookType = scope.getSessionFactory().getJpaMetamodel().findEntityType(Book.class);
		SingularAttribute<? super Book, ?> title = bookType.findSingularAttribute("title");
		SingularAttribute<? super Book, ?> isbn = bookType.findSingularAttribute("isbn");
		scope.inSession(session -> {
			// Ascending by title.
			List<String> titlesAsc = SelectionSpecification.create( Book.class, "from Book" )
					.sort(asc(title))
					.createQuery( session )
					.getResultList()
					.stream().map(book -> book.title)
					.toList();
			assertEquals("Hibernate in Action", titlesAsc.get(0));
			assertEquals("Java Persistence with Hibernate", titlesAsc.get(1));
			// Descending by title.
			List<String> titlesDesc = SelectionSpecification.create( Book.class, "from Book" )
					.sort(desc(title))
					.createQuery( session )
					.getResultList()
					.stream().map(book -> book.title)
					.toList();
			assertEquals("Hibernate in Action", titlesDesc.get(1));
			assertEquals("Java Persistence with Hibernate", titlesDesc.get(0));
			// Primary sort by isbn asc, secondary by title desc.
			List<String> isbnAsc = SelectionSpecification.create( Book.class, "from Book" )
					.sort(asc(isbn))
					.sort(desc(title))
					.createQuery( session )
					.getResultList()
					.stream().map(book -> book.title)
					.toList();
			assertEquals("Hibernate in Action", isbnAsc.get(1));
			assertEquals("Java Persistence with Hibernate", isbnAsc.get(0));
			// Primary sort by isbn desc, secondary by title desc.
			List<String> isbnDesc = SelectionSpecification.create( Book.class, "from Book" )
					.sort(desc(isbn))
					.sort(desc(title))
					.createQuery( session )
					.getResultList()
					.stream().map(book -> book.title)
					.toList();
			assertEquals("Hibernate in Action", isbnDesc.get(0));
			assertEquals("Java Persistence with Hibernate", isbnDesc.get(1));
			// resort() overrides the order-by clause written in the HQL string itself.
			titlesAsc = SelectionSpecification.create( Book.class, "from Book order by isbn asc" )
					.resort(asc(title))
					.createQuery( session )
					.getResultList()
					.stream().map(book -> book.title)
					.toList();
			assertEquals("Hibernate in Action", titlesAsc.get(0));
			assertEquals("Java Persistence with Hibernate", titlesAsc.get(1));
		});
	}
@Test void testAscendingDescendingWithPositionalParam(SessionFactoryScope scope) {
EntityDomainType<Book> bookType = scope.getSessionFactory().getJpaMetamodel().findEntityType(Book.class);
SingularAttribute<? super Book, ?> title = bookType.findSingularAttribute("title");
SingularAttribute<? super Book, ?> isbn = bookType.findSingularAttribute("isbn");
scope.inSession(session -> {
List<String> titlesAsc = SelectionSpecification.create( Book.class, "from Book where title like ?1" )
.sort(asc(title))
.createQuery( session )
.setParameter(1, "%Hibernate%")
.getResultList()
.stream().map(book -> book.title)
.toList();
assertEquals("Hibernate in Action", titlesAsc.get(0));
assertEquals("Java Persistence with Hibernate", titlesAsc.get(1));
List<String> titlesDesc = SelectionSpecification.create(Book.class, "from Book where title like ?1")
.sort(Order.desc(title))
.createQuery( session )
.setParameter(1, "%Hibernate%")
.getResultList()
.stream().map(book -> book.title)
.toList();
assertEquals("Hibernate in Action", titlesDesc.get(1));
assertEquals("Java Persistence with Hibernate", titlesDesc.get(0));
List<String> isbnAsc = SelectionSpecification.create(Book.class, "from Book where title like ?1")
.sort(asc(isbn))
.sort(desc(title))
.createQuery( session )
.setParameter(1, "%Hibernate%")
.getResultList()
.stream().map(book -> book.title)
.toList();
assertEquals("Hibernate in Action", isbnAsc.get(1));
assertEquals("Java Persistence with Hibernate", isbnAsc.get(0));
List<String> isbnDesc = SelectionSpecification.create(Book.class, "from Book where title like ?1")
.sort(desc(isbn))
.sort(desc(title))
.createQuery( session )
.setParameter(1, "%Hibernate%")
.getResultList()
.stream().map(book -> book.title)
.toList();
assertEquals("Hibernate in Action", isbnDesc.get(0));
assertEquals("Java Persistence with Hibernate", isbnDesc.get(1));
titlesAsc = SelectionSpecification.create(Book.class, "from Book where title like ?1 order by isbn asc")
.resort(asc(title))
.createQuery( session )
.setParameter(1, "%Hibernate%")
.getResultList()
.stream().map(book -> book.title)
.toList();
assertEquals("Hibernate in Action", titlesAsc.get(0));
assertEquals("Java Persistence with Hibernate", titlesAsc.get(1));
});
}
@Test void testAscendingDescendingWithNamedParam(SessionFactoryScope scope) {
EntityDomainType<Book> bookType = scope.getSessionFactory().getJpaMetamodel().findEntityType(Book.class);
SingularAttribute<? super Book, ?> title = bookType.findSingularAttribute("title");
SingularAttribute<? super Book, ?> isbn = bookType.findSingularAttribute("isbn");
scope.inSession(session -> {
List<String> titlesAsc = SelectionSpecification.create(Book.class, "from Book where title like :title")
.sort(asc(title))
.createQuery( session )
.setParameter("title", "%Hibernate%")
.getResultList()
.stream().map(book -> book.title)
.toList();
assertEquals("Hibernate in Action", titlesAsc.get(0));
assertEquals("Java Persistence with Hibernate", titlesAsc.get(1));
List<String> titlesDesc = SelectionSpecification.create(Book.class,"from Book where title like :title")
.sort(desc(title))
.createQuery( session )
.setParameter("title", "%Hibernate%")
.getResultList()
.stream().map(book -> book.title)
.toList();
assertEquals("Hibernate in Action", titlesDesc.get(1));
assertEquals("Java Persistence with Hibernate", titlesDesc.get(0));
List<String> isbnAsc = SelectionSpecification.create(Book.class, "from Book where title like :title")
.sort(asc(isbn))
.sort(desc(title))
.createQuery( session )
.setParameter("title", "%Hibernate%")
.getResultList()
.stream().map(book -> book.title)
.toList();
assertEquals("Hibernate in Action", isbnAsc.get(1));
assertEquals("Java Persistence with Hibernate", isbnAsc.get(0));
List<String> isbnDesc = SelectionSpecification.create(Book.class, "from Book where title like :title")
.sort(desc(isbn))
.sort(desc(title))
.createQuery( session )
.setParameter("title", "%Hibernate%")
.getResultList()
.stream().map(book -> book.title)
.toList();
assertEquals("Hibernate in Action", isbnDesc.get(0));
assertEquals("Java Persistence with Hibernate", isbnDesc.get(1));
titlesAsc = SelectionSpecification.create(Book.class, "from Book where title like :title order by isbn asc")
.resort(asc(title))
.createQuery( session )
.setParameter("title", "%Hibernate%")
.getResultList()
.stream().map(book -> book.title)
.toList();
assertEquals("Hibernate in Action", titlesAsc.get(0));
assertEquals("Java Persistence with Hibernate", titlesAsc.get(1));
});
}
	// Disabled via @NotImplementedYet: sorting by select-item position requires
	// explicit select-list support, which SelectionSpecification does not have yet.
	@NotImplementedYet(reason = "Support for explicit select lists not implemented yet for SelectionSpecification")
	@Test void testAscendingDescendingBySelectElement(SessionFactoryScope scope) {
		scope.inSession(session -> {
			// Sort ascending by select item 2 (title).
			List<?> titlesAsc = SelectionSpecification.create(Object[].class, "select isbn, title from Book")
					.sort(asc(2))
					.createQuery( session )
					.getResultList()
					.stream().map(book -> book[1])
					.toList();
			assertEquals("Hibernate in Action", titlesAsc.get(0));
			assertEquals("Java Persistence with Hibernate", titlesAsc.get(1));
			// Sort descending by select item 2 (title) — order reverses.
			List<?> titlesDesc = SelectionSpecification.create(Object[].class, "select isbn, title from Book")
					.sort(desc(2))
					.createQuery( session )
					.getResultList()
					.stream().map(book -> book[1])
					.toList();
			assertEquals("Hibernate in Action", titlesDesc.get(1));
			assertEquals("Java Persistence with Hibernate", titlesDesc.get(0));
			// Item 1 (isbn) ascending takes precedence over item 2 (title) descending.
			List<?> isbnAsc = SelectionSpecification.create(Object[].class, "select isbn, title from Book")
					.sort(asc(1))
					.sort(desc(2))
					.createQuery( session )
					.getResultList()
					.stream().map(book -> book[1])
					.toList();
			assertEquals("Hibernate in Action", isbnAsc.get(1));
			assertEquals("Java Persistence with Hibernate", isbnAsc.get(0));
			// Item 1 (isbn) descending first.
			List<?> isbnDesc = SelectionSpecification.create(Object[].class, "select isbn, title from Book")
					.sort(desc(1))
					.sort(desc(2))
					.createQuery( session )
					.getResultList()
					.stream().map(book -> book[1])
					.toList();
			assertEquals("Hibernate in Action", isbnDesc.get(0));
			assertEquals("Java Persistence with Hibernate", isbnDesc.get(1));
		});
	}
@Test void testAscendingDescendingCaseInsensitive(SessionFactoryScope scope) {
EntityDomainType<Book> bookType = scope.getSessionFactory().getJpaMetamodel().findEntityType(Book.class);
SingularAttribute<? super Book, ?> title = bookType.findSingularAttribute("title");
SingularAttribute<? super Book, ?> isbn = bookType.findSingularAttribute("isbn");
scope.inSession(session -> {
List<String> titlesAsc = SelectionSpecification.create(Book.class, "from Book")
.sort(asc(title).ignoringCase())
.createQuery( session )
.getResultList()
.stream().map(book -> book.title)
.toList();
assertEquals("Hibernate in Action", titlesAsc.get(0));
assertEquals("Java Persistence with Hibernate", titlesAsc.get(1));
List<String> titlesDesc = SelectionSpecification.create(Book.class, "from Book")
.sort(desc(title).ignoringCase())
.createQuery( session )
.getResultList()
.stream().map(book -> book.title)
.toList();
assertEquals("Hibernate in Action", titlesDesc.get(1));
assertEquals("Java Persistence with Hibernate", titlesDesc.get(0));
List<String> isbnAsc = SelectionSpecification.create(Book.class, "from Book")
.sort(asc(isbn).ignoringCase())
.sort(desc(title).ignoringCase())
.createQuery( session )
.getResultList()
.stream().map(book -> book.title)
.toList();
assertEquals("Hibernate in Action", isbnAsc.get(1));
assertEquals("Java Persistence with Hibernate", isbnAsc.get(0));
List<String> isbnDesc = SelectionSpecification.create(Book.class, "from Book")
.sort(desc(isbn).ignoringCase())
.sort(desc(title).ignoringCase())
.createQuery( session )
.getResultList()
.stream().map(book -> book.title)
.toList();
assertEquals("Hibernate in Action", isbnDesc.get(0));
assertEquals("Java Persistence with Hibernate", isbnDesc.get(1));
titlesAsc = SelectionSpecification.create(Book.class, "from Book order by isbn asc")
.resort(asc(title).ignoringCase())
.createQuery( session )
.getResultList()
.stream().map(book -> book.title)
.toList();
assertEquals("Hibernate in Action", titlesAsc.get(0));
assertEquals("Java Persistence with Hibernate", titlesAsc.get(1));
});
}
@Test void testAscendingDescendingCaseInsensitiveLongForm(SessionFactoryScope scope) {
EntityDomainType<Book> bookType = scope.getSessionFactory().getJpaMetamodel().findEntityType(Book.class);
SingularAttribute<? super Book, ?> title = bookType.findSingularAttribute("title");
SingularAttribute<? super Book, ?> isbn = bookType.findSingularAttribute("isbn");
scope.inSession(session -> {
List<String> titlesAsc = SelectionSpecification.create(Book.class, "from Book")
.sort(by(title, ASCENDING, true))
.createQuery( session )
.getResultList()
.stream().map(book -> book.title)
.toList();
assertEquals("Hibernate in Action", titlesAsc.get(0));
assertEquals("Java Persistence with Hibernate", titlesAsc.get(1));
List<String> titlesDesc = SelectionSpecification.create(Book.class, "from Book")
.sort(by(title, DESCENDING, true))
.createQuery( session )
.getResultList()
.stream().map(book -> book.title)
.toList();
assertEquals("Hibernate in Action", titlesDesc.get(1));
assertEquals("Java Persistence with Hibernate", titlesDesc.get(0));
List<String> isbnAsc = SelectionSpecification.create(Book.class, "from Book")
.sort(by(isbn, ASCENDING, true))
.sort(by(title, DESCENDING, true))
.createQuery( session )
.getResultList()
.stream().map(book -> book.title)
.toList();
assertEquals("Hibernate in Action", isbnAsc.get(1));
assertEquals("Java Persistence with Hibernate", isbnAsc.get(0));
List<String> isbnDesc = SelectionSpecification.create(Book.class, "from Book")
.sort(by(isbn, DESCENDING, true))
.sort(by(title, DESCENDING, true))
.createQuery( session )
.getResultList()
.stream().map(book -> book.title)
.toList();
assertEquals("Hibernate in Action", isbnDesc.get(0));
assertEquals("Java Persistence with Hibernate", isbnDesc.get(1));
titlesAsc = SelectionSpecification.create(Book.class, "from Book order by isbn asc")
.resort(by(title, ASCENDING, true))
.createQuery( session )
.getResultList()
.stream().map(book -> book.title)
.toList();
assertEquals("Hibernate in Action", titlesAsc.get(0));
assertEquals("Java Persistence with Hibernate", titlesAsc.get(1));
});
}
	// Disabled via @NotImplementedYet: case-insensitive sorting by select-item
	// position requires explicit select-list support in SelectionSpecification.
	@NotImplementedYet(reason = "Support for explicit select lists not implemented yet for SelectionSpecification")
	@Test void testAscendingDescendingBySelectElementCaseInsensitive(SessionFactoryScope scope) {
		scope.inSession(session -> {
			// Case-insensitive ascending sort on select item 2 (title).
			List<?> titlesAsc = SelectionSpecification.create(Object[].class, "select isbn, title from Book")
					.sort(asc(2).ignoringCase())
					.createQuery( session )
					.getResultList()
					.stream().map(book -> book[1])
					.toList();
			assertEquals("Hibernate in Action", titlesAsc.get(0));
			assertEquals("Java Persistence with Hibernate", titlesAsc.get(1));
			// Case-insensitive descending sort on item 2 — order reverses.
			List<?> titlesDesc = SelectionSpecification.create(Object[].class, "select isbn, title from Book")
					.sort(desc(2).ignoringCase())
					.createQuery( session )
					.getResultList()
					.stream().map(book -> book[1])
					.toList();
			assertEquals("Hibernate in Action", titlesDesc.get(1));
			assertEquals("Java Persistence with Hibernate", titlesDesc.get(0));
			// Item 1 (isbn) ascending takes precedence over item 2 (title) descending.
			List<?> isbnAsc = SelectionSpecification.create(Object[].class, "select isbn, title from Book")
					.sort(asc(1).ignoringCase())
					.sort(desc(2).ignoringCase())
					.createQuery( session )
					.getResultList()
					.stream().map(book -> book[1])
					.toList();
			assertEquals("Hibernate in Action", isbnAsc.get(1));
			assertEquals("Java Persistence with Hibernate", isbnAsc.get(0));
			// Item 1 (isbn) descending first.
			List<?> isbnDesc = SelectionSpecification.create(Object[].class, "select isbn, title from Book")
					.sort(desc(1).ignoringCase())
					.sort(desc(2).ignoringCase())
					.createQuery( session )
					.getResultList()
					.stream().map(book -> book[1])
					.toList();
			assertEquals("Hibernate in Action", isbnDesc.get(0));
			assertEquals("Java Persistence with Hibernate", isbnDesc.get(1));
		});
	}
	// Disabled via @NotImplementedYet: long-form by(position, direction, caseInsensitive)
	// over select-item positions requires explicit select-list support in SelectionSpecification.
	@NotImplementedYet(reason = "Support for explicit select lists not implemented yet for SelectionSpecification")
	@Test void testAscendingDescendingBySelectElementCaseInsensitiveLongForm(SessionFactoryScope scope) {
		scope.inSession(session -> {
			// Case-insensitive ascending sort on select item 2 (title).
			List<?> titlesAsc = SelectionSpecification.create(Object[].class, "select isbn, title from Book")
					.sort(by(2, ASCENDING, true))
					.createQuery( session )
					.getResultList()
					.stream().map(book -> book[1])
					.toList();
			assertEquals("Hibernate in Action", titlesAsc.get(0));
			assertEquals("Java Persistence with Hibernate", titlesAsc.get(1));
			// Case-insensitive descending sort on item 2 — order reverses.
			List<?> titlesDesc = SelectionSpecification.create(Object[].class, "select isbn, title from Book")
					.sort(by(2, DESCENDING, true))
					.createQuery( session )
					.getResultList()
					.stream().map(book -> book[1])
					.toList();
			assertEquals("Hibernate in Action", titlesDesc.get(1));
			assertEquals("Java Persistence with Hibernate", titlesDesc.get(0));
			// Item 1 (isbn) ascending takes precedence over item 2 (title) descending.
			List<?> isbnAsc = SelectionSpecification.create(Object[].class, "select isbn, title from Book")
					.sort(by(1, ASCENDING, true))
					.sort(by(2, DESCENDING, true))
					.createQuery( session )
					.getResultList()
					.stream().map(book -> book[1])
					.toList();
			assertEquals("Hibernate in Action", isbnAsc.get(1));
			assertEquals("Java Persistence with Hibernate", isbnAsc.get(0));
			// Item 1 (isbn) descending first.
			List<?> isbnDesc = SelectionSpecification.create(Object[].class, "select isbn, title from Book")
					.sort(by(1, DESCENDING, true))
					.sort(by(2, DESCENDING, true))
					.createQuery( session )
					.getResultList()
					.stream().map(book -> book[1])
					.toList();
			assertEquals("Hibernate in Action", isbnDesc.get(0));
			assertEquals("Java Persistence with Hibernate", isbnDesc.get(1));
		});
	}
@Entity(name="Book")
static
|
OrderTest
|
java
|
spring-projects__spring-framework
|
spring-web/src/main/java/org/springframework/web/util/ForwardedHeaderUtils.java
|
{
"start": 1342,
"end": 1833
}
|
class ____ but rather rely on
* {@link org.springframework.web.filter.ForwardedHeaderFilter} for Spring MVC or
* {@link org.springframework.web.server.adapter.ForwardedHeaderTransformer} in
* order to extract the information from the headers as early as possible and discard
* such headers. Underlying servers such as Tomcat, Jetty, and Reactor Netty also
* provide options to handle forwarded headers even earlier.
*
* @author Rossen Stoyanchev
* @since 6.1
*/
public abstract
|
directly
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/CompileTimeConstantCheckerTest.java
|
{
"start": 9016,
"end": 9758
}
|
class ____ {
public CompileTimeConstantTestCase(String s, @CompileTimeConstant String p) {}
public static CompileTimeConstantTestCase makeNew(String x) {
// BUG: Diagnostic contains: Non-compile-time constant expression passed
return new CompileTimeConstantTestCase("boo", x);
}
}
""")
.doTest();
}
@Test
public void matches_identCallSucceedsWithinCtorWithLiteral() {
compilationHelper
.addSourceLines(
"test/CompileTimeConstantTestCase.java",
"""
package test;
import com.google.errorprone.annotations.CompileTimeConstant;
public
|
CompileTimeConstantTestCase
|
java
|
apache__logging-log4j2
|
log4j-core/src/main/java/org/apache/logging/log4j/core/config/AbstractConfiguration.java
|
{
"start": 4061,
"end": 10491
}
|
class ____ extends AbstractFilterable implements Configuration {
private static final int BUF_SIZE = 16384;
/**
* The root node of the configuration.
*/
protected Node rootNode;
/**
* Listeners for configuration changes.
*/
protected final List<ConfigurationListener> listeners = new CopyOnWriteArrayList<>();
/**
* Packages found in configuration "packages" attribute.
*/
protected final List<String> pluginPackages = new ArrayList<>();
/**
* The plugin manager.
*/
protected PluginManager pluginManager;
/**
* Shutdown hook is enabled by default.
*/
protected boolean isShutdownHookEnabled = true;
/**
* Shutdown timeout in milliseconds.
*/
protected long shutdownTimeoutMillis;
/**
* The Script manager.
*/
protected ScriptManager scriptManager;
/**
* The Advertiser which exposes appender configurations to external systems.
*/
private Advertiser advertiser = new DefaultAdvertiser();
private Node advertiserNode;
private Object advertisement;
private String name;
private ConcurrentMap<String, Appender> appenders = new ConcurrentHashMap<>();
private ConcurrentMap<String, LoggerConfig> loggerConfigs = new ConcurrentHashMap<>();
private List<CustomLevelConfig> customLevels = Collections.emptyList();
private Set<MonitorResource> monitorResources = Collections.emptySet();
private final ConcurrentMap<String, String> propertyMap = new ConcurrentHashMap<>();
private final Interpolator tempLookup = new Interpolator(propertyMap);
private final StrSubstitutor runtimeStrSubstitutor = new RuntimeStrSubstitutor(tempLookup);
private final StrSubstitutor configurationStrSubstitutor = new ConfigurationStrSubstitutor(runtimeStrSubstitutor);
private LoggerConfig root = new LoggerConfig();
private final ConcurrentMap<String, Object> componentMap = new ConcurrentHashMap<>();
private final ConfigurationSource configurationSource;
private final ConfigurationScheduler configurationScheduler = new ConfigurationScheduler();
private final WatchManager watchManager = new WatchManager(configurationScheduler);
private AsyncLoggerConfigDisruptor asyncLoggerConfigDisruptor;
private AsyncWaitStrategyFactory asyncWaitStrategyFactory;
private NanoClock nanoClock = new DummyNanoClock();
private final WeakReference<LoggerContext> loggerContext;
/**
* Constructor.
*/
protected AbstractConfiguration(final LoggerContext loggerContext, final ConfigurationSource configurationSource) {
this.loggerContext = new WeakReference<>(loggerContext);
tempLookup.setLoggerContext(loggerContext);
// The loggerContext is null for the NullConfiguration class.
// this.loggerContext = new WeakReference(Objects.requireNonNull(loggerContext, "loggerContext is null"));
this.configurationSource = Objects.requireNonNull(configurationSource, "configurationSource is null");
componentMap.put(CONTEXT_PROPERTIES, propertyMap);
pluginManager = new PluginManager(Node.CATEGORY);
rootNode = new Node();
setState(State.INITIALIZING);
}
@Override
public ConfigurationSource getConfigurationSource() {
return configurationSource;
}
@Override
public List<String> getPluginPackages() {
return pluginPackages;
}
@Override
public Map<String, String> getProperties() {
return propertyMap;
}
@Override
public ScriptManager getScriptManager() {
return scriptManager;
}
public void setScriptManager(final ScriptManager scriptManager) {
this.scriptManager = scriptManager;
}
public PluginManager getPluginManager() {
return pluginManager;
}
public void setPluginManager(final PluginManager pluginManager) {
this.pluginManager = pluginManager;
}
@Override
public WatchManager getWatchManager() {
return watchManager;
}
@Override
public ConfigurationScheduler getScheduler() {
return configurationScheduler;
}
public Node getRootNode() {
return rootNode;
}
@Override
public AsyncLoggerConfigDelegate getAsyncLoggerConfigDelegate() {
// lazily instantiate only when requested by AsyncLoggers:
// loading AsyncLoggerConfigDisruptor requires LMAX Disruptor jar on classpath
if (asyncLoggerConfigDisruptor == null) {
asyncLoggerConfigDisruptor = new AsyncLoggerConfigDisruptor(asyncWaitStrategyFactory);
}
return asyncLoggerConfigDisruptor;
}
@Override
public AsyncWaitStrategyFactory getAsyncWaitStrategyFactory() {
return asyncWaitStrategyFactory;
}
/**
* Initialize the configuration.
*/
@Override
public void initialize() {
LOGGER.debug(Version.getProductString() + " initializing configuration {}", this);
runtimeStrSubstitutor.setConfiguration(this);
configurationStrSubstitutor.setConfiguration(this);
final String scriptLanguages = PropertiesUtil.getProperties().getStringProperty(Constants.SCRIPT_LANGUAGES);
if (scriptLanguages != null) {
try {
scriptManager = new ScriptManager(this, watchManager, scriptLanguages);
} catch (final LinkageError | Exception e) {
// LOG4J2-1920 ScriptEngineManager is not available in Android
LOGGER.info("Cannot initialize scripting support because this JRE does not support it.", e);
}
}
if (!pluginPackages.isEmpty()) {
LOGGER.warn("The use of package scanning to locate Log4j plugins is deprecated.\n"
+ "Please remove the `packages` attribute from your configuration file.\n"
+ "See https://logging.apache.org/log4j/2.x/faq.html#package-scanning for details.");
}
pluginManager.collectPlugins(pluginPackages);
final PluginManager levelPlugins = new PluginManager(Level.CATEGORY);
levelPlugins.collectPlugins(pluginPackages);
final Map<String, PluginType<?>> plugins = levelPlugins.getPlugins();
if (plugins != null) {
for (final PluginType<?> type : plugins.values()) {
try {
// Cause the
|
AbstractConfiguration
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/deser/CyclicTypesDeserTest.java
|
{
"start": 1097,
"end": 1174
}
|
class ____<T> {
public GenericLink<T> next;
}
static
|
GenericLink
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/querycache/EntityWithCollectionReloadCacheInheritanceTest.java
|
{
"start": 4369,
"end": 4413
}
|
class ____ extends Subject {
}
|
EnglishSubject
|
java
|
apache__camel
|
dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/AwsSecretsManagerComponentBuilderFactory.java
|
{
"start": 16693,
"end": 21312
}
|
class ____
extends AbstractComponentBuilder<SecretsManagerComponent>
implements AwsSecretsManagerComponentBuilder {
@Override
protected SecretsManagerComponent buildConcreteComponent() {
return new SecretsManagerComponent();
}
private org.apache.camel.component.aws.secretsmanager.SecretsManagerConfiguration getOrCreateConfiguration(SecretsManagerComponent component) {
if (component.getConfiguration() == null) {
component.setConfiguration(new org.apache.camel.component.aws.secretsmanager.SecretsManagerConfiguration());
}
return component.getConfiguration();
}
@Override
protected boolean setPropertyOnComponent(
Component component,
String name,
Object value) {
switch (name) {
case "binaryPayload": getOrCreateConfiguration((SecretsManagerComponent) component).setBinaryPayload((boolean) value); return true;
case "configuration": ((SecretsManagerComponent) component).setConfiguration((org.apache.camel.component.aws.secretsmanager.SecretsManagerConfiguration) value); return true;
case "lazyStartProducer": ((SecretsManagerComponent) component).setLazyStartProducer((boolean) value); return true;
case "operation": getOrCreateConfiguration((SecretsManagerComponent) component).setOperation((org.apache.camel.component.aws.secretsmanager.SecretsManagerOperations) value); return true;
case "overrideEndpoint": getOrCreateConfiguration((SecretsManagerComponent) component).setOverrideEndpoint((boolean) value); return true;
case "pojoRequest": getOrCreateConfiguration((SecretsManagerComponent) component).setPojoRequest((boolean) value); return true;
case "profileCredentialsName": getOrCreateConfiguration((SecretsManagerComponent) component).setProfileCredentialsName((java.lang.String) value); return true;
case "region": getOrCreateConfiguration((SecretsManagerComponent) component).setRegion((java.lang.String) value); return true;
case "uriEndpointOverride": getOrCreateConfiguration((SecretsManagerComponent) component).setUriEndpointOverride((java.lang.String) value); return true;
case "useProfileCredentialsProvider": getOrCreateConfiguration((SecretsManagerComponent) component).setUseProfileCredentialsProvider((boolean) value); return true;
case "autowiredEnabled": ((SecretsManagerComponent) component).setAutowiredEnabled((boolean) value); return true;
case "secretsManagerClient": getOrCreateConfiguration((SecretsManagerComponent) component).setSecretsManagerClient((software.amazon.awssdk.services.secretsmanager.SecretsManagerClient) value); return true;
case "healthCheckConsumerEnabled": ((SecretsManagerComponent) component).setHealthCheckConsumerEnabled((boolean) value); return true;
case "healthCheckProducerEnabled": ((SecretsManagerComponent) component).setHealthCheckProducerEnabled((boolean) value); return true;
case "proxyHost": getOrCreateConfiguration((SecretsManagerComponent) component).setProxyHost((java.lang.String) value); return true;
case "proxyPort": getOrCreateConfiguration((SecretsManagerComponent) component).setProxyPort((java.lang.Integer) value); return true;
case "proxyProtocol": getOrCreateConfiguration((SecretsManagerComponent) component).setProxyProtocol((software.amazon.awssdk.core.Protocol) value); return true;
case "accessKey": getOrCreateConfiguration((SecretsManagerComponent) component).setAccessKey((java.lang.String) value); return true;
case "secretKey": getOrCreateConfiguration((SecretsManagerComponent) component).setSecretKey((java.lang.String) value); return true;
case "sessionToken": getOrCreateConfiguration((SecretsManagerComponent) component).setSessionToken((java.lang.String) value); return true;
case "trustAllCertificates": getOrCreateConfiguration((SecretsManagerComponent) component).setTrustAllCertificates((boolean) value); return true;
case "useDefaultCredentialsProvider": getOrCreateConfiguration((SecretsManagerComponent) component).setUseDefaultCredentialsProvider((boolean) value); return true;
case "useSessionCredentials": getOrCreateConfiguration((SecretsManagerComponent) component).setUseSessionCredentials((boolean) value); return true;
default: return false;
}
}
}
}
|
AwsSecretsManagerComponentBuilderImpl
|
java
|
quarkusio__quarkus
|
extensions/tls-registry/deployment/src/test/java/io/quarkus/tls/P12KeyStoreWithAliasCredentialsProviderTest.java
|
{
"start": 1145,
"end": 2506
}
|
class ____ {
private static final String configuration = """
quarkus.tls.key-store.p12.path=target/certs/test-credentials-provider-alias-keystore.p12
quarkus.tls.key-store.p12.alias=my-alias
quarkus.tls.key-store.credentials-provider.name=tls
""";
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest().setArchiveProducer(
() -> ShrinkWrap.create(JavaArchive.class)
.addClass(MyCredentialProvider.class)
.add(new StringAsset(configuration), "application.properties"));
@Inject
TlsConfigurationRegistry certificates;
@Test
void test() throws KeyStoreException, CertificateParsingException {
TlsConfiguration def = certificates.getDefault().orElseThrow();
assertThat(def.getKeyStoreOptions()).isNotNull();
assertThat(def.getKeyStore()).isNotNull();
X509Certificate certificate = (X509Certificate) def.getKeyStore().getCertificate("my-alias");
assertThat(certificate).isNotNull();
assertThat(certificate.getSubjectAlternativeNames()).anySatisfy(l -> {
assertThat(l.get(0)).isEqualTo(2);
assertThat(l.get(1)).isEqualTo("dns:acme.org");
});
}
@ApplicationScoped
public static
|
P12KeyStoreWithAliasCredentialsProviderTest
|
java
|
redisson__redisson
|
redisson/src/test/java/org/redisson/RedisVersion.java
|
{
"start": 168,
"end": 1651
}
|
class ____ implements Comparable<RedisVersion>{
private final String fullVersion;
private final Integer majorVersion;
private final Integer minorVersion;
private final Integer patchVersion;
public RedisVersion(String fullVersion) {
this.fullVersion = fullVersion;
Matcher matcher = Pattern.compile("^([\\d]+)\\.([\\d]+)\\.([\\d]+)").matcher(fullVersion);
matcher.find();
majorVersion = Integer.parseInt(matcher.group(1));
minorVersion = Integer.parseInt(matcher.group(2));
patchVersion = Integer.parseInt(matcher.group(3));
}
public String getFullVersion() {
return fullVersion;
}
public int getMajorVersion() {
return majorVersion;
}
public int getMinorVersion() {
return minorVersion;
}
public int getPatchVersion() {
return patchVersion;
}
@Override
public int compareTo(RedisVersion o) {
int ma = this.majorVersion.compareTo(o.majorVersion);
int mi = this.minorVersion.compareTo(o.minorVersion);
int pa = this.patchVersion.compareTo(o.patchVersion);
return ma != 0 ? ma : mi != 0 ? mi : pa;
}
public int compareTo(String redisVersion) {
return this.compareTo(new RedisVersion(redisVersion));
}
public static int compareTo(String redisVersion1, String redisVersion2) {
return new RedisVersion(redisVersion1).compareTo(redisVersion2);
}
}
|
RedisVersion
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateRequestTests.java
|
{
"start": 1092,
"end": 2322
}
|
class ____ extends ESTestCase {
public void testAddingGlobalTemplateWithHiddenIndexSettingIsIllegal() {
Template template = new Template(Settings.builder().put(IndexMetadata.SETTING_INDEX_HIDDEN, true).build(), null, null);
ComposableIndexTemplate globalTemplate = ComposableIndexTemplate.builder().indexPatterns(List.of("*")).template(template).build();
TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request("test");
request.indexTemplate(globalTemplate);
SimulateTemplateAction.Request simulateRequest = new SimulateTemplateAction.Request(TEST_REQUEST_TIMEOUT, "testing");
simulateRequest.indexTemplateRequest(request);
ActionRequestValidationException validationException = simulateRequest.validate();
assertThat(validationException, is(notNullValue()));
List<String> validationErrors = validationException.validationErrors();
assertThat(validationErrors.size(), is(1));
String error = validationErrors.get(0);
assertThat(error, is("global composable templates may not specify the setting " + IndexMetadata.SETTING_INDEX_HIDDEN));
}
}
|
SimulateTemplateRequestTests
|
java
|
lettuce-io__lettuce-core
|
src/test/java/io/lettuce/core/support/caching/ClientsideCachingIntegrationTests.java
|
{
"start": 1274,
"end": 7264
}
|
class ____ extends TestSupport {
private final RedisClient redisClient;
@Inject
public ClientsideCachingIntegrationTests(RedisClient redisClient) {
this.redisClient = redisClient;
}
@BeforeEach
void setUp() {
try (StatefulRedisConnection<String, String> connection = redisClient.connect()) {
connection.sync().flushdb();
}
}
@Test
void clientCachingResp2() {
ClientOptions resp2 = ClientOptions.builder().protocolVersion(ProtocolVersion.RESP2).build();
redisClient.setOptions(resp2);
StatefulRedisConnection<String, String> data = redisClient.connect();
RedisCommands<String, String> commands = data.sync();
StatefulRedisPubSubConnection<String, String> pubSub = redisClient.connectPubSub();
List<String> invalidations = new CopyOnWriteArrayList<>();
commands.clientTracking(TrackingArgs.Builder.enabled().redirect(pubSub.sync().clientId()));
pubSub.addListener(new RedisPubSubAdapter<String, String>() {
@Override
public void message(String channel, String message) {
if (channel.equals("__redis__:invalidate")) {
invalidations.add(message);
}
}
});
pubSub.sync().subscribe("__redis__:invalidate");
commands.get("key1");
commands.get("key2");
assertThat(invalidations).isEmpty();
Map<String, String> keys = new HashMap<>();
keys.put("key1", "value1");
keys.put("key2", "value2");
commands.mset(keys);
Wait.untilEquals(2, invalidations::size).waitOrTimeout();
assertThat(invalidations).contains("key1", "key2");
data.close();
pubSub.close();
}
@Test
void clientCachingResp3() {
ClientOptions resp2 = ClientOptions.builder().protocolVersion(ProtocolVersion.RESP3).build();
redisClient.setOptions(resp2);
StatefulRedisConnection<String, String> data = redisClient.connect();
RedisCommands<String, String> commands = data.sync();
List<String> invalidations = new CopyOnWriteArrayList<>();
commands.clientTracking(TrackingArgs.Builder.enabled());
data.addListener(message -> {
if (message.getType().equals("invalidate")) {
invalidations.addAll((List) message.getContent(StringCodec.UTF8::decodeKey).get(1));
}
});
commands.get("key1");
commands.get("key2");
assertThat(invalidations).isEmpty();
Map<String, String> keys = new HashMap<>();
keys.put("key1", "value1");
keys.put("key2", "value2");
commands.mset(keys);
Wait.untilEquals(2, invalidations::size).waitOrTimeout();
assertThat(invalidations).contains("key1", "key2");
data.close();
}
@Test
void serverAssistedCachingShouldFetchValueFromRedis() {
Map<String, String> clientCache = new ConcurrentHashMap<>();
StatefulRedisConnection<String, String> otherParty = redisClient.connect();
RedisCommands<String, String> commands = otherParty.sync();
commands.set(key, value);
StatefulRedisConnection<String, String> connection = redisClient.connect();
CacheFrontend<String, String> frontend = ClientSideCaching.enable(CacheAccessor.forMap(clientCache), connection,
TrackingArgs.Builder.enabled().noloop());
assertThat(clientCache).isEmpty();
String shouldExist = frontend.get(key);
assertThat(shouldExist).isNotNull();
assertThat(clientCache).hasSize(1);
otherParty.close();
frontend.close();
}
@Test
void serverAssistedCachingShouldExpireValueFromRedis() throws InterruptedException {
Map<String, String> clientCache = new ConcurrentHashMap<>();
StatefulRedisConnection<String, String> otherParty = redisClient.connect();
RedisCommands<String, String> commands = otherParty.sync();
StatefulRedisConnection<String, String> connection = redisClient.connect();
CacheFrontend<String, String> frontend = ClientSideCaching.enable(CacheAccessor.forMap(clientCache), connection,
TrackingArgs.Builder.enabled());
// make sure value exists in Redis
// client-side cache is empty
commands.set(key, value);
// Read-through into Redis
String cachedValue = frontend.get(key);
assertThat(cachedValue).isNotNull();
// client-side cache holds the same value
assertThat(clientCache).hasSize(1);
// now, the key expires
commands.pexpire(key, 1);
// a while later
Thread.sleep(200);
// the expiration reflects in the client-side cache
assertThat(clientCache).isEmpty();
assertThat(frontend.get(key)).isNull();
otherParty.close();
frontend.close();
}
@Test
void serverAssistedCachingShouldUseValueLoader() throws InterruptedException {
Map<String, String> clientCache = new ConcurrentHashMap<>();
StatefulRedisConnection<String, String> otherParty = redisClient.connect();
RedisCommands<String, String> commands = otherParty.sync();
StatefulRedisConnection<String, String> connection = redisClient.connect();
CacheFrontend<String, String> frontend = ClientSideCaching.enable(CacheAccessor.forMap(clientCache), connection,
TrackingArgs.Builder.enabled().noloop());
String shouldLoad = frontend.get(key, () -> "myvalue");
assertThat(shouldLoad).isEqualTo("myvalue");
assertThat(clientCache).hasSize(1);
assertThat(commands.get(key)).isEqualTo("myvalue");
commands.set(key, value);
Thread.sleep(100);
assertThat(clientCache).isEmpty();
otherParty.close();
frontend.close();
}
}
|
ClientsideCachingIntegrationTests
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockBatchedDocumentsIterator.java
|
{
"start": 749,
"end": 2531
}
|
class ____<T> extends BatchedResultsIterator<T> {
private final List<Deque<Result<T>>> batches;
private int index;
private boolean wasTimeRangeCalled;
private Boolean includeInterim;
private Boolean requireIncludeInterim;
public MockBatchedDocumentsIterator(List<Deque<Result<T>>> batches, String resultType) {
super(MockOriginSettingClient.mockOriginSettingClient(mock(Client.class), ClientHelper.ML_ORIGIN), "foo", resultType);
this.batches = batches;
index = 0;
wasTimeRangeCalled = false;
}
@Override
public BatchedResultsIterator<T> timeRange(long startEpochMs, long endEpochMs) {
wasTimeRangeCalled = true;
return this;
}
@Override
public BatchedResultsIterator<T> includeInterim(boolean includeInterim) {
this.includeInterim = includeInterim;
return this;
}
@Override
public Deque<Result<T>> next() {
if (requireIncludeInterim != null && requireIncludeInterim != includeInterim) {
throw new IllegalStateException(
"Required include interim value [" + requireIncludeInterim + "]; actual was [" + includeInterim + "]"
);
}
if (wasTimeRangeCalled == false || hasNext() == false) {
throw new NoSuchElementException();
}
return batches.get(index++);
}
@Override
protected Result<T> map(SearchHit hit) {
return null;
}
@Override
public boolean hasNext() {
return index != batches.size();
}
@Nullable
public Boolean isIncludeInterim() {
return includeInterim;
}
public void requireIncludeInterim(boolean value) {
this.requireIncludeInterim = value;
}
}
|
MockBatchedDocumentsIterator
|
java
|
alibaba__nacos
|
cmdb/src/main/java/com/alibaba/nacos/cmdb/utils/Loggers.java
|
{
"start": 784,
"end": 901
}
|
class ____ {
public static final Logger MAIN = LoggerFactory.getLogger("com.alibaba.nacos.cmdb.main");
}
|
Loggers
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/context/annotation/TestScopeMetadataResolver.java
|
{
"start": 778,
"end": 1048
}
|
class ____ implements ScopeMetadataResolver {
@Override
public ScopeMetadata resolveScopeMetadata(BeanDefinition beanDefinition) {
ScopeMetadata metadata = new ScopeMetadata();
metadata.setScopeName("myCustomScope");
return metadata;
}
}
|
TestScopeMetadataResolver
|
java
|
spring-projects__spring-framework
|
spring-tx/src/main/java/org/springframework/dao/ConcurrencyFailureException.java
|
{
"start": 1078,
"end": 1594
}
|
class ____ extends TransientDataAccessException {
/**
* Constructor for ConcurrencyFailureException.
* @param msg the detail message
*/
public ConcurrencyFailureException(@Nullable String msg) {
super(msg);
}
/**
* Constructor for ConcurrencyFailureException.
* @param msg the detail message
* @param cause the root cause from the data access API in use
*/
public ConcurrencyFailureException(@Nullable String msg, @Nullable Throwable cause) {
super(msg, cause);
}
}
|
ConcurrencyFailureException
|
java
|
apache__flink
|
flink-table/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/result/ChangelogResult.java
|
{
"start": 1088,
"end": 1242
}
|
interface ____ extends DynamicResult {
/** Retrieves the available result records. */
TypedResult<List<RowData>> retrieveChanges();
}
|
ChangelogResult
|
java
|
netty__netty
|
resolver-dns/src/main/java/io/netty/resolver/dns/BiDnsQueryLifecycleObserver.java
|
{
"start": 1048,
"end": 3181
}
|
class ____ implements DnsQueryLifecycleObserver {
private final DnsQueryLifecycleObserver a;
private final DnsQueryLifecycleObserver b;
/**
* Create a new instance.
* @param a The {@link DnsQueryLifecycleObserver} that will receive events first.
* @param b The {@link DnsQueryLifecycleObserver} that will receive events second.
*/
public BiDnsQueryLifecycleObserver(DnsQueryLifecycleObserver a, DnsQueryLifecycleObserver b) {
this.a = checkNotNull(a, "a");
this.b = checkNotNull(b, "b");
}
@Override
public void queryWritten(InetSocketAddress dnsServerAddress, ChannelFuture future) {
try {
a.queryWritten(dnsServerAddress, future);
} finally {
b.queryWritten(dnsServerAddress, future);
}
}
@Override
public void queryCancelled(int queriesRemaining) {
try {
a.queryCancelled(queriesRemaining);
} finally {
b.queryCancelled(queriesRemaining);
}
}
@Override
public DnsQueryLifecycleObserver queryRedirected(List<InetSocketAddress> nameServers) {
try {
a.queryRedirected(nameServers);
} finally {
b.queryRedirected(nameServers);
}
return this;
}
@Override
public DnsQueryLifecycleObserver queryCNAMEd(DnsQuestion cnameQuestion) {
try {
a.queryCNAMEd(cnameQuestion);
} finally {
b.queryCNAMEd(cnameQuestion);
}
return this;
}
@Override
public DnsQueryLifecycleObserver queryNoAnswer(DnsResponseCode code) {
try {
a.queryNoAnswer(code);
} finally {
b.queryNoAnswer(code);
}
return this;
}
@Override
public void queryFailed(Throwable cause) {
try {
a.queryFailed(cause);
} finally {
b.queryFailed(cause);
}
}
@Override
public void querySucceed() {
try {
a.querySucceed();
} finally {
b.querySucceed();
}
}
}
|
BiDnsQueryLifecycleObserver
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/ObjectsHashCodePrimitiveTest.java
|
{
"start": 1816,
"end": 2091
}
|
class ____ {
void f() {
byte x = 3;
int y = Objects.hashCode(x);
}
}
""")
.addOutputLines(
"Test.java",
"""
import java.util.Objects;
|
Test
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java
|
{
"start": 3577,
"end": 28582
}
|
class ____ extends BucketsAggregator implements SizedBucketAggregator {
private static final Logger logger = LogManager.getLogger(CompositeAggregator.class);
private final int size;
private final List<String> sourceNames;
private final int[] reverseMuls;
private final MissingOrder[] missingOrders;
private final List<DocValueFormat> formats;
private final CompositeKey rawAfterKey;
private final CompositeValuesSourceConfig[] sourceConfigs;
private final SingleDimensionValuesSource<?>[] sources;
private final CompositeValuesCollectorQueue queue;
private final DateHistogramValuesSource[] innerSizedBucketAggregators;
private final List<Entry> entries = new ArrayList<>();
private AggregationExecutionContext currentAggCtx;
private RoaringDocIdSet.Builder docIdSetBuilder;
private BucketCollector deferredCollectors;
private boolean earlyTerminated;
CompositeAggregator(
String name,
AggregatorFactories factories,
AggregationContext aggCtx,
Aggregator parent,
Map<String, Object> metadata,
int size,
CompositeValuesSourceConfig[] sourceConfigs,
CompositeKey rawAfterKey
) throws IOException {
super(name, factories, aggCtx, parent, CardinalityUpperBound.MANY, metadata);
this.size = size;
this.sourceNames = Arrays.stream(sourceConfigs).map(CompositeValuesSourceConfig::name).toList();
this.reverseMuls = Arrays.stream(sourceConfigs).mapToInt(CompositeValuesSourceConfig::reverseMul).toArray();
this.missingOrders = Arrays.stream(sourceConfigs).map(CompositeValuesSourceConfig::missingOrder).toArray(MissingOrder[]::new);
this.formats = Arrays.stream(sourceConfigs).map(CompositeValuesSourceConfig::format).toList();
this.sources = new SingleDimensionValuesSource<?>[sourceConfigs.length];
// check that the provided size is not greater than the search.max_buckets setting
int bucketLimit = aggCtx.maxBuckets();
if (size > bucketLimit) {
logger.warn("Too many buckets (max [{}], count [{}])", bucketLimit, size);
throw new MultiBucketConsumerService.TooManyBucketsException(
"Trying to create too many buckets. Must be less than or equal"
+ " to: ["
+ bucketLimit
+ "] but was ["
+ size
+ "]. This limit can be set by changing the ["
+ MAX_BUCKET_SETTING.getKey()
+ "] cluster level setting.",
bucketLimit
);
}
this.sourceConfigs = sourceConfigs;
List<DateHistogramValuesSource> dateHistogramValuesSources = new ArrayList<>();
for (int i = 0; i < sourceConfigs.length; i++) {
this.sources[i] = sourceConfigs[i].createValuesSource(
aggCtx.bigArrays(),
aggCtx.searcher().getIndexReader(),
size,
this::addRequestCircuitBreakerBytes
);
if (this.sources[i] instanceof DateHistogramValuesSource) {
dateHistogramValuesSources.add((DateHistogramValuesSource) this.sources[i]);
}
}
this.innerSizedBucketAggregators = dateHistogramValuesSources.toArray(new DateHistogramValuesSource[0]);
this.queue = new CompositeValuesCollectorQueue(aggCtx.bigArrays(), sources, size, aggCtx.searcher().getIndexReader());
if (rawAfterKey != null) {
try {
this.queue.setAfterKey(rawAfterKey);
} catch (IllegalArgumentException ex) {
throw new ElasticsearchParseException(
"Cannot set after key in the composite aggregation [" + name + "] - " + ex.getMessage(),
ex
);
}
}
this.rawAfterKey = rawAfterKey;
}
@Override
protected void doClose() {
try {
Releasables.close(queue);
} finally {
if (sources != null) {
Releasables.close(sources);
}
}
}
@Override
public ScoreMode scoreMode() {
if (queue.mayDynamicallyPrune()) {
return super.scoreMode().needsScores() ? ScoreMode.TOP_DOCS_WITH_SCORES : ScoreMode.TOP_DOCS;
}
return super.scoreMode();
}
@Override
protected void doPreCollection() throws IOException {
deferredCollectors = MultiBucketCollector.wrap(false, Arrays.asList(subAggregators));
collectableSubAggregators = BucketCollector.NO_OP_BUCKET_COLLECTOR;
}
@Override
protected void doPostCollection() throws IOException {
finishLeaf();
}
@Override
public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException {
// Composite aggregator must be at the top of the aggregation tree
assert owningBucketOrds.size() == 1 && owningBucketOrds.get(0) == 0L;
if (deferredCollectors != NO_OP_BUCKET_COLLECTOR) {
// Replay all documents that contain at least one top bucket (collected during the first pass).
runDeferredCollections();
}
final int num = Math.min(size, (int) queue.size());
final InternalComposite.InternalBucket[] buckets = new InternalComposite.InternalBucket[num];
try (LongArray bucketOrdsToCollect = bigArrays().newLongArray(queue.size())) {
for (int i = 0; i < queue.size(); i++) {
bucketOrdsToCollect.set(i, i);
}
var subAggsForBuckets = buildSubAggsForBuckets(bucketOrdsToCollect);
while (queue.size() > 0) {
int slot = queue.pop();
CompositeKey key = queue.toCompositeKey(slot);
InternalAggregations aggs = subAggsForBuckets.apply(slot);
long docCount = queue.getDocCount(slot);
buckets[(int) queue.size()] = new InternalComposite.InternalBucket(sourceNames, formats, key, docCount, aggs);
}
CompositeKey lastBucket = num > 0 ? buckets[num - 1].getRawKey() : null;
return new InternalAggregation[] {
new InternalComposite(
name,
size,
sourceNames,
formats,
Arrays.asList(buckets),
lastBucket,
reverseMuls,
missingOrders,
earlyTerminated,
metadata()
) };
}
}
@Override
public InternalAggregation buildEmptyAggregation() {
return new InternalComposite(
name,
size,
sourceNames,
formats,
Collections.emptyList(),
null,
reverseMuls,
missingOrders,
false,
metadata()
);
}
private void finishLeaf() {
if (currentAggCtx != null) {
DocIdSet docIdSet = docIdSetBuilder.build();
entries.add(new Entry(currentAggCtx, docIdSet));
currentAggCtx = null;
docIdSetBuilder = null;
}
}
/** Return true if the provided field may have multiple values per document in the leaf **/
private static boolean isMaybeMultivalued(LeafReaderContext context, SortField sortField) throws IOException {
SortField.Type type = IndexSortConfig.getSortFieldType(sortField);
return switch (type) {
case STRING -> {
final SortedSetDocValues v1 = context.reader().getSortedSetDocValues(sortField.getField());
yield v1 != null && DocValues.unwrapSingleton(v1) == null;
}
case DOUBLE, FLOAT, LONG, INT -> {
final SortedNumericDocValues v2 = context.reader().getSortedNumericDocValues(sortField.getField());
yield v2 != null && DocValues.unwrapSingleton(v2) == null;
}
default ->
// we have no clue whether the field is multi-valued or not so we assume it is.
true;
};
}
/**
* Returns the {@link Sort} prefix that is eligible to index sort
* optimization and null if index sort is not applicable.
*/
private Sort buildIndexSortPrefix(LeafReaderContext context) throws IOException {
Sort indexSort = context.reader().getMetaData().sort();
if (indexSort == null) {
return null;
}
List<SortField> sortFields = new ArrayList<>();
int end = Math.min(indexSort.getSort().length, sourceConfigs.length);
for (int i = 0; i < end; i++) {
CompositeValuesSourceConfig sourceConfig = sourceConfigs[i];
SingleDimensionValuesSource<?> source = sources[i];
SortField indexSortField = indexSort.getSort()[i];
if (source.fieldType == null
// TODO: can we handle missing bucket when using index sort optimization ?
|| source.missingBucket
|| indexSortField.getField().equals(source.fieldType.name()) == false
|| isMaybeMultivalued(context, indexSortField)
|| sourceConfig.hasScript()) {
break;
}
if (indexSortField.getReverse() != (source.reverseMul == -1)) {
if (i == 0) {
// the leading index sort matches the leading source field but the order is reversed
// so we don't check the other sources.
return new Sort(indexSortField);
}
break;
}
sortFields.add(indexSortField);
if (sourceConfig.valuesSource() instanceof RoundingValuesSource) {
// the rounding "squashes" many values together, that breaks the ordering of sub-values
// so we ignore subsequent source even if they match the index sort.
break;
}
}
return sortFields.isEmpty() ? null : new Sort(sortFields.toArray(new SortField[0]));
}
/**
* Return the number of leading sources that match the index sort.
*
* @param indexSortPrefix The index sort prefix that matches the sources
* @return The length of the index sort prefix if the sort order matches
* or -1 if the leading index sort is in the reverse order of the
* leading source. A value of 0 indicates that the index sort is
* not applicable.
*/
private int computeSortPrefixLen(Sort indexSortPrefix) {
if (indexSortPrefix == null) {
return 0;
}
if (indexSortPrefix.getSort()[0].getReverse() != (sources[0].reverseMul == -1)) {
assert indexSortPrefix.getSort().length == 1;
return -1;
} else {
return indexSortPrefix.getSort().length;
}
}
/**
* Rewrites the provided {@link Sort} to apply rounding on {@link SortField} that target
* {@link RoundingValuesSource}.
*/
private Sort applySortFieldRounding(Sort sort) {
SortField[] sortFields = new SortField[sort.getSort().length];
for (int i = 0; i < sort.getSort().length; i++) {
if (sourceConfigs[i].valuesSource() instanceof RoundingValuesSource) {
LongUnaryOperator round = ((RoundingValuesSource) sourceConfigs[i].valuesSource())::round;
final SortedNumericSortField delegate = (SortedNumericSortField) sort.getSort()[i];
sortFields[i] = new SortedNumericSortField(delegate.getField(), delegate.getNumericType(), delegate.getReverse()) {
@Override
public boolean equals(Object obj) {
return delegate.equals(obj);
}
@Override
public int hashCode() {
return delegate.hashCode();
}
@Override
public FieldComparator<?> getComparator(int numHits, Pruning enableSkipping) {
return new LongComparator(1, delegate.getField(), (Long) missingValue, delegate.getReverse(), Pruning.NONE) {
@Override
public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException {
return new LongLeafComparator(context) {
@Override
protected NumericDocValues getNumericDocValues(LeafReaderContext context, String field)
throws IOException {
NumericDocValues dvs = SortedNumericSelector.wrap(
DocValues.getSortedNumeric(context.reader(), field),
delegate.getSelector(),
delegate.getNumericType()
);
return new NumericDocValues() {
@Override
public long longValue() throws IOException {
return round.applyAsLong(dvs.longValue());
}
@Override
public boolean advanceExact(int target) throws IOException {
return dvs.advanceExact(target);
}
@Override
public int docID() {
return dvs.docID();
}
@Override
public int nextDoc() throws IOException {
return dvs.nextDoc();
}
@Override
public int advance(int target) throws IOException {
return dvs.advance(target);
}
@Override
public long cost() {
return dvs.cost();
}
};
}
};
}
};
}
};
} else {
sortFields[i] = sort.getSort()[i];
}
}
return new Sort(sortFields);
}
private void processLeafFromQuery(LeafReaderContext ctx, Sort indexSortPrefix) throws IOException {
DocValueFormat[] formats = new DocValueFormat[indexSortPrefix.getSort().length];
for (int i = 0; i < formats.length; i++) {
formats[i] = sources[i].format;
}
FieldDoc fieldDoc = SearchAfterBuilder.buildFieldDoc(
new SortAndFormats(indexSortPrefix, formats),
Arrays.copyOfRange(rawAfterKey.values(), 0, formats.length),
null
);
if (indexSortPrefix.getSort().length < sources.length) {
// include all docs that belong to the partial bucket
fieldDoc.doc = -1;
}
BooleanQuery newQuery = new BooleanQuery.Builder().add(topLevelQuery(), BooleanClause.Occur.MUST)
.add(new SearchAfterSortedDocQuery(applySortFieldRounding(indexSortPrefix), fieldDoc), BooleanClause.Occur.FILTER)
.build();
Weight weight = searcher().createWeight(searcher().rewrite(newQuery), ScoreMode.COMPLETE_NO_SCORES, 1f);
Scorer scorer = weight.scorer(ctx);
if (scorer != null) {
DocIdSetIterator docIt = scorer.iterator();
final LeafBucketCollector inner = queue.getLeafCollector(
ctx,
getFirstPassCollector(docIdSetBuilder, indexSortPrefix.getSort().length)
);
inner.setScorer(scorer);
final Bits liveDocs = ctx.reader().getLiveDocs();
while (docIt.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
if (liveDocs == null || liveDocs.get(docIt.docID())) {
inner.collect(docIt.docID());
}
}
}
}
@Override
protected LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx, LeafBucketCollector sub) throws IOException {
finishLeaf();
boolean fillDocIdSet = deferredCollectors != NO_OP_BUCKET_COLLECTOR;
Sort indexSortPrefix = buildIndexSortPrefix(aggCtx.getLeafReaderContext());
int sortPrefixLen = computeSortPrefixLen(indexSortPrefix);
SortedDocsProducer sortedDocsProducer = (sortPrefixLen == 0 && parent == null)
? sources[0].createSortedDocsProducerOrNull(aggCtx.getLeafReaderContext().reader(), topLevelQuery())
: null;
if (sortedDocsProducer != null) {
// Visit documents sorted by the leading source of the composite definition and terminates
// when the leading source value is guaranteed to be greater than the lowest composite bucket
// in the queue.
DocIdSet docIdSet = sortedDocsProducer.processLeaf(queue, aggCtx.getLeafReaderContext(), fillDocIdSet);
if (fillDocIdSet) {
entries.add(new Entry(aggCtx, docIdSet));
}
// We can bypass search entirely for this segment, the processing is done in the previous call.
// Throwing this exception will terminate the execution of the search for this root aggregation,
// see {@link MultiCollector} for more details on how we handle early termination in aggregations.
earlyTerminated = true;
return LeafBucketCollector.NO_OP_COLLECTOR;
} else {
if (fillDocIdSet) {
currentAggCtx = aggCtx;
docIdSetBuilder = new RoaringDocIdSet.Builder(aggCtx.getLeafReaderContext().reader().maxDoc());
}
if (rawAfterKey != null && sortPrefixLen > 0) {
// We have an after key and index sort is applicable so we jump directly to the doc
// that is after the index sort prefix using the rawAfterKey and we start collecting
// document from there.
try {
processLeafFromQuery(aggCtx.getLeafReaderContext(), indexSortPrefix);
} catch (CollectionTerminatedException e) {
/*
* Signal that there isn't anything to collect. We're going
* to return noop collector anyway so we can ignore it.
*/
}
return LeafBucketCollector.NO_OP_COLLECTOR;
} else {
final LeafBucketCollector inner;
try {
inner = queue.getLeafCollector(aggCtx.getLeafReaderContext(), getFirstPassCollector(docIdSetBuilder, sortPrefixLen));
} catch (CollectionTerminatedException e) {
return LeafBucketCollector.NO_OP_COLLECTOR;
}
return new LeafBucketCollector() {
@Override
public void collect(int doc, long zeroBucket) throws IOException {
assert zeroBucket == 0L;
inner.collect(doc);
}
@Override
public DocIdSetIterator competitiveIterator() throws IOException {
if (queue.mayDynamicallyPrune()) {
return inner.competitiveIterator();
} else {
return null;
}
}
};
}
}
}
/**
* The first pass selects the top composite buckets from all matching documents.
*/
private LeafBucketCollector getFirstPassCollector(RoaringDocIdSet.Builder builder, int indexSortPrefix) {
return new LeafBucketCollector() {
int lastDoc = -1;
@Override
public void collect(int doc, long bucket) throws IOException {
try {
int docCount = docCountProvider.getDocCount(doc);
if (queue.addIfCompetitive(indexSortPrefix, docCount)) {
if (builder != null && lastDoc != doc) {
builder.add(doc);
lastDoc = doc;
}
}
} catch (CollectionTerminatedException exc) {
earlyTerminated = true;
throw exc;
}
}
};
}
/**
* Replay the documents that might contain a top bucket and pass top buckets to
* the {@link #deferredCollectors}.
*/
private void runDeferredCollections() throws IOException {
final boolean needsScores = scoreMode().needsScores();
Weight weight = null;
if (needsScores) {
weight = searcher().createWeight(searcher().rewrite(topLevelQuery()), ScoreMode.COMPLETE, 1f);
}
deferredCollectors.preCollection();
for (Entry entry : entries) {
DocIdSetIterator docIdSetIterator = entry.docIdSet.iterator();
if (docIdSetIterator == null) {
continue;
}
final LeafBucketCollector subCollector = deferredCollectors.getLeafCollector(entry.aggCtx);
final LeafBucketCollector collector = queue.getLeafCollector(
entry.aggCtx.getLeafReaderContext(),
getSecondPassCollector(subCollector)
);
DocIdSetIterator scorerIt = null;
if (needsScores) {
Scorer scorer = weight.scorer(entry.aggCtx.getLeafReaderContext());
if (scorer != null) {
scorerIt = scorer.iterator();
subCollector.setScorer(scorer);
}
}
int docID;
while ((docID = docIdSetIterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
if (needsScores) {
assert scorerIt != null && scorerIt.docID() < docID;
scorerIt.advance(docID);
// aggregations should only be replayed on matching documents
assert scorerIt.docID() == docID;
}
collector.collect(docID);
}
}
deferredCollectors.postCollection();
}
/**
* Replay the top buckets from the matching documents.
*/
private LeafBucketCollector getSecondPassCollector(LeafBucketCollector subCollector) {
return new LeafBucketCollector() {
@Override
public void collect(int doc, long zeroBucket) throws IOException {
assert zeroBucket == 0;
Integer slot = queue.compareCurrent();
if (slot != null) {
// The candidate key is a top bucket.
// We can defer the collection of this document/bucket to the sub collector
subCollector.collect(doc, slot);
}
}
};
}
@Override
public double bucketSize(long bucket, Rounding.DateTimeUnit unit) {
if (innerSizedBucketAggregators.length != 1) {
throw AggregationErrors.rateWithoutDateHistogram(name());
}
return innerSizedBucketAggregators[0].bucketSize(bucket, unit);
}
@Override
public double bucketSize(Rounding.DateTimeUnit unit) {
if (innerSizedBucketAggregators.length != 1) {
throw AggregationErrors.rateWithoutDateHistogram(name());
}
return innerSizedBucketAggregators[0].bucketSize(unit);
}
@Override
public void collectDebugInfo(BiConsumer<String, Object> add) {
super.collectDebugInfo(add);
if (sources[0] instanceof GlobalOrdinalValuesSource globalOrdinalValuesSource) {
globalOrdinalValuesSource.collectDebugInfo(Strings.format("sources.%s", sourceConfigs[0].name()), add);
}
}
private record Entry(AggregationExecutionContext aggCtx, DocIdSet docIdSet) {}
}
|
CompositeAggregator
|
java
|
square__retrofit
|
samples/src/main/java/com/example/retrofit/AnnotatedConverters.java
|
{
"start": 3608,
"end": 3685
}
|
interface ____ {}
@Default(value = DefaultType.FIELD)
static final
|
SimpleXml
|
java
|
quarkusio__quarkus
|
extensions/quartz/deployment/src/test/java/io/quarkus/quartz/test/delayedexecution/DelayedExecutionTest.java
|
{
"start": 935,
"end": 1519
}
|
class ____ {
static final CountDownLatch LATCH = new CountDownLatch(1);
static final CountDownLatch EVENT_LATCH = new CountDownLatch(1);
@Scheduled(identity = "foo", every = "1s", executionMaxDelay = "500ms")
static void everySecond() {
LATCH.countDown();
}
void onDelay(@Observes DelayedExecution delayedExecution) {
assertTrue(delayedExecution.getDelay() < 500);
assertEquals("foo", delayedExecution.getExecution().getTrigger().getId());
EVENT_LATCH.countDown();
}
}
}
|
Jobs
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
|
{
"start": 2484,
"end": 2697
}
|
interface ____ an Application in the ResourceManager. Take a
* look at {@link RMAppImpl} for its implementation. This interface
* exposes methods to access various updates in application status/report.
*/
public
|
to
|
java
|
quarkusio__quarkus
|
core/deployment/src/main/java/io/quarkus/deployment/ide/IdeRunningProcessBuildItem.java
|
{
"start": 180,
"end": 457
}
|
class ____ extends SimpleBuildItem {
private final Set<Ide> detectedIDEs;
IdeRunningProcessBuildItem(Set<Ide> detectedIDEs) {
this.detectedIDEs = detectedIDEs;
}
Set<Ide> getDetectedIDEs() {
return detectedIDEs;
}
}
|
IdeRunningProcessBuildItem
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/sql/results/internal/InitializersList.java
|
{
"start": 1692,
"end": 3724
}
|
class ____ {
private final ArrayList<Initializer<?>> initializers;
int nonCollectionInitializersNum = 0;
int resolveFirstNum = 0;
public Builder() {
initializers = new ArrayList<>();
}
public Builder(int size) {
initializers = new ArrayList<>( size );
}
public void addInitializer(final Initializer<?> initializer) {
initializers.add( initializer );
//in this method we perform these checks merely to learn the sizing hints,
//so to not need dynamically scaling collections.
//This implies performing both checks twice but since they're cheap it's preferrable
//to multiple allocations; not least this allows using arrays, which makes iteration
//cheaper during the row processing - which is very hot.
if ( !initializer.isCollectionInitializer() ) {
nonCollectionInitializersNum++;
}
if ( initializeFirst( initializer ) ) {
resolveFirstNum++;
}
}
private static boolean initializeFirst(final Initializer<?> initializer) {
return initializer instanceof EntityInitializerImpl;
}
public InitializersList build() {
final int size = initializers.size();
final Initializer<?>[] sortedForResolveInstance = new Initializer<?>[size];
int resolveFirstIdx = 0;
int resolveLaterIdx = resolveFirstNum;
final Initializer<?>[] originalSortInitializers = toArray( initializers );
for ( Initializer<?> initializer : originalSortInitializers ) {
if ( initializeFirst( initializer ) ) {
sortedForResolveInstance[resolveFirstIdx++] = initializer;
}
else {
sortedForResolveInstance[resolveLaterIdx++] = initializer;
}
}
final boolean hasCollectionInitializers = ( nonCollectionInitializersNum != initializers.size() );
return new InitializersList(
originalSortInitializers,
sortedForResolveInstance,
hasCollectionInitializers
);
}
private Initializer<?>[] toArray(final ArrayList<Initializer<?>> initializers) {
return initializers.toArray( new Initializer<?>[initializers.size()] );
}
}
}
|
Builder
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.