language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | spring-projects__spring-security | core/src/main/java/org/springframework/security/core/SpringSecurityCoreVersion.java | {
"start": 988,
"end": 1125
} | class ____ for checking version compatibility in a deployed application.
*
* @author Luke Taylor
* @author Rob Winch
*/
public final | used |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/internal/subscriptions/AsyncSubscriptionTest.java | {
"start": 953,
"end": 4794
} | class ____ extends RxJavaTest {
@Test
public void noResource() {
AsyncSubscription as = new AsyncSubscription();
Subscription s = mock(Subscription.class);
as.setSubscription(s);
as.request(1);
as.cancel();
verify(s).request(1);
verify(s).cancel();
}
@Test
public void requestBeforeSet() {
AsyncSubscription as = new AsyncSubscription();
Subscription s = mock(Subscription.class);
as.request(1);
as.setSubscription(s);
as.cancel();
verify(s).request(1);
verify(s).cancel();
}
@Test
public void cancelBeforeSet() {
AsyncSubscription as = new AsyncSubscription();
Subscription s = mock(Subscription.class);
as.request(1);
as.cancel();
as.setSubscription(s);
verify(s, never()).request(1);
verify(s).cancel();
}
@Test
public void singleSet() {
AsyncSubscription as = new AsyncSubscription();
Subscription s = mock(Subscription.class);
as.setSubscription(s);
Subscription s1 = mock(Subscription.class);
as.setSubscription(s1);
assertSame(as.actual.get(), s);
verify(s1).cancel();
}
@Test
public void initialResource() {
Disposable r = mock(Disposable.class);
AsyncSubscription as = new AsyncSubscription(r);
as.cancel();
verify(r).dispose();
}
@Test
public void setResource() {
AsyncSubscription as = new AsyncSubscription();
Disposable r = mock(Disposable.class);
assertTrue(as.setResource(r));
as.cancel();
verify(r).dispose();
}
@Test
public void replaceResource() {
AsyncSubscription as = new AsyncSubscription();
Disposable r = mock(Disposable.class);
assertTrue(as.replaceResource(r));
as.cancel();
verify(r).dispose();
}
@Test
public void setResource2() {
AsyncSubscription as = new AsyncSubscription();
Disposable r = mock(Disposable.class);
assertTrue(as.setResource(r));
Disposable r2 = mock(Disposable.class);
assertTrue(as.setResource(r2));
as.cancel();
verify(r).dispose();
verify(r2).dispose();
}
@Test
public void replaceResource2() {
AsyncSubscription as = new AsyncSubscription();
Disposable r = mock(Disposable.class);
assertTrue(as.replaceResource(r));
Disposable r2 = mock(Disposable.class);
as.replaceResource(r2);
as.cancel();
verify(r, never()).dispose();
verify(r2).dispose();
}
@Test
public void setResourceAfterCancel() {
AsyncSubscription as = new AsyncSubscription();
as.cancel();
Disposable r = mock(Disposable.class);
as.setResource(r);
verify(r).dispose();
}
@Test
public void replaceResourceAfterCancel() {
AsyncSubscription as = new AsyncSubscription();
as.cancel();
Disposable r = mock(Disposable.class);
as.replaceResource(r);
verify(r).dispose();
}
@Test
public void cancelOnce() {
Disposable r = mock(Disposable.class);
AsyncSubscription as = new AsyncSubscription(r);
Subscription s = mock(Subscription.class);
as.setSubscription(s);
as.cancel();
as.cancel();
as.cancel();
verify(s, never()).request(anyLong());
verify(s).cancel();
verify(r).dispose();
}
@Test
public void disposed() {
AsyncSubscription as = new AsyncSubscription();
assertFalse(as.isDisposed());
as.dispose();
assertTrue(as.isDisposed());
}
}
| AsyncSubscriptionTest |
java | apache__camel | components/camel-telemetry/src/main/java/org/apache/camel/telemetry/decorators/AbstractHttpSpanDecorator.java | {
"start": 1057,
"end": 4054
} | class ____ extends AbstractSpanDecorator {
public static final String POST_METHOD = "POST";
public static final String GET_METHOD = "GET";
public String getHttpMethod(Exchange exchange, Endpoint endpoint) {
// 1. Use method provided in header.
Object method = exchange.getIn().getHeader(Exchange.HTTP_METHOD);
if (method instanceof String) {
return (String) method;
} else if (method instanceof Enum) {
return ((Enum<?>) method).name();
} else if (method != null) {
return exchange.getContext().getTypeConverter().tryConvertTo(String.class, exchange, method);
}
// 2. GET if query string is provided in header.
if (exchange.getIn().getHeader(Exchange.HTTP_QUERY) != null) {
return GET_METHOD;
}
// 3. GET if endpoint is configured with a query string.
if (endpoint.getEndpointUri().indexOf('?') != -1) {
return GET_METHOD;
}
// 4. POST if there is data to send (body is not null).
if (exchange.getIn().getBody() != null) {
return POST_METHOD;
}
// 5. GET otherwise.
return GET_METHOD;
}
@Override
public String getOperationName(Exchange exchange, Endpoint endpoint) {
// Based on HTTP component documentation:
return getHttpMethod(exchange, endpoint);
}
@Override
public void beforeTracingEvent(Span span, Exchange exchange, Endpoint endpoint) {
super.beforeTracingEvent(span, exchange, endpoint);
String httpUrl = getHttpURL(exchange, endpoint);
if (httpUrl != null) {
span.setTag(TagConstants.HTTP_URL, httpUrl);
}
span.setTag(TagConstants.HTTP_METHOD, getHttpMethod(exchange, endpoint));
}
protected String getHttpURL(Exchange exchange, Endpoint endpoint) {
Object url = exchange.getIn().getHeader(Exchange.HTTP_URL);
if (url instanceof String) {
return (String) url;
} else {
Object uri = exchange.getIn().getHeader(Exchange.HTTP_URI);
if (uri instanceof String) {
return (String) uri;
} else {
// Try to obtain from endpoint
int index = endpoint.getEndpointUri().lastIndexOf("http:");
if (index != -1) {
return endpoint.getEndpointUri().substring(index);
}
}
}
return null;
}
@Override
public void afterTracingEvent(Span span, Exchange exchange) {
super.afterTracingEvent(span, exchange);
Message message = exchange.getMessage();
if (message != null) {
Integer responseCode = message.getHeader(Exchange.HTTP_RESPONSE_CODE, Integer.class);
if (responseCode != null) {
span.setTag(TagConstants.HTTP_STATUS, responseCode.toString());
}
}
}
}
| AbstractHttpSpanDecorator |
java | apache__flink | flink-core-api/src/main/java/org/apache/flink/api/common/watermark/WatermarkDeclarations.java | {
"start": 1488,
"end": 2001
} | class ____ {
protected final String identifier;
WatermarkDeclarationBuilder(String identifier) {
this.identifier = identifier;
}
public LongWatermarkDeclarationBuilder typeLong() {
return new LongWatermarkDeclarationBuilder(identifier);
}
public BoolWatermarkDeclarationBuilder typeBool() {
return new BoolWatermarkDeclarationBuilder(identifier);
}
@Experimental
public static | WatermarkDeclarationBuilder |
java | quarkusio__quarkus | extensions/qute/deployment/src/main/java/io/quarkus/qute/deployment/QuteProcessor.java | {
"start": 112597,
"end": 114705
} | class
____ initialPriority = -1000 + existingValueResolvers.allGlobals.size();
TemplateGlobalGenerator globalGenerator = new TemplateGlobalGenerator(classOutput, GLOBAL_NAMESPACE,
initialPriority, index);
Map<DotName, Map<String, AnnotationTarget>> classToTargets = new LinkedHashMap<>();
Map<DotName, List<TemplateGlobalBuildItem>> classToGlobals = templateGlobals.stream()
.sorted(Comparator.comparing(g -> g.getDeclaringClass()))
.collect(Collectors.groupingBy(TemplateGlobalBuildItem::getDeclaringClass, LinkedHashMap::new,
Collectors.toList()));
for (Entry<DotName, List<TemplateGlobalBuildItem>> entry : classToGlobals.entrySet()) {
classToTargets.put(entry.getKey(), entry.getValue().stream().collect(
Collectors.toMap(TemplateGlobalBuildItem::getName, TemplateGlobalBuildItem::getTarget)));
}
for (Entry<DotName, Map<String, AnnotationTarget>> e : classToTargets.entrySet()) {
String generatedClass = existingValueResolvers.getGeneratedGlobalClass(e.getKey());
if (generatedClass != null) {
generatedGlobals.add(generatedClass);
} else {
generatedClass = globalGenerator.generate(index.getClassByName(e.getKey()), e.getValue());
}
existingValueResolvers.addGlobal(e.getKey(), generatedClass, applicationClassPredicate);
}
generatedGlobals.addAll(globalGenerator.getGeneratedTypes());
for (String globalType : generatedGlobals) {
globalProviders.produce(new TemplateGlobalProviderBuildItem(globalType));
reflectiveClass.produce(ReflectiveClassBuildItem.builder(globalType).build());
}
}
}
/**
* Tracks non-application value resolvers that have already been generated. There is no need to spend time
* generating them again on a hot reload.
*/
static | int |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/DeadLetterChannelUnmarshalSetHeaderTest.java | {
"start": 2175,
"end": 2540
} | class ____ extends ServiceSupport implements DataFormat {
@Override
public void marshal(Exchange exchange, Object graph, OutputStream stream) {
// noop
}
@Override
public Object unmarshal(Exchange exchange, InputStream stream) {
throw new IllegalArgumentException("Damn");
}
}
}
| MyDataFormat |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/DynamicRouterComponentBuilderFactory.java | {
"start": 1473,
"end": 2072
} | interface ____ {
/**
* Dynamic Router (camel-dynamic-router)
* The Dynamic Router component routes exchanges to recipients, and the
* recipients (and their rules) may change at runtime.
*
* Category: messaging,core
* Since: 3.15
* Maven coordinates: org.apache.camel:camel-dynamic-router
*
* @return the dsl builder
*/
static DynamicRouterComponentBuilder dynamicRouter() {
return new DynamicRouterComponentBuilderImpl();
}
/**
* Builder for the Dynamic Router component.
*/
| DynamicRouterComponentBuilderFactory |
java | spring-projects__spring-security | crypto/src/main/java/org/springframework/security/crypto/scrypt/SCryptPasswordEncoder.java | {
"start": 2185,
"end": 7987
} | class ____ extends AbstractValidatingPasswordEncoder {
private static final int DEFAULT_CPU_COST = 65536;
private static final int DEFAULT_MEMORY_COST = 8;
private static final int DEFAULT_PARALLELISM = 1;
private static final int DEFAULT_KEY_LENGTH = 32;
private static final int DEFAULT_SALT_LENGTH = 16;
private final Log logger = LogFactory.getLog(getClass());
private final int cpuCost;
private final int memoryCost;
private final int parallelization;
private final int keyLength;
private final BytesKeyGenerator saltGenerator;
/**
* Constructs a SCrypt password encoder with the provided parameters.
* @param cpuCost cpu cost of the algorithm (as defined in scrypt this is N). must be
* power of 2 greater than 1. Default is currently 65,536 or 2^16)
* @param memoryCost memory cost of the algorithm (as defined in scrypt this is r)
* Default is currently 8.
* @param parallelization the parallelization of the algorithm (as defined in scrypt
* this is p) Default is currently 1. Note that the implementation does not currently
* take advantage of parallelization.
* @param keyLength key length for the algorithm (as defined in scrypt this is dkLen).
* The default is currently 32.
* @param saltLength salt length (as defined in scrypt this is the length of S). The
* default is currently 16.
*/
public SCryptPasswordEncoder(int cpuCost, int memoryCost, int parallelization, int keyLength, int saltLength) {
if (cpuCost <= 1) {
throw new IllegalArgumentException("Cpu cost parameter must be > 1.");
}
if (memoryCost == 1 && cpuCost > 65536) {
throw new IllegalArgumentException("Cpu cost parameter must be > 1 and < 65536.");
}
if (memoryCost < 1) {
throw new IllegalArgumentException("Memory cost must be >= 1.");
}
int maxParallel = Integer.MAX_VALUE / (128 * memoryCost * 8);
if (parallelization < 1 || parallelization > maxParallel) {
throw new IllegalArgumentException("Parallelisation parameter p must be >= 1 and <= " + maxParallel
+ " (based on block size r of " + memoryCost + ")");
}
if (keyLength < 1 || keyLength > Integer.MAX_VALUE) {
throw new IllegalArgumentException("Key length must be >= 1 and <= " + Integer.MAX_VALUE);
}
if (saltLength < 1 || saltLength > Integer.MAX_VALUE) {
throw new IllegalArgumentException("Salt length must be >= 1 and <= " + Integer.MAX_VALUE);
}
this.cpuCost = cpuCost;
this.memoryCost = memoryCost;
this.parallelization = parallelization;
this.keyLength = keyLength;
this.saltGenerator = KeyGenerators.secureRandom(saltLength);
}
/**
* Constructs a SCrypt password encoder with cpu cost of 16,384, memory cost of 8,
* parallelization of 1, a key length of 32 and a salt length of 64 bytes.
* @return the {@link SCryptPasswordEncoder}
* @since 5.8
* @deprecated Use {@link #defaultsForSpringSecurity_v5_8()} instead
*/
@Deprecated
public static SCryptPasswordEncoder defaultsForSpringSecurity_v4_1() {
return new SCryptPasswordEncoder(16384, 8, 1, 32, 64);
}
/**
* Constructs a SCrypt password encoder with cpu cost of 65,536, memory cost of 8,
* parallelization of 1, a key length of 32 and a salt length of 16 bytes.
* @return the {@link SCryptPasswordEncoder}
* @since 5.8
*/
public static SCryptPasswordEncoder defaultsForSpringSecurity_v5_8() {
return new SCryptPasswordEncoder(DEFAULT_CPU_COST, DEFAULT_MEMORY_COST, DEFAULT_PARALLELISM, DEFAULT_KEY_LENGTH,
DEFAULT_SALT_LENGTH);
}
@Override
protected String encodeNonNullPassword(String rawPassword) {
return digest(rawPassword, this.saltGenerator.generateKey());
}
@Override
protected boolean matchesNonNull(String rawPassword, String encodedPassword) {
return decodeAndCheckMatches(rawPassword, encodedPassword);
}
@Override
protected boolean upgradeEncodingNonNull(String encodedPassword) {
String[] parts = encodedPassword.split("\\$");
if (parts.length != 4) {
throw new IllegalArgumentException("Encoded password does not look like SCrypt: " + encodedPassword);
}
long params = Long.parseLong(parts[1], 16);
int cpuCost = (int) Math.pow(2, params >> 16 & 0xffff);
int memoryCost = (int) params >> 8 & 0xff;
int parallelization = (int) params & 0xff;
return cpuCost < this.cpuCost || memoryCost < this.memoryCost || parallelization < this.parallelization;
}
private boolean decodeAndCheckMatches(CharSequence rawPassword, String encodedPassword) {
String[] parts = encodedPassword.split("\\$");
if (parts.length != 4) {
return false;
}
long params = Long.parseLong(parts[1], 16);
byte[] salt = decodePart(parts[2]);
byte[] derived = decodePart(parts[3]);
int cpuCost = (int) Math.pow(2, params >> 16 & 0xffff);
int memoryCost = (int) params >> 8 & 0xff;
int parallelization = (int) params & 0xff;
byte[] generated = SCrypt.generate(Utf8.encode(rawPassword), salt, cpuCost, memoryCost, parallelization,
this.keyLength);
return MessageDigest.isEqual(derived, generated);
}
private String digest(CharSequence rawPassword, byte[] salt) {
byte[] derived = SCrypt.generate(Utf8.encode(rawPassword), salt, this.cpuCost, this.memoryCost,
this.parallelization, this.keyLength);
String params = Long.toString(
((int) (Math.log(this.cpuCost) / Math.log(2)) << 16L) | this.memoryCost << 8 | this.parallelization,
16);
StringBuilder sb = new StringBuilder((salt.length + derived.length) * 2);
sb.append("$").append(params).append('$');
sb.append(encodePart(salt)).append('$');
sb.append(encodePart(derived));
return sb.toString();
}
private byte[] decodePart(String part) {
return Base64.getDecoder().decode(Utf8.encode(part));
}
private String encodePart(byte[] part) {
return Utf8.decode(Base64.getEncoder().encode(part));
}
}
| SCryptPasswordEncoder |
java | spring-projects__spring-framework | spring-expression/src/test/java/org/springframework/expression/spel/ParsingTests.java | {
"start": 12920,
"end": 13115
} | class ____ {
@Test
void variables() {
parseCheck("#foo");
}
@Test
void functions() {
parseCheck("#fn(1,2,3)");
parseCheck("#fn('hello')");
}
}
@Nested
| VariablesAndFunctions |
java | micronaut-projects__micronaut-core | inject-java/src/test/groovy/io/micronaut/aop/factory/InterfaceFactory.java | {
"start": 814,
"end": 1142
} | class ____ {
@Prototype
@Mutating("name")
@Primary
@Executable
InterfaceClass interfaceClass() {
return new InterfaceImpl();
}
@Prototype
@Mutating("name")
@Named("another")
@Executable
InterfaceClass anotherImpl() {
return new InterfaceImpl();
}
}
| InterfaceFactory |
java | quarkusio__quarkus | extensions/opentelemetry/runtime/src/main/java/io/quarkus/opentelemetry/runtime/exporter/otlp/logs/NoopLogRecordExporter.java | {
"start": 270,
"end": 844
} | class ____ implements LogRecordExporter {
public static final NoopLogRecordExporter INSTANCE = new NoopLogRecordExporter();
private NoopLogRecordExporter() {
}
@Override
public CompletableResultCode export(Collection<LogRecordData> collection) {
return CompletableResultCode.ofSuccess();
}
@Override
public CompletableResultCode flush() {
return CompletableResultCode.ofSuccess();
}
@Override
public CompletableResultCode shutdown() {
return CompletableResultCode.ofSuccess();
}
}
| NoopLogRecordExporter |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Connection.java | {
"start": 945,
"end": 3794
} | class ____ {
private Vertex from;
private Vertex to;
private double weight;
private long docCount;
public Connection(Vertex from, Vertex to, double weight, long docCount) {
this.from = from;
this.to = to;
this.weight = weight;
this.docCount = docCount;
}
public Connection(StreamInput in, Map<VertexId, Vertex> vertices) throws IOException {
from = vertices.get(new VertexId(in.readString(), in.readString()));
to = vertices.get(new VertexId(in.readString(), in.readString()));
weight = in.readDouble();
docCount = in.readVLong();
}
Connection() {}
void writeTo(StreamOutput out) throws IOException {
out.writeString(from.getField());
out.writeString(from.getTerm());
out.writeString(to.getField());
out.writeString(to.getTerm());
out.writeDouble(weight);
out.writeVLong(docCount);
}
public ConnectionId getId() {
return new ConnectionId(from.getId(), to.getId());
}
public Vertex getFrom() {
return from;
}
public Vertex getTo() {
return to;
}
/**
* @return a measure of the relative connectedness between a pair of {@link Vertex} objects
*/
public double getWeight() {
return weight;
}
/**
* @return the number of documents in the sampled set that contained this
* pair of {@link Vertex} objects.
*/
public long getDocCount() {
return docCount;
}
@Override
public boolean equals(Object obj) {
if (this == obj) return true;
if (obj == null) return false;
if (getClass() != obj.getClass()) return false;
Connection other = (Connection) obj;
return docCount == other.docCount && weight == other.weight && Objects.equals(to, other.to) && Objects.equals(from, other.from);
}
@Override
public int hashCode() {
return Objects.hash(docCount, weight, from, to);
}
private static final ParseField SOURCE = new ParseField("source");
private static final ParseField TARGET = new ParseField("target");
private static final ParseField WEIGHT = new ParseField("weight");
private static final ParseField DOC_COUNT = new ParseField("doc_count");
void toXContent(XContentBuilder builder, Params params, Map<Vertex, Integer> vertexNumbers) throws IOException {
builder.field(SOURCE.getPreferredName(), vertexNumbers.get(from));
builder.field(TARGET.getPreferredName(), vertexNumbers.get(to));
builder.field(WEIGHT.getPreferredName(), weight);
builder.field(DOC_COUNT.getPreferredName(), docCount);
}
/**
* An identifier (implements hashcode and equals) that represents a
* unique key for a {@link Connection}
*/
public static | Connection |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/LambdaFunctionalInterfaceTest.java | {
"start": 2836,
"end": 3483
} | class ____ {
private static double generateDataSeries(Function<Double, Double> curveFunction) {
final double scale = 100;
final double modX = 2.0;
return modX / curveFunction.apply(scale);
}
// call site
private static double generateSpendCurveForMetric(double curved) {
// explicit Function variable creation
Function<Double, Double> curveFunction = x -> Math.pow(x, 1 / curved) * 100;
return generateDataSeries(curveFunction);
}
// call site: lambda Function
public Double getMu() {
return generateDataSeries(mu -> 2.3);
}
}
public static | WithCallSiteExplicitFunction |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/component/basic2/Person.java | {
"start": 326,
"end": 820
} | class ____ implements Serializable {
private int id;
private Name name;
public Person() {
}
public Person(int id, Name name) {
this.id = id;
this.name = name;
}
public Person(int id, String firstName, String lastName) {
this( id, new Name( firstName, lastName ) );
}
@Id
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
@Embedded
public Name getName() {
return name;
}
public void setName(Name name) {
this.name = name;
}
}
| Person |
java | google__dagger | javatests/dagger/internal/codegen/ComponentShardTest.java | {
"start": 4525,
"end": 5002
} | interface ____ {",
" Binding3 binding3();",
"}"),
CompilerTests.javaSource(
"dagger.internal.codegen.TestComponent",
"package dagger.internal.codegen;",
"",
"import dagger.Component;",
"import javax.inject.Provider;",
"import javax.inject.Singleton;",
"",
"@Singleton",
"@Component(dependencies = Dependency.class)",
" | Dependency |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/runtime/utils/JavaPojos.java | {
"start": 2044,
"end": 2466
} | class ____ {
public String name;
public Long id;
public ProductItem() {}
public ProductItem(String name, Long id) {
this.name = name;
this.id = id;
}
@Override
public String toString() {
return "Product{" + "name='" + name + '\'' + ", id=" + id + '}';
}
}
/** POJO with a RAW type. */
public static | ProductItem |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/eval/EvalMethodModTest.java | {
"start": 185,
"end": 375
} | class ____ extends TestCase {
public void test_reverse() throws Exception {
assertEquals(2, SQLEvalVisitorUtils.evalExpr(JdbcConstants.MYSQL, "MOD(29,9)"));
}
}
| EvalMethodModTest |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/injection/unsatisfied/UnsatisfiedMatchByRestrictedTypeTest.java | {
"start": 427,
"end": 1152
} | class ____ {
@RegisterExtension
ArcTestContainer container = ArcTestContainer.builder()
.beanClasses(FooService.class, Consumer.class)
.shouldFail()
.build();
@Test
public void testExceptionThrown() {
Throwable error = container.getFailure();
assertThat(error).rootCause().isInstanceOf(UnsatisfiedResolutionException.class)
.hasMessageContaining("The following beans match by type excluded by the @Typed annotation")
.hasMessageContaining(
"io.quarkus.arc.test.injection.unsatisfied.UnsatisfiedMatchByRestrictedTypeTest$FooService");
}
@Singleton
static | UnsatisfiedMatchByRestrictedTypeTest |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java | {
"start": 2464,
"end": 35054
} | class ____ extends AbstractXContentSerializingTestCase<Job> {
private static final String FUTURE_JOB = """
{
"job_id": "farequote",
"create_time": 1234567890000,
"tomorrows_technology_today": "wow",
"analysis_config": {
"bucket_span": "1h",
"something_new": "gasp",
"detectors": [{"function": "metric", "field_name": "responsetime", "by_field_name": "airline"}]
},
"data_description": {
"time_field": "time",
"the_future": 123
}
}""";
@Override
protected Job createTestInstance() {
return createRandomizedJob(new DatafeedConfig.Builder().setIndices(Arrays.asList("airline_data")));
}
@Override
protected Job mutateInstance(Job instance) {
return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929
}
@Override
protected NamedWriteableRegistry getNamedWriteableRegistry() {
SearchModule searchModule = new SearchModule(Settings.EMPTY, Collections.emptyList());
return new NamedWriteableRegistry(searchModule.getNamedWriteables());
}
@Override
protected NamedXContentRegistry xContentRegistry() {
SearchModule searchModule = new SearchModule(Settings.EMPTY, Collections.emptyList());
return new NamedXContentRegistry(searchModule.getNamedXContents());
}
@Override
protected Writeable.Reader<Job> instanceReader() {
return Job::new;
}
@Override
protected Job doParseInstance(XContentParser parser) {
return Job.LENIENT_PARSER.apply(parser, null).build();
}
public void testToXContentForInternalStorage() throws IOException {
Job config = createRandomizedJob(new DatafeedConfig.Builder().setIndices(Arrays.asList("airline_data")));
ToXContent.MapParams params = new ToXContent.MapParams(Collections.singletonMap(ToXContentParams.FOR_INTERNAL_STORAGE, "true"));
BytesReference serializedJob = XContentHelper.toXContent(config, XContentType.JSON, params, false);
try (
XContentParser parser = XContentFactory.xContent(XContentType.JSON)
.createParser(XContentParserConfiguration.EMPTY.withRegistry(xContentRegistry()), serializedJob.streamInput())
) {
Job parsedConfig = Job.LENIENT_PARSER.apply(parser, null).build();
// When we are writing for internal storage, we do not include the datafeed config
assertThat(parsedConfig.getDatafeedConfig(), isEmpty());
}
}
public void testRestRequestParser_DoesntAllowInternalFields() throws IOException {
XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(XContentParserConfiguration.EMPTY, FUTURE_JOB);
XContentParseException e = expectThrows(XContentParseException.class, () -> Job.REST_REQUEST_PARSER.apply(parser, null).build());
assertEquals("[3:5] [job_details] unknown field [create_time]", e.getMessage());
}
public void testFutureMetadataParse() throws IOException {
XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(XContentParserConfiguration.EMPTY, FUTURE_JOB);
// Unlike the config version of this test, the metadata parser should tolerate the unknown future field
assertNotNull(Job.LENIENT_PARSER.apply(parser, null).build());
}
public void testConstructor_GivenEmptyJobConfiguration() {
Job job = buildJobBuilder("foo").build();
assertEquals("foo", job.getId());
assertNotNull(job.getCreateTime());
assertNotNull(job.getAnalysisConfig());
assertNotNull(job.getAnalysisLimits());
assertNull(job.getCustomSettings());
assertNotNull(job.getDataDescription());
assertNull(job.getDescription());
assertNull(job.getFinishedTime());
assertNull(job.getModelPlotConfig());
assertNull(job.getRenormalizationWindowDays());
assertNull(job.getBackgroundPersistInterval());
assertThat(job.getModelSnapshotRetentionDays(), equalTo(10L));
assertNull(job.getDailyModelSnapshotRetentionAfterDays());
assertNull(job.getResultsRetentionDays());
assertNotNull(job.allInputFields());
assertFalse(job.allInputFields().isEmpty());
assertFalse(job.allowLazyOpen());
}
public void testNoId() {
expectThrows(IllegalArgumentException.class, () -> buildJobBuilder("").build());
}
public void testEnsureModelMemoryLimitSet() {
Job.Builder builder = buildJobBuilder("foo");
builder.setAnalysisLimits(new AnalysisLimits(null, null));
builder.validateAnalysisLimitsAndSetDefaults(ByteSizeValue.ZERO);
Job job = builder.build();
assertEquals("foo", job.getId());
assertNotNull(job.getAnalysisLimits());
assertThat(job.getAnalysisLimits().getModelMemoryLimit(), equalTo(AnalysisLimits.DEFAULT_MODEL_MEMORY_LIMIT_MB));
assertThat(job.getAnalysisLimits().getCategorizationExamplesLimit(), equalTo(4L));
builder.setAnalysisLimits(new AnalysisLimits(AnalysisLimits.DEFAULT_MODEL_MEMORY_LIMIT_MB * 2, 5L));
builder.validateAnalysisLimitsAndSetDefaults(null);
job = builder.build();
assertNotNull(job.getAnalysisLimits());
assertThat(job.getAnalysisLimits().getModelMemoryLimit(), equalTo(AnalysisLimits.DEFAULT_MODEL_MEMORY_LIMIT_MB * 2));
assertThat(job.getAnalysisLimits().getCategorizationExamplesLimit(), equalTo(5L));
}
public void testValidateAnalysisLimitsAndSetDefaults_whenMaxIsLessThanTheDefault() {
Job.Builder builder = buildJobBuilder("foo");
builder.validateAnalysisLimitsAndSetDefaults(ByteSizeValue.of(512L, ByteSizeUnit.MB));
Job job = builder.build();
assertNotNull(job.getAnalysisLimits());
assertThat(job.getAnalysisLimits().getModelMemoryLimit(), equalTo(512L));
assertThat(job.getAnalysisLimits().getCategorizationExamplesLimit(), equalTo(4L));
}
public void testValidateAnalysisLimitsAndSetDefaults_throwsWhenMaxLimitIsExceeded() {
Job.Builder builder = buildJobBuilder("foo");
builder.setAnalysisLimits(new AnalysisLimits(4096L, null));
ElasticsearchStatusException e = expectThrows(
ElasticsearchStatusException.class,
() -> builder.validateAnalysisLimitsAndSetDefaults(ByteSizeValue.of(1000L, ByteSizeUnit.MB))
);
assertEquals(
"model_memory_limit [4gb] must be less than the value of the "
+ MachineLearningField.MAX_MODEL_MEMORY_LIMIT.getKey()
+ " setting [1000mb]",
e.getMessage()
);
builder.validateAnalysisLimitsAndSetDefaults(ByteSizeValue.of(8192L, ByteSizeUnit.MB));
}
public void testEquals_GivenDifferentClass() {
Job job = buildJobBuilder("foo").build();
assertFalse(job.equals("a string"));
}
public void testEquals_GivenDifferentIds() {
Date createTime = new Date();
Job.Builder builder = buildJobBuilder("foo");
builder.setCreateTime(createTime);
Job job1 = builder.build();
builder.setId("bar");
Job job2 = builder.build();
assertNotEquals(job1, job2);
}
public void testEquals_GivenDifferentRenormalizationWindowDays() {
Date date = new Date();
Job.Builder jobDetails1 = new Job.Builder("foo");
jobDetails1.setDataDescription(new DataDescription.Builder());
jobDetails1.setAnalysisConfig(createAnalysisConfig());
jobDetails1.setRenormalizationWindowDays(3L);
jobDetails1.setCreateTime(date);
Job.Builder jobDetails2 = new Job.Builder("foo");
jobDetails2.setDataDescription(new DataDescription.Builder());
jobDetails2.setRenormalizationWindowDays(4L);
jobDetails2.setAnalysisConfig(createAnalysisConfig());
jobDetails2.setCreateTime(date);
assertNotEquals(jobDetails1.build(), jobDetails2.build());
}
public void testEquals_GivenDifferentBackgroundPersistInterval() {
Date date = new Date();
Job.Builder jobDetails1 = new Job.Builder("foo");
jobDetails1.setDataDescription(new DataDescription.Builder());
jobDetails1.setAnalysisConfig(createAnalysisConfig());
jobDetails1.setBackgroundPersistInterval(TimeValue.timeValueSeconds(10000L));
jobDetails1.setCreateTime(date);
Job.Builder jobDetails2 = new Job.Builder("foo");
jobDetails2.setDataDescription(new DataDescription.Builder());
jobDetails2.setBackgroundPersistInterval(TimeValue.timeValueSeconds(8000L));
jobDetails2.setAnalysisConfig(createAnalysisConfig());
jobDetails2.setCreateTime(date);
assertNotEquals(jobDetails1.build(), jobDetails2.build());
}
public void testEquals_GivenDifferentModelSnapshotRetentionDays() {
Date date = new Date();
Job.Builder jobDetails1 = new Job.Builder("foo");
jobDetails1.setDataDescription(new DataDescription.Builder());
jobDetails1.setAnalysisConfig(createAnalysisConfig());
jobDetails1.setModelSnapshotRetentionDays(10L);
jobDetails1.setCreateTime(date);
Job.Builder jobDetails2 = new Job.Builder("foo");
jobDetails2.setDataDescription(new DataDescription.Builder());
jobDetails2.setModelSnapshotRetentionDays(8L);
jobDetails2.setAnalysisConfig(createAnalysisConfig());
jobDetails2.setCreateTime(date);
assertNotEquals(jobDetails1.build(), jobDetails2.build());
}
public void testEquals_GivenDifferentResultsRetentionDays() {
Date date = new Date();
Job.Builder jobDetails1 = new Job.Builder("foo");
jobDetails1.setDataDescription(new DataDescription.Builder());
jobDetails1.setAnalysisConfig(createAnalysisConfig());
jobDetails1.setCreateTime(date);
jobDetails1.setResultsRetentionDays(30L);
Job.Builder jobDetails2 = new Job.Builder("foo");
jobDetails2.setDataDescription(new DataDescription.Builder());
jobDetails2.setResultsRetentionDays(4L);
jobDetails2.setAnalysisConfig(createAnalysisConfig());
jobDetails2.setCreateTime(date);
assertNotEquals(jobDetails1.build(), jobDetails2.build());
}
/** Jobs with different custom_settings maps must not be equal. */
public void testEquals_GivenDifferentCustomSettings() {
    Map<String, Object> settingsA = new HashMap<>();
    settingsA.put("key1", "value1");
    Job.Builder first = buildJobBuilder("foo");
    first.setCustomSettings(settingsA);
    Map<String, Object> settingsB = new HashMap<>();
    settingsB.put("key2", "value2");
    Job.Builder second = buildJobBuilder("foo");
    second.setCustomSettings(settingsB);
    assertNotEquals(first.build(), second.build());
}
// JobConfigurationTests:
/**
 * Test the {@link AnalysisConfig#analysisFields()} method which produces a
 * list of analysis fields from the detectors
 */
public void testAnalysisConfigRequiredFields() {
    Detector.Builder d1 = new Detector.Builder("max", "field");
    d1.setByFieldName("by_field");
    Detector.Builder d2 = new Detector.Builder("median", "field2");
    d2.setOverFieldName("over_field");
    AnalysisConfig.Builder ac = new AnalysisConfig.Builder(Arrays.asList(d1.build(), d2.build()));
    ac.setSummaryCountFieldName("agg");
    Set<String> analysisFields = ac.build().analysisFields();
    // assertEquals reports the actual size on failure, unlike assertTrue(size() == 5)
    assertEquals(5, analysisFields.size());
    assertTrue(analysisFields.contains("agg"));
    assertTrue(analysisFields.contains("field"));
    assertTrue(analysisFields.contains("by_field"));
    assertTrue(analysisFields.contains("field2"));
    assertTrue(analysisFields.contains("over_field"));
    // detector function names and the empty string must not leak into the analysis fields
    assertFalse(analysisFields.contains("max"));
    assertFalse(analysisFields.contains("median"));
    assertFalse(analysisFields.contains(""));
    // add a third detector that has a partition field but no field name
    Detector.Builder d3 = new Detector.Builder("count", null);
    d3.setByFieldName("by2");
    d3.setPartitionFieldName("partition");
    ac = new AnalysisConfig.Builder(Arrays.asList(d1.build(), d2.build(), d3.build()));
    analysisFields = ac.build().analysisFields();
    assertEquals(6, analysisFields.size());
    assertTrue(analysisFields.contains("partition"));
    assertTrue(analysisFields.contains("field"));
    assertTrue(analysisFields.contains("by_field"));
    assertTrue(analysisFields.contains("by2"));
    assertTrue(analysisFields.contains("field2"));
    assertTrue(analysisFields.contains("over_field"));
    assertFalse(analysisFields.contains("count"));
    assertFalse(analysisFields.contains("max"));
    assertFalse(analysisFields.contains("median"));
    assertFalse(analysisFields.contains(""));
}
// JobConfigurationVerifierTests:
/** The builder copy-constructor must produce an equal job for random instances. */
public void testCopyConstructor() {
    for (int run = 0; run < NUMBER_OF_TEST_RUNS; run++) {
        Job original = createTestInstance();
        assertEquals(original, new Job.Builder(original).build());
    }
}
/** An over-long job id must be rejected at build time. */
public void testCheckValidId_IdTooLong() {
    Job.Builder jobBuilder = buildJobBuilder("foo");
    jobBuilder.setId("averyveryveryaveryveryveryaveryveryveryaveryveryveryaveryveryveryaveryveryverylongid");
    expectThrows(IllegalArgumentException.class, jobBuilder::build);
}
/** An id using every permitted character class must build without throwing. */
public void testCheckValidId_GivenAllValidChars() {
    Job.Builder jobBuilder = buildJobBuilder("foo");
    jobBuilder.setId("abcdefghijklmnopqrstuvwxyz-._0123456789");
    jobBuilder.build();
}
/** Each prohibited character, used alone as the id, must fail with the INVALID_ID message. */
public void testCheckValidId_ProhibitedChars() {
    String invalidChars = "ABCDEFGHIJKLMNOPQRSTUVWXYZ!@#$%^&*()+?\"'~±/\\[]{},<>=";
    Job.Builder jobBuilder = buildJobBuilder("foo");
    for (char invalid : invalidChars.toCharArray()) {
        String id = String.valueOf(invalid);
        jobBuilder.setId(id);
        IllegalArgumentException e = expectThrows(IllegalArgumentException.class, jobBuilder::build);
        assertEquals(Messages.getMessage(Messages.INVALID_ID, Job.ID.getPreferredName(), id), e.getMessage());
    }
}
/** An id with a leading underscore must fail with the INVALID_ID message. */
public void testCheckValidId_startsWithUnderscore() {
    Job.Builder jobBuilder = buildJobBuilder("_foo");
    IllegalArgumentException e = expectThrows(IllegalArgumentException.class, jobBuilder::build);
    assertEquals(Messages.getMessage(Messages.INVALID_ID, Job.ID.getPreferredName(), "_foo"), e.getMessage());
}
/** An id with a trailing underscore must fail with the INVALID_ID message. */
public void testCheckValidId_endsWithUnderscore() {
    Job.Builder jobBuilder = buildJobBuilder("foo_");
    IllegalArgumentException e = expectThrows(IllegalArgumentException.class, jobBuilder::build);
    assertEquals(Messages.getMessage(Messages.INVALID_ID, Job.ID.getPreferredName(), "foo_"), e.getMessage());
}
/** Control characters (here a newline) inside an id must be rejected. */
public void testCheckValidId_ControlChars() {
    Job.Builder jobBuilder = buildJobBuilder("foo");
    jobBuilder.setId("two\nlines");
    expectThrows(IllegalArgumentException.class, jobBuilder::build);
}
// NOTE(review): this method name does not start with "test", so the randomized
// test runner presumably never executes it — confirm whether that is intentional.
/**
 * Walks a single builder through a sequence of invalid and then valid
 * configurations. The same builder is mutated throughout, so statement order
 * is significant and must not be rearranged.
 */
public void jobConfigurationTest() {
    Job.Builder builder = new Job.Builder();
    // no id set yet
    expectThrows(IllegalArgumentException.class, builder::build);
    builder.setId("bad id with spaces");
    expectThrows(IllegalArgumentException.class, builder::build);
    builder.setId("bad_id_with_UPPERCASE_chars");
    expectThrows(IllegalArgumentException.class, builder::build);
    builder.setId("a very very very very very very very very very very very very very very very very very very very very long id");
    expectThrows(IllegalArgumentException.class, builder::build);
    builder.setId(null);
    expectThrows(IllegalArgumentException.class, builder::build);
    Detector.Builder d = new Detector.Builder("max", "a");
    d.setByFieldName("b");
    AnalysisConfig.Builder ac = new AnalysisConfig.Builder(Collections.singletonList(d.build()));
    builder.setAnalysisConfig(ac);
    // NOTE(review): this build() succeeds although the id was last set to null —
    // presumably the builder tolerates a missing id at this point; confirm.
    builder.build();
    // negative model memory limit is invalid
    builder.setAnalysisLimits(new AnalysisLimits(-1L, null));
    expectThrows(IllegalArgumentException.class, builder::build);
    AnalysisLimits limits = new AnalysisLimits(1000L, 4L);
    builder.setAnalysisLimits(limits);
    builder.build();
    // malformed time format in the data description
    DataDescription.Builder dc = new DataDescription.Builder();
    dc.setTimeFormat("YYY_KKKKajsatp*");
    builder.setDataDescription(dc);
    expectThrows(IllegalArgumentException.class, builder::build);
    dc = new DataDescription.Builder();
    builder.setDataDescription(dc);
    // NOTE(review): an expectThrows immediately followed by a successful build()
    // with no intervening state change looks contradictory — verify which
    // condition makes the first of these two calls throw.
    expectThrows(IllegalArgumentException.class, builder::build);
    builder.build();
}
/** A negative renormalization_window_days must be rejected with the "too low" message. */
public void testVerify_GivenNegativeRenormalizationWindowDays() {
    Job.Builder jobBuilder = buildJobBuilder("foo");
    jobBuilder.setRenormalizationWindowDays(-1L);
    IllegalArgumentException e = ESTestCase.expectThrows(IllegalArgumentException.class, jobBuilder::build);
    assertEquals(Messages.getMessage(Messages.JOB_CONFIG_FIELD_VALUE_TOO_LOW, "renormalization_window_days", 0, -1), e.getMessage());
}
/** A negative model_snapshot_retention_days must be rejected with the "too low" message. */
public void testVerify_GivenNegativeModelSnapshotRetentionDays() {
    Job.Builder jobBuilder = buildJobBuilder("foo");
    jobBuilder.setModelSnapshotRetentionDays(-1L);
    IllegalArgumentException e = expectThrows(IllegalArgumentException.class, jobBuilder::build);
    assertEquals(Messages.getMessage(Messages.JOB_CONFIG_FIELD_VALUE_TOO_LOW, "model_snapshot_retention_days", 0, -1), e.getMessage());
}
/** A negative daily_model_snapshot_retention_after_days must be rejected with the "too low" message. */
public void testVerify_GivenNegativeDailyModelSnapshotRetentionAfterDays() {
    Job.Builder jobBuilder = buildJobBuilder("foo");
    jobBuilder.setDailyModelSnapshotRetentionAfterDays(-1L);
    IllegalArgumentException e = expectThrows(IllegalArgumentException.class, jobBuilder::build);
    assertEquals(
        Messages.getMessage(Messages.JOB_CONFIG_FIELD_VALUE_TOO_LOW, "daily_model_snapshot_retention_after_days", 0, -1),
        e.getMessage()
    );
}
/** daily retention exceeding model_snapshot_retention_days must be rejected as inconsistent. */
public void testVerify_GivenInconsistentModelSnapshotRetentionSettings() {
    // the order of the two random draws is preserved so seeded runs stay reproducible
    long dailyRetention = randomLongBetween(1, Long.MAX_VALUE);
    long snapshotRetention = randomLongBetween(0, dailyRetention - 1);
    Job.Builder jobBuilder = buildJobBuilder("foo");
    jobBuilder.setDailyModelSnapshotRetentionAfterDays(dailyRetention);
    jobBuilder.setModelSnapshotRetentionDays(snapshotRetention);
    IllegalArgumentException e = expectThrows(IllegalArgumentException.class, jobBuilder::build);
    assertEquals(
        Messages.getMessage(
            Messages.JOB_CONFIG_MODEL_SNAPSHOT_RETENTION_SETTINGS_INCONSISTENT,
            dailyRetention,
            snapshotRetention
        ),
        e.getMessage()
    );
}
/** A background_persist_interval below one hour must be rejected with the "too low" message. */
public void testVerify_GivenLowBackgroundPersistInterval() {
    Job.Builder jobBuilder = buildJobBuilder("foo");
    jobBuilder.setBackgroundPersistInterval(TimeValue.timeValueSeconds(3599L));
    IllegalArgumentException e = expectThrows(IllegalArgumentException.class, jobBuilder::build);
    assertEquals(Messages.getMessage(Messages.JOB_CONFIG_FIELD_VALUE_TOO_LOW, "background_persist_interval", 3600, 3599), e.getMessage());
}
/** A negative results_retention_days must be rejected with the "too low" message. */
public void testVerify_GivenNegativeResultsRetentionDays() {
    Job.Builder jobBuilder = buildJobBuilder("foo");
    jobBuilder.setResultsRetentionDays(-1L);
    IllegalArgumentException e = expectThrows(IllegalArgumentException.class, jobBuilder::build);
    assertEquals(Messages.getMessage(Messages.JOB_CONFIG_FIELD_VALUE_TOO_LOW, "results_retention_days", 0, -1), e.getMessage());
}
/** When no results index is configured, the default shared results index is used. */
public void testBuilder_setsDefaultIndexName() {
    Job job = buildJobBuilder("foo").build();
    String expected = AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT;
    assertEquals(expected, job.getInitialResultsIndexName());
}
/** A custom results index name is stored with the "custom-" marker after the standard prefix. */
public void testBuilder_setsIndexName() {
    Job.Builder jobBuilder = buildJobBuilder("foo");
    jobBuilder.setResultsIndexName("carol");
    Job job = jobBuilder.build();
    assertEquals(AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-carol", job.getInitialResultsIndexName());
}
/** An invalid results index name must fail the build with the INVALID_ID message. */
public void testBuilder_withInvalidIndexNameThrows() {
    Job.Builder jobBuilder = buildJobBuilder("foo");
    jobBuilder.setResultsIndexName("_bad^name");
    IllegalArgumentException e = expectThrows(IllegalArgumentException.class, jobBuilder::build);
    assertEquals(Messages.getMessage(Messages.INVALID_ID, Job.RESULTS_INDEX_NAME.getPreferredName(), "_bad^name"), e.getMessage());
}
/** build(Date) must stamp the given create time and the current ML config version. */
public void testBuilder_buildWithCreateTime() {
    Date createTime = new Date();
    Job job = buildJobBuilder("foo").build(createTime);
    assertEquals(createTime, job.getCreateTime());
    assertEquals(MlConfigVersion.CURRENT, job.getJobVersion());
}
/**
 * A job built without an explicit job version reports a null version, and a
 * serialize/parse round trip of such a job yields an equal instance.
 */
public void testJobWithoutVersion() throws IOException {
    Job.Builder builder = buildJobBuilder("foo");
    Job job = builder.build();
    assertThat(job.getJobVersion(), is(nullValue()));
    // Assert parsing a job without version works as expected
    // (content type is randomized so every XContent format is exercised over time)
    XContentType xContentType = randomFrom(XContentType.values());
    BytesReference bytes = XContentHelper.toXContent(job, xContentType, false);
    try (XContentParser parser = createParser(xContentType.xContent(), bytes)) {
        Job parsed = doParseInstance(parser);
        assertThat(parsed, equalTo(job));
    }
}
/** Building without a data_description must fail with a clear message. */
public void testBuilder_buildRequiresDataDescription() {
    Job.Builder jobBuilder = new Job.Builder("no-data-description");
    jobBuilder.setAnalysisConfig(createAnalysisConfig());
    IllegalArgumentException e = expectThrows(IllegalArgumentException.class, jobBuilder::build);
    assertThat(e.getMessage(), equalTo("A data_description must be set"));
}
/** The data_description time field may not also appear as a field in the analysis config. */
public void testBuilder_givenTimeFieldInAnalysisConfig() {
    // "client" matches the over-field used by createAnalysisConfig()
    DataDescription.Builder dataDescription = new DataDescription.Builder();
    dataDescription.setTimeField("client");
    Job.Builder jobBuilder = new Job.Builder("time-field-in-analysis-config");
    jobBuilder.setAnalysisConfig(createAnalysisConfig());
    jobBuilder.setDataDescription(dataDescription);
    IllegalArgumentException e = expectThrows(IllegalArgumentException.class, jobBuilder::build);
    assertThat(e.getMessage(), equalTo(Messages.getMessage(Messages.JOB_CONFIG_TIME_FIELD_NOT_ALLOWED_IN_ANALYSIS_CONFIG)));
}
/** An empty string in the groups list must be rejected. */
public void testEmptyGroup() {
    Job.Builder jobBuilder = buildJobBuilder("foo");
    jobBuilder.setGroups(Arrays.asList("foo-group", ""));
    IllegalArgumentException e = expectThrows(IllegalArgumentException.class, jobBuilder::build);
    assertThat(e.getMessage(), containsString("Invalid group id ''"));
}
/** A group name with prohibited characters must be rejected. */
public void testInvalidGroup() {
    Job.Builder jobBuilder = buildJobBuilder("foo");
    jobBuilder.setGroups(Arrays.asList("foo-group", "$$$"));
    IllegalArgumentException e = expectThrows(IllegalArgumentException.class, jobBuilder::build);
    assertThat(e.getMessage(), containsString("Invalid group id '$$$'"));
}
/** A group name equal to the job id itself must be rejected. */
public void testInvalidGroup_matchesJobId() {
    Job.Builder builder = buildJobBuilder("foo");
    builder.setGroups(Collections.singletonList("foo"));
    ResourceAlreadyExistsException e = expectThrows(ResourceAlreadyExistsException.class, builder::build);
    // expected value first: the original call had the arguments swapped,
    // which produces a misleading "expected vs actual" message on failure
    assertEquals("job and group names must be unique but job [foo] and group [foo] have the same name", e.getMessage());
}
/** Two identical detectors in one analysis config must be flagged as duplicates. */
public void testInvalidAnalysisConfig_duplicateDetectors() throws Exception {
    AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(
        Arrays.asList(
            new Detector.Builder("mean", "responsetime").build(),
            new Detector.Builder("mean", "responsetime").build()
        )
    );
    Job.Builder builder = new Job.Builder("job_with_duplicate_detectors");
    builder.setCreateTime(new Date());
    builder.setDataDescription(new DataDescription.Builder());
    builder.setAnalysisConfig(analysisConfig);
    IllegalArgumentException e = expectThrows(IllegalArgumentException.class, builder::validateDetectorsAreUnique);
    assertThat(e.getMessage(), containsString("Duplicate detectors are not allowed: [mean(responsetime)]"));
}
/** With no data seen yet, the earliest valid timestamp is the epoch. */
public void testEarliestValidTimestamp_GivenEmptyDataCounts() {
    DataCounts emptyCounts = new DataCounts("foo");
    assertThat(createRandomizedJob().earliestValidTimestamp(emptyCounts), equalTo(0L));
}
/** With zero latency, the earliest valid timestamp equals the latest record time. */
public void testEarliestValidTimestamp_GivenDataCountsAndZeroLatency() {
    Job.Builder jobBuilder = buildJobBuilder("foo");
    DataCounts counts = new DataCounts(jobBuilder.getId());
    counts.setLatestRecordTimeStamp(new Date(123456789L));
    assertThat(jobBuilder.build().earliestValidTimestamp(counts), equalTo(123456789L));
}
/** With latency configured, the earliest valid timestamp is the latest record time minus the latency. */
public void testEarliestValidTimestamp_GivenDataCountsAndLatency() {
    Job.Builder jobBuilder = buildJobBuilder("foo");
    AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(jobBuilder.build().getAnalysisConfig());
    analysisConfig.setLatency(TimeValue.timeValueMillis(1000L));
    jobBuilder.setAnalysisConfig(analysisConfig);
    DataCounts counts = new DataCounts(jobBuilder.getId());
    counts.setLatestRecordTimeStamp(new Date(123456789L));
    // 123456789 - 1000 = 123455789
    assertThat(jobBuilder.build().earliestValidTimestamp(counts), equalTo(123455789L));
}
/** Repeatedly round-tripping a job through its builder must not blow the stack. */
public void testCopyingJobDoesNotCauseStackOverflow() {
    Job job = createRandomizedJob();
    int copies = 0;
    while (copies < 100000) {
        job = new Job.Builder(job).build();
        copies++;
    }
}
/** documentId prefixes the job id; extractJobIdFromDocumentId inverts it and rejects foreign doc ids. */
public void testDocumentId() {
    String jobId = "foo";
    String documentId = Job.documentId(jobId);
    assertEquals("anomaly_detector-" + jobId, documentId);
    assertEquals(jobId, Job.extractJobIdFromDocumentId(documentId));
    assertNull(Job.extractJobIdFromDocumentId("some_other_type-foo"));
}
/** setDeleting(true) and a DELETE block reason must imply each other on the built job. */
public void testDeletingAndBlockReasonAreSynced() {
    Job deletingJob = buildJobBuilder(randomValidJobId()).setDeleting(true).build();
    assertThat(deletingJob.getBlocked().getReason(), equalTo(Blocked.Reason.DELETE));
    Job blockedJob = buildJobBuilder(randomValidJobId()).setBlocked(new Blocked(Blocked.Reason.DELETE, null)).build();
    assertThat(blockedJob.isDeleting(), is(true));
}
/**
 * A persisted job document with {@code "deleting": true} but no explicit blocked
 * field must come out of parsing with the blocked reason synced to DELETE.
 */
public void testParseJobWithDeletingButWithoutBlockReason() throws IOException {
    // minimal valid job document; note there is deliberately no "blocked" field
    String jobWithDeleting = """
        {
            "job_id": "deleting_job",
            "create_time": 1234567890000,
            "analysis_config": {
                "bucket_span": "1h",
                "detectors": [{"function": "count"}]
            },
            "data_description": {
                "time_field": "time"
            },
            "deleting": true
        }""";
    try (XContentParser parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, jobWithDeleting)) {
        Job job = doParseInstance(parser);
        assertThat(job.getBlocked().getReason(), equalTo(Blocked.Reason.DELETE));
    }
}
/** Builds a minimal valid job builder with the given id and create time. */
public static Job.Builder buildJobBuilder(String id, Date date) {
    Job.Builder jobBuilder = new Job.Builder(id);
    jobBuilder.setCreateTime(date);
    jobBuilder.setAnalysisConfig(createAnalysisConfig());
    jobBuilder.setDataDescription(new DataDescription.Builder());
    return jobBuilder;
}
/** Builds a minimal valid job builder stamped with the current time. */
public static Job.Builder buildJobBuilder(String id) {
    Date now = new Date();
    return buildJobBuilder(id, now);
}
/** Produces a random 10-character lower-case ASCII job id. */
public static String randomValidJobId() {
    CodepointSetGenerator idGenerator = new CodepointSetGenerator("abcdefghijklmnopqrstuvwxyz".toCharArray());
    return idGenerator.ofCodePointsLength(random(), 10, 10);
}
/** Analysis config with an info_content/over-field detector plus a simple min detector. */
public static AnalysisConfig.Builder createAnalysisConfig() {
    Detector.Builder overFieldDetector = new Detector.Builder("info_content", "domain");
    overFieldDetector.setOverFieldName("client");
    Detector.Builder minDetector = new Detector.Builder("min", "field");
    return new AnalysisConfig.Builder(Arrays.asList(overFieldDetector.build(), minDetector.build()));
}
/** Creates a randomized job with no attached datafeed. */
public static Job createRandomizedJob() {
    return createRandomizedJob(null);
}
/**
 * Creates a job whose optional fields are each populated with 50% probability,
 * so serialization/equality tests cover both present and absent cases.
 *
 * <p>NOTE: the sequence of {@code randomBoolean()} draws determines what a fixed
 * test seed reproduces, so the order of these sections must not be changed.
 *
 * @param datafeedBuilder optional datafeed to attach, or null for none
 */
public static Job createRandomizedJob(DatafeedConfig.Builder datafeedBuilder) {
    String jobId = randomValidJobId();
    Job.Builder builder = new Job.Builder(jobId);
    if (randomBoolean()) {
        builder.setDescription(randomAlphaOfLength(10));
    }
    if (randomBoolean()) {
        builder.setJobVersion(MlConfigVersion.CURRENT);
    }
    if (randomBoolean()) {
        // zero groups is a valid outcome here
        int groupsNum = randomIntBetween(0, 10);
        List<String> groups = new ArrayList<>(groupsNum);
        for (int i = 0; i < groupsNum; i++) {
            groups.add(randomValidJobId());
        }
        builder.setGroups(groups);
    }
    builder.setCreateTime(new Date(randomNonNegativeLong()));
    if (randomBoolean()) {
        builder.setFinishedTime(new Date(randomNonNegativeLong()));
    }
    builder.setAnalysisConfig(AnalysisConfigTests.createRandomized());
    builder.setAnalysisLimits(
        AnalysisLimits.validateAndSetDefaults(
            AnalysisLimitsTests.createRandomized(),
            null,
            AnalysisLimits.DEFAULT_MODEL_MEMORY_LIMIT_MB
        )
    );
    DataDescription.Builder dataDescription = new DataDescription.Builder();
    builder.setDataDescription(dataDescription);
    if (randomBoolean()) {
        builder.setModelPlotConfig(ModelPlotConfigTests.createRandomized());
    }
    if (randomBoolean()) {
        builder.setRenormalizationWindowDays(randomNonNegativeLong());
    }
    if (randomBoolean()) {
        builder.setBackgroundPersistInterval(TimeValue.timeValueHours(randomIntBetween(1, 24)));
    }
    if (randomBoolean()) {
        builder.setModelSnapshotRetentionDays(randomNonNegativeLong());
    }
    if (randomBoolean()) {
        // daily retention must not exceed model_snapshot_retention_days when that is set,
        // otherwise the build would fail the consistency check
        if (builder.getModelSnapshotRetentionDays() != null) {
            builder.setDailyModelSnapshotRetentionAfterDays(randomLongBetween(0, builder.getModelSnapshotRetentionDays()));
        } else {
            builder.setDailyModelSnapshotRetentionAfterDays(randomNonNegativeLong());
        }
    }
    if (randomBoolean()) {
        builder.setResultsRetentionDays(randomNonNegativeLong());
    }
    if (randomBoolean()) {
        builder.setCustomSettings(Collections.singletonMap(randomAlphaOfLength(10), randomAlphaOfLength(10)));
    }
    if (randomBoolean()) {
        builder.setModelSnapshotId(randomAlphaOfLength(10));
    }
    if (randomBoolean()) {
        builder.setModelSnapshotMinVersion(MlConfigVersion.CURRENT);
    }
    if (randomBoolean()) {
        builder.setResultsIndexName(randomValidJobId());
    }
    if (randomBoolean()) {
        builder.setAllowLazyOpen(randomBoolean());
    }
    if (randomBoolean()) {
        builder.setBlocked(BlockedTests.createRandom());
    }
    if (datafeedBuilder != null) {
        builder.setDatafeed(datafeedBuilder);
    }
    return builder.build();
}
}
| JobTests |
java | apache__camel | test-infra/camel-test-infra-aws-v2/src/main/java/org/apache/camel/test/infra/aws2/services/AWSLocalContainerInfraService.java | {
"start": 1492,
"end": 5404
} | class ____ implements AWSInfraService, ContainerService<AWSContainer> {
private static final Logger LOG = LoggerFactory.getLogger(AWSLocalContainerInfraService.class);
private final AWSContainer container;
public AWSLocalContainerInfraService(Service... services) {
this(LocalPropertyResolver.getProperty(AWSContainer.class, AWSProperties.AWS_CONTAINER), services);
}
public AWSLocalContainerInfraService(AWSContainer container) {
this.container = container;
}
public AWSLocalContainerInfraService(String imageName, Service... services) {
container = initContainer(imageName);
String name = ContainerEnvironmentUtil.containerName(this.getClass());
if (name != null) {
container.withCreateContainerCmdModifier(cmd -> cmd.withName(name));
}
container.setupServices(services);
}
protected AWSContainer initContainer(String imageName, Service... services) {
boolean fixedPort = !this.getClass().getName().contains("TestService");
return new AWSContainer(imageName, fixedPort, services);
}
private String getAmazonHost() {
return container.getAmazonHost();
}
@Override
public AWSContainer getContainer() {
return container;
}
@Override
public String amazonAWSHost() {
return container.getAmazonHost();
}
@Override
public String region() {
return Region.US_EAST_1.toString();
}
@Override
public String protocol() {
return "http";
}
@Override
public String accessKey() {
return container.getCredentialsProvider().resolveCredentials().accessKeyId();
}
@Override
public String secretKey() {
return container.getCredentialsProvider().resolveCredentials().secretAccessKey();
}
@Override
public Properties getConnectionProperties() {
Properties properties = new Properties();
properties.put(AWSConfigs.ACCESS_KEY, accessKey());
properties.put(AWSConfigs.SECRET_KEY, secretKey());
properties.put(AWSConfigs.REGION, region());
properties.put(AWSConfigs.AMAZON_AWS_HOST, amazonAWSHost());
properties.put(AWSConfigs.PROTOCOL, protocol());
return properties;
}
public URI getServiceEndpoint() {
return container.getServiceEndpoint();
}
@Override
public void registerProperties() {
AwsCredentials credentials = container.getCredentialsProvider().resolveCredentials();
/**
* We need to set these. For some sets, when they instantiate the clients within Camel, they need to know what
* is the Amazon host being used (ie.: when creating them using the withEndpointConfiguration()). Because this
* happens within Camel, there's no way to pass that information easily. Therefore, the host is set as a
* property and read by whatever class/method creates the clients to pass to Camel.
*
* Do not unset.
*/
System.setProperty(AWSConfigs.AMAZON_AWS_HOST, getAmazonHost());
System.setProperty(AWSConfigs.SECRET_KEY, credentials.secretAccessKey());
System.setProperty(AWSConfigs.ACCESS_KEY, credentials.accessKeyId());
System.setProperty(AWSConfigs.AMAZON_AWS_HOST, getAmazonHost());
System.setProperty(AWSConfigs.REGION, Region.US_EAST_1.toString());
System.setProperty(AWSConfigs.PROTOCOL, "http");
}
@Override
public void initialize() {
LOG.debug("Trying to start the container");
container.withStartupAttempts(5);
container.start();
registerProperties();
LOG.info("AWS service running at address {}", getServiceEndpoint());
}
@Override
public void shutdown() {
LOG.info("Stopping the local AWS services");
container.stop();
}
}
| AWSLocalContainerInfraService |
java | grpc__grpc-java | core/src/main/java/io/grpc/internal/ManagedClientTransport.java | {
"start": 946,
"end": 1344
} | interface ____ return before calling other
* methods.
*
* <p>Typically the transport owns the streams it creates through {@link #newStream}, while some
* implementations may transfer the streams to somewhere else. Either way they must conform to the
* contract defined by {@link #shutdown}, {@link Listener#transportShutdown} and
* {@link Listener#transportTerminated}.
*/
@ThreadSafe
public | and |
java | junit-team__junit5 | junit-platform-launcher/src/main/java/org/junit/platform/launcher/Launcher.java | {
"start": 750,
"end": 1512
} | interface ____ responsible for determining
* the set of test engines to delegate to at runtime and for ensuring that
* each test engine has an
* {@linkplain org.junit.platform.engine.TestEngine#getId ID} that is unique
* among the registered test engines. For example, the default implementation
* returned by {@link org.junit.platform.launcher.core.LauncherFactory#create}
* dynamically discovers test engines via Java's {@link java.util.ServiceLoader
* ServiceLoader} mechanism.
*
* <p>Test discovery and execution require a {@link LauncherDiscoveryRequest}
* that is passed to all registered engines. Each engine decides which tests it
* can discover and execute according to the supplied request.
*
* <p>Prior to executing tests, clients of this | are |
java | mockito__mockito | mockito-core/src/testFixtures/java/org/mockitoutil/ClassLoaders.java | {
"start": 11071,
"end": 12380
} | class ____ extends ClassLoaders {
private final ArrayList<String> excludedPrefixes = new ArrayList<String>();
private final ArrayList<URL> codeSourceUrls = new ArrayList<URL>();
public ExcludingURLClassLoaderBuilder without(String... privatePrefixes) {
excludedPrefixes.addAll(asList(privatePrefixes));
return this;
}
public ExcludingURLClassLoaderBuilder withCodeSourceUrls(String... urls) {
codeSourceUrls.addAll(pathsToURLs(urls));
return this;
}
public ExcludingURLClassLoaderBuilder withCodeSourceUrlOf(Class<?>... classes) {
for (Class<?> clazz : classes) {
codeSourceUrls.add(obtainCurrentClassPathOf(clazz.getName()));
}
return this;
}
public ExcludingURLClassLoaderBuilder withCurrentCodeSourceUrls() {
codeSourceUrls.add(obtainCurrentClassPathOf(ClassLoaders.class.getName()));
return this;
}
public ClassLoader build() {
return new LocalExcludingURLClassLoader(
jdkClassLoader(),
codeSourceUrls.toArray(new URL[codeSourceUrls.size()]),
excludedPrefixes);
}
}
static | ExcludingURLClassLoaderBuilder |
java | quarkusio__quarkus | devtools/gradle/gradle-application-plugin/src/main/java/io/quarkus/gradle/tasks/ImagePush.java | {
"start": 238,
"end": 722
} | class ____ extends ImageTask {
@Inject
public ImagePush() {
super("Perform an image push", true);
}
@TaskAction
public void imagePush() {
Map<String, String> forcedProperties = new HashMap<String, String>();
forcedProperties.put(QUARKUS_CONTAINER_IMAGE_BUILD, "true");
forcedProperties.put(QUARKUS_CONTAINER_IMAGE_PUSH, "true");
getAdditionalForcedProperties().get().getProperties().putAll(forcedProperties);
}
}
| ImagePush |
java | google__dagger | javatests/dagger/functional/subcomponent/multibindings/SubcomponentBuilderMultibindingsTest.java | {
"start": 4797,
"end": 5378
} | interface ____ {
@Provides
@IntoSet
static String provideStringMulti() {
return "child";
}
@Provides
@ChildFoo
static Foo provideChildFoo(FloatingSub.Builder builder) {
return builder.build().getFoo();
}
}
private ChildDoesNotInstallFloating() {}
}
// This is similar to the first, except this time the components installs the subcomponent via
// factory methods. Here, we expect the child to get a new subcomponent and so should see its
// multibinding contribution.
public static final | ChildModule |
java | quarkusio__quarkus | extensions/hibernate-orm/deployment/src/test/java/io/quarkus/hibernate/orm/sql_load_script/DefaultSqlLoadScriptAbsentTestCase.java | {
"start": 363,
"end": 1230
} | class ____ {
@RegisterExtension
static QuarkusUnitTest runner = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(MyEntity.class, SqlLoadScriptTestResource.class)
.addAsResource("application.properties"))
.setLogRecordPredicate(record -> record.getLevel().intValue() >= Level.WARNING.intValue())
// In particular, we don't want Hibernate ORM to log
// "Specified schema generation script file [import.sql] did not exist for reading"
// when "import.sql" is just the Quarkus default.
.assertLogRecords(records -> assertThat(records).extracting(LogRecord::getMessage).isEmpty());
@Test
public void testImportSqlLoadScriptTest() {
// No startup failure, so we're already good.
}
}
| DefaultSqlLoadScriptAbsentTestCase |
java | elastic__elasticsearch | x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManagerHolder.java | {
"start": 352,
"end": 1267
} | class ____ {
private final JobManager instance;
/**
* Create an empty holder which also means that no job manager gets created.
*/
public JobManagerHolder() {
this.instance = null;
}
/**
* Create a holder that allows lazy creation of a job manager.
*
*/
public JobManagerHolder(JobManager jobManager) {
this.instance = jobManager;
}
public boolean isEmpty() {
return instance == null;
}
/**
* Get the instance of the held JobManager.
*
* @return job manager instance
* @throws ElasticsearchException if holder has been created with the empty constructor
*/
public JobManager getJobManager() {
if (instance == null) {
throw new ElasticsearchException("Tried to get job manager although Machine Learning is disabled");
}
return instance;
}
}
| JobManagerHolder |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/web/context/support/AnnotationConfigWebApplicationContext.java | {
"start": 5386,
"end": 8567
} | class ____ extends AbstractRefreshableWebApplicationContext
implements AnnotationConfigRegistry {
private @Nullable BeanNameGenerator beanNameGenerator;
private @Nullable ScopeMetadataResolver scopeMetadataResolver;
private final Set<BeanRegistrar> beanRegistrars = new LinkedHashSet<>();
private final Set<Class<?>> componentClasses = new LinkedHashSet<>();
private final Set<String> basePackages = new LinkedHashSet<>();
/**
* Set a custom {@link BeanNameGenerator} for use with {@link AnnotatedBeanDefinitionReader}
* and/or {@link ClassPathBeanDefinitionScanner}.
* <p>Default is {@link org.springframework.context.annotation.AnnotationBeanNameGenerator}.
* @see AnnotatedBeanDefinitionReader#setBeanNameGenerator
* @see ClassPathBeanDefinitionScanner#setBeanNameGenerator
*/
public void setBeanNameGenerator(@Nullable BeanNameGenerator beanNameGenerator) {
this.beanNameGenerator = beanNameGenerator;
}
/**
* Return the custom {@link BeanNameGenerator} for use with {@link AnnotatedBeanDefinitionReader}
* and/or {@link ClassPathBeanDefinitionScanner}, if any.
*/
protected @Nullable BeanNameGenerator getBeanNameGenerator() {
return this.beanNameGenerator;
}
/**
* Set a custom {@link ScopeMetadataResolver} for use with {@link AnnotatedBeanDefinitionReader}
* and/or {@link ClassPathBeanDefinitionScanner}.
* <p>Default is an {@link org.springframework.context.annotation.AnnotationScopeMetadataResolver}.
* @see AnnotatedBeanDefinitionReader#setScopeMetadataResolver
* @see ClassPathBeanDefinitionScanner#setScopeMetadataResolver
*/
public void setScopeMetadataResolver(@Nullable ScopeMetadataResolver scopeMetadataResolver) {
this.scopeMetadataResolver = scopeMetadataResolver;
}
/**
* Return the custom {@link ScopeMetadataResolver} for use with {@link AnnotatedBeanDefinitionReader}
* and/or {@link ClassPathBeanDefinitionScanner}, if any.
*/
protected @Nullable ScopeMetadataResolver getScopeMetadataResolver() {
return this.scopeMetadataResolver;
}
/**
* Invoke the given registrars for registering their beans with this
* application context.
* <p>Note that {@link #refresh()} must be called in order for the context
* to fully process the new classes.
* @param registrars one or more {@link BeanRegistrar} instances
* @since 7.0
*/
@Override
public void register(BeanRegistrar... registrars) {
Assert.notEmpty(registrars, "At least one BeanRegistrar must be specified");
Collections.addAll(this.beanRegistrars, registrars);
}
/**
* Register one or more component classes to be processed.
* <p>Note that {@link #refresh()} must be called in order for the context
* to fully process the new classes.
* @param componentClasses one or more component classes,
* for example, {@link org.springframework.context.annotation.Configuration @Configuration} classes
* @see #scan(String...)
* @see #loadBeanDefinitions(DefaultListableBeanFactory)
* @see #setConfigLocation(String)
* @see #refresh()
*/
@Override
public void register(Class<?>... componentClasses) {
Assert.notEmpty(componentClasses, "At least one component | AnnotationConfigWebApplicationContext |
java | google__auto | value/src/test/java/com/google/auto/value/processor/AutoValueCompilationTest.java | {
"start": 108180,
"end": 108502
} | class ____<E> {",
" public void addAll(Iterator<? extends E> elements) {}",
"",
" public ImmutableSet<E> build() {",
" return null;",
" }",
" }",
"",
" @AutoValue.Builder",
" public | ImmutableSetBuilder |
java | quarkusio__quarkus | extensions/resteasy-classic/resteasy/deployment/src/test/java/io/quarkus/resteasy/test/root/ApplicationTest.java | {
"start": 1018,
"end": 3421
} | class ____ {
@RegisterExtension
static QuarkusUnitTest runner = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(
IResourceTest.class, ResourceInheritedInterfaceTest.class,
AResourceTest.class, ResourceInheritedClassTest.class,
ResourceTest1.class, ResourceTest2.class,
ResponseFilter1.class, ResponseFilter2.class,
ResponseFilter3.class, ResponseFilter4.class, ResponseFilter5.class, ResponseFilter6.class,
Feature1.class, Feature2.class, DynamicFeature1.class, DynamicFeature2.class,
ExceptionMapper1.class, ExceptionMapper2.class, AppTest.class, AppTest2.class));
@DisplayName("Should access to ok of resource 1 and provide a response with the expected headers")
@Test
void should_call_ok_of_resource_1() {
when()
.get("/rt-1/ok")
.then()
.header("X-RF-1", notNullValue())
.header("X-RF-2", nullValue())
.header("X-RF-3", notNullValue())
.header("X-RF-4", nullValue())
.header("X-RF-5", notNullValue())
.header("X-RF-6", nullValue())
.body(Matchers.is("ok1"));
}
@DisplayName("Should access to ko of resource 1 and call the expected exception mapper")
@Test
void should_call_ko_of_resource_1() {
when()
.get("/rt-1/ko")
.then()
.statusCode(Response.Status.SERVICE_UNAVAILABLE.getStatusCode());
}
@DisplayName("Should access to ok of resource 1 and provide a response with the expected headers")
@Test
void should_not_call_ok_of_resource_2() {
when()
.get("/rt-2/ok")
.then()
.statusCode(Response.Status.SERVICE_UNAVAILABLE.getStatusCode());
}
@DisplayName("Should access to path inherited from an interface")
@Test
void should_call_inherited_from_interface() {
when()
.get("/rt-i/ok")
.then()
.statusCode(Response.Status.OK.getStatusCode())
.body(Matchers.is("ok-i"));
}
@DisplayName("Should access to path inherited from a | ApplicationTest |
java | elastic__elasticsearch | modules/lang-painless/src/test/java/org/elasticsearch/painless/UnaryTests.java | {
"start": 569,
"end": 4451
} | class ____ extends ScriptTestCase {
/** basic tests */
public void testBasics() {
assertEquals(false, exec("return !true;"));
assertEquals(true, exec("boolean x = false; return !x;"));
assertEquals(-2, exec("return ~1;"));
assertEquals(-2, exec("byte x = 1; return ~x;"));
assertEquals(1, exec("return +1;"));
assertEquals(1.0, exec("double x = 1; return +x;"));
assertEquals(-1, exec("return -1;"));
assertEquals(-2, exec("short x = 2; return -x;"));
assertEquals(-1.0, exec("def x = (def)-1.0; return +x"));
expectScriptThrows(IllegalArgumentException.class, () -> exec("double x = (Double)-1.0; return +x"));
expectScriptThrows(IllegalArgumentException.class, () -> exec("double x = (ArrayList)-1.0; return +x"));
}
public void testNegationInt() throws Exception {
assertEquals(-1, exec("return -1;"));
assertEquals(1, exec("return -(-1);"));
assertEquals(0, exec("return -0;"));
}
public void testPlus() {
assertEquals(-1, exec("byte x = (byte)-1; return +x"));
assertEquals(-1, exec("short x = (short)-1; return +x"));
assertEquals(65535, exec("char x = (char)-1; return +x"));
assertEquals(-1, exec("int x = -1; return +x"));
assertEquals(-1L, exec("long x = -1L; return +x"));
assertEquals(-1.0F, exec("float x = -1F; return +x"));
assertEquals(-1.0, exec("double x = -1.0; return +x"));
}
public void testDefNot() {
assertEquals(~1, exec("def x = (byte)1; return ~x"));
assertEquals(~1, exec("def x = (short)1; return ~x"));
assertEquals(~1, exec("def x = (char)1; return ~x"));
assertEquals(~1, exec("def x = 1; return ~x"));
assertEquals(~1L, exec("def x = 1L; return ~x"));
}
public void testDefNotTypedRet() {
assertEquals((double) ~1, exec("def x = (byte)1; double y = ~x; return y;"));
assertEquals((float) ~1, exec("def x = (short)1; float y = ~x; return y;"));
assertEquals((long) ~1, exec("def x = (char)1; long y = ~x; return y;"));
assertEquals(~1, exec("def x = 1; int y = ~x; return y;"));
}
public void testDefNeg() {
assertEquals(-1, exec("def x = (byte)1; return -x"));
assertEquals(-1, exec("def x = (short)1; return -x"));
assertEquals(-1, exec("def x = (char)1; return -x"));
assertEquals(-1, exec("def x = 1; return -x"));
assertEquals(-1L, exec("def x = 1L; return -x"));
assertEquals(-1.0F, exec("def x = 1F; return -x"));
assertEquals(-1.0, exec("def x = 1.0; return -x"));
}
public void testDefNegTypedRet() {
assertEquals((double) -1, exec("def x = (byte)1; double y = -x; return y;"));
assertEquals((float) -1, exec("def x = (short)1; float y = -x; return y;"));
assertEquals((long) -1, exec("def x = (char)1; long y = -x; return y;"));
assertEquals(-1, exec("def x = 1; int y = -x; return y;"));
}
public void testDefPlus() {
assertEquals(-1, exec("def x = (byte)-1; return +x"));
assertEquals(-1, exec("def x = (short)-1; return +x"));
assertEquals(65535, exec("def x = (char)-1; return +x"));
assertEquals(-1, exec("def x = -1; return +x"));
assertEquals(-1L, exec("def x = -1L; return +x"));
assertEquals(-1.0F, exec("def x = -1F; return +x"));
assertEquals(-1.0D, exec("def x = -1.0; return +x"));
}
public void testDefPlusTypedRet() {
assertEquals((double) -1, exec("def x = (byte)-1; double y = +x; return y;"));
assertEquals((float) -1, exec("def x = (short)-1; float y = +x; return y;"));
assertEquals((long) 65535, exec("def x = (char)-1; long y = +x; return y;"));
assertEquals(-1, exec("def x = -1; int y = +x; return y;"));
}
}
| UnaryTests |
java | elastic__elasticsearch | x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/CliCommandsTests.java | {
"start": 574,
"end": 1500
} | class ____ extends SqlCliTestCase {
public void testCliCommands() {
TestTerminal testTerminal = new TestTerminal();
HttpClient httpClient = mock(HttpClient.class);
CliSession cliSession = new CliSession(httpClient);
CliCommands cliCommands = new CliCommands(
(terminal, session, line) -> line.equals("foo"),
(terminal, session, line) -> line.equals("bar"),
(terminal, session, line) -> line.equals("baz")
);
assertTrue(cliCommands.handle(testTerminal, cliSession, "foo"));
assertTrue(cliCommands.handle(testTerminal, cliSession, "bar"));
assertTrue(cliCommands.handle(testTerminal, cliSession, "baz"));
assertFalse(cliCommands.handle(testTerminal, cliSession, ""));
assertFalse(cliCommands.handle(testTerminal, cliSession, "something"));
verifyNoMoreInteractions(httpClient);
}
}
| CliCommandsTests |
java | processing__processing4 | java/src/processing/mode/java/RuntimePathBuilder.java | {
"start": 23198,
"end": 24912
} | class ____ implements RuntimePathFactoryStrategy {
final private AtomicReference<List<String>> cachedResult;
final private RuntimePathFactoryStrategy innerStrategy;
/**
* Create a new cache around {RuntimePathFactoryStrategy}.
*
* @param newInnerStrategy The strategy to cache.
*/
public CachedRuntimePathFactory(RuntimePathFactoryStrategy newInnerStrategy) {
cachedResult = new AtomicReference<>(null);
innerStrategy = newInnerStrategy;
}
/**
* Invalidate the cached path so that, when requested next time, it will be rebuilt from
* scratch.
*/
public void invalidateCache() {
cachedResult.set(null);
}
/**
* Return the cached classpath or, if not cached, build a classpath using the inner strategy.
*
* <p>
* Return the cached classpath or, if not cached, build a classpath using the inner strategy.
* Note that this getter will not check to see if mode, imports, or sketch have changed. If a
* cached value is available, it will be returned without examining the identity of the
* parameters.
* </p>
*
* @param mode The {JavaMode} for which the classpath should be built.
* @param imports The sketch (user) imports.
* @param sketch The sketch for which a classpath is to be returned.
* @return Newly generated classpath.
*/
@Override
public List<String> buildClasspath(JavaMode mode, List<ImportStatement> imports,
Sketch sketch) {
return cachedResult.updateAndGet((cachedValue) ->
cachedValue == null ? innerStrategy.buildClasspath(mode, imports, sketch) : cachedValue
);
}
}
}
| CachedRuntimePathFactory |
java | elastic__elasticsearch | modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/transport/netty4/Netty4TransportMultiPortIntegrationIT.java | {
"start": 1683,
"end": 4343
} | class ____ extends ESNetty4IntegTestCase {
private static final int NUMBER_OF_CLIENT_PORTS = Constants.WINDOWS ? 300 : 10;
private static int randomPort = -1;
private static String randomPortRange;
@Override
protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
if (randomPort == -1) {
randomPort = randomIntBetween(49152, 65535 - NUMBER_OF_CLIENT_PORTS);
randomPortRange = Strings.format("%s-%s", randomPort, randomPort + NUMBER_OF_CLIENT_PORTS);
}
Settings.Builder builder = Settings.builder()
.put(super.nodeSettings(nodeOrdinal, otherSettings))
.put("network.host", "127.0.0.1")
.put("transport.profiles.client1.port", randomPortRange)
.put("transport.profiles.client1.publish_host", "127.0.0.7")
.put("transport.profiles.client1.publish_port", "4321")
.put("transport.profiles.client1.tcp.reuse_address", true);
return builder.build();
}
@Network
public void testThatInfosAreExposed() throws Exception {
NodesInfoResponse response = clusterAdmin().prepareNodesInfo().clear().setTransport(true).get();
for (NodeInfo nodeInfo : response.getNodes()) {
assertThat(nodeInfo.getInfo(TransportInfo.class).getProfileAddresses().keySet(), hasSize(1));
assertThat(nodeInfo.getInfo(TransportInfo.class).getProfileAddresses(), hasKey("client1"));
BoundTransportAddress boundTransportAddress = nodeInfo.getInfo(TransportInfo.class).getProfileAddresses().get("client1");
for (TransportAddress transportAddress : boundTransportAddress.boundAddresses()) {
assertThat(transportAddress, instanceOf(TransportAddress.class));
}
// bound addresses
for (TransportAddress transportAddress : boundTransportAddress.boundAddresses()) {
assertThat(transportAddress, instanceOf(TransportAddress.class));
assertThat(
transportAddress.address().getPort(),
is(allOf(greaterThanOrEqualTo(randomPort), lessThanOrEqualTo(randomPort + 10)))
);
}
// publish address
assertThat(boundTransportAddress.publishAddress(), instanceOf(TransportAddress.class));
TransportAddress publishAddress = boundTransportAddress.publishAddress();
assertThat(NetworkAddress.format(publishAddress.address().getAddress()), is("127.0.0.7"));
assertThat(publishAddress.address().getPort(), is(4321));
}
}
}
| Netty4TransportMultiPortIntegrationIT |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/MapResetTest.java | {
"start": 494,
"end": 1178
} | class ____ {
private int id;
private int pageCountNum;
private MetaData metadata;
public int getPageCountNum() {
return pageCountNum;
}
public void setPageCountNum(int pageCountNum) {
this.pageCountNum = pageCountNum;
}
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
public MetaData getMetadata() {
return metadata;
}
public void setMetadata(MetaData metadata) {
this.metadata = metadata;
}
}
public static | Book |
java | redisson__redisson | redisson/src/main/java/org/redisson/RedissonScript.java | {
"start": 1177,
"end": 14967
} | class ____ implements RScript {
private final Codec codec;
private final CommandAsyncExecutor commandExecutor;
public RedissonScript(CommandAsyncExecutor commandExecutor) {
this.commandExecutor = commandExecutor;
this.codec = commandExecutor.getServiceManager().getCfg().getCodec();
}
public RedissonScript(CommandAsyncExecutor commandExecutor, Codec codec) {
this.commandExecutor = commandExecutor;
this.codec = commandExecutor.getServiceManager().getCodec(codec);
}
@Override
public String scriptLoad(String luaScript) {
return commandExecutor.get(scriptLoadAsync(luaScript));
}
public String scriptLoad(String key, String luaScript) {
return commandExecutor.get(scriptLoadAsync(key, luaScript));
}
@Override
public RFuture<String> scriptLoadAsync(String luaScript) {
List<CompletableFuture<String>> futures = commandExecutor.executeAllAsync(RedisCommands.SCRIPT_LOAD, luaScript);
CompletableFuture<Void> f = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
CompletableFuture<String> s = f.thenApply(r -> futures.get(0).getNow(null));
return new CompletableFutureWrapper<>(s);
}
@Override
public RFuture<String> scriptLoadAsync(String key, String luaScript) {
return commandExecutor.writeAsync(key, StringCodec.INSTANCE, RedisCommands.SCRIPT_LOAD, luaScript);
}
@Override
public <R> R eval(Mode mode, String luaScript, ReturnType returnType) {
return eval(mode, luaScript, returnType, Collections.emptyList());
}
@Override
public <R> R eval(Mode mode, String luaScript, ReturnType returnType, List<Object> keys, Object... values) {
String key = getKey(keys);
return eval(key, mode, luaScript, returnType, keys, values);
}
private static String getKey(List<Object> keys) {
String key = null;
if (!keys.isEmpty()) {
if (keys.get(0) instanceof byte[]) {
key = new String((byte[]) keys.get(0));
} else {
key = keys.get(0).toString();
}
}
return key;
}
@Override
public <R> RFuture<R> evalAsync(Mode mode, String luaScript, ReturnType returnType, List<Object> keys, Object... values) {
String key = getKey(keys);
return evalAsync(key, mode, luaScript, returnType, keys, values);
}
@Override
public <R> R evalSha(Mode mode, String shaDigest, ReturnType returnType) {
return evalSha(null, mode, shaDigest, returnType, Collections.emptyList());
}
@Override
public <R> R evalSha(Mode mode, String shaDigest, ReturnType returnType, List<Object> keys, Object... values) {
String key = getKey(keys);
return evalSha(key, mode, shaDigest, returnType, keys, values);
}
@Override
public <R> RFuture<R> evalShaAsync(Mode mode, String shaDigest, ReturnType returnType, List<Object> keys, Object... values) {
String key = getKey(keys);
return evalShaAsync(key, mode, codec, shaDigest, returnType, keys, values);
}
public <R> RFuture<R> evalShaAsync(String key, Mode mode, Codec codec, String shaDigest, ReturnType returnType, List<Object> keys, Object... values) {
RedissonScript script = new RedissonScript(commandExecutor, codec);
return script.evalShaAsync(key, mode, shaDigest, returnType, keys, values);
}
@Override
public void scriptKill() {
commandExecutor.get(scriptKillAsync());
}
public void scriptKill(String key) {
commandExecutor.get(scriptKillAsync(key));
}
@Override
public RFuture<Void> scriptKillAsync() {
return commandExecutor.writeAllVoidAsync(RedisCommands.SCRIPT_KILL);
}
public RFuture<Void> scriptKillAsync(String key) {
return commandExecutor.writeAsync(key, RedisCommands.SCRIPT_KILL);
}
@Override
public List<Boolean> scriptExists(String... shaDigests) {
return commandExecutor.get(scriptExistsAsync(shaDigests));
}
@Override
public RFuture<List<Boolean>> scriptExistsAsync(String... shaDigests) {
List<CompletableFuture<List<Boolean>>> futures = commandExecutor.executeAllAsync(RedisCommands.SCRIPT_EXISTS, (Object[]) shaDigests);
CompletableFuture<Void> f = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
CompletableFuture<List<Boolean>> s = f.thenApply(r -> {
List<Boolean> result = futures.get(0).getNow(new ArrayList<>());
for (CompletableFuture<List<Boolean>> future : futures.subList(1, futures.size())) {
List<Boolean> l = future.getNow(new ArrayList<>());
for (int i = 0; i < l.size(); i++) {
result.set(i, result.get(i) | l.get(i));
}
}
return result;
});
return new CompletableFutureWrapper<>(s);
}
public List<Boolean> scriptExists(String key, String... shaDigests) {
return commandExecutor.get(scriptExistsAsync(key, shaDigests));
}
public RFuture<List<Boolean>> scriptExistsAsync(String key, String... shaDigests) {
return commandExecutor.writeAsync(key, RedisCommands.SCRIPT_EXISTS, shaDigests);
}
@Override
public void scriptFlush() {
commandExecutor.get(scriptFlushAsync());
}
public void scriptFlush(String key) {
commandExecutor.get(scriptFlushAsync(key));
}
@Override
public RFuture<Void> scriptFlushAsync() {
return commandExecutor.writeAllVoidAsync(RedisCommands.SCRIPT_FLUSH);
}
public RFuture<Void> scriptFlushAsync(String key) {
return commandExecutor.writeAsync(key, RedisCommands.SCRIPT_FLUSH);
}
@Override
public <R> RFuture<R> evalShaAsync(Mode mode, String shaDigest, ReturnType returnType) {
return evalShaAsync(null, mode, codec, shaDigest, returnType, Collections.emptyList());
}
@Override
public <R> RFuture<R> evalAsync(Mode mode, String luaScript, ReturnType returnType) {
return evalAsync(null, mode, luaScript, returnType, Collections.emptyList());
}
private List<Object> encode(Collection<?> values, Codec codec) {
List<Object> result = new ArrayList<Object>(values.size());
for (Object object : values) {
result.add(commandExecutor.encode(codec, object));
}
return result;
}
@Override
public <R> RFuture<R> evalShaAsync(String key, Mode mode, String shaDigest, ReturnType returnType,
List<Object> keys, Object... values) {
RedisCommand command = new RedisCommand(returnType.getCommand(), "EVALSHA");
String mappedKey = commandExecutor.getServiceManager().getNameMapper().map(key);
List<Object> mappedKeys = keys.stream()
.map(k -> {
if (k instanceof String) {
return commandExecutor.getServiceManager().getNameMapper().map(k.toString());
}
return k;
})
.collect(Collectors.toList());
if (mode == Mode.READ_ONLY && commandExecutor.isEvalShaROSupported()) {
RedisCommand cmd = new RedisCommand(returnType.getCommand(), "EVALSHA_RO");
RFuture<R> f = commandExecutor.evalReadAsync(mappedKey, codec, cmd, shaDigest, mappedKeys, encode(Arrays.asList(values), codec).toArray());
CompletableFuture<R> result = new CompletableFuture<>();
f.whenComplete((r, e) -> {
if (e != null && e.getMessage().startsWith("ERR unknown command")) {
commandExecutor.setEvalShaROSupported(false);
RFuture<R> s = evalShaAsync(mappedKey, mode, shaDigest, returnType, mappedKeys, values);
commandExecutor.transfer(s.toCompletableFuture(), result);
return;
}
commandExecutor.transfer(f.toCompletableFuture(), result);
});
return new CompletableFutureWrapper<>(result);
}
return commandExecutor.evalWriteAsync(mappedKey, codec, command, shaDigest, mappedKeys, encode(Arrays.asList(values), codec).toArray());
}
@Override
public <R> RFuture<R> evalAsync(String key, Mode mode, String luaScript, ReturnType returnType, List<Object> keys,
Object... values) {
String mappedKey = commandExecutor.getServiceManager().getNameMapper().map(key);
List<Object> mappedKeys = keys.stream()
.map(k -> {
if (k instanceof String) {
return commandExecutor.getServiceManager().getNameMapper().map(k.toString());
}
return k;
})
.collect(Collectors.toList());
if (mode == Mode.READ_ONLY) {
return commandExecutor.evalReadAsync(mappedKey, codec, returnType.getCommand(), luaScript, mappedKeys, encode(Arrays.asList(values), codec).toArray());
}
return commandExecutor.evalWriteAsync(mappedKey, codec, returnType.getCommand(), luaScript, mappedKeys, encode(Arrays.asList(values), codec).toArray());
}
@Override
public <R> R evalSha(String key, Mode mode, String shaDigest, ReturnType returnType, List<Object> keys,
Object... values) {
return commandExecutor.get(evalShaAsync(key, mode, shaDigest, returnType, keys, values));
}
@Override
public <R> R eval(String key, Mode mode, String luaScript, ReturnType returnType, List<Object> keys,
Object... values) {
return commandExecutor.get(evalAsync(key, mode, luaScript, returnType, keys, values));
}
@Override
public <R> R eval(Mode mode, String luaScript, ReturnType returnType, Function<Collection<R>, R> resultMapper, Object... values) {
return commandExecutor.get(evalAsync(mode, luaScript, returnType, resultMapper, values));
}
@Override
public <R> RFuture<R> evalAsync(Mode mode, String luaScript, ReturnType returnType, Function<Collection<R>, R> resultMapper, Object... values) {
List<Object> args = new ArrayList<>();
args.add(luaScript);
args.add(0);
for (Object object : values) {
args.add(commandExecutor.encode(codec, object));
}
List<CompletableFuture<R>> futures;
if (mode == Mode.READ_ONLY) {
futures = commandExecutor.readAllAsync(codec, returnType.getCommand(), args.toArray());
} else {
futures = commandExecutor.writeAllAsync(codec, returnType.getCommand(), args.toArray());
}
CompletableFuture<Void> r = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
CompletableFuture<R> res = r.thenApply(v -> {
List<R> l = futures.stream().map(f -> f.join()).collect(Collectors.toList());
return resultMapper.apply(l);
});
return new CompletableFutureWrapper<>(res);
}
@Override
public <R> R evalSha(Mode mode, String shaDigest, ReturnType returnType, Function<Collection<R>, R> resultMapper, Object... values) {
return commandExecutor.get(evalShaAsync(mode, shaDigest, returnType, resultMapper, values));
}
@Override
public <R> RFuture<R> evalShaAsync(Mode mode, String shaDigest, ReturnType returnType, Function<Collection<R>, R> resultMapper, Object... values) {
List<Object> args = new ArrayList<>();
args.add(shaDigest);
args.add(0);
for (Object object : values) {
args.add(commandExecutor.encode(codec, object));
}
if (mode == Mode.READ_ONLY && commandExecutor.isEvalShaROSupported()) {
RedisCommand cmd = new RedisCommand(returnType.getCommand(), "EVALSHA_RO");
List<CompletableFuture<R>> futures = commandExecutor.readAllAsync(codec, cmd, args.toArray());
CompletableFuture<Void> r = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
CompletableFuture<R> rr = r.handle((res, e) -> {
if (e != null) {
if (e.getMessage().startsWith("ERR unknown command")) {
commandExecutor.setEvalShaROSupported(false);
return evalShaAsync(mode, shaDigest, returnType, resultMapper, values);
}
CompletableFuture<R> ex = new CompletableFuture<>();
ex.completeExceptionally(e);
return ex;
}
List<R> l = futures.stream().map(f -> f.join()).collect(Collectors.toList());
R result = resultMapper.apply(l);
return CompletableFuture.completedFuture(result);
}).thenCompose(ff -> ff);
return new CompletableFutureWrapper<>(rr);
}
List<CompletableFuture<R>> futures = commandExecutor.readAllAsync(codec, returnType.getCommand(), args.toArray());
CompletableFuture<Void> r = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
CompletableFuture<R> res = r.thenApply(v -> {
List<R> l = futures.stream().map(f -> f.join()).collect(Collectors.toList());
return resultMapper.apply(l);
});
return new CompletableFutureWrapper<>(res);
}
}
| RedissonScript |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/sql/dialect/postgresql/ast/stmt/PGDropDatabaseStatement.java | {
"start": 902,
"end": 1765
} | class ____ extends SQLDropCatalogStatement implements PGSQLStatement {
private boolean usingWith;
private boolean force;
public PGDropDatabaseStatement(DbType dbType) {
super(dbType);
}
public boolean isUsingWith() {
return usingWith;
}
public void setUsingWith(boolean usingWith) {
this.usingWith = usingWith;
}
public boolean isForce() { return force; }
public void setForce(boolean force) { this.force = force; }
protected void accept0(SQLASTVisitor visitor) {
if (visitor instanceof PGASTVisitor) {
accept0((PGASTVisitor) visitor);
}
}
@Override
public void accept0(PGASTVisitor visitor) {
if (visitor.visit(this)) {
acceptChild(visitor, this.getName());
}
visitor.endVisit(this);
}
}
| PGDropDatabaseStatement |
java | quarkusio__quarkus | extensions/smallrye-graphql-client/deployment/src/test/java/io/quarkus/smallrye/graphql/client/deployment/TypesafeGraphQLClientShortcutTest.java | {
"start": 803,
"end": 858
} | class ____, the
* package will be deduced.
*/
public | name |
java | apache__flink | flink-table/flink-table-runtime/src/test/java/org/apache/flink/table/runtime/operators/join/interval/TimeIntervalStreamJoinTestBase.java | {
"start": 1881,
"end": 2829
} | class ____ extends org.apache.flink.api.common.functions.AbstractRichFunction "
+ "implements org.apache.flink.table.runtime.generated.JoinCondition {\n"
+ "\n"
+ " public TestIntervalJoinCondition(Object[] reference) {\n"
+ " }\n"
+ "\n"
+ " @Override\n"
+ " public boolean apply(org.apache.flink.table.data.RowData in1, org.apache.flink.table.data.RowData in2) {\n"
+ " return true;\n"
+ " }\n"
+ "}\n";
protected IntervalJoinFunction joinFunction =
new IntervalJoinFunction(
new GeneratedJoinCondition(
"TestIntervalJoinCondition", funcCode, new Object[0]),
outputRowType,
new boolean[] {true});
}
| TestIntervalJoinCondition |
java | apache__logging-log4j2 | log4j-core-test/src/test/java/org/apache/logging/log4j/core/config/builder/ConfigurationAssemblerTest.java | {
"start": 2505,
"end": 6162
} | class ____ {
@Test
void testBuildConfiguration() {
try {
System.setProperty(
Constants.LOG4J_CONTEXT_SELECTOR, "org.apache.logging.log4j.core.async.AsyncLoggerContextSelector");
final ConfigurationBuilder<BuiltConfiguration> builder =
ConfigurationBuilderFactory.newConfigurationBuilder();
CustomConfigurationFactory.addTestFixtures("config name", builder);
final Configuration configuration = builder.build();
try (final LoggerContext ctx = Configurator.initialize(configuration)) {
validate(configuration);
}
} finally {
System.getProperties().remove(Constants.LOG4J_CONTEXT_SELECTOR);
}
}
@Test
void testCustomConfigurationFactory() {
try {
System.setProperty(
ConfigurationFactory.CONFIGURATION_FACTORY_PROPERTY,
"org.apache.logging.log4j.core.config.builder.CustomConfigurationFactory");
System.setProperty(
Constants.LOG4J_CONTEXT_SELECTOR, "org.apache.logging.log4j.core.async.AsyncLoggerContextSelector");
final Configuration config = ((LoggerContext) LogManager.getContext(false)).getConfiguration();
validate(config);
} finally {
System.getProperties().remove(Constants.LOG4J_CONTEXT_SELECTOR);
System.getProperties().remove(ConfigurationFactory.CONFIGURATION_FACTORY_PROPERTY);
}
}
private void validate(final Configuration config) {
assertNotNull(config);
assertNotNull(config.getName());
assertFalse(config.getName().isEmpty());
assertNotNull(config, "No configuration created");
assertEquals(LifeCycle.State.STARTED, config.getState(), "Incorrect State: " + config.getState());
final Map<String, Appender> appenders = config.getAppenders();
assertNotNull(appenders);
assertEquals(2, appenders.size(), "Incorrect number of Appenders: " + appenders.size());
final KafkaAppender kafkaAppender = (KafkaAppender) appenders.get("Kafka");
final GelfLayout gelfLayout = (GelfLayout) kafkaAppender.getLayout();
final ConsoleAppender consoleAppender = (ConsoleAppender) appenders.get("Stdout");
final PatternLayout patternLayout = (PatternLayout) consoleAppender.getLayout();
final Map<String, LoggerConfig> loggers = config.getLoggers();
assertNotNull(loggers);
assertEquals(2, loggers.size(), "Incorrect number of LoggerConfigs: " + loggers.size());
final LoggerConfig rootLoggerConfig = loggers.get("");
assertEquals(Level.ERROR, rootLoggerConfig.getLevel());
assertFalse(rootLoggerConfig.isIncludeLocation());
final LoggerConfig loggerConfig = loggers.get("org.apache.logging.log4j");
assertEquals(Level.DEBUG, loggerConfig.getLevel());
assertTrue(loggerConfig.isIncludeLocation());
final Filter filter = config.getFilter();
assertNotNull(filter, "No Filter");
assertThat(filter, instanceOf(ThresholdFilter.class));
final List<CustomLevelConfig> customLevels = config.getCustomLevels();
assertNotNull(filter, "No CustomLevels");
assertEquals(1, customLevels.size());
final CustomLevelConfig customLevel = customLevels.get(0);
assertEquals("Panic", customLevel.getLevelName());
assertEquals(17, customLevel.getIntLevel());
final Logger logger = LogManager.getLogger(getClass());
logger.info("Welcome to Log4j!");
}
}
| ConfigurationAssemblerTest |
java | google__auto | value/src/main/java/com/google/auto/value/extension/toprettystring/ToPrettyString.java | {
"start": 2019,
"end": 2411
} | class ____ {
* abstract List<String> property();
*
* @ToPrettyString
* abstract String toPrettyString();
* }
*
* System.out.println(new AutoValue_Pretty(List.of("abc", "def", "has\nnewline")).toPrettyString())
* // Pretty{
* // property = [
* // abc,
* // def,
* // has
* // newline,
* // ]
* // }
* }</pre>
*/
@Documented
@Target(METHOD)
public @ | Pretty |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/impl/engine/CamelPostProcessorHelperTest.java | {
"start": 2535,
"end": 26178
} | class ____ extends ContextTestSupport {
private final MySynchronization mySynchronization = new MySynchronization();
private final Properties myProp = new Properties();
@Override
protected Registry createCamelRegistry() {
Registry jndi = new DefaultRegistry();
jndi.bind("myProp", myProp);
jndi.bind("foo", new FooBar());
return jndi;
}
@Override
protected CamelContext createCamelContext() throws Exception {
CamelContext context = super.createCamelContext();
context.getPropertiesComponent().setLocation("ref:myProp");
return context;
}
@Test
public void testConstructor() {
CamelPostProcessorHelper helper = new CamelPostProcessorHelper();
assertNull(helper.getCamelContext());
helper.setCamelContext(context);
assertNotNull(helper.getCamelContext());
}
@Test
public void testConstructorCamelContext() {
CamelPostProcessorHelper helper = new CamelPostProcessorHelper(context);
assertNotNull(helper.getCamelContext());
}
@Test
public void testConsume() throws Exception {
CamelPostProcessorHelper helper = new CamelPostProcessorHelper(context);
MyConsumeBean my = new MyConsumeBean();
Method method = my.getClass().getMethod("consumeSomething", String.class);
helper.consumerInjection(method, my, "foo");
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("Hello World");
template.sendBody("seda:foo", "Hello World");
assertMockEndpointsSatisfied();
}
@Test
public void testConsumePrivate() throws Exception {
CamelPostProcessorHelper helper = new CamelPostProcessorHelper(context);
MyPrivateConsumeBean my = new MyPrivateConsumeBean();
Method method = my.getClass().getDeclaredMethod("consumeSomethingPrivate", String.class);
RuntimeCamelException e = assertThrows(RuntimeCamelException.class,
() -> helper.consumerInjection(method, my, "foo"),
"Should have thrown exception");
IllegalArgumentException iae = assertIsInstanceOf(IllegalArgumentException.class, e.getCause());
assertTrue(iae.getMessage().startsWith("The method private void"));
assertTrue(iae.getMessage().endsWith("(for example the method must be public)"));
}
@Test
public void testConsumeSynchronization() throws Exception {
CamelPostProcessorHelper helper = new CamelPostProcessorHelper(context);
MyConsumeAndSynchronizationBean my = new MyConsumeAndSynchronizationBean();
Method method = my.getClass().getMethod("consumeSomething", String.class, Exchange.class);
helper.consumerInjection(method, my, "foo");
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("Hello World");
template.sendBody("seda:foo", "Hello World");
assertMockEndpointsSatisfied();
// give UoW a bit of time
await("onDone invokation").atMost(1, TimeUnit.SECONDS).until(mySynchronization::isOnDone);
}
@Test
public void testProduceSynchronization() throws Exception {
MyProduceAndSynchronizationBean my = new MyProduceAndSynchronizationBean();
CamelPostProcessorHelper helper = new CamelPostProcessorHelper(context);
Producer producer = helper.createInjectionProducer(context.getEndpoint("mock:result"), my, "foo");
my.setProducer(producer);
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("Hello World");
my.produceSomething("Hello World");
assertMockEndpointsSatisfied();
// give UoW a bit of time
await("onDone invocation").atMost(1, TimeUnit.SECONDS).until(mySynchronization::isOnDone);
}
@Test
public void testEndpointInjectProducerTemplate() throws Exception {
CamelPostProcessorHelper helper = new CamelPostProcessorHelper(context);
MyEndpointInjectBeanProducerTemplate bean = new MyEndpointInjectBeanProducerTemplate();
Method method = bean.getClass().getMethod("setProducer", ProducerTemplate.class);
EndpointInject endpointInject = method.getAnnotation(EndpointInject.class);
Class<?>[] parameterTypes = method.getParameterTypes();
for (Class<?> type : parameterTypes) {
String propertyName = org.apache.camel.util.ObjectHelper.getPropertyName(method);
Object value = helper.getInjectionValue(type, endpointInject.value(), endpointInject.property(), propertyName, bean,
"foo");
ObjectHelper.invokeMethod(method, bean, value);
}
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("Hello World");
assertNotNull(bean.getProducer());
bean.send("Hello World");
assertMockEndpointsSatisfied();
}
@Test
public void testEndpointInjectProducer() throws Exception {
CamelPostProcessorHelper helper = new CamelPostProcessorHelper(context);
MyEndpointBeanProducer bean = new MyEndpointBeanProducer();
Method method = bean.getClass().getMethod("setProducer", Producer.class);
EndpointInject endpointInject = method.getAnnotation(EndpointInject.class);
Class<?>[] parameterTypes = method.getParameterTypes();
for (Class<?> type : parameterTypes) {
String propertyName = org.apache.camel.util.ObjectHelper.getPropertyName(method);
Object value = helper.getInjectionValue(type, endpointInject.value(), endpointInject.property(), propertyName, bean,
"foo");
ObjectHelper.invokeMethod(method, bean, value);
}
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("Hello World");
assertNotNull(bean.getProducer());
Exchange exchange = new DefaultExchange(context);
exchange.getIn().setBody("Hello World");
bean.send(exchange);
assertMockEndpointsSatisfied();
}
@Test
public void testEndpointInjectPollingConsumer() throws Exception {
CamelPostProcessorHelper helper = new CamelPostProcessorHelper(context);
MyEndpointBeanPollingConsumer bean = new MyEndpointBeanPollingConsumer();
Method method = bean.getClass().getMethod("setConsumer", PollingConsumer.class);
EndpointInject endpointInject = method.getAnnotation(EndpointInject.class);
Class<?>[] parameterTypes = method.getParameterTypes();
for (Class<?> type : parameterTypes) {
String propertyName = org.apache.camel.util.ObjectHelper.getPropertyName(method);
Object value = helper.getInjectionValue(type, endpointInject.value(), endpointInject.property(), propertyName, bean,
"foo");
ObjectHelper.invokeMethod(method, bean, value);
}
template.sendBody("seda:foo", "Hello World");
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("Hello World");
assertNotNull(bean.getConsumer());
Exchange exchange = bean.consume();
template.send("mock:result", exchange);
assertMockEndpointsSatisfied();
}
@Test
public void testEndpointInjectProducerTemplateField() throws Exception {
CamelPostProcessorHelper helper = new CamelPostProcessorHelper(context);
MyEndpointInjectProducerTemplate bean = new MyEndpointInjectProducerTemplate();
Field field = bean.getClass().getField("producer");
EndpointInject endpointInject = field.getAnnotation(EndpointInject.class);
Class<?> type = field.getType();
String propertyName = "producer";
Object value
= helper.getInjectionValue(type, endpointInject.value(), endpointInject.property(), propertyName, bean, "foo");
field.set(bean, value);
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("Hello World");
Exchange exchange = new DefaultExchange(context);
exchange.getIn().setBody("Hello World");
bean.send(exchange);
assertMockEndpointsSatisfied();
}
@Test
public void testEndpointInjectFluentProducerTemplateField() throws Exception {
CamelPostProcessorHelper helper = new CamelPostProcessorHelper(context);
MyEndpointInjectFluentProducerTemplate bean = new MyEndpointInjectFluentProducerTemplate();
Field field = bean.getClass().getField("producer");
EndpointInject endpointInject = field.getAnnotation(EndpointInject.class);
Class<?> type = field.getType();
String propertyName = "producer";
Object value
= helper.getInjectionValue(type, endpointInject.value(), endpointInject.property(), propertyName, bean, "foo");
field.set(bean, value);
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("Hello World");
Exchange exchange = new DefaultExchange(context);
exchange.getIn().setBody("Hello World");
bean.send(exchange);
assertMockEndpointsSatisfied();
}
@Test
public void testEndpointInjectProducerTemplateFieldNoDefaultEndpoint() throws Exception {
CamelPostProcessorHelper helper = new CamelPostProcessorHelper(context);
MyEndpointInjectProducerTemplateNoDefaultEndpoint bean = new MyEndpointInjectProducerTemplateNoDefaultEndpoint();
Field field = bean.getClass().getField("producer");
EndpointInject endpointInject = field.getAnnotation(EndpointInject.class);
Class<?> type = field.getType();
String propertyName = "producer";
Object value
= helper.getInjectionValue(type, endpointInject.value(), endpointInject.property(), propertyName, bean, "foo");
field.set(bean, value);
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("Hello World");
Exchange exchange = new DefaultExchange(context);
exchange.getIn().setBody("Hello World");
bean.send(exchange);
assertMockEndpointsSatisfied();
}
@Test
public void testEndpointInjectProducerTemplateFieldNameUnknown() throws Exception {
CamelPostProcessorHelper helper = new CamelPostProcessorHelper(context);
MyEndpointInjectProducerTemplateNameUnknown bean = new MyEndpointInjectProducerTemplateNameUnknown();
Field field = bean.getClass().getField("producer");
EndpointInject endpointInject = field.getAnnotation(EndpointInject.class);
Class<?> type = field.getType();
String propertyName = "producer";
ResolveEndpointFailedException e = assertThrows(ResolveEndpointFailedException.class,
() -> helper.getInjectionValue(type, endpointInject.value(), endpointInject.property(), propertyName, bean,
"foo"),
"Should throw exception");
assertEquals("ref://unknown", e.getUri());
NoSuchBeanException nsbe = assertIsInstanceOf(NoSuchBeanException.class, e.getCause());
assertEquals("No bean could be found in the registry for: unknown of type: org.apache.camel.Endpoint",
nsbe.getMessage());
}
@Test
public void testEndpointInjectProducerTemplateFieldUrlUnknown() throws Exception {
CamelPostProcessorHelper helper = new CamelPostProcessorHelper(context);
MyEndpointInjectProducerTemplateUrlUnknown bean = new MyEndpointInjectProducerTemplateUrlUnknown();
Field field = bean.getClass().getField("producer");
EndpointInject endpointInject = field.getAnnotation(EndpointInject.class);
Class<?> type = field.getType();
String propertyName = "producer";
assertThrows(NoSuchEndpointException.class,
() -> helper.getInjectionValue(type, endpointInject.value(), endpointInject.property(), propertyName, bean,
"foo"),
"Should throw exception");
}
@Test
public void testPropertyFieldInject() throws Exception {
myProp.put("myTimeout", "2000");
myProp.put("myApp", "Camel");
CamelPostProcessorHelper helper = new CamelPostProcessorHelper(context);
MyPropertyFieldBean bean = new MyPropertyFieldBean();
Field field = bean.getClass().getField("timeout");
PropertyInject propertyInject = field.getAnnotation(PropertyInject.class);
Class<?> type = field.getType();
Object value = helper.getInjectionPropertyValue(type, null, propertyInject.value(), "", "");
assertEquals(Integer.valueOf(2000), (Object) Integer.valueOf(String.valueOf(value)));
field = bean.getClass().getField("greeting");
propertyInject = field.getAnnotation(PropertyInject.class);
type = field.getType();
value = helper.getInjectionPropertyValue(type, null, propertyInject.value(), "", "");
assertEquals("Hello Camel", value);
}
@Test
public void testPropertyFieldDefaultValueInject() throws Exception {
myProp.put("myApp", "Camel");
CamelPostProcessorHelper helper = new CamelPostProcessorHelper(context);
MyPropertyFieldBean bean = new MyPropertyFieldBean();
Field field = bean.getClass().getField("timeout");
PropertyInject propertyInject = field.getAnnotation(PropertyInject.class);
Class<?> type = field.getType();
Object value = helper.getInjectionPropertyValue(type, null, propertyInject.value(), "5000", "");
assertEquals(Integer.valueOf(5000), (Object) Integer.valueOf(String.valueOf(value)));
field = bean.getClass().getField("greeting");
propertyInject = field.getAnnotation(PropertyInject.class);
type = field.getType();
value = helper.getInjectionPropertyValue(type, null, propertyInject.value(), "", "");
assertEquals("Hello Camel", value);
}
@Test
public void testPropertyFieldSeparatorArrayInject() throws Exception {
myProp.put("serverPorts", "4444;5555"); // test with semicolon as separator
myProp.put("hosts", "serverA , serverB"); // test with whitespace noise
CamelPostProcessorHelper helper = new CamelPostProcessorHelper(context);
MyPropertyFieldSeparatorArrayBean bean = new MyPropertyFieldSeparatorArrayBean();
Field field = bean.getClass().getField("ports");
PropertyInject propertyInject = field.getAnnotation(PropertyInject.class);
Class<?> type = field.getType();
Object value
= helper.getInjectionPropertyValue(type, null, propertyInject.value(), "", propertyInject.separator());
assertIsInstanceOf(int[].class, value);
int[] arr = (int[]) value;
assertEquals(2, arr.length);
assertEquals(4444, arr[0]);
assertEquals(5555, arr[1]);
field = bean.getClass().getField("hosts");
propertyInject = field.getAnnotation(PropertyInject.class);
type = field.getType();
value = helper.getInjectionPropertyValue(type, null, propertyInject.value(), "", propertyInject.separator());
assertIsInstanceOf(String[].class, value);
String[] arr2 = (String[]) value;
assertEquals(2, arr2.length);
assertEquals("serverA", arr2[0]);
assertEquals("serverB", arr2[1]);
}
@Test
public void testPropertyFieldSeparatorListInject() throws Exception {
myProp.put("serverPorts", "4444;5555"); // test with semicolon as separator
myProp.put("hosts", "serverA , serverB"); // test with whitespace noise
CamelPostProcessorHelper helper = new CamelPostProcessorHelper(context);
MyPropertyFieldSeparatorListBean bean = new MyPropertyFieldSeparatorListBean();
Field field = bean.getClass().getField("ports");
PropertyInject propertyInject = field.getAnnotation(PropertyInject.class);
Class<?> type = field.getType();
Object value = helper.getInjectionPropertyValue(type, field.getGenericType(),
propertyInject.value(), "", propertyInject.separator());
assertIsInstanceOf(List.class, value);
List arr = (List) value;
assertEquals(2, arr.size());
assertEquals(4444, arr.get(0));
assertEquals(5555, arr.get(1));
field = bean.getClass().getField("hosts");
propertyInject = field.getAnnotation(PropertyInject.class);
type = field.getType();
value = helper.getInjectionPropertyValue(type, field.getGenericType(),
propertyInject.value(), "", propertyInject.separator());
assertIsInstanceOf(Set.class, value);
Set arr2 = (Set) value;
assertEquals(2, arr.size());
Iterator it = arr2.iterator();
assertEquals("serverA", it.next());
assertEquals("serverB", it.next());
}
@Test
public void testPropertyFieldSeparatorMapInject() throws Exception {
myProp.put("servers", "serverA = 4444 ; serverB=5555"); // test with semicolon as separator and whitespace
CamelPostProcessorHelper helper = new CamelPostProcessorHelper(context);
MyPropertyFieldSeparatorMapBean bean = new MyPropertyFieldSeparatorMapBean();
Field field = bean.getClass().getField("servers");
PropertyInject propertyInject = field.getAnnotation(PropertyInject.class);
Class<?> type = field.getType();
Object value = helper.getInjectionPropertyValue(type, field.getGenericType(),
propertyInject.value(), "", propertyInject.separator());
assertIsInstanceOf(Map.class, value);
Map arr = (Map) value;
assertEquals(2, arr.size());
assertEquals(4444, arr.get("serverA"));
assertEquals(5555, arr.get("serverB"));
}
@Test
public void testPropertyMethodInject() throws Exception {
myProp.put("myTimeout", "2000");
myProp.put("myApp", "Camel");
CamelPostProcessorHelper helper = new CamelPostProcessorHelper(context);
MyPropertyMethodBean bean = new MyPropertyMethodBean();
Method method = bean.getClass().getMethod("setTimeout", int.class);
PropertyInject propertyInject = method.getAnnotation(PropertyInject.class);
Class<?> type = method.getParameterTypes()[0];
Object value = helper.getInjectionPropertyValue(type, null, propertyInject.value(), "", "");
assertEquals(Integer.valueOf(2000), (Object) Integer.valueOf(String.valueOf(value)));
method = bean.getClass().getMethod("setGreeting", String.class);
propertyInject = method.getAnnotation(PropertyInject.class);
type = method.getParameterTypes()[0];
value = helper.getInjectionPropertyValue(type, null, propertyInject.value(), "", "");
assertEquals("Hello Camel", value);
}
@Test
public void testBeanInject() throws Exception {
CamelPostProcessorHelper helper = new CamelPostProcessorHelper(context);
MyBeanInjectBean bean = new MyBeanInjectBean();
Field field = bean.getClass().getField("foo");
BeanInject beanInject = field.getAnnotation(BeanInject.class);
Class<?> type = field.getType();
Object value = helper.getInjectionBeanValue(type, beanInject.value());
field.set(bean, value);
String out = bean.doSomething("World");
assertEquals("Hello World", out);
}
@Test
public void testBeanInjectNotFound() throws Exception {
CamelPostProcessorHelper helper = new CamelPostProcessorHelper(context);
MyBeanInjectBean bean = new MyBeanInjectBean();
Field field = bean.getClass().getField("foo");
Class<?> type = field.getType();
NoSuchBeanException e = assertThrows(NoSuchBeanException.class,
() -> helper.getInjectionBeanValue(type, "bar"),
"Should have thrown exception");
assertEquals("No bean could be found in the registry for: bar of type: org.apache.camel.impl.FooBar",
e.getMessage());
assertEquals("bar", e.getName());
}
@Test
public void testBeanInjectByType() throws Exception {
CamelPostProcessorHelper helper = new CamelPostProcessorHelper(context);
MyBeanInjectByTypeBean bean = new MyBeanInjectByTypeBean();
Field field = bean.getClass().getField("foo");
BeanInject beanInject = field.getAnnotation(BeanInject.class);
Class<?> type = field.getType();
Object value = helper.getInjectionBeanValue(type, beanInject.value());
field.set(bean, value);
String out = bean.doSomething("Camel");
assertEquals("Hello Camel", out);
}
@Test
public void testBeanConfigInjectByType() throws Exception {
Properties initial = new Properties();
initial.put("foobar.name", "Donald");
initial.put("foobar.age", "33");
context.getPropertiesComponent().setInitialProperties(initial);
CamelPostProcessorHelper helper = new CamelPostProcessorHelper(context);
MyBeanConfigInjectByTypeBean bean = new MyBeanConfigInjectByTypeBean();
Field field = bean.getClass().getField("config");
BeanConfigInject beanInject = field.getAnnotation(BeanConfigInject.class);
Class<?> type = field.getType();
Object value = helper.getInjectionBeanConfigValue(type, beanInject.value());
field.set(bean, value);
String out = bean.doSomething("Camel");
assertEquals("Donald (age: 33) likes Camel", out);
}
@Test
public void testBeanConfigInjectByMethod() throws Exception {
Properties initial = new Properties();
initial.put("foobar.name", "Goofy");
initial.put("foobar.age", "34");
context.getPropertiesComponent().setInitialProperties(initial);
CamelPostProcessorHelper helper = new CamelPostProcessorHelper(context);
MyBeanConfigInjectByMethod bean = new MyBeanConfigInjectByMethod();
Method method = bean.getClass().getMethod("initFooBar", FooBarConfig.class);
BeanConfigInject beanInject = method.getAnnotation(BeanConfigInject.class);
Class<?> type = method.getParameterTypes()[0];
Object value = helper.getInjectionBeanConfigValue(type, beanInject.value());
method.invoke(bean, value);
String out = bean.doSomething("Camel");
assertEquals("Goofy (age: 34) likes Camel", out);
}
@Test
public void testFluentProducerTemplateWithNoInjection() throws Exception {
CamelPostProcessorHelper helper = new CamelPostProcessorHelper(context);
NoBeanInjectionTestClass myBean = new NoBeanInjectionTestClass();
Field field = myBean.getClass().getField("fluentProducerTemplate");
EndpointInject inject = field.getAnnotation(EndpointInject.class);
String propertyName = "fluent";
Class<?> classType = field.getType();
Object value = helper.getInjectionValue(classType, inject.value(), inject.property(), propertyName, myBean, "bla");
field.set(myBean, value);
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("Bla Bla Bla. .");
Exchange exchange = new DefaultExchange(context);
exchange.getIn().setBody("Bla Bla Bla. .");
myBean.sendExchange(exchange);
assertMockEndpointsSatisfied();
}
public static | CamelPostProcessorHelperTest |
java | apache__kafka | trogdor/src/main/java/org/apache/kafka/trogdor/workload/SustainedConnectionWorker.java | {
"start": 13357,
"end": 16945
} | class ____ extends ClaimableConnection {
private KafkaConsumer<byte[], byte[]> consumer;
private TopicPartition activePartition;
private final String topicName;
private final Random rand;
private final Properties props;
ConsumerSustainedConnection() {
// These variables are used to maintain the connection itself.
this.topicName = SustainedConnectionWorker.this.spec.topicName();
this.consumer = null;
this.activePartition = null;
this.rand = new Random();
this.refreshRate = SustainedConnectionWorker.this.spec.refreshRateMs();
// This variable is used to maintain the connection properties.
this.props = new Properties();
this.props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, SustainedConnectionWorker.this.spec.bootstrapServers());
this.props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
this.props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 1);
this.props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 1024);
WorkerUtils.addConfigsToProperties(
this.props, SustainedConnectionWorker.this.spec.commonClientConf(), SustainedConnectionWorker.this.spec.consumerConf());
}
@Override
public void refresh() {
try {
if (this.consumer == null) {
// Housekeeping to track the number of opened connections.
SustainedConnectionWorker.this.totalConsumerConnections.incrementAndGet();
// Create the consumer and fetch the partitions for the specified topic.
this.consumer = new KafkaConsumer<>(this.props, new ByteArrayDeserializer(), new ByteArrayDeserializer());
List<TopicPartition> partitions = this.consumer.partitionsFor(this.topicName).stream()
.map(partitionInfo -> new TopicPartition(partitionInfo.topic(), partitionInfo.partition()))
.toList();
// Select a random partition and assign it.
this.activePartition = partitions.get(this.rand.nextInt(partitions.size()));
this.consumer.assign(List.of(this.activePartition));
}
// The behavior when passing in an empty list is to seek to the end of all subscribed partitions.
this.consumer.seekToEnd(List.of());
// Poll to keep the connection alive, ignoring any records returned.
this.consumer.poll(Duration.ofMillis(50));
} catch (Throwable e) {
// Set the consumer to be recreated on the next cycle.
this.closeQuietly();
// Housekeeping to track the number of opened connections and failed connection attempts.
SustainedConnectionWorker.this.totalConsumerConnections.decrementAndGet();
SustainedConnectionWorker.this.totalConsumerFailedConnections.incrementAndGet();
SustainedConnectionWorker.log.error("Error while refreshing sustained KafkaConsumer connection", e);
}
// Schedule this again and set to not in use.
this.completeRefresh();
}
@Override
protected void closeQuietly() {
Utils.closeQuietly(this.consumer, "KafkaConsumer");
this.consumer = null;
this.activePartition = null;
}
}
public | ConsumerSustainedConnection |
java | grpc__grpc-java | xds/src/main/java/io/grpc/xds/internal/Matchers.java | {
"start": 6792,
"end": 7103
} | class ____ {
public abstract long start();
public abstract long end();
public static Range create(long start, long end) {
return new AutoValue_Matchers_HeaderMatcher_Range(start, end);
}
}
}
/** Represents a fractional value. */
@AutoValue
public abstract static | Range |
java | apache__camel | components/camel-cxf/camel-cxf-soap/src/test/java/org/apache/camel/component/cxf/jaxws/CxfConsumerPayloadXPathTest.java | {
"start": 7434,
"end": 9013
} | class ____ implements Processor {
@Override
public void process(Exchange exchange) throws Exception {
Object obj = exchange.getIn().getBody();
String content = (String) obj;
String msgOut = constructSoapMessage(content);
exchange.getMessage().setBody(msgOut);
exchange.getMessage().setHeaders(exchange.getIn().getHeaders());
exchange.getMessage().setHeader(HEADER_SIZE, Integer.toString(content.length()));
}
}
private void simpleTest(int repeat, BaseRouteBuilder builder) throws Exception {
testConfiguration().withUseRouteBuilder(false);
context.addRoutes(builder);
startCamelContext();
String content = StringUtils.repeat("x", repeat);
String msgIn = constructSoapMessage(content);
Exchange exchgIn = new DefaultExchange(context);
exchgIn.setPattern(ExchangePattern.InOut);
exchgIn.getIn().setBody(msgIn);
//Execute
Exchange exchgOut = template.send(builder.getTestAddress(), exchgIn);
//Verify
String result = exchgOut.getMessage().getBody(String.class);
assertNotNull(result, "response on http call");
//check for data loss in received input (after xpath)
String headerSize = exchgOut.getMessage().getHeader(HEADER_SIZE, String.class);
assertEquals(Integer.toString(repeat), headerSize);
assertTrue(result.length() > repeat, "dataloss in output occurred");
stopCamelContext();
}
private abstract | ResponseProcessor |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/runtime/stream/table/AsyncCalcITCase.java | {
"start": 13730,
"end": 14093
} | class ____ extends AsyncFuncBase {
private static final long serialVersionUID = 1L;
public void eval(CompletableFuture<String> future, String a, String b, String c) {
executor.schedule(() -> future.complete("val " + a + b + c), 10, TimeUnit.MILLISECONDS);
}
}
/** Test function. */
public static | AsyncFuncThreeParams |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/KeyConverter.java | {
"start": 985,
"end": 1387
} | interface ____<T> {
/**
* Encodes a key as a byte array.
*
* @param key key to be encoded.
* @return a byte array.
*/
byte[] encode(T key);
/**
* Decodes a byte array and returns a key of type T.
*
* @param bytes byte representation
* @return an object(key) of type T which has been constructed after decoding
* the bytes.
*/
T decode(byte[] bytes);
}
| KeyConverter |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationClientProtocolOnHA.java | {
"start": 2445,
"end": 7995
} | class ____ extends ProtocolHATestBase {
private YarnClient client = null;
@BeforeEach
public void initiate() throws Exception {
startHACluster(1, true, false, false);
Configuration conf = new YarnConfiguration(this.conf);
client = createAndStartYarnClient(conf);
}
@AfterEach
public void shutDown() {
if (client != null) {
client.stop();
}
}
@Test
public void testGetApplicationReportOnHA() throws Exception {
ApplicationReport report =
client.getApplicationReport(cluster.createFakeAppId());
assertTrue(report != null);
assertEquals(cluster.createFakeAppReport(), report);
}
@Test
public void testGetNewApplicationOnHA() throws Exception {
ApplicationId appId =
client.createApplication().getApplicationSubmissionContext()
.getApplicationId();
assertTrue(appId != null);
assertEquals(cluster.createFakeAppId(), appId);
}
@Test
public void testGetClusterMetricsOnHA() throws Exception {
YarnClusterMetrics clusterMetrics =
client.getYarnClusterMetrics();
assertTrue(clusterMetrics != null);
assertEquals(cluster.createFakeYarnClusterMetrics(),
clusterMetrics);
}
@Test
public void testGetApplicationsOnHA() throws Exception {
List<ApplicationReport> reports =
client.getApplications();
assertTrue(reports != null);
assertFalse(reports.isEmpty());
assertEquals(cluster.createFakeAppReports(),
reports);
}
@Test
public void testGetClusterNodesOnHA() throws Exception {
List<NodeReport> reports = client.getNodeReports(NodeState.RUNNING);
assertTrue(reports != null);
assertFalse(reports.isEmpty());
assertEquals(cluster.createFakeNodeReports(),
reports);
}
@Test
public void testGetQueueInfoOnHA() throws Exception {
QueueInfo queueInfo = client.getQueueInfo("root");
assertTrue(queueInfo != null);
assertEquals(cluster.createFakeQueueInfo(),
queueInfo);
}
@Test
public void testGetQueueUserAclsOnHA() throws Exception {
List<QueueUserACLInfo> queueUserAclsList = client.getQueueAclsInfo();
assertTrue(queueUserAclsList != null);
assertFalse(queueUserAclsList.isEmpty());
assertEquals(cluster.createFakeQueueUserACLInfoList(),
queueUserAclsList);
}
@Test
public void testGetApplicationAttemptReportOnHA() throws Exception {
ApplicationAttemptReport report =
client.getApplicationAttemptReport(cluster
.createFakeApplicationAttemptId());
assertTrue(report != null);
assertEquals(cluster.createFakeApplicationAttemptReport(), report);
}
@Test
public void testGetApplicationAttemptsOnHA() throws Exception {
List<ApplicationAttemptReport> reports =
client.getApplicationAttempts(cluster.createFakeAppId());
assertTrue(reports != null);
assertFalse(reports.isEmpty());
assertEquals(cluster.createFakeApplicationAttemptReports(),
reports);
}
@Test
public void testGetContainerReportOnHA() throws Exception {
ContainerReport report =
client.getContainerReport(cluster.createFakeContainerId());
assertTrue(report != null);
assertEquals(cluster.createFakeContainerReport(), report);
}
@Test
public void testGetContainersOnHA() throws Exception {
List<ContainerReport> reports =
client.getContainers(cluster.createFakeApplicationAttemptId());
assertTrue(reports != null);
assertFalse(reports.isEmpty());
assertEquals(cluster.createFakeContainerReports(),
reports);
}
@Test
public void testSubmitApplicationOnHA() throws Exception {
ApplicationSubmissionContext appContext =
Records.newRecord(ApplicationSubmissionContext.class);
appContext.setApplicationId(cluster.createFakeAppId());
ContainerLaunchContext amContainer =
Records.newRecord(ContainerLaunchContext.class);
appContext.setAMContainerSpec(amContainer);
Resource capability = Records.newRecord(Resource.class);
capability.setMemorySize(10);
capability.setVirtualCores(1);
appContext.setResource(capability);
ApplicationId appId = client.submitApplication(appContext);
assertTrue(getActiveRM().getRMContext().getRMApps()
.containsKey(appId));
}
@Test
public void testMoveApplicationAcrossQueuesOnHA() throws Exception{
client.moveApplicationAcrossQueues(cluster.createFakeAppId(), "root");
}
@Test
public void testForceKillApplicationOnHA() throws Exception {
client.killApplication(cluster.createFakeAppId());
}
@Test
public void testGetDelegationTokenOnHA() throws Exception {
Token token = client.getRMDelegationToken(new Text(" "));
assertEquals(token, cluster.createFakeToken());
}
@Test
public void testRenewDelegationTokenOnHA() throws Exception {
RenewDelegationTokenRequest request =
RenewDelegationTokenRequest.newInstance(cluster.createFakeToken());
long newExpirationTime =
ClientRMProxy.createRMProxy(this.conf, ApplicationClientProtocol.class)
.renewDelegationToken(request).getNextExpirationTime();
assertEquals(newExpirationTime, cluster.createNextExpirationTime());
}
@Test
public void testCancelDelegationTokenOnHA() throws Exception {
CancelDelegationTokenRequest request =
CancelDelegationTokenRequest.newInstance(cluster.createFakeToken());
ClientRMProxy.createRMProxy(this.conf, ApplicationClientProtocol.class)
.cancelDelegationToken(request);
}
}
| TestApplicationClientProtocolOnHA |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/mapper/StoredValueFetcher.java | {
"start": 911,
"end": 2212
} | class ____ implements ValueFetcher {
private final SearchLookup lookup;
private LeafSearchLookup leafSearchLookup;
private final String fieldname;
private final StoredFieldsSpec storedFieldsSpec;
public StoredValueFetcher(SearchLookup lookup, String fieldname) {
this.lookup = lookup;
this.fieldname = fieldname;
this.storedFieldsSpec = new StoredFieldsSpec(false, false, Set.of(fieldname));
}
@Override
public void setNextReader(LeafReaderContext context) {
this.leafSearchLookup = lookup.getLeafSearchLookup(context);
}
@Override
public List<Object> fetchValues(Source source, int doc, List<Object> ignoredValues) throws IOException {
leafSearchLookup.setDocument(doc);
List<Object> values = leafSearchLookup.fields().get(fieldname).getValues();
if (values == null) {
return values;
} else {
return parseStoredValues(List.copyOf(values));
}
}
/**
* Given the values stored in lucene, parse it into a standard format.
*/
protected List<Object> parseStoredValues(List<Object> values) {
return values;
}
@Override
public StoredFieldsSpec storedFieldsSpec() {
return storedFieldsSpec;
}
}
| StoredValueFetcher |
java | spring-projects__spring-boot | module/spring-boot-graphql-test/src/test/java/org/springframework/boot/graphql/test/autoconfigure/tester/HttpGraphQlTesterAutoConfigurationTests.java | {
"start": 1678,
"end": 4578
} | class ____ {
private final ApplicationContextRunner contextRunner = new ApplicationContextRunner()
.withConfiguration(AutoConfigurations.of(HttpGraphQlTesterAutoConfiguration.class));
@Test
void shouldNotContributeTesterIfWebTestClientNotPresent() {
this.contextRunner.run((context) -> assertThat(context).doesNotHaveBean(HttpGraphQlTester.class));
}
@Test
void shouldContributeTesterBoutToMockMvc() {
this.contextRunner.withBean(MockMvc.class, () -> mock(MockMvc.class))
.withConfiguration(AutoConfigurations.of(WebTestClientAutoConfiguration.class))
.run((context) -> {
assertThat(context).hasSingleBean(HttpGraphQlTester.class);
assertThat(context.getBean(HttpGraphQlTester.class)).extracting("webTestClient")
.extracting("builder")
.extracting("baseUrl")
.isEqualTo("/graphql");
});
}
@Test
@WithResource(name = "META-INF/spring.factories",
content = """
org.springframework.boot.test.http.server.LocalTestWebServer$Provider=\
org.springframework.boot.graphql.test.autoconfigure.tester.HttpGraphQlTesterAutoConfigurationTests$TestLocalTestWebServerProvider
""")
void shouldContributeTesterBoundToHttpServer() {
this.contextRunner.withConfiguration(AutoConfigurations.of(WebTestClientAutoConfiguration.class))
.run((context) -> {
assertThat(context).hasSingleBean(HttpGraphQlTester.class);
assertThat(context.getBean(HttpGraphQlTester.class)).extracting("webTestClient")
.extracting("builder")
.extracting("uriBuilderFactory")
.asInstanceOf(InstanceOfAssertFactories.type(UriBuilderFactory.class))
.satisfies((uriBuilderFactory) -> assertThat(uriBuilderFactory.uriString("/something").build())
.isEqualTo(URI.create("https://localhost:4242/graphql/something")));
});
}
@Test
@WithResource(name = "META-INF/spring.factories",
content = """
org.springframework.boot.test.http.server.LocalTestWebServer$Provider=\
org.springframework.boot.graphql.test.autoconfigure.tester.HttpGraphQlTesterAutoConfigurationTests$TestLocalTestWebServerProvider
""")
void shouldContributeTesterBoundToHttpServerUsingCustomGraphQlHttpPath() {
this.contextRunner.withConfiguration(AutoConfigurations.of(WebTestClientAutoConfiguration.class))
.withPropertyValues("spring.graphql.http.path=/api/graphql")
.run((context) -> {
assertThat(context).hasSingleBean(HttpGraphQlTester.class);
assertThat(context.getBean(HttpGraphQlTester.class)).extracting("webTestClient")
.extracting("builder")
.extracting("uriBuilderFactory")
.asInstanceOf(InstanceOfAssertFactories.type(UriBuilderFactory.class))
.satisfies((uriBuilderFactory) -> assertThat(uriBuilderFactory.uriString("/something").build())
.isEqualTo(URI.create("https://localhost:4242/api/graphql/something")));
});
}
@SuppressWarnings("unused")
static | HttpGraphQlTesterAutoConfigurationTests |
java | spring-projects__spring-data-jpa | spring-data-jpa/src/main/java/org/springframework/data/jpa/convert/threeten/Jsr310JpaConverters.java | {
"start": 2426,
"end": 2500
} | class ____ {
@Converter(autoApply = true)
public static | Jsr310JpaConverters |
java | mockito__mockito | mockito-core/src/main/java/org/mockito/internal/util/reflection/InstrumentationMemberAccessor.java | {
"start": 10013,
"end": 20002
} | class ____ {
// Varargs(String whatever, Observer... observers) {
// }
// }
return DISPATCHER.invokeWithArguments(
handle.asFixedArity(), arguments);
} catch (Throwable throwable) {
thrown.set(true);
return throwable;
}
});
if (thrown.get()) {
throw new InvocationTargetException((Throwable) value);
} else {
return value;
}
} catch (InvocationTargetException e) {
throw e;
} catch (Throwable t) {
throw new IllegalStateException(
"Could not construct "
+ constructor
+ " with arguments "
+ Arrays.toString(arguments),
t);
}
}
@Override
public Object invoke(Method method, Object target, Object... arguments)
throws InvocationTargetException {
assureArguments(
method,
Modifier.isStatic(method.getModifiers()) ? null : target,
method.getDeclaringClass(),
arguments,
method.getParameterTypes());
try {
Object module =
DISPATCHER.invokeWithArguments(getModule.bindTo(method.getDeclaringClass()));
String packageName = method.getDeclaringClass().getPackage().getName();
assureOpen(module, packageName);
MethodHandle handle =
((MethodHandles.Lookup)
DISPATCHER.invokeWithArguments(
privateLookupIn,
method.getDeclaringClass(),
DISPATCHER.getLookup()))
.unreflect(method);
if (!Modifier.isStatic(method.getModifiers())) {
handle = handle.bindTo(target);
}
if (handle.isVarargsCollector()) {
handle = handle.asFixedArity();
}
try {
return DISPATCHER.invokeWithArguments(handle, arguments);
} catch (Throwable t) {
throw new InvocationTargetException(t);
}
} catch (InvocationTargetException e) {
throw e;
} catch (Throwable t) {
throw new IllegalStateException(
"Could not invoke "
+ method
+ " on "
+ target
+ " with arguments "
+ Arrays.toString(arguments),
t);
}
}
@Override
public Object get(Field field, Object target) {
assureArguments(
field,
Modifier.isStatic(field.getModifiers()) ? null : target,
field.getDeclaringClass(),
new Object[0],
new Class<?>[0]);
try {
Object module =
DISPATCHER.invokeWithArguments(getModule.bindTo(field.getDeclaringClass()));
String packageName = field.getDeclaringClass().getPackage().getName();
assureOpen(module, packageName);
MethodHandle handle =
((MethodHandles.Lookup)
DISPATCHER.invokeWithArguments(
privateLookupIn,
field.getDeclaringClass(),
DISPATCHER.getLookup()))
.unreflectGetter(field);
if (!Modifier.isStatic(field.getModifiers())) {
handle = handle.bindTo(target);
}
return DISPATCHER.invokeWithArguments(handle);
} catch (Throwable t) {
throw new IllegalStateException("Could not read " + field + " on " + target, t);
}
}
@Override
public void set(Field field, Object target, Object value) throws IllegalAccessException {
assureArguments(
field,
Modifier.isStatic(field.getModifiers()) ? null : target,
field.getDeclaringClass(),
new Object[] {value},
new Class<?>[] {field.getType()});
boolean illegalAccess = false;
try {
Object module =
DISPATCHER.invokeWithArguments(getModule.bindTo(field.getDeclaringClass()));
String packageName = field.getDeclaringClass().getPackage().getName();
assureOpen(module, packageName);
// Method handles do not allow setting final fields where setAccessible(true)
// is required before unreflecting.
boolean isFinal;
if (Modifier.isFinal(field.getModifiers())) {
isFinal = true;
try {
DISPATCHER.setAccessible(field, true);
} catch (Throwable ignored) {
illegalAccess =
true; // To distinguish from propagated illegal access exception.
throw new IllegalAccessException(
"Could not make final field " + field + " accessible");
}
} else {
isFinal = false;
}
try {
MethodHandle handle =
((MethodHandles.Lookup)
DISPATCHER.invokeWithArguments(
privateLookupIn,
field.getDeclaringClass(),
DISPATCHER.getLookup()))
.unreflectSetter(field);
if (!Modifier.isStatic(field.getModifiers())) {
handle = handle.bindTo(target);
}
DISPATCHER.invokeWithArguments(handle, value);
} finally {
if (isFinal) {
DISPATCHER.setAccessible(field, false);
}
}
} catch (Throwable t) {
if (illegalAccess) {
throw (IllegalAccessException) t;
} else {
throw new IllegalStateException("Could not read " + field + " on " + target, t);
}
}
}
private void assureOpen(Object module, String packageName) throws Throwable {
// It would be more reliable to check if a module's package already is opened to
// the dispatcher module from before. Unfortunately, there is no reliable check
// for doing so since the isOpen(String, Module) method always returns true
// if the second argument is an unnamed module. Therefore, for now, we need
// to reopen packages even if they are already opened to the dispatcher module.
if (!(Boolean) DISPATCHER.invokeWithArguments(isOpen, module, packageName)) {
DISPATCHER.invokeWithArguments(
redefineModule.bindTo(INSTRUMENTATION),
module,
Collections.emptySet(),
Collections.emptyMap(),
Collections.singletonMap(
packageName, Collections.singleton(DISPATCHER.getModule())),
Collections.emptySet(),
Collections.emptyMap());
}
}
private static void assureArguments(
AccessibleObject target,
Object owner,
Class<?> type,
Object[] values,
Class<?>[] types) {
if (owner != null) {
if (!type.isAssignableFrom(owner.getClass())) {
throw new IllegalArgumentException("Cannot access " + target + " on " + owner);
}
}
Object[] args = values;
if (args == null) {
args = new Object[0];
}
if (types.length != args.length) {
throw new IllegalArgumentException(
"Incorrect number of arguments for "
+ target
+ ": expected "
+ types.length
+ " but recevied "
+ args.length);
}
for (int index = 0; index < args.length; index++) {
if (args[index] == null) {
if (types[index].isPrimitive()) {
throw new IllegalArgumentException(
"Cannot assign null to primitive type "
+ types[index].getTypeName()
+ " for "
+ index
+ " parameter of "
+ target);
}
} else {
Class<?> resolved = WRAPPERS.getOrDefault(types[index], types[index]);
if (!resolved.isAssignableFrom(args[index].getClass())) {
throw new IllegalArgumentException(
"Cannot assign value of type "
+ args[index].getClass()
+ " to "
+ resolved
+ " for "
+ index
+ " parameter of "
+ target);
}
}
}
}
public | Varargs |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java | {
"start": 7259,
"end": 20373
} | enum ____ isn't found
public static SortOrder of(String order) {
for (var sortOrder : values()) {
if (sortOrder.order.equals(order)) {
return sortOrder;
}
}
throw new IllegalArgumentException("sort order not supported [" + order + "]");
}
}
public HotThreads interval(TimeValue interval) {
this.interval = interval;
return this;
}
public HotThreads busiestThreads(int busiestThreads) {
this.busiestThreads = busiestThreads;
return this;
}
public HotThreads ignoreIdleThreads(boolean ignoreIdleThreads) {
this.ignoreIdleThreads = ignoreIdleThreads;
return this;
}
public HotThreads threadElementsSnapshotCount(int threadElementsSnapshotCount) {
this.threadElementsSnapshotCount = threadElementsSnapshotCount;
return this;
}
public HotThreads type(ReportType type) {
this.type = type;
return this;
}
public HotThreads sortOrder(SortOrder order) {
this.sortOrder = order;
return this;
}
public void detect(Writer writer) throws Exception {
detect(writer, () -> {});
}
public void detect(Writer writer, Runnable onNextThread) throws Exception {
synchronized (mutex) {
innerDetect(ManagementFactory.getThreadMXBean(), SunThreadInfo.INSTANCE, Thread.currentThread().getId(), writer, onNextThread);
}
}
static boolean isKnownJDKThread(ThreadInfo threadInfo) {
return (knownJDKInternalThreads.stream()
.anyMatch(jvmThread -> threadInfo.getThreadName() != null && threadInfo.getThreadName().equals(jvmThread)));
}
static boolean isKnownIdleStackFrame(String className, String methodName) {
return (knownIdleStackFrames.stream().anyMatch(pair -> pair[0].equals(className) && pair[1].equals(methodName)));
}
static boolean isIdleThread(ThreadInfo threadInfo) {
if (isKnownJDKThread(threadInfo)) {
return true;
}
for (StackTraceElement frame : threadInfo.getStackTrace()) {
if (isKnownIdleStackFrame(frame.getClassName(), frame.getMethodName())) {
return true;
}
}
return false;
}
Map<Long, ThreadTimeAccumulator> getAllValidThreadInfos(ThreadMXBean threadBean, SunThreadInfo sunThreadInfo, long currentThreadId) {
long[] threadIds = threadBean.getAllThreadIds();
ThreadInfo[] threadInfos = threadBean.getThreadInfo(threadIds);
Map<Long, ThreadTimeAccumulator> result = Maps.newMapWithExpectedSize(threadIds.length);
for (int i = 0; i < threadIds.length; i++) {
if (threadInfos[i] == null || threadIds[i] == currentThreadId) {
continue;
}
long cpuTime = threadBean.getThreadCpuTime(threadIds[i]);
if (cpuTime == INVALID_TIMING) {
continue;
}
long allocatedBytes = type == ReportType.MEM ? sunThreadInfo.getThreadAllocatedBytes(threadIds[i]) : 0;
result.put(threadIds[i], new ThreadTimeAccumulator(threadInfos[i], interval, cpuTime, allocatedBytes));
}
return result;
}
ThreadInfo[][] captureThreadStacks(ThreadMXBean threadBean, long[] threadIds) throws InterruptedException {
ThreadInfo[][] result = new ThreadInfo[threadElementsSnapshotCount][];
// NOTE, javadoc of getThreadInfo says: If a thread of the given ID is not alive or does not exist,
// null will be set in the corresponding element in the returned array. A thread is alive if it has
// been started and has not yet died.
for (int j = 0; j < threadElementsSnapshotCount - 1; j++) {
result[j] = threadBean.getThreadInfo(threadIds, Integer.MAX_VALUE);
Thread.sleep(threadElementsSnapshotDelay.millis());
}
result[threadElementsSnapshotCount - 1] = threadBean.getThreadInfo(threadIds, Integer.MAX_VALUE);
return result;
}
private static boolean isThreadWaitBlockTimeMonitoringEnabled(ThreadMXBean threadBean) {
if (threadBean.isThreadContentionMonitoringSupported()) {
return threadBean.isThreadContentionMonitoringEnabled();
}
return false;
}
private double getTimeSharePercentage(long time) {
return (((double) time) / interval.nanos()) * 100;
}
void innerDetect(ThreadMXBean threadBean, SunThreadInfo sunThreadInfo, long currentThreadId, Writer writer, Runnable onNextThread)
throws Exception {
if (threadBean.isThreadCpuTimeSupported() == false) {
throw new ElasticsearchException("thread CPU time is not supported on this JDK");
}
if (type == ReportType.MEM && sunThreadInfo.isThreadAllocatedMemorySupported() == false) {
throw new ElasticsearchException("thread allocated memory is not supported on this JDK");
}
// Enabling thread contention monitoring is required for capturing JVM thread wait/blocked times. If we weren't
// able to enable this functionality during bootstrap, we should not produce HotThreads reports.
if (isThreadWaitBlockTimeMonitoringEnabled(threadBean) == false) {
throw new ElasticsearchException("thread wait/blocked time accounting is not supported on this JDK");
}
writer.append("Hot threads at ")
.append(DATE_TIME_FORMATTER.format(LocalDateTime.now(Clock.systemUTC())))
.append(", interval=")
.append(interval.toString())
.append(", busiestThreads=")
.append(Integer.toString(busiestThreads))
.append(", ignoreIdleThreads=")
.append(Boolean.toString(ignoreIdleThreads))
.append(":\n");
onNextThread.run();
// Capture before and after thread state with timings
Map<Long, ThreadTimeAccumulator> previousThreadInfos = getAllValidThreadInfos(threadBean, sunThreadInfo, currentThreadId);
Thread.sleep(interval.millis());
Map<Long, ThreadTimeAccumulator> latestThreadInfos = getAllValidThreadInfos(threadBean, sunThreadInfo, currentThreadId);
latestThreadInfos.forEach((threadId, accumulator) -> accumulator.subtractPrevious(previousThreadInfos.get(threadId)));
// Sort by delta CPU time on thread.
List<ThreadTimeAccumulator> topThreads = new ArrayList<>(latestThreadInfos.values());
// Special comparator for CPU mode with TOTAL sort type only. Otherwise, we simply use the value.
if (type == ReportType.CPU && sortOrder == SortOrder.TOTAL) {
CollectionUtil.introSort(
topThreads,
Comparator.comparingLong(ThreadTimeAccumulator::getRunnableTime)
.thenComparingLong(ThreadTimeAccumulator::getCpuTime)
.reversed()
);
} else {
CollectionUtil.introSort(topThreads, Comparator.comparingLong(ThreadTimeAccumulator.valueGetterForReportType(type)).reversed());
}
topThreads = topThreads.subList(0, Math.min(busiestThreads, topThreads.size()));
long[] topThreadIds = topThreads.stream().mapToLong(t -> t.threadId).toArray();
// analyse N stack traces for the top threads
ThreadInfo[][] allInfos = captureThreadStacks(threadBean, topThreadIds);
for (int t = 0; t < topThreads.size(); t++) {
String threadName = null;
for (ThreadInfo[] info : allInfos) {
if (info != null && info[t] != null) {
if (ignoreIdleThreads && isIdleThread(info[t])) {
info[t] = null;
continue;
}
threadName = info[t].getThreadName();
break;
}
}
if (threadName == null) {
continue; // thread is not alive yet or died before the first snapshot - ignore it!
}
ThreadTimeAccumulator topThread = topThreads.get(t);
switch (type) {
case MEM -> writer.append(
Strings.format(
"%n%s memory allocated by thread '%s'%n",
ByteSizeValue.ofBytes(topThread.getAllocatedBytes()),
threadName
)
);
case CPU -> {
double percentCpu = getTimeSharePercentage(topThread.getCpuTime());
double percentOther = Transports.isTransportThread(threadName) && topThread.getCpuTime() == 0L
? 100.0
: getTimeSharePercentage(topThread.getOtherTime());
double percentTotal = (Transports.isTransportThread(threadName)) ? percentCpu : percentOther + percentCpu;
String otherLabel = (Transports.isTransportThread(threadName)) ? "idle" : "other";
writer.append(
Strings.format(
"%n%4.1f%% [cpu=%1.1f%%, %s=%1.1f%%] (%s out of %s) %s usage by thread '%s'%n",
percentTotal,
percentCpu,
otherLabel,
percentOther,
TimeValue.timeValueNanos(topThread.getCpuTime() + topThread.getOtherTime()),
interval,
type.getTypeValue(),
threadName
)
);
}
default -> {
long time = ThreadTimeAccumulator.valueGetterForReportType(type).applyAsLong(topThread);
double percent = getTimeSharePercentage(time);
writer.append(
Strings.format(
"%n%4.1f%% (%s out of %s) %s usage by thread '%s'%n",
percent,
TimeValue.timeValueNanos(time),
interval,
type.getTypeValue(),
threadName
)
);
}
}
// for each snapshot (2nd array index) find later snapshot for same thread with max number of
// identical StackTraceElements (starting from end of each)
boolean[] done = new boolean[threadElementsSnapshotCount];
for (int i = 0; i < threadElementsSnapshotCount; i++) {
if (done[i]) continue;
int maxSim = 1;
boolean[] similars = new boolean[threadElementsSnapshotCount];
for (int j = i + 1; j < threadElementsSnapshotCount; j++) {
if (done[j]) continue;
int similarity = similarity(allInfos[i][t], allInfos[j][t]);
if (similarity > maxSim) {
maxSim = similarity;
similars = new boolean[threadElementsSnapshotCount];
}
if (similarity == maxSim) similars[j] = true;
}
// print out trace maxSim levels of i, and mark similar ones as done
int count = 1;
for (int j = i + 1; j < threadElementsSnapshotCount; j++) {
if (similars[j]) {
done[j] = true;
count++;
}
}
if (allInfos[i][t] != null) {
final StackTraceElement[] show = allInfos[i][t].getStackTrace();
if (count == 1) {
writer.append(Strings.format(" unique snapshot%n"));
for (StackTraceElement frame : show) {
writer.append(Strings.format(" %s%n", frame));
}
} else {
writer.append(
Strings.format(" %d/%d snapshots sharing following %d elements%n", count, threadElementsSnapshotCount, maxSim)
);
for (int l = show.length - maxSim; l < show.length; l++) {
writer.append(Strings.format(" %s%n", show[l]));
}
}
}
}
onNextThread.run();
}
}
static int similarity(ThreadInfo threadInfo, ThreadInfo threadInfo0) {
StackTraceElement[] s1 = threadInfo == null ? EMPTY : threadInfo.getStackTrace();
StackTraceElement[] s2 = threadInfo0 == null ? EMPTY : threadInfo0.getStackTrace();
int i = s1.length - 1;
int j = s2.length - 1;
int rslt = 0;
while (i >= 0 && j >= 0 && s1[i].equals(s2[j])) {
rslt++;
i--;
j--;
}
return rslt;
}
static | value |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/server/runtime/src/main/java/org/jboss/resteasy/reactive/server/core/parameters/converters/YearMonthParamConverter.java | {
"start": 1208,
"end": 1803
} | class ____ extends TemporalSupplier<YearMonthParamConverter> {
public Supplier() {
}
public Supplier(String pattern, String dateTimeFormatterProviderClassName) {
super(pattern, dateTimeFormatterProviderClassName);
}
@Override
protected YearMonthParamConverter createConverter(DateTimeFormatter dateTimeFormatter) {
return new YearMonthParamConverter(dateTimeFormatter);
}
@Override
public String getClassName() {
return YearMonthParamConverter.class.getName();
}
}
}
| Supplier |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/type/IdentityEqualityType.java | {
"start": 198,
"end": 917
} | class ____ extends TypeBase
{
private static final long serialVersionUID = 1L;
protected IdentityEqualityType(Class<?> raw,
TypeBindings bindings, JavaType superClass, JavaType[] superInts,
int hash,
Object valueHandler, Object typeHandler, boolean asStatic) {
super(raw, bindings, superClass, superInts, hash, valueHandler, typeHandler, asStatic);
}
@Override
public final boolean equals(Object o) {
return o == this;
}
@Override
public final int hashCode() {
// The identity hashCode must be used otherwise all instances will have colliding hashCodes.
return System.identityHashCode(this);
}
}
| IdentityEqualityType |
java | quarkusio__quarkus | extensions/resteasy-classic/resteasy/deployment/src/test/java/io/quarkus/resteasy/test/NullHeaderTestCase.java | {
"start": 1026,
"end": 1365
} | class ____ implements ContainerResponseFilter {
@Override
public void filter(ContainerRequestContext requestContext, ContainerResponseContext responseContext)
throws IOException {
responseContext.getHeaders().add("nullHeader", null);
}
}
@Path("/null")
public static | NullFilter |
java | apache__kafka | server-common/src/main/java/org/apache/kafka/server/share/persister/DeleteShareGroupStateResult.java | {
"start": 997,
"end": 1103
} | class ____ the result from {@link Persister#deleteState(DeleteShareGroupStateParameters)}.
*/
public | contains |
java | spring-projects__spring-boot | module/spring-boot-restdocs/src/main/java/org/springframework/boot/restdocs/test/autoconfigure/RestDocsProperties.java | {
"start": 1017,
"end": 1749
} | class ____ {
/**
* The URI scheme for to use (for example http).
*/
private @Nullable String uriScheme;
/**
* The URI host to use.
*/
private @Nullable String uriHost;
/**
* The URI port to use.
*/
private @Nullable Integer uriPort;
public @Nullable String getUriScheme() {
return this.uriScheme;
}
public void setUriScheme(@Nullable String uriScheme) {
this.uriScheme = uriScheme;
}
public @Nullable String getUriHost() {
return this.uriHost;
}
public void setUriHost(@Nullable String uriHost) {
this.uriHost = uriHost;
}
public @Nullable Integer getUriPort() {
return this.uriPort;
}
public void setUriPort(@Nullable Integer uriPort) {
this.uriPort = uriPort;
}
}
| RestDocsProperties |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/UnnecessaryDefaultInEnumSwitchTest.java | {
"start": 11352,
"end": 11920
} | enum ____ {
ONE,
TWO,
THREE
}
boolean m(Case c) {
switch (c) {
case ONE:
case TWO:
return true;
case THREE:
default:
// This is a comment
System.out.println("Test");
}
return false;
}
}
""")
.addOutputLines(
"out/Test.java",
"""
| Case |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/web/servlet/client/samples/bind/ControllerTests.java | {
"start": 1083,
"end": 1437
} | class ____ {
private RestTestClient client;
@BeforeEach
void setUp() {
this.client = RestTestClient.bindToController(new TestController()).build();
}
@Test
void test() {
this.client.get().uri("/test")
.exchange()
.expectStatus().isOk()
.expectBody(String.class).isEqualTo("It works!");
}
@RestController
static | ControllerTests |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/StatementSwitchToExpressionSwitchTest.java | {
"start": 157608,
"end": 158581
} | class ____ {
int[] x;
public Test(int foo) {
x = null;
}
public int[] foo() {
int z = 0;
switch (z) {
case 1:
String[] foo = {"hello", "world"};
break;
case 2:
foo = null;
}
return x;
}
}
""")
.setArgs("-XepOpt:StatementSwitchToExpressionSwitch:EnableDirectConversion")
.doTest();
}
@Test
public void directConversion_hoistWithNamingConflictAbove_noError() {
// The checker currently does not have the ability to hoist variables whose names conflict
// with other variables in the switch statement's enclosing scope. Here the conflict is above
// the variable to be hoisted.
helper
.addSourceLines(
"Test.java",
"""
| Test |
java | redisson__redisson | redisson/src/main/java/org/redisson/client/protocol/decoder/ObjectSetReplayDecoder.java | {
"start": 852,
"end": 1043
} | class ____<T> implements MultiDecoder<Set<T>> {
@Override
public Set<T> decode(List<Object> parts, State state) {
return new LinkedHashSet(parts);
}
}
| ObjectSetReplayDecoder |
java | elastic__elasticsearch | x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/CliSessionConfiguration.java | {
"start": 408,
"end": 1797
} | class ____ {
private int fetchSize;
private String fetchSeparator = "";
private boolean debug;
private boolean lenient;
private boolean allowPartialResults;
public CliSessionConfiguration() {
this.fetchSize = CoreProtocol.FETCH_SIZE;
this.lenient = CoreProtocol.FIELD_MULTI_VALUE_LENIENCY;
this.allowPartialResults = CoreProtocol.ALLOW_PARTIAL_SEARCH_RESULTS;
}
public void setFetchSize(int fetchSize) {
if (fetchSize <= 0) {
throw new IllegalArgumentException("Must be > 0.");
}
this.fetchSize = fetchSize;
}
public int getFetchSize() {
return fetchSize;
}
public void setFetchSeparator(String fetchSeparator) {
this.fetchSeparator = fetchSeparator;
}
public String getFetchSeparator() {
return fetchSeparator;
}
public void setDebug(boolean debug) {
this.debug = debug;
}
public boolean isDebug() {
return debug;
}
public boolean isLenient() {
return lenient;
}
public void setLenient(boolean lenient) {
this.lenient = lenient;
}
public boolean allowPartialResults() {
return allowPartialResults;
}
public void setAllowPartialResults(boolean allowPartialResults) {
this.allowPartialResults = allowPartialResults;
}
}
| CliSessionConfiguration |
java | spring-projects__spring-framework | spring-webflux/src/test/java/org/springframework/web/reactive/function/server/LocaleContextResolverIntegrationTests.java | {
"start": 2705,
"end": 2907
} | class ____ implements ViewResolver {
@Override
public Mono<View> resolveViewName(String viewName, Locale locale) {
return Mono.just(new DummyView(locale));
}
}
private static | DummyViewResolver |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/OpenshiftDeploymentConfigsEndpointBuilderFactory.java | {
"start": 23671,
"end": 34189
} | interface ____
extends
EndpointProducerBuilder {
default AdvancedOpenshiftDeploymentConfigsEndpointProducerBuilder advanced() {
return (AdvancedOpenshiftDeploymentConfigsEndpointProducerBuilder) this;
}
/**
* The Kubernetes API Version to use.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param apiVersion the value to set
* @return the dsl builder
*/
default OpenshiftDeploymentConfigsEndpointProducerBuilder apiVersion(String apiVersion) {
doSetProperty("apiVersion", apiVersion);
return this;
}
/**
* The dns domain, used for ServiceCall EIP.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param dnsDomain the value to set
* @return the dsl builder
*/
default OpenshiftDeploymentConfigsEndpointProducerBuilder dnsDomain(String dnsDomain) {
doSetProperty("dnsDomain", dnsDomain);
return this;
}
/**
* Default KubernetesClient to use if provided.
*
* The option is a:
* <code>io.fabric8.kubernetes.client.KubernetesClient</code> type.
*
* Group: common
*
* @param kubernetesClient the value to set
* @return the dsl builder
*/
default OpenshiftDeploymentConfigsEndpointProducerBuilder kubernetesClient(io.fabric8.kubernetes.client.KubernetesClient kubernetesClient) {
doSetProperty("kubernetesClient", kubernetesClient);
return this;
}
/**
* Default KubernetesClient to use if provided.
*
* The option will be converted to a
* <code>io.fabric8.kubernetes.client.KubernetesClient</code> type.
*
* Group: common
*
* @param kubernetesClient the value to set
* @return the dsl builder
*/
default OpenshiftDeploymentConfigsEndpointProducerBuilder kubernetesClient(String kubernetesClient) {
doSetProperty("kubernetesClient", kubernetesClient);
return this;
}
/**
* The namespace.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param namespace the value to set
* @return the dsl builder
*/
default OpenshiftDeploymentConfigsEndpointProducerBuilder namespace(String namespace) {
doSetProperty("namespace", namespace);
return this;
}
/**
* The port name, used for ServiceCall EIP.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param portName the value to set
* @return the dsl builder
*/
default OpenshiftDeploymentConfigsEndpointProducerBuilder portName(String portName) {
doSetProperty("portName", portName);
return this;
}
/**
* The port protocol, used for ServiceCall EIP.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: tcp
* Group: common
*
* @param portProtocol the value to set
* @return the dsl builder
*/
default OpenshiftDeploymentConfigsEndpointProducerBuilder portProtocol(String portProtocol) {
doSetProperty("portProtocol", portProtocol);
return this;
}
/**
* Producer operation to do on Kubernetes.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param operation the value to set
* @return the dsl builder
*/
default OpenshiftDeploymentConfigsEndpointProducerBuilder operation(String operation) {
doSetProperty("operation", operation);
return this;
}
/**
* The CA Cert Data.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param caCertData the value to set
* @return the dsl builder
*/
default OpenshiftDeploymentConfigsEndpointProducerBuilder caCertData(String caCertData) {
doSetProperty("caCertData", caCertData);
return this;
}
/**
* The CA Cert File.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param caCertFile the value to set
* @return the dsl builder
*/
default OpenshiftDeploymentConfigsEndpointProducerBuilder caCertFile(String caCertFile) {
doSetProperty("caCertFile", caCertFile);
return this;
}
/**
* The Client Cert Data.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param clientCertData the value to set
* @return the dsl builder
*/
default OpenshiftDeploymentConfigsEndpointProducerBuilder clientCertData(String clientCertData) {
doSetProperty("clientCertData", clientCertData);
return this;
}
/**
* The Client Cert File.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param clientCertFile the value to set
* @return the dsl builder
*/
default OpenshiftDeploymentConfigsEndpointProducerBuilder clientCertFile(String clientCertFile) {
doSetProperty("clientCertFile", clientCertFile);
return this;
}
/**
* The Key Algorithm used by the client.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param clientKeyAlgo the value to set
* @return the dsl builder
*/
default OpenshiftDeploymentConfigsEndpointProducerBuilder clientKeyAlgo(String clientKeyAlgo) {
doSetProperty("clientKeyAlgo", clientKeyAlgo);
return this;
}
/**
* The Client Key data.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param clientKeyData the value to set
* @return the dsl builder
*/
default OpenshiftDeploymentConfigsEndpointProducerBuilder clientKeyData(String clientKeyData) {
doSetProperty("clientKeyData", clientKeyData);
return this;
}
/**
* The Client Key file.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param clientKeyFile the value to set
* @return the dsl builder
*/
default OpenshiftDeploymentConfigsEndpointProducerBuilder clientKeyFile(String clientKeyFile) {
doSetProperty("clientKeyFile", clientKeyFile);
return this;
}
/**
* The Client Key Passphrase.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param clientKeyPassphrase the value to set
* @return the dsl builder
*/
default OpenshiftDeploymentConfigsEndpointProducerBuilder clientKeyPassphrase(String clientKeyPassphrase) {
doSetProperty("clientKeyPassphrase", clientKeyPassphrase);
return this;
}
/**
* The Auth Token.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param oauthToken the value to set
* @return the dsl builder
*/
default OpenshiftDeploymentConfigsEndpointProducerBuilder oauthToken(String oauthToken) {
doSetProperty("oauthToken", oauthToken);
return this;
}
/**
* Password to connect to Kubernetes.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param password the value to set
* @return the dsl builder
*/
default OpenshiftDeploymentConfigsEndpointProducerBuilder password(String password) {
doSetProperty("password", password);
return this;
}
/**
* Define if the certs we used are trusted anyway or not.
*
* The option is a: <code>java.lang.Boolean</code> type.
*
* Default: false
* Group: security
*
* @param trustCerts the value to set
* @return the dsl builder
*/
default OpenshiftDeploymentConfigsEndpointProducerBuilder trustCerts(Boolean trustCerts) {
doSetProperty("trustCerts", trustCerts);
return this;
}
/**
* Define if the certs we used are trusted anyway or not.
*
* The option will be converted to a <code>java.lang.Boolean</code>
* type.
*
* Default: false
* Group: security
*
* @param trustCerts the value to set
* @return the dsl builder
*/
default OpenshiftDeploymentConfigsEndpointProducerBuilder trustCerts(String trustCerts) {
doSetProperty("trustCerts", trustCerts);
return this;
}
/**
* Username to connect to Kubernetes.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param username the value to set
* @return the dsl builder
*/
default OpenshiftDeploymentConfigsEndpointProducerBuilder username(String username) {
doSetProperty("username", username);
return this;
}
}
/**
* Advanced builder for endpoint producers for the OpenShift Deployment Configs component.
*/
public | OpenshiftDeploymentConfigsEndpointProducerBuilder |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineReaderClientImpl.java | {
"start": 2812,
"end": 10656
} | class ____ extends TimelineReaderClient {
private static final Logger LOG =
LoggerFactory.getLogger(TimelineReaderClientImpl.class);
private static final String RESOURCE_URI_STR_V2 = "/ws/v2/timeline/";
private TimelineConnector connector;
private URI baseUri;
private String clusterId;
public TimelineReaderClientImpl() {
super(TimelineReaderClientImpl.class.getName());
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
if (!YarnConfiguration.timelineServiceV2Enabled(conf)) {
throw new IOException("Timeline V2 client is not properly configured. "
+ "Either timeline service is not enabled or version is not set to"
+ " 2");
}
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
UserGroupInformation realUgi = ugi.getRealUser();
String doAsUser;
UserGroupInformation authUgi;
if (realUgi != null) {
authUgi = realUgi;
doAsUser = ugi.getShortUserName();
} else {
authUgi = ugi;
doAsUser = null;
}
DelegationTokenAuthenticatedURL.Token token =
new DelegationTokenAuthenticatedURL.Token();
connector = new TimelineConnector(false, authUgi, doAsUser, token);
addIfService(connector);
String timelineReaderWebAppAddress =
WebAppUtils.getTimelineReaderWebAppURLWithoutScheme(conf);
baseUri = TimelineConnector.constructResURI(
conf, timelineReaderWebAppAddress, RESOURCE_URI_STR_V2);
clusterId = conf.get(YarnConfiguration.RM_CLUSTER_ID,
YarnConfiguration.DEFAULT_RM_CLUSTER_ID);
LOG.info("Initialized TimelineReader URI = {} , clusterId = {}.", baseUri, clusterId);
super.serviceInit(conf);
}
@Override
public TimelineEntity getApplicationEntity(ApplicationId appId, String fields,
Map<String, String> filters)
throws IOException {
String path = PATH_JOINER.join("clusters", clusterId, "apps", appId);
if (fields == null || fields.isEmpty()) {
fields = "INFO";
}
MultivaluedMap<String, String> params = new MultivaluedHashMap<>();
params.add("fields", fields);
mergeFilters(params, filters);
Response response = doGetUri(baseUri, path, params);
TimelineEntity entity = response.readEntity(TimelineEntity.class);
return entity;
}
@Override
public TimelineEntity getApplicationAttemptEntity(
ApplicationAttemptId appAttemptId,
String fields, Map<String, String> filters) throws IOException {
ApplicationId appId = appAttemptId.getApplicationId();
String path = PATH_JOINER.join("clusters", clusterId, "apps",
appId, "entities", YARN_APPLICATION_ATTEMPT, appAttemptId);
if (fields == null || fields.isEmpty()) {
fields = "INFO";
}
MultivaluedMap<String, String> params = new MultivaluedHashMap<>();
params.add("fields", fields);
mergeFilters(params, filters);
Response response = doGetUri(baseUri, path, params);
TimelineEntity entity = response.readEntity(TimelineEntity.class);
return entity;
}
@Override
public List<TimelineEntity> getApplicationAttemptEntities(
ApplicationId appId, String fields, Map<String, String> filters,
long limit, String fromId) throws IOException {
String path = PATH_JOINER.join("clusters", clusterId, "apps",
appId, "entities", YARN_APPLICATION_ATTEMPT);
if (fields == null || fields.isEmpty()) {
fields = "INFO";
}
MultivaluedMap<String, String> params = new MultivaluedHashMap<>();
params.add("fields", fields);
if (limit > 0) {
params.add("limit", Long.toString(limit));
}
if (fromId != null && !fromId.isEmpty()) {
params.add("fromid", fromId);
}
mergeFilters(params, filters);
Response response = doGetUri(baseUri, path, params);
Set<TimelineEntity> entities = response.readEntity(new GenericType<Set<TimelineEntity>>(){});
return entities.stream().collect(Collectors.toList());
}
@Override
public TimelineEntity getContainerEntity(ContainerId containerId,
String fields, Map<String, String> filters) throws IOException {
ApplicationId appId = containerId.getApplicationAttemptId().
getApplicationId();
String path = PATH_JOINER.join("clusters", clusterId, "apps",
appId, "entities", YARN_CONTAINER, containerId);
if (fields == null || fields.isEmpty()) {
fields = "INFO";
}
MultivaluedMap<String, String> params = new MultivaluedHashMap<>();
params.add("fields", fields);
mergeFilters(params, filters);
Response response = doGetUri(baseUri, path, params);
TimelineEntity entity = response.readEntity(TimelineEntity.class);
return entity;
}
@Override
public List<TimelineEntity> getContainerEntities(
ApplicationId appId, String fields,
Map<String, String> filters,
long limit, String fromId) throws IOException {
String path = PATH_JOINER.join("clusters", clusterId, "apps",
appId, "entities", YARN_CONTAINER);
if (fields == null || fields.isEmpty()) {
fields = "INFO";
}
MultivaluedMap<String, String> params = new MultivaluedHashMap<>();
params.add("fields", fields);
if (limit > 0) {
params.add("limit", Long.toString(limit));
}
if (fromId != null && !fromId.isEmpty()) {
params.add("fromid", fromId);
}
mergeFilters(params, filters);
Response response = doGetUri(baseUri, path, params);
Set<TimelineEntity> entities = response.readEntity(new GenericType<Set<TimelineEntity>>(){});
return entities.stream().collect(Collectors.toList());
}
@VisibleForTesting
protected String encodeValue(String value) throws UnsupportedEncodingException {
// Since URLEncoder doesn't use and doesn't have an option for percent-encoding
// (as specified in RFC 3986) the spaces are encoded to + signs, which need to be replaced
// manually
return URLEncoder.encode(value, StandardCharsets.UTF_8.toString())
.replaceAll("\\+", "%20");
}
private void mergeFilters(MultivaluedMap<String, String> defaults,
Map<String, String> filters) throws UnsupportedEncodingException {
if (filters != null && !filters.isEmpty()) {
for (Map.Entry<String, String> entry : filters.entrySet()) {
if (!defaults.containsKey(entry.getKey())) {
defaults.add(entry.getKey(), encodeValue(entry.getValue()));
}
}
}
}
@VisibleForTesting
protected Response doGetUri(URI base, String path, MultivaluedMap<String, String> params)
throws IOException {
WebTarget target = connector.getClient().
register(TimelineEntityReader.class).target(base).path(path);
// To set query parameters where the value of a `MultivaluedMap` is a `List`,
// we need to iterate through each value to configure them.
for (Map.Entry<String, List<String>> param : params.entrySet()) {
for (String paramItem : param.getValue()) {
target = target.queryParam(param.getKey(), paramItem);
}
}
Invocation.Builder builder = target.request(MediaType.APPLICATION_JSON);
Response resp;
try {
resp = Failsafe.with(connector.getRetryPolicy()).get(() -> builder.get(Response.class));
} catch (Exception e) {
LOG.error("base = {}, path = {}, params = {} doGet error.", base, path, params, e);
throw new IOException(e);
}
if (resp == null || resp.getStatusInfo().getStatusCode() != OK.getStatusCode()) {
String msg = "Response from the timeline reader server is " +
((resp == null) ? "null" : "not successful, HTTP error code: " +
resp.getStatus() + ", Server response:\n" + resp.readEntity(String.class));
LOG.error(msg);
throw new IOException(msg);
}
return resp;
}
}
| TimelineReaderClientImpl |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/ObjectsHashCodePrimitiveTest.java | {
"start": 5829,
"end": 6184
} | class ____ {
void f() {
char x = 'C';
int y = Character.hashCode(x);
}
}
""")
.doTest();
}
@Test
public void hashCodeBoolean() {
helper
.addInputLines(
"Test.java",
"""
import java.util.Objects;
| Test |
java | quarkusio__quarkus | extensions/vertx/deployment/src/test/java/io/quarkus/vertx/deployment/currentcontextfactory/VertxCurrentContextFactoryDisabledTest.java | {
"start": 350,
"end": 737
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.overrideConfigKey("quarkus.vertx.customize-arc-context", "false");
@Test
public void testCustomizedFactoryNotUsed() {
assertFalse(Arc.container().getCurrentContextFactory() instanceof VertxCurrentContextFactory);
}
}
| VertxCurrentContextFactoryDisabledTest |
java | elastic__elasticsearch | x-pack/plugin/otel-data/src/main/java/org/elasticsearch/xpack/oteldata/otlp/datapoint/HistogramConverter.java | {
"start": 5310,
"end": 5417
} | interface ____<E extends Exception> {
void accept(double value) throws E;
}
}
| CheckedDoubleConsumer |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/stream/JSONReaderScannerTest_chars.java | {
"start": 291,
"end": 1794
} | class ____ extends TestCase {
public void test_double() throws Exception {
char[] chars = "{\"value\":3.5D}".toCharArray();
DefaultJSONParser parser = new DefaultJSONParser(new JSONReaderScanner(chars, chars.length));
JSONObject json = parser.parseObject();
Assert.assertTrue(3.5D == ((Double) json.get("value")).doubleValue());
parser.close();
}
public void test_float() throws Exception {
char[] chars = "{\"value\":3.5F}".toCharArray();
DefaultJSONParser parser = new DefaultJSONParser(new JSONReaderScanner(chars, chars.length));
JSONObject json = parser.parseObject();
Assert.assertTrue(3.5F == ((Float) json.get("value")).doubleValue());
parser.close();
}
public void test_decimal() throws Exception {
char[] chars = "{\"value\":3.5}".toCharArray();
DefaultJSONParser parser = new DefaultJSONParser(new JSONReaderScanner(chars, chars.length));
JSONObject json = parser.parseObject();
Assert.assertEquals(new BigDecimal("3.5"), json.get("value"));
parser.close();
}
public void test_long() throws Exception {
char[] chars = "{\"value\":3L}".toCharArray();
DefaultJSONParser parser = new DefaultJSONParser(new JSONReaderScanner(chars, chars.length));
JSONObject json = parser.parseObject();
Assert.assertTrue(3L == ((Long) json.get("value")).longValue());
parser.close();
}
}
| JSONReaderScannerTest_chars |
java | apache__kafka | clients/src/main/java/org/apache/kafka/common/protocol/Errors.java | {
"start": 11145,
"end": 32994
} | enum ____ {
UNKNOWN_SERVER_ERROR(-1, "The server experienced an unexpected error when processing the request.",
UnknownServerException::new),
NONE(0, null, message -> null),
OFFSET_OUT_OF_RANGE(1, "The requested offset is not within the range of offsets maintained by the server.",
OffsetOutOfRangeException::new),
CORRUPT_MESSAGE(2, "This message has failed its CRC checksum, exceeds the valid size, has a null key for a compacted topic, or is otherwise corrupt.",
CorruptRecordException::new),
UNKNOWN_TOPIC_OR_PARTITION(3, "This server does not host this topic-partition.",
UnknownTopicOrPartitionException::new),
INVALID_FETCH_SIZE(4, "The requested fetch size is invalid.",
InvalidFetchSizeException::new),
LEADER_NOT_AVAILABLE(5, "There is no leader for this topic-partition as we are in the middle of a leadership election.",
LeaderNotAvailableException::new),
NOT_LEADER_OR_FOLLOWER(6, "For requests intended only for the leader, this error indicates that the broker is not the current leader. " +
"For requests intended for any replica, this error indicates that the broker is not a replica of the topic partition.",
NotLeaderOrFollowerException::new),
REQUEST_TIMED_OUT(7, "The request timed out.",
TimeoutException::new),
BROKER_NOT_AVAILABLE(8, "The broker is not available.",
BrokerNotAvailableException::new),
REPLICA_NOT_AVAILABLE(9, "The replica is not available for the requested topic-partition. Produce/Fetch requests and other requests " +
"intended only for the leader or follower return NOT_LEADER_OR_FOLLOWER if the broker is not a replica of the topic-partition.",
ReplicaNotAvailableException::new),
MESSAGE_TOO_LARGE(10, "The request included a message larger than the max message size the server will accept.",
RecordTooLargeException::new),
STALE_CONTROLLER_EPOCH(11, "The controller moved to another broker.",
ControllerMovedException::new),
OFFSET_METADATA_TOO_LARGE(12, "The metadata field of the offset request was too large.",
OffsetMetadataTooLarge::new),
NETWORK_EXCEPTION(13, "The server disconnected before a response was received.",
NetworkException::new),
COORDINATOR_LOAD_IN_PROGRESS(14, "The coordinator is loading and hence can't process requests.",
CoordinatorLoadInProgressException::new),
COORDINATOR_NOT_AVAILABLE(15, "The coordinator is not available.",
CoordinatorNotAvailableException::new),
NOT_COORDINATOR(16, "This is not the correct coordinator.",
NotCoordinatorException::new),
INVALID_TOPIC_EXCEPTION(17, "The request attempted to perform an operation on an invalid topic.",
InvalidTopicException::new),
RECORD_LIST_TOO_LARGE(18, "The request included message batch larger than the configured segment size on the server.",
RecordBatchTooLargeException::new),
NOT_ENOUGH_REPLICAS(19, "Messages are rejected since there are fewer in-sync replicas than required.",
NotEnoughReplicasException::new),
NOT_ENOUGH_REPLICAS_AFTER_APPEND(20, "Messages are written to the log, but to fewer in-sync replicas than required.",
NotEnoughReplicasAfterAppendException::new),
INVALID_REQUIRED_ACKS(21, "Produce request specified an invalid value for required acks.",
InvalidRequiredAcksException::new),
ILLEGAL_GENERATION(22, "Specified group generation id is not valid.",
IllegalGenerationException::new),
INCONSISTENT_GROUP_PROTOCOL(23,
"The group member's supported protocols are incompatible with those of existing members " +
"or first group member tried to join with empty protocol type or empty protocol list.",
InconsistentGroupProtocolException::new),
INVALID_GROUP_ID(24, "The group id is invalid.",
InvalidGroupIdException::new),
UNKNOWN_MEMBER_ID(25, "The coordinator is not aware of this member.",
UnknownMemberIdException::new),
INVALID_SESSION_TIMEOUT(26,
"The session timeout is not within the range allowed by the broker " +
"(as configured by group.min.session.timeout.ms and group.max.session.timeout.ms).",
InvalidSessionTimeoutException::new),
REBALANCE_IN_PROGRESS(27, "The group is rebalancing, so a rejoin is needed.",
RebalanceInProgressException::new),
INVALID_COMMIT_OFFSET_SIZE(28, "The committing offset data size is not valid.",
InvalidCommitOffsetSizeException::new),
TOPIC_AUTHORIZATION_FAILED(29, "Topic authorization failed.", TopicAuthorizationException::new),
GROUP_AUTHORIZATION_FAILED(30, "Group authorization failed.", GroupAuthorizationException::new),
CLUSTER_AUTHORIZATION_FAILED(31, "Cluster authorization failed.",
ClusterAuthorizationException::new),
INVALID_TIMESTAMP(32, "The timestamp of the message is out of acceptable range.",
InvalidTimestampException::new),
UNSUPPORTED_SASL_MECHANISM(33, "The broker does not support the requested SASL mechanism.",
UnsupportedSaslMechanismException::new),
ILLEGAL_SASL_STATE(34, "Request is not valid given the current SASL state.",
IllegalSaslStateException::new),
UNSUPPORTED_VERSION(35, "The version of API is not supported.",
UnsupportedVersionException::new),
TOPIC_ALREADY_EXISTS(36, "Topic with this name already exists.",
TopicExistsException::new),
INVALID_PARTITIONS(37, "Number of partitions is below 1.",
InvalidPartitionsException::new),
INVALID_REPLICATION_FACTOR(38, "Replication factor is below 1 or larger than the number of available brokers.",
InvalidReplicationFactorException::new),
INVALID_REPLICA_ASSIGNMENT(39, "Replica assignment is invalid.",
InvalidReplicaAssignmentException::new),
INVALID_CONFIG(40, "Configuration is invalid.",
InvalidConfigurationException::new),
NOT_CONTROLLER(41, "This is not the correct controller for this cluster.",
NotControllerException::new),
INVALID_REQUEST(42, "This most likely occurs because of a request being malformed by the " +
"client library or the message was sent to an incompatible broker. See the broker logs " +
"for more details.",
InvalidRequestException::new),
UNSUPPORTED_FOR_MESSAGE_FORMAT(43, "The message format version on the broker does not support the request.",
UnsupportedForMessageFormatException::new),
POLICY_VIOLATION(44, "Request parameters do not satisfy the configured policy.",
PolicyViolationException::new),
OUT_OF_ORDER_SEQUENCE_NUMBER(45, "The broker received an out of order sequence number.",
OutOfOrderSequenceException::new),
DUPLICATE_SEQUENCE_NUMBER(46, "The broker received a duplicate sequence number.",
DuplicateSequenceException::new),
INVALID_PRODUCER_EPOCH(47, "Producer attempted to produce with an old epoch.",
InvalidProducerEpochException::new),
INVALID_TXN_STATE(48, "The producer attempted a transactional operation in an invalid state.",
InvalidTxnStateException::new),
INVALID_PRODUCER_ID_MAPPING(49, "The producer attempted to use a producer id which is not currently assigned to " +
"its transactional id.",
InvalidPidMappingException::new),
INVALID_TRANSACTION_TIMEOUT(50, "The transaction timeout is larger than the maximum value allowed by " +
"the broker (as configured by transaction.max.timeout.ms).",
InvalidTxnTimeoutException::new),
CONCURRENT_TRANSACTIONS(51, "The producer attempted to update a transaction " +
"while another concurrent operation on the same transaction was ongoing.",
ConcurrentTransactionsException::new),
TRANSACTION_COORDINATOR_FENCED(52, "Indicates that the transaction coordinator sending a WriteTxnMarker " +
"is no longer the current coordinator for a given producer.",
TransactionCoordinatorFencedException::new),
TRANSACTIONAL_ID_AUTHORIZATION_FAILED(53, "Transactional Id authorization failed.",
TransactionalIdAuthorizationException::new),
SECURITY_DISABLED(54, "Security features are disabled.",
SecurityDisabledException::new),
OPERATION_NOT_ATTEMPTED(55, "The broker did not attempt to execute this operation. This may happen for " +
"batched RPCs where some operations in the batch failed, causing the broker to respond without " +
"trying the rest.",
OperationNotAttemptedException::new),
KAFKA_STORAGE_ERROR(56, "Disk error when trying to access log file on the disk.",
KafkaStorageException::new),
LOG_DIR_NOT_FOUND(57, "The user-specified log directory is not found in the broker config.",
LogDirNotFoundException::new),
SASL_AUTHENTICATION_FAILED(58, "SASL Authentication failed.",
SaslAuthenticationException::new),
UNKNOWN_PRODUCER_ID(59, "This exception is raised by the broker if it could not locate the producer metadata " +
"associated with the producerId in question. This could happen if, for instance, the producer's records " +
"were deleted because their retention time had elapsed. Once the last records of the producerId are " +
"removed, the producer's metadata is removed from the broker, and future appends by the producer will " +
"return this exception.",
UnknownProducerIdException::new),
REASSIGNMENT_IN_PROGRESS(60, "A partition reassignment is in progress.",
ReassignmentInProgressException::new),
DELEGATION_TOKEN_AUTH_DISABLED(61, "Delegation Token feature is not enabled.",
DelegationTokenDisabledException::new),
DELEGATION_TOKEN_NOT_FOUND(62, "Delegation Token is not found on server.",
DelegationTokenNotFoundException::new),
DELEGATION_TOKEN_OWNER_MISMATCH(63, "Specified Principal is not valid Owner/Renewer.",
DelegationTokenOwnerMismatchException::new),
DELEGATION_TOKEN_REQUEST_NOT_ALLOWED(64, "Delegation Token requests are not allowed on PLAINTEXT/1-way SSL " +
"channels and on delegation token authenticated channels.",
UnsupportedByAuthenticationException::new),
DELEGATION_TOKEN_AUTHORIZATION_FAILED(65, "Delegation Token authorization failed.",
DelegationTokenAuthorizationException::new),
DELEGATION_TOKEN_EXPIRED(66, "Delegation Token is expired.",
DelegationTokenExpiredException::new),
INVALID_PRINCIPAL_TYPE(67, "Supplied principalType is not supported.",
InvalidPrincipalTypeException::new),
NON_EMPTY_GROUP(68, "The group is not empty.",
GroupNotEmptyException::new),
GROUP_ID_NOT_FOUND(69, "The group id does not exist.",
GroupIdNotFoundException::new),
FETCH_SESSION_ID_NOT_FOUND(70, "The fetch session ID was not found.",
FetchSessionIdNotFoundException::new),
INVALID_FETCH_SESSION_EPOCH(71, "The fetch session epoch is invalid.",
InvalidFetchSessionEpochException::new),
LISTENER_NOT_FOUND(72, "There is no listener on the leader broker that matches the listener on which " +
"metadata request was processed.",
ListenerNotFoundException::new),
TOPIC_DELETION_DISABLED(73, "Topic deletion is disabled.",
TopicDeletionDisabledException::new),
FENCED_LEADER_EPOCH(74, "The leader epoch in the request is older than the epoch on the broker.",
FencedLeaderEpochException::new),
UNKNOWN_LEADER_EPOCH(75, "The leader epoch in the request is newer than the epoch on the broker.",
UnknownLeaderEpochException::new),
UNSUPPORTED_COMPRESSION_TYPE(76, "The requesting client does not support the compression type of given partition.",
UnsupportedCompressionTypeException::new),
STALE_BROKER_EPOCH(77, "Broker epoch has changed.",
StaleBrokerEpochException::new),
OFFSET_NOT_AVAILABLE(78, "The leader high watermark has not caught up from a recent leader " +
"election so the offsets cannot be guaranteed to be monotonically increasing.",
OffsetNotAvailableException::new),
MEMBER_ID_REQUIRED(79, "The group member needs to have a valid member id before actually entering a consumer group.",
MemberIdRequiredException::new),
PREFERRED_LEADER_NOT_AVAILABLE(80, "The preferred leader was not available.",
PreferredLeaderNotAvailableException::new),
GROUP_MAX_SIZE_REACHED(81, "The group has reached its maximum size.", GroupMaxSizeReachedException::new),
FENCED_INSTANCE_ID(82, "The broker rejected this static consumer since " +
"another consumer with the same group.instance.id has registered with a different member.id.",
FencedInstanceIdException::new),
ELIGIBLE_LEADERS_NOT_AVAILABLE(83, "Eligible topic partition leaders are not available.",
EligibleLeadersNotAvailableException::new),
ELECTION_NOT_NEEDED(84, "Leader election not needed for topic partition.", ElectionNotNeededException::new),
NO_REASSIGNMENT_IN_PROGRESS(85, "No partition reassignment is in progress.",
NoReassignmentInProgressException::new),
GROUP_SUBSCRIBED_TO_TOPIC(86, "Deleting offsets of a topic is forbidden while the consumer group is actively subscribed to it.",
GroupSubscribedToTopicException::new),
INVALID_RECORD(87, "This record has failed the validation on broker and hence will be rejected.", InvalidRecordException::new),
UNSTABLE_OFFSET_COMMIT(88, "There are unstable offsets that need to be cleared.", UnstableOffsetCommitException::new),
THROTTLING_QUOTA_EXCEEDED(89, "The throttling quota has been exceeded.", ThrottlingQuotaExceededException::new),
PRODUCER_FENCED(90, "There is a newer producer with the same transactionalId " +
"which fences the current one.", ProducerFencedException::new),
RESOURCE_NOT_FOUND(91, "A request illegally referred to a resource that does not exist.", ResourceNotFoundException::new),
DUPLICATE_RESOURCE(92, "A request illegally referred to the same resource twice.", DuplicateResourceException::new),
UNACCEPTABLE_CREDENTIAL(93, "Requested credential would not meet criteria for acceptability.", UnacceptableCredentialException::new),
INCONSISTENT_VOTER_SET(94, "Indicates that the either the sender or recipient of a " +
"voter-only request is not one of the expected voters.", InconsistentVoterSetException::new),
INVALID_UPDATE_VERSION(95, "The given update version was invalid.", InvalidUpdateVersionException::new),
FEATURE_UPDATE_FAILED(96, "Unable to update finalized features due to an unexpected server error.", FeatureUpdateFailedException::new),
PRINCIPAL_DESERIALIZATION_FAILURE(97, "Request principal deserialization failed during forwarding. " +
"This indicates an internal error on the broker cluster security setup.", PrincipalDeserializationException::new),
SNAPSHOT_NOT_FOUND(98, "Requested snapshot was not found.", SnapshotNotFoundException::new),
POSITION_OUT_OF_RANGE(99, "Requested position is not greater than or equal to zero, and less than the size of the snapshot.", PositionOutOfRangeException::new),
UNKNOWN_TOPIC_ID(100, "This server does not host this topic ID.", UnknownTopicIdException::new),
DUPLICATE_BROKER_REGISTRATION(101, "This broker ID is already in use.", DuplicateBrokerRegistrationException::new),
BROKER_ID_NOT_REGISTERED(102, "The given broker ID was not registered.", BrokerIdNotRegisteredException::new),
INCONSISTENT_TOPIC_ID(103, "The log's topic ID did not match the topic ID in the request.", InconsistentTopicIdException::new),
INCONSISTENT_CLUSTER_ID(104, "The clusterId in the request does not match that found on the server.", InconsistentClusterIdException::new),
TRANSACTIONAL_ID_NOT_FOUND(105, "The transactionalId could not be found.", TransactionalIdNotFoundException::new),
FETCH_SESSION_TOPIC_ID_ERROR(106, "The fetch session encountered inconsistent topic ID usage.", FetchSessionTopicIdException::new),
INELIGIBLE_REPLICA(107, "The new ISR contains at least one ineligible replica.", IneligibleReplicaException::new),
NEW_LEADER_ELECTED(108, "The AlterPartition request successfully updated the partition state but the leader has changed.", NewLeaderElectedException::new),
OFFSET_MOVED_TO_TIERED_STORAGE(109, "The requested offset is moved to tiered storage.", OffsetMovedToTieredStorageException::new),
FENCED_MEMBER_EPOCH(110, "The member epoch is fenced by the group coordinator. The member must abandon all its partitions and rejoin.", FencedMemberEpochException::new),
UNRELEASED_INSTANCE_ID(111, "The instance ID is still used by another member in the consumer group. That member must leave first.", UnreleasedInstanceIdException::new),
UNSUPPORTED_ASSIGNOR(112, "The assignor or its version range is not supported by the consumer group.", UnsupportedAssignorException::new),
STALE_MEMBER_EPOCH(113, "The member epoch is stale. The member must retry after receiving its updated member epoch via the ConsumerGroupHeartbeat API.", StaleMemberEpochException::new),
MISMATCHED_ENDPOINT_TYPE(114, "The request was sent to an endpoint of the wrong type.", MismatchedEndpointTypeException::new),
UNSUPPORTED_ENDPOINT_TYPE(115, "This endpoint type is not supported yet.", UnsupportedEndpointTypeException::new),
UNKNOWN_CONTROLLER_ID(116, "This controller ID is not known.", UnknownControllerIdException::new),
UNKNOWN_SUBSCRIPTION_ID(117, "Client sent a push telemetry request with an invalid or outdated subscription ID.", UnknownSubscriptionIdException::new),
TELEMETRY_TOO_LARGE(118, "Client sent a push telemetry request larger than the maximum size the broker will accept.", TelemetryTooLargeException::new),
INVALID_REGISTRATION(119, "The controller has considered the broker registration to be invalid.", InvalidRegistrationException::new),
TRANSACTION_ABORTABLE(120, "The server encountered an error with the transaction. The client can abort the transaction to continue using this transactional ID.", TransactionAbortableException::new),
INVALID_RECORD_STATE(121, "The record state is invalid. The acknowledgement of delivery could not be completed.", InvalidRecordStateException::new),
SHARE_SESSION_NOT_FOUND(122, "The share session was not found.", ShareSessionNotFoundException::new),
INVALID_SHARE_SESSION_EPOCH(123, "The share session epoch is invalid.", InvalidShareSessionEpochException::new),
FENCED_STATE_EPOCH(124, "The share coordinator rejected the request because the share-group state epoch did not match.", FencedStateEpochException::new),
INVALID_VOTER_KEY(125, "The voter key doesn't match the receiving replica's key.", InvalidVoterKeyException::new),
DUPLICATE_VOTER(126, "The voter is already part of the set of voters.", DuplicateVoterException::new),
VOTER_NOT_FOUND(127, "The voter is not part of the set of voters.", VoterNotFoundException::new),
INVALID_REGULAR_EXPRESSION(128, "The regular expression is not valid.", InvalidRegularExpression::new),
REBOOTSTRAP_REQUIRED(129, "Client metadata is stale. The client should rebootstrap to obtain new metadata.", RebootstrapRequiredException::new),
STREAMS_INVALID_TOPOLOGY(130, "The supplied topology is invalid.", StreamsInvalidTopologyException::new),
STREAMS_INVALID_TOPOLOGY_EPOCH(131, "The supplied topology epoch is invalid.", StreamsInvalidTopologyEpochException::new),
STREAMS_TOPOLOGY_FENCED(132, "The supplied topology epoch is outdated.", StreamsTopologyFencedException::new),
SHARE_SESSION_LIMIT_REACHED(133, "The limit of share sessions has been reached.", ShareSessionLimitReachedException::new);
private static final Logger log = LoggerFactory.getLogger(Errors.class);
private static final Map<Class<?>, Errors> CLASS_TO_ERROR = new HashMap<>();
private static final Map<Short, Errors> CODE_TO_ERROR = new HashMap<>();
static {
for (Errors error : Errors.values()) {
if (CODE_TO_ERROR.put(error.code(), error) != null)
throw new ExceptionInInitializerError("Code " + error.code() + " for error " +
error + " has already been used");
if (error.exception != null)
CLASS_TO_ERROR.put(error.exception.getClass(), error);
}
}
private final short code;
private final Function<String, ApiException> builder;
private final ApiException exception;
Errors(int code, String defaultExceptionString, Function<String, ApiException> builder) {
this.code = (short) code;
this.builder = builder;
this.exception = builder.apply(defaultExceptionString);
}
/**
* An instance of the exception
*/
public ApiException exception() {
return this.exception;
}
/**
* Create an instance of the ApiException that contains the given error message.
*
* @param message The message string to set.
* @return The exception.
*/
public ApiException exception(String message) {
if (message == null) {
// If no error message was specified, return an exception with the default error message.
return exception;
}
// Return an exception with the given error message.
return builder.apply(message);
}
/**
* Returns the | Errors |
java | elastic__elasticsearch | x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/ActionUtils.java | {
"start": 618,
"end": 1794
} | class ____ {
public static ActionListener<InferenceServiceResults> wrapFailuresInElasticsearchException(
String errorMessage,
ActionListener<InferenceServiceResults> listener
) {
return listener.delegateResponse((l, e) -> {
var unwrappedException = ExceptionsHelper.unwrapCause(e);
if (unwrappedException instanceof ElasticsearchException esException) {
l.onFailure(esException);
} else {
l.onFailure(
// Determine the appropriate RestStatus from the unwrapped exception, then wrap in an ElasticsearchStatusException
new ElasticsearchStatusException(
Strings.format("%s. Cause: %s", errorMessage, unwrappedException.getMessage()),
ExceptionsHelper.status(unwrappedException),
unwrappedException
)
);
}
});
}
public static String constructFailedToSendRequestMessage(String message) {
return Strings.format("Failed to send %s request", message);
}
private ActionUtils() {}
}
| ActionUtils |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/enums/EnumMapDeserializationTest.java | {
"start": 647,
"end": 761
} | enum ____ {
JACKSON, RULES,
@JsonEnumDefaultValue
OK;
}
protected | TestEnumWithDefault |
java | apache__camel | components/camel-file/src/main/java/org/apache/camel/component/file/GenericFileProcessStrategyFactory.java | {
"start": 978,
"end": 1149
} | interface ____<T> {
GenericFileProcessStrategy<T> createGenericFileProcessStrategy(CamelContext context, Map<String, Object> params);
}
| GenericFileProcessStrategyFactory |
java | alibaba__nacos | core/src/main/java/com/alibaba/nacos/core/distributed/ProtocolManager.java | {
"start": 1804,
"end": 6385
} | class ____ extends MemberChangeListener implements DisposableBean {
private CPProtocol cpProtocol;
private APProtocol apProtocol;
private final ServerMemberManager memberManager;
private volatile boolean apInit = false;
private volatile boolean cpInit = false;
private final Object cpLock = new Object();
private final Object apLock = new Object();
private Set<Member> oldMembers;
public ProtocolManager(ServerMemberManager memberManager) {
this.memberManager = memberManager;
NotifyCenter.registerSubscriber(this);
}
public static Set<String> toAPMembersInfo(Collection<Member> members) {
Set<String> nodes = new HashSet<>();
members.forEach(member -> nodes.add(member.getAddress()));
return nodes;
}
public static Set<String> toCPMembersInfo(Collection<Member> members) {
Set<String> nodes = new HashSet<>();
members.forEach(member -> {
final String ip = member.getIp();
final int raftPort = MemberUtil.calculateRaftPort(member);
nodes.add(ip + ":" + raftPort);
});
return nodes;
}
public CPProtocol getCpProtocol() {
if (!cpInit){
synchronized (cpLock) {
if (!cpInit) {
initCPProtocol();
cpInit = true;
}
}
}
return cpProtocol;
}
public APProtocol getApProtocol() {
if (!apInit) {
synchronized (apLock) {
if (!apInit) {
initAPProtocol();
apInit = true;
}
}
}
return apProtocol;
}
public boolean isCpInit() {
return cpInit;
}
public boolean isApInit() {
return apInit;
}
@PreDestroy
@Override
public void destroy() {
if (Objects.nonNull(apProtocol)) {
apProtocol.shutdown();
}
if (Objects.nonNull(cpProtocol)) {
cpProtocol.shutdown();
}
}
private void initAPProtocol() {
ApplicationUtils.getBeanIfExist(APProtocol.class, protocol -> {
Class configType = ClassUtils.resolveGenericType(protocol.getClass());
Config config = (Config) ApplicationUtils.getBean(configType);
injectMembers4AP(config);
protocol.init(config);
ProtocolManager.this.apProtocol = protocol;
});
}
private void initCPProtocol() {
ApplicationUtils.getBeanIfExist(CPProtocol.class, protocol -> {
Class configType = ClassUtils.resolveGenericType(protocol.getClass());
Config config = (Config) ApplicationUtils.getBean(configType);
injectMembers4CP(config);
protocol.init(config);
ProtocolManager.this.cpProtocol = protocol;
});
}
private void injectMembers4CP(Config config) {
final Member selfMember = memberManager.getSelf();
final String self = selfMember.getIp() + ":" + Integer
.parseInt(String.valueOf(selfMember.getExtendVal(MemberMetaDataConstants.RAFT_PORT)));
Set<String> others = toCPMembersInfo(memberManager.allMembers());
config.setMembers(self, others);
}
private void injectMembers4AP(Config config) {
final String self = memberManager.getSelf().getAddress();
Set<String> others = toAPMembersInfo(memberManager.allMembers());
config.setMembers(self, others);
}
@Override
public void onEvent(MembersChangeEvent event) {
// Here, the sequence of node change events is very important. For example,
// node change event A occurs at time T1, and node change event B occurs at
// time T2 after a period of time.
// (T1 < T2)
// Node change events between different protocols should not block each other.
// and we use a single thread pool to inform the consistency layer of node changes,
// to avoid multiple tasks simultaneously carrying out the consistency layer of
// node changes operation
if (Objects.nonNull(apProtocol)) {
ProtocolExecutor.apMemberChange(() -> apProtocol.memberChange(toAPMembersInfo(event.getMembers())));
}
if (Objects.nonNull(cpProtocol)) {
ProtocolExecutor.cpMemberChange(() -> cpProtocol.memberChange(toCPMembersInfo(event.getMembers())));
}
}
}
| ProtocolManager |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/engine/jdbc/proxy/WrappedBlob.java | {
"start": 242,
"end": 401
} | interface ____ {
/**
* Retrieve the wrapped {@link Blob} reference
*
* @return The wrapped {@link Blob} reference
*/
Blob getWrappedBlob();
}
| WrappedBlob |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/engine/internal/StatefulPersistenceContext.java | {
"start": 74506,
"end": 75211
} | enum ____ {
UNINITIALIZED,
ENHANCED_PROXY,
INITIALIZED,
DETACHED
}
// NATURAL ID RESOLUTION HANDLING ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
private NaturalIdResolutionsImpl naturalIdResolutions;
@Override
public NaturalIdResolutions getNaturalIdResolutions() {
if ( naturalIdResolutions == null ) {
naturalIdResolutions = new NaturalIdResolutionsImpl( this );
}
return naturalIdResolutions;
}
@Override
public EntityHolder detachEntity(EntityKey key) {
final var entityHolder = removeHolder( key );
if ( entityHolder != null ) {
entityHolder.state = EntityHolderState.DETACHED;
}
return entityHolder;
}
}
| EntityHolderState |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/CompareUtils.java | {
"start": 2468,
"end": 2861
} | class ____ implements
RawComparator<Object>, Serializable {
@Override
public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
return WritableComparator.compareBytes(b1, s1, l1, b2, s2, l2);
}
@Override
public int compare(Object o1, Object o2) {
throw new RuntimeException("Object comparison not supported");
}
}
}
| MemcmpRawComparator |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/beanparam/FirstAndSecondResource.java | {
"start": 180,
"end": 443
} | class ____ {
@Path("{first}/{second}")
@GET
public String firstAndSecond(Param param) {
return param.first() + "-" + param.second();
}
public record Param(@RestPath String first, @RestPath String second) {
}
}
| FirstAndSecondResource |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/query/criteria/EmptyPredicateTest.java | {
"start": 920,
"end": 1865
} | class ____ {
@BeforeAll
public void setUp(SessionFactoryScope scope) {
scope.inTransaction( session -> {
BasicEntity entity = new BasicEntity( 1, "test" );
session.persist( entity );
} );
}
@AfterAll
public void tearDown(SessionFactoryScope scope) {
scope.inTransaction( session -> session.createMutationQuery( "delete from BasicEntity" ).executeUpdate() );
}
@Test
public void testEmptyPredicateArray(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final CriteriaBuilder cb = session.getCriteriaBuilder();
final CriteriaQuery<BasicEntity> query = cb.createQuery( BasicEntity.class );
final Root<BasicEntity> root = query.from( BasicEntity.class );
query.select( root ).where( cb.equal( cb.literal( 1 ), 2 ) );
query.where( new Predicate[] {} ); // this should remove previous restrictions
assertEquals( 1, session.createQuery( query ).getResultList().size() );
} );
}
}
| EmptyPredicateTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/joincolumn/StringToCharArrayJoinColumnTest.java | {
"start": 2623,
"end": 3163
} | class ____ {
@Id
@Column(name = "string_col")
private String id;
@ManyToOne(fetch = FetchType.EAGER)
@JoinColumn(name = "string_col", referencedColumnName = "char_array_col", insertable = false, updatable = false)
private Vehicle vehicle;
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public Vehicle getVehicle() {
return vehicle;
}
public void setVehicle(Vehicle vehicle) {
this.vehicle = vehicle;
}
}
@Entity(name = "Vehicle")
public static | VehicleInvoice |
java | apache__commons-lang | src/test/java/org/apache/commons/lang3/ClassUtilsTest.java | {
"start": 39463,
"end": 91654
} | class ____ {
// empty
}
assertEquals("", ClassUtils.getSimpleName(new Object() {
// empty
}.getClass()));
assertEquals("Named", ClassUtils.getSimpleName(Named.class));
}
@Test
void test_getSimpleName_Object() {
assertEquals("ClassUtils", ClassUtils.getSimpleName(new ClassUtils()));
assertEquals("Inner", ClassUtils.getSimpleName(new Inner()));
assertEquals("String", ClassUtils.getSimpleName("hello"));
assertEquals(StringUtils.EMPTY, ClassUtils.getSimpleName(null));
assertEquals(StringUtils.EMPTY, ClassUtils.getSimpleName(null));
}
@Test
void test_getSimpleName_Object_String() {
assertEquals("ClassUtils", ClassUtils.getSimpleName(new ClassUtils(), "<null>"));
assertEquals("Inner", ClassUtils.getSimpleName(new Inner(), "<null>"));
assertEquals("String", ClassUtils.getSimpleName("hello", "<null>"));
assertEquals("<null>", ClassUtils.getSimpleName(null, "<null>"));
assertNull(ClassUtils.getSimpleName(null, null));
}
@Test
void test_isAssignable() {
assertFalse(ClassUtils.isAssignable((Class<?>) null, null));
assertFalse(ClassUtils.isAssignable(String.class, null));
assertTrue(ClassUtils.isAssignable(null, Object.class));
assertTrue(ClassUtils.isAssignable(null, Integer.class));
assertFalse(ClassUtils.isAssignable(null, Integer.TYPE));
assertTrue(ClassUtils.isAssignable(String.class, Object.class));
assertTrue(ClassUtils.isAssignable(String.class, String.class));
assertFalse(ClassUtils.isAssignable(Object.class, String.class));
assertTrue(ClassUtils.isAssignable(Integer.TYPE, Integer.class));
assertTrue(ClassUtils.isAssignable(Integer.TYPE, Object.class));
assertTrue(ClassUtils.isAssignable(Integer.class, Integer.TYPE));
assertTrue(ClassUtils.isAssignable(Integer.class, Object.class));
assertTrue(ClassUtils.isAssignable(Integer.TYPE, Integer.TYPE));
assertTrue(ClassUtils.isAssignable(Integer.class, Integer.class));
assertTrue(ClassUtils.isAssignable(Boolean.TYPE, Boolean.class));
assertTrue(ClassUtils.isAssignable(Boolean.TYPE, Object.class));
assertTrue(ClassUtils.isAssignable(Boolean.class, Boolean.TYPE));
assertTrue(ClassUtils.isAssignable(Boolean.class, Object.class));
assertTrue(ClassUtils.isAssignable(Boolean.TYPE, Boolean.TYPE));
assertTrue(ClassUtils.isAssignable(Boolean.class, Boolean.class));
}
@Test
void test_isAssignable_Autoboxing() {
assertFalse(ClassUtils.isAssignable((Class<?>) null, null, true));
assertFalse(ClassUtils.isAssignable(String.class, null, true));
assertTrue(ClassUtils.isAssignable(null, Object.class, true));
assertTrue(ClassUtils.isAssignable(null, Integer.class, true));
assertFalse(ClassUtils.isAssignable(null, Integer.TYPE, true));
assertTrue(ClassUtils.isAssignable(String.class, Object.class, true));
assertTrue(ClassUtils.isAssignable(String.class, String.class, true));
assertFalse(ClassUtils.isAssignable(Object.class, String.class, true));
assertTrue(ClassUtils.isAssignable(Integer.TYPE, Integer.class, true));
assertTrue(ClassUtils.isAssignable(Integer.TYPE, Object.class, true));
assertTrue(ClassUtils.isAssignable(Integer.class, Integer.TYPE, true));
assertTrue(ClassUtils.isAssignable(Integer.class, Object.class, true));
assertTrue(ClassUtils.isAssignable(Integer.TYPE, Integer.TYPE, true));
assertTrue(ClassUtils.isAssignable(Integer.class, Integer.class, true));
assertTrue(ClassUtils.isAssignable(Boolean.TYPE, Boolean.class, true));
assertTrue(ClassUtils.isAssignable(Boolean.class, Boolean.TYPE, true));
assertTrue(ClassUtils.isAssignable(Boolean.class, Object.class, true));
assertTrue(ClassUtils.isAssignable(Boolean.TYPE, Boolean.TYPE, true));
assertTrue(ClassUtils.isAssignable(Boolean.class, Boolean.class, true));
}
@Test
void test_isAssignable_ClassArray_ClassArray() {
final Class<?>[] array2 = new Class[] {Object.class, Object.class};
final Class<?>[] array1 = new Class[] {Object.class};
final Class<?>[] array1s = new Class[] {String.class};
final Class<?>[] array0 = new Class[] {};
final Class<?>[] arrayPrimitives = {Integer.TYPE, Boolean.TYPE};
final Class<?>[] arrayWrappers = {Integer.class, Boolean.class};
assertFalse(ClassUtils.isAssignable(array1, array2));
assertFalse(ClassUtils.isAssignable(null, array2));
assertTrue(ClassUtils.isAssignable(null, array0));
assertTrue(ClassUtils.isAssignable(array0, array0));
assertTrue(ClassUtils.isAssignable(array0, (Class<?>[]) null)); // explicit cast to avoid warning
assertTrue(ClassUtils.isAssignable(null, (Class<?>[]) null)); // explicit cast to avoid warning
assertFalse(ClassUtils.isAssignable(array1, array1s));
assertTrue(ClassUtils.isAssignable(array1s, array1s));
assertTrue(ClassUtils.isAssignable(array1s, array1));
assertTrue(ClassUtils.isAssignable(arrayPrimitives, arrayWrappers));
assertTrue(ClassUtils.isAssignable(arrayWrappers, arrayPrimitives));
assertFalse(ClassUtils.isAssignable(arrayPrimitives, array1));
assertFalse(ClassUtils.isAssignable(arrayWrappers, array1));
assertTrue(ClassUtils.isAssignable(arrayPrimitives, array2));
assertTrue(ClassUtils.isAssignable(arrayWrappers, array2));
}
@Test
void test_isAssignable_ClassArray_ClassArray_Autoboxing() {
final Class<?>[] array2 = new Class[] {Object.class, Object.class};
final Class<?>[] array1 = new Class[] {Object.class};
final Class<?>[] array1s = new Class[] {String.class};
final Class<?>[] array0 = new Class[] {};
final Class<?>[] arrayPrimitives = {Integer.TYPE, Boolean.TYPE};
final Class<?>[] arrayWrappers = {Integer.class, Boolean.class};
assertFalse(ClassUtils.isAssignable(array1, array2, true));
assertFalse(ClassUtils.isAssignable(null, array2, true));
assertTrue(ClassUtils.isAssignable(null, array0, true));
assertTrue(ClassUtils.isAssignable(array0, array0, true));
assertTrue(ClassUtils.isAssignable(array0, null, true));
assertTrue(ClassUtils.isAssignable((Class[]) null, null, true));
assertFalse(ClassUtils.isAssignable(array1, array1s, true));
assertTrue(ClassUtils.isAssignable(array1s, array1s, true));
assertTrue(ClassUtils.isAssignable(array1s, array1, true));
assertTrue(ClassUtils.isAssignable(arrayPrimitives, arrayWrappers, true));
assertTrue(ClassUtils.isAssignable(arrayWrappers, arrayPrimitives, true));
assertFalse(ClassUtils.isAssignable(arrayPrimitives, array1, true));
assertFalse(ClassUtils.isAssignable(arrayWrappers, array1, true));
assertTrue(ClassUtils.isAssignable(arrayPrimitives, array2, true));
assertTrue(ClassUtils.isAssignable(arrayWrappers, array2, true));
}
@Test
void test_isAssignable_ClassArray_ClassArray_NoAutoboxing() {
final Class<?>[] array2 = new Class[] {Object.class, Object.class};
final Class<?>[] array1 = new Class[] {Object.class};
final Class<?>[] array1s = new Class[] {String.class};
final Class<?>[] array0 = new Class[] {};
final Class<?>[] arrayPrimitives = {Integer.TYPE, Boolean.TYPE};
final Class<?>[] arrayWrappers = {Integer.class, Boolean.class};
assertFalse(ClassUtils.isAssignable(array1, array2, false));
assertFalse(ClassUtils.isAssignable(null, array2, false));
assertTrue(ClassUtils.isAssignable(null, array0, false));
assertTrue(ClassUtils.isAssignable(array0, array0, false));
assertTrue(ClassUtils.isAssignable(array0, null, false));
assertTrue(ClassUtils.isAssignable((Class[]) null, null, false));
assertFalse(ClassUtils.isAssignable(array1, array1s, false));
assertTrue(ClassUtils.isAssignable(array1s, array1s, false));
assertTrue(ClassUtils.isAssignable(array1s, array1, false));
assertFalse(ClassUtils.isAssignable(arrayPrimitives, arrayWrappers, false));
assertFalse(ClassUtils.isAssignable(arrayWrappers, arrayPrimitives, false));
assertFalse(ClassUtils.isAssignable(arrayPrimitives, array1, false));
assertFalse(ClassUtils.isAssignable(arrayWrappers, array1, false));
assertTrue(ClassUtils.isAssignable(arrayWrappers, array2, false));
assertFalse(ClassUtils.isAssignable(arrayPrimitives, array2, false));
}
@Test
void test_isAssignable_DefaultUnboxing_Widening() {
// test byte conversions
assertFalse(ClassUtils.isAssignable(Byte.class, Character.TYPE), "byte -> char");
assertTrue(ClassUtils.isAssignable(Byte.class, Byte.TYPE), "byte -> byte");
assertTrue(ClassUtils.isAssignable(Byte.class, Short.TYPE), "byte -> short");
assertTrue(ClassUtils.isAssignable(Byte.class, Integer.TYPE), "byte -> int");
assertTrue(ClassUtils.isAssignable(Byte.class, Long.TYPE), "byte -> long");
assertTrue(ClassUtils.isAssignable(Byte.class, Float.TYPE), "byte -> float");
assertTrue(ClassUtils.isAssignable(Byte.class, Double.TYPE), "byte -> double");
assertFalse(ClassUtils.isAssignable(Byte.class, Boolean.TYPE), "byte -> boolean");
// test short conversions
assertFalse(ClassUtils.isAssignable(Short.class, Character.TYPE), "short -> char");
assertFalse(ClassUtils.isAssignable(Short.class, Byte.TYPE), "short -> byte");
assertTrue(ClassUtils.isAssignable(Short.class, Short.TYPE), "short -> short");
assertTrue(ClassUtils.isAssignable(Short.class, Integer.TYPE), "short -> int");
assertTrue(ClassUtils.isAssignable(Short.class, Long.TYPE), "short -> long");
assertTrue(ClassUtils.isAssignable(Short.class, Float.TYPE), "short -> float");
assertTrue(ClassUtils.isAssignable(Short.class, Double.TYPE), "short -> double");
assertFalse(ClassUtils.isAssignable(Short.class, Boolean.TYPE), "short -> boolean");
// test char conversions
assertTrue(ClassUtils.isAssignable(Character.class, Character.TYPE), "char -> char");
assertFalse(ClassUtils.isAssignable(Character.class, Byte.TYPE), "char -> byte");
assertFalse(ClassUtils.isAssignable(Character.class, Short.TYPE), "char -> short");
assertTrue(ClassUtils.isAssignable(Character.class, Integer.TYPE), "char -> int");
assertTrue(ClassUtils.isAssignable(Character.class, Long.TYPE), "char -> long");
assertTrue(ClassUtils.isAssignable(Character.class, Float.TYPE), "char -> float");
assertTrue(ClassUtils.isAssignable(Character.class, Double.TYPE), "char -> double");
assertFalse(ClassUtils.isAssignable(Character.class, Boolean.TYPE), "char -> boolean");
// test int conversions
assertFalse(ClassUtils.isAssignable(Integer.class, Character.TYPE), "int -> char");
assertFalse(ClassUtils.isAssignable(Integer.class, Byte.TYPE), "int -> byte");
assertFalse(ClassUtils.isAssignable(Integer.class, Short.TYPE), "int -> short");
assertTrue(ClassUtils.isAssignable(Integer.class, Integer.TYPE), "int -> int");
assertTrue(ClassUtils.isAssignable(Integer.class, Long.TYPE), "int -> long");
assertTrue(ClassUtils.isAssignable(Integer.class, Float.TYPE), "int -> float");
assertTrue(ClassUtils.isAssignable(Integer.class, Double.TYPE), "int -> double");
assertFalse(ClassUtils.isAssignable(Integer.class, Boolean.TYPE), "int -> boolean");
// test long conversions
assertFalse(ClassUtils.isAssignable(Long.class, Character.TYPE), "long -> char");
assertFalse(ClassUtils.isAssignable(Long.class, Byte.TYPE), "long -> byte");
assertFalse(ClassUtils.isAssignable(Long.class, Short.TYPE), "long -> short");
assertFalse(ClassUtils.isAssignable(Long.class, Integer.TYPE), "long -> int");
assertTrue(ClassUtils.isAssignable(Long.class, Long.TYPE), "long -> long");
assertTrue(ClassUtils.isAssignable(Long.class, Float.TYPE), "long -> float");
assertTrue(ClassUtils.isAssignable(Long.class, Double.TYPE), "long -> double");
assertFalse(ClassUtils.isAssignable(Long.class, Boolean.TYPE), "long -> boolean");
// test float conversions
assertFalse(ClassUtils.isAssignable(Float.class, Character.TYPE), "float -> char");
assertFalse(ClassUtils.isAssignable(Float.class, Byte.TYPE), "float -> byte");
assertFalse(ClassUtils.isAssignable(Float.class, Short.TYPE), "float -> short");
assertFalse(ClassUtils.isAssignable(Float.class, Integer.TYPE), "float -> int");
assertFalse(ClassUtils.isAssignable(Float.class, Long.TYPE), "float -> long");
assertTrue(ClassUtils.isAssignable(Float.class, Float.TYPE), "float -> float");
assertTrue(ClassUtils.isAssignable(Float.class, Double.TYPE), "float -> double");
assertFalse(ClassUtils.isAssignable(Float.class, Boolean.TYPE), "float -> boolean");
// test double conversions
assertFalse(ClassUtils.isAssignable(Double.class, Character.TYPE), "double -> char");
assertFalse(ClassUtils.isAssignable(Double.class, Byte.TYPE), "double -> byte");
assertFalse(ClassUtils.isAssignable(Double.class, Short.TYPE), "double -> short");
assertFalse(ClassUtils.isAssignable(Double.class, Integer.TYPE), "double -> int");
assertFalse(ClassUtils.isAssignable(Double.class, Long.TYPE), "double -> long");
assertFalse(ClassUtils.isAssignable(Double.class, Float.TYPE), "double -> float");
assertTrue(ClassUtils.isAssignable(Double.class, Double.TYPE), "double -> double");
assertFalse(ClassUtils.isAssignable(Double.class, Boolean.TYPE), "double -> boolean");
// test boolean conversions
assertFalse(ClassUtils.isAssignable(Boolean.class, Character.TYPE), "boolean -> char");
assertFalse(ClassUtils.isAssignable(Boolean.class, Byte.TYPE), "boolean -> byte");
assertFalse(ClassUtils.isAssignable(Boolean.class, Short.TYPE), "boolean -> short");
assertFalse(ClassUtils.isAssignable(Boolean.class, Integer.TYPE), "boolean -> int");
assertFalse(ClassUtils.isAssignable(Boolean.class, Long.TYPE), "boolean -> long");
assertFalse(ClassUtils.isAssignable(Boolean.class, Float.TYPE), "boolean -> float");
assertFalse(ClassUtils.isAssignable(Boolean.class, Double.TYPE), "boolean -> double");
assertTrue(ClassUtils.isAssignable(Boolean.class, Boolean.TYPE), "boolean -> boolean");
}
@Test
void test_isAssignable_NoAutoboxing() {
assertFalse(ClassUtils.isAssignable((Class<?>) null, null, false));
assertFalse(ClassUtils.isAssignable(String.class, null, false));
assertTrue(ClassUtils.isAssignable(null, Object.class, false));
assertTrue(ClassUtils.isAssignable(null, Integer.class, false));
assertFalse(ClassUtils.isAssignable(null, Integer.TYPE, false));
assertTrue(ClassUtils.isAssignable(String.class, Object.class, false));
assertTrue(ClassUtils.isAssignable(String.class, String.class, false));
assertFalse(ClassUtils.isAssignable(Object.class, String.class, false));
assertFalse(ClassUtils.isAssignable(Integer.TYPE, Integer.class, false));
assertFalse(ClassUtils.isAssignable(Integer.TYPE, Object.class, false));
assertFalse(ClassUtils.isAssignable(Integer.class, Integer.TYPE, false));
assertTrue(ClassUtils.isAssignable(Integer.TYPE, Integer.TYPE, false));
assertTrue(ClassUtils.isAssignable(Integer.class, Integer.class, false));
assertFalse(ClassUtils.isAssignable(Boolean.TYPE, Boolean.class, false));
assertFalse(ClassUtils.isAssignable(Boolean.TYPE, Object.class, false));
assertFalse(ClassUtils.isAssignable(Boolean.class, Boolean.TYPE, false));
assertTrue(ClassUtils.isAssignable(Boolean.class, Object.class, false));
assertTrue(ClassUtils.isAssignable(Boolean.TYPE, Boolean.TYPE, false));
assertTrue(ClassUtils.isAssignable(Boolean.class, Boolean.class, false));
}
@Test
void test_isAssignable_Unboxing_Widening() {
// test byte conversions
assertFalse(ClassUtils.isAssignable(Byte.class, Character.TYPE, true), "byte -> char");
assertTrue(ClassUtils.isAssignable(Byte.class, Byte.TYPE, true), "byte -> byte");
assertTrue(ClassUtils.isAssignable(Byte.class, Short.TYPE, true), "byte -> short");
assertTrue(ClassUtils.isAssignable(Byte.class, Integer.TYPE, true), "byte -> int");
assertTrue(ClassUtils.isAssignable(Byte.class, Long.TYPE, true), "byte -> long");
assertTrue(ClassUtils.isAssignable(Byte.class, Float.TYPE, true), "byte -> float");
assertTrue(ClassUtils.isAssignable(Byte.class, Double.TYPE, true), "byte -> double");
assertFalse(ClassUtils.isAssignable(Byte.class, Boolean.TYPE, true), "byte -> boolean");
// test short conversions
assertFalse(ClassUtils.isAssignable(Short.class, Character.TYPE, true), "short -> char");
assertFalse(ClassUtils.isAssignable(Short.class, Byte.TYPE, true), "short -> byte");
assertTrue(ClassUtils.isAssignable(Short.class, Short.TYPE, true), "short -> short");
assertTrue(ClassUtils.isAssignable(Short.class, Integer.TYPE, true), "short -> int");
assertTrue(ClassUtils.isAssignable(Short.class, Long.TYPE, true), "short -> long");
assertTrue(ClassUtils.isAssignable(Short.class, Float.TYPE, true), "short -> float");
assertTrue(ClassUtils.isAssignable(Short.class, Double.TYPE, true), "short -> double");
assertFalse(ClassUtils.isAssignable(Short.class, Boolean.TYPE, true), "short -> boolean");
// test char conversions
assertTrue(ClassUtils.isAssignable(Character.class, Character.TYPE, true), "char -> char");
assertFalse(ClassUtils.isAssignable(Character.class, Byte.TYPE, true), "char -> byte");
assertFalse(ClassUtils.isAssignable(Character.class, Short.TYPE, true), "char -> short");
assertTrue(ClassUtils.isAssignable(Character.class, Integer.TYPE, true), "char -> int");
assertTrue(ClassUtils.isAssignable(Character.class, Long.TYPE, true), "char -> long");
assertTrue(ClassUtils.isAssignable(Character.class, Float.TYPE, true), "char -> float");
assertTrue(ClassUtils.isAssignable(Character.class, Double.TYPE, true), "char -> double");
assertFalse(ClassUtils.isAssignable(Character.class, Boolean.TYPE, true), "char -> boolean");
// test int conversions
assertFalse(ClassUtils.isAssignable(Integer.class, Character.TYPE, true), "int -> char");
assertFalse(ClassUtils.isAssignable(Integer.class, Byte.TYPE, true), "int -> byte");
assertFalse(ClassUtils.isAssignable(Integer.class, Short.TYPE, true), "int -> short");
assertTrue(ClassUtils.isAssignable(Integer.class, Integer.TYPE, true), "int -> int");
assertTrue(ClassUtils.isAssignable(Integer.class, Long.TYPE, true), "int -> long");
assertTrue(ClassUtils.isAssignable(Integer.class, Float.TYPE, true), "int -> float");
assertTrue(ClassUtils.isAssignable(Integer.class, Double.TYPE, true), "int -> double");
assertFalse(ClassUtils.isAssignable(Integer.class, Boolean.TYPE, true), "int -> boolean");
// test long conversions
assertFalse(ClassUtils.isAssignable(Long.class, Character.TYPE, true), "long -> char");
assertFalse(ClassUtils.isAssignable(Long.class, Byte.TYPE, true), "long -> byte");
assertFalse(ClassUtils.isAssignable(Long.class, Short.TYPE, true), "long -> short");
assertFalse(ClassUtils.isAssignable(Long.class, Integer.TYPE, true), "long -> int");
assertTrue(ClassUtils.isAssignable(Long.class, Long.TYPE, true), "long -> long");
assertTrue(ClassUtils.isAssignable(Long.class, Float.TYPE, true), "long -> float");
assertTrue(ClassUtils.isAssignable(Long.class, Double.TYPE, true), "long -> double");
assertFalse(ClassUtils.isAssignable(Long.class, Boolean.TYPE, true), "long -> boolean");
// test float conversions
assertFalse(ClassUtils.isAssignable(Float.class, Character.TYPE, true), "float -> char");
assertFalse(ClassUtils.isAssignable(Float.class, Byte.TYPE, true), "float -> byte");
assertFalse(ClassUtils.isAssignable(Float.class, Short.TYPE, true), "float -> short");
assertFalse(ClassUtils.isAssignable(Float.class, Integer.TYPE, true), "float -> int");
assertFalse(ClassUtils.isAssignable(Float.class, Long.TYPE, true), "float -> long");
assertTrue(ClassUtils.isAssignable(Float.class, Float.TYPE, true), "float -> float");
assertTrue(ClassUtils.isAssignable(Float.class, Double.TYPE, true), "float -> double");
assertFalse(ClassUtils.isAssignable(Float.class, Boolean.TYPE, true), "float -> boolean");
// test double conversions
assertFalse(ClassUtils.isAssignable(Double.class, Character.TYPE, true), "double -> char");
assertFalse(ClassUtils.isAssignable(Double.class, Byte.TYPE, true), "double -> byte");
assertFalse(ClassUtils.isAssignable(Double.class, Short.TYPE, true), "double -> short");
assertFalse(ClassUtils.isAssignable(Double.class, Integer.TYPE, true), "double -> int");
assertFalse(ClassUtils.isAssignable(Double.class, Long.TYPE, true), "double -> long");
assertFalse(ClassUtils.isAssignable(Double.class, Float.TYPE, true), "double -> float");
assertTrue(ClassUtils.isAssignable(Double.class, Double.TYPE, true), "double -> double");
assertFalse(ClassUtils.isAssignable(Double.class, Boolean.TYPE, true), "double -> boolean");
// test boolean conversions
assertFalse(ClassUtils.isAssignable(Boolean.class, Character.TYPE, true), "boolean -> char");
assertFalse(ClassUtils.isAssignable(Boolean.class, Byte.TYPE, true), "boolean -> byte");
assertFalse(ClassUtils.isAssignable(Boolean.class, Short.TYPE, true), "boolean -> short");
assertFalse(ClassUtils.isAssignable(Boolean.class, Integer.TYPE, true), "boolean -> int");
assertFalse(ClassUtils.isAssignable(Boolean.class, Long.TYPE, true), "boolean -> long");
assertFalse(ClassUtils.isAssignable(Boolean.class, Float.TYPE, true), "boolean -> float");
assertFalse(ClassUtils.isAssignable(Boolean.class, Double.TYPE, true), "boolean -> double");
assertTrue(ClassUtils.isAssignable(Boolean.class, Boolean.TYPE, true), "boolean -> boolean");
}
@Test
void test_isAssignable_Widening() {
// test byte conversions
assertFalse(ClassUtils.isAssignable(Byte.TYPE, Character.TYPE), "byte -> char");
assertTrue(ClassUtils.isAssignable(Byte.TYPE, Byte.TYPE), "byte -> byte");
assertTrue(ClassUtils.isAssignable(Byte.TYPE, Short.TYPE), "byte -> short");
assertTrue(ClassUtils.isAssignable(Byte.TYPE, Integer.TYPE), "byte -> int");
assertTrue(ClassUtils.isAssignable(Byte.TYPE, Long.TYPE), "byte -> long");
assertTrue(ClassUtils.isAssignable(Byte.TYPE, Float.TYPE), "byte -> float");
assertTrue(ClassUtils.isAssignable(Byte.TYPE, Double.TYPE), "byte -> double");
assertFalse(ClassUtils.isAssignable(Byte.TYPE, Boolean.TYPE), "byte -> boolean");
// test short conversions
assertFalse(ClassUtils.isAssignable(Short.TYPE, Character.TYPE), "short -> char");
assertFalse(ClassUtils.isAssignable(Short.TYPE, Byte.TYPE), "short -> byte");
assertTrue(ClassUtils.isAssignable(Short.TYPE, Short.TYPE), "short -> short");
assertTrue(ClassUtils.isAssignable(Short.TYPE, Integer.TYPE), "short -> int");
assertTrue(ClassUtils.isAssignable(Short.TYPE, Long.TYPE), "short -> long");
assertTrue(ClassUtils.isAssignable(Short.TYPE, Float.TYPE), "short -> float");
assertTrue(ClassUtils.isAssignable(Short.TYPE, Double.TYPE), "short -> double");
assertFalse(ClassUtils.isAssignable(Short.TYPE, Boolean.TYPE), "short -> boolean");
// test char conversions
assertTrue(ClassUtils.isAssignable(Character.TYPE, Character.TYPE), "char -> char");
assertFalse(ClassUtils.isAssignable(Character.TYPE, Byte.TYPE), "char -> byte");
assertFalse(ClassUtils.isAssignable(Character.TYPE, Short.TYPE), "char -> short");
assertTrue(ClassUtils.isAssignable(Character.TYPE, Integer.TYPE), "char -> int");
assertTrue(ClassUtils.isAssignable(Character.TYPE, Long.TYPE), "char -> long");
assertTrue(ClassUtils.isAssignable(Character.TYPE, Float.TYPE), "char -> float");
assertTrue(ClassUtils.isAssignable(Character.TYPE, Double.TYPE), "char -> double");
assertFalse(ClassUtils.isAssignable(Character.TYPE, Boolean.TYPE), "char -> boolean");
// test int conversions
assertFalse(ClassUtils.isAssignable(Integer.TYPE, Character.TYPE), "int -> char");
assertFalse(ClassUtils.isAssignable(Integer.TYPE, Byte.TYPE), "int -> byte");
assertFalse(ClassUtils.isAssignable(Integer.TYPE, Short.TYPE), "int -> short");
assertTrue(ClassUtils.isAssignable(Integer.TYPE, Integer.TYPE), "int -> int");
assertTrue(ClassUtils.isAssignable(Integer.TYPE, Long.TYPE), "int -> long");
assertTrue(ClassUtils.isAssignable(Integer.TYPE, Float.TYPE), "int -> float");
assertTrue(ClassUtils.isAssignable(Integer.TYPE, Double.TYPE), "int -> double");
assertFalse(ClassUtils.isAssignable(Integer.TYPE, Boolean.TYPE), "int -> boolean");
// test long conversions
assertFalse(ClassUtils.isAssignable(Long.TYPE, Character.TYPE), "long -> char");
assertFalse(ClassUtils.isAssignable(Long.TYPE, Byte.TYPE), "long -> byte");
assertFalse(ClassUtils.isAssignable(Long.TYPE, Short.TYPE), "long -> short");
assertFalse(ClassUtils.isAssignable(Long.TYPE, Integer.TYPE), "long -> int");
assertTrue(ClassUtils.isAssignable(Long.TYPE, Long.TYPE), "long -> long");
assertTrue(ClassUtils.isAssignable(Long.TYPE, Float.TYPE), "long -> float");
assertTrue(ClassUtils.isAssignable(Long.TYPE, Double.TYPE), "long -> double");
assertFalse(ClassUtils.isAssignable(Long.TYPE, Boolean.TYPE), "long -> boolean");
// test float conversions
assertFalse(ClassUtils.isAssignable(Float.TYPE, Character.TYPE), "float -> char");
assertFalse(ClassUtils.isAssignable(Float.TYPE, Byte.TYPE), "float -> byte");
assertFalse(ClassUtils.isAssignable(Float.TYPE, Short.TYPE), "float -> short");
assertFalse(ClassUtils.isAssignable(Float.TYPE, Integer.TYPE), "float -> int");
assertFalse(ClassUtils.isAssignable(Float.TYPE, Long.TYPE), "float -> long");
assertTrue(ClassUtils.isAssignable(Float.TYPE, Float.TYPE), "float -> float");
assertTrue(ClassUtils.isAssignable(Float.TYPE, Double.TYPE), "float -> double");
assertFalse(ClassUtils.isAssignable(Float.TYPE, Boolean.TYPE), "float -> boolean");
// test double conversions
assertFalse(ClassUtils.isAssignable(Double.TYPE, Character.TYPE), "double -> char");
assertFalse(ClassUtils.isAssignable(Double.TYPE, Byte.TYPE), "double -> byte");
assertFalse(ClassUtils.isAssignable(Double.TYPE, Short.TYPE), "double -> short");
assertFalse(ClassUtils.isAssignable(Double.TYPE, Integer.TYPE), "double -> int");
assertFalse(ClassUtils.isAssignable(Double.TYPE, Long.TYPE), "double -> long");
assertFalse(ClassUtils.isAssignable(Double.TYPE, Float.TYPE), "double -> float");
assertTrue(ClassUtils.isAssignable(Double.TYPE, Double.TYPE), "double -> double");
assertFalse(ClassUtils.isAssignable(Double.TYPE, Boolean.TYPE), "double -> boolean");
// test boolean conversions
assertFalse(ClassUtils.isAssignable(Boolean.TYPE, Character.TYPE), "boolean -> char");
assertFalse(ClassUtils.isAssignable(Boolean.TYPE, Byte.TYPE), "boolean -> byte");
assertFalse(ClassUtils.isAssignable(Boolean.TYPE, Short.TYPE), "boolean -> short");
assertFalse(ClassUtils.isAssignable(Boolean.TYPE, Integer.TYPE), "boolean -> int");
assertFalse(ClassUtils.isAssignable(Boolean.TYPE, Long.TYPE), "boolean -> long");
assertFalse(ClassUtils.isAssignable(Boolean.TYPE, Float.TYPE), "boolean -> float");
assertFalse(ClassUtils.isAssignable(Boolean.TYPE, Double.TYPE), "boolean -> double");
assertTrue(ClassUtils.isAssignable(Boolean.TYPE, Boolean.TYPE), "boolean -> boolean");
}
@Test
void test_isInnerClass_Class() {
assertTrue(ClassUtils.isInnerClass(Inner.class));
assertTrue(ClassUtils.isInnerClass(Map.Entry.class));
assertTrue(ClassUtils.isInnerClass(new Cloneable() {
// empty
}.getClass()));
assertFalse(ClassUtils.isInnerClass(this.getClass()));
assertFalse(ClassUtils.isInnerClass(String.class));
assertFalse(ClassUtils.isInnerClass(null));
}
@Test
void testComparable() {
// The assertions below show Character sorting before String under
// ClassUtils.comparator() -- presumably it orders by class name; TODO confirm.
final TreeMap<Class<?>, String> map = new TreeMap<>(ClassUtils.comparator());
map.put(String.class, "lastEntry");
// Result ignored: presumably exercises toString()/iteration after each
// mutation to check the comparator-backed map does not throw -- verify intent.
map.toString();
map.put(Character.class, "firstEntry");
map.toString();
assertEquals("firstEntry", map.firstEntry().getValue());
assertEquals(Character.class, map.firstEntry().getKey());
//
assertEquals("lastEntry", map.lastEntry().getValue());
assertEquals(String.class, map.lastEntry().getKey());
//
// A null key is accepted: TreeMap delegates null handling to the comparator,
// so the comparator must tolerate null arguments.
map.put(null, "null");
map.toString();
assertEquals("null", map.get(null));
}
@Test
void testConstructor() {
    // The public constructor must remain callable (kept for tools that need a bean instance).
    assertNotNull(new ClassUtils());
    // Exactly one declared constructor, and it is public.
    final Constructor<?>[] declared = ClassUtils.class.getDeclaredConstructors();
    assertEquals(1, declared.length);
    assertTrue(Modifier.isPublic(declared[0].getModifiers()));
    // The class itself is public and deliberately non-final.
    final int classModifiers = ClassUtils.class.getModifiers();
    assertTrue(Modifier.isPublic(classModifiers));
    assertFalse(Modifier.isFinal(classModifiers));
}
@ParameterizedTest
@IntRangeSource(from = 1, to = 255)
void testGetClassArray(final int dimensions) throws ClassNotFoundException {
    // Build "Type[][]..." names with the requested number of dimensions (<= 255,
    // so resolution must succeed) and check the resolved class has that rank.
    final String brackets = StringUtils.repeat("[]", dimensions);
    assertEquals(dimensions,
        getDimension(ClassUtils.getClass("org.apache.commons.lang3.ClassUtilsTest$Inner.DeeplyNested" + brackets)));
    assertEquals(dimensions, getDimension(ClassUtils.getClass("java.lang.String" + brackets)));
}
@ParameterizedTest
@IntRangeSource(from = 256, to = 300)
void testGetClassArrayIllegal(final int dimensions) throws ClassNotFoundException {
    // More than 255 array dimensions is not representable in a class file,
    // so getClass() must reject the name with IllegalArgumentException.
    final String brackets = StringUtils.repeat("[]", dimensions);
    assertThrows(IllegalArgumentException.class, () -> assertEquals(dimensions,
        getDimension(ClassUtils.getClass("org.apache.commons.lang3.ClassUtilsTest$Inner.DeeplyNested" + brackets))));
    assertThrows(IllegalArgumentException.class,
        () -> assertEquals(dimensions, getDimension(ClassUtils.getClass("java.lang.String" + brackets))));
}
@Test
void testGetClassByNormalNameArrays() throws ClassNotFoundException {
    // Array names in source-like notation, plus nested-class spellings with
    // either '.' or '$' and the JVM "[L...;" form.
    final Object[][] cases = {
        {int[].class, "int[]"},
        {long[].class, "long[]"},
        {short[].class, "short[]"},
        {byte[].class, "byte[]"},
        {char[].class, "char[]"},
        {float[].class, "float[]"},
        {double[].class, "double[]"},
        {boolean[].class, "boolean[]"},
        {String[].class, "java.lang.String[]"},
        {java.util.Map.Entry[].class, "java.util.Map.Entry[]"},
        {java.util.Map.Entry[].class, "java.util.Map$Entry[]"},
        {java.util.Map.Entry[].class, "[Ljava.util.Map.Entry;"},
        {java.util.Map.Entry[].class, "[Ljava.util.Map$Entry;"},
        {java.util.Map.Entry[][].class, "[[Ljava.util.Map$Entry;"}
    };
    for (final Object[] testCase : cases) {
        assertEquals(testCase[0], ClassUtils.getClass((String) testCase[1]));
    }
}
@Test
void testGetClassByNormalNameArrays2D() throws ClassNotFoundException {
    // Two-dimensional arrays in source-like "Type[][]" notation.
    final Object[][] cases = {
        {int[][].class, "int[][]"},
        {long[][].class, "long[][]"},
        {short[][].class, "short[][]"},
        {byte[][].class, "byte[][]"},
        {char[][].class, "char[][]"},
        {float[][].class, "float[][]"},
        {double[][].class, "double[][]"},
        {boolean[][].class, "boolean[][]"},
        {String[][].class, "java.lang.String[][]"}
    };
    for (final Object[] testCase : cases) {
        assertEquals(testCase[0], ClassUtils.getClass((String) testCase[1]));
    }
}
@Test
void testGetClassClassNotFound() throws Exception {
    // Names that look plausible but resolve to no class must raise ClassNotFoundException.
    for (final String badName : new String[] {"bool", "bool[]", "integer[]"}) {
        assertGetClassThrowsClassNotFound(badName);
    }
}
@Test
void testGetClassInner() throws ClassNotFoundException {
    // All four '.'/'$' spellings of the doubly nested class must resolve,
    // via every getClass() overload.
    final String[] spellings = {
        "org.apache.commons.lang3.ClassUtilsTest.Inner.DeeplyNested",
        "org.apache.commons.lang3.ClassUtilsTest.Inner$DeeplyNested",
        "org.apache.commons.lang3.ClassUtilsTest$Inner$DeeplyNested",
        "org.apache.commons.lang3.ClassUtilsTest$Inner.DeeplyNested"
    };
    for (final String spelling : spellings) {
        assertEquals(Inner.DeeplyNested.class, ClassUtils.getClass(spelling));
    }
    // Mixed spelling also works for array types.
    assertEquals(Inner.DeeplyNested[].class,
        ClassUtils.getClass("org.apache.commons.lang3.ClassUtilsTest$Inner.DeeplyNested[]"));
    // Same names via the initialize=true overload...
    for (final String spelling : spellings) {
        assertEquals(Inner.DeeplyNested.class, ClassUtils.getClass(spelling, true));
    }
    // ...and via the explicit class-loader overload.
    final ClassLoader classLoader = Inner.DeeplyNested.class.getClassLoader();
    for (final String spelling : spellings) {
        assertEquals(Inner.DeeplyNested.class, ClassUtils.getClass(classLoader, spelling));
    }
}
@Test
void testGetClassInvalidArguments() throws Exception {
    // A null class name is a programming error, not a lookup failure.
    assertGetClassThrowsNullPointerException(null);
    // Malformed array syntax and malformed package names must not resolve.
    final String[] malformed = {"[][][]", "[[]", "[", "java.lang.String][", ".hello.world", "hello..world"};
    for (final String name : malformed) {
        assertGetClassThrowsClassNotFound(name);
    }
}
@ParameterizedTest
@IntRangeSource(from = 65536, to = 65555)
void testGetClassLengthIllegal(final int classNameLength) throws ClassNotFoundException {
    // Names longer than 65535 characters exceed the class-file limit and are
    // rejected up front, whether built from one long segment or many short ones.
    final String singleSegment = StringUtils.repeat("a", classNameLength);
    final String manySegments = StringUtils.repeat("a.", classNameLength / 2);
    assertThrows(IllegalArgumentException.class, () -> ClassUtils.getClass(singleSegment));
    assertThrows(IllegalArgumentException.class,
        () -> assertEquals(classNameLength, ClassUtils.getClass(manySegments)));
}
@Test
void testGetClassLongestCheck() throws ClassNotFoundException {
// Probe the largest still-legal inputs: a 65535-character simple name and
// MAX_ARRAY_DIMENSIONS array dimensions in both "[]" and JVM "[" notation.
final String maxClassName = StringUtils.repeat("a", 65535);
final String maxDimensions = StringUtils.repeat("[]", MAX_ARRAY_DIMENSIONS);
final String maxOpens = StringUtils.repeat("[", MAX_ARRAY_DIMENSIONS);
// Syntactically legal but nonexistent: ClassNotFoundException, not IllegalArgumentException.
assertThrows(ClassNotFoundException.class, () -> ClassUtils.getClass(maxClassName));
// A real class with the maximum number of dimensions still resolves.
assertNotNull(ClassUtils.getClass("java.lang.String" + maxDimensions));
assertThrows(ClassNotFoundException.class, () -> ClassUtils.getClass(maxClassName + maxDimensions));
assertThrows(ClassNotFoundException.class, () -> ClassUtils.getClass(maxOpens + "L" + maxClassName + ";"));
// maxOpens + 1
// One dimension beyond the limit is rejected outright as an illegal argument.
assertThrows(IllegalArgumentException.class, () -> ClassUtils.getClass(maxOpens + "[L" + maxClassName + ";"));
}
@Test
void testGetClassRawPrimitives() throws ClassNotFoundException {
    // Each primitive keyword (and "void") resolves to the corresponding TYPE class.
    final Object[][] cases = {
        {int.class, "int"},
        {long.class, "long"},
        {short.class, "short"},
        {byte.class, "byte"},
        {char.class, "char"},
        {float.class, "float"},
        {double.class, "double"},
        {boolean.class, "boolean"},
        {void.class, "void"}
    };
    for (final Object[] testCase : cases) {
        assertEquals(testCase[0], ClassUtils.getClass((String) testCase[1]));
    }
}
@Test
void testGetClassWithArrayClasses() throws Exception {
    // Round-trip: getClass(cls.getName()) must return cls for every 1-D array type.
    final Class<?>[] arrayClasses = {String[].class, int[].class, long[].class, short[].class,
        byte[].class, char[].class, float[].class, double[].class, boolean[].class};
    for (final Class<?> arrayClass : arrayClasses) {
        assertGetClassReturnsClass(arrayClass);
    }
}
@Test
void testGetClassWithArrayClasses2D() throws Exception {
    // Round-trip: getClass(cls.getName()) must return cls for every 2-D array type.
    final Class<?>[] arrayClasses = {String[][].class, int[][].class, long[][].class, short[][].class,
        byte[][].class, char[][].class, float[][].class, double[][].class, boolean[][].class};
    for (final Class<?> arrayClass : arrayClasses) {
        assertGetClassReturnsClass(arrayClass);
    }
}
@Test
void testGetComponentType() {
    final CX[] emptyArray = {};
    @SuppressWarnings("unchecked")
    final Class<CX[]> arrayType = (Class<CX[]>) emptyArray.getClass();
    // The generic signature means no client-side cast is needed on the result.
    final Class<CX> elementType = ClassUtils.getComponentType(arrayType);
    assertEquals(CX.class, elementType);
    // Null input is tolerated and yields null rather than throwing.
    assertNull(ClassUtils.getComponentType(null));
}
@Test
void testGetPublicMethod() throws Exception {
// Tests with Collections$UnmodifiableSet
// Collections.unmodifiableSet() returns an instance of a non-public class;
// getPublicMethod() must locate isEmpty() on a public declaring type so the
// returned Method is actually invokable (contrast with testShowJavaBug).
final Set<?> set = Collections.unmodifiableSet(new HashSet<>());
final Method isEmptyMethod = ClassUtils.getPublicMethod(set.getClass(), "isEmpty");
assertTrue(Modifier.isPublic(isEmptyMethod.getDeclaringClass().getModifiers()));
assertTrue((Boolean) isEmptyMethod.invoke(set));
// Tests with a public Class
// For an already-public class the method is the same one getMethod() returns.
final Method toStringMethod = ClassUtils.getPublicMethod(Object.class, "toString");
assertEquals(Object.class.getMethod("toString"), toStringMethod);
}
@Test
void testHierarchyExcludingInterfaces() {
    // Default hierarchy walk: the class itself, then its superclasses up to Object.
    final Iterator<Class<?>> hierarchy = ClassUtils.hierarchy(StringParameterizedChild.class).iterator();
    for (final Class<?> expected : new Class<?>[] {StringParameterizedChild.class, GenericParent.class, Object.class}) {
        assertEquals(expected, hierarchy.next());
    }
    assertFalse(hierarchy.hasNext());
}
@Test
void testHierarchyIncludingInterfaces() {
    // With Interfaces.INCLUDE the implemented interface appears after its
    // declaring class and before Object.
    final Iterator<Class<?>> hierarchy =
        ClassUtils.hierarchy(StringParameterizedChild.class, Interfaces.INCLUDE).iterator();
    for (final Class<?> expected : new Class<?>[] {StringParameterizedChild.class, GenericParent.class,
            GenericConsumer.class, Object.class}) {
        assertEquals(expected, hierarchy.next());
    }
    assertFalse(hierarchy.hasNext());
}
@Test
void testIsPrimitiveOrWrapper() {
// isPrimitiveOrWrapper(): true for the eight wrapper classes, the eight
// primitive types and void; false for Void.class, ordinary classes and null.
// test primitive wrapper classes
assertTrue(ClassUtils.isPrimitiveOrWrapper(Boolean.class), "Boolean.class");
assertTrue(ClassUtils.isPrimitiveOrWrapper(Byte.class), "Byte.class");
assertTrue(ClassUtils.isPrimitiveOrWrapper(Character.class), "Character.class");
assertTrue(ClassUtils.isPrimitiveOrWrapper(Short.class), "Short.class");
assertTrue(ClassUtils.isPrimitiveOrWrapper(Integer.class), "Integer.class");
assertTrue(ClassUtils.isPrimitiveOrWrapper(Long.class), "Long.class");
assertTrue(ClassUtils.isPrimitiveOrWrapper(Double.class), "Double.class");
assertTrue(ClassUtils.isPrimitiveOrWrapper(Float.class), "Float.class");
// test primitive classes
assertTrue(ClassUtils.isPrimitiveOrWrapper(Boolean.TYPE), "boolean");
assertTrue(ClassUtils.isPrimitiveOrWrapper(Byte.TYPE), "byte");
assertTrue(ClassUtils.isPrimitiveOrWrapper(Character.TYPE), "char");
assertTrue(ClassUtils.isPrimitiveOrWrapper(Short.TYPE), "short");
assertTrue(ClassUtils.isPrimitiveOrWrapper(Integer.TYPE), "int");
assertTrue(ClassUtils.isPrimitiveOrWrapper(Long.TYPE), "long");
assertTrue(ClassUtils.isPrimitiveOrWrapper(Double.TYPE), "double");
assertTrue(ClassUtils.isPrimitiveOrWrapper(Float.TYPE), "float");
// Void.TYPE (the primitive void) counts, unlike the Void wrapper class below.
assertTrue(ClassUtils.isPrimitiveOrWrapper(Void.TYPE), "Void.TYPE");
// others
assertFalse(ClassUtils.isPrimitiveOrWrapper(null), "null");
assertFalse(ClassUtils.isPrimitiveOrWrapper(Void.class), "Void.class");
assertFalse(ClassUtils.isPrimitiveOrWrapper(String.class), "String.class");
assertFalse(ClassUtils.isPrimitiveOrWrapper(this.getClass()), "this.getClass()");
}
@Test
void testIsPrimitiveWrapper() {
// isPrimitiveWrapper(): true only for the eight wrapper classes; primitive
// types themselves, Void (either form), ordinary classes and null are false.
// test primitive wrapper classes
assertTrue(ClassUtils.isPrimitiveWrapper(Boolean.class), "Boolean.class");
assertTrue(ClassUtils.isPrimitiveWrapper(Byte.class), "Byte.class");
assertTrue(ClassUtils.isPrimitiveWrapper(Character.class), "Character.class");
assertTrue(ClassUtils.isPrimitiveWrapper(Short.class), "Short.class");
assertTrue(ClassUtils.isPrimitiveWrapper(Integer.class), "Integer.class");
assertTrue(ClassUtils.isPrimitiveWrapper(Long.class), "Long.class");
assertTrue(ClassUtils.isPrimitiveWrapper(Double.class), "Double.class");
assertTrue(ClassUtils.isPrimitiveWrapper(Float.class), "Float.class");
// test primitive classes
assertFalse(ClassUtils.isPrimitiveWrapper(Boolean.TYPE), "boolean");
assertFalse(ClassUtils.isPrimitiveWrapper(Byte.TYPE), "byte");
assertFalse(ClassUtils.isPrimitiveWrapper(Character.TYPE), "char");
assertFalse(ClassUtils.isPrimitiveWrapper(Short.TYPE), "short");
assertFalse(ClassUtils.isPrimitiveWrapper(Integer.TYPE), "int");
assertFalse(ClassUtils.isPrimitiveWrapper(Long.TYPE), "long");
assertFalse(ClassUtils.isPrimitiveWrapper(Double.TYPE), "double");
assertFalse(ClassUtils.isPrimitiveWrapper(Float.TYPE), "float");
// others
assertFalse(ClassUtils.isPrimitiveWrapper(null), "null");
assertFalse(ClassUtils.isPrimitiveWrapper(Void.class), "Void.class");
assertFalse(ClassUtils.isPrimitiveWrapper(Void.TYPE), "Void.TYPE");
assertFalse(ClassUtils.isPrimitiveWrapper(String.class), "String.class");
assertFalse(ClassUtils.isPrimitiveWrapper(this.getClass()), "this.getClass()");
}
@Test
void testPrimitivesToWrappers() {
// primitivesToWrappers() maps each element through primitiveToWrapper(),
// leaving non-primitive entries untouched; a null array maps to null.
// test null
// assertNull("null -> null", ClassUtils.primitivesToWrappers(null)); // generates warning
assertNull(ClassUtils.primitivesToWrappers((Class<?>[]) null), "null -> null"); // equivalent cast to avoid warning
// Other possible casts for null
assertArrayEquals(ArrayUtils.EMPTY_CLASS_ARRAY, ClassUtils.primitivesToWrappers(), "empty -> empty");
// Casting null to Class<?> makes it a single null vararg element, not a null array.
final Class<?>[] castNull = ClassUtils.primitivesToWrappers((Class<?>) null); // == new Class<?>[]{null}
assertArrayEquals(new Class<?>[] {null}, castNull, "(Class<?>) null -> [null]");
// test empty array is returned unchanged
assertArrayEquals(ArrayUtils.EMPTY_CLASS_ARRAY, ClassUtils.primitivesToWrappers(ArrayUtils.EMPTY_CLASS_ARRAY), "empty -> empty");
// test an array of various classes
final Class<?>[] primitives = new Class[] {Boolean.TYPE, Byte.TYPE, Character.TYPE, Short.TYPE, Integer.TYPE, Long.TYPE, Double.TYPE, Float.TYPE,
String.class, ClassUtils.class};
final Class<?>[] wrappers = ClassUtils.primitivesToWrappers(primitives);
for (int i = 0; i < primitives.length; i++) {
// test each returned wrapper
final Class<?> primitive = primitives[i];
final Class<?> expectedWrapper = ClassUtils.primitiveToWrapper(primitive);
assertEquals(expectedWrapper, wrappers[i], primitive + " -> " + expectedWrapper);
}
// test an array of no primitive classes
final Class<?>[] noPrimitives = new Class[] {String.class, ClassUtils.class, Void.TYPE};
// This used to return the exact same array, but no longer does.
assertNotSame(noPrimitives, ClassUtils.primitivesToWrappers(noPrimitives), "unmodified");
}
@Test
void testPrimitiveToWrapper() {
// primitiveToWrapper(): each primitive TYPE maps to its wrapper class;
// non-primitive classes (and Void.TYPE) are returned unchanged; null -> null.
// test primitive classes
assertEquals(Boolean.class, ClassUtils.primitiveToWrapper(Boolean.TYPE), "boolean -> Boolean.class");
assertEquals(Byte.class, ClassUtils.primitiveToWrapper(Byte.TYPE), "byte -> Byte.class");
assertEquals(Character.class, ClassUtils.primitiveToWrapper(Character.TYPE), "char -> Character.class");
assertEquals(Short.class, ClassUtils.primitiveToWrapper(Short.TYPE), "short -> Short.class");
assertEquals(Integer.class, ClassUtils.primitiveToWrapper(Integer.TYPE), "int -> Integer.class");
assertEquals(Long.class, ClassUtils.primitiveToWrapper(Long.TYPE), "long -> Long.class");
assertEquals(Double.class, ClassUtils.primitiveToWrapper(Double.TYPE), "double -> Double.class");
assertEquals(Float.class, ClassUtils.primitiveToWrapper(Float.TYPE), "float -> Float.class");
// test a few other classes
assertEquals(String.class, ClassUtils.primitiveToWrapper(String.class), "String.class -> String.class");
assertEquals(ClassUtils.class, ClassUtils.primitiveToWrapper(ClassUtils.class), "ClassUtils.class -> ClassUtils.class");
assertEquals(Void.TYPE, ClassUtils.primitiveToWrapper(Void.TYPE), "Void.TYPE -> Void.TYPE");
// test null
assertNull(ClassUtils.primitiveToWrapper(null), "null -> null");
}
// Show the Java bug: https://bugs.java.com/bugdatabase/view_bug.do?bug_id=4071957
// We may have to delete this if a JDK fixes the bug.
@Test
void testShowJavaBug() throws Exception {
// Tests with Collections$UnmodifiableSet
// Demonstrates (rather than works around) the bug: looking up the public
// isEmpty() on the non-public implementation class makes reflective
// invocation fail with IllegalAccessException.
final Set<?> set = Collections.unmodifiableSet(new HashSet<>());
final Method isEmptyMethod = set.getClass().getMethod("isEmpty");
assertThrows(IllegalAccessException.class, () -> isEmptyMethod.invoke(set));
}
@Test
void testToClass_object() {
// toClass() maps each object to its runtime class; a null array yields null
// and a null element yields a null Class entry.
// assertNull(ClassUtils.toClass(null)); // generates warning
assertNull(ClassUtils.toClass((Object[]) null)); // equivalent explicit cast
// Additional varargs tests
assertArrayEquals(ArrayUtils.EMPTY_CLASS_ARRAY, ClassUtils.toClass(), "empty -> empty");
// Casting null to Object makes it a single null vararg element, not a null array.
final Class<?>[] castNull = ClassUtils.toClass((Object) null); // == new Object[]{null}
assertArrayEquals(new Object[] {null}, castNull, "(Object) null -> [null]");
// An empty input returns the shared EMPTY_CLASS_ARRAY constant (hence assertSame).
assertSame(ArrayUtils.EMPTY_CLASS_ARRAY, ClassUtils.toClass(ArrayUtils.EMPTY_OBJECT_ARRAY));
assertArrayEquals(new Class[] {String.class, Integer.class, Double.class}, ClassUtils.toClass("Test", Integer.valueOf(1), Double.valueOf(99d)));
assertArrayEquals(new Class[] {String.class, null, Double.class}, ClassUtils.toClass("Test", null, Double.valueOf(99d)));
}
@Test
void testWithInterleavingWhitespace() throws ClassNotFoundException {
    // getClass() tolerates arbitrary whitespace around and inside array brackets.
    final Object[][] cases = {
        {int[].class, " int [ ] "},
        {long[].class, "\rlong\t[\n]\r"},
        {short[].class, "\tshort \t\t[]"},
        {byte[].class, "byte[\t\t\n\r] "}
    };
    for (final Object[] testCase : cases) {
        assertEquals(testCase[0], ClassUtils.getClass((String) testCase[1]));
    }
}
@Test
void testWrappersToPrimitives() {
    // A mix of wrapper classes, ordinary classes and null; each element must
    // map exactly as wrapperToPrimitive() maps it individually.
    final Class<?>[] input = {Boolean.class, Byte.class, Character.class, Short.class, Integer.class, Long.class,
        Float.class, Double.class, String.class, ClassUtils.class, null};
    final Class<?>[] actual = ClassUtils.wrappersToPrimitives(input);
    assertEquals(input.length, actual.length, "Wrong length of result array");
    for (int i = 0; i < input.length; i++) {
        final Class<?> expected = ClassUtils.wrapperToPrimitive(input[i]);
        assertEquals(expected, actual[i], input[i] + " -> " + expected);
    }
}
@Test
void testWrappersToPrimitivesEmpty() {
    // An empty input array produces an (equal) empty result array.
    final Class<?>[] noClasses = {};
    assertArrayEquals(noClasses, ClassUtils.wrappersToPrimitives(noClasses), "Wrong result for empty input");
}
@Test
void testWrappersToPrimitivesNull() {
// Null handling: a null array returns null, while a single null vararg
// element yields a one-element [null] result.
// assertNull("Wrong result for null input", ClassUtils.wrappersToPrimitives(null)); // generates warning
assertNull(ClassUtils.wrappersToPrimitives((Class<?>[]) null), "Wrong result for null input"); // equivalent cast
// Other possible casts for null
assertArrayEquals(ArrayUtils.EMPTY_CLASS_ARRAY, ClassUtils.wrappersToPrimitives(), "empty -> empty");
final Class<?>[] castNull = ClassUtils.wrappersToPrimitives((Class<?>) null); // == new Class<?>[]{null}
assertArrayEquals(new Class<?>[] {null}, castNull, "(Class<?>) null -> [null]");
}
@Test
void testWrapperToPrimitive() {
    // Round-trip every primitive through primitiveToWrapper() and back.
    final Class<?>[] primitiveTypes = {Boolean.TYPE, Byte.TYPE, Character.TYPE, Short.TYPE,
        Integer.TYPE, Long.TYPE, Float.TYPE, Double.TYPE};
    for (final Class<?> primitiveType : primitiveTypes) {
        final Class<?> wrapperType = ClassUtils.primitiveToWrapper(primitiveType);
        assertFalse(wrapperType.isPrimitive(), "Still primitive");
        assertEquals(primitiveType, ClassUtils.wrapperToPrimitive(wrapperType), wrapperType + " -> " + primitiveType);
    }
}
@Test
void testWrapperToPrimitiveNoWrapper() {
    // A class that is not a primitive wrapper has no primitive counterpart.
    final Class<?> nonWrapper = String.class;
    assertNull(ClassUtils.wrapperToPrimitive(nonWrapper), "Wrong result for non wrapper class");
}
@Test
void testWrapperToPrimitiveNull() {
    // Null input is tolerated and maps to null rather than throwing.
    final Class<?> nullClass = null;
    assertNull(ClassUtils.wrapperToPrimitive(nullClass), "Wrong result for null class");
}
}
| Named |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/engine/extension/AutoCloseTests.java | {
"start": 16141,
"end": 16265
} | class ____ implements TestInterface {
@AutoClose("")
final String field = "blank";
}
static | BlankCloseMethodNameTestCase |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/creators/TestCreators2.java | {
"start": 2147,
"end": 2331
} | class ____ {
protected final String id;
@JsonCreator
public Item431(@JsonProperty("id") String id) {
this.id = id;
}
}
// Test | Item431 |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/asm/ClassReader.java | {
"start": 160672,
"end": 161182
} | class ____ or adapters.</i>
*
* @param offset the start offset of the value to be read in this {@link ClassReader}.
* @return the read value.
*/
public short readShort(final int offset) {
byte[] classBuffer = classFileBuffer;
return (short) (((classBuffer[offset] & 0xFF) << 8) | (classBuffer[offset + 1] & 0xFF));
}
/**
* Reads a signed int value in this {@link ClassReader}. <i>This method is intended for {@link
* Attribute} sub classes, and is normally not needed by | generators |
java | apache__camel | components/camel-salesforce/camel-salesforce-component/src/test/java/org/apache/camel/component/salesforce/CompoundTypesManualIT.java | {
"start": 1732,
"end": 5247
} | class ____ extends AbstractSalesforceTestBase {
private static final Logger LOG = LoggerFactory.getLogger(CompoundTypesManualIT.class);
@Test
public void testTypes() throws Exception {
doTestTypes("");
doTestTypes("Xml");
}
private void doTestTypes(String suffix) {
Account account = new Account();
account.setName("Camel Test Account");
account.setBillingCity("San Francisco");
account.setBillingCountry("USA");
account.setBillingPostalCode("94105");
account.setBillingState("CA");
account.setBillingStreet("1 Market St #300");
account.setBillingLatitude(37.793779);
account.setBillingLongitude(-122.39448);
account.setShippingCity("San Francisco");
account.setShippingCountry("USA");
account.setShippingPostalCode("94105");
account.setShippingState("CA");
account.setShippingStreet("1 Market St #300");
account.setShippingLatitude(37.793779);
account.setShippingLongitude(-122.39448);
account.setShipping_Location__Latitude__s(37.793779);
account.setShipping_Location__Longitude__s(-122.39448);
CreateSObjectResult result
= template().requestBody("direct:createSObject" + suffix, account, CreateSObjectResult.class);
assertNotNull(result);
assertTrue(result.getSuccess(), "Create success");
LOG.debug("Create: {}", result);
try {
// get account with compound fields
account = template().requestBody("direct:getSObject" + suffix, result.getId(), Account.class);
assertNotNull(account);
assertNotNull(account.getBillingAddress(), "Billing Address");
assertNotNull(account.getShippingAddress(), "Shipping Address");
assertNotNull(account.getShippingAddress(), "Shipping Location");
LOG.debug("Retrieved fields billing address: {}, shipping location: {}", account.getBillingAddress(),
account.getShipping_Location__c());
} finally {
// delete the test SObject
String id = (String) template().requestBody("direct:deleteSObject" + suffix, result.getId());
Assertions.assertEquals(id, result.getId());
LOG.debug("Delete successful");
}
}
@Override
protected RouteBuilder doCreateRouteBuilder() throws Exception {
return new RouteBuilder() {
@Override
public void configure() throws Exception {
// testCreateSObject
from("direct:createSObject").to("salesforce:createSObject?sObjectName=Account");
from("direct:createSObjectXml").to("salesforce:createSObject?format=XML&sObjectName=Account");
// testGetSObject
from("direct:getSObject").to(
"salesforce:getSObject?sObjectName=Account&sObjectFields=Id,BillingAddress,ShippingAddress,Shipping_Location__c");
from("direct:getSObjectXml").to(
"salesforce:getSObject?format=XML&sObjectName=Account&sObjectFields=Id,BillingAddress,ShippingAddress,Shipping_Location__c");
// testDeleteSObject
from("direct:deleteSObject").to("salesforce:deleteSObject?sObjectName=Account");
from("direct:deleteSObjectXml").to("salesforce:deleteSObject?format=XML&sObjectName=Account");
}
};
}
}
| CompoundTypesManualIT |
java | grpc__grpc-java | examples/src/main/java/io/grpc/examples/keepalive/KeepAliveClient.java | {
"start": 1137,
"end": 3873
} | class ____ {
private static final Logger logger = Logger.getLogger(KeepAliveClient.class.getName());
private final GreeterGrpc.GreeterBlockingStub blockingStub;
/** Construct client for accessing HelloWorld server using the existing channel. */
public KeepAliveClient(Channel channel) {
// 'channel' here is a Channel, not a ManagedChannel, so it is not this code's responsibility to
// shut it down.
// Passing Channels to code makes code easier to test and makes it easier to reuse Channels.
blockingStub = GreeterGrpc.newBlockingStub(channel);
}
/** Say hello to server. */
public void greet(String name) {
logger.info("Will try to greet " + name + " ...");
HelloRequest request = HelloRequest.newBuilder().setName(name).build();
HelloReply response;
try {
response = blockingStub.sayHello(request);
} catch (StatusRuntimeException e) {
logger.log(Level.WARNING, "RPC failed: {0}", e.getStatus());
return;
}
logger.info("Greeting: " + response.getMessage());
}
/**
* Greet server.
*/
public static void main(String[] args) throws Exception {
// Access a service running on the local machine on port 50051
String target = "localhost:50051";
// Create a channel with the following keep alive configurations (demo only, you should set
// more appropriate values based on your environment):
// keepAliveTime: Send pings every 10 seconds if there is no activity. Set to an appropriate
// value in reality, e.g. (5, TimeUnit.MINUTES).
// keepAliveTimeout: Wait 1 second for ping ack before considering the connection dead. Set to a
// larger value in reality, e.g. (10, TimeUnit.SECONDS). You should only set such a small value,
// e.g. (1, TimeUnit.SECONDS) in certain low latency environments.
// keepAliveWithoutCalls: Send pings even without active streams. Normally disable it.
// Use JAVA_OPTS=-Djava.util.logging.config.file=logging.properties to see the keep alive ping
// frames.
// More details see: https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md
ManagedChannel channel = Grpc.newChannelBuilder(target, InsecureChannelCredentials.create())
.keepAliveTime(10, TimeUnit.SECONDS) // Change to a larger value, e.g. 5min.
.keepAliveTimeout(1, TimeUnit.SECONDS) // Change to a larger value, e.g. 10s.
.keepAliveWithoutCalls(true)// You should normally avoid enabling this.
.build();
try {
KeepAliveClient client = new KeepAliveClient(channel);
client.greet("Keep-alive Demo");
Thread.sleep(30000);
} finally {
channel.shutdownNow().awaitTermination(5, TimeUnit.SECONDS);
}
}
}
| KeepAliveClient |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/split/JobSplit.java | {
"start": 4876,
"end": 6289
} | class ____ {
private TaskSplitIndex splitIndex;
private long inputDataLength;
private String[] locations;
public TaskSplitMetaInfo(){
this.splitIndex = new TaskSplitIndex();
this.locations = new String[0];
}
public TaskSplitMetaInfo(TaskSplitIndex splitIndex, String[] locations,
long inputDataLength) {
this.splitIndex = splitIndex;
this.locations = locations;
this.inputDataLength = inputDataLength;
}
public TaskSplitMetaInfo(InputSplit split, long startOffset)
throws InterruptedException, IOException {
this(new TaskSplitIndex("", startOffset), split.getLocations(),
split.getLength());
}
public TaskSplitMetaInfo(String[] locations, long startOffset,
long inputDataLength) {
this(new TaskSplitIndex("",startOffset), locations, inputDataLength);
}
public TaskSplitIndex getSplitIndex() {
return splitIndex;
}
public String getSplitLocation() {
return splitIndex.getSplitLocation();
}
public long getInputDataLength() {
return inputDataLength;
}
public String[] getLocations() {
return locations;
}
public long getStartOffset() {
return splitIndex.getStartOffset();
}
}
/**
* This represents the meta information about the task split that the
* task gets
*/
public static | TaskSplitMetaInfo |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/scheduler/adaptivebatch/BlockingResultInfo.java | {
"start": 1205,
"end": 3119
} | interface ____ extends IntermediateResultInfo {
/**
* Return the num of bytes produced(numBytesProduced) by the producer.
*
* <p>The difference between numBytesProduced and numBytesOut : numBytesProduced represents the
* number of bytes actually produced, and numBytesOut represents the number of bytes sent to
* downstream tasks. In unicast scenarios, these two values should be equal. In broadcast
* scenarios, numBytesOut should be (N * numBytesProduced), where N refers to the number of
* subpartitions.
*
* @return the num of bytes produced by the producer
*/
long getNumBytesProduced();
/**
* Return the aggregated num of bytes according to the index range for partition and
* subpartition.
*
* @param partitionIndexRange range of the index of the consumed partition.
* @param subpartitionIndexRange range of the index of the consumed subpartition.
* @return aggregated bytes according to the index ranges.
*/
long getNumBytesProduced(IndexRange partitionIndexRange, IndexRange subpartitionIndexRange);
/**
* Record the information of the result partition.
*
* @param partitionIndex the intermediate result partition index
* @param partitionBytes the {@link ResultPartitionBytes} of the partition
*/
void recordPartitionInfo(int partitionIndex, ResultPartitionBytes partitionBytes);
/**
* Reset the information of the result partition.
*
* @param partitionIndex the intermediate result partition index
*/
void resetPartitionInfo(int partitionIndex);
/**
* Gets subpartition bytes by partition index.
*
* @return a map with integer keys representing partition indices and long array values
* representing subpartition bytes.
*/
Map<Integer, long[]> getSubpartitionBytesByPartitionIndex();
}
| BlockingResultInfo |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/operations/converters/table/SqlAlterTableDropPartitionConverter.java | {
"start": 1420,
"end": 2222
} | class ____ implements SqlNodeConverter<SqlDropPartitions> {
@Override
public Operation convertSqlNode(SqlDropPartitions sqlDropPartitions, ConvertContext context) {
UnresolvedIdentifier unresolvedIdentifier =
UnresolvedIdentifier.of(sqlDropPartitions.fullTableName());
ObjectIdentifier tableIdentifier =
context.getCatalogManager().qualifyIdentifier(unresolvedIdentifier);
List<CatalogPartitionSpec> specs = new ArrayList<>();
for (int i = 0; i < sqlDropPartitions.getPartSpecs().size(); i++) {
specs.add(new CatalogPartitionSpec(sqlDropPartitions.getPartitionKVs(i)));
}
return new DropPartitionsOperation(tableIdentifier, sqlDropPartitions.ifExists(), specs);
}
}
| SqlAlterTableDropPartitionConverter |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/interceptors/bindings/inherited/InheritedMethodsWithInterceptorBindingTest.java | {
"start": 1189,
"end": 1462
} | class ____ {
@MyInterceptorBinding
String foobar() {
return "this should be ignored";
}
@MyInterceptorBinding
String foobarNotInherited() {
return "foobar";
}
}
@Dependent
static | MySuperclass |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/RootNameTest.java | {
"start": 548,
"end": 606
} | class ____
{
@JsonRootName("rudy")
static | RootNameTest |
java | spring-projects__spring-boot | build-plugin/spring-boot-gradle-plugin/src/test/java/org/springframework/boot/gradle/tasks/run/BootTestRunIntegrationTests.java | {
"start": 4967,
"end": 5116
} | class ____ has not been configured and it could not be resolved from classpath");
}
else {
assertThat(result.getOutput())
.contains("Main | name |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/cglib/transform/impl/AddInitTransformer.java | {
"start": 1099,
"end": 2239
} | class ____ extends ClassEmitterTransformer {
private MethodInfo info;
public AddInitTransformer(Method method) {
info = ReflectUtils.getMethodInfo(method);
Type[] types = info.getSignature().getArgumentTypes();
if (types.length != 1 ||
!types[0].equals(Constants.TYPE_OBJECT) ||
!info.getSignature().getReturnType().equals(Type.VOID_TYPE)) {
throw new IllegalArgumentException(method + " illegal signature");
}
}
@Override
public CodeEmitter begin_method(int access, Signature sig, Type[] exceptions) {
final CodeEmitter emitter = super.begin_method(access, sig, exceptions);
if (sig.getName().equals(Constants.CONSTRUCTOR_NAME)) {
return new CodeEmitter(emitter) {
@Override
public void visitInsn(int opcode) {
if (opcode == Constants.RETURN) {
load_this();
invoke(info);
}
super.visitInsn(opcode);
}
};
}
return emitter;
}
}
| AddInitTransformer |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/core/TransformerTest.java | {
"start": 850,
"end": 5572
} | class ____ extends RxJavaTest {
@Test
public void flowableTransformerThrows() {
try {
Flowable.just(1).compose(new FlowableTransformer<Integer, Integer>() {
@Override
public Publisher<Integer> apply(Flowable<Integer> v) {
throw new TestException("Forced failure");
}
});
fail("Should have thrown!");
} catch (TestException ex) {
assertEquals("Forced failure", ex.getMessage());
}
}
@Test
public void observableTransformerThrows() {
try {
Observable.just(1).compose(new ObservableTransformer<Integer, Integer>() {
@Override
public Observable<Integer> apply(Observable<Integer> v) {
throw new TestException("Forced failure");
}
});
fail("Should have thrown!");
} catch (TestException ex) {
assertEquals("Forced failure", ex.getMessage());
}
}
@Test
public void singleTransformerThrows() {
try {
Single.just(1).compose(new SingleTransformer<Integer, Integer>() {
@Override
public Single<Integer> apply(Single<Integer> v) {
throw new TestException("Forced failure");
}
});
fail("Should have thrown!");
} catch (TestException ex) {
assertEquals("Forced failure", ex.getMessage());
}
}
@Test
public void maybeTransformerThrows() {
try {
Maybe.just(1).compose(new MaybeTransformer<Integer, Integer>() {
@Override
public Maybe<Integer> apply(Maybe<Integer> v) {
throw new TestException("Forced failure");
}
});
fail("Should have thrown!");
} catch (TestException ex) {
assertEquals("Forced failure", ex.getMessage());
}
}
@Test
public void completableTransformerThrows() {
try {
Completable.complete().compose(new CompletableTransformer() {
@Override
public Completable apply(Completable v) {
throw new TestException("Forced failure");
}
});
fail("Should have thrown!");
} catch (TestException ex) {
assertEquals("Forced failure", ex.getMessage());
}
}
// Test demos for signature generics in compose() methods. Just needs to compile.
@Test
public void observableGenericsSignatureTest() {
A<String, Integer> a = new A<String, Integer>() { };
Observable.just(a).compose(TransformerTest.<String>testObservableTransformerCreator());
}
@Test
public void singleGenericsSignatureTest() {
A<String, Integer> a = new A<String, Integer>() { };
Single.just(a).compose(TransformerTest.<String>testSingleTransformerCreator());
}
@Test
public void maybeGenericsSignatureTest() {
A<String, Integer> a = new A<String, Integer>() { };
Maybe.just(a).compose(TransformerTest.<String>testMaybeTransformerCreator());
}
@Test
public void flowableGenericsSignatureTest() {
A<String, Integer> a = new A<String, Integer>() { };
Flowable.just(a).compose(TransformerTest.<String>testFlowableTransformerCreator());
}
private static <T> ObservableTransformer<A<T, ?>, B<T>> testObservableTransformerCreator() {
return new ObservableTransformer<A<T, ?>, B<T>>() {
@Override
public ObservableSource<B<T>> apply(Observable<A<T, ?>> a) {
return Observable.empty();
}
};
}
private static <T> SingleTransformer<A<T, ?>, B<T>> testSingleTransformerCreator() {
return new SingleTransformer<A<T, ?>, B<T>>() {
@Override
public SingleSource<B<T>> apply(Single<A<T, ?>> a) {
return Single.never();
}
};
}
private static <T> MaybeTransformer<A<T, ?>, B<T>> testMaybeTransformerCreator() {
return new MaybeTransformer<A<T, ?>, B<T>>() {
@Override
public MaybeSource<B<T>> apply(Maybe<A<T, ?>> a) {
return Maybe.empty();
}
};
}
private static <T> FlowableTransformer<A<T, ?>, B<T>> testFlowableTransformerCreator() {
return new FlowableTransformer<A<T, ?>, B<T>>() {
@Override
public Publisher<B<T>> apply(Flowable<A<T, ?>> a) {
return Flowable.empty();
}
};
}
}
| TransformerTest |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/View.java | {
"start": 1675,
"end": 6149
} | class ____ {
final Controller.RequestContext rc;
int nestLevel = 0;
boolean wasInline;
@Inject ViewContext(Controller.RequestContext ctx) {
rc = ctx;
}
public int nestLevel() { return nestLevel; }
public boolean wasInline() { return wasInline; }
public void set(int nestLevel, boolean wasInline) {
this.nestLevel = nestLevel;
this.wasInline = wasInline;
}
public Controller.RequestContext requestContext() { return rc; }
}
private ViewContext vc;
@Inject Injector injector;
public View() {
// Makes injection in subclasses optional.
// Time will tell if this buy us more than the NPEs :)
}
public View(ViewContext ctx) {
vc = ctx;
}
/**
* The API to render the view
*/
public abstract void render();
public ViewContext context() {
if (vc == null) {
if (injector == null) {
// One downside of making the injection in subclasses optional
throw new WebAppException(join("Error accessing ViewContext from a\n",
"child constructor, either move the usage of the View methods\n",
"out of the constructor or inject the ViewContext into the\n",
"constructor"));
}
vc = injector.getInstance(ViewContext.class);
}
return vc;
}
public Throwable error() {
return context().requestContext().error;
}
public int status() {
return context().requestContext().status;
}
public boolean inDevMode() {
return context().requestContext().devMode;
}
public Injector injector() {
return context().requestContext().injector;
}
public <T> T getInstance(Class<T> cls) {
return injector().getInstance(cls);
}
public HttpServletRequest request() {
return context().requestContext().getRequest();
}
public HttpServletResponse response() {
return context().requestContext().response;
}
public Map<String, String> moreParams() {
return context().requestContext().moreParams();
}
/**
* Get the cookies
* @return the cookies map
*/
public Map<String, Cookie> cookies() {
return context().requestContext().cookies();
}
public ServletOutputStream outputStream() {
try {
return response().getOutputStream();
} catch (IOException e) {
throw new WebAppException(e);
}
}
public PrintWriter writer() {
try {
return response().getWriter();
} catch (IOException e) {
throw new WebAppException(e);
}
}
/**
* Lookup a value from the current context.
* @param key to lookup
* @param defaultValue if key is missing
* @return the value of the key or the default value
*/
public String $(String key, String defaultValue) {
// moreParams take precedence
String value = moreParams().get(key);
if (value == null) {
value = request().getParameter(key);
}
return value == null ? defaultValue : value;
}
/**
* Lookup a value from the current context
* @param key to lookup
* @return the value of the key or empty string
*/
public String $(String key) {
return $(key, "");
}
/**
* Set a context value. (e.g. UI properties for sub views.)
* Try to avoid any application (vs view/ui) logic.
* @param key to set
* @param value to set
*/
public void set(String key, String value) {
moreParams().put(key, value);
}
public String root() {
String root = System.getenv(ApplicationConstants.APPLICATION_WEB_PROXY_BASE_ENV);
if(root == null || root.isEmpty()) {
root = "/";
}
return root;
}
public String prefix() {
if(context().rc.prefix == null) {
return root();
} else {
return ujoin(root(), context().rc.prefix);
}
}
public void setTitle(String title) {
set(TITLE, title);
}
public void setTitle(String title, String url) {
setTitle(title);
set(TITLE_LINK, url);
}
/**
* Create an url from url components
* @param parts components to join
* @return an url string
*/
public String root_url(String... parts) {
return ujoin(root(), parts);
}
/**
* Create an url from url components
* @param parts components to join
* @return an url string
*/
public String url(String... parts) {
return ujoin(prefix(), parts);
}
public ResponseInfo info(String about) {
return getInstance(ResponseInfo.class).about(about);
}
/**
* Render a sub-view
* @param cls the | ViewContext |
java | quarkusio__quarkus | independent-projects/qute/core/src/main/java/io/quarkus/qute/ResultMapper.java | {
"start": 402,
"end": 888
} | interface ____ extends WithPriority {
/**
*
* @param origin
* @param result
* @return {@code true} if this mapper applies to the given result
*/
default boolean appliesTo(Origin origin, Object result) {
return true;
}
/**
*
* @param result The result, never {@code null}
* @param expression The original expression
* @return the string value
*/
String map(Object result, Expression expression);
}
| ResultMapper |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/processor/internals/TaskManager.java | {
"start": 3467,
"end": 108072
} | class ____ {
private static final String BUG_ERROR_MESSAGE = "This indicates a bug. " +
"Please report at https://issues.apache.org/jira/projects/KAFKA/issues or to the dev-mailing list (https://kafka.apache.org/contact).";
private static final String INTERRUPTED_ERROR_MESSAGE = "Thread got interrupted. " + BUG_ERROR_MESSAGE;
// initialize the task list
// activeTasks needs to be concurrent as it can be accessed
// by QueryableState
private final Logger log;
private final Time time;
private final TasksRegistry tasks;
private final ProcessId processId;
private final String logPrefix;
private final Admin adminClient;
private final StateDirectory stateDirectory;
private final ProcessingMode processingMode;
private final ChangelogReader changelogReader;
private final TopologyMetadata topologyMetadata;
private final TaskExecutor taskExecutor;
private Consumer<byte[], byte[]> mainConsumer;
private DeleteRecordsResult deleteRecordsResult;
private boolean rebalanceInProgress = false; // if we are in the middle of a rebalance, it is not safe to commit
// includes assigned & initialized tasks and unassigned tasks we locked temporarily during rebalance
private final Set<TaskId> lockedTaskDirectories = new HashSet<>();
private final Map<TaskId, BackoffRecord> taskIdToBackoffRecord = new HashMap<>();
private final ActiveTaskCreator activeTaskCreator;
private final StandbyTaskCreator standbyTaskCreator;
private final StateUpdater stateUpdater;
private final DefaultTaskManager schedulingTaskManager;
TaskManager(final Time time,
final ChangelogReader changelogReader,
final ProcessId processId,
final String logPrefix,
final ActiveTaskCreator activeTaskCreator,
final StandbyTaskCreator standbyTaskCreator,
final TasksRegistry tasks,
final TopologyMetadata topologyMetadata,
final Admin adminClient,
final StateDirectory stateDirectory,
final StateUpdater stateUpdater,
final DefaultTaskManager schedulingTaskManager
) {
this.time = time;
this.processId = processId;
this.logPrefix = logPrefix;
this.adminClient = adminClient;
this.stateDirectory = stateDirectory;
this.changelogReader = changelogReader;
this.topologyMetadata = topologyMetadata;
this.activeTaskCreator = activeTaskCreator;
this.standbyTaskCreator = standbyTaskCreator;
this.processingMode = topologyMetadata.processingMode();
final LogContext logContext = new LogContext(logPrefix);
this.log = logContext.logger(getClass());
this.stateUpdater = stateUpdater;
this.schedulingTaskManager = schedulingTaskManager;
this.tasks = tasks;
this.taskExecutor = new TaskExecutor(
this.tasks,
this,
topologyMetadata.taskExecutionMetadata(),
logContext
);
}
void init() {
if (stateUpdater != null) {
this.stateUpdater.start();
}
}
void setMainConsumer(final Consumer<byte[], byte[]> mainConsumer) {
this.mainConsumer = mainConsumer;
}
public double totalProducerBlockedTime() {
return activeTaskCreator.totalProducerBlockedTime();
}
public ProcessId processId() {
return processId;
}
public TopologyMetadata topologyMetadata() {
return topologyMetadata;
}
ConsumerGroupMetadata consumerGroupMetadata() {
return mainConsumer.groupMetadata();
}
void consumerCommitSync(final Map<TopicPartition, OffsetAndMetadata> offsets) {
mainConsumer.commitSync(offsets);
}
StreamsProducer streamsProducer() {
return activeTaskCreator.streamsProducer();
}
boolean rebalanceInProgress() {
return rebalanceInProgress;
}
void handleRebalanceStart(final Set<String> subscribedTopics) {
topologyMetadata.addSubscribedTopicsFromMetadata(subscribedTopics, logPrefix);
tryToLockAllNonEmptyTaskDirectories();
rebalanceInProgress = true;
}
void handleRebalanceComplete() {
// we should pause consumer only within the listener since
// before then the assignment has not been updated yet.
if (stateUpdater == null) {
mainConsumer.pause(mainConsumer.assignment());
} else {
// All tasks that are owned by the task manager are ready and do not need to be paused
final Set<TopicPartition> partitionsNotToPause = tasks.allNonFailedTasks()
.stream()
.flatMap(task -> task.inputPartitions().stream())
.collect(Collectors.toSet());
final Set<TopicPartition> partitionsToPause = new HashSet<>(mainConsumer.assignment());
partitionsToPause.removeAll(partitionsNotToPause);
mainConsumer.pause(partitionsToPause);
}
releaseLockedUnassignedTaskDirectories();
rebalanceInProgress = false;
}
/**
* @throws TaskMigratedException
*/
boolean handleCorruption(final Set<TaskId> corruptedTasks) {
final Set<TaskId> activeTasks = new HashSet<>(tasks.activeTaskIds());
// We need to stop all processing, since we need to commit non-corrupted tasks as well.
maybeLockTasks(activeTasks);
final Set<Task> corruptedActiveTasks = new TreeSet<>(Comparator.comparing(Task::id));
final Set<Task> corruptedStandbyTasks = new TreeSet<>(Comparator.comparing(Task::id));
for (final TaskId taskId : corruptedTasks) {
final Task task = tasks.task(taskId);
if (task.isActive()) {
corruptedActiveTasks.add(task);
} else {
corruptedStandbyTasks.add(task);
}
}
// Make sure to clean up any corrupted standby tasks in their entirety before committing
// since TaskMigrated can be thrown and the resulting handleLostAll will only clean up active tasks
closeDirtyAndRevive(corruptedStandbyTasks, true);
// We need to commit before closing the corrupted active tasks since this will force the ongoing txn to abort
try {
final Collection<Task> tasksToCommit = tasks.allTasksPerId()
.values()
.stream()
.filter(t -> t.state() == Task.State.RUNNING)
.filter(t -> !corruptedTasks.contains(t.id()))
.collect(Collectors.toSet());
commitTasksAndMaybeUpdateCommittableOffsets(tasksToCommit, new HashMap<>());
} catch (final TaskCorruptedException e) {
log.info("Some additional tasks were found corrupted while trying to commit, these will be added to the " +
"tasks to clean and revive: {}", e.corruptedTasks());
corruptedActiveTasks.addAll(tasks.tasks(e.corruptedTasks()));
} catch (final TimeoutException e) {
log.info("Hit TimeoutException when committing all non-corrupted tasks, these will be closed and revived");
final Collection<Task> uncorruptedTasks = new HashSet<>(tasks.activeTasks());
uncorruptedTasks.removeAll(corruptedActiveTasks);
// Those tasks which just timed out can just be closed dirty without marking changelogs as corrupted
closeDirtyAndRevive(uncorruptedTasks, false);
}
closeDirtyAndRevive(corruptedActiveTasks, true);
maybeUnlockTasks(activeTasks);
return !corruptedActiveTasks.isEmpty();
}
private void closeDirtyAndRevive(final Collection<Task> taskWithChangelogs, final boolean markAsCorrupted) {
for (final Task task : taskWithChangelogs) {
if (task.state() != State.CLOSED) {
final Collection<TopicPartition> corruptedPartitions = task.changelogPartitions();
// mark corrupted partitions to not be checkpointed, and then close the task as dirty
// TODO: this step should be removed as we complete migrating to state updater
if (markAsCorrupted && stateUpdater == null) {
task.markChangelogAsCorrupted(corruptedPartitions);
}
try {
// we do not need to take the returned offsets since we are not going to commit anyways;
// this call is only used for active tasks to flush the cache before suspending and
// closing the topology
task.prepareCommit(false);
} catch (final RuntimeException swallow) {
log.warn("Error flushing cache for corrupted task {}. " +
"Since the task is closing dirty, the following exception is swallowed: {}",
task.id(), swallow.getMessage());
}
try {
task.suspend();
// we need to enforce a checkpoint that removes the corrupted partitions
if (markAsCorrupted) {
task.postCommit(true);
}
} catch (final RuntimeException swallow) {
log.warn("Error suspending corrupted task {}. " +
"Since the task is closing dirty, the following exception is swallowed: {}",
task.id(), swallow.getMessage());
}
task.closeDirty();
}
// For active tasks pause their input partitions so we won't poll any more records
// for this task until it has been re-initialized;
// Note, closeDirty already clears the partition-group for the task.
if (task.isActive()) {
final Set<TopicPartition> currentAssignment = mainConsumer.assignment();
final Set<TopicPartition> taskInputPartitions = task.inputPartitions();
final Set<TopicPartition> assignedToPauseAndReset =
intersection(HashSet::new, currentAssignment, taskInputPartitions);
if (!assignedToPauseAndReset.equals(taskInputPartitions)) {
log.warn(
"Expected the current consumer assignment {} to contain the input partitions {}. " +
"Will proceed to recover.",
currentAssignment,
taskInputPartitions
);
}
task.addPartitionsForOffsetReset(assignedToPauseAndReset);
}
if (stateUpdater != null) {
tasks.removeTask(task);
}
task.revive();
if (stateUpdater != null) {
tasks.addPendingTasksToInit(Collections.singleton(task));
}
}
}
private Map<Task, Set<TopicPartition>> assignStartupTasks(final Map<TaskId, Set<TopicPartition>> tasksToAssign,
final String threadLogPrefix,
final TopologyMetadata topologyMetadata,
final ChangelogRegister changelogReader) {
if (stateDirectory.hasStartupTasks()) {
final Map<Task, Set<TopicPartition>> assignedTasks = new HashMap<>(tasksToAssign.size());
for (final Map.Entry<TaskId, Set<TopicPartition>> entry : tasksToAssign.entrySet()) {
final TaskId taskId = entry.getKey();
final Task task = stateDirectory.removeStartupTask(taskId);
if (task != null) {
// replace our dummy values with the real ones, now we know our thread and assignment
final Set<TopicPartition> inputPartitions = entry.getValue();
task.stateManager().assignToStreamThread(new LogContext(threadLogPrefix), changelogReader, inputPartitions);
updateInputPartitionsOfStandbyTaskIfTheyChanged(task, inputPartitions);
assignedTasks.put(task, inputPartitions);
}
}
return assignedTasks;
} else {
return Collections.emptyMap();
}
}
/**
* @throws TaskMigratedException if the task producer got fenced (EOS only)
* @throws StreamsException fatal error while creating / initializing the task
*
* public for upgrade testing only
*/
public void handleAssignment(final Map<TaskId, Set<TopicPartition>> activeTasks,
final Map<TaskId, Set<TopicPartition>> standbyTasks) {
log.info("Handle new assignment with:\n" +
"\tNew active tasks: {}\n" +
"\tNew standby tasks: {}\n" +
"\tExisting active tasks: {}\n" +
"\tExisting standby tasks: {}",
activeTasks.keySet(), standbyTasks.keySet(), activeTaskIds(), standbyTaskIds());
topologyMetadata.addSubscribedTopicsFromAssignment(
activeTasks.values().stream().flatMap(Collection::stream).collect(Collectors.toSet()),
logPrefix
);
final Map<TaskId, Set<TopicPartition>> activeTasksToCreate = new HashMap<>(activeTasks);
final Map<TaskId, Set<TopicPartition>> standbyTasksToCreate = new HashMap<>(standbyTasks);
final Map<Task, Set<TopicPartition>> tasksToRecycle = new HashMap<>();
final Set<Task> tasksToCloseClean = new TreeSet<>(Comparator.comparing(Task::id));
final Set<TaskId> tasksToLock =
tasks.allTaskIds().stream()
.filter(x -> activeTasksToCreate.containsKey(x) || standbyTasksToCreate.containsKey(x))
.collect(Collectors.toSet());
maybeLockTasks(tasksToLock);
// first put aside those unrecognized tasks because of unknown named-topologies
tasks.clearPendingTasksToCreate();
tasks.addPendingActiveTasksToCreate(pendingTasksToCreate(activeTasksToCreate));
tasks.addPendingStandbyTasksToCreate(pendingTasksToCreate(standbyTasksToCreate));
// first rectify all existing tasks:
// 1. for tasks that are already owned, just update input partitions / resume and skip re-creating them
// 2. for tasks that have changed active/standby status, just recycle and skip re-creating them
// 3. otherwise, close them since they are no longer owned
final Map<TaskId, RuntimeException> failedTasks = new LinkedHashMap<>();
if (stateUpdater == null) {
handleTasksWithoutStateUpdater(activeTasksToCreate, standbyTasksToCreate, tasksToRecycle, tasksToCloseClean);
} else {
handleTasksWithStateUpdater(
activeTasksToCreate,
standbyTasksToCreate,
tasksToRecycle,
tasksToCloseClean,
failedTasks
);
failedTasks.putAll(collectExceptionsAndFailedTasksFromStateUpdater());
}
final Map<TaskId, RuntimeException> taskCloseExceptions = closeAndRecycleTasks(tasksToRecycle, tasksToCloseClean);
maybeUnlockTasks(tasksToLock);
failedTasks.putAll(taskCloseExceptions);
maybeThrowTaskExceptions(failedTasks);
createNewTasks(activeTasksToCreate, standbyTasksToCreate);
}
// Wrap and throw the exception in the following order
// if at least one of the exception is a non-streams exception, then wrap and throw since it should be handled by thread's handler
// if at least one of the exception is a streams exception, then directly throw since it should be handled by thread's handler
// if at least one of the exception is a task-migrated exception, then directly throw since it indicates all tasks are lost
// otherwise, all the exceptions are task-corrupted, then merge their tasks and throw a single one
// TODO: move task-corrupted and task-migrated out of the public errors package since they are internal errors and always be
// handled by Streams library itself
private void maybeThrowTaskExceptions(final Map<TaskId, RuntimeException> taskExceptions) {
if (!taskExceptions.isEmpty()) {
log.error("Get exceptions for the following tasks: {}", taskExceptions);
final Set<TaskId> aggregatedCorruptedTaskIds = new HashSet<>();
StreamsException lastFatal = null;
TaskMigratedException lastTaskMigrated = null;
for (final Map.Entry<TaskId, RuntimeException> entry : taskExceptions.entrySet()) {
final TaskId taskId = entry.getKey();
final RuntimeException exception = entry.getValue();
if (exception instanceof StreamsException) {
if (exception instanceof TaskMigratedException) {
lastTaskMigrated = (TaskMigratedException) exception;
} else if (exception instanceof TaskCorruptedException) {
log.warn("Encounter corrupted task " + taskId + ", will group it with other corrupted tasks " +
"and handle together", exception);
aggregatedCorruptedTaskIds.add(taskId);
} else {
((StreamsException) exception).setTaskId(taskId);
lastFatal = (StreamsException) exception;
}
} else if (exception instanceof KafkaException) {
lastFatal = new StreamsException(exception, taskId);
} else {
lastFatal = new StreamsException("Encounter unexpected fatal error for task " + taskId, exception, taskId);
}
}
if (lastFatal != null) {
throw lastFatal;
} else if (lastTaskMigrated != null) {
throw lastTaskMigrated;
} else {
throw new TaskCorruptedException(aggregatedCorruptedTaskIds);
}
}
}
private void createNewTasks(final Map<TaskId, Set<TopicPartition>> activeTasksToCreate,
final Map<TaskId, Set<TopicPartition>> standbyTasksToCreate) {
final Collection<Task> newActiveTasks = activeTaskCreator.createTasks(mainConsumer, activeTasksToCreate);
final Collection<Task> newStandbyTasks = standbyTaskCreator.createTasks(standbyTasksToCreate);
if (stateUpdater == null) {
tasks.addActiveTasks(newActiveTasks);
tasks.addStandbyTasks(newStandbyTasks);
} else {
tasks.addPendingTasksToInit(newActiveTasks);
tasks.addPendingTasksToInit(newStandbyTasks);
}
}
private void handleTasksWithoutStateUpdater(final Map<TaskId, Set<TopicPartition>> activeTasksToCreate,
final Map<TaskId, Set<TopicPartition>> standbyTasksToCreate,
final Map<Task, Set<TopicPartition>> tasksToRecycle,
final Set<Task> tasksToCloseClean) {
final Map<Task, Set<TopicPartition>> startupStandbyTasksToRecycle = assignStartupTasks(activeTasksToCreate, logPrefix, topologyMetadata, changelogReader);
final Map<Task, Set<TopicPartition>> startupStandbyTasksToUse = assignStartupTasks(standbyTasksToCreate, logPrefix, topologyMetadata, changelogReader);
// recycle the startup standbys to active
tasks.addStandbyTasks(startupStandbyTasksToRecycle.keySet());
// use startup Standbys as real Standby tasks
tasks.addStandbyTasks(startupStandbyTasksToUse.keySet());
for (final Task task : tasks.allTasks()) {
final TaskId taskId = task.id();
if (activeTasksToCreate.containsKey(taskId)) {
if (task.isActive()) {
final Set<TopicPartition> topicPartitions = activeTasksToCreate.get(taskId);
if (tasks.updateActiveTaskInputPartitions(task, topicPartitions)) {
task.updateInputPartitions(topicPartitions, topologyMetadata.nodeToSourceTopics(task.id()));
}
task.resume();
} else {
tasksToRecycle.put(task, activeTasksToCreate.get(taskId));
}
activeTasksToCreate.remove(taskId);
} else if (standbyTasksToCreate.containsKey(taskId)) {
if (!task.isActive()) {
updateInputPartitionsOfStandbyTaskIfTheyChanged(task, standbyTasksToCreate.get(taskId));
task.resume();
} else {
tasksToRecycle.put(task, standbyTasksToCreate.get(taskId));
}
standbyTasksToCreate.remove(taskId);
} else {
tasksToCloseClean.add(task);
}
}
}
private void updateInputPartitionsOfStandbyTaskIfTheyChanged(final Task task,
final Set<TopicPartition> inputPartitions) {
/*
We should only update input partitions of a standby task if the input partitions really changed. Updating the
input partitions of tasks also updates the mapping from source nodes to input topics in the processor topology
within the task. The mapping is updated with the topics from the topology metadata. The topology metadata does
not prefix intermediate internal topics with the application ID. Thus, if a standby task has input partitions
from an intermediate internal topic the update of the mapping in the processor topology leads to an invalid
topology exception during recycling of a standby task to an active task when the input queues are created. This
is because the input topics in the processor topology and the input partitions of the task do not match because
the former miss the application ID prefix.
For standby task that have only input partitions from intermediate internal topics this check avoids the invalid
topology exception. Unfortunately, a subtopology might have input partitions subscribed to with a regex
additionally intermediate internal topics which might still lead to an invalid topology exception during recycling
irrespectively of this check here. Thus, there is still a bug to fix here.
*/
if (!task.inputPartitions().equals(inputPartitions)) {
task.updateInputPartitions(inputPartitions, topologyMetadata.nodeToSourceTopics(task.id()));
}
}
private void handleTasksWithStateUpdater(final Map<TaskId, Set<TopicPartition>> activeTasksToCreate,
final Map<TaskId, Set<TopicPartition>> standbyTasksToCreate,
final Map<Task, Set<TopicPartition>> tasksToRecycle,
final Set<Task> tasksToCloseClean,
final Map<TaskId, RuntimeException> failedTasks) {
handleTasksPendingInitialization();
handleStartupTaskReuse(activeTasksToCreate, standbyTasksToCreate, failedTasks);
handleRestoringAndUpdatingTasks(activeTasksToCreate, standbyTasksToCreate, failedTasks);
handleRunningAndSuspendedTasks(activeTasksToCreate, standbyTasksToCreate, tasksToRecycle, tasksToCloseClean);
}
private void handleTasksPendingInitialization() {
// All tasks pending initialization are not part of the usual bookkeeping
final Set<Task> tasksToCloseDirty = new TreeSet<>(Comparator.comparing(Task::id));
for (final Task task : tasks.drainPendingTasksToInit()) {
closeTaskClean(task, tasksToCloseDirty, new HashMap<>());
}
for (final Task task : tasksToCloseDirty) {
closeTaskDirty(task, false);
}
}
private void handleStartupTaskReuse(final Map<TaskId, Set<TopicPartition>> activeTasksToCreate,
final Map<TaskId, Set<TopicPartition>> standbyTasksToCreate,
final Map<TaskId, RuntimeException> failedTasks) {
final Map<Task, Set<TopicPartition>> startupStandbyTasksToRecycle = assignStartupTasks(activeTasksToCreate, logPrefix, topologyMetadata, changelogReader);
final Map<Task, Set<TopicPartition>> startupStandbyTasksToUse = assignStartupTasks(standbyTasksToCreate, logPrefix, topologyMetadata, changelogReader);
// recycle the startup standbys to active, and remove them from the set of actives that need to be created
if (!startupStandbyTasksToRecycle.isEmpty()) {
final Set<Task> tasksToCloseDirty = new TreeSet<>(Comparator.comparing(Task::id));
for (final Map.Entry<Task, Set<TopicPartition>> entry : startupStandbyTasksToRecycle.entrySet()) {
final Task task = entry.getKey();
recycleTaskFromStateUpdater(task, entry.getValue(), tasksToCloseDirty, failedTasks);
activeTasksToCreate.remove(task.id());
}
// if any standby tasks failed to recycle, close them dirty
tasksToCloseDirty.forEach(task ->
closeTaskDirty(task, false)
);
}
// use startup Standbys as real Standby tasks
if (!startupStandbyTasksToUse.isEmpty()) {
tasks.addPendingTasksToInit(startupStandbyTasksToUse.keySet());
startupStandbyTasksToUse.keySet().forEach(task -> standbyTasksToCreate.remove(task.id()));
}
}
private void handleRunningAndSuspendedTasks(final Map<TaskId, Set<TopicPartition>> activeTasksToCreate,
final Map<TaskId, Set<TopicPartition>> standbyTasksToCreate,
final Map<Task, Set<TopicPartition>> tasksToRecycle,
final Set<Task> tasksToCloseClean) {
for (final Task task : tasks.allNonFailedTasks()) {
if (!task.isActive()) {
throw new IllegalStateException("Standby tasks should only be managed by the state updater, " +
"but standby task " + task.id() + " is managed by the stream thread");
}
final TaskId taskId = task.id();
if (activeTasksToCreate.containsKey(taskId)) {
handleReassignedActiveTask(task, activeTasksToCreate.get(taskId));
activeTasksToCreate.remove(taskId);
} else if (standbyTasksToCreate.containsKey(taskId)) {
tasksToRecycle.put(task, standbyTasksToCreate.get(taskId));
standbyTasksToCreate.remove(taskId);
} else {
tasksToCloseClean.add(task);
}
}
}
private void handleReassignedActiveTask(final Task task,
final Set<TopicPartition> inputPartitions) {
if (tasks.updateActiveTaskInputPartitions(task, inputPartitions)) {
task.updateInputPartitions(inputPartitions, topologyMetadata.nodeToSourceTopics(task.id()));
}
if (task.state() == State.SUSPENDED) {
tasks.removeTask(task);
task.resume();
stateUpdater.add(task);
}
}
private void handleRestoringAndUpdatingTasks(final Map<TaskId, Set<TopicPartition>> activeTasksToCreate,
final Map<TaskId, Set<TopicPartition>> standbyTasksToCreate,
final Map<TaskId, RuntimeException> failedTasks) {
final Map<Task, Set<TopicPartition>> tasksToRecycleFromStateUpdater = new HashMap<>();
final Set<Task> tasksToCloseCleanFromStateUpdater = new TreeSet<>(Comparator.comparing(Task::id));
final Set<Task> tasksToCloseDirtyFromStateUpdater = new TreeSet<>(Comparator.comparing(Task::id));
handleTasksInStateUpdater(
activeTasksToCreate,
standbyTasksToCreate,
tasksToRecycleFromStateUpdater,
tasksToCloseCleanFromStateUpdater,
tasksToCloseDirtyFromStateUpdater,
failedTasks
);
tasksToRecycleFromStateUpdater.forEach((task, inputPartitions) ->
recycleTaskFromStateUpdater(
task,
inputPartitions,
tasksToCloseDirtyFromStateUpdater,
failedTasks
)
);
tasksToCloseCleanFromStateUpdater.forEach(task ->
closeTaskClean(task, tasksToCloseDirtyFromStateUpdater, failedTasks)
);
tasksToCloseDirtyFromStateUpdater.forEach(task ->
closeTaskDirty(task, false)
);
}
/**
 * Classifies the tasks currently owned by the state updater against the new assignment and
 * removes from the state updater every task that must change:
 * <ul>
 *   <li>active task re-assigned as active with different input partitions: update the partitions</li>
 *   <li>standby task re-assigned as active: recycle to active</li>
 *   <li>active task re-assigned as standby: recycle to standby</li>
 *   <li>task not re-assigned at all: close it</li>
 * </ul>
 * Tasks that stay in the state updater unchanged are simply dropped from the to-create maps.
 * The removal futures are awaited (and results dispatched) only after the full scan, by the
 * four helper calls at the end.
 */
private void handleTasksInStateUpdater(final Map<TaskId, Set<TopicPartition>> activeTasksToCreate,
                                       final Map<TaskId, Set<TopicPartition>> standbyTasksToCreate,
                                       final Map<Task, Set<TopicPartition>> tasksToRecycle,
                                       final Set<Task> tasksToCloseCleanFromStateUpdater,
                                       final Set<Task> tasksToCloseDirtyFromStateUpdater,
                                       final Map<TaskId, RuntimeException> failedTasks) {
    final Map<TaskId, Set<TopicPartition>> newInputPartitions = new HashMap<>();
    final Map<TaskId, Set<TopicPartition>> standbyInputPartitions = new HashMap<>();
    final Map<TaskId, Set<TopicPartition>> activeInputPartitions = new HashMap<>();
    // LinkedHashMaps keep the await order below deterministic (same order as the scan)
    final Map<TaskId, CompletableFuture<StateUpdater.RemovedTaskResult>> futuresForUpdatingInputPartitions = new LinkedHashMap<>();
    final Map<TaskId, CompletableFuture<StateUpdater.RemovedTaskResult>> futuresForActiveTasksToRecycle = new LinkedHashMap<>();
    final Map<TaskId, CompletableFuture<StateUpdater.RemovedTaskResult>> futuresForStandbyTasksToRecycle = new LinkedHashMap<>();
    final Map<TaskId, CompletableFuture<StateUpdater.RemovedTaskResult>> futuresForTasksToClose = new LinkedHashMap<>();
    for (final Task task : stateUpdater.tasks()) {
        final TaskId taskId = task.id();
        if (activeTasksToCreate.containsKey(taskId)) {
            if (task.isActive()) {
                // re-assigned as active: only act if the input partitions changed
                if (!task.inputPartitions().equals(activeTasksToCreate.get(taskId))) {
                    final CompletableFuture<StateUpdater.RemovedTaskResult> future = stateUpdater.remove(taskId);
                    futuresForUpdatingInputPartitions.put(taskId, future);
                    newInputPartitions.put(taskId, activeTasksToCreate.get(taskId));
                }
            } else {
                // standby re-assigned as active: recycle it
                final CompletableFuture<StateUpdater.RemovedTaskResult> future = stateUpdater.remove(taskId);
                futuresForStandbyTasksToRecycle.put(taskId, future);
                activeInputPartitions.put(taskId, activeTasksToCreate.get(taskId));
            }
            activeTasksToCreate.remove(taskId);
        } else if (standbyTasksToCreate.containsKey(taskId)) {
            if (task.isActive()) {
                // active re-assigned as standby: recycle it
                final CompletableFuture<StateUpdater.RemovedTaskResult> future = stateUpdater.remove(taskId);
                futuresForActiveTasksToRecycle.put(taskId, future);
                standbyInputPartitions.put(taskId, standbyTasksToCreate.get(taskId));
            }
            standbyTasksToCreate.remove(taskId);
        } else {
            // not in the new assignment at all: remove and close
            final CompletableFuture<StateUpdater.RemovedTaskResult> future = stateUpdater.remove(taskId);
            futuresForTasksToClose.put(taskId, future);
        }
    }
    updateInputPartitions(futuresForUpdatingInputPartitions, newInputPartitions, failedTasks);
    addToActiveTasksToRecycle(futuresForActiveTasksToRecycle, standbyInputPartitions, tasksToRecycle, failedTasks);
    addToStandbyTasksToRecycle(futuresForStandbyTasksToRecycle, activeInputPartitions, tasksToRecycle, failedTasks);
    addToTasksToClose(futuresForTasksToClose, tasksToCloseCleanFromStateUpdater, tasksToCloseDirtyFromStateUpdater);
}
// For tasks removed from the state updater because their input partitions changed:
// apply the new partitions and put each task straight back into the state updater.
private void updateInputPartitions(final Map<TaskId, CompletableFuture<StateUpdater.RemovedTaskResult>> futures,
                                   final Map<TaskId, Set<TopicPartition>> newInputPartitions,
                                   final Map<TaskId, RuntimeException> failedTasks) {
    getNonFailedTasks(futures, failedTasks).forEach(removedTask -> {
        final TaskId taskId = removedTask.id();
        removedTask.updateInputPartitions(
            newInputPartitions.get(taskId),
            topologyMetadata.nodeToSourceTopics(taskId)
        );
        stateUpdater.add(removedTask);
    });
}
// Registers successfully-removed active tasks for recycling into standbys,
// paired with their future standby input partitions.
private void addToActiveTasksToRecycle(final Map<TaskId, CompletableFuture<StateUpdater.RemovedTaskResult>> futures,
                                       final Map<TaskId, Set<TopicPartition>> standbyInputPartitions,
                                       final Map<Task, Set<TopicPartition>> tasksToRecycle,
                                       final Map<TaskId, RuntimeException> failedTasks) {
    getNonFailedTasks(futures, failedTasks).forEach(removedTask ->
        tasksToRecycle.put(removedTask, standbyInputPartitions.get(removedTask.id()))
    );
}
// Registers successfully-removed standby tasks for recycling into actives,
// paired with their future active input partitions.
private void addToStandbyTasksToRecycle(final Map<TaskId, CompletableFuture<StateUpdater.RemovedTaskResult>> futures,
                                        final Map<TaskId, Set<TopicPartition>> activeInputPartitions,
                                        final Map<Task, Set<TopicPartition>> tasksToRecycle,
                                        final Map<TaskId, RuntimeException> failedTasks) {
    getNonFailedTasks(futures, failedTasks).forEach(removedTask ->
        tasksToRecycle.put(removedTask, activeInputPartitions.get(removedTask.id()))
    );
}
// Awaits each pending state-updater removal and yields only the tasks that were removed
// successfully; tasks whose removal failed are recorded in failedTasks (via checkIfTaskFailed)
// and filtered out, as are tasks whose future timed out or errored (waitForFuture returns null).
// NOTE: the returned stream is lazy — the blocking waitForFuture() calls happen one-by-one
// as the caller consumes it, interleaved with the caller's side effects.
private Stream<Task> getNonFailedTasks(final Map<TaskId, CompletableFuture<StateUpdater.RemovedTaskResult>> futures,
                                       final Map<TaskId, RuntimeException> failedTasks) {
    return futures.entrySet().stream()
        .map(entry -> waitForFuture(entry.getKey(), entry.getValue()))
        .filter(Objects::nonNull)
        .map(removedTaskResult -> checkIfTaskFailed(removedTaskResult, failedTasks))
        .filter(Objects::nonNull);
}
// Splits the tasks removed from the state updater into those that can be closed cleanly and
// those that must be closed dirty because their removal completed with an exception.
private void addToTasksToClose(final Map<TaskId, CompletableFuture<StateUpdater.RemovedTaskResult>> futures,
                               final Set<Task> tasksToCloseCleanFromStateUpdater,
                               final Set<Task> tasksToCloseDirtyFromStateUpdater) {
    for (final Map.Entry<TaskId, CompletableFuture<StateUpdater.RemovedTaskResult>> entry : futures.entrySet()) {
        final StateUpdater.RemovedTaskResult removedTaskResult = waitForFuture(entry.getKey(), entry.getValue());
        if (removedTaskResult == null) {
            // removal failed or timed out; nothing to close here
            continue;
        }
        if (removedTaskResult.exception().isPresent()) {
            tasksToCloseDirtyFromStateUpdater.add(removedTaskResult.task());
        } else {
            tasksToCloseCleanFromStateUpdater.add(removedTaskResult.task());
        }
    }
}
// Returns the removed task if its removal succeeded, or null after recording the failure.
private Task checkIfTaskFailed(final StateUpdater.RemovedTaskResult removedTaskResult,
                               final Map<TaskId, RuntimeException> failedTasks) {
    final Task task = removedTaskResult.task();
    if (!removedTaskResult.exception().isPresent()) {
        return task;
    }
    // removal failed: remember the exception and hand the task back to the stream thread's bookkeeping
    failedTasks.put(task.id(), removedTaskResult.exception().get());
    tasks.addFailedTask(task);
    return null;
}
// Blocks (up to 5 minutes) on a pending task removal from the state updater.
// Returns the removal result, or null when the removal errored or timed out so that callers
// can simply skip the task; a null *result* from a completed future is a bug and throws.
// An InterruptedException is re-interrupted and escalated, since it should never happen here.
private StateUpdater.RemovedTaskResult waitForFuture(final TaskId taskId,
                                                     final CompletableFuture<StateUpdater.RemovedTaskResult> future) {
    final StateUpdater.RemovedTaskResult removedTaskResult;
    try {
        // generous timeout: only trips if the state updater thread is stuck or dead
        removedTaskResult = future.get(5, TimeUnit.MINUTES);
        if (removedTaskResult == null) {
            throw new IllegalStateException("Task " + taskId + " was not found in the state updater. "
                + BUG_ERROR_MESSAGE);
        }
        return removedTaskResult;
    } catch (final ExecutionException executionException) {
        // the state updater already recorded this task as failed; caller skips it
        log.warn("An exception happened when removing task {} from the state updater. The task was added to the " +
            "failed task in the state updater: ",
            taskId, executionException);
        return null;
    } catch (final InterruptedException shouldNotHappen) {
        // restore the interrupt flag before escalating
        Thread.currentThread().interrupt();
        log.error(INTERRUPTED_ERROR_MESSAGE, shouldNotHappen);
        throw new IllegalStateException(INTERRUPTED_ERROR_MESSAGE, shouldNotHappen);
    } catch (final java.util.concurrent.TimeoutException timeoutException) {
        log.warn("The state updater wasn't able to remove task {} in time. The state updater thread may be dead. "
            + BUG_ERROR_MESSAGE, taskId, timeoutException);
        return null;
    }
}
/**
 * Extracts from {@code tasksToCreate} the assigned tasks whose named topology is not (yet)
 * known to this client, removing them from the input map and returning them so they can be
 * created later once the topology metadata is refreshed.
 *
 * @param tasksToCreate assigned tasks to create; entries for unrecognized named topologies
 *                      are removed in place
 * @return the tasks put aside as pending, keyed by task id
 */
private Map<TaskId, Set<TopicPartition>> pendingTasksToCreate(final Map<TaskId, Set<TopicPartition>> tasksToCreate) {
    final Map<TaskId, Set<TopicPartition>> pendingTasks = new HashMap<>();
    final Iterator<Map.Entry<TaskId, Set<TopicPartition>>> iter = tasksToCreate.entrySet().iterator();
    while (iter.hasNext()) {
        final Map.Entry<TaskId, Set<TopicPartition>> entry = iter.next();
        final TaskId taskId = entry.getKey();
        // Bug fix: compare TaskId by equals(), not by '=='. The ids held by the state updater
        // are distinct objects from the assignment's ids, so reference equality was always
        // false and tasks owned by the state updater were wrongly treated as not owned.
        // (The contains() check above already uses equals() semantics.)
        final boolean taskIsOwned = tasks.allTaskIds().contains(taskId)
            || (stateUpdater != null && stateUpdater.tasks().stream().anyMatch(task -> task.id().equals(taskId)));
        if (taskId.topologyName() != null && !taskIsOwned && !topologyMetadata.namedTopologiesView().contains(taskId.topologyName())) {
            log.info("Cannot create the assigned task {} since it's topology name cannot be recognized, will put it " +
                "aside as pending for now and create later when topology metadata gets refreshed", taskId);
            pendingTasks.put(taskId, entry.getValue());
            iter.remove();
        }
    }
    return pendingTasks;
}
/**
 * Checkpoints, then closes or recycles the given tasks in four ordered phases:
 * (1) write checkpoints for all tasks to close or recycle; any task that still reports
 *     uncommitted offsets is demoted to a dirty close,
 * (2) close the remaining clean-closable tasks,
 * (3) recycle tasks (active to standby or standby to active),
 * (4) close dirty every task that failed a previous phase.
 * Exceptions are collected per task (first exception wins via putIfAbsent) and returned
 * instead of thrown, so all tasks get processed.
 *
 * @return the first exception recorded for each task that failed, in encounter order
 */
private Map<TaskId, RuntimeException> closeAndRecycleTasks(final Map<Task, Set<TopicPartition>> tasksToRecycle,
                                                           final Set<Task> tasksToCloseClean) {
    final Map<TaskId, RuntimeException> taskCloseExceptions = new LinkedHashMap<>();
    final Set<Task> tasksToCloseDirty = new TreeSet<>(Comparator.comparing(Task::id));
    // for all tasks to close or recycle, we should first write a checkpoint as in post-commit
    final List<Task> tasksToCheckpoint = new ArrayList<>(tasksToCloseClean);
    tasksToCheckpoint.addAll(tasksToRecycle.keySet());
    for (final Task task : tasksToCheckpoint) {
        try {
            // Note that we are not actually committing here but just check if we need to write checkpoint file:
            // 1) for active tasks prepareCommit should return empty if it has committed during suspension successfully,
            // and their changelog positions should not change at all postCommit would not write the checkpoint again.
            // 2) for standby tasks prepareCommit should always return empty, and then in postCommit we would probably
            // write the checkpoint file.
            final Map<TopicPartition, OffsetAndMetadata> offsets = task.prepareCommit(true);
            if (!offsets.isEmpty()) {
                log.error("Task {} should have been committed when it was suspended, but it reports non-empty " +
                    "offsets {} to commit; this means it failed during last commit and hence should be closed dirty",
                    task.id(), offsets);
                tasksToCloseDirty.add(task);
            } else if (!task.isActive()) {
                // For standby tasks, always try to first suspend before committing (checkpointing) it;
                // Since standby tasks do not actually need to commit offsets but only need to
                // flush / checkpoint state stores, so we only need to call postCommit here.
                task.suspend();
                task.postCommit(true);
            }
        } catch (final RuntimeException e) {
            final String uncleanMessage = String.format(
                "Failed to checkpoint task %s. Attempting to close remaining tasks before re-throwing:",
                task.id());
            log.error(uncleanMessage, e);
            taskCloseExceptions.putIfAbsent(task.id(), e);
            // We've already recorded the exception (which is the point of clean).
            // Now, we should go ahead and complete the close because a half-closed task is no good to anyone.
            tasksToCloseDirty.add(task);
        }
    }
    // phase 2: close cleanly everything that checkpointed successfully
    tasksToCloseClean.removeAll(tasksToCloseDirty);
    for (final Task task : tasksToCloseClean) {
        try {
            closeTaskClean(task);
        } catch (final RuntimeException closeTaskException) {
            final String uncleanMessage = String.format(
                "Failed to close task %s cleanly. Attempting to close remaining tasks before re-throwing:",
                task.id());
            log.error(uncleanMessage, closeTaskException);
            if (task.state() != State.CLOSED) {
                tasksToCloseDirty.add(task);
            }
            taskCloseExceptions.putIfAbsent(task.id(), closeTaskException);
        }
    }
    // phase 3: recycle (flip active/standby role) everything that checkpointed successfully
    tasksToRecycle.keySet().removeAll(tasksToCloseDirty);
    for (final Map.Entry<Task, Set<TopicPartition>> entry : tasksToRecycle.entrySet()) {
        final Task oldTask = entry.getKey();
        final Set<TopicPartition> inputPartitions = entry.getValue();
        try {
            if (oldTask.isActive()) {
                final StandbyTask standbyTask = convertActiveToStandby((StreamTask) oldTask, inputPartitions);
                if (stateUpdater != null) {
                    // with a state updater, the new standby must go through initialization first
                    tasks.removeTask(oldTask);
                    tasks.addPendingTasksToInit(Collections.singleton(standbyTask));
                } else {
                    tasks.replaceActiveWithStandby(standbyTask);
                }
            } else {
                final StreamTask activeTask = convertStandbyToActive((StandbyTask) oldTask, inputPartitions);
                tasks.replaceStandbyWithActive(activeTask);
            }
        } catch (final RuntimeException e) {
            final String uncleanMessage = String.format("Failed to recycle task %s cleanly. " +
                "Attempting to close remaining tasks before re-throwing:", oldTask.id());
            log.error(uncleanMessage, e);
            taskCloseExceptions.putIfAbsent(oldTask.id(), e);
            tasksToCloseDirty.add(oldTask);
        }
    }
    // for tasks that cannot be cleanly closed or recycled, close them dirty
    for (final Task task : tasksToCloseDirty) {
        closeTaskDirty(task, true);
    }
    return taskCloseExceptions;
}
// Recycles an active task into a standby task over the given input partitions.
private StandbyTask convertActiveToStandby(final StreamTask activeTask, final Set<TopicPartition> partitions) {
    return standbyTaskCreator.createStandbyTaskFromActive(activeTask, partitions);
}
// Recycles a standby task into an active task over the given input partitions.
private StreamTask convertStandbyToActive(final StandbyTask standbyTask, final Set<TopicPartition> partitions) {
    return activeTaskCreator.createActiveTaskFromStandby(standbyTask, partitions, mainConsumer);
}
/**
 * Tries to initialize any new or still-uninitialized tasks, then checks if they can/have completed restoration.
 * Used only on the code path without a dedicated state updater thread.
 *
 * @param now            current wall-clock time, used to start/advance per-task timeouts
 * @param offsetResetter callback used to reset offsets of changelog partitions if needed
 * @throws IllegalStateException If store gets registered after initialized is already finished
 * @throws StreamsException if the store's change log does not contain the partition
 * @return {@code true} if all tasks are fully restored
 */
boolean tryToCompleteRestoration(final long now,
                                 final java.util.function.Consumer<Set<TopicPartition>> offsetResetter) {
    boolean allRunning = true;
    // transit to restore active is idempotent so we can call it multiple times
    changelogReader.enforceRestoreActive();
    final List<Task> activeTasks = new LinkedList<>();
    for (final Task task : tasks.allTasks()) {
        try {
            task.initializeIfNeeded();
            task.clearTaskTimeout();
        } catch (final LockException lockException) {
            // it is possible that if there are multiple threads within the instance that one thread
            // trying to grab the task from the other, while the other has not released the lock since
            // it did not participate in the rebalance. In this case we can just retry in the next iteration
            log.debug("Could not initialize task {} since: {}; will retry", task.id(), lockException.getMessage());
            allRunning = false;
        } catch (final TimeoutException timeoutException) {
            task.maybeInitTaskTimeoutOrThrow(now, timeoutException);
            allRunning = false;
        }
        // note: a task that failed to initialize is still collected here; its restoration
        // check below then runs against whatever state it is in
        if (task.isActive()) {
            activeTasks.add(task);
        }
    }
    if (allRunning && !activeTasks.isEmpty()) {
        final Set<TopicPartition> restored = changelogReader.completedChangelogs();
        for (final Task task : activeTasks) {
            if (restored.containsAll(task.changelogPartitions())) {
                try {
                    task.completeRestoration(offsetResetter);
                    task.clearTaskTimeout();
                } catch (final TimeoutException timeoutException) {
                    task.maybeInitTaskTimeoutOrThrow(now, timeoutException);
                    log.debug(
                        String.format(
                            "Could not complete restoration for %s due to the following exception; will retry",
                            task.id()),
                        timeoutException
                    );
                    allRunning = false;
                }
            } else {
                // we found a restoring task that isn't done restoring, which is evidence that
                // not all tasks are running
                allRunning = false;
            }
        }
    }
    if (allRunning) {
        // we can call resume multiple times since it is idempotent.
        mainConsumer.resume(mainConsumer.assignment());
        changelogReader.transitToUpdateStandby();
    }
    return allRunning;
}
// Drives one round of state-updater housekeeping: feed it pending tasks, surface its
// failures, and promote any fully-restored active tasks to RUNNING.
// Returns true once no active task is restoring and no task is waiting to be initialized.
public boolean checkStateUpdater(final long now,
                                 final java.util.function.Consumer<Set<TopicPartition>> offsetResetter) {
    addTasksToStateUpdater();
    if (stateUpdater.hasExceptionsAndFailedTasks()) {
        handleExceptionsFromStateUpdater();
    }
    if (stateUpdater.restoresActiveTasks()) {
        handleRestoredTasksFromStateUpdater(now, offsetResetter);
    }
    final boolean stillRestoringOrInitializing =
        stateUpdater.restoresActiveTasks() || tasks.hasPendingTasksToInit();
    return !stillRestoringOrInitializing;
}
// Recycles a task that was removed from the state updater: suspend it, convert it to the
// opposite role (active <-> standby) over the new input partitions, and queue the new task
// for initialization. On failure BOTH the old and (if created) the new task are queued for a
// dirty close, and the first exception for the task id is recorded for later re-throw.
private void recycleTaskFromStateUpdater(final Task task,
                                         final Set<TopicPartition> inputPartitions,
                                         final Set<Task> tasksToCloseDirty,
                                         final Map<TaskId, RuntimeException> taskExceptions) {
    Task newTask = null;
    try {
        task.suspend();
        newTask = task.isActive() ?
            convertActiveToStandby((StreamTask) task, inputPartitions) :
            convertStandbyToActive((StandbyTask) task, inputPartitions);
        tasks.addPendingTasksToInit(Collections.singleton(newTask));
    } catch (final RuntimeException e) {
        final TaskId taskId = task.id();
        final String uncleanMessage = String.format("Failed to recycle task %s cleanly. " +
            "Attempting to close remaining tasks before re-throwing:", taskId);
        log.error(uncleanMessage, e);
        if (task.state() != State.CLOSED) {
            tasksToCloseDirty.add(task);
        }
        if (newTask != null && newTask.state() != State.CLOSED) {
            tasksToCloseDirty.add(newTask);
        }
        taskExceptions.putIfAbsent(taskId, e);
    }
}
// Suspends and cleanly closes the given task; on failure the task is queued for a dirty
// close (unless already closed) and the exception is recorded for later re-throw.
private void closeTaskClean(final Task task,
                            final Set<Task> tasksToCloseDirty,
                            final Map<TaskId, RuntimeException> taskExceptions) {
    try {
        task.suspend();
        task.closeClean();
    } catch (final RuntimeException closeException) {
        final String uncleanMessage = String.format("Failed to close task %s cleanly. " +
            "Attempting to close remaining tasks before re-throwing:", task.id());
        log.error(uncleanMessage, closeException);
        final boolean notYetClosed = task.state() != State.CLOSED;
        if (notYetClosed) {
            tasksToCloseDirty.add(task);
        }
        taskExceptions.putIfAbsent(task.id(), closeException);
    }
}
// Moves a fully-restored task from the state updater's custody into RUNNING:
// complete restoration, register it with the stream thread, resume its input partitions,
// and clear any pending timeout. On a timeout the task is handed back to the state updater
// and retried later. Statement order matters: the task must be registered before resuming
// its partitions so the thread never polls partitions of an unregistered task.
private void transitRestoredTaskToRunning(final Task task,
                                          final long now,
                                          final java.util.function.Consumer<Set<TopicPartition>> offsetResetter) {
    try {
        task.completeRestoration(offsetResetter);
        tasks.addTask(task);
        mainConsumer.resume(task.inputPartitions());
        task.clearTaskTimeout();
    } catch (final TimeoutException timeoutException) {
        task.maybeInitTaskTimeoutOrThrow(now, timeoutException);
        // not ready yet: return the task to the state updater and retry on a later round
        stateUpdater.add(task);
        log.debug(
            String.format(
                "Could not complete restoration for %s due to the following exception; adding the task " +
                    "back to the state updater and will retry",
                task.id()),
            timeoutException
        );
    }
}
// Drains the tasks pending initialization and tries to move each into the state updater;
// failures are collected per task and re-thrown together at the end.
private void addTasksToStateUpdater() {
    final Map<TaskId, RuntimeException> taskExceptions = new LinkedHashMap<>();
    for (final Task taskToInit : tasks.drainPendingTasksToInit()) {
        try {
            addTaskToStateUpdater(taskToInit);
        } catch (final RuntimeException initException) {
            // keep the failed task in the bookkeeping so the stream thread can handle it
            tasks.addFailedTask(taskToInit);
            taskExceptions.put(taskToInit.id(), initException);
        }
    }
    maybeThrowTaskExceptions(taskExceptions);
}
// Attempts to initialize a single task and hand it to the state updater.
// A per-task backoff (taskIdToBackoffRecord) gates retries of acquiring the state directory
// lock; on LockException or TimeoutException the task goes back to the pending-init queue
// with an updated backoff and is retried on a later round.
private void addTaskToStateUpdater(final Task task) {
    final long nowMs = time.milliseconds();
    try {
        if (canTryInitializeTask(task.id(), nowMs)) {
            task.initializeIfNeeded();
            task.clearTaskTimeout();
            // successful init: the backoff record is no longer needed
            taskIdToBackoffRecord.remove(task.id());
            stateUpdater.add(task);
        } else {
            log.trace("Task {} is still not allowed to retry acquiring the state directory lock", task.id());
            tasks.addPendingTasksToInit(Collections.singleton(task));
        }
    } catch (final LockException lockException) {
        // The state directory may still be locked by another thread, when the rebalance just happened.
        // Retry in the next iteration.
        log.info("Encountered lock exception. Reattempting locking the state in the next iteration. Error message was: {}",
            lockException.getMessage());
        tasks.addPendingTasksToInit(Collections.singleton(task));
        updateOrCreateBackoffRecord(task.id(), nowMs);
    } catch (final TimeoutException timeoutException) {
        // A timeout can occur either during producer initialization OR while fetching committed offsets.
        // Retry in the next iteration.
        task.maybeInitTaskTimeoutOrThrow(nowMs, timeoutException);
        tasks.addPendingTasksToInit(Collections.singleton(task));
        updateOrCreateBackoffRecord(task.id(), nowMs);
        log.info("Encountered timeout exception. Reattempting initialization in the next iteration. Error message was: {}",
            timeoutException.getMessage());
    }
}
// Drains all failed tasks from the state updater and re-throws their exceptions as appropriate.
public void handleExceptionsFromStateUpdater() {
    maybeThrowTaskExceptions(collectExceptionsAndFailedTasksFromStateUpdater());
}
// Drains the failed tasks from the state updater, moves each back into the stream thread's
// bookkeeping, and returns their exceptions keyed by task id in drain order.
public Map<TaskId, RuntimeException> collectExceptionsAndFailedTasksFromStateUpdater() {
    final Map<TaskId, RuntimeException> taskExceptions = new LinkedHashMap<>();
    for (final StateUpdater.ExceptionAndTask exceptionAndTask : stateUpdater.drainExceptionsAndFailedTasks()) {
        final Task failedTask = exceptionAndTask.task();
        // need to add task back to the bookkeeping to be handled by the stream thread
        tasks.addFailedTask(failedTask);
        taskExceptions.put(failedTask.id(), exceptionAndTask.exception());
    }
    return taskExceptions;
}
// Promotes every already-restored active task out of the state updater into RUNNING.
private void handleRestoredTasksFromStateUpdater(final long now,
                                                 final java.util.function.Consumer<Set<TopicPartition>> offsetResetter) {
    // zero timeout: do not block, only drain tasks whose restoration already completed
    for (final Task restoredTask : stateUpdater.drainRestoredActiveTasks(Duration.ZERO)) {
        transitRestoredTaskToRunning(restoredTask, now, offsetResetter);
    }
}
/**
 * Handle the revoked partitions and prepare for closing the associated tasks in {@link #handleAssignment(Map, Map)}
 * We should commit the revoking tasks first before suspending them as we will not officially own them anymore when
 * {@link #handleAssignment(Map, Map)} is called. Note that only active task partitions are passed in from the
 * rebalance listener, so we only need to consider/commit active tasks here
 *
 * If eos-v2 is used, we must commit ALL tasks. Otherwise, we can just commit those (active) tasks which are revoked
 *
 * @throws TaskMigratedException if the task producer got fenced (EOS only)
 */
void handleRevocation(final Collection<TopicPartition> revokedPartitions) {
    final Set<TopicPartition> remainingRevokedPartitions = new HashSet<>(revokedPartitions);
    final Set<Task> revokedActiveTasks = new TreeSet<>(Comparator.comparing(Task::id));
    final Set<Task> commitNeededActiveTasks = new TreeSet<>(Comparator.comparing(Task::id));
    final Map<Task, Map<TopicPartition, OffsetAndMetadata>> consumedOffsetsPerTask = new HashMap<>();
    final AtomicReference<RuntimeException> firstException = new AtomicReference<>(null);
    final Set<TaskId> lockedTaskIds = activeRunningTaskIterable().stream().map(Task::id).collect(Collectors.toSet());
    maybeLockTasks(lockedTaskIds);
    // phase 1: partition the running active tasks into revoked vs. still-owned-but-commit-needed
    boolean revokedTasksNeedCommit = false;
    for (final Task task : activeRunningTaskIterable()) {
        if (remainingRevokedPartitions.containsAll(task.inputPartitions())) {
            // when the task input partitions are included in the revoked list,
            // this is an active task and should be revoked
            revokedActiveTasks.add(task);
            remainingRevokedPartitions.removeAll(task.inputPartitions());
            revokedTasksNeedCommit |= task.commitNeeded();
        } else if (task.commitNeeded()) {
            commitNeededActiveTasks.add(task);
        }
    }
    // revoked partitions may also belong to tasks still restoring in the state updater
    revokeTasksInStateUpdater(remainingRevokedPartitions);
    if (!remainingRevokedPartitions.isEmpty()) {
        log.debug("The following revoked partitions {} are missing from the current task partitions. It could "
            + "potentially be due to race condition of consumer detecting the heartbeat failure, or the tasks " +
            "have been cleaned up by the handleAssignment callback.", remainingRevokedPartitions);
    }
    // phase 2: prepare commits (revoked tasks first, then the rest that need committing)
    if (revokedTasksNeedCommit) {
        prepareCommitAndAddOffsetsToMap(revokedActiveTasks, consumedOffsetsPerTask);
        // if we need to commit any revoking task then we just commit all of those needed committing together
        prepareCommitAndAddOffsetsToMap(commitNeededActiveTasks, consumedOffsetsPerTask);
    }
    // even if commit failed, we should still continue and complete suspending those tasks, so we would capture
    // any exception and rethrow it at the end. some exceptions may be handled immediately and then swallowed,
    // as such we just need to skip those dirty tasks in the checkpoint
    final Set<Task> dirtyTasks = new TreeSet<>(Comparator.comparing(Task::id));
    try {
        if (revokedTasksNeedCommit) {
            // in handleRevocation we must call commitOffsetsOrTransaction() directly rather than
            // commitAndFillInConsumedOffsetsAndMetadataPerTaskMap() to make sure we don't skip the
            // offset commit because we are in a rebalance
            taskExecutor.commitOffsetsOrTransaction(consumedOffsetsPerTask);
        }
    } catch (final TaskCorruptedException e) {
        log.warn("Some tasks were corrupted when trying to commit offsets, these will be cleaned and revived: {}",
            e.corruptedTasks());
        // If we hit a TaskCorruptedException it must be EOS, just handle the cleanup for those corrupted tasks right here
        dirtyTasks.addAll(tasks.tasks(e.corruptedTasks()));
        closeDirtyAndRevive(dirtyTasks, true);
    } catch (final TimeoutException e) {
        log.warn("Timed out while trying to commit all tasks during revocation, these will be cleaned and revived");
        // If we hit a TimeoutException it must be ALOS, just close dirty and revive without wiping the state
        dirtyTasks.addAll(consumedOffsetsPerTask.keySet());
        closeDirtyAndRevive(dirtyTasks, false);
    } catch (final RuntimeException e) {
        log.error("Exception caught while committing those revoked tasks " + revokedActiveTasks, e);
        firstException.compareAndSet(null, e);
        dirtyTasks.addAll(consumedOffsetsPerTask.keySet());
    }
    // phase 3: post-commit (checkpoint) the successfully-committed tasks
    // we enforce checkpointing upon suspending a task: if it is resumed later we just proceed normally, if it is
    // going to be closed we would checkpoint by then
    for (final Task task : revokedActiveTasks) {
        if (!dirtyTasks.contains(task)) {
            try {
                task.postCommit(true);
            } catch (final RuntimeException e) {
                log.error("Exception caught while post-committing task " + task.id(), e);
                maybeSetFirstException(false, maybeWrapTaskException(e, task.id()), firstException);
            }
        }
    }
    if (revokedTasksNeedCommit) {
        for (final Task task : commitNeededActiveTasks) {
            if (!dirtyTasks.contains(task)) {
                try {
                    // for non-revoking active tasks, we should not enforce checkpoint
                    // since if it is EOS enabled, no checkpoint should be written while
                    // the task is in RUNNING tate
                    task.postCommit(false);
                } catch (final RuntimeException e) {
                    log.error("Exception caught while post-committing task " + task.id(), e);
                    maybeSetFirstException(false, maybeWrapTaskException(e, task.id()), firstException);
                }
            }
        }
    }
    // phase 4: suspend every revoked task, even those that failed earlier phases
    for (final Task task : revokedActiveTasks) {
        try {
            task.suspend();
        } catch (final RuntimeException e) {
            log.error("Caught the following exception while trying to suspend revoked task " + task.id(), e);
            maybeSetFirstException(false, maybeWrapTaskException(e, task.id()), firstException);
        }
    }
    maybeUnlockTasks(lockedTaskIds);
    if (firstException.get() != null) {
        throw firstException.get();
    }
}
// Revokes active tasks still restoring in the state updater whose input partitions were all
// revoked: removes them from the state updater, suspends them, and returns them to the
// stream thread's bookkeeping. Mutates remainingRevokedPartitions to mark what was matched.
private void revokeTasksInStateUpdater(final Set<TopicPartition> remainingRevokedPartitions) {
    if (stateUpdater == null) {
        return;
    }
    final Map<TaskId, CompletableFuture<StateUpdater.RemovedTaskResult>> futures = new LinkedHashMap<>();
    final Map<TaskId, RuntimeException> failedTasksFromStateUpdater = new HashMap<>();
    for (final Task restoringTask : stateUpdater.tasks()) {
        if (restoringTask.isActive()
                && remainingRevokedPartitions.containsAll(restoringTask.inputPartitions())) {
            futures.put(restoringTask.id(), stateUpdater.remove(restoringTask.id()));
            remainingRevokedPartitions.removeAll(restoringTask.inputPartitions());
        }
    }
    getNonFailedTasks(futures, failedTasksFromStateUpdater).forEach(removedTask -> {
        removedTask.suspend();
        tasks.addTask(removedTask);
    });
    maybeThrowTaskExceptions(failedTasksFromStateUpdater);
}
// Calls prepareCommit on each task and collects any non-empty committable offsets into the
// given map; exceptions are (re)wrapped as StreamsException tagged with the failing task id.
private void prepareCommitAndAddOffsetsToMap(final Set<Task> tasksToPrepare,
                                             final Map<Task, Map<TopicPartition, OffsetAndMetadata>> consumedOffsetsPerTask) {
    for (final Task task : tasksToPrepare) {
        try {
            final Map<TopicPartition, OffsetAndMetadata> committableOffsets = task.prepareCommit(true);
            final boolean hasOffsetsToCommit = !committableOffsets.isEmpty();
            if (hasOffsetsToCommit) {
                consumedOffsetsPerTask.put(task, committableOffsets);
            }
        } catch (final StreamsException streamsException) {
            // already the right type: just attach the task id before propagating
            streamsException.setTaskId(task.id());
            throw streamsException;
        } catch (final Exception otherException) {
            throw new StreamsException(otherException, task.id());
        }
    }
}
/**
 * Closes all active tasks as zombies, as their partitions have been lost and are no longer owned.
 * NOTE this method assumes that when it is called, EVERY active task/partition has been lost
 * and must be closed as a zombie.
 *
 * @throws TaskMigratedException if the task producer got fenced (EOS only)
 */
void handleLostAll() {
    log.debug("Closing lost active tasks as zombies.");
    closeRunningTasksDirty();
    removeLostActiveTasksFromStateUpdaterAndPendingTasksToInit();
    final boolean eosV2 = processingMode == EXACTLY_ONCE_V2;
    if (eosV2) {
        // under eos-v2 the thread producer was fenced along with the group membership,
        // so it must be re-initialized before processing can continue
        activeTaskCreator.reInitializeProducer();
    }
}
// Closes dirty every active task owned by the stream thread, under the task locks.
private void closeRunningTasksDirty() {
    final Set<Task> ownedTasks = tasks.allTasks();
    final Set<TaskId> ownedTaskIds = tasks.allTaskIds();
    maybeLockTasks(ownedTaskIds);
    for (final Task task : ownedTasks) {
        // Even though we've apparently dropped out of the group, we can continue safely to maintain our
        // standby tasks while we rejoin — only active tasks are zombies.
        if (task.isActive()) {
            closeTaskDirty(task, true);
        }
    }
    maybeUnlockTasks(ownedTaskIds);
}
// Removes all active tasks from the state updater and the pending-init queue after all
// partitions were lost: tasks whose removal succeeded are closed clean, the rest dirty.
// Close exceptions are deliberately discarded (a throwaway map) since everything is lost anyway.
private void removeLostActiveTasksFromStateUpdaterAndPendingTasksToInit() {
    if (stateUpdater != null) {
        final Map<TaskId, CompletableFuture<StateUpdater.RemovedTaskResult>> futures = new LinkedHashMap<>();
        final Set<Task> tasksToCloseClean = new TreeSet<>(Comparator.comparing(Task::id));
        // pending-init active tasks never started restoring, so they can be closed clean directly
        tasksToCloseClean.addAll(tasks.drainPendingActiveTasksToInit());
        final Set<Task> tasksToCloseDirty = new TreeSet<>(Comparator.comparing(Task::id));
        for (final Task restoringTask : stateUpdater.tasks()) {
            if (restoringTask.isActive()) {
                futures.put(restoringTask.id(), stateUpdater.remove(restoringTask.id()));
            }
        }
        addToTasksToClose(futures, tasksToCloseClean, tasksToCloseDirty);
        for (final Task task : tasksToCloseClean) {
            closeTaskClean(task, tasksToCloseDirty, new HashMap<>());
        }
        for (final Task task : tasksToCloseDirty) {
            closeTaskDirty(task, false);
        }
    }
}
// Wakes up the state updater and the task executors (when present) after a pause was lifted,
// so they re-check which tasks they may now work on.
public void signalResume() {
    if (stateUpdater != null) {
        stateUpdater.signalResume();
    }
    if (schedulingTaskManager != null) {
        schedulingTaskManager.signalTaskExecutors();
    }
}
/**
 * Compute the offset total summed across all stores in a task. Includes offset sum for any tasks we own the
 * lock for, which includes assigned and unassigned tasks we locked in {@link #tryToLockAllNonEmptyTaskDirectories()}.
 * Does not include stateless or non-logged tasks.
 *
 * @return offset sums keyed by task id; live tasks report their in-memory changelog positions,
 *         locked-but-not-owned tasks report whatever their on-disk checkpoint file contains
 */
public Map<TaskId, Long> taskOffsetSums() {
    final Map<TaskId, Long> taskOffsetSums = new HashMap<>();
    // Not all tasks will create directories, and there may be directories for tasks we don't currently own,
    // so we consider all tasks that are either owned or on disk. This includes stateless tasks, which should
    // just have an empty changelogOffsets map.
    final Map<TaskId, Task> tasks = allTasks();
    // union() is a project helper — presumably set union; TODO confirm against its definition
    final Set<TaskId> lockedTaskDirectoriesOfNonOwnedTasksAndClosedAndCreatedTasks =
        union(HashSet::new, lockedTaskDirectories, tasks.keySet());
    for (final Task task : tasks.values()) {
        // CREATED/CLOSED tasks have no live store state; they fall through to the checkpoint-file path below
        if (task.state() != State.CREATED && task.state() != State.CLOSED) {
            final Map<TopicPartition, Long> changelogOffsets = task.changelogOffsets();
            if (changelogOffsets.isEmpty()) {
                log.debug("Skipping to encode apparently stateless (or non-logged) offset sum for task {}",
                    task.id());
            } else {
                taskOffsetSums.put(task.id(), sumOfChangelogOffsets(task.id(), changelogOffsets));
            }
            lockedTaskDirectoriesOfNonOwnedTasksAndClosedAndCreatedTasks.remove(task.id());
        }
    }
    // for the remaining (not live) tasks, read offsets from their checkpoint files, if any
    for (final TaskId id : lockedTaskDirectoriesOfNonOwnedTasksAndClosedAndCreatedTasks) {
        final File checkpointFile = stateDirectory.checkpointFileFor(id);
        try {
            if (checkpointFile.exists()) {
                taskOffsetSums.put(id, sumOfChangelogOffsets(id, new OffsetCheckpoint(checkpointFile).read()));
            }
        } catch (final IOException e) {
            // best-effort: an unreadable checkpoint just means this task reports no offset sum
            log.warn(String.format("Exception caught while trying to read checkpoint for task %s:", id), e);
        }
    }
    return taskOffsetSums;
}
/**
 * Makes a weak attempt to lock all non-empty task directories in the state dir. We are responsible for computing and
 * reporting the offset sum for any unassigned tasks we obtain the lock for in the upcoming rebalance. Tasks
 * that we locked but didn't own will be released at the end of the rebalance (unless of course we were
 * assigned the task as a result of the rebalance). This method should be idempotent.
 */
private void tryToLockAllNonEmptyTaskDirectories() {
    // Always clear the set at the beginning as we're always dealing with the
    // current set of actually-locked tasks.
    lockedTaskDirectories.clear();
    final Map<TaskId, Task> allTasks = allTasks();
    for (final TaskDirectory taskDir : stateDirectory.listNonEmptyTaskDirectories()) {
        final File dir = taskDir.file();
        final String namedTopology = taskDir.namedTopology();
        try {
            final TaskId id = parseTaskDirectoryName(dir.getName(), namedTopology);
            // lock() may fail if another thread holds it — that's fine, "weak attempt"
            if (stateDirectory.lock(id)) {
                // Check again in case the cleaner thread ran and emptied the directory
                if (stateDirectory.directoryForTaskIsEmpty(id)) {
                    log.debug("Releasing lock on empty directory for task {}", id);
                    stateDirectory.unlock(id);
                } else {
                    lockedTaskDirectories.add(id);
                    if (!allTasks.containsKey(id)) {
                        log.debug("Temporarily locked unassigned task {} for the upcoming rebalance", id);
                    }
                }
            }
        } catch (final TaskIdFormatException ignored) {
            // ignore any unknown files that sit in the same directory
        }
    }
}
/**
* Clean up after closed or removed tasks by making sure to unlock any remaining locked directories for them, for
* example unassigned tasks or those in the CREATED state when closed, since Task#close will not unlock them
*/
private void releaseLockedDirectoriesForTasks(final Set<TaskId> tasksToUnlock) {
final Iterator<TaskId> taskIdIterator = lockedTaskDirectories.iterator();
while (taskIdIterator.hasNext()) {
final TaskId id = taskIdIterator.next();
if (tasksToUnlock.contains(id)) {
stateDirectory.unlock(id);
taskIdIterator.remove();
}
}
}
/**
 * We must release the lock for any unassigned tasks that we temporarily locked in preparation for a
 * rebalance in {@link #tryToLockAllNonEmptyTaskDirectories()}.
 */
private void releaseLockedUnassignedTaskDirectories() {
    final Map<TaskId, Task> currentTasks = allTasks();
    for (final Iterator<TaskId> lockedIdIterator = lockedTaskDirectories.iterator(); lockedIdIterator.hasNext(); ) {
        final TaskId lockedId = lockedIdIterator.next();
        final boolean stillAssigned = currentTasks.containsKey(lockedId);
        if (!stillAssigned) {
            stateDirectory.unlock(lockedId);
            // must remove via the iterator to avoid ConcurrentModificationException
            lockedIdIterator.remove();
        }
    }
}
    /**
     * Sums the given changelog offsets for a task, used to rank instances during assignment.
     * Returns the {@link Task#LATEST_OFFSET} sentinel unchanged (fully caught-up active task)
     * and pins the sum to {@link Long#MAX_VALUE} if it overflows.
     */
    private long sumOfChangelogOffsets(final TaskId id, final Map<TopicPartition, Long> changelogOffsets) {
        long offsetSum = 0L;
        for (final Map.Entry<TopicPartition, Long> changelogEntry : changelogOffsets.entrySet()) {
            final long offset = changelogEntry.getValue();
            if (offset == Task.LATEST_OFFSET) {
                // this condition can only be true for active tasks; never for standby
                // for this case, the offset of all partitions is set to `LATEST_OFFSET`
                // and we "forward" the sentinel value directly
                return Task.LATEST_OFFSET;
            } else if (offset != OffsetCheckpoint.OFFSET_UNKNOWN) {
                // any other negative offset indicates a corrupted checkpoint value
                if (offset < 0) {
                    throw new StreamsException(
                        new IllegalStateException("Expected not to get a sentinel offset, but got: " + changelogEntry),
                        id);
                }
                offsetSum += offset;
                if (offsetSum < 0) {
                    log.warn("Sum of changelog offsets for task {} overflowed, pinning to Long.MAX_VALUE", id);
                    return Long.MAX_VALUE;
                }
            }
        }
        return offsetSum;
    }
    /**
     * Closes a task without committing, swallowing every intermediate error so that the
     * close always runs to completion.
     *
     * @param removeFromTasksRegistry whether to also drop the task from the task registry afterwards
     */
    private void closeTaskDirty(final Task task, final boolean removeFromTasksRegistry) {
        try {
            // we call this function only to flush the cache if necessary
            // before suspending and closing the topology
            task.prepareCommit(false);
        } catch (final RuntimeException swallow) {
            log.warn("Error flushing cache of dirty task {}. " +
                "Since the task is closing dirty, the following exception is swallowed: {}",
                task.id(), swallow.getMessage());
        }
        try {
            task.suspend();
        } catch (final RuntimeException swallow) {
            log.warn("Error suspending dirty task {}. " +
                "Since the task is closing dirty, the following exception is swallowed: {}",
                task.id(), swallow.getMessage());
        }
        task.closeDirty();
        try {
            if (removeFromTasksRegistry) {
                tasks.removeTask(task);
            }
        } catch (final RuntimeException swallow) {
            // NOTE(review): unlike the two cases above, this logs the exception object rather than
            // just its message — confirm whether the inconsistency is intentional.
            log.warn("Error removing dirty task {}. " +
                "Since the task is closing dirty, the following exception is swallowed: {}",
                task.id(), swallow);
        }
    }
    // Closes a task after a successful clean shutdown path and removes it from the registry.
    private void closeTaskClean(final Task task) {
        task.closeClean();
        tasks.removeTask(task);
    }
void shutdown(final boolean clean) {
shutdownStateUpdater();
shutdownSchedulingTaskManager();
final AtomicReference<RuntimeException> firstException = new AtomicReference<>(null);
// TODO: change type to `StreamTask`
final Set<Task> activeTasks = new TreeSet<>(Comparator.comparing(Task::id));
activeTasks.addAll(tasks.activeTasks());
executeAndMaybeSwallow(
clean,
() -> closeAndCleanUpTasks(activeTasks, standbyTaskIterable(), clean),
e -> firstException.compareAndSet(null, e),
e -> log.warn("Ignoring an exception while unlocking remaining task directories.", e)
);
executeAndMaybeSwallow(
clean,
activeTaskCreator::close,
e -> firstException.compareAndSet(null, e),
e -> log.warn("Ignoring an exception while closing thread producer.", e)
);
tasks.clear();
// this should be called after closing all tasks and clearing them from `tasks` to make sure we unlock the dir
// for any tasks that may have still been in CREATED at the time of shutdown, since Task#close will not do so
executeAndMaybeSwallow(
clean,
this::releaseLockedUnassignedTaskDirectories,
e -> firstException.compareAndSet(null, e),
e -> log.warn("Ignoring an exception while unlocking remaining task directories.", e)
);
final RuntimeException fatalException = firstException.get();
if (fatalException != null) {
throw fatalException;
}
log.info("Shutdown complete");
}
    /**
     * Shuts down the state updater thread, if enabled: first dirty-closes tasks that already
     * failed, then removes every remaining task from the updater (splitting the results into
     * clean and dirty closes), and finally stops the updater thread itself.
     */
    private void shutdownStateUpdater() {
        if (stateUpdater != null) {
            // If there are failed tasks handling them first
            for (final StateUpdater.ExceptionAndTask exceptionAndTask : stateUpdater.drainExceptionsAndFailedTasks()) {
                final Task failedTask = exceptionAndTask.task();
                closeTaskDirty(failedTask, false);
            }
            // Ask the updater to hand back every task it still owns.
            final Map<TaskId, CompletableFuture<StateUpdater.RemovedTaskResult>> futures = new LinkedHashMap<>();
            for (final Task task : stateUpdater.tasks()) {
                final CompletableFuture<StateUpdater.RemovedTaskResult> future = stateUpdater.remove(task.id());
                futures.put(task.id(), future);
            }
            final Set<Task> tasksToCloseClean = new TreeSet<>(Comparator.comparing(Task::id));
            final Set<Task> tasksToCloseDirty = new TreeSet<>(Comparator.comparing(Task::id));
            addToTasksToClose(futures, tasksToCloseClean, tasksToCloseDirty);
            // at this point we removed all tasks, so the shutdown should not take a lot of time
            stateUpdater.shutdown(Duration.ofMinutes(1L));
            // tasks removed cleanly go back into the registry so the regular shutdown path closes them
            for (final Task task : tasksToCloseClean) {
                tasks.addTask(task);
            }
            for (final Task task : tasksToCloseDirty) {
                closeTaskDirty(task, false);
            }
            // Handling all failures that occurred during the remove process
            for (final StateUpdater.ExceptionAndTask exceptionAndTask : stateUpdater.drainExceptionsAndFailedTasks()) {
                final Task failedTask = exceptionAndTask.task();
                closeTaskDirty(failedTask, false);
            }
            // If there is anything left unhandled due to timeouts, handling now
            for (final Task task : stateUpdater.tasks()) {
                closeTaskDirty(task, false);
            }
        }
    }
    // Shuts down the processing-thread scheduler, if enabled, waiting up to five minutes.
    private void shutdownSchedulingTaskManager() {
        if (schedulingTaskManager != null) {
            schedulingTaskManager.shutdown(Duration.ofMinutes(5L));
        }
    }
/**
* Closes and cleans up after the provided tasks, including closing their corresponding task producers
*/
    void closeAndCleanUpTasks(final Collection<Task> activeTasks, final Collection<Task> standbyTasks, final boolean clean) {
        final AtomicReference<RuntimeException> firstException = new AtomicReference<>(null);
        final Set<TaskId> ids =
            activeTasks.stream()
                       .map(Task::id)
                       .collect(Collectors.toSet());
        // keep the processing threads away from these tasks while we close them
        maybeLockTasks(ids);
        // tasks that could not be committed/closed cleanly fall back to a dirty close
        final Set<Task> tasksToCloseDirty = new TreeSet<>(Comparator.comparing(Task::id));
        tasksToCloseDirty.addAll(tryCloseCleanActiveTasks(activeTasks, clean, firstException));
        tasksToCloseDirty.addAll(tryCloseCleanStandbyTasks(standbyTasks, clean, firstException));
        for (final Task task : tasksToCloseDirty) {
            closeTaskDirty(task, true);
        }
        // NOTE(review): not in a finally block, so an exception thrown during the dirty closes
        // above would leave the tasks locked — confirm whether that is intended
        maybeUnlockTasks(ids);
        final RuntimeException exception = firstException.get();
        if (exception != null) {
            throw exception;
        }
    }
    // Returns the set of active tasks that must be closed dirty
    private Collection<Task> tryCloseCleanActiveTasks(final Collection<Task> activeTasksToClose,
                                                      final boolean clean,
                                                      final AtomicReference<RuntimeException> firstException) {
        if (!clean) {
            // unclean close: every active task is closed dirty, no commit is attempted
            return activeTaskIterable();
        }
        final Comparator<Task> byId = Comparator.comparing(Task::id);
        final Set<Task> tasksToCommit = new TreeSet<>(byId);
        final Set<Task> tasksToCloseDirty = new TreeSet<>(byId);
        final Set<Task> tasksToCloseClean = new TreeSet<>(byId);
        final Map<Task, Map<TopicPartition, OffsetAndMetadata>> consumedOffsetsAndMetadataPerTask = new HashMap<>();
        // first committing all tasks and then suspend and close them clean
        for (final Task task : activeTasksToClose) {
            try {
                final Map<TopicPartition, OffsetAndMetadata> committableOffsets = task.prepareCommit(true);
                tasksToCommit.add(task);
                if (!committableOffsets.isEmpty()) {
                    consumedOffsetsAndMetadataPerTask.put(task, committableOffsets);
                }
                tasksToCloseClean.add(task);
            } catch (final TaskMigratedException e) {
                // just ignore the exception as it doesn't matter during shutdown
                tasksToCloseDirty.add(task);
            } catch (final StreamsException e) {
                e.setTaskId(task.id());
                firstException.compareAndSet(null, e);
                tasksToCloseDirty.add(task);
            } catch (final RuntimeException e) {
                firstException.compareAndSet(null, new StreamsException(e, task.id()));
                tasksToCloseDirty.add(task);
            }
        }
        // If any active tasks can't be committed, none of them can be, and all that need a commit must be closed dirty
        if (processingMode == EXACTLY_ONCE_V2 && !tasksToCloseDirty.isEmpty()) {
            tasksToCloseClean.removeAll(tasksToCommit);
            tasksToCloseDirty.addAll(tasksToCommit);
        } else {
            try {
                taskExecutor.commitOffsetsOrTransaction(consumedOffsetsAndMetadataPerTask);
            } catch (final RuntimeException e) {
                log.error("Exception caught while committing tasks " + consumedOffsetsAndMetadataPerTask.keySet(), e);
                // TODO: should record the task ids when handling this exception
                maybeSetFirstException(false, e, firstException);
                if (e instanceof TaskCorruptedException) {
                    // only the corrupted subset needs a dirty close; the rest committed fine
                    final TaskCorruptedException taskCorruptedException = (TaskCorruptedException) e;
                    final Set<TaskId> corruptedTaskIds = taskCorruptedException.corruptedTasks();
                    final Set<Task> corruptedTasks = tasksToCommit
                        .stream()
                        .filter(task -> corruptedTaskIds.contains(task.id()))
                        .collect(Collectors.toSet());
                    tasksToCloseClean.removeAll(corruptedTasks);
                    tasksToCloseDirty.addAll(corruptedTasks);
                } else {
                    // If the commit fails, everyone who participated in it must be closed dirty
                    tasksToCloseClean.removeAll(tasksToCommit);
                    tasksToCloseDirty.addAll(tasksToCommit);
                }
            }
            // write checkpoints etc. for all tasks that (attempted to) commit
            for (final Task task : activeTaskIterable()) {
                try {
                    task.postCommit(true);
                } catch (final RuntimeException e) {
                    log.error("Exception caught while post-committing task " + task.id(), e);
                    maybeSetFirstException(false, maybeWrapTaskException(e, task.id()), firstException);
                    tasksToCloseDirty.add(task);
                    tasksToCloseClean.remove(task);
                }
            }
        }
        for (final Task task : tasksToCloseClean) {
            try {
                task.suspend();
                closeTaskClean(task);
            } catch (final RuntimeException e) {
                log.error("Exception caught while clean-closing active task {}: {}", task.id(), e.getMessage());
                if (task.state() != State.CLOSED) {
                    tasksToCloseDirty.add(task);
                }
                // ignore task migrated exception as it doesn't matter during shutdown
                maybeSetFirstException(true, maybeWrapTaskException(e, task.id()), firstException);
            }
        }
        return tasksToCloseDirty;
    }
    // Returns the set of standby tasks that must be closed dirty
    private Collection<Task> tryCloseCleanStandbyTasks(final Collection<Task> standbyTasksToClose,
                                                       final boolean clean,
                                                       final AtomicReference<RuntimeException> firstException) {
        if (!clean) {
            // unclean close: every standby task is closed dirty, no commit is attempted
            return standbyTaskIterable();
        }
        final Set<Task> tasksToCloseDirty = new TreeSet<>(Comparator.comparing(Task::id));
        // first committing and then suspend / close clean
        for (final Task task : standbyTasksToClose) {
            try {
                task.prepareCommit(true);
                task.postCommit(true);
                task.suspend();
                closeTaskClean(task);
            } catch (final RuntimeException e) {
                log.error("Exception caught while clean-closing standby task {}: {}", task.id(), e.getMessage());
                if (task.state() != State.CLOSED) {
                    tasksToCloseDirty.add(task);
                }
                // ignore task migrated exception as it doesn't matter during shutdown
                maybeSetFirstException(true, maybeWrapTaskException(e, task.id()), firstException);
            }
        }
        return tasksToCloseDirty;
    }
    // Ids of all active tasks, including those still restoring in the state updater.
    Set<TaskId> activeTaskIds() {
        return activeTaskStream()
            .map(Task::id)
            .collect(Collectors.toSet());
    }
    // Ids of active tasks owned by the stream thread (i.e. running, not restoring).
    Set<TaskId> activeRunningTaskIds() {
        return activeRunningTaskStream()
            .map(Task::id)
            .collect(Collectors.toSet());
    }
    // Ids of all standby tasks.
    Set<TaskId> standbyTaskIds() {
        return standbyTaskStream().map(Task::id).collect(Collectors.toSet());
    }
    Map<TaskId, Task> allTasks() {
        // not bothering with an unmodifiable map, since the tasks themselves are mutable, but
        // if any outside code modifies the map or the tasks, it would be a severe transgression.
        if (stateUpdater != null) {
            // later puts win: registry and pending-init entries override state-updater entries
            final Map<TaskId, Task> ret = stateUpdater.tasks().stream().collect(Collectors.toMap(Task::id, x -> x));
            ret.putAll(tasks.allTasksPerId());
            ret.putAll(tasks.pendingTasksToInit().stream().collect(Collectors.toMap(Task::id, x -> x)));
            return ret;
        } else {
            return tasks.allTasksPerId();
        }
    }
    /**
     * Returns tasks owned by the stream thread. With state updater disabled, these are all tasks. With
     * state updater enabled, this does not return any tasks currently owned by the state updater.
     *
     * TODO: after we complete switching to state updater, we could rename this function as allRunningTasks
     * to be differentiated from allTasks including running and restoring tasks
     */
    Map<TaskId, Task> allOwnedTasks() {
        // not bothering with an unmodifiable map, since the tasks themselves are mutable, but
        // if any outside code modifies the map or the tasks, it would be a severe transgression.
        return tasks.allTasksPerId();
    }
Set<Task> readOnlyAllTasks() {
// not bothering with an unmodifiable map, since the tasks themselves are mutable, but
// if any outside code modifies the map or the tasks, it would be a severe transgression.
if (stateUpdater != null) {
final HashSet<Task> ret = new HashSet<>(stateUpdater.tasks());
ret.addAll(tasks.allTasks());
return Collections.unmodifiableSet(ret);
} else {
return Collections.unmodifiableSet(tasks.allTasks());
}
}
Map<TaskId, Task> notPausedTasks() {
return Collections.unmodifiableMap(tasks.allTasks()
.stream()
.filter(t -> !topologyMetadata.isPaused(t.id().topologyName()))
.collect(Collectors.toMap(Task::id, v -> v)));
}
    // All active tasks (running and restoring), keyed by task id.
    Map<TaskId, Task> activeTaskMap() {
        return activeTaskStream().collect(Collectors.toMap(Task::id, t -> t));
    }
    // All active tasks (running and restoring) as a list.
    List<Task> activeTaskIterable() {
        return activeTaskStream().collect(Collectors.toList());
    }
    // Active tasks owned by the stream thread (running only) as a list.
    List<Task> activeRunningTaskIterable() {
        return activeRunningTaskStream().collect(Collectors.toList());
    }
    private Stream<Task> activeTaskStream() {
        // with the state updater enabled, restoring active tasks live in the updater
        if (stateUpdater != null) {
            return Stream.concat(
                activeRunningTaskStream(),
                stateUpdater.tasks().stream().filter(Task::isActive)
            );
        }
        return activeRunningTaskStream();
    }
    private Stream<Task> activeRunningTaskStream() {
        return tasks.allTasks().stream().filter(Task::isActive);
    }
    // All standby tasks, keyed by task id.
    Map<TaskId, Task> standbyTaskMap() {
        return standbyTaskStream().collect(Collectors.toMap(Task::id, t -> t));
    }
    private List<Task> standbyTaskIterable() {
        return standbyTaskStream().collect(Collectors.toList());
    }
    private Stream<Task> standbyTaskStream() {
        final Stream<Task> standbyTasksInTaskRegistry = tasks.allTasks().stream().filter(t -> !t.isActive());
        // with the state updater enabled, standbys are owned by the updater
        if (stateUpdater != null) {
            return Stream.concat(
                stateUpdater.standbyTasks().stream(),
                standbyTasksInTaskRegistry
            );
        } else {
            return standbyTasksInTaskRegistry;
        }
    }
    // For testing only.
    int commitAll() {
        return commit(tasks.allTasks());
    }
    /**
     * Resumes polling in the main consumer for all partitions for which
     * the corresponding record queues have capacity (again).
     */
    public void resumePollingForPartitionsWithAvailableSpace() {
        for (final Task t: tasks.activeTasks()) {
            t.resumePollingForPartitionsWithAvailableSpace();
        }
    }
    /**
     * Fetches up-to-date lag information from the consumer.
     */
    public void updateLags() {
        for (final Task t: tasks.activeTasks()) {
            t.updateLags();
        }
    }
    /**
     * Wake-up any sleeping processing threads.
     */
    public void signalTaskExecutors() {
        if (schedulingTaskManager != null) {
            // Wake up sleeping task executors after every poll, in case there is processing or punctuation to-do.
            schedulingTaskManager.signalTaskExecutors();
        }
    }
    /**
     * Take records and add them to each respective task
     *
     * @param records Records, can be null
     */
    void addRecordsToTasks(final ConsumerRecords<byte[], byte[]> records) {
        for (final TopicPartition partition : records.partitions()) {
            final Task activeTask = getActiveTask(partition);
            activeTask.addRecords(partition, records.records(partition));
        }
    }
    /**
     * Update the next offsets for each task
     *
     * @param nextOffsets A map of offsets keyed by partition
     */
    void updateNextOffsets(final Map<TopicPartition, OffsetAndMetadata> nextOffsets) {
        for (final Map.Entry<TopicPartition, OffsetAndMetadata> entry : nextOffsets.entrySet()) {
            final Task activeTask = getActiveTask(entry.getKey());
            activeTask.updateNextOffsets(entry.getKey(), entry.getValue());
        }
    }
    // Starts (or checks) the task timeout for each affected task and hands it back to the
    // state updater; throws if a task's timeout has already been exceeded.
    void maybeInitTaskTimeoutsOrThrow(
        final Collection<TopicPartition> partitions,
        final TimeoutException timeoutException,
        final long nowMs
    ) {
        for (final TopicPartition partition : partitions) {
            final Task task = getActiveTask(partition);
            task.maybeInitTaskTimeoutOrThrow(nowMs, timeoutException);
            // NOTE(review): stateUpdater is used without a null check here, unlike elsewhere —
            // presumably this path is only reached with the state updater enabled; confirm.
            stateUpdater.add(task);
        }
    }
    // Looks up the active task that reads from the given partition; never returns null.
    private Task getActiveTask(final TopicPartition partition) {
        final Task activeTask = tasks.activeTasksForInputPartition(partition);
        if (activeTask == null) {
            log.error("Unable to locate active task for received-record partition {}. Current tasks: {}",
                partition, toString(">"));
            throw new NullPointerException("Task was unexpectedly missing for partition " + partition);
        }
        return activeTask;
    }
    /**
     * Locks the given tasks against the processing threads (no-op when processing threads are
     * disabled or the id set is empty). Blocks until all locks are acquired.
     */
    private void maybeLockTasks(final Set<TaskId> ids) {
        if (schedulingTaskManager != null && !ids.isEmpty()) {
            if (log.isDebugEnabled()) {
                log.debug("Locking tasks {}", ids.stream().map(TaskId::toString).collect(Collectors.joining(", ")));
            }
            boolean locked = false;
            while (!locked) {
                try {
                    schedulingTaskManager.lockTasks(ids).get();
                    locked = true;
                } catch (final InterruptedException e) {
                    // NOTE(review): interrupt status is deliberately not restored here — re-interrupting
                    // would make the next get() fail immediately and turn this wait into a busy loop;
                    // confirm this is the intended trade-off.
                    log.warn("Interrupted while waiting for tasks {} to be locked",
                        ids.stream().map(TaskId::toString).collect(Collectors.joining(",")));
                } catch (final ExecutionException e) {
                    log.info("Failed to lock tasks");
                    throw new RuntimeException(e);
                }
            }
        }
    }
    /**
     * Releases the processing-thread locks for the given tasks (no-op when processing threads
     * are disabled or the id set is empty).
     */
    private void maybeUnlockTasks(final Set<TaskId> ids) {
        if (schedulingTaskManager != null && !ids.isEmpty()) {
            if (log.isDebugEnabled()) {
                log.debug("Unlocking tasks {}", ids.stream().map(TaskId::toString).collect(Collectors.joining(", ")));
            }
            schedulingTaskManager.unlockTasks(ids);
        }
    }
    // Rethrows the first uncaught exception collected from the processing threads, if any.
    public void maybeThrowTaskExceptionsFromProcessingThreads() {
        if (schedulingTaskManager != null) {
            maybeThrowTaskExceptions(schedulingTaskManager.drainUncaughtExceptions());
        }
    }
    /**
     * @throws TaskMigratedException if committing offsets failed (non-EOS)
     *                               or if the task producer got fenced (EOS)
     * @throws TimeoutException if task.timeout.ms has been exceeded (non-EOS)
     * @throws TaskCorruptedException if committing offsets failed due to TimeoutException (EOS)
     * @return number of committed offsets, or -1 if we are in the middle of a rebalance and cannot commit
     */
    int commit(final Collection<Task> tasksToCommit) {
        int committed = 0;
        final Set<TaskId> ids =
            tasksToCommit.stream()
                         .map(Task::id)
                         .collect(Collectors.toSet());
        maybeLockTasks(ids);
        // We have to throw the first uncaught exception after locking the tasks, to not attempt to commit failure records.
        maybeThrowTaskExceptionsFromProcessingThreads();
        final Map<Task, Map<TopicPartition, OffsetAndMetadata>> consumedOffsetsAndMetadataPerTask = new HashMap<>();
        try {
            committed = commitTasksAndMaybeUpdateCommittableOffsets(tasksToCommit, consumedOffsetsAndMetadataPerTask);
        } catch (final TimeoutException timeoutException) {
            // start (or check) the task timeout for every task that participated in the commit
            consumedOffsetsAndMetadataPerTask
                .keySet()
                .forEach(t -> t.maybeInitTaskTimeoutOrThrow(time.milliseconds(), timeoutException));
        }
        // NOTE(review): not in a finally block — exceptions documented above propagate past the
        // unlock; confirm whether the locks are released elsewhere on those paths.
        maybeUnlockTasks(ids);
        return committed;
    }
/**
* @throws TaskMigratedException if committing offsets failed (non-EOS)
* or if the task producer got fenced (EOS)
*/
int maybeCommitActiveTasksPerUserRequested() {
if (rebalanceInProgress) {
return -1;
} else {
for (final Task task : activeRunningTaskIterable()) {
if (task.commitRequested() && task.commitNeeded()) {
return commit(activeRunningTaskIterable());
}
}
return 0;
}
}
private int commitTasksAndMaybeUpdateCommittableOffsets(final Collection<Task> tasksToCommit,
final Map<Task, Map<TopicPartition, OffsetAndMetadata>> consumedOffsetsAndMetadata) {
if (rebalanceInProgress) {
return -1;
} else {
return taskExecutor.commitTasksAndMaybeUpdateCommittableOffsets(tasksToCommit, consumedOffsetsAndMetadata);
}
}
public void updateTaskEndMetadata(final TopicPartition topicPartition, final Long offset) {
for (final Task task : tasks.activeTasks()) {
if (task instanceof StreamTask) {
if (task.inputPartitions().contains(topicPartition)) {
((StreamTask) task).updateEndOffsets(topicPartition, offset);
}
}
}
}
    /**
     * Handle any added or removed NamedTopologies. Check if any uncreated assigned tasks belong to a newly
     * added NamedTopology and create them if so, then close any tasks whose named topology no longer exists
     */
    void handleTopologyUpdates() {
        topologyMetadata.executeTopologyUpdatesAndBumpThreadVersion(
            this::createPendingTasks,
            this::maybeCloseTasksFromRemovedTopologies
        );
        if (topologyMetadata.isEmpty()) {
            log.info("Proactively unsubscribing from all topics due to empty topology");
            mainConsumer.unsubscribe();
        }
        topologyMetadata.maybeNotifyTopologyVersionListeners();
    }
    // Closes and cleans up every task whose named topology was removed, releasing its directory locks.
    void maybeCloseTasksFromRemovedTopologies(final Set<String> currentNamedTopologies) {
        try {
            final Set<Task> activeTasksToRemove = new TreeSet<>(Comparator.comparing(Task::id));
            final Set<Task> standbyTasksToRemove = new TreeSet<>(Comparator.comparing(Task::id));
            for (final Task task : tasks.allTasks()) {
                if (!currentNamedTopologies.contains(task.id().topologyName())) {
                    if (task.isActive()) {
                        activeTasksToRemove.add(task);
                    } else {
                        standbyTasksToRemove.add(task);
                    }
                }
            }
            final Set<Task> allTasksToRemove = union(HashSet::new, activeTasksToRemove, standbyTasksToRemove);
            closeAndCleanUpTasks(activeTasksToRemove, standbyTasksToRemove, true);
            releaseLockedDirectoriesForTasks(allTasksToRemove.stream().map(Task::id).collect(Collectors.toSet()));
        } catch (final Exception e) {
            // TODO KAFKA-12648: for now just swallow the exception to avoid interfering with the other topologies
            // that are running alongside, but eventually we should be able to rethrow up to the handler to inform
            // the user of an error in this named topology without killing the thread and delaying the others
            log.error("Caught the following exception while closing tasks from a removed topology:", e);
        }
    }
    // Creates any assigned-but-uncreated tasks that belong to a currently-known named topology.
    void createPendingTasks(final Set<String> currentNamedTopologies) {
        final Map<TaskId, Set<TopicPartition>> activeTasksToCreate = tasks.drainPendingActiveTasksForTopologies(currentNamedTopologies);
        final Map<TaskId, Set<TopicPartition>> standbyTasksToCreate = tasks.drainPendingStandbyTasksForTopologies(currentNamedTopologies);
        createNewTasks(activeTasksToCreate, standbyTasksToCreate);
    }
    /**
     * @throws TaskMigratedException if the task producer got fenced (EOS only)
     * @throws StreamsException if any task threw an exception while processing
     */
    int process(final int maxNumRecords, final Time time) {
        return taskExecutor.process(maxNumRecords, time);
    }
    // Records the processing-time ratio and buffer size metric for each running active task.
    void recordTaskProcessRatio(final long totalProcessLatencyMs, final long now) {
        for (final Task task : activeRunningTaskIterable()) {
            task.recordProcessTimeRatioAndBufferSize(totalProcessLatencyMs, now);
        }
    }
    /**
     * @throws TaskMigratedException if the task producer got fenced (EOS only)
     */
    int punctuate() {
        return taskExecutor.punctuate();
    }
    // Sends a best-effort delete-records request for all purgeable (fully consumed) repartition offsets.
    void maybePurgeCommittedRecords() {
        // we do not check any possible exceptions since none of them are fatal
        // that should cause the application to fail, and we will try delete with
        // newer offsets anyways.
        if (deleteRecordsResult == null || deleteRecordsResult.all().isDone()) {
            if (deleteRecordsResult != null && deleteRecordsResult.all().isCompletedExceptionally()) {
                log.debug("Previous delete-records request has failed: {}. Try sending the new request now",
                    deleteRecordsResult.lowWatermarks());
            }
            final Map<TopicPartition, RecordsToDelete> recordsToDelete = new HashMap<>();
            for (final Task task : activeRunningTaskIterable()) {
                for (final Map.Entry<TopicPartition, Long> entry : task.purgeableOffsets().entrySet()) {
                    recordsToDelete.put(entry.getKey(), RecordsToDelete.beforeOffset(entry.getValue()));
                }
            }
            if (!recordsToDelete.isEmpty()) {
                deleteRecordsResult = adminClient.deleteRecords(recordsToDelete);
                log.trace("Sent delete-records request: {}", recordsToDelete);
            }
        }
    }
    /**
     * Produces a string representation containing useful information about the TaskManager.
     * This is useful in debugging scenarios.
     *
     * @return A string representation of the TaskManager instance.
     */
    @Override
    public String toString() {
        return toString("");
    }
public String toString(final String indent) {
final StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("TaskManager\n");
stringBuilder.append(indent).append("\tMetadataState:\n");
stringBuilder.append(indent).append("\tTasks:\n");
for (final Task task : tasks.allTasks()) {
stringBuilder.append(indent)
.append("\t\t")
.append(task.id())
.append(" ")
.append(task.state())
.append(" ")
.append(task.getClass().getSimpleName())
.append('(').append(task.isActive() ? "active" : "standby").append(')');
}
return stringBuilder.toString();
}
    // Metrics of the producer(s) created by the active task creator.
    Map<MetricName, Metric> producerMetrics() {
        return activeTaskCreator.producerMetrics();
    }
    // Client ids of the producer(s) created by the active task creator.
    String producerClientIds() {
        return activeTaskCreator.producerClientIds();
    }
    // Read-only view of the task directories this instance currently holds locks for.
    Set<TaskId> lockedTaskDirectories() {
        return Collections.unmodifiableSet(lockedTaskDirectories);
    }
    /**
     * Records the exception as the first one seen, unless it is a TaskMigratedException
     * that the caller asked to ignore.
     */
    private void maybeSetFirstException(final boolean ignoreTaskMigrated,
                                        final RuntimeException exception,
                                        final AtomicReference<RuntimeException> firstException) {
        if (!ignoreTaskMigrated || !(exception instanceof TaskMigratedException)) {
            firstException.compareAndSet(null, exception);
        }
    }
private StreamsException maybeWrapTaskException(final RuntimeException exception, final TaskId taskId) {
if (exception instanceof StreamsException) {
final StreamsException streamsException = (StreamsException) exception;
streamsException.setTaskId(taskId);
return streamsException;
} else {
return new StreamsException(exception, taskId);
}
}
public static void executeAndMaybeSwallow(final boolean clean,
final Runnable runnable,
final java.util.function.Consumer<RuntimeException> actionIfClean,
final java.util.function.Consumer<RuntimeException> actionIfNotClean) {
try {
runnable.run();
} catch (final RuntimeException e) {
if (clean) {
actionIfClean.accept(e);
} else {
actionIfNotClean.accept(e);
}
}
}
    /**
     * Convenience overload: rethrows the exception when clean, logs-and-swallows it when unclean.
     *
     * @param name short description of the action, used in the swallow log message
     * @param log  logger to report swallowed exceptions to
     */
    public static void executeAndMaybeSwallow(final boolean clean,
                                              final Runnable runnable,
                                              final String name,
                                              final Logger log) {
        executeAndMaybeSwallow(
            clean,
            runnable,
            e -> {
                throw e;
            },
            e -> log.debug("Ignoring error in unclean {}", name));
    }
    // True if any active task still has to be initialized or restored before processing can start.
    boolean needsInitializationOrRestoration() {
        return activeTaskStream().anyMatch(Task::needsInitializationOrRestoration);
    }
    // for testing only
    void addTask(final Task task) {
        tasks.addTask(task);
    }
private boolean canTryInitializeTask(final TaskId taskId, final long nowMs) {
return !taskIdToBackoffRecord.containsKey(taskId) || taskIdToBackoffRecord.get(taskId).canAttempt(nowMs);
}
private void updateOrCreateBackoffRecord(final TaskId taskId, final long nowMs) {
if (taskIdToBackoffRecord.containsKey(taskId)) {
taskIdToBackoffRecord.get(taskId).recordAttempt(nowMs);
} else {
taskIdToBackoffRecord.put(taskId, new BackoffRecord(nowMs));
}
}
public static | TaskManager |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/internal/bigintegers/BigIntegers_assertNotEqual_Test.java | {
"start": 1495,
"end": 3406
} | class ____ extends BigIntegersBaseTest {
@Test
void should_fail_if_actual_is_null() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> numbers.assertNotEqual(someInfo(), null, ONE))
.withMessage(actualIsNull());
}
@Test
void should_pass_if_big_integers_are_not_equal() {
numbers.assertNotEqual(someInfo(), ONE, TEN);
}
@Test
void should_fail_if_big_integers_are_equal() {
AssertionInfo info = someInfo();
Throwable error = catchThrowable(() -> numbers.assertNotEqual(info, ONE, ONE));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info, shouldNotBeEqual(ONE, ONE));
}
@Test
void should_fail_if_actual_is_null_whatever_custom_comparison_strategy_is() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> numbersWithComparatorComparisonStrategy.assertNotEqual(someInfo(),
null,
ONE))
.withMessage(actualIsNull());
}
@Test
void should_pass_if_big_integers_are_not_equal_according_to_custom_comparison_strategy() {
numbersWithComparatorComparisonStrategy.assertNotEqual(someInfo(), TEN, ONE);
}
@Test
void should_fail_if_big_integers_are_equal_according_to_custom_comparison_strategy() {
AssertionInfo info = someInfo();
Throwable error = catchThrowable(() -> numbersWithComparatorComparisonStrategy.assertNotEqual(info, ONE, ONE));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info, shouldNotBeEqual(ONE, ONE, comparatorComparisonStrategy));
}
}
| BigIntegers_assertNotEqual_Test |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/BootstrapUtilsTests.java | {
"start": 7710,
"end": 7783
} | class ____ extends DefaultTestContextBootstrapper {}
static | FooBootstrapper |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.