language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
apache__camel
|
dsl/camel-jbang/camel-jbang-it/src/test/java/org/apache/camel/dsl/jbang/it/support/JiraUtil.java
|
{
"start": 1379,
"end": 3006
}
|
class ____ {

    private static final Logger LOGGER = LoggerFactory.getLogger(JiraUtil.class);

    private static final String ISSUE_ENDPOINT
            = "https://issues.apache.org/jira/rest/api/latest/issue/%s?fields=resolution,fixVersions";

    private static final HttpClient httpClient = HttpClient.newHttpClient();

    // Resolution names that count as "solved" for test-gating purposes.
    private static final List<String> solvedStatuses = List.of("Fixed");

    /**
     * Checks whether a Jira issue is solved in (or before) the given version.
     *
     * @param issue     the Jira issue key, e.g. {@code CAMEL-12345}
     * @param inVersion the version the test suite is running against
     * @return {@code true} if the issue is resolved with a solved status (e.g. "Fixed")
     *         AND has a fix version already reached by {@code inVersion}; also
     *         {@code true} when the Jira REST call fails, so the test still runs
     */
    public static boolean isIssueSolved(final String issue, final String inVersion) {
        try (InputStream response = httpClient.send(HttpRequest
                .newBuilder(new URI(String.format(ISSUE_ENDPOINT, issue)))
                .timeout(Duration.ofSeconds(10))
                .GET()
                .build(),
                HttpResponse.BodyHandlers.ofInputStream()).body()) {
            final Map fields = (Map) new ObjectMapper()
                    .readValue(response, Map.class).get("fields");
            // BUGFIX: previously map(...).isPresent() was used, which is true for ANY
            // non-null resolution and ignored whether the name was actually "Fixed".
            final boolean resolved = Optional.ofNullable(fields.get("resolution"))
                    .map(r -> solvedStatuses.contains(((Map) r).get("name")))
                    .orElse(false);
            // BUGFIX: previously Optional.get() was called, which threw an uncaught
            // NoSuchElementException when "fixVersions" was absent; a missing fix
            // version list now simply means "not solved in this version".
            final boolean fixedInVersion = Optional.ofNullable(fields.get("fixVersions"))
                    .map(f -> ((List) f).stream().map(fv -> ((Map) fv).get("name"))
                            .anyMatch(fv -> VersionHelper.isGE(inVersion, (String) fv)))
                    .orElse(false);
            return resolved && fixedInVersion;
        } catch (IOException | InterruptedException | URISyntaxException e) {
            if (e instanceof InterruptedException) {
                // restore the interrupt flag swallowed by the catch
                Thread.currentThread().interrupt();
            }
            LOGGER.warn("unable to verify Jira issue", e);
            return true; //consider it as solved, so ready to be tested
        }
    }
}
|
JiraUtil
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/PluginsVisitFullBindingGraphTest.java
|
{
"start": 1240,
"end": 1561
}
|
class ____ {
private static final Source MODULE_WITHOUT_ERRORS =
CompilerTests.javaSource(
"test.ModuleWithoutErrors",
"package test;",
"",
"import dagger.Binds;",
"import dagger.Module;",
"",
"@Module",
"
|
PluginsVisitFullBindingGraphTest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/jpa/compliance/CriteriaGetCorrelatedJoinsTest.java
|
{
"start": 977,
"end": 2038
}
|
class ____ {
// JPA-compliance check for Subquery#getCorrelatedJoins: the returned set must be
// empty (not null) before any correlation, and must reflect each correlate(...) call.
@Test
public void testGetCorrelatedJoins(EntityManagerFactoryScope scope) {
scope.inEntityManager(
entityManager -> {
final CriteriaBuilder criteriaBuilder = entityManager.getCriteriaBuilder();
final CriteriaQuery<Person> query = criteriaBuilder.createQuery( Person.class );
final Root<Person> person = query.from( Person.class );
query.select( person );
final Subquery<Address> subquery = query.subquery( Address.class );
// Before correlating anything: non-null but empty set.
Set<Join<?, ?>> correlatedJoins = subquery.getCorrelatedJoins();
assertNotNull( correlatedJoins );
assertEquals( 0, correlatedJoins.size() );
// Correlate the "addresses" collection join into the subquery.
final Join<Person, Address> sqo = subquery
.correlate( person.join( person.getModel().getCollection( "addresses", Address.class ) ) );
subquery.select( sqo );
// After one correlation: exactly one correlated join is reported.
correlatedJoins = subquery.getCorrelatedJoins();
assertNotNull( correlatedJoins );
assertEquals( 1, correlatedJoins.size() );
}
);
}
@Entity(name = "Person")
@Table(name = "PERSON_TABLE")
public static
|
CriteriaGetCorrelatedJoinsTest
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/BindsMethodValidationTest.java
|
{
"start": 2539,
"end": 2742
}
|
interface ____ {}");
Source fooImpl =
CompilerTests.javaSource(
"test.FooImpl", // Prevents formatting onto a single line
"package test;",
"",
"
|
Foo
|
java
|
junit-team__junit5
|
junit-platform-engine/src/main/java/org/junit/platform/engine/discovery/DiscoverySelectors.java
|
{
"start": 48916,
"end": 53010
}
|
class ____ not be null");
Preconditions.notNull(method, "Method must not be null");
return new NestedMethodSelector(enclosingClasses, nestedClass, method);
}
/**
* Create a {@code UniqueIdSelector} for the supplied {@link UniqueId}.
*
* @param uniqueId the {@code UniqueId} to select; never {@code null}
* @return a selector for the supplied unique ID; never {@code null}
* @see UniqueIdSelector
*/
public static UniqueIdSelector selectUniqueId(UniqueId uniqueId) {
Preconditions.notNull(uniqueId, "UniqueId must not be null");
return new UniqueIdSelector(uniqueId);
}
/**
* Create a {@code UniqueIdSelector} for the supplied unique ID.
*
* @param uniqueId the unique ID to select; never {@code null} or blank
* @return a selector for the parsed unique ID; never {@code null}
* @see UniqueIdSelector
*/
public static UniqueIdSelector selectUniqueId(String uniqueId) {
Preconditions.notBlank(uniqueId, "Unique ID must not be null or blank");
return new UniqueIdSelector(UniqueId.parse(uniqueId));
}
/**
* Create an {@code IterationSelector} for the supplied parent selector and
* iteration indices.
*
* @param parentSelector the parent selector to select iterations for; never
* {@code null}
* @param iterationIndices the iteration indices to select; never {@code null}
* or empty
* @return a selector for the given iterations of the parent selector; never {@code null}
* @since 1.9
* @see IterationSelector
*/
@API(status = MAINTAINED, since = "1.13.3")
public static IterationSelector selectIteration(DiscoverySelector parentSelector, int... iterationIndices) {
Preconditions.notNull(parentSelector, "Parent selector must not be null");
Preconditions.notEmpty(iterationIndices, "iteration indices must not be empty");
return new IterationSelector(parentSelector, iterationIndices);
}
/**
* Parse the supplied string representation of a {@link DiscoverySelectorIdentifier}.
*
* @param identifier the string representation of a {@code DiscoverySelectorIdentifier};
* never {@code null} or blank
* @return an {@link Optional} containing the corresponding {@link DiscoverySelector};
* never {@code null} but potentially empty
* @since 1.11
* @see DiscoverySelectorIdentifierParser
*/
@API(status = MAINTAINED, since = "1.13.3")
public static Optional<? extends DiscoverySelector> parse(String identifier) {
return DiscoverySelectorIdentifierParsers.parse(identifier);
}
/**
* Parse the supplied {@link DiscoverySelectorIdentifier}.
*
* @param identifier the {@code DiscoverySelectorIdentifier} to parse;
* never {@code null}
* @return an {@link Optional} containing the corresponding {@link DiscoverySelector};
* never {@code null} but potentially empty
* @since 1.11
* @see DiscoverySelectorIdentifierParser
*/
@API(status = MAINTAINED, since = "1.13.3")
public static Optional<? extends DiscoverySelector> parse(DiscoverySelectorIdentifier identifier) {
return DiscoverySelectorIdentifierParsers.parse(identifier);
}
/**
* Parse the supplied string representations of
* {@link DiscoverySelectorIdentifier DiscoverySelectorIdentifiers}.
*
* @param identifiers the string representations of
* {@code DiscoverySelectorIdentifiers} to parse; never {@code null}
* @return a stream of the corresponding {@link DiscoverySelector DiscoverySelectors};
* never {@code null} but potentially empty
* @since 1.11
* @see DiscoverySelectorIdentifierParser
*/
@API(status = MAINTAINED, since = "1.13.3")
public static Stream<? extends DiscoverySelector> parseAll(String... identifiers) {
return DiscoverySelectorIdentifierParsers.parseAll(identifiers);
}
/**
* Parse the supplied {@link DiscoverySelectorIdentifier
* DiscoverySelectorIdentifiers}.
*
* @param identifiers the {@code DiscoverySelectorIdentifiers} to parse;
* never {@code null}
* @return a stream of the corresponding {@link DiscoverySelector DiscoverySelectors};
* never {@code null} but potentially empty
* @since 1.11
* @see DiscoverySelectorIdentifierParser
*/
@API(status = MAINTAINED, since = "1.13.3")
public static Stream<? extends DiscoverySelector> parseAll(Collection<DiscoverySelectorIdentifier> identifiers) {
return DiscoverySelectorIdentifierParsers.parseAll(identifiers);
}
}
|
must
|
java
|
apache__camel
|
components/camel-servlet/src/test/java/org/apache/camel/component/servlet/ServletSwitchingStatusCode204Test.java
|
{
"start": 1033,
"end": 2514
}
|
// Verifies Camel's automatic 200 -> 204 status-code switching for servlet endpoints:
// an empty body with no explicit status is switched to 204 No Content, while either a
// non-empty body or an explicitly set HTTP_RESPONSE_CODE suppresses the switch.
class ____ extends ServletCamelRouterTestSupport {
// Empty body, no explicit code -> Camel switches the response to 204.
@Test
public void testSwitchingNoBodyTo204() throws Exception {
WebRequest req = new GetMethodWebRequest(contextUrl + "/services/bar");
WebResponse response = query(req);
assertEquals(204, response.getResponseCode());
assertEquals("", response.getText());
}
// Non-empty body -> no switching; plain 200 is kept.
@Test
public void testNoSwitchingNoCode() throws Exception {
WebRequest req = new GetMethodWebRequest(contextUrl + "/services/foo");
WebResponse response = query(req);
assertEquals(200, response.getResponseCode());
assertEquals("No Content", response.getText());
}
// Empty body but an explicit 200 set on the exchange -> no switching either.
@Test
public void testNoSwitchingNoBody() throws Exception {
WebRequest req = new GetMethodWebRequest(contextUrl + "/services/foobar");
WebResponse response = query(req);
assertEquals(200, response.getResponseCode());
assertEquals("", response.getText());
}
@Override
protected RouteBuilder createRouteBuilder() throws Exception {
return new RouteBuilder() {
@Override
public void configure() throws Exception {
// bar: empty body, candidate for the 204 switch
from("servlet:/bar").setBody().constant("");
// foo: non-empty body, stays 200
from("servlet:/foo").setBody().constant("No Content");
// foobar: empty body but explicit 200, stays 200
from("servlet:/foobar").setHeader(Exchange.HTTP_RESPONSE_CODE, constant(200)).setBody().constant("");
}
};
}
}
|
ServletSwitchingStatusCode204Test
|
java
|
apache__logging-log4j2
|
log4j-core/src/main/java/org/apache/logging/log4j/core/jackson/MapEntry.java
|
{
"start": 1514,
"end": 3411
}
|
// Simple mutable key/value pair used for (de)serializing map entries with Jackson;
// both key and value are serialized as XML attributes.
class ____ {

    @JsonProperty
    @JacksonXmlProperty(isAttribute = true)
    private String key;

    @JsonProperty
    @JacksonXmlProperty(isAttribute = true)
    private String value;

    @JsonCreator
    public MapEntry(@JsonProperty("key") final String key, @JsonProperty("value") final String value) {
        this.setKey(key);
        this.setValue(value);
    }

    /** Equality is based on key and value, each compared null-safely. */
    @Override
    public boolean equals(final Object obj) {
        if (this == obj) {
            return true;
        }
        // instanceof is false for null, so no separate null check is needed.
        if (!(obj instanceof MapEntry)) {
            return false;
        }
        final MapEntry other = (MapEntry) obj;
        // Fully qualified to avoid touching the (not visible) import block.
        return java.util.Objects.equals(this.getKey(), other.getKey())
                && java.util.Objects.equals(this.getValue(), other.getValue());
    }

    public String getKey() {
        return this.key;
    }

    public String getValue() {
        return this.value;
    }

    /**
     * Consistent with {@link #equals(Object)}; Objects.hash(key, value) produces the
     * same value as the previous hand-rolled 31-based accumulation (null hashes as 0).
     */
    @Override
    public int hashCode() {
        return java.util.Objects.hash(this.getKey(), this.getValue());
    }

    public void setKey(final String key) {
        this.key = key;
    }

    public void setValue(final String value) {
        this.value = value;
    }

    /** Returns {@code key=value}; the previous leading Strings.EMPTY concat was a no-op. */
    @Override
    public String toString() {
        return this.getKey() + "=" + this.getValue();
    }
}
|
MapEntry
|
java
|
apache__dubbo
|
dubbo-config/dubbo-config-api/src/test/java/org/apache/dubbo/config/integration/multiple/exportmetadata/MultipleRegistryCenterExportMetadataIntegrationTest.java
|
{
"start": 1960,
"end": 7411
}
|
class ____ implements IntegrationTest {
private static final Logger logger =
LoggerFactory.getLogger(MultipleRegistryCenterExportMetadataIntegrationTest.class);
/**
* Define the provider application name.
*/
private static String PROVIDER_APPLICATION_NAME = "multiple-registry-center-export-metadata";
/**
* The name for getting the specified instance, which is loaded using SPI.
*/
private static String SPI_NAME = "multipleConfigCenterExportMetadata";
/**
* Define the protocol's name.
*/
private static String PROTOCOL_NAME = "injvm";
/**
* Define the {@link ServiceConfig} instance.
*/
private ServiceConfig<MultipleRegistryCenterExportMetadataService> serviceConfig;
/**
* The listener to record exported services
*/
private MultipleRegistryCenterExportMetadataServiceListener serviceListener;
/**
* The listener to record exported exporters.
*/
private MultipleRegistryCenterExportMetadataExporterListener exporterListener;
@BeforeEach
public void setUp() throws Exception {
logger.info(getClass().getSimpleName() + " testcase is beginning...");
DubboBootstrap.reset();
// initialize service config
serviceConfig = new ServiceConfig<>();
serviceConfig.setInterface(MultipleRegistryCenterExportMetadataService.class);
serviceConfig.setRef(new MultipleRegistryCenterExportMetadataServiceImpl());
serviceConfig.setAsync(false);
serviceConfig.setScope(SCOPE_LOCAL);
// initialize bootstrap
DubboBootstrap.getInstance()
.application(new ApplicationConfig(PROVIDER_APPLICATION_NAME))
.protocol(new ProtocolConfig(PROTOCOL_NAME))
.service(serviceConfig)
.registry(new RegistryConfig(ZookeeperRegistryCenterConfig.getConnectionAddress1()))
.registry(new RegistryConfig(ZookeeperRegistryCenterConfig.getConnectionAddress2()));
}
/**
* Define {@link ServiceListener}, {@link ExporterListener} and {@link Filter} for helping check.
* <p>Use SPI to load them before exporting.
* <p>After that, there are some checkpoints need to verify as follow:
* <ul>
* <li>There is nothing in ServiceListener or not</li>
* <li>There is nothing in ExporterListener or not</li>
* <li>ServiceConfig is exported or not</li>
* </ul>
*/
private void beforeExport() {
// ---------------initialize--------------- //
serviceListener = (MultipleRegistryCenterExportMetadataServiceListener)
ExtensionLoader.getExtensionLoader(ServiceListener.class).getExtension(SPI_NAME);
exporterListener = (MultipleRegistryCenterExportMetadataExporterListener)
ExtensionLoader.getExtensionLoader(ExporterListener.class).getExtension(SPI_NAME);
// ---------------checkpoints--------------- //
// There is nothing in ServiceListener
Assertions.assertTrue(serviceListener.getExportedServices().isEmpty());
// There is nothing in ExporterListener
Assertions.assertTrue(exporterListener.getExportedExporters().isEmpty());
// ServiceConfig isn't exported
Assertions.assertFalse(serviceConfig.isExported());
}
/**
* {@inheritDoc}
*/
@Test
@Override
public void integrate() {
beforeExport();
DubboBootstrap.getInstance().start();
afterExport();
}
/**
* There are some checkpoints need to check after exported as follow:
* <ul>
* <li>The metadata service is only one or not</li>
* <li>The exported service is MetadataService or not</li>
* <li>The MetadataService is exported or not</li>
* <li>The exported exporters are right or not</li>
* </ul>
*/
private void afterExport() {
// The metadata service is only one
Assertions.assertEquals(serviceListener.getExportedServices().size(), 1);
// The exported service is MetadataService
Assertions.assertEquals(
serviceListener.getExportedServices().get(0).getInterfaceClass(), MetadataService.class);
// The MetadataService is exported
Assertions.assertTrue(serviceListener.getExportedServices().get(0).isExported());
// FIXME there may be something wrong with the whole process of
// registering service-discovery-registry.
// So, all testcases may need to be modified.
// There are two exported exporters
// 1. Metadata Service exporter with Injvm protocol
// 2. MultipleRegistryCenterExportMetadataService exporter with Injvm protocol
Assertions.assertEquals(exporterListener.getExportedExporters().size(), 2);
List<Exporter<?>> injvmExporters = exporterListener.getExportedExporters();
// Make sure there two injvmExporters
Assertions.assertEquals(2, injvmExporters.size());
}
@AfterEach
public void tearDown() throws IOException {
DubboBootstrap.reset();
serviceConfig = null;
// The exported service has been unexported
Assertions.assertTrue(serviceListener.getExportedServices().isEmpty());
serviceListener = null;
logger.info(getClass().getSimpleName() + " testcase is ending...");
}
}
|
MultipleRegistryCenterExportMetadataIntegrationTest
|
java
|
apache__camel
|
core/camel-util/src/main/java/org/apache/camel/util/ObjectHelper.java
|
{
"start": 19851,
"end": 20235
}
|
class ____ used to
* load this class
*
* @param name the name of the resource to load
* @return the stream or null if it could not be loaded
*/
public static InputStream loadResourceAsStream(String name) {
return loadResourceAsStream(name, null);
}
/**
* Attempts to load the given resource as a stream using first the given
|
loader
|
java
|
hibernate__hibernate-orm
|
hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/query/AssociationToManyJoinQueryTest.java
|
{
"start": 1837,
"end": 3783
}
|
class ____ {
@Id
private Long id;
private String name;
@OneToMany
@AuditJoinTable(name = "entitya_onetomany_entityb_aud")
private Set<EntityB> bOneToMany = new HashSet<>();
@ManyToMany
@JoinTable(name = "entitya_manytomany_entityb")
private Set<EntityB> bManyToMany = new HashSet<>();
@OneToMany(mappedBy = "bidiAManyToOneOwning")
private Set<EntityC> bidiCOneToManyInverse = new HashSet<>();
@ManyToMany
@AuditJoinTable(name = "entitya_entityc_bidi_aud")
private Set<EntityC> bidiCManyToManyOwning = new HashSet<>();
@ManyToMany(mappedBy = "bidiAManyToManyOwning")
private Set<EntityC> bidiCManyToManyInverse = new HashSet<>();
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Set<EntityB> getbOneToMany() {
return bOneToMany;
}
public void setbOneToMany(Set<EntityB> bOneToMany) {
this.bOneToMany = bOneToMany;
}
public Set<EntityB> getbManyToMany() {
return bManyToMany;
}
public void setbManyToMany(Set<EntityB> bManyToMany) {
this.bManyToMany = bManyToMany;
}
public Set<EntityC> getBidiCOneToManyInverse() {
return bidiCOneToManyInverse;
}
public void setBidiCOneToManyInverse(Set<EntityC> bidiCOneToManyInverse) {
this.bidiCOneToManyInverse = bidiCOneToManyInverse;
}
public Set<EntityC> getBidiCManyToManyOwning() {
return bidiCManyToManyOwning;
}
public void setBidiCManyToManyOwning(Set<EntityC> bidiCManyToManyOwning) {
this.bidiCManyToManyOwning = bidiCManyToManyOwning;
}
public Set<EntityC> getBidiCManyToManyInverse() {
return bidiCManyToManyInverse;
}
public void setBidiCManyToManyInverse(Set<EntityC> bidiCManyToManyInverse) {
this.bidiCManyToManyInverse = bidiCManyToManyInverse;
}
}
@Entity(name = "EntityB")
@Audited
public static
|
EntityA
|
java
|
google__guava
|
guava/src/com/google/common/collect/Platform.java
|
{
"start": 1065,
"end": 5617
}
|
class ____ {
/** Returns the platform preferred implementation of a map based on a hash table. */
static <K extends @Nullable Object, V extends @Nullable Object>
Map<K, V> newHashMapWithExpectedSize(int expectedSize) {
return Maps.newHashMapWithExpectedSize(expectedSize);
}
/**
* Returns the platform preferred implementation of an insertion ordered map based on a hash
* table.
*/
static <K extends @Nullable Object, V extends @Nullable Object>
Map<K, V> newLinkedHashMapWithExpectedSize(int expectedSize) {
return Maps.newLinkedHashMapWithExpectedSize(expectedSize);
}
/** Returns the platform preferred implementation of a set based on a hash table. */
static <E extends @Nullable Object> Set<E> newHashSetWithExpectedSize(int expectedSize) {
return Sets.newHashSetWithExpectedSize(expectedSize);
}
/** Returns the platform preferred implementation of a thread-safe hash set. */
static <E> Set<E> newConcurrentHashSet() {
return ConcurrentHashMap.newKeySet();
}
/**
* Returns the platform preferred implementation of an insertion ordered set based on a hash
* table.
*/
static <E extends @Nullable Object> Set<E> newLinkedHashSetWithExpectedSize(int expectedSize) {
return Sets.newLinkedHashSetWithExpectedSize(expectedSize);
}
/**
* Returns the platform preferred map implementation that preserves insertion order when used only
* for insertions.
*/
static <K extends @Nullable Object, V extends @Nullable Object>
Map<K, V> preservesInsertionOrderOnPutsMap() {
return new LinkedHashMap<>();
}
/**
* Returns the platform preferred map implementation that preserves insertion order when used only
* for insertions, with a hint for how many entries to expect.
*/
static <K extends @Nullable Object, V extends @Nullable Object>
Map<K, V> preservesInsertionOrderOnPutsMapWithExpectedSize(int expectedSize) {
return Maps.newLinkedHashMapWithExpectedSize(expectedSize);
}
/**
* Returns the platform preferred set implementation that preserves insertion order when used only
* for insertions.
*/
static <E extends @Nullable Object> Set<E> preservesInsertionOrderOnAddsSet() {
return CompactHashSet.create();
}
/**
* Returns a new array of the given length with the same type as a reference array.
*
* @param reference any array of the desired type
* @param length the length of the new array
*/
/*
* The new array contains nulls, even if the old array did not. If we wanted to be accurate, we
* would declare a return type of `@Nullable T[]`. However, we've decided not to think too hard
* about arrays for now, as they're a mess. (We previously discussed this in the review of
* ObjectArrays, which is the main caller of this method.)
*/
static <T extends @Nullable Object> T[] newArray(T[] reference, int length) {
T[] empty = reference.length == 0 ? reference : Arrays.copyOf(reference, 0);
return Arrays.copyOf(empty, length);
}
/** Equivalent to Arrays.copyOfRange(source, from, to, arrayOfType.getClass()). */
/*
* Arrays are a mess from a nullness perspective, and Class instances for object-array types are
* even worse. For now, we just suppress and move on with our lives.
*
* - https://github.com/jspecify/jspecify/issues/65
*
* - https://github.com/jspecify/jdk/commit/71d826792b8c7ef95d492c50a274deab938f2552
*/
/*
* TODO(cpovirk): Is the unchecked cast avoidable? Would System.arraycopy be similarly fast (if
* likewise not type-checked)? Could our single caller do something different?
*/
@SuppressWarnings({"nullness", "unchecked"})
static <T extends @Nullable Object> T[] copy(Object[] source, int from, int to, T[] arrayOfType) {
return Arrays.copyOfRange(source, from, to, (Class<? extends T[]>) arrayOfType.getClass());
}
/**
* Configures the given map maker to use weak keys, if possible; does nothing otherwise (i.e., in
* GWT). This is sometimes acceptable, when only server-side code could generate enough volume
* that reclamation becomes important.
*/
@J2ktIncompatible
static MapMaker tryWeakKeys(MapMaker mapMaker) {
return mapMaker.weakKeys();
}
static <E extends Enum<E>> Class<E> getDeclaringClassOrObjectForJ2cl(E e) {
return e.getDeclaringClass();
}
static int reduceIterationsIfGwt(int iterations) {
return iterations;
}
static int reduceExponentIfGwt(int exponent) {
return exponent;
}
private Platform() {}
}
|
Platform
|
java
|
spring-projects__spring-framework
|
spring-messaging/src/main/java/org/springframework/messaging/rsocket/service/RSocketExchangeBeanRegistrationAotProcessor.java
|
{
"start": 1900,
"end": 2728
}
|
class ____ implements BeanRegistrationAotProcessor {
@Override
public @Nullable BeanRegistrationAotContribution processAheadOfTime(RegisteredBean registeredBean) {
// Collect every interface in the bean's hierarchy that declares at least one
// @RSocketExchange-annotated method; those interfaces need AOT hints for proxying.
Class<?> beanClass = registeredBean.getBeanClass();
Set<Class<?>> exchangeInterfaces = new HashSet<>();
// Search the full type hierarchy so meta-annotations and inherited methods are seen.
Search search = MergedAnnotations.search(TYPE_HIERARCHY);
for (Class<?> interfaceClass : ClassUtils.getAllInterfacesForClass(beanClass)) {
ReflectionUtils.doWithMethods(interfaceClass, method -> {
// Skip the annotation lookup once the interface is already registered.
if (!exchangeInterfaces.contains(interfaceClass) &&
search.from(method).isPresent(RSocketExchange.class)) {
exchangeInterfaces.add(interfaceClass);
}
});
}
if (!exchangeInterfaces.isEmpty()) {
return new AotContribution(exchangeInterfaces);
}
// No RSocket exchange interfaces -> nothing to contribute for this bean.
return null;
}
private static
|
RSocketExchangeBeanRegistrationAotProcessor
|
java
|
spring-projects__spring-boot
|
module/spring-boot-artemis/src/test/java/org/springframework/boot/artemis/autoconfigure/ArtemisAutoConfigurationTests.java
|
{
"start": 22132,
"end": 22860
}
|
// Test configuration contributing one JMS queue and one topic definition,
// used to verify that Artemis auto-configuration picks up user-declared destinations.
class ____ {
// Durable=false queue with a selector, bound under /queue/1.
@Bean
JMSQueueConfiguration sampleQueueConfiguration() {
JMSQueueConfigurationImpl jmsQueueConfiguration = new JMSQueueConfigurationImpl();
jmsQueueConfiguration.setName("sampleQueue");
jmsQueueConfiguration.setSelector("foo=bar");
jmsQueueConfiguration.setDurable(false);
jmsQueueConfiguration.setBindings("/queue/1");
return jmsQueueConfiguration;
}
// Topic bound under /topic/1.
@Bean
TopicConfiguration sampleTopicConfiguration() {
TopicConfigurationImpl topicConfiguration = new TopicConfigurationImpl();
topicConfiguration.setName("sampleTopic");
topicConfiguration.setBindings("/topic/1");
return topicConfiguration;
}
}
@Configuration(proxyBeanMethods = false)
static
|
DestinationConfiguration
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/vectors/TextEmbeddingQueryVectorBuilder.java
|
{
"start": 1744,
"end": 6391
}
|
/**
 * A {@code QueryVectorBuilder} that produces a query vector by running the supplied
 * text through a deployed text-embedding model via the coordinated inference action.
 */
class ____ implements QueryVectorBuilder {

    public static final String NAME = "text_embedding";
    public static final ParseField MODEL_TEXT = new ParseField("model_text");

    public static final ConstructingObjectParser<TextEmbeddingQueryVectorBuilder, Void> PARSER = new ConstructingObjectParser<>(
        NAME,
        args -> new TextEmbeddingQueryVectorBuilder((String) args[0], (String) args[1])
    );

    static {
        // model_id is optional since 8.18 (may be resolved elsewhere); model_text is required.
        PARSER.declareString(optionalConstructorArg(), TrainedModelConfig.MODEL_ID);
        PARSER.declareString(constructorArg(), MODEL_TEXT);
    }

    public static TextEmbeddingQueryVectorBuilder fromXContent(XContentParser parser) throws IOException {
        return PARSER.parse(parser, null);
    }

    private final String modelId;
    private final String modelText;

    public TextEmbeddingQueryVectorBuilder(String modelId, String modelText) {
        this.modelId = modelId;
        this.modelText = modelText;
    }

    public TextEmbeddingQueryVectorBuilder(StreamInput in) throws IOException {
        // 8.18+ streams allow a null model id; older streams always carried one.
        if (in.getTransportVersion().supports(TransportVersions.V_8_18_0)) {
            this.modelId = in.readOptionalString();
        } else {
            this.modelId = in.readString();
        }
        this.modelText = in.readString();
    }

    @Override
    public String getWriteableName() {
        return NAME;
    }

    @Override
    public TransportVersion getMinimalSupportedVersion() {
        return TransportVersions.V_8_7_0;
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        // Mirror of the StreamInput constructor: optional model id only on 8.18+ wires.
        if (out.getTransportVersion().supports(TransportVersions.V_8_18_0)) {
            out.writeOptionalString(modelId);
        } else {
            out.writeString(modelId);
        }
        out.writeString(modelText);
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        if (modelId != null) {
            builder.field(TrainedModelConfig.MODEL_ID.getPreferredName(), modelId);
        }
        builder.field(MODEL_TEXT.getPreferredName(), modelText);
        builder.endObject();
        return builder;
    }

    /**
     * Runs inference on {@code modelText} and passes the resulting embedding (as floats)
     * to the listener. All asynchronous failures are delivered via {@code listener.onFailure}.
     *
     * @throws IllegalArgumentException synchronously if no model id was provided
     */
    @Override
    public void buildVector(Client client, ActionListener<float[]> listener) {
        if (modelId == null) {
            throw new IllegalArgumentException("[model_id] must not be null.");
        }
        CoordinatedInferenceAction.Request inferRequest = CoordinatedInferenceAction.Request.forTextInput(
            modelId,
            List.of(modelText),
            TextEmbeddingConfigUpdate.EMPTY_INSTANCE,
            false,
            null
        );
        inferRequest.setHighPriority(true);
        inferRequest.setPrefixType(TrainedModelPrefixStrings.PrefixType.SEARCH);
        executeAsyncWithOrigin(client, ML_ORIGIN, CoordinatedInferenceAction.INSTANCE, inferRequest, ActionListener.wrap(response -> {
            if (response.getInferenceResults().isEmpty()) {
                // FIX: corrected grammar of the error message ("contain" -> "contains").
                listener.onFailure(new IllegalStateException("text embedding inference response contains no results"));
                return;
            }
            if (response.getInferenceResults().get(0) instanceof MlDenseEmbeddingResults textEmbeddingResults) {
                listener.onResponse(textEmbeddingResults.getInferenceAsFloat());
            } else if (response.getInferenceResults().get(0) instanceof WarningInferenceResults warning) {
                listener.onFailure(new IllegalStateException(warning.getWarning()));
            } else {
                // FIX: deliver the failure through the listener like the sibling branches,
                // instead of throwing from inside the async response callback.
                listener.onFailure(
                    new IllegalArgumentException(
                        "expected a result of type ["
                            + MlDenseEmbeddingResults.NAME
                            + "] received ["
                            + response.getInferenceResults().get(0).getWriteableName()
                            + "]. Is ["
                            + modelId
                            + "] a text embedding model?"
                    )
                );
            }
        }, listener::onFailure));
    }

    public String getModelText() {
        return modelText;
    }

    public String getModelId() {
        return modelId;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        TextEmbeddingQueryVectorBuilder that = (TextEmbeddingQueryVectorBuilder) o;
        return Objects.equals(modelId, that.modelId) && Objects.equals(modelText, that.modelText);
    }

    @Override
    public int hashCode() {
        return Objects.hash(modelId, modelText);
    }
}
|
TextEmbeddingQueryVectorBuilder
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LogAggregationStatus.java
|
{
"start": 960,
"end": 1668
}
|
// Lifecycle states of YARN log aggregation for an application.
enum ____ {
/** Log Aggregation is Disabled. */
DISABLED,
/** Log Aggregation has not started yet. */
NOT_START,
/** Log Aggregation is Running. */
RUNNING,
/** Log Aggregation is Running, but has failures in previous cycles. */
RUNNING_WITH_FAILURE,
/**
* Log Aggregation is Succeeded. All of the logs have been aggregated
* successfully.
*/
SUCCEEDED,
/**
* Log Aggregation has failed: aggregation finished, but at least one of the
* logs could not be aggregated.
*/
FAILED,
/**
* The application is finished, but the log aggregation status has not been updated
* for a long time, so it is treated as timed out.
* @see YarnConfiguration#LOG_AGGREGATION_STATUS_TIME_OUT_MS
*/
TIME_OUT
}
|
LogAggregationStatus
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestManifestCommitProtocol.java
|
{
"start": 16584,
"end": 40030
}
|
// Holds the tuple of objects needed to drive one committer lifecycle in a test:
// the job, its job/task-attempt contexts, the committer, and the job configuration.
class ____ {
private final Job job;
private final JobContext jContext;
private final TaskAttemptContext tContext;
private final ManifestCommitter committer;
private final Configuration conf;
// Path of the text output written for this job; null if nothing was written.
private Path writtenTextPath; // null if not written to
public JobData(Job job,
JobContext jContext,
TaskAttemptContext tContext,
ManifestCommitter committer) {
this.job = job;
this.jContext = jContext;
this.tContext = tContext;
this.committer = committer;
// Cache the job's configuration for convenient access in assertions.
conf = job.getConfiguration();
}
// Unique job id as assigned by the committer.
public String jobId() {
return committer.getJobUniqueId();
}
}
/**
* Create a new job. Sets the task attempt ID,
* and output dir; asks for a success marker.
* @return the new job
* @throws IOException failure
*/
public Job newJob() throws IOException {
return newJob(outputDir, getConfiguration(), attempt0);
}
/**
* Create a new job. Sets the task attempt ID,
* and output dir; asks for a success marker.
* Committer factory is set to manifest factory, so is independent
* of FS schema.
* @param dir dest dir
* @param configuration config to get the job from
* @param taskAttemptId task attempt
* @return the new job
* @throws IOException failure
*/
private Job newJob(Path dir, Configuration configuration,
String taskAttemptId) throws IOException {
Job job = Job.getInstance(configuration);
Configuration conf = job.getConfiguration();
conf.set(MRJobConfig.TASK_ATTEMPT_ID, taskAttemptId);
enableManifestCommitter(conf);
FileOutputFormat.setOutputPath(job, dir);
return job;
}
/**
* Start a job with a committer; optionally write the test data.
* Always register the job to be aborted (quietly) in teardown.
* This is, from an "OO-purity perspective" the wrong kind of method to
* do: it's setting things up, mixing functionality, registering for teardown.
* Its aim is simple though: a common body of code for starting work
* in test cases.
* @param writeText should the text be written?
* @return the job data 4-tuple
* @throws IOException IO problems
* @throws InterruptedException interruption during write
*/
protected JobData startJob(boolean writeText)
throws IOException, InterruptedException {
return startJob(localCommitterFactory, writeText);
}
/**
 * Start a job with a committer; optionally write the test data.
 * Always register the job to be aborted (quietly) in teardown.
 * This is, from an "OO-purity perspective" the wrong kind of method to
 * do: it's setting things up, mixing functionality, registering for teardown.
 * Its aim is simple though: a common body of code for starting work
 * in test cases.
 * @param factory the committer factory to use
 * @param writeText should the text be written?
 * @return the job data 4-tuple
 * @throws IOException IO problems
 * @throws InterruptedException interruption during write
 */
protected JobData startJob(CommitterFactory factory, boolean writeText)
    throws IOException, InterruptedException {
  Job job = newJob();
  Configuration conf = job.getConfiguration();
  // newJob() must already have switched the job to the manifest committer
  assertConfigurationUsesManifestCommitter(conf);
  // identify this run as task attempt 0 of application attempt 1
  conf.set(MRJobConfig.TASK_ATTEMPT_ID, attempt0);
  conf.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID, 1);
  JobContext jContext = new JobContextImpl(conf, taskAttempt0.getJobID());
  TaskAttemptContext tContext = new TaskAttemptContextImpl(conf,
      taskAttempt0);
  ManifestCommitter committer = factory.createCommitter(tContext);
  // setup
  JobData jobData = new JobData(job, jContext, tContext, committer);
  // job/task setup must complete before registering the teardown abort,
  // so the abort always has a fully initialized committer to work with
  setupJob(jobData);
  abortInTeardown(jobData);
  if (writeText) {
    // write output
    jobData.writtenTextPath = writeTextOutput(tContext);
  }
  return jobData;
}
/**
 * Run the job setup phase and then the task setup phase,
 * timing the job setup.
 * @param jobData job data
 * @throws IOException problems
 */
protected void setupJob(JobData jobData) throws IOException {
  describe("\nsetup job");
  final ManifestCommitter committer = jobData.committer;
  final JobContext jobContext = jobData.jContext;
  try (DurationInfo d = new DurationInfo(LOG,
      "setup job %s", jobContext.getJobID())) {
    committer.setupJob(jobContext);
  }
  // task setup follows job setup
  setupCommitter(committer, jobData.tContext);
  describe("setup complete");
}
/**
 * Run task setup for the given attempt, timing the operation.
 * @param committer committer under test
 * @param tContext task attempt context
 * @throws IOException setup failure
 */
private void setupCommitter(
    final ManifestCommitter committer,
    final TaskAttemptContext tContext) throws IOException {
  final TaskAttemptID attemptID = tContext.getTaskAttemptID();
  try (DurationInfo d = new DurationInfo(LOG, "setup task %s", attemptID)) {
    committer.setupTask(tContext);
  }
}
/**
 * Quietly abort the task attempt and then the job of a test run.
 * @param jobData job info
 */
protected void abortJobQuietly(JobData jobData) {
  abortJobQuietly(
      jobData.committer,
      jobData.jContext,
      jobData.tContext);
}
/**
 * Abort a job quietly: first the task attempt, then the whole job.
 * Any exception raised by either abort is logged and swallowed;
 * this is cleanup code, so failures must not mask the test outcome.
 * @param committer committer
 * @param jContext job context
 * @param tContext task context
 */
protected void abortJobQuietly(ManifestCommitter committer,
    JobContext jContext,
    TaskAttemptContext tContext) {
  describe("\naborting task");
  try {
    committer.abortTask(tContext);
  } catch (Exception ex) {
    // best-effort: log and continue to the job abort
    log().warn("Exception aborting task:", ex);
  }
  describe("\naborting job");
  try {
    committer.abortJob(jContext, JobStatus.State.KILLED);
  } catch (Exception ex) {
    // best-effort: log and swallow
    log().warn("Exception aborting job", ex);
  }
}
/**
 * Commit the task attempt and then the whole job,
 * timing the combined operation.
 * @param committer committer
 * @param jContext job context
 * @param tContext task context
 * @throws IOException problems
 */
protected void commitTaskAndJob(ManifestCommitter committer,
    JobContext jContext,
    TaskAttemptContext tContext) throws IOException {
  try (DurationInfo duration = new DurationInfo(LOG,
      "committing Job %s", jContext.getJobID())) {
    describe("\ncommitting task");
    committer.commitTask(tContext);
    describe("\ncommitting job");
    committer.commitJob(jContext);
    describe("commit complete\n");
  }
}
/**
 * Execute work as part of a test, creating a fresh job (without
 * test data) for it.
 * After the execution, {@link #abortJobQuietly(JobData)} is
 * called for abort/cleanup.
 * @param name name of work (for logging)
 * @param action action to execute
 * @throws Exception failure
 */
protected void executeWork(String name, ActionToTest action)
    throws Exception {
  final JobData jobData = startJob(false);
  executeWork(name, jobData, action);
}
/**
 * Execute work as part of a test, against the created job.
 * After the execution, {@link #abortJobQuietly(JobData)} is
 * always called for abort/cleanup, even when the action fails.
 * @param name name of work (for logging)
 * @param jobData job info
 * @param action action to execute
 * @throws Exception failure
 */
public void executeWork(String name,
    JobData jobData,
    ActionToTest action) throws Exception {
  try (DurationInfo duration = new DurationInfo(LOG, "Executing %s", name)) {
    action.exec(jobData.job, jobData.jContext, jobData.tContext,
        jobData.committer);
  } finally {
    // always clean up, whatever the action did
    abortJobQuietly(jobData);
  }
}
/**
 * Load a task manifest from the test FS.
 * @param path manifest path
 * @return the deserialized manifest
 * @throws IOException failure to load
 */
TaskManifest loadManifest(Path path) throws IOException {
  final FileSystem fs = getFileSystem();
  return TaskManifest.load(fs, path);
}
/**
 * Verify that task recovery is unsupported by these committers,
 * and that abort after the failed recovery attempt is safe.
 */
@Test
@SuppressWarnings("deprecation")
public void testRecoveryAndCleanup() throws Exception {
  describe("Test (unsupported) task recovery.");
  JobData jobData = startJob(true);
  TaskAttemptContext tContext = jobData.tContext;
  ManifestCommitter committer = jobData.committer;
  Assertions.assertThat(committer.getWorkPath())
      .as("null workPath in committer " + committer)
      .isNotNull();
  Assertions.assertThat(committer.getOutputPath())
      .as("null outputPath in committer " + committer)
      .isNotNull();
  // Commit the task.
  commitTask(committer, tContext);
  // load and log the manifest
  final TaskManifest manifest = loadManifest(
      committer.getTaskManifestPath(tContext));
  LOG.info("Manifest {}", manifest);
  // simulate a second application attempt (attempt ID 2) re-running
  // the same task attempt with a fresh committer
  Configuration conf2 = jobData.job.getConfiguration();
  conf2.set(MRJobConfig.TASK_ATTEMPT_ID, attempt0);
  conf2.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID, 2);
  JobContext jContext2 = new JobContextImpl(conf2, taskAttempt0.getJobID());
  TaskAttemptContext tContext2 = new TaskAttemptContextImpl(conf2,
      taskAttempt0);
  ManifestCommitter committer2 = createCommitter(tContext2);
  // NOTE(review): setupJob is passed the task attempt context, not
  // jContext2. Compiles because TaskAttemptContext extends JobContext,
  // but confirm it is intentional rather than a typo for jContext2.
  committer2.setupJob(tContext2);
  Assertions.assertThat(committer2.isRecoverySupported())
      .as("recoverySupported in " + committer2)
      .isFalse();
  // recovery must be rejected with an IOException mentioning "recover"
  intercept(IOException.class, "recover",
      () -> committer2.recoverTask(tContext2));
  // at this point, task attempt 0 has failed to recover
  // it should be abortable though. This will be a no-op as it already
  // committed
  describe("aborting task attempt 2; expect nothing to clean up");
  committer2.abortTask(tContext2);
  describe("Aborting job 2; expect pending commits to be aborted");
  committer2.abortJob(jContext2, JobStatus.State.KILLED);
}
/**
 * Assert that the task attempt directory of the given context
 * does not exist in its filesystem.
 * @param committer committer
 * @param context task context
 * @throws IOException IO failure.
 */
protected void assertTaskAttemptPathDoesNotExist(
    ManifestCommitter committer, TaskAttemptContext context)
    throws IOException {
  final Path attemptPath = committer.getTaskAttemptPath(context);
  final FileSystem fs =
      attemptPath.getFileSystem(context.getConfiguration());
  ContractTestUtils.assertPathDoesNotExist(fs,
      "task attempt dir",
      attemptPath);
}
/**
 * Assert that the job attempt directory of the given context
 * does not exist in its filesystem.
 * @param committer committer
 * @param context job context
 * @throws IOException IO failure.
 */
protected void assertJobAttemptPathDoesNotExist(
    ManifestCommitter committer, JobContext context)
    throws IOException {
  final Path attemptPath = committer.getJobAttemptPath(context);
  final FileSystem fs =
      attemptPath.getFileSystem(context.getConfiguration());
  ContractTestUtils.assertPathDoesNotExist(fs,
      "job attempt dir",
      attemptPath);
}
/**
 * Verify the output of the directory.
 * That includes the {@code part-m-00000-*}
 * file existence and contents, as well as optionally, the success marker.
 * @param dir directory to scan.
 * @param expectSuccessMarker check the success marker?
 * @param expectedJobId job ID, verified if non-empty and success data loaded
 * @throws Exception failure.
 * @return the success data; null when no marker was expected.
 */
private ManifestSuccessData validateContent(Path dir,
    boolean expectSuccessMarker,
    String expectedJobId) throws Exception {
  // log the whole directory tree for diagnostics
  lsR(getFileSystem(), dir, true);
  ManifestSuccessData successData;
  if (expectSuccessMarker) {
    successData = verifySuccessMarker(dir, expectedJobId);
  } else {
    successData = null;
  }
  Path expectedFile = getPart0000(dir);
  log().debug("Validating content in {}", expectedFile);
  // build the expected output of the job over the standard test
  // dataset; line order must match the job's output exactly
  StringBuilder expectedOutput = new StringBuilder();
  expectedOutput.append(KEY_1).append('\t').append(VAL_1).append("\n");
  expectedOutput.append(VAL_1).append("\n");
  expectedOutput.append(VAL_2).append("\n");
  expectedOutput.append(KEY_2).append("\n");
  expectedOutput.append(KEY_1).append("\n");
  expectedOutput.append(KEY_2).append('\t').append(VAL_2).append("\n");
  String output = readFile(expectedFile);
  Assertions.assertThat(output)
      .describedAs("Content of %s", expectedFile)
      .isEqualTo(expectedOutput.toString());
  return successData;
}
/**
 * Identify the single path under the directory which begins with the
 * {@code "part-m-00000"} sequence. There's some compensation for
 * eventual consistency here.
 * @param dir directory to scan
 * @return the full path
 * @throws FileNotFoundException the path is missing.
 * @throws Exception failure, including more than one matching file.
 */
protected Path getPart0000(final Path dir) throws Exception {
  final FileSystem fs = dir.getFileSystem(getConfiguration());
  FileStatus[] statuses = fs.listStatus(dir,
      path -> path.getName().startsWith(PART_00000));
  if (statuses.length != 1) {
    // not exactly one match; probe the exact path first so that a
    // missing file fails with a listing of the parent dir.
    ContractTestUtils.assertPathExists(fs, "Output file",
        new Path(dir, PART_00000));
    // the exact path exists, so there must have been multiple matches.
    // Previously this fell through and silently returned the first
    // entry; fail explicitly instead, listing every match.
    Assertions.assertThat(statuses)
        .describedAs("output files under %s matching %s", dir, PART_00000)
        .hasSize(1);
  }
  return statuses[0].getPath();
}
/**
 * Look for the part-file subdirectory of the output dir and verify
 * it holds the MapFile index and data entries.
 * @param fs filesystem
 * @param dir output dir
 * @throws Exception failure.
 */
private void validateMapFileOutputContent(
    FileSystem fs, Path dir) throws Exception {
  // map output is a directory with index and data files
  assertPathExists("Map output", dir);
  final Path mapDir = getPart0000(dir);
  assertPathExists("Map output", mapDir);
  assertIsDirectory(mapDir);
  final FileStatus[] entries = fs.listStatus(mapDir);
  Assertions.assertThat(entries)
      .as("No files found in " + mapDir)
      .isNotEmpty();
  assertPathExists("index file in " + mapDir,
      new Path(mapDir, MapFile.INDEX_FILE_NAME));
  assertPathExists("data file in " + mapDir,
      new Path(mapDir, MapFile.DATA_FILE_NAME));
}
/**
 * Full test of the expected lifecycle: start job, task, write, commit task,
 * commit job.
 * @throws Exception on a failure
 */
@Test
public void testCommitLifecycle() throws Exception {
  describe("Full test of the expected lifecycle:\n" +
      " start job, task, write, commit task, commit job.\n" +
      "Verify:\n" +
      "* no files are visible after task commit\n" +
      "* the expected file is visible after job commit\n");
  JobData jobData = startJob(false);
  JobContext jContext = jobData.jContext;
  TaskAttemptContext tContext = jobData.tContext;
  ManifestCommitter committer = jobData.committer;
  assertCommitterFactoryIsManifestCommitter(tContext,
      tContext.getWorkingDirectory());
  validateTaskAttemptWorkingDirectory(committer, tContext);
  // write output
  describe("1. Writing output");
  final Path textOutputPath = writeTextOutput(tContext);
  describe("Output written to %s", textOutputPath);
  describe("2. Committing task");
  Assertions.assertThat(committer.needsTaskCommit(tContext))
      .as("No files to commit were found by " + committer)
      .isTrue();
  commitTask(committer, tContext);
  // task commit must have produced a manifest listing exactly the one
  // file written, with no extra directories to create
  final TaskManifest taskManifest = requireNonNull(
      committer.getTaskAttemptCommittedManifest(), "committerTaskManifest");
  final String manifestJSON = taskManifest.toJson();
  LOG.info("Task manifest {}", manifestJSON);
  int filesCreated = 1;
  Assertions.assertThat(taskManifest.getFilesToCommit())
      .describedAs("Files to commit in task manifest %s", manifestJSON)
      .hasSize(filesCreated);
  Assertions.assertThat(taskManifest.getDestDirectories())
      .describedAs("Directories to create in task manifest %s",
          manifestJSON)
      .isEmpty();
  // this is only task commit; there MUST be no part- files in the dest dir
  try {
    RemoteIterators.foreach(getFileSystem().listFiles(outputDir, false),
        (status) ->
            Assertions.assertThat(status.getPath().toString())
                .as("task committed file to dest :" + status)
                .contains("part"));
  } catch (FileNotFoundException ignored) {
    // acceptable: the destination dir need not exist yet at this stage
    log().info("Outdir {} is not created by task commit phase ",
        outputDir);
  }
  describe("3. Committing job");
  commitJob(committer, jContext);
  // validate output
  describe("4. Validating content");
  String jobUniqueId = jobData.jobId();
  ManifestSuccessData successData = validateContent(outputDir,
      true,
      jobUniqueId);
  // look in the SUMMARY
  Assertions.assertThat(successData.getDiagnostics())
      .describedAs("Stage entry in SUCCESS")
      .containsEntry(STAGE, OP_STAGE_JOB_COMMIT);
  IOStatisticsSnapshot jobStats = successData.getIOStatistics();
  // manifest
  verifyStatisticCounterValue(jobStats,
      OP_LOAD_MANIFEST, 1);
  // the committed byte count must match the size of the single part file
  FileStatus st = getFileSystem().getFileStatus(getPart0000(outputDir));
  verifyStatisticCounterValue(jobStats,
      COMMITTER_FILES_COMMITTED_COUNT, filesCreated);
  verifyStatisticCounterValue(jobStats,
      COMMITTER_BYTES_COMMITTED_COUNT, st.getLen());
  // now load and examine the job report.
  // this MUST contain all the stats of the summary, plus timings on
  // job commit itself
  ManifestSuccessData report = loadReport(jobUniqueId, true);
  Map<String, String> diag = report.getDiagnostics();
  Assertions.assertThat(diag)
      .describedAs("Stage entry in report")
      .containsEntry(STAGE, OP_STAGE_JOB_COMMIT);
  IOStatisticsSnapshot reportStats = report.getIOStatistics();
  verifyStatisticCounterValue(reportStats,
      OP_LOAD_MANIFEST, 1);
  verifyStatisticCounterValue(reportStats,
      OP_STAGE_JOB_COMMIT, 1);
  verifyStatisticCounterValue(reportStats,
      COMMITTER_FILES_COMMITTED_COUNT, filesCreated);
  verifyStatisticCounterValue(reportStats,
      COMMITTER_BYTES_COMMITTED_COUNT, st.getLen());
}
/**
 * Load a job summary report from the local report dir and verify
 * its success flag.
 * @param jobUniqueId job ID
 * @param expectSuccess is the job expected to have succeeded.
 * @throws IOException failure to load
 * @return the report
 */
private ManifestSuccessData loadReport(String jobUniqueId,
    boolean expectSuccess) throws IOException {
  final File file = new File(getReportDir(),
      createJobSummaryFilename(jobUniqueId));
  final FileSystem localFs = FileSystem.getLocal(getConfiguration());
  ContractTestUtils.assertIsFile(localFs, new Path(file.toURI()));
  final ManifestSuccessData report =
      ManifestSuccessData.serializer().load(file);
  LOG.info("Report for job {}:\n{}", jobUniqueId, report.toJson());
  Assertions.assertThat(report.getSuccess())
      .describedAs("success flag in report")
      .isEqualTo(expectSuccess);
  return report;
}
/**
 * Repeated commit call after job commit.
 * Task commit is idempotent while the task attempt dir exists;
 * once the job has committed and cleaned up, a retried task commit
 * must fail with FileNotFoundException.
 */
@Test
public void testCommitterWithDuplicatedCommit() throws Exception {
  describe("Call a task then job commit twice;" +
      "expect the second task commit to fail.");
  JobData jobData = startJob(true);
  JobContext jContext = jobData.jContext;
  TaskAttemptContext tContext = jobData.tContext;
  ManifestCommitter committer = jobData.committer;
  // do commit
  describe("committing task");
  committer.commitTask(tContext);
  // repeated commit while TA dir exists fine/idempotent
  committer.commitTask(tContext);
  describe("committing job");
  committer.commitJob(jContext);
  describe("commit complete\n");
  describe("cleanup");
  committer.cleanupJob(jContext);
  // validate output
  validateContent(outputDir, shouldExpectSuccessMarker(),
      committer.getJobUniqueId());
  // commit task to fail on retry as task attempt dir doesn't exist
  describe("Attempting commit of the same task after job commit -expecting failure");
  expectFNFEonTaskCommit(committer, tContext);
}
/**
 * HADOOP-17258. If a second task attempt is committed, it
 * must succeed, and the output of the first TA, even if already
 * committed, MUST NOT be visible in the final output.
 * <p></p>
 * What's important is not just that only one TA must succeed,
 * but it must be the last one executed.
 */
@Test
public void testTwoTaskAttemptsCommit() throws Exception {
  describe("Commit two task attempts;" +
      " expect the second attempt to succeed.");
  JobData jobData = startJob(false);
  TaskAttemptContext tContext = jobData.tContext;
  ManifestCommitter committer = jobData.committer;
  // do commit
  describe("\ncommitting task");
  // write output for TA 1,
  Path outputTA1 = writeTextOutput(tContext);
  // speculatively execute committer 2.
  // jobconf with a different base to its parts.
  Configuration conf2 = jobData.conf;
  conf2.set("mapreduce.output.basename", "attempt2");
  String attempt2 = "attempt_" + jobId + "_m_000000_1";
  TaskAttemptID ta2 = TaskAttemptID.forName(attempt2);
  TaskAttemptContext tContext2 = new TaskAttemptContextImpl(
      conf2, ta2);
  ManifestCommitter committer2 = localCommitterFactory
      .createCommitter(tContext2);
  setupCommitter(committer2, tContext2);
  // verify working dirs are different
  Assertions.assertThat(committer.getWorkPath())
      .describedAs("Working dir of %s", committer)
      .isNotEqualTo(committer2.getWorkPath());
  // write output for TA 2,
  Path outputTA2 = writeTextOutput(tContext2);
  // verify the names are different.
  String name1 = outputTA1.getName();
  String name2 = outputTA2.getName();
  Assertions.assertThat(name1)
      .describedAs("name of task attempt output %s", outputTA1)
      .isNotEqualTo(name2);
  // commit task 1
  committer.commitTask(tContext);
  // then pretend that task1 didn't respond, so
  // commit task 2
  committer2.commitTask(tContext2);
  // and the job
  // NOTE(review): commitJob is handed the task attempt context here
  // (valid, as TaskAttemptContext extends JobContext) — confirm this
  // is intentional rather than jobData.jContext.
  committer2.commitJob(tContext);
  // validate output
  FileSystem fs = getFileSystem();
  ManifestSuccessData successData = validateSuccessFile(fs, outputDir,
      1,
      "");
  Assertions.assertThat(successData.getFilenames())
      .describedAs("Files committed")
      .hasSize(1);
  // only TA2's file may remain; TA1's committed output must be gone
  assertPathExists("attempt2 output", new Path(outputDir, name2));
  assertPathDoesNotExist("attempt1 output", new Path(outputDir, name1));
}
/**
 * Override point: is a success marker expected in the output directory
 * after job commit?
 * @return true by default.
 */
protected boolean shouldExpectSuccessMarker() {
  return true;
}
/*
 * Disabled test (kept as the commented-out code below): simulate a
 * failure on the first job commit; expect the second commit to succeed.
 */
/*@Test
public void testCommitterWithFailure() throws Exception {
describe("Fail the first job commit then retry");
JobData jobData = startJob(new FailingCommitterFactory(), true);
JobContext jContext = jobData.jContext;
TaskAttemptContext tContext = jobData.tContext;
ManifestCommitter committer = jobData.committer;
// do commit
committer.commitTask(tContext);
// now fail job
expectSimulatedFailureOnJobCommit(jContext, committer);
commitJob(committer, jContext);
// but the data got there, due to the order of operations.
validateContent(outDir, shouldExpectSuccessMarker(),
committer.getUUID());
expectJobCommitToFail(jContext, committer);
}
*/
/**
 * Override point: the failure expected on the attempt to commit a failed
 * job.
 * @param jContext job context
 * @param committer committer
 * @throws Exception any unexpected failure.
 */
protected void expectJobCommitToFail(JobContext jContext,
    ManifestCommitter committer) throws Exception {
  // next attempt will fail as there is no longer a directory to commit
  expectJobCommitFailure(jContext, committer,
      FileNotFoundException.class);
}
/**
* Expect a job commit operation to fail with a specific exception.
* @param jContext job context
* @param committer committer
* @param clazz
|
JobData
|
java
|
apache__camel
|
core/camel-core-xml/src/main/java/org/apache/camel/core/xml/AbstractCamelContextFactoryBean.java
|
{
"start": 7726,
"end": 8384
}
|
class ____<T extends ModelCamelContext> extends IdentifiedType
implements RouteTemplateContainer, RouteConfigurationContainer, RouteContainer, RestContainer,
TemplatedRouteContainer {
private static final Logger LOG = LoggerFactory.getLogger(AbstractCamelContextFactoryBean.class);
@XmlTransient
private final List<RoutesBuilder> builders = new ArrayList<>();
@XmlTransient
private final ClassLoader contextClassLoaderOnStart;
@XmlTransient
private final AtomicBoolean routesSetupDone = new AtomicBoolean();
public AbstractCamelContextFactoryBean() {
// Keep track of the
|
AbstractCamelContextFactoryBean
|
java
|
apache__flink
|
flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/data/util/DataFormatConverters.java
|
{
"start": 21084,
"end": 22703
}
|
class ____<Internal, External> implements Serializable {
private static final long serialVersionUID = 1L;
/**
* Converts a external(Java) data format to its internal equivalent while automatically
* handling nulls.
*/
public final Internal toInternal(External value) {
return value == null ? null : toInternalImpl(value);
}
/** Converts a non-null external(Java) data format to its internal equivalent. */
abstract Internal toInternalImpl(External value);
/**
* Convert a internal data format to its external(Java) equivalent while automatically
* handling nulls.
*/
public final External toExternal(Internal value) {
return value == null ? null : toExternalImpl(value);
}
/** Convert a non-null internal data format to its external(Java) equivalent. */
abstract External toExternalImpl(Internal value);
/**
* Given a internalType row, convert the value at column `column` to its external(Java)
* equivalent. This method will only be called on non-null columns.
*/
abstract External toExternalImpl(RowData row, int column);
/**
* Given a internalType row, convert the value at column `column` to its external(Java)
* equivalent.
*/
public final External toExternal(RowData row, int column) {
return row.isNullAt(column) ? null : toExternalImpl(row, column);
}
}
/** Identity converter. */
public abstract static
|
DataFormatConverter
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/deser/creators/NullValueViaCreatorTest.java
|
{
"start": 369,
"end": 451
}
|
class ____
extends DatabindTestUtil
{
protected static
|
NullValueViaCreatorTest
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DestinationParam.java
|
{
"start": 931,
"end": 1794
}
|
class ____ extends StringParam {
/** Parameter name. */
public static final String NAME = "destination";
/** Default parameter value. */
public static final String DEFAULT = "";
private static final Domain DOMAIN = new Domain(NAME, null);
private static String validate(final String str) {
if (str == null || str.equals(DEFAULT)) {
return null;
}
if (!str.startsWith(Path.SEPARATOR)) {
throw new IllegalArgumentException("Invalid parameter value: " + NAME
+ " = \"" + str + "\" is not an absolute path.");
}
return new Path(str).toUri().getPath();
}
/**
* Constructor.
* @param str a string representation of the parameter value.
*/
public DestinationParam(final String str) {
super(DOMAIN, validate(str));
}
@Override
public String getName() {
return NAME;
}
}
|
DestinationParam
|
java
|
spring-projects__spring-security
|
web/src/main/java/org/springframework/security/web/FilterInvocation.java
|
{
"start": 2089,
"end": 5115
}
|
class ____ {
static final FilterChain DUMMY_CHAIN = (req, res) -> {
throw new UnsupportedOperationException("Dummy filter chain");
};
private final FilterChain chain;
private HttpServletRequest request;
private @Nullable HttpServletResponse response;
public FilterInvocation(ServletRequest request, ServletResponse response, FilterChain chain) {
Assert.isTrue(request != null && response != null && chain != null, "Cannot pass null values to constructor");
this.request = (HttpServletRequest) request;
this.response = (HttpServletResponse) response;
this.chain = chain;
}
public FilterInvocation(String servletPath, String method) {
this(null, servletPath, method);
}
public FilterInvocation(@Nullable String contextPath, String servletPath, String method) {
this(contextPath, servletPath, method, null);
}
public FilterInvocation(@Nullable String contextPath, String servletPath, @Nullable String method,
@Nullable ServletContext servletContext) {
this(contextPath, servletPath, null, null, method, servletContext);
}
public FilterInvocation(String contextPath, String servletPath, String pathInfo, String query, String method) {
this(contextPath, servletPath, pathInfo, query, method, null);
}
public FilterInvocation(@Nullable String contextPath, String servletPath, @Nullable String pathInfo,
@Nullable String query, @Nullable String method, @Nullable ServletContext servletContext) {
DummyRequest request = new DummyRequest();
contextPath = (contextPath != null) ? contextPath : "/cp";
request.setContextPath(contextPath);
request.setServletPath(servletPath);
request.setRequestURI(contextPath + servletPath + ((pathInfo != null) ? pathInfo : ""));
request.setPathInfo(pathInfo);
request.setQueryString(query);
request.setMethod(method);
request.setServletContext(servletContext);
this.request = request;
this.chain = DUMMY_CHAIN;
}
public FilterChain getChain() {
return this.chain;
}
/**
* Indicates the URL that the user agent used for this request.
* <p>
* @return the full URL of this request
*/
public String getFullRequestUrl() {
return UrlUtils.buildFullRequestUrl(this.request);
}
public HttpServletRequest getHttpRequest() {
return this.request;
}
public @Nullable HttpServletResponse getHttpResponse() {
return this.response;
}
/**
* Obtains the web application-specific fragment of the URL.
* @return the URL, excluding any server name, context path or servlet path
*/
public String getRequestUrl() {
return UrlUtils.buildRequestUrl(this.request);
}
public HttpServletRequest getRequest() {
return getHttpRequest();
}
public @Nullable HttpServletResponse getResponse() {
return getHttpResponse();
}
@Override
public String toString() {
if (!StringUtils.hasLength(this.request.getMethod())) {
return "filter invocation [" + getRequestUrl() + "]";
}
else {
return "filter invocation [" + this.request.getMethod() + " " + getRequestUrl() + "]";
}
}
static
|
FilterInvocation
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/processor/SplitAggregateInOutTest.java
|
{
"start": 1262,
"end": 2754
}
|
class ____ extends ContextTestSupport {
private static final Logger LOG = LoggerFactory.getLogger(SplitAggregateInOutTest.class);
private final String expectedBody = "Response[(id=1,item=A);(id=2,item=B);(id=3,item=C)]";
@Test
public void testSplitAndAggregateInOut() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived(expectedBody);
// use requestBody as its InOut
Object out = template.requestBody("direct:start", "A@B@C");
assertEquals(expectedBody, out);
LOG.debug("Response to caller: {}", out);
assertMockEndpointsSatisfied();
}
@Override
protected Registry createCamelRegistry() throws Exception {
Registry jndi = super.createCamelRegistry();
jndi.bind("MyOrderService", new MyOrderService());
return jndi;
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
// START SNIPPET: e1
// this routes starts from the direct:start endpoint
// the body is then split based on @ separator
// the splitter in Camel supports InOut as well and for that we
// need
// to be able to aggregate what response we need to send back,
// so we provide our
// own strategy with the
|
SplitAggregateInOutTest
|
java
|
spring-projects__spring-framework
|
spring-core/src/test/java/org/springframework/core/annotation/MergedAnnotationsTests.java
|
{
"start": 139987,
"end": 140160
}
|
class ____ {
}
@ComponentScan(excludeFilters = { @Filter(pattern = "*Foo"),
@Filter(pattern = "*Bar") })
static
|
TransitiveImplicitAliasesForAliasPairTestConfigurationClass
|
java
|
apache__rocketmq
|
tools/src/test/java/org/apache/rocketmq/tools/command/topic/UpdateTopicPermSubCommandTest.java
|
{
"start": 1116,
"end": 1993
}
|
class ____ {
@Test
public void testExecute() {
UpdateTopicPermSubCommand cmd = new UpdateTopicPermSubCommand();
Options options = ServerUtil.buildCommandlineOptions(new Options());
String[] subargs = new String[] {"-b 127.0.0.1:10911", "-c default-cluster", "-t unit-test", "-p 6"};
final CommandLine commandLine =
ServerUtil.parseCmdLine("mqadmin " + cmd.commandName(), subargs,
cmd.buildCommandlineOptions(options), new DefaultParser());
assertThat(commandLine.getOptionValue('b').trim()).isEqualTo("127.0.0.1:10911");
assertThat(commandLine.getOptionValue('c').trim()).isEqualTo("default-cluster");
assertThat(commandLine.getOptionValue('t').trim()).isEqualTo("unit-test");
assertThat(commandLine.getOptionValue('p').trim()).isEqualTo("6");
}
}
|
UpdateTopicPermSubCommandTest
|
java
|
mockito__mockito
|
mockito-core/src/main/java/org/mockito/internal/configuration/InjectingAnnotationEngine.java
|
{
"start": 693,
"end": 1122
}
|
class ____ implements AnnotationEngine {
private final AnnotationEngine delegate = new IndependentAnnotationEngine();
private final AnnotationEngine spyAnnotationEngine = new SpyAnnotationEngine();
/**
* Process the fields of the test instance and create Mocks, Spies, Captors and inject them on fields
* annotated @InjectMocks.
*
* <p>
* This code process the test
|
InjectingAnnotationEngine
|
java
|
hibernate__hibernate-orm
|
hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/strategy/RevisionEndTimestampJoinedInheritanceTest.java
|
{
"start": 5663,
"end": 6662
}
|
class ____ extends Employee {
private Integer hourlyRate;
Contractor() {
}
Contractor(String name, Integer hourlyRate) {
super( name );
this.hourlyRate = hourlyRate;
}
public Integer getHourlyRate() {
return hourlyRate;
}
public void setHourlyRate(Integer hourlyRate) {
this.hourlyRate = hourlyRate;
}
@Override
public int hashCode() {
int result = super.hashCode();
result = result * 31 + ( hourlyRate != null ? hourlyRate.hashCode() : 0 );
return result;
}
@Override
public boolean equals(Object object) {
if ( this == object ) {
return true;
}
if ( object == null || !( object instanceof Contractor ) ) {
return false;
}
if ( !super.equals( object ) ) {
return false;
}
Contractor that = (Contractor) object;
return !( hourlyRate != null ? !hourlyRate.equals( that.hourlyRate ) : that.hourlyRate != null );
}
}
@Audited
@Entity(name = "Executive")
@DiscriminatorValue("EXEC")
public static
|
Contractor
|
java
|
quarkusio__quarkus
|
extensions/resteasy-classic/resteasy/deployment/src/test/java/io/quarkus/resteasy/test/security/authzpolicy/DenyAllUnannotatedWithAuthzPolicyTest.java
|
{
"start": 476,
"end": 2239
}
|
class ____ {
@RegisterExtension
static QuarkusUnitTest runner = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(ForbidViewerClassLevelPolicyResource.class, ForbidViewerMethodLevelPolicyResource.class,
ForbidAllButViewerAuthorizationPolicy.class, TestIdentityProvider.class,
TestIdentityController.class)
.addAsResource(new StringAsset("quarkus.security.jaxrs.deny-unannotated-endpoints=true\n"),
"application.properties"));
@BeforeAll
public static void setupUsers() {
TestIdentityController.resetRoles()
.add("admin", "admin", "admin", "viewer")
.add("user", "user")
.add("viewer", "viewer", "viewer");
}
@Test
public void testEndpointWithoutAuthorizationPolicyIsDenied() {
RestAssured.given().auth().preemptive().basic("admin", "admin").get("/forbid-viewer-method-level-policy/unsecured")
.then().statusCode(403);
RestAssured.given().auth().preemptive().basic("viewer", "viewer").get("/forbid-viewer-method-level-policy/unsecured")
.then().statusCode(403);
}
@Test
public void testEndpointWithAuthorizationPolicyIsNotDenied() {
// test not denied for authorized
RestAssured.given().auth().preemptive().basic("admin", "admin").get("/forbid-viewer-method-level-policy")
.then().statusCode(403);
RestAssured.given().auth().preemptive().basic("viewer", "viewer").get("/forbid-viewer-method-level-policy")
.then().statusCode(200).body(Matchers.equalTo("viewer"));
}
}
|
DenyAllUnannotatedWithAuthzPolicyTest
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/BindsMethodValidationTest.java
|
{
"start": 13749,
"end": 14257
}
|
interface ____ {",
" @Binds Map<K, V> bind(@TestQualifier Map<K, V> impl);",
"",
" @Provides",
" @TestQualifier",
" static Map<K, V> provideMap() {",
" return (Map<K, V>) null;",
" }",
"}");
Source qualifier =
CompilerTests.javaSource(
"test.TestQualifier",
"package test;",
"import javax.inject.Qualifier;",
"",
"@Qualifier @
|
TestModule
|
java
|
spring-projects__spring-framework
|
spring-tx/src/test/java/org/springframework/transaction/support/TestTransactionManager.java
|
{
"start": 896,
"end": 2773
}
|
class ____ extends AbstractPlatformTransactionManager {
private static final Object TRANSACTION = "transaction";
private final boolean existingTransaction;
private final boolean canCreateTransaction;
protected boolean begin = false;
protected boolean commit = false;
protected boolean rollback = false;
protected boolean rollbackOnly = false;
protected TestTransactionManager(boolean existingTransaction, boolean canCreateTransaction) {
this.existingTransaction = existingTransaction;
this.canCreateTransaction = canCreateTransaction;
setTransactionSynchronization(SYNCHRONIZATION_NEVER);
}
@Override
protected Object doGetTransaction() {
return TRANSACTION;
}
@Override
protected boolean isExistingTransaction(Object transaction) {
return existingTransaction;
}
@Override
protected void doBegin(Object transaction, TransactionDefinition definition) {
if (!TRANSACTION.equals(transaction)) {
throw new IllegalArgumentException("Not the same transaction object");
}
if (!this.canCreateTransaction) {
throw new CannotCreateTransactionException("Cannot create transaction");
}
this.begin = true;
}
@Override
protected void doCommit(DefaultTransactionStatus status) {
if (!TRANSACTION.equals(status.getTransaction())) {
throw new IllegalArgumentException("Not the same transaction object");
}
this.commit = true;
}
@Override
protected void doRollback(DefaultTransactionStatus status) {
if (!TRANSACTION.equals(status.getTransaction())) {
throw new IllegalArgumentException("Not the same transaction object");
}
this.rollback = true;
}
@Override
protected void doSetRollbackOnly(DefaultTransactionStatus status) {
if (!TRANSACTION.equals(status.getTransaction())) {
throw new IllegalArgumentException("Not the same transaction object");
}
this.rollbackOnly = true;
}
}
|
TestTransactionManager
|
java
|
google__error-prone
|
check_api/src/main/java/com/google/errorprone/DescriptionListener.java
|
{
"start": 888,
"end": 1102
}
|
interface ____ {
/** Reports a suggested modification to the code. */
void onDescribed(Description description);
/** Factory for creating DescriptionListeners while compiling each file. */
|
DescriptionListener
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/LongArrayFieldTest_primitive.java
|
{
"start": 1121,
"end": 1332
}
|
class ____ {
private long[] value;
public long[] getValue() {
return value;
}
public void setValue(long[] value) {
this.value = value;
}
}
}
|
V0
|
java
|
dropwizard__dropwizard
|
dropwizard-client/src/main/java/io/dropwizard/client/DropwizardApacheConnector.java
|
{
"start": 1989,
"end": 7995
}
|
class ____ implements Connector {
private static final String ERROR_BUFFERING_ENTITY = "Error buffering the entity.";
private static final String APACHE_HTTP_CLIENT_VERSION = VersionInfo
.loadVersionInfo("org.apache.hc.client5", DropwizardApacheConnector.class.getClassLoader())
.getRelease();
/**
* Actual HTTP client
*/
private final CloseableHttpClient client;
/**
* Default HttpUriRequestConfig
*/
@Nullable
private final RequestConfig defaultRequestConfig;
/**
* Should a chunked encoding be used in POST requests
*/
private final boolean chunkedEncodingEnabled;
public DropwizardApacheConnector(CloseableHttpClient client, @Nullable RequestConfig defaultRequestConfig,
boolean chunkedEncodingEnabled) {
this.client = client;
this.defaultRequestConfig = defaultRequestConfig;
this.chunkedEncodingEnabled = chunkedEncodingEnabled;
}
/**
* {@inheritDoc}
*/
@Override
public ClientResponse apply(ClientRequest jerseyRequest) {
try {
final HttpUriRequest apacheRequest = buildApacheRequest(jerseyRequest);
final CloseableHttpResponse apacheResponse = client.execute(apacheRequest);
final String reasonPhrase = apacheResponse.getReasonPhrase();
final Response.StatusType status = Statuses.from(apacheResponse.getCode(), reasonPhrase == null ? "" : reasonPhrase);
final ClientResponse jerseyResponse = new ClientResponse(status, jerseyRequest);
for (Header header : apacheResponse.getHeaders()) {
jerseyResponse.getHeaders().computeIfAbsent(header.getName(), k -> new ArrayList<>())
.add(header.getValue());
}
final HttpEntity httpEntity = apacheResponse.getEntity();
jerseyResponse.setEntityStream(httpEntity != null ? httpEntity.getContent() :
new ByteArrayInputStream(new byte[0]));
return jerseyResponse;
} catch (Exception e) {
throw new ProcessingException(e);
}
}
/**
* Build a new Apache's {@link HttpUriRequest}
* from Jersey's {@link org.glassfish.jersey.client.ClientRequest}
* <p>
* Convert a method, URI, body, headers and override a user-agent if necessary
* </p>
*
* @param jerseyRequest representation of an HTTP request in Jersey
* @return a new {@link HttpUriRequest}
*/
private HttpUriRequest buildApacheRequest(ClientRequest jerseyRequest) {
HttpUriRequestBase base = new HttpUriRequestBase(jerseyRequest.getMethod(), jerseyRequest.getUri());
base.setEntity(getHttpEntity(jerseyRequest));
for (String headerName : jerseyRequest.getHeaders().keySet()) {
base.addHeader(headerName, jerseyRequest.getHeaderString(headerName));
}
final Optional<RequestConfig> requestConfig = addJerseyRequestConfig(jerseyRequest);
requestConfig.ifPresent(base::setConfig);
return base;
}
private Optional<RequestConfig> addJerseyRequestConfig(ClientRequest clientRequest) {
final Integer timeout = clientRequest.resolveProperty(ClientProperties.READ_TIMEOUT, Integer.class);
final Integer connectTimeout = clientRequest.resolveProperty(ClientProperties.CONNECT_TIMEOUT, Integer.class);
final Boolean followRedirects = clientRequest.resolveProperty(ClientProperties.FOLLOW_REDIRECTS, Boolean.class);
if (timeout != null || connectTimeout != null || followRedirects != null) {
final RequestConfig.Builder requestConfig = RequestConfig.copy(defaultRequestConfig);
if (timeout != null) {
requestConfig.setResponseTimeout(timeout, TimeUnit.MILLISECONDS);
}
if (connectTimeout != null) {
requestConfig.setConnectTimeout(connectTimeout, TimeUnit.MILLISECONDS);
}
if (followRedirects != null) {
requestConfig.setRedirectsEnabled(followRedirects);
}
return Optional.of(requestConfig.build());
}
return Optional.empty();
}
/**
* Get an Apache's {@link HttpEntity}
* from Jersey's {@link org.glassfish.jersey.client.ClientRequest}
* <p>
* Create a custom HTTP entity, because Jersey doesn't provide
* a request stream or a byte buffer.
* </p>
*
* @param jerseyRequest representation of an HTTP request in Jersey
* @return a correct {@link HttpEntity} implementation
*/
@Nullable
protected HttpEntity getHttpEntity(ClientRequest jerseyRequest) {
if (jerseyRequest.getEntity() == null) {
return null;
}
return chunkedEncodingEnabled ? new JerseyRequestHttpEntity(jerseyRequest) :
new BufferedJerseyRequestHttpEntity(jerseyRequest);
}
/**
* {@inheritDoc}
*/
@Override
public Future<?> apply(final ClientRequest request, final AsyncConnectorCallback callback) {
// Simulate an asynchronous execution
return new DirectExecutorService().submit(() -> {
try {
callback.response(apply(request));
} catch (Exception e) {
callback.failure(e);
}
});
}
/**
* {@inheritDoc}
*/
@Override
public String getName() {
return "Apache-HttpClient/" + APACHE_HTTP_CLIENT_VERSION;
}
/**
* {@inheritDoc}
*/
@Override
public void close() {
// Should not close the client here, because it's managed by the Dropwizard environment
}
/**
* A custom {@link org.apache.hc.core5.http.io.entity.AbstractHttpEntity} that uses
* a Jersey request as a content source. It's chunked because we don't
* know the content length beforehand.
*/
private static
|
DropwizardApacheConnector
|
java
|
google__error-prone
|
core/src/main/java/com/google/errorprone/bugpatterns/UnnecessaryOptionalGet.java
|
{
"start": 1923,
"end": 4664
}
|
class ____ extends BugChecker
implements MethodInvocationTreeMatcher {
private static final Matcher<ExpressionTree> OPTIONAL_FUNCTIONS_WITH_FUNCTIONAL_ARG =
anyOf(
instanceMethod()
.onExactClass("java.util.Optional")
.namedAnyOf("map", "filter", "ifPresent", "flatMap"),
instanceMethod().onExactClass("java.util.OptionalLong").named("ifPresent"),
instanceMethod().onExactClass("java.util.OptionalInt").named("ifPresent"),
instanceMethod().onExactClass("java.util.OptionalDouble").named("ifPresent"),
instanceMethod().onExactClass("com.google.common.base.Optional").named("transform"));
private static final Matcher<ExpressionTree> OPTIONAL_GET =
anyOf(
instanceMethod()
.onExactClass("java.util.Optional")
.namedAnyOf("get", "orElseThrow", "orElse", "orElseGet", "orElseThrow"),
instanceMethod()
.onExactClass("java.util.OptionalLong")
.namedAnyOf("getAsLong", "orElse", "orElseGet", "orElseThrow"),
instanceMethod()
.onExactClass("java.util.OptionalInt")
.namedAnyOf("getAsInt", "orElse", "orElseGet", "orElseThrow"),
instanceMethod()
.onExactClass("java.util.OptionalDouble")
.namedAnyOf("getAsDouble", "orElse", "orElseGet", "orElseThrow"),
instanceMethod()
.onExactClass("com.google.common.base.Optional")
.namedAnyOf("get", "or", "orNull"));
@Override
public Description matchMethodInvocation(MethodInvocationTree tree, VisitorState state) {
if (!OPTIONAL_FUNCTIONS_WITH_FUNCTIONAL_ARG.matches(tree, state)) {
return Description.NO_MATCH;
}
ExpressionTree onlyArg = getOnlyElement(tree.getArguments());
if (!(onlyArg instanceof LambdaExpressionTree lambdaExpressionTree)) {
return Description.NO_MATCH;
}
VariableTree arg = getOnlyElement(lambdaExpressionTree.getParameters());
SuggestedFix.Builder fix = SuggestedFix.builder();
new TreeScanner<Void, VisitorState>() {
@Override
public Void visitMethodInvocation(
MethodInvocationTree methodInvocationTree, VisitorState visitorState) {
if (OPTIONAL_GET.matches(methodInvocationTree, visitorState)
&& sameVariable(getReceiver(tree), getReceiver(methodInvocationTree))) {
fix.replace(methodInvocationTree, state.getSourceForNode(arg));
}
return super.visitMethodInvocation(methodInvocationTree, visitorState);
}
}.scan(onlyArg, state);
if (fix.isEmpty()) {
return Description.NO_MATCH;
}
return describeMatch(tree, fix.build());
}
}
|
UnnecessaryOptionalGet
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java
|
{
"start": 782,
"end": 3000
}
|
class ____ extends AbstractVectorBlock implements Block, RefCounted {
private final DocVector vector;
DocBlock(DocVector vector) {
this.vector = vector;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public DocVector asVector() {
return vector;
}
@Override
public ElementType elementType() {
return ElementType.DOC;
}
@Override
public Block filter(int... positions) {
return new DocBlock(vector.filter(positions));
}
@Override
public Block deepCopy(BlockFactory blockFactory) {
return new DocBlock(vector.deepCopy(blockFactory));
}
@Override
public Block keepMask(BooleanVector mask) {
return vector.keepMask(mask);
}
@Override
public ReleasableIterator<? extends Block> lookup(IntBlock positions, ByteSizeValue targetBlockSize) {
throw new UnsupportedOperationException("can't lookup values from DocBlock");
}
@Override
public DocBlock expand() {
incRef();
return this;
}
@Override
public int hashCode() {
return vector.hashCode();
}
@Override
public boolean equals(Object obj) {
if (obj instanceof DocBlock == false) {
return false;
}
return this == obj || vector.equals(((DocBlock) obj).vector);
}
@Override
public long ramBytesUsed() {
return vector.ramBytesUsed();
}
@Override
public void closeInternal() {
assert (vector.isReleased() == false) : "can't release block [" + this + "] containing already released vector";
Releasables.closeExpectNoException(vector);
}
@Override
public String toString() {
final StringBuffer sb = new StringBuffer("DocBlock[");
sb.append("vector=").append(vector);
sb.append(']');
return sb.toString();
}
/**
* A builder the for {@link DocBlock}.
*/
public static Builder newBlockBuilder(BlockFactory blockFactory, int estimatedSize) {
return new Builder(blockFactory, estimatedSize);
}
public static
|
DocBlock
|
java
|
quarkusio__quarkus
|
extensions/funqy/funqy-server-common/runtime/src/main/java/io/quarkus/funqy/runtime/RequestContext.java
|
{
"start": 65,
"end": 363
}
|
interface ____ {
Object getProperty(String name);
Map<String, Object> getProperties();
void setProperty(String name, Object value);
<T> T getContextData(Class<T> key);
void setContextData(Class<?> key, Object value);
Map<Class<?>, Object> getContextData();
}
|
RequestContext
|
java
|
spring-projects__spring-framework
|
spring-web/src/main/java/org/springframework/web/util/HierarchicalUriComponents.java
|
{
"start": 25937,
"end": 26363
}
|
interface ____ extends Serializable {
String getPath();
List<String> getPathSegments();
PathComponent encode(BiFunction<String, Type, String> encoder);
void verify();
PathComponent expand(UriTemplateVariables uriVariables, @Nullable UnaryOperator<String> encoder);
void copyToUriComponentsBuilder(UriComponentsBuilder builder);
}
/**
* Represents a path backed by a String.
*/
static final
|
PathComponent
|
java
|
apache__camel
|
components/camel-sql/src/generated/java/org/apache/camel/component/sql/stored/template/generated/ParseException.java
|
{
"start": 420,
"end": 526
}
|
class ____ customize your error reporting
* mechanisms so long as you retain the public fields.
*/
public
|
to
|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableSkipLast.java
|
{
"start": 846,
"end": 1215
}
|
class ____<T> extends AbstractFlowableWithUpstream<T, T> {
final int skip;
public FlowableSkipLast(Flowable<T> source, int skip) {
super(source);
this.skip = skip;
}
@Override
protected void subscribeActual(Subscriber<? super T> s) {
source.subscribe(new SkipLastSubscriber<>(s, skip));
}
static final
|
FlowableSkipLast
|
java
|
spring-projects__spring-framework
|
spring-expression/src/test/java/org/springframework/expression/spel/SpelCompilationPerformanceTests.java
|
{
"start": 21549,
"end": 21667
}
|
class ____ {
double duration = 0.4d;
public double getDuration() {
return duration;
}
}
public static
|
Three
|
java
|
redisson__redisson
|
redisson/src/main/java/org/redisson/codec/SnappyCodec.java
|
{
"start": 1169,
"end": 4354
}
|
class ____ extends BaseCodec {
static final Logger log = LoggerFactory.getLogger(SnappyCodec.class);
private static final FastThreadLocal<Snappy> SNAPPY_DECODER = new FastThreadLocal<Snappy>() {
protected Snappy initialValue() {
return new Snappy();
};
};
private static final FastThreadLocal<Snappy> SNAPPY_ENCODER = new FastThreadLocal<Snappy>() {
protected Snappy initialValue() {
return new Snappy();
};
};
private final Codec innerCodec;
public SnappyCodec() {
this(new Kryo5Codec());
}
public SnappyCodec(Codec innerCodec) {
this.innerCodec = innerCodec;
log.warn("SnappyCodec is deprecated and will be removed in future. Use SnappyCodecV2 instead.");
}
public SnappyCodec(ClassLoader classLoader) {
this(new Kryo5Codec(classLoader));
}
public SnappyCodec(ClassLoader classLoader, SnappyCodec codec) throws ReflectiveOperationException {
this(copy(classLoader, codec.innerCodec));
}
private final Decoder<Object> decoder = new Decoder<Object>() {
@Override
public Object decode(ByteBuf buf, State state) throws IOException {
ByteBuf out = ByteBufAllocator.DEFAULT.buffer();
try {
while (buf.isReadable()) {
int chunkSize = buf.readInt();
ByteBuf chunk = buf.readSlice(chunkSize);
SNAPPY_DECODER.get().decode(chunk, out);
SNAPPY_DECODER.get().reset();
}
return innerCodec.getValueDecoder().decode(out, state);
} finally {
SNAPPY_DECODER.get().reset();
out.release();
}
}
};
private final Encoder encoder = new Encoder() {
@Override
public ByteBuf encode(Object in) throws IOException {
ByteBuf buf = innerCodec.getValueEncoder().encode(in);
ByteBuf out = ByteBufAllocator.DEFAULT.buffer();
try {
int chunksAmount = (int) Math.ceil(buf.readableBytes() / (double) Short.MAX_VALUE);
for (int i = 1; i <= chunksAmount; i++) {
int chunkSize = Math.min(Short.MAX_VALUE, buf.readableBytes());
ByteBuf chunk = buf.readSlice(chunkSize);
int lenIndex = out.writerIndex();
out.writeInt(0);
SNAPPY_ENCODER.get().encode(chunk, out, chunk.readableBytes());
int compressedDataLength = out.writerIndex() - 4 - lenIndex;
out.setInt(lenIndex, compressedDataLength);
}
return out;
} finally {
buf.release();
SNAPPY_ENCODER.get().reset();
}
}
};
@Override
public Decoder<Object> getValueDecoder() {
return decoder;
}
@Override
public Encoder getValueEncoder() {
return encoder;
}
@Override
public ClassLoader getClassLoader() {
return innerCodec.getClassLoader();
}
}
|
SnappyCodec
|
java
|
apache__kafka
|
metadata/src/main/java/org/apache/kafka/metadata/authorizer/AclMutator.java
|
{
"start": 1401,
"end": 2416
}
|
interface ____ {
/**
* Create the specified ACLs. If any ACL already exists, nothing will be done for that
* one, and we will return a success result for it.
*
* @param context The controller request context.
* @param aclBindings The ACLs to create.
*
* @return The results for each AclBinding, in the order they were passed.
*/
CompletableFuture<List<AclCreateResult>> createAcls(
ControllerRequestContext context,
List<AclBinding> aclBindings
);
/**
* Delete some ACLs based on the set of filters that is passed in.
*
* @param context The controller request context.
* @param aclBindingFilters The filters.
*
* @return The results for each filter, in the order they were passed.
*/
CompletableFuture<List<AclDeleteResult>> deleteAcls(
ControllerRequestContext context,
List<AclBindingFilter> aclBindingFilters
);
}
|
AclMutator
|
java
|
micronaut-projects__micronaut-core
|
inject-java/src/test/groovy/io/micronaut/inject/field/nullableinjection/B.java
|
{
"start": 736,
"end": 838
}
|
class ____ {
@Inject @Nullable protected A a;
public A getA() {
return this.a;
}
}
|
B
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/TestExceptionCheckerTest.java
|
{
"start": 2695,
"end": 3279
}
|
class ____ {
@Test(expected = IOException.class)
public void test() throws Exception {
Path p = Paths.get("NOSUCH");
Files.readAllBytes(p);
}
}
""")
.addOutputLines(
"out/ExceptionTest.java",
"""
import static com.google.common.truth.Truth.assertThat;
import static org.junit.Assert.assertThrows;
import java.io.IOException;
import java.nio.file.*;
import org.junit.Test;
|
ExceptionTest
|
java
|
apache__logging-log4j2
|
log4j-appserver/src/main/java/org/apache/logging/log4j/appserver/jetty/Log4j2Logger.java
|
{
"start": 1406,
"end": 1795
}
|
class ____.
* </p>
*
* <p>
* From the command line with:
* </p>
* <pre>-Dorg.eclipse.jetty.util.log.class = org.apache.logging.log4j.appserver.jetty.Log4j2Logger</pre>
*
* <p>
* Programmatically with:
* </p>
* <pre>System.setProperty("org.eclipse.jetty.util.log.class", "org.apache.logging.log4j.appserver.jetty.Log4j2Logger");</pre>
*
* @since Apache Log4j 2.10.0
*/
public
|
name
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/deser/creators/NullValueViaCreatorTest.java
|
{
"start": 737,
"end": 891
}
|
class ____ implements Contained<Object> {}
protected static final NullContained NULL_CONTAINED = new NullContained();
protected static
|
NullContained
|
java
|
spring-projects__spring-boot
|
core/spring-boot-test/src/test/java/org/springframework/boot/test/context/SpringBootContextLoaderTests.java
|
{
"start": 19993,
"end": 20481
}
|
class ____
implements ApplicationContextFailureProcessor {
static @Nullable ApplicationContext failedContext;
static @Nullable Throwable contextLoadException;
@Override
public void processLoadFailure(ApplicationContext context, @Nullable Throwable exception) {
failedContext = context;
contextLoadException = exception;
}
private static void reset() {
failedContext = null;
contextLoadException = null;
}
}
}
|
ContextLoaderApplicationContextFailureProcessor
|
java
|
hibernate__hibernate-orm
|
hibernate-envers/src/main/java/org/hibernate/envers/configuration/internal/metadata/JoinColumnCollectionMetadataGenerator.java
|
{
"start": 2007,
"end": 9513
}
|
class ____ extends AbstractCollectionMetadataGenerator {
private static final EnversMessageLogger LOG = Logger.getMessageLogger(
MethodHandles.lookup(),
EnversMessageLogger.class,
JoinColumnCollectionMetadataGenerator.class.getName()
);
public JoinColumnCollectionMetadataGenerator(
EnversMetadataBuildingContext metadataBuildingContext,
BasicMetadataGenerator basicMetadataGenerator,
ValueMetadataGenerator valueMetadataGenerator) {
super( metadataBuildingContext, basicMetadataGenerator, valueMetadataGenerator );
}
@Override
public void addCollection(CollectionMetadataContext context) {
LOG.debugf(
"Adding audit mapping for property %s.%s: one-to-many collection, using a join column on the referenced entity",
context.getReferencingEntityName(),
context.getPropertyName()
);
final Collection collection = context.getCollection();
final PropertyAuditingData propertyAuditingData = context.getPropertyAuditingData();
final String mappedBy = CollectionMappedByResolver.resolveMappedBy( collection, propertyAuditingData );
final IdMappingData referencedIdMapping = getReferencedIdMappingData(
context.getReferencingEntityName(),
context.getReferencedEntityName(),
propertyAuditingData,
false
);
final EntityConfiguration referencingEntityConfiguration = context.getReferencingEntityConfiguration();
final IdMappingData referencingIdMapping = referencingEntityConfiguration.getIdMappingData();
// Generating the id mappers data for the referencing side of the relation.
final MiddleIdData referencingIdData = createMiddleIdData(
referencingIdMapping,
mappedBy + "_",
context.getReferencingEntityName()
);
// And for the referenced side. The prefixed mapper won't be used (as this collection isn't persisted
// in a join table, so the prefix value is arbitrary).
final MiddleIdData referencedIdData = createMiddleIdData(
referencedIdMapping,
null,
context.getReferencedEntityName()
);
// Generating the element mapping.
final MiddleRelatedComponentMapper elementComponentMapper = new MiddleRelatedComponentMapper( referencedIdData );
final MiddleComponentData elementComponentData = new MiddleComponentData( elementComponentMapper );
// Generating the index mapping, if an index exists. It can only exists in case a jakarta.persistence.MapKey
// annotation is present on the entity. So the middleEntityXml will be not be used. The queryGeneratorBuilder
// will only be checked for nullnes.
MiddleComponentData indexComponentData = addIndex( context, null, null );
// Generating the query generator - it should read directly from the related entity.
final RelationQueryGenerator queryGenerator = new OneAuditEntityQueryGenerator(
getMetadataBuildingContext().getConfiguration(),
referencingIdData,
context.getReferencedEntityName(),
referencedIdData,
context.getCollection().getElement() instanceof ComponentType,
mappedBy,
CollectionMappedByResolver.isMappedByKey( collection, mappedBy ),
getOrderByCollectionRole( collection, collection.getOrderBy() )
);
// Creating common mapper data.
final CommonCollectionMapperData commonCollectionMapperData = createCommonCollectionMapperData(
context,
context.getReferencedEntityName(),
referencingIdData,
queryGenerator
);
PropertyMapper fakeBidirectionalRelationMapper;
PropertyMapper fakeBidirectionalRelationIndexMapper;
if ( context.isFakeOneToManyBidirectional() || hasCollectionIndex( context ) ) {
// In case of a fake many-to-one bidirectional relation, we have to generate a mapper which maps
// the mapped-by property name to the id of the related entity (which is the owner of the collection).
final String auditMappedBy = getAddOneToManyAttachedAuditMappedBy( context );
fakeBidirectionalRelationMapper = getBidirectionalRelationMapper(
context.getReferencingEntityName(),
referencingIdMapping,
auditMappedBy
);
// Checking if there's an index defined. If so, adding a mapper for it.
final String positionMappedBy = getAttachedPositionMappedBy( context );
if ( positionMappedBy != null ) {
fakeBidirectionalRelationIndexMapper = getBidirectionalRelationIndexMapper( context, positionMappedBy );
indexComponentData = getBidirectionalIndexData( indexComponentData, positionMappedBy );
}
else {
fakeBidirectionalRelationIndexMapper = null;
}
}
else {
fakeBidirectionalRelationMapper = null;
fakeBidirectionalRelationIndexMapper = null;
}
// Checking the type of the collection and adding an appropriate mapper.
addMapper( context, commonCollectionMapperData, elementComponentData, indexComponentData );
// Storing information about this relation.
referencingEntityConfiguration.addToManyNotOwningRelation(
context.getPropertyName(),
mappedBy,
context.getReferencedEntityName(),
referencingIdData.getPrefixedMapper(),
fakeBidirectionalRelationMapper,
fakeBidirectionalRelationIndexMapper,
hasCollectionIndex( context )
);
}
private boolean hasCollectionIndex(CollectionMetadataContext context) {
return context.getCollection().isIndexed() && ( (IndexedCollection) context.getCollection() ).getIndex() != null;
}
private String getAddOneToManyAttachedAuditMappedBy(CollectionMetadataContext context) {
if ( context.isFakeOneToManyBidirectional() ) {
return context.getPropertyAuditingData().getAuditMappedBy();
}
return context.getCollection().getMappedByProperty();
}
private PropertyMapper getBidirectionalRelationMapper(String entityName, IdMappingData idData, String auditMappedBy) {
// Creating a prefixed relation mapper.
final IdMapper relMapper = idData.getIdMapper().prefixMappedProperties(
MappingTools.createToOneRelationPrefix( auditMappedBy )
);
return new ToOneIdMapper(
relMapper,
// The mapper will only be used to map from entity to map, so no need to provide other details
// when constructing the PropertyData.
new PropertyData( auditMappedBy, null, null ),
entityName,
false,
false
);
}
private PropertyMapper getBidirectionalRelationIndexMapper(CollectionMetadataContext context, String positionMappedBy) {
if ( positionMappedBy != null ) {
final Type indexType = getCollectionIndexType( context );
return new SinglePropertyMapper( PropertyData.forProperty( positionMappedBy, indexType ) );
}
return null;
}
private Type getCollectionIndexType(CollectionMetadataContext context) {
if ( context.getCollection().isIndexed() ) {
return ( (IndexedCollection) context.getCollection() ).getIndex().getType();
}
// todo - do we need to reverse lookup the type anyway?
return null;
}
private String getAttachedPositionMappedBy(CollectionMetadataContext context) {
if ( context.isFakeOneToManyBidirectional() ) {
return context.getPropertyAuditingData().getPositionMappedBy();
}
else if ( hasCollectionIndex( context ) ) {
return ( (IndexedCollection) context.getCollection() ).getIndex().getSelectables().get( 0 ).getText();
}
return null;
}
private MiddleComponentData getBidirectionalIndexData(MiddleComponentData original, String positionMappedBy) {
if ( positionMappedBy != null ) {
// overwriting the index component data to properly read the index.
return new MiddleComponentData( new MiddleStraightComponentMapper( positionMappedBy ) );
}
return original;
}
}
|
JoinColumnCollectionMetadataGenerator
|
java
|
hibernate__hibernate-orm
|
hibernate-envers/src/main/java/org/hibernate/envers/configuration/internal/metadata/PersistentEntityInstantiator.java
|
{
"start": 982,
"end": 1116
}
|
class ____ to help facilitate the instantiation of {@link PersistentEntity} implementations.
*
* @author Chris Cranford
*/
public
|
meant
|
java
|
elastic__elasticsearch
|
libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java
|
{
"start": 4921,
"end": 17683
}
|
class ____ name as the scope, and read the entitlements for that scope.
* If it's not, we use the single {@code ALL-UNNAMED} scope – in this case there is one scope and all entitlements apply
* to all the plugin code.
* </p>
* <p>
* (*) implementation detail: this is currently done in an indirect way: we know the module is not in the system layer
* (otherwise the check would have been already trivially allowed), so we just check that the module is named, and it belongs to the
* boot {@link ModuleLayer}. We might want to change this in the future to make it more consistent/easier to maintain.
* </p>
*
* @param componentName the plugin name or else one of the special component names like "(server)".
*/
protected record ModuleEntitlements(
String componentName,
String moduleName,
Map<Class<? extends Entitlement>, List<Entitlement>> entitlementsByType,
FileAccessTree fileAccess
) {
public ModuleEntitlements {
entitlementsByType = Map.copyOf(entitlementsByType);
}
public boolean hasEntitlement(Class<? extends Entitlement> entitlementClass) {
return entitlementsByType.containsKey(entitlementClass);
}
public <E extends Entitlement> Stream<E> getEntitlements(Class<E> entitlementClass) {
var entitlements = entitlementsByType.get(entitlementClass);
if (entitlements == null) {
return Stream.empty();
}
return entitlements.stream().map(entitlementClass::cast);
}
Logger logger(Class<?> requestingClass) {
var packageName = requestingClass.getPackageName();
var loggerSuffix = "." + componentName + "." + ((moduleName == null) ? ALL_UNNAMED : moduleName) + "." + packageName;
return LogManager.getLogger(PolicyManager.class.getName() + loggerSuffix);
}
}
private FileAccessTree getDefaultFileAccess(Collection<Path> componentPaths) {
return FileAccessTree.withoutExclusivePaths(FilesEntitlement.EMPTY, pathLookup, componentPaths);
}
// pkg private for testing
ModuleEntitlements defaultEntitlements(String componentName, Collection<Path> componentPaths, String moduleName) {
return new ModuleEntitlements(componentName, moduleName, Map.of(), getDefaultFileAccess(componentPaths));
}
// pkg private for testing
ModuleEntitlements policyEntitlements(
String componentName,
Collection<Path> componentPaths,
String moduleName,
List<Entitlement> entitlements
) {
FilesEntitlement filesEntitlement = FilesEntitlement.EMPTY;
for (Entitlement entitlement : entitlements) {
if (entitlement instanceof FilesEntitlement) {
filesEntitlement = (FilesEntitlement) entitlement;
}
}
return new ModuleEntitlements(
componentName,
moduleName,
entitlements.stream().collect(groupingBy(Entitlement::getClass)),
FileAccessTree.of(componentName, moduleName, filesEntitlement, pathLookup, componentPaths, exclusivePaths)
);
}
final Map<Module, ModuleEntitlements> moduleEntitlementsMap = new ConcurrentHashMap<>();
private final Map<String, List<Entitlement>> serverEntitlements;
private final List<Entitlement> apmAgentEntitlements;
private final Map<String, Map<String, List<Entitlement>>> pluginsEntitlements;
private final Function<Class<?>, PolicyScope> scopeResolver;
private final PathLookup pathLookup;
private static final Set<Module> SYSTEM_LAYER_MODULES = findSystemLayerModules();
private static Set<Module> findSystemLayerModules() {
var systemModulesDescriptors = ModuleFinder.ofSystem()
.findAll()
.stream()
.map(ModuleReference::descriptor)
.collect(Collectors.toUnmodifiableSet());
return Stream.concat(
// entitlements is a "system" module, we can do anything from it
Stream.of(PolicyManager.class.getModule()),
// anything in the boot layer is also part of the system
ModuleLayer.boot()
.modules()
.stream()
.filter(
m -> systemModulesDescriptors.contains(m.getDescriptor())
&& MODULES_EXCLUDED_FROM_SYSTEM_MODULES.contains(m.getName()) == false
)
).collect(Collectors.toUnmodifiableSet());
}
// Anything in the boot layer that is not in the system layer, is in the server layer
public static final Set<Module> SERVER_LAYER_MODULES = ModuleLayer.boot()
.modules()
.stream()
.filter(m -> SYSTEM_LAYER_MODULES.contains(m) == false)
.collect(Collectors.toUnmodifiableSet());
private final Function<String, Collection<Path>> pluginSourcePathsResolver;
/**
* Paths that are only allowed for a single module. Used to generate
* structures to indicate other modules aren't allowed to use these
* files in {@link FileAccessTree}s.
*/
private final List<ExclusivePath> exclusivePaths;
public PolicyManager(
Policy serverPolicy,
List<Entitlement> apmAgentEntitlements,
Map<String, Policy> pluginPolicies,
Function<Class<?>, PolicyScope> scopeResolver,
Function<String, Collection<Path>> pluginSourcePathsResolver,
PathLookup pathLookup
) {
this.serverEntitlements = buildScopeEntitlementsMap(requireNonNull(serverPolicy));
this.apmAgentEntitlements = apmAgentEntitlements;
this.pluginsEntitlements = requireNonNull(pluginPolicies).entrySet()
.stream()
.collect(toUnmodifiableMap(Map.Entry::getKey, e -> buildScopeEntitlementsMap(e.getValue())));
this.scopeResolver = scopeResolver;
this.pluginSourcePathsResolver = pluginSourcePathsResolver;
this.pathLookup = requireNonNull(pathLookup);
List<ExclusiveFileEntitlement> exclusiveFileEntitlements = new ArrayList<>();
for (var e : serverEntitlements.entrySet()) {
validateEntitlementsPerModule(SERVER.componentName, e.getKey(), e.getValue(), exclusiveFileEntitlements);
}
validateEntitlementsPerModule(APM_AGENT.componentName, ALL_UNNAMED, apmAgentEntitlements, exclusiveFileEntitlements);
for (var p : pluginsEntitlements.entrySet()) {
for (var m : p.getValue().entrySet()) {
validateEntitlementsPerModule(p.getKey(), m.getKey(), m.getValue(), exclusiveFileEntitlements);
}
}
List<ExclusivePath> exclusivePaths = FileAccessTree.buildExclusivePathList(
exclusiveFileEntitlements,
pathLookup,
FileAccessTree.DEFAULT_COMPARISON
);
FileAccessTree.validateExclusivePaths(exclusivePaths, FileAccessTree.DEFAULT_COMPARISON);
this.exclusivePaths = exclusivePaths;
}
private static Map<String, List<Entitlement>> buildScopeEntitlementsMap(Policy policy) {
return policy.scopes().stream().collect(toUnmodifiableMap(Scope::moduleName, Scope::entitlements));
}
private static void validateEntitlementsPerModule(
String componentName,
String moduleName,
List<Entitlement> entitlements,
List<ExclusiveFileEntitlement> exclusiveFileEntitlements
) {
Set<Class<? extends Entitlement>> found = new HashSet<>();
for (var e : entitlements) {
if (found.contains(e.getClass())) {
throw new IllegalArgumentException(
"[" + componentName + "] using module [" + moduleName + "] found duplicate entitlement [" + e.getClass().getName() + "]"
);
}
found.add(e.getClass());
if (e instanceof FilesEntitlement fe) {
exclusiveFileEntitlements.add(new ExclusiveFileEntitlement(componentName, moduleName, fe));
}
}
}
protected ModuleEntitlements getEntitlements(Class<?> requestingClass) {
return moduleEntitlementsMap.computeIfAbsent(requestingClass.getModule(), m -> computeEntitlements(requestingClass));
}
protected final ModuleEntitlements computeEntitlements(Class<?> requestingClass) {
var policyScope = scopeResolver.apply(requestingClass);
var componentName = policyScope.componentName();
var moduleName = policyScope.moduleName();
switch (policyScope.kind()) {
case SERVER -> {
return getModuleScopeEntitlements(
serverEntitlements,
moduleName,
SERVER.componentName,
getComponentPathsFromClass(requestingClass)
);
}
case APM_AGENT -> {
// The APM agent is the only thing running non-modular in the system classloader
return policyEntitlements(
APM_AGENT.componentName,
getComponentPathsFromClass(requestingClass),
ALL_UNNAMED,
apmAgentEntitlements
);
}
case UNKNOWN -> {
return defaultEntitlements(UNKNOWN.componentName, List.of(), moduleName);
}
default -> {
assert policyScope.kind() == PLUGIN;
var pluginEntitlements = pluginsEntitlements.get(componentName);
Collection<Path> componentPaths = Objects.requireNonNullElse(
pluginSourcePathsResolver.apply(componentName),
Collections.emptyList()
);
if (pluginEntitlements == null) {
return defaultEntitlements(componentName, componentPaths, moduleName);
} else {
return getModuleScopeEntitlements(pluginEntitlements, moduleName, componentName, componentPaths);
}
}
}
}
protected Collection<Path> getComponentPathsFromClass(Class<?> requestingClass) {
var codeSource = requestingClass.getProtectionDomain().getCodeSource();
if (codeSource == null) {
return List.of();
}
try {
return List.of(Paths.get(codeSource.getLocation().toURI()));
} catch (Exception e) {
// If we get a URISyntaxException, or any other Exception due to an invalid URI, we return null to safely skip this location
generalLogger.info(
"Cannot get component path for [{}]: [{}] cannot be converted to a valid Path",
requestingClass.getName(),
codeSource.getLocation().toString()
);
return List.of();
}
}
private ModuleEntitlements getModuleScopeEntitlements(
Map<String, List<Entitlement>> scopeEntitlements,
String scopeName,
String componentName,
Collection<Path> componentPaths
) {
var entitlements = scopeEntitlements.get(scopeName);
if (entitlements == null) {
return defaultEntitlements(componentName, componentPaths, scopeName);
}
return policyEntitlements(componentName, componentPaths, scopeName, entitlements);
}
/**
* @return true if permission is granted regardless of the entitlement
*/
boolean isTriviallyAllowed(Class<?> requestingClass) {
// note: do not log exceptions in here, this could interfere with loading of additionally necessary classes such as ThrowableProxy
if (requestingClass == null) {
generalLogger.trace("Entitlement trivially allowed: no caller frames outside the entitlement library");
return true;
}
if (requestingClass == NO_CLASS) {
generalLogger.trace("Entitlement trivially allowed from outermost frame");
return true;
}
if (isTrustedSystemClass(requestingClass)) {
// note: no logging here, this has caused ClassCircularityErrors in certain cases
return true;
}
generalLogger.trace("Entitlement not trivially allowed");
return false;
}
/**
* The main decision point for what counts as a trusted built-in JDK class.
*/
protected boolean isTrustedSystemClass(Class<?> requestingClass) {
return SYSTEM_LAYER_MODULES.contains(requestingClass.getModule());
}
@Override
public String toString() {
return "PolicyManager{" + "serverEntitlements=" + serverEntitlements + ", pluginsEntitlements=" + pluginsEntitlements + '}';
}
}
|
module
|
java
|
apache__camel
|
components/camel-tracing/src/test/java/org/apache/camel/tracing/decorators/CqlSpanDecoratorTest.java
|
{
"start": 1284,
"end": 3235
}
|
class ____ {
@Test
public void testPreCqlFromUri() {
String cql = "select%20*%20from%20users";
String keyspace = "test";
Endpoint endpoint = Mockito.mock(Endpoint.class);
Exchange exchange = Mockito.mock(Exchange.class);
Message message = Mockito.mock(Message.class);
Mockito.when(endpoint.getEndpointUri()).thenReturn("cql://host1,host2:8080/" + keyspace + "?cql="
+ cql + "&consistencyLevel=quorum");
Mockito.when(exchange.getIn()).thenReturn(message);
SpanDecorator decorator = new CqlSpanDecorator();
MockSpanAdapter span = new MockSpanAdapter();
decorator.pre(span, exchange, endpoint);
assertEquals(CqlSpanDecorator.CASSANDRA_DB_TYPE, span.tags().get(TagConstants.DB_SYSTEM));
assertEquals(cql, span.tags().get(TagConstants.DB_STATEMENT));
assertEquals(keyspace, span.tags().get(TagConstants.DB_NAME));
}
@Test
public void testPreCqlFromHeader() {
String cql = "select * from users";
Endpoint endpoint = Mockito.mock(Endpoint.class);
Exchange exchange = Mockito.mock(Exchange.class);
Message message = Mockito.mock(Message.class);
Mockito.when(endpoint.getEndpointUri()).thenReturn("cql://host1,host2?consistencyLevel=quorum");
Mockito.when(exchange.getIn()).thenReturn(message);
Mockito.when(message.getHeader(CqlSpanDecorator.CAMEL_CQL_QUERY, String.class)).thenReturn(cql);
SpanDecorator decorator = new CqlSpanDecorator();
MockSpanAdapter span = new MockSpanAdapter();
decorator.pre(span, exchange, endpoint);
assertEquals(CqlSpanDecorator.CASSANDRA_DB_TYPE, span.tags().get(TagConstants.DB_SYSTEM));
assertEquals(cql, span.tags().get(TagConstants.DB_STATEMENT));
assertNull(span.tags().get(TagConstants.DB_NAME));
}
}
|
CqlSpanDecoratorTest
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/ser/filter/JsonIncludeArrayTest.java
|
{
"start": 625,
"end": 814
}
|
class ____ {
@JsonInclude(JsonInclude.Include.NON_EMPTY)
public short[] value;
public NonEmptyShortArray(short... v) { value = v; }
}
static
|
NonEmptyShortArray
|
java
|
alibaba__nacos
|
config/src/test/java/com/alibaba/nacos/config/server/utils/TraceLogUtilTest.java
|
{
"start": 786,
"end": 1086
}
|
class ____ {
@Test
void testRequestLog() {
Logger requestLog = TraceLogUtil.requestLog;
assertTrue(requestLog instanceof Logger);
Logger pollingLog = TraceLogUtil.pollingLog;
assertTrue(pollingLog instanceof Logger);
}
}
|
TraceLogUtilTest
|
java
|
netty__netty
|
transport/src/test/java/io/netty/channel/DefaultChannelPipelineTest.java
|
{
"start": 13097,
"end": 14877
}
|
class ____ extends Exception { }
@Override
public void channelReadComplete(ChannelHandlerContext ctx) throws Exception {
throw new TestException();
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
if (cause instanceof TestException) {
ctx.executor().execute(new Runnable() {
@Override
public void run() {
latch.countDown();
}
});
}
counter.incrementAndGet();
throw new Exception();
}
});
channel.pipeline().fireChannelReadComplete();
latch.await();
assertEquals(1, counter.get());
} finally {
channel.close().syncUninterruptibly();
}
}
@Test
@Timeout(value = 3000, unit = TimeUnit.MILLISECONDS)
public void testThrowInOtherHandlerAfterInvokedFromExceptionCaught() throws InterruptedException {
final CountDownLatch latch = new CountDownLatch(1);
final AtomicInteger counter = new AtomicInteger();
Channel channel = new LocalChannel();
try {
group.register(channel).syncUninterruptibly();
channel.pipeline().addLast(new ChannelInboundHandlerAdapter() {
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
ctx.fireChannelReadComplete();
}
}, new ChannelInboundHandlerAdapter() {
|
TestException
|
java
|
quarkusio__quarkus
|
extensions/opentelemetry/runtime/src/test/java/io/quarkus/opentelemetry/runtime/tracing/TracerUtilTest.java
|
{
"start": 627,
"end": 1522
}
|
class ____ {
@Test
public void testMapResourceAttributes() {
List<String> resourceAttributes = Arrays.asList(
"service.name=myservice",
"service.namespace=mynamespace",
"service.version=1.0",
"deployment.environment=production");
Resource resource = TracerUtil.mapResourceAttributes(resourceAttributes, null, null);
Attributes attributes = resource.getAttributes();
Assertions.assertThat(attributes.size()).isEqualTo(4);
Assertions.assertThat(attributes.get(SERVICE_NAME)).isEqualTo("myservice");
Assertions.assertThat(attributes.get(SERVICE_NAMESPACE)).isEqualTo("mynamespace");
Assertions.assertThat(attributes.get(SERVICE_VERSION)).isEqualTo("1.0");
Assertions.assertThat(attributes.get(DEPLOYMENT_ENVIRONMENT)).isEqualTo("production");
}
}
|
TracerUtilTest
|
java
|
apache__kafka
|
clients/src/main/java/org/apache/kafka/common/requests/SyncGroupResponse.java
|
{
"start": 1082,
"end": 2191
}
|
class ____ extends AbstractResponse {
private final SyncGroupResponseData data;
public SyncGroupResponse(SyncGroupResponseData data) {
super(ApiKeys.SYNC_GROUP);
this.data = data;
}
@Override
public int throttleTimeMs() {
return data.throttleTimeMs();
}
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
data.setThrottleTimeMs(throttleTimeMs);
}
public Errors error() {
return Errors.forCode(data.errorCode());
}
@Override
public Map<Errors, Integer> errorCounts() {
return errorCounts(Errors.forCode(data.errorCode()));
}
@Override
public SyncGroupResponseData data() {
return data;
}
@Override
public String toString() {
return data.toString();
}
public static SyncGroupResponse parse(Readable readable, short version) {
return new SyncGroupResponse(new SyncGroupResponseData(readable, version));
}
@Override
public boolean shouldClientThrottle(short version) {
return version >= 2;
}
}
|
SyncGroupResponse
|
java
|
spring-projects__spring-framework
|
spring-context/src/main/java/org/springframework/context/Lifecycle.java
|
{
"start": 680,
"end": 830
}
|
interface ____ methods for start/stop lifecycle control.
* The typical use case for this is to control asynchronous processing.
* <b>NOTE: This
|
defining
|
java
|
spring-cloud__spring-cloud-gateway
|
spring-cloud-gateway-proxyexchange-webflux/src/test/java/org/springframework/cloud/gateway/webflux/ProductionConfigurationTests.java
|
{
"start": 3156,
"end": 10295
}
|
class ____ {
@Autowired
private TestRestTemplate rest;
@Autowired
private TestApplication application;
@LocalServerPort
private int port;
@BeforeEach
public void init() throws Exception {
application.setHome(new URI("http://localhost:" + port));
rest.getRestTemplate().setRequestFactory(new SimpleClientHttpRequestFactory());
}
@Test
public void get() throws Exception {
assertThat(rest.getForObject("/proxy/0", Foo.class).getName()).isEqualTo("bye");
}
@Test
public void forwardGet() throws Exception {
assertThat(rest.getForObject("/proxy/forward/0", Foo.class).getName()).isEqualTo("bye");
}
@Test
public void path() throws Exception {
assertThat(rest.getForObject("/proxy/path/1", Foo.class).getName()).isEqualTo("foo");
}
@Test
public void resource() throws Exception {
assertThat(rest.getForObject("/proxy/html/test.html", String.class)).contains("<body>Test");
}
@Test
public void resourceWithNoType() throws Exception {
assertThat(rest.getForObject("/proxy/typeless/test.html", String.class)).contains("<body>Test");
}
@Test
public void missing() throws Exception {
assertThat(rest.getForEntity("/proxy/missing/0", Foo.class).getStatusCode()).isEqualTo(HttpStatus.NOT_FOUND);
}
@Test
public void uri() throws Exception {
assertThat(rest.getForObject("/proxy/0", Foo.class).getName()).isEqualTo("bye");
}
@Test
public void post() throws Exception {
assertThat(rest.postForObject("/proxy/0", Collections.singletonMap("name", "foo"), Bar.class).getName())
.isEqualTo("host=localhost:" + port + ";foo");
}
@Test
public void postJsonWithWhitespace() {
var json = """
{
"foo": "bar"
}""";
var headers = new HttpHeaders();
headers.setContentType(MediaType.APPLICATION_JSON);
headers.setContentLength(json.length());
var request = new HttpEntity<>(json, headers);
assertThat(rest.postForEntity("/proxy/checkContentLength", request, Void.class).getStatusCode())
.isEqualTo(HttpStatus.OK);
}
@Test
public void forwardPost() throws Exception {
assertThat(rest.postForObject("/proxy/forward/0", Collections.singletonMap("name", "foo"), Bar.class).getName())
.isEqualTo("host=localhost:" + port + ";foo");
}
@Test
public void list() throws Exception {
ResponseEntity<List<Bar>> result = rest.exchange(
RequestEntity.post(rest.getRestTemplate().getUriTemplateHandler().expand("/proxy"))
.contentType(MediaType.APPLICATION_JSON)
.body(Collections.singletonList(Collections.singletonMap("name", "foo"))),
new ParameterizedTypeReference<List<Bar>>() {
});
assertThat(result.getBody().iterator().next().getName()).isEqualTo("host=localhost:" + port + ";foo");
}
@Test
public void bodyless() throws Exception {
assertThat(rest.postForObject("/proxy/0", Collections.singletonMap("name", "foo"), Bar.class).getName())
.isEqualTo("host=localhost:" + port + ";foo");
}
@Test
public void entity() throws Exception {
assertThat(
rest.exchange(
RequestEntity.post(rest.getRestTemplate().getUriTemplateHandler().expand("/proxy/entity"))
.body(Collections.singletonMap("name", "foo")),
new ParameterizedTypeReference<List<Bar>>() {
})
.getBody()
.iterator()
.next()
.getName())
.isEqualTo("host=localhost:" + port + ";foo");
}
@Test
public void entityWithType() throws Exception {
assertThat(
rest.exchange(
RequestEntity.post(rest.getRestTemplate().getUriTemplateHandler().expand("/proxy/type"))
.body(Collections.singletonMap("name", "foo")),
new ParameterizedTypeReference<List<Bar>>() {
})
.getBody()
.iterator()
.next()
.getName())
.isEqualTo("host=localhost:" + port + ";foo");
}
@Test
public void single() throws Exception {
assertThat(rest.postForObject("/proxy/single", Collections.singletonMap("name", "foobar"), Bar.class).getName())
.isEqualTo("host=localhost:" + port + ";foobar");
}
@Test
public void converter() throws Exception {
assertThat(
rest.postForObject("/proxy/converter", Collections.singletonMap("name", "foobar"), Bar.class).getName())
.isEqualTo("host=localhost:" + port + ";foobar");
}
@Test
@SuppressWarnings({ "Duplicates", "unchecked" })
public void testSensitiveHeadersOverride() throws Exception {
Map<String, List<String>> headers = rest
.exchange(RequestEntity.get(rest.getRestTemplate().getUriTemplateHandler().expand("/proxy/headers"))
.header("foo", "bar")
.header("abc", "xyz")
.header("cookie", "monster")
.build(), Map.class)
.getBody();
assertThat(headers).doesNotContainKey("foo").doesNotContainKey("hello").containsKeys("bar", "abc");
assertThat(headers.get("cookie")).containsOnly("monster");
}
@Test
public void testSensitiveHeadersDefault() throws Exception {
Map<String, List<String>> headers = rest
.exchange(RequestEntity
.get(rest.getRestTemplate().getUriTemplateHandler().expand("/proxy/sensitive-headers-default"))
.header("cookie", "monster")
.build(), Map.class)
.getBody();
assertThat(headers).doesNotContainKey("cookie");
}
@Test
@SuppressWarnings({ "Duplicates", "unchecked" })
public void headers() throws Exception {
Map<String, List<String>> headers = rest
.exchange(RequestEntity.get(rest.getRestTemplate().getUriTemplateHandler().expand("/proxy/headers"))
.header("foo", "bar")
.header("abc", "xyz")
.header("baz", "fob")
.build(), Map.class)
.getBody();
assertThat(headers).doesNotContainKey("foo").doesNotContainKey("hello").containsKeys("bar", "abc");
assertThat(headers.get("bar")).containsOnly("hello");
assertThat(headers.get("abc")).containsOnly("123");
assertThat(headers.get("baz")).containsOnly("fob");
}
@Test
public void forwardedHeaderUsesHost() throws Exception {
Map<String, List<String>> headers = rest
.exchange(RequestEntity.get(rest.getRestTemplate().getUriTemplateHandler().expand("/proxy/headers"))
.header("host", "foo:1234")
.build(), Map.class)
.getBody();
assertThat(headers).containsKey("forwarded");
assertThat(headers.get("forwarded").size()).isEqualTo(1);
assertThat(headers.get("forwarded").get(0)).isEqualTo("host=localhost:" + port);
}
@Test
public void deleteWithoutBody() throws Exception {
ResponseEntity<Void> deleteResponse = rest.exchange("/proxy/{id}/no-body", HttpMethod.DELETE, null, Void.TYPE,
Collections.singletonMap("id", "123"));
assertThat(deleteResponse.getStatusCode()).isEqualTo(HttpStatus.OK);
}
@Test
public void deleteWithBody() throws Exception {
Foo foo = new Foo("to-be-deleted");
ParameterizedTypeReference<Map<String, Foo>> returnType = new ParameterizedTypeReference<Map<String, Foo>>() {
};
ResponseEntity<Map<String, Foo>> deleteResponse = rest.exchange("/proxy/{id}", HttpMethod.DELETE,
new HttpEntity<Foo>(foo), returnType, Collections.singletonMap("id", "123"));
assertThat(deleteResponse.getStatusCode()).isEqualTo(HttpStatus.OK);
assertThat(deleteResponse.getBody().get("deleted")).usingRecursiveComparison().isEqualTo(foo);
}
@SpringBootApplication
static
|
ProductionConfigurationTests
|
java
|
spring-projects__spring-security
|
messaging/src/test/java/org/springframework/security/messaging/context/AuthenticationPrincipalArgumentResolverTests.java
|
{
"start": 1888,
"end": 9860
}
|
class ____ {
private Object expectedPrincipal;
private AuthenticationPrincipalArgumentResolver resolver;
@BeforeEach
public void setup() {
this.resolver = new AuthenticationPrincipalArgumentResolver();
}
@AfterEach
public void cleanup() {
SecurityContextHolder.clearContext();
}
@Test
public void supportsParameterNoAnnotation() {
assertThat(this.resolver.supportsParameter(showUserNoAnnotation())).isFalse();
}
@Test
public void supportsParameterAnnotation() {
assertThat(this.resolver.supportsParameter(showUserAnnotationObject())).isTrue();
}
@Test
public void supportsParameterCustomAnnotation() {
assertThat(this.resolver.supportsParameter(showUserCustomAnnotation())).isTrue();
}
@Test
public void resolveArgumentNullAuthentication() throws Exception {
assertThat(this.resolver.resolveArgument(showUserAnnotationString(), null)).isNull();
}
@Test
public void resolveArgumentNullPrincipal() throws Exception {
setAuthenticationPrincipal(null);
assertThat(this.resolver.resolveArgument(showUserAnnotationString(), null)).isNull();
}
@Test
public void resolveArgumentString() throws Exception {
setAuthenticationPrincipal("john");
assertThat(this.resolver.resolveArgument(showUserAnnotationString(), null)).isEqualTo(this.expectedPrincipal);
}
@Test
public void resolveArgumentPrincipalStringOnObject() throws Exception {
setAuthenticationPrincipal("john");
assertThat(this.resolver.resolveArgument(showUserAnnotationObject(), null)).isEqualTo(this.expectedPrincipal);
}
@Test
public void resolveArgumentUserDetails() throws Exception {
setAuthenticationPrincipal(new User("user", "password", AuthorityUtils.createAuthorityList("ROLE_USER")));
assertThat(this.resolver.resolveArgument(showUserAnnotationUserDetails(), null))
.isEqualTo(this.expectedPrincipal);
}
@Test
public void resolveArgumentCustomUserPrincipal() throws Exception {
setAuthenticationPrincipal(new CustomUserPrincipal());
assertThat(this.resolver.resolveArgument(showUserAnnotationCustomUserPrincipal(), null))
.isEqualTo(this.expectedPrincipal);
}
@Test
public void resolveArgumentCustomAnnotation() throws Exception {
setAuthenticationPrincipal(new CustomUserPrincipal());
assertThat(this.resolver.resolveArgument(showUserCustomAnnotation(), null)).isEqualTo(this.expectedPrincipal);
}
@Test
public void resolveArgumentSpel() throws Exception {
CustomUserPrincipal principal = new CustomUserPrincipal();
setAuthenticationPrincipal(principal);
this.expectedPrincipal = principal.property;
assertThat(this.resolver.resolveArgument(showUserSpel(), null)).isEqualTo(this.expectedPrincipal);
}
@Test
public void resolveArgumentSpelCopy() throws Exception {
CopyUserPrincipal principal = new CopyUserPrincipal("property");
setAuthenticationPrincipal(principal);
Object resolveArgument = this.resolver.resolveArgument(showUserSpelCopy(), null);
assertThat(resolveArgument).isEqualTo(principal);
assertThat(resolveArgument).isNotSameAs(principal);
}
@Test
public void resolveArgumentSpelPrimitive() throws Exception {
CustomUserPrincipal principal = new CustomUserPrincipal();
setAuthenticationPrincipal(principal);
this.expectedPrincipal = principal.id;
assertThat(this.resolver.resolveArgument(showUserSpelPrimitive(), null)).isEqualTo(this.expectedPrincipal);
}
@Test
public void resolveArgumentNullOnInvalidType() throws Exception {
setAuthenticationPrincipal(new CustomUserPrincipal());
assertThat(this.resolver.resolveArgument(showUserAnnotationString(), null)).isNull();
}
@Test
public void resolveArgumentErrorOnInvalidType() throws Exception {
setAuthenticationPrincipal(new CustomUserPrincipal());
assertThatExceptionOfType(ClassCastException.class)
.isThrownBy(() -> this.resolver.resolveArgument(showUserAnnotationErrorOnInvalidType(), null));
}
@Test
public void resolveArgumentCustomserErrorOnInvalidType() throws Exception {
setAuthenticationPrincipal(new CustomUserPrincipal());
assertThatExceptionOfType(ClassCastException.class)
.isThrownBy(() -> this.resolver.resolveArgument(showUserAnnotationCurrentUserErrorOnInvalidType(), null));
}
@Test
public void resolveArgumentObject() throws Exception {
setAuthenticationPrincipal(new Object());
assertThat(this.resolver.resolveArgument(showUserAnnotationObject(), null)).isEqualTo(this.expectedPrincipal);
}
@Test
public void resolveArgumentCustomMetaAnnotation() throws Exception {
CustomUserPrincipal principal = new CustomUserPrincipal();
setAuthenticationPrincipal(principal);
this.expectedPrincipal = principal.id;
assertThat(this.resolver.resolveArgument(showUserCustomMetaAnnotation(), null)).isEqualTo(principal.id);
}
@Test
public void resolveArgumentCustomMetaAnnotationTpl() throws Exception {
CustomUserPrincipal principal = new CustomUserPrincipal();
setAuthenticationPrincipal(principal);
this.resolver.setTemplateDefaults(new AnnotationTemplateExpressionDefaults());
this.expectedPrincipal = principal.id;
assertThat(this.resolver.resolveArgument(showUserCustomMetaAnnotationTpl(), null)).isEqualTo(principal.id);
}
@Test
public void resolveArgumentWhenAliasForOnInterfaceThenInherits() {
CustomUserPrincipal principal = new CustomUserPrincipal();
setAuthenticationPrincipal(principal);
assertThat(this.resolver.resolveArgument(showUserNoConcreteAnnotation(), null)).isEqualTo(principal.property);
}
private MethodParameter showUserNoAnnotation() {
return getMethodParameter("showUserNoAnnotation", String.class);
}
private MethodParameter showUserNoConcreteAnnotation() {
return getMethodParameter("showUserNoConcreteAnnotation", String.class);
}
private MethodParameter showUserAnnotationString() {
return getMethodParameter("showUserAnnotation", String.class);
}
private MethodParameter showUserAnnotationErrorOnInvalidType() {
return getMethodParameter("showUserAnnotationErrorOnInvalidType", String.class);
}
private MethodParameter showUserAnnotationCurrentUserErrorOnInvalidType() {
return getMethodParameter("showUserAnnotationCurrentUserErrorOnInvalidType", String.class);
}
private MethodParameter showUserAnnotationUserDetails() {
return getMethodParameter("showUserAnnotation", UserDetails.class);
}
private MethodParameter showUserAnnotationCustomUserPrincipal() {
return getMethodParameter("showUserAnnotation", CustomUserPrincipal.class);
}
private MethodParameter showUserCustomAnnotation() {
return getMethodParameter("showUserCustomAnnotation", CustomUserPrincipal.class);
}
private MethodParameter showUserCustomMetaAnnotation() {
return getMethodParameter("showUserCustomMetaAnnotation", int.class);
}
private MethodParameter showUserCustomMetaAnnotationTpl() {
return getMethodParameter("showUserCustomMetaAnnotationTpl", int.class);
}
private MethodParameter showUserSpel() {
return getMethodParameter("showUserSpel", String.class);
}
private MethodParameter showUserSpelCopy() {
return getMethodParameter("showUserSpelCopy", CopyUserPrincipal.class);
}
private MethodParameter showUserSpelPrimitive() {
return getMethodParameter("showUserSpelPrimitive", int.class);
}
private MethodParameter showUserAnnotationObject() {
return getMethodParameter("showUserAnnotation", Object.class);
}
private MethodParameter getMethodParameter(String methodName, Class<?>... paramTypes) {
Method method = ReflectionUtils.findMethod(TestController.class, methodName, paramTypes);
return new AnnotatedMethod(method).getMethodParameters()[0];
}
private void setAuthenticationPrincipal(Object principal) {
this.expectedPrincipal = principal;
SecurityContextHolder.getContext()
.setAuthentication(new TestingAuthenticationToken(this.expectedPrincipal, "password", "ROLE_USER"));
}
@Target({ ElementType.PARAMETER })
@Retention(RetentionPolicy.RUNTIME)
@AuthenticationPrincipal
static @
|
AuthenticationPrincipalArgumentResolverTests
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/MiloServerEndpointBuilderFactory.java
|
{
"start": 1586,
"end": 1956
}
|
interface ____
extends
EndpointConsumerBuilder {
default AdvancedMiloServerEndpointConsumerBuilder advanced() {
return (AdvancedMiloServerEndpointConsumerBuilder) this;
}
}
/**
* Advanced builder for endpoint consumers for the OPC UA Server component.
*/
public
|
MiloServerEndpointConsumerBuilder
|
java
|
apache__kafka
|
streams/src/test/java/org/apache/kafka/streams/processor/internals/TaskManagerTest.java
|
{
"start": 6345,
"end": 246258
}
|
class ____ {
private final String topic1 = "topic1";
private final String topic2 = "topic2";
private final TaskId taskId00 = new TaskId(0, 0);
private final TopicPartition t1p0 = new TopicPartition(topic1, 0);
private final TopicPartition t1p0changelog = new TopicPartition("changelog", 0);
private final Set<TopicPartition> taskId00Partitions = Set.of(t1p0);
private final Set<TopicPartition> taskId00ChangelogPartitions = Set.of(t1p0changelog);
private final Map<TaskId, Set<TopicPartition>> taskId00Assignment = singletonMap(taskId00, taskId00Partitions);
private final TaskId taskId01 = new TaskId(0, 1);
private final TopicPartition t1p1 = new TopicPartition(topic1, 1);
private final TopicPartition t2p2 = new TopicPartition(topic2, 1);
private final TopicPartition t1p1changelog = new TopicPartition("changelog", 1);
private final TopicPartition t1p1changelog2 = new TopicPartition("changelog2", 1);
private final Set<TopicPartition> taskId01Partitions = Set.of(t1p1);
private final Set<TopicPartition> taskId01ChangelogPartitions = Set.of(t1p1changelog);
private final Map<TaskId, Set<TopicPartition>> taskId01Assignment = singletonMap(taskId01, taskId01Partitions);
private final TaskId taskId02 = new TaskId(0, 2);
private final TopicPartition t1p2 = new TopicPartition(topic1, 2);
private final TopicPartition t1p2changelog = new TopicPartition("changelog", 2);
private final Set<TopicPartition> taskId02Partitions = Set.of(t1p2);
private final Set<TopicPartition> taskId02ChangelogPartitions = Set.of(t1p2changelog);
private final TaskId taskId03 = new TaskId(0, 3);
private final TopicPartition t1p3 = new TopicPartition(topic1, 3);
private final TopicPartition t1p3changelog = new TopicPartition("changelog", 3);
private final Set<TopicPartition> taskId03Partitions = Set.of(t1p3);
private final Set<TopicPartition> taskId03ChangelogPartitions = Set.of(t1p3changelog);
private final TaskId taskId04 = new TaskId(0, 4);
private final TopicPartition t1p4 = new TopicPartition(topic1, 4);
private final TopicPartition t1p4changelog = new TopicPartition("changelog", 4);
private final Set<TopicPartition> taskId04Partitions = Set.of(t1p4);
private final Set<TopicPartition> taskId04ChangelogPartitions = Set.of(t1p4changelog);
private final TaskId taskId05 = new TaskId(0, 5);
private final TopicPartition t1p5 = new TopicPartition(topic1, 5);
private final TopicPartition t1p5changelog = new TopicPartition("changelog", 5);
private final Set<TopicPartition> taskId05Partitions = Set.of(t1p5);
private final Set<TopicPartition> taskId05ChangelogPartitions = Set.of(t1p5changelog);
private final TaskId taskId10 = new TaskId(1, 0);
private final TopicPartition t2p0 = new TopicPartition(topic2, 0);
private final Set<TopicPartition> taskId10Partitions = Set.of(t2p0);
private final Set<TopicPartition> assignment = singleton(new TopicPartition("assignment", 0));
final java.util.function.Consumer<Set<TopicPartition>> noOpResetter = partitions -> { };
@Mock
private InternalTopologyBuilder topologyBuilder;
@Mock
private StateDirectory stateDirectory;
@Mock
private ChangelogReader changeLogReader;
@Mock
private Consumer<byte[], byte[]> consumer;
@Mock
private ActiveTaskCreator activeTaskCreator;
@Mock
private StandbyTaskCreator standbyTaskCreator;
@Mock
private Admin adminClient;
@Mock
private ProcessorStateManager stateManager;
final StateUpdater stateUpdater = mock(StateUpdater.class);
final DefaultTaskManager schedulingTaskManager = mock(DefaultTaskManager.class);
private TaskManager taskManager;
private TopologyMetadata topologyMetadata;
private final Time time = new MockTime();
@TempDir
Path testFolder;
@BeforeEach
public void setUp() {
taskManager = setUpTaskManagerWithoutStateUpdater(StreamsConfigUtils.ProcessingMode.AT_LEAST_ONCE, null, false);
}
private TaskManager setUpTaskManagerWithStateUpdater(final ProcessingMode processingMode, final TasksRegistry tasks) {
return setUpTaskManagerWithStateUpdater(processingMode, tasks, false);
}
private TaskManager setUpTaskManagerWithStateUpdater(final ProcessingMode processingMode,
final TasksRegistry tasks,
final boolean processingThreadsEnabled) {
topologyMetadata = new TopologyMetadata(topologyBuilder, new DummyStreamsConfig(processingMode));
final TaskManager taskManager = new TaskManager(
time,
changeLogReader,
ProcessId.randomProcessId(),
"taskManagerTest",
activeTaskCreator,
standbyTaskCreator,
tasks != null ? tasks : new Tasks(new LogContext()),
topologyMetadata,
adminClient,
stateDirectory,
stateUpdater,
processingThreadsEnabled ? schedulingTaskManager : null
);
taskManager.setMainConsumer(consumer);
return taskManager;
}
private TaskManager setUpTaskManagerWithoutStateUpdater(final ProcessingMode processingMode,
final TasksRegistry tasks,
final boolean processingThreadsEnabled) {
topologyMetadata = new TopologyMetadata(topologyBuilder, new DummyStreamsConfig(processingMode));
final TaskManager taskManager = new TaskManager(
time,
changeLogReader,
ProcessId.randomProcessId(),
"taskManagerTest",
activeTaskCreator,
standbyTaskCreator,
tasks != null ? tasks : new Tasks(new LogContext()),
topologyMetadata,
adminClient,
stateDirectory,
null,
processingThreadsEnabled ? schedulingTaskManager : null
);
taskManager.setMainConsumer(consumer);
return taskManager;
}
@Test
public void shouldLockAllTasksOnCorruptionWithProcessingThreads() {
final StreamTask activeTask1 = statefulTask(taskId00, taskId00ChangelogPartitions)
.inState(State.RUNNING)
.withInputPartitions(taskId00Partitions).build();
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks, true);
when(tasks.activeTaskIds()).thenReturn(Set.of(taskId00, taskId01));
when(tasks.task(taskId00)).thenReturn(activeTask1);
final KafkaFuture<Void> mockFuture = KafkaFuture.completedFuture(null);
when(schedulingTaskManager.lockTasks(any())).thenReturn(mockFuture);
taskManager.handleCorruption(Set.of(taskId00));
verify(consumer).assignment();
verify(schedulingTaskManager).lockTasks(Set.of(taskId00, taskId01));
verify(schedulingTaskManager).unlockTasks(Set.of(taskId00, taskId01));
}
    @Test
    public void shouldLockCommitableTasksOnCorruptionWithProcessingThreads() {
        // NOTE(review): despite the "OnCorruption" name, this test exercises commit():
        // committing a set of tasks must lock exactly those tasks and unlock them afterwards.
        // Consider renaming to reflect the commit path — verify intent.
        final StreamTask activeTask1 = statefulTask(taskId00, taskId00ChangelogPartitions)
            .inState(State.RUNNING)
            .withInputPartitions(taskId00Partitions).build();
        final StreamTask activeTask2 = statefulTask(taskId01, taskId01ChangelogPartitions)
            .inState(State.RUNNING)
            .withInputPartitions(taskId01Partitions).build();
        final TasksRegistry tasks = mock(TasksRegistry.class);
        final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks, true);
        final KafkaFuture<Void> mockFuture = KafkaFuture.completedFuture(null);
        when(schedulingTaskManager.lockTasks(any())).thenReturn(mockFuture);
        taskManager.commit(Set.of(activeTask1, activeTask2));
        verify(schedulingTaskManager).lockTasks(Set.of(taskId00, taskId01));
        verify(schedulingTaskManager).unlockTasks(Set.of(taskId00, taskId01));
    }
@Test
public void shouldLockActiveOnHandleAssignmentWithProcessingThreads() {
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks, true);
when(tasks.allTaskIds()).thenReturn(Set.of(taskId00, taskId01));
final KafkaFuture<Void> mockFuture = KafkaFuture.completedFuture(null);
when(schedulingTaskManager.lockTasks(any())).thenReturn(mockFuture);
taskManager.handleAssignment(
mkMap(mkEntry(taskId00, taskId00Partitions)),
mkMap(mkEntry(taskId01, taskId01Partitions))
);
verify(schedulingTaskManager).lockTasks(Set.of(taskId00, taskId01));
verify(schedulingTaskManager).unlockTasks(Set.of(taskId00, taskId01));
}
    @Test
    public void shouldLockAffectedTasksOnHandleRevocation() {
        // Revoking taskId01's partitions must lock (and later unlock) both tasks,
        // not only the one whose partitions are being revoked.
        final StreamTask activeTask1 = statefulTask(taskId00, taskId00ChangelogPartitions)
            .inState(State.RUNNING)
            .withInputPartitions(taskId00Partitions).build();
        final StreamTask activeTask2 = statefulTask(taskId01, taskId01ChangelogPartitions)
            .inState(State.RUNNING)
            .withInputPartitions(taskId01Partitions).build();
        final TasksRegistry tasks = mock(TasksRegistry.class);
        final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks, true);
        when(tasks.allTasks()).thenReturn(Set.of(activeTask1, activeTask2));
        final KafkaFuture<Void> mockFuture = KafkaFuture.completedFuture(null);
        when(schedulingTaskManager.lockTasks(any())).thenReturn(mockFuture);
        taskManager.handleRevocation(taskId01Partitions);
        verify(schedulingTaskManager).lockTasks(Set.of(taskId00, taskId01));
        verify(schedulingTaskManager).unlockTasks(Set.of(taskId00, taskId01));
    }
    @Test
    public void shouldLockTasksOnClose() {
        // Closing a subset of tasks must lock and unlock only that subset (taskId00 here),
        // leaving the other running task untouched.
        final StreamTask activeTask1 = statefulTask(taskId00, taskId00ChangelogPartitions)
            .inState(State.RUNNING)
            .withInputPartitions(taskId00Partitions).build();
        final StreamTask activeTask2 = statefulTask(taskId01, taskId01ChangelogPartitions)
            .inState(State.RUNNING)
            .withInputPartitions(taskId01Partitions).build();
        final TasksRegistry tasks = mock(TasksRegistry.class);
        final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks, true);
        when(tasks.allTasks()).thenReturn(Set.of(activeTask1, activeTask2));
        final KafkaFuture<Void> mockFuture = KafkaFuture.completedFuture(null);
        when(schedulingTaskManager.lockTasks(any())).thenReturn(mockFuture);
        taskManager.closeAndCleanUpTasks(Set.of(activeTask1), Set.of(), false);
        verify(schedulingTaskManager).lockTasks(Set.of(taskId00));
        verify(schedulingTaskManager).unlockTasks(Set.of(taskId00));
    }
@Test
public void shouldResumePollingForPartitionsWithAvailableSpaceForAllActiveTasks() {
final StreamTask activeTask1 = statefulTask(taskId00, taskId00ChangelogPartitions)
.inState(State.RUNNING)
.withInputPartitions(taskId00Partitions).build();
final StreamTask activeTask2 = statefulTask(taskId01, taskId01ChangelogPartitions)
.inState(State.RUNNING)
.withInputPartitions(taskId01Partitions).build();
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
when(tasks.activeTasks()).thenReturn(Set.of(activeTask1, activeTask2));
taskManager.resumePollingForPartitionsWithAvailableSpace();
verify(activeTask1).resumePollingForPartitionsWithAvailableSpace();
verify(activeTask2).resumePollingForPartitionsWithAvailableSpace();
}
@Test
public void shouldUpdateLagForAllActiveTasks() {
final StreamTask activeTask1 = statefulTask(taskId00, taskId00ChangelogPartitions)
.inState(State.RUNNING)
.withInputPartitions(taskId00Partitions).build();
final StreamTask activeTask2 = statefulTask(taskId01, taskId01ChangelogPartitions)
.inState(State.RUNNING)
.withInputPartitions(taskId01Partitions).build();
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
when(tasks.activeTasks()).thenReturn(Set.of(activeTask1, activeTask2));
taskManager.updateLags();
verify(activeTask1).updateLags();
verify(activeTask2).updateLags();
}
@Test
public void shouldRemoveUnusedActiveTaskFromStateUpdaterAndCloseCleanly() {
final StreamTask activeTaskToClose = statefulTask(taskId03, taskId03ChangelogPartitions)
.inState(State.RESTORING)
.withInputPartitions(taskId03Partitions).build();
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
when(stateUpdater.tasks()).thenReturn(Set.of(activeTaskToClose));
final CompletableFuture<StateUpdater.RemovedTaskResult> future = new CompletableFuture<>();
when(stateUpdater.remove(activeTaskToClose.id())).thenReturn(future);
future.complete(new StateUpdater.RemovedTaskResult(activeTaskToClose));
taskManager.handleAssignment(Collections.emptyMap(), Collections.emptyMap());
verify(activeTaskToClose).suspend();
verify(activeTaskToClose).closeClean();
verify(activeTaskCreator).createTasks(consumer, Collections.emptyMap());
verify(standbyTaskCreator).createTasks(Collections.emptyMap());
}
@Test
public void shouldRemoveUnusedFailedActiveTaskFromStateUpdaterAndCloseDirty() {
final StreamTask activeTaskToClose = statefulTask(taskId03, taskId03ChangelogPartitions)
.inState(State.RESTORING)
.withInputPartitions(taskId03Partitions).build();
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
when(stateUpdater.tasks()).thenReturn(Set.of(activeTaskToClose));
final CompletableFuture<StateUpdater.RemovedTaskResult> future = new CompletableFuture<>();
when(stateUpdater.remove(activeTaskToClose.id())).thenReturn(future);
future.complete(new StateUpdater.RemovedTaskResult(activeTaskToClose, new RuntimeException("KABOOM!")));
taskManager.handleAssignment(Collections.emptyMap(), Collections.emptyMap());
verify(activeTaskToClose).prepareCommit(false);
verify(activeTaskToClose).suspend();
verify(activeTaskToClose).closeDirty();
verify(activeTaskCreator).createTasks(consumer, Collections.emptyMap());
verify(standbyTaskCreator).createTasks(Collections.emptyMap());
}
@Test
public void shouldRemoveUnusedStandbyTaskFromStateUpdaterAndCloseCleanly() {
final StandbyTask standbyTaskToClose = standbyTask(taskId02, taskId02ChangelogPartitions)
.inState(State.RUNNING)
.withInputPartitions(taskId02Partitions).build();
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
when(stateUpdater.tasks()).thenReturn(Set.of(standbyTaskToClose));
final CompletableFuture<StateUpdater.RemovedTaskResult> future = new CompletableFuture<>();
when(stateUpdater.remove(standbyTaskToClose.id())).thenReturn(future);
future.complete(new StateUpdater.RemovedTaskResult(standbyTaskToClose));
taskManager.handleAssignment(Collections.emptyMap(), Collections.emptyMap());
verify(standbyTaskToClose).suspend();
verify(standbyTaskToClose).closeClean();
verify(activeTaskCreator).createTasks(consumer, Collections.emptyMap());
verify(standbyTaskCreator).createTasks(Collections.emptyMap());
}
@Test
public void shouldRemoveUnusedFailedStandbyTaskFromStateUpdaterAndCloseDirty() {
final StandbyTask standbyTaskToClose = standbyTask(taskId02, taskId02ChangelogPartitions)
.inState(State.RUNNING)
.withInputPartitions(taskId02Partitions).build();
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
when(stateUpdater.tasks()).thenReturn(Set.of(standbyTaskToClose));
final CompletableFuture<StateUpdater.RemovedTaskResult> future = new CompletableFuture<>();
when(stateUpdater.remove(standbyTaskToClose.id())).thenReturn(future);
future.complete(new StateUpdater.RemovedTaskResult(standbyTaskToClose, new RuntimeException("KABOOM!")));
taskManager.handleAssignment(Collections.emptyMap(), Collections.emptyMap());
verify(standbyTaskToClose).prepareCommit(false);
verify(standbyTaskToClose).suspend();
verify(standbyTaskToClose).closeDirty();
verify(activeTaskCreator).createTasks(consumer, Collections.emptyMap());
verify(standbyTaskCreator).createTasks(Collections.emptyMap());
}
@Test
public void shouldCollectFailedTaskFromStateUpdaterAndRethrow() {
final StandbyTask failedStandbyTask = standbyTask(taskId02, taskId02ChangelogPartitions)
.inState(State.RUNNING)
.withInputPartitions(taskId02Partitions).build();
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
when(stateUpdater.tasks()).thenReturn(Set.of(failedStandbyTask));
final CompletableFuture<StateUpdater.RemovedTaskResult> future = new CompletableFuture<>();
when(stateUpdater.remove(failedStandbyTask.id())).thenReturn(future);
final RuntimeException kaboom = new RuntimeException("KABOOM!");
future.completeExceptionally(kaboom);
when(stateUpdater.drainExceptionsAndFailedTasks())
.thenReturn(singletonList(new ExceptionAndTask(new RuntimeException("KABOOM!"), failedStandbyTask)));
final StreamsException exception = assertThrows(
StreamsException.class,
() -> taskManager.handleAssignment(Collections.emptyMap(), Collections.emptyMap())
);
assertEquals("Encounter unexpected fatal error for task " + failedStandbyTask.id(), exception.getMessage());
assertInstanceOf(RuntimeException.class, exception.getCause());
assertEquals(kaboom.getMessage(), exception.getCause().getMessage());
verify(tasks).addFailedTask(failedStandbyTask);
}
@Test
public void shouldUpdateInputPartitionOfActiveTaskInStateUpdater() {
final StreamTask activeTaskToUpdateInputPartitions = statefulTask(taskId03, taskId03ChangelogPartitions)
.inState(State.RESTORING)
.withInputPartitions(taskId03Partitions).build();
final Set<TopicPartition> newInputPartitions = taskId02Partitions;
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
when(stateUpdater.tasks()).thenReturn(Set.of(activeTaskToUpdateInputPartitions));
final CompletableFuture<StateUpdater.RemovedTaskResult> future = new CompletableFuture<>();
when(stateUpdater.remove(activeTaskToUpdateInputPartitions.id())).thenReturn(future);
future.complete(new StateUpdater.RemovedTaskResult(activeTaskToUpdateInputPartitions));
taskManager.handleAssignment(
mkMap(mkEntry(activeTaskToUpdateInputPartitions.id(), newInputPartitions)),
Collections.emptyMap()
);
final InOrder updateInputPartitionsThenAddBack = inOrder(stateUpdater, activeTaskToUpdateInputPartitions);
updateInputPartitionsThenAddBack.verify(activeTaskToUpdateInputPartitions)
.updateInputPartitions(eq(newInputPartitions), any());
updateInputPartitionsThenAddBack.verify(stateUpdater).add(activeTaskToUpdateInputPartitions);
verify(activeTaskCreator).createTasks(consumer, Collections.emptyMap());
verify(standbyTaskCreator).createTasks(Collections.emptyMap());
}
@Test
public void shouldRecycleActiveTaskInStateUpdater() {
final StreamTask activeTaskToRecycle = statefulTask(taskId03, taskId03ChangelogPartitions)
.inState(State.RESTORING)
.withInputPartitions(taskId03Partitions).build();
final StandbyTask recycledStandbyTask = standbyTask(taskId03, taskId03ChangelogPartitions)
.inState(State.CREATED)
.withInputPartitions(taskId03Partitions).build();
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
when(stateUpdater.tasks()).thenReturn(Set.of(activeTaskToRecycle));
when(standbyTaskCreator.createStandbyTaskFromActive(activeTaskToRecycle, taskId03Partitions))
.thenReturn(recycledStandbyTask);
final CompletableFuture<StateUpdater.RemovedTaskResult> future = new CompletableFuture<>();
when(stateUpdater.remove(taskId03)).thenReturn(future);
future.complete(new StateUpdater.RemovedTaskResult(activeTaskToRecycle));
taskManager.handleAssignment(
Collections.emptyMap(),
mkMap(mkEntry(activeTaskToRecycle.id(), activeTaskToRecycle.inputPartitions()))
);
verify(tasks).addPendingTasksToInit(Collections.singleton(recycledStandbyTask));
verify(activeTaskCreator).createTasks(consumer, Collections.emptyMap());
verify(standbyTaskCreator).createTasks(Collections.emptyMap());
}
@Test
public void shouldHandleExceptionThrownDuringRecyclingActiveTask() {
final StreamTask activeTaskToRecycle = statefulTask(taskId00, taskId00ChangelogPartitions)
.inState(State.RESTORING)
.withInputPartitions(taskId00Partitions).build();
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
when(stateUpdater.tasks()).thenReturn(Set.of(activeTaskToRecycle));
when(standbyTaskCreator.createStandbyTaskFromActive(activeTaskToRecycle, activeTaskToRecycle.inputPartitions()))
.thenThrow(new RuntimeException());
final CompletableFuture<StateUpdater.RemovedTaskResult> future = new CompletableFuture<>();
when(stateUpdater.remove(activeTaskToRecycle.id())).thenReturn(future);
future.complete(new StateUpdater.RemovedTaskResult(activeTaskToRecycle));
assertThrows(
StreamsException.class,
() -> taskManager.handleAssignment(
Collections.emptyMap(),
mkMap(mkEntry(activeTaskToRecycle.id(), activeTaskToRecycle.inputPartitions()))
)
);
verify(stateUpdater, never()).add(any());
verify(tasks, never()).addPendingTasksToInit(Collections.singleton(any()));
verify(activeTaskToRecycle).closeDirty();
}
@Test
public void shouldRecycleStandbyTaskInStateUpdater() {
final StandbyTask standbyTaskToRecycle = standbyTask(taskId03, taskId03ChangelogPartitions)
.inState(State.RUNNING)
.withInputPartitions(taskId03Partitions).build();
final StreamTask recycledActiveTask = statefulTask(taskId03, taskId03ChangelogPartitions)
.inState(State.CREATED)
.withInputPartitions(taskId03Partitions).build();
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
when(stateUpdater.tasks()).thenReturn(Set.of(standbyTaskToRecycle));
when(activeTaskCreator.createActiveTaskFromStandby(standbyTaskToRecycle, taskId03Partitions, consumer))
.thenReturn(recycledActiveTask);
final CompletableFuture<StateUpdater.RemovedTaskResult> future = new CompletableFuture<>();
when(stateUpdater.remove(standbyTaskToRecycle.id())).thenReturn(future);
future.complete(new StateUpdater.RemovedTaskResult(standbyTaskToRecycle));
taskManager.handleAssignment(
mkMap(mkEntry(standbyTaskToRecycle.id(), standbyTaskToRecycle.inputPartitions())),
Collections.emptyMap()
);
verify(tasks).addPendingTasksToInit(Collections.singleton(recycledActiveTask));
verify(activeTaskCreator).createTasks(consumer, Collections.emptyMap());
verify(standbyTaskCreator).createTasks(Collections.emptyMap());
}
@Test
public void shouldHandleExceptionThrownDuringRecyclingStandbyTask() {
final StandbyTask standbyTaskToRecycle = standbyTask(taskId03, taskId03ChangelogPartitions)
.inState(State.RUNNING)
.withInputPartitions(taskId03Partitions).build();
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
when(stateUpdater.tasks()).thenReturn(Set.of(standbyTaskToRecycle));
when(activeTaskCreator.createActiveTaskFromStandby(
standbyTaskToRecycle,
standbyTaskToRecycle.inputPartitions(),
consumer))
.thenThrow(new RuntimeException());
final CompletableFuture<StateUpdater.RemovedTaskResult> future = new CompletableFuture<>();
when(stateUpdater.remove(standbyTaskToRecycle.id())).thenReturn(future);
future.complete(new StateUpdater.RemovedTaskResult(standbyTaskToRecycle));
assertThrows(
StreamsException.class,
() -> taskManager.handleAssignment(
mkMap(mkEntry(standbyTaskToRecycle.id(), standbyTaskToRecycle.inputPartitions())),
Collections.emptyMap()
)
);
verify(stateUpdater, never()).add(any());
verify(tasks, never()).addPendingTasksToInit(Collections.singleton(any()));
verify(standbyTaskToRecycle).closeDirty();
}
@Test
public void shouldKeepReassignedActiveTaskInStateUpdater() {
final StreamTask reassignedActiveTask = statefulTask(taskId03, taskId03ChangelogPartitions)
.inState(State.RESTORING)
.withInputPartitions(taskId03Partitions).build();
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
when(stateUpdater.tasks()).thenReturn(Set.of(reassignedActiveTask));
taskManager.handleAssignment(
mkMap(mkEntry(reassignedActiveTask.id(), reassignedActiveTask.inputPartitions())),
Collections.emptyMap()
);
verify(stateUpdater, never()).remove(reassignedActiveTask.id());
verify(activeTaskCreator).createTasks(consumer, Collections.emptyMap());
verify(standbyTaskCreator).createTasks(Collections.emptyMap());
}
@Test
public void shouldMoveReassignedSuspendedActiveTaskToStateUpdater() {
final StreamTask reassignedActiveTask = statefulTask(taskId03, taskId03ChangelogPartitions)
.inState(State.SUSPENDED)
.withInputPartitions(taskId03Partitions).build();
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
when(tasks.allNonFailedTasks()).thenReturn(Set.of(reassignedActiveTask));
taskManager.handleAssignment(
mkMap(mkEntry(reassignedActiveTask.id(), reassignedActiveTask.inputPartitions())),
Collections.emptyMap()
);
verify(tasks).removeTask(reassignedActiveTask);
verify(stateUpdater).add(reassignedActiveTask);
verify(activeTaskCreator).createTasks(consumer, Collections.emptyMap());
verify(standbyTaskCreator).createTasks(Collections.emptyMap());
}
@Test
public void shouldAddFailedActiveTaskToRecycleDuringAssignmentToTaskRegistry() {
final StreamTask failedActiveTaskToRecycle = statefulTask(taskId03, taskId03ChangelogPartitions)
.inState(State.RESTORING)
.withInputPartitions(taskId03Partitions).build();
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
when(stateUpdater.tasks()).thenReturn(Set.of(failedActiveTaskToRecycle));
final RuntimeException taskException = new RuntimeException("Nobody expects the Spanish inquisition!");
when(stateUpdater.remove(failedActiveTaskToRecycle.id()))
.thenReturn(CompletableFuture.completedFuture(
new StateUpdater.RemovedTaskResult(failedActiveTaskToRecycle, taskException)
));
final StreamsException exception = assertThrows(
StreamsException.class,
() -> taskManager.handleAssignment(
Collections.emptyMap(),
mkMap(mkEntry(failedActiveTaskToRecycle.id(), failedActiveTaskToRecycle.inputPartitions()))
)
);
assertEquals("Encounter unexpected fatal error for task " + failedActiveTaskToRecycle.id(), exception.getMessage());
assertEquals(taskException, exception.getCause());
verify(tasks).addFailedTask(failedActiveTaskToRecycle);
verify(tasks, never()).addTask(failedActiveTaskToRecycle);
verify(tasks).allNonFailedTasks();
verify(standbyTaskCreator, never()).createStandbyTaskFromActive(failedActiveTaskToRecycle, taskId03Partitions);
}
@Test
public void shouldAddFailedStandbyTaskToRecycleDuringAssignmentToTaskRegistry() {
final StandbyTask failedStandbyTaskToRecycle = standbyTask(taskId03, taskId03ChangelogPartitions)
.inState(State.RUNNING)
.withInputPartitions(taskId03Partitions).build();
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
when(stateUpdater.tasks()).thenReturn(Set.of(failedStandbyTaskToRecycle));
final RuntimeException taskException = new RuntimeException("Nobody expects the Spanish inquisition!");
when(stateUpdater.remove(failedStandbyTaskToRecycle.id()))
.thenReturn(CompletableFuture.completedFuture(
new StateUpdater.RemovedTaskResult(failedStandbyTaskToRecycle, taskException)
));
final StreamsException exception = assertThrows(
StreamsException.class,
() -> taskManager.handleAssignment(
mkMap(mkEntry(failedStandbyTaskToRecycle.id(), failedStandbyTaskToRecycle.inputPartitions())),
Collections.emptyMap()
)
);
assertEquals("Encounter unexpected fatal error for task " + failedStandbyTaskToRecycle.id(), exception.getMessage());
assertEquals(taskException, exception.getCause());
verify(tasks).addFailedTask(failedStandbyTaskToRecycle);
verify(tasks, never()).addTask(failedStandbyTaskToRecycle);
verify(tasks).allNonFailedTasks();
verify(activeTaskCreator, never()).createActiveTaskFromStandby(failedStandbyTaskToRecycle, taskId03Partitions, consumer);
}
@Test
public void shouldAddFailedActiveTasksToReassignWithDifferentInputPartitionsDuringAssignmentToTaskRegistry() {
final StreamTask failedActiveTaskToReassign = statefulTask(taskId03, taskId03ChangelogPartitions)
.inState(State.RESTORING)
.withInputPartitions(taskId03Partitions).build();
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
when(stateUpdater.tasks()).thenReturn(Set.of(failedActiveTaskToReassign));
final RuntimeException taskException = new RuntimeException("Nobody expects the Spanish inquisition!");
when(stateUpdater.remove(failedActiveTaskToReassign.id()))
.thenReturn(CompletableFuture.completedFuture(
new StateUpdater.RemovedTaskResult(failedActiveTaskToReassign, taskException)
));
final StreamsException exception = assertThrows(
StreamsException.class,
() -> taskManager.handleAssignment(
mkMap(mkEntry(failedActiveTaskToReassign.id(), taskId00Partitions)),
Collections.emptyMap()
)
);
assertEquals("Encounter unexpected fatal error for task " + failedActiveTaskToReassign.id(), exception.getMessage());
assertEquals(taskException, exception.getCause());
verify(tasks).addFailedTask(failedActiveTaskToReassign);
verify(tasks, never()).addTask(failedActiveTaskToReassign);
verify(tasks).allNonFailedTasks();
verify(tasks, never()).updateActiveTaskInputPartitions(failedActiveTaskToReassign, taskId00Partitions);
}
@Test
public void shouldFirstHandleTasksInStateUpdaterThenSuspendedActiveTasksInTaskRegistry() {
final StreamTask reassignedActiveTask1 = statefulTask(taskId03, taskId03ChangelogPartitions)
.inState(State.SUSPENDED)
.withInputPartitions(taskId03Partitions).build();
final StreamTask reassignedActiveTask2 = statefulTask(taskId02, taskId02ChangelogPartitions)
.inState(State.RESTORING)
.withInputPartitions(taskId02Partitions).build();
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
when(tasks.allNonFailedTasks()).thenReturn(Set.of(reassignedActiveTask1));
when(stateUpdater.tasks()).thenReturn(Set.of(reassignedActiveTask2));
when(stateUpdater.remove(reassignedActiveTask2.id()))
.thenReturn(CompletableFuture.completedFuture(new StateUpdater.RemovedTaskResult(reassignedActiveTask2)));
taskManager.handleAssignment(
mkMap(
mkEntry(reassignedActiveTask1.id(), reassignedActiveTask1.inputPartitions()),
mkEntry(reassignedActiveTask2.id(), taskId00Partitions)
),
Collections.emptyMap()
);
final InOrder inOrder = inOrder(stateUpdater, tasks);
inOrder.verify(stateUpdater).remove(reassignedActiveTask2.id());
inOrder.verify(tasks).removeTask(reassignedActiveTask1);
inOrder.verify(stateUpdater).add(reassignedActiveTask1);
}
@Test
public void shouldNeverUpdateInputPartitionsOfStandbyTaskInStateUpdater() {
final StandbyTask standbyTaskToUpdateInputPartitions = standbyTask(taskId02, taskId02ChangelogPartitions)
.inState(State.RUNNING)
.withInputPartitions(taskId02Partitions).build();
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
when(stateUpdater.tasks()).thenReturn(Set.of(standbyTaskToUpdateInputPartitions));
taskManager.handleAssignment(
Collections.emptyMap(),
mkMap(mkEntry(standbyTaskToUpdateInputPartitions.id(), taskId03Partitions))
);
verify(stateUpdater, never()).remove(standbyTaskToUpdateInputPartitions.id());
verify(activeTaskCreator).createTasks(consumer, Collections.emptyMap());
verify(standbyTaskCreator).createTasks(Collections.emptyMap());
}
@Test
public void shouldKeepReassignedStandbyTaskInStateUpdater() {
final StandbyTask reassignedStandbyTask = standbyTask(taskId02, taskId02ChangelogPartitions)
.inState(State.RUNNING)
.withInputPartitions(taskId02Partitions).build();
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
when(stateUpdater.tasks()).thenReturn(Set.of(reassignedStandbyTask));
taskManager.handleAssignment(
Collections.emptyMap(),
mkMap(mkEntry(reassignedStandbyTask.id(), reassignedStandbyTask.inputPartitions()))
);
verify(activeTaskCreator).createTasks(consumer, Collections.emptyMap());
verify(standbyTaskCreator).createTasks(Collections.emptyMap());
}
@Test
public void shouldAssignMultipleTasksInStateUpdater() {
final StreamTask activeTaskToClose = statefulTask(taskId03, taskId03ChangelogPartitions)
.inState(State.RESTORING)
.withInputPartitions(taskId03Partitions).build();
final StandbyTask standbyTaskToRecycle = standbyTask(taskId02, taskId02ChangelogPartitions)
.inState(State.RUNNING)
.withInputPartitions(taskId02Partitions).build();
final StreamTask recycledActiveTask = statefulTask(taskId02, taskId02ChangelogPartitions)
.inState(State.CREATED)
.withInputPartitions(taskId02Partitions).build();
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
when(stateUpdater.tasks()).thenReturn(Set.of(activeTaskToClose, standbyTaskToRecycle));
final CompletableFuture<StateUpdater.RemovedTaskResult> futureForActiveTaskToClose = new CompletableFuture<>();
when(stateUpdater.remove(activeTaskToClose.id())).thenReturn(futureForActiveTaskToClose);
futureForActiveTaskToClose.complete(new StateUpdater.RemovedTaskResult(activeTaskToClose));
when(activeTaskCreator.createActiveTaskFromStandby(standbyTaskToRecycle, taskId02Partitions, consumer))
.thenReturn(recycledActiveTask);
final CompletableFuture<StateUpdater.RemovedTaskResult> futureForStandbyTaskToRecycle = new CompletableFuture<>();
when(stateUpdater.remove(standbyTaskToRecycle.id())).thenReturn(futureForStandbyTaskToRecycle);
futureForStandbyTaskToRecycle.complete(new StateUpdater.RemovedTaskResult(standbyTaskToRecycle));
taskManager.handleAssignment(
mkMap(mkEntry(standbyTaskToRecycle.id(), standbyTaskToRecycle.inputPartitions())),
Collections.emptyMap()
);
verify(tasks).addPendingTasksToInit(Collections.singleton(recycledActiveTask));
verify(activeTaskToClose).suspend();
verify(activeTaskToClose).closeClean();
verify(standbyTaskCreator).createTasks(Collections.emptyMap());
verify(activeTaskCreator).createTasks(consumer, Collections.emptyMap());
}
@Test
public void shouldReturnRunningTasksStateUpdaterTasksAndTasksToInitInAllTasks() {
final StreamTask activeTaskToInit = statefulTask(taskId01, taskId01ChangelogPartitions)
.inState(State.CREATED)
.withInputPartitions(taskId03Partitions).build();
final StreamTask runningActiveTask = statefulTask(taskId03, taskId03ChangelogPartitions)
.inState(State.RUNNING)
.withInputPartitions(taskId03Partitions).build();
final StandbyTask standbyTaskInStateUpdater = standbyTask(taskId02, taskId02ChangelogPartitions)
.inState(State.RUNNING)
.withInputPartitions(taskId02Partitions).build();
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
when(stateUpdater.tasks()).thenReturn(Set.of(standbyTaskInStateUpdater));
when(tasks.allTasksPerId()).thenReturn(mkMap(mkEntry(taskId03, runningActiveTask)));
when(tasks.pendingTasksToInit()).thenReturn(Set.of(activeTaskToInit));
assertEquals(
taskManager.allTasks(),
mkMap(
mkEntry(taskId03, runningActiveTask),
mkEntry(taskId02, standbyTaskInStateUpdater),
mkEntry(taskId01, activeTaskToInit)
)
);
}
@Test
public void shouldNotReturnStateUpdaterTasksInOwnedTasks() {
final StreamTask activeTask = statefulTask(taskId03, taskId03ChangelogPartitions)
.inState(State.RUNNING)
.withInputPartitions(taskId03Partitions).build();
standbyTask(taskId02, taskId02ChangelogPartitions)
.inState(State.RUNNING)
.withInputPartitions(taskId02Partitions).build();
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
when(tasks.allTasksPerId()).thenReturn(mkMap(mkEntry(taskId03, activeTask)));
assertEquals(taskManager.allOwnedTasks(), mkMap(mkEntry(taskId03, activeTask)));
}
@Test
public void shouldCreateActiveTaskDuringAssignment() {
final StreamTask activeTaskToBeCreated = statefulTask(taskId03, taskId03ChangelogPartitions)
.inState(State.CREATED)
.withInputPartitions(taskId03Partitions).build();
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
final Set<Task> createdTasks = Set.of(activeTaskToBeCreated);
final Map<TaskId, Set<TopicPartition>> tasksToBeCreated = mkMap(
mkEntry(activeTaskToBeCreated.id(), activeTaskToBeCreated.inputPartitions()));
when(activeTaskCreator.createTasks(consumer, tasksToBeCreated)).thenReturn(createdTasks);
taskManager.handleAssignment(tasksToBeCreated, Collections.emptyMap());
verify(tasks).addPendingTasksToInit(createdTasks);
verify(standbyTaskCreator).createTasks(Collections.emptyMap());
}
@Test
public void shouldCreateStandbyTaskDuringAssignment() {
final StandbyTask standbyTaskToBeCreated = standbyTask(taskId02, taskId02ChangelogPartitions)
.inState(State.CREATED)
.withInputPartitions(taskId02Partitions).build();
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
final Set<Task> createdTasks = Set.of(standbyTaskToBeCreated);
when(standbyTaskCreator.createTasks(mkMap(
mkEntry(standbyTaskToBeCreated.id(), standbyTaskToBeCreated.inputPartitions())))
).thenReturn(createdTasks);
taskManager.handleAssignment(
Collections.emptyMap(),
mkMap(mkEntry(standbyTaskToBeCreated.id(), standbyTaskToBeCreated.inputPartitions()))
);
verify(activeTaskCreator).createTasks(consumer, Collections.emptyMap());
verify(tasks).addPendingTasksToInit(createdTasks);
}
// Verifies recycling of an active task into a standby: when the same task id is
// reassigned as a standby, the running active task is committed, converted via
// the standby-task creator, removed from the registry, and the resulting standby
// is parked as pending initialization.
@Test
public void shouldAddRecycledStandbyTasksFromActiveToPendingTasksToInitWithStateUpdaterEnabled() {
    final StreamTask activeTaskToRecycle = statefulTask(taskId01, taskId01ChangelogPartitions)
        .withInputPartitions(taskId01Partitions)
        .inState(State.RUNNING).build();
    final StandbyTask standbyTask = standbyTask(taskId01, taskId01ChangelogPartitions)
        .withInputPartitions(taskId01Partitions)
        .inState(State.CREATED).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    when(tasks.allNonFailedTasks()).thenReturn(Set.of(activeTaskToRecycle));
    when(standbyTaskCreator.createStandbyTaskFromActive(activeTaskToRecycle, taskId01Partitions))
        .thenReturn(standbyTask);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    taskManager.handleAssignment(emptyMap(), mkMap(mkEntry(taskId01, taskId01Partitions)));
    // The active task is committed cleanly before being converted and removed.
    verify(activeTaskToRecycle).prepareCommit(true);
    verify(tasks).addPendingTasksToInit(Set.of(standbyTask));
    verify(tasks).removeTask(activeTaskToRecycle);
    verify(activeTaskCreator).createTasks(consumer, Collections.emptyMap());
    verify(standbyTaskCreator).createTasks(Collections.emptyMap());
}
// With the state updater enabled, standby tasks must never live in the stream
// thread's task registry. Finding one there during assignment (here: a standby
// that would be recycled into an active task) is an invariant violation and must
// fail fast with an IllegalStateException.
@Test
public void shouldThrowDuringAssignmentIfStandbyTaskToRecycleIsFoundInTasksRegistryWithStateUpdaterEnabled() {
    final StandbyTask standbyTaskToRecycle = standbyTask(taskId03, taskId03ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId03Partitions).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    when(tasks.allNonFailedTasks()).thenReturn(Set.of(standbyTaskToRecycle));
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    final IllegalStateException illegalStateException = assertThrows(
        IllegalStateException.class,
        () -> taskManager.handleAssignment(
            mkMap(mkEntry(standbyTaskToRecycle.id(), standbyTaskToRecycle.inputPartitions())),
            Collections.emptyMap()
        )
    );
    assertEquals("Standby tasks should only be managed by the state updater, " +
        "but standby task " + taskId03 + " is managed by the stream thread", illegalStateException.getMessage());
    // The failure must happen before any task creation is attempted.
    verifyNoInteractions(activeTaskCreator);
}
// Verifies that an active task in the registry that is no longer part of the new
// assignment is committed, closed cleanly, and removed from the registry.
@Test
public void shouldAssignActiveTaskInTasksRegistryToBeClosedCleanlyWithStateUpdaterEnabled() {
    final StreamTask activeTaskToClose = statefulTask(taskId03, taskId03ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId03Partitions).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    when(tasks.allNonFailedTasks()).thenReturn(Set.of(activeTaskToClose));
    // Empty assignment: the running task should be retired cleanly.
    taskManager.handleAssignment(Collections.emptyMap(), Collections.emptyMap());
    verify(activeTaskCreator).createTasks(consumer, Collections.emptyMap());
    verify(activeTaskToClose).prepareCommit(true);
    verify(activeTaskToClose).closeClean();
    verify(tasks).removeTask(activeTaskToClose);
    verify(standbyTaskCreator).createTasks(Collections.emptyMap());
}
// Same invariant as the recycle case: a standby task found in the stream
// thread's registry during assignment (here: one that would be closed) must
// trigger an IllegalStateException before any task creation happens.
@Test
public void shouldThrowDuringAssignmentIfStandbyTaskToCloseIsFoundInTasksRegistryWithStateUpdaterEnabled() {
    final StandbyTask standbyTaskToClose = standbyTask(taskId03, taskId03ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId03Partitions).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    when(tasks.allNonFailedTasks()).thenReturn(Set.of(standbyTaskToClose));
    final IllegalStateException illegalStateException = assertThrows(
        IllegalStateException.class,
        () -> taskManager.handleAssignment(Collections.emptyMap(), Collections.emptyMap())
    );
    assertEquals("Standby tasks should only be managed by the state updater, " +
        "but standby task " + taskId03 + " is managed by the stream thread", illegalStateException.getMessage());
    verifyNoInteractions(activeTaskCreator);
}
// Verifies that an already-owned active task whose assignment now carries a
// different set of input partitions has its input partitions updated in place
// (no close/recreate cycle).
@Test
public void shouldAssignActiveTaskInTasksRegistryToUpdateInputPartitionsWithStateUpdaterEnabled() {
    final StreamTask activeTaskToUpdateInputPartitions = statefulTask(taskId03, taskId03ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId03Partitions).build();
    final Set<TopicPartition> newInputPartitions = taskId02Partitions;
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    when(tasks.allNonFailedTasks()).thenReturn(Set.of(activeTaskToUpdateInputPartitions));
    // The registry confirms the partition change so the task itself is updated.
    when(tasks.updateActiveTaskInputPartitions(activeTaskToUpdateInputPartitions, newInputPartitions)).thenReturn(true);
    taskManager.handleAssignment(
        mkMap(mkEntry(activeTaskToUpdateInputPartitions.id(), newInputPartitions)),
        Collections.emptyMap()
    );
    verify(activeTaskCreator).createTasks(consumer, Collections.emptyMap());
    verify(activeTaskToUpdateInputPartitions).updateInputPartitions(eq(newInputPartitions), any());
    verify(standbyTaskCreator).createTasks(Collections.emptyMap());
}
// Verifies that reassigning an active task that is already RUNNING with
// unchanged input partitions is a no-op: nothing new is created and the task is
// left alone.
@Test
public void shouldResumeActiveRunningTaskInTasksRegistryWithStateUpdaterEnabled() {
    final StreamTask activeTaskToResume = statefulTask(taskId03, taskId03ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId03Partitions).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    when(tasks.allNonFailedTasks()).thenReturn(Set.of(activeTaskToResume));
    taskManager.handleAssignment(
        mkMap(mkEntry(activeTaskToResume.id(), activeTaskToResume.inputPartitions())),
        Collections.emptyMap()
    );
    verify(activeTaskCreator).createTasks(consumer, Collections.emptyMap());
    verify(standbyTaskCreator).createTasks(Collections.emptyMap());
}
// Verifies that reassigning a SUSPENDED active task resumes it and hands it back
// to the state updater for restoration, removing it from the stream thread's
// registry.
@Test
public void shouldResumeActiveSuspendedTaskInTasksRegistryAndAddToStateUpdater() {
    final StreamTask activeTaskToResume = statefulTask(taskId03, taskId03ChangelogPartitions)
        .inState(State.SUSPENDED)
        .withInputPartitions(taskId03Partitions).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    when(tasks.allNonFailedTasks()).thenReturn(Set.of(activeTaskToResume));
    taskManager.handleAssignment(
        mkMap(mkEntry(activeTaskToResume.id(), activeTaskToResume.inputPartitions())),
        Collections.emptyMap()
    );
    verify(activeTaskCreator).createTasks(consumer, Collections.emptyMap());
    verify(activeTaskToResume).resume();
    // Ownership moves from the registry back to the state updater.
    verify(stateUpdater).add(activeTaskToResume);
    verify(tasks).removeTask(activeTaskToResume);
    verify(standbyTaskCreator).createTasks(Collections.emptyMap());
}
// Same invariant once more: a standby task in the stream thread's registry
// (here: one whose input partitions would be updated) must cause an
// IllegalStateException before any task creation happens.
@Test
public void shouldThrowDuringAssignmentIfStandbyTaskToUpdateInputPartitionsIsFoundInTasksRegistryWithStateUpdaterEnabled() {
    final StandbyTask standbyTaskToUpdateInputPartitions = standbyTask(taskId02, taskId02ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId02Partitions).build();
    final Set<TopicPartition> newInputPartitions = taskId03Partitions;
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    when(tasks.allNonFailedTasks()).thenReturn(Set.of(standbyTaskToUpdateInputPartitions));
    final IllegalStateException illegalStateException = assertThrows(
        IllegalStateException.class,
        () -> taskManager.handleAssignment(
            Collections.emptyMap(),
            mkMap(mkEntry(standbyTaskToUpdateInputPartitions.id(), newInputPartitions))
        )
    );
    assertEquals("Standby tasks should only be managed by the state updater, " +
        "but standby task " + taskId02 + " is managed by the stream thread", illegalStateException.getMessage());
    verifyNoInteractions(activeTaskCreator);
}
// Verifies that a single assignment can combine actions: one existing active
// task is closed cleanly (no longer assigned) while a new active task is created
// in the same handleAssignment() call.
@Test
public void shouldAssignMultipleTasksInTasksRegistryWithStateUpdaterEnabled() {
    final StreamTask activeTaskToClose = statefulTask(taskId03, taskId03ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId03Partitions).build();
    final StreamTask activeTaskToCreate = statefulTask(taskId02, taskId02ChangelogPartitions)
        .inState(State.CREATED)
        .withInputPartitions(taskId02Partitions).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    when(tasks.allNonFailedTasks()).thenReturn(Set.of(activeTaskToClose));
    taskManager.handleAssignment(
        mkMap(mkEntry(activeTaskToCreate.id(), activeTaskToCreate.inputPartitions())),
        Collections.emptyMap()
    );
    verify(activeTaskCreator).createTasks(
        consumer,
        mkMap(mkEntry(activeTaskToCreate.id(), activeTaskToCreate.inputPartitions()))
    );
    verify(activeTaskToClose).closeClean();
    verify(standbyTaskCreator).createTasks(Collections.emptyMap());
}
// Verifies that checkStateUpdater() drains tasks pending initialization,
// initializes them, and hands both active and standby tasks over to the state
// updater.
@Test
public void shouldAddTasksToStateUpdater() {
    final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .withInputPartitions(taskId00Partitions)
        .inState(State.RESTORING).build();
    final StandbyTask task01 = standbyTask(taskId01, taskId01ChangelogPartitions)
        .withInputPartitions(taskId01Partitions)
        .inState(State.RUNNING).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    when(tasks.drainPendingTasksToInit()).thenReturn(Set.of(task00, task01));
    taskManager = setUpTaskManagerWithStateUpdater(StreamsConfigUtils.ProcessingMode.AT_LEAST_ONCE, tasks, false);
    taskManager.checkStateUpdater(time.milliseconds(), noOpResetter);
    verify(task00).initializeIfNeeded();
    verify(task01).initializeIfNeeded();
    verify(stateUpdater).add(task00);
    verify(stateUpdater).add(task01);
}
// Verifies that a LockException during task initialization is treated as
// transient: the failing task is put back into the pending-to-init set for a
// later retry (and is NOT given to the state updater), while a sibling task that
// initialized fine proceeds normally.
@Test
public void shouldRetryInitializationWhenLockExceptionInStateUpdater() {
    final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .withInputPartitions(taskId00Partitions)
        .inState(State.RESTORING).build();
    final StandbyTask task01 = standbyTask(taskId01, taskId01ChangelogPartitions)
        .withInputPartitions(taskId01Partitions)
        .inState(State.RUNNING).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    when(tasks.drainPendingTasksToInit()).thenReturn(Set.of(task00, task01));
    final LockException lockException = new LockException("Where are my keys??");
    doThrow(lockException).when(task00).initializeIfNeeded();
    taskManager = setUpTaskManagerWithStateUpdater(StreamsConfigUtils.ProcessingMode.AT_LEAST_ONCE, tasks, false);
    taskManager.checkStateUpdater(time.milliseconds(), noOpResetter);
    verify(task00).initializeIfNeeded();
    verify(task01).initializeIfNeeded();
    // Only the successfully initialized task gets its timeout cleared.
    verify(task00, never()).clearTaskTimeout();
    verify(task01).clearTaskTimeout();
    verify(tasks).addPendingTasksToInit(
        argThat(tasksToInit -> tasksToInit.contains(task00) && !tasksToInit.contains(task01))
    );
    verify(stateUpdater, never()).add(task00);
    verify(stateUpdater).add(task01);
}
// Verifies that a TimeoutException during task initialization is also retried:
// the task records the timeout (maybeInitTaskTimeoutOrThrow) and is re-queued
// for init, while the healthy task proceeds to the state updater.
@Test
public void shouldRetryInitializationWhenTimeoutExceptionInStateUpdater() {
    final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .withInputPartitions(taskId00Partitions)
        .inState(State.RESTORING).build();
    final StandbyTask task01 = standbyTask(taskId01, taskId01ChangelogPartitions)
        .withInputPartitions(taskId01Partitions)
        .inState(State.RUNNING).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    when(tasks.drainPendingTasksToInit()).thenReturn(Set.of(task00, task01));
    final TimeoutException timeoutException = new TimeoutException("Timed out!");
    doThrow(timeoutException).when(task00).initializeIfNeeded();
    taskManager = setUpTaskManagerWithStateUpdater(StreamsConfigUtils.ProcessingMode.AT_LEAST_ONCE, tasks, false);
    taskManager.checkStateUpdater(time.milliseconds(), noOpResetter);
    verify(task00).initializeIfNeeded();
    verify(task01).initializeIfNeeded();
    // The timed-out task starts (or continues) its task timeout instead of failing.
    verify(task00).maybeInitTaskTimeoutOrThrow(anyLong(), eq(timeoutException));
    verify(task00, never()).clearTaskTimeout();
    verify(task01).clearTaskTimeout();
    verify(tasks).addPendingTasksToInit(
        argThat(tasksToInit -> tasksToInit.contains(task00) && !tasksToInit.contains(task01))
    );
    verify(stateUpdater, never()).add(task00);
    verify(stateUpdater).add(task01);
}
// Verifies the retry backoff for failed initialization: after a LockException
// the task is re-queued; a retry inside the backoff window (500 ms later) must
// NOT re-attempt initialization; a retry after the window (further 5 s later)
// must re-attempt and, on success, hand the task to the state updater.
@Test
public void shouldRetryInitializationWithBackoffWhenInitializationFails() {
    final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .withInputPartitions(taskId00Partitions)
        .inState(State.RESTORING).build();
    final StandbyTask task01 = standbyTask(taskId01, taskId01ChangelogPartitions)
        .withInputPartitions(taskId01Partitions)
        .inState(State.RUNNING).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    when(tasks.drainPendingTasksToInit()).thenReturn(Set.of(task00, task01));
    doThrow(new LockException("Lock Exception!")).when(task00).initializeIfNeeded();
    taskManager = setUpTaskManagerWithStateUpdater(StreamsConfigUtils.ProcessingMode.AT_LEAST_ONCE, tasks, false);
    taskManager.checkStateUpdater(time.milliseconds(), noOpResetter);
    // task00 should not be initialized due to LockException, task01 should be initialized
    verify(task00).initializeIfNeeded();
    verify(task01).initializeIfNeeded();
    verify(tasks).addPendingTasksToInit(
        argThat(tasksToInit -> tasksToInit.contains(task00) && !tasksToInit.contains(task01))
    );
    verify(stateUpdater, never()).add(task00);
    verify(stateUpdater).add(task01);
    time.sleep(500);
    taskManager.checkStateUpdater(time.milliseconds(), noOpResetter);
    // task00 should not be initialized since the backoff period has not passed
    verify(task00, times(1)).initializeIfNeeded();
    verify(tasks, times(2)).addPendingTasksToInit(
        argThat(tasksToInit -> tasksToInit.contains(task00))
    );
    verify(stateUpdater, never()).add(task00);
    time.sleep(5000);
    // task00 should call initialize since the backoff period has passed
    doNothing().when(task00).initializeIfNeeded();
    taskManager.checkStateUpdater(time.milliseconds(), noOpResetter);
    verify(task00, times(2)).initializeIfNeeded();
    verify(tasks, times(2)).addPendingTasksToInit(
        argThat(tasksToInit -> tasksToInit.contains(task00))
    );
    verify(stateUpdater).add(task00);
}
// Verifies that an unexpected RuntimeException during initialization is fatal:
// the task is marked failed (not given to the state updater) and the exception
// is rethrown wrapped in a StreamsException that carries the task id and the
// original cause.
@Test
public void shouldRethrowRuntimeExceptionInInitTaskWithStateUpdater() {
    final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .withInputPartitions(taskId00Partitions)
        .inState(State.CREATED).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    when(tasks.drainPendingTasksToInit()).thenReturn(Set.of(task00));
    final RuntimeException runtimeException = new RuntimeException("KABOOM!");
    doThrow(runtimeException).when(task00).initializeIfNeeded();
    taskManager = setUpTaskManagerWithStateUpdater(StreamsConfigUtils.ProcessingMode.AT_LEAST_ONCE, tasks, false);
    final StreamsException streamsException = assertThrows(
        StreamsException.class,
        () -> taskManager.checkStateUpdater(time.milliseconds(), noOpResetter)
    );
    verify(stateUpdater, never()).add(task00);
    verify(tasks).addFailedTask(task00);
    assertTrue(streamsException.taskId().isPresent());
    assertEquals(task00.id(), streamsException.taskId().get());
    assertEquals("Encounter unexpected fatal error for task 0_0", streamsException.getMessage());
    assertEquals(runtimeException, streamsException.getCause());
}
// Verifies that TaskCorruptedExceptions thrown while initializing tasks are
// aggregated: both corrupted tasks are marked failed, the healthy task still
// goes to the state updater, and a single TaskCorruptedException covering all
// corrupted task ids is rethrown.
@Test
public void shouldRethrowTaskCorruptedExceptionFromInitialization() {
    final StreamTask statefulTask0 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.CREATED)
        .withInputPartitions(taskId00Partitions).build();
    final StreamTask statefulTask1 = statefulTask(taskId01, taskId01ChangelogPartitions)
        .inState(State.CREATED)
        .withInputPartitions(taskId01Partitions).build();
    final StreamTask statefulTask2 = statefulTask(taskId02, taskId02ChangelogPartitions)
        .inState(State.CREATED)
        .withInputPartitions(taskId02Partitions).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.EXACTLY_ONCE_V2, tasks, false);
    when(tasks.drainPendingTasksToInit()).thenReturn(Set.of(statefulTask0, statefulTask1, statefulTask2));
    doThrow(new TaskCorruptedException(Collections.singleton(statefulTask0.id))).when(statefulTask0).initializeIfNeeded();
    doThrow(new TaskCorruptedException(Collections.singleton(statefulTask1.id))).when(statefulTask1).initializeIfNeeded();
    final TaskCorruptedException thrown = assertThrows(
        TaskCorruptedException.class,
        () -> taskManager.checkStateUpdater(time.milliseconds(), noOpResetter)
    );
    verify(tasks).addFailedTask(statefulTask0);
    verify(tasks).addFailedTask(statefulTask1);
    verify(stateUpdater).add(statefulTask2);
    assertEquals(Set.of(taskId00, taskId01), thrown.corruptedTasks());
    assertEquals("Tasks [0_1, 0_0] are corrupted and hence need to be re-initialized", thrown.getMessage());
}
// While the state updater still restores active tasks, restoration is not
// complete, so checkStateUpdater() must report false.
@Test
public void shouldReturnFalseFromCheckStateUpdaterIfActiveTasksAreRestoring() {
    final TaskManager manager =
        setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, mock(TasksRegistry.class));
    when(stateUpdater.restoresActiveTasks()).thenReturn(true);
    assertFalse(manager.checkStateUpdater(time.milliseconds(), noOpResetter));
}
// Even with no active tasks restoring, restoration is not complete while tasks
// are still waiting to be initialized, so checkStateUpdater() must report false.
@Test
public void shouldReturnFalseFromCheckStateUpdaterIfActiveTasksAreNotRestoringAndNoPendingTaskToRecycleButPendingTasksToInit() {
    final TasksRegistry taskRegistry = mock(TasksRegistry.class);
    final TaskManager manager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, taskRegistry);
    when(taskRegistry.hasPendingTasksToInit()).thenReturn(true);
    assertFalse(manager.checkStateUpdater(time.milliseconds(), noOpResetter));
}
// With no active tasks restoring and nothing pending initialization,
// restoration is complete and checkStateUpdater() must report true.
@Test
public void shouldReturnTrueFromCheckStateUpdaterIfActiveTasksAreNotRestoringAndNoPendingInit() {
    final TaskManager manager =
        setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, mock(TasksRegistry.class));
    assertTrue(manager.checkStateUpdater(time.milliseconds(), noOpResetter));
}
// Verifies that handleRevocation() removes a restoring active task whose input
// partitions were revoked from the state updater, suspends it, and parks it in
// the task registry (awaiting close or reassignment).
@Test
public void shouldSuspendActiveTaskWithRevokedInputPartitionsInStateUpdater() {
    final StreamTask task = statefulTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.RESTORING)
        .withInputPartitions(taskId00Partitions).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setupForRevocationAndLost(Set.of(task), tasks);
    when(stateUpdater.tasks()).thenReturn(Set.of(task));
    // Pre-complete the removal future so handleRevocation() does not block.
    final CompletableFuture<StateUpdater.RemovedTaskResult> future = new CompletableFuture<>();
    when(stateUpdater.remove(task.id())).thenReturn(future);
    future.complete(new StateUpdater.RemovedTaskResult(task));
    taskManager.handleRevocation(task.inputPartitions());
    verify(task).suspend();
    verify(tasks).addTask(task);
    verify(stateUpdater).remove(task.id());
}
// Same as the single-task revocation case, but with two restoring active tasks
// revoked in one call: both are removed from the state updater, suspended, and
// parked in the registry.
@Test
public void shouldSuspendMultipleActiveTasksWithRevokedInputPartitionsInStateUpdater() {
    final StreamTask task1 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.RESTORING)
        .withInputPartitions(taskId00Partitions).build();
    final StreamTask task2 = statefulTask(taskId01, taskId01ChangelogPartitions)
        .inState(State.RESTORING)
        .withInputPartitions(taskId01Partitions).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setupForRevocationAndLost(Set.of(task1, task2), tasks);
    final CompletableFuture<StateUpdater.RemovedTaskResult> future1 = new CompletableFuture<>();
    when(stateUpdater.remove(task1.id())).thenReturn(future1);
    future1.complete(new StateUpdater.RemovedTaskResult(task1));
    final CompletableFuture<StateUpdater.RemovedTaskResult> future2 = new CompletableFuture<>();
    when(stateUpdater.remove(task2.id())).thenReturn(future2);
    future2.complete(new StateUpdater.RemovedTaskResult(task2));
    taskManager.handleRevocation(union(HashSet::new, taskId00Partitions, taskId01Partitions));
    verify(task1).suspend();
    verify(tasks).addTask(task1);
    verify(task2).suspend();
    verify(tasks).addTask(task2);
}
// Verifies that handleRevocation() leaves a restoring active task untouched when
// the revoked partitions do not overlap with the task's input partitions.
@Test
public void shouldNotSuspendActiveTaskWithoutRevokedInputPartitionsInStateUpdater() {
    final StreamTask task = statefulTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.RESTORING)
        .withInputPartitions(taskId00Partitions).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setupForRevocationAndLost(Set.of(task), tasks);
    // Revoke partitions belonging to a different task.
    taskManager.handleRevocation(taskId01Partitions);
    verify(task, never()).suspend();
    verify(tasks, never()).addTask(task);
    verify(stateUpdater, never()).remove(task.id());
}
// Verifies that revocation only applies to active tasks: a standby task in the
// state updater is never suspended or removed, even when the revoked partitions
// match its input partitions.
@Test
public void shouldNotRevokeStandbyTaskInStateUpdaterOnRevocation() {
    final StandbyTask task = standbyTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.RESTORING)
        .withInputPartitions(taskId00Partitions).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setupForRevocationAndLost(Set.of(task), tasks);
    taskManager.handleRevocation(taskId00Partitions);
    verify(task, never()).suspend();
    verify(tasks, never()).addTask(task);
    verify(stateUpdater, never()).remove(task.id());
}
// Verifies that if removing a revoked task from the state updater reports a task
// failure, handleRevocation() marks that task as failed and rethrows the cause
// wrapped in a StreamsException, while the other (healthy) revoked task is still
// suspended and parked in the registry.
@Test
public void shouldThrowIfRevokingTasksInStateUpdaterFindsFailedTasks() {
    final StreamTask task1 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.RESTORING)
        .withInputPartitions(taskId00Partitions).build();
    final StreamTask task2 = statefulTask(taskId01, taskId01ChangelogPartitions)
        .inState(State.RESTORING)
        .withInputPartitions(taskId01Partitions).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setupForRevocationAndLost(Set.of(task1, task2), tasks);
    final CompletableFuture<StateUpdater.RemovedTaskResult> future1 = new CompletableFuture<>();
    when(stateUpdater.remove(task1.id())).thenReturn(future1);
    future1.complete(new StateUpdater.RemovedTaskResult(task1));
    final CompletableFuture<StateUpdater.RemovedTaskResult> future2 = new CompletableFuture<>();
    when(stateUpdater.remove(task2.id())).thenReturn(future2);
    // task2's removal carries an exception, signalling the task failed.
    final RuntimeException taskException = new RuntimeException("Nobody expects the Spanish inquisition!");
    future2.complete(new StateUpdater.RemovedTaskResult(task2, taskException));
    final StreamsException thrownException = assertThrows(
        StreamsException.class,
        () -> taskManager.handleRevocation(union(HashSet::new, taskId00Partitions, taskId01Partitions))
    );
    assertEquals("Encounter unexpected fatal error for task " + task2.id(), thrownException.getMessage());
    assertEquals(thrownException.getCause(), taskException);
    verify(task1).suspend();
    verify(tasks).addTask(task1);
    verify(task2, never()).suspend();
    verify(tasks).addFailedTask(task2);
}
// Verifies that on a partition-lost event, handleLostAll() removes all ACTIVE
// tasks from the state updater and closes them cleanly, while standby tasks in
// the state updater are left untouched.
@Test
public void shouldCloseCleanWhenRemoveAllActiveTasksFromStateUpdaterOnPartitionLost() {
    final StreamTask task1 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.RESTORING)
        .withInputPartitions(taskId00Partitions).build();
    final StandbyTask task2 = standbyTask(taskId01, taskId01ChangelogPartitions)
        .inState(State.RESTORING)
        .withInputPartitions(taskId01Partitions).build();
    final StreamTask task3 = statefulTask(taskId02, taskId02ChangelogPartitions)
        .inState(State.RESTORING)
        .withInputPartitions(taskId02Partitions).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setupForRevocationAndLost(Set.of(task1, task2, task3), tasks);
    final CompletableFuture<StateUpdater.RemovedTaskResult> future1 = new CompletableFuture<>();
    when(stateUpdater.remove(task1.id())).thenReturn(future1);
    future1.complete(new StateUpdater.RemovedTaskResult(task1));
    final CompletableFuture<StateUpdater.RemovedTaskResult> future3 = new CompletableFuture<>();
    when(stateUpdater.remove(task3.id())).thenReturn(future3);
    future3.complete(new StateUpdater.RemovedTaskResult(task3));
    taskManager.handleLostAll();
    verify(task1).suspend();
    verify(task1).closeClean();
    verify(task3).suspend();
    verify(task3).closeClean();
    // The standby task (task2) is not an active task and must not be removed.
    verify(stateUpdater, never()).remove(task2.id());
}
// Verifies that on a partition-lost event, active tasks that were still waiting
// to be initialized (never handed to the state updater) are also suspended and
// closed cleanly.
@Test
public void shouldCloseCleanTasksPendingInitOnPartitionLost() {
    final StreamTask task1 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.CREATED)
        .withInputPartitions(taskId00Partitions).build();
    final StreamTask task2 = statefulTask(taskId02, taskId02ChangelogPartitions)
        .inState(State.CREATED)
        .withInputPartitions(taskId02Partitions).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    when(tasks.drainPendingActiveTasksToInit()).thenReturn(Set.of(task1, task2));
    final TaskManager taskManager = setupForRevocationAndLost(emptySet(), tasks);
    taskManager.handleLostAll();
    verify(task1).suspend();
    verify(task1).closeClean();
    verify(task2).suspend();
    verify(task2).closeClean();
}
// Verifies that on a partition-lost event, active tasks whose removal from the
// state updater reports a failure are closed dirty (after an abort-style
// prepareCommit(false) and suspend) instead of cleanly.
@Test
public void shouldCloseDirtyWhenRemoveFailedActiveTasksFromStateUpdaterOnPartitionLost() {
    final StreamTask task1 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.RESTORING)
        .withInputPartitions(taskId00Partitions).build();
    final StreamTask task2 = statefulTask(taskId02, taskId02ChangelogPartitions)
        .inState(State.RESTORING)
        .withInputPartitions(taskId02Partitions).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setupForRevocationAndLost(Set.of(task1, task2), tasks);
    final CompletableFuture<StateUpdater.RemovedTaskResult> future1 = new CompletableFuture<>();
    when(stateUpdater.remove(task1.id())).thenReturn(future1);
    future1.complete(new StateUpdater.RemovedTaskResult(task1, new StreamsException("Something happened")));
    final CompletableFuture<StateUpdater.RemovedTaskResult> future3 = new CompletableFuture<>();
    when(stateUpdater.remove(task2.id())).thenReturn(future3);
    future3.complete(new StateUpdater.RemovedTaskResult(task2, new StreamsException("Something else happened")));
    taskManager.handleLostAll();
    verify(task1).prepareCommit(false);
    verify(task1).suspend();
    verify(task1).closeDirty();
    verify(task2).prepareCommit(false);
    verify(task2).suspend();
    verify(task2).closeDirty();
}
// Mixed partition-lost scenario: a pending-to-init task and a successfully
// removed task are closed cleanly, while a task whose removal reported a failure
// is closed dirty.
@Test
public void shouldCloseTasksWhenRemoveFailedActiveTasksFromStateUpdaterOnPartitionLost() {
    final StreamTask task1 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.CREATED)
        .withInputPartitions(taskId00Partitions).build();
    final StreamTask task2 = statefulTask(taskId02, taskId02ChangelogPartitions)
        .inState(State.RESTORING)
        .withInputPartitions(taskId02Partitions).build();
    final StreamTask task3 = statefulTask(taskId03, taskId03ChangelogPartitions)
        .inState(State.RESTORING)
        .withInputPartitions(taskId03Partitions).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    when(tasks.drainPendingActiveTasksToInit()).thenReturn(Set.of(task1));
    final TaskManager taskManager = setupForRevocationAndLost(Set.of(task2, task3), tasks);
    final CompletableFuture<StateUpdater.RemovedTaskResult> future2 = new CompletableFuture<>();
    when(stateUpdater.remove(task2.id())).thenReturn(future2);
    // task2's removal reports a failure, so it must be closed dirty.
    future2.complete(new StateUpdater.RemovedTaskResult(task2, new StreamsException("Something happened")));
    final CompletableFuture<StateUpdater.RemovedTaskResult> future3 = new CompletableFuture<>();
    when(stateUpdater.remove(task3.id())).thenReturn(future3);
    future3.complete(new StateUpdater.RemovedTaskResult(task3));
    taskManager.handleLostAll();
    verify(task1).suspend();
    verify(task1).closeClean();
    verify(task2).prepareCommit(false);
    verify(task2).suspend();
    verify(task2).closeDirty();
    verify(task3).suspend();
    verify(task3).closeClean();
}
// Shared fixture for the revocation / partition-lost tests: builds a task
// manager with the state-updater path enabled whose state updater reports the
// given set of owned tasks.
private TaskManager setupForRevocationAndLost(final Set<Task> tasksInStateUpdater,
                                              final TasksRegistry tasks) {
    final TaskManager manager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    when(stateUpdater.tasks()).thenReturn(tasksInStateUpdater);
    return manager;
}
// Verifies that a task the state updater finished restoring is transitioned to
// RUNNING by checkStateUpdater(): restoration is completed, the task is put into
// the registry, and its input partitions are resumed on the consumer.
@Test
public void shouldTransitRestoredTaskToRunning() {
    final StreamTask task = statefulTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.RESTORING)
        .withInputPartitions(taskId00Partitions).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTransitionToRunningOfRestoredTask(Set.of(task), tasks);
    taskManager.checkStateUpdater(time.milliseconds(), noOpResetter);
    verifyTransitionToRunningOfRestoredTask(Set.of(task), tasks);
}
// Same as the single-task case, but verifying that multiple restored tasks are
// all transitioned to RUNNING in one checkStateUpdater() call.
@Test
public void shouldTransitMultipleRestoredTasksToRunning() {
    final StreamTask task1 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.RESTORING)
        .withInputPartitions(taskId00Partitions).build();
    final StreamTask task2 = statefulTask(taskId01, taskId01ChangelogPartitions)
        .inState(State.RESTORING)
        .withInputPartitions(taskId01Partitions).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTransitionToRunningOfRestoredTask(Set.of(task1, task2), tasks);
    taskManager.checkStateUpdater(time.milliseconds(), noOpResetter);
    verifyTransitionToRunningOfRestoredTask(Set.of(task1, task2), tasks);
}
// Shared verification for the restored-task tests: each restored task must have
// completed restoration, cleared its timeout, been added to the registry, and
// had its input partitions resumed on the consumer.
private void verifyTransitionToRunningOfRestoredTask(final Set<StreamTask> restoredTasks,
                                                     final TasksRegistry tasks) {
    for (final StreamTask restoredTask : restoredTasks) {
        verify(restoredTask).completeRestoration(noOpResetter);
        verify(restoredTask, atLeastOnce()).clearTaskTimeout();
        verify(tasks).addTask(restoredTask);
        verify(consumer).resume(restoredTask.inputPartitions());
    }
}
// Verifies that a TimeoutException while completing restoration is treated as
// transient: the task records the timeout and is returned to the state updater
// instead of being added to the registry, and the consumer is not touched.
@Test
public void shouldHandleTimeoutExceptionInTransitRestoredTaskToRunning() {
    final StreamTask task = statefulTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.RESTORING)
        .withInputPartitions(taskId00Partitions).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTransitionToRunningOfRestoredTask(Set.of(task), tasks);
    final TimeoutException timeoutException = new TimeoutException();
    doThrow(timeoutException).when(task).completeRestoration(noOpResetter);
    taskManager.checkStateUpdater(time.milliseconds(), noOpResetter);
    verify(task).maybeInitTaskTimeoutOrThrow(anyLong(), eq(timeoutException));
    verify(stateUpdater).add(task);
    verify(tasks, never()).addTask(task);
    verify(task, never()).clearTaskTimeout();
    verifyNoInteractions(consumer);
}
// Shared fixture for the restored-task tests: the state updater reports that it
// is restoring active tasks and hands back the given tasks when drained.
private TaskManager setUpTransitionToRunningOfRestoredTask(final Set<StreamTask> statefulTasks,
                                                           final TasksRegistry tasks) {
    when(stateUpdater.restoresActiveTasks()).thenReturn(true);
    when(stateUpdater.drainRestoredActiveTasks(any(Duration.class))).thenReturn(statefulTasks);
    return setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
}
// checkStateUpdater() must mirror the state updater's restoration status:
// true when nothing is restoring, false while active tasks are still restoring.
@Test
public void shouldReturnCorrectBooleanWhenTryingToCompleteRestorationWithStateUpdater() {
    final TaskManager manager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, null, false);

    // Nothing restoring: restoration is considered complete.
    when(stateUpdater.restoresActiveTasks()).thenReturn(false);
    assertTrue(manager.checkStateUpdater(time.milliseconds(), noOpResetter));

    // Active tasks still restoring: restoration is not complete yet.
    when(stateUpdater.restoresActiveTasks()).thenReturn(true);
    assertFalse(manager.checkStateUpdater(time.milliseconds(), noOpResetter));
}
// Verifies that a StreamsException recorded by the state updater for a failed
// task is rethrown as-is by checkStateUpdater(), with the failed task's id
// attached to the exception.
@Test
public void shouldRethrowStreamsExceptionFromStateUpdater() {
    final StreamTask statefulTask = statefulTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.RESTORING)
        .withInputPartitions(taskId00Partitions).build();
    final StreamsException exception = new StreamsException("boom!");
    final ExceptionAndTask exceptionAndTasks = new ExceptionAndTask(exception, statefulTask);
    when(stateUpdater.hasExceptionsAndFailedTasks()).thenReturn(true);
    when(stateUpdater.drainExceptionsAndFailedTasks()).thenReturn(Collections.singletonList(exceptionAndTasks));
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    final StreamsException thrown = assertThrows(
        StreamsException.class,
        () -> taskManager.checkStateUpdater(time.milliseconds(), noOpResetter)
    );
    // The original exception instance is propagated, not wrapped.
    assertEquals(exception, thrown);
    assertEquals(statefulTask.id(), thrown.taskId().orElseThrow());
}
// Verifies that multiple TaskCorruptedExceptions recorded by the state updater
// are merged into a single TaskCorruptedException covering all corrupted task
// ids before being rethrown by checkStateUpdater().
@Test
public void shouldRethrowTaskCorruptedExceptionFromStateUpdater() {
    final StreamTask statefulTask0 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.RESTORING)
        .withInputPartitions(taskId00Partitions).build();
    final StreamTask statefulTask1 = statefulTask(taskId01, taskId01ChangelogPartitions)
        .inState(State.RESTORING)
        .withInputPartitions(taskId01Partitions).build();
    final ExceptionAndTask exceptionAndTasks0 =
        new ExceptionAndTask(new TaskCorruptedException(Collections.singleton(taskId00)), statefulTask0);
    final ExceptionAndTask exceptionAndTasks1 =
        new ExceptionAndTask(new TaskCorruptedException(Collections.singleton(taskId01)), statefulTask1);
    when(stateUpdater.hasExceptionsAndFailedTasks()).thenReturn(true);
    when(stateUpdater.drainExceptionsAndFailedTasks()).thenReturn(Arrays.asList(exceptionAndTasks0, exceptionAndTasks1));
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    final TaskCorruptedException thrown = assertThrows(
        TaskCorruptedException.class,
        () -> taskManager.checkStateUpdater(time.milliseconds(), noOpResetter)
    );
    assertEquals(Set.of(taskId00, taskId01), thrown.corruptedTasks());
    assertEquals("Tasks [0_1, 0_0] are corrupted and hence need to be re-initialized", thrown.getMessage());
}
// Only the partitions of the *active* task assignment should be registered as
// subscribed topics on the topology builder; standby partitions must not be.
@Test
public void shouldAddSubscribedTopicsFromAssignmentToTopologyMetadata() {
    final Map<TaskId, Set<TopicPartition>> activeTasksAssignment = mkMap(
        mkEntry(taskId01, Set.of(t1p1)),
        mkEntry(taskId02, Set.of(t1p2, t2p2))
    );
    final Map<TaskId, Set<TopicPartition>> standbyTasksAssignment = mkMap(
        mkEntry(taskId03, Set.of(t1p3)),
        mkEntry(taskId04, Set.of(t1p4))
    );
    when(standbyTaskCreator.createTasks(standbyTasksAssignment)).thenReturn(Collections.emptySet());
    taskManager.handleAssignment(activeTasksAssignment, standbyTasksAssignment);
    // active partitions registered; standby partitions never registered
    verify(topologyBuilder).addSubscribedTopicsFromAssignment(eq(Set.of(t1p1, t1p2, t2p2)), anyString());
    verify(topologyBuilder, never()).addSubscribedTopicsFromAssignment(eq(Set.of(t1p3, t1p4)), anyString());
    verify(activeTaskCreator).createTasks(any(), eq(activeTasksAssignment));
}
// With no non-empty task directories on disk, a rebalance start must not lock
// any task directory.
@Test
public void shouldNotLockAnythingIfStateDirIsEmpty() {
    final List<TaskDirectory> noTaskDirs = new ArrayList<>();
    when(stateDirectory.listNonEmptyTaskDirectories()).thenReturn(noTaskDirs);

    taskManager.handleRebalanceStart(singleton("topic"));

    assertTrue(taskManager.lockedTaskDirectories().isEmpty());
}
// At rebalance start only lockable, valid task directories end up locked:
// a directory whose lock cannot be obtained and a directory whose name is not
// a parseable task id ("dummy") are both skipped.
@Test
public void shouldTryToLockValidTaskDirsAtRebalanceStart() throws Exception {
    expectLockObtainedFor(taskId01);
    expectLockFailedFor(taskId10);
    expectDirectoryNotEmpty(taskId01);
    makeTaskFolders(
        taskId01.toString(),
        taskId10.toString(),
        "dummy"  // not a valid task id, must be ignored
    );
    taskManager.handleRebalanceStart(singleton("topic"));
    // only the lockable, valid directory is tracked
    assertThat(taskManager.lockedTaskDirectories(), is(singleton(taskId01)));
}
// Directories that turn out to be empty after locking must be unlocked again
// and excluded from the locked-directories set.
@Test
public void shouldUnlockEmptyDirsAtRebalanceStart() throws Exception {
    expectLockObtainedFor(taskId01, taskId10);
    expectDirectoryNotEmpty(taskId01);
    // taskId10's directory is empty and therefore not worth holding a lock for
    when(stateDirectory.directoryForTaskIsEmpty(taskId10)).thenReturn(true);
    makeTaskFolders(taskId01.toString(), taskId10.toString());
    taskManager.handleRebalanceStart(singleton("topic"));
    verify(stateDirectory).unlock(taskId10);
    assertThat(taskManager.lockedTaskDirectories(), is(singleton(taskId01)));
}
// With no running tasks, completing a rebalance must pause every assigned
// partition on the main consumer.
@Test
public void shouldPauseAllTopicsOnRebalanceComplete() {
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, null);
    final Set<TopicPartition> assignedPartitions = Set.of(t1p0, t1p1);
    when(consumer.assignment()).thenReturn(assignedPartitions);

    taskManager.handleRebalanceComplete();

    verify(consumer).pause(assignedPartitions);
}
// On rebalance completion, partitions belonging to tasks that are already
// RUNNING must stay unpaused; only the remaining assigned partitions are paused.
@Test
public void shouldNotPauseReadyTasksOnRebalanceComplete() {
    final StreamTask statefulTask0 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId00Partitions).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    when(tasks.allNonFailedTasks()).thenReturn(Set.of(statefulTask0));
    final Set<TopicPartition> assigned = Set.of(t1p0, t1p1);
    when(consumer.assignment()).thenReturn(assigned);
    taskManager.handleRebalanceComplete();
    // t1p0 is owned by the running task and stays unpaused; only t1p1 is paused
    verify(consumer).pause(Set.of(t1p1));
}
// After a rebalance, directory locks for tasks that did not end up assigned
// (here taskId03) must be released, while locks for owned tasks (running,
// restoring, and standby) are kept.
@Test
public void shouldReleaseLockForUnassignedTasksAfterRebalance() throws Exception {
    final StreamTask runningStatefulTask = statefulTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId00Partitions).build();
    final StreamTask restoringStatefulTask = statefulTask(taskId01, taskId01ChangelogPartitions)
        .inState(State.RESTORING)
        .withInputPartitions(taskId01Partitions).build();
    final StandbyTask standbyTask = standbyTask(taskId02, taskId02ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId02Partitions).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    when(tasks.allTasksPerId()).thenReturn(mkMap(mkEntry(taskId00, runningStatefulTask)));
    when(stateUpdater.tasks()).thenReturn(Set.of(standbyTask, restoringStatefulTask));
    when(tasks.allNonFailedTasks()).thenReturn(Set.of(runningStatefulTask));
    // four lockable, non-empty task directories on disk; only three tasks are owned
    expectLockObtainedFor(taskId00, taskId01, taskId02, taskId03);
    expectDirectoryNotEmpty(taskId00, taskId01, taskId02, taskId03);
    makeTaskFolders(
        taskId00.toString(),
        taskId01.toString(),
        taskId02.toString(),
        taskId03.toString()
    );
    final Set<TopicPartition> assigned = Set.of(t1p0, t1p1, t1p2);
    when(consumer.assignment()).thenReturn(assigned);
    taskManager.handleRebalanceStart(singleton("topic"));
    taskManager.handleRebalanceComplete();
    // the running task's partition (t1p0) is not paused; the others are
    verify(consumer).pause(Set.of(t1p1, t1p2));
    // the unassigned task's directory lock is released, the rest are retained
    verify(stateDirectory).unlock(taskId03);
    assertThat(taskManager.lockedTaskDirectories(), is(Set.of(taskId00, taskId01, taskId02)));
}
// A RUNNING stateful task reports the LATEST_OFFSET sentinel as its offset sum.
@Test
public void shouldComputeOffsetSumForRunningStatefulTask() {
    final StreamTask runningStatefulTask = statefulTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.RUNNING).build();
    final long changelogOffsetOfRunningTask = Task.LATEST_OFFSET;
    final Map<TopicPartition, Long> changelogOffsets = mkMap(
        mkEntry(t1p0changelog, changelogOffsetOfRunningTask)
    );
    when(runningStatefulTask.changelogOffsets()).thenReturn(changelogOffsets);
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    when(tasks.allTasksPerId()).thenReturn(mkMap(mkEntry(taskId00, runningStatefulTask)));
    // the sentinel is passed through unchanged rather than summed
    assertThat(
        taskManager.taskOffsetSums(),
        is(mkMap(mkEntry(taskId00, changelogOffsetOfRunningTask)))
    );
}
// A restoring active task's offset sum is the sum of its reported changelog
// offsets (5 + 10 = 15).
@Test
public void shouldComputeOffsetSumForNonRunningActiveTask() throws Exception {
    final StreamTask restoringStatefulTask = statefulTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.RESTORING).build();
    final Map<TopicPartition, Long> changelogOffsets = mkMap(
        mkEntry(new TopicPartition("changelog", 0), 5L),
        mkEntry(new TopicPartition("changelog", 1), 10L)
    );
    final Map<TaskId, Long> expectedOffsetSums = mkMap(
        mkEntry(taskId00, 15L)
    );
    when(restoringStatefulTask.changelogOffsets())
        .thenReturn(changelogOffsets);
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    // the task lives in the state updater, not in the registry
    when(stateUpdater.tasks()).thenReturn(Set.of(restoringStatefulTask));
    assertThat(taskManager.taskOffsetSums(), is(expectedOffsetSums));
}
// For a restoring active task held by the state updater, the live changelog
// offset (42) takes precedence over the stale value in the checkpoint file (24).
@Test
public void shouldComputeOffsetSumForRestoringActiveTask() throws Exception {
    final StreamTask restoringStatefulTask = statefulTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.RESTORING).build();
    final long changelogOffset = 42L;
    final Map<TaskId, Long> expectedOffsetSums = mkMap(
        mkEntry(taskId00, changelogOffset)
    );
    when(restoringStatefulTask.changelogOffsets()).thenReturn(mkMap(mkEntry(t1p0changelog, changelogOffset)));
    expectLockObtainedFor(taskId00);
    makeTaskFolders(taskId00.toString());
    // stale checkpoint on disk that must NOT win over the live offset
    final Map<TopicPartition, Long> changelogOffsetInCheckpoint = mkMap(mkEntry(t1p0changelog, 24L));
    writeCheckpointFile(taskId00, changelogOffsetInCheckpoint);
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    when(stateUpdater.tasks()).thenReturn(Set.of(restoringStatefulTask));
    taskManager.handleRebalanceStart(singleton("topic"));
    assertThat(taskManager.taskOffsetSums(), is(expectedOffsetSums));
}
// Same precedence rule for a standby task in the state updater: the live
// changelog offset (42) wins over the checkpoint file value (24).
@Test
public void shouldComputeOffsetSumForRestoringStandbyTask() throws Exception {
    final StandbyTask restoringStandbyTask = standbyTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.RUNNING).build();
    final long changelogOffset = 42L;
    when(restoringStandbyTask.changelogOffsets()).thenReturn(mkMap(mkEntry(t1p0changelog, changelogOffset)));
    expectLockObtainedFor(taskId00);
    makeTaskFolders(taskId00.toString());
    // stale checkpoint on disk that must NOT win over the live offset
    final Map<TopicPartition, Long> changelogOffsetInCheckpoint = mkMap(mkEntry(t1p0changelog, 24L));
    writeCheckpointFile(taskId00, changelogOffsetInCheckpoint);
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    when(stateUpdater.tasks()).thenReturn(Set.of(restoringStandbyTask));
    taskManager.handleRebalanceStart(singleton("topic"));
    assertThat(taskManager.taskOffsetSums(), is(mkMap(mkEntry(taskId00, changelogOffset))));
}
// Offset sums are computed per task across sources: the running task from the
// registry reports the LATEST_OFFSET sentinel; the restoring active and standby
// tasks in the state updater report their live changelog offsets.
@Test
public void shouldComputeOffsetSumForRunningStatefulTaskAndRestoringTask() {
    final StreamTask runningStatefulTask = statefulTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.RUNNING).build();
    final StreamTask restoringStatefulTask = statefulTask(taskId01, taskId01ChangelogPartitions)
        .inState(State.RESTORING).build();
    final StandbyTask restoringStandbyTask = standbyTask(taskId02, taskId02ChangelogPartitions)
        .inState(State.RUNNING).build();
    final long changelogOffsetOfRunningTask = Task.LATEST_OFFSET;
    final long changelogOffsetOfRestoringStatefulTask = 24L;
    final long changelogOffsetOfRestoringStandbyTask = 84L;
    when(runningStatefulTask.changelogOffsets())
        .thenReturn(mkMap(mkEntry(t1p0changelog, changelogOffsetOfRunningTask)));
    when(restoringStatefulTask.changelogOffsets())
        .thenReturn(mkMap(mkEntry(t1p1changelog, changelogOffsetOfRestoringStatefulTask)));
    when(restoringStandbyTask.changelogOffsets())
        .thenReturn(mkMap(mkEntry(t1p2changelog, changelogOffsetOfRestoringStandbyTask)));
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    // running task in the registry; restoring tasks in the state updater
    when(tasks.allTasksPerId()).thenReturn(mkMap(mkEntry(taskId00, runningStatefulTask)))
    when(stateUpdater.tasks()).thenReturn(Set.of(restoringStandbyTask, restoringStatefulTask));
    assertThat(
        taskManager.taskOffsetSums(),
        is(mkMap(
            mkEntry(taskId00, changelogOffsetOfRunningTask),
            mkEntry(taskId01, changelogOffsetOfRestoringStatefulTask),
            mkEntry(taskId02, changelogOffsetOfRestoringStandbyTask)
        ))
    );
}
// Changelog partitions reporting OFFSET_UNKNOWN must be excluded from the sum;
// only the known offset (84) contributes.
@Test
public void shouldSkipUnknownOffsetsWhenComputingOffsetSum() {
    final StreamTask restoringStatefulTask = statefulTask(taskId01, taskId01ChangelogPartitions)
        .inState(State.RESTORING).build();
    final long changelogOffsetOfRestoringStandbyTask = 84L;
    when(restoringStatefulTask.changelogOffsets())
        .thenReturn(mkMap(
            mkEntry(t1p1changelog, changelogOffsetOfRestoringStandbyTask),
            mkEntry(t1p1changelog2, OffsetCheckpoint.OFFSET_UNKNOWN)  // must be skipped
        ));
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks, false);
    when(tasks.allTasksPerId()).thenReturn(mkMap(mkEntry(taskId01, restoringStatefulTask)));
    when(stateUpdater.tasks()).thenReturn(Set.of(restoringStatefulTask));
    assertThat(
        taskManager.taskOffsetSums(),
        is(mkMap(
            mkEntry(taskId01, changelogOffsetOfRestoringStandbyTask)
        ))
    );
}
// A running standby task assigned via handleAssignment() reports the sum of its
// changelog offsets (5 + 10 = 15).
@Test
public void shouldComputeOffsetSumForStandbyTask() throws Exception {
    final Map<TopicPartition, Long> changelogOffsets = mkMap(
        mkEntry(new TopicPartition("changelog", 0), 5L),
        mkEntry(new TopicPartition("changelog", 1), 10L)
    );
    final Map<TaskId, Long> expectedOffsetSums = mkMap(mkEntry(taskId00, 15L));
    final StandbyTask standbyTask = standbyTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId00Partitions)
        .build();
    when(standbyTask.changelogOffsets()).thenReturn(changelogOffsets);
    final TasksRegistry tasks = mock(TasksRegistry.class);
    // note: reassigns the test's taskManager field
    taskManager = setUpTaskManagerWithStateUpdater(StreamsConfigUtils.ProcessingMode.AT_LEAST_ONCE, tasks, false);
    when(stateUpdater.tasks()).thenReturn(Set.of(standbyTask));
    expectLockObtainedFor(taskId00);
    expectDirectoryNotEmpty(taskId00)
    makeTaskFolders(taskId00.toString());
    taskManager.handleRebalanceStart(singleton("topic"));
    taskManager.handleAssignment(emptyMap(), taskId00Assignment);
    assertThat(taskManager.taskOffsetSums(), is(expectedOffsetSums));
}
// An unassigned task whose directory we can lock contributes the sum of the
// offsets found in its on-disk checkpoint file (5 + 10 = 15).
@Test
public void shouldComputeOffsetSumForUnassignedTaskWeCanLock() throws Exception {
    expectLockObtainedFor(taskId00);
    makeTaskFolders(taskId00.toString());
    final Map<TopicPartition, Long> checkpointedOffsets = mkMap(
        mkEntry(new TopicPartition("changelog", 0), 5L),
        mkEntry(new TopicPartition("changelog", 1), 10L)
    );
    writeCheckpointFile(taskId00, checkpointedOffsets);

    taskManager.handleRebalanceStart(singleton("topic"));

    assertThat(taskManager.taskOffsetSums(), is(mkMap(mkEntry(taskId00, 15L))));
}
// For tasks in CREATED or CLOSED state the offset sum comes from the checkpoint
// file on disk rather than from the task itself (5 + 10 = 15).
@ParameterizedTest
@EnumSource(value = State.class, names = {"CREATED", "CLOSED"})
public void shouldComputeOffsetSumFromCheckpointFileForCreatedAndClosedTasks(final State state) throws Exception {
    final Map<TopicPartition, Long> changelogOffsets = mkMap(
        mkEntry(new TopicPartition("changelog", 0), 5L),
        mkEntry(new TopicPartition("changelog", 1), 10L)
    );
    final Map<TaskId, Long> expectedOffsetSums = mkMap(mkEntry(taskId00, 15L));
    final StreamTask task = statefulTask(taskId00, taskId00ChangelogPartitions)
        .inState(state)
        .withInputPartitions(taskId00Partitions)
        .build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    when(tasks.allTasksPerId()).thenReturn(mkMap(mkEntry(taskId00, task)));
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    expectLockObtainedFor(taskId00);
    makeTaskFolders(taskId00.toString());
    writeCheckpointFile(taskId00, changelogOffsets);
    taskManager.handleRebalanceStart(singleton("topic"));
    assertThat(taskManager.taskOffsetSums(), is(expectedOffsetSums));
}
// If the task directory's lock cannot be obtained, the task is neither tracked
// as locked nor reported in the offset sums.
@Test
public void shouldNotReportOffsetSumsForTaskWeCantLock() throws Exception {
    makeTaskFolders(taskId00.toString());
    expectLockFailedFor(taskId00);

    taskManager.handleRebalanceStart(singleton("topic"));

    assertThat(taskManager.lockedTaskDirectories(), is(emptySet()));
    assertTrue(taskManager.taskOffsetSums().isEmpty());
}
// An unassigned task directory with no checkpoint file yields no offset sum.
@Test
public void shouldNotReportOffsetSumsAndReleaseLockForUnassignedTaskWithoutCheckpoint() throws Exception {
    expectLockObtainedFor(taskId00);
    makeTaskFolders(taskId00.toString());
    expectDirectoryNotEmpty(taskId00);
    // the checkpoint file path exists but no checkpoint was ever written
    when(stateDirectory.checkpointFileFor(taskId00)).thenReturn(getCheckpointFile(taskId00));
    taskManager.handleRebalanceStart(singleton("topic"));
    assertTrue(taskManager.taskOffsetSums().isEmpty());
}
// Summing checkpoint offsets must saturate at Long.MAX_VALUE instead of
// overflowing: three offsets of MAX_VALUE / 2 each pin the sum to MAX_VALUE.
@Test
public void shouldPinOffsetSumToLongMaxValueInCaseOfOverflow() throws Exception {
    final long halfMax = Long.MAX_VALUE / 2;
    final Map<TopicPartition, Long> checkpointedOffsets = mkMap(
        mkEntry(new TopicPartition("changelog", 1), halfMax),
        mkEntry(new TopicPartition("changelog", 2), halfMax),
        mkEntry(new TopicPartition("changelog", 3), halfMax)
    );
    expectLockObtainedFor(taskId00);
    makeTaskFolders(taskId00.toString());
    writeCheckpointFile(taskId00, checkpointedOffsets);

    taskManager.handleRebalanceStart(singleton("topic"));

    assertThat(taskManager.taskOffsetSums(), is(mkMap(mkEntry(taskId00, Long.MAX_VALUE))));
}
// A suspended active task that is absent from the new (empty) assignment must
// be committed, closed cleanly, and removed from the registry.
@Test
public void shouldCloseActiveUnassignedSuspendedTasksWhenClosingRevokedTasks() {
    final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .withInputPartitions(taskId00Partitions)
        .inState(State.SUSPENDED)
        .build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    when(tasks.allNonFailedTasks()).thenReturn(Set.of(task00));
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    taskManager.handleAssignment(emptyMap(), emptyMap());
    verify(task00).prepareCommit(true);
    verify(task00).closeClean();
    verify(tasks).removeTask(task00);
}
// If a clean close of an unassigned task fails, the task is closed dirty and
// removed anyway, and the failure is rethrown wrapped with the task id.
@Test
public void shouldCloseDirtyActiveUnassignedTasksWhenErrorCleanClosingTask() {
    final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .withInputPartitions(taskId00Partitions)
        .inState(State.SUSPENDED)
        .build();
    doThrow(new RuntimeException("KABOOM!")).when(task00).closeClean();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    when(tasks.allNonFailedTasks()).thenReturn(Set.of(task00));
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    final RuntimeException thrown = assertThrows(
        RuntimeException.class,
        () -> taskManager.handleAssignment(emptyMap(), emptyMap())
    );
    // clean close attempted first, then the dirty fallback
    verify(task00).closeClean();
    verify(task00).closeDirty();
    verify(tasks).removeTask(task00);
    assertThat(
        thrown.getMessage(),
        is("Encounter unexpected fatal error for task 0_0")
    );
    assertThat(thrown.getCause().getMessage(), is("KABOOM!"));
}
// handleLostAll() closes active tasks as zombies (dirty, without committing the
// final offsets) but leaves standby tasks untouched; the locked task directories
// are only released by the next rebalance start.
@Test
public void shouldCloseActiveTasksWhenHandlingLostTasks() {
    final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .withInputPartitions(taskId00Partitions)
        .inState(State.RUNNING)
        .build();
    final StandbyTask task01 = standbyTask(taskId01, taskId01ChangelogPartitions)
        .inState(State.RUNNING)
        .build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    when(tasks.allTasks()).thenReturn(Set.of(task00, task01));
    when(tasks.allTaskIds()).thenReturn(Set.of(taskId00, taskId01));
    // first call sees both task dirs, second call (after lost-all) sees none
    final ArrayList<TaskDirectory> taskFolders = new ArrayList<>(2);
    taskFolders.add(new TaskDirectory(testFolder.resolve(taskId00.toString()).toFile(), null));
    taskFolders.add(new TaskDirectory(testFolder.resolve(taskId01.toString()).toFile(), null));
    when(stateDirectory.listNonEmptyTaskDirectories())
        .thenReturn(taskFolders)
        .thenReturn(new ArrayList<>());
    expectLockObtainedFor(taskId00, taskId01);
    expectDirectoryNotEmpty(taskId00, taskId01);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    taskManager.handleRebalanceStart(emptySet());
    assertThat(taskManager.lockedTaskDirectories(), is(Set.of(taskId00, taskId01)));
    // this should close only active tasks as zombies
    taskManager.handleLostAll();
    // close of active task
    verify(task00).prepareCommit(false);
    verify(task00).suspend();
    verify(task00).closeDirty();
    verify(tasks).removeTask(task00);
    // standby task not closed
    verify(task01, never()).prepareCommit(anyBoolean());
    verify(task01, never()).suspend();
    verify(task01, never()).closeDirty();
    verify(task01, never()).closeClean();
    verify(tasks, never()).removeTask(task01);
    // The locked task map will not be cleared.
    assertThat(taskManager.lockedTaskDirectories(), is(Set.of(taskId00, taskId01)));
    taskManager.handleRebalanceStart(emptySet());
    assertThat(taskManager.lockedTaskDirectories(), is(emptySet()));
}
// With EOS v2 enabled, losing all tasks must re-initialize the transactional
// streams producer.
@Test
public void shouldReInitializeStreamsProducerOnHandleLostAllIfEosV2Enabled() {
    final TaskManager eosTaskManager =
        setUpTaskManagerWithStateUpdater(ProcessingMode.EXACTLY_ONCE_V2, null, false);

    eosTaskManager.handleLostAll();

    verify(activeTaskCreator).reInitializeProducer();
}
// Corrupted active and standby tasks must be closed dirty, revived, removed from
// the registry, and queued as pending-to-init so the state updater picks them up.
@Test
public void shouldReAddRevivedTasksToStateUpdater() {
    final StreamTask corruptedActiveTask = statefulTask(taskId03, taskId03ChangelogPartitions)
        .inState(State.RESTORING)
        .withInputPartitions(taskId03Partitions).build();
    final StandbyTask corruptedStandbyTask = standbyTask(taskId02, taskId02ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId02Partitions).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    when(tasks.task(taskId03)).thenReturn(corruptedActiveTask);
    when(tasks.task(taskId02)).thenReturn(corruptedStandbyTask);
    taskManager.handleCorruption(Set.of(corruptedActiveTask.id(), corruptedStandbyTask.id()));
    // each task is closed dirty before being revived
    final InOrder activeTaskOrder = inOrder(corruptedActiveTask);
    activeTaskOrder.verify(corruptedActiveTask).closeDirty();
    activeTaskOrder.verify(corruptedActiveTask).revive();
    final InOrder standbyTaskOrder = inOrder(corruptedStandbyTask);
    standbyTaskOrder.verify(corruptedStandbyTask).closeDirty();
    standbyTaskOrder.verify(corruptedStandbyTask).revive();
    verify(tasks).removeTask(corruptedActiveTask);
    verify(tasks).removeTask(corruptedStandbyTask);
    verify(tasks).addPendingTasksToInit(Set.of(corruptedActiveTask));
    verify(tasks).addPendingTasksToInit(Set.of(corruptedStandbyTask));
    verify(consumer).assignment();
}
// A corrupted running task is committed without offsets, has its assigned
// partitions marked for offset reset, is closed dirty, revived, and re-queued
// for initialization — with no consumer commit of an empty offset map.
@Test
public void shouldReviveCorruptTasks() {
    final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .withInputPartitions(taskId00Partitions)
        .inState(State.RUNNING)
        .build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    when(tasks.task(taskId00)).thenReturn(task00);
    when(tasks.allTasksPerId()).thenReturn(singletonMap(taskId00, task00));
    when(tasks.activeTaskIds()).thenReturn(Set.of(taskId00));
    when(task00.prepareCommit(false)).thenReturn(emptyMap());
    doNothing().when(task00).postCommit(anyBoolean());
    when(task00.changelogPartitions()).thenReturn(taskId00ChangelogPartitions);
    when(consumer.assignment()).thenReturn(taskId00Partitions);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    taskManager.handleCorruption(singleton(taskId00));
    verify(task00).prepareCommit(false);
    verify(task00).postCommit(true);
    // partitions still assigned to the consumer are scheduled for offset reset
    verify(task00).addPartitionsForOffsetReset(taskId00Partitions);
    verify(task00).changelogPartitions();
    verify(task00).closeDirty();
    verify(task00).revive();
    verify(tasks).removeTask(task00);
    verify(tasks).addPendingTasksToInit(Set.of(task00));
    // an empty offset map must never be committed
    verify(consumer, never()).commitSync(emptyMap());
}
// Even when suspending a corrupted task throws, the task must still be closed
// dirty, revived and re-queued; postCommit is skipped after the failure.
@Test
public void shouldReviveCorruptTasksEvenIfTheyCannotCloseClean() {
    final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .withInputPartitions(taskId00Partitions)
        .inState(State.RUNNING)
        .build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    when(tasks.task(taskId00)).thenReturn(task00);
    when(tasks.allTasksPerId()).thenReturn(singletonMap(taskId00, task00));
    when(tasks.activeTaskIds()).thenReturn(Set.of(taskId00));
    when(task00.prepareCommit(false)).thenReturn(emptyMap());
    when(task00.changelogPartitions()).thenReturn(taskId00ChangelogPartitions);
    doThrow(new RuntimeException("oops")).when(task00).suspend();
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    taskManager.handleCorruption(singleton(taskId00));
    verify(task00).prepareCommit(false);
    verify(task00).suspend();
    verify(task00, never()).postCommit(anyBoolean()); // postCommit is NOT called
    verify(task00).closeDirty();
    verify(task00).revive();
    verify(tasks).removeTask(task00);
    verify(tasks).addPendingTasksToInit(Set.of(task00));
    // consumer.assignment() was never stubbed here, so no partitions to reset
    verify(task00).addPartitionsForOffsetReset(emptySet());
}
// While handling a corrupted task, other active tasks that need a commit are
// committed; only the corrupted task gets its partitions reset.
@Test
public void shouldCommitNonCorruptedTasksOnTaskCorruptedException() {
    final StreamTask corruptedTask = statefulTask(taskId00, taskId00ChangelogPartitions)
        .withInputPartitions(taskId00Partitions)
        .inState(State.RUNNING)
        .build();
    final StreamTask nonCorruptedTask = statefulTask(taskId01, taskId01ChangelogPartitions)
        .withInputPartitions(taskId01Partitions)
        .inState(State.RUNNING)
        .build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    when(tasks.task(taskId00)).thenReturn(corruptedTask);
    when(tasks.allTasksPerId()).thenReturn(mkMap(
        mkEntry(taskId00, corruptedTask),
        mkEntry(taskId01, nonCorruptedTask)
    ));
    when(tasks.activeTaskIds()).thenReturn(Set.of(taskId00, taskId01));
    when(nonCorruptedTask.commitNeeded()).thenReturn(true);
    when(nonCorruptedTask.prepareCommit(true)).thenReturn(emptyMap());
    when(corruptedTask.prepareCommit(false)).thenReturn(emptyMap());
    doNothing().when(corruptedTask).postCommit(anyBoolean());
    when(consumer.assignment()).thenReturn(taskId00Partitions);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    taskManager.handleCorruption(Set.of(taskId00));
    // the healthy task is committed but not reset
    verify(nonCorruptedTask).prepareCommit(true);
    verify(nonCorruptedTask, never()).addPartitionsForOffsetReset(any());
    verify(corruptedTask).addPartitionsForOffsetReset(taskId00Partitions);
    verify(corruptedTask).changelogPartitions();
    verify(corruptedTask).postCommit(true);
    // check that we should not commit empty map either
    verify(consumer, never()).commitSync(emptyMap());
}
// While handling corruption, restoring active tasks and running standby tasks
// outside the registry must not be touched by the commit path at all.
@Test
public void shouldNotCommitNonCorruptedRestoringActiveTasksAndNotCommitRunningStandbyTasks() {
    final StreamTask activeRestoringTask = statefulTask(taskId00, taskId00ChangelogPartitions)
        .withInputPartitions(taskId00Partitions)
        .inState(State.RESTORING).build();
    final StandbyTask standbyTask = standbyTask(taskId01, taskId01ChangelogPartitions)
        .withInputPartitions(taskId01Partitions)
        .inState(State.RUNNING).build();
    final StreamTask corruptedTask = statefulTask(taskId02, taskId02ChangelogPartitions)
        .withInputPartitions(taskId02Partitions)
        .inState(State.RUNNING).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    // only the corrupted task is registered; the other two are not in the registry
    when(tasks.allTasksPerId()).thenReturn(mkMap(mkEntry(taskId02, corruptedTask)));
    when(tasks.task(taskId02)).thenReturn(corruptedTask);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    when(consumer.assignment()).thenReturn(intersection(HashSet::new, taskId00Partitions, taskId01Partitions, taskId02Partitions));
    taskManager.handleCorruption(Set.of(taskId02));
    verify(activeRestoringTask, never()).commitNeeded();
    verify(activeRestoringTask, never()).prepareCommit(true);
    verify(activeRestoringTask, never()).postCommit(anyBoolean());
    verify(standbyTask, never()).commitNeeded();
    verify(standbyTask, never()).prepareCommit(true);
    verify(standbyTask, never()).postCommit(anyBoolean());
}
// The corrupted standby must complete its full clean-and-revive lifecycle
// BEFORE the non-corrupted active task is committed — even when that commit
// then throws a TaskMigratedException.
@Test
public void shouldCleanAndReviveCorruptedStandbyTasksBeforeCommittingNonCorruptedTasks() {
    final StandbyTask corruptedStandby = standbyTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId00Partitions).build();
    final StreamTask runningNonCorruptedActive = statefulTask(taskId01, taskId01ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId01Partitions).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    when(tasks.task(taskId00)).thenReturn(corruptedStandby);
    when(tasks.allTasksPerId()).thenReturn(mkMap(
        mkEntry(taskId00, corruptedStandby),
        mkEntry(taskId01, runningNonCorruptedActive)
    ));
    when(tasks.activeTaskIds()).thenReturn(Set.of(taskId01));
    when(runningNonCorruptedActive.commitNeeded()).thenReturn(true);
    // the commit of the healthy task fails with a migration error
    when(runningNonCorruptedActive.prepareCommit(true))
        .thenThrow(new TaskMigratedException("You dropped out of the group!", new RuntimeException()));
    when(corruptedStandby.changelogPartitions()).thenReturn(taskId00ChangelogPartitions);
    when(corruptedStandby.prepareCommit(false)).thenReturn(emptyMap());
    doNothing().when(corruptedStandby).suspend();
    doNothing().when(corruptedStandby).postCommit(anyBoolean());
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    assertThrows(TaskMigratedException.class, () -> taskManager.handleCorruption(singleton(taskId00)));
    // verifying the entire task lifecycle
    final InOrder taskOrder = inOrder(corruptedStandby, runningNonCorruptedActive);
    taskOrder.verify(corruptedStandby).prepareCommit(false);
    taskOrder.verify(corruptedStandby).suspend();
    taskOrder.verify(corruptedStandby).postCommit(true);
    taskOrder.verify(corruptedStandby).closeDirty();
    taskOrder.verify(corruptedStandby).revive();
    taskOrder.verify(runningNonCorruptedActive).prepareCommit(true);
    verify(tasks).removeTask(corruptedStandby);
    verify(tasks).addPendingTasksToInit(Set.of(corruptedStandby));
}
// While a rebalance is in progress, handleCorruption() must not commit any
// non-corrupted tasks; the corrupted task is still reset and post-committed.
@Test
public void shouldNotAttemptToCommitInHandleCorruptedDuringARebalance() {
    final StreamTask corruptedActive = statefulTask(taskId00, taskId00ChangelogPartitions)
        .withInputPartitions(taskId00Partitions)
        .inState(State.RUNNING)
        .build();
    final StreamTask uncorruptedActive = statefulTask(taskId01, taskId01ChangelogPartitions)
        .withInputPartitions(taskId01Partitions)
        .inState(State.RUNNING)
        .build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    when(tasks.task(taskId00)).thenReturn(corruptedActive);
    when(tasks.allTasksPerId()).thenReturn(mkMap(
        mkEntry(taskId00, corruptedActive),
        mkEntry(taskId01, uncorruptedActive)
    ));
    when(tasks.activeTaskIds()).thenReturn(Set.of(taskId00, taskId01));
    when(uncorruptedActive.commitNeeded()).thenReturn(true);
    when(uncorruptedActive.prepareCommit(true)).thenReturn(emptyMap());
    when(corruptedActive.prepareCommit(false)).thenReturn(emptyMap());
    doNothing().when(corruptedActive).postCommit(anyBoolean());
    when(consumer.assignment()).thenReturn(taskId00Partitions);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    // put the task manager into the rebalance-in-progress state
    taskManager.handleRebalanceStart(singleton(topic1));
    assertThat(taskManager.rebalanceInProgress(), is(true));
    taskManager.handleCorruption(singleton(taskId00));
    // no commit of the healthy task during the rebalance
    verify(uncorruptedActive, never()).prepareCommit(anyBoolean());
    verify(uncorruptedActive, never()).postCommit(anyBoolean());
    verify(corruptedActive).changelogPartitions();
    verify(corruptedActive).postCommit(true);
    verify(corruptedActive).addPartitionsForOffsetReset(taskId00Partitions);
    verify(consumer, never()).commitSync(emptyMap());
}
/**
 * EOS-v2: during {@code handleCorruption}, if committing the offsets of an
 * uncorrupted task times out, that task's transactional state is unknown after
 * the failed commit, so it must also be closed dirty and revived — in addition
 * to the corrupted task — and both tasks must have their input partitions
 * scheduled for an offset reset.
 */
@Test
public void shouldCloseAndReviveUncorruptedTasksWhenTimeoutExceptionThrownFromCommitDuringHandleCorruptedWithEOS() {
    final StreamTask corruptedActive = statefulTask(taskId00, taskId00ChangelogPartitions)
        .withInputPartitions(taskId00Partitions)
        .inState(State.RUNNING)
        .build();
    // this task will time out during commit
    final StreamTask uncorruptedActive = statefulTask(taskId01, taskId01ChangelogPartitions)
        .withInputPartitions(taskId01Partitions)
        .inState(State.RUNNING)
        .build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    when(tasks.task(taskId00)).thenReturn(corruptedActive);
    when(tasks.allTasksPerId()).thenReturn(mkMap(
        mkEntry(taskId00, corruptedActive),
        mkEntry(taskId01, uncorruptedActive)
    ));
    when(tasks.activeTaskIds()).thenReturn(Set.of(taskId00, taskId01));
    final StreamsProducer producer = mock(StreamsProducer.class);
    when(activeTaskCreator.streamsProducer()).thenReturn(producer);
    final ConsumerGroupMetadata groupMetadata = mock(ConsumerGroupMetadata.class);
    when(consumer.groupMetadata()).thenReturn(groupMetadata);
    when(consumer.assignment()).thenReturn(union(HashSet::new, taskId00Partitions, taskId01Partitions));
    // mock uncorrupted task to indicate that it needs commit and will return offsets
    final Map<TopicPartition, OffsetAndMetadata> offsets = singletonMap(t1p1, new OffsetAndMetadata(0L, null));
    when(tasks.tasks(singleton(taskId01))).thenReturn(Set.of(uncorruptedActive));
    when(uncorruptedActive.commitNeeded()).thenReturn(true);
    when(uncorruptedActive.prepareCommit(true)).thenReturn(offsets);
    when(uncorruptedActive.prepareCommit(false)).thenReturn(emptyMap());
    when(uncorruptedActive.changelogPartitions()).thenReturn(taskId01ChangelogPartitions);
    doNothing().when(uncorruptedActive).suspend();
    doNothing().when(uncorruptedActive).closeDirty();
    doNothing().when(uncorruptedActive).revive();
    doNothing().when(uncorruptedActive).markChangelogAsCorrupted(taskId01ChangelogPartitions);
    // corrupted task doesn't need commit
    when(corruptedActive.commitNeeded()).thenReturn(false);
    when(corruptedActive.prepareCommit(false)).thenReturn(emptyMap());
    when(corruptedActive.changelogPartitions()).thenReturn(taskId00ChangelogPartitions);
    doNothing().when(corruptedActive).suspend();
    doNothing().when(corruptedActive).postCommit(true);
    doNothing().when(corruptedActive).closeDirty();
    doNothing().when(corruptedActive).revive();
    // the EOS transaction commit times out, which must trigger the dirty close of the uncorrupted task
    doThrow(new TimeoutException()).when(producer).commitTransaction(offsets, groupMetadata);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.EXACTLY_ONCE_V2, tasks);
    taskManager.handleCorruption(singleton(taskId00));
    // 1. verify corrupted task was closed dirty and revived
    final InOrder corruptedOrder = inOrder(corruptedActive, tasks);
    corruptedOrder.verify(corruptedActive).prepareCommit(false);
    corruptedOrder.verify(corruptedActive).suspend();
    corruptedOrder.verify(corruptedActive).postCommit(true);
    corruptedOrder.verify(corruptedActive).closeDirty();
    corruptedOrder.verify(tasks).removeTask(corruptedActive);
    corruptedOrder.verify(corruptedActive).revive();
    corruptedOrder.verify(tasks).addPendingTasksToInit(Set.of(corruptedActive));
    // 2. verify uncorrupted task attempted commit, failed with timeout, then was closed dirty and revived
    final InOrder uncorruptedOrder = inOrder(uncorruptedActive, producer, tasks);
    uncorruptedOrder.verify(uncorruptedActive).prepareCommit(true);
    uncorruptedOrder.verify(producer).commitTransaction(offsets, groupMetadata); // tries to commit, throws TimeoutException
    uncorruptedOrder.verify(uncorruptedActive).suspend();
    uncorruptedOrder.verify(uncorruptedActive).postCommit(true);
    uncorruptedOrder.verify(uncorruptedActive).closeDirty();
    uncorruptedOrder.verify(tasks).removeTask(uncorruptedActive);
    uncorruptedOrder.verify(uncorruptedActive).revive();
    uncorruptedOrder.verify(tasks).addPendingTasksToInit(Set.of(uncorruptedActive));
    // verify both tasks had their input partitions reset
    verify(corruptedActive).addPartitionsForOffsetReset(taskId00Partitions);
    verify(uncorruptedActive).addPartitionsForOffsetReset(taskId01Partitions);
}
/**
 * ALOS: during {@code handleCorruption}, if the synchronous consumer commit of
 * an uncorrupted task's offsets times out, the committed state of that task is
 * unknown, so it must be closed dirty and revived along with the corrupted
 * task, and both tasks must have their input partitions scheduled for an
 * offset reset.
 */
@Test
public void shouldCloseAndReviveUncorruptedTasksWhenTimeoutExceptionThrownFromCommitWithAlos() {
    final StreamTask corruptedActive = statefulTask(taskId00, taskId00ChangelogPartitions)
        .withInputPartitions(taskId00Partitions)
        .inState(State.RUNNING)
        .build();
    // this task will time out during commit
    final StreamTask uncorruptedActive = statefulTask(taskId01, taskId01ChangelogPartitions)
        .withInputPartitions(taskId01Partitions)
        .inState(State.RUNNING)
        .build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    when(tasks.task(taskId00)).thenReturn(corruptedActive);
    when(tasks.allTasksPerId()).thenReturn(mkMap(
        mkEntry(taskId00, corruptedActive),
        mkEntry(taskId01, uncorruptedActive)
    ));
    when(tasks.activeTaskIds()).thenReturn(Set.of(taskId00, taskId01));
    when(tasks.activeTasks()).thenReturn(Set.of(corruptedActive, uncorruptedActive));
    // we need to mock uncorrupted task to indicate that it needs commit and will return offsets
    final Map<TopicPartition, OffsetAndMetadata> offsets = singletonMap(t1p1, new OffsetAndMetadata(0L, null));
    when(uncorruptedActive.commitNeeded()).thenReturn(true);
    when(uncorruptedActive.prepareCommit(true)).thenReturn(offsets);
    when(uncorruptedActive.changelogPartitions()).thenReturn(taskId01ChangelogPartitions);
    doNothing().when(uncorruptedActive).suspend();
    doNothing().when(uncorruptedActive).closeDirty();
    doNothing().when(uncorruptedActive).revive();
    // corrupted task doesn't need commit
    when(corruptedActive.commitNeeded()).thenReturn(false);
    when(corruptedActive.prepareCommit(false)).thenReturn(emptyMap());
    when(corruptedActive.changelogPartitions()).thenReturn(taskId00ChangelogPartitions);
    doNothing().when(corruptedActive).suspend();
    doNothing().when(corruptedActive).postCommit(anyBoolean());
    doNothing().when(corruptedActive).closeDirty();
    doNothing().when(corruptedActive).revive();
    // the consumer's synchronous commit times out for the uncorrupted task's offsets
    doThrow(new TimeoutException()).when(consumer).commitSync(offsets);
    when(consumer.assignment()).thenReturn(union(HashSet::new, taskId00Partitions, taskId01Partitions));
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    taskManager.handleCorruption(singleton(taskId00));
    // 1. verify corrupted task was closed dirty and revived
    final InOrder corruptedOrder = inOrder(corruptedActive, tasks);
    corruptedOrder.verify(corruptedActive).prepareCommit(false);
    corruptedOrder.verify(corruptedActive).suspend();
    corruptedOrder.verify(corruptedActive).postCommit(true);
    corruptedOrder.verify(corruptedActive).closeDirty();
    corruptedOrder.verify(tasks).removeTask(corruptedActive);
    corruptedOrder.verify(corruptedActive).revive();
    corruptedOrder.verify(tasks).addPendingTasksToInit(Set.of(corruptedActive));
    // 2. verify uncorrupted task attempted commit, failed with timeout, then was closed dirty and revived
    final InOrder uncorruptedOrder = inOrder(uncorruptedActive, consumer, tasks);
    uncorruptedOrder.verify(uncorruptedActive).prepareCommit(true);
    uncorruptedOrder.verify(consumer).commitSync(offsets); // attempt commit, throws TimeoutException
    uncorruptedOrder.verify(uncorruptedActive).prepareCommit(false);
    uncorruptedOrder.verify(uncorruptedActive).suspend();
    uncorruptedOrder.verify(uncorruptedActive).closeDirty();
    uncorruptedOrder.verify(tasks).removeTask(uncorruptedActive);
    uncorruptedOrder.verify(uncorruptedActive).revive();
    uncorruptedOrder.verify(tasks).addPendingTasksToInit(Set.of(uncorruptedActive));
    // verify both tasks had their input partitions reset
    verify(corruptedActive).addPartitionsForOffsetReset(taskId00Partitions);
    verify(uncorruptedActive).addPartitionsForOffsetReset(taskId01Partitions);
}
/**
 * ALOS: during {@code handleRevocation}, the offsets of the revoked task AND
 * of any unrevoked task needing commit are committed together. If that commit
 * times out, every task that participated in the commit must be closed dirty
 * and revived (its committed state is unknown), while unrevoked tasks that did
 * not need a commit stay untouched and keep RUNNING.
 */
@Test
public void shouldCloseAndReviveUncorruptedTasksWhenTimeoutExceptionThrownFromCommitDuringRevocationWithAlos() {
    // task being revoked - needs commit
    final StreamTask revokedActiveTask = statefulTask(taskId00, taskId00ChangelogPartitions)
        .withInputPartitions(taskId00Partitions)
        .inState(State.RUNNING)
        .build();
    // unrevoked task that needs commit - this will also be affected by timeout
    final StreamTask unrevokedActiveTaskWithCommit = statefulTask(taskId01, taskId01ChangelogPartitions)
        .withInputPartitions(taskId01Partitions)
        .inState(State.RUNNING)
        .build();
    // unrevoked task without commit needed - this should stay RUNNING
    final StreamTask unrevokedActiveTaskWithoutCommit = statefulTask(taskId02, taskId02ChangelogPartitions)
        .withInputPartitions(taskId02Partitions)
        .inState(State.RUNNING)
        .build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    when(tasks.allTasks()).thenReturn(Set.of(revokedActiveTask, unrevokedActiveTaskWithCommit, unrevokedActiveTaskWithoutCommit));
    when(consumer.assignment()).thenReturn(union(HashSet::new, taskId00Partitions, taskId01Partitions, taskId02Partitions));
    // revoked task needs commit
    final Map<TopicPartition, OffsetAndMetadata> revokedTaskOffsets = singletonMap(t1p0, new OffsetAndMetadata(0L, null));
    when(revokedActiveTask.commitNeeded()).thenReturn(true);
    when(revokedActiveTask.prepareCommit(true)).thenReturn(revokedTaskOffsets);
    when(revokedActiveTask.changelogPartitions()).thenReturn(taskId00ChangelogPartitions);
    doNothing().when(revokedActiveTask).suspend();
    doNothing().when(revokedActiveTask).closeDirty();
    doNothing().when(revokedActiveTask).revive();
    // unrevoked task with commit also takes part in commit
    final Map<TopicPartition, OffsetAndMetadata> unrevokedTaskOffsets = singletonMap(t1p1, new OffsetAndMetadata(1L, null));
    when(unrevokedActiveTaskWithCommit.commitNeeded()).thenReturn(true);
    when(unrevokedActiveTaskWithCommit.prepareCommit(true)).thenReturn(unrevokedTaskOffsets);
    when(unrevokedActiveTaskWithCommit.changelogPartitions()).thenReturn(taskId01ChangelogPartitions);
    doNothing().when(unrevokedActiveTaskWithCommit).suspend();
    doNothing().when(unrevokedActiveTaskWithCommit).closeDirty();
    doNothing().when(unrevokedActiveTaskWithCommit).revive();
    // unrevoked task without commit needed
    when(unrevokedActiveTaskWithoutCommit.commitNeeded()).thenReturn(false);
    // mock timeout during commit - all offsets from tasks needing commit
    final Map<TopicPartition, OffsetAndMetadata> expectedCommittedOffsets = new HashMap<>();
    expectedCommittedOffsets.putAll(revokedTaskOffsets);
    expectedCommittedOffsets.putAll(unrevokedTaskOffsets);
    doThrow(new TimeoutException()).when(consumer).commitSync(expectedCommittedOffsets);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    taskManager.handleRevocation(taskId00Partitions);
    // 1. verify that the revoked task was suspended, closed dirty, and revived
    final InOrder revokedOrder = inOrder(revokedActiveTask, tasks);
    revokedOrder.verify(revokedActiveTask).prepareCommit(true);
    revokedOrder.verify(revokedActiveTask).suspend();
    revokedOrder.verify(revokedActiveTask).closeDirty();
    revokedOrder.verify(tasks).removeTask(revokedActiveTask);
    revokedOrder.verify(revokedActiveTask).revive();
    revokedOrder.verify(tasks).addPendingTasksToInit(argThat(set -> set.contains(revokedActiveTask)));
    // 2. verify that the unrevoked task with commit also tried to commit and was closed dirty due to timeout
    final InOrder unrevokedOrder = inOrder(unrevokedActiveTaskWithCommit, consumer, tasks);
    unrevokedOrder.verify(unrevokedActiveTaskWithCommit).prepareCommit(true);
    unrevokedOrder.verify(consumer).commitSync(expectedCommittedOffsets); // timeout thrown here
    unrevokedOrder.verify(unrevokedActiveTaskWithCommit).suspend();
    unrevokedOrder.verify(unrevokedActiveTaskWithCommit).closeDirty();
    unrevokedOrder.verify(tasks).removeTask(unrevokedActiveTaskWithCommit);
    unrevokedOrder.verify(unrevokedActiveTaskWithCommit).revive();
    unrevokedOrder.verify(tasks).addPendingTasksToInit(argThat(set -> set.contains(unrevokedActiveTaskWithCommit)));
    // 3. verify that the unrevoked task without commit needed was not affected
    verify(unrevokedActiveTaskWithoutCommit, never()).prepareCommit(anyBoolean());
    verify(unrevokedActiveTaskWithoutCommit, never()).suspend();
    verify(unrevokedActiveTaskWithoutCommit, never()).closeDirty();
    // input partitions were reset for affected tasks
    verify(revokedActiveTask).addPartitionsForOffsetReset(taskId00Partitions);
    verify(unrevokedActiveTaskWithCommit).addPartitionsForOffsetReset(taskId01Partitions);
    verify(unrevokedActiveTaskWithoutCommit, never()).addPartitionsForOffsetReset(any());
}
/**
 * EOS-v2: during {@code handleRevocation}, the offsets of the revoked task AND
 * of any unrevoked task needing commit are committed in a single transaction.
 * If that transaction commit times out, every task that participated must be
 * closed dirty and revived (its transactional state is unknown), while
 * unrevoked tasks that did not need a commit stay untouched and keep RUNNING.
 */
@Test
public void shouldCloseAndReviveUncorruptedTasksWhenTimeoutExceptionThrownFromCommitDuringRevocationWithEOS() {
    // task being revoked - needs commit
    final StreamTask revokedActiveTask = statefulTask(taskId00, taskId00ChangelogPartitions)
        .withInputPartitions(taskId00Partitions)
        .inState(State.RUNNING)
        .build();
    // unrevoked task that needs commit - this will also be affected by timeout
    final StreamTask unrevokedActiveTaskWithCommit = statefulTask(taskId01, taskId01ChangelogPartitions)
        .withInputPartitions(taskId01Partitions)
        .inState(State.RUNNING)
        .build();
    // unrevoked task without commit needed - this should remain RUNNING
    final StreamTask unrevokedActiveTaskWithoutCommit = statefulTask(taskId02, taskId02ChangelogPartitions)
        .withInputPartitions(taskId02Partitions)
        .inState(State.RUNNING)
        .build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    when(tasks.allTasks()).thenReturn(Set.of(revokedActiveTask, unrevokedActiveTaskWithCommit, unrevokedActiveTaskWithoutCommit));
    when(tasks.tasks(Set.of(taskId00, taskId01))).thenReturn(Set.of(revokedActiveTask, unrevokedActiveTaskWithCommit));
    final StreamsProducer producer = mock(StreamsProducer.class);
    when(activeTaskCreator.streamsProducer()).thenReturn(producer);
    final ConsumerGroupMetadata groupMetadata = mock(ConsumerGroupMetadata.class);
    when(consumer.groupMetadata()).thenReturn(groupMetadata);
    when(consumer.assignment()).thenReturn(union(HashSet::new, taskId00Partitions, taskId01Partitions, taskId02Partitions));
    // revoked task needs commit
    final Map<TopicPartition, OffsetAndMetadata> revokedTaskOffsets = singletonMap(t1p0, new OffsetAndMetadata(0L, null));
    when(revokedActiveTask.commitNeeded()).thenReturn(true);
    when(revokedActiveTask.prepareCommit(true)).thenReturn(revokedTaskOffsets);
    when(revokedActiveTask.changelogPartitions()).thenReturn(taskId00ChangelogPartitions);
    doNothing().when(revokedActiveTask).suspend();
    doNothing().when(revokedActiveTask).closeDirty();
    doNothing().when(revokedActiveTask).revive();
    doNothing().when(revokedActiveTask).markChangelogAsCorrupted(taskId00ChangelogPartitions);
    // unrevoked task with commit also takes part in EOS-v2 commit
    final Map<TopicPartition, OffsetAndMetadata> unrevokedTaskOffsets = singletonMap(t1p1, new OffsetAndMetadata(1L, null));
    when(unrevokedActiveTaskWithCommit.commitNeeded()).thenReturn(true);
    when(unrevokedActiveTaskWithCommit.prepareCommit(true)).thenReturn(unrevokedTaskOffsets);
    when(unrevokedActiveTaskWithCommit.changelogPartitions()).thenReturn(taskId01ChangelogPartitions);
    doNothing().when(unrevokedActiveTaskWithCommit).suspend();
    doNothing().when(unrevokedActiveTaskWithCommit).closeDirty();
    doNothing().when(unrevokedActiveTaskWithCommit).revive();
    doNothing().when(unrevokedActiveTaskWithCommit).markChangelogAsCorrupted(taskId01ChangelogPartitions);
    // unrevoked task without commit needed
    when(unrevokedActiveTaskWithoutCommit.commitNeeded()).thenReturn(false);
    // mock timeout during commit - all offsets from tasks needing commit
    final Map<TopicPartition, OffsetAndMetadata> expectedCommittedOffsets = new HashMap<>();
    expectedCommittedOffsets.putAll(revokedTaskOffsets);
    expectedCommittedOffsets.putAll(unrevokedTaskOffsets);
    doThrow(new TimeoutException()).when(producer).commitTransaction(expectedCommittedOffsets, groupMetadata);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.EXACTLY_ONCE_V2, tasks);
    taskManager.handleRevocation(taskId00Partitions);
    // 1. verify that the revoked task was suspended, closed dirty, and revived
    final InOrder revokedOrder = inOrder(revokedActiveTask, tasks);
    revokedOrder.verify(revokedActiveTask).prepareCommit(true);
    revokedOrder.verify(revokedActiveTask).suspend();
    revokedOrder.verify(revokedActiveTask).closeDirty();
    revokedOrder.verify(tasks).removeTask(revokedActiveTask);
    revokedOrder.verify(revokedActiveTask).revive();
    revokedOrder.verify(tasks).addPendingTasksToInit(argThat(set -> set.contains(revokedActiveTask)));
    // 2. verify that the unrevoked task with commit also tried to commit and was closed dirty due to timeout
    final InOrder unrevokedOrder = inOrder(unrevokedActiveTaskWithCommit, producer, tasks);
    unrevokedOrder.verify(unrevokedActiveTaskWithCommit).prepareCommit(true);
    unrevokedOrder.verify(producer).commitTransaction(expectedCommittedOffsets, groupMetadata); // timeout thrown here
    unrevokedOrder.verify(unrevokedActiveTaskWithCommit).suspend();
    unrevokedOrder.verify(unrevokedActiveTaskWithCommit).closeDirty();
    unrevokedOrder.verify(tasks).removeTask(unrevokedActiveTaskWithCommit);
    unrevokedOrder.verify(unrevokedActiveTaskWithCommit).revive();
    unrevokedOrder.verify(tasks).addPendingTasksToInit(argThat(set -> set.contains(unrevokedActiveTaskWithCommit)));
    // 3. verify that the unrevoked task without commit needed was not affected
    verify(unrevokedActiveTaskWithoutCommit, never()).prepareCommit(anyBoolean());
    verify(unrevokedActiveTaskWithoutCommit, never()).suspend();
    verify(unrevokedActiveTaskWithoutCommit, never()).closeDirty();
    // verify input partitions were reset for affected tasks
    verify(revokedActiveTask).addPartitionsForOffsetReset(taskId00Partitions);
    verify(unrevokedActiveTaskWithCommit).addPartitionsForOffsetReset(taskId01Partitions);
    verify(unrevokedActiveTaskWithoutCommit, never()).addPartitionsForOffsetReset(any());
}
/**
 * A standby task that is no longer part of the new assignment must be removed
 * from the state updater, suspended, and closed cleanly when the task manager
 * handles an (empty) assignment.
 */
@Test
public void shouldCloseStandbyUnassignedTasksWhenCreatingNewTasks() {
    final StandbyTask unassignedStandby = standbyTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId00Partitions)
        .build();
    final TasksRegistry tasksRegistry = mock(TasksRegistry.class);
    when(tasksRegistry.drainPendingTasksToInit()).thenReturn(emptySet());
    taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasksRegistry);
    when(stateUpdater.tasks()).thenReturn(Set.of(unassignedStandby));
    // the state updater reports the removal of the standby via an already-completed future
    final CompletableFuture<StateUpdater.RemovedTaskResult> removalFuture = new CompletableFuture<>();
    when(stateUpdater.remove(unassignedStandby.id())).thenReturn(removalFuture);
    removalFuture.complete(new StateUpdater.RemovedTaskResult(unassignedStandby));

    // an empty assignment means the standby is no longer assigned
    taskManager.handleAssignment(emptyMap(), emptyMap());

    verify(stateUpdater).remove(unassignedStandby.id());
    verify(unassignedStandby).suspend();
    verify(unassignedStandby).closeClean();
    verify(activeTaskCreator).createTasks(any(), eq(emptyMap()));
    verify(standbyTaskCreator).createTasks(emptyMap());
}
/**
 * When the assignment matches the tasks already owned (one non-failed active
 * task in the registry and one standby already inside the state updater), no
 * task should be (re-)added to the state updater, and {@code checkStateUpdater}
 * should report that everything is ready. The second invocation at the end
 * verifies that handling the same assignment again is idempotent.
 */
@Test
public void shouldAddNonResumedSuspendedTasks() {
    final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .withInputPartitions(taskId00Partitions)
        .inState(State.RUNNING)
        .build();
    final StandbyTask task01 = standbyTask(taskId01, taskId01ChangelogPartitions)
        .withInputPartitions(taskId01Partitions)
        .inState(State.RUNNING)
        .build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    when(tasks.allNonFailedTasks()).thenReturn(Set.of(task00));
    when(tasks.drainPendingTasksToInit()).thenReturn(emptySet());
    when(tasks.hasPendingTasksToInit()).thenReturn(false);
    taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    when(stateUpdater.tasks()).thenReturn(Set.of(task01));
    when(stateUpdater.restoresActiveTasks()).thenReturn(false);
    when(stateUpdater.hasExceptionsAndFailedTasks()).thenReturn(false);
    taskManager.handleAssignment(taskId00Assignment, taskId01Assignment);
    // checkStateUpdater should return true (all tasks ready, no pending work)
    assertTrue(taskManager.checkStateUpdater(time.milliseconds(), noOpResetter));
    verify(stateUpdater, never()).add(any(Task.class));
    verify(activeTaskCreator).createTasks(any(), eq(emptyMap()));
    verify(standbyTaskCreator).createTasks(emptyMap());
    // verify idempotence
    taskManager.handleAssignment(taskId00Assignment, taskId01Assignment);
    assertTrue(taskManager.checkStateUpdater(time.milliseconds(), noOpResetter));
    verify(stateUpdater, never()).add(any(Task.class));
}
/**
 * A rebalance can keep a task assigned but change its input partitions. The
 * task manager must propagate the new partition set to the task via
 * {@code updateInputPartitions} instead of closing and recreating it, and the
 * task must stay RUNNING afterwards.
 */
@Test
public void shouldUpdateInputPartitionsAfterRebalance() {
    final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .withInputPartitions(taskId00Partitions)
        .inState(State.RUNNING)
        .build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    // same task id, but a different input partition than before the rebalance
    final Set<TopicPartition> newPartitionsSet = Set.of(t1p1);
    when(tasks.allNonFailedTasks()).thenReturn(Set.of(task00));
    when(tasks.drainPendingTasksToInit()).thenReturn(emptySet());
    when(tasks.hasPendingTasksToInit()).thenReturn(false);
    when(tasks.updateActiveTaskInputPartitions(task00, newPartitionsSet)).thenReturn(true);
    taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    when(stateUpdater.tasks()).thenReturn(emptySet());
    when(stateUpdater.restoresActiveTasks()).thenReturn(false);
    when(stateUpdater.hasExceptionsAndFailedTasks()).thenReturn(false);
    final Map<TaskId, Set<TopicPartition>> taskIdSetMap = singletonMap(taskId00, newPartitionsSet);
    taskManager.handleAssignment(taskIdSetMap, emptyMap());
    verify(task00).updateInputPartitions(eq(newPartitionsSet), any());
    assertTrue(taskManager.checkStateUpdater(time.milliseconds(), noOpResetter));
    assertThat(task00.state(), is(Task.State.RUNNING));
    verify(activeTaskCreator).createTasks(any(), eq(emptyMap()));
    verify(standbyTaskCreator).createTasks(emptyMap());
}
/**
 * Walks a newly assigned active task through its full lifecycle with the state
 * updater: creation on assignment, initialization and hand-off to the state
 * updater on the first {@code checkStateUpdater} call, then transition to
 * RUNNING once the state updater reports the task as restored.
 */
@Test
public void shouldAddNewActiveTasks() {
    // task in created state
    final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.CREATED)
        .withInputPartitions(taskId00Partitions)
        .build();
    final Map<TaskId, Set<TopicPartition>> assignment = taskId00Assignment;
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    // first, we need to handle assignment -- creates tasks and adds to pending initialization
    when(activeTaskCreator.createTasks(any(), eq(assignment))).thenReturn(singletonList(task00));
    taskManager.handleAssignment(assignment, emptyMap());
    verify(tasks).addPendingTasksToInit(singletonList(task00));
    // next, drain pending tasks, initialize them, and then add to stateupdater
    when(tasks.drainPendingTasksToInit()).thenReturn(Set.of(task00));
    taskManager.checkStateUpdater(time.milliseconds(), noOpResetter);
    verify(task00).initializeIfNeeded();
    verify(stateUpdater).add(task00);
    // last, drain the restored tasks from stateupdater and transition to running
    when(stateUpdater.restoresActiveTasks()).thenReturn(true);
    when(stateUpdater.drainRestoredActiveTasks(any(Duration.class))).thenReturn(Set.of(task00));
    taskManager.checkStateUpdater(time.milliseconds(), noOpResetter);
    verifyTransitionToRunningOfRestoredTask(Set.of(task00), tasks);
}
/**
 * If pending tasks fail to initialize, restoration is not complete and the
 * tasks are re-queued for initialization instead of being added to the state
 * updater. A {@link LockException} is treated as transient and silently
 * retried, whereas a {@link TimeoutException} is fed into the task's timeout
 * bookkeeping via {@code maybeInitTaskTimeoutOrThrow}.
 */
@Test
public void shouldNotCompleteRestorationIfTasksCannotInitialize() {
    final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .withInputPartitions(taskId00Partitions)
        .inState(State.CREATED)
        .build();
    final StreamTask task01 = statefulTask(taskId01, taskId01ChangelogPartitions)
        .withInputPartitions(taskId01Partitions)
        .inState(State.CREATED)
        .build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    final Map<TaskId, Set<TopicPartition>> assignment = mkMap(
        mkEntry(taskId00, taskId00Partitions),
        mkEntry(taskId01, taskId01Partitions)
    );
    when(activeTaskCreator.createTasks(any(), eq(assignment)))
        .thenReturn(asList(task00, task01));
    taskManager.handleAssignment(assignment, emptyMap());
    verify(tasks).addPendingTasksToInit(asList(task00, task01));
    when(tasks.drainPendingTasksToInit()).thenReturn(Set.of(task00, task01));
    // task00 fails with a (transient) lock exception, task01 with a timeout
    final LockException lockException = new LockException("can't lock");
    final TimeoutException timeoutException = new TimeoutException("timeout during init");
    doThrow(lockException).when(task00).initializeIfNeeded();
    doThrow(timeoutException).when(task01).initializeIfNeeded();
    when(tasks.hasPendingTasksToInit()).thenReturn(true);
    final boolean restorationComplete = taskManager.checkStateUpdater(time.milliseconds(), noOpResetter);
    assertFalse(restorationComplete);
    verify(task00).initializeIfNeeded();
    verify(task01).initializeIfNeeded();
    // only the timeout is recorded against the task's timeout bookkeeping
    verify(task00, never()).maybeInitTaskTimeoutOrThrow(anyLong(), any());
    verify(task01).maybeInitTaskTimeoutOrThrow(anyLong(), eq(timeoutException));
    verify(task00, never()).clearTaskTimeout();
    verify(task01, never()).clearTaskTimeout();
    // both tasks are re-queued for a later initialization attempt
    verify(tasks).addPendingTasksToInit(Collections.singleton(task00));
    verify(tasks).addPendingTasksToInit(Collections.singleton(task01));
    verify(stateUpdater, never()).add(task00);
    verify(stateUpdater, never()).add(task01);
    verifyNoInteractions(consumer);
}
/**
 * When a restored task throws a {@link TimeoutException} while completing its
 * restoration, restoration is not complete: the task is handed back to the
 * state updater for another attempt and is not registered as a running task.
 */
@Test
public void shouldNotCompleteRestorationIfTaskCannotCompleteRestoration() {
    final StreamTask restoringTask = statefulTask(taskId00, taskId00ChangelogPartitions)
        .withInputPartitions(taskId00Partitions)
        .inState(State.RESTORING)
        .build();
    final TasksRegistry tasksRegistry = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasksRegistry);
    when(stateUpdater.restoresActiveTasks()).thenReturn(true);
    when(stateUpdater.drainRestoredActiveTasks(any(Duration.class))).thenReturn(Set.of(restoringTask));
    // completing the restoration times out
    doThrow(new TimeoutException("timeout!")).when(restoringTask).completeRestoration(any());

    assertFalse(taskManager.checkStateUpdater(time.milliseconds(), noOpResetter));

    verify(restoringTask).completeRestoration(any());
    // the task goes back into the state updater rather than into the registry
    verify(stateUpdater).add(restoringTask);
    verify(tasksRegistry, never()).addTask(restoringTask);
    verifyNoInteractions(consumer);
}
/**
 * A revoked active task that needs committing must be committed (prepare +
 * post-commit with enforced checkpoint) and then suspended during
 * {@code handleRevocation}.
 */
@Test
public void shouldSuspendActiveTasksDuringRevocation() {
    final StreamTask activeTask = statefulTask(taskId00, taskId00ChangelogPartitions)
        .withInputPartitions(taskId00Partitions)
        .inState(State.RUNNING)
        .build();
    final TasksRegistry tasksRegistry = mock(TasksRegistry.class);
    when(tasksRegistry.allTasks()).thenReturn(Set.of(activeTask));
    when(activeTask.commitNeeded()).thenReturn(true);
    when(activeTask.prepareCommit(true)).thenReturn(singletonMap(t1p0, new OffsetAndMetadata(0L, null)));
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasksRegistry);

    taskManager.handleRevocation(taskId00Partitions);

    // committed first, then suspended
    verify(activeTask).prepareCommit(true);
    verify(activeTask).postCommit(true);
    verify(activeTask).suspend();
}
/**
 * EOS-v2: when partitions are revoked, the single shared transaction forces
 * ALL active tasks that need committing to be committed together — the revoked
 * task (post-committed with an enforced checkpoint and suspended) and any
 * unrevoked task that needs commit (post-committed without enforced
 * checkpoint, not suspended). Tasks without pending commits and standbys must
 * not take part in the commit at all.
 */
@Test
public void shouldCommitAllActiveTasksThatNeedCommittingOnHandleRevocationWithEosV2() {
    // task being revoked, needs commit
    final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .withInputPartitions(taskId00Partitions)
        .inState(State.RUNNING)
        .build();
    // unrevoked task that needs commit, this should also be committed with EOS-v2
    final StreamTask task01 = statefulTask(taskId01, taskId01ChangelogPartitions)
        .withInputPartitions(taskId01Partitions)
        .inState(State.RUNNING)
        .build();
    // unrevoked task that doesn't need commit, should not be committed
    final StreamTask task02 = statefulTask(taskId02, taskId02ChangelogPartitions)
        .withInputPartitions(taskId02Partitions)
        .inState(State.RUNNING)
        .build();
    // standby task should not be committed
    final StandbyTask task10 = standbyTask(taskId10, emptySet())
        .withInputPartitions(taskId10Partitions)
        .inState(State.RUNNING)
        .build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    when(tasks.allTasks()).thenReturn(Set.of(task00, task01, task02, task10));
    final StreamsProducer producer = mock(StreamsProducer.class);
    when(activeTaskCreator.streamsProducer()).thenReturn(producer);
    final ConsumerGroupMetadata groupMetadata = mock(ConsumerGroupMetadata.class);
    when(consumer.groupMetadata()).thenReturn(groupMetadata);
    final Map<TopicPartition, OffsetAndMetadata> offsets00 = singletonMap(t1p0, new OffsetAndMetadata(0L, null));
    when(task00.commitNeeded()).thenReturn(true);
    when(task00.prepareCommit(true)).thenReturn(offsets00);
    doNothing().when(task00).postCommit(anyBoolean());
    doNothing().when(task00).suspend();
    final Map<TopicPartition, OffsetAndMetadata> offsets01 = singletonMap(t1p1, new OffsetAndMetadata(1L, null));
    when(task01.commitNeeded()).thenReturn(true);
    when(task01.prepareCommit(true)).thenReturn(offsets01);
    doNothing().when(task01).postCommit(anyBoolean());
    // task02 does not need commit
    when(task02.commitNeeded()).thenReturn(false);
    // standby task should not take part in commit
    when(task10.commitNeeded()).thenReturn(false);
    // expected committed offsets, only task00 and task01 (both need commit)
    final Map<TopicPartition, OffsetAndMetadata> expectedCommittedOffsets = new HashMap<>();
    expectedCommittedOffsets.putAll(offsets00);
    expectedCommittedOffsets.putAll(offsets01);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.EXACTLY_ONCE_V2, tasks);
    taskManager.handleRevocation(taskId00Partitions);
    // Verify the commit transaction was called with offsets from task00 and task01
    verify(producer).commitTransaction(expectedCommittedOffsets, groupMetadata);
    // Verify task00 (revoked) was suspended and committed
    verify(task00).prepareCommit(true);
    verify(task00).postCommit(true);
    verify(task00).suspend();
    // Verify task01 (unrevoked but needs commit) was also committed
    verify(task01).prepareCommit(true);
    verify(task01).postCommit(false);
    // Verify task02 (doesn't need commit) was not committed
    verify(task02, never()).prepareCommit(anyBoolean());
    verify(task02, never()).postCommit(anyBoolean());
    // Verify standby task10 was not committed
    verify(task10, never()).prepareCommit(anyBoolean());
    verify(task10, never()).postCommit(anyBoolean());
}
/**
 * ALOS: on revocation, the revoked task's offsets are committed together with
 * the offsets of every unrevoked active task that needs committing (a single
 * {@code commitSync}). Only the revoked task is suspended; the unrevoked
 * committed task is post-committed without an enforced checkpoint and keeps
 * running; tasks without pending commits and standbys are untouched.
 */
@Test
public void shouldCommitAllNeededTasksOnHandleRevocation() {
    // revoked task that needs commit
    final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .withInputPartitions(taskId00Partitions)
        .inState(State.RUNNING)
        .build();
    final Map<TopicPartition, OffsetAndMetadata> offsets00 = singletonMap(t1p0, new OffsetAndMetadata(0L, null));
    when(task00.commitNeeded()).thenReturn(true);
    when(task00.prepareCommit(true)).thenReturn(offsets00);
    // non revoked task that needs commit
    final StreamTask task01 = statefulTask(taskId01, taskId01ChangelogPartitions)
        .withInputPartitions(taskId01Partitions)
        .inState(State.RUNNING)
        .build();
    final Map<TopicPartition, OffsetAndMetadata> offsets01 = singletonMap(t1p1, new OffsetAndMetadata(1L, null));
    when(task01.commitNeeded()).thenReturn(true);
    when(task01.prepareCommit(true)).thenReturn(offsets01);
    // non revoked task that does NOT need commit
    final StreamTask task02 = statefulTask(taskId02, taskId02ChangelogPartitions)
        .withInputPartitions(taskId02Partitions)
        .inState(State.RUNNING)
        .build();
    when(task02.commitNeeded()).thenReturn(false);
    // standby task (not be affected by revocation)
    final StandbyTask task03 = standbyTask(taskId03, taskId03ChangelogPartitions)
        .withInputPartitions(taskId03Partitions)
        .inState(State.RUNNING)
        .build();
    final Map<TopicPartition, OffsetAndMetadata> expectedCommittedOffsets = new HashMap<>();
    expectedCommittedOffsets.putAll(offsets00);
    expectedCommittedOffsets.putAll(offsets01);
    final TasksRegistry tasks = mock(TasksRegistry.class);
    when(tasks.allTasks()).thenReturn(Set.of(task00, task01, task02, task03));
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    taskManager.handleRevocation(taskId00Partitions);
    // both tasks needing commit had prepareCommit called
    verify(task00).prepareCommit(true);
    verify(task01).prepareCommit(true);
    verify(task02, never()).prepareCommit(anyBoolean());
    verify(task03, never()).prepareCommit(anyBoolean());
    // a single consumer commit carries the offsets of both tasks
    verify(consumer).commitSync(expectedCommittedOffsets);
    // revoked task suspended
    verify(task00).suspend();
    verify(task00).postCommit(true);
    // non-revoked task with commit was also post-committed (but not suspended)
    verify(task01).postCommit(false);
    verify(task01, never()).suspend();
    // task02 and task03 should not be affected
    verify(task02, never()).postCommit(anyBoolean());
    verify(task02, never()).suspend();
    verify(task03, never()).postCommit(anyBoolean());
    verify(task03, never()).suspend();
}
/**
 * For every processing mode: if none of the REVOKED tasks needs committing,
 * {@code handleRevocation} must not trigger any commit at all — not even for
 * unrevoked tasks that do have pending commits. Only the revoked task is
 * suspended.
 */
@ParameterizedTest
@EnumSource(ProcessingMode.class)
public void shouldNotCommitIfNoRevokedTasksNeedCommitting(final ProcessingMode processingMode) {
    // task00 being revoked, no commit needed
    final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .withInputPartitions(taskId00Partitions)
        .inState(State.RUNNING)
        .build();
    // task01 NOT being revoked, commit needed
    final StreamTask task01 = statefulTask(taskId01, taskId01ChangelogPartitions)
        .withInputPartitions(taskId01Partitions)
        .inState(State.RUNNING)
        .build();
    // task02 NOT being revoked, no commit needed
    final StreamTask task02 = statefulTask(taskId02, taskId02ChangelogPartitions)
        .withInputPartitions(taskId02Partitions)
        .inState(State.RUNNING)
        .build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    when(tasks.allTasks()).thenReturn(Set.of(task00, task01, task02));
    when(task00.commitNeeded()).thenReturn(false);
    when(task01.commitNeeded()).thenReturn(true); // only task01 needs commit
    when(task02.commitNeeded()).thenReturn(false);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(processingMode, tasks);
    taskManager.handleRevocation(taskId00Partitions);
    // no commit happens for any task, since the revoked task had nothing to commit
    verify(task00, never()).prepareCommit(anyBoolean());
    verify(task01, never()).prepareCommit(anyBoolean());
    verify(task02, never()).prepareCommit(anyBoolean());
    // only the revoked task is suspended
    verify(task00).suspend();
    verify(task01, never()).suspend();
    verify(task02, never()).suspend();
}
/**
 * When the new assignment keeps every currently owned task (one active in the
 * registry, one standby in the state updater), {@code handleAssignment} must
 * neither close nor commit any task — it only asks the creators for the (here
 * empty) set of genuinely new tasks.
 */
@Test
public void shouldNotCommitOnHandleAssignmentIfNoTaskClosed() {
    final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId00Partitions).build();
    final StandbyTask task01 = standbyTask(taskId01, taskId01ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId01Partitions).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    when(tasks.allNonFailedTasks()).thenReturn(Set.of(task00));
    when(stateUpdater.tasks()).thenReturn(Set.of(task01));
    // assignment is identical to what is already owned
    final Map<TaskId, Set<TopicPartition>> assignmentActive = singletonMap(taskId00, taskId00Partitions);
    final Map<TaskId, Set<TopicPartition>> assignmentStandby = singletonMap(taskId01, taskId01Partitions);
    taskManager.handleAssignment(assignmentActive, assignmentStandby);
    // active task stays in task manager
    verify(tasks, never()).removeTask(task00);
    verify(task00, never()).prepareCommit(anyBoolean());
    verify(task00, never()).postCommit(anyBoolean());
    // standby task not removed from state updater
    verify(stateUpdater, never()).remove(task01.id());
    verify(task01, never()).prepareCommit(anyBoolean());
    verify(task01, never()).postCommit(anyBoolean());
    verify(activeTaskCreator).createTasks(consumer, Collections.emptyMap());
    verify(standbyTaskCreator).createTasks(Collections.emptyMap());
}
// Dropping only a standby task from the assignment must close that standby cleanly
// (suspend + closeClean after removal from the state updater) without committing the
// retained active task — standby tasks have no consumed offsets to commit.
@Test
public void shouldNotCommitOnHandleAssignmentIfOnlyStandbyTaskClosed() {
    final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId00Partitions).build();
    final StandbyTask task01 = standbyTask(taskId01, taskId01ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId01Partitions).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    when(tasks.allNonFailedTasks()).thenReturn(Set.of(task00));
    when(stateUpdater.tasks()).thenReturn(Set.of(task01));
    // mock to remove standby task from state updater; the pre-completed future simulates
    // an immediately successful removal
    final CompletableFuture<StateUpdater.RemovedTaskResult> future = new CompletableFuture<>();
    when(stateUpdater.remove(task01.id())).thenReturn(future);
    future.complete(new StateUpdater.RemovedTaskResult(task01));
    final Map<TaskId, Set<TopicPartition>> assignmentActive = singletonMap(taskId00, taskId00Partitions);

    // empty standby assignment -> task01 is no longer owned
    taskManager.handleAssignment(assignmentActive, Collections.emptyMap());

    verify(task00, never()).prepareCommit(anyBoolean());
    verify(task00, never()).postCommit(anyBoolean());
    verify(stateUpdater).remove(task01.id());
    verify(task01).suspend();
    verify(task01).closeClean();
    verify(activeTaskCreator).createTasks(consumer, Collections.emptyMap());
    verify(standbyTaskCreator).createTasks(Collections.emptyMap());
}
// Tasks that are still in CREATED state (queued in pendingTasksToInit, never initialized)
// must never be committed — neither when their partitions are revoked nor when they are
// eventually closed because the follow-up assignment no longer contains them.
@Test
public void shouldNotCommitCreatedTasksOnRevocationOrClosure() {
    final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.CREATED)
        .withInputPartitions(taskId00Partitions)
        .build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    when(activeTaskCreator.createTasks(consumer, taskId00Assignment))
        .thenReturn(singletonList(task00));
    taskManager.handleAssignment(taskId00Assignment, emptyMap());
    verify(tasks).addPendingTasksToInit(singletonList(task00));

    // when handle revocation is called, the tasks in pendingTasksToInit are NOT affected
    // by revocation. They remain in the pending queue untouched
    taskManager.handleRevocation(taskId00Partitions);
    // tasks in pendingTasksToInit are not managed by handleRevocation
    verify(task00, never()).suspend();
    verify(task00, never()).prepareCommit(anyBoolean());

    when(tasks.drainPendingTasksToInit()).thenReturn(Set.of(task00));
    // this calls handleTasksPendingInitialization()
    // which drains pendingTasksToInit and closes those tasks
    taskManager.handleAssignment(emptyMap(), emptyMap());

    // close clean without ever being committed
    verify(task00).closeClean();
    verify(task00, never()).prepareCommit(anyBoolean());
}
// If suspending a revoked task blows up, handleRevocation must let the exception
// propagate to the caller rather than swallowing it.
@Test
public void shouldPassUpIfExceptionDuringSuspend() {
    final StreamTask failingTask = statefulTask(taskId00, taskId00ChangelogPartitions)
        .withInputPartitions(taskId00Partitions)
        .inState(State.RUNNING)
        .build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    when(tasks.allTasks()).thenReturn(Set.of(failingTask));
    doThrow(new RuntimeException("KABOOM!")).when(failingTask).suspend();
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);

    assertThrows(RuntimeException.class, () -> taskManager.handleRevocation(taskId00Partitions));

    // the suspend attempt was made exactly once before the exception surfaced
    verify(failingTask).suspend();
}
// Clean-shutdown exception propagation under at-least-once processing; the scenario
// itself lives in the shared helper below.
@Test
public void shouldCloseActiveTasksAndPropagateExceptionsOnCleanShutdownWithAlos() {
    shouldCloseActiveTasksAndPropagateExceptionsOnCleanShutdown(ProcessingMode.AT_LEAST_ONCE);
}
// Same clean-shutdown scenario under EOS-v2; a producer mock is needed because the
// EOS-v2 shutdown path accesses the streams producer through the active task creator.
@Test
public void shouldCloseActiveTasksAndPropagateExceptionsOnCleanShutdownWithExactlyOnceV2() {
    when(activeTaskCreator.streamsProducer()).thenReturn(mock(StreamsProducer.class));
    shouldCloseActiveTasksAndPropagateExceptionsOnCleanShutdown(ProcessingMode.EXACTLY_ONCE_V2);
}
// Shared scenario: three running active tasks, two of which fail to suspend during a
// clean shutdown (one with TaskMigratedException, one with a plain RuntimeException).
// Expected: failing tasks are retried (suspended again) and closed dirty, the healthy
// task is closed clean, the non-migration exception is the one propagated, and the
// task maps end up empty with creator and state updater shut down.
private void shouldCloseActiveTasksAndPropagateExceptionsOnCleanShutdown(final ProcessingMode processingMode) {
    final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId00Partitions).build();
    final StreamTask task01 = statefulTask(taskId01, taskId01ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId01Partitions).build();
    final StreamTask task02 = statefulTask(taskId02, taskId02ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId02Partitions).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(processingMode, tasks);
    doThrow(new TaskMigratedException("migrated", new RuntimeException("cause")))
        .when(task01).suspend();
    doThrow(new RuntimeException("oops"))
        .when(task02).suspend();
    when(tasks.activeTasks()).thenReturn(Set.of(task00, task01, task02));

    final RuntimeException exception = assertThrows(
        RuntimeException.class,
        () -> taskManager.shutdown(true)
    );
    // the plain RuntimeException (not the TaskMigratedException) is what surfaces
    assertThat(exception.getCause().getMessage(), is("oops"));

    // Verify tasks that threw exceptions were closed dirty
    verify(task00).prepareCommit(true);
    verify(task00).suspend();
    verify(task00).closeClean();
    verify(task01).prepareCommit(true);
    verify(task01, times(2)).suspend();
    verify(task01).closeDirty();
    verify(task02).prepareCommit(true);
    verify(task02, times(2)).suspend();
    verify(task02).closeDirty();
    assertThat(taskManager.activeTaskMap(), Matchers.anEmptyMap());
    assertThat(taskManager.standbyTaskMap(), Matchers.anEmptyMap());
    verify(activeTaskCreator).close();
    verify(stateUpdater).shutdown(Duration.ofMinutes(1L));
}
// If closing the active task creator (which owns the streams producer) throws during a
// clean shutdown, the active task must already have been fully committed and closed
// clean, and the creator's exception is the one propagated to the caller.
@Test
public void shouldCloseActiveTasksAndPropagateStreamsProducerExceptionsOnCleanShutdown() {
    final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId00Partitions)
        .build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    doThrow(new RuntimeException("whatever")).when(activeTaskCreator).close();
    when(tasks.activeTasks()).thenReturn(Set.of(task00));

    final RuntimeException exception = assertThrows(
        RuntimeException.class,
        () -> taskManager.shutdown(true)
    );
    assertThat(exception.getMessage(), is("whatever"));

    // the task itself went through the normal clean-close sequence before the failure
    verify(task00).prepareCommit(true);
    verify(task00).suspend();
    verify(task00).closeClean();
    assertThat(taskManager.activeTaskMap(), Matchers.anEmptyMap());
    assertThat(taskManager.standbyTaskMap(), Matchers.anEmptyMap());
    verify(activeTaskCreator).close();
    verify(stateUpdater).shutdown(Duration.ofMinutes(1L));
}
// If waiting for the state updater to hand back a task times out during shutdown,
// the task manager cannot close it cleanly and must fall back to a dirty close.
@SuppressWarnings("unchecked")
@Test
public void shouldCloseTasksIfStateUpdaterTimesOutOnRemove() throws Exception {
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, null, false);
    final Map<TaskId, Set<TopicPartition>> assignment = mkMap(
        mkEntry(taskId00, taskId00Partitions)
    );
    final Task task00 = spy(new StateMachineTask(taskId00, taskId00Partitions, true, stateManager));
    when(activeTaskCreator.createTasks(any(), eq(assignment))).thenReturn(singletonList(task00));
    taskManager.handleAssignment(assignment, emptyMap());
    when(stateUpdater.tasks()).thenReturn(singleton(task00));
    final CompletableFuture<StateUpdater.RemovedTaskResult> future = mock(CompletableFuture.class);
    when(stateUpdater.remove(eq(taskId00))).thenReturn(future);
    // simulate the removal future never completing within the wait timeout
    when(future.get(anyLong(), any())).thenThrow(new java.util.concurrent.TimeoutException());

    taskManager.shutdown(true);

    verify(task00).closeDirty();
}
// When a standby task removed from the state updater fails to suspend while being
// closed, the exception must propagate out of handleAssignment, the failing task is
// closed dirty, and the standby task that is still assigned remains untouched.
@Test
public void shouldPropagateSuspendExceptionWhenRevokingStandbyTask() {
    final StandbyTask task00 = standbyTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId00Partitions)
        .build();
    final StandbyTask task01 = standbyTask(taskId01, taskId01ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId01Partitions)
        .build();
    doThrow(new RuntimeException("task 0_1 suspend boom!")).when(task01).suspend();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.EXACTLY_ONCE_V2, tasks);
    when(stateUpdater.tasks()).thenReturn(Set.of(task00, task01));

    // task01 is revoked, task00 stays
    final CompletableFuture<StateUpdater.RemovedTaskResult> futureTask01 = new CompletableFuture<>();
    when(stateUpdater.remove(task01.id())).thenReturn(futureTask01);
    futureTask01.complete(new StateUpdater.RemovedTaskResult(task01));

    final RuntimeException thrown = assertThrows(RuntimeException.class,
        () -> taskManager.handleAssignment(
            Collections.emptyMap(),
            singletonMap(taskId00, taskId00Partitions)
        ));

    assertThat(thrown.getCause().getMessage(), is("task 0_1 suspend boom!"));
    // failing suspend is retried once, then the task is closed dirty
    verify(task01, times(2)).suspend();
    verify(task01).closeDirty();
    // the still-assigned standby stays in the state updater, completely untouched
    verify(stateUpdater, never()).remove(task00.id());
    verify(task00, never()).suspend();
    verify(task00, never()).prepareCommit(anyBoolean());
    verify(task00, never()).closeClean();
    verify(task00, never()).closeDirty();
}
// Even if one revoked task throws during suspend, every other revoked task must still
// be suspended before the exception is propagated; non-revoked tasks stay untouched.
@Test
public void shouldSuspendAllRevokedActiveTasksAndPropagateSuspendException() {
    // will not be revoked
    final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId00Partitions).build();

    // will be revoked and throws exception during suspend
    final StreamTask task01 = statefulTask(taskId01, taskId01ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId01Partitions).build();
    doThrow(new RuntimeException("task 0_1 suspend boom!")).when(task01).suspend();

    // will be revoked with no exception
    final StreamTask task02 = statefulTask(taskId02, taskId02ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId02Partitions).build();

    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    when(tasks.allTasks()).thenReturn(Set.of(task00, task01, task02));

    final RuntimeException thrown = assertThrows(RuntimeException.class,
        () -> taskManager.handleRevocation(union(HashSet::new, taskId01Partitions, taskId02Partitions)));
    assertThat(thrown.getCause().getMessage(), is("task 0_1 suspend boom!"));

    verify(task01).suspend();
    verify(task02).suspend();
    verify(task00, never()).suspend();
    verifyNoInteractions(activeTaskCreator);
}
// An unclean shutdown must make best-effort progress: suspend exceptions and the
// creator-close exception are all swallowed, every active task is closed dirty
// (never clean), the registry is cleared, and the state updater is still shut down.
@Test
public void shouldCloseActiveTasksAndIgnoreExceptionsOnUncleanShutdown() {
    final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId00Partitions)
        .build();
    final StreamTask task01 = statefulTask(taskId01, taskId01ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId01Partitions)
        .build();
    final StreamTask task02 = statefulTask(taskId02, taskId02ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId02Partitions)
        .build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    doThrow(new TaskMigratedException("migrated", new RuntimeException("cause")))
        .when(task01).suspend();
    doThrow(new RuntimeException("oops"))
        .when(task02).suspend();
    doThrow(new RuntimeException("whatever")).when(activeTaskCreator).close();
    when(tasks.allTasks()).thenReturn(Set.of(task00, task01, task02));
    when(tasks.activeTasks()).thenReturn(Set.of(task00, task01, task02));

    // unclean shutdown: none of the stubbed exceptions may escape
    taskManager.shutdown(false);

    verify(task00).prepareCommit(false);
    verify(task00).suspend();
    verify(task00).closeDirty();
    verify(task00, never()).closeClean();
    verify(task01).prepareCommit(false);
    verify(task01).suspend();
    verify(task01).closeDirty();
    verify(task01, never()).closeClean();
    verify(task02).prepareCommit(false);
    verify(task02).suspend();
    verify(task02).closeDirty();
    verify(task02, never()).closeClean();
    verify(tasks).clear();

    // the active task creator should also get closed (so that it closes the thread producer if applicable)
    verify(activeTaskCreator).close();
    verify(stateUpdater).shutdown(Duration.ofMinutes(1L));
}
// A clean shutdown must pull standby tasks out of the state updater, commit them
// (prepareCommit/postCommit), suspend them, and close them clean — all without ever
// touching the main consumer, since standby tasks commit no consumer offsets.
@Test
public void shouldCloseStandbyTasksOnShutdown() {
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final StandbyTask standbyTask00 = standbyTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId00Partitions)
        .build();
    // first call sees the standby in the updater, second call sees it drained
    when(stateUpdater.tasks()).thenReturn(Set.of(standbyTask00)).thenReturn(Set.of());
    when(stateUpdater.standbyTasks()).thenReturn(Set.of(standbyTask00));
    final CompletableFuture<StateUpdater.RemovedTaskResult> futureForStandbyTask = new CompletableFuture<>();
    when(stateUpdater.remove(taskId00)).thenReturn(futureForStandbyTask);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    futureForStandbyTask.complete(new StateUpdater.RemovedTaskResult(standbyTask00)); // simulate successful removal

    taskManager.shutdown(true);

    verify(stateUpdater).shutdown(Duration.ofMinutes(1L));
    verify(tasks).addTask(standbyTask00);
    verify(standbyTask00).prepareCommit(true);
    verify(standbyTask00).postCommit(true);
    verify(standbyTask00).suspend();
    verify(standbyTask00).closeClean();
    // the active task creator should also get closed (so that it closes the thread producer if applicable)
    verify(activeTaskCreator).close();
    verifyNoInteractions(consumer);
}
// Tasks that already failed inside the state updater are drained on shutdown and must
// be closed dirty (prepareCommit(false) + suspend + closeDirty) rather than clean.
@Test
public void shouldShutDownStateUpdaterAndCloseFailedTasksDirty() {
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final StreamTask failedStatefulTask = statefulTask(taskId01, taskId01ChangelogPartitions)
        .inState(State.RESTORING).build();
    final StandbyTask failedStandbyTask = standbyTask(taskId02, taskId02ChangelogPartitions)
        .inState(State.RUNNING).build();
    // first drain returns both failed tasks; subsequent drains are empty
    when(stateUpdater.drainExceptionsAndFailedTasks())
        .thenReturn(Arrays.asList(
            new ExceptionAndTask(new RuntimeException(), failedStatefulTask),
            new ExceptionAndTask(new RuntimeException(), failedStandbyTask))
        )
        .thenReturn(Collections.emptyList());
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);

    taskManager.shutdown(true);

    verify(activeTaskCreator).close();
    verify(stateUpdater).shutdown(Duration.ofMinutes(1L));
    verify(failedStatefulTask).prepareCommit(false);
    verify(failedStatefulTask).suspend();
    verify(failedStatefulTask).closeDirty();
}
// Shutting down the task manager must also shut down the scheduling task manager
// (enabled via the boolean flag on the setup helper) with its 5-minute timeout.
@Test
public void shouldShutdownSchedulingTaskManager() {
    final TaskManager taskManager =
        setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, mock(TasksRegistry.class), true);

    taskManager.shutdown(true);

    verify(schedulingTaskManager).shutdown(Duration.ofMinutes(5L));
}
// Shutdown removal matrix: tasks whose removal futures complete successfully are handed
// back to the registry; tasks whose futures complete with a failed RemovedTaskResult,
// and tasks whose futures complete exceptionally, must all be closed dirty.
@Test
public void shouldShutDownStateUpdaterAndCloseDirtyTasksFailedDuringRemoval() {
    final TasksRegistry tasks = mock(TasksRegistry.class);
    // removed successfully
    final StreamTask removedStatefulTask = statefulTask(taskId01, taskId01ChangelogPartitions)
        .inState(State.RESTORING).build();
    final StandbyTask removedStandbyTask = standbyTask(taskId02, taskId02ChangelogPartitions)
        .inState(State.RUNNING).build();
    // removal result carries a failure
    final StreamTask removedFailedStatefulTask = statefulTask(taskId03, taskId03ChangelogPartitions)
        .inState(State.RESTORING).build();
    final StandbyTask removedFailedStandbyTask = standbyTask(taskId04, taskId04ChangelogPartitions)
        .inState(State.RUNNING).build();
    // removal future itself completes exceptionally
    final StreamTask removedFailedStatefulTaskDuringRemoval = statefulTask(taskId05, taskId05ChangelogPartitions)
        .inState(State.RESTORING).build();
    final StandbyTask removedFailedStandbyTaskDuringRemoval = standbyTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.RUNNING).build();
    when(stateUpdater.tasks())
        .thenReturn(Set.of(
            removedStatefulTask,
            removedStandbyTask,
            removedFailedStatefulTask,
            removedFailedStandbyTask,
            removedFailedStatefulTaskDuringRemoval,
            removedFailedStandbyTaskDuringRemoval)
        ).thenReturn(Collections.emptySet());
    final CompletableFuture<StateUpdater.RemovedTaskResult> futureForRemovedStatefulTask = new CompletableFuture<>();
    final CompletableFuture<StateUpdater.RemovedTaskResult> futureForRemovedStandbyTask = new CompletableFuture<>();
    final CompletableFuture<StateUpdater.RemovedTaskResult> futureForRemovedFailedStatefulTask = new CompletableFuture<>();
    final CompletableFuture<StateUpdater.RemovedTaskResult> futureForRemovedFailedStandbyTask = new CompletableFuture<>();
    final CompletableFuture<StateUpdater.RemovedTaskResult> futureForRemovedFailedStatefulTaskDuringRemoval = new CompletableFuture<>();
    final CompletableFuture<StateUpdater.RemovedTaskResult> futureForRemovedFailedStandbyTaskDuringRemoval = new CompletableFuture<>();
    when(stateUpdater.remove(removedStatefulTask.id())).thenReturn(futureForRemovedStatefulTask);
    when(stateUpdater.remove(removedStandbyTask.id())).thenReturn(futureForRemovedStandbyTask);
    when(stateUpdater.remove(removedFailedStatefulTask.id())).thenReturn(futureForRemovedFailedStatefulTask);
    when(stateUpdater.remove(removedFailedStandbyTask.id())).thenReturn(futureForRemovedFailedStandbyTask);
    when(stateUpdater.remove(removedFailedStatefulTaskDuringRemoval.id()))
        .thenReturn(futureForRemovedFailedStatefulTaskDuringRemoval);
    when(stateUpdater.remove(removedFailedStandbyTaskDuringRemoval.id()))
        .thenReturn(futureForRemovedFailedStandbyTaskDuringRemoval);
    // tasks whose futures completed exceptionally surface via the failed-task drain
    when(stateUpdater.drainExceptionsAndFailedTasks())
        .thenReturn(Arrays.asList(
            new ExceptionAndTask(new StreamsException("KABOOM!"), removedFailedStatefulTaskDuringRemoval),
            new ExceptionAndTask(new StreamsException("KABOOM!"), removedFailedStandbyTaskDuringRemoval))
        ).thenReturn(Collections.emptyList());
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    futureForRemovedStatefulTask.complete(new StateUpdater.RemovedTaskResult(removedStatefulTask));
    futureForRemovedStandbyTask.complete(new StateUpdater.RemovedTaskResult(removedStandbyTask));
    futureForRemovedFailedStatefulTask
        .complete(new StateUpdater.RemovedTaskResult(removedFailedStatefulTask, new StreamsException("KABOOM!")));
    futureForRemovedFailedStandbyTask
        .complete(new StateUpdater.RemovedTaskResult(removedFailedStandbyTask, new StreamsException("KABOOM!")));
    futureForRemovedFailedStatefulTaskDuringRemoval
        .completeExceptionally(new StreamsException("KABOOM!"));
    futureForRemovedFailedStandbyTaskDuringRemoval
        .completeExceptionally(new StreamsException("KABOOM!"));

    taskManager.shutdown(true);

    verify(stateUpdater).shutdown(Duration.ofMinutes(1L));
    // successfully removed tasks are re-registered for the normal clean-close path
    verify(tasks).addTask(removedStatefulTask);
    verify(tasks).addTask(removedStandbyTask);
    // every flavor of failed removal ends in a dirty close
    verify(removedFailedStatefulTask).prepareCommit(false);
    verify(removedFailedStatefulTask).suspend();
    verify(removedFailedStatefulTask).closeDirty();
    verify(removedFailedStandbyTask).prepareCommit(false);
    verify(removedFailedStandbyTask).suspend();
    verify(removedFailedStandbyTask).closeDirty();
    verify(removedFailedStatefulTaskDuringRemoval).prepareCommit(false);
    verify(removedFailedStatefulTaskDuringRemoval).suspend();
    verify(removedFailedStatefulTaskDuringRemoval).closeDirty();
    verify(removedFailedStandbyTaskDuringRemoval).prepareCommit(false);
    verify(removedFailedStandbyTaskDuringRemoval).suspend();
    verify(removedFailedStandbyTaskDuringRemoval).closeDirty();
}
// Newly assigned standby tasks first land in the pending-init queue; the next
// checkStateUpdater pass drains that queue, initializes each task, and hands it to
// the state updater for restoration — without touching the main consumer.
@Test
public void shouldInitializeNewStandbyTasks() {
    final StandbyTask task01 = standbyTask(taskId01, taskId01ChangelogPartitions)
        .inState(State.CREATED)
        .withInputPartitions(taskId01Partitions)
        .build();
    final Map<TaskId, Set<TopicPartition>> assignment = taskId01Assignment;
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
    when(standbyTaskCreator.createTasks(assignment)).thenReturn(singletonList(task01));

    taskManager.handleAssignment(emptyMap(), assignment);
    verify(tasks).addPendingTasksToInit(singletonList(task01));

    when(tasks.drainPendingTasksToInit()).thenReturn(Set.of(task01));
    taskManager.checkStateUpdater(time.milliseconds(), noOpResetter);

    verify(task01).initializeIfNeeded();
    verify(stateUpdater).add(task01);
    verifyNoInteractions(consumer);
}
// The rebalanceInProgress flag must flip on at handleRebalanceStart and off at
// handleRebalanceComplete, and starting a rebalance must pause the consumer's
// current assignment.
@Test
public void shouldHandleRebalanceEvents() {
    when(consumer.assignment()).thenReturn(assignment);
    when(stateDirectory.listNonEmptyTaskDirectories()).thenReturn(new ArrayList<>());
    assertThat(taskManager.rebalanceInProgress(), is(false));
    taskManager.handleRebalanceStart(emptySet());
    assertThat(taskManager.rebalanceInProgress(), is(true));
    taskManager.handleRebalanceComplete();
    assertThat(taskManager.rebalanceInProgress(), is(false));
    verify(consumer).pause(assignment);
}
// commitAll must commit both active and standby tasks that report commitNeeded,
// returning the number of committed tasks; only the active task's (non-empty)
// offsets are flushed to the consumer.
@Test
public void shouldCommitActiveAndStandbyTasks() {
    final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .withInputPartitions(taskId00Partitions)
        .inState(State.RUNNING)
        .build();
    final Map<TopicPartition, OffsetAndMetadata> offsets = singletonMap(t1p0, new OffsetAndMetadata(0L, null));
    final StandbyTask task01 = standbyTask(taskId01, taskId01ChangelogPartitions)
        .withInputPartitions(taskId01Partitions)
        .inState(State.RUNNING)
        .build();
    when(task00.commitNeeded()).thenReturn(true);
    when(task00.prepareCommit(true)).thenReturn(offsets);
    when(task01.commitNeeded()).thenReturn(true);
    when(task01.prepareCommit(true)).thenReturn(emptyMap()); // standby: no consumer offsets
    final TasksRegistry tasks = mock(TasksRegistry.class);
    when(tasks.allTasks()).thenReturn(Set.of(task00, task01));
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);

    assertThat(taskManager.commitAll(), equalTo(2));

    verify(task00, times(2)).commitNeeded();
    verify(task00).prepareCommit(true);
    verify(task00).postCommit(false);
    verify(task01, times(2)).commitNeeded();
    verify(task01).prepareCommit(true);
    verify(task01).postCommit(false);
    verify(consumer).commitSync(offsets);
}
// commit(tasks) must only commit the tasks in the provided subset that actually need
// a commit; tasks outside the subset — even those needing a commit — are untouched,
// and the return value counts only the tasks that were committed.
@Test
public void shouldCommitProvidedTasksIfNeeded() {
    // active tasks: 00 (in subset, needs commit), 01 (outside subset, needs commit),
    //               02 (in subset, no commit needed)
    final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .withInputPartitions(taskId00Partitions)
        .inState(State.RUNNING)
        .build();
    final Map<TopicPartition, OffsetAndMetadata> offsetsTask00 = singletonMap(t1p0, new OffsetAndMetadata(0L, null));
    final StreamTask task01 = statefulTask(taskId01, taskId01ChangelogPartitions)
        .withInputPartitions(taskId01Partitions)
        .inState(State.RUNNING)
        .build();
    final Map<TopicPartition, OffsetAndMetadata> offsetsTask01 = singletonMap(t1p1, new OffsetAndMetadata(1L, null));
    final StreamTask task02 = statefulTask(taskId02, taskId02ChangelogPartitions)
        .withInputPartitions(taskId02Partitions)
        .inState(State.RUNNING)
        .build();
    // standby tasks: 03 (in subset, needs commit), 04 (outside subset, needs commit),
    //                05 (in subset, no commit needed)
    final StandbyTask task03 = standbyTask(taskId03, taskId03ChangelogPartitions)
        .withInputPartitions(taskId03Partitions)
        .inState(State.RUNNING)
        .build();
    final StandbyTask task04 = standbyTask(taskId04, taskId04ChangelogPartitions)
        .withInputPartitions(taskId04Partitions)
        .inState(State.RUNNING)
        .build();
    final StandbyTask task05 = standbyTask(taskId05, taskId05ChangelogPartitions)
        .withInputPartitions(taskId05Partitions)
        .inState(State.RUNNING)
        .build();
    when(task00.commitNeeded()).thenReturn(true);
    when(task00.prepareCommit(true)).thenReturn(offsetsTask00);
    when(task01.commitNeeded()).thenReturn(true);
    when(task01.prepareCommit(true)).thenReturn(offsetsTask01);
    when(task02.commitNeeded()).thenReturn(false);
    when(task03.commitNeeded()).thenReturn(true);
    when(task03.prepareCommit(true)).thenReturn(emptyMap());
    when(task04.commitNeeded()).thenReturn(true);
    when(task04.prepareCommit(true)).thenReturn(emptyMap());
    when(task05.commitNeeded()).thenReturn(false);
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);

    // only task00 and task03 are both in the subset and in need of a commit
    assertThat(taskManager.commit(Set.of(task00, task02, task03, task05)), equalTo(2));

    verify(task00, times(2)).commitNeeded();
    verify(task00).prepareCommit(true);
    verify(task00).postCommit(false);
    verify(task01, never()).prepareCommit(anyBoolean());
    verify(task01, never()).postCommit(anyBoolean());
    verify(task02, atLeastOnce()).commitNeeded();
    verify(task02, never()).prepareCommit(anyBoolean());
    verify(task02, never()).postCommit(anyBoolean());
    verify(task03, times(2)).commitNeeded();
    verify(task03).prepareCommit(true);
    verify(task03).postCommit(false);
    verify(task04, never()).prepareCommit(anyBoolean());
    verify(task04, never()).postCommit(anyBoolean());
    verify(task05, atLeastOnce()).commitNeeded();
    verify(task05, never()).prepareCommit(anyBoolean());
    verify(task05, never()).postCommit(anyBoolean());
    // only the in-subset active task's offsets reach the consumer
    verify(consumer).commitSync(offsetsTask00);
}
// A standby-only assignment still runs the commit cycle (so changelogs get flushed via
// prepareCommit/postCommit) but must never commit offsets through the consumer, since
// standby tasks consume no input partitions.
@Test
public void shouldNotCommitOffsetsIfOnlyStandbyTasksAssigned() {
    final StandbyTask task00 = standbyTask(taskId00, taskId00ChangelogPartitions)
        .withInputPartitions(taskId00Partitions)
        .inState(State.RUNNING)
        .build();
    when(task00.commitNeeded()).thenReturn(true);
    when(task00.prepareCommit(true)).thenReturn(emptyMap());
    final TasksRegistry tasks = mock(TasksRegistry.class);
    when(tasks.allTasks()).thenReturn(Set.of(task00));
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);

    assertThat(taskManager.commitAll(), equalTo(1));

    verify(task00, times(2)).commitNeeded();
    verify(task00).prepareCommit(true);
    verify(task00).postCommit(false);
    verify(consumer, never()).commitSync(any(Map.class));
}
// While a rebalance is in progress, both commitAll and the user-requested commit path
// must do nothing and return the -1 sentinel, even when tasks report commitNeeded.
@Test
public void shouldNotCommitActiveAndStandbyTasksWhileRebalanceInProgress() {
    final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .withInputPartitions(taskId00Partitions)
        .inState(State.RUNNING)
        .build();
    final StandbyTask task01 = standbyTask(taskId01, taskId01ChangelogPartitions)
        .withInputPartitions(taskId01Partitions)
        .inState(State.RUNNING)
        .build();
    when(task00.commitNeeded()).thenReturn(true);
    when(task01.commitNeeded()).thenReturn(true);
    final TasksRegistry tasks = mock(TasksRegistry.class);
    when(tasks.allTasks()).thenReturn(Set.of(task00, task01));
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);

    taskManager.handleRebalanceStart(emptySet());

    assertThat(
        taskManager.commitAll(),
        equalTo(-1) // sentinel indicating that nothing was done because a rebalance is in progress
    );

    assertThat(
        taskManager.maybeCommitActiveTasksPerUserRequested(),
        equalTo(-1) // sentinel indicating that nothing was done because a rebalance is in progress
    );
}
// With EOS disabled (at-least-once), offsets prepared by a task's commit must be
// committed synchronously through the main consumer.
@Test
public void shouldCommitViaConsumerIfEosDisabled() {
    final StreamTask runningTask = statefulTask(taskId01, taskId01ChangelogPartitions)
        .withInputPartitions(taskId01Partitions)
        .inState(State.RUNNING)
        .build();
    final Map<TopicPartition, OffsetAndMetadata> expectedOffsets =
        singletonMap(t1p1, new OffsetAndMetadata(0L, null));
    when(runningTask.commitNeeded()).thenReturn(true);
    when(runningTask.prepareCommit(true)).thenReturn(expectedOffsets);
    final TasksRegistry tasks = mock(TasksRegistry.class);
    when(tasks.allTasks()).thenReturn(Set.of(runningTask));
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);

    // exactly one task committed
    assertThat(taskManager.commitAll(), equalTo(1));

    verify(runningTask, times(2)).commitNeeded();
    verify(runningTask).prepareCommit(true);
    verify(runningTask).postCommit(false);
    // the consumer (not a transactional producer) carries the offset commit
    verify(consumer).commitSync(expectedOffsets);
}
// Under EOS-v2 all tasks' offsets are merged and committed in a single producer
// transaction (commitTransaction with the consumer group metadata); the consumer's
// commitSync path must not be used.
@Test
public void shouldCommitViaProducerIfEosV2Enabled() {
    final StreamTask task01 = statefulTask(taskId01, taskId01ChangelogPartitions)
        .withInputPartitions(taskId01Partitions)
        .inState(State.RUNNING)
        .build();
    final StreamTask task02 = statefulTask(taskId02, taskId02ChangelogPartitions)
        .withInputPartitions(taskId02Partitions)
        .inState(State.RUNNING)
        .build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    when(tasks.allTasks()).thenReturn(Set.of(task01, task02));

    final StreamsProducer producer = mock(StreamsProducer.class);
    when(activeTaskCreator.streamsProducer()).thenReturn(producer);
    final Map<TopicPartition, OffsetAndMetadata> offsetsT01 = singletonMap(t1p1, new OffsetAndMetadata(0L, null));
    final Map<TopicPartition, OffsetAndMetadata> offsetsT02 = singletonMap(t1p2, new OffsetAndMetadata(1L, null));
    // both tasks' offsets are expected in one combined transaction
    final Map<TopicPartition, OffsetAndMetadata> allOffsets = new HashMap<>();
    allOffsets.putAll(offsetsT01);
    allOffsets.putAll(offsetsT02);

    when(task01.commitNeeded()).thenReturn(true);
    when(task01.prepareCommit(true)).thenReturn(offsetsT01);
    doNothing().when(task01).postCommit(false);
    when(task02.commitNeeded()).thenReturn(true);
    when(task02.prepareCommit(true)).thenReturn(offsetsT02);
    doNothing().when(task02).postCommit(false);

    final ConsumerGroupMetadata groupMetadata = mock(ConsumerGroupMetadata.class);
    when(consumer.groupMetadata()).thenReturn(groupMetadata);

    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.EXACTLY_ONCE_V2, tasks);

    taskManager.commitAll();

    verify(producer).commitTransaction(allOffsets, groupMetadata);
    verify(task01, times(2)).commitNeeded();
    verify(task01).prepareCommit(true);
    verify(task01).postCommit(false);
    verify(task02, times(2)).commitNeeded();
    verify(task02).prepareCommit(true);
    verify(task02).postCommit(false);
    verifyNoMoreInteractions(producer);
}
// An exception thrown while preparing the commit of an active task must escape
// commitAll unchanged.
@Test
public void shouldPropagateExceptionFromActiveCommit() {
    final StreamTask failingTask = statefulTask(taskId00, taskId00ChangelogPartitions)
        .withInputPartitions(taskId00Partitions)
        .inState(State.RUNNING)
        .build();
    when(failingTask.commitNeeded()).thenReturn(true);
    when(failingTask.prepareCommit(true)).thenThrow(new RuntimeException("opsh."));
    final TasksRegistry tasks = mock(TasksRegistry.class);
    when(tasks.allTasks()).thenReturn(Set.of(failingTask));
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);

    final RuntimeException thrown = assertThrows(RuntimeException.class, taskManager::commitAll);

    assertThat(thrown.getMessage(), equalTo("opsh."));
    verify(failingTask).commitNeeded();
    verify(failingTask).prepareCommit(true);
}
// An exception thrown while preparing the commit of a standby task must escape
// commitAll unchanged, just like the active-task case above.
@Test
public void shouldPropagateExceptionFromStandbyCommit() {
    final StandbyTask task01 = standbyTask(taskId01, taskId01ChangelogPartitions)
        .withInputPartitions(taskId01Partitions)
        .inState(State.RUNNING)
        .build();
    when(task01.commitNeeded()).thenReturn(true);
    when(task01.prepareCommit(true)).thenThrow(new RuntimeException("opsh."));
    final TasksRegistry tasks = mock(TasksRegistry.class);
    when(tasks.allTasks()).thenReturn(Set.of(task01));
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);

    // method reference instead of the redundant lambda, matching
    // shouldPropagateExceptionFromActiveCommit
    final RuntimeException thrown =
        assertThrows(RuntimeException.class, taskManager::commitAll);

    assertThat(thrown.getMessage(), equalTo("opsh."));
    verify(task01).commitNeeded();
    verify(task01).prepareCommit(true);
}
// maybePurgeCommittedRecords must issue one deleteRecords call per non-empty set of
// purgeable offsets, in order, and skip the call entirely when a task reports nothing
// to purge.
@Test
public void shouldSendPurgeData() {
    // each deleteRecords call completes immediately so the next purge is not blocked
    when(adminClient.deleteRecords(singletonMap(t1p1, RecordsToDelete.beforeOffset(5L))))
        .thenReturn(new DeleteRecordsResult(singletonMap(t1p1, completedFuture())));
    when(adminClient.deleteRecords(singletonMap(t1p1, RecordsToDelete.beforeOffset(17L))))
        .thenReturn(new DeleteRecordsResult(singletonMap(t1p1, completedFuture())));

    final InOrder inOrder = inOrder(adminClient);

    final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .withInputPartitions(taskId00Partitions)
        .inState(State.RUNNING)
        .build();
    // successive polls: nothing purgeable, then offset 5, then offset 17
    when(task00.purgeableOffsets())
        .thenReturn(new HashMap<>())
        .thenReturn(singletonMap(t1p1, 5L))
        .thenReturn(singletonMap(t1p1, 17L));
    final TasksRegistry tasks = mock(TasksRegistry.class);
    when(tasks.allTasks()).thenReturn(Set.of(task00));
    final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);

    taskManager.maybePurgeCommittedRecords(); // no-op
    taskManager.maybePurgeCommittedRecords(); // sends purge for offset 5L
    taskManager.maybePurgeCommittedRecords(); // sends purge for offset 17L

    inOrder.verify(adminClient).deleteRecords(singletonMap(t1p1, RecordsToDelete.beforeOffset(5L)));
    inOrder.verify(adminClient).deleteRecords(singletonMap(t1p1, RecordsToDelete.beforeOffset(17L)));
    inOrder.verifyNoMoreInteractions();
}
    @Test
    public void shouldNotSendPurgeDataIfPreviousNotDone() {
        // While a previous deleteRecords request is still in flight (future never
        // completed), maybePurgeCommittedRecords() must not send a new request.
        final KafkaFutureImpl<DeletedRecords> futureDeletedRecords = new KafkaFutureImpl<>();
        when(adminClient.deleteRecords(singletonMap(t1p1, RecordsToDelete.beforeOffset(5L))))
            .thenReturn(new DeleteRecordsResult(singletonMap(t1p1, futureDeletedRecords)));
        final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
            .withInputPartitions(taskId00Partitions)
            .inState(State.RUNNING)
            .build();
        when(task00.purgeableOffsets())
            .thenReturn(new HashMap<>())
            .thenReturn(singletonMap(t1p1, 5L))
            .thenReturn(singletonMap(t1p1, 17L))
        final TasksRegistry tasks = mock(TasksRegistry.class);
        when(tasks.allTasks()).thenReturn(Set.of(task00));
        final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
        taskManager.maybePurgeCommittedRecords();
        taskManager.maybePurgeCommittedRecords();
        // this call should be a no-op.
        // because the previous deleteRecords request
        // has not completed yet, so no new request is sent.
        taskManager.maybePurgeCommittedRecords();
    }
    @Test
    public void shouldIgnorePurgeDataErrors() {
        // Purging committed records is best-effort: a failed deleteRecords request
        // must not throw out of maybePurgeCommittedRecords() nor block later purges.
        final KafkaFutureImpl<DeletedRecords> futureDeletedRecords = new KafkaFutureImpl<>();
        final DeleteRecordsResult deleteRecordsResult = new DeleteRecordsResult(singletonMap(t1p1, futureDeletedRecords));
        futureDeletedRecords.completeExceptionally(new Exception("KABOOM!"));
        when(adminClient.deleteRecords(any())).thenReturn(deleteRecordsResult);
        final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
            .withInputPartitions(taskId00Partitions)
            .inState(State.RUNNING)
            .build();
        when(task00.purgeableOffsets()).thenReturn(singletonMap(t1p1, 5L));
        final TasksRegistry tasks = mock(TasksRegistry.class);
        when(tasks.allTasks()).thenReturn(Set.of(task00));
        final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
        // Neither call should throw despite the exceptionally-completed future.
        taskManager.maybePurgeCommittedRecords();
        taskManager.maybePurgeCommittedRecords();
    }
    @Test
    public void shouldMaybeCommitAllActiveTasksThatNeedCommit() {
        // When at least one task has BOTH commitRequested and commitNeeded,
        // maybeCommitActiveTasksPerUserRequested() commits every running task
        // with commitNeeded and returns the number of committed tasks.
        final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
            .withInputPartitions(taskId00Partitions)
            .inState(State.RUNNING)
            .build();
        final Map<TopicPartition, OffsetAndMetadata> offsets0 = singletonMap(t1p0, new OffsetAndMetadata(0L, null));
        final StreamTask task01 = statefulTask(taskId01, taskId01ChangelogPartitions)
            .withInputPartitions(taskId01Partitions)
            .inState(State.RUNNING)
            .build();
        final Map<TopicPartition, OffsetAndMetadata> offsets1 = singletonMap(t1p1, new OffsetAndMetadata(1L, null));
        final StreamTask task02 = statefulTask(taskId02, taskId02ChangelogPartitions)
            .withInputPartitions(taskId02Partitions)
            .inState(State.RUNNING)
            .build();
        final StreamTask task03 = statefulTask(taskId03, taskId03ChangelogPartitions)
            .withInputPartitions(taskId03Partitions)
            .inState(State.RUNNING)
            .build();
        // for task00 both commitRequested AND commitNeeded - so it should trigger commit
        when(task00.commitRequested()).thenReturn(true);
        when(task00.commitNeeded()).thenReturn(true);
        when(task00.prepareCommit(true)).thenReturn(offsets0);
        // for task01 only commitNeeded (no commitRequested) so it gets committed when triggered
        when(task01.commitRequested()).thenReturn(false);
        when(task01.commitNeeded()).thenReturn(true);
        when(task01.prepareCommit(true)).thenReturn(offsets1);
        // for task02 only commitRequested (no commitNeeded), so does not get committed
        when(task02.commitRequested()).thenReturn(true);
        when(task02.commitNeeded()).thenReturn(false);
        // for task03 both commitRequested AND commitNeeded, so should trigger commit
        when(task03.commitRequested()).thenReturn(true);
        when(task03.commitNeeded()).thenReturn(true);
        when(task03.prepareCommit(true)).thenReturn(emptyMap());
        // expected committed offsets only for task00 and task01 (task03 has empty offsets)
        final Map<TopicPartition, OffsetAndMetadata> expectedCommittedOffsets = new HashMap<>();
        expectedCommittedOffsets.putAll(offsets0);
        expectedCommittedOffsets.putAll(offsets1);
        final TasksRegistry tasks = mock(TasksRegistry.class);
        when(tasks.allTasks()).thenReturn(Set.of(task00, task01, task02, task03));
        final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
        // maybeCommitActiveTasksPerUserRequested checks if any task has both commitRequested AND commitNeeded
        // If found, commits all active running tasks that have commitNeeded
        // Returns count of committed tasks: task00, task01, and task03 (3 tasks)
        assertThat(taskManager.maybeCommitActiveTasksPerUserRequested(), equalTo(3));
        // Verify commit flow for tasks that needed commit
        verify(task00, atLeastOnce()).commitNeeded();
        verify(task00).prepareCommit(true);
        verify(task00).postCommit(false);
        verify(task01, atLeastOnce()).commitNeeded();
        verify(task01).prepareCommit(true);
        verify(task01).postCommit(false);
        verify(task03, atLeastOnce()).commitNeeded();
        verify(task03).prepareCommit(true);
        verify(task03).postCommit(false);
        // task02 should not be committed (no commitNeeded)
        verify(task02, never()).prepareCommit(anyBoolean());
        // Consumer should commit combined offsets from task00 and task01
        verify(consumer).commitSync(expectedCommittedOffsets);
    }
    @Test
    public void shouldProcessActiveTasks() {
        // process(maxNumRecords, time) should process at most maxNumRecords per task
        // per invocation, and stop early for a task once it has no more records.
        final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
            .inState(State.RUNNING)
            .withInputPartitions(taskId00Partitions)
            .build();
        final StreamTask task01 = statefulTask(taskId01, taskId01ChangelogPartitions)
            .inState(State.RUNNING)
            .withInputPartitions(taskId01Partitions)
            .build();
        // simulate processing records from the queue
        when(task00.process(anyLong()))
            .thenReturn(true) // record 1
            .thenReturn(true) // record 2
            .thenReturn(true) // record 3
            .thenReturn(true) // record 4
            .thenReturn(true) // record 5
            .thenReturn(true) // record 6
            .thenReturn(false); // no more records
        when(task01.process(anyLong()))
            .thenReturn(true) // record 1
            .thenReturn(true) // record 2
            .thenReturn(true) // record 3
            .thenReturn(true) // record 4
            .thenReturn(true) // record 5
            .thenReturn(false); // no more records
        final TasksRegistry tasks = mock(TasksRegistry.class);
        when(tasks.activeTasks()).thenReturn(Set.of(task00, task01));
        final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
        // check that we should be processing at most max num records
        assertThat(taskManager.process(3, time), is(6));
        // check that if there's no records processable, we would stop early
        assertThat(taskManager.process(3, time), is(5));
        assertThat(taskManager.process(3, time), is(0));
    }
    @Test
    public void shouldNotFailOnTimeoutException() {
        // A TimeoutException from one task's process() must not fail the whole
        // processing loop: the task's timeout is recorded, other tasks keep
        // processing, and the timeout is cleared once the task succeeds again.
        final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
            .inState(State.RUNNING)
            .withInputPartitions(taskId00Partitions)
            .build();
        // throws TimeoutException on first call, then processes 2 records
        final StreamTask task01 = statefulTask(taskId01, taskId01ChangelogPartitions)
            .inState(State.RUNNING)
            .withInputPartitions(taskId01Partitions)
            .build();
        final StreamTask task02 = statefulTask(taskId02, taskId02ChangelogPartitions)
            .inState(State.RUNNING)
            .withInputPartitions(taskId02Partitions)
            .build();
        when(task00.process(anyLong()))
            .thenReturn(true)
            .thenReturn(true)
            .thenReturn(false);
        when(task01.process(anyLong()))
            .thenThrow(new TimeoutException("Skip me!")) // throws TimeoutException
            .thenReturn(true)
            .thenReturn(true)
            .thenReturn(false);
        when(task02.process(anyLong()))
            .thenReturn(true)
            .thenReturn(true)
            .thenReturn(false);
        final TasksRegistry tasks = mock(TasksRegistry.class);
        when(tasks.activeTasks()).thenReturn(Set.of(task00, task01, task02));
        final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
        // should only process 2 records, because task01 throws TimeoutException
        assertThat(taskManager.process(1, time), is(2));
        verify(task01).maybeInitTaskTimeoutOrThrow(anyLong(), any(TimeoutException.class));
        // retry without error
        assertThat(taskManager.process(1, time), is(3));
        verify(task01).clearTaskTimeout();
        // there should still be one record for task01 to be processed
        assertThat(taskManager.process(1, time), is(1));
    }
    @Test
    public void shouldPropagateTaskMigratedExceptionsInProcessActiveTasks() {
        // TaskMigratedException signals a rebalance; it must propagate out of
        // process() unwrapped so the stream thread can handle the migration.
        final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
            .inState(State.RUNNING)
            .withInputPartitions(taskId00Partitions)
            .build();
        when(task00.process(anyLong()))
            .thenThrow(new TaskMigratedException("migrated", new RuntimeException("cause")));
        final TasksRegistry tasks = mock(TasksRegistry.class);
        when(tasks.activeTasks()).thenReturn(Set.of(task00));
        final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
        assertThrows(TaskMigratedException.class, () -> taskManager.process(1, time));
    }
    @Test
    public void shouldWrapRuntimeExceptionsInProcessActiveTasksAndSetTaskId() {
        // A generic RuntimeException from process() is wrapped in a StreamsException
        // that carries the failing task's id, so error handling can attribute it.
        final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
            .inState(State.RUNNING)
            .withInputPartitions(taskId00Partitions)
            .build();
        when(task00.process(anyLong())).thenThrow(new RuntimeException("oops"));
        final TasksRegistry tasks = mock(TasksRegistry.class);
        when(tasks.activeTasks()).thenReturn(Set.of(task00));
        final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
        final StreamsException exception = assertThrows(StreamsException.class, () -> taskManager.process(1, time));
        assertThat(exception.taskId().isPresent(), is(true));
        assertThat(exception.taskId().get(), is(taskId00));
        assertThat(exception.getCause().getMessage(), is("oops"));
    }
    @Test
    public void shouldPropagateTaskMigratedExceptionsInPunctuateActiveTasks() {
        // As with process(), a TaskMigratedException raised during punctuation
        // must propagate out of punctuate() unwrapped.
        final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
            .inState(State.RUNNING)
            .withInputPartitions(taskId00Partitions)
            .build();
        when(task00.maybePunctuateStreamTime())
            .thenThrow(new TaskMigratedException("migrated", new RuntimeException("cause")));
        final TasksRegistry tasks = mock(TasksRegistry.class);
        when(tasks.activeTasks()).thenReturn(Set.of(task00));
        final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
        assertThrows(TaskMigratedException.class, taskManager::punctuate);
    }
    @Test
    public void shouldPropagateKafkaExceptionsInPunctuateActiveTasks() {
        // A KafkaException raised during stream-time punctuation propagates
        // out of punctuate() without being wrapped.
        final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
            .inState(State.RUNNING)
            .withInputPartitions(taskId00Partitions)
            .build();
        when(task00.maybePunctuateStreamTime()).thenThrow(new KafkaException("oops"));
        final TasksRegistry tasks = mock(TasksRegistry.class);
        when(tasks.activeTasks()).thenReturn(Set.of(task00));
        final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
        assertThrows(KafkaException.class, taskManager::punctuate);
    }
    @Test
    public void shouldPunctuateActiveTasks() {
        // punctuate() returns the total number of punctuations that fired,
        // counting stream-time and system-time punctuations separately.
        final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
            .withInputPartitions(taskId00Partitions)
            .inState(State.RUNNING)
            .build();
        when(task00.maybePunctuateStreamTime()).thenReturn(true);
        when(task00.maybePunctuateSystemTime()).thenReturn(true);
        final TasksRegistry tasks = mock(TasksRegistry.class);
        when(tasks.activeTasks()).thenReturn(Set.of(task00));
        final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
        // one for stream and one for system time
        assertThat(taskManager.punctuate(), equalTo(2));
        verify(task00).maybePunctuateStreamTime();
        verify(task00).maybePunctuateSystemTime();
    }
    @Test
    public void shouldReturnFalseWhenThereAreStillNonRunningTasks() {
        // checkStateUpdater() reports restoration incomplete (false) while the
        // state updater still has active tasks restoring; the consumer is untouched.
        final TasksRegistry tasks = mock(TasksRegistry.class);
        final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
        // mock that the state updater is still restoring active tasks
        when(stateUpdater.restoresActiveTasks()).thenReturn(true);
        assertThat(taskManager.checkStateUpdater(time.milliseconds(), noOpResetter), is(false));
        verifyNoInteractions(consumer);
    }
    @Test
    public void shouldHaveRemainingPartitionsUncleared() {
        // Revoking a partition that no current task owns should not fail;
        // the task manager logs the unmatched partitions at DEBUG instead.
        final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
            .inState(State.RUNNING)
            .withInputPartitions(taskId00Partitions)
            .build();
        final Map<TopicPartition, OffsetAndMetadata> offsets = singletonMap(t1p0, new OffsetAndMetadata(0L, null));
        when(task00.prepareCommit(false)).thenReturn(offsets);
        final TasksRegistry tasks = mock(TasksRegistry.class);
        final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
        when(tasks.allTasks()).thenReturn(Set.of(task00));
        try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(TaskManager.class)) {
            appender.setClassLogger(TaskManager.class, Level.DEBUG);
            // "unknown-0" is not assigned to any task and should only be logged.
            taskManager.handleRevocation(Set.of(t1p0, new TopicPartition("unknown", 0)));
            verify(task00).suspend();
            final List<String> messages = appender.getMessages();
            // The expected message starts with the log prefix "taskManagerTest".
            assertThat(
                messages,
                hasItem("taskManagerTestThe following revoked partitions [unknown-0] are missing " +
                    "from the current task partitions. It could potentially be due to race " +
                    "condition of consumer detecting the heartbeat failure, or the " +
                    "tasks have been cleaned up by the handleAssignment callback.")
            );
        }
    }
    @Test
    public void shouldThrowTaskMigratedWhenAllTaskCloseExceptionsAreTaskMigrated() {
        // If every task-close failure during handleAssignment() is a
        // TaskMigratedException, a single TaskMigratedException is rethrown.
        final StandbyTask migratedTask01 = standbyTask(taskId01, taskId01ChangelogPartitions)
            .inState(State.RUNNING)
            .withInputPartitions(taskId01Partitions)
            .build();
        final StandbyTask migratedTask02 = standbyTask(taskId02, taskId02ChangelogPartitions)
            .inState(State.RUNNING)
            .withInputPartitions(taskId02Partitions)
            .build();
        doThrow(new TaskMigratedException("t1 close exception", new RuntimeException()))
            .when(migratedTask01).suspend();
        doThrow(new TaskMigratedException("t2 close exception", new RuntimeException()))
            .when(migratedTask02).suspend();
        final TasksRegistry tasks = mock(TasksRegistry.class);
        final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
        when(stateUpdater.tasks()).thenReturn(Set.of(migratedTask01, migratedTask02));
        // mock futures for removing tasks from StateUpdater
        final CompletableFuture<StateUpdater.RemovedTaskResult> future01 = new CompletableFuture<>();
        when(stateUpdater.remove(taskId01)).thenReturn(future01);
        future01.complete(new StateUpdater.RemovedTaskResult(migratedTask01));
        final CompletableFuture<StateUpdater.RemovedTaskResult> future02 = new CompletableFuture<>();
        when(stateUpdater.remove(taskId02)).thenReturn(future02);
        future02.complete(new StateUpdater.RemovedTaskResult(migratedTask02));
        final TaskMigratedException thrown = assertThrows(
            TaskMigratedException.class,
            () -> taskManager.handleAssignment(emptyMap(), emptyMap())
        );
        // The task map orders tasks based on topic group id and partition, so here
        // t1 should always be the first.
        assertThat(
            thrown.getMessage(),
            equalTo("t2 close exception; it means all tasks belonging to this thread should be migrated.")
        );
        verify(migratedTask01, times(2)).suspend();
        verify(migratedTask02, times(2)).suspend();
        verify(stateUpdater).remove(taskId01);
        verify(stateUpdater).remove(taskId02);
    }
    @Test
    public void shouldThrowRuntimeExceptionWhenEncounteredUnknownExceptionDuringTaskClose() {
        // If one close fails with TaskMigratedException and another with a fatal
        // IllegalStateException, the fatal error wins and is rethrown first.
        final StandbyTask migratedTask01 = standbyTask(taskId01, taskId01ChangelogPartitions)
            .inState(State.RUNNING)
            .withInputPartitions(taskId01Partitions)
            .build();
        final StandbyTask migratedTask02 = standbyTask(taskId02, taskId02ChangelogPartitions)
            .inState(State.RUNNING)
            .withInputPartitions(taskId02Partitions)
            .build();
        doThrow(new TaskMigratedException("t1 close exception", new RuntimeException()))
            .when(migratedTask01).suspend();
        doThrow(new IllegalStateException("t2 illegal state exception", new RuntimeException()))
            .when(migratedTask02).suspend();
        final TasksRegistry tasks = mock(TasksRegistry.class);
        final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
        when(stateUpdater.tasks()).thenReturn(Set.of(migratedTask01, migratedTask02));
        // mock futures for removing tasks from StateUpdater
        final CompletableFuture<StateUpdater.RemovedTaskResult> future01 = new CompletableFuture<>();
        when(stateUpdater.remove(taskId01)).thenReturn(future01);
        future01.complete(new StateUpdater.RemovedTaskResult(migratedTask01));
        final CompletableFuture<StateUpdater.RemovedTaskResult> future02 = new CompletableFuture<>();
        when(stateUpdater.remove(taskId02)).thenReturn(future02);
        future02.complete(new StateUpdater.RemovedTaskResult(migratedTask02));
        final RuntimeException thrown = assertThrows(
            RuntimeException.class,
            () -> taskManager.handleAssignment(emptyMap(), emptyMap())
        );
        // Fatal exception thrown first.
        assertThat(thrown.getMessage(), equalTo("Encounter unexpected fatal error for task 0_2"));
        assertThat(thrown.getCause().getMessage(), equalTo("t2 illegal state exception"));
        verify(migratedTask01, times(2)).suspend();
        verify(migratedTask02, times(2)).suspend();
        verify(stateUpdater).remove(taskId01);
        verify(stateUpdater).remove(taskId02);
    }
    @Test
    public void shouldThrowSameKafkaExceptionWhenEncounteredDuringTaskClose() {
        // A KafkaException during close is wrapped in a StreamsException that
        // keeps the original exception as cause and carries the failing task id.
        final StandbyTask migratedTask01 = standbyTask(taskId01, taskId01ChangelogPartitions)
            .inState(State.RUNNING)
            .withInputPartitions(taskId01Partitions)
            .build();
        final StandbyTask migratedTask02 = standbyTask(taskId02, taskId02ChangelogPartitions)
            .inState(State.RUNNING)
            .withInputPartitions(taskId02Partitions)
            .build();
        doThrow(new TaskMigratedException("t1 close exception", new RuntimeException()))
            .when(migratedTask01).suspend();
        doThrow(new KafkaException("Kaboom for t2!", new RuntimeException()))
            .when(migratedTask02).suspend();
        final TasksRegistry tasks = mock(TasksRegistry.class);
        final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
        when(stateUpdater.tasks()).thenReturn(Set.of(migratedTask01, migratedTask02));
        // mock futures for removing tasks from StateUpdater
        final CompletableFuture<StateUpdater.RemovedTaskResult> future01 = new CompletableFuture<>();
        when(stateUpdater.remove(taskId01)).thenReturn(future01);
        future01.complete(new StateUpdater.RemovedTaskResult(migratedTask01));
        final CompletableFuture<StateUpdater.RemovedTaskResult> future02 = new CompletableFuture<>();
        when(stateUpdater.remove(taskId02)).thenReturn(future02);
        future02.complete(new StateUpdater.RemovedTaskResult(migratedTask02));
        final StreamsException thrown = assertThrows(
            StreamsException.class,
            () -> taskManager.handleAssignment(emptyMap(), emptyMap())
        );
        assertThat(thrown.taskId().isPresent(), is(true));
        assertThat(thrown.taskId().get(), is(taskId02));
        // Expecting the original Kafka exception wrapped in the StreamsException.
        assertThat(thrown.getCause().getMessage(), equalTo("Kaboom for t2!"));
        verify(migratedTask01, times(2)).suspend();
        verify(migratedTask02, times(2)).suspend();
        verify(stateUpdater).remove(taskId01);
        verify(stateUpdater).remove(taskId02);
    }
    @Test
    public void shouldTransmitProducerMetrics() {
        // producerMetrics() should simply pass through the metrics map
        // exposed by the active task creator's producer.
        final MetricName testMetricName = new MetricName("test_metric", "", "", new HashMap<>());
        final Metric testMetric = new KafkaMetric(
            new Object(),
            testMetricName,
            (Measurable) (config, now) -> 0,
            null,
            new MockTime());
        final Map<MetricName, Metric> dummyProducerMetrics = singletonMap(testMetricName, testMetric);
        when(activeTaskCreator.producerMetrics()).thenReturn(dummyProducerMetrics);
        assertThat(taskManager.producerMetrics(), is(dummyProducerMetrics));
    }
    /**
     * Test helper: drives {@code taskManager.handleAssignment(...)} followed by
     * {@code tryToCompleteRestoration(...)} for the given assignments and asserts
     * the resulting task states (running active/standby in RUNNING, restoring
     * active in RESTORING).
     *
     * @param runningActiveAssignment   active tasks expected to reach RUNNING
     * @param standbyAssignment         standby tasks expected to reach RUNNING
     * @param restoringActiveAssignment active tasks expected to stay in RESTORING
     * @return all created tasks keyed by task id
     */
    private Map<TaskId, StateMachineTask> handleAssignment(final Map<TaskId, Set<TopicPartition>> runningActiveAssignment,
                                                           final Map<TaskId, Set<TopicPartition>> standbyAssignment,
                                                           final Map<TaskId, Set<TopicPartition>> restoringActiveAssignment) {
        final Set<Task> runningTasks = runningActiveAssignment.entrySet().stream()
            .map(t -> new StateMachineTask(t.getKey(), t.getValue(), true, stateManager))
            .collect(Collectors.toSet());
        final Set<Task> standbyTasks = standbyAssignment.entrySet().stream()
            .map(t -> new StateMachineTask(t.getKey(), t.getValue(), false, stateManager))
            .collect(Collectors.toSet());
        final Set<Task> restoringTasks = restoringActiveAssignment.entrySet().stream()
            .map(t -> new StateMachineTask(t.getKey(), t.getValue(), true, stateManager))
            .collect(Collectors.toSet());
        // give the restoring tasks some uncompleted changelog partitions so they'll stay in restoring
        restoringTasks.forEach(t -> ((StateMachineTask) t).setChangelogOffsets(singletonMap(new TopicPartition("changelog", 0), 0L)));
        // Initially assign only the active tasks we want to complete restoration
        final Map<TaskId, Set<TopicPartition>> allActiveTasksAssignment = new HashMap<>(runningActiveAssignment);
        allActiveTasksAssignment.putAll(restoringActiveAssignment);
        final Set<Task> allActiveTasks = new HashSet<>(runningTasks);
        allActiveTasks.addAll(restoringTasks);
        when(standbyTaskCreator.createTasks(standbyAssignment)).thenReturn(standbyTasks);
        when(activeTaskCreator.createTasks(any(), eq(allActiveTasksAssignment))).thenReturn(allActiveTasks);
        lenient().when(consumer.assignment()).thenReturn(assignment);
        taskManager.handleAssignment(allActiveTasksAssignment, standbyAssignment);
        taskManager.tryToCompleteRestoration(time.milliseconds(), null);
        final Map<TaskId, StateMachineTask> allTasks = new HashMap<>();
        // Just make sure all tasks ended up in the expected state
        for (final Task task : runningTasks) {
            assertThat(task.state(), is(Task.State.RUNNING));
            allTasks.put(task.id(), (StateMachineTask) task);
        }
        for (final Task task : restoringTasks) {
            assertThat(task.state(), is(Task.State.RESTORING));
            allTasks.put(task.id(), (StateMachineTask) task);
        }
        for (final Task task : standbyTasks) {
            assertThat(task.state(), is(Task.State.RUNNING));
            allTasks.put(task.id(), (StateMachineTask) task);
        }
        return allTasks;
    }
private void expectLockObtainedFor(final TaskId... tasks) {
for (final TaskId task : tasks) {
when(stateDirectory.lock(task)).thenReturn(true);
}
}
private void expectLockFailedFor(final TaskId... tasks) {
for (final TaskId task : tasks) {
when(stateDirectory.lock(task)).thenReturn(false);
}
}
private void expectDirectoryNotEmpty(final TaskId... tasks) {
for (final TaskId taskId : tasks) {
when(stateDirectory.directoryForTaskIsEmpty(taskId)).thenReturn(false);
}
}
    @Test
    public void shouldThrowTaskMigratedExceptionOnCommitFailed() {
        // CommitFailedException from the consumer means this thread was kicked out
        // of the group; commitAll() must translate it into a TaskMigratedException.
        final StreamTask task01 = statefulTask(taskId01, taskId01ChangelogPartitions)
            .withInputPartitions(taskId01Partitions)
            .inState(State.RUNNING)
            .build();
        final Map<TopicPartition, OffsetAndMetadata> offsets = singletonMap(t1p0, new OffsetAndMetadata(0L, null));
        when(task01.commitNeeded()).thenReturn(true);
        when(task01.prepareCommit(true)).thenReturn(offsets);
        final TasksRegistry tasks = mock(TasksRegistry.class);
        when(tasks.allTasks()).thenReturn(Set.of(task01));
        final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
        doThrow(new CommitFailedException()).when(consumer).commitSync(offsets);
        final TaskMigratedException thrown = assertThrows(
            TaskMigratedException.class,
            taskManager::commitAll
        );
        // The original CommitFailedException is preserved as the cause.
        assertThat(thrown.getCause(), instanceOf(CommitFailedException.class));
        assertThat(
            thrown.getMessage(),
            equalTo("Consumer committing offsets failed, indicating the corresponding thread is no longer part of the group;" +
                " it means all tasks belonging to this thread should be migrated.")
        );
    }
    @SuppressWarnings("unchecked")
    @Test
    public void shouldNotFailForTimeoutExceptionOnConsumerCommit() {
        // A consumer commitSync() timeout is not fatal: the affected task records a
        // task timeout and the commit is retried (and cleared) on the next attempt.
        final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
            .withInputPartitions(taskId00Partitions)
            .inState(State.RUNNING)
            .build();
        final StreamTask task01 = statefulTask(taskId01, taskId01ChangelogPartitions)
            .withInputPartitions(taskId01Partitions)
            .inState(State.RUNNING)
            .build();
        final Map<TopicPartition, OffsetAndMetadata> offsets = taskId00Partitions.stream()
            .collect(Collectors.toMap(p -> p, p -> new OffsetAndMetadata(0)));
        when(task00.commitNeeded()).thenReturn(true);
        when(task00.prepareCommit(true)).thenReturn(offsets);
        when(task01.commitNeeded()).thenReturn(false);
        final TasksRegistry tasks = mock(TasksRegistry.class);
        final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
        // First commitSync() throws TimeoutException, second succeeds.
        doThrow(new TimeoutException("KABOOM!")).doNothing().when(consumer).commitSync(any(Map.class));
        // First attempt commits nothing due to the timeout.
        assertThat(taskManager.commit(Set.of(task00, task01)), equalTo(0));
        verify(task00).maybeInitTaskTimeoutOrThrow(anyLong(), any(TimeoutException.class));
        // Retry succeeds and clears the task timeout.
        assertThat(taskManager.commit(Set.of(task00, task01)), equalTo(1));
        verify(task00).clearTaskTimeout();
        verify(consumer, times(2)).commitSync(any(Map.class));
    }
    @Test
    public void shouldThrowTaskCorruptedExceptionForTimeoutExceptionOnCommitWithEosV2() {
        // Under EOS-v2 a commitTransaction() timeout leaves the transaction in an
        // unknown state, so all tasks in that commit must be marked corrupted.
        final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
            .withInputPartitions(taskId00Partitions)
            .inState(State.RUNNING)
            .build();
        final StreamTask task01 = statefulTask(taskId01, taskId01ChangelogPartitions)
            .withInputPartitions(taskId01Partitions)
            .inState(State.RUNNING)
            .build();
        final StreamTask task02 = statefulTask(taskId02, taskId02ChangelogPartitions)
            .withInputPartitions(taskId02Partitions)
            .inState(State.RUNNING)
            .build();
        final Map<TopicPartition, OffsetAndMetadata> offsetsT00 = singletonMap(t1p0, new OffsetAndMetadata(0L, null));
        final Map<TopicPartition, OffsetAndMetadata> offsetsT01 = singletonMap(t1p1, new OffsetAndMetadata(1L, null));
        final Map<TopicPartition, OffsetAndMetadata> allOffsets = new HashMap<>(offsetsT00);
        allOffsets.putAll(offsetsT01);
        when(task00.commitNeeded()).thenReturn(true);
        when(task00.prepareCommit(true)).thenReturn(offsetsT00);
        when(task01.commitNeeded()).thenReturn(true);
        when(task01.prepareCommit(true)).thenReturn(offsetsT01);
        when(task02.commitNeeded()).thenReturn(false);
        final StreamsProducer producer = mock(StreamsProducer.class);
        when(activeTaskCreator.streamsProducer()).thenReturn(producer);
        final ConsumerGroupMetadata groupMetadata = mock(ConsumerGroupMetadata.class);
        when(consumer.groupMetadata()).thenReturn(groupMetadata);
        doThrow(new TimeoutException("KABOOM!")).when(producer).commitTransaction(allOffsets, groupMetadata);
        final TasksRegistry tasks = mock(TasksRegistry.class);
        final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.EXACTLY_ONCE_V2, tasks);
        final TaskCorruptedException exception = assertThrows(
            TaskCorruptedException.class,
            () -> taskManager.commit(Set.of(task00, task01, task02))
        );
        // Only the tasks that were part of the failed commit are marked corrupted;
        // task02 had nothing to commit and is excluded.
        assertThat(
            exception.corruptedTasks(),
            equalTo(Set.of(taskId00, taskId01))
        );
        verify(consumer).groupMetadata();
    }
    @Test
    public void shouldStreamsExceptionOnCommitError() {
        // A generic KafkaException from commitSync() is wrapped in a StreamsException
        // with a descriptive message and the original exception as cause.
        final StreamTask task01 = statefulTask(taskId01, taskId01ChangelogPartitions)
            .withInputPartitions(taskId01Partitions)
            .inState(State.RUNNING)
            .build();
        final Map<TopicPartition, OffsetAndMetadata> offsets = singletonMap(t1p0, new OffsetAndMetadata(0L, null));
        when(task01.commitNeeded()).thenReturn(true);
        when(task01.prepareCommit(true)).thenReturn(offsets);
        final TasksRegistry tasks = mock(TasksRegistry.class);
        when(tasks.allTasks()).thenReturn(Set.of(task01));
        final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
        doThrow(new KafkaException()).when(consumer).commitSync(offsets);
        final StreamsException thrown = assertThrows(
            StreamsException.class,
            taskManager::commitAll
        );
        assertThat(thrown.getCause(), instanceOf(KafkaException.class));
        assertThat(thrown.getMessage(), equalTo("Error encountered committing offsets via consumer"));
        verify(task01).commitNeeded();
        verify(task01).prepareCommit(true);
    }
    @Test
    public void shouldFailOnCommitFatal() {
        // A fatal RuntimeException from commitSync() (not a KafkaException) must
        // propagate out of commitAll() unchanged.
        final StreamTask task01 = statefulTask(taskId01, taskId01ChangelogPartitions)
            .withInputPartitions(taskId01Partitions)
            .inState(State.RUNNING)
            .build();
        final Map<TopicPartition, OffsetAndMetadata> offsets = singletonMap(t1p0, new OffsetAndMetadata(0L, null));
        when(task01.commitNeeded()).thenReturn(true);
        when(task01.prepareCommit(true)).thenReturn(offsets);
        final TasksRegistry tasks = mock(TasksRegistry.class);
        when(tasks.allTasks()).thenReturn(Set.of(task01));
        final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
        doThrow(new RuntimeException("KABOOM")).when(consumer).commitSync(offsets);
        final RuntimeException thrown = assertThrows(
            RuntimeException.class,
            taskManager::commitAll
        );
        assertThat(thrown.getMessage(), equalTo("KABOOM"));
        verify(task01).commitNeeded();
        verify(task01).prepareCommit(true);
    }
    @Test
    public void shouldSuspendAllTasksButSkipCommitIfSuspendingFailsDuringRevocation() {
        // If suspending one task fails during revocation, every revoked task is still
        // suspended, but the subsequent offset commit is skipped (no consumer calls)
        // and the failure is rethrown.
        final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
            .inState(State.RUNNING)
            .withInputPartitions(taskId00Partitions)
            .build();
        final StreamTask task01 = statefulTask(taskId01, taskId01ChangelogPartitions)
            .inState(State.RUNNING)
            .withInputPartitions(taskId01Partitions)
            .build();
        doThrow(new RuntimeException("KABOOM!")).when(task00).suspend();
        final TasksRegistry tasks = mock(TasksRegistry.class);
        when(tasks.allTasks()).thenReturn(Set.of(task00, task01));
        final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
        final RuntimeException thrown = assertThrows(
            RuntimeException.class,
            () -> taskManager.handleRevocation(union(HashSet::new, taskId00Partitions, taskId01Partitions)));
        assertThat(thrown.getCause().getMessage(), is("KABOOM!"));
        // verify both tasks had suspend called
        verify(task00).suspend();
        verify(task01).suspend();
        verifyNoInteractions(consumer);
    }
    @Test
    public void shouldConvertActiveTaskToStandbyTask() {
        // When a task id moves from the active to the standby assignment, the active
        // task is removed from the state updater, recycled into a standby task via the
        // standby task creator, and queued for initialization.
        final StreamTask activeTaskToRecycle = statefulTask(taskId00, taskId00ChangelogPartitions)
            .inState(State.RUNNING)
            .withInputPartitions(taskId00Partitions).build();
        final StandbyTask recycledStandbyTask = standbyTask(taskId00, taskId00ChangelogPartitions)
            .inState(State.CREATED)
            .withInputPartitions(taskId00Partitions).build();
        final TasksRegistry tasks = mock(TasksRegistry.class);
        final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
        when(activeTaskCreator.createTasks(consumer, taskId00Assignment)).thenReturn(singletonList(activeTaskToRecycle));
        when(standbyTaskCreator.createStandbyTaskFromActive(activeTaskToRecycle, taskId00Partitions))
            .thenReturn(recycledStandbyTask);
        // create active task
        taskManager.handleAssignment(taskId00Assignment, Collections.emptyMap());
        // convert active to standby
        when(stateUpdater.tasks()).thenReturn(Set.of(activeTaskToRecycle));
        final CompletableFuture<StateUpdater.RemovedTaskResult> future = new CompletableFuture<>();
        when(stateUpdater.remove(activeTaskToRecycle.id())).thenReturn(future);
        future.complete(new StateUpdater.RemovedTaskResult(activeTaskToRecycle));
        taskManager.handleAssignment(Collections.emptyMap(), taskId00Assignment);
        verify(activeTaskCreator).createTasks(consumer, emptyMap());
        verify(standbyTaskCreator, times(2)).createTasks(Collections.emptyMap());
        verify(standbyTaskCreator).createStandbyTaskFromActive(activeTaskToRecycle, taskId00Partitions);
        verify(tasks).addPendingTasksToInit(Collections.singleton(recycledStandbyTask));
    }
@Test
public void shouldConvertStandbyTaskToActiveTask() {
final StandbyTask standbyTaskToRecycle = standbyTask(taskId00, taskId00ChangelogPartitions)
.inState(State.RUNNING)
.withInputPartitions(taskId00Partitions).build();
final StreamTask recycledActiveTask = statefulTask(taskId00, taskId00ChangelogPartitions)
.inState(State.CREATED)
.withInputPartitions(taskId00Partitions).build();
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
when(standbyTaskCreator.createTasks(taskId00Assignment)).thenReturn(singletonList(standbyTaskToRecycle));
when(activeTaskCreator.createActiveTaskFromStandby(standbyTaskToRecycle, taskId00Partitions, consumer))
.thenReturn(recycledActiveTask);
// create standby task
taskManager.handleAssignment(Collections.emptyMap(), taskId00Assignment);
// convert standby to active
when(stateUpdater.tasks()).thenReturn(Set.of(standbyTaskToRecycle));
final CompletableFuture<StateUpdater.RemovedTaskResult> future = new CompletableFuture<>();
when(stateUpdater.remove(standbyTaskToRecycle.id())).thenReturn(future);
future.complete(new StateUpdater.RemovedTaskResult(standbyTaskToRecycle));
taskManager.handleAssignment(taskId00Assignment, Collections.emptyMap());
verify(activeTaskCreator, times(2)).createTasks(consumer, emptyMap());
verify(standbyTaskCreator).createTasks(Collections.emptyMap());
verify(activeTaskCreator).createActiveTaskFromStandby(standbyTaskToRecycle, taskId00Partitions, consumer);
verify(tasks).addPendingTasksToInit(Collections.singleton(recycledActiveTask));
}
@Test
public void shouldListNotPausedTasks() {
final StreamTask task00 = statefulTask(taskId00, taskId00ChangelogPartitions)
.withInputPartitions(taskId00Partitions)
.inState(State.RUNNING)
.build();
final StreamTask task01 = statefulTask(taskId01, taskId01ChangelogPartitions)
.withInputPartitions(taskId01Partitions)
.inState(State.RUNNING)
.build();
final TasksRegistry tasks = mock(TasksRegistry.class);
when(tasks.allTasks()).thenReturn(Set.of(task00, task01));
final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, tasks);
when(stateUpdater.tasks()).thenReturn(Collections.emptySet());
assertEquals(2, taskManager.notPausedTasks().size());
assertTrue(taskManager.notPausedTasks().containsKey(taskId00));
assertTrue(taskManager.notPausedTasks().containsKey(taskId01));
topologyMetadata.pauseTopology(UNNAMED_TOPOLOGY);
assertEquals(0, taskManager.notPausedTasks().size());
}
@Test
public void shouldRecycleStartupTasksFromStateDirectoryAsActive() {
final Tasks taskRegistry = new Tasks(new LogContext());
final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, taskRegistry);
final StandbyTask startupTask = standbyTask(taskId00, taskId00ChangelogPartitions).build();
final StreamTask activeTask = statefulTask(taskId00, taskId00ChangelogPartitions).build();
when(activeTaskCreator.createActiveTaskFromStandby(eq(startupTask), eq(taskId00Partitions), any()))
.thenReturn(activeTask);
when(stateDirectory.hasStartupTasks()).thenReturn(true, false);
when(stateDirectory.removeStartupTask(taskId00)).thenReturn(startupTask, (Task) null);
taskManager.handleAssignment(taskId00Assignment, Collections.emptyMap());
// ensure we used our existing startup Task directly as a Standby
assertTrue(taskRegistry.hasPendingTasksToInit());
assertEquals(Collections.singleton(activeTask), taskRegistry.drainPendingTasksToInit());
// we're using a mock StateUpdater here, so now that we've drained the task from the queue of startup tasks to init
// let's "add" it to our mock StateUpdater
when(stateUpdater.tasks()).thenReturn(Collections.singleton(activeTask));
when(stateUpdater.standbyTasks()).thenReturn(Collections.emptySet());
// ensure we recycled our existing startup Standby into an Active task
verify(activeTaskCreator).createActiveTaskFromStandby(eq(startupTask), eq(taskId00Partitions), any());
// ensure we didn't construct any new Tasks
verify(activeTaskCreator).createTasks(any(), eq(Collections.emptyMap()));
verify(standbyTaskCreator).createTasks(Collections.emptyMap());
verifyNoMoreInteractions(activeTaskCreator);
verifyNoMoreInteractions(standbyTaskCreator);
// verify the recycled task is now being used as an assigned Active
assertEquals(Collections.singletonMap(taskId00, activeTask), taskManager.activeTaskMap());
assertEquals(Collections.emptyMap(), taskManager.standbyTaskMap());
}
@Test
public void shouldUseStartupTasksFromStateDirectoryAsStandby() {
final Tasks taskRegistry = new Tasks(new LogContext());
final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, taskRegistry);
final StandbyTask startupTask = standbyTask(taskId00, taskId00ChangelogPartitions).build();
when(stateDirectory.hasStartupTasks()).thenReturn(true, true, false);
when(stateDirectory.removeStartupTask(taskId00)).thenReturn(startupTask, (Task) null);
assertFalse(taskRegistry.hasPendingTasksToInit());
taskManager.handleAssignment(Collections.emptyMap(), taskId00Assignment);
// ensure we used our existing startup Task directly as a Standby
assertTrue(taskRegistry.hasPendingTasksToInit());
assertEquals(Collections.singleton(startupTask), taskRegistry.drainPendingTasksToInit());
// we're using a mock StateUpdater here, so now that we've drained the task from the queue of startup tasks to init
// let's "add" it to our mock StateUpdater
when(stateUpdater.tasks()).thenReturn(Collections.singleton(startupTask));
when(stateUpdater.standbyTasks()).thenReturn(Collections.singleton(startupTask));
// ensure we didn't construct any new Tasks, or recycle an existing Task; we only used the one we already have
verify(activeTaskCreator).createTasks(any(), eq(Collections.emptyMap()));
verify(standbyTaskCreator).createTasks(Collections.emptyMap());
verifyNoMoreInteractions(activeTaskCreator);
verifyNoMoreInteractions(standbyTaskCreator);
// verify the startup Standby is now being used as an assigned Standby
assertEquals(Collections.emptyMap(), taskManager.activeTaskMap());
assertEquals(Collections.singletonMap(taskId00, startupTask), taskManager.standbyTaskMap());
}
@Test
public void shouldStartStateUpdaterOnInit() {
final TaskManager taskManager = setUpTaskManagerWithStateUpdater(ProcessingMode.AT_LEAST_ONCE, null);
taskManager.init();
verify(stateUpdater).start();
}
private static KafkaFutureImpl<DeletedRecords> completedFuture() {
final KafkaFutureImpl<DeletedRecords> futureDeletedRecords = new KafkaFutureImpl<>();
futureDeletedRecords.complete(null);
return futureDeletedRecords;
}
private void makeTaskFolders(final String... names) throws Exception {
final ArrayList<TaskDirectory> taskFolders = new ArrayList<>(names.length);
for (int i = 0; i < names.length; i++) {
final String name = names[i];
final Path path = testFolder.resolve(name).toAbsolutePath();
if (Files.notExists(path)) {
Files.createDirectories(path);
}
taskFolders.add(new TaskDirectory(path.toFile(), null));
}
when(stateDirectory.listNonEmptyTaskDirectories()).thenReturn(taskFolders);
}
private void writeCheckpointFile(final TaskId task, final Map<TopicPartition, Long> offsets) throws Exception {
final File checkpointFile = getCheckpointFile(task);
final Path checkpointFilePath = checkpointFile.toPath();
Files.createFile(checkpointFilePath);
new OffsetCheckpoint(checkpointFile).write(offsets);
lenient().when(stateDirectory.checkpointFileFor(task)).thenReturn(checkpointFile);
expectDirectoryNotEmpty(task);
}
private File getCheckpointFile(final TaskId task) {
return new File(new File(testFolder.toAbsolutePath().toString(), task.toString()), StateManagerUtil.CHECKPOINT_FILE_NAME);
}
private static ConsumerRecord<byte[], byte[]> getConsumerRecord(final TopicPartition topicPartition, final long offset) {
return new ConsumerRecord<>(topicPartition.topic(), topicPartition.partition(), offset, null, null);
}
private static
|
TaskManagerTest
|
java
|
spring-projects__spring-boot
|
module/spring-boot-devtools/src/main/java/org/springframework/boot/devtools/filewatch/FileSystemWatcherFactory.java
|
{
"start": 826,
"end": 1008
}
|
interface ____ {
/**
* Create a new {@link FileSystemWatcher}.
* @return a new {@link FileSystemWatcher}
*/
FileSystemWatcher getFileSystemWatcher();
}
|
FileSystemWatcherFactory
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/InfiniteRecursionTest.java
|
{
"start": 3665,
"end": 4151
}
|
class ____ {
void f(int x) {}
void f() {
f(42);
}
int g() {
return 0;
}
int g(int x) {
return x == 0 ? g() : g(x - 1);
}
}
""")
.doTest();
}
@Test
public void positiveMultipleStatementsNotFirst() {
compilationHelper
.addSourceLines(
"Test.java",
"""
|
Test
|
java
|
google__dagger
|
java/dagger/example/spi/BindingGraphVisualizer.java
|
{
"start": 6226,
"end": 6364
}
|
class ____ extends DotStatement<DotNode> {
DotNode(Object nodeName) {
super(quote(nodeName.toString()));
}
}
static
|
DotNode
|
java
|
quarkusio__quarkus
|
independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/buildextension/annotations/SimpleInterceptor.java
|
{
"start": 262,
"end": 411
}
|
class ____ {
@AroundInvoke
Object mySuperCoolAroundInvoke(InvocationContext ctx) throws Exception {
return 10;
}
}
|
SimpleInterceptor
|
java
|
apache__logging-log4j2
|
log4j-core-test/src/test/java/org/apache/logging/log4j/core/appender/XmlCompactFileAppenderTest.java
|
{
"start": 1588,
"end": 3362
}
|
class ____ {
@BeforeAll
static void beforeClass() {
System.setProperty(ConfigurationFactory.CONFIGURATION_FILE_PROPERTY, "XmlCompactFileAppenderTest.xml");
}
@Test
void testFlushAtEndOfBatch() throws Exception {
final File file = new File("target", "XmlCompactFileAppenderTest.log");
file.delete();
final Logger log = LogManager.getLogger("com.foo.Bar");
final String logMsg = "Message flushed with immediate flush=false";
log.info(logMsg);
CoreLoggerContexts.stopLoggerContext(false, file); // stop async thread
String line1;
try (final BufferedReader reader = new BufferedReader(new FileReader(file))) {
line1 = reader.readLine();
} finally {
file.delete();
}
assertNotNull(line1, "line1");
final String msg1 = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>";
assertTrue(line1.contains(msg1), "line1 incorrect: [" + line1 + "], does not contain: [" + msg1 + ']');
final String msg2 = "<Events xmlns=\"http://logging.apache.org/log4j/2.0/events\">";
assertTrue(line1.contains(msg2), "line1 incorrect: [" + line1 + "], does not contain: [" + msg2 + ']');
final String msg3 = "<Event ";
assertTrue(line1.contains(msg3), "line1 incorrect: [" + line1 + "], does not contain: [" + msg3 + ']');
final String msg4 = logMsg;
assertTrue(line1.contains(msg4), "line1 incorrect: [" + line1 + "], does not contain: [" + msg4 + ']');
final String location = "testFlushAtEndOfBatch";
assertFalse(line1.contains(location), "no location");
assertEquals(-1, line1.indexOf('\r'));
assertEquals(-1, line1.indexOf('\n'));
}
}
|
XmlCompactFileAppenderTest
|
java
|
apache__flink
|
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/PendingSplitsCheckpointSerializer.java
|
{
"start": 1405,
"end": 6291
}
|
class ____<T extends FileSourceSplit>
implements SimpleVersionedSerializer<PendingSplitsCheckpoint<T>> {
private static final int VERSION = 1;
private static final int VERSION_1_MAGIC_NUMBER = 0xDEADBEEF;
private final SimpleVersionedSerializer<T> splitSerializer;
public PendingSplitsCheckpointSerializer(SimpleVersionedSerializer<T> splitSerializer) {
this.splitSerializer = checkNotNull(splitSerializer);
}
// ------------------------------------------------------------------------
@Override
public int getVersion() {
return VERSION;
}
@Override
public byte[] serialize(PendingSplitsCheckpoint<T> checkpoint) throws IOException {
checkArgument(
checkpoint.getClass() == PendingSplitsCheckpoint.class,
"Cannot serialize subclasses of PendingSplitsCheckpoint");
// optimization: the splits lazily cache their own serialized form
if (checkpoint.serializedFormCache != null) {
return checkpoint.serializedFormCache;
}
final SimpleVersionedSerializer<T> splitSerializer = this.splitSerializer; // stack cache
final Collection<T> splits = checkpoint.getSplits();
final Collection<Path> processedPaths = checkpoint.getAlreadyProcessedPaths();
final ArrayList<byte[]> serializedSplits = new ArrayList<>(splits.size());
final ArrayList<byte[]> serializedPaths = new ArrayList<>(processedPaths.size());
int totalLen =
16; // four ints: magic, version of split serializer, count splits, count paths
for (T split : splits) {
final byte[] serSplit = splitSerializer.serialize(split);
serializedSplits.add(serSplit);
totalLen += serSplit.length + 4; // 4 bytes for the length field
}
for (Path path : processedPaths) {
final byte[] serPath = path.toString().getBytes(StandardCharsets.UTF_8);
serializedPaths.add(serPath);
totalLen += serPath.length + 4; // 4 bytes for the length field
}
final byte[] result = new byte[totalLen];
final ByteBuffer byteBuffer = ByteBuffer.wrap(result).order(ByteOrder.LITTLE_ENDIAN);
byteBuffer.putInt(VERSION_1_MAGIC_NUMBER);
byteBuffer.putInt(splitSerializer.getVersion());
byteBuffer.putInt(serializedSplits.size());
byteBuffer.putInt(serializedPaths.size());
for (byte[] splitBytes : serializedSplits) {
byteBuffer.putInt(splitBytes.length);
byteBuffer.put(splitBytes);
}
for (byte[] pathBytes : serializedPaths) {
byteBuffer.putInt(pathBytes.length);
byteBuffer.put(pathBytes);
}
assert byteBuffer.remaining() == 0;
// optimization: cache the serialized from, so we avoid the byte work during repeated
// serialization
checkpoint.serializedFormCache = result;
return result;
}
@Override
public PendingSplitsCheckpoint<T> deserialize(int version, byte[] serialized)
throws IOException {
if (version == 1) {
return deserializeV1(serialized);
}
throw new IOException("Unknown version: " + version);
}
private PendingSplitsCheckpoint<T> deserializeV1(byte[] serialized) throws IOException {
final ByteBuffer bb = ByteBuffer.wrap(serialized).order(ByteOrder.LITTLE_ENDIAN);
final int magic = bb.getInt();
if (magic != VERSION_1_MAGIC_NUMBER) {
throw new IOException(
String.format(
"Invalid magic number for PendingSplitsCheckpoint. "
+ "Expected: %X , found %X",
VERSION_1_MAGIC_NUMBER, magic));
}
final int splitSerializerVersion = bb.getInt();
final int numSplits = bb.getInt();
final int numPaths = bb.getInt();
final SimpleVersionedSerializer<T> splitSerializer = this.splitSerializer; // stack cache
final ArrayList<T> splits = new ArrayList<>(numSplits);
final ArrayList<Path> paths = new ArrayList<>(numPaths);
for (int remaining = numSplits; remaining > 0; remaining--) {
final byte[] bytes = new byte[bb.getInt()];
bb.get(bytes);
final T split = splitSerializer.deserialize(splitSerializerVersion, bytes);
splits.add(split);
}
for (int remaining = numPaths; remaining > 0; remaining--) {
final byte[] bytes = new byte[bb.getInt()];
bb.get(bytes);
final Path path = new Path(new String(bytes, StandardCharsets.UTF_8));
paths.add(path);
}
return PendingSplitsCheckpoint.reusingCollection(splits, paths);
}
}
|
PendingSplitsCheckpointSerializer
|
java
|
apache__camel
|
dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/Web3jComponentBuilderFactory.java
|
{
"start": 22719,
"end": 28775
}
|
class ____
extends AbstractComponentBuilder<Web3jComponent>
implements Web3jComponentBuilder {
@Override
protected Web3jComponent buildConcreteComponent() {
return new Web3jComponent();
}
private org.apache.camel.component.web3j.Web3jConfiguration getOrCreateConfiguration(Web3jComponent component) {
if (component.getConfiguration() == null) {
component.setConfiguration(new org.apache.camel.component.web3j.Web3jConfiguration());
}
return component.getConfiguration();
}
@Override
protected boolean setPropertyOnComponent(
Component component,
String name,
Object value) {
switch (name) {
case "addresses": getOrCreateConfiguration((Web3jComponent) component).setAddresses((java.lang.String) value); return true;
case "configuration": ((Web3jComponent) component).setConfiguration((org.apache.camel.component.web3j.Web3jConfiguration) value); return true;
case "fromAddress": getOrCreateConfiguration((Web3jComponent) component).setFromAddress((java.lang.String) value); return true;
case "fromBlock": getOrCreateConfiguration((Web3jComponent) component).setFromBlock((java.lang.String) value); return true;
case "fullTransactionObjects": getOrCreateConfiguration((Web3jComponent) component).setFullTransactionObjects((boolean) value); return true;
case "gasLimit": getOrCreateConfiguration((Web3jComponent) component).setGasLimit((java.math.BigInteger) value); return true;
case "privateFor": getOrCreateConfiguration((Web3jComponent) component).setPrivateFor((java.lang.String) value); return true;
case "quorumAPI": getOrCreateConfiguration((Web3jComponent) component).setQuorumAPI((boolean) value); return true;
case "toAddress": getOrCreateConfiguration((Web3jComponent) component).setToAddress((java.lang.String) value); return true;
case "toBlock": getOrCreateConfiguration((Web3jComponent) component).setToBlock((java.lang.String) value); return true;
case "topics": getOrCreateConfiguration((Web3jComponent) component).setTopics((java.lang.String) value); return true;
case "web3j": getOrCreateConfiguration((Web3jComponent) component).setWeb3j((org.web3j.protocol.Web3j) value); return true;
case "bridgeErrorHandler": ((Web3jComponent) component).setBridgeErrorHandler((boolean) value); return true;
case "address": getOrCreateConfiguration((Web3jComponent) component).setAddress((java.lang.String) value); return true;
case "atBlock": getOrCreateConfiguration((Web3jComponent) component).setAtBlock((java.lang.String) value); return true;
case "blockHash": getOrCreateConfiguration((Web3jComponent) component).setBlockHash((java.lang.String) value); return true;
case "clientId": getOrCreateConfiguration((Web3jComponent) component).setClientId((java.lang.String) value); return true;
case "data": getOrCreateConfiguration((Web3jComponent) component).setData((java.lang.String) value); return true;
case "databaseName": getOrCreateConfiguration((Web3jComponent) component).setDatabaseName((java.lang.String) value); return true;
case "filterId": getOrCreateConfiguration((Web3jComponent) component).setFilterId((java.math.BigInteger) value); return true;
case "gasPrice": getOrCreateConfiguration((Web3jComponent) component).setGasPrice((java.math.BigInteger) value); return true;
case "hashrate": getOrCreateConfiguration((Web3jComponent) component).setHashrate((java.lang.String) value); return true;
case "headerPowHash": getOrCreateConfiguration((Web3jComponent) component).setHeaderPowHash((java.lang.String) value); return true;
case "index": getOrCreateConfiguration((Web3jComponent) component).setIndex((java.math.BigInteger) value); return true;
case "keyName": getOrCreateConfiguration((Web3jComponent) component).setKeyName((java.lang.String) value); return true;
case "lazyStartProducer": ((Web3jComponent) component).setLazyStartProducer((boolean) value); return true;
case "mixDigest": getOrCreateConfiguration((Web3jComponent) component).setMixDigest((java.lang.String) value); return true;
case "nonce": getOrCreateConfiguration((Web3jComponent) component).setNonce((java.lang.String) value); return true;
case "operation": getOrCreateConfiguration((Web3jComponent) component).setOperation((java.lang.String) value); return true;
case "position": getOrCreateConfiguration((Web3jComponent) component).setPosition((java.math.BigInteger) value); return true;
case "priority": getOrCreateConfiguration((Web3jComponent) component).setPriority((java.math.BigInteger) value); return true;
case "sha3HashOfDataToSign": getOrCreateConfiguration((Web3jComponent) component).setSha3HashOfDataToSign((java.lang.String) value); return true;
case "signedTransactionData": getOrCreateConfiguration((Web3jComponent) component).setSignedTransactionData((java.lang.String) value); return true;
case "sourceCode": getOrCreateConfiguration((Web3jComponent) component).setSourceCode((java.lang.String) value); return true;
case "transactionHash": getOrCreateConfiguration((Web3jComponent) component).setTransactionHash((java.lang.String) value); return true;
case "ttl": getOrCreateConfiguration((Web3jComponent) component).setTtl((java.math.BigInteger) value); return true;
case "value": getOrCreateConfiguration((Web3jComponent) component).setValue((java.math.BigInteger) value); return true;
case "autowiredEnabled": ((Web3jComponent) component).setAutowiredEnabled((boolean) value); return true;
default: return false;
}
}
}
}
|
Web3jComponentBuilderImpl
|
java
|
spring-projects__spring-framework
|
spring-beans/src/main/java/org/springframework/beans/factory/support/BeanDefinitionReaderUtils.java
|
{
"start": 5054,
"end": 7474
}
|
class ____ with unique suffix if necessary.
return uniqueBeanName(generatedBeanName, registry);
}
/**
* Turn the given bean name into a unique bean name for the given bean factory,
* appending a unique counter as suffix if necessary.
* @param beanName the original bean name
* @param registry the bean factory that the definition is going to be
* registered with (to check for existing bean names)
* @return the unique bean name to use
* @since 5.1
*/
public static String uniqueBeanName(String beanName, BeanDefinitionRegistry registry) {
String id = beanName;
int counter = -1;
// Increase counter until the id is unique.
String prefix = beanName + GENERATED_BEAN_NAME_SEPARATOR;
while (counter == -1 || registry.containsBeanDefinition(id)) {
counter++;
id = prefix + counter;
}
return id;
}
/**
* Register the given bean definition with the given bean factory.
* @param definitionHolder the bean definition including name and aliases
* @param registry the bean factory to register with
* @throws BeanDefinitionStoreException if registration failed
*/
public static void registerBeanDefinition(
BeanDefinitionHolder definitionHolder, BeanDefinitionRegistry registry)
throws BeanDefinitionStoreException {
// Register bean definition under primary name.
String beanName = definitionHolder.getBeanName();
registry.registerBeanDefinition(beanName, definitionHolder.getBeanDefinition());
// Register aliases for bean name, if any.
String[] aliases = definitionHolder.getAliases();
if (aliases != null) {
for (String alias : aliases) {
registry.registerAlias(beanName, alias);
}
}
}
/**
* Register the given bean definition with a generated name,
* unique within the given bean factory.
* @param definition the bean definition to generate a bean name for
* @param registry the bean factory to register with
* @return the generated bean name
* @throws BeanDefinitionStoreException if no unique name can be generated
* for the given bean definition or the definition cannot be registered
*/
public static String registerWithGeneratedName(
AbstractBeanDefinition definition, BeanDefinitionRegistry registry)
throws BeanDefinitionStoreException {
String generatedName = generateBeanName(definition, registry, false);
registry.registerBeanDefinition(generatedName, definition);
return generatedName;
}
}
|
name
|
java
|
mockito__mockito
|
mockito-core/src/test/java/org/mockitousage/debugging/VerificationListenerCallBackTest.java
|
{
"start": 4844,
"end": 5367
}
|
class ____ implements VerificationListener {
Object mock;
VerificationMode mode;
VerificationData data;
Throwable cause;
@Override
public void onVerification(VerificationEvent verificationEvent) {
this.mock = verificationEvent.getMock();
this.mode = verificationEvent.getMode();
this.data = verificationEvent.getData();
this.cause = verificationEvent.getVerificationError();
}
}
private static
|
RememberingListener
|
java
|
elastic__elasticsearch
|
modules/lang-painless/src/test/java/org/elasticsearch/painless/FactoryTests.java
|
{
"start": 11645,
"end": 12077
}
|
class ____ {
private final Map<String, Object> params;
public FactoryTestConverterScript(Map<String, Object> params) {
this.params = params;
}
public Map<String, Object> getParams() {
return params;
}
public static final String[] PARAMETERS = new String[] { "test" };
public abstract long[] execute(int test);
public
|
FactoryTestConverterScript
|
java
|
grpc__grpc-java
|
xds/src/main/java/io/grpc/xds/CdsLoadBalancerProvider.java
|
{
"start": 1078,
"end": 1294
}
|
class ____ not be directly referenced in
* code. The policy should be accessed through {@link io.grpc.LoadBalancerRegistry#getProvider}
* with the name "cds" (currently "cds_experimental").
*/
@Internal
public
|
should
|
java
|
redisson__redisson
|
redisson/src/main/java/org/redisson/spring/session/config/EnableRedissonHttpSession.java
|
{
"start": 1251,
"end": 1434
}
|
interface ____ {
int maxInactiveIntervalInSeconds() default MapSession.DEFAULT_MAX_INACTIVE_INTERVAL_SECONDS;
String keyPrefix() default "";
}
|
EnableRedissonHttpSession
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/XExecutableTypesTest.java
|
{
"start": 6067,
"end": 6330
}
|
class ____ {",
" <T, Q> void m(Collection i) { throw new RuntimeException(); }",
"}");
Source bar =
CompilerTests.javaSource(
"test.Bar",
"package test;",
"import java.util.*;",
"
|
Foo
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java
|
{
"start": 16934,
"end": 27799
}
|
class ____ extends AbstractRefCounted {
private final DiscoveryNodes discoveryNodes;
private final ClusterState newState;
private final ClusterState previousState;
private final Task task;
private final boolean sendFullVersion;
private final Map<DiscoveryNode, Transport.Connection> nodeConnections = new HashMap<>();
// All the values of these maps have one ref for the context (while it's open) and one for each in-flight message.
private final Map<TransportVersion, ReleasableBytesReference> serializedStates = new ConcurrentHashMap<>();
private final Map<TransportVersion, ReleasableBytesReference> serializedDiffs = new HashMap<>();
PublicationContext(ClusterStatePublicationEvent clusterStatePublicationEvent) {
discoveryNodes = clusterStatePublicationEvent.getNewState().nodes();
newState = clusterStatePublicationEvent.getNewState();
previousState = clusterStatePublicationEvent.getOldState();
task = clusterStatePublicationEvent.getTask();
sendFullVersion = previousState.getBlocks().disableStatePersistence();
}
void buildDiffAndSerializeStates() {
assert refCount() > 0;
final LazyInitializable<Diff<ClusterState>, RuntimeException> diffSupplier = new LazyInitializable<>(
() -> newState.diff(previousState)
);
for (DiscoveryNode node : discoveryNodes) {
if (node.equals(transportService.getLocalNode())) {
// publication to local node bypasses any serialization
continue;
}
Transport.Connection connection;
try {
connection = transportService.getConnection(node);
} catch (NodeNotConnectedException e) {
// can't send to this node, don't need to serialize anything for it
logger.debug(() -> format("No connection to [%s] available, skipping serialization", node), e);
continue;
}
nodeConnections.put(node, connection);
if (sendFullVersion || previousState.nodes().nodeExists(node) == false) {
serializedStates.computeIfAbsent(connection.getTransportVersion(), v -> serializeFullClusterState(newState, node, v));
} else {
serializedDiffs.computeIfAbsent(
connection.getTransportVersion(),
v -> serializeDiffClusterState(newState, diffSupplier.getOrCompute(), node, v)
);
}
}
}
public void sendPublishRequest(
DiscoveryNode destination,
PublishRequest publishRequest,
ActionListener<PublishWithJoinResponse> listener
) {
assert refCount() > 0;
assert publishRequest.getAcceptedState() == newState : "state got switched on us";
assert transportService.getThreadPool().getThreadContext().isSystemContext();
final var newStateVersion = newState.version();
if (destination.equals(discoveryNodes.getLocalNode())) {
// The transport service normally avoids serializing/deserializing requests to the local node but here we have special
// handling to re-use the serialized representation of the cluster state across requests which means we must also handle
// local requests differently to avoid having to decompress and deserialize the request on the master.
//
// Also, the master needs the original non-serialized state as it contains some transient information that isn't replicated
// because it only makes sense on the local node (e.g. UnassignedInfo#unassignedTimeNanos).
final boolean isVotingOnlyNode = discoveryNodes.getLocalNode().getRoles().contains(DiscoveryNodeRole.VOTING_ONLY_NODE_ROLE);
logger.trace("handling cluster state version [{}] locally on [{}]", newStateVersion, destination);
transportService.getThreadPool()
.executor(ThreadPool.Names.CLUSTER_COORDINATION)
.execute(
transportService.getThreadPool()
.getThreadContext()
.preserveContext(ActionRunnable.supply(listener, new CheckedSupplier<>() {
@Override
public PublishWithJoinResponse get() {
if (isVotingOnlyNode) {
// Voting-only nodes publish their cluster state to other nodes in order to freshen the state held
// on other full master nodes, but then fail the publication before committing. However there's no
// need to freshen our local state so we can fail right away.
throw new TransportException(
new ElasticsearchException("voting-only node skipping local publication to " + destination)
);
} else {
return handlePublishRequest.apply(publishRequest);
}
}
@Override
public String toString() {
return "handling cluster state version [" + newStateVersion + "] locally on [" + destination + "]";
}
}))
);
} else if (sendFullVersion || previousState.nodes().nodeExists(destination) == false) {
logger.trace("sending full cluster state version [{}] to [{}]", newStateVersion, destination);
sendFullClusterState(destination, listener);
} else {
logger.trace("sending cluster state diff for version [{}] to [{}]", newStateVersion, destination);
sendClusterStateDiff(destination, listener);
}
}
private void sendFullClusterState(DiscoveryNode destination, ActionListener<PublishWithJoinResponse> listener) {
assert refCount() > 0;
Transport.Connection connection = nodeConnections.get(destination);
if (connection == null) {
logger.debug("No connection to [{}] available, skipping send", destination);
listener.onFailure(new NodeNotConnectedException(destination, "No connection available"));
return;
}
var version = connection.getTransportVersion();
ReleasableBytesReference bytes = serializedStates.get(version);
if (bytes == null) {
try {
bytes = serializedStates.computeIfAbsent(version, v -> serializeFullClusterState(newState, destination, v));
} catch (Exception e) {
logger.warn(() -> format("failed to serialize cluster state before publishing it to node %s", destination), e);
listener.onFailure(e);
return;
}
}
sendClusterState(connection, bytes, listener);
}
private void sendClusterStateDiff(DiscoveryNode destination, ActionListener<PublishWithJoinResponse> listener) {
Transport.Connection connection = nodeConnections.get(destination);
if (connection == null) {
logger.debug("No connection to [{}] available, skipping send", destination);
listener.onFailure(new NodeNotConnectedException(destination, "No connection available"));
return;
}
final ReleasableBytesReference bytes = serializedDiffs.get(connection.getTransportVersion());
assert bytes != null
: "failed to find serialized diff for node "
+ destination
+ " of version ["
+ connection.getTransportVersion().toReleaseVersion()
+ "]";
// acquire a ref to the context just in case we need to try again with the full cluster state
if (tryIncRef() == false) {
assert false;
listener.onFailure(new IllegalStateException("publication context released before transmission"));
return;
}
sendClusterState(connection, bytes, ActionListener.runAfter(listener.delegateResponse((delegate, e) -> {
if (e instanceof final TransportException transportException) {
if (transportException.unwrapCause() instanceof IncompatibleClusterStateVersionException) {
logger.debug(
() -> format(
"resending full cluster state to node %s reason %s",
destination,
transportException.getDetailedMessage()
)
);
sendFullClusterState(destination, delegate);
return;
}
}
logger.debug(() -> format("failed to send cluster state to %s", destination), e);
delegate.onFailure(e);
}), this::decRef));
}
private void sendClusterState(
Transport.Connection connection,
ReleasableBytesReference bytes,
ActionListener<PublishWithJoinResponse> listener
) {
assert refCount() > 0;
if (bytes.tryIncRef() == false) {
assert false;
listener.onFailure(new IllegalStateException("serialized cluster state released before transmission"));
return;
}
transportService.sendChildRequest(
connection,
PUBLISH_STATE_ACTION_NAME,
new BytesTransportRequest(bytes, connection.getTransportVersion()),
task,
STATE_REQUEST_OPTIONS,
new CleanableResponseHandler<>(listener, PublishWithJoinResponse::new, clusterCoordinationExecutor, bytes::decRef)
);
}
@Override
protected void closeInternal() {
serializedDiffs.values().forEach(Releasables::closeExpectNoException);
serializedStates.values().forEach(Releasables::closeExpectNoException);
}
}
private static
|
PublicationContext
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/embeddable/EmbeddableAscDescQueryTest.java
|
{
"start": 8599,
"end": 8887
}
|
class ____ {
@Id
private Integer id;
private String name;
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
}
|
EntityD
|
java
|
spring-projects__spring-security
|
docs/modules/ROOT/examples/kerberos/SpnegoConfig.java
|
{
"start": 2303,
"end": 6194
}
|
class ____ {
@Value("${app.ad-domain}")
private String adDomain;
@Value("${app.ad-server}")
private String adServer;
@Value("${app.service-principal}")
private String servicePrincipal;
@Value("${app.keytab-location}")
private String keytabLocation;
@Value("${app.ldap-search-base}")
private String ldapSearchBase;
@Value("${app.ldap-search-filter}")
private String ldapSearchFilter;
@Bean
public SecurityFilterChain filterChain(HttpSecurity http) throws Exception {
KerberosServiceAuthenticationProvider kerberosServiceAuthenticationProvider = kerberosServiceAuthenticationProvider();
ActiveDirectoryLdapAuthenticationProvider activeDirectoryLdapAuthenticationProvider = activeDirectoryLdapAuthenticationProvider();
ProviderManager providerManager = new ProviderManager(kerberosServiceAuthenticationProvider,
activeDirectoryLdapAuthenticationProvider);
http
.authorizeHttpRequests((authz) -> authz
.requestMatchers("/", "/home").permitAll()
.anyRequest().authenticated()
)
.exceptionHandling()
.authenticationEntryPoint(spnegoEntryPoint())
.and()
.formLogin()
.loginPage("/login").permitAll()
.and()
.logout()
.permitAll()
.and()
.authenticationProvider(activeDirectoryLdapAuthenticationProvider())
.authenticationProvider(kerberosServiceAuthenticationProvider())
.addFilterBefore(spnegoAuthenticationProcessingFilter(providerManager),
BasicAuthenticationFilter.class);
return http.build();
}
@Bean
public ActiveDirectoryLdapAuthenticationProvider activeDirectoryLdapAuthenticationProvider() {
return new ActiveDirectoryLdapAuthenticationProvider(adDomain, adServer);
}
@Bean
public SpnegoEntryPoint spnegoEntryPoint() {
return new SpnegoEntryPoint("/login");
}
public SpnegoAuthenticationProcessingFilter spnegoAuthenticationProcessingFilter(
AuthenticationManager authenticationManager) {
SpnegoAuthenticationProcessingFilter filter = new SpnegoAuthenticationProcessingFilter();
filter.setAuthenticationManager(authenticationManager);
return filter;
}
public KerberosServiceAuthenticationProvider kerberosServiceAuthenticationProvider() throws Exception {
KerberosServiceAuthenticationProvider provider = new KerberosServiceAuthenticationProvider();
provider.setTicketValidator(sunJaasKerberosTicketValidator());
provider.setUserDetailsService(ldapUserDetailsService());
return provider;
}
@Bean
public SunJaasKerberosTicketValidator sunJaasKerberosTicketValidator() {
SunJaasKerberosTicketValidator ticketValidator = new SunJaasKerberosTicketValidator();
ticketValidator.setServicePrincipal(servicePrincipal);
ticketValidator.setKeyTabLocation(new FileSystemResource(keytabLocation));
ticketValidator.setDebug(true);
return ticketValidator;
}
@Bean
public KerberosLdapContextSource kerberosLdapContextSource() throws Exception {
KerberosLdapContextSource contextSource = new KerberosLdapContextSource(adServer);
contextSource.setLoginConfig(loginConfig());
return contextSource;
}
public SunJaasKrb5LoginConfig loginConfig() throws Exception {
SunJaasKrb5LoginConfig loginConfig = new SunJaasKrb5LoginConfig();
loginConfig.setKeyTabLocation(new FileSystemResource(keytabLocation));
loginConfig.setServicePrincipal(servicePrincipal);
loginConfig.setDebug(true);
loginConfig.setIsInitiator(true);
loginConfig.afterPropertiesSet();
return loginConfig;
}
@Bean
public LdapUserDetailsService ldapUserDetailsService() throws Exception {
FilterBasedLdapUserSearch userSearch =
new FilterBasedLdapUserSearch(ldapSearchBase, ldapSearchFilter, kerberosLdapContextSource());
LdapUserDetailsService service =
new LdapUserDetailsService(userSearch, new ActiveDirectoryLdapAuthoritiesPopulator());
service.setUserDetailsMapper(new LdapUserDetailsMapper());
return service;
}
}
//end::snippetA[]
|
WebSecurityConfig
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHealthMonitorWithDedicatedHealthAddress.java
|
{
"start": 1063,
"end": 1366
}
|
class ____
extends TestHealthMonitor {
@Override
protected DummyHAService createDummyHAService() {
return new DummyHAService(HAServiceState.ACTIVE,
new InetSocketAddress("0.0.0.0", 0),
new InetSocketAddress("0.0.0.0", 0), true);
}
}
|
TestHealthMonitorWithDedicatedHealthAddress
|
java
|
spring-projects__spring-boot
|
integration-test/spring-boot-actuator-integration-tests/src/test/java/org/springframework/boot/actuate/endpoint/web/annotation/AbstractWebEndpointIntegrationTests.java
|
{
"start": 31240,
"end": 31404
}
|
class ____ {
@ReadOperation
String read(String foo, @Nullable String bar) {
return foo;
}
}
@Endpoint(id = "principal")
static
|
RequiredParametersEndpoint
|
java
|
lettuce-io__lettuce-core
|
src/test/java/io/lettuce/core/commands/BitStringCodec.java
|
{
"start": 145,
"end": 564
}
|
class ____ extends StringCodec {
@Override
public String decodeValue(ByteBuffer bytes) {
StringBuilder bits = new StringBuilder(bytes.remaining() * 8);
while (bytes.remaining() > 0) {
byte b = bytes.get();
for (int i = 0; i < 8; i++) {
bits.append(Integer.valueOf(b >>> i & 1));
}
}
return bits.toString();
}
}
|
BitStringCodec
|
java
|
grpc__grpc-java
|
xds/src/test/java/io/grpc/xds/FakeControlPlaneXdsIntegrationTest.java
|
{
"start": 9706,
"end": 11921
}
|
class ____ extends ClientStreamTracer {
boolean usedCluster1;
@Override
public void addOptionalLabel(String key, String value) {
if ("grpc.lb.backend_service".equals(key)) {
usedCluster1 = "cluster1".equals(value);
}
}
}
ClusterClientStreamTracer tracer = new ClusterClientStreamTracer();
ClientStreamTracer.Factory tracerFactory = new ClientStreamTracer.Factory() {
@Override
public ClientStreamTracer newClientStreamTracer(
ClientStreamTracer.StreamInfo info, Metadata headers) {
return tracer;
}
};
ClientInterceptor tracerInterceptor = new ClientInterceptor() {
@Override
public <ReqT, RespT> ClientCall<ReqT, RespT> interceptCall(
MethodDescriptor<ReqT, RespT> method, CallOptions callOptions, Channel next) {
return next.newCall(method, callOptions.withStreamTracerFactory(tracerFactory));
}
};
SimpleServiceGrpc.SimpleServiceBlockingStub stub = SimpleServiceGrpc
.newBlockingStub(dataPlane.getManagedChannel())
.withInterceptors(tracerInterceptor);
SimpleRequest request = SimpleRequest.getDefaultInstance();
SimpleResponse goldenResponse = SimpleResponse.newBuilder()
.setResponseMessage("Hi, xDS! Authority= test-server")
.build();
assertThat(stub.unaryRpc(request)).isEqualTo(goldenResponse);
assertThat(tracer.usedCluster1).isFalse();
// Check for errors when swapping route to cluster1
controlPlane.setRdsConfig(RouteConfiguration.newBuilder()
.setName("route-config.googleapis.com")
.addVirtualHosts(VirtualHost.newBuilder()
.addDomains("test-server")
.addRoutes(Route.newBuilder()
.setMatch(RouteMatch.newBuilder().setPrefix("/").build())
.setRoute(RouteAction.newBuilder().setCluster("cluster1").build())
.build())
.build())
.build());
for (int j = 0; j < 10; j++) {
stub.unaryRpc(request);
if (tracer.usedCluster1) {
break;
}
}
assertThat(tracer.usedCluster1).isTrue();
}
// Captures response headers from the server.
private static
|
ClusterClientStreamTracer
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppendIntEvaluator.java
|
{
"start": 3429,
"end": 4225
}
|
class ____ implements EvalOperator.ExpressionEvaluator.Factory {
private final Source source;
private final EvalOperator.ExpressionEvaluator.Factory field1;
private final EvalOperator.ExpressionEvaluator.Factory field2;
public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory field1,
EvalOperator.ExpressionEvaluator.Factory field2) {
this.source = source;
this.field1 = field1;
this.field2 = field2;
}
@Override
public MvAppendIntEvaluator get(DriverContext context) {
return new MvAppendIntEvaluator(source, field1.get(context), field2.get(context), context);
}
@Override
public String toString() {
return "MvAppendIntEvaluator[" + "field1=" + field1 + ", field2=" + field2 + "]";
}
}
}
|
Factory
|
java
|
resilience4j__resilience4j
|
resilience4j-circuitbreaker/src/main/java/io/github/resilience4j/circuitbreaker/CircuitBreaker.java
|
{
"start": 43203,
"end": 45887
}
|
class ____<T> implements Future<T> {
private final Future<T> future;
private final OnceConsumer<CircuitBreaker> onceToCircuitbreaker;
private final long start;
CircuitBreakerFuture(CircuitBreaker circuitBreaker, Future<T> future) {
this(circuitBreaker, future, circuitBreaker.getCurrentTimestamp());
}
CircuitBreakerFuture(CircuitBreaker circuitBreaker, Future<T> future, long start) {
Objects.requireNonNull(future, "Non null Future is required to decorate");
this.onceToCircuitbreaker = OnceConsumer.of(circuitBreaker);
this.future = future;
this.start = start;
}
@Override
public boolean cancel(boolean mayInterruptIfRunning) {
return future.cancel(mayInterruptIfRunning);
}
@Override
public boolean isCancelled() {
return future.isCancelled();
}
@Override
public boolean isDone() {
return future.isDone();
}
@Override
public T get() throws InterruptedException, ExecutionException {
try {
T v = future.get();
onceToCircuitbreaker
.applyOnce(cb ->
cb.onResult(cb.getCurrentTimestamp() - start, cb.getTimestampUnit(), v)
);
return v;
} catch (CancellationException | InterruptedException e) {
onceToCircuitbreaker.applyOnce(cb -> cb.releasePermission());
throw e;
} catch (Exception e) {
onceToCircuitbreaker.applyOnce(
cb -> cb.onError(cb.getCurrentTimestamp() - start, cb.getTimestampUnit(), e));
throw e;
}
}
@Override
public T get(long timeout, TimeUnit unit)
throws InterruptedException, ExecutionException, TimeoutException {
try {
T v = future.get(timeout, unit);
onceToCircuitbreaker
.applyOnce(cb ->
cb.onResult(cb.getCurrentTimestamp() - start, cb.getTimestampUnit(), v)
);
return v;
} catch (CancellationException | InterruptedException e) {
onceToCircuitbreaker.applyOnce(CircuitBreaker::releasePermission);
throw e;
} catch (Exception e) {
onceToCircuitbreaker.applyOnce(
cb -> cb.onError(cb.getCurrentTimestamp() - start, cb.getTimestampUnit(), e));
throw e;
}
}
}
}
|
CircuitBreakerFuture
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
|
{
"start": 36648,
"end": 37153
}
|
class ____ an internal dir of the viewFs
* ie internal dir of the mount table.
* It is a ready only mount tbale and create, mkdir or delete operations
* are not allowed.
* If called on create or mkdir then this target is the parent of the
* directory in which one is trying to create or mkdir; hence
* in this case the path name passed in is the last component.
* Otherwise this target is the end point of the path and hence
* the path name passed in is null.
*/
static
|
represents
|
java
|
elastic__elasticsearch
|
x-pack/plugin/async-search/src/javaRestTest/java/org/elasticsearch/xpack/search/AsyncSearchHeadersIT.java
|
{
"start": 625,
"end": 2187
}
|
class ____ extends ESRestTestCase {
@ClassRule
public static ElasticsearchCluster cluster = ElasticsearchCluster.local().module("x-pack-async-search").build();
@Override
protected String getTestRestCluster() {
return cluster.getHttpAddresses();
}
@Before
public void createIndex() throws IOException {
client().performRequest(new Request("PUT", "/test_index"));
}
public void testAsyncHeaders() throws IOException {
Response submitResponse = client().performRequest(new Request("POST", "/test_index/_async_search?keep_on_completion=true"));
var asyncExecutionId = assertAsyncHeaders(submitResponse);
Response statusResponse = client().performRequest(new Request("GET", "/_async_search/status/" + asyncExecutionId));
assertAsyncHeaders(statusResponse);
Response resultResponse = client().performRequest(new Request("GET", "/_async_search/" + asyncExecutionId));
assertAsyncHeaders(resultResponse);
}
private String assertAsyncHeaders(Response response) throws IOException {
var json = entityAsMap(response);
var asyncExecutionId = (String) json.get("id");
var isRunning = (boolean) json.get("is_running");
if (asyncExecutionId != null) {
assertThat(response.getHeader("X-ElasticSearch-Async-Id"), equalTo(asyncExecutionId));
}
assertThat(response.getHeader("X-ElasticSearch-Async-Is-Running"), equalTo(isRunning ? "?1" : "?0"));
return asyncExecutionId;
}
}
|
AsyncSearchHeadersIT
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/streaming/api/operators/source/TimestampsAndWatermarksContext.java
|
{
"start": 1393,
"end": 2048
}
|
class ____
implements TimestampAssignerSupplier.Context, WatermarkGeneratorSupplier.Context {
private final MetricGroup metricGroup;
private final RelativeClock inputActivityClock;
public TimestampsAndWatermarksContext(
MetricGroup metricGroup, RelativeClock inputActivityClock) {
this.metricGroup = checkNotNull(metricGroup);
this.inputActivityClock = inputActivityClock;
}
@Override
public MetricGroup getMetricGroup() {
return metricGroup;
}
@Override
public RelativeClock getInputActivityClock() {
return inputActivityClock;
}
}
|
TimestampsAndWatermarksContext
|
java
|
spring-projects__spring-framework
|
spring-web/src/test/java/org/springframework/web/method/annotation/ModelFactoryOrderingTests.java
|
{
"start": 9804,
"end": 10618
}
|
class ____ extends AbstractController {
@ModelAttribute
public A getA(Model model) throws IOException {
return updateAndReturn(model, "getA", new A());
}
@ModelAttribute
public C1 getC1(@ModelAttribute B1 b1, Model model) throws IOException {
return updateAndReturn(model, "getC1", new C1());
}
@ModelAttribute
public C2 getC2(@ModelAttribute B1 b1, Model model) throws IOException {
return updateAndReturn(model, "getC2", new C2());
}
@ModelAttribute
public C3 getC3(@ModelAttribute B2 b2, Model model) throws IOException {
return updateAndReturn(model, "getC3", new C3());
}
@ModelAttribute
public C4 getC4(@ModelAttribute B2 b2, Model model) throws IOException {
return updateAndReturn(model, "getC4", new C4());
}
}
private static
|
UnresolvedDependencyController
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractAutoCreatedLeafQueue.java
|
{
"start": 1317,
"end": 1428
}
|
class ____ dynamic auto created queues managed by an implementation
* of AbstractManagedParentQueue
*/
public
|
for
|
java
|
spring-projects__spring-framework
|
spring-core/src/test/java/org/springframework/core/BridgeMethodResolverTests.java
|
{
"start": 31201,
"end": 31268
}
|
class ____ implements Serializable {
}
private static
|
ParameterType
|
java
|
apache__kafka
|
server-common/src/main/java/org/apache/kafka/server/share/persister/ReadShareGroupStateSummaryResult.java
|
{
"start": 1113,
"end": 2428
}
|
class ____ implements PersisterResult {
private final List<TopicData<PartitionStateSummaryData>> topicsData;
private ReadShareGroupStateSummaryResult(List<TopicData<PartitionStateSummaryData>> topicsData) {
this.topicsData = topicsData;
}
public static ReadShareGroupStateSummaryResult from(ReadShareGroupStateSummaryResponseData data) {
return new Builder()
.setTopicsData(data.results().stream()
.map(readStateSummaryResult -> new TopicData<>(readStateSummaryResult.topicId(),
readStateSummaryResult.partitions().stream()
.map(partitionResult -> PartitionFactory.newPartitionStateSummaryData(
partitionResult.partition(), partitionResult.stateEpoch(), partitionResult.startOffset(),
partitionResult.deliveryCompleteCount(), partitionResult.leaderEpoch(), partitionResult.errorCode(),
partitionResult.errorMessage()))
.collect(Collectors.toList())))
.collect(Collectors.toList()))
.build();
}
public static
|
ReadShareGroupStateSummaryResult
|
java
|
apache__logging-log4j2
|
log4j-1.2-api/src/main/java/org/apache/log4j/spi/OptionHandler.java
|
{
"start": 922,
"end": 979
}
|
interface ____ {
void activateOptions();
}
|
OptionHandler
|
java
|
quarkusio__quarkus
|
extensions/smallrye-fault-tolerance/deployment/src/test/java/io/quarkus/smallrye/faulttolerance/test/circuitbreaker/CircuitBreakerBean.java
|
{
"start": 319,
"end": 639
}
|
class ____ {
private final AtomicBoolean shouldFail = new AtomicBoolean();
@CircuitBreaker(requestVolumeThreshold = 5)
@CircuitBreakerName("my-cb")
public void hello() {
if (!shouldFail.getAndSet(true)) {
return;
}
throw new RuntimeException();
}
}
|
CircuitBreakerBean
|
java
|
redisson__redisson
|
redisson/src/main/java/org/redisson/liveobject/resolver/RIdResolver.java
|
{
"start": 836,
"end": 914
}
|
interface ____<V> {
/**
* RLiveObjectService instantiate the
|
RIdResolver
|
java
|
elastic__elasticsearch
|
x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/action/GetTopNFunctionsActionIT.java
|
{
"start": 420,
"end": 2722
}
|
class ____ extends ProfilingTestCase {
public void testGetTopNFunctionsUnfiltered() throws Exception {
GetStackTracesRequest request = new GetStackTracesRequest(
1000,
600.0d,
1.0d,
1.0d,
null,
null,
null,
null,
null,
null,
null,
null,
null
);
request.setAdjustSampleCount(true);
GetTopNFunctionsResponse response = client().execute(GetTopNFunctionsAction.INSTANCE, request).get();
assertEquals(671, response.getTopN().size());
}
public void testGetTopNFunctionsGroupedByServiceName() throws Exception {
GetStackTracesRequest request = new GetStackTracesRequest(
1000,
600.0d,
1.0d,
1.0d,
null,
null,
null,
new String[] { "service.name" },
null,
null,
null,
null,
null
);
request.setAdjustSampleCount(true);
request.setLimit(50);
GetTopNFunctionsResponse response = client().execute(GetTopNFunctionsAction.INSTANCE, request).get();
assertEquals(50, response.getTopN().size());
}
public void testGetTopNFunctionsFromAPM() throws Exception {
BoolQueryBuilder query = QueryBuilders.boolQuery();
query.must().add(QueryBuilders.termQuery("transaction.name", "encodeSha1"));
query.must().add(QueryBuilders.rangeQuery("@timestamp").lte("1698624000"));
GetStackTracesRequest request = new GetStackTracesRequest(
null,
1.0d,
1.0d,
1.0d,
query,
// also match an index that does not contain stacktrace ids to ensure it is ignored
new String[] { "apm-test-*", "apm-legacy-test-*" },
"transaction.profiler_stack_trace_ids",
new String[] { "transaction.name" },
null,
null,
null,
null,
null
);
GetTopNFunctionsResponse response = client().execute(GetTopNFunctionsAction.INSTANCE, request).get();
assertEquals(42, response.getTopN().size());
}
}
|
GetTopNFunctionsActionIT
|
java
|
google__error-prone
|
core/src/main/java/com/google/errorprone/bugpatterns/RedundantSetterCall.java
|
{
"start": 11159,
"end": 12931
}
|
enum ____ {
SINGLE {
@Override
@Nullable FieldWithValue match(String name, MethodInvocationTree tree, VisitorState state) {
if ((name.startsWith("set") || isWithinAutoValueBuilder(getSymbol(tree), state))
&& tree.getArguments().size() == 1) {
Field field = new SingleField(name);
return new FieldWithValue(field, tree, tree.getArguments().get(0));
}
return null;
}
},
REPEATED {
@Override
@Nullable FieldWithValue match(String name, MethodInvocationTree tree, VisitorState state) {
if (name.startsWith("set") && tree.getArguments().size() == 2) {
Integer index = ASTHelpers.constValue(tree.getArguments().get(0), Integer.class);
if (index != null) {
Field field = new RepeatedField(name, index);
return new FieldWithValue(field, tree, tree.getArguments().get(1));
}
}
return null;
}
},
MAP {
@Override
@Nullable FieldWithValue match(String name, MethodInvocationTree tree, VisitorState state) {
if (name.startsWith("put") && tree.getArguments().size() == 2) {
Object key = ASTHelpers.constValue(tree.getArguments().get(0), Object.class);
if (key != null) {
Field field = new MapField(name, key);
return new FieldWithValue(field, tree, tree.getArguments().get(1));
}
}
return null;
}
};
abstract FieldWithValue match(String name, MethodInvocationTree tree, VisitorState state);
}
private static boolean isWithinAutoValueBuilder(MethodSymbol symbol, VisitorState state) {
return hasAnnotation(symbol.owner, "com.google.auto.value.AutoValue.Builder", state);
}
|
FieldType
|
java
|
google__dagger
|
javatests/dagger/functional/builder/BuilderTest.java
|
{
"start": 9280,
"end": 9335
}
|
interface ____ {}
@Singleton
@Component
|
DepComponent
|
java
|
alibaba__nacos
|
api/src/test/java/com/alibaba/nacos/api/ai/model/mcp/registry/ServerVersionDetailTest.java
|
{
"start": 1011,
"end": 2255
}
|
class ____ extends BasicRequestTest {
@Test
void testSerialize() throws JsonProcessingException {
ServerVersionDetail serverVersionDetail = new ServerVersionDetail();
serverVersionDetail.setVersion("1.0.0");
serverVersionDetail.setRelease_date("2022-01-01T00:00:00Z");
serverVersionDetail.setIs_latest(true);
String json = mapper.writeValueAsString(serverVersionDetail);
assertNotNull(json);
assertTrue(json.contains("\"version\":\"1.0.0\""));
assertTrue(json.contains("\"release_date\":\"2022-01-01T00:00:00Z\""));
assertTrue(json.contains("\"is_latest\":true"));
}
@Test
void testDeserialize() throws JsonProcessingException {
String json = "{\"version\":\"1.0.0\",\"release_date\":\"2022-01-01T00:00:00Z\",\"is_latest\":true}";
ServerVersionDetail serverVersionDetail = mapper.readValue(json, ServerVersionDetail.class);
assertNotNull(serverVersionDetail);
assertEquals("1.0.0", serverVersionDetail.getVersion());
assertEquals("2022-01-01T00:00:00Z", serverVersionDetail.getRelease_date());
assertEquals(true, serverVersionDetail.getIs_latest());
}
}
|
ServerVersionDetailTest
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/OptionalMapUnusedValueTest.java
|
{
"start": 4825,
"end": 5110
}
|
class ____ {
public void bar(Optional<Integer> optional) {
optional.map(
v -> {
return 2;
});
}
}
""")
.expectUnchanged()
.doTest();
}
}
|
Test
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestJobCounters.java
|
{
"start": 18633,
"end": 19829
}
|
class ____ {
private static final int DEFAULT_UNIT_LOAD_SIZE = 10 * 1024 * 1024; // 10mb
// the target value to reach
private long targetValue;
// a list to hold the load objects
private List<String> loadObjects = new ArrayList<String>();
MemoryLoader(long targetValue) {
this.targetValue = targetValue;
}
/**
* Loads the memory to the target value.
*/
void load() {
while (Runtime.getRuntime().totalMemory() < targetValue) {
System.out.println("Loading memory with " + DEFAULT_UNIT_LOAD_SIZE
+ " characters. Current usage : "
+ Runtime.getRuntime().totalMemory());
// load some objects in the memory
loadObjects.add(RandomStringUtils.random(DEFAULT_UNIT_LOAD_SIZE));
// sleep for 100ms
try {
Thread.sleep(100);
} catch (InterruptedException ie) {}
}
}
}
/**
* A mapper that increases the JVM's heap usage to a target value configured
* via {@link MemoryLoaderMapper#TARGET_VALUE} using a {@link MemoryLoader}.
*/
@SuppressWarnings({"deprecation", "unchecked"})
static
|
MemoryLoader
|
java
|
apache__logging-log4j2
|
log4j-api/src/main/java/org/apache/logging/log4j/status/StatusLogger.java
|
{
"start": 18082,
"end": 23390
}
|
class ____ {
@Nullable
static String readProperty(final Map<String, Object> normalizedProperties, final String propertyName) {
final String normalizedPropertyName = normalizePropertyName(propertyName);
final Object value = normalizedProperties.get(normalizedPropertyName);
return (value instanceof String) ? (String) value : null;
}
static Map<String, Object> readAllAvailableProperties() {
final Properties systemProperties = System.getProperties();
final Properties environmentProperties = readEnvironmentProperties();
final Properties fileProvidedProperties = readPropertiesFile(PROPERTIES_FILE_NAME);
return normalizeProperties(systemProperties, environmentProperties, fileProvidedProperties);
}
private static Properties readEnvironmentProperties() {
final Properties properties = new Properties();
properties.putAll(System.getenv());
return properties;
}
// We need to roll out our own `.properties` reader.
// We could have used `PropertiesUtil`, `PropertyFilePropertySource`, etc.
// Consequently, they would delegate to `LoaderUtil`, etc.
// All these mechanisms expect a working `StatusLogger`.
// Hence, in order to be self-sufficient, we cannot rely on them.
static Properties readPropertiesFile(final String propertiesFileName) {
final Properties properties = new Properties();
// Unlike `ClassLoader#getResource()`, which takes absolute resource paths, `Class#getResource()` supports
// relative resource paths. Without a `/` prefix, the resource must be placed into JAR resources as
// `org/apache/logging/log4j/status/log4j2.StatusLogger.properties`. Hence, the `/` prefix.
final String resourceName = '/' + propertiesFileName;
final URL url = StatusLogger.class.getResource(resourceName);
if (url == null) {
return properties;
}
try (final InputStream stream = url.openStream()) {
properties.load(stream);
} catch (final IOException error) {
final String message = String.format("failed reading properties from `%s`", propertiesFileName);
final RuntimeException extendedError = new RuntimeException(message, error);
// There is no logging system at this stage.
// There is nothing we can do but simply dumping the failure.
extendedError.printStackTrace(System.err);
}
return properties;
}
private static Map<String, Object> normalizeProperties(Properties... propertiesList) {
Map<String, Object> map = new HashMap<>();
for (Properties properties : propertiesList) {
properties.forEach((name, value) -> {
final boolean relevant = isRelevantPropertyName(name);
if (relevant) {
final String normalizedName = normalizePropertyName((String) name);
map.put(normalizedName, value);
}
});
}
return map;
}
/**
* Filter to exclude irrelevant property names (i.e., non-string and not {@code log4j}-prefixed) to speed up matching.
* @param propertyName a property name
* @return {@code true}, if the property name is relevant; {@code false}, otherwise
*/
private static boolean isRelevantPropertyName(@Nullable final Object propertyName) {
return propertyName instanceof String && ((String) propertyName).matches("^(?i)log4j.*");
}
/**
* An imperfect property name normalization routine.
* <p>
* It is imperfect, because {@code foo.bar} would match with {@code fo.obar}.
* But it is good enough for the {@code StatusLogger} needs.
* </p>
*
* @param propertyName the input property name
* @return the normalized property name
*/
private static String normalizePropertyName(final String propertyName) {
return propertyName
// Remove separators:
// - dots (properties)
// - dashes (kebab-case)
// - underscores (environment variables)
.replaceAll("[._-]", "")
// Replace all non-ASCII characters.
// Don't remove, otherwise `fooàö` would incorrectly match with `foo`.
// It is safe to replace them with dots, since we've just removed all dots above.
.replaceAll("\\P{InBasic_Latin}", ".")
// Lowercase ASCII – this is safe, since we've just removed all non-ASCII
.toLowerCase(Locale.US)
.replaceAll("^log4j2", "log4j");
}
}
/**
* Wrapper for the default instance for lazy initialization.
* <p>
* The initialization will be performed when the JVM initializes the class.
* Since {@code InstanceHolder} has no other fields or methods,
|
PropertiesUtilsDouble
|
java
|
apache__logging-log4j2
|
log4j-api/src/main/java/org/apache/logging/log4j/status/StatusLogger.java
|
{
"start": 23689,
"end": 39941
}
|
class ____ {
private static volatile StatusLogger INSTANCE = new StatusLogger();
}
private final Config config;
private final StatusConsoleListener fallbackListener;
private final List<StatusListener> listeners;
private final transient ReadWriteLock listenerLock = new ReentrantReadWriteLock();
private final transient Lock listenerReadLock = listenerLock.readLock();
private final transient Lock listenerWriteLock = listenerLock.writeLock();
private final Queue<StatusData> buffer = new ConcurrentLinkedQueue<>();
/**
* Constructs the default instance.
* <p>
* This method is visible for tests.
* </p>
*/
StatusLogger() {
this(
StatusLogger.class.getSimpleName(),
ParameterizedNoReferenceMessageFactory.INSTANCE,
Config.getInstance(),
new StatusConsoleListener(requireNonNull(Config.getInstance().fallbackListenerLevel), System.err));
}
/**
* Constructs an instance using given properties.
* <b>Users should not create new instances, but use {@link #getLogger()} instead!</b>
*
* @param name the logger name
* @param messageFactory the message factory
* @param config the configuration
* @param fallbackListener the fallback listener
* @throws NullPointerException on null {@code name}, {@code messageFactory}, {@code config}, or {@code fallbackListener}
* @since 2.23.0
*/
public StatusLogger(
final String name,
final MessageFactory messageFactory,
final Config config,
final StatusConsoleListener fallbackListener) {
super(requireNonNull(name, "name"), requireNonNull(messageFactory, "messageFactory"));
this.config = requireNonNull(config, "config");
this.fallbackListener = requireNonNull(fallbackListener, "fallbackListener");
this.listeners = new ArrayList<>();
}
/**
* Gets the static instance.
*
* @return the singleton instance
*/
public static StatusLogger getLogger() {
return InstanceHolder.INSTANCE;
}
/**
* Sets the static (i.e., singleton) instance returned by {@link #getLogger()}.
* This method is intended for testing purposes and can have unforeseen consequences if used in production code.
*
* @param logger a logger instance
* @throws NullPointerException on null {@code logger}
* @since 2.23.0
*/
public static void setLogger(final StatusLogger logger) {
InstanceHolder.INSTANCE = requireNonNull(logger, "logger");
}
/**
* Returns the fallback listener.
*
* @return the fallback listener
* @since 2.23.0
*/
public StatusConsoleListener getFallbackListener() {
return fallbackListener;
}
    /**
     * Sets the level of the fallback listener.
     *
     * @param level a level
     * @deprecated Since 2.23.0, instead use the {@link StatusConsoleListener#setLevel(Level) setLevel(Level)} method
     * on the fallback listener returned by {@link #getFallbackListener()}.
     */
    @Deprecated
    public void setLevel(final Level level) {
        requireNonNull(level, "level");
        // Only the fallback listener is affected; registered listeners keep their own levels.
        fallbackListener.setLevel(level);
    }
    /**
     * Registers a new listener.
     * Once any listener is registered, the fallback listener is no longer notified
     * (see {@code notifyListeners}).
     *
     * @param listener a listener to register
     */
    public void registerListener(final StatusListener listener) {
        requireNonNull(listener, "listener");
        // All mutations of `listeners` happen under the write lock.
        listenerWriteLock.lock();
        try {
            listeners.add(listener);
        } finally {
            listenerWriteLock.unlock();
        }
    }
    /**
     * Removes the given listener, closing it in the process.
     * Removing a listener that was never registered still closes it.
     *
     * @param listener a listener to remove
     */
    public void removeListener(final StatusListener listener) {
        requireNonNull(listener, "listener");
        listenerWriteLock.lock();
        try {
            listeners.remove(listener);
            // NOTE(review): the close happens while still holding the write lock, so a
            // slow `close()` blocks concurrent (de)registrations — confirm this is intended.
            closeListenerSafely(listener);
        } finally {
            listenerWriteLock.unlock();
        }
    }
    /**
     * Sets the level of the fallback listener.
     *
     * @param level a level
     * @since 2.6
     * @deprecated Since 2.23.0, instead use the {@link StatusConsoleListener#setLevel(Level) setLevel(Level)} method
     * on the fallback listener returned by {@link #getFallbackListener()}.
     */
    @Deprecated
    public void updateListenerLevel(final Level level) {
        requireNonNull(level, "level");
        // Despite the name, only the fallback listener's level is updated.
        fallbackListener.setLevel(level);
    }
/**
* Returns the listener collection.
*
* @return a thread-safe read-only collection of listeners
*/
public Iterable<StatusListener> getListeners() {
listenerReadLock.lock();
try {
return Collections.unmodifiableCollection(listeners);
} finally {
listenerReadLock.unlock();
}
}
/**
* Clears the event buffer, removes the <em>registered</em> (not the fallback one!) listeners, and resets the fallback listener.
*/
public void reset() {
listenerWriteLock.lock();
try {
final Iterator<StatusListener> listenerIterator = listeners.iterator();
while (listenerIterator.hasNext()) {
final StatusListener listener = listenerIterator.next();
closeListenerSafely(listener);
listenerIterator.remove();
}
} finally {
listenerWriteLock.unlock();
}
fallbackListener.close();
buffer.clear();
}
private static void closeListenerSafely(final StatusListener listener) {
try {
listener.close();
} catch (final IOException error) {
final String message = String.format("failed closing listener: %s", listener);
final RuntimeException extendedError = new RuntimeException(message, error);
// There is no logging system at this stage.
// There is nothing we can do but simply dumping the failure.
extendedError.printStackTrace(System.err);
}
}
/**
* Returns buffered events.
*
* @deprecated Since 2.23.0, instead of relying on the buffering provided by {@code StatusLogger},
* users should register their own listeners to access to logged events.
* @return a thread-safe read-only collection of buffered events
*/
@Deprecated
public List<StatusData> getStatusData() {
// Wrapping the buffer clone with an unmodifiable list.
// By disallowing modifications, we make it clear to the user that mutations will not get propagated.
// `Collections.unmodifiableList(new ArrayList<>(...))` should be replaced with `List.of()` in Java 9+.
return Collections.unmodifiableList(new ArrayList<>(buffer));
}
    /**
     * Clears the event buffer. Registered listeners and the fallback listener are unaffected.
     *
     * @deprecated Since 2.23.0, instead of relying on the buffering provided by {@code StatusLogger},
     * users should register their own listeners to access to logged events.
     */
    @Deprecated
    public void clear() {
        buffer.clear();
    }
/**
* Returns the least specific level among listeners, if registered any; otherwise, the fallback listener level.
*
* @return the least specific listener level, if registered any; otherwise, the fallback listener level
*/
@Override
public Level getLevel() {
Level leastSpecificLevel = fallbackListener.getStatusLevel();
// noinspection ForLoopReplaceableByForEach (avoid iterator instantiation)
for (int listenerIndex = 0; listenerIndex < listeners.size(); listenerIndex++) {
final StatusListener listener = listeners.get(listenerIndex);
final Level listenerLevel = listener.getStatusLevel();
if (listenerLevel.isLessSpecificThan(leastSpecificLevel)) {
leastSpecificLevel = listenerLevel;
}
}
return leastSpecificLevel;
}
@Override
@SuppressFBWarnings("INFORMATION_EXPOSURE_THROUGH_AN_ERROR_MESSAGE")
public void logMessage(
final String fqcn,
final Level level,
final Marker marker,
final Message message,
final Throwable throwable) {
try {
final StatusData statusData = createStatusData(fqcn, level, message, throwable);
buffer(statusData);
notifyListeners(statusData);
} catch (final Exception error) {
// We are at the lowest level of the system.
// Hence, there is nothing better we can do but dumping the failure.
error.printStackTrace(System.err);
}
}
private void buffer(final StatusData statusData) {
if (config.bufferCapacity == 0) {
return;
}
buffer.add(statusData);
while (buffer.size() >= config.bufferCapacity) {
buffer.remove();
}
}
private void notifyListeners(final StatusData statusData) {
final boolean foundListeners;
listenerReadLock.lock();
try {
foundListeners = !listeners.isEmpty();
listeners.forEach(listener -> notifyListener(listener, statusData));
} finally {
listenerReadLock.unlock();
}
if (!foundListeners) {
notifyListener(fallbackListener, statusData);
}
}
private void notifyListener(final StatusListener listener, final StatusData statusData) {
final boolean levelEnabled = isLevelEnabled(listener.getStatusLevel(), statusData.getLevel());
if (levelEnabled) {
listener.log(statusData);
}
}
private StatusData createStatusData(
@Nullable final String fqcn,
final Level level,
final Message message,
@Nullable final Throwable throwable) {
final StackTraceElement caller = getStackTraceElement(fqcn);
final Instant instant = Instant.now();
return new StatusData(caller, level, message, throwable, null, config.instantFormatter, instant);
}
@Nullable
private static StackTraceElement getStackTraceElement(@Nullable final String fqcn) {
if (fqcn == null) {
return null;
}
boolean next = false;
final StackTraceElement[] stackTrace = Thread.currentThread().getStackTrace();
for (final StackTraceElement element : stackTrace) {
final String className = element.getClassName();
if (next && !fqcn.equals(className)) {
return element;
}
if (fqcn.equals(className)) {
next = true;
} else if ("?".equals(className)) {
break;
}
}
return null;
}
    // ------------------------------------------------------------------------
    // `isEnabled` overloads.
    //
    // Every overload below deliberately ignores the message and parameter
    // arguments: status-logger filtering depends only on the level and on the
    // effective logger level. They all delegate to the two-argument
    // `isEnabled(Level, Marker)` overload defined after this group.
    // ------------------------------------------------------------------------
    @Override
    public boolean isEnabled(final Level level, final Marker marker, final String message, final Throwable throwable) {
        return isEnabled(level, marker);
    }
    @Override
    public boolean isEnabled(final Level level, final Marker marker, final String message) {
        return isEnabled(level, marker);
    }
    @Override
    public boolean isEnabled(final Level level, final Marker marker, final String message, final Object... params) {
        return isEnabled(level, marker);
    }
    @Override
    public boolean isEnabled(final Level level, final Marker marker, final String message, final Object p0) {
        return isEnabled(level, marker);
    }
    @Override
    public boolean isEnabled(
            final Level level, final Marker marker, final String message, final Object p0, final Object p1) {
        return isEnabled(level, marker);
    }
    @Override
    public boolean isEnabled(
            final Level level,
            final Marker marker,
            final String message,
            final Object p0,
            final Object p1,
            final Object p2) {
        return isEnabled(level, marker);
    }
    @Override
    public boolean isEnabled(
            final Level level,
            final Marker marker,
            final String message,
            final Object p0,
            final Object p1,
            final Object p2,
            final Object p3) {
        return isEnabled(level, marker);
    }
    @Override
    public boolean isEnabled(
            final Level level,
            final Marker marker,
            final String message,
            final Object p0,
            final Object p1,
            final Object p2,
            final Object p3,
            final Object p4) {
        return isEnabled(level, marker);
    }
    @Override
    public boolean isEnabled(
            final Level level,
            final Marker marker,
            final String message,
            final Object p0,
            final Object p1,
            final Object p2,
            final Object p3,
            final Object p4,
            final Object p5) {
        return isEnabled(level, marker);
    }
    @Override
    public boolean isEnabled(
            final Level level,
            final Marker marker,
            final String message,
            final Object p0,
            final Object p1,
            final Object p2,
            final Object p3,
            final Object p4,
            final Object p5,
            final Object p6) {
        return isEnabled(level, marker);
    }
    @Override
    public boolean isEnabled(
            final Level level,
            final Marker marker,
            final String message,
            final Object p0,
            final Object p1,
            final Object p2,
            final Object p3,
            final Object p4,
            final Object p5,
            final Object p6,
            final Object p7) {
        return isEnabled(level, marker);
    }
    @Override
    public boolean isEnabled(
            final Level level,
            final Marker marker,
            final String message,
            final Object p0,
            final Object p1,
            final Object p2,
            final Object p3,
            final Object p4,
            final Object p5,
            final Object p6,
            final Object p7,
            final Object p8) {
        return isEnabled(level, marker);
    }
    @Override
    public boolean isEnabled(
            final Level level,
            final Marker marker,
            final String message,
            final Object p0,
            final Object p1,
            final Object p2,
            final Object p3,
            final Object p4,
            final Object p5,
            final Object p6,
            final Object p7,
            final Object p8,
            final Object p9) {
        return isEnabled(level, marker);
    }
    @Override
    public boolean isEnabled(
            final Level level, final Marker marker, final CharSequence message, final Throwable throwable) {
        return isEnabled(level, marker);
    }
    @Override
    public boolean isEnabled(final Level level, final Marker marker, final Object message, final Throwable throwable) {
        return isEnabled(level, marker);
    }
    @Override
    public boolean isEnabled(final Level level, final Marker marker, final Message message, final Throwable throwable) {
        return isEnabled(level, marker);
    }
@Override
public boolean isEnabled(final Level messageLevel, final Marker marker) {
requireNonNull(messageLevel, "messageLevel");
final Level loggerLevel = getLevel();
return isLevelEnabled(loggerLevel, messageLevel);
}
/**
* Checks if the message level is allowed for the filtering level (e.g., of logger, of listener) by taking debug mode into account.
*
* @param filteringLevel the level (e.g., of logger, of listener) to filter messages
* @param messageLevel the level of the message
* @return {@code true}, if the sink level is less specific than the message level; {@code false}, otherwise
*/
private boolean isLevelEnabled(final Level filteringLevel, final Level messageLevel) {
return config.debugEnabled || filteringLevel.isLessSpecificThan(messageLevel);
}
}
// NOTE(review): removed trailing non-source residue (dataset/web-UI text — "InstanceHolder",
// "Subsets and Splits", etc. — accidentally appended after the closing brace of the class).