language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | micronaut-projects__micronaut-core | inject/src/main/java/io/micronaut/inject/DefaultBeanIdentifier.java | {
"start": 829,
"end": 1881
} | class ____ implements BeanIdentifier {
private final String id;
/**
* The constructor.
*
* @param id The id
*/
DefaultBeanIdentifier(String id) {
this.id = id;
}
@Override
public String toString() {
return id;
}
@Override
public String getName() {
return id;
}
@Override
public int length() {
return id.length();
}
@Override
public char charAt(int index) {
return id.charAt(index);
}
@Override
public CharSequence subSequence(int start, int end) {
return id.subSequence(start, end);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
DefaultBeanIdentifier that = (DefaultBeanIdentifier) o;
return Objects.equals(id, that.id);
}
@Override
public int hashCode() {
return id.hashCode();
}
}
| DefaultBeanIdentifier |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/context/annotation/DependsOn.java | {
"start": 2210,
"end": 2266
} | interface ____ {
String[] value() default {};
}
| DependsOn |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/utils/RankProcessStrategy.java | {
"start": 3924,
"end": 4434
} | class ____ implements RankProcessStrategy {
@JsonCreator
public RetractStrategy() {}
@Override
public String toString() {
return "RetractStrategy";
}
}
/**
* A strategy which only works when input shouldn't contains deletion changes and input should
* have the given {@link #primaryKeys} and should be monotonic on the order by field.
*/
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonTypeName("UpdateFast")
| RetractStrategy |
java | elastic__elasticsearch | x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcTestUtils.java | {
"start": 411,
"end": 691
} | class ____ {
private JdbcTestUtils() {}
static ZonedDateTime nowWithMillisResolution(ZoneId zoneId) {
Clock millisResolutionClock = Clock.tick(Clock.system(zoneId), Duration.ofMillis(1));
return ZonedDateTime.now(millisResolutionClock);
}
}
| JdbcTestUtils |
java | spring-projects__spring-framework | spring-jms/src/main/java/org/springframework/jms/TransactionInProgressException.java | {
"start": 866,
"end": 1040
} | class ____ extends JmsException {
public TransactionInProgressException(jakarta.jms.TransactionInProgressException cause) {
super(cause);
}
}
| TransactionInProgressException |
java | quarkusio__quarkus | extensions/devui/deployment-spi/src/main/java/io/quarkus/devui/spi/page/LibraryLink.java | {
"start": 91,
"end": 2371
} | class ____ {
private String groupId;
private String artifactId;
private String name;
private URL url;
private String version;
public LibraryLink() {
}
public LibraryLink(String groupId, String artifactId, String name, URL url) {
this(groupId, artifactId, name, url, null);
}
public LibraryLink(String groupId, String artifactId, String name, URL url, String version) {
this.groupId = groupId;
this.artifactId = artifactId;
this.name = name;
this.url = url;
this.version = version;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public URL getUrl() {
return url;
}
public void setUrl(URL url) {
this.url = url;
}
public String getVersion() {
return version;
}
public void setVersion(String version) {
this.version = version;
}
public String getGroupId() {
return groupId;
}
public void setGroupId(String groupId) {
this.groupId = groupId;
}
public String getArtifactId() {
return artifactId;
}
public void setArtifactId(String artifactId) {
this.artifactId = artifactId;
}
@Override
public int hashCode() {
int hash = 7;
hash = 53 * hash + Objects.hashCode(this.groupId);
hash = 53 * hash + Objects.hashCode(this.artifactId);
hash = 53 * hash + Objects.hashCode(this.name);
hash = 53 * hash + Objects.hashCode(this.url);
return hash;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
final LibraryLink other = (LibraryLink) obj;
if (!Objects.equals(this.groupId, other.groupId)) {
return false;
}
if (!Objects.equals(this.artifactId, other.artifactId)) {
return false;
}
if (!Objects.equals(this.name, other.name)) {
return false;
}
return Objects.equals(this.url, other.url);
}
}
| LibraryLink |
java | apache__dubbo | dubbo-test/dubbo-test-check/src/main/java/org/apache/dubbo/test/check/registrycenter/processor/ZookeeperUnixProcessor.java | {
"start": 1740,
"end": 4865
} | class ____ implements Processor {
private static final ErrorTypeAwareLogger logger =
LoggerFactory.getErrorTypeAwareLogger(ZookeeperUnixProcessor.class);
@Override
public void process(Context context) throws DubboTestException {
ZookeeperContext zookeeperContext = (ZookeeperContext) context;
for (int clientPort : zookeeperContext.getClientPorts()) {
Process process = this.doProcess(zookeeperContext, clientPort);
this.logErrorStream(process.getErrorStream());
this.awaitProcessReady(process.getInputStream());
// kill the process
try {
process.destroy();
} catch (Throwable cause) {
logger.warn(
TESTING_REGISTRY_FAILED_TO_STOP_ZOOKEEPER,
"",
"",
String.format("Failed to kill the process, with client port %s !", clientPort),
cause);
}
}
}
/**
* Prints the error log after run {@link Process}.
*
* @param errorStream the error stream.
*/
private void logErrorStream(final InputStream errorStream) {
try (final BufferedReader reader = new BufferedReader(new InputStreamReader(errorStream))) {
String line;
while ((line = reader.readLine()) != null) {
logger.error(TESTING_REGISTRY_FAILED_TO_START_ZOOKEEPER, "", "", line);
}
} catch (IOException e) {
/* eat quietly */
}
}
/**
* Wait until the server is started successfully.
*
* @param inputStream the log after run {@link Process}.
* @throws DubboTestException if cannot match the given pattern.
*/
private void awaitProcessReady(final InputStream inputStream) throws DubboTestException {
final StringBuilder log = new StringBuilder();
try (final BufferedReader reader = new BufferedReader(new InputStreamReader(inputStream))) {
String line;
while ((line = reader.readLine()) != null) {
if (this.getPattern().matcher(line).matches()) {
return;
}
log.append('\n').append(line);
}
} catch (IOException e) {
throw new DubboTestException("Failed to read the log after executed process.", e);
}
throw new DubboTestException("Ready pattern not found in log, log: " + log);
}
/**
* Use {@link Process} to handle the command.
*
* @param context the global zookeeper context.
* @param clientPort the client port of zookeeper.
* @return the instance of {@link Process}.
* @throws DubboTestException when any exception occurred.
*/
protected abstract Process doProcess(ZookeeperContext context, int clientPort) throws DubboTestException;
/**
* Gets the pattern to check the server is ready or not.
*
* @return the pattern for checking.
*/
protected abstract Pattern getPattern();
}
| ZookeeperUnixProcessor |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/masterreplica/SentinelTopologyRefreshConnections.java | {
"start": 678,
"end": 1532
} | class ____ {@code expectedCount} notifications.
*
* @param expectedCount
*/
public SentinelTopologyRefreshConnections(int expectedCount) {
super(expectedCount);
}
@Override
protected void onAccept(StatefulRedisPubSubConnection<String, String> value) {
success.incrementAndGet();
}
@Override
protected void onError(Throwable value) {
exceptions.add(value);
}
@Override
protected void onEmit(Emission<SentinelTopologyRefreshConnections> emission) {
if (success.get() == 0) {
RedisException exception = new RedisException("Cannot attach to Redis Sentinel for topology refresh");
exceptions.forEach(exception::addSuppressed);
emission.error(exception);
} else {
emission.success(this);
}
}
}
| expecting |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/NegativeBooleanTest.java | {
"start": 1821,
"end": 2108
} | class ____ {
void foo() {
boolean notableBar;
}
}
""")
.doTest();
}
@Test
public void integer_isNotFlagged() {
testHelper
.addSourceLines(
"Test.java",
"""
| Test |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherMultiGeoPointIndexFieldData.java | {
"start": 1137,
"end": 1272
} | class ____ extends SourceValueFetcherIndexFieldData<MultiGeoPointValues> {
public static | SourceValueFetcherMultiGeoPointIndexFieldData |
java | alibaba__nacos | naming/src/main/java/com/alibaba/nacos/naming/push/NamingSubscriberService.java | {
"start": 860,
"end": 2473
} | interface ____ {
/**
* Get all push target subscribers for specified service.
* TODO use {@link com.alibaba.nacos.api.naming.pojo.maintainer.SubscriberInfo} replaced return
*
* @param namespaceId namespace id
* @param serviceName service name
* @return list of subscribers
*/
Collection<Subscriber> getSubscribers(String namespaceId, String serviceName);
/**
* Get all push target subscribers for specified service.
* TODO use {@link com.alibaba.nacos.api.naming.pojo.maintainer.SubscriberInfo} replaced return
*
* @param service {@link Service}
* @return list of subscribers
*/
Collection<Subscriber> getSubscribers(Service service);
/**
* Fuzzy get subscribers. Only support fuzzy serviceName.
*
* <p>Warning: This method cost much performance, use less.
* TODO use {@link com.alibaba.nacos.api.naming.pojo.maintainer.SubscriberInfo} replaced return
*
* @param namespaceId namespace id
* @param serviceName fuzzy serviceName
* @return list of subscribers
*/
Collection<Subscriber> getFuzzySubscribers(String namespaceId, String serviceName);
/**
* Fuzzy get subscribers. Only support fuzzy serviceName.
*
* <p>Warning: This method cost much performance, use less.
* TODO use {@link com.alibaba.nacos.api.naming.pojo.maintainer.SubscriberInfo} replaced return
*
* @param service {@link Service}
* @return list of subscribers
*/
Collection<Subscriber> getFuzzySubscribers(Service service);
}
| NamingSubscriberService |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/argumentselectiondefects/NameInCommentHeuristicTest.java | {
"start": 1821,
"end": 3103
} | class ____ extends BugChecker
implements MethodInvocationTreeMatcher {
@Override
public Description matchMethodInvocation(MethodInvocationTree tree, VisitorState state) {
ImmutableList<Parameter> formal =
Parameter.createListFromVarSymbols(ASTHelpers.getSymbol(tree).getParameters());
Stream<Parameter> actual =
Parameter.createListFromExpressionTrees(tree.getArguments()).stream();
Changes changes =
Changes.create(
formal.stream().map(f -> 1.0).collect(toImmutableList()),
formal.stream().map(f -> 0.0).collect(toImmutableList()),
Streams.zip(formal.stream(), actual, ParameterPair::create)
.collect(toImmutableList()));
boolean result =
!new NameInCommentHeuristic()
.isAcceptableChange(changes, tree, ASTHelpers.getSymbol(tree), state);
return buildDescription(tree).setMessage(String.valueOf(result)).build();
}
}
@Test
public void nameInCommentHeuristic_returnsTrue_whereCommentMatchesFormalParameter() {
CompilationTestHelper.newInstance(NameInCommentHeuristicChecker.class, getClass())
.addSourceLines(
"Test.java",
"""
abstract | NameInCommentHeuristicChecker |
java | elastic__elasticsearch | x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/GetInferenceModelResponseTests.java | {
"start": 906,
"end": 2322
} | class ____ extends AbstractWireSerializingTestCase<GetInferenceModelAction.Response> {
@Override
protected Writeable.Reader<GetInferenceModelAction.Response> instanceReader() {
return GetInferenceModelAction.Response::new;
}
@Override
protected GetInferenceModelAction.Response createTestInstance() {
int numModels = randomIntBetween(1, 5);
var modelConfigs = new ArrayList<ModelConfigurations>();
for (int i = 0; i < numModels; i++) {
modelConfigs.add(ModelConfigurationsTests.createRandomInstance());
}
return new GetInferenceModelAction.Response(modelConfigs);
}
@Override
protected GetInferenceModelAction.Response mutateInstance(GetInferenceModelAction.Response instance) throws IOException {
var modifiedConfigs = new ArrayList<>(instance.getEndpoints());
modifiedConfigs.add(ModelConfigurationsTests.createRandomInstance());
return new GetInferenceModelAction.Response(modifiedConfigs);
}
@Override
protected NamedWriteableRegistry getNamedWriteableRegistry() {
List<NamedWriteableRegistry.Entry> namedWriteables = new ArrayList<>(InferenceNamedWriteablesProvider.getNamedWriteables());
namedWriteables.addAll(XPackClientPlugin.getChunkingSettingsNamedWriteables());
return new NamedWriteableRegistry(namedWriteables);
}
}
| GetInferenceModelResponseTests |
java | micronaut-projects__micronaut-core | inject-java/src/test/groovy/io/micronaut/inject/lifecycle/beancreationeventlistener/OffendingChainListener.java | {
"start": 230,
"end": 563
} | class ____ implements BeanCreatedEventListener<B> {
static boolean initialized;
static boolean executed;
OffendingChainListener(D d) {
initialized = true;
}
@Override
public B onCreated(BeanCreatedEvent<B> event) {
executed = true;
return event.getBean();
}
}
| OffendingChainListener |
java | hibernate__hibernate-orm | hibernate-spatial/src/test/java/org/hibernate/spatial/testing/datareader/TestSupport.java | {
"start": 913,
"end": 1403
} | class ____ {
//TODO -- make this abstract
public NativeSQLTemplates templates() {
return null;
}
//TODO -- make this abstract
public PredicateRegexes predicateRegexes() {
return null;
}
public Map<CommonSpatialFunction, String> hqlOverrides() {
return new HashMap<>();
}
public List<CommonSpatialFunction> getExcludeFromTests() {
return new ArrayList<>();
}
public GeometryEquality getGeometryEquality() {
return new GeometryPositionEquality();
}
public | TestSupport |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/testng/AnnotationConfigTransactionalTestNGSpringContextTests.java | {
"start": 2397,
"end": 5577
} | class ____
extends AbstractTransactionalTestNGSpringContextTests {
private static final String JANE = "jane";
private static final String SUE = "sue";
private static final String YODA = "yoda";
private static final int NUM_TESTS = 2;
private static final int NUM_TX_TESTS = 1;
private static int numSetUpCalls = 0;
private static int numSetUpCallsInTransaction = 0;
private static int numTearDownCalls = 0;
private static int numTearDownCallsInTransaction = 0;
@Autowired
private Employee employee;
@Autowired
private Pet pet;
private int createPerson(String name) {
return jdbcTemplate.update("INSERT INTO person VALUES(?)", name);
}
private int deletePerson(String name) {
return jdbcTemplate.update("DELETE FROM person WHERE name=?", name);
}
private void assertNumRowsInPersonTable(int expectedNumRows, String testState) {
assertThat(countRowsInTable("person"))
.as("the number of rows in the person table (" + testState + ").")
.isEqualTo(expectedNumRows);
}
private void assertAddPerson(String name) {
assertThat(createPerson(name)).as("Adding '%s'", name).isEqualTo(1);
}
@BeforeClass
void beforeClass() {
numSetUpCalls = 0;
numSetUpCallsInTransaction = 0;
numTearDownCalls = 0;
numTearDownCallsInTransaction = 0;
}
@AfterClass
void afterClass() {
assertThat(numSetUpCalls).as("number of calls to setUp().").isEqualTo(NUM_TESTS);
assertThat(numSetUpCallsInTransaction).as("number of calls to setUp() within a transaction.").isEqualTo(NUM_TX_TESTS);
assertThat(numTearDownCalls).as("number of calls to tearDown().").isEqualTo(NUM_TESTS);
assertThat(numTearDownCallsInTransaction).as("number of calls to tearDown() within a transaction.").isEqualTo(NUM_TX_TESTS);
}
@Test
@Transactional(propagation = Propagation.NOT_SUPPORTED)
void autowiringFromConfigClass() {
assertThat(employee).as("The employee should have been autowired.").isNotNull();
assertThat(employee.getName()).isEqualTo("John Smith");
assertThat(pet).as("The pet should have been autowired.").isNotNull();
assertThat(pet.getName()).isEqualTo("Fido");
}
@BeforeTransaction
void beforeTransaction() {
assertNumRowsInPersonTable(1, "before a transactional test method");
assertAddPerson(YODA);
}
@BeforeMethod
void setUp() {
numSetUpCalls++;
if (isActualTransactionActive()) {
numSetUpCallsInTransaction++;
}
assertNumRowsInPersonTable((isActualTransactionActive() ? 2 : 1), "before a test method");
}
@Test
void modifyTestDataWithinTransaction() {
assertThatTransaction().isActive();
assertAddPerson(JANE);
assertAddPerson(SUE);
assertNumRowsInPersonTable(4, "in modifyTestDataWithinTransaction()");
}
@AfterMethod
void tearDown() {
numTearDownCalls++;
if (isActualTransactionActive()) {
numTearDownCallsInTransaction++;
}
assertNumRowsInPersonTable((isActualTransactionActive() ? 4 : 1), "after a test method");
}
@AfterTransaction
void afterTransaction() {
assertThat(deletePerson(YODA)).as("Deleting yoda").isEqualTo(1);
assertNumRowsInPersonTable(1, "after a transactional test method");
}
@Configuration
static | AnnotationConfigTransactionalTestNGSpringContextTests |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/JUnit4SetUpNotRunTest.java | {
"start": 7546,
"end": 7735
} | class ____ {
@Before
public void setUp() {}
}
/** setUp() method overrides parent method with @Before. It will be run by JUnit4BlockRunner */
@RunWith(JUnit4.class)
| SetUpAnnotatedBaseClass |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/writeAsArray/WriteAsArray_string_special_Reader.java | {
"start": 318,
"end": 1424
} | class ____ extends TestCase {
public void test_0() throws Exception {
Model model = new Model();
model.name = "a\\bc";
String text = JSON.toJSONString(model, SerializerFeature.BeanToArray);
Assert.assertEquals("[\"a\\\\bc\"]", text);
JSONReader reader = new JSONReader(new StringReader(text));
reader.config(Feature.SupportArrayToBean, true);
Model model2 = reader.readObject(Model.class);
Assert.assertEquals(model.name, model2.name);
reader.close();
}
public void test_1() throws Exception {
Model model = new Model();
model.name = "a\\bc\"";
String text = JSON.toJSONString(model, SerializerFeature.BeanToArray);
Assert.assertEquals("[\"a\\\\bc\\\"\"]", text);
JSONReader reader = new JSONReader(new StringReader(text));
reader.config(Feature.SupportArrayToBean, true);
Model model2 = reader.readObject(Model.class);
Assert.assertEquals(model.name, model2.name);
reader.close();
}
public static | WriteAsArray_string_special_Reader |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/core/publisher/MonoNever.java | {
"start": 1109,
"end": 1864
} | class ____
extends Mono<Object> implements SourceProducer<Object> {
static final Mono<Object> INSTANCE = new MonoNever();
MonoNever() {
// deliberately no op
}
@Override
public void subscribe(CoreSubscriber<? super Object> actual) {
actual.onSubscribe(Operators.emptySubscription());
}
@Override
public @Nullable Object scanUnsafe(Attr key) {
if (key == Attr.RUN_STYLE) return Attr.RunStyle.SYNC;
return SourceProducer.super.scanUnsafe(key);
}
/**
* Returns a properly parametrized instance of this never Publisher.
*
* @param <T> the value type
* @return a properly parametrized instance of this never Publisher
*/
@SuppressWarnings("unchecked")
static <T> Mono<T> instance() {
return (Mono<T>) INSTANCE;
}
}
| MonoNever |
java | spring-projects__spring-boot | loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/DefaultLibraryCoordinates.java | {
"start": 828,
"end": 2052
} | class ____ implements LibraryCoordinates {
private final @Nullable String groupId;
private final @Nullable String artifactId;
private final @Nullable String version;
/**
* Create a new instance from discrete elements.
* @param groupId the group ID
* @param artifactId the artifact ID
* @param version the version
*/
DefaultLibraryCoordinates(@Nullable String groupId, @Nullable String artifactId, @Nullable String version) {
this.groupId = groupId;
this.artifactId = artifactId;
this.version = version;
}
/**
* Return the group ID of the coordinates.
* @return the group ID
*/
@Override
public @Nullable String getGroupId() {
return this.groupId;
}
/**
* Return the artifact ID of the coordinates.
* @return the artifact ID
*/
@Override
public @Nullable String getArtifactId() {
return this.artifactId;
}
/**
* Return the version of the coordinates.
* @return the version
*/
@Override
public @Nullable String getVersion() {
return this.version;
}
/**
* Return the coordinates in the form {@code groupId:artifactId:version}.
*/
@Override
public String toString() {
return LibraryCoordinates.toStandardNotationString(this);
}
}
| DefaultLibraryCoordinates |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/LockNotBeforeTryTest.java | {
"start": 4739,
"end": 5209
} | class ____ {
private void test(ReentrantLock lock) {
lock.lock();
System.out.println("hi");
try {
System.out.println("hi");
} finally {
lock.unlock();
}
}
}
""")
.addOutputLines(
"Test.java",
"""
import java.util.concurrent.locks.ReentrantLock;
| Test |
java | quarkusio__quarkus | extensions/oidc-token-propagation-reactive/deployment/src/test/java/io/quarkus/oidc/token/propagation/reactive/deployment/test/AccessTokenAnnotationTest.java | {
"start": 7016,
"end": 7250
} | interface ____ {
@GET
String getUserName();
}
@RegisterRestClient(baseUri = "http://localhost:8081/protected")
@AccessToken(exchangeTokenClient = "named")
@Path("/")
public | DefaultClientEnabledExchange |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/chararray/CharArrayAssert_hasSizeBetween_Test.java | {
"start": 800,
"end": 1142
} | class ____ extends CharArrayAssertBaseTest {
@Override
protected CharArrayAssert invoke_api_method() {
return assertions.hasSizeBetween(4, 6);
}
@Override
protected void verify_internal_effects() {
verify(arrays).assertHasSizeBetween(getInfo(assertions), getActual(assertions), 4, 6);
}
}
| CharArrayAssert_hasSizeBetween_Test |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/collectionincompatibletype/AssertSameIncompatibleTest.java | {
"start": 3079,
"end": 3621
} | class ____ {
void f() {
// BUG: Diagnostic contains: always fail
assertThat(1L).isSameInstanceAs("foo");
// BUG: Diagnostic contains: always pass
assertThat(1L).isNotSameInstanceAs("foo");
}
}
""")
.doTest();
}
@Test
public void bothInterfaces_alwaysCompatible() {
helper
.addSourceLines(
"Test.java",
"""
import static org.junit.Assert.assertSame;
| Test |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/jsontype/TestSubtypes.java | {
"start": 845,
"end": 989
} | class ____ extends SuperType {
public int d;
}
// "Empty" bean
@JsonTypeInfo(use=JsonTypeInfo.Id.NAME)
static abstract | SubD |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/configurers/oauth2/client/OAuth2LoginConfigurerTests.java | {
"start": 54936,
"end": 55865
} | class ____ {
private final ClientRegistrationRepository clientRegistrationRepository = new InMemoryClientRegistrationRepository(
GOOGLE_CLIENT_REGISTRATION);
private final ObjectPostProcessor<AuthenticationProvider> postProcessor = new SpyObjectPostProcessor();
@Bean
SecurityFilterChain filterChain(HttpSecurity http) throws Exception {
// @formatter:off
http
.oauth2Login((oauth2Login) -> oauth2Login
.clientRegistrationRepository(this.clientRegistrationRepository)
.withObjectPostProcessor(this.postProcessor)
);
// @formatter:on
return http.build();
}
@Bean
ObjectPostProcessor<AuthenticationProvider> mockPostProcessor() {
return this.postProcessor;
}
@Bean
HttpSessionOAuth2AuthorizationRequestRepository oauth2AuthorizationRequestRepository() {
return new HttpSessionOAuth2AuthorizationRequestRepository();
}
static | OAuth2LoginConfigCustomWithPostProcessor |
java | apache__flink | flink-libraries/flink-cep/src/main/java/org/apache/flink/cep/PatternTimeoutFunction.java | {
"start": 1697,
"end": 2423
} | interface ____<IN, OUT> extends Function, Serializable {
/**
* Generates a timeout result from the given map of events and timeout timestamp. The partial
* events are identified by their names. Only one resulting element can be generated.
*
* @param pattern Map containing the found partial pattern. Events are identified by their names
* @param timeoutTimestamp Timestamp of the timeout
* @return Resulting timeout element
* @throws Exception This method may throw exceptions. Throwing an exception will cause the
* operation to fail and may trigger recovery.
*/
OUT timeout(Map<String, List<IN>> pattern, long timeoutTimestamp) throws Exception;
}
| PatternTimeoutFunction |
java | micronaut-projects__micronaut-core | core/src/main/java/io/micronaut/core/reflect/InstantiationUtils.java | {
"start": 10482,
"end": 11623
} | class ____ any exceptions as {@link InstantiationException},
* only using reflection.
*
* @param type The type
* @param argTypes The argument types
* @param args The values of arguments
* @param <T> The generic type
* @return The instantiated instance
* @throws InstantiationException When an error occurs
* @since 4.8.0
*/
@NonNull
@UsedByGeneratedCode
public static <T> T instantiateReflectively(Class<T> type, Class<?>[] argTypes, Object[] args) {
try {
Logger log = ClassUtils.REFLECTION_LOGGER;
if (log.isDebugEnabled()) {
log.debug("Reflectively instantiating type: {}", type);
}
final Constructor<T> declaredConstructor = type.getDeclaredConstructor(argTypes);
declaredConstructor.setAccessible(true);
return declaredConstructor.newInstance(args);
} catch (Throwable e) {
throw new InstantiationException("Could not instantiate type [" + type.getName() + "]: " + e.getMessage(), e);
}
}
/**
* Instantiate the given | rethrowing |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/tasks/TaskCancellationService.java | {
"start": 17658,
"end": 18795
} | class ____ extends AbstractTransportRequest {
private final TaskId parentTaskId;
private final long childRequestId;
private final String reason;
static CancelChildRequest createCancelChildRequest(TaskId parentTaskId, long childRequestId, String reason) {
return new CancelChildRequest(parentTaskId, childRequestId, reason);
}
private CancelChildRequest(TaskId parentTaskId, long childRequestId, String reason) {
this.parentTaskId = parentTaskId;
this.childRequestId = childRequestId;
this.reason = reason;
}
private CancelChildRequest(StreamInput in) throws IOException {
super(in);
parentTaskId = TaskId.readFromStream(in);
childRequestId = in.readLong();
reason = in.readString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
parentTaskId.writeTo(out);
out.writeLong(childRequestId);
out.writeString(reason);
}
}
private | CancelChildRequest |
java | elastic__elasticsearch | x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java | {
"start": 11904,
"end": 13599
} | class ____ extends Field {
private static final FieldType TYPE = new FieldType();
static {
TYPE.setDimensions(2, Integer.BYTES);
TYPE.setDocValuesType(DocValuesType.SORTED_NUMERIC);
TYPE.freeze();
}
// holds the doc value value.
private final long docValue;
XYFieldWithDocValues(String name, float x, float y) {
super(name, TYPE);
final byte[] bytes;
if (fieldsData == null) {
bytes = new byte[8];
fieldsData = new BytesRef(bytes);
} else {
bytes = ((BytesRef) fieldsData).bytes;
}
int xEncoded = XYEncodingUtils.encode(x);
int yEncoded = XYEncodingUtils.encode(y);
NumericUtils.intToSortableBytes(xEncoded, bytes, 0);
NumericUtils.intToSortableBytes(yEncoded, bytes, 4);
docValue = (((long) xEncoded) << 32) | (yEncoded & 0xFFFFFFFFL);
}
@Override
public Number numericValue() {
return docValue;
}
@Override
public String toString() {
StringBuilder result = new StringBuilder();
result.append(getClass().getSimpleName());
result.append(" <");
result.append(name);
result.append(':');
byte[] bytes = ((BytesRef) fieldsData).bytes;
result.append(XYEncodingUtils.decode(bytes, 0));
result.append(',');
result.append(XYEncodingUtils.decode(bytes, Integer.BYTES));
result.append('>');
return result.toString();
}
}
}
| XYFieldWithDocValues |
java | apache__camel | core/camel-base-engine/src/main/java/org/apache/camel/impl/engine/RouteService.java | {
"start": 2323,
"end": 17859
} | class ____ extends ChildServiceSupport {
private final CamelContext camelContext;
private final StartupStepRecorder startupStepRecorder;
private final Route route;
private boolean removingRoutes;
private Consumer input;
private final Lock lock = new ReentrantLock();
private final AtomicBoolean setUpDone = new AtomicBoolean();
private final AtomicBoolean warmUpDone = new AtomicBoolean();
private final AtomicBoolean endpointDone = new AtomicBoolean();
public RouteService(Route route) {
this.route = route;
this.camelContext = this.route.getCamelContext();
this.startupStepRecorder = this.camelContext.getCamelContextExtension().getStartupStepRecorder();
}
public String getId() {
return route.getId();
}
public String getLocation() {
return route.getSourceLocationShort();
}
public CamelContext getCamelContext() {
return camelContext;
}
public Route getRoute() {
return route;
}
/**
* Gather all the endpoints this route service uses
* <p/>
* This implementation finds the endpoints by searching all the child services for {@link EndpointAware} processors
* which uses an endpoint.
*/
public Set<Endpoint> gatherEndpoints() {
Set<Endpoint> answer = new LinkedHashSet<>();
Set<Service> services = gatherChildServices();
for (Service service : services) {
if (service instanceof EndpointAware endpointAware) {
Endpoint endpoint = endpointAware.getEndpoint();
if (endpoint != null) {
answer.add(endpoint);
}
}
}
return answer;
}
public Consumer getInput() {
return input;
}
public boolean isRemovingRoutes() {
return removingRoutes;
}
public void setRemovingRoutes(boolean removingRoutes) {
this.removingRoutes = removingRoutes;
}
public void warmUp() throws FailedToStartRouteException {
try {
doWarmUp();
} catch (Exception e) {
throw new FailedToStartRouteException(getId(), getLocation(), e.getLocalizedMessage(), e);
}
}
public void setUp() throws FailedToStartRouteException {
if (setUpDone.compareAndSet(false, true)) {
try {
doSetup();
} catch (Exception e) {
throw new FailedToStartRouteException(getId(), getLocation(), e.getLocalizedMessage(), e);
}
}
}
public boolean isAutoStartup() {
if (!getCamelContext().isAutoStartup()) {
return false;
}
if (!getRoute().isAutoStartup()) {
return false;
}
if (getCamelContext().getAutoStartupExcludePattern() != null) {
String[] patterns = getCamelContext().getAutoStartupExcludePattern().split(",");
String id = getRoute().getRouteId();
String url = getRoute().getEndpoint().getEndpointUri();
if (PatternHelper.matchPatterns(id, patterns) || PatternHelper.matchPatterns(url, patterns)) {
return false;
}
}
return true;
}
/**
 * Performs the actual setup: initializes the endpoint and the route services,
 * injecting the route, route id and camel context where supported. The consumer
 * is remembered as the route input; all other services become child services.
 */
protected void doSetup() throws Exception {
    lock.lock();
    try {
        // the endpoint must be initialized before the route services
        ServiceHelper.initService(route.getEndpoint());
        try (MDCHelper mdc = new MDCHelper(route.getId())) {
            // the route services must be initialized before they can be inspected
            route.initializeServices();
            // separate the consumer (the route input) from the remaining services,
            // so the consumer is not active while the others are being initialized
            List<Service> children = new ArrayList<>();
            for (Service service : route.getServices()) {
                // inject the route and route id where supported
                if (service instanceof RouteAware routeAware) {
                    routeAware.setRoute(route);
                }
                if (service instanceof RouteIdAware routeIdAware) {
                    routeIdAware.setRouteId(route.getId());
                }
                // inject camel context
                CamelContextAware.trySetCamelContext(service, camelContext);
                if (service instanceof Consumer consumer) {
                    this.input = consumer;
                } else {
                    children.add(service);
                }
            }
            initChildServices(children);
        }
    } finally {
        lock.unlock();
    }
}
/**
 * Performs the actual warm up: starts the endpoint (guarded so it only happens once,
 * as endpoints may be shared), warms up the route and its child services, and
 * registers the route with the camel context, lifecycle strategies and inflight
 * registry. The warm up itself is also guarded so it only runs once until the
 * guard flags are reset when the route is stopped.
 */
protected void doWarmUp() throws Exception {
    lock.lock();
    try {
        if (endpointDone.compareAndSet(false, true)) {
            // endpoints should only be started once as they can be reused on other routes
            // and whatnot, thus their lifecycle is to start once, and only to stop when Camel shutdown
            // ensure endpoint is started first (before the route services, such as the consumer)
            ServiceHelper.startService(route.getEndpoint());
        }
        if (warmUpDone.compareAndSet(false, true)) {
            try (MDCHelper mdcHelper = new MDCHelper(route.getId())) {
                // warm up the route first
                route.warmUp();
                // then start the child services that were gathered during setup
                startChildServices(route, childServices);
                // fire event
                EventHelper.notifyRouteAdded(camelContext, route);
            }
            // ensure lifecycle strategy is invoked which among others enlist the route in JMX
            for (LifecycleStrategy strategy : camelContext.getLifecycleStrategies()) {
                strategy.onRoutesAdd(Collections.singletonList(route));
            }
            // add routes to camel context
            camelContext.getCamelContextExtension().addRoute(route);
            // add the routes to the inflight registry so they are pre-installed
            camelContext.getInflightRepository().addRoute(route.getId());
        }
    } finally {
        lock.unlock();
    }
}
/**
 * Starts the route: fires the starting event, ensures the route is warmed up,
 * starts the route service, invokes the route policies and fires the started event.
 */
@Override
protected void doStart() {
    try (MDCHelper mdc = new MDCHelper(route.getId())) {
        // notify listeners that the route is about to be started
        EventHelper.notifyRouteStarting(camelContext, route);
    }
    // make sure the route has been warmed up before it is started
    warmUp();
    try (MDCHelper mdc = new MDCHelper(route.getId())) {
        // start the route itself
        ServiceHelper.startService(route);
        // let the route policies know the route has been started
        routePolicyCallback(RoutePolicy::onStart);
        // notify listeners that the route has been started
        EventHelper.notifyRouteStarted(camelContext, route);
    }
}
/**
 * Stops the route: fires the stopping event, notifies lifecycle strategies if the
 * context is shutting down or the route is being removed, stops the child services
 * and the route itself, invokes the route policies, fires the stopped event, and
 * finally resets the internal state so the route can be set up, warmed up and
 * started again after being stopped.
 */
@Override
protected void doStop() {
    try (MDCHelper mdcHelper = new MDCHelper(route.getId())) {
        // fire event
        EventHelper.notifyRouteStopping(camelContext, route);
    }
    // if we are stopping CamelContext then we are shutting down
    boolean isShutdownCamelContext = camelContext.isStopping();
    if (isShutdownCamelContext || isRemovingRoutes()) {
        // need to call onRoutesRemove when the CamelContext is shutting down or Route is shutdown
        for (LifecycleStrategy strategy : camelContext.getLifecycleStrategies()) {
            strategy.onRoutesRemove(Collections.singletonList(route));
        }
    }
    try (MDCHelper mdcHelper = new MDCHelper(route.getId())) {
        // gather list of services to stop
        Set<Service> services = gatherChildServices();
        // stop services
        stopChildServices(route, services, isShutdownCamelContext);
        // stop the route itself
        if (isShutdownCamelContext) {
            ServiceHelper.stopAndShutdownServices(route);
        } else {
            ServiceHelper.stopService(route);
        }
        // invoke callbacks on route policy
        routePolicyCallback(RoutePolicy::onStop);
        // fire event
        EventHelper.notifyRouteStopped(camelContext, route);
    }
    if (isRemovingRoutes()) {
        camelContext.getCamelContextExtension().removeRoute(route);
    }
    // reset state (each flag once; previously some flags were redundantly reset twice)
    // so setup/warm up are redone if we start again after being stopped
    input = null;
    childServices = null;
    warmUpDone.set(false);
    setUpDone.set(false);
    endpointDone.set(false);
}
/**
 * Shuts the route down permanently: stops and shuts down all child services, the
 * route itself and its endpoint (endpoints are only stopped on shutdown, see
 * {@code doWarmUp}), invokes the route policies, fires the removed event, notifies
 * the lifecycle strategies, and deregisters the route from the inflight registry
 * and the camel context before clearing the internal state.
 */
@Override
protected void doShutdown() {
    try (MDCHelper mdcHelper = new MDCHelper(route.getId())) {
        // gather list of services to shutdown
        Set<Service> services = gatherChildServices();
        // shutdown services
        stopChildServices(route, services, true);
        // shutdown the route itself
        ServiceHelper.stopAndShutdownServices(route);
        // endpoints should only be stopped when Camel is shutting down
        // see more details in the warmUp method
        ServiceHelper.stopAndShutdownServices(route.getEndpoint());
        // invoke callbacks on route policy
        routePolicyCallback(RoutePolicy::onRemove);
        // fire event
        EventHelper.notifyRouteRemoved(camelContext, route);
    }
    // need to call onRoutesRemove when the CamelContext is shutting down or Route is shutdown
    for (LifecycleStrategy strategy : camelContext.getLifecycleStrategies()) {
        strategy.onRoutesRemove(Collections.singletonList(route));
    }
    // remove the routes from the inflight registry
    camelContext.getInflightRepository().removeRoute(route.getId());
    // remove the routes from the collections
    camelContext.getCamelContextExtension().removeRoute(route);
    // clear inputs on shutdown
    input = null;
    childServices = null;
    warmUpDone.set(false);
    setUpDone.set(false);
    endpointDone.set(false);
}
/**
 * Notifies the route policies that the route is being suspended. The actual
 * suspend/resume handling is provided by DefaultCamelContext, which leverages
 * the ShutdownStrategy to suspend and resume safely.
 */
@Override
protected void doSuspend() {
    try (MDCHelper mdc = new MDCHelper(route.getId())) {
        routePolicyCallback(RoutePolicy::onSuspend);
    }
}
/**
 * Notifies the route policies that the route is being resumed. The actual
 * suspend/resume handling is provided by DefaultCamelContext, which leverages
 * the ShutdownStrategy to suspend and resume safely.
 */
@Override
protected void doResume() {
    try (MDCHelper mdc = new MDCHelper(route.getId())) {
        routePolicyCallback(RoutePolicy::onResume);
    }
}
/**
 * Invokes the given callback on every route policy attached to this route,
 * if any policies are configured.
 */
private void routePolicyCallback(java.util.function.BiConsumer<RoutePolicy, Route> callback) {
    List<RoutePolicy> policies = route.getRoutePolicyList();
    if (policies == null) {
        return;
    }
    for (RoutePolicy policy : policies) {
        callback.accept(policy, route);
    }
}
/**
 * Begins a startup step for the given service: processors are recorded under the
 * Processor type, all other services under the Service type. The service id is
 * included when the service is id aware.
 */
private StartupStep beginStep(Service service, String description) {
    Class<?> stepType = service instanceof Processor ? Processor.class : Service.class;
    String stepId = service instanceof IdAware idAware ? idAware.getId() : null;
    String stepDescription = description + " " + service.getClass().getSimpleName();
    return startupStepRecorder.beginStep(stepType, stepId, stepDescription);
}
/**
 * Initializes the given services and remembers each one as a child service of
 * this route. A startup step is recorded for each service, except internal
 * services and the route pipeline (the starting point of the route).
 */
protected void initChildServices(List<Service> services) {
    for (Service child : services) {
        // internal services and the route pipeline are not recorded as startup steps
        boolean record = !(child instanceof InternalProcessor
                || "RoutePipeline".equals(child.getClass().getSimpleName()));
        StartupStep step = record ? beginStep(child, "Init") : null;
        ServiceHelper.initService(child);
        if (step != null) {
            startupStepRecorder.endStep(step);
        }
        // add and remember as child service
        addChildService(child);
    }
}
/**
 * Starts the given child services of the route, notifying the lifecycle
 * strategies before each service is started. A startup step is recorded for
 * each service, except internal services and the route pipeline.
 */
protected void startChildServices(Route route, List<Service> services) {
    for (Service child : services) {
        // internal services and the route pipeline are not recorded as startup steps
        boolean record = !(child instanceof InternalProcessor
                || "RoutePipeline".equals(child.getClass().getSimpleName()));
        StartupStep step = record ? beginStep(child, "Start") : null;
        // let the lifecycle strategies know about the service before it is started
        for (LifecycleStrategy strategy : camelContext.getLifecycleStrategies()) {
            strategy.onServiceAdd(camelContext, child, route);
        }
        ServiceHelper.startService(child);
        if (step != null) {
            startupStepRecorder.endStep(step);
        }
    }
}
/**
 * Stops (and optionally shuts down) the given child services, notifying the
 * lifecycle strategies before each service is stopped, and removing each one
 * from the route's child services afterwards.
 */
protected void stopChildServices(Route route, Set<Service> services, boolean shutdown) {
    for (Service child : services) {
        // notify lifecycle strategies before the service is actually stopped
        for (LifecycleStrategy strategy : camelContext.getLifecycleStrategies()) {
            strategy.onServiceRemove(camelContext, child, route, shutdown);
        }
        if (shutdown) {
            ServiceHelper.stopAndShutdownService(child);
        } else {
            ServiceHelper.stopService(child);
        }
        removeChildService(child);
    }
}
/**
 * Gathers every child service of this route, including route scoped services
 * and the route scoped error handler (which must be gathered last).
 */
private Set<Service> gatherChildServices() {
    List<Service> direct = new ArrayList<>(route.getServices());
    // include route scoped services such as onException and onCompletion
    doGetRouteServices(direct);
    Set<Service> answer = new LinkedHashSet<>();
    for (Service service : direct) {
        answer.addAll(ServiceHelper.getChildServices(service));
    }
    // also get route scoped error handler (which must be done last)
    doGetErrorHandler(answer);
    return answer;
}
/**
 * Adds the route scoped error handlers found among the given services; only
 * error handlers attached to a channel (and thus route scoped) are included.
 */
private void doGetErrorHandler(Set<Service> services) {
    List<Service> errorHandlers = new ArrayList<>();
    for (Service service : services) {
        // a channel's error handler may itself be a service that must be managed
        if (service instanceof Channel channel && channel.getErrorHandler() instanceof Service s) {
            errorHandlers.add(s);
        }
    }
    if (!errorHandlers.isEmpty()) {
        services.addAll(errorHandlers);
    }
}
/**
 * Gathers the other kinds of route scoped services (the onException and
 * onCompletion processors), except the error handler.
 */
protected void doGetRouteServices(List<Service> services) {
    // collect candidates in order: onException processors first, then onCompletion
    List<Processor> candidates = new ArrayList<>();
    candidates.addAll(getRoute().getOnExceptions());
    candidates.addAll(getRoute().getOnCompletions());
    for (Processor processor : candidates) {
        if (processor instanceof Service service) {
            services.add(service);
        }
    }
}
| RouteService |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/web/servlet/samples/client/standalone/resultmatches/FlashAttributeAssertionTests.java | {
"start": 1884,
"end": 3424
} | class ____ {
private final WebTestClient client =
MockMvcWebTestClient.bindToController(new PersonController())
.alwaysExpect(status().isFound())
.alwaysExpect(flash().attributeCount(3))
.build();
@Test
void attributeCountWithWrongCount() {
assertThatExceptionOfType(AssertionError.class)
.isThrownBy(() -> performRequest().andExpect(flash().attributeCount(1)))
.withMessage("FlashMap size expected:<1> but was:<3>");
}
@Test
void attributeExists() throws Exception {
performRequest().andExpect(flash().attributeExists("one", "two", "three"));
}
@Test
void attributeEqualTo() throws Exception {
performRequest()
.andExpect(flash().attribute("one", "1"))
.andExpect(flash().attribute("two", 2.222))
.andExpect(flash().attribute("three", new URL("https://example.com")));
}
@Test
void attributeMatchers() throws Exception {
performRequest()
.andExpect(flash().attribute("one", containsString("1")))
.andExpect(flash().attribute("two", closeTo(2, 0.5)))
.andExpect(flash().attribute("three", notNullValue()))
.andExpect(flash().attribute("one", equalTo("1")))
.andExpect(flash().attribute("two", equalTo(2.222)))
.andExpect(flash().attribute("three", equalTo(new URL("https://example.com"))));
}
private ResultActions performRequest() {
EntityExchangeResult<Void> result = client.post().uri("/persons").exchange().expectBody().isEmpty();
return MockMvcWebTestClient.resultActionsFor(result);
}
@Controller
private static | FlashAttributeAssertionTests |
java | quarkusio__quarkus | independent-projects/bootstrap/maven-resolver/src/main/java/io/quarkus/bootstrap/resolver/maven/EffectiveModelResolver.java | {
"start": 222,
"end": 613
} | interface ____ {
static EffectiveModelResolver of(MavenArtifactResolver resolver) {
return new DefaultEffectiveModelResolver(resolver);
}
default Model resolveEffectiveModel(ArtifactCoords coords) {
return resolveEffectiveModel(coords, List.of());
}
Model resolveEffectiveModel(ArtifactCoords coords, List<RemoteRepository> repos);
}
| EffectiveModelResolver |
java | apache__maven | compat/maven-compat/src/main/java/org/apache/maven/artifact/resolver/filter/InversionArtifactFilter.java | {
"start": 958,
"end": 1707
} | class ____ implements ArtifactFilter {
private final ArtifactFilter toInvert;
public InversionArtifactFilter(ArtifactFilter toInvert) {
this.toInvert = toInvert;
}
@Override
public boolean include(Artifact artifact) {
return !toInvert.include(artifact);
}
@Override
public int hashCode() {
int hash = 17;
hash = hash * 31 + toInvert.hashCode();
return hash;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj instanceof InversionArtifactFilter other) {
return toInvert.equals(other.toInvert);
} else {
return false;
}
}
}
| InversionArtifactFilter |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/circular/CircularDependenciesChainTest.java | {
"start": 1043,
"end": 1195
} | class ____ {
@Inject
Bar bar;
String ping() {
return bar.ping();
}
}
@ApplicationScoped
static | Foo |
java | apache__flink | flink-metrics/flink-metrics-prometheus/src/test/java/org/apache/flink/metrics/prometheus/PrometheusReporterTest.java | {
"start": 11187,
"end": 11826
} | class ____ {
private int base = 9000;
/**
* Returns the next port range containing exactly 100 ports as string.
*
* @return next port range
*/
public String nextRange() {
if (!hasNext()) {
throw new NoSuchElementException();
}
int lowEnd = base;
int highEnd = base + 99;
base += 100;
return lowEnd + "-" + highEnd;
}
private boolean hasNext() {
return base < 14000; // arbitrary limit that should be sufficient for test purposes
}
}
}
| PortRangeProvider |
java | elastic__elasticsearch | x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailFilterTests.java | {
"start": 3924,
"end": 149754
} | class ____ extends ESTestCase {
private static final String FILTER_MARKER = "filterMarker_";
private static final String UNFILTER_MARKER = "nofilter_";
private Settings settings;
private DiscoveryNode localNode;
private ClusterService clusterService;
private ClusterSettings clusterSettings;
private ApiKeyService apiKeyService;
@Before
public void init() throws Exception {
settings = Settings.builder()
.put(LoggingAuditTrail.EMIT_HOST_ADDRESS_SETTING.getKey(), randomBoolean())
.put(LoggingAuditTrail.EMIT_HOST_NAME_SETTING.getKey(), randomBoolean())
.put(LoggingAuditTrail.EMIT_NODE_NAME_SETTING.getKey(), randomBoolean())
.put(LoggingAuditTrail.EMIT_NODE_ID_SETTING.getKey(), randomBoolean())
.put(LoggingAuditTrail.EMIT_CLUSTER_NAME_SETTING.getKey(), randomBoolean())
.put(LoggingAuditTrail.EMIT_CLUSTER_UUID_SETTING.getKey(), randomBoolean())
.put(ClusterName.CLUSTER_NAME_SETTING.getKey(), randomAlphaOfLength(16))
.put(LoggingAuditTrail.INCLUDE_REQUEST_BODY.getKey(), randomBoolean())
.put(LoggingAuditTrail.INCLUDE_EVENT_SETTINGS.getKey(), "_all")
.build();
localNode = mock(DiscoveryNode.class);
when(localNode.getHostAddress()).thenReturn(buildNewFakeTransportAddress().toString());
final ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(settings))
.metadata(Metadata.builder().clusterUUID(UUIDs.randomBase64UUID()).build())
.build();
clusterService = mock(ClusterService.class);
when(clusterService.localNode()).thenReturn(localNode);
when(clusterService.getClusterName()).thenReturn(ClusterName.CLUSTER_NAME_SETTING.get(settings));
when(clusterService.lifecycleState()).thenReturn(Lifecycle.State.STARTED);
when(clusterService.state()).thenReturn(clusterState);
clusterSettings = mockClusterSettings();
when(clusterService.getClusterSettings()).thenReturn(clusterSettings);
Mockito.doAnswer((Answer) invocation -> {
final LoggingAuditTrail arg0 = (LoggingAuditTrail) invocation.getArguments()[0];
arg0.updateLocalNodeInfo(localNode);
return null;
}).when(clusterService).addListener(Mockito.isA(LoggingAuditTrail.class));
apiKeyService = new ApiKeyService(
settings,
Clock.systemUTC(),
mock(Client.class),
mock(SecurityIndexManager.class),
clusterService,
mock(CacheInvalidatorRegistry.class),
mock(ThreadPool.class),
MeterRegistry.NOOP,
mock(FeatureService.class)
);
}
public void testPolicyDoesNotMatchNullValuesInEvent() throws Exception {
final Logger logger = CapturingLogger.newCapturingLogger(Level.INFO, null);
final ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
final Settings.Builder settingsBuilder = Settings.builder().put(settings);
// filter by username
final List<String> filteredUsernames = randomNonEmptyListOfFilteredNames();
final List<User> filteredUsers = filteredUsernames.stream().map(u -> { return new User(u); }).collect(Collectors.toList());
settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.userPolicy.users", filteredUsernames);
// filter by realms
final List<String> filteredRealms = randomNonEmptyListOfFilteredNames();
settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.realmsPolicy.realms", filteredRealms);
// filter by roles
final List<String> filteredRoles = randomNonEmptyListOfFilteredNames();
settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.rolesPolicy.roles", filteredRoles);
// filter by indices
final List<String> filteredIndices = randomNonEmptyListOfFilteredNames();
settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.indicesPolicy.indices", filteredIndices);
// filter by actions
final List<String> filteredActions = randomNonEmptyListOfFilteredActions();
settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.actionsPolicy.actions", filteredActions);
final LoggingAuditTrail auditTrail = new LoggingAuditTrail(settingsBuilder.build(), clusterService, logger, threadContext);
// user field matches
assertTrue(
"Matches the user filter predicate.",
auditTrail.eventFilterPolicyRegistry.ignorePredicate()
.test(
new AuditEventMetaInfo(
Optional.of(randomFrom(filteredUsers)),
Optional.empty(),
Optional.empty(),
Optional.empty(),
Optional.empty()
)
)
);
final User unfilteredUser = mock(User.class);
// null user field does NOT match
assertFalse(
"Does not match the user filter predicate because of null username.",
auditTrail.eventFilterPolicyRegistry.ignorePredicate()
.test(
new AuditEventMetaInfo(
Optional.of(unfilteredUser),
Optional.empty(),
Optional.empty(),
Optional.empty(),
Optional.empty()
)
)
);
// realm field matches
assertTrue(
"Matches the realm filter predicate.",
auditTrail.eventFilterPolicyRegistry.ignorePredicate()
.test(
new AuditEventMetaInfo(
Optional.empty(),
Optional.of(randomFrom(filteredRealms)),
Optional.empty(),
Optional.empty(),
Optional.empty()
)
)
);
// null realm field does NOT match
assertFalse(
"Does not match the realm filter predicate because of null realm.",
auditTrail.eventFilterPolicyRegistry.ignorePredicate()
.test(
new AuditEventMetaInfo(
Optional.empty(),
Optional.ofNullable(null),
Optional.empty(),
Optional.empty(),
Optional.empty()
)
)
);
// role field matches
assertTrue(
"Matches the role filter predicate.",
auditTrail.eventFilterPolicyRegistry.ignorePredicate()
.test(
new AuditEventMetaInfo(
Optional.empty(),
Optional.empty(),
Optional.of(
authzInfo(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0]))
),
Optional.empty(),
Optional.empty()
)
)
);
// action field matches
Random random = random();
assertTrue(
"Matches the actions filter predicate.",
auditTrail.eventFilterPolicyRegistry.ignorePredicate()
.test(
new AuditEventMetaInfo(
Optional.empty(),
Optional.empty(),
Optional.empty(),
Optional.empty(),
Optional.of(randomFrom(filteredActions))
)
)
);
// null privilege field does NOT match
assertFalse(
"Does not matches the actions filter predicate.",
auditTrail.eventFilterPolicyRegistry.ignorePredicate()
.test(
new AuditEventMetaInfo(
Optional.empty(),
Optional.empty(),
Optional.empty(),
Optional.empty(),
Optional.ofNullable(null)
)
)
);
final List<String> unfilteredRoles = new ArrayList<>();
unfilteredRoles.add(null);
unfilteredRoles.addAll(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles));
// null role among roles field does NOT match
assertFalse(
"Does not match the role filter predicate because of null role.",
auditTrail.eventFilterPolicyRegistry.ignorePredicate()
.test(
new AuditEventMetaInfo(
Optional.empty(),
Optional.empty(),
Optional.of(authzInfo(unfilteredRoles.toArray(new String[0]))),
Optional.empty(),
Optional.empty()
)
)
);
// indices field matches
assertTrue(
"Matches the index filter predicate.",
auditTrail.eventFilterPolicyRegistry.ignorePredicate()
.test(
new AuditEventMetaInfo(
Optional.empty(),
Optional.empty(),
Optional.empty(),
Optional.of(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices).toArray(new String[0])),
Optional.empty()
)
)
);
final List<String> unfilteredIndices = new ArrayList<>();
unfilteredIndices.add(null);
unfilteredIndices.addAll(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices));
// null index among indices field does NOT match
assertFalse(
"Does not match the indices filter predicate because of null index.",
auditTrail.eventFilterPolicyRegistry.ignorePredicate()
.test(
new AuditEventMetaInfo(
Optional.empty(),
Optional.empty(),
Optional.empty(),
Optional.of(unfilteredIndices.toArray(new String[0])),
Optional.empty()
)
)
);
}
public void testSingleCompletePolicyPredicate() throws Exception {
final Logger logger = CapturingLogger.newCapturingLogger(Level.INFO, null);
final ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
// create complete filter policy
final Settings.Builder settingsBuilder = Settings.builder().put(settings);
// filter by username
final List<String> filteredUsernames = randomNonEmptyListOfFilteredNames();
final List<User> filteredUsers = filteredUsernames.stream().map(u -> { return new User(u); }).collect(Collectors.toList());
settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.completeFilterPolicy.users", filteredUsernames);
// filter by realms
final List<String> filteredRealms = randomNonEmptyListOfFilteredNames();
settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.completeFilterPolicy.realms", filteredRealms);
// filter by roles
final List<String> filteredRoles = randomNonEmptyListOfFilteredNames();
settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.completeFilterPolicy.roles", filteredRoles);
// filter by indices
final List<String> filteredIndices = randomNonEmptyListOfFilteredNames();
settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.completeFilterPolicy.indices", filteredIndices);
// filter by actions
final List<String> filteredActions = randomNonEmptyListOfFilteredActions();
settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.completeFilterPolicy.actions", filteredActions);
final LoggingAuditTrail auditTrail = new LoggingAuditTrail(settingsBuilder.build(), clusterService, logger, threadContext);
// all fields match
Random random = random();
assertTrue(
"Matches the filter predicate.",
auditTrail.eventFilterPolicyRegistry.ignorePredicate()
.test(
new AuditEventMetaInfo(
Optional.of(randomFrom(filteredUsers)),
Optional.of(randomFrom(filteredRealms)),
Optional.of(
authzInfo(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0]))
),
Optional.of(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices).toArray(new String[0])),
Optional.of(randomFrom(filteredActions))
)
)
);
final User unfilteredUser;
unfilteredUser = new User(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 8));
// one field does not match or is empty
assertFalse(
"Does not match the filter predicate because of the user.",
auditTrail.eventFilterPolicyRegistry.ignorePredicate()
.test(
new AuditEventMetaInfo(
Optional.of(unfilteredUser),
Optional.of(randomFrom(filteredRealms)),
Optional.of(
authzInfo(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0]))
),
Optional.of(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices).toArray(new String[0])),
Optional.of(randomFrom(filteredActions))
)
)
);
assertFalse(
"Does not match the filter predicate because of the empty user.",
auditTrail.eventFilterPolicyRegistry.ignorePredicate()
.test(
new AuditEventMetaInfo(
Optional.empty(),
Optional.of(randomFrom(filteredRealms)),
Optional.of(
authzInfo(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0]))
),
Optional.of(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices).toArray(new String[0])),
Optional.of(randomFrom(filteredActions))
)
)
);
assertFalse(
"Does not match the filter predicate because of the realm.",
auditTrail.eventFilterPolicyRegistry.ignorePredicate()
.test(
new AuditEventMetaInfo(
Optional.of(randomFrom(filteredUsers)),
Optional.of(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 8)),
Optional.of(
authzInfo(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0]))
),
Optional.of(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices).toArray(new String[0])),
Optional.of(randomFrom(filteredActions))
)
)
);
assertFalse(
"Does not match the filter predicate because of the empty realm.",
auditTrail.eventFilterPolicyRegistry.ignorePredicate()
.test(
new AuditEventMetaInfo(
Optional.of(randomFrom(filteredUsers)),
Optional.empty(),
Optional.of(
authzInfo(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0]))
),
Optional.of(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices).toArray(new String[0])),
Optional.of(randomFrom(filteredActions))
)
)
);
assertFalse(
"Does not match the filter predicate because of the empty actions.",
auditTrail.eventFilterPolicyRegistry.ignorePredicate()
.test(
new AuditEventMetaInfo(
Optional.of(randomFrom(filteredUsers)),
Optional.of(randomFrom(filteredRealms)),
Optional.of(
authzInfo(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0]))
),
Optional.of(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices).toArray(new String[0])),
Optional.empty()
)
)
);
final List<String> someRolesDoNotMatch = new ArrayList<>(randomSubsetOf(randomIntBetween(0, filteredRoles.size()), filteredRoles));
for (int i = 0; i < randomIntBetween(1, 8); i++) {
someRolesDoNotMatch.add(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 8));
}
assertFalse(
"Does not match the filter predicate because of some of the roles.",
auditTrail.eventFilterPolicyRegistry.ignorePredicate()
.test(
new AuditEventMetaInfo(
Optional.of(randomFrom(filteredUsers)),
Optional.of(randomFrom(filteredRealms)),
Optional.of(authzInfo(someRolesDoNotMatch.toArray(new String[0]))),
Optional.of(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices).toArray(new String[0])),
Optional.of(randomFrom(filteredActions))
)
)
);
final Optional<AuthorizationInfo> emptyRoles = randomBoolean() ? Optional.empty() : Optional.of(authzInfo(new String[0]));
assertFalse(
"Does not match the filter predicate because of the empty roles.",
auditTrail.eventFilterPolicyRegistry.ignorePredicate()
.test(
new AuditEventMetaInfo(
Optional.of(randomFrom(filteredUsers)),
Optional.of(randomFrom(filteredRealms)),
emptyRoles,
Optional.of(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices).toArray(new String[0])),
Optional.of(randomFrom(filteredActions))
)
)
);
final List<String> someIndicesDoNotMatch = new ArrayList<>(
randomSubsetOf(randomIntBetween(0, filteredIndices.size()), filteredIndices)
);
for (int i = 0; i < randomIntBetween(1, 8); i++) {
someIndicesDoNotMatch.add(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 8));
}
assertFalse(
"Does not match the filter predicate because of some of the indices.",
auditTrail.eventFilterPolicyRegistry.ignorePredicate()
.test(
new AuditEventMetaInfo(
Optional.of(randomFrom(filteredUsers)),
Optional.of(randomFrom(filteredRealms)),
Optional.of(
authzInfo(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0]))
),
Optional.of(someIndicesDoNotMatch.toArray(new String[0])),
Optional.of(randomFrom(filteredActions))
)
)
);
final Optional<String[]> emptyIndices = randomBoolean() ? Optional.empty() : Optional.of(new String[0]);
assertFalse(
"Does not match the filter predicate because of the empty indices.",
auditTrail.eventFilterPolicyRegistry.ignorePredicate()
.test(
new AuditEventMetaInfo(
Optional.of(randomFrom(filteredUsers)),
Optional.of(randomFrom(filteredRealms)),
Optional.of(
authzInfo(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0]))
),
emptyIndices,
Optional.of(randomFrom(filteredActions))
)
)
);
}
public void testSingleCompleteWithEmptyFieldPolicyPredicate() throws Exception {
final Logger logger = CapturingLogger.newCapturingLogger(Level.INFO, null);
final ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
// create complete filter policy
final Settings.Builder settingsBuilder = Settings.builder().put(settings);
// filter by username
final List<String> filteredUsernames = randomNonEmptyListOfFilteredNames();
final List<User> filteredUsers = filteredUsernames.stream().map(u -> { return new User(u); }).collect(Collectors.toList());
filteredUsernames.add(""); // filter by missing user name
settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.completeFilterPolicy.users", filteredUsernames);
// filter by realms
final List<String> filteredRealms = randomNonEmptyListOfFilteredNames();
filteredRealms.add(""); // filter by missing realm name
settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.completeFilterPolicy.realms", filteredRealms);
filteredRealms.remove("");
// filter by roles
final List<String> filteredRoles = randomNonEmptyListOfFilteredNames();
filteredRoles.add(""); // filter by missing role name
settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.completeFilterPolicy.roles", filteredRoles);
filteredRoles.remove("");
// filter by indices
final List<String> filteredIndices = randomNonEmptyListOfFilteredNames();
filteredIndices.add(""); // filter by missing index name
settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.completeFilterPolicy.indices", filteredIndices);
filteredIndices.remove("");
// filter by actions
final List<String> filteredActions = randomNonEmptyListOfFilteredActions();
settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.completeFilterPolicy.actions", filteredActions);
final LoggingAuditTrail auditTrail = new LoggingAuditTrail(settingsBuilder.build(), clusterService, logger, threadContext);
// all fields match
Random random = random();
assertTrue(
"Matches the filter predicate.",
auditTrail.eventFilterPolicyRegistry.ignorePredicate()
.test(
new AuditEventMetaInfo(
Optional.of(randomFrom(filteredUsers)),
Optional.of(randomFrom(filteredRealms)),
Optional.of(
authzInfo(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0]))
),
Optional.of(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices).toArray(new String[0])),
Optional.of(randomFrom(filteredActions))
)
)
);
final User unfilteredUser;
unfilteredUser = new User(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 8));
// one field does not match or is empty
assertFalse(
"Does not match the filter predicate because of the user.",
auditTrail.eventFilterPolicyRegistry.ignorePredicate()
.test(
new AuditEventMetaInfo(
Optional.of(unfilteredUser),
Optional.of(randomFrom(filteredRealms)),
Optional.of(
authzInfo(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0]))
),
Optional.of(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices).toArray(new String[0])),
Optional.of(randomFrom(filteredActions))
)
)
);
assertTrue(
"Matches the filter predicate because of the empty user.",
auditTrail.eventFilterPolicyRegistry.ignorePredicate()
.test(
new AuditEventMetaInfo(
Optional.empty(),
Optional.of(randomFrom(filteredRealms)),
Optional.of(
authzInfo(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0]))
),
Optional.of(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices).toArray(new String[0])),
Optional.of(randomFrom(filteredActions))
)
)
);
assertFalse(
"Does not match the filter predicate because of the realm.",
auditTrail.eventFilterPolicyRegistry.ignorePredicate()
.test(
new AuditEventMetaInfo(
Optional.of(randomFrom(filteredUsers)),
Optional.of(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 8)),
Optional.of(
authzInfo(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0]))
),
Optional.of(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices).toArray(new String[0])),
Optional.of(randomFrom(filteredActions))
)
)
);
assertTrue(
"Matches the filter predicate because of the empty realm.",
auditTrail.eventFilterPolicyRegistry.ignorePredicate()
.test(
new AuditEventMetaInfo(
Optional.of(randomFrom(filteredUsers)),
Optional.empty(),
Optional.of(
authzInfo(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0]))
),
Optional.of(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices).toArray(new String[0])),
Optional.of(randomFrom(filteredActions))
)
)
);
assertFalse(
"Does not match the filter predicate because of the pivilege.",
auditTrail.eventFilterPolicyRegistry.ignorePredicate()
.test(
new AuditEventMetaInfo(
Optional.of(randomFrom(filteredUsers)),
Optional.of(randomFrom(filteredRealms)),
Optional.of(
authzInfo(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0]))
),
Optional.of(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices).toArray(new String[0])),
Optional.of(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 8))
)
)
);
final List<String> someRolesDoNotMatch = new ArrayList<>(randomSubsetOf(randomIntBetween(0, filteredRoles.size()), filteredRoles));
for (int i = 0; i < randomIntBetween(1, 8); i++) {
someRolesDoNotMatch.add(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 8));
}
assertFalse(
"Does not match the filter predicate because of some of the roles.",
auditTrail.eventFilterPolicyRegistry.ignorePredicate()
.test(
new AuditEventMetaInfo(
Optional.of(randomFrom(filteredUsers)),
Optional.of(randomFrom(filteredRealms)),
Optional.of(authzInfo(someRolesDoNotMatch.toArray(new String[0]))),
Optional.of(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices).toArray(new String[0])),
Optional.of(randomFrom(filteredActions))
)
)
);
final Optional<AuthorizationInfo> emptyRoles = randomBoolean() ? Optional.empty() : Optional.of(authzInfo(new String[0]));
assertTrue(
"Matches the filter predicate because of the empty roles.",
auditTrail.eventFilterPolicyRegistry.ignorePredicate()
.test(
new AuditEventMetaInfo(
Optional.of(randomFrom(filteredUsers)),
Optional.of(randomFrom(filteredRealms)),
emptyRoles,
Optional.of(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices).toArray(new String[0])),
Optional.of(randomFrom(filteredActions))
)
)
);
final List<String> someIndicesDoNotMatch = new ArrayList<>(
randomSubsetOf(randomIntBetween(0, filteredIndices.size()), filteredIndices)
);
for (int i = 0; i < randomIntBetween(1, 8); i++) {
someIndicesDoNotMatch.add(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 8));
}
assertFalse(
"Does not match the filter predicate because of some of the indices.",
auditTrail.eventFilterPolicyRegistry.ignorePredicate()
.test(
new AuditEventMetaInfo(
Optional.of(randomFrom(filteredUsers)),
Optional.of(randomFrom(filteredRealms)),
Optional.of(
authzInfo(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0]))
),
Optional.of(someIndicesDoNotMatch.toArray(new String[0])),
Optional.of(randomFrom(filteredActions))
)
)
);
assertTrue(
"Matches the filter predicate because of the empty indices.",
auditTrail.eventFilterPolicyRegistry.ignorePredicate()
.test(
new AuditEventMetaInfo(
Optional.of(randomFrom(filteredUsers)),
Optional.of(randomFrom(filteredRealms)),
Optional.of(
authzInfo(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0]))
),
Optional.empty(),
Optional.of(randomFrom(filteredActions))
)
)
);
assertTrue(
"Matches the filter predicate because of the empty indices.",
auditTrail.eventFilterPolicyRegistry.ignorePredicate()
.test(
new AuditEventMetaInfo(
Optional.of(randomFrom(filteredUsers)),
Optional.of(randomFrom(filteredRealms)),
Optional.of(
authzInfo(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0]))
),
Optional.of(new String[0]),
Optional.of(randomFrom(filteredActions))
)
)
);
assertTrue(
"Matches the filter predicate because of the empty indices.",
auditTrail.eventFilterPolicyRegistry.ignorePredicate()
.test(
new AuditEventMetaInfo(
Optional.of(randomFrom(filteredUsers)),
Optional.of(randomFrom(filteredRealms)),
Optional.of(
authzInfo(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0]))
),
Optional.of(new String[] { null }),
Optional.of(randomFrom(filteredActions))
)
)
);
}
    /**
     * Verifies that ignore-filter policies are OR-ed together and that each policy only
     * constrains the fields it explicitly configures (fields a policy leaves unset match
     * any value).
     *
     * Two policies are installed: the first filters on realms and roles only, the second
     * on users and indices only. The four assertions below cover every combination of
     * matching/not matching each policy; an event is ignored iff it fully matches at
     * least one policy.
     */
    public void testTwoPolicyPredicatesWithMissingFields() throws Exception {
        final Logger logger = CapturingLogger.newCapturingLogger(Level.INFO, null);
        final ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
        final Settings.Builder settingsBuilder = Settings.builder().put(settings);
        // first policy: realms and roles filters
        final List<String> filteredRealms = randomNonEmptyListOfFilteredNames();
        settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.firstPolicy.realms", filteredRealms);
        final List<String> filteredRoles = randomNonEmptyListOfFilteredNames();
        settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.firstPolicy.roles", filteredRoles);
        // second policy: users and indices filters
        final List<String> filteredUsernames = randomNonEmptyListOfFilteredNames();
        final List<User> filteredUsers = filteredUsernames.stream().map(u -> { return new User(u); }).collect(Collectors.toList());
        settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.secondPolicy.users", filteredUsernames);
        // filter by indices
        final List<String> filteredIndices = randomNonEmptyListOfFilteredNames();
        settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.secondPolicy.indices", filteredIndices);
        final LoggingAuditTrail auditTrail = new LoggingAuditTrail(settingsBuilder.build(), clusterService, logger, threadContext);
        // a user name carrying the UNFILTER_MARKER prefix is guaranteed not to match any filtered name
        final User unfilteredUser;
        unfilteredUser = new User(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 8));
        // a roles list that mixes filtered names with at least one unfiltered one;
        // such a list must fail the first policy's roles filter (all values have to match)
        final List<String> someRolesDoNotMatch = new ArrayList<>(randomSubsetOf(randomIntBetween(0, filteredRoles.size()), filteredRoles));
        for (int i = 0; i < randomIntBetween(1, 8); i++) {
            someRolesDoNotMatch.add(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 8));
        }
        // likewise an indices list with at least one unfiltered name fails the second policy's indices filter
        final List<String> someIndicesDoNotMatch = new ArrayList<>(
            randomSubsetOf(randomIntBetween(0, filteredIndices.size()), filteredIndices)
        );
        for (int i = 0; i < randomIntBetween(1, 8); i++) {
            someIndicesDoNotMatch.add(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 8));
        }
        // matches both the first and the second policies
        assertTrue(
            "Matches both the first and the second filter predicates.",
            auditTrail.eventFilterPolicyRegistry.ignorePredicate()
                .test(
                    new AuditEventMetaInfo(
                        Optional.of(randomFrom(filteredUsers)),
                        Optional.of(randomFrom(filteredRealms)),
                        Optional.of(
                            authzInfo(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0]))
                        ),
                        Optional.of(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices).toArray(new String[0])),
                        Optional.empty()
                    )
                )
        );
        // matches first policy but not the second (realm and roles match; user and indices do not)
        assertTrue(
            "Matches the first filter predicate but not the second.",
            auditTrail.eventFilterPolicyRegistry.ignorePredicate()
                .test(
                    new AuditEventMetaInfo(
                        Optional.of(unfilteredUser),
                        Optional.of(randomFrom(filteredRealms)),
                        Optional.of(
                            authzInfo(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0]))
                        ),
                        Optional.of(someIndicesDoNotMatch.toArray(new String[0])),
                        Optional.of("_action")
                    )
                )
        );
        // matches the second policy but not the first (user and indices match; realm and roles do not)
        assertTrue(
            "Matches the second filter predicate but not the first.",
            auditTrail.eventFilterPolicyRegistry.ignorePredicate()
                .test(
                    new AuditEventMetaInfo(
                        Optional.of(randomFrom(filteredUsers)),
                        Optional.of(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 8)),
                        Optional.of(authzInfo(someRolesDoNotMatch.toArray(new String[0]))),
                        Optional.of(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices).toArray(new String[0])),
                        Optional.empty()
                    )
                )
        );
        // matches neither the first nor the second policies -> event is not ignored
        assertFalse(
            "Matches neither the first nor the second filter predicates.",
            auditTrail.eventFilterPolicyRegistry.ignorePredicate()
                .test(
                    new AuditEventMetaInfo(
                        Optional.of(unfilteredUser),
                        Optional.of(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 8)),
                        Optional.of(authzInfo(someRolesDoNotMatch.toArray(new String[0]))),
                        Optional.of(someIndicesDoNotMatch.toArray(new String[0])),
                        Optional.empty()
                    )
                )
        );
    }
    /**
     * Exercises the {@code ignore_filters.*.users} audit filters across every audit event
     * type emitted by {@link LoggingAuditTrail}.
     *
     * The capturing logger's output size is the oracle: size 1 means the event was
     * emitted, size 0 means it was dropped by a filter policy. Events carrying a user
     * whose name matches any policy's user filter must be dropped; events carrying a
     * non-matching user must still be logged. When {@code filterMissingUser} is true an
     * additional policy with an empty user filter ("" or []) is installed, which matches
     * events that carry no user at all (anonymous / token-less / connection-level events).
     */
    public void testUsersFilter() throws Exception {
        final Logger logger = CapturingLogger.newCapturingLogger(Level.INFO, null);
        final ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
        final List<String> allFilteredUsers = new ArrayList<>();
        final Settings.Builder settingsBuilder = Settings.builder().put(settings);
        // spread the filtered user names over several independent policies; matching any single policy suffices
        for (int i = 0; i < randomIntBetween(1, 4); i++) {
            final List<String> filteredUsers = randomNonEmptyListOfFilteredNames();
            allFilteredUsers.addAll(filteredUsers);
            settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.policy" + i + ".users", filteredUsers);
        }
        // a filter for a field consisting of an empty string ("") or an empty list([])
        // will match events that lack that field
        final boolean filterMissingUser = randomBoolean();
        if (filterMissingUser) {
            if (randomBoolean()) {
                final List<String> filteredUsers = randomNonEmptyListOfFilteredNames();
                // possibly renders list empty
                filteredUsers.remove(0);
                allFilteredUsers.addAll(filteredUsers);
                filteredUsers.add("");
                settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.missingPolicy.users", filteredUsers);
            } else {
                settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.missingPolicy.users", Collections.emptyList());
            }
        }
        // an authentication whose effective user name is one of the filtered names;
        // randomly plain, run-as, or additionally wrapped as an API key authentication
        Authentication filteredAuthentication;
        if (randomBoolean()) {
            filteredAuthentication = createAuthentication(
                new User(randomFrom(allFilteredUsers), "r1"),
                new User("authUsername", "r2"),
                "effectiveRealmName"
            );
        } else {
            filteredAuthentication = createAuthentication(new User(randomFrom(allFilteredUsers), "r1"), "effectiveRealmName");
        }
        if (randomBoolean()) {
            filteredAuthentication = createApiKeyAuthentication(apiKeyService, filteredAuthentication);
        }
        // an authentication whose user name carries the UNFILTER_MARKER prefix so it cannot match any filter
        Authentication unfilteredAuthentication;
        if (randomBoolean()) {
            unfilteredAuthentication = createAuthentication(
                new User(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 4), "r1"),
                new User("authUsername", "r2"),
                "effectiveRealmName"
            );
        } else {
            unfilteredAuthentication = createAuthentication(
                new User(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 4), "r1"),
                "effectiveRealmName"
            );
        }
        if (randomBoolean()) {
            unfilteredAuthentication = createApiKeyAuthentication(apiKeyService, unfilteredAuthentication);
        }
        final TransportRequest request = randomBoolean()
            ? new MockRequest(threadContext)
            : new MockIndicesRequest(threadContext, new String[] { "idx1", "idx2" });
        final MockToken filteredToken = new MockToken(randomFrom(allFilteredUsers));
        final MockToken unfilteredToken = new MockToken(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 4));
        final LoggingAuditTrail auditTrail = new LoggingAuditTrail(settingsBuilder.build(), clusterService, logger, threadContext);
        // oracle: logOutput.size() == 1 -> event emitted; == 0 -> event ignored by a filter policy
        final List<String> logOutput = CapturingLogger.output(logger.getName(), Level.INFO);
        // anonymous accessDenied — no user attached, so only the missing-user policy can drop it
        auditTrail.anonymousAccessDenied(randomAlphaOfLength(8), "_action", request);
        if (filterMissingUser) {
            assertThat("Anonymous message: not filtered out by the missing user filter", logOutput.size(), is(0));
        } else {
            assertThat("Anonymous message: filtered out by the user filters", logOutput.size(), is(1));
        }
        logOutput.clear();
        // stash a fresh thread context so state from the previous audit call cannot leak into the next one
        threadContext.stashContext();
        auditTrail.anonymousAccessDenied(randomAlphaOfLength(8), getHttpRequest());
        if (filterMissingUser) {
            assertThat("Anonymous rest request: not filtered out by the missing user filter", logOutput.size(), is(0));
        } else {
            assertThat("Anonymous rest request: filtered out by user filters", logOutput.size(), is(1));
        }
        logOutput.clear();
        threadContext.stashContext();
        // authenticationFailed — events without a token have no user; events with a token use the token's principal
        auditTrail.authenticationFailed(randomAlphaOfLength(8), getHttpRequest());
        if (filterMissingUser) {
            assertThat("AuthenticationFailed no token rest request: not filtered out by the missing user filter", logOutput.size(), is(0));
        } else {
            assertThat("AuthenticationFailed no token rest request: is filtered out", logOutput.size(), is(1));
        }
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.authenticationFailed(randomAlphaOfLength(8), unfilteredToken, "_action", request);
        assertThat("AuthenticationFailed token request: unfiltered user is filtered out", logOutput.size(), is(1));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.authenticationFailed(randomAlphaOfLength(8), filteredToken, "_action", request);
        assertThat("AuthenticationFailed token request: filtered user is not filtered out", logOutput.size(), is(0));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.authenticationFailed(randomAlphaOfLength(8), "_action", request);
        if (filterMissingUser) {
            assertThat("AuthenticationFailed no token message: not filtered out by the missing user filter", logOutput.size(), is(0));
        } else {
            assertThat("AuthenticationFailed no token message: is filtered out", logOutput.size(), is(1));
        }
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.authenticationFailed(randomAlphaOfLength(8), unfilteredToken, getHttpRequest());
        assertThat("AuthenticationFailed rest request: unfiltered user is filtered out", logOutput.size(), is(1));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.authenticationFailed(randomAlphaOfLength(8), filteredToken, getHttpRequest());
        assertThat("AuthenticationFailed rest request: filtered user is not filtered out", logOutput.size(), is(0));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.authenticationFailed(randomAlphaOfLength(8), "_realm", unfilteredToken, "_action", request);
        assertThat("AuthenticationFailed realm message: unfiltered user is filtered out", logOutput.size(), is(1));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.authenticationFailed(randomAlphaOfLength(8), "_realm", filteredToken, "_action", request);
        assertThat("AuthenticationFailed realm message: filtered user is not filtered out", logOutput.size(), is(0));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.authenticationFailed(randomAlphaOfLength(8), "_realm", unfilteredToken, getHttpRequest());
        assertThat("AuthenticationFailed realm rest request: unfiltered user is filtered out", logOutput.size(), is(1));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.authenticationFailed(randomAlphaOfLength(8), "_realm", filteredToken, getHttpRequest());
        assertThat("AuthenticationFailed realm rest request: filtered user is not filtered out", logOutput.size(), is(0));
        logOutput.clear();
        threadContext.stashContext();
        // accessGranted — the system user is not among the filtered names, so its events are still emitted
        auditTrail.accessGranted(randomAlphaOfLength(8), unfilteredAuthentication, "_action", request, authzInfo(new String[] { "role1" }));
        assertThat("AccessGranted message: unfiltered user is filtered out", logOutput.size(), is(1));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.accessGranted(randomAlphaOfLength(8), filteredAuthentication, "_action", request, authzInfo(new String[] { "role1" }));
        assertThat("AccessGranted message: filtered user is not filtered out", logOutput.size(), is(0));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.accessGranted(
            randomAlphaOfLength(8),
            createSystemUserAuthentication(randomBoolean()),
            "internal:_action",
            request,
            authzInfo(new String[] { "role1" })
        );
        assertThat("AccessGranted internal message: system user is filtered out", logOutput.size(), is(1));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.accessGranted(
            randomAlphaOfLength(8),
            unfilteredAuthentication,
            "internal:_action",
            request,
            authzInfo(new String[] { "role1" })
        );
        assertThat("AccessGranted internal message: unfiltered user is filtered out", logOutput.size(), is(1));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.accessGranted(
            randomAlphaOfLength(8),
            filteredAuthentication,
            "internal:_action",
            request,
            authzInfo(new String[] { "role1" })
        );
        assertThat("AccessGranted internal message: filtered user is not filtered out", logOutput.size(), is(0));
        logOutput.clear();
        threadContext.stashContext();
        // accessDenied — same expectations as accessGranted
        auditTrail.accessDenied(randomAlphaOfLength(8), unfilteredAuthentication, "_action", request, authzInfo(new String[] { "role1" }));
        assertThat("AccessDenied message: unfiltered user is filtered out", logOutput.size(), is(1));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.accessDenied(randomAlphaOfLength(8), filteredAuthentication, "_action", request, authzInfo(new String[] { "role1" }));
        assertThat("AccessDenied message: filtered user is not filtered out", logOutput.size(), is(0));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.accessDenied(
            randomAlphaOfLength(8),
            createSystemUserAuthentication(randomBoolean()),
            "internal:_action",
            request,
            authzInfo(new String[] { "role1" })
        );
        assertThat("AccessDenied internal message: system user is filtered out", logOutput.size(), is(1));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.accessDenied(
            randomAlphaOfLength(8),
            unfilteredAuthentication,
            "internal:_action",
            request,
            authzInfo(new String[] { "role1" })
        );
        assertThat("AccessDenied internal message: unfiltered user is filtered out", logOutput.size(), is(1));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.accessDenied(
            randomAlphaOfLength(8),
            filteredAuthentication,
            "internal:_action",
            request,
            authzInfo(new String[] { "role1" })
        );
        assertThat("AccessDenied internal request: filtered user is not filtered out", logOutput.size(), is(0));
        logOutput.clear();
        threadContext.stashContext();
        // tamperedRequest — the overloads without an Authentication carry no user
        auditTrail.tamperedRequest(randomAlphaOfLength(8), getHttpRequest());
        if (filterMissingUser) {
            assertThat("Tampered rest: is not filtered out by the missing user filter", logOutput.size(), is(0));
        } else {
            assertThat("Tampered rest: is filtered out", logOutput.size(), is(1));
        }
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.tamperedRequest(randomAlphaOfLength(8), "_action", request);
        if (filterMissingUser) {
            assertThat("Tampered message: is not filtered out by the missing user filter", logOutput.size(), is(0));
        } else {
            assertThat("Tampered message: is filtered out", logOutput.size(), is(1));
        }
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.tamperedRequest(randomAlphaOfLength(8), unfilteredAuthentication, "_action", request);
        assertThat("Tampered message: unfiltered user is filtered out", logOutput.size(), is(1));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.tamperedRequest(randomAlphaOfLength(8), filteredAuthentication, "_action", request);
        assertThat("Tampered message: filtered user is not filtered out", logOutput.size(), is(0));
        logOutput.clear();
        threadContext.stashContext();
        // connection denied — connection-level events carry no user
        auditTrail.connectionDenied(randomLoopbackInetSocketAddress(), "default", new SecurityIpFilterRule(false, "_all"));
        if (filterMissingUser) {
            assertThat("Connection denied: is not filtered out by the missing user filter", logOutput.size(), is(0));
        } else {
            assertThat("Connection denied: is filtered out", logOutput.size(), is(1));
        }
        logOutput.clear();
        threadContext.stashContext();
        // connection granted
        auditTrail.connectionGranted(randomLoopbackInetSocketAddress(), "default", new SecurityIpFilterRule(false, "_all"));
        if (filterMissingUser) {
            assertThat("Connection granted: is not filtered out by the missing user filter", logOutput.size(), is(0));
        } else {
            assertThat("Connection granted: is filtered out", logOutput.size(), is(1));
        }
        logOutput.clear();
        threadContext.stashContext();
        // runAsGranted
        auditTrail.runAsGranted(
            randomAlphaOfLength(8),
            unfilteredAuthentication,
            "_action",
            new MockRequest(threadContext),
            authzInfo(new String[] { "role1" })
        );
        assertThat("RunAsGranted message: unfiltered user is filtered out", logOutput.size(), is(1));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.runAsGranted(
            randomAlphaOfLength(8),
            filteredAuthentication,
            "_action",
            new MockRequest(threadContext),
            authzInfo(new String[] { "role1" })
        );
        assertThat("RunAsGranted message: filtered user is not filtered out", logOutput.size(), is(0));
        logOutput.clear();
        threadContext.stashContext();
        // runAsDenied
        auditTrail.runAsDenied(
            randomAlphaOfLength(8),
            unfilteredAuthentication,
            "_action",
            new MockRequest(threadContext),
            authzInfo(new String[] { "role1" })
        );
        assertThat("RunAsDenied message: unfiltered user is filtered out", logOutput.size(), is(1));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.runAsDenied(
            randomAlphaOfLength(8),
            filteredAuthentication,
            "_action",
            new MockRequest(threadContext),
            authzInfo(new String[] { "role1" })
        );
        assertThat("RunAsDenied message: filtered user is not filtered out", logOutput.size(), is(0));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.runAsDenied(randomAlphaOfLength(8), unfilteredAuthentication, getHttpRequest(), authzInfo(new String[] { "role1" }));
        assertThat("RunAsDenied rest request: unfiltered user is filtered out", logOutput.size(), is(1));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.runAsDenied(randomAlphaOfLength(8), filteredAuthentication, getHttpRequest(), authzInfo(new String[] { "role1" }));
        assertThat("RunAsDenied rest request: filtered user is not filtered out", logOutput.size(), is(0));
        logOutput.clear();
        threadContext.stashContext();
        // authentication Success — the REST overload reads the authentication from the thread context,
        // so it is written to the context (together with a request id) before each call
        AuditUtil.generateRequestId(threadContext);
        unfilteredAuthentication.writeToContext(threadContext);
        auditTrail.authenticationSuccess(getRestRequest());
        assertThat("AuthenticationSuccess rest request: unfiltered user is filtered out", logOutput.size(), is(1));
        logOutput.clear();
        threadContext.stashContext();
        AuditUtil.generateRequestId(threadContext);
        filteredAuthentication.writeToContext(threadContext);
        auditTrail.authenticationSuccess(getRestRequest());
        assertThat("AuthenticationSuccess rest request: filtered user is not filtered out", logOutput.size(), is(0));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.authenticationSuccess(randomAlphaOfLength(8), unfilteredAuthentication, "_action", request);
        assertThat("AuthenticationSuccess message: unfiltered user is filtered out", logOutput.size(), is(1));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.authenticationSuccess(randomAlphaOfLength(8), filteredAuthentication, "_action", request);
        assertThat("AuthenticationSuccess message: filtered user is not filtered out", logOutput.size(), is(0));
        logOutput.clear();
        threadContext.stashContext();
    }
public void testRealmsFilter() throws Exception {
final Logger logger = CapturingLogger.newCapturingLogger(Level.INFO, null);
final ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
final List<String> allFilteredRealms = new ArrayList<>();
final Settings.Builder settingsBuilder = Settings.builder().put(settings);
for (int i = 0; i < randomIntBetween(1, 4); i++) {
final List<String> filteredRealms = randomNonEmptyListOfFilteredNames();
allFilteredRealms.addAll(filteredRealms);
settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.policy" + i + ".realms", filteredRealms);
}
// For SystemUser
final boolean filterFallbackRealm = randomBoolean();
if (filterFallbackRealm) {
settingsBuilder.putList(
"xpack.security.audit.logfile.events.ignore_filters.policy42.realms",
AuthenticationField.FALLBACK_REALM_NAME
);
} else {
settingsBuilder.putList(
"xpack.security.audit.logfile.events.ignore_filters.policy42.realms",
AuthenticationField.ATTACH_REALM_NAME
);
}
// a filter for a field consisting of an empty string ("") or an empty list([])
// will match events that lack that field
final boolean filterMissingRealm = randomBoolean();
if (filterMissingRealm) {
if (randomBoolean()) {
final List<String> filteredRealms = randomNonEmptyListOfFilteredNames();
// possibly renders list empty
filteredRealms.remove(0);
allFilteredRealms.addAll(filteredRealms);
filteredRealms.add("");
settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.missingPolicy.realms", filteredRealms);
} else {
settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.missingPolicy.realms", Collections.emptyList());
}
}
final String filteredRealm = randomFrom(allFilteredRealms);
final String unfilteredRealm = UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 4);
final User user, authUser;
if (randomBoolean()) {
user = new User("user1", "r1");
authUser = new User("authUsername", "r2");
} else {
user = new User("user1", "r1");
authUser = null;
}
final TransportRequest request = randomBoolean()
? new MockRequest(threadContext)
: new MockIndicesRequest(threadContext, new String[] { "idx1", "idx2" });
final MockToken authToken = new MockToken("token1");
final LoggingAuditTrail auditTrail = new LoggingAuditTrail(settingsBuilder.build(), clusterService, logger, threadContext);
final List<String> logOutput = CapturingLogger.output(logger.getName(), Level.INFO);
// anonymous accessDenied
auditTrail.anonymousAccessDenied(randomAlphaOfLength(8), "_action", request);
if (filterMissingRealm) {
assertThat("Anonymous message: not filtered out by the missing realm filter", logOutput.size(), is(0));
} else {
assertThat("Anonymous message: filtered out by the realm filters", logOutput.size(), is(1));
}
logOutput.clear();
threadContext.stashContext();
auditTrail.anonymousAccessDenied(randomAlphaOfLength(8), getHttpRequest());
if (filterMissingRealm) {
assertThat("Anonymous rest request: not filtered out by the missing realm filter", logOutput.size(), is(0));
} else {
assertThat("Anonymous rest request: filtered out by realm filters", logOutput.size(), is(1));
}
logOutput.clear();
threadContext.stashContext();
// authenticationFailed
auditTrail.authenticationFailed(randomAlphaOfLength(8), getHttpRequest());
if (filterMissingRealm) {
assertThat("AuthenticationFailed no token rest request: not filtered out by the missing realm filter", logOutput.size(), is(0));
} else {
assertThat("AuthenticationFailed no token rest request: is filtered out", logOutput.size(), is(1));
}
logOutput.clear();
threadContext.stashContext();
auditTrail.authenticationFailed(randomAlphaOfLength(8), authToken, "_action", request);
if (filterMissingRealm) {
assertThat("AuthenticationFailed token request: not filtered out by the missing realm filter", logOutput.size(), is(0));
} else {
assertThat("AuthenticationFailed token request: is filtered out", logOutput.size(), is(1));
}
logOutput.clear();
threadContext.stashContext();
auditTrail.authenticationFailed(randomAlphaOfLength(8), "_action", request);
if (filterMissingRealm) {
assertThat("AuthenticationFailed no token message: not filtered out by the missing realm filter", logOutput.size(), is(0));
} else {
assertThat("AuthenticationFailed no token message: is filtered out", logOutput.size(), is(1));
}
logOutput.clear();
threadContext.stashContext();
auditTrail.authenticationFailed(randomAlphaOfLength(8), authToken, getHttpRequest());
if (filterMissingRealm) {
assertThat("AuthenticationFailed rest request: not filtered out by the missing realm filter", logOutput.size(), is(0));
} else {
assertThat("AuthenticationFailed rest request: is filtered out", logOutput.size(), is(1));
}
logOutput.clear();
threadContext.stashContext();
auditTrail.authenticationFailed(randomAlphaOfLength(8), unfilteredRealm, authToken, "_action", request);
assertThat("AuthenticationFailed realm message: unfiltered realm is filtered out", logOutput.size(), is(1));
logOutput.clear();
threadContext.stashContext();
auditTrail.authenticationFailed(randomAlphaOfLength(8), filteredRealm, authToken, "_action", request);
assertThat("AuthenticationFailed realm message: filtered realm is not filtered out", logOutput.size(), is(0));
logOutput.clear();
threadContext.stashContext();
auditTrail.authenticationFailed(randomAlphaOfLength(8), unfilteredRealm, authToken, getHttpRequest());
assertThat("AuthenticationFailed realm rest request: unfiltered realm is filtered out", logOutput.size(), is(1));
logOutput.clear();
threadContext.stashContext();
auditTrail.authenticationFailed(randomAlphaOfLength(8), filteredRealm, authToken, getHttpRequest());
assertThat("AuthenticationFailed realm rest request: filtered realm is not filtered out", logOutput.size(), is(0));
logOutput.clear();
threadContext.stashContext();
// accessGranted
Authentication authentication = randomBoolean()
? createAuthentication(user, authUser, filteredRealm)
: createApiKeyAuthentication(apiKeyService, createAuthentication(user, authUser, filteredRealm));
auditTrail.accessGranted(randomAlphaOfLength(8), authentication, "_action", request, authzInfo(new String[] { "role1" }));
if (authentication.getAuthenticationType() == Authentication.AuthenticationType.API_KEY
&& false == authentication.getAuthenticatingSubject()
.getMetadata()
.containsKey(AuthenticationField.API_KEY_CREATOR_REALM_NAME)) {
if (filterMissingRealm) {
assertThat("AccessGranted message: not filtered out by the missing realm filter", logOutput.size(), is(0));
} else {
assertThat("AccessGranted message: filtered out by the realm filters", logOutput.size(), is(1));
}
} else {
assertThat("AccessGranted message: filtered realm is not filtered out", logOutput.size(), is(0));
}
logOutput.clear();
threadContext.stashContext();
authentication = randomBoolean()
? createAuthentication(user, authUser, unfilteredRealm)
: createApiKeyAuthentication(apiKeyService, createAuthentication(user, authUser, unfilteredRealm));
auditTrail.accessGranted(randomAlphaOfLength(8), authentication, "_action", request, authzInfo(new String[] { "role1" }));
if (authentication.getAuthenticationType() == Authentication.AuthenticationType.API_KEY
&& false == authentication.getAuthenticatingSubject()
.getMetadata()
.containsKey(AuthenticationField.API_KEY_CREATOR_REALM_NAME)) {
if (filterMissingRealm) {
assertThat("AccessGranted message: not filtered out by the missing realm filter", logOutput.size(), is(0));
} else {
assertThat("AccessGranted message: filtered out by the realm filters", logOutput.size(), is(1));
}
} else {
assertThat("AccessGranted message: unfiltered realm is filtered out", logOutput.size(), is(1));
}
logOutput.clear();
threadContext.stashContext();
auditTrail.accessGranted(
randomAlphaOfLength(8),
createSystemUserAuthentication(filterFallbackRealm),
"internal:_action",
request,
authzInfo(new String[] { "role1" })
);
assertThat("AccessGranted internal message system user: filtered realm is not filtered out", logOutput.size(), is(0));
logOutput.clear();
threadContext.stashContext();
auditTrail.accessGranted(
randomAlphaOfLength(8),
createSystemUserAuthentication(false == filterFallbackRealm),
"internal:_action",
request,
authzInfo(new String[] { "role1" })
);
assertThat("AccessGranted internal message system user: unfiltered realm is filtered out", logOutput.size(), is(1));
logOutput.clear();
threadContext.stashContext();
authentication = randomBoolean()
? createAuthentication(user, authUser, filteredRealm)
: createApiKeyAuthentication(apiKeyService, createAuthentication(user, authUser, filteredRealm));
auditTrail.accessGranted(randomAlphaOfLength(8), authentication, "internal:_action", request, authzInfo(new String[] { "role1" }));
if (authentication.getAuthenticationType() == Authentication.AuthenticationType.API_KEY
&& false == authentication.getAuthenticatingSubject()
.getMetadata()
.containsKey(AuthenticationField.API_KEY_CREATOR_REALM_NAME)) {
if (filterMissingRealm) {
assertThat("AccessGranted internal message: not filtered out by the missing realm filter", logOutput.size(), is(0));
} else {
assertThat("AccessGranted internal message: filtered out by the realm filters", logOutput.size(), is(1));
}
} else {
assertThat("AccessGranted internal message: filtered realm is not filtered out", logOutput.size(), is(0));
}
logOutput.clear();
threadContext.stashContext();
authentication = randomBoolean()
? createAuthentication(user, authUser, unfilteredRealm)
: createApiKeyAuthentication(apiKeyService, createAuthentication(user, authUser, unfilteredRealm));
auditTrail.accessGranted(randomAlphaOfLength(8), authentication, "internal:_action", request, authzInfo(new String[] { "role1" }));
if (authentication.getAuthenticationType() == Authentication.AuthenticationType.API_KEY
&& false == authentication.getAuthenticatingSubject()
.getMetadata()
.containsKey(AuthenticationField.API_KEY_CREATOR_REALM_NAME)) {
if (filterMissingRealm) {
assertThat("AccessGranted internal message: not filtered out by the missing realm filter", logOutput.size(), is(0));
} else {
assertThat("AccessGranted internal message: filtered out by the realm filters", logOutput.size(), is(1));
}
} else {
assertThat("AccessGranted internal message: unfiltered realm is filtered out", logOutput.size(), is(1));
}
logOutput.clear();
threadContext.stashContext();
// accessDenied
authentication = randomBoolean()
? createAuthentication(user, authUser, filteredRealm)
: createApiKeyAuthentication(apiKeyService, createAuthentication(user, authUser, filteredRealm));
auditTrail.accessDenied(randomAlphaOfLength(8), authentication, "_action", request, authzInfo(new String[] { "role1" }));
if (authentication.getAuthenticationType() == Authentication.AuthenticationType.API_KEY
&& false == authentication.getAuthenticatingSubject()
.getMetadata()
.containsKey(AuthenticationField.API_KEY_CREATOR_REALM_NAME)) {
if (filterMissingRealm) {
assertThat("AccessDenied message: not filtered out by the missing realm filter", logOutput.size(), is(0));
} else {
assertThat("AccessDenied message: filtered out by the realm filters", logOutput.size(), is(1));
}
} else {
assertThat("AccessDenied message: filtered realm is not filtered out", logOutput.size(), is(0));
}
logOutput.clear();
threadContext.stashContext();
authentication = randomBoolean()
? createAuthentication(user, authUser, unfilteredRealm)
: createApiKeyAuthentication(apiKeyService, createAuthentication(user, authUser, unfilteredRealm));
auditTrail.accessDenied(randomAlphaOfLength(8), authentication, "_action", request, authzInfo(new String[] { "role1" }));
if (authentication.getAuthenticationType() == Authentication.AuthenticationType.API_KEY
&& false == authentication.getAuthenticatingSubject()
.getMetadata()
.containsKey(AuthenticationField.API_KEY_CREATOR_REALM_NAME)) {
if (filterMissingRealm) {
assertThat("AccessDenied message: not filtered out by the missing realm filter", logOutput.size(), is(0));
} else {
assertThat("AccessDenied message: filtered out by the realm filters", logOutput.size(), is(1));
}
} else {
assertThat("AccessDenied message: unfiltered realm is filtered out", logOutput.size(), is(1));
}
logOutput.clear();
threadContext.stashContext();
auditTrail.accessDenied(
randomAlphaOfLength(8),
createSystemUserAuthentication(filterFallbackRealm),
"internal:_action",
request,
authzInfo(new String[] { "role1" })
);
assertThat("AccessDenied internal message system user: filtered realm is not filtered out", logOutput.size(), is(0));
logOutput.clear();
threadContext.stashContext();
auditTrail.accessDenied(
randomAlphaOfLength(8),
createSystemUserAuthentication(false == filterFallbackRealm),
"internal:_action",
request,
authzInfo(new String[] { "role1" })
);
assertThat("AccessDenied internal message system user: unfiltered realm is filtered out", logOutput.size(), is(1));
logOutput.clear();
threadContext.stashContext();
authentication = randomBoolean()
? createAuthentication(user, authUser, filteredRealm)
: createApiKeyAuthentication(apiKeyService, createAuthentication(user, authUser, filteredRealm));
auditTrail.accessDenied(randomAlphaOfLength(8), authentication, "internal:_action", request, authzInfo(new String[] { "role1" }));
if (authentication.getAuthenticationType() == Authentication.AuthenticationType.API_KEY
&& false == authentication.getAuthenticatingSubject()
.getMetadata()
.containsKey(AuthenticationField.API_KEY_CREATOR_REALM_NAME)) {
if (filterMissingRealm) {
assertThat("AccessDenied internal message: not filtered out by the missing realm filter", logOutput.size(), is(0));
} else {
assertThat("AccessDenied internal message: filtered out by the realm filters", logOutput.size(), is(1));
}
} else {
assertThat("AccessDenied internal message: filtered realm is filtered out", logOutput.size(), is(0));
}
logOutput.clear();
threadContext.stashContext();
authentication = randomBoolean()
? createAuthentication(user, authUser, unfilteredRealm)
: createApiKeyAuthentication(apiKeyService, createAuthentication(user, authUser, unfilteredRealm));
auditTrail.accessDenied(randomAlphaOfLength(8), authentication, "internal:_action", request, authzInfo(new String[] { "role1" }));
if (authentication.getAuthenticationType() == Authentication.AuthenticationType.API_KEY
&& false == authentication.getAuthenticatingSubject()
.getMetadata()
.containsKey(AuthenticationField.API_KEY_CREATOR_REALM_NAME)) {
if (filterMissingRealm) {
assertThat("AccessDenied internal message: not filtered out by the missing realm filter", logOutput.size(), is(0));
} else {
assertThat("AccessDenied internal message: filtered out by the realm filters", logOutput.size(), is(1));
}
} else {
assertThat("AccessDenied internal message: unfiltered realm is filtered out", logOutput.size(), is(1));
}
logOutput.clear();
threadContext.stashContext();
// tamperedRequest
auditTrail.tamperedRequest(randomAlphaOfLength(8), getHttpRequest());
if (filterMissingRealm) {
assertThat("Tampered rest: is not filtered out by the missing realm filter", logOutput.size(), is(0));
} else {
assertThat("Tampered rest: is filtered out", logOutput.size(), is(1));
}
logOutput.clear();
threadContext.stashContext();
auditTrail.tamperedRequest(randomAlphaOfLength(8), "_action", request);
if (filterMissingRealm) {
assertThat("Tampered message: is not filtered out by the missing realm filter", logOutput.size(), is(0));
} else {
assertThat("Tampered message: is filtered out", logOutput.size(), is(1));
}
logOutput.clear();
threadContext.stashContext();
authentication = randomBoolean()
? createAuthentication(user, authUser, filteredRealm)
: createApiKeyAuthentication(apiKeyService, createAuthentication(user, authUser, filteredRealm));
auditTrail.tamperedRequest(randomAlphaOfLength(8), authentication, "_action", request);
if (authentication.getAuthenticationType() == Authentication.AuthenticationType.API_KEY
&& false == authentication.getAuthenticatingSubject()
.getMetadata()
.containsKey(AuthenticationField.API_KEY_CREATOR_REALM_NAME)) {
if (filterMissingRealm) {
assertThat("Tampered message: not filtered out by the missing realm filter", logOutput.size(), is(0));
} else {
assertThat("Tampered message: filtered out by the realm filters", logOutput.size(), is(1));
}
} else {
assertThat("Tampered message: filtered realm is not filtered out", logOutput.size(), is(0));
}
logOutput.clear();
threadContext.stashContext();
authentication = randomBoolean()
? createAuthentication(user, authUser, unfilteredRealm)
: createApiKeyAuthentication(apiKeyService, createAuthentication(user, authUser, unfilteredRealm));
auditTrail.tamperedRequest(randomAlphaOfLength(8), authentication, "_action", request);
if (authentication.getAuthenticationType() == Authentication.AuthenticationType.API_KEY
&& false == authentication.getAuthenticatingSubject()
.getMetadata()
.containsKey(AuthenticationField.API_KEY_CREATOR_REALM_NAME)) {
if (filterMissingRealm) {
assertThat("Tampered message: not filtered out by the missing realm filter", logOutput.size(), is(0));
} else {
assertThat("Tampered message: filtered out by the realm filters", logOutput.size(), is(1));
}
} else {
assertThat("Tampered message: unfiltered realm is filtered out", logOutput.size(), is(1));
}
logOutput.clear();
threadContext.stashContext();
// connection denied
auditTrail.connectionDenied(randomLoopbackInetSocketAddress(), "default", new SecurityIpFilterRule(false, "_all"));
if (filterMissingRealm) {
assertThat("Connection denied: is not filtered out by the missing realm filter", logOutput.size(), is(0));
} else {
assertThat("Connection denied: is filtered out", logOutput.size(), is(1));
}
logOutput.clear();
threadContext.stashContext();
// connection granted
auditTrail.connectionGranted(randomLoopbackInetSocketAddress(), "default", new SecurityIpFilterRule(false, "_all"));
if (filterMissingRealm) {
assertThat("Connection granted: is not filtered out by the missing realm filter", logOutput.size(), is(0));
} else {
assertThat("Connection granted: is filtered out", logOutput.size(), is(1));
}
logOutput.clear();
threadContext.stashContext();
// runAsGranted
auditTrail.runAsGranted(
randomAlphaOfLength(8),
createAuthentication(user, authUser, filteredRealm),
"_action",
new MockRequest(threadContext),
authzInfo(new String[] { "role1" })
);
assertThat("RunAsGranted message: filtered realm is not filtered out", logOutput.size(), is(0));
logOutput.clear();
threadContext.stashContext();
auditTrail.runAsGranted(
randomAlphaOfLength(8),
createAuthentication(user, authUser, unfilteredRealm),
"_action",
new MockRequest(threadContext),
authzInfo(new String[] { "role1" })
);
assertThat("RunAsGranted message: unfiltered realm is filtered out", logOutput.size(), is(1));
logOutput.clear();
threadContext.stashContext();
// runAsDenied
auditTrail.runAsDenied(
randomAlphaOfLength(8),
createAuthentication(user, authUser, filteredRealm),
"_action",
new MockRequest(threadContext),
authzInfo(new String[] { "role1" })
);
assertThat("RunAsDenied message: filtered realm is not filtered out", logOutput.size(), is(0));
logOutput.clear();
threadContext.stashContext();
auditTrail.runAsDenied(
randomAlphaOfLength(8),
createAuthentication(user, authUser, unfilteredRealm),
"_action",
new MockRequest(threadContext),
authzInfo(new String[] { "role1" })
);
assertThat("RunAsDenied message: unfiltered realm is filtered out", logOutput.size(), is(1));
logOutput.clear();
threadContext.stashContext();
auditTrail.runAsDenied(
randomAlphaOfLength(8),
createAuthentication(user, authUser, filteredRealm),
getHttpRequest(),
authzInfo(new String[] { "role1" })
);
assertThat("RunAsDenied rest request: filtered realm is not filtered out", logOutput.size(), is(0));
logOutput.clear();
threadContext.stashContext();
auditTrail.runAsDenied(
randomAlphaOfLength(8),
createAuthentication(user, authUser, unfilteredRealm),
getHttpRequest(),
authzInfo(new String[] { "role1" })
);
assertThat("RunAsDenied rest request: unfiltered realm is filtered out", logOutput.size(), is(1));
logOutput.clear();
threadContext.stashContext();
// authentication Success
AuditUtil.generateRequestId(threadContext);
createAuthentication(user, authUser, unfilteredRealm).writeToContext(threadContext);
auditTrail.authenticationSuccess(getRestRequest());
assertThat("AuthenticationSuccess rest request: unfiltered realm is filtered out", logOutput.size(), is(1));
logOutput.clear();
threadContext.stashContext();
AuditUtil.generateRequestId(threadContext);
createAuthentication(user, authUser, filteredRealm).writeToContext(threadContext);
auditTrail.authenticationSuccess(getRestRequest());
assertThat("AuthenticationSuccess rest request: filtered realm is not filtered out", logOutput.size(), is(0));
logOutput.clear();
threadContext.stashContext();
auditTrail.authenticationSuccess(randomAlphaOfLength(8), createAuthentication(user, authUser, unfilteredRealm), "_action", request);
assertThat("AuthenticationSuccess message: unfiltered realm is filtered out", logOutput.size(), is(1));
logOutput.clear();
threadContext.stashContext();
auditTrail.authenticationSuccess(randomAlphaOfLength(8), createAuthentication(user, authUser, filteredRealm), "_action", request);
assertThat("AuthenticationSuccess message: filtered realm is not filtered out", logOutput.size(), is(0));
logOutput.clear();
threadContext.stashContext();
}
    /**
     * Verifies the {@code roles} ignore-filter policies of the logfile audit trail.
     * <p>
     * Random filter policies are built from {@code xpack.security.audit.logfile.events.ignore_filters.*.roles}
     * settings. Events whose role set is a subset of some policy's role list must be filtered out
     * (no log line), while events carrying roles that no single policy fully covers must still be
     * logged. A policy whose role list is the empty string ({@code ""}) or an empty list matches
     * events that carry no role information at all ({@code filterMissingRoles} branch).
     */
    public void testRolesFilter() throws Exception {
        final Logger logger = CapturingLogger.newCapturingLogger(Level.INFO, null);
        final ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
        // each element holds the role list of one randomly generated ignore-filter policy
        final List<List<String>> allFilteredRoles = new ArrayList<>();
        final Settings.Builder settingsBuilder = Settings.builder().put(settings);
        for (int i = 0; i < randomIntBetween(1, 4); i++) {
            final List<String> filteredRoles = randomNonEmptyListOfFilteredNames();
            allFilteredRoles.add(new ArrayList<>(filteredRoles));
            settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.policy" + i + ".roles", filteredRoles);
        }
        // a filter for a field consisting of an empty string ("") or an empty list([])
        // will match events that lack that field
        final boolean filterMissingRoles = randomBoolean();
        if (filterMissingRoles) {
            if (randomBoolean()) {
                // "missing" policy expressed as a list that contains the empty string
                final List<String> filteredRoles = randomNonEmptyListOfFilteredNames();
                // possibly renders list empty
                filteredRoles.remove(0);
                if (filteredRoles.isEmpty() == false) {
                    allFilteredRoles.add(new ArrayList<>(filteredRoles));
                }
                filteredRoles.add("");
                settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.missingPolicy.roles", filteredRoles);
            } else {
                // "missing" policy expressed as an entirely empty list
                settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.missingPolicy.roles", Collections.emptyList());
            }
        }
        // filtered roles are a subset of the roles of any policy
        final List<String> filterPolicy = randomFrom(allFilteredRoles);
        final String[] filteredRoles = randomListFromLengthBetween(filterPolicy, 1, filterPolicy.size()).toArray(new String[0]);
        // unfiltered role sets either have roles distinct from any other policy or are
        // a mix of roles from 2 or more policies
        final List<String> unfilteredPolicy = randomFrom(allFilteredRoles);
        List<String> _unfilteredRoles;
        if (randomBoolean()) {
            _unfilteredRoles = randomListFromLengthBetween(unfilteredPolicy, 0, unfilteredPolicy.size());
            // add roles distinct from any role in any filter policy
            for (int i = 0; i < randomIntBetween(1, 4); i++) {
                _unfilteredRoles.add(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 4));
            }
        } else {
            _unfilteredRoles = randomListFromLengthBetween(unfilteredPolicy, 1, unfilteredPolicy.size());
            // add roles from other filter policies
            final List<String> otherRoles = randomNonEmptyListOfFilteredNames("other");
            _unfilteredRoles.addAll(randomListFromLengthBetween(otherRoles, 1, otherRoles.size()));
            settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.otherPolicy.roles", otherRoles);
        }
        final String[] unfilteredRoles = _unfilteredRoles.toArray(new String[0]);
        // randomly exercise run-as and API-key authentication shapes
        Authentication authentication;
        if (randomBoolean()) {
            authentication = createAuthentication(new User("user1", "r1"), new User("authUsername", "r2"), "effectiveRealmName");
        } else {
            authentication = createAuthentication(new User("user1", "r1"), "effectiveRealmName");
        }
        if (randomBoolean()) {
            authentication = createApiKeyAuthentication(apiKeyService, authentication);
        }
        final TransportRequest request = randomBoolean()
            ? new MockRequest(threadContext)
            : new MockIndicesRequest(threadContext, new String[] { "idx1", "idx2" });
        final MockToken authToken = new MockToken("token1");
        final LoggingAuditTrail auditTrail = new LoggingAuditTrail(settingsBuilder.build(), clusterService, logger, threadContext);
        // captured log lines; size 0 means the event was filtered out, 1 means it was logged
        final List<String> logOutput = CapturingLogger.output(logger.getName(), Level.INFO);
        // anonymous accessDenied -- carries no roles, so only the "missing" policy can match
        auditTrail.anonymousAccessDenied(randomAlphaOfLength(8), "_action", request);
        if (filterMissingRoles) {
            assertThat("Anonymous message: not filtered out by the missing roles filter", logOutput.size(), is(0));
        } else {
            assertThat("Anonymous message: filtered out by the roles filters", logOutput.size(), is(1));
        }
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.anonymousAccessDenied(randomAlphaOfLength(8), getHttpRequest());
        if (filterMissingRoles) {
            assertThat("Anonymous rest request: not filtered out by the missing roles filter", logOutput.size(), is(0));
        } else {
            assertThat("Anonymous rest request: filtered out by roles filters", logOutput.size(), is(1));
        }
        logOutput.clear();
        threadContext.stashContext();
        // authenticationFailed -- no authenticated user, hence no roles on any variant
        auditTrail.authenticationFailed(randomAlphaOfLength(8), getHttpRequest());
        if (filterMissingRoles) {
            assertThat("AuthenticationFailed no token rest request: not filtered out by the missing roles filter", logOutput.size(), is(0));
        } else {
            assertThat("AuthenticationFailed no token rest request: is filtered out", logOutput.size(), is(1));
        }
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.authenticationFailed(randomAlphaOfLength(8), authToken, "_action", request);
        if (filterMissingRoles) {
            assertThat("AuthenticationFailed token request: not filtered out by the missing roles filter", logOutput.size(), is(0));
        } else {
            assertThat("AuthenticationFailed token request: is filtered out", logOutput.size(), is(1));
        }
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.authenticationFailed(randomAlphaOfLength(8), "_action", request);
        if (filterMissingRoles) {
            assertThat("AuthenticationFailed no token message: not filtered out by the missing roles filter", logOutput.size(), is(0));
        } else {
            assertThat("AuthenticationFailed no token message: is filtered out", logOutput.size(), is(1));
        }
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.authenticationFailed(randomAlphaOfLength(8), authToken, getHttpRequest());
        if (filterMissingRoles) {
            assertThat("AuthenticationFailed rest request: not filtered out by the missing roles filter", logOutput.size(), is(0));
        } else {
            assertThat("AuthenticationFailed rest request: is filtered out", logOutput.size(), is(1));
        }
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.authenticationFailed(randomAlphaOfLength(8), "_realm", authToken, "_action", request);
        if (filterMissingRoles) {
            assertThat("AuthenticationFailed realm message: not filtered out by the missing roles filter", logOutput.size(), is(0));
        } else {
            assertThat("AuthenticationFailed realm message: is filtered out", logOutput.size(), is(1));
        }
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.authenticationFailed(randomAlphaOfLength(8), "_realm", authToken, getHttpRequest());
        if (filterMissingRoles) {
            assertThat("AuthenticationFailed realm rest request: not filtered out by the missing roles filter", logOutput.size(), is(0));
        } else {
            assertThat("AuthenticationFailed realm rest request: is filtered out", logOutput.size(), is(1));
        }
        logOutput.clear();
        threadContext.stashContext();
        // accessGranted -- roles come from the authorization info passed to the audit call
        auditTrail.accessGranted(randomAlphaOfLength(8), authentication, "_action", request, authzInfo(unfilteredRoles))
        assertThat("AccessGranted message: unfiltered roles filtered out", logOutput.size(), is(1));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.accessGranted(randomAlphaOfLength(8), authentication, "_action", request, authzInfo(filteredRoles));
        assertThat("AccessGranted message: filtered roles not filtered out", logOutput.size(), is(0));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.accessGranted(
            randomAlphaOfLength(8),
            createSystemUserAuthentication(randomBoolean()),
            "internal:_action",
            request,
            authzInfo(unfilteredRoles)
        );
        assertThat("AccessGranted internal message system user: unfiltered roles filtered out", logOutput.size(), is(1));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.accessGranted(
            randomAlphaOfLength(8),
            createSystemUserAuthentication(randomBoolean()),
            "internal:_action",
            request,
            authzInfo(filteredRoles)
        );
        assertThat("AccessGranted internal message system user: filtered roles not filtered out", logOutput.size(), is(0));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.accessGranted(randomAlphaOfLength(8), authentication, "internal:_action", request, authzInfo(unfilteredRoles));
        assertThat("AccessGranted internal message: unfiltered roles filtered out", logOutput.size(), is(1));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.accessGranted(randomAlphaOfLength(8), authentication, "internal:_action", request, authzInfo(filteredRoles));
        assertThat("AccessGranted internal message: filtered roles not filtered out", logOutput.size(), is(0));
        logOutput.clear();
        threadContext.stashContext();
        // accessDenied -- same role-filtering expectations as accessGranted
        auditTrail.accessDenied(randomAlphaOfLength(8), authentication, "_action", request, authzInfo(unfilteredRoles));
        assertThat("AccessDenied message: unfiltered roles filtered out", logOutput.size(), is(1));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.accessDenied(randomAlphaOfLength(8), authentication, "_action", request, authzInfo(filteredRoles));
        assertThat("AccessDenied message: filtered roles not filtered out", logOutput.size(), is(0));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.accessDenied(
            randomAlphaOfLength(8),
            createSystemUserAuthentication(randomBoolean()),
            "internal:_action",
            request,
            authzInfo(unfilteredRoles)
        );
        assertThat("AccessDenied internal message system user: unfiltered roles filtered out", logOutput.size(), is(1));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.accessDenied(
            randomAlphaOfLength(8),
            createSystemUserAuthentication(randomBoolean()),
            "internal:_action",
            request,
            authzInfo(filteredRoles)
        );
        assertThat("AccessDenied internal message system user: filtered roles not filtered out", logOutput.size(), is(0));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.accessDenied(randomAlphaOfLength(8), authentication, "internal:_action", request, authzInfo(unfilteredRoles));
        assertThat("AccessDenied internal message: unfiltered roles filtered out", logOutput.size(), is(1));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.accessDenied(randomAlphaOfLength(8), authentication, "internal:_action", request, authzInfo(filteredRoles));
        assertThat("AccessDenied internal message: filtered roles not filtered out", logOutput.size(), is(0));
        logOutput.clear();
        threadContext.stashContext();
        // connection denied -- IP-filter events carry no roles
        auditTrail.connectionDenied(randomLoopbackInetSocketAddress(), "default", new SecurityIpFilterRule(false, "_all"));
        if (filterMissingRoles) {
            assertThat("Connection denied: is not filtered out by the missing roles filter", logOutput.size(), is(0));
        } else {
            assertThat("Connection denied: is filtered out", logOutput.size(), is(1));
        }
        logOutput.clear();
        threadContext.stashContext();
        // connection granted -- IP-filter events carry no roles
        auditTrail.connectionGranted(randomLoopbackInetSocketAddress(), "default", new SecurityIpFilterRule(false, "_all"));
        if (filterMissingRoles) {
            assertThat("Connection granted: is not filtered out by the missing roles filter", logOutput.size(), is(0));
        } else {
            assertThat("Connection granted: is filtered out", logOutput.size(), is(1));
        }
        logOutput.clear();
        threadContext.stashContext();
        // runAsGranted
        auditTrail.runAsGranted(
            randomAlphaOfLength(8),
            authentication,
            "_action",
            new MockRequest(threadContext),
            authzInfo(unfilteredRoles)
        );
        assertThat("RunAsGranted message: unfiltered roles filtered out", logOutput.size(), is(1));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.runAsGranted(
            randomAlphaOfLength(8),
            authentication,
            "_action",
            new MockRequest(threadContext),
            authzInfo(filteredRoles)
        );
        assertThat("RunAsGranted message: filtered roles not filtered out", logOutput.size(), is(0));
        logOutput.clear();
        threadContext.stashContext();
        // runAsDenied
        auditTrail.runAsDenied(
            randomAlphaOfLength(8),
            authentication,
            "_action",
            new MockRequest(threadContext),
            authzInfo(unfilteredRoles)
        );
        assertThat("RunAsDenied message: unfiltered roles filtered out", logOutput.size(), is(1));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.runAsDenied(randomAlphaOfLength(8), authentication, "_action", new MockRequest(threadContext), authzInfo(filteredRoles));
        assertThat("RunAsDenied message: filtered roles not filtered out", logOutput.size(), is(0));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.runAsDenied(randomAlphaOfLength(8), authentication, getHttpRequest(), authzInfo(unfilteredRoles));
        assertThat("RunAsDenied rest request: unfiltered roles filtered out", logOutput.size(), is(1));
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.runAsDenied(randomAlphaOfLength(8), authentication, getHttpRequest(), authzInfo(filteredRoles));
        assertThat("RunAsDenied rest request: filtered roles not filtered out", logOutput.size(), is(0));
        logOutput.clear();
        threadContext.stashContext();
        // authentication Success -- no authorization info, so roles are absent and only the
        // "missing" policy can match
        AuditUtil.generateRequestId(threadContext);
        authentication.writeToContext(threadContext);
        auditTrail.authenticationSuccess(getRestRequest());
        if (filterMissingRoles) {
            assertThat("AuthenticationSuccess rest request: is not filtered out by the missing roles filter", logOutput.size(), is(0));
        } else {
            assertThat("AuthenticationSuccess rest request: unfiltered realm is filtered out", logOutput.size(), is(1));
        }
        logOutput.clear();
        threadContext.stashContext();
        auditTrail.authenticationSuccess(randomAlphaOfLength(8), authentication, "_action", request);
        if (filterMissingRoles) {
            assertThat("AuthenticationSuccess message: is not filtered out by the missing roles filter", logOutput.size(), is(0));
        } else {
            assertThat("AuthenticationSuccess message: unfiltered realm is filtered out", logOutput.size(), is(1));
        }
        logOutput.clear();
        threadContext.stashContext();
    }
public void testIndicesFilter() throws Exception {
final Logger logger = CapturingLogger.newCapturingLogger(Level.INFO, null);
final ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
final List<List<String>> allFilteredIndices = new ArrayList<>();
final Settings.Builder settingsBuilder = Settings.builder().put(settings);
for (int i = 0; i < randomIntBetween(1, 3); i++) {
final List<String> filteredIndices = randomNonEmptyListOfFilteredNames();
allFilteredIndices.add(new ArrayList<>(filteredIndices));
settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.policy" + i + ".indices", filteredIndices);
}
// a filter for a field consisting of an empty string ("") or an empty list([])
// will match events that lack that field
final boolean filterMissingIndices = randomBoolean();
if (filterMissingIndices) {
if (randomBoolean()) {
final List<String> filteredIndices = randomNonEmptyListOfFilteredNames();
// possibly renders list empty
filteredIndices.remove(0);
if (filteredIndices.isEmpty() == false) {
allFilteredIndices.add(new ArrayList<>(filteredIndices));
}
filteredIndices.add("");
settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.missingPolicy.indices", filteredIndices);
} else {
settingsBuilder.putList(
"xpack.security.audit.logfile.events.ignore_filters.missingPolicy.indices",
Collections.emptyList()
);
}
}
// filtered indices are a subset of the indices of any policy
final List<String> filterPolicy = randomFrom(allFilteredIndices);
final String[] filteredIndices = randomListFromLengthBetween(filterPolicy, 1, filterPolicy.size()).toArray(new String[0]);
// unfiltered index sets either have indices distinct from any other in any
// policy or are a mix of indices from 2 or more policies
final List<String> unfilteredPolicy = randomFrom(allFilteredIndices);
List<String> _unfilteredIndices;
if (randomBoolean()) {
_unfilteredIndices = randomListFromLengthBetween(unfilteredPolicy, 0, unfilteredPolicy.size());
// add indices distinct from any index in any filter policy
for (int i = 0; i < randomIntBetween(1, 4); i++) {
_unfilteredIndices.add(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 4));
}
} else {
_unfilteredIndices = randomListFromLengthBetween(unfilteredPolicy, 1, unfilteredPolicy.size());
// add indices from other filter policies
final List<String> otherIndices = randomNonEmptyListOfFilteredNames("other");
_unfilteredIndices.addAll(randomListFromLengthBetween(otherIndices, 1, otherIndices.size()));
settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.otherPolicy.indices", otherIndices);
}
final String[] unfilteredIndices = _unfilteredIndices.toArray(new String[0]);
Authentication authentication;
if (randomBoolean()) {
authentication = createAuthentication(new User("user1", "r1"), new User("authUsername", "r2"), "effectiveRealmName");
} else {
authentication = createAuthentication(new User("user1", "r1"), "effectiveRealmName");
}
if (randomBoolean()) {
authentication = createApiKeyAuthentication(apiKeyService, authentication);
}
final MockToken authToken = new MockToken("token1");
final TransportRequest noIndexRequest = new MockRequest(threadContext);
final LoggingAuditTrail auditTrail = new LoggingAuditTrail(settingsBuilder.build(), clusterService, logger, threadContext);
final List<String> logOutput = CapturingLogger.output(logger.getName(), Level.INFO);
// anonymous accessDenied
auditTrail.anonymousAccessDenied(randomAlphaOfLength(8), "_action", noIndexRequest);
if (filterMissingIndices) {
assertThat("Anonymous message no index: not filtered out by the missing indices filter", logOutput.size(), is(0));
} else {
assertThat("Anonymous message no index: filtered out by indices filters", logOutput.size(), is(1));
}
logOutput.clear();
threadContext.stashContext();
auditTrail.anonymousAccessDenied(randomAlphaOfLength(8), "_action", new MockIndicesRequest(threadContext, unfilteredIndices));
assertThat("Anonymous message unfiltered indices: filtered out by indices filters", logOutput.size(), is(1));
logOutput.clear();
threadContext.stashContext();
auditTrail.anonymousAccessDenied(randomAlphaOfLength(8), "_action", new MockIndicesRequest(threadContext, filteredIndices));
assertThat("Anonymous message filtered indices: not filtered out by indices filters", logOutput.size(), is(0));
logOutput.clear();
threadContext.stashContext();
auditTrail.anonymousAccessDenied(randomAlphaOfLength(8), getHttpRequest());
if (filterMissingIndices) {
assertThat("Anonymous rest request: not filtered out by the missing indices filter", logOutput.size(), is(0));
} else {
assertThat("Anonymous rest request: filtered out by indices filters", logOutput.size(), is(1));
}
logOutput.clear();
threadContext.stashContext();
// authenticationFailed
auditTrail.authenticationFailed(randomAlphaOfLength(8), getHttpRequest());
if (filterMissingIndices) {
assertThat(
"AuthenticationFailed no token rest request: not filtered out by the missing indices filter",
logOutput.size(),
is(0)
);
} else {
assertThat("AuthenticationFailed no token rest request: filtered out by indices filters", logOutput.size(), is(1));
}
logOutput.clear();
threadContext.stashContext();
auditTrail.authenticationFailed(randomAlphaOfLength(8), authToken, "_action", noIndexRequest);
if (filterMissingIndices) {
assertThat(
"AuthenticationFailed token request no index: not filtered out by the missing indices filter",
logOutput.size(),
is(0)
);
} else {
assertThat("AuthenticationFailed token request no index: filtered out by indices filter", logOutput.size(), is(1));
}
logOutput.clear();
threadContext.stashContext();
auditTrail.authenticationFailed(
randomAlphaOfLength(8),
authToken,
"_action",
new MockIndicesRequest(threadContext, unfilteredIndices)
);
assertThat("AuthenticationFailed token request unfiltered indices: filtered out by indices filter", logOutput.size(), is(1));
logOutput.clear();
threadContext.stashContext();
auditTrail.authenticationFailed(
randomAlphaOfLength(8),
authToken,
"_action",
new MockIndicesRequest(threadContext, filteredIndices)
);
assertThat("AuthenticationFailed token request filtered indices: not filtered out by indices filter", logOutput.size(), is(0));
logOutput.clear();
threadContext.stashContext();
auditTrail.authenticationFailed(randomAlphaOfLength(8), "_action", noIndexRequest);
if (filterMissingIndices) {
assertThat(
"AuthenticationFailed no token message no index: not filtered out by the missing indices filter",
logOutput.size(),
is(0)
);
} else {
assertThat("AuthenticationFailed no token message: filtered out by indices filter", logOutput.size(), is(1));
}
logOutput.clear();
threadContext.stashContext();
auditTrail.authenticationFailed(randomAlphaOfLength(8), "_action", new MockIndicesRequest(threadContext, unfilteredIndices));
assertThat("AuthenticationFailed no token request unfiltered indices: filtered out by indices filter", logOutput.size(), is(1));
logOutput.clear();
threadContext.stashContext();
auditTrail.authenticationFailed(randomAlphaOfLength(8), "_action", new MockIndicesRequest(threadContext, filteredIndices));
assertThat("AuthenticationFailed no token request filtered indices: not filtered out by indices filter", logOutput.size(), is(0));
logOutput.clear();
threadContext.stashContext();
auditTrail.authenticationFailed(randomAlphaOfLength(8), authToken, getHttpRequest());
if (filterMissingIndices) {
assertThat("AuthenticationFailed rest request: not filtered out by the missing indices filter", logOutput.size(), is(0));
} else {
assertThat("AuthenticationFailed rest request: filtered out by indices filter", logOutput.size(), is(1));
}
logOutput.clear();
threadContext.stashContext();
auditTrail.authenticationFailed(randomAlphaOfLength(8), "_realm", authToken, "_action", noIndexRequest);
if (filterMissingIndices) {
assertThat(
"AuthenticationFailed realm message no index: not filtered out by the missing indices filter",
logOutput.size(),
is(0)
);
} else {
assertThat("AuthenticationFailed realm message no index: filtered out by indices filter", logOutput.size(), is(1));
}
logOutput.clear();
threadContext.stashContext();
auditTrail.authenticationFailed(
randomAlphaOfLength(8),
"_realm",
authToken,
"_action",
new MockIndicesRequest(threadContext, unfilteredIndices)
);
assertThat("AuthenticationFailed realm message unfiltered indices: filtered out by indices filter", logOutput.size(), is(1));
logOutput.clear();
threadContext.stashContext();
auditTrail.authenticationFailed(
randomAlphaOfLength(8),
"_realm",
authToken,
"_action",
new MockIndicesRequest(threadContext, filteredIndices)
);
assertThat("AuthenticationFailed realm message filtered indices: not filtered out by indices filter", logOutput.size(), is(0));
logOutput.clear();
threadContext.stashContext();
auditTrail.authenticationFailed(randomAlphaOfLength(8), "_realm", authToken, getHttpRequest());
if (filterMissingIndices) {
assertThat("AuthenticationFailed realm rest request: not filtered out by the missing indices filter", logOutput.size(), is(0));
} else {
assertThat("AuthenticationFailed realm rest request: filtered out by indices filter", logOutput.size(), is(1));
}
logOutput.clear();
threadContext.stashContext();
// accessGranted
auditTrail.accessGranted(randomAlphaOfLength(8), authentication, "_action", noIndexRequest, authzInfo(new String[] { "role1" }));
if (filterMissingIndices) {
assertThat("AccessGranted message no index: not filtered out by the missing indices filter", logOutput.size(), is(0));
} else {
assertThat("AccessGranted message no index: filtered out by indices filter", logOutput.size(), is(1));
}
logOutput.clear();
threadContext.stashContext();
auditTrail.accessGranted(
randomAlphaOfLength(8),
authentication,
"_action",
new MockIndicesRequest(threadContext, unfilteredIndices),
authzInfo(new String[] { "role1" })
);
assertThat("AccessGranted message unfiltered indices: filtered out by indices filter", logOutput.size(), is(1));
logOutput.clear();
threadContext.stashContext();
auditTrail.accessGranted(
randomAlphaOfLength(8),
authentication,
"_action",
new MockIndicesRequest(threadContext, filteredIndices),
authzInfo(new String[] { "role1" })
);
assertThat("AccessGranted message filtered indices: not filtered out by indices filter", logOutput.size(), is(0));
logOutput.clear();
threadContext.stashContext();
auditTrail.accessGranted(
randomAlphaOfLength(8),
createSystemUserAuthentication(randomBoolean()),
"internal:_action",
noIndexRequest,
authzInfo(new String[] { "role1" })
);
if (filterMissingIndices) {
assertThat(
"AccessGranted message system user no index: not filtered out by the missing indices filter",
logOutput.size(),
is(0)
);
} else {
assertThat("AccessGranted message system user no index: filtered out by indices filter", logOutput.size(), is(1));
}
logOutput.clear();
threadContext.stashContext();
auditTrail.accessGranted(
randomAlphaOfLength(8),
createSystemUserAuthentication(randomBoolean()),
"internal:_action",
new MockIndicesRequest(threadContext, unfilteredIndices),
authzInfo(new String[] { "role1" })
);
assertThat("AccessGranted message system user unfiltered indices: filtered out by indices filter", logOutput.size(), is(1));
logOutput.clear();
threadContext.stashContext();
auditTrail.accessGranted(
randomAlphaOfLength(8),
createSystemUserAuthentication(randomBoolean()),
"internal:_action",
new MockIndicesRequest(threadContext, filteredIndices),
authzInfo(new String[] { "role1" })
);
assertThat("AccessGranted message system user filtered indices: not filtered out by indices filter", logOutput.size(), is(0));
logOutput.clear();
threadContext.stashContext();
// accessDenied
auditTrail.accessDenied(randomAlphaOfLength(8), authentication, "_action", noIndexRequest, authzInfo(new String[] { "role1" }));
if (filterMissingIndices) {
assertThat("AccessDenied message no index: not filtered out by the missing indices filter", logOutput.size(), is(0));
} else {
assertThat("AccessDenied message no index: filtered out by indices filter", logOutput.size(), is(1));
}
logOutput.clear();
threadContext.stashContext();
auditTrail.accessDenied(
randomAlphaOfLength(8),
authentication,
"_action",
new MockIndicesRequest(threadContext, unfilteredIndices),
authzInfo(new String[] { "role1" })
);
assertThat("AccessDenied message unfiltered indices: filtered out by indices filter", logOutput.size(), is(1));
logOutput.clear();
threadContext.stashContext();
auditTrail.accessDenied(
randomAlphaOfLength(8),
authentication,
"_action",
new MockIndicesRequest(threadContext, filteredIndices),
authzInfo(new String[] { "role1" })
);
assertThat("AccessDenied message filtered indices: not filtered out by indices filter", logOutput.size(), is(0));
logOutput.clear();
threadContext.stashContext();
auditTrail.accessDenied(
randomAlphaOfLength(8),
createSystemUserAuthentication(randomBoolean()),
"internal:_action",
noIndexRequest,
authzInfo(new String[] { "role1" })
);
if (filterMissingIndices) {
assertThat(
"AccessDenied message system user no index: not filtered out by the missing indices filter",
logOutput.size(),
is(0)
);
} else {
assertThat("AccessDenied message system user no index: filtered out by indices filter", logOutput.size(), is(1));
}
logOutput.clear();
threadContext.stashContext();
auditTrail.accessDenied(
randomAlphaOfLength(8),
createSystemUserAuthentication(randomBoolean()),
"internal:_action",
new MockIndicesRequest(threadContext, unfilteredIndices),
authzInfo(new String[] { "role1" })
);
assertThat("AccessDenied message system user unfiltered indices: filtered out by indices filter", logOutput.size(), is(1));
logOutput.clear();
threadContext.stashContext();
auditTrail.accessDenied(
randomAlphaOfLength(8),
createSystemUserAuthentication(randomBoolean()),
"internal:_action",
new MockIndicesRequest(threadContext, filteredIndices),
authzInfo(new String[] { "role1" })
);
assertThat("AccessGranted message system user filtered indices: not filtered out by indices filter", logOutput.size(), is(0));
logOutput.clear();
threadContext.stashContext();
// connection denied
auditTrail.connectionDenied(randomLoopbackInetSocketAddress(), "default", new SecurityIpFilterRule(false, "_all"));
if (filterMissingIndices) {
assertThat("Connection denied: not filtered out by missing indices filter", logOutput.size(), is(0));
} else {
assertThat("Connection denied: filtered out by indices filter", logOutput.size(), is(1));
}
logOutput.clear();
threadContext.stashContext();
// connection granted
auditTrail.connectionGranted(randomLoopbackInetSocketAddress(), "default", new SecurityIpFilterRule(false, "_all"));
if (filterMissingIndices) {
assertThat("Connection granted: not filtered out by missing indices filter", logOutput.size(), is(0));
} else {
assertThat("Connection granted: filtered out by indices filter", logOutput.size(), is(1));
}
logOutput.clear();
threadContext.stashContext();
// runAsGranted
auditTrail.runAsGranted(randomAlphaOfLength(8), authentication, "_action", noIndexRequest, authzInfo(new String[] { "role1" }));
if (filterMissingIndices) {
assertThat("RunAsGranted message no index: not filtered out by missing indices filter", logOutput.size(), is(0));
} else {
assertThat("RunAsGranted message no index: filtered out by indices filter", logOutput.size(), is(1));
}
logOutput.clear();
threadContext.stashContext();
auditTrail.runAsGranted(
randomAlphaOfLength(8),
authentication,
"_action",
new MockIndicesRequest(threadContext, unfilteredIndices),
authzInfo(new String[] { "role1" })
);
assertThat("RunAsGranted message unfiltered indices: filtered out by indices filter", logOutput.size(), is(1));
logOutput.clear();
threadContext.stashContext();
auditTrail.runAsGranted(
randomAlphaOfLength(8),
authentication,
"_action",
new MockIndicesRequest(threadContext, filteredIndices),
authzInfo(new String[] { "role1" })
);
assertThat("RunAsGranted message filtered indices: not filtered out by indices filter", logOutput.size(), is(0));
logOutput.clear();
threadContext.stashContext();
// runAsDenied
auditTrail.runAsDenied(randomAlphaOfLength(8), authentication, "_action", noIndexRequest, authzInfo(new String[] { "role1" }));
if (filterMissingIndices) {
assertThat("RunAsDenied message no index: not filtered out by missing indices filter", logOutput.size(), is(0));
} else {
assertThat("RunAsDenied message no index: filtered out by indices filter", logOutput.size(), is(1));
}
logOutput.clear();
threadContext.stashContext();
auditTrail.runAsDenied(
randomAlphaOfLength(8),
authentication,
"_action",
new MockIndicesRequest(threadContext, unfilteredIndices),
authzInfo(new String[] { "role1" })
);
assertThat("RunAsDenied message unfiltered indices: filtered out by indices filter", logOutput.size(), is(1));
logOutput.clear();
threadContext.stashContext();
auditTrail.runAsDenied(
randomAlphaOfLength(8),
authentication,
"_action",
new MockIndicesRequest(threadContext, filteredIndices),
authzInfo(new String[] { "role1" })
);
assertThat("RunAsDenied message filtered indices: not filtered out by indices filter", logOutput.size(), is(0));
logOutput.clear();
threadContext.stashContext();
auditTrail.runAsDenied(randomAlphaOfLength(8), authentication, getHttpRequest(), authzInfo(new String[] { "role1" }));
if (filterMissingIndices) {
assertThat("RunAsDenied rest request: not filtered out by missing indices filter", logOutput.size(), is(0));
} else {
assertThat("RunAsDenied rest request: filtered out by indices filter", logOutput.size(), is(1));
}
logOutput.clear();
threadContext.stashContext();
// authentication Success
AuditUtil.generateRequestId(threadContext);
authentication.writeToContext(threadContext);
auditTrail.authenticationSuccess(getRestRequest());
if (filterMissingIndices) {
assertThat("AuthenticationSuccess rest request: is not filtered out by the missing indices filter", logOutput.size(), is(0));
} else {
assertThat("AuthenticationSuccess rest request: filtered out by indices filter", logOutput.size(), is(1));
}
logOutput.clear();
threadContext.stashContext();
auditTrail.authenticationSuccess(randomAlphaOfLength(8), authentication, "_action", noIndexRequest);
if (filterMissingIndices) {
assertThat("AuthenticationSuccess message no index: not filtered out by missing indices filter", logOutput.size(), is(0));
} else {
assertThat("AuthenticationSuccess message no index: filtered out by indices filter", logOutput.size(), is(1));
}
logOutput.clear();
threadContext.stashContext();
auditTrail.authenticationSuccess(
randomAlphaOfLength(8),
authentication,
"_action",
new MockIndicesRequest(threadContext, unfilteredIndices)
);
assertThat("AuthenticationSuccess message unfiltered indices: filtered out by indices filter", logOutput.size(), is(1));
logOutput.clear();
threadContext.stashContext();
auditTrail.authenticationSuccess(
randomAlphaOfLength(8),
authentication,
"_action",
new MockIndicesRequest(threadContext, filteredIndices)
);
assertThat("AuthenticationSuccess message filtered indices: not filtered out by indices filter", logOutput.size(), is(0));
logOutput.clear();
threadContext.stashContext();
}
/**
 * Verifies that the {@code ignore_filters.*.actions} audit filter suppresses events whose
 * action name matches the filter, while events with unfiltered actions are still logged.
 * When the "missing action" policy is enabled (an empty-string or empty-list actions filter),
 * events that carry no action at all (REST requests, connection granted/denied) are also
 * expected to be filtered out.
 */
public void testActionsFilter() throws Exception {
    final Logger logger = CapturingLogger.newCapturingLogger(Level.INFO, null);
    final ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
    final List<String> filteredActions = randomNonEmptyListOfFilteredActions();
    final Settings.Builder settingsBuilder = Settings.builder().put(settings);
    settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.actionsPolicy.actions", filteredActions);
    // a filter for a field consisting of an empty string ("") or an empty list([])
    // will match events that lack that field
    final boolean filterMissingAction = randomBoolean();
    if (filterMissingAction) {
        if (randomBoolean()) {
            filteredActions.add("");
            settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.missingPolicy.actions", filteredActions);
        } else {
            settingsBuilder.putList(
                "xpack.security.audit.logfile.events.ignore_filters.missingPolicy.actions",
                Collections.emptyList()
            );
        }
    }
    final String filteredAction = randomFrom(filteredActions);
    // an action name that no generated filter pattern can match
    final String unfilteredAction = "mock_action/mock_action";
    final User user, authUser;
    if (randomBoolean()) {
        user = new User("user1", "r1");
        authUser = new User("authUsername", "r2");
    } else {
        user = new User("user1", "r1");
        authUser = null;
    }
    final TransportRequest request = randomBoolean()
        ? new MockRequest(threadContext)
        : new MockIndicesRequest(threadContext, new String[] { "idx1", "idx2" });
    final MockToken authToken = new MockToken("token1");
    final LoggingAuditTrail auditTrail = new LoggingAuditTrail(settingsBuilder.build(), clusterService, logger, threadContext);
    final List<String> logOutput = CapturingLogger.output(logger.getName(), Level.INFO);
    // anonymous accessDenied
    auditTrail.anonymousAccessDenied(randomAlphaOfLength(8), filteredAction, request);
    assertThat("Anonymous message: not filtered out by the action filter", logOutput.size(), is(0));
    logOutput.clear();
    threadContext.stashContext();
    auditTrail.anonymousAccessDenied(randomAlphaOfLength(8), getHttpRequest());
    if (filterMissingAction) {
        assertThat("Anonymous rest request: not filtered out by the missing action filter", logOutput.size(), is(0));
    } else {
        assertThat("Anonymous rest request: filtered out by action filter", logOutput.size(), is(1));
    }
    logOutput.clear();
    threadContext.stashContext();
    // authenticationFailed
    auditTrail.authenticationFailed(randomAlphaOfLength(8), getHttpRequest());
    if (filterMissingAction) {
        assertThat("AuthenticationFailed: not filtered out by the missing action filter", logOutput.size(), is(0));
    } else {
        assertThat("AuthenticationFailed: filtered out by action filter", logOutput.size(), is(1));
    }
    logOutput.clear();
    threadContext.stashContext();
    auditTrail.authenticationFailed(randomAlphaOfLength(8), authToken, filteredAction, request);
    assertThat("AuthenticationFailed: not filtered out by the action filter", logOutput.size(), is(0));
    logOutput.clear();
    threadContext.stashContext();
    auditTrail.authenticationFailed(randomAlphaOfLength(8), filteredAction, request);
    assertThat("AuthenticationFailed no token message: not filtered out by the action filter", logOutput.size(), is(0));
    logOutput.clear();
    threadContext.stashContext();
    auditTrail.authenticationFailed(randomAlphaOfLength(8), authToken, getHttpRequest());
    if (filterMissingAction) {
        assertThat("AuthenticationFailed rest request: not filtered out by the missing action filter", logOutput.size(), is(0));
    } else {
        assertThat("AuthenticationFailed rest request: filtered out by action filter", logOutput.size(), is(1));
    }
    logOutput.clear();
    threadContext.stashContext();
    auditTrail.authenticationFailed(randomAlphaOfLength(8), "realm", authToken, unfilteredAction, request);
    assertThat("AuthenticationFailed realm message: unfiltered action is filtered out", logOutput.size(), is(1));
    logOutput.clear();
    threadContext.stashContext();
    auditTrail.authenticationFailed(randomAlphaOfLength(8), "realm", authToken, filteredAction, request);
    assertThat("AuthenticationFailed realm message: filtered action is not filtered out", logOutput.size(), is(0));
    logOutput.clear();
    threadContext.stashContext();
    auditTrail.authenticationFailed(randomAlphaOfLength(8), "realm", authToken, getHttpRequest());
    if (filterMissingAction) {
        assertThat("AuthenticationFailed realm rest request: not filtered out by the missing action filter", logOutput.size(), is(0));
    } else {
        assertThat("AuthenticationFailed realm rest request: filtered out by the action filters", logOutput.size(), is(1));
    }
    logOutput.clear();
    threadContext.stashContext();
    // accessGranted
    Authentication authentication = createAuthentication(user, authUser, "realm");
    auditTrail.accessGranted(randomAlphaOfLength(8), authentication, filteredAction, request, authzInfo(new String[] { "role1" }));
    assertThat("AccessGranted message: not filtered out by the action filters", logOutput.size(), is(0));
    logOutput.clear();
    threadContext.stashContext();
    auditTrail.accessGranted(randomAlphaOfLength(8), authentication, unfilteredAction, request, authzInfo(new String[] { "role1" }));
    assertThat("AccessGranted message: unfiltered action filtered out by the action filter", logOutput.size(), is(1));
    logOutput.clear();
    threadContext.stashContext();
    // accessDenied
    auditTrail.accessDenied(randomAlphaOfLength(8), authentication, filteredAction, request, authzInfo(new String[] { "role1" }));
    assertThat("AccessDenied message: not filtered out by the action filters", logOutput.size(), is(0));
    logOutput.clear();
    threadContext.stashContext();
    auditTrail.accessDenied(randomAlphaOfLength(8), authentication, unfilteredAction, request, authzInfo(new String[] { "role1" }));
    assertThat("AccessDenied message: unfiltered action filtered out by the action filter", logOutput.size(), is(1));
    logOutput.clear();
    threadContext.stashContext();
    // tamperedRequest
    auditTrail.tamperedRequest(randomAlphaOfLength(8), getHttpRequest());
    if (filterMissingAction) {
        assertThat("Tampered rest: not filtered out by the missing action filter", logOutput.size(), is(0));
    } else {
        assertThat("Tampered rest: filtered out by the action filters", logOutput.size(), is(1));
    }
    logOutput.clear();
    threadContext.stashContext();
    auditTrail.tamperedRequest(randomAlphaOfLength(8), filteredAction, request);
    assertThat("Tampered message: not filtered out by the action filters", logOutput.size(), is(0));
    logOutput.clear();
    threadContext.stashContext();
    auditTrail.tamperedRequest(randomAlphaOfLength(8), authentication, filteredAction, request);
    assertThat("Tampered message: not filtered out by the action filters", logOutput.size(), is(0));
    logOutput.clear();
    threadContext.stashContext();
    auditTrail.tamperedRequest(randomAlphaOfLength(8), authentication, unfilteredAction, request);
    assertThat("Tampered message: unfiltered action filtered out by the action filter", logOutput.size(), is(1));
    logOutput.clear();
    threadContext.stashContext();
    // connection denied
    auditTrail.connectionDenied(randomLoopbackInetSocketAddress(), "default", new SecurityIpFilterRule(false, "_all"));
    if (filterMissingAction) {
        assertThat("Connection denied: not filtered out by the missing action filter", logOutput.size(), is(0));
    } else {
        assertThat("Connection denied: filtered out by the action filters", logOutput.size(), is(1));
    }
    logOutput.clear();
    threadContext.stashContext();
    // connection granted
    auditTrail.connectionGranted(randomLoopbackInetSocketAddress(), "default", new SecurityIpFilterRule(false, "_all"));
    if (filterMissingAction) {
        assertThat("Connection granted: not filtered out by the missing action filter", logOutput.size(), is(0));
    } else {
        assertThat("Connection granted: filtered out by the action filters", logOutput.size(), is(1));
    }
    logOutput.clear();
    threadContext.stashContext();
    // runAsGranted
    auditTrail.runAsGranted(
        randomAlphaOfLength(8),
        createAuthentication(user, authUser, "realm"),
        filteredAction,
        new MockRequest(threadContext),
        authzInfo(new String[] { "role1" })
    );
    assertThat("RunAsGranted message: not filtered out by the action filters", logOutput.size(), is(0));
    logOutput.clear();
    threadContext.stashContext();
    auditTrail.runAsGranted(
        randomAlphaOfLength(8),
        createAuthentication(user, authUser, "realm"),
        unfilteredAction,
        new MockRequest(threadContext),
        authzInfo(new String[] { "role1" })
    );
    assertThat("RunAsGranted message: unfiltered action is filtered out", logOutput.size(), is(1));
    logOutput.clear();
    threadContext.stashContext();
    // runAsDenied
    auditTrail.runAsDenied(
        randomAlphaOfLength(8),
        createAuthentication(user, authUser, "realm"),
        filteredAction,
        new MockRequest(threadContext),
        authzInfo(new String[] { "role1" })
    );
    assertThat("RunAsDenied message: not filtered out by the action filters", logOutput.size(), is(0));
    logOutput.clear();
    threadContext.stashContext();
    auditTrail.runAsDenied(
        randomAlphaOfLength(8),
        createAuthentication(user, authUser, "realm"),
        unfilteredAction,
        new MockRequest(threadContext),
        authzInfo(new String[] { "role1" })
    );
    assertThat("RunAsDenied message: unfiltered action filtered out by the action filters", logOutput.size(), is(1));
    logOutput.clear();
    threadContext.stashContext();
    auditTrail.runAsDenied(
        randomAlphaOfLength(8),
        createAuthentication(user, authUser, "realm"),
        getHttpRequest(),
        authzInfo(new String[] { "role1" })
    );
    if (filterMissingAction) {
        assertThat("RunAsDenied rest request: not filtered out by the missing action filter", logOutput.size(), is(0));
    } else {
        assertThat("RunAsDenied rest request: filtered out by the action filters", logOutput.size(), is(1));
    }
    logOutput.clear();
    threadContext.stashContext();
    // authentication Success
    AuditUtil.generateRequestId(threadContext);
    createAuthentication(user, authUser, "realm").writeToContext(threadContext);
    auditTrail.authenticationSuccess(getRestRequest());
    if (filterMissingAction) {
        assertThat("AuthenticationSuccess rest request: not filtered out by the missing action filter", logOutput.size(), is(0));
    } else {
        assertThat("AuthenticationSuccess rest request: filtered out by the action filters", logOutput.size(), is(1));
    }
    logOutput.clear();
    threadContext.stashContext();
    auditTrail.authenticationSuccess(randomAlphaOfLength(8), createAuthentication(user, authUser, "realm"), filteredAction, request);
    assertThat("AuthenticationSuccess message: filtered action is not filtered out", logOutput.size(), is(0));
    logOutput.clear();
    threadContext.stashContext();
    auditTrail.authenticationSuccess(randomAlphaOfLength(8), createAuthentication(user, authUser, "realm"), unfilteredAction, request);
    assertThat("AuthenticationSuccess message: unfiltered action is filtered out", logOutput.size(), is(1));
    logOutput.clear();
    threadContext.stashContext();
}
/**
 * Verifies that audit ignore-filter policies take effect when applied through dynamic cluster
 * settings and stop matching once those settings are removed: the same access-granted event is
 * first logged (no filter yet), then suppressed (filter applied), then logged again (filter
 * removed). The policy randomly filters on any subset of user, realm, role and index, always
 * falling back to an action filter so at least one attribute matches the audited event.
 */
public void testRemoveIgnoreFilter() throws IllegalAccessException, IOException {
    final Logger logger = CapturingLogger.newCapturingLogger(Level.INFO, null);
    final ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
    final String policyName = randomAlphaOfLengthBetween(5, 8);
    final List<String> filteredUsers = randomNonEmptyListOfFilteredNames();
    final List<String> filteredRoles = randomNonEmptyListOfFilteredNames();
    final List<String> filteredRealms = randomNonEmptyListOfFilteredNames();
    final List<String> filteredIndices = randomNonEmptyListOfFilteredNames();
    final List<String> filteredActions = randomNonEmptyListOfFilteredActions();
    // First create an auditTrail with no filtering
    final LoggingAuditTrail auditTrail = new LoggingAuditTrail(
        Settings.builder().put(settings).build(),
        clusterService,
        logger,
        threadContext
    );
    final List<String> logOutput = CapturingLogger.output(logger.getName(), Level.INFO);
    // First create a working ignore filter
    final Settings.Builder settingsBuilder = Settings.builder();
    // Each filter dimension is independently enabled; when enabled, the audited event is
    // built to match it (e.g. the authenticated user takes a name from the filtered list).
    final String username;
    if (randomBoolean()) {
        settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters." + policyName + ".users", filteredUsers);
        username = randomFrom(filteredUsers);
    } else {
        username = null;
    }
    final String realmName;
    if (randomBoolean()) {
        settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters." + policyName + ".realms", filteredRealms);
        realmName = randomFrom(filteredRealms);
    } else {
        realmName = null;
    }
    final String roleName;
    if (randomBoolean()) {
        settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters." + policyName + ".roles", filteredRoles);
        roleName = randomFrom(filteredRoles);
    } else {
        roleName = null;
    }
    final String indexName;
    if (randomBoolean()) {
        settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters." + policyName + ".indices", filteredIndices);
        indexName = randomFrom(filteredIndices);
    } else {
        indexName = null;
    }
    // If nothing is filtered so far due to randomisation, always filter on action name
    final String actionName;
    if (randomBoolean() || (username == null && realmName == null && roleName == null && indexName == null)) {
        settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters." + policyName + ".actions", filteredActions);
        actionName = randomFrom(filteredActions);
    } else {
        actionName = null;
    }
    final String requestId = randomAlphaOfLength(10);
    // Build an authentication/request/action whose attributes match every enabled filter
    // dimension; unconstrained dimensions use unrelated random values.
    final Authentication authentication = Authentication.newRealmAuthentication(
        new User(
            username != null ? username : randomAlphaOfLengthBetween(3, 10),
            roleName != null ? roleName : randomAlphaOfLengthBetween(3, 10)
        ),
        new RealmRef(
            realmName != null ? realmName : randomAlphaOfLengthBetween(3, 10),
            randomAlphaOfLengthBetween(3, 10),
            randomAlphaOfLengthBetween(3, 8),
            randomFrom(AuthenticationTestHelper.randomDomain(randomBoolean()), null)
        )
    );
    final MockIndicesRequest request = new MockIndicesRequest(
        threadContext,
        indexName != null ? indexName : randomAlphaOfLengthBetween(3, 10)
    );
    final AuthorizationInfo authorizationInfo = authzInfo(authentication.getEffectiveSubject().getUser().roles());
    final String action = actionName != null ? actionName : randomAlphaOfLengthBetween(3, 10);
    // Filter not created yet, message should be logged
    auditTrail.accessGranted(requestId, authentication, action, request, authorizationInfo);
    assertThat("AccessGranted message: should not filter since we have no filter", logOutput.size(), is(1));
    logOutput.clear();
    threadContext.stashContext();
    // Create the filter, the same message should be filtered
    clusterSettings.applySettings(settingsBuilder.build());
    auditTrail.accessGranted(requestId, authentication, action, request, authorizationInfo);
    assertThat("AccessGranted message: should be filtered out", logOutput.size(), is(0));
    logOutput.clear();
    threadContext.stashContext();
    // Remove the filter, the message is logged again
    clusterSettings.applySettings(Settings.EMPTY);
    auditTrail.accessGranted(requestId, authentication, action, request, authorizationInfo);
    assertThat("AccessGranted message: should not filter since filter is removed", logOutput.size(), is(1));
    logOutput.clear();
    threadContext.stashContext();
}
/** Returns a socket address on the loopback interface with a random port in {@code [0, 65535]}. */
private InetSocketAddress randomLoopbackInetSocketAddress() {
    final int port = randomIntBetween(0, 65535);
    return new InetSocketAddress(InetAddress.getLoopbackAddress(), port);
}
/**
 * Builds a list whose length is uniformly drawn from {@code [min, max]} and whose elements
 * are randomly picked (with repetition allowed) from {@code l}.
 */
private <T> List<T> randomListFromLengthBetween(List<T> l, int min, int max) {
    assert (min >= 0) && (min <= max) && (max <= l.size());
    final int len = randomIntBetween(min, max);
    final List<T> picked = new ArrayList<>(len);
    for (int i = 0; i < len; i++) {
        picked.add(randomFrom(l));
    }
    return picked;
}
/**
 * Creates an authentication for the internal system user, either as a fallback
 * authentication or as a regular internal authentication on the current transport version.
 */
private static Authentication createSystemUserAuthentication(boolean isFallback) {
    final String nodeName = randomAlphaOfLengthBetween(3, 8);
    return isFallback
        ? Authentication.newInternalFallbackAuthentication(InternalUsers.SYSTEM_USER, nodeName)
        : Authentication.newInternalAuthentication(InternalUsers.SYSTEM_USER, TransportVersion.current(), nodeName);
}
/**
 * Convenience overload of {@link #createAuthentication(User, User, String)} with no run-as
 * (authenticating) user: the given user authenticated directly against the named realm.
 */
private static Authentication createAuthentication(User user, String effectiveRealmName) {
    return createAuthentication(user, null, effectiveRealmName);
}
/**
 * Creates an authentication for {@code effectiveUser} against {@code effectiveRealmName}.
 * When {@code authenticatingUser} is non-null, the result is a run-as authentication where
 * the authenticating user's realm name is prefixed with {@code UNFILTER_MARKER} so it never
 * matches generated realm filters.
 */
private static Authentication createAuthentication(User effectiveUser, @Nullable User authenticatingUser, String effectiveRealmName) {
    assert false == effectiveUser instanceof InternalUser;
    if (authenticatingUser == null) {
        // direct (non run-as) authentication
        return AuthenticationTestHelper.builder()
            .user(effectiveUser)
            .realmRef(new RealmRef(effectiveRealmName, "test", "foo"))
            .build(false);
    }
    // run-as: authenticate as authenticatingUser, then impersonate effectiveUser
    return AuthenticationTestHelper.builder()
        .user(authenticatingUser)
        .realmRef(new RealmRef(UNFILTER_MARKER + randomAlphaOfLength(4), "test", "foo"))
        .runAs()
        .user(effectiveUser)
        .realmRef(new RealmRef(effectiveRealmName, "up", "by"))
        .build();
}
    /**
     * Creates real {@link ClusterSettings} (despite the "mock" name) containing the built-in
     * cluster settings, the {@link LoggingAuditTrail} settings, and the API-key deletion
     * settings, backed by this test's {@code settings}.
     */
    private ClusterSettings mockClusterSettings() {
        final List<Setting<?>> settingsList = new ArrayList<>();
        LoggingAuditTrail.registerSettings(settingsList);
        settingsList.addAll(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
        settingsList.add(ApiKeyService.DELETE_RETENTION_PERIOD);
        settingsList.add(ApiKeyService.DELETE_INTERVAL);
        return new ClusterSettings(settings, new HashSet<>(settingsList));
    }
private List<String> randomNonEmptyListOfFilteredNames(String... namePrefix) {
final List<String> filtered = new ArrayList<>(4);
for (int i = 0; i < randomIntBetween(1, 4); i++) {
filtered.add(FILTER_MARKER + Strings.arrayToCommaDelimitedString(namePrefix) + randomAlphaOfLengthBetween(1, 4));
}
return filtered;
}
private List<String> randomNonEmptyListOfFilteredActions() {
final List<String> filtered = new ArrayList<>(4);
final String[] actionPatterns = {
"internal:transport/proxy/indices:*",
"indices:data/read/*",
"internal:transport/proxy/indices:data/read/*",
"indices:data/write/index*",
"indices:data/write/bulk*",
"indices:data/write/index",
"indices:data/write/index[*",
"indices:data/write/index:op_type/create",
"indices:data/write/update*",
"indices:data/write/delete*",
"indices:data/write/*",
"indices:monitor/*",
"indices:admin/*",
"indices:admin/ilm/*",
"indices:admin/refresh*",
"indices:admin/flush*",
"indices:admin/synced_flush",
"indices:admin/forcemerge*",
"cluster:admin/xpack/security/*",
"cluster:admin/xpack/security/saml/*",
"cluster:admin/xpack/security/oidc/*",
"cluster:admin/xpack/security/token/*",
"cluster:admin/xpack/security/api_key/*",
"cluster:monitor/*",
"cluster:monitor/xpack/ml/*",
"cluster:monitor/text_structure/*",
"cluster:monitor/data_frame/*",
"cluster:monitor/xpack/watcher/*",
"cluster:monitor/xpack/rollup/*",
"cluster:*",
"indices:admin/index_template/*",
"indices:admin/data_stream/*",
"cluster:admin/xpack/ml/*",
"cluster:admin/data_frame/*",
"cluster:monitor/data_frame/*",
"cluster:monitor/transform/*",
"cluster:admin/transform/*",
"cluster:admin/xpack/watcher/*",
"cluster:monitor/nodes/liveness",
"cluster:monitor/state",
"indices:admin/template/*",
"cluster:admin/component_template/*",
"cluster:admin/ingest/pipeline/*",
"cluster:admin/xpack/rollup/*",
"cluster:admin/xpack/ccr/*",
"cluster:admin/ilm/*",
"cluster:admin/slm/*",
"cluster:admin/xpack/enrich/*" };
Random random = random();
for (int i = 0; i < randomIntBetween(1, 4); i++) {
Object name = actionPatterns[random.nextInt(actionPatterns.length)];
filtered.add((String) name);
}
return filtered;
}
    /**
     * Builds a fake {@link RestRequest} with path {@code "_uri"}, empty params, a remote address
     * on a random loopback address (IPv4 or IPv6) at port 9200, and randomly chosen JSON content
     * (possibly none, depending on the picked {@code RestContent}).
     */
    private RestRequest getRestRequest() throws IOException {
        final RestContent content = randomFrom(RestContent.values());
        final FakeRestRequest.Builder builder = new Builder(NamedXContentRegistry.EMPTY);
        if (content.hasContent()) {
            builder.withContent(content.content(), XContentType.JSON);
        }
        builder.withPath("_uri");
        // Resolve either the IPv4 or IPv6 loopback address and label it "_hostname".
        final byte address[] = InetAddress.getByName(randomBoolean() ? "127.0.0.1" : "::1").getAddress();
        builder.withRemoteAddress(new InetSocketAddress(InetAddress.getByAddress("_hostname", address), 9200));
        builder.withParams(Collections.emptyMap());
        return builder.build();
    }
    /** Shortcut for the {@link HttpRequest} underlying a freshly built fake {@link RestRequest}. */
    private HttpRequest getHttpRequest() throws IOException {
        return getRestRequest().getHttpRequest();
    }
private static | LoggingAuditTrailFilterTests |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/ClassLoaderCheck.java | {
"start": 847,
"end": 896
} | class ____ {
/**
* Verifies the | ClassLoaderCheck |
java | google__dagger | dagger-compiler/main/java/dagger/internal/codegen/writing/OptionalFactories.java | {
"start": 11929,
"end": 12073
} | class ____
* {@code Provider<Optional<Provider<T>>>}.
* <li>If {@code optionalRequestKind} is {@link RequestKind#LAZY}, the | implements |
java | resilience4j__resilience4j | resilience4j-spring-boot2/src/main/java/io/github/resilience4j/timelimiter/autoconfigure/TimeLimiterConfigurationOnMissingBean.java | {
"start": 1073,
"end": 1814
/**
 * Spring configuration that registers TimeLimiter event-consumer beans only when none exist.
 * Delegates to {@code timeLimiterConfiguration}, presumably a field inherited from
 * {@code AbstractTimeLimiterConfigurationOnMissingBean} — not visible in this fragment.
 * (The class name "____" is a masked placeholder in this code-sample corpus.)
 */
class ____ extends AbstractTimeLimiterConfigurationOnMissingBean {
    /**
     * The EventConsumerRegistry is used to manage EventConsumer instances.
     * The EventConsumerRegistry is used by the TimeLimiter events monitor to show the latest TimeLimiterEvent events
     * for each TimeLimiter instance.
     *
     * @return a default EventConsumerRegistry {@link DefaultEventConsumerRegistry}
     */
    @Bean
    @ConditionalOnMissingBean(value = TimeLimiterEvent.class, parameterizedContainer = EventConsumerRegistry.class)
    public EventConsumerRegistry<TimeLimiterEvent> timeLimiterEventsConsumerRegistry() {
        return timeLimiterConfiguration.timeLimiterEventsConsumerRegistry();
    }
}
| TimeLimiterConfigurationOnMissingBean |
java | spring-projects__spring-framework | spring-beans/src/main/java/org/springframework/beans/factory/aot/BeanFactoryInitializationAotProcessor.java | {
"start": 1207,
"end": 1375
} | interface ____ a registered bean will cause the bean <em>and</em>
* all of its dependencies to be initialized during AOT processing. We generally
* recommend that this | on |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/builders/WebSecurityTests.java | {
"start": 6479,
"end": 6750
/**
 * Test configuration exposing a {@link WebSecurityCustomizer} that installs an
 * {@code HttpStatusRequestRejectedHandler} answering HTTP 400 (BAD_REQUEST) for
 * rejected requests. (Class name "____" is a masked placeholder in this corpus.)
 */
class ____ {
        @Bean
        WebSecurityCustomizer webSecurityCustomizer() {
            return (web) -> web
                .requestRejectedHandler(new HttpStatusRequestRejectedHandler(HttpStatus.BAD_REQUEST.value()));
        }
    }
@Configuration
@EnableWebSecurity
static | RequestRejectedHandlerConfig |
java | apache__maven | impl/maven-core/src/main/java/org/apache/maven/project/CycleDetectedException.java | {
"start": 874,
"end": 1285
} | class ____ extends Exception {
private final List<String> cycle;
public CycleDetectedException(String message, List<String> cycle) {
super(message);
this.cycle = cycle;
}
public List<String> getCycle() {
return cycle;
}
@Override
public String getMessage() {
return super.getMessage() + " " + String.join(" --> ", cycle);
}
}
| CycleDetectedException |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java | {
"start": 82566,
"end": 83836
/**
 * Compact holder for three boolean outcomes of an RPC invocation — fail-over,
 * should-use-observer, and complete — packed as bits into a single byte.
 */
class ____ {
    /** A byte field used to store the state flags. */
    private byte flag;
    // Bit masks: bit 0 = fail-over, bit 1 = should-use-observer, bit 2 = complete.
    private static final byte FAIL_OVER_BIT = 1;
    private static final byte SHOULD_USE_OBSERVER_BIT = 2;
    private static final byte COMPLETE_BIT = 4;
    public ExecutionStatus() {
        this(false, false);
    }
    public ExecutionStatus(boolean failOver, boolean shouldUseObserver) {
        this.flag = 0;
        setFailOver(failOver);
        setShouldUseObserver(shouldUseObserver);
        setComplete(false);
    }
    // Each setter sets or clears its bit without disturbing the other flags.
    public void setFailOver(boolean failOver) {
        flag = (byte) (failOver ? (flag | FAIL_OVER_BIT) : (flag & ~FAIL_OVER_BIT));
    }
    public void setShouldUseObserver(boolean shouldUseObserver) {
        flag = (byte) (shouldUseObserver ?
            (flag | SHOULD_USE_OBSERVER_BIT) : (flag & ~SHOULD_USE_OBSERVER_BIT));
    }
    public void setComplete(boolean complete) {
        flag = (byte) (complete ? (flag | COMPLETE_BIT) : (flag & ~COMPLETE_BIT));
    }
    public boolean isFailOver() {
        return (flag & FAIL_OVER_BIT) != 0;
    }
    public boolean isShouldUseObserver() {
        return (flag & SHOULD_USE_OBSERVER_BIT) != 0;
    }
    public boolean isComplete() {
        return (flag & COMPLETE_BIT) != 0;
    }
}
}
| ExecutionStatus |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/internal/integers/Integers_assertIsPositive_Test.java | {
"start": 1112,
"end": 2331
/**
 * Tests for {@code Integers.assertIsPositive}: positive ints pass, non-positive ints fail,
 * and the absolute-value comparison strategy treats -1 as positive but 0 as not.
 */
class ____ extends IntegersBaseTest {
  // 6 > 0 under the default strategy: no error expected.
  @Test
  void should_succeed_since_actual_is_positive() {
    integers.assertIsPositive(someInfo(), 6);
  }
  // -6 is not greater than 0: expect an AssertionError with the standard message.
  @Test
  void should_fail_since_actual_is_not_positive() {
    assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> integers.assertIsPositive(someInfo(), -6))
                                                   .withMessage("%nExpecting actual:%n  -6%nto be greater than:%n  0%n".formatted());
  }
  // |-1| = 1 > 0 under the absolute-value comparator: no error expected.
  @Test
  void should_succeed_since_actual_is_positive_according_to_custom_comparison_strategy() {
    integersWithAbsValueComparisonStrategy.assertIsPositive(someInfo(), -1);
  }
  // |0| = 0 is not greater than 0 even under the absolute-value comparator.
  @Test
  void should_fail_since_actual_is_not_positive_according_to_custom_comparison_strategy() {
    assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> integersWithAbsValueComparisonStrategy.assertIsPositive(someInfo(),
                                                                                                                            0))
                                                   .withMessage("%nExpecting actual:%n  0%nto be greater than:%n  0%nwhen comparing values using AbsValueComparator".formatted());
  }
}
| Integers_assertIsPositive_Test |
java | spring-projects__spring-framework | spring-webflux/src/test/java/org/springframework/web/reactive/result/method/annotation/ModelAttributeMethodArgumentResolverTests.java | {
"start": 16938,
"end": 17338
/**
 * Simple test bean: {@code name} and {@code age} are constructor-bound and read-only,
 * while {@code count} is a mutable property with a getter/setter pair.
 */
class ____ {
        private final String name;
        private final int age;
        // Mutable property, settable after construction (e.g. via data binding).
        private int count;
        public DataClass(String name, int age) {
            this.name = name;
            this.age = age;
        }
        public String getName() {
            return name;
        }
        public int getAge() {
            return this.age;
        }
        public int getCount() {
            return count;
        }
        public void setCount(int count) {
            this.count = count;
        }
    }
}
| DataClass |
java | google__guava | android/guava-tests/test/com/google/common/base/FinalizableReferenceQueueClassLoaderUnloadingTest.java | {
"start": 2015,
"end": 2424
} | class ____ its ClassLoader cannot be
* garbage-collected, even if there are no more instances of FinalizableReferenceQueue itself.
* The code in FinalizableReferenceQueue goes to considerable trouble to ensure that there are
* no such references and the tests here check that that trouble has not been in vain.
*
* When we reference FinalizableReferenceQueue in this test, we are referencing a | then |
java | elastic__elasticsearch | x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/codecs/lucene86/Lucene86MetadataOnlyPointsReader.java | {
"start": 1776,
"end": 4797
/**
 * {@link PointsReader} that loads only the per-field point metadata (from the
 * segment's meta file) and never opens the index/data files. Useful for reading
 * legacy segments where full point access is not needed.
 */
class ____ extends PointsReader {
    final SegmentReadState readState;
    // Field number -> metadata-only point values parsed from the meta file.
    final Map<Integer, PointValues> readers = new HashMap<>();
    public Lucene86MetadataOnlyPointsReader(SegmentReadState readState) throws IOException {
        this.readState = readState;
        String metaFileName = IndexFileNames.segmentFileName(
            readState.segmentInfo.name,
            readState.segmentSuffix,
            Lucene86MetadataOnlyPointsFormat.META_EXTENSION
        );
        boolean success = false;
        try {
            try (
                ChecksumIndexInput metaIn = EndiannessReverserUtil.openChecksumInput(readState.directory, metaFileName, readState.context)
            ) {
                Throwable priorE = null;
                try {
                    CodecUtil.checkIndexHeader(
                        metaIn,
                        Lucene86MetadataOnlyPointsFormat.META_CODEC_NAME,
                        Lucene86MetadataOnlyPointsFormat.VERSION_START,
                        Lucene86MetadataOnlyPointsFormat.VERSION_CURRENT,
                        readState.segmentInfo.getId(),
                        readState.segmentSuffix
                    );
                    // Read per-field entries until the -1 terminator.
                    while (true) {
                        int fieldNumber = metaIn.readInt();
                        if (fieldNumber == -1) {
                            break;
                        } else if (fieldNumber < 0) {
                            throw new CorruptIndexException("Illegal field number: " + fieldNumber, metaIn);
                        }
                        PointValues reader = new MetadataOnlyBKDReader(metaIn, true);
                        readers.put(fieldNumber, reader);
                    }
                    // Skip two trailing longs (values unused here) before the footer check.
                    metaIn.readLong();
                    metaIn.readLong();
                } catch (Throwable t) {
                    // Remember the first failure so checkFooter can attach/raise it.
                    priorE = t;
                } finally {
                    CodecUtil.checkFooter(metaIn, priorE);
                }
            }
            success = true;
        } finally {
            if (success == false) {
                IOUtils.closeWhileHandlingException(this);
            }
        }
    }
    @Override
    public PointValues getValues(String fieldName) {
        FieldInfo fieldInfo = readState.fieldInfos.fieldInfo(fieldName);
        if (fieldInfo == null) {
            throw new IllegalArgumentException("field=\"" + fieldName + "\" is unrecognized");
        }
        if (fieldInfo.getPointDimensionCount() == 0) {
            throw new IllegalArgumentException("field=\"" + fieldName + "\" did not index point values");
        }
        return readers.get(fieldInfo.number);
    }
    // We only open the metadata field, and do nothing with the other two files (index/data),
    // for which Lucene checks integrity but we don't need to.
    @Override
    public void checkIntegrity() {}
    @Override
    public void close() throws IOException {
        // Free up heap:
        readers.clear();
    }
}
| Lucene86MetadataOnlyPointsReader |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/throwable/ThrowableAssert_hasRootCauseInstanceOf_Test.java | {
"start": 839,
"end": 1242
/**
 * Verifies that {@code hasRootCauseInstanceOf} delegates to
 * {@code Throwables.assertHasRootCauseInstanceOf} with the assertion's info and actual value.
 */
class ____ extends ThrowableAssertBaseTest {
  // Invoke the API under test; the base class checks it returns `this` for chaining.
  @Override
  protected ThrowableAssert<Throwable> invoke_api_method() {
    return assertions.hasRootCauseInstanceOf(Exception.class);
  }
  // Verify the call was forwarded to the internal Throwables object with the same arguments.
  @Override
  protected void verify_internal_effects() {
    verify(throwables).assertHasRootCauseInstanceOf(getInfo(assertions), getActual(assertions), Exception.class);
  }
}
| ThrowableAssert_hasRootCauseInstanceOf_Test |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/deser/asm/TestASM_double.java | {
"start": 447,
"end": 658
/**
 * Minimal test bean holding a single double property, initialized to 32.5
 * (written as a float literal and widened to double).
 */
class ____ {

    private double value = 32.5F;

    public void setValue(double newValue) {
        this.value = newValue;
    }

    public double getValue() {
        return value;
    }
}
}
| V0 |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/aggregations/metrics/Sum.java | {
"start": 1090,
"end": 3547
/**
 * Internal result of the {@code sum} aggregation: a single double value plus its
 * doc-value format. Reduction across shards uses Kahan (compensated) summation.
 */
class ____ extends InternalNumericMetricsAggregation.SingleValue {
    private final double sum;
    public Sum(String name, double sum, DocValueFormat formatter, Map<String, Object> metadata) {
        super(name, formatter, metadata);
        this.sum = sum;
    }
    /**
     * Read from a stream.
     */
    public Sum(StreamInput in) throws IOException {
        super(in);
        sum = in.readDouble();
    }
    @Override
    protected void doWriteTo(StreamOutput out) throws IOException {
        out.writeNamedWriteable(format);
        out.writeDouble(sum);
    }
    @Override
    public String getWriteableName() {
        return SumAggregationBuilder.NAME;
    }
    // An empty sum is 0.0 (the additive identity), not NaN.
    public static Sum empty(String name, DocValueFormat format, Map<String, Object> metadata) {
        return new Sum(name, 0.0, format, metadata);
    }
    @Override
    public double value() {
        return sum;
    }
    @Override
    protected AggregatorReducer getLeaderReducer(AggregationReduceContext reduceContext, int size) {
        return new AggregatorReducer() {
            // Compensated summation limits floating-point error when combining many shard results.
            final CompensatedSum kahanSummation = new CompensatedSum(0, 0);
            @Override
            public void accept(InternalAggregation aggregation) {
                kahanSummation.add(((Sum) aggregation).sum);
            }
            @Override
            public InternalAggregation get() {
                return new Sum(name, kahanSummation.value(), format, getMetadata());
            }
        };
    }
    @Override
    public InternalAggregation finalizeSampling(SamplingContext samplingContext) {
        // Scale the sampled sum back up to estimate the unsampled total.
        return new Sum(name, samplingContext.scaleUp(sum), format, getMetadata());
    }
    @Override
    public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
        builder.field(CommonFields.VALUE.getPreferredName(), sum);
        // Only emit the formatted string when a non-raw format is configured.
        if (format != DocValueFormat.RAW) {
            builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(sum).toString());
        }
        return builder;
    }
    @Override
    public int hashCode() {
        return Objects.hash(super.hashCode(), sum);
    }
    @Override
    public boolean equals(Object obj) {
        if (this == obj) return true;
        if (obj == null || getClass() != obj.getClass()) return false;
        if (super.equals(obj) == false) return false;
        Sum that = (Sum) obj;
        return Objects.equals(sum, that.sum);
    }
}
| Sum |
java | apache__flink | flink-core/src/test/java/org/apache/flink/core/plugin/TestingPluginManager.java | {
"start": 1007,
"end": 1426
/**
 * {@link PluginManager} test double backed by a fixed map from plugin service class
 * to an iterator of its instances; unknown services yield an empty iterator.
 */
class ____ implements PluginManager {
    private final Map<Class<?>, Iterator<?>> plugins;
    public TestingPluginManager(Map<Class<?>, Iterator<?>> plugins) {
        this.plugins = plugins;
    }
    // Cast is safe by construction: callers must register iterators under matching keys.
    @SuppressWarnings("unchecked")
    @Override
    public <P> Iterator<P> load(Class<P> service) {
        return (Iterator<P>) plugins.getOrDefault(service, IteratorUtils.emptyIterator());
    }
}
| TestingPluginManager |
java | spring-projects__spring-boot | module/spring-boot-devtools/src/main/java/org/springframework/boot/devtools/classpath/ClassPathRestartStrategy.java | {
"start": 943,
"end": 1077
} | class ____ would.
*
* @author Phillip Webb
* @since 1.3.0
* @see PatternClassPathRestartStrategy
*/
@FunctionalInterface
public | files |
java | alibaba__nacos | naming/src/main/java/com/alibaba/nacos/naming/core/v2/client/AbstractClient.java | {
"start": 1612,
"end": 8384
/**
 * Base {@link Client} implementation tracking, per {@link Service}, the instance this client
 * publishes and the {@link Subscriber} it registers. Keeps instance/subscriber metrics in
 * step with the maps, fires {@code ClientChangedEvent}s on publisher changes, and maintains
 * a revision number used for sync-data verification.
 */
class ____ implements Client {
    protected final ConcurrentHashMap<Service, InstancePublishInfo> publishers = new ConcurrentHashMap<>(16, 0.75f, 1);
    protected final ConcurrentHashMap<Service, Subscriber> subscribers = new ConcurrentHashMap<>(16, 0.75f, 1);
    protected volatile long lastUpdatedTime;
    protected final AtomicLong revision;
    protected ClientAttributes attributes;
    public AbstractClient(Long revision) {
        lastUpdatedTime = System.currentTimeMillis();
        // A null revision is treated as 0.
        this.revision = new AtomicLong(revision == null ? 0 : revision);
    }
    @Override
    public void setLastUpdatedTime() {
        this.lastUpdatedTime = System.currentTimeMillis();
    }
    @Override
    public long getLastUpdatedTime() {
        return lastUpdatedTime;
    }
    @Override
    public boolean addServiceInstance(Service service, InstancePublishInfo instancePublishInfo) {
        // Batch and single registrations update different metrics counters.
        if (instancePublishInfo instanceof BatchInstancePublishInfo) {
            InstancePublishInfo old = publishers.put(service, instancePublishInfo);
            MetricsMonitor.incrementIpCountWithBatchRegister(old, (BatchInstancePublishInfo) instancePublishInfo);
        } else {
            if (null == publishers.put(service, instancePublishInfo)) {
                MetricsMonitor.incrementInstanceCount();
            }
        }
        NotifyCenter.publishEvent(new ClientEvent.ClientChangedEvent(this));
        Loggers.SRV_LOG.info("Client change for service {}, {}", service, getClientId());
        return true;
    }
    @Override
    public InstancePublishInfo removeServiceInstance(Service service) {
        InstancePublishInfo result = publishers.remove(service);
        if (null != result) {
            // Only adjust metrics and fire the change event when something was actually removed.
            if (result instanceof BatchInstancePublishInfo) {
                MetricsMonitor.decrementIpCountWithBatchRegister(result);
            } else {
                MetricsMonitor.decrementInstanceCount();
            }
            NotifyCenter.publishEvent(new ClientEvent.ClientChangedEvent(this));
        }
        Loggers.SRV_LOG.info("Client remove for service {}, {}", service, getClientId());
        return result;
    }
    @Override
    public InstancePublishInfo getInstancePublishInfo(Service service) {
        return publishers.get(service);
    }
    @Override
    public Collection<Service> getAllPublishedService() {
        return publishers.keySet();
    }
    @Override
    public boolean addServiceSubscriber(Service service, Subscriber subscriber) {
        if (null == subscribers.put(service, subscriber)) {
            MetricsMonitor.incrementSubscribeCount();
        }
        return true;
    }
    @Override
    public boolean removeServiceSubscriber(Service service) {
        if (null != subscribers.remove(service)) {
            MetricsMonitor.decrementSubscribeCount();
        }
        return true;
    }
    @Override
    public Subscriber getSubscriber(Service service) {
        return subscribers.get(service);
    }
    @Override
    public Collection<Service> getAllSubscribeService() {
        return subscribers.keySet();
    }
    @Override
    public ClientSyncData generateSyncData() {
        // Publishers are split into batch and single groups; each group keeps four parallel
        // lists (namespace/group/service/instances) aligned by index.
        List<String> namespaces = new LinkedList<>();
        List<String> groupNames = new LinkedList<>();
        List<String> serviceNames = new LinkedList<>();
        List<String> batchNamespaces = new LinkedList<>();
        List<String> batchGroupNames = new LinkedList<>();
        List<String> batchServiceNames = new LinkedList<>();
        List<InstancePublishInfo> instances = new LinkedList<>();
        List<BatchInstancePublishInfo> batchInstancePublishInfos = new LinkedList<>();
        BatchInstanceData batchInstanceData = new BatchInstanceData();
        for (Map.Entry<Service, InstancePublishInfo> entry : publishers.entrySet()) {
            InstancePublishInfo instancePublishInfo = entry.getValue();
            if (instancePublishInfo instanceof BatchInstancePublishInfo) {
                BatchInstancePublishInfo batchInstance = (BatchInstancePublishInfo) instancePublishInfo;
                batchInstancePublishInfos.add(batchInstance);
                buildBatchInstanceData(batchInstanceData, batchNamespaces, batchGroupNames, batchServiceNames, entry);
                batchInstanceData.setBatchInstancePublishInfos(batchInstancePublishInfos);
            } else {
                namespaces.add(entry.getKey().getNamespace());
                groupNames.add(entry.getKey().getGroup());
                serviceNames.add(entry.getKey().getName());
                instances.add(entry.getValue());
            }
        }
        ClientSyncData data = new ClientSyncData(getClientId(), namespaces, groupNames, serviceNames, instances, batchInstanceData);
        // Carry the current revision so the receiver can detect divergence.
        data.getAttributes().addClientAttribute(REVISION, getRevision());
        return data;
    }
    private static BatchInstanceData buildBatchInstanceData(BatchInstanceData batchInstanceData, List<String> batchNamespaces,
            List<String> batchGroupNames, List<String> batchServiceNames, Map.Entry<Service, InstancePublishInfo> entry) {
        batchNamespaces.add(entry.getKey().getNamespace());
        batchGroupNames.add(entry.getKey().getGroup());
        batchServiceNames.add(entry.getKey().getName());
        batchInstanceData.setNamespaces(batchNamespaces);
        batchInstanceData.setGroupNames(batchGroupNames);
        batchInstanceData.setServiceNames(batchServiceNames);
        return batchInstanceData;
    }
    @Override
    public void release() {
        // Roll back all metrics contributed by this client before it is discarded.
        Collection<InstancePublishInfo> instancePublishInfos = publishers.values();
        for (InstancePublishInfo instancePublishInfo : instancePublishInfos) {
            if (instancePublishInfo instanceof BatchInstancePublishInfo) {
                MetricsMonitor.decrementIpCountWithBatchRegister(instancePublishInfo);
            } else {
                MetricsMonitor.getIpCountMonitor().decrementAndGet();
            }
        }
        MetricsMonitor.getSubscriberCount().addAndGet(-1 * subscribers.size());
    }
    @Override
    public long recalculateRevision() {
        // Revision is a hash over this client's current state.
        int hash = DistroUtils.hash(this);
        revision.set(hash);
        return hash;
    }
    @Override
    public long getRevision() {
        return revision.get();
    }
    @Override
    public void setRevision(long revision) {
        this.revision.set(revision);
    }
    /**
     * get client attributes.
     */
    public ClientAttributes getClientAttributes() {
        return attributes;
    }
    public void setAttributes(ClientAttributes attributes) {
        this.attributes = attributes;
    }
}
| AbstractClient |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/serde/LogicalTypeJsonDeserializer.java | {
"start": 6835,
"end": 22081
} | class ____ extends StdDeserializer<LogicalType> {
private static final long serialVersionUID = 1L;
LogicalTypeJsonDeserializer() {
super(LogicalType.class);
}
@Override
public LogicalType deserialize(JsonParser jsonParser, DeserializationContext ctx)
throws IOException {
final JsonNode logicalTypeNode = jsonParser.readValueAsTree();
final SerdeContext serdeContext = SerdeContext.get(ctx);
return deserialize(logicalTypeNode, serdeContext);
}
static LogicalType deserialize(JsonNode logicalTypeNode, SerdeContext serdeContext) {
if (logicalTypeNode.isTextual()) {
return deserializeWithCompactSerialization(logicalTypeNode.asText(), serdeContext);
} else {
return deserializeWithExtendedSerialization(logicalTypeNode, serdeContext);
}
}
private static LogicalType deserializeWithCompactSerialization(
String serializableString, SerdeContext serdeContext) {
final DataTypeFactory dataTypeFactory =
serdeContext.getFlinkContext().getCatalogManager().getDataTypeFactory();
return dataTypeFactory.createLogicalType(serializableString);
}
private static LogicalType deserializeWithExtendedSerialization(
JsonNode logicalTypeNode, SerdeContext serdeContext) {
final LogicalType logicalType = deserializeFromRoot(logicalTypeNode, serdeContext);
if (logicalTypeNode.has(FIELD_NAME_NULLABLE)) {
final boolean isNullable = logicalTypeNode.get(FIELD_NAME_NULLABLE).asBoolean();
return logicalType.copy(isNullable);
}
return logicalType.copy(true);
}
private static LogicalType deserializeFromRoot(
JsonNode logicalTypeNode, SerdeContext serdeContext) {
final LogicalTypeRoot typeRoot =
LogicalTypeRoot.valueOf(logicalTypeNode.get(FIELD_NAME_TYPE_NAME).asText());
switch (typeRoot) {
case CHAR:
case VARCHAR:
case BINARY:
case VARBINARY:
return deserializeZeroLengthString(typeRoot, logicalTypeNode);
case TIMESTAMP_WITHOUT_TIME_ZONE:
case TIMESTAMP_WITH_TIME_ZONE:
case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
return deserializeTimestamp(typeRoot, logicalTypeNode);
case ARRAY:
case MULTISET:
return deserializeCollection(typeRoot, logicalTypeNode, serdeContext);
case MAP:
return deserializeMap(logicalTypeNode, serdeContext);
case ROW:
return deserializeRow(logicalTypeNode, serdeContext);
case DISTINCT_TYPE:
return deserializeDistinctType(logicalTypeNode, serdeContext);
case STRUCTURED_TYPE:
return deserializeStructuredType(logicalTypeNode, serdeContext);
case SYMBOL:
return new SymbolType<>();
case RAW:
return deserializeSpecializedRaw(logicalTypeNode, serdeContext);
default:
throw new TableException("Unsupported type root: " + typeRoot);
}
}
private static LogicalType deserializeZeroLengthString(
LogicalTypeRoot typeRoot, JsonNode logicalTypeNode) {
final int length = logicalTypeNode.get(FIELD_NAME_LENGTH).asInt();
if (length != 0) {
throw new TableException("String length should be 0.");
}
switch (typeRoot) {
case CHAR:
return CharType.ofEmptyLiteral();
case VARCHAR:
return VarCharType.ofEmptyLiteral();
case BINARY:
return BinaryType.ofEmptyLiteral();
case VARBINARY:
return VarBinaryType.ofEmptyLiteral();
default:
throw new TableException("String type root expected.");
}
}
private static LogicalType deserializeTimestamp(
LogicalTypeRoot typeRoot, JsonNode logicalTypeNode) {
final int precision = logicalTypeNode.get(FIELD_NAME_PRECISION).asInt();
final TimestampKind kind =
TimestampKind.valueOf(logicalTypeNode.get(FIELD_NAME_TIMESTAMP_KIND).asText());
switch (typeRoot) {
case TIMESTAMP_WITHOUT_TIME_ZONE:
return new TimestampType(true, kind, precision);
case TIMESTAMP_WITH_TIME_ZONE:
return new ZonedTimestampType(true, kind, precision);
case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
return new LocalZonedTimestampType(true, kind, precision);
default:
throw new TableException("Timestamp type root expected.");
}
}
private static LogicalType deserializeCollection(
LogicalTypeRoot typeRoot, JsonNode logicalTypeNode, SerdeContext serdeContext) {
final JsonNode elementNode = logicalTypeNode.get(FIELD_NAME_ELEMENT_TYPE);
final LogicalType elementType = deserialize(elementNode, serdeContext);
switch (typeRoot) {
case ARRAY:
return new ArrayType(elementType);
case MULTISET:
return new MultisetType(elementType);
default:
throw new TableException("Collection type root expected.");
}
}
private static LogicalType deserializeMap(JsonNode logicalTypeNode, SerdeContext serdeContext) {
final JsonNode keyNode = logicalTypeNode.get(FIELD_NAME_KEY_TYPE);
final LogicalType keyType = deserialize(keyNode, serdeContext);
final JsonNode valueNode = logicalTypeNode.get(FIELD_NAME_VALUE_TYPE);
final LogicalType valueType = deserialize(valueNode, serdeContext);
return new MapType(keyType, valueType);
}
private static LogicalType deserializeRow(JsonNode logicalTypeNode, SerdeContext serdeContext) {
final ArrayNode fieldNodes = (ArrayNode) logicalTypeNode.get(FIELD_NAME_FIELDS);
final List<RowField> fields = new ArrayList<>();
for (JsonNode fieldNode : fieldNodes) {
final String fieldName = fieldNode.get(FIELD_NAME_FIELD_NAME).asText();
final LogicalType fieldType =
deserialize(fieldNode.get(FIELD_NAME_FIELD_TYPE), serdeContext);
final String fieldDescription;
if (fieldNode.has(FIELD_NAME_FIELD_DESCRIPTION)) {
fieldDescription = fieldNode.get(FIELD_NAME_FIELD_DESCRIPTION).asText();
} else {
fieldDescription = null;
}
fields.add(new RowField(fieldName, fieldType, fieldDescription));
}
return new RowType(fields);
}
private static LogicalType deserializeDistinctType(
JsonNode logicalTypeNode, SerdeContext serdeContext) {
final ObjectIdentifier identifier =
ObjectIdentifierJsonDeserializer.deserialize(
logicalTypeNode.get(FIELD_NAME_OBJECT_IDENTIFIER).asText(), serdeContext);
final CatalogPlanRestore restoreStrategy =
serdeContext
.getConfiguration()
.get(TableConfigOptions.PLAN_RESTORE_CATALOG_OBJECTS);
switch (restoreStrategy) {
case ALL:
if (logicalTypeNode.has(FIELD_NAME_SOURCE_TYPE)) {
return deserializeDistinctTypeFromPlan(
identifier, logicalTypeNode, serdeContext);
}
return deserializeUserDefinedTypeFromCatalog(identifier, serdeContext);
case ALL_ENFORCED:
return deserializeDistinctTypeFromPlan(identifier, logicalTypeNode, serdeContext);
case IDENTIFIER:
return deserializeUserDefinedTypeFromCatalog(identifier, serdeContext);
default:
throw new TableException("Unsupported catalog restore strategy.");
}
}
private static LogicalType deserializeDistinctTypeFromPlan(
ObjectIdentifier identifier, JsonNode logicalTypeNode, SerdeContext serdeContext) {
final LogicalType sourceType =
deserialize(logicalTypeNode.get(FIELD_NAME_SOURCE_TYPE), serdeContext);
final DistinctType.Builder builder = DistinctType.newBuilder(identifier, sourceType);
if (logicalTypeNode.has(FIELD_NAME_FIELD_DESCRIPTION)) {
builder.description(logicalTypeNode.get(FIELD_NAME_FIELD_DESCRIPTION).asText());
}
return builder.build();
}
private static LogicalType deserializeStructuredType(
JsonNode logicalTypeNode, SerdeContext serdeContext) {
// inline structured types have no object identifier
if (!logicalTypeNode.has(FIELD_NAME_OBJECT_IDENTIFIER)) {
return deserializeStructuredTypeFromPlan(logicalTypeNode, serdeContext);
}
// for catalog structured types
final ObjectIdentifier identifier =
ObjectIdentifierJsonDeserializer.deserialize(
logicalTypeNode.get(FIELD_NAME_OBJECT_IDENTIFIER).asText(), serdeContext);
final CatalogPlanRestore restoreStrategy =
serdeContext
.getConfiguration()
.get(TableConfigOptions.PLAN_RESTORE_CATALOG_OBJECTS);
switch (restoreStrategy) {
case ALL:
if (logicalTypeNode.has(FIELD_NAME_ATTRIBUTES)) {
return deserializeStructuredTypeFromPlan(logicalTypeNode, serdeContext);
}
return deserializeUserDefinedTypeFromCatalog(identifier, serdeContext);
case ALL_ENFORCED:
return deserializeStructuredTypeFromPlan(logicalTypeNode, serdeContext);
case IDENTIFIER:
return deserializeUserDefinedTypeFromCatalog(identifier, serdeContext);
default:
throw new TableException("Unsupported catalog restore strategy.");
}
}
private static LogicalType deserializeUserDefinedTypeFromCatalog(
ObjectIdentifier identifier, SerdeContext serdeContext) {
final DataTypeFactory dataTypeFactory =
serdeContext.getFlinkContext().getCatalogManager().getDataTypeFactory();
return dataTypeFactory.createLogicalType(UnresolvedIdentifier.of(identifier));
}
private static LogicalType deserializeStructuredTypeFromPlan(
JsonNode logicalTypeNode, SerdeContext serdeContext) {
final ObjectIdentifier identifier;
if (logicalTypeNode.has(FIELD_NAME_OBJECT_IDENTIFIER)) {
identifier =
ObjectIdentifierJsonDeserializer.deserialize(
logicalTypeNode.get(FIELD_NAME_OBJECT_IDENTIFIER).asText(),
serdeContext);
} else {
identifier = null;
}
final String className;
final Class<?> implementationClass;
if (logicalTypeNode.has(FIELD_NAME_IMPLEMENTATION_CLASS)) {
className = logicalTypeNode.get(FIELD_NAME_IMPLEMENTATION_CLASS).asText();
implementationClass =
StructuredType.resolveClass(serdeContext.getClassLoader(), className)
.orElse(null);
} else {
className = null;
implementationClass = null;
}
final StructuredType.Builder builder;
if (identifier != null && implementationClass != null) {
builder = StructuredType.newBuilder(identifier, implementationClass);
} else if (identifier != null) {
builder = StructuredType.newBuilder(identifier);
} else if (implementationClass != null) {
builder = StructuredType.newBuilder(implementationClass);
} else {
builder = StructuredType.newBuilder(className);
}
if (logicalTypeNode.has(FIELD_NAME_DESCRIPTION)) {
builder.description(logicalTypeNode.get(FIELD_NAME_FIELD_DESCRIPTION).asText());
}
final ArrayNode attributeNodes = (ArrayNode) logicalTypeNode.get(FIELD_NAME_ATTRIBUTES);
final List<StructuredAttribute> attributes = new ArrayList<>();
for (JsonNode attributeNode : attributeNodes) {
final String attributeName = attributeNode.get(FIELD_NAME_ATTRIBUTE_NAME).asText();
final LogicalType attributeType =
deserialize(attributeNode.get(FIELD_NAME_ATTRIBUTE_TYPE), serdeContext);
final String attributeDescription;
if (attributeNode.has(FIELD_NAME_ATTRIBUTE_DESCRIPTION)) {
attributeDescription = attributeNode.get(FIELD_NAME_ATTRIBUTE_DESCRIPTION).asText();
} else {
attributeDescription = null;
}
attributes.add(
new StructuredAttribute(attributeName, attributeType, attributeDescription));
}
builder.attributes(attributes);
if (logicalTypeNode.has(FIELD_NAME_FINAL)) {
builder.setFinal(logicalTypeNode.get(FIELD_NAME_FINAL).asBoolean());
}
if (logicalTypeNode.has(FIELD_NAME_INSTANTIABLE)) {
builder.setInstantiable(logicalTypeNode.get(FIELD_NAME_INSTANTIABLE).asBoolean());
}
if (logicalTypeNode.has(FIELD_NAME_COMPARISON)) {
builder.comparison(
StructuredComparison.valueOf(
logicalTypeNode.get(FIELD_NAME_COMPARISON).asText()));
}
if (logicalTypeNode.has(FIELD_NAME_SUPER_TYPE)) {
final StructuredType superType =
(StructuredType)
deserialize(logicalTypeNode.get(FIELD_NAME_SUPER_TYPE), serdeContext);
builder.superType(superType);
}
return builder.build();
}
    /**
     * Restores a RAW logical type from its JSON representation.
     *
     * <p>Loads the originating class named under {@code FIELD_NAME_CLASS} and then resolves the
     * type serializer from one of two mutually exclusive JSON fields: a special serializer
     * marker, or a serialized external data type. Any other shape is rejected.
     */
    @SuppressWarnings({"unchecked", "rawtypes"})
    private static LogicalType deserializeSpecializedRaw(
            JsonNode logicalTypeNode, SerdeContext serdeContext) {
        final Class<?> clazz =
                loadClass(logicalTypeNode.get(FIELD_NAME_CLASS).asText(), serdeContext, "RAW type");
        final TypeSerializer<?> serializer;
        if (logicalTypeNode.has(FIELD_NAME_SPECIAL_SERIALIZER)) {
            // Only the "null" special serializer marker is recognized here; anything else fails.
            final String specialSerializer =
                    logicalTypeNode.get(FIELD_NAME_SPECIAL_SERIALIZER).asText();
            if (FIELD_VALUE_EXTERNAL_SERIALIZER_NULL.equals(specialSerializer)) {
                serializer = NullSerializer.INSTANCE;
            } else {
                throw new TableException("Unknown external serializer: " + specialSerializer);
            }
        } else if (logicalTypeNode.has(FIELD_NAME_EXTERNAL_DATA_TYPE)) {
            // Rebuild the serializer from the embedded external data type description.
            final DataType dataType =
                    DataTypeJsonDeserializer.deserialize(
                            logicalTypeNode.get(FIELD_NAME_EXTERNAL_DATA_TYPE), serdeContext);
            serializer = ExternalSerializer.of(dataType);
        } else {
            // Neither serializer variant present: the JSON does not describe a valid RAW type.
            throw new TableException("Invalid RAW type.");
        }
        return new RawType(clazz, serializer);
    }
}
| LogicalTypeJsonDeserializer |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/submitted/custom_collection_handling/CustomObjectFactory.java | {
"start": 1147,
"end": 3783
/**
 * ObjectFactory for the custom-collection-handling test. It mirrors the stock
 * reflection-based instantiation logic, and additionally reports
 * {@code CustomCollection} subtypes as collections via {@link #isCollection(Class)}.
 */
class ____ implements ObjectFactory {

  // Default-constructor creation: delegates to the 3-arg overload with null args.
  @Override
  public <T> T create(Class<T> type) {
    return create(type, null, null);
  }

  @Override
  public <T> T create(Class<T> type, List<Class<?>> constructorArgTypes, List<Object> constructorArgs) {
    // Interfaces are first mapped to a concrete implementation before instantiation.
    Class<?> classToCreate = resolveInterface(type);
    return (T) instantiateClass(classToCreate, constructorArgTypes, constructorArgs);
  }

  /**
   * Reflectively invokes either the default constructor (when no arg types/values were
   * supplied) or the constructor matching the given argument types. Non-accessible
   * constructors are force-opened with setAccessible(true). Any failure is rethrown as
   * a ReflectionException that dumps the attempted argument types and values.
   */
  private <T> T instantiateClass(Class<T> type, List<Class<?>> constructorArgTypes, List<Object> constructorArgs) {
    try {
      Constructor<T> constructor;
      if (constructorArgTypes == null || constructorArgs == null) {
        constructor = type.getDeclaredConstructor();
        if (!constructor.canAccess(null)) {
          constructor.setAccessible(true);
        }
        return constructor.newInstance();
      }
      constructor = type.getDeclaredConstructor(constructorArgTypes.toArray(new Class[constructorArgTypes.size()]));
      if (!constructor.canAccess(null)) {
        constructor.setAccessible(true);
      }
      return constructor.newInstance(constructorArgs.toArray(new Object[constructorArgs.size()]));
    } catch (Exception e) {
      // Build a diagnostic listing of the attempted argument types and values
      // (each entry followed by a comma, including the last).
      StringBuilder argTypes = new StringBuilder();
      if (constructorArgTypes != null) {
        for (Class<?> argType : constructorArgTypes) {
          argTypes.append(argType.getSimpleName());
          argTypes.append(",");
        }
      }
      StringBuilder argValues = new StringBuilder();
      if (constructorArgs != null) {
        for (Object argValue : constructorArgs) {
          argValues.append(String.valueOf(argValue));
          argValues.append(",");
        }
      }
      throw new ReflectionException("Error instantiating " + type + " with invalid types (" + argTypes + ") or values ("
          + argValues + "). Cause: " + e, e);
    }
  }

  // Maps common collection interfaces to a concrete default implementation;
  // concrete types are returned unchanged.
  private Class<?> resolveInterface(Class<?> type) {
    Class<?> classToCreate;
    if (type == List.class || type == Collection.class) {
      classToCreate = LinkedList.class;
    } else if (type == Map.class) {
      classToCreate = LinkedHashMap.class;
    } else if (type == SortedSet.class) { // issue #510 Collections Support
      classToCreate = TreeSet.class;
    } else if (type == Set.class) {
      classToCreate = HashSet.class;
    } else {
      classToCreate = type;
    }
    return classToCreate;
  }

  // The custom behavior under test: only CustomCollection subtypes count as collections.
  @Override
  public <T> boolean isCollection(Class<T> type) {
    return CustomCollection.class.isAssignableFrom(type);
  }

  // NOTE(review): no @Override here — confirm whether createArray is part of the
  // ObjectFactory interface in this MyBatis version.
  @SuppressWarnings("unchecked")
  public <T> T[] createArray(Class<T> type, int size) {
    return (T[]) Array.newInstance(type, size);
  }
}
| CustomObjectFactory |
java | apache__hadoop | hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Pre21JobHistoryConstants.java | {
"start": 1248,
"end": 1736
// Keys used by pre-0.21 JobHistory event records (job, task and task-attempt
// attributes as they appeared in the legacy history file format).
enum ____ {
  JOBTRACKERID,
  START_TIME, FINISH_TIME, JOBID, JOBNAME, USER, JOBCONF, SUBMIT_TIME,
  LAUNCH_TIME, TOTAL_MAPS, TOTAL_REDUCES, FAILED_MAPS, FAILED_REDUCES,
  FINISHED_MAPS, FINISHED_REDUCES, JOB_STATUS, TASKID, HOSTNAME, TASK_TYPE,
  ERROR, TASK_ATTEMPT_ID, TASK_STATUS, COPY_PHASE, SORT_PHASE, REDUCE_PHASE,
  SHUFFLE_FINISHED, SORT_FINISHED, MAP_FINISHED, COUNTERS, SPLITS,
  JOB_PRIORITY, HTTP_PORT, TRACKER_NAME, STATE_STRING, VERSION
}
/**
* This | Keys |
java | spring-projects__spring-boot | core/spring-boot/src/main/java/org/springframework/boot/SpringBootConfiguration.java | {
"start": 1101,
"end": 1724
} | class ____ Spring Boot application
* {@link Configuration @Configuration}. Can be used as an alternative to the Spring's
* standard {@code @Configuration} annotation so that configuration can be found
* automatically (for example in tests).
* <p>
* Application should only ever include <em>one</em> {@code @SpringBootConfiguration} and
* most idiomatic Spring Boot applications will inherit it from
* {@code @SpringBootApplication}.
*
* @author Phillip Webb
* @author Andy Wilkinson
* @since 1.4.0
*/
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.RUNTIME)
@Documented
@Configuration
@Indexed
public @ | provides |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllResourceProfilesResponse.java | {
"start": 1248,
"end": 1974
// Response record carrying the map of resource-profile name -> Resource.
// Equality and hash code are defined solely by that map.
class ____ {

  // Creates an empty instance via the YARN records factory.
  public static GetAllResourceProfilesResponse newInstance() {
    return Records.newRecord(GetAllResourceProfilesResponse.class);
  }

  public abstract void setResourceProfiles(Map<String, Resource> profiles);

  public abstract Map<String, Resource> getResourceProfiles();

  @Override
  public boolean equals(Object other) {
    if (this == other) {
      return true;
    }
    if (!(other instanceof GetAllResourceProfilesResponse)) {
      return false;
    }
    // Two responses are equal iff their profile maps are equal.
    return ((GetAllResourceProfilesResponse) other).getResourceProfiles()
        .equals(this.getResourceProfiles());
  }

  @Override
  public int hashCode() {
    return this.getResourceProfiles().hashCode();
  }
}
| GetAllResourceProfilesResponse |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/web/context/ServletContextAware.java | {
"start": 1058,
"end": 1720
// Aware callback interface: implementors receive the ServletContext they run in
// (injection timing is documented on the method below).
interface ____ extends Aware {

	/**
	 * Set the {@link ServletContext} that this object runs in.
	 * <p>Invoked after population of normal bean properties but before an init
	 * callback like InitializingBean's {@code afterPropertiesSet} or a
	 * custom init-method. Invoked after ApplicationContextAware's
	 * {@code setApplicationContext}.
	 * @param servletContext the ServletContext object to be used by this object
	 * @see org.springframework.beans.factory.InitializingBean#afterPropertiesSet
	 * @see org.springframework.context.ApplicationContextAware#setApplicationContext
	 */
	void setServletContext(ServletContext servletContext);

}
| ServletContextAware |
java | quarkusio__quarkus | core/deployment/src/main/java/io/quarkus/deployment/builditem/nativeimage/LambdaCapturingTypeBuildItem.java | {
"start": 182,
"end": 587
/**
 * Build item that records the fully-qualified name of a lambda-capturing type,
 * either taken directly or derived from a {@link Class} reference.
 */
class ____ extends MultiBuildItem {

    /** Fully-qualified name of the lambda-capturing type. */
    private final String className;

    /**
     * Records the name of the given lambda-capturing type.
     *
     * @param lambdaCapturingType the type whose name should be recorded
     */
    public LambdaCapturingTypeBuildItem(Class<?> lambdaCapturingType) {
        this(lambdaCapturingType.getName());
    }

    /**
     * Records the given fully-qualified class name directly.
     *
     * @param className the fully-qualified class name
     */
    public LambdaCapturingTypeBuildItem(String className) {
        this.className = className;
    }

    /** @return the recorded fully-qualified class name */
    public String getClassName() {
        return className;
    }
}
| LambdaCapturingTypeBuildItem |
java | redisson__redisson | redisson/src/main/java/org/redisson/client/handler/CommandPubSubDecoder.java | {
"start": 1698,
"end": 12956
// Decoder installed on a Redis pub/sub connection. It matches
// (P|S)SUBSCRIBE / (P|S)UNSUBSCRIBE acknowledgements with the CommandData that
// issued them, tracks a PubSubEntry per subscribed channel, and delivers
// channel messages to the RedisPubSubConnection bound to the Netty channel.
// Non-pub/sub replies fall through to the base CommandDecoder.
class ____ extends CommandDecoder {

    private static final Set<String> UNSUBSCRIBE_COMMANDS = new HashSet<>(Arrays.asList(RedisCommands.PUNSUBSCRIBE.getName(), RedisCommands.UNSUBSCRIBE.getName(), RedisCommands.SUNSUBSCRIBE.getName()));
    private static final Set<String> SUBSCRIBE_COMMANDS = new HashSet<>(Arrays.asList(RedisCommands.PSUBSCRIBE.getName(), RedisCommands.SUBSCRIBE.getName(), RedisCommands.SSUBSCRIBE.getName()));
    private static final Set<String> MESSAGES = new HashSet<>(Arrays.asList("subscribe", "psubscribe", "punsubscribe", "unsubscribe", "ssubscribe", "sunsubscribe"));
    private static final Set<String> TYPE_MESSAGES = new HashSet<>(Arrays.asList("message", "smessage", "pmessage"));
    // It is not needed to use concurrent map because responses are coming consecutive
    private final Map<ChannelName, PubSubEntry> entries = new HashMap<>();
    // Pending (channel, operation) -> command waiting for its server acknowledgement.
    private final Map<PubSubKey, CommandData<Object, Object>> commands = new ConcurrentHashMap<>();

    private final RedisClientConfig config;

    public CommandPubSubDecoder(RedisClientConfig config) {
        super(config.getAddress().getScheme());
        this.config = config;
    }

    // Registers a pub/sub command so its acknowledgement can be matched later;
    // the operation key is the lower-cased command name.
    public void addPubSubCommand(ChannelName channel, CommandData<Object, Object> data) {
        String operation = data.getCommand().getName().toLowerCase(Locale.ENGLISH);
        commands.put(new PubSubKey(channel, operation), data);
    }

    @Override
    protected QueueCommandHolder getCommand(ChannelHandlerContext ctx) {
        return ctx.channel().attr(CommandsQueuePubSub.CURRENT_COMMAND).get();
    }

    @Override
    protected void sendNext(Channel channel) {
        CommandsQueuePubSub handler = channel.pipeline().get(CommandsQueuePubSub.class);
        if (handler != null) {
            handler.sendNextCommand(channel);
        }
        // Reset decoder state for the next reply.
        state(null);
    }

    @Override
    protected void decodeCommand(Channel channel, ByteBuf in, QueueCommand data, int endIndex, State state) throws Exception {
        try {
            // Drain the buffer; once the current command's promise is completed,
            // subsequent replies are decoded without a command context.
            while (in.writerIndex() > in.readerIndex()) {
                if (data != null) {
                    if (((CommandData<Object, Object>) data).getPromise().isDone()) {
                        data = null;
                    }
                }

                decode(in, (CommandData<Object, Object>) data, null, channel, false, null, 0, state);
            }
            sendNext(channel, data);
        } catch (Exception e) {
            log.error("Unable to decode data. channel: {}, reply: {}", channel, LogHelper.toString(in), e);
            if (data != null) {
                data.tryFailure(e);
            }
            sendNext(channel);
            throw e;
        }
    }

    // Tries to attribute a server error to a pending pub/sub command by scanning the
    // (lower-cased) error text for a command name; if matched, fails all pending
    // commands for that operation, otherwise defers to the base implementation.
    @Override
    protected void onError(Channel channel, String error) {
        Set<String> cmds = new HashSet<>(RedisCommands.PUBSUB_COMMANDS);
        // SUBSCRIBE/UNSUBSCRIBE are removed first so longer names (e.g. PSUBSCRIBE)
        // get a chance to match before these substrings are tried as a fallback.
        cmds.remove(RedisCommands.SUBSCRIBE.getName());
        cmds.remove(RedisCommands.UNSUBSCRIBE.getName());

        String cmd = null;
        String e = error.toLowerCase(Locale.ENGLISH);
        for (String value : cmds) {
            if (e.contains(value.toLowerCase(Locale.ENGLISH))) {
                cmd = value;
                break;
            }
        }
        if (cmd == null) {
            if (e.contains(RedisCommands.UNSUBSCRIBE.getName().toLowerCase(Locale.ENGLISH))) {
                cmd = RedisCommands.UNSUBSCRIBE.getName();
            } else if (e.contains(RedisCommands.SUBSCRIBE.getName().toLowerCase(Locale.ENGLISH))) {
                cmd = RedisCommands.SUBSCRIBE.getName();
            }
        }

        if (cmd != null) {
            String c = cmd;
            commands.keySet().stream()
                    .filter(v -> v.getOperation().equalsIgnoreCase(c))
                    .forEach(v -> {
                        CommandData<Object, Object> dd = commands.get(v);
                        dd.getPromise().completeExceptionally(new RedisException(error));
                    });
        } else {
            super.onError(channel, error);
        }
    }

    // Handles a fully decoded result. Pub/sub Message results update the
    // subscription bookkeeping and are dispatched to listeners (ordered via
    // enqueueMessage when keepPubSubOrder is set, otherwise on the executor);
    // anything else is treated as a regular command reply.
    @Override
    protected void decodeResult(CommandData<Object, Object> data, List<Object> parts, Channel channel,
                    Object result) throws IOException {
        try {
            if (config.getExecutor().isShutdown()) {
                return;
            }
        } catch (IllegalStateException e) {
            // arise in JBOSS. skipped
        }

        if (result instanceof Message) {
            checkpoint();

            RedisPubSubConnection pubSubConnection = RedisPubSubConnection.getFrom(channel);

            ChannelName channelName = ((Message) result).getChannel();
            if (result instanceof PubSubStatusMessage) {
                String operation = ((PubSubStatusMessage) result).getType().name().toLowerCase(Locale.ENGLISH);
                PubSubKey key = new PubSubKey(channelName, operation);
                CommandData<Object, Object> d = commands.get(key);
                if (SUBSCRIBE_COMMANDS.contains(d.getCommand().getName())) {
                    // Subscription acknowledged: start tracking the channel's decoder.
                    commands.remove(key);
                    entries.put(channelName, new PubSubEntry(d.getMessageDecoder()));
                }

                if (UNSUBSCRIBE_COMMANDS.contains(d.getCommand().getName())) {
                    commands.remove(key);
                    // Pattern unsubscriptions are keyed by the pattern, not the channel.
                    if (result instanceof PubSubPatternMessage) {
                        channelName = ((PubSubPatternMessage) result).getPattern();
                    }
                    PubSubEntry entry = entries.remove(channelName);
                    if (config.isKeepPubSubOrder() && entry != null) {
                        enqueueMessage(result, pubSubConnection, entry);
                    }
                }
            }

            if (config.isKeepPubSubOrder()) {
                if (result instanceof PubSubPatternMessage) {
                    channelName = ((PubSubPatternMessage) result).getPattern();
                }
                PubSubEntry entry = entries.get(channelName);
                if (entry != null) {
                    enqueueMessage(result, pubSubConnection, entry);
                }
            } else {
                // Unordered delivery: dispatch directly on the executor.
                config.getExecutor().execute(new Runnable() {
                    @Override
                    public void run() {
                        if (result instanceof PubSubStatusMessage) {
                            pubSubConnection.onMessage((PubSubStatusMessage) result);
                        } else if (result instanceof PubSubMessage) {
                            pubSubConnection.onMessage((PubSubMessage) result);
                        } else if (result instanceof PubSubPatternMessage) {
                            pubSubConnection.onMessage((PubSubPatternMessage) result);
                        }
                    }
                });
            }
        } else {
            super.decodeResult(data, parts, channel, result);
        }
    }

    // Appends a message to the entry's queue and, if no drain task is already
    // running (guarded by the entry's "sent" flag), schedules one that delivers
    // queued messages in order. Called with res == null only to re-trigger a drain.
    private void enqueueMessage(Object res, RedisPubSubConnection pubSubConnection, PubSubEntry entry) {
        if (res != null) {
            entry.getQueue().add((Message) res);
        }

        if (!entry.getSent().compareAndSet(false, true)) {
            return;
        }

        config.getExecutor().execute(() -> {
            try {
                while (true) {
                    Message result = entry.getQueue().poll();
                    if (result != null) {
                        if (result instanceof PubSubStatusMessage) {
                            pubSubConnection.onMessage((PubSubStatusMessage) result);
                        } else if (result instanceof PubSubMessage) {
                            pubSubConnection.onMessage((PubSubMessage) result);
                        } else if (result instanceof PubSubPatternMessage) {
                            pubSubConnection.onMessage((PubSubPatternMessage) result);
                        }
                    } else {
                        break;
                    }
                }
            } finally {
                entry.getSent().set(false);
                // Messages may have arrived while we were releasing the flag; drain again.
                if (!entry.getQueue().isEmpty()) {
                    enqueueMessage(null, pubSubConnection, entry);
                }
            }
        });
    }

    // Picks the multi-decoder for an incoming reply based on its first element:
    // subscription acks use the originating command's decoder, channel messages
    // use the per-channel decoder, "pong"/context-less replies use a plain list decoder.
    @Override
    protected MultiDecoder<Object> messageDecoder(CommandData<Object, Object> data, List<Object> parts) {
        if (parts.isEmpty() || parts.get(0) == null) {
            return null;
        }
        // Client-side caching invalidation events are rewritten as messages on the
        // dedicated tracking channel.
        if ("invalidate".equals(parts.get(0))) {
            parts.set(0, "message");
            parts.add(1, ChannelName.TRACKING.getName());
        }
        String command = parts.get(0).toString();
        if (MESSAGES.contains(command)) {
            ChannelName channelName = new ChannelName((byte[]) parts.get(1));
            PubSubKey key = new PubSubKey(channelName, command);
            CommandData<Object, Object> commandData = commands.get(key);
            if (commandData == null) {
                return null;
            }
            return commandData.getCommand().getReplayMultiDecoder();
        } else if (TYPE_MESSAGES.contains(command)) {
            byte[] channelName = (byte[]) parts.get(1);
            PubSubEntry entry = entries.get(new ChannelName(channelName));
            if (entry == null) {
                return null;
            }
            return entry.getDecoder();
        } else if ("pong".equals(command) || data == null) {
            return new ListObjectDecoder<>(0);
        }

        return data.getCommand().getReplayMultiDecoder();
    }

    // Element-level decoder selection: channel/pattern names are decoded as raw
    // bytes, payloads via the channel's decoder, PING replies as strings.
    @Override
    protected Decoder<Object> selectDecoder(CommandData<Object, Object> data, List<Object> parts, long size, State state) {
        if (parts != null) {
            if (data != null && parts.size() == 1 && "pong".equals(parts.get(0))) {
                return data.getCodec().getValueDecoder();
            }
            if (parts.size() == 1) {
                return ByteArrayCodec.INSTANCE.getValueDecoder();
            }
            if (parts.size() == 2 && "pmessage".equals(parts.get(0))) {
                return ByteArrayCodec.INSTANCE.getValueDecoder();
            }
            if (parts.size() == 2 && TYPE_MESSAGES.contains(parts.get(0))) {
                byte[] channelName = (byte[]) parts.get(1);
                return getDecoder(null, parts, channelName, size);
            }
            if (parts.size() == 3 && "pmessage".equals(parts.get(0))) {
                byte[] patternName = (byte[]) parts.get(1);
                return getDecoder(null, parts, patternName, size);
            }
        }

        if (data != null && data.getCommand().getName().equals(RedisCommands.PING.getName())) {
            return StringCodec.INSTANCE.getValueDecoder();
        }

        return super.selectDecoder(data, parts, size, state);
    }

    // Resolves the payload decoder for a subscribed channel/pattern name,
    // falling back to raw bytes when there is no active subscription entry.
    private Decoder<Object> getDecoder(Codec codec, List<Object> parts, byte[] name, long size) {
        PubSubEntry entry = entries.get(new ChannelName(name));
        if (entry != null) {
            return entry.getDecoder().getDecoder(codec, parts.size(), state(), size, parts);
        }
        return ByteArrayCodec.INSTANCE.getValueDecoder();
    }
}
| CommandPubSubDecoder |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/inheritance/QualifiersInheritanceTest.java | {
"start": 1645,
"end": 1752
} | class ____ extends SuperBean {
}
@InheritedQualifier("bravo")
@ApplicationScoped
static | Alpha |
java | apache__logging-log4j2 | log4j-api-test/src/main/java/org/apache/logging/log4j/test/TestLoggerContext.java | {
"start": 1069,
"end": 2153
} | class ____ implements LoggerContext {
private final Map<String, ExtendedLogger> map = new HashMap<>();
@Override
public ExtendedLogger getLogger(final String name) {
final ExtendedLogger extendedLogger = map.get(name);
if (extendedLogger != null) {
return extendedLogger;
}
final ExtendedLogger logger = new TestLogger(name);
map.put(name, logger);
return logger;
}
@Override
public ExtendedLogger getLogger(final String name, final MessageFactory messageFactory) {
return new TestLogger(name, messageFactory);
}
@Override
public Object getExternalContext() {
return null;
}
@Override
public boolean hasLogger(final String name) {
return false;
}
@Override
public boolean hasLogger(final String name, final MessageFactory messageFactory) {
return false;
}
@Override
public boolean hasLogger(final String name, final Class<? extends MessageFactory> messageFactoryClass) {
return false;
}
}
| TestLoggerContext |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/engine/LifecycleMethodOverridingTests.java | {
"start": 886,
"end": 1009
} | class ____ {
@Nested
@DisplayName("A package-private lifecycle method can be overridden by")
| LifecycleMethodOverridingTests |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/core/Single.java | {
"start": 213763,
"end": 298827
} | class ____ to filter the items emitted by the current {@code Single}
* @return the new {@link Maybe} instance
* @throws NullPointerException if {@code clazz} is {@code null}
* @see <a href="http://reactivex.io/documentation/operators/filter.html">ReactiveX operators documentation: Filter</a>
* @since 3.0.0
*/
    @CheckReturnValue
    @NonNull
    @SchedulerSupport(SchedulerSupport.NONE)
    public final <@NonNull U> Maybe<U> ofType(@NonNull Class<U> clazz) {
        Objects.requireNonNull(clazz, "clazz is null");
        // keep the success value only when it is an instance of clazz, then cast it
        return filter(Functions.isInstanceOf(clazz)).cast(clazz);
    }
/**
* Signals the success item or the terminal signals of the current {@code Single} on the specified {@link Scheduler},
* asynchronously.
* <p>
* <img width="640" height="305" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.observeOn.v3.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>you specify which {@code Scheduler} this operator will use.</dd>
* </dl>
*
* @param scheduler
* the {@code Scheduler} to notify subscribers on
* @return the new {@code Single} instance
* @throws NullPointerException if {@code scheduler} is {@code null}
* @see <a href="http://reactivex.io/documentation/operators/observeon.html">ReactiveX operators documentation: ObserveOn</a>
* @see <a href="http://www.grahamlea.com/2014/07/rxjava-threading-examples/">RxJava Threading Examples</a>
* @see #subscribeOn
*/
    @CheckReturnValue
    @NonNull
    @SchedulerSupport(SchedulerSupport.CUSTOM)
    public final Single<T> observeOn(@NonNull Scheduler scheduler) {
        Objects.requireNonNull(scheduler, "scheduler is null");
        // re-emit the success/error signal on the supplied scheduler
        return RxJavaPlugins.onAssembly(new SingleObserveOn<>(this, scheduler));
    }
/**
* Ends the flow with a success item returned by a function for the {@link Throwable} error signaled by the current
* {@code Single} instead of signaling the error via {@code onError}.
* <p>
* <img width="640" height="461" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.onErrorReturn.v3.png" alt="">
* <p>
* By default, when a {@code Single} encounters an error that prevents it from emitting the expected item to its
* subscriber, the {@code Single} invokes its subscriber's {@link SingleObserver#onError} method, and then quits
* without invoking any more of its observer's methods. The {@code onErrorReturn} method changes this
* behavior. If you pass a function ({@code resumeFunction}) to a {@code Single}'s {@code onErrorReturn} method, if
* the original {@code Single} encounters an error, instead of invoking its observer's
* {@link SingleObserver#onError} method, it will instead emit the return value of {@code resumeFunction}.
* <p>
* You can use this to prevent errors from propagating or to supply fallback data should errors be
* encountered.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code onErrorReturn} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param itemSupplier
* a function that returns an item that the new {@code Single} will emit if the current {@code Single} encounters
* an error
* @return the new {@code Single} instance
* @throws NullPointerException if {@code itemSupplier} is {@code null}
* @see <a href="http://reactivex.io/documentation/operators/catch.html">ReactiveX operators documentation: Catch</a>
*/
    @CheckReturnValue
    @NonNull
    @SchedulerSupport(SchedulerSupport.NONE)
    public final Single<T> onErrorReturn(@NonNull Function<Throwable, ? extends T> itemSupplier) {
        Objects.requireNonNull(itemSupplier, "itemSupplier is null");
        // null constant item: the function computes the fallback from the Throwable
        return RxJavaPlugins.onAssembly(new SingleOnErrorReturn<>(this, itemSupplier, null));
    }
/**
* Signals the specified value as success in case the current {@code Single} signals an error.
* <p>
* <img width="640" height="461" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.onErrorReturnItem.v3.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code onErrorReturnItem} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param item the value to signal if the current {@code Single} fails
* @return the new {@code Single} instance
* @throws NullPointerException if {@code item} is {@code null}
* @since 2.0
*/
    @CheckReturnValue
    @NonNull
    @SchedulerSupport(SchedulerSupport.NONE)
    public final Single<T> onErrorReturnItem(@NonNull T item) {
        Objects.requireNonNull(item, "item is null");
        // null function: the constant item itself is the fallback value
        return RxJavaPlugins.onAssembly(new SingleOnErrorReturn<>(this, null, item));
    }
/**
* Resumes the flow with the given {@link SingleSource} when the current {@code Single} fails instead of
* signaling the error via {@code onError}.
* <p>
* <img width="640" height="461" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.onErrorResumeWith.png" alt="">
* <p>
* By default, when a {@code Single} encounters an error that prevents it from emitting the expected item to
* its {@link SingleObserver}, the {@code Single} invokes its {@code SingleObserver}'s {@code onError} method, and then quits
* without invoking any more of its {@code SingleObserver}'s methods. The {@code onErrorResumeWith} method changes this
* behavior. If you pass another {@code Single} ({@code resumeSingleInCaseOfError}) to a {@code Single}'s
* {@code onErrorResumeWith} method, if the original {@code Single} encounters an error, instead of invoking its
* {@code SingleObserver}'s {@code onError} method, it will instead relinquish control to {@code resumeSingleInCaseOfError} which
* will invoke the {@code SingleObserver}'s {@link SingleObserver#onSuccess onSuccess} method if it is able to do so. In such a case,
* because no {@code Single} necessarily invokes {@code onError}, the {@code SingleObserver} may never know that an error
* happened.
* <p>
* You can use this to prevent errors from propagating or to supply fallback data should errors be
* encountered.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code onErrorResumeWith} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param fallback a {@code Single} that will take control if source {@code Single} encounters an error.
* @return the new {@code Single} instance
* @throws NullPointerException if {@code fallback} is {@code null}
* @see <a href="http://reactivex.io/documentation/operators/catch.html">ReactiveX operators documentation: Catch</a>
*/
    @CheckReturnValue
    @NonNull
    @SchedulerSupport(SchedulerSupport.NONE)
    public final Single<T> onErrorResumeWith(@NonNull SingleSource<? extends T> fallback) {
        Objects.requireNonNull(fallback, "fallback is null");
        // constant fallback expressed as a function for the general resume operator
        return onErrorResumeNext(Functions.justFunction(fallback));
    }
/**
* Returns a {@link Maybe} instance that if the current {@code Single} emits an error, it will emit an {@code onComplete}
* and swallow the throwable.
* <p>
* <img width="640" height="554" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.onErrorComplete.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code onErrorComplete} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @return the new {@code Maybe} instance
* @since 3.0.0
*/
    @CheckReturnValue
    @SchedulerSupport(SchedulerSupport.NONE)
    @NonNull
    public final Maybe<T> onErrorComplete() {
        // always-true predicate: swallow every error as onComplete
        return onErrorComplete(Functions.alwaysTrue());
    }
/**
* Returns a {@link Maybe} instance that if this {@code Single} emits an error and the predicate returns
* {@code true}, it will emit an {@code onComplete} and swallow the throwable.
* <p>
* <img width="640" height="270" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.onErrorComplete.f.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code onErrorComplete} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param predicate the predicate to call when an {@link Throwable} is emitted which should return {@code true}
* if the {@code Throwable} should be swallowed and replaced with an {@code onComplete}.
* @return the new {@code Maybe} instance
* @throws NullPointerException if {@code predicate} is {@code null}
* @since 3.0.0
*/
    @CheckReturnValue
    @NonNull
    @SchedulerSupport(SchedulerSupport.NONE)
    public final Maybe<T> onErrorComplete(@NonNull Predicate<? super Throwable> predicate) {
        Objects.requireNonNull(predicate, "predicate is null");
        // errors matching the predicate become onComplete; others pass through
        return RxJavaPlugins.onAssembly(new SingleOnErrorComplete<>(this, predicate));
    }
/**
* Resumes the flow with a {@link SingleSource} returned for the failure {@link Throwable} of the current {@code Single} by a
* function instead of signaling the error via {@code onError}.
* <p>
* <img width="640" height="461" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.onErrorResumeNext.f.v3.png" alt="">
* <p>
* By default, when a {@code Single} encounters an error that prevents it from emitting the expected item to
* its {@link SingleObserver}, the {@code Single} invokes its {@code SingleObserver}'s {@code onError} method, and then quits
* without invoking any more of its {@code SingleObserver}'s methods. The {@code onErrorResumeNext} method changes this
* behavior. If you pass a function that will return another {@code Single} ({@code resumeFunctionInCaseOfError}) to a {@code Single}'s
* {@code onErrorResumeNext} method, if the original {@code Single} encounters an error, instead of invoking its
* {@code SingleObserver}'s {@code onError} method, it will instead relinquish control to {@code resumeSingleInCaseOfError} which
* will invoke the {@code SingleObserver}'s {@link SingleObserver#onSuccess onSuccess} method if it is able to do so. In such a case,
* because no {@code Single} necessarily invokes {@code onError}, the {@code SingleObserver} may never know that an error
* happened.
* <p>
* You can use this to prevent errors from propagating or to supply fallback data should errors be
* encountered.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code onErrorResumeNext} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param fallbackSupplier a function that returns a {@code SingleSource} that will take control if source {@code Single} encounters an error.
* @return the new {@code Single} instance
* @throws NullPointerException if {@code fallbackSupplier} is {@code null}
* @see <a href="http://reactivex.io/documentation/operators/catch.html">ReactiveX operators documentation: Catch</a>
* @since .20
*/
    @CheckReturnValue
    @NonNull
    @SchedulerSupport(SchedulerSupport.NONE)
    public final Single<T> onErrorResumeNext(
            @NonNull Function<? super Throwable, ? extends SingleSource<? extends T>> fallbackSupplier) {
        Objects.requireNonNull(fallbackSupplier, "fallbackSupplier is null");
        // on error, subscribe to the SingleSource produced by fallbackSupplier instead
        return RxJavaPlugins.onAssembly(new SingleResumeNext<>(this, fallbackSupplier));
    }
/**
* Nulls out references to the upstream producer and downstream {@link SingleObserver} if
* the sequence is terminated or downstream calls {@code dispose()}.
* <p>
* <img width="640" height="346" src="https://raw.githubusercontent.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.onTerminateDetach.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code onTerminateDetach} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* <p>History: 2.1.5 - experimental
* @return the new {@code Single} which {@code null}s out references to the upstream producer and downstream {@code SingleObserver} if
* the sequence is terminated or downstream calls {@code dispose()}
* @since 2.2
*/
    @CheckReturnValue
    @SchedulerSupport(SchedulerSupport.NONE)
    @NonNull
    public final Single<T> onTerminateDetach() {
        // clears upstream/downstream references on terminate or dispose
        return RxJavaPlugins.onAssembly(new SingleDetach<>(this));
    }
/**
* Repeatedly re-subscribes to the current {@code Single} and emits each success value as a {@link Flowable} sequence.
* <p>
* <img width="640" height="463" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.repeat.v3.png" alt="">
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The returned {@code Flowable} honors the backpressure of the downstream consumer.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code repeat} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @return the new {@code Flowable} instance
* @since 2.0
*/
    @BackpressureSupport(BackpressureKind.FULL)
    @CheckReturnValue
    @SchedulerSupport(SchedulerSupport.NONE)
    @NonNull
    public final Flowable<T> repeat() {
        // delegates to the Flowable repeat operator via the Flowable view of this Single
        return toFlowable().repeat();
    }
/**
* Re-subscribes to the current {@code Single} at most the given number of times and emits each success value as a {@link Flowable} sequence.
* <p>
* <img width="640" height="463" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.repeat.n.v3.png" alt="">
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The returned {@code Flowable} honors the backpressure of the downstream consumer.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code repeat} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param times the number of times to re-subscribe to the current {@code Single}
* @return the new {@code Flowable} instance
* @throws IllegalArgumentException if {@code times} is negative
* @since 2.0
*/
    @BackpressureSupport(BackpressureKind.FULL)
    @CheckReturnValue
    @SchedulerSupport(SchedulerSupport.NONE)
    @NonNull
    public final Flowable<T> repeat(long times) {
        // delegates the count-bounded repetition to the Flowable operator
        return toFlowable().repeat(times);
    }
/**
* Re-subscribes to the current {@code Single} if
* the {@link Publisher} returned by the handler function signals a value in response to a
* value signaled through the {@link Flowable} the handler receives.
* <p>
* <img width="640" height="1480" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.repeatWhen.v3.png" alt="">
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The returned {@code Flowable} honors the backpressure of the downstream consumer.
* The {@code Publisher} returned by the handler function is expected to honor backpressure as well.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code repeatWhen} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param handler the function that is called with a {@code Flowable} that signals a value when the {@code Single}
* signaled a success value and returns a {@code Publisher} that has to signal a value to
* trigger a resubscription to the current {@code Single}, otherwise the terminal signal of
* the {@code Publisher} will be the terminal signal of the sequence as well.
* @return the new {@code Flowable} instance
* @throws NullPointerException if {@code handler} is {@code null}
* @since 2.0
*/
    @BackpressureSupport(BackpressureKind.FULL)
    @CheckReturnValue
    @SchedulerSupport(SchedulerSupport.NONE)
    @NonNull
    public final Flowable<T> repeatWhen(@NonNull Function<? super Flowable<Object>, @NonNull ? extends Publisher<@NonNull ?>> handler) {
        // delegates the handler-driven repetition to the Flowable operator
        return toFlowable().repeatWhen(handler);
    }
/**
* Re-subscribes to the current {@code Single} until the given {@link BooleanSupplier} returns {@code true}
* and emits the success items as a {@link Flowable} sequence.
* <p>
* <img width="640" height="463" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.repeatUntil.v3.png" alt="">
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The returned {@code Flowable} honors the backpressure of the downstream consumer.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code repeatUntil} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param stop the {@code BooleanSupplier} called after the current {@code Single} succeeds and if returns {@code false},
* the {@code Single} is re-subscribed; otherwise the sequence completes.
* @return the new {@code Flowable} instance
* @throws NullPointerException if {@code stop} is {@code null}
* @since 2.0
*/
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
@NonNull
public final Flowable<T> repeatUntil(@NonNull BooleanSupplier stop) {
return toFlowable().repeatUntil(stop);
}
/**
* Repeatedly re-subscribes to the current {@code Single} indefinitely if it fails with an {@code onError}.
* <p>
* <img width="640" height="399" src="https://raw.githubusercontent.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.retry.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code retry} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @return the new {@code Single} instance
* @since 2.0
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
@NonNull
public final Single<T> retry() {
return toSingle(toFlowable().retry());
}
/**
* Repeatedly re-subscribe at most the specified times to the current {@code Single}
* if it fails with an {@code onError}.
* <p>
* <img width="640" height="329" src="https://raw.githubusercontent.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.retry.n.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code retry} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param times the number of times to resubscribe if the current {@code Single} fails
* @return the new {@code Single} instance
* @throws IllegalArgumentException if {@code times} is negative
* @since 2.0
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
@NonNull
public final Single<T> retry(long times) {
return toSingle(toFlowable().retry(times));
}
/**
* Re-subscribe to the current {@code Single} if the given predicate returns {@code true} when the {@code Single} fails
* with an {@code onError}.
* <p>
* <img width="640" height="230" src="https://raw.githubusercontent.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.retry.f2.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code retry} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param predicate the predicate called with the resubscription count and the failure {@link Throwable}
* and should return {@code true} if a resubscription should happen
* @return the new {@code Single} instance
* @throws NullPointerException if {@code predicate} is {@code null}
* @since 2.0
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
@NonNull
public final Single<T> retry(@NonNull BiPredicate<? super Integer, ? super Throwable> predicate) {
return toSingle(toFlowable().retry(predicate));
}
/**
* Repeatedly re-subscribe at most times or until the predicate returns {@code false}, whichever happens first
* if it fails with an {@code onError}.
* <p>
* <img width="640" height="259" src="https://raw.githubusercontent.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.retry.nf.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code retry} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* <p>History: 2.1.8 - experimental
* @param times the number of times to resubscribe if the current {@code Single} fails
* @param predicate the predicate called with the failure {@link Throwable}
* and should return {@code true} if a resubscription should happen
* @return the new {@code Single} instance
* @throws NullPointerException if {@code predicate} is {@code null}
* @throws IllegalArgumentException if {@code times} is negative
* @since 2.2
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
@NonNull
public final Single<T> retry(long times, @NonNull Predicate<? super Throwable> predicate) {
return toSingle(toFlowable().retry(times, predicate));
}
/**
* Re-subscribe to the current {@code Single} if the given predicate returns {@code true} when the {@code Single} fails
* with an {@code onError}.
* <p>
* <img width="640" height="240" src="https://raw.githubusercontent.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.retry.f.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code retry} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param predicate the predicate called with the failure {@link Throwable}
* and should return {@code true} if a resubscription should happen
* @return the new {@code Single} instance
* @throws NullPointerException if {@code predicate} is {@code null}
* @since 2.0
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
@NonNull
public final Single<T> retry(@NonNull Predicate<? super Throwable> predicate) {
return toSingle(toFlowable().retry(predicate));
}
/**
* Retries until the given stop function returns {@code true}.
* <p>
* <img width="640" height="364" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.retryUntil.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code retryUntil} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param stop the function that should return {@code true} to stop retrying
* @return the new {@code Single} instance
* @throws NullPointerException if {@code stop} is {@code null}
* @since 3.0.0
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final Single<T> retryUntil(@NonNull BooleanSupplier stop) {
Objects.requireNonNull(stop, "stop is null");
return retry(Long.MAX_VALUE, Functions.predicateReverseFor(stop));
}
/**
* Re-subscribes to the current {@code Single} if and when the {@link Publisher} returned by the handler
* function signals a value.
* <p>
* <img width="640" height="405" src="https://raw.githubusercontent.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.retryWhen.png" alt="">
* <p>
* If the {@code Publisher} signals an {@code onComplete}, the resulting {@code Single} will signal a {@link NoSuchElementException}.
* <p>
* Note that the inner {@code Publisher} returned by the handler function should signal
* either {@code onNext}, {@code onError} or {@code onComplete} in response to the received
* {@link Throwable} to indicate the operator should retry or terminate. If the upstream to
* the operator is asynchronous, signaling {@code onNext} followed by {@code onComplete} immediately may
* result in the sequence to be completed immediately. Similarly, if this inner
* {@code Publisher} signals {@code onError} or {@code onComplete} while the upstream is
* active, the sequence is terminated with the same signal immediately.
* <p>
* The following example demonstrates how to retry an asynchronous source with a delay:
* <pre><code>
* Single.timer(1, TimeUnit.SECONDS)
* .doOnSubscribe(s -> System.out.println("subscribing"))
* .map(v -> { throw new RuntimeException(); })
* .retryWhen(errors -> {
* AtomicInteger counter = new AtomicInteger();
* return errors
* .takeWhile(e -> counter.getAndIncrement() != 3)
* .flatMap(e -> {
* System.out.println("delay retry by " + counter.get() + " second(s)");
* return Flowable.timer(counter.get(), TimeUnit.SECONDS);
* });
* })
* .blockingGet();
* </code></pre>
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code retryWhen} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param handler the function that receives a {@link Flowable} of the error the {@code Single} emits and should
* return a {@code Publisher} that should signal a normal value (in response to the
* throwable the {@code Flowable} emits) to trigger a resubscription or signal an error to
* be the output of the resulting {@code Single}
* @return the new {@code Single} instance
* @throws NullPointerException if {@code handler} is {@code null}
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
@NonNull
public final Single<T> retryWhen(@NonNull Function<? super Flowable<Throwable>, @NonNull ? extends Publisher<@NonNull ?>> handler) {
return toSingle(toFlowable().retryWhen(handler));
}
/**
* Wraps the given {@link SingleObserver}, catches any {@link RuntimeException}s thrown by its
* {@link SingleObserver#onSubscribe(Disposable)}, {@link SingleObserver#onSuccess(Object)} or
     * {@link SingleObserver#onError(Throwable)} methods and routes those to the global error handler
* via {@link RxJavaPlugins#onError(Throwable)}.
* <p>
* By default, the {@code Single} protocol forbids the {@code onXXX} methods to throw, but some
* {@code SingleObserver} implementation may do it anyway, causing undefined behavior in the
* upstream. This method and the underlying safe wrapper ensures such misbehaving consumers don't
* disrupt the protocol.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code safeSubscribe} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param observer the potentially misbehaving {@code SingleObserver}
* @throws NullPointerException if {@code observer} is {@code null}
* @see #subscribe(Consumer,Consumer)
* @since 3.0.0
*/
@SchedulerSupport(SchedulerSupport.NONE)
public final void safeSubscribe(@NonNull SingleObserver<? super T> observer) {
Objects.requireNonNull(observer, "observer is null");
subscribe(new SafeSingleObserver<>(observer));
}
/**
* Returns a {@link Flowable} which first runs the other {@link CompletableSource}
* then the current {@code Single} if the other completed normally.
* <p>
* <img width="640" height="360" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.startWith.c.png" alt="">
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The returned {@code Flowable} honors the backpressure of the downstream consumer.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code startWith} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param other the other {@code CompletableSource} to run first
* @return the new {@code Flowable} instance
* @throws NullPointerException if {@code other} is {@code null}
* @since 3.0.0
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
@BackpressureSupport(BackpressureKind.FULL)
public final Flowable<T> startWith(@NonNull CompletableSource other) {
Objects.requireNonNull(other, "other is null");
return Flowable.concat(Completable.wrap(other).<T>toFlowable(), toFlowable());
}
/**
* Returns a {@link Flowable} which first runs the other {@link SingleSource}
* then the current {@code Single} if the other succeeded normally.
* <p>
* <img width="640" height="341" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.startWith.s.png" alt="">
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The returned {@code Flowable} honors the backpressure of the downstream consumer.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code startWith} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param other the other {@code SingleSource} to run first
* @return the new {@code Flowable} instance
* @throws NullPointerException if {@code other} is {@code null}
* @since 3.0.0
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
@BackpressureSupport(BackpressureKind.FULL)
public final Flowable<T> startWith(@NonNull SingleSource<T> other) {
Objects.requireNonNull(other, "other is null");
return Flowable.concat(Single.wrap(other).toFlowable(), toFlowable());
}
/**
* Returns a {@link Flowable} which first runs the other {@link MaybeSource}
* then the current {@code Single} if the other succeeded or completed normally.
* <p>
* <img width="640" height="232" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.startWith.m.png" alt="">
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The returned {@code Flowable} honors the backpressure of the downstream consumer.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code startWith} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param other the other {@code MaybeSource} to run first
* @return the new {@code Flowable} instance
* @throws NullPointerException if {@code other} is {@code null}
* @since 3.0.0
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
@BackpressureSupport(BackpressureKind.FULL)
public final Flowable<T> startWith(@NonNull MaybeSource<T> other) {
Objects.requireNonNull(other, "other is null");
return Flowable.concat(Maybe.wrap(other).toFlowable(), toFlowable());
}
/**
* Returns an {@link Observable} which first delivers the events
* of the other {@link ObservableSource} then runs the current {@code Single}.
* <p>
* <img width="640" height="175" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.startWith.o.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code startWith} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param other the other {@code ObservableSource} to run first
* @return the new {@code Observable} instance
* @throws NullPointerException if {@code other} is {@code null}
* @since 3.0.0
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final Observable<T> startWith(@NonNull ObservableSource<T> other) {
Objects.requireNonNull(other, "other is null");
return Observable.wrap(other).concatWith(this.toObservable());
}
/**
* Returns a {@link Flowable} which first delivers the events
* of the other {@link Publisher} then runs the current {@code Single}.
* <p>
* <img width="640" height="175" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.startWith.p.png" alt="">
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The returned {@code Flowable} honors the backpressure of the downstream consumer
* and expects the other {@code Publisher} to honor it as well.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code startWith} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param other the other {@code Publisher} to run first
* @return the new {@code Flowable} instance
* @throws NullPointerException if {@code other} is {@code null}
* @since 3.0.0
*/
@CheckReturnValue
@NonNull
@BackpressureSupport(BackpressureKind.FULL)
@SchedulerSupport(SchedulerSupport.NONE)
public final Flowable<T> startWith(@NonNull Publisher<T> other) {
Objects.requireNonNull(other, "other is null");
return toFlowable().startWith(other);
}
/**
* Subscribes to a {@code Single} but ignore its emission or notification.
* <p>
* <img width="640" height="340" src="https://raw.githubusercontent.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.subscribe.png" alt="">
* <p>
* If the {@code Single} emits an error, it is wrapped into an
* {@link io.reactivex.rxjava3.exceptions.OnErrorNotImplementedException OnErrorNotImplementedException}
* and routed to the {@link RxJavaPlugins#onError(Throwable)} handler.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code subscribe} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @return the new {@link Disposable} instance that can be used for disposing the subscription at any time
* @see <a href="http://reactivex.io/documentation/operators/subscribe.html">ReactiveX operators documentation: Subscribe</a>
* @see #subscribe(Consumer, Consumer, DisposableContainer)
*/
@SchedulerSupport(SchedulerSupport.NONE)
@NonNull
public final Disposable subscribe() {
return subscribe(Functions.emptyConsumer(), Functions.ON_ERROR_MISSING);
}
/**
* Subscribes to a {@code Single} and provides a composite callback to handle the item it emits
* or any error notification it issues.
* <p>
* <img width="640" height="340" src="https://raw.githubusercontent.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.subscribe.c2.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code subscribe} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param onCallback
* the callback that receives either the success value or the failure {@link Throwable}
* (whichever is not {@code null})
* @return the new {@link Disposable} instance that can be used for disposing the subscription at any time
* @throws NullPointerException
* if {@code onCallback} is {@code null}
* @see #subscribe(Consumer, Consumer, DisposableContainer)
* @see <a href="http://reactivex.io/documentation/operators/subscribe.html">ReactiveX operators documentation: Subscribe</a>
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final Disposable subscribe(@NonNull BiConsumer<@Nullable ? super T, @Nullable ? super Throwable> onCallback) {
Objects.requireNonNull(onCallback, "onCallback is null");
BiConsumerSingleObserver<T> observer = new BiConsumerSingleObserver<>(onCallback);
subscribe(observer);
return observer;
}
/**
* Subscribes to a {@code Single} and provides a callback to handle the item it emits.
* <p>
* <img width="640" height="341" src="https://raw.githubusercontent.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.subscribe.c.png" alt="">
* <p>
* If the {@code Single} emits an error, it is wrapped into an
* {@link io.reactivex.rxjava3.exceptions.OnErrorNotImplementedException OnErrorNotImplementedException}
* and routed to the {@link RxJavaPlugins#onError(Throwable)} handler.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code subscribe} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param onSuccess
* the {@code Consumer<T>} you have designed to accept the emission from the {@code Single}
* @return the new {@link Disposable} instance that can be used for disposing the subscription at any time
* @throws NullPointerException
* if {@code onSuccess} is {@code null}
* @see <a href="http://reactivex.io/documentation/operators/subscribe.html">ReactiveX operators documentation: Subscribe</a>
* @see #subscribe(Consumer, Consumer, DisposableContainer)
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
@NonNull
public final Disposable subscribe(@NonNull Consumer<? super T> onSuccess) {
return subscribe(onSuccess, Functions.ON_ERROR_MISSING);
}
/**
* Subscribes to a {@code Single} and provides callbacks to handle the item it emits or any error notification it
* issues.
* <p>
* <img width="640" height="340" src="https://raw.githubusercontent.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.subscribe.cc.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code subscribe} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param onSuccess
* the {@code Consumer<T>} you have designed to accept the emission from the {@code Single}
* @param onError
* the {@code Consumer<Throwable>} you have designed to accept any error notification from the
* {@code Single}
* @return the new {@link Disposable} instance that can be used for disposing the subscription at any time
* @throws NullPointerException
* if {@code onSuccess} or {@code onError} is {@code null}
* @see <a href="http://reactivex.io/documentation/operators/subscribe.html">ReactiveX operators documentation: Subscribe</a>
* @see #subscribe(Consumer, Consumer, DisposableContainer)
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final Disposable subscribe(@NonNull Consumer<? super T> onSuccess, @NonNull Consumer<? super Throwable> onError) {
Objects.requireNonNull(onSuccess, "onSuccess is null");
Objects.requireNonNull(onError, "onError is null");
ConsumerSingleObserver<T> observer = new ConsumerSingleObserver<>(onSuccess, onError);
subscribe(observer);
return observer;
}
/**
* Wraps the given onXXX callbacks into a {@link Disposable} {@link SingleObserver},
* adds it to the given {@link DisposableContainer} and ensures, that if the upstream
* terminates or this particular {@code Disposable} is disposed, the {@code SingleObserver} is removed
* from the given container.
* <p>
* The {@code SingleObserver} will be removed after the callback for the terminal event has been invoked.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code subscribe} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param onSuccess the callback for upstream items
* @param onError the callback for an upstream error if any
* @param container the {@code DisposableContainer} (such as {@link CompositeDisposable}) to add and remove the
* created {@code Disposable} {@code SingleObserver}
* @return the {@code Disposable} that allows disposing the particular subscription.
* @throws NullPointerException
* if {@code onSuccess}, {@code onError}
* or {@code container} is {@code null}
* @since 3.1.0
*/
@SchedulerSupport(SchedulerSupport.NONE)
@NonNull
public final Disposable subscribe(
@NonNull Consumer<? super T> onSuccess,
@NonNull Consumer<? super Throwable> onError,
@NonNull DisposableContainer container) {
Objects.requireNonNull(onSuccess, "onSuccess is null");
Objects.requireNonNull(onError, "onError is null");
Objects.requireNonNull(container, "container is null");
DisposableAutoReleaseMultiObserver<T> observer = new DisposableAutoReleaseMultiObserver<>(
container, onSuccess, onError, Functions.EMPTY_ACTION);
container.add(observer);
subscribe(observer);
return observer;
}
    @SchedulerSupport(SchedulerSupport.NONE)
    @Override
    public final void subscribe(@NonNull SingleObserver<? super T> observer) {
        Objects.requireNonNull(observer, "observer is null");
        // Let the RxJavaPlugins hook decorate or replace the incoming observer.
        observer = RxJavaPlugins.onSubscribe(this, observer);
        Objects.requireNonNull(observer, "The RxJavaPlugins.onSubscribe hook returned a null SingleObserver. Please check the handler provided to RxJavaPlugins.setOnSingleSubscribe for invalid null returns. Further reading: https://github.com/ReactiveX/RxJava/wiki/Plugins");
        try {
            subscribeActual(observer);
        } catch (NullPointerException ex) {
            // NullPointerExceptions are rethrown unchanged.
            throw ex;
        } catch (Throwable ex) {
            Exceptions.throwIfFatal(ex);
            // Non-fatal failures from subscribeActual are wrapped in an NPE with
            // the original error preserved as its cause.
            NullPointerException npe = new NullPointerException("subscribeActual failed");
            npe.initCause(ex);
            throw npe;
        }
    }
    /**
     * Implement this method in subclasses to handle the incoming {@link SingleObserver}s.
     * <p>There is no need to call any of the plugin hooks on the current {@code Single} instance or
     * the {@code SingleObserver}; all hooks and basic safeguards have been
     * applied by {@link #subscribe(SingleObserver)} before this method gets called.
     * <p>Implementations should signal exactly one {@code onSuccess} or {@code onError}
     * to the given observer after having called its {@code onSubscribe} method.
     * @param observer the {@code SingleObserver} to handle, not {@code null}
     */
    protected abstract void subscribeActual(@NonNull SingleObserver<? super T> observer);
/**
* Subscribes a given {@link SingleObserver} (subclass) to this {@code Single} and returns the given
* {@code SingleObserver} as is.
* <p>
* <img width="640" height="338" src="https://raw.githubusercontent.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.subscribeWith.png" alt="">
* <p>Usage example:
* <pre><code>
* Single<Integer> source = Single.just(1);
* CompositeDisposable composite = new CompositeDisposable();
*
* DisposableSingleObserver<Integer> ds = new DisposableSingleObserver<>() {
* // ...
* };
*
* composite.add(source.subscribeWith(ds));
* </code></pre>
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code subscribeWith} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param <E> the type of the {@code SingleObserver} to use and return
* @param observer the {@code SingleObserver} (subclass) to use and return, not {@code null}
* @return the input {@code observer}
* @throws NullPointerException if {@code observer} is {@code null}
* @since 2.0
*/
    @CheckReturnValue
    @SchedulerSupport(SchedulerSupport.NONE)
    @NonNull
    public final <@NonNull E extends SingleObserver<? super T>> E subscribeWith(E observer) {
        // Subscribe through the standard entry point (applies plugin hooks and the
        // null check), then return the same observer so callers can retain it.
        subscribe(observer);
        return observer;
    }
/**
* Asynchronously subscribes {@link SingleObserver}s to this {@code Single} on the specified {@link Scheduler}.
* <p>
* <img width="640" height="305" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.subscribeOn.v3.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>You specify which {@code Scheduler} this operator will use.</dd>
* </dl>
*
* @param scheduler
* the {@code Scheduler} to perform subscription actions on
* @return the new {@code Single} instance
* @throws NullPointerException if {@code scheduler} is {@code null}
* @see <a href="http://reactivex.io/documentation/operators/subscribeon.html">ReactiveX operators documentation: SubscribeOn</a>
* @see <a href="http://www.grahamlea.com/2014/07/rxjava-threading-examples/">RxJava Threading Examples</a>
* @see #observeOn
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.CUSTOM)
public final Single<T> subscribeOn(@NonNull Scheduler scheduler) {
Objects.requireNonNull(scheduler, "scheduler is null");
return RxJavaPlugins.onAssembly(new SingleSubscribeOn<>(this, scheduler));
}
/**
* Measures the time (in milliseconds) between the subscription and success item emission
* of the current {@code Single} and signals it as a tuple ({@link Timed})
* success value.
* <p>
* <img width="640" height="466" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.timeInterval.png" alt="">
* <p>
* If the current {@code Single} fails, the resulting {@code Single} will
* pass along the signal to the downstream. To measure the time to error,
* use {@link #materialize()} and apply {@link #timeInterval()}.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code timeInterval} uses the {@code computation} {@link Scheduler}
* for determining the current time upon subscription and upon receiving the
* success item from the current {@code Single}.</dd>
* </dl>
* @return the new {@code Single} instance
* @since 3.0.0
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.COMPUTATION)
public final Single<Timed<T>> timeInterval() {
return timeInterval(TimeUnit.MILLISECONDS, Schedulers.computation());
}
/**
* Measures the time (in milliseconds) between the subscription and success item emission
* of the current {@code Single} and signals it as a tuple ({@link Timed})
* success value.
* <p>
* <img width="640" height="463" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.timeInterval.s.png" alt="">
* <p>
* If the current {@code Single} fails, the resulting {@code Single} will
* pass along the signal to the downstream. To measure the time to error,
* use {@link #materialize()} and apply {@link #timeInterval(Scheduler)}.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code timeInterval} uses the provided {@link Scheduler}
* for determining the current time upon subscription and upon receiving the
* success item from the current {@code Single}.</dd>
* </dl>
* @param scheduler the {@code Scheduler} used for providing the current time
* @return the new {@code Single} instance
* @throws NullPointerException if {@code scheduler} is {@code null}
* @since 3.0.0
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.CUSTOM)
public final Single<Timed<T>> timeInterval(@NonNull Scheduler scheduler) {
return timeInterval(TimeUnit.MILLISECONDS, scheduler);
}
/**
* Measures the time between the subscription and success item emission
* of the current {@code Single} and signals it as a tuple ({@link Timed})
* success value.
* <p>
* <img width="640" height="466" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.timeInterval.png" alt="">
* <p>
* If the current {@code Single} fails, the resulting {@code Single} will
* pass along the signals to the downstream. To measure the time to error,
* use {@link #materialize()} and apply {@link #timeInterval(TimeUnit, Scheduler)}.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code timeInterval} uses the {@code computation} {@link Scheduler}
* for determining the current time upon subscription and upon receiving the
* success item from the current {@code Single}.</dd>
* </dl>
* @param unit the time unit for measurement
* @return the new {@code Single} instance
* @throws NullPointerException if {@code unit} is {@code null}
* @since 3.0.0
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.COMPUTATION)
public final Single<Timed<T>> timeInterval(@NonNull TimeUnit unit) {
return timeInterval(unit, Schedulers.computation());
}
/**
* Measures the time between the subscription and success item emission
* of the current {@code Single} and signals it as a tuple ({@link Timed})
* success value.
* <p>
* <img width="640" height="463" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.timeInterval.s.png" alt="">
* <p>
     * If the current {@code Single} fails, the resulting {@code Single} will
     * pass along the signal to the downstream. To measure the time to error,
     * use {@link #materialize()} and apply {@link #timeInterval(TimeUnit, Scheduler)}.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code timeInterval} uses the provided {@link Scheduler}
* for determining the current time upon subscription and upon receiving the
* success item from the current {@code Single}.</dd>
* </dl>
* @param unit the time unit for measurement
* @param scheduler the {@code Scheduler} used for providing the current time
* @return the new {@code Single} instance
* @throws NullPointerException if {@code unit} or {@code scheduler} is {@code null}
* @since 3.0.0
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.CUSTOM)
public final Single<Timed<T>> timeInterval(@NonNull TimeUnit unit, @NonNull Scheduler scheduler) {
Objects.requireNonNull(unit, "unit is null");
Objects.requireNonNull(scheduler, "scheduler is null");
return RxJavaPlugins.onAssembly(new SingleTimeInterval<>(this, unit, scheduler, true));
}
/**
* Combines the success value from the current {@code Single} with the current time (in milliseconds) of
* its reception, using the {@code computation} {@link Scheduler} as time source,
* then signals them as a {@link Timed} instance.
* <p>
* <img width="640" height="465" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.timestamp.png" alt="">
* <p>
     * If the current {@code Single} fails, the resulting {@code Single} will
     * pass along the signal to the downstream. To get the timestamp of the error,
     * use {@link #materialize()} and apply {@link #timestamp()}.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code timestamp} uses the {@code computation} {@code Scheduler}
* for determining the current time upon receiving the
* success item from the current {@code Single}.</dd>
* </dl>
* @return the new {@code Single} instance
* @since 3.0.0
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.COMPUTATION)
public final Single<Timed<T>> timestamp() {
return timestamp(TimeUnit.MILLISECONDS, Schedulers.computation());
}
/**
* Combines the success value from the current {@code Single} with the current time (in milliseconds) of
* its reception, using the given {@link Scheduler} as time source,
* then signals them as a {@link Timed} instance.
* <p>
* <img width="640" height="465" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.timestamp.s.png" alt="">
* <p>
     * If the current {@code Single} fails, the resulting {@code Single} will
     * pass along the signal to the downstream. To get the timestamp of the error,
     * use {@link #materialize()} and apply {@link #timestamp(Scheduler)}.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code timestamp} uses the provided {@code Scheduler}
* for determining the current time upon receiving the
* success item from the current {@code Single}.</dd>
* </dl>
* @param scheduler the {@code Scheduler} used for providing the current time
* @return the new {@code Single} instance
* @throws NullPointerException if {@code scheduler} is {@code null}
* @since 3.0.0
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.CUSTOM)
public final Single<Timed<T>> timestamp(@NonNull Scheduler scheduler) {
return timestamp(TimeUnit.MILLISECONDS, scheduler);
}
/**
* Combines the success value from the current {@code Single} with the current time of
* its reception, using the {@code computation} {@link Scheduler} as time source,
* then signals it as a {@link Timed} instance.
* <p>
* <img width="640" height="465" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.timestamp.png" alt="">
* <p>
* If the current {@code Single} is empty or fails, the resulting {@code Single} will
* pass along the signals to the downstream. To get the timestamp of the error,
* use {@link #materialize()} and apply {@link #timestamp(TimeUnit)}.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code timestamp} uses the {@code computation} {@code Scheduler},
* for determining the current time upon receiving the
* success item from the current {@code Single}.</dd>
* </dl>
* @param unit the time unit for measurement
* @return the new {@code Single} instance
* @throws NullPointerException if {@code unit} is {@code null}
* @since 3.0.0
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.COMPUTATION)
public final Single<Timed<T>> timestamp(@NonNull TimeUnit unit) {
return timestamp(unit, Schedulers.computation());
}
/**
* Combines the success value from the current {@code Single} with the current time of
* its reception, using the given {@link Scheduler} as time source,
* then signals it as a {@link Timed} instance.
* <p>
* <img width="640" height="465" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.timestamp.s.png" alt="">
* <p>
* If the current {@code Single} is empty or fails, the resulting {@code Single} will
* pass along the signals to the downstream. To get the timestamp of the error,
* use {@link #materialize()} and apply {@link #timestamp(TimeUnit, Scheduler)}.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code timestamp} uses the provided {@code Scheduler},
* which is used for determining the current time upon receiving the
* success item from the current {@code Single}.</dd>
* </dl>
* @param unit the time unit for measurement
* @param scheduler the {@code Scheduler} used for providing the current time
* @return the new {@code Single} instance
* @throws NullPointerException if {@code unit} or {@code scheduler} is {@code null}
* @since 3.0.0
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.CUSTOM)
public final Single<Timed<T>> timestamp(@NonNull TimeUnit unit, @NonNull Scheduler scheduler) {
Objects.requireNonNull(unit, "unit is null");
Objects.requireNonNull(scheduler, "scheduler is null");
return RxJavaPlugins.onAssembly(new SingleTimeInterval<>(this, unit, scheduler, false));
}
/**
* Returns a {@code Single} that emits the item emitted by the current {@code Single} until a {@link CompletableSource} terminates. Upon
* termination of {@code other}, this will emit a {@link CancellationException} rather than go to
* {@link SingleObserver#onSuccess(Object)}.
* <p>
* <img width="640" height="333" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.takeUntil.c.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code takeUntil} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param other
* the {@code CompletableSource} whose termination will cause {@code takeUntil} to emit the item from the current
* {@code Single}
* @return the new {@code Single} that emits the item emitted by the current {@code Single} until such time as {@code other} terminates.
* @throws NullPointerException if {@code other} is {@code null}
* @see <a href="http://reactivex.io/documentation/operators/takeuntil.html">ReactiveX operators documentation: TakeUntil</a>
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final Single<T> takeUntil(@NonNull CompletableSource other) {
Objects.requireNonNull(other, "other is null");
return takeUntil(new CompletableToFlowable<T>(other));
}
/**
* Returns a {@code Single} that emits the item emitted by the current {@code Single} until a {@link Publisher} emits an item or completes. Upon
* emission of an item from {@code other}, this will emit a {@link CancellationException} rather than go to
* {@link SingleObserver#onSuccess(Object)}.
* <p>
* <img width="640" height="215" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.takeUntil.p.png" alt="">
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The {@code other} publisher is consumed in an unbounded fashion but will be
* cancelled after the first item it produced.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code takeUntil} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param other
* the {@code Publisher} whose first emitted item or completion will cause {@code takeUntil} to emit {@code CancellationException}
* if the current {@code Single} hasn't completed till then
* @param <E>
* the type of items emitted by {@code other}
* @return the new {@code Single} that emits the item emitted by the current {@code Single} until such time as {@code other} emits
* its first item
* @throws NullPointerException if {@code other} is {@code null}
* @see <a href="http://reactivex.io/documentation/operators/takeuntil.html">ReactiveX operators documentation: TakeUntil</a>
*/
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final <@NonNull E> Single<T> takeUntil(@NonNull Publisher<E> other) {
Objects.requireNonNull(other, "other is null");
return RxJavaPlugins.onAssembly(new SingleTakeUntil<>(this, other));
}
/**
* Returns a {@code Single} that emits the item emitted by the current {@code Single} until a second {@code Single} emits an item. Upon
* emission of an item from {@code other}, this will emit a {@link CancellationException} rather than go to
* {@link SingleObserver#onSuccess(Object)}.
* <p>
* <img width="640" height="314" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.takeUntil.s.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code takeUntil} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param other
* the {@code Single} whose emitted item will cause {@code takeUntil} to emit {@code CancellationException}
* if the current {@code Single} hasn't completed till then
* @param <E>
* the type of item emitted by {@code other}
* @return the new {@code Single} that emits the item emitted by the current {@code Single} until such time as {@code other} emits its item
* @throws NullPointerException if {@code other} is {@code null}
* @see <a href="http://reactivex.io/documentation/operators/takeuntil.html">ReactiveX operators documentation: TakeUntil</a>
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final <@NonNull E> Single<T> takeUntil(@NonNull SingleSource<? extends E> other) {
Objects.requireNonNull(other, "other is null");
return takeUntil(new SingleToFlowable<E>(other));
}
/**
* Signals a {@link TimeoutException} if the current {@code Single} doesn't signal a success value within the
* specified timeout window.
* <p>
* <img width="640" height="334" src="https://raw.githubusercontent.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.timeout.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code timeout} signals the {@code TimeoutException} on the {@code computation} {@link Scheduler}.</dd>
* </dl>
* @param timeout the timeout amount
* @param unit the time unit
* @return the new {@code Single} instance
* @throws NullPointerException if {@code unit} is {@code null}
* @since 2.0
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.COMPUTATION)
@NonNull
public final Single<T> timeout(long timeout, @NonNull TimeUnit unit) {
return timeout0(timeout, unit, Schedulers.computation(), null);
}
/**
* Signals a {@link TimeoutException} if the current {@code Single} doesn't signal a success value within the
* specified timeout window.
* <p>
* <img width="640" height="334" src="https://raw.githubusercontent.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.timeout.s.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code timeout} signals the {@code TimeoutException} on the {@link Scheduler} you specify.</dd>
* </dl>
* @param timeout the timeout amount
* @param unit the time unit
* @param scheduler the target {@code Scheduler} where the timeout is awaited and the {@code TimeoutException}
* signaled
* @return the new {@code Single} instance
* @throws NullPointerException if {@code unit} or {@code scheduler} is {@code null}
* @since 2.0
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.CUSTOM)
@NonNull
public final Single<T> timeout(long timeout, @NonNull TimeUnit unit, @NonNull Scheduler scheduler) {
return timeout0(timeout, unit, scheduler, null);
}
/**
* Runs the current {@code Single} and if it doesn't signal within the specified timeout window, it is
* disposed and the other {@link SingleSource} subscribed to.
* <p>
* <img width="640" height="283" src="https://raw.githubusercontent.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.timeout.sb.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code timeout} subscribes to the other {@code SingleSource} on the {@link Scheduler} you specify.</dd>
* </dl>
* @param timeout the timeout amount
* @param unit the time unit
* @param scheduler the {@code Scheduler} where the timeout is awaited and the subscription to other happens
* @param fallback the other {@code SingleSource} that gets subscribed to if the current {@code Single} times out
* @return the new {@code Single} instance
* @throws NullPointerException if {@code unit}, {@code scheduler} or {@code fallback} is {@code null}
* @since 2.0
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.CUSTOM)
public final Single<T> timeout(long timeout, @NonNull TimeUnit unit, @NonNull Scheduler scheduler, @NonNull SingleSource<? extends T> fallback) {
Objects.requireNonNull(fallback, "fallback is null");
return timeout0(timeout, unit, scheduler, fallback);
}
/**
* Runs the current {@code Single} and if it doesn't signal within the specified timeout window, it is
* disposed and the other {@link SingleSource} subscribed to.
* <p>
* <img width="640" height="282" src="https://raw.githubusercontent.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.timeout.b.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code timeout} subscribes to the other {@code SingleSource} on
* the {@code computation} {@link Scheduler}.</dd>
* </dl>
* @param timeout the timeout amount
* @param unit the time unit
* @param fallback the other {@code SingleSource} that gets subscribed to if the current {@code Single} times out
* @return the new {@code Single} instance
* @throws NullPointerException
* if {@code fallback} or {@code unit} is {@code null}
* @since 2.0
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.COMPUTATION)
public final Single<T> timeout(long timeout, @NonNull TimeUnit unit, @NonNull SingleSource<? extends T> fallback) {
Objects.requireNonNull(fallback, "fallback is null");
return timeout0(timeout, unit, Schedulers.computation(), fallback);
}
private Single<T> timeout0(final long timeout, final TimeUnit unit, final Scheduler scheduler, final SingleSource<? extends T> fallback) {
Objects.requireNonNull(unit, "unit is null");
Objects.requireNonNull(scheduler, "scheduler is null");
return RxJavaPlugins.onAssembly(new SingleTimeout<>(this, timeout, unit, scheduler, fallback));
}
/**
* Calls the specified converter function during assembly time and returns its resulting value.
* <p>
* <img width="640" height="553" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.to.v3.png" alt="">
* <p>
* This allows fluent conversion to any other type.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code to} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* <p>History: 2.1.7 - experimental
* @param <R> the resulting object type
* @param converter the function that receives the current {@code Single} instance and returns a value
* @return the converted value
* @throws NullPointerException if {@code converter} is {@code null}
* @since 2.2
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public final <R> R to(@NonNull SingleConverter<T, ? extends R> converter) {
return Objects.requireNonNull(converter, "converter is null").apply(this);
}
/**
* Returns a {@link Completable} that ignores the success value of this {@code Single}
* and signals {@code onComplete} instead.
* <p>
* <img width="640" height="436" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.ignoreElement.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code ignoreElement} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @return the new {@code Completable} instance
* @since 2.1.13
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
@NonNull
public final Completable ignoreElement() {
return RxJavaPlugins.onAssembly(new CompletableFromSingle<>(this));
}
/**
* Converts this {@code Single} into a {@link Flowable}.
* <p>
* <img width="640" height="462" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.toFlowable.v3.png" alt="">
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The returned {@code Flowable} honors the backpressure of the downstream consumer.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code toFlowable} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @return the new {@code Flowable} instance
*/
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
@SuppressWarnings("unchecked")
@NonNull
public final Flowable<T> toFlowable() {
if (this instanceof FuseToFlowable) {
return ((FuseToFlowable<T>)this).fuseToFlowable();
}
return RxJavaPlugins.onAssembly(new SingleToFlowable<>(this));
}
/**
* Returns a {@link Future} representing the single value emitted by this {@code Single}.
* <p>
* <img width="640" height="467" src="https://github.com/ReactiveX/RxJava/wiki/images/rx-operators/Single.toFuture.v3.png" alt="">
* <p>
* Cancelling the {@code Future} will cancel the subscription to the current {@code Single}.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code toFuture} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @return the new {@code Future} instance
* @see <a href="http://reactivex.io/documentation/operators/to.html">ReactiveX documentation: To</a>
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
@NonNull
public final Future<T> toFuture() {
return subscribeWith(new FutureMultiObserver<>());
}
/**
* Converts this {@code Single} into a {@link Maybe}.
* <p>
* <img width="640" height="463" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.toMaybe.v3.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code toMaybe} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @return the new {@code Maybe} instance
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
@SuppressWarnings("unchecked")
@NonNull
public final Maybe<T> toMaybe() {
if (this instanceof FuseToMaybe) {
return ((FuseToMaybe<T>)this).fuseToMaybe();
}
return RxJavaPlugins.onAssembly(new MaybeFromSingle<>(this));
}
/**
* Converts this {@code Single} into an {@link Observable}.
* <p>
* <img width="640" height="305" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.toObservable.v3.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code toObservable} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @return the new {@code Observable} instance
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
@SuppressWarnings("unchecked")
@NonNull
public final Observable<T> toObservable() {
if (this instanceof FuseToObservable) {
return ((FuseToObservable<T>)this).fuseToObservable();
}
return RxJavaPlugins.onAssembly(new SingleToObservable<>(this));
}
/**
* Returns a {@code Single} which makes sure when a {@link SingleObserver} disposes the {@link Disposable},
* that call is propagated up on the specified {@link Scheduler}.
* <p>
* <img width="640" height="693" src="https://raw.githubusercontent.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.unsubscribeOn.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code unsubscribeOn} calls {@code dispose()} of the upstream on the {@code Scheduler} you specify.</dd>
* </dl>
* <p>History: 2.0.9 - experimental
* @param scheduler the target scheduler where to execute the disposal
* @return the new {@code Single} instance
* @throws NullPointerException if {@code scheduler} is {@code null}
* @since 2.2
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.CUSTOM)
public final Single<T> unsubscribeOn(@NonNull Scheduler scheduler) {
Objects.requireNonNull(scheduler, "scheduler is null");
return RxJavaPlugins.onAssembly(new SingleUnsubscribeOn<>(this, scheduler));
}
/**
* Returns a {@code Single} that emits the result of applying a specified function to the pair of items emitted by
* the current {@code Single} and another specified {@link SingleSource}.
* <p>
* <img width="640" height="422" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.zipWith.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code zipWith} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param <U>
* the type of items emitted by the {@code other} {@code Single}
* @param <R>
* the type of items emitted by the resulting {@code Single}
* @param other
* the other {@code SingleSource}
* @param zipper
* a function that combines the pairs of items from the two {@code SingleSource}s to generate the items to
* be emitted by the resulting {@code Single}
* @return the new {@code Single} that pairs up values from the current {@code Single} and the {@code other} {@code SingleSource}
* and emits the results of {@code zipFunction} applied to these pairs
* @throws NullPointerException if {@code other} or {@code zipper} is {@code null}
* @see <a href="http://reactivex.io/documentation/operators/zip.html">ReactiveX operators documentation: Zip</a>
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
@NonNull
public final <@NonNull U, @NonNull R> Single<R> zipWith(@NonNull SingleSource<U> other, @NonNull BiFunction<? super T, ? super U, ? extends R> zipper) {
return zip(this, other, zipper);
}
// -------------------------------------------------------------------------
// Fluent test support, super handy and reduces test preparation boilerplate
// -------------------------------------------------------------------------
/**
* Creates a {@link TestObserver} and subscribes it to this {@code Single}.
* <p>
* <img width="640" height="442" src="https://raw.githubusercontent.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.test.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code test} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @return the new {@code TestObserver} instance
* @since 2.0
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
@NonNull
public final TestObserver<T> test() {
TestObserver<T> to = new TestObserver<>();
subscribe(to);
return to;
}
/**
* Creates a {@link TestObserver} optionally in cancelled state, then subscribes it to this {@code Single}.
* <p>
* <img width="640" height="482" src="https://raw.githubusercontent.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.test.b.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code test} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param dispose if {@code true}, the {@code TestObserver} will be cancelled before subscribing to this
* {@code Single}.
* @return the new {@code TestObserver} instance
* @since 2.0
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
@NonNull
public final TestObserver<T> test(boolean dispose) {
TestObserver<T> to = new TestObserver<>();
if (dispose) {
to.dispose();
}
subscribe(to);
return to;
}
@NonNull
private static <T> Single<T> toSingle(@NonNull Flowable<T> source) {
return RxJavaPlugins.onAssembly(new FlowableSingleSingle<>(source, null));
}
// -------------------------------------------------------------------------
// JDK 8 Support
// -------------------------------------------------------------------------
/**
* Signals the completion value or error of the given (hot) {@link CompletionStage}-based asynchronous calculation.
* <p>
* <img width="640" height="262" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/fromCompletionStage.s.png" alt="">
* <p>
* Note that the operator takes an already instantiated, running or terminated {@code CompletionStage}.
* If the {@code CompletionStage} is to be created per consumer upon subscription, use {@link #defer(Supplier)}
* around {@code fromCompletionStage}:
* <pre><code>
* Single.defer(() -> Single.fromCompletionStage(createCompletionStage()));
* </code></pre>
* <p>
* If the {@code CompletionStage} completes with {@code null}, the resulting {@code Single} is terminated with
* a {@link NullPointerException}.
* <p>
* Canceling the flow can't cancel the execution of the {@code CompletionStage} because {@code CompletionStage}
* itself doesn't support cancellation. Instead, the operator detaches from the {@code CompletionStage}.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code fromCompletionStage} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param <T> the element type of the {@code CompletionStage}
* @param stage the {@code CompletionStage} to convert to {@code Single} and signal its success value or error
* @return the new {@code Single} instance
* @throws NullPointerException if {@code stage} is {@code null}
* @since 3.0.0
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
@NonNull
public static <@NonNull T> Single<@NonNull T> fromCompletionStage(@NonNull CompletionStage<T> stage) {
Objects.requireNonNull(stage, "stage is null");
return RxJavaPlugins.onAssembly(new SingleFromCompletionStage<>(stage));
}
/**
* Maps the upstream success value into an {@link Optional} and emits the contained item if not empty as a {@link Maybe}.
* <p>
* <img width="640" height="323" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/mapOptional.s.png" alt="">
*
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code mapOptional} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param <R> the non-{@code null} output type
* @param mapper the function that receives the upstream success item and should return a <em>non-empty</em> {@code Optional}
* to emit as the success output or an <em>empty</em> {@code Optional} to complete the {@code Maybe}
* @return the new {@code Maybe} instance
* @throws NullPointerException if {@code mapper} is {@code null}
* @since 3.0.0
* @see #map(Function)
* @see #filter(Predicate)
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
@NonNull
public final <@NonNull R> Maybe<R> mapOptional(@NonNull Function<? super T, @NonNull Optional<? extends R>> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
return RxJavaPlugins.onAssembly(new SingleMapOptional<>(this, mapper));
}
/**
* Signals the upstream success item (or error) via a {@link CompletionStage}.
* <p>
* <img width="640" height="321" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/toCompletionStage.s.png" alt="">
* <p>
* The upstream can be canceled by converting the resulting {@code CompletionStage} into
* {@link CompletableFuture} via {@link CompletionStage#toCompletableFuture()} and
* calling {@link CompletableFuture#cancel(boolean)} on it.
* The upstream will be also cancelled if the resulting {@code CompletionStage} is converted to and
* completed manually by {@link CompletableFuture#complete(Object)} or {@link CompletableFuture#completeExceptionally(Throwable)}.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code toCompletionStage} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @return the new {@code CompletionStage} instance
* @since 3.0.0
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
@NonNull
public final CompletionStage<T> toCompletionStage() {
return subscribeWith(new CompletionStageConsumer<>(false, null));
}
    /**
     * Maps the upstream success value into a Java {@link Stream} and emits its
     * items to the downstream consumer as a {@link Flowable}.
     * <p>
     * <img width="640" height="247" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/flattenStreamAsFlowable.s.png" alt="">
     * <p>
     * The operator closes the {@code Stream} upon cancellation and when it terminates. The exceptions raised when
     * closing a {@code Stream} are routed to the global error handler ({@link RxJavaPlugins#onError(Throwable)}).
     * If a {@code Stream} should not be closed, turn it into an {@link Iterable} and use {@link #flattenAsFlowable(Function)}:
     * <pre><code>
     * source.flattenAsFlowable(item -&gt; createStream(item)::iterator);
     * </code></pre>
     * <p>
     * Primitive streams are not supported and items have to be boxed manually (e.g., via {@link IntStream#boxed()}):
     * <pre><code>
     * source.flattenStreamAsFlowable(item -&gt; IntStream.rangeClosed(1, 10).boxed());
     * </code></pre>
     * <p>
     * {@code Stream} does not support concurrent usage so creating and/or consuming the same instance multiple times
     * from multiple threads can lead to undefined behavior.
     * <dl>
     *  <dt><b>Backpressure:</b></dt>
     *  <dd>The operator honors backpressure from downstream and iterates the given {@code Stream}
     *  on demand (i.e., when requested).</dd>
     *  <dt><b>Scheduler:</b></dt>
     *  <dd>{@code flattenStreamAsFlowable} does not operate by default on a particular {@link Scheduler}.</dd>
     * </dl>
     * @param <R> the element type of the {@code Stream} and the output {@code Flowable}
     * @param mapper the function that receives the upstream success item and should
     * return a {@code Stream} of values to emit.
     * @return the new {@code Flowable} instance
     * @throws NullPointerException if {@code mapper} is {@code null}
     * @since 3.0.0
     * @see #flattenAsFlowable(Function)
     * @see #flattenStreamAsObservable(Function)
     */
    @CheckReturnValue
    @SchedulerSupport(SchedulerSupport.NONE)
    @BackpressureSupport(BackpressureKind.FULL)
    @NonNull
    public final <@NonNull R> Flowable<R> flattenStreamAsFlowable(@NonNull Function<? super T, @NonNull ? extends Stream<? extends R>> mapper) {
        Objects.requireNonNull(mapper, "mapper is null");
        return RxJavaPlugins.onAssembly(new SingleFlattenStreamAsFlowable<>(this, mapper));
    }
    /**
     * Maps the upstream success value into a Java {@link Stream} and emits its
     * items to the downstream consumer as an {@link Observable}.
     * <p>
     * <img width="640" height="241" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/flattenStreamAsObservable.s.png" alt="">
     * <p>
     * The operator closes the {@code Stream} upon cancellation and when it terminates. The exceptions raised when
     * closing a {@code Stream} are routed to the global error handler ({@link RxJavaPlugins#onError(Throwable)}).
     * If a {@code Stream} should not be closed, turn it into an {@link Iterable} and use {@link #flattenAsObservable(Function)}:
     * <pre><code>
     * source.flattenAsObservable(item -&gt; createStream(item)::iterator);
     * </code></pre>
     * <p>
     * Primitive streams are not supported and items have to be boxed manually (e.g., via {@link IntStream#boxed()}):
     * <pre><code>
     * source.flattenStreamAsObservable(item -&gt; IntStream.rangeClosed(1, 10).boxed());
     * </code></pre>
     * <p>
     * {@code Stream} does not support concurrent usage so creating and/or consuming the same instance multiple times
     * from multiple threads can lead to undefined behavior.
     * <dl>
     *  <dt><b>Scheduler:</b></dt>
     *  <dd>{@code flattenStreamAsObservable} does not operate by default on a particular {@link Scheduler}.</dd>
     * </dl>
     * @param <R> the element type of the {@code Stream} and the output {@code Observable}
     * @param mapper the function that receives the upstream success item and should
     * return a {@code Stream} of values to emit.
     * @return the new {@code Observable} instance
     * @throws NullPointerException if {@code mapper} is {@code null}
     * @since 3.0.0
     * @see #flattenAsObservable(Function)
     * @see #flattenStreamAsFlowable(Function)
     */
    @CheckReturnValue
    @SchedulerSupport(SchedulerSupport.NONE)
    @NonNull
    public final <@NonNull R> Observable<R> flattenStreamAsObservable(@NonNull Function<? super T, @NonNull ? extends Stream<? extends R>> mapper) {
        Objects.requireNonNull(mapper, "mapper is null");
        return RxJavaPlugins.onAssembly(new SingleFlattenStreamAsObservable<>(this, mapper));
    }
}
| type |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/cache/config/CacheAdviceParser.java | {
"start": 7151,
"end": 9538
} | class ____ {
private final String key;
private final String keyGenerator;
private final String cacheManager;
private final String condition;
private final String method;
private String @Nullable [] caches;
Props(Element root) {
String defaultCache = root.getAttribute("cache");
this.key = root.getAttribute("key");
this.keyGenerator = root.getAttribute("key-generator");
this.cacheManager = root.getAttribute("cache-manager");
this.condition = root.getAttribute("condition");
this.method = root.getAttribute(METHOD_ATTRIBUTE);
if (StringUtils.hasText(defaultCache)) {
this.caches = StringUtils.commaDelimitedListToStringArray(defaultCache.trim());
}
}
<T extends CacheOperation.Builder> T merge(Element element, ReaderContext readerCtx, T builder) {
String cache = element.getAttribute("cache");
// sanity check
String[] localCaches = this.caches;
if (StringUtils.hasText(cache)) {
localCaches = StringUtils.commaDelimitedListToStringArray(cache.trim());
}
if (localCaches != null) {
builder.setCacheNames(localCaches);
}
else {
readerCtx.error("No cache specified for " + element.getNodeName(), element);
}
builder.setKey(getAttributeValue(element, "key", this.key));
builder.setKeyGenerator(getAttributeValue(element, "key-generator", this.keyGenerator));
builder.setCacheManager(getAttributeValue(element, "cache-manager", this.cacheManager));
builder.setCondition(getAttributeValue(element, "condition", this.condition));
if (StringUtils.hasText(builder.getKey()) && StringUtils.hasText(builder.getKeyGenerator())) {
throw new IllegalStateException("Invalid cache advice configuration on '" +
element.toString() + "'. Both 'key' and 'keyGenerator' attributes have been set. " +
"These attributes are mutually exclusive: either set the SpEL expression used to" +
"compute the key at runtime or set the name of the KeyGenerator bean to use.");
}
return builder;
}
@Nullable String merge(Element element, ReaderContext readerCtx) {
String method = element.getAttribute(METHOD_ATTRIBUTE);
if (StringUtils.hasText(method)) {
return method.trim();
}
if (StringUtils.hasText(this.method)) {
return this.method;
}
readerCtx.error("No method specified for " + element.getNodeName(), element);
return null;
}
}
}
| Props |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/plan/nodes/exec/processor/utils/InputPriorityGraphGeneratorTest.java | {
"start": 1337,
"end": 4184
} | class ____ {
    /**
     * Builds a 7-node DAG mixing PIPELINED and END_INPUT edges and verifies that
     * {@code calculatePipelinedAncestors} returns only the ancestors reachable
     * from node 2 through exclusively pipelined edges (nodes 0 and 5 here —
     * traversal stops at every END_INPUT edge).
     */
    @Test
    void testCalculatePipelinedAncestors() {
        // P = InputProperty.DamBehavior.PIPELINED, E = InputProperty.DamBehavior.END_INPUT
        //
        // 0 ------P----> 1 -E--> 2
        //  \-----P----> 3 -P-/
        // 4 -E-> 5 -P-/    /
        // 6 -----E-----/
        TestingBatchExecNode[] nodes = new TestingBatchExecNode[7];
        for (int i = 0; i < nodes.length; i++) {
            nodes[i] = new TestingBatchExecNode("TestingBatchExecNode" + i);
        }
        nodes[1].addInput(nodes[0]);
        nodes[2].addInput(
                nodes[1],
                InputProperty.builder().damBehavior(InputProperty.DamBehavior.END_INPUT).build());
        nodes[2].addInput(nodes[3]);
        nodes[3].addInput(nodes[0]);
        nodes[3].addInput(nodes[5]);
        nodes[3].addInput(
                nodes[6],
                InputProperty.builder().damBehavior(InputProperty.DamBehavior.END_INPUT).build());
        nodes[5].addInput(
                nodes[4],
                InputProperty.builder().damBehavior(InputProperty.DamBehavior.END_INPUT).build());
        TestingInputPriorityConflictResolver resolver =
                new TestingInputPriorityConflictResolver(
                        Collections.singletonList(nodes[2]),
                        Collections.emptySet(),
                        InputProperty.DamBehavior.END_INPUT);
        List<ExecNode<?>> ancestors = resolver.calculatePipelinedAncestors(nodes[2]);
        // Only 0 (via 3) and 5 (via 3) are reachable over purely pipelined paths.
        assertThat(ancestors).hasSize(2);
        assertThat(ancestors).contains(nodes[0]);
        assertThat(ancestors).contains(nodes[5]);
    }
    /**
     * Verifies that ancestor traversal is cut off at a declared "boundary" node:
     * with node 1 in the boundaries set, the pipelined-ancestor search from
     * node 2 stops at node 1 instead of continuing up to node 0.
     */
    @Test
    void testCalculateBoundedPipelinedAncestors() {
        // P = InputProperty.DamBehavior.PIPELINED, E = InputProperty.DamBehavior.END_INPUT
        //
        // 0 -P-> 1 -P-> 2
        // 3 -P-> 4 -E/
        TestingBatchExecNode[] nodes = new TestingBatchExecNode[5];
        for (int i = 0; i < nodes.length; i++) {
            nodes[i] = new TestingBatchExecNode("TestingBatchExecNode" + i);
        }
        nodes[1].addInput(nodes[0]);
        nodes[2].addInput(nodes[1]);
        nodes[2].addInput(
                nodes[4],
                InputProperty.builder().damBehavior(InputProperty.DamBehavior.END_INPUT).build());
        nodes[4].addInput(nodes[3]);
        TestingInputPriorityConflictResolver resolver =
                new TestingInputPriorityConflictResolver(
                        Collections.singletonList(nodes[2]),
                        new HashSet<>(Collections.singleton(nodes[1])),
                        InputProperty.DamBehavior.END_INPUT);
        List<ExecNode<?>> ancestors = resolver.calculatePipelinedAncestors(nodes[2]);
        // Node 1 is a boundary, so it is returned itself and 0 is never visited.
        assertThat(ancestors).hasSize(1);
        assertThat(ancestors).contains(nodes[1]);
    }
private static | InputPriorityGraphGeneratorTest |
java | apache__kafka | connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectWorkerIntegrationTest.java | {
"start": 6355,
"end": 65269
} | class ____ {
    private static final Logger log = LoggerFactory.getLogger(ConnectWorkerIntegrationTest.class);
    // Cluster/topic sizing shared by the test cases below.
    private static final int NUM_TOPIC_PARTITIONS = 3;
    private static final long RECORD_TRANSFER_TIMEOUT_MS = TimeUnit.SECONDS.toMillis(60);
    private static final long OFFSET_COMMIT_INTERVAL_MS = TimeUnit.SECONDS.toMillis(30);
    private static final int NUM_WORKERS = 3;
    private static final int NUM_TASKS = 4;
    private static final int MESSAGES_PER_POLL = 10;
    // Default connector and topic names used unless a test overrides them.
    private static final String CONNECTOR_NAME = "simple-connector";
    private static final String TOPIC_NAME = "test-topic";
    // Per-test mutable state, (re)initialized in setup() and torn down in close().
    private EmbeddedConnectCluster.Builder connectBuilder;
    private EmbeddedConnectCluster connect;
    private Map<String, String> workerProps;
    private Properties brokerProps;
    /**
     * Prepares (but does not start) an embedded Connect cluster builder with the
     * default worker/broker properties; individual tests customize and start it.
     */
    @BeforeEach
    public void setup(TestInfo testInfo) {
        log.info("Starting test {}", testInfo.getDisplayName());
        // setup Connect worker properties
        workerProps = new HashMap<>();
        workerProps.put(OFFSET_COMMIT_INTERVAL_MS_CONFIG, String.valueOf(OFFSET_COMMIT_INTERVAL_MS));
        workerProps.put(CONNECTOR_CLIENT_POLICY_CLASS_CONFIG, "All");
        // setup Kafka broker properties; topics are created explicitly by tests
        brokerProps = new Properties();
        brokerProps.put("auto.create.topics.enable", String.valueOf(false));
        // build a Connect cluster backed by a Kafka KRaft cluster
        connectBuilder = new EmbeddedConnectCluster.Builder()
                .name("connect-cluster")
                .numWorkers(NUM_WORKERS)
                .workerProps(workerProps)
                .brokerProps(brokerProps)
                .maskExitProcedures(true); // true is the default, setting here as example
    }
    /** Shuts down the Connect cluster (and its backing Kafka cluster) after each test. */
    @AfterEach
    public void close(TestInfo testInfo) {
        log.info("Finished test {}", testInfo.getDisplayName());
        // stop the Connect cluster and its backing Kafka cluster.
        connect.stop();
    }
    /**
     * Simple test case to add and then remove a worker from the embedded Connect cluster while
     * running a simple source connector.
     */
    @Test
    public void testAddAndRemoveWorker() throws Exception {
        connect = connectBuilder.build();
        // start the clusters
        connect.start();
        // create test topic
        connect.kafka().createTopic(TOPIC_NAME, NUM_TOPIC_PARTITIONS);
        // set up props for the source connector
        Map<String, String> props = defaultSourceConnectorProps(TOPIC_NAME);
        // start a source connector
        connect.configureConnector(CONNECTOR_NAME, props);
        connect.assertions().assertConnectorAndAtLeastNumTasksAreRunning(CONNECTOR_NAME, NUM_TASKS,
                "Connector tasks did not start in time.");
        // scale the group up by one worker and verify the connector stays healthy
        WorkerHandle extraWorker = connect.addWorker();
        connect.assertions().assertAtLeastNumWorkersAreUp(NUM_WORKERS + 1,
                "Expanded group of workers did not start in time.");
        connect.assertions().assertConnectorAndAtLeastNumTasksAreRunning(CONNECTOR_NAME, NUM_TASKS,
                "Connector tasks are not all in running state.");
        Set<WorkerHandle> workers = connect.healthyWorkers();
        assertTrue(workers.contains(extraWorker));
        // scale back down and verify the removed worker is gone from the group
        connect.removeWorker(extraWorker);
        connect.assertions().assertExactlyNumWorkersAreUp(NUM_WORKERS,
                "Group of workers did not shrink in time.");
        workers = connect.healthyWorkers();
        assertFalse(workers.contains(extraWorker));
    }
    /**
     * Verify that a failed task can be restarted successfully.
     * The task is made to fail at startup via a bogus producer bootstrap address,
     * then restarted through the per-task restart REST endpoint after the bad
     * override is removed.
     */
    @Test
    public void testRestartFailedTask() throws Exception {
        connect = connectBuilder.build();
        // start the clusters
        connect.start();
        int numTasks = 1;
        // setup up props for the source connector
        Map<String, String> props = defaultSourceConnectorProps(TOPIC_NAME);
        // Properties for the source connector. The task should fail at startup due to the bad broker address.
        props.put(TASKS_MAX_CONFIG, Objects.toString(numTasks));
        props.put(CONNECTOR_CLIENT_PRODUCER_OVERRIDES_PREFIX + BOOTSTRAP_SERVERS_CONFIG, "nobrokerrunningatthisaddress");
        // Try to start the connector and its single task.
        connect.configureConnector(CONNECTOR_NAME, props);
        connect.assertions().assertConnectorIsRunningAndTasksHaveFailed(CONNECTOR_NAME, numTasks,
                "Connector tasks did not fail in time");
        // Reconfigure the connector without the bad broker address.
        props.remove(CONNECTOR_CLIENT_PRODUCER_OVERRIDES_PREFIX + BOOTSTRAP_SERVERS_CONFIG);
        connect.configureConnector(CONNECTOR_NAME, props);
        // Restart the failed task via POST /connectors/{name}/tasks/{id}/restart
        String taskRestartEndpoint = connect.endpointForResource(
                String.format("connectors/%s/tasks/0/restart", CONNECTOR_NAME));
        connect.requestPost(taskRestartEndpoint, "", Map.of());
        // Ensure the task started successfully this time
        connect.assertions().assertConnectorAndAtLeastNumTasksAreRunning(CONNECTOR_NAME, numTasks,
                "Connector tasks are not all in running state.");
    }
    /**
     * Verify that a set of tasks restarts correctly after a broker goes offline and back online.
     * Also asserts that every worker's health endpoint reports 500 with an explanatory
     * message while the backing Kafka cluster is down.
     */
    @Test
    public void testBrokerCoordinator() throws Exception {
        ConnectorHandle connectorHandle = RuntimeHandles.get().connectorHandle(CONNECTOR_NAME);
        workerProps.put(DistributedConfig.SCHEDULED_REBALANCE_MAX_DELAY_MS_CONFIG, String.valueOf(5000));
        // start the clusters
        connect = connectBuilder.build();
        connect.start();
        int numTasks = 4;
        // create test topic
        connect.kafka().createTopic(TOPIC_NAME, NUM_TOPIC_PARTITIONS);
        // set up props for the source connector
        Map<String, String> props = defaultSourceConnectorProps(TOPIC_NAME);
        // start a source connector
        connect.configureConnector(CONNECTOR_NAME, props);
        connect.assertions().assertConnectorAndAtLeastNumTasksAreRunning(CONNECTOR_NAME, numTasks,
                "Connector tasks did not start in time.");
        // expect that the connector will be stopped once the coordinator is detected to be down
        StartAndStopLatch stopLatch = connectorHandle.expectedStops(1, false);
        connect.kafka().stopOnlyBrokers();
        // Allow for the workers to discover that the coordinator is unavailable, wait is
        // heartbeat timeout * 2 + 4sec
        Thread.sleep(TimeUnit.SECONDS.toMillis(10));
        connect.requestTimeout(1000);
        assertFalse(
                connect.anyWorkersHealthy(),
                "No workers should be healthy when underlying Kafka cluster is down"
        );
        // Every worker's health check should fail with a 500 and an explanatory body
        connect.workers().forEach(worker -> {
            try (Response response = connect.healthCheck(worker)) {
                assertEquals(INTERNAL_SERVER_ERROR.getStatusCode(), response.getStatus());
                assertNotNull(response.getEntity());
                String body = response.getEntity().toString();
                String expectedSubstring = "Worker was unable to handle this request and may be unable to handle other requests";
                assertTrue(
                        body.contains(expectedSubstring),
                        "Response body '" + body + "' did not contain expected message '" + expectedSubstring + "'"
                );
            }
        });
        connect.resetRequestTimeout();
        // Wait for the connector to be stopped
        assertTrue(stopLatch.await(CONNECTOR_SETUP_DURATION_MS, TimeUnit.MILLISECONDS),
                "Failed to stop connector and tasks after coordinator failure within "
                        + CONNECTOR_SETUP_DURATION_MS + "ms");
        StartAndStopLatch startLatch = connectorHandle.expectedStarts(1, false);
        connect.kafka().restartOnlyBrokers();
        // Allow for the kafka brokers to come back online
        Thread.sleep(TimeUnit.SECONDS.toMillis(10));
        connect.assertions().assertExactlyNumWorkersAreUp(NUM_WORKERS,
                "Group of workers did not remain the same within the designated time.");
        // Allow for the workers to rebalance and reach a steady state
        Thread.sleep(TimeUnit.SECONDS.toMillis(10));
        connect.assertions().assertConnectorAndAtLeastNumTasksAreRunning(CONNECTOR_NAME, numTasks,
                "Connector tasks did not start in time.");
        // Expect that the connector has started again
        assertTrue(startLatch.await(CONNECTOR_SETUP_DURATION_MS, TimeUnit.MILLISECONDS),
                "Failed to stop connector and tasks after coordinator failure within "
                        + CONNECTOR_SETUP_DURATION_MS + "ms");
    }
/**
* Verify that the number of tasks listed in the REST API is updated correctly after changes to
* the "tasks.max" connector configuration.
*/
@Test
public void testTaskStatuses() throws Exception {
connect = connectBuilder.build();
// start the clusters
connect.start();
// base connector props
Map<String, String> props = defaultSourceConnectorProps(TOPIC_NAME);
props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getSimpleName());
// start the connector with only one task
int initialNumTasks = 1;
props.put(TASKS_MAX_CONFIG, String.valueOf(initialNumTasks));
connect.configureConnector(CONNECTOR_NAME, props);
connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(CONNECTOR_NAME,
initialNumTasks, "Connector tasks did not start in time");
// then reconfigure it to use more tasks
int increasedNumTasks = 5;
props.put(TASKS_MAX_CONFIG, String.valueOf(increasedNumTasks));
connect.configureConnector(CONNECTOR_NAME, props);
connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(CONNECTOR_NAME,
increasedNumTasks, "Connector task statuses did not update in time.");
// then reconfigure it to use fewer tasks
int decreasedNumTasks = 3;
props.put(TASKS_MAX_CONFIG, String.valueOf(decreasedNumTasks));
connect.configureConnector(CONNECTOR_NAME, props);
connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(CONNECTOR_NAME,
decreasedNumTasks, "Connector task statuses did not update in time.");
}
@Test
public void testSourceTaskNotBlockedOnShutdownWithNonExistentTopic() throws Exception {
// When automatic topic creation is disabled on the broker
brokerProps.put("auto.create.topics.enable", "false");
connect = connectBuilder
.brokerProps(brokerProps)
.numWorkers(1)
.numBrokers(1)
.build();
connect.start();
// and when the connector is not configured to create topics
Map<String, String> props = defaultSourceConnectorProps("nonexistenttopic");
props.remove(DEFAULT_TOPIC_CREATION_PREFIX + REPLICATION_FACTOR_CONFIG);
props.remove(DEFAULT_TOPIC_CREATION_PREFIX + PARTITIONS_CONFIG);
props.put("throughput", "-1");
ConnectorHandle connector = RuntimeHandles.get().connectorHandle(CONNECTOR_NAME);
connector.expectedRecords(NUM_TASKS * MESSAGES_PER_POLL);
connect.configureConnector(CONNECTOR_NAME, props);
connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(CONNECTOR_NAME,
NUM_TASKS, "Connector tasks did not start in time");
connector.awaitRecords(TimeUnit.MINUTES.toMillis(1));
// Then, if we delete the connector, it and each of its tasks should be stopped by the framework
// even though the producer is blocked because there is no topic
StartAndStopLatch stopCounter = connector.expectedStops(1);
connect.deleteConnector(CONNECTOR_NAME);
assertTrue(stopCounter.await(1, TimeUnit.MINUTES), "Connector and all tasks were not stopped in time");
}
/**
* Verify that the target state (started, paused, stopped) of a connector can be updated, with
* an emphasis on ensuring that the transitions between each state are correct.
* <p>
* The transitions we need to cover are:
* <ol>
* <li>RUNNING -> PAUSED</li>
* <li>RUNNING -> STOPPED</li>
* <li>PAUSED -> RUNNING</li>
* <li>PAUSED -> STOPPED</li>
* <li>STOPPED -> RUNNING</li>
* <li>STOPPED -> PAUSED</li>
* </ol>
* With some reordering, we can perform each transition just once:
* <ul>
* <li>Start with RUNNING</li>
* <li>Transition to STOPPED (2)</li>
* <li>Transition to RUNNING (5)</li>
* <li>Transition to PAUSED (1)</li>
* <li>Transition to STOPPED (4)</li>
* <li>Transition to PAUSED (6)</li>
* <li>Transition to RUNNING (3)</li>
* </ul>
*/
@Test
public void testPauseStopResume() throws Exception {
connect = connectBuilder.build();
// start the clusters
connect.start();
// Want to make sure to use multiple tasks
final int numTasks = 4;
Map<String, String> props = defaultSourceConnectorProps(TOPIC_NAME);
props.put(TASKS_MAX_CONFIG, Integer.toString(numTasks));
// Start with RUNNING
connect.configureConnector(CONNECTOR_NAME, props);
connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(
CONNECTOR_NAME,
numTasks,
"Connector tasks did not start in time"
);
// Transition to STOPPED
connect.stopConnector(CONNECTOR_NAME);
// Issue a second request to ensure that this operation is idempotent
connect.stopConnector(CONNECTOR_NAME);
connect.assertions().assertConnectorIsStopped(
CONNECTOR_NAME,
"Connector did not stop in time"
);
// If the connector is truly stopped, we should also see an empty set of tasks and task configs
assertEquals(List.of(), connect.connectorInfo(CONNECTOR_NAME).tasks());
assertEquals(List.of(), connect.taskConfigs(CONNECTOR_NAME));
// Transition to RUNNING
connect.resumeConnector(CONNECTOR_NAME);
// Issue a second request to ensure that this operation is idempotent
connect.resumeConnector(CONNECTOR_NAME);
connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(
CONNECTOR_NAME,
numTasks,
"Connector tasks did not resume in time"
);
// Transition to PAUSED
connect.pauseConnector(CONNECTOR_NAME);
// Issue a second request to ensure that this operation is idempotent
connect.pauseConnector(CONNECTOR_NAME);
connect.assertions().assertConnectorAndExactlyNumTasksArePaused(
CONNECTOR_NAME,
numTasks,
"Connector did not pause in time"
);
// Transition to STOPPED
connect.stopConnector(CONNECTOR_NAME);
connect.assertions().assertConnectorIsStopped(
CONNECTOR_NAME,
"Connector did not stop in time"
);
assertEquals(List.of(), connect.connectorInfo(CONNECTOR_NAME).tasks());
assertEquals(List.of(), connect.taskConfigs(CONNECTOR_NAME));
// Transition to PAUSED
connect.pauseConnector(CONNECTOR_NAME);
connect.assertions().assertConnectorAndExactlyNumTasksArePaused(
CONNECTOR_NAME,
0,
"Connector did not pause in time"
);
// Transition to RUNNING
connect.resumeConnector(CONNECTOR_NAME);
connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(
CONNECTOR_NAME,
numTasks,
"Connector tasks did not resume in time"
);
// Delete the connector
connect.deleteConnector(CONNECTOR_NAME);
connect.assertions().assertConnectorDoesNotExist(
CONNECTOR_NAME,
"Connector wasn't deleted in time"
);
}
/**
* Test out the {@code STOPPED} state introduced in
* <a href="https://cwiki.apache.org/confluence/display/KAFKA/KIP-875%3A+First-class+offsets+support+in+Kafka+Connect#KIP875:FirstclassoffsetssupportinKafkaConnect-Newtargetstate:STOPPED">KIP-875</a>,
* with an emphasis on correctly handling errors thrown from the connector.
*/
@Test
public void testStoppedState() throws Exception {
connect = connectBuilder.build();
// start the clusters
connect.start();
Map<String, String> props = defaultSourceConnectorProps(TOPIC_NAME);
// Fail the connector on startup
props.put("connector.start.inject.error", "true");
// Start the connector (should fail immediately and generate no tasks)
connect.configureConnector(CONNECTOR_NAME, props);
connect.assertions().assertConnectorIsFailedAndTasksHaveFailed(
CONNECTOR_NAME,
0,
"Connector should have failed and not generated any tasks"
);
// Stopping a failed connector updates its state to STOPPED in the REST API
connect.stopConnector(CONNECTOR_NAME);
connect.assertions().assertConnectorIsStopped(
CONNECTOR_NAME,
"Connector did not stop in time"
);
// If the connector is truly stopped, we should also see an empty set of tasks and task configs
assertEquals(List.of(), connect.connectorInfo(CONNECTOR_NAME).tasks());
assertEquals(List.of(), connect.taskConfigs(CONNECTOR_NAME));
// Can resume a connector after its Connector has failed before shutdown after receiving a stop request
props.remove("connector.start.inject.error");
connect.configureConnector(CONNECTOR_NAME, props);
connect.resumeConnector(CONNECTOR_NAME);
connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(
CONNECTOR_NAME,
NUM_TASKS,
"Connector or tasks did not start running healthily in time"
);
// Fail the connector on shutdown
props.put("connector.stop.inject.error", "true");
// Stopping a connector that fails during shutdown after receiving a stop request updates its state to STOPPED in the REST API
connect.configureConnector(CONNECTOR_NAME, props);
connect.stopConnector(CONNECTOR_NAME);
connect.assertions().assertConnectorIsStopped(
CONNECTOR_NAME,
"Connector did not stop in time"
);
assertEquals(List.of(), connect.connectorInfo(CONNECTOR_NAME).tasks());
assertEquals(List.of(), connect.taskConfigs(CONNECTOR_NAME));
// Can resume a connector after its Connector has failed during shutdown after receiving a stop request
connect.resumeConnector(CONNECTOR_NAME);
connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(
CONNECTOR_NAME,
NUM_TASKS,
"Connector or tasks did not start running healthily in time"
);
// Can delete a stopped connector
connect.deleteConnector(CONNECTOR_NAME);
connect.assertions().assertConnectorDoesNotExist(
CONNECTOR_NAME,
"Connector wasn't deleted in time"
);
}
@Test
public void testCreateConnectorWithPausedInitialState() throws Exception {
connect = connectBuilder.build();
// start the clusters
connect.start();
CreateConnectorRequest createConnectorRequest = new CreateConnectorRequest(
CONNECTOR_NAME,
defaultSourceConnectorProps(TOPIC_NAME),
CreateConnectorRequest.InitialState.PAUSED
);
connect.configureConnector(createConnectorRequest);
// Verify that the connector's status is PAUSED and also that no tasks were spawned for the connector
connect.assertions().assertConnectorAndExactlyNumTasksArePaused(
CONNECTOR_NAME,
0,
"Connector was not created in a paused state"
);
assertEquals(List.of(), connect.connectorInfo(CONNECTOR_NAME).tasks());
assertEquals(List.of(), connect.taskConfigs(CONNECTOR_NAME));
// Verify that a connector created in the PAUSED state can be resumed successfully
connect.resumeConnector(CONNECTOR_NAME);
connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(
CONNECTOR_NAME,
NUM_TASKS,
"Connector or tasks did not start running healthily in time"
);
}
@Test
public void testCreateSourceConnectorWithStoppedInitialStateAndModifyOffsets() throws Exception {
connect = connectBuilder.build();
// start the clusters
connect.start();
Map<String, String> props = defaultSourceConnectorProps(TOPIC_NAME);
// Configure the connector to produce a maximum of 10 messages
props.put("max.messages", "10");
props.put(TASKS_MAX_CONFIG, "1");
CreateConnectorRequest createConnectorRequest = new CreateConnectorRequest(
CONNECTOR_NAME,
props,
CreateConnectorRequest.InitialState.STOPPED
);
connect.configureConnector(createConnectorRequest);
// Verify that the connector's status is STOPPED and also that no tasks were spawned for the connector
connect.assertions().assertConnectorIsStopped(
CONNECTOR_NAME,
"Connector was not created in a stopped state"
);
assertEquals(List.of(), connect.connectorInfo(CONNECTOR_NAME).tasks());
assertEquals(List.of(), connect.taskConfigs(CONNECTOR_NAME));
// Verify that the offsets can be modified for a source connector created in the STOPPED state
// Alter the offsets so that only 5 messages are produced
connect.alterSourceConnectorOffset(
CONNECTOR_NAME,
Map.of("task.id", CONNECTOR_NAME + "-0"),
Map.of("saved", 5L)
);
// Verify that a connector created in the STOPPED state can be resumed successfully
connect.resumeConnector(CONNECTOR_NAME);
connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(
CONNECTOR_NAME,
1,
"Connector or tasks did not start running healthily in time"
);
// Verify that only 5 messages were produced. We verify this by consuming all the messages from the topic after we've already ensured that at
// least 5 messages can be consumed.
long timeoutMs = TimeUnit.SECONDS.toMillis(10);
connect.kafka().consume(5, timeoutMs, TOPIC_NAME);
assertEquals(5, connect.kafka().consumeAll(timeoutMs, TOPIC_NAME).count());
}
@Test
public void testCreateSinkConnectorWithStoppedInitialStateAndModifyOffsets() throws Exception {
connect = connectBuilder.build();
// start the clusters
connect.start();
// Create topic and produce 10 messages
connect.kafka().createTopic(TOPIC_NAME);
for (int i = 0; i < 10; i++) {
connect.kafka().produce(TOPIC_NAME, "Message " + i);
}
Map<String, String> props = defaultSinkConnectorProps(TOPIC_NAME);
props.put(TASKS_MAX_CONFIG, "1");
CreateConnectorRequest createConnectorRequest = new CreateConnectorRequest(
CONNECTOR_NAME,
props,
CreateConnectorRequest.InitialState.STOPPED
);
connect.configureConnector(createConnectorRequest);
// Verify that the connector's status is STOPPED and also that no tasks were spawned for the connector
connect.assertions().assertConnectorIsStopped(
CONNECTOR_NAME,
"Connector was not created in a stopped state"
);
assertEquals(List.of(), connect.connectorInfo(CONNECTOR_NAME).tasks());
assertEquals(List.of(), connect.taskConfigs(CONNECTOR_NAME));
// Verify that the offsets can be modified for a sink connector created in the STOPPED state
// Alter the offsets so that the first 5 messages in the topic are skipped
connect.alterSinkConnectorOffset(CONNECTOR_NAME, new TopicPartition(TOPIC_NAME, 0), 5L);
// This will cause the connector task to fail if it encounters a record with offset < 5
TaskHandle taskHandle = RuntimeHandles.get().connectorHandle(CONNECTOR_NAME).taskHandle(CONNECTOR_NAME + "-0",
sinkRecord -> {
if (sinkRecord.kafkaOffset() < 5L) {
throw new ConnectException("Unexpected record encountered: " + sinkRecord);
}
});
// We produced 10 records and altered the connector offsets to skip over the first 5, so we expect 5 records to be consumed
taskHandle.expectedRecords(5);
// Verify that a connector created in the STOPPED state can be resumed successfully
connect.resumeConnector(CONNECTOR_NAME);
connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(
CONNECTOR_NAME,
1,
"Connector or tasks did not start running healthily in time"
);
taskHandle.awaitRecords(TimeUnit.SECONDS.toMillis(10));
// Confirm that the task is still running (i.e. it didn't fail due to encountering any records with offset < 5)
connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(
CONNECTOR_NAME,
1,
"Connector or tasks did not start running healthily in time"
);
}
@Test
public void testDeleteConnectorCreatedWithPausedOrStoppedInitialState() throws Exception {
connect = connectBuilder.build();
// start the clusters
connect.start();
// Create a connector with PAUSED initial state
CreateConnectorRequest createConnectorRequest = new CreateConnectorRequest(
CONNECTOR_NAME,
defaultSourceConnectorProps(TOPIC_NAME),
CreateConnectorRequest.InitialState.PAUSED
);
connect.configureConnector(createConnectorRequest);
// Verify that the connector's status is PAUSED and also that no tasks were spawned for the connector
connect.assertions().assertConnectorAndExactlyNumTasksArePaused(
CONNECTOR_NAME,
0,
"Connector was not created in a paused state"
);
assertEquals(List.of(), connect.connectorInfo(CONNECTOR_NAME).tasks());
assertEquals(List.of(), connect.taskConfigs(CONNECTOR_NAME));
// Verify that a connector created in the PAUSED state can be deleted successfully
connect.deleteConnector(CONNECTOR_NAME);
connect.assertions().assertConnectorDoesNotExist(CONNECTOR_NAME, "Connector wasn't deleted in time");
// Create a connector with STOPPED initial state
createConnectorRequest = new CreateConnectorRequest(
CONNECTOR_NAME,
defaultSourceConnectorProps(TOPIC_NAME),
CreateConnectorRequest.InitialState.STOPPED
);
connect.configureConnector(createConnectorRequest);
// Verify that the connector's status is STOPPED and also that no tasks were spawned for the connector
connect.assertions().assertConnectorIsStopped(
CONNECTOR_NAME,
"Connector was not created in a stopped state"
);
assertEquals(List.of(), connect.connectorInfo(CONNECTOR_NAME).tasks());
assertEquals(List.of(), connect.taskConfigs(CONNECTOR_NAME));
// Verify that a connector created in the STOPPED state can be deleted successfully
connect.deleteConnector(CONNECTOR_NAME);
connect.assertions().assertConnectorDoesNotExist(CONNECTOR_NAME, "Connector wasn't deleted in time");
}
@Test
public void testPatchConnectorConfig() throws Exception {
connect = connectBuilder.build();
// start the clusters
connect.start();
connect.kafka().createTopic(TOPIC_NAME);
Map<String, String> props = defaultSinkConnectorProps(TOPIC_NAME);
props.put("unaffected-key", "unaffected-value");
props.put("to-be-deleted-key", "value");
props.put(TASKS_MAX_CONFIG, "2");
Map<String, String> patch = new HashMap<>();
patch.put(TASKS_MAX_CONFIG, "3"); // this plays as a value to be changed
patch.put("to-be-added-key", "value");
patch.put("to-be-deleted-key", null);
connect.configureConnector(CONNECTOR_NAME, props);
connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(CONNECTOR_NAME, 2,
"connector and tasks did not start in time");
connect.patchConnectorConfig(CONNECTOR_NAME, patch);
connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(CONNECTOR_NAME, 3,
"connector and tasks did not reconfigure and restart in time");
Map<String, String> expectedConfig = new HashMap<>(props);
expectedConfig.put("name", CONNECTOR_NAME);
expectedConfig.put("to-be-added-key", "value");
expectedConfig.put(TASKS_MAX_CONFIG, "3");
expectedConfig.remove("to-be-deleted-key");
assertEquals(expectedConfig, connect.connectorInfo(CONNECTOR_NAME).config());
}
private Map<String, String> defaultSinkConnectorProps(String topics) {
// setup props for the sink connector
Map<String, String> props = new HashMap<>();
props.put(CONNECTOR_CLASS_CONFIG, TestableSinkConnector.class.getSimpleName());
props.put(TASKS_MAX_CONFIG, String.valueOf(NUM_TASKS));
props.put(TOPICS_CONFIG, topics);
return props;
}
@Test
public void testRequestTimeouts() throws Exception {
final String configTopic = "test-request-timeout-configs";
workerProps.put(CONFIG_TOPIC_CONFIG, configTopic);
// Workaround for KAFKA-15676, which can cause the scheduled rebalance delay to
// be spuriously triggered after the group coordinator for a Connect cluster is bounced
workerProps.put(SCHEDULED_REBALANCE_MAX_DELAY_MS_CONFIG, "0");
workerProps.put(METADATA_RECOVERY_STRATEGY_CONFIG, MetadataRecoveryStrategy.NONE.name);
connect = connectBuilder
.numWorkers(1)
.build();
connect.start();
Map<String, String> connectorConfig1 = defaultSourceConnectorProps(TOPIC_NAME);
Map<String, String> connectorConfig2 = new HashMap<>(connectorConfig1);
connectorConfig2.put(TASKS_MAX_CONFIG, Integer.toString(NUM_TASKS + 1));
// Create a connector to ensure that the worker has completed startup
log.info("Creating initial connector");
connect.configureConnector(CONNECTOR_NAME, connectorConfig1);
connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(
CONNECTOR_NAME, NUM_TASKS, "connector and tasks did not start in time"
);
// Bring down Kafka, which should cause some REST requests to fail
log.info("Stopping Kafka cluster");
connect.kafka().stopOnlyBrokers();
// Try to reconfigure the connector, which should fail with a timeout error
log.info("Trying to reconfigure connector while Kafka cluster is down");
assertTimeoutException(
() -> connect.configureConnector(CONNECTOR_NAME, connectorConfig2),
"flushing updates to the status topic",
true
);
log.info("Restarting Kafka cluster");
connect.kafka().restartOnlyBrokers();
connect.assertions().assertExactlyNumBrokersAreUp(1, "Broker did not complete startup in time");
log.info("Kafka cluster is restarted");
// Reconfigure the connector to ensure that the broker has completed startup
log.info("Reconfiguring connector with one more task");
connect.configureConnector(CONNECTOR_NAME, connectorConfig2);
connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(
CONNECTOR_NAME, NUM_TASKS + 1, "connector and tasks did not start in time"
);
// Delete the config topic--WCGW?
log.info("Deleting Kafka Connect config topic");
connect.kafka().deleteTopic(configTopic);
// Try to reconfigure the connector, which should fail with a slightly-different timeout error
log.info("Trying to reconfigure connector after config topic has been deleted");
assertTimeoutException(
() -> connect.configureConnector(CONNECTOR_NAME, connectorConfig1),
"writing a config for connector " + CONNECTOR_NAME + " to the config topic",
true
);
// The worker should still be blocked on the same operation, and the timeout should occur
// immediately
log.info("Trying to delete connector after config topic has been deleted");
assertTimeoutException(
() -> connect.deleteConnector(CONNECTOR_NAME),
"writing a config for connector " + CONNECTOR_NAME + " to the config topic",
false
);
}
@Test
public void testPollTimeoutExpiry() throws Exception {
// This is a fabricated test to ensure that a poll timeout expiry happens. The tick thread awaits on
// task#stop method which is blocked. The timeouts have been set accordingly
workerProps.put(REBALANCE_TIMEOUT_MS_CONFIG, Long.toString(TimeUnit.SECONDS.toMillis(20)));
workerProps.put(TASK_SHUTDOWN_GRACEFUL_TIMEOUT_MS_CONFIG, Long.toString(TimeUnit.SECONDS.toMillis(30)));
connect = connectBuilder
.numBrokers(1)
.numWorkers(1)
.build();
connect.start();
Map<String, String> connectorWithBlockingTaskStopConfig = new HashMap<>();
connectorWithBlockingTaskStopConfig.put(CONNECTOR_CLASS_CONFIG, BlockingConnectorTest.BlockingSourceConnector.class.getName());
connectorWithBlockingTaskStopConfig.put(TASKS_MAX_CONFIG, "1");
connectorWithBlockingTaskStopConfig.put(BlockingConnectorTest.Block.BLOCK_CONFIG, Objects.requireNonNull(TASK_STOP));
connect.configureConnector(CONNECTOR_NAME, connectorWithBlockingTaskStopConfig);
connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(
CONNECTOR_NAME, 1, "connector and tasks did not start in time"
);
try (LogCaptureAppender logCaptureAppender = LogCaptureAppender.createAndRegister(DistributedHerder.class)) {
connect.restartTask(CONNECTOR_NAME, 0);
TestUtils.waitForCondition(() -> logCaptureAppender.getEvents().stream().anyMatch(e -> e.getLevel().equals("WARN")) &&
logCaptureAppender.getEvents().stream().anyMatch(e ->
// Ensure that the tick thread is blocked on the stage which we expect it to be, i.e restarting the task.
e.getMessage().contains("worker poll timeout has expired") &&
e.getMessage().contains("The last known action being performed by the worker is : restarting task " + CONNECTOR_NAME + "-0")
),
"Coordinator did not poll for rebalance.timeout.ms");
// This clean up ensures that the test ends quickly as o/w we will wait for task#stop.
BlockingConnectorTest.Block.reset();
}
}
    /**
     * Asserts that {@code operation} fails with a 500 ConnectRestException whose message names
     * the expected blocked worker stage, and that every worker's health endpoint reports the
     * same stage. When {@code wait} is true, retries the operation for up to 30s until it times
     * out as expected; when false, the very first invocation must already time out.
     */
    private void assertTimeoutException(Runnable operation, String expectedStageDescription, boolean wait) throws InterruptedException {
        connect.requestTimeout(1_000);
        AtomicReference<Throwable> latestError = new AtomicReference<>();
        // If requested, wait for the specific operation against the Connect cluster to time out
        // Otherwise, assert that the operation times out immediately
        long timeoutMs = wait ? 30_000L : 0L;
        waitForCondition(
                () -> {
                    try {
                        operation.run();
                        latestError.set(null);
                        return false;
                    } catch (Throwable t) {
                        latestError.set(t);
                        assertInstanceOf(ConnectRestException.class, t);
                        ConnectRestException restException = (ConnectRestException) t;
                        assertEquals(INTERNAL_SERVER_ERROR.getStatusCode(), restException.statusCode());
                        assertNotNull(restException.getMessage());
                        assertTrue(
                                restException.getMessage().contains("Request timed out. The worker is currently " + expectedStageDescription),
                                "Message '" + restException.getMessage() + "' does not match expected format"
                        );
                        return true;
                    }
                },
                timeoutMs,
                () -> {
                    String baseMessage = "REST request did not time out with expected error message in time. ";
                    Throwable t = latestError.get();
                    if (t == null) {
                        return baseMessage + "The most recent request did not fail.";
                    } else {
                        return baseMessage + "Most recent error: " + t;
                    }
                }
        );
        // Ensure that the health check endpoints of all workers also report the same timeout message
        connect.workers().forEach(worker -> {
            try (Response response = connect.healthCheck(worker)) {
                assertEquals(INTERNAL_SERVER_ERROR.getStatusCode(), response.getStatus());
                assertNotNull(response.getEntity());
                String body = response.getEntity().toString();
                String expectedSubstring = "Worker was unable to handle this request and may be unable to handle other requests";
                assertTrue(
                        body.contains(expectedSubstring),
                        "Response body '" + body + "' did not contain expected message '" + expectedSubstring + "'"
                );
                assertTrue(
                        body.contains(expectedStageDescription),
                        "Response body '" + body + "' did not contain expected message '" + expectedStageDescription + "'"
                );
            }
        });
        connect.resetRequestTimeout();
    }
/**
* Tests the logic around enforcement of the
* {@link org.apache.kafka.connect.runtime.ConnectorConfig#TASKS_MAX_CONFIG tasks.max}
* property and how it can be toggled via the
* {@link org.apache.kafka.connect.runtime.ConnectorConfig#TASKS_MAX_ENFORCE_CONFIG tasks.max.enforce}
* property, following the test plain laid out in
* <a href="https://cwiki.apache.org/confluence/display/KAFKA/KIP-1004%3A+Enforce+tasks.max+property+in+Kafka+Connect#KIP1004:Enforcetasks.maxpropertyinKafkaConnect-TestPlan">KIP-1004</a>.
*/
@Test
public void testTasksMaxEnforcement() throws Exception {
String configTopic = "tasks-max-enforcement-configs";
workerProps.put(CONFIG_TOPIC_CONFIG, configTopic);
connect = connectBuilder.build();
// start the clusters
connect.start();
Map<String, String> connectorProps = defaultSourceConnectorProps(TOPIC_NAME);
int maxTasks = 1;
connectorProps.put(TASKS_MAX_CONFIG, Integer.toString(maxTasks));
int numTasks = 2;
connectorProps.put(TestableSourceConnector.NUM_TASKS, Integer.toString(numTasks));
connect.configureConnector(CONNECTOR_NAME, connectorProps);
// A connector that generates excessive tasks will be failed with an expected error message
connect.assertions().assertConnectorIsFailedAndTasksHaveFailed(
CONNECTOR_NAME,
0,
"connector did not fail in time"
);
String expectedErrorSnippet = String.format(
"The connector %s has generated %d tasks, which is greater than %d, "
+ "the maximum number of tasks it is configured to create. ",
CONNECTOR_NAME,
numTasks,
maxTasks
);
String errorMessage = connect.connectorStatus(CONNECTOR_NAME).connector().trace();
assertTrue(errorMessage.contains(expectedErrorSnippet));
// Stop all workers in the cluster
connect.workers().forEach(connect::removeWorker);
// Publish a set of too many task configs to the config topic, to simulate
// an existing set of task configs that was written before the cluster was upgraded
try (JsonConverter converter = new JsonConverter()) {
converter.configure(
Map.of(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"),
false
);
for (int i = 0; i < numTasks; i++) {
Map<String, String> taskConfig = TestableSourceConnector.taskConfig(
connectorProps,
CONNECTOR_NAME,
i
);
Struct wrappedTaskConfig = new Struct(KafkaConfigBackingStore.TASK_CONFIGURATION_V0)
.put("properties", taskConfig);
String key = KafkaConfigBackingStore.TASK_KEY(new ConnectorTaskId(CONNECTOR_NAME, i));
byte[] value = converter.fromConnectData(
configTopic,
KafkaConfigBackingStore.TASK_CONFIGURATION_V0,
wrappedTaskConfig
);
connect.kafka().produce(configTopic, key, new String(value));
}
Struct taskCommitMessage = new Struct(KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0);
taskCommitMessage.put("tasks", numTasks);
String key = KafkaConfigBackingStore.COMMIT_TASKS_KEY(CONNECTOR_NAME);
byte[] value = converter.fromConnectData(
configTopic,
KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0,
taskCommitMessage
);
connect.kafka().produce(configTopic, key, new String(value));
}
// Restart all the workers in the cluster
for (int i = 0; i < NUM_WORKERS; i++)
connect.addWorker();
// An existing set of tasks that exceeds the tasks.max property
// will be failed with an expected error message
connect.assertions().assertConnectorIsFailedAndTasksHaveFailed(
CONNECTOR_NAME,
numTasks,
"connector and tasks did not fail in time"
);
connectorProps.put(TASKS_MAX_ENFORCE_CONFIG, "false");
connect.configureConnector(CONNECTOR_NAME, connectorProps);
// That same existing set of tasks will be allowed to run
// once the connector is reconfigured with tasks.max.enforce set to false
connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(
CONNECTOR_NAME,
numTasks,
"connector and tasks did not start in time"
);
numTasks++;
connectorProps.put(TestableSourceConnector.NUM_TASKS, Integer.toString(numTasks));
connect.configureConnector(CONNECTOR_NAME, connectorProps);
// A connector will be allowed to generate excessive tasks when tasks.max.enforce is set to false
connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(
CONNECTOR_NAME,
numTasks,
"connector and tasks did not start in time"
);
numTasks = maxTasks;
connectorProps.put(TestableSourceConnector.NUM_TASKS, Integer.toString(numTasks));
connectorProps.put(TASKS_MAX_ENFORCE_CONFIG, "true");
connect.configureConnector(CONNECTOR_NAME, connectorProps);
connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(
CONNECTOR_NAME,
numTasks,
"connector and tasks did not start in time"
);
numTasks = maxTasks + 1;
connectorProps.put(TestableSourceConnector.NUM_TASKS, Integer.toString(numTasks));
connect.configureConnector(CONNECTOR_NAME, connectorProps);
// A connector that generates excessive tasks after being reconfigured will be failed, but its existing tasks will continue running
connect.assertions().assertConnectorIsFailedAndNumTasksAreRunning(
CONNECTOR_NAME,
maxTasks,
"connector did not fail in time, or tasks were incorrectly failed"
);
// Make sure that the tasks have had a chance to fail (i.e., that the worker has been given
// a chance to check on the number of tasks for the connector during task startup)
for (int i = 0; i < maxTasks; i++)
connect.restartTask(CONNECTOR_NAME, i);
// Verify one more time that none of the tasks have actually failed
connect.assertions().assertConnectorIsFailedAndNumTasksAreRunning(
CONNECTOR_NAME,
maxTasks,
"connector did not fail in time, or tasks were incorrectly failed"
);
}
    /**
     * Task configs are not removed from the config topic after a connector is deleted.
     * When topic compaction takes place, this can cause the tombstone message for the
     * connector config to be deleted, leaving the task configs in the config topic with no
     * explicit record of the connector's deletion.
     * <p>
     * This test guarantees that those older task configs are never used, even when the
     * connector is recreated later.
     */
    @Test
    public void testCompactedDeletedOlderConnectorConfig() throws Exception {
        // Broker-side log cleaner settings tuned to make compaction of the config topic
        // happen as quickly and aggressively as possible
        brokerProps.put("log.cleaner.backoff.ms", "100");
        brokerProps.put("log.cleaner.delete.retention.ms", "1");
        brokerProps.put("log.cleaner.max.compaction.lag.ms", "1");
        brokerProps.put("log.cleaner.min.cleanable.ratio", "0");
        brokerProps.put("log.cleaner.min.compaction.lag.ms", "1");
        brokerProps.put("log.cleaner.threads", "1");
        final String configTopic = "kafka-16838-configs";
        final int offsetCommitIntervalMs = 100;
        // Small segments and minimal delete retention on the config topic so that the
        // connector-config tombstone becomes eligible for removal almost immediately
        workerProps.put(CONFIG_TOPIC_CONFIG, configTopic);
        workerProps.put(CONFIG_STORAGE_PREFIX + SEGMENT_MS_CONFIG, "100");
        workerProps.put(CONFIG_STORAGE_PREFIX + DELETE_RETENTION_MS_CONFIG, "1");
        workerProps.put(OFFSET_COMMIT_INTERVAL_MS_CONFIG, Integer.toString(offsetCommitIntervalMs));
        final int numWorkers = 1;
        connect = connectBuilder
            .numWorkers(numWorkers)
            .build();
        // start the clusters
        connect.start();
        final String connectorTopic = "connector-topic";
        connect.kafka().createTopic(connectorTopic, 1);
        ConnectorHandle connectorHandle = RuntimeHandles.get().connectorHandle(CONNECTOR_NAME);
        connectorHandle.expectedCommits(NUM_TASKS * 2);
        // Create a source connector, let it produce and commit offsets, then delete it
        Map<String, String> connectorConfig = defaultSourceConnectorProps(connectorTopic);
        connect.configureConnector(CONNECTOR_NAME, connectorConfig);
        connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(
                CONNECTOR_NAME,
                NUM_TASKS,
                "Connector or its tasks did not start in time"
        );
        connectorHandle.awaitCommits(RECORD_TRANSFER_TIMEOUT_MS);
        connect.deleteConnector(CONNECTOR_NAME);
        // Roll the entire cluster
        connect.healthyWorkers().forEach(connect::removeWorker);
        // Miserable hack: produce directly to the config topic and then wait a little bit
        // in order to trigger segment rollover and allow compaction to take place
        // (compaction should remove the connector-config tombstone while leaving the
        // now-orphaned task configs in the topic)
        connect.kafka().produce(configTopic, "garbage-key-1", null);
        Thread.sleep(1_000);
        connect.kafka().produce(configTopic, "garbage-key-2", null);
        Thread.sleep(1_000);
        for (int i = 0; i < numWorkers; i++)
            connect.addWorker();
        connect.assertions().assertAtLeastNumWorkersAreUp(
                numWorkers,
                "Workers did not start in time after cluster was rolled."
        );
        // Snapshot how far the (now deleted) connector got in the original topic
        final TopicPartition connectorTopicPartition = new TopicPartition(connectorTopic, 0);
        final long initialEndOffset = connect.kafka().endOffset(connectorTopicPartition);
        assertTrue(
                initialEndOffset > 0,
                "Source connector should have published at least one record to Kafka"
        );
        connectorHandle.expectedCommits(NUM_TASKS * 2);
        // Re-create the connector with a different config (targets a different topic)
        final String otherConnectorTopic = "other-topic";
        connect.kafka().createTopic(otherConnectorTopic, 1);
        connectorConfig.put(TOPIC_CONFIG, otherConnectorTopic);
        connect.configureConnector(CONNECTOR_NAME, connectorConfig);
        connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(
                CONNECTOR_NAME,
                NUM_TASKS,
                "Connector or its tasks did not start in time"
        );
        connectorHandle.awaitCommits(RECORD_TRANSFER_TIMEOUT_MS);
        // See if any new records got written to the old topic; if the stale (compacted-away)
        // task configs had been reused, the recreated connector would still target it
        final long nextEndOffset = connect.kafka().endOffset(connectorTopicPartition);
        assertEquals(
                initialEndOffset,
                nextEndOffset,
                "No new records should have been written to the older topic"
        );
    }
    /**
     * If a connector has existing tasks, and then generates new task configs, workers compare the
     * new and existing configs before publishing them to the config topic. If there is no difference,
     * workers do not publish task configs (this is a workaround to prevent infinite loops with eager
     * rebalancing).
     * <p>
     * This test tries to guarantee that, if the old task configs become invalid because of
     * an invalid config provider reference, it will still be possible to reconfigure the connector.
     */
    @Test
    public void testReconfigureConnectorWithFailingTaskConfigs(@TempDir Path tmp) throws Exception {
        final int offsetCommitIntervalMs = 100;
        // Enable the file config provider so connector configs can reference an external secrets file
        workerProps.put(CONFIG_PROVIDERS_CONFIG, "file");
        workerProps.put(CONFIG_PROVIDERS_CONFIG + ".file.class", FileConfigProvider.class.getName());
        workerProps.put(OFFSET_COMMIT_INTERVAL_MS_CONFIG, Integer.toString(offsetCommitIntervalMs));
        final int numWorkers = 1;
        connect = connectBuilder
            .numWorkers(numWorkers)
            .build();
        // start the clusters
        connect.start();
        final String firstConnectorTopic = "connector-topic-1";
        connect.kafka().createTopic(firstConnectorTopic);
        // Write a secrets file containing the throughput value the connector config will reference
        final File secretsFile = tmp.resolve("test-secrets").toFile();
        final Properties secrets = new Properties();
        final String throughputSecretKey = "secret-throughput";
        secrets.put(throughputSecretKey, "10");
        try (FileOutputStream secretsOutputStream = new FileOutputStream(secretsFile)) {
            secrets.store(secretsOutputStream, null);
        }
        ConnectorHandle connectorHandle = RuntimeHandles.get().connectorHandle(CONNECTOR_NAME);
        connectorHandle.expectedCommits(NUM_TASKS * 2);
        // Configure the connector with a config-provider reference to the secrets file
        Map<String, String> connectorConfig = defaultSourceConnectorProps(firstConnectorTopic);
        connectorConfig.put(
                "throughput",
                "${file:" + secretsFile.getAbsolutePath() + ":" + throughputSecretKey + "}"
        );
        connect.configureConnector(CONNECTOR_NAME, connectorConfig);
        connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(
                CONNECTOR_NAME,
                NUM_TASKS,
                "Connector or its tasks did not start in time"
        );
        connectorHandle.awaitCommits(RECORD_TRANSFER_TIMEOUT_MS);
        // Delete the secrets file, which should render the old task configs invalid
        assertTrue(secretsFile.delete(), "Failed to delete secrets file");
        // Use a start latch here instead of assertConnectorAndExactlyNumTasksAreRunning
        // since failure to reconfigure the tasks (which may occur if the bug this test was written
        // to help catch resurfaces) will not cause existing tasks to fail or stop running
        StartAndStopLatch restarts = connectorHandle.expectedStarts(1);
        final String secondConnectorTopic = "connector-topic-2";
        connect.kafka().createTopic(secondConnectorTopic, 1);
        // Stop using the config provider for this connector (inline the throughput value),
        // and point it at the newly-created second topic
        connectorConfig.put("throughput", "10");
        connectorConfig.put(TOPIC_CONFIG, secondConnectorTopic);
        connect.configureConnector(CONNECTOR_NAME, connectorConfig);
        assertTrue(
                restarts.await(10, TimeUnit.SECONDS),
                "Connector tasks were not restarted in time"
        );
        // Wait for at least one task to commit offsets after being restarted
        connectorHandle.expectedCommits(1);
        connectorHandle.awaitCommits(RECORD_TRANSFER_TIMEOUT_MS);
        // Records in the second topic prove the reconfiguration took effect despite the
        // broken config-provider reference in the previously-published task configs
        final long endOffset = connect.kafka().endOffset(new TopicPartition(secondConnectorTopic, 0));
        assertTrue(
                endOffset > 0,
                "Source connector should have published at least one record to new Kafka topic "
                        + "after being reconfigured"
        );
    }
@Test
public void testRuntimePropertyReconfiguration() throws Exception {
final int offsetCommitIntervalMs = 1_000;
// force fast offset commits
workerProps.put(OFFSET_COMMIT_INTERVAL_MS_CONFIG, Integer.toString(offsetCommitIntervalMs));
connect = connectBuilder.build();
// start the clusters
connect.start();
final String topic = "kafka9228";
connect.kafka().createTopic(topic, 1);
connect.kafka().produce(topic, "non-json-value");
Map<String, String> connectorConfig = new HashMap<>();
connectorConfig.put(CONNECTOR_CLASS_CONFIG, EmptyTaskConfigsConnector.class.getName());
connectorConfig.put(TASKS_MAX_CONFIG, "1");
connectorConfig.put(TOPICS_CONFIG, topic);
// Initially configure the connector to use the JSON converter, which should cause task failure(s)
connectorConfig.put(VALUE_CONVERTER_CLASS_CONFIG, JsonConverter.class.getName());
connectorConfig.put(
VALUE_CONVERTER_CLASS_CONFIG + "." + JsonConverterConfig.SCHEMAS_ENABLE_CONFIG,
"false"
);
connect.configureConnector(CONNECTOR_NAME, connectorConfig);
connect.assertions().assertConnectorIsRunningAndTasksHaveFailed(
CONNECTOR_NAME,
1,
"Connector did not start or task did not fail in time"
);
assertEquals(
new ConnectorOffsets(List.of()),
connect.connectorOffsets(CONNECTOR_NAME),
"Connector should not have any committed offsets when only task fails on first record"
);
// Reconfigure the connector to use the string converter, which should not cause any more task failures
connectorConfig.put(VALUE_CONVERTER_CLASS_CONFIG, StringConverter.class.getName());
connectorConfig.remove(
KEY_CONVERTER_CLASS_CONFIG + "." + JsonConverterConfig.SCHEMAS_ENABLE_CONFIG
);
connect.configureConnector(CONNECTOR_NAME, connectorConfig);
connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(
CONNECTOR_NAME,
1,
"Connector or tasks did not start in time"
);
Map<String, Object> expectedOffsetKey = new HashMap<>();
expectedOffsetKey.put(SinkUtils.KAFKA_TOPIC_KEY, topic);
expectedOffsetKey.put(SinkUtils.KAFKA_PARTITION_KEY, 0);
Map<String, Object> expectedOffsetValue = Map.of(SinkUtils.KAFKA_OFFSET_KEY, 1);
ConnectorOffset expectedOffset = new ConnectorOffset(expectedOffsetKey, expectedOffsetValue);
ConnectorOffsets expectedOffsets = new ConnectorOffsets(List.of(expectedOffset));
// Wait for it to commit offsets, signaling that it has successfully processed the record we produced earlier
waitForCondition(
() -> expectedOffsets.equals(connect.connectorOffsets(CONNECTOR_NAME)),
offsetCommitIntervalMs * 2,
"Task did not successfully process record and/or commit offsets in time"
);
}
@Test
public void testPluginAliases() throws Exception {
connect = connectBuilder.build();
// start the clusters
connect.start();
// Create a topic; not strictly necessary but prevents log spam when we start a source connector later
final String topic = "kafka17150";
connect.kafka().createTopic(topic, 1);
Map<String, String> baseConnectorConfig = new HashMap<>();
// General connector properties
baseConnectorConfig.put(TASKS_MAX_CONFIG, Integer.toString(NUM_TASKS));
// Aliased converter classes
baseConnectorConfig.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getSimpleName());
baseConnectorConfig.put(VALUE_CONVERTER_CLASS_CONFIG, StringConverter.class.getSimpleName());
baseConnectorConfig.put(HEADER_CONVERTER_CLASS_CONFIG, StringConverter.class.getSimpleName());
// Aliased SMT and predicate classes
baseConnectorConfig.put(TRANSFORMS_CONFIG, "filter");
baseConnectorConfig.put(TRANSFORMS_CONFIG + ".filter.type", Filter.class.getSimpleName());
baseConnectorConfig.put(TRANSFORMS_CONFIG + ".filter.predicate", "tombstone");
baseConnectorConfig.put(PREDICATES_CONFIG, "tombstone");
baseConnectorConfig.put(PREDICATES_CONFIG + ".tombstone.type", RecordIsTombstone.class.getSimpleName());
// Test a source connector
final String sourceConnectorName = "plugins-alias-test-source";
Map<String, String> sourceConnectorConfig = new HashMap<>(baseConnectorConfig);
// Aliased source connector | ConnectWorkerIntegrationTest |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/ser/jdk/MapKeySerializationTest.java | {
"start": 1672,
"end": 1711
} | enum ____ {
inner;
}
| Outer |
java | apache__flink | flink-core/src/test/java/org/apache/flink/api/java/typeutils/TypeExtractorTest.java | {
"start": 82730,
"end": 82891
} | class ____<K> extends Tuple2<K[], K> {
public CustomTuple2WithArray() {
// default constructor
}
}
public | CustomTuple2WithArray |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/criteria/valuehandlingmode/inline/EqualityComparisonTest.java | {
"start": 10495,
"end": 11076
} | class ____ implements java.io.Serializable {
private Date inceptionDate;
private Date soldDate;
public ShelfLife() {
}
public ShelfLife(Date inceptionDate, Date soldDate) {
this.inceptionDate = inceptionDate;
this.soldDate = soldDate;
}
@Basic
public Date getInceptionDate() {
return inceptionDate;
}
public void setInceptionDate(Date inceptionDate) {
this.inceptionDate = inceptionDate;
}
@Basic
public Date getSoldDate() {
return soldDate;
}
public void setSoldDate(Date soldDate) {
this.soldDate = soldDate;
}
}
}
| ShelfLife |
java | hibernate__hibernate-orm | tooling/metamodel-generator/src/test/java/org/hibernate/processor/test/embeddable/EmbeddableEntity.java | {
"start": 406,
"end": 573
} | class ____ {
@OneToMany(targetEntity = Stuff.class)
Set<IStuff> stuffs = new HashSet<IStuff>();
public Set<IStuff> getStuffs() {
return stuffs;
}
}
| EmbeddableEntity |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusResponse.java | {
"start": 559,
"end": 1756
} | class ____ extends ActionResponse implements ToXContentObject {
private final boolean eligibleToStartBasic;
public GetBasicStatusResponse(boolean eligibleToStartBasic) {
this.eligibleToStartBasic = eligibleToStartBasic;
}
boolean isEligibleToStartBasic() {
return eligibleToStartBasic;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeBoolean(eligibleToStartBasic);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field("eligible_to_start_basic", eligibleToStartBasic);
builder.endObject();
return builder;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
GetBasicStatusResponse that = (GetBasicStatusResponse) o;
return eligibleToStartBasic == that.eligibleToStartBasic;
}
@Override
public int hashCode() {
return Objects.hash(eligibleToStartBasic);
}
}
| GetBasicStatusResponse |
java | quarkusio__quarkus | devtools/project-core-extension-codestarts/src/main/resources/codestarts/quarkus/extension-codestarts/picocli-codestart/java/src/main/java/org/acme/GreetingCommand.java | {
"start": 191,
"end": 466
} | class ____ implements Runnable {
@Parameters(paramLabel = "<name>", defaultValue = "picocli",
description = "Your name.")
String name;
@Override
public void run() {
System.out.printf("Hello %s, go go commando!%n", name);
}
}
| GreetingCommand |
java | apache__kafka | clients/src/main/java/org/apache/kafka/clients/producer/internals/ProduceRequestResult.java | {
"start": 1526,
"end": 7854
} | class ____ {
private final CountDownLatch latch = new CountDownLatch(1);
private final TopicPartition topicPartition;
/**
* List of dependent ProduceRequestResults created when this batch is split.
* When a batch is too large to send, it's split into multiple smaller batches.
* The original batch's ProduceRequestResult tracks all the split batches here
* so that flush() can wait for all splits to complete via awaitAllDependents().
*/
private final List<ProduceRequestResult> dependentResults = new ArrayList<>();
private volatile Long baseOffset = null;
private volatile long logAppendTime = RecordBatch.NO_TIMESTAMP;
private volatile Function<Integer, RuntimeException> errorsByIndex;
/**
* Create an instance of this class.
*
* @param topicPartition The topic and partition to which this record set was sent
*/
public ProduceRequestResult(TopicPartition topicPartition) {
this.topicPartition = topicPartition;
}
/**
* Set the result of the produce request.
*
* @param baseOffset The base offset assigned to the record
* @param logAppendTime The log append time or -1 if CreateTime is being used
* @param errorsByIndex Function mapping the batch index to the exception, or null if the response was successful
*/
public void set(long baseOffset, long logAppendTime, Function<Integer, RuntimeException> errorsByIndex) {
this.baseOffset = baseOffset;
this.logAppendTime = logAppendTime;
this.errorsByIndex = errorsByIndex;
}
/**
* Mark this request as complete and unblock any threads waiting on its completion.
*/
public void done() {
if (baseOffset == null)
throw new IllegalStateException("The method `set` must be invoked before this method.");
this.latch.countDown();
}
/**
* Add a dependent ProduceRequestResult.
* This is used when a batch is split into multiple batches - in some cases like flush(), the original
* batch's result should not complete until all split batches have completed.
*
* @param dependentResult The dependent result to wait for
*/
public void addDependent(ProduceRequestResult dependentResult) {
synchronized (dependentResults) {
dependentResults.add(dependentResult);
}
}
/**
* Await the completion of this request.
*
* This only waits for THIS request's latch and not dependent results.
* When a batch is split into multiple batches, dependent results are created and tracked
* separately, but this method does not wait for them. Individual record futures automatically
* handle waiting for their respective split batch via {@link FutureRecordMetadata#chain(FutureRecordMetadata)},
* which redirects the future to point to the correct split batch's result.
*
* For flush() semantics that require waiting for all dependent results, use
* {@link #awaitAllDependents()}.
*/
public void await() throws InterruptedException {
latch.await();
}
/**
* Await the completion of this request (up to the given time interval)
* @param timeout The maximum time to wait
* @param unit The unit for the max time
* @return true if the request completed, false if we timed out
*/
public boolean await(long timeout, TimeUnit unit) throws InterruptedException {
return latch.await(timeout, unit);
}
/**
* Await the completion of this request and all the dependent requests.
*
* This method is used by flush() to ensure all split batches have completed before
* returning. This method waits for all dependent {@link ProduceRequestResult}s that
* were created when the batch was split.
*
* @throws InterruptedException if the thread is interrupted while waiting
*/
public void awaitAllDependents() throws InterruptedException {
Queue<ProduceRequestResult> toWait = new ArrayDeque<>();
toWait.add(this);
while (!toWait.isEmpty()) {
ProduceRequestResult current = toWait.poll();
// first wait for THIS result's latch to be released
current.latch.await();
// add all dependent split batches to the queue.
// we synchronize to get a consistent snapshot, then release the lock
// before continuing but the actual waiting happens outside the lock.
synchronized (current.dependentResults) {
toWait.addAll(current.dependentResults);
}
}
}
/**
* The base offset for the request (the first offset in the record set)
*/
public long baseOffset() {
return baseOffset;
}
/**
* Return true if log append time is being used for this topic
*/
public boolean hasLogAppendTime() {
return logAppendTime != RecordBatch.NO_TIMESTAMP;
}
/**
* The log append time or -1 if CreateTime is being used
*/
public long logAppendTime() {
return logAppendTime;
}
/**
* The error thrown (generally on the server) while processing this request
*/
public RuntimeException error(int batchIndex) {
if (errorsByIndex == null) {
return null;
} else {
return errorsByIndex.apply(batchIndex);
}
}
/**
* The topic and partition to which the record was appended
*/
public TopicPartition topicPartition() {
return topicPartition;
}
/**
* Has the request completed?
*
* This method only checks if THIS request has completed and not its dependent results.
* When a batch is split into multiple batches, the dependent split batches are tracked
* separately. Individual record futures handle waiting for their respective split
* batch via {@link FutureRecordMetadata#chain(FutureRecordMetadata)}, which updates the
* {@code nextRecordMetadata} pointer to follow the correct split batch.
*
* For flush() semantics that require waiting for all dependent results, use
* {@link #awaitAllDependents()}.
*/
public boolean completed() {
return this.latch.getCount() == 0L;
}
}
| ProduceRequestResult |
java | google__guava | android/guava-testlib/src/com/google/common/collect/testing/IteratorTester.java | {
"start": 3714,
"end": 4387
} | class ____<E extends @Nullable Object>
extends AbstractIteratorTester<E, Iterator<E>> {
/**
* Creates an IteratorTester.
*
* @param steps how many operations to test for each tested pair of iterators
* @param features the features supported by the iterator
*/
protected IteratorTester(
int steps,
Iterable<? extends IteratorFeature> features,
Iterable<E> expectedElements,
KnownOrder knownOrder) {
super(steps, Collections.singleton(null), features, expectedElements, knownOrder, 0);
}
@Override
protected final Iterable<Stimulus<E, Iterator<E>>> getStimulusValues() {
return iteratorStimuli();
}
}
| IteratorTester |
java | hibernate__hibernate-orm | hibernate-envers/src/main/java/org/hibernate/envers/internal/entities/mapper/AbstractMapper.java | {
"start": 3891,
"end": 4427
} | class ____
* @return a new instance of the class
*/
protected <T> T newObjectInstance(Class<T> clazz, Object... args) {
try {
final Constructor<T> constructor = ReflectHelper.getDefaultConstructor( clazz );
if ( constructor == null ) {
throw new AuditException( "Failed to locate default constructor for class: " + clazz.getName() );
}
return constructor.newInstance( args );
}
catch (InstantiationException | IllegalAccessException | InvocationTargetException e) {
throw new AuditException( e );
}
}
}
| type |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/util/DistributedRuntimeUDFContext.java | {
"start": 2010,
"end": 5709
} | class ____ extends AbstractRuntimeUDFContext {
private final HashMap<String, BroadcastVariableMaterialization<?, ?>> broadcastVars =
new HashMap<String, BroadcastVariableMaterialization<?, ?>>();
private final ExternalResourceInfoProvider externalResourceInfoProvider;
public DistributedRuntimeUDFContext(
JobInfo jobInfo,
TaskInfo taskInfo,
UserCodeClassLoader userCodeClassLoader,
ExecutionConfig executionConfig,
Map<String, Future<Path>> cpTasks,
Map<String, Accumulator<?, ?>> accumulators,
OperatorMetricGroup metrics,
ExternalResourceInfoProvider externalResourceInfoProvider) {
super(
jobInfo,
taskInfo,
userCodeClassLoader,
executionConfig,
accumulators,
cpTasks,
metrics);
this.externalResourceInfoProvider =
Preconditions.checkNotNull(externalResourceInfoProvider);
}
@Override
public boolean hasBroadcastVariable(String name) {
return this.broadcastVars.containsKey(name);
}
@Override
public <T> List<T> getBroadcastVariable(String name) {
Preconditions.checkNotNull(name, "The broadcast variable name must not be null.");
// check if we have an initialized version
@SuppressWarnings("unchecked")
BroadcastVariableMaterialization<T, ?> variable =
(BroadcastVariableMaterialization<T, ?>) this.broadcastVars.get(name);
if (variable != null) {
try {
return variable.getVariable();
} catch (InitializationTypeConflictException e) {
throw new RuntimeException(
"The broadcast variable '"
+ name
+ "' has been initialized by a prior call to a "
+ e.getType());
}
} else {
throw new IllegalArgumentException(
"The broadcast variable with name '" + name + "' has not been set.");
}
}
@Override
public <T, C> C getBroadcastVariableWithInitializer(
String name, BroadcastVariableInitializer<T, C> initializer) {
Preconditions.checkNotNull(name, "The broadcast variable name must not be null.");
Preconditions.checkNotNull(
initializer, "The broadcast variable initializer must not be null.");
// check if we have an initialized version
@SuppressWarnings("unchecked")
BroadcastVariableMaterialization<T, C> variable =
(BroadcastVariableMaterialization<T, C>) this.broadcastVars.get(name);
if (variable != null) {
return variable.getVariable(initializer);
} else {
throw new IllegalArgumentException(
"The broadcast variable with name '" + name + "' has not been set.");
}
}
@Override
public Set<ExternalResourceInfo> getExternalResourceInfos(String resourceName) {
return externalResourceInfoProvider.getExternalResourceInfos(resourceName);
}
// --------------------------------------------------------------------------------------------
public void setBroadcastVariable(String name, BroadcastVariableMaterialization<?, ?> value) {
this.broadcastVars.put(name, value);
}
public void clearBroadcastVariable(String name) {
this.broadcastVars.remove(name);
}
public void clearAllBroadcastVariables() {
this.broadcastVars.clear();
}
}
| DistributedRuntimeUDFContext |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/exception/ExceptionTest.java | {
"start": 1027,
"end": 4655
} | class ____ {
@AfterEach
public void tearDown(EntityManagerFactoryScope scope) {
scope.getEntityManagerFactory().getSchemaManager().truncate();
}
@Test
public void testOptimisticLockingException(EntityManagerFactoryScope scope) {
EntityManager em = scope.getEntityManagerFactory().createEntityManager();
EntityManager em2 = scope.getEntityManagerFactory().createEntityManager();
try {
em.getTransaction().begin();
Music music = new Music();
music.setName( "Old Country" );
em.persist( music );
em.getTransaction().commit();
em2.getTransaction().begin();
Music music2 = em2.find( Music.class, music.getId() );
music2.setName( "HouseMusic" );
em2.getTransaction().commit();
em.getTransaction().begin();
music.setName( "Rock" );
try {
em.flush();
fail( "Should raise an optimistic lock exception" );
}
catch (OptimisticLockException e) {
//success
assertThat( e.getEntity() ).isEqualTo( music );
}
catch (Exception e) {
fail( "Should raise an optimistic lock exception" );
}
finally {
em.getTransaction().rollback();
em.close();
}
}
catch (Exception e) {
if ( em.getTransaction().isActive() ) {
em.getTransaction().rollback();
}
if ( em2.getTransaction().isActive() ) {
em2.getTransaction().rollback();
}
throw e;
}
finally {
em.close();
em2.close();
}
}
@Test
public void testEntityNotFoundException(EntityManagerFactoryScope scope) {
EntityManager em = scope.getEntityManagerFactory().createEntityManager();
Music music = em.getReference( Music.class, -1 );
try {
music.getName();
fail( "Non existent entity should raise an exception when state is accessed" );
}
catch ( EntityNotFoundException e ) {
//"success"
}
finally {
em.close();
}
}
@Test
@SkipForDialect(dialectClass = TiDBDialect.class, reason = "TiDB do not support FK violation checking")
public void testConstraintViolationException(EntityManagerFactoryScope scope) {
scope.inEntityManager(
entityManager -> {
try {
entityManager.getTransaction().begin();
Music music = new Music();
music.setName( "Jazz" );
entityManager.persist( music );
Musician lui = new Musician();
lui.setName( "Lui Armstrong" );
lui.setFavouriteMusic( music );
entityManager.persist( lui );
entityManager.getTransaction().commit();
try {
entityManager.getTransaction().begin();
String hqlDelete = "delete from Music where name = :name";
entityManager.createQuery( hqlDelete ).setParameter( "name", "Jazz" ).executeUpdate();
entityManager.getTransaction().commit();
fail();
}
catch ( PersistenceException e ) {
assertTrue( e instanceof ConstraintViolationException, "Should be a constraint violation" );
entityManager.getTransaction().rollback();
}
}
catch (Exception e) {
if ( entityManager.getTransaction().isActive() ) {
entityManager.getTransaction().rollback();
}
throw e;
}
}
);
}
@Test
@JiraKey( value = "HHH-4676" )
public void testInterceptor(EntityManagerFactoryScope scope) {
EntityManager em = scope.getEntityManagerFactory().createEntityManager();
em.getTransaction().begin();
Instrument instrument = new Instrument();
instrument.setName( "Guitar" );
try {
em.persist( instrument );
fail( "Commit should have failed." );
}
catch ( RuntimeException e ) {
assertTrue( em.getTransaction().getRollbackOnly() );
em.getTransaction().rollback();
}
finally {
em.close();
}
}
}
| ExceptionTest |
java | apache__kafka | streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/ProcessingExceptionHandlerIntegrationTest.java | {
"start": 30856,
"end": 33234
} | class ____ implements ProcessingExceptionHandler {
@Override
public ProcessingExceptionHandler.ProcessingHandlerResponse handle(final ErrorHandlerContext context, final Record<?, ?> record, final Exception exception) {
assertEquals("ID123-1", Serdes.String().deserializer().deserialize("topic", context.sourceRawKey()));
assertEquals("ID123-A1", Serdes.String().deserializer().deserialize("topic", context.sourceRawValue()));
return ProcessingExceptionHandler.ProcessingHandlerResponse.FAIL;
}
@Override
public void configure(final Map<String, ?> configs) {
// No-op
}
}
/**
* Metric name for dropped records total.
*
* @return the metric name
*/
private MetricName droppedRecordsTotalMetric() {
return new MetricName(
"dropped-records-total",
"stream-task-metrics",
"The total number of dropped records",
mkMap(
mkEntry("thread-id", threadId),
mkEntry("task-id", "0_0")
)
);
}
/**
* Metric name for dropped records rate.
*
* @return the metric name
*/
private MetricName droppedRecordsRateMetric() {
return new MetricName(
"dropped-records-rate",
"stream-task-metrics",
"The average number of dropped records per second",
mkMap(
mkEntry("thread-id", threadId),
mkEntry("task-id", "0_0")
)
);
}
/**
* Processor supplier that throws a runtime exception on process.
*
* @return the processor supplier
*/
private ProcessorSupplier<String, String, String, String> runtimeErrorProcessorSupplierMock() {
return () -> new ContextualProcessor<String, String, String, String>() {
@Override
public void process(final Record<String, String> record) {
if (record.key().contains("ERR") || record.value().equals("3")) {
throw new RuntimeException("Exception should be handled by processing exception handler");
}
context().forward(new Record<>(record.key(), record.value(), record.timestamp()));
}
};
}
} | AssertSourceRawRecordProcessingExceptionHandlerMockTest |
java | apache__dubbo | dubbo-plugin/dubbo-qos/src/main/java/org/apache/dubbo/qos/command/impl/GetOpenAPI.java | {
"start": 1682,
"end": 3722
} | class ____ implements BaseCommand {
private final FrameworkModel frameworkModel;
public GetOpenAPI(FrameworkModel frameworkModel) {
this.frameworkModel = frameworkModel;
}
@Override
public String execute(CommandContext commandContext, String[] args) {
OpenAPIService openAPIService = frameworkModel.getBean(OpenAPIService.class);
if (openAPIService == null) {
return "OpenAPI is not available";
}
OpenAPIRequest request = new OpenAPIRequest();
int len = args.length;
if (len > 0) {
if (len == 1) {
String arg0 = args[0];
if (arg0.indexOf('.') > 0) {
request.setService(new String[] {arg0});
} else {
request.setGroup(arg0);
}
} else {
for (int i = 0; i < len; i += 2) {
String value = args[i + 1];
switch (StringUtils.substringAfterLast(args[i], '-')) {
case "group":
request.setGroup(value);
break;
case "version":
request.setVersion(value);
break;
case "tag":
request.setTag(StringUtils.tokenize(value));
break;
case "service":
request.setService(StringUtils.tokenize(value));
break;
case "openapi":
request.setOpenapi(value);
break;
case "format":
request.setFormat(value);
break;
default:
break;
}
}
}
}
return openAPIService.getDocument(request);
}
}
| GetOpenAPI |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/CassandraEndpointBuilderFactory.java | {
"start": 42781,
"end": 43534
} | class ____ implements logic for converting ResultSet
* into message body ALL, ONE, LIMIT_10, LIMIT_100...
*
* The option is a:
* <code>org.apache.camel.component.cassandra.ResultSetConversionStrategy</code> type.
*
* Group: advanced
*
* @param resultSetConversionStrategy the value to set
* @return the dsl builder
*/
default AdvancedCassandraEndpointProducerBuilder resultSetConversionStrategy(org.apache.camel.component.cassandra.ResultSetConversionStrategy resultSetConversionStrategy) {
doSetProperty("resultSetConversionStrategy", resultSetConversionStrategy);
return this;
}
/**
* To use a custom | that |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/checkreturnvalue/CanIgnoreReturnValueSuggesterTest.java | {
"start": 23764,
"end": 24094
} | interface ____ {}
public Client setName(String name) {
this.name = name;
return this;
}
}
""")
.addOutputLines(
"Client.java",
"""
package com.google.frobber;
public final | CanIgnoreReturnValue |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/InstanceOfAssertFactoriesTest.java | {
"start": 75472,
"end": 76170
} | class ____ {
private final Object actual = new StringBuffer("string");
@Test
void createAssert() {
// WHEN
AbstractCharSequenceAssert<?, ? extends CharSequence> result = STRING_BUFFER.createAssert(actual);
// THEN
result.startsWith("str");
}
@Test
void createAssert_with_ValueProvider() {
// GIVEN
ValueProvider<?> valueProvider = mockThatDelegatesTo(type -> actual);
// WHEN
AbstractCharSequenceAssert<?, ? extends CharSequence> result = STRING_BUFFER.createAssert(valueProvider);
// THEN
result.startsWith("str");
verify(valueProvider).apply(StringBuffer.class);
}
}
@Nested
| StringBuffer_Factory |
java | spring-projects__spring-framework | spring-expression/src/test/java/org/springframework/expression/spel/EvaluationTests.java | {
"start": 56778,
"end": 56826
} | class ____ {
public int iii = 99;
}
}
| Spr9751_2 |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_3200/Issue3246.java | {
"start": 187,
"end": 798
} | class ____ extends TestCase {
public void test_for_issue() throws Exception {
String jsonStr = "{\"d_id\":\"bphyean01\",\"isOpenMergeCode\":0,\"offlineOrder\":false,\"offlineOrderType\":-1,\"og\":0,\"pushIdFromRemote\":false,\"qrisAmountPrice\":22000,\"s_req\":0,\"s_t\":1,\"skr_id\":0,\"type\":1,\"c_id\":471,\"o_$\":5500.0,\"am\":4,\"$_tp\":\"bp\",\"o_t\":1,\"a_m\":3}";
Order parseOrder = JSON.parseObject(jsonStr,Order.class);
assertEquals(Integer.valueOf(4), parseOrder.getAmount());
assertEquals("3", parseOrder.getAddMoney());
}
@Data
public static | Issue3246 |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/rescore/QueryRescoreMode.java | {
"start": 738,
"end": 2797
} | enum ____ implements Writeable {
Avg {
@Override
public float combine(float primary, float secondary) {
return (primary + secondary) / 2;
}
@Override
public String toString() {
return "avg";
}
},
Max {
@Override
public float combine(float primary, float secondary) {
return Math.max(primary, secondary);
}
@Override
public String toString() {
return "max";
}
},
Min {
@Override
public float combine(float primary, float secondary) {
return Math.min(primary, secondary);
}
@Override
public String toString() {
return "min";
}
},
Total {
@Override
public float combine(float primary, float secondary) {
return primary + secondary;
}
@Override
public String toString() {
return "sum";
}
},
Multiply {
@Override
public float combine(float primary, float secondary) {
return primary * secondary;
}
@Override
public String toString() {
return "product";
}
};
public abstract float combine(float primary, float secondary);
public static QueryRescoreMode readFromStream(StreamInput in) throws IOException {
return in.readEnum(QueryRescoreMode.class);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeEnum(this);
}
public static QueryRescoreMode fromString(String scoreMode) {
for (QueryRescoreMode mode : values()) {
if (scoreMode.toLowerCase(Locale.ROOT).equals(mode.name().toLowerCase(Locale.ROOT))) {
return mode;
}
}
throw new IllegalArgumentException("illegal score_mode [" + scoreMode + "]");
}
@Override
public String toString() {
return name().toLowerCase(Locale.ROOT);
}
}
| QueryRescoreMode |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/bytecode/spi/BytecodeProvider.java | {
"start": 885,
"end": 1313
} | interface ____ extends Service {
/**
* Retrieve the specific factory for this provider capable of
* generating run-time proxies for lazy-loading purposes.
*
* @return The provider specific factory.
*/
ProxyFactoryFactory getProxyFactoryFactory();
/**
* Retrieve the ReflectionOptimizer delegate for this provider
* capable of generating reflection optimization components.
*
* @param clazz The | BytecodeProvider |
java | apache__maven | its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng3924XmlMarkupInterpolationTest.java | {
"start": 1210,
"end": 2151
} | class ____ extends AbstractMavenIntegrationTestCase {
/**
* Test that interpolation of properties that resolve to XML markup doesn't crash the project builder.
*
* @throws Exception in case of failure
*/
@Test
public void testitMNG3924() throws Exception {
File testDir = extractResources("/mng-3924");
Verifier verifier = newVerifier(testDir.getAbsolutePath());
verifier.setAutoclean(false);
verifier.deleteDirectory("target");
verifier.addCliArgument("validate");
verifier.execute();
verifier.verifyErrorFreeLog();
Properties props = verifier.loadProperties("target/xml.properties");
assertEquals("<?xml version='1.0'?>Tom&Jerry", props.getProperty("project.properties.xmlMarkup"));
assertEquals("<?xml version='1.0'?>Tom&Jerry", props.getProperty("project.properties.xmlTest"));
}
}
| MavenITmng3924XmlMarkupInterpolationTest |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/adaptive/DefaultStateTransitionManagerTest.java | {
"start": 2038,
"end": 14444
} | class ____ {
@Test
void testTriggerWithoutChangeEventNoopInCooldownPhase() {
final TestingStateTransitionManagerContext ctx =
TestingStateTransitionManagerContext.stableContext().withDesiredResources();
final DefaultStateTransitionManager testInstance = ctx.createTestInstanceInCooldownPhase();
triggerWithoutPhaseMove(ctx, testInstance, Cooldown.class);
}
@Test
void testTriggerWithoutChangeEventNoopInIdlingPhase() {
final TestingStateTransitionManagerContext ctx =
TestingStateTransitionManagerContext.stableContext().withDesiredResources();
final DefaultStateTransitionManager testInstance =
ctx.createTestInstanceThatPassedCooldownPhase();
triggerWithoutPhaseMove(ctx, testInstance, Idling.class);
}
@Test
void testTriggerWithoutChangeEventNoopInTransitioningPhase() {
final TestingStateTransitionManagerContext ctx =
TestingStateTransitionManagerContext.stableContext().withDesiredResources();
final DefaultStateTransitionManager testInstance =
ctx.createTestInstanceInTransitioningPhase();
triggerWithoutPhaseMove(ctx, testInstance, Transitioning.class);
}
@Test
void testStateTransitionRightAfterCooldown() {
final TestingStateTransitionManagerContext ctx =
TestingStateTransitionManagerContext.stableContext().withDesiredResources();
final DefaultStateTransitionManager testInstance = ctx.createTestInstanceInCooldownPhase();
changeWithoutPhaseMove(ctx, testInstance, Cooldown.class);
triggerWithoutPhaseMove(ctx, testInstance, Cooldown.class);
ctx.transitionToInclusiveCooldownEnd();
assertPhaseWithoutStateTransition(ctx, testInstance, Cooldown.class);
testInstance.onTrigger();
ctx.passTime(Duration.ofMillis(1));
assertPhaseWithoutStateTransition(ctx, testInstance, Stabilizing.class);
testInstance.onTrigger();
assertFinalStateTransitionHappened(ctx, testInstance);
}
@Test
void testDesiredChangeInCooldownPhase() {
final TestingStateTransitionManagerContext ctx =
TestingStateTransitionManagerContext.stableContext().withDesiredResources();
final DefaultStateTransitionManager testInstance = ctx.createTestInstanceInCooldownPhase();
changeWithoutPhaseMove(ctx, testInstance, Cooldown.class);
triggerWithoutPhaseMove(ctx, testInstance, Cooldown.class);
ctx.transitionOutOfCooldownPhase();
assertPhaseWithoutStateTransition(ctx, testInstance, Stabilizing.class);
testInstance.onTrigger();
assertFinalStateTransitionHappened(ctx, testInstance);
}
@Test
void testDesiredChangeInIdlingPhase() {
final TestingStateTransitionManagerContext ctx =
TestingStateTransitionManagerContext.stableContext().withDesiredResources();
final DefaultStateTransitionManager testInstance =
ctx.createTestInstanceThatPassedCooldownPhase();
assertPhaseWithoutStateTransition(ctx, testInstance, Idling.class);
testInstance.onChange();
assertPhaseWithoutStateTransition(ctx, testInstance, Stabilizing.class);
testInstance.onTrigger();
assertFinalStateTransitionHappened(ctx, testInstance);
}
@Test
void testDesiredChangeInStabilizedPhase() {
final TestingStateTransitionManagerContext ctx =
TestingStateTransitionManagerContext.stableContext().withSufficientResources();
final DefaultStateTransitionManager testInstance =
ctx.createTestInstanceInStabilizedPhase();
assertPhaseWithoutStateTransition(ctx, testInstance, Stabilized.class);
withDesiredChange(ctx, testInstance);
assertPhaseWithoutStateTransition(ctx, testInstance, Stabilized.class);
testInstance.onTrigger();
assertFinalStateTransitionHappened(ctx, testInstance);
}
@Test
void testDesiredResourcesInStabilizingPhaseAfterMaxTriggerDelay() {
final TestingStateTransitionManagerContext ctx =
TestingStateTransitionManagerContext.stableContext()
.withSufficientResources()
.withMaxTriggerDelay(Duration.ofSeconds(10));
final DefaultStateTransitionManager testInstance =
ctx.createTestInstance(
manager -> {
manager.onChange();
ctx.transitionOutOfCooldownPhase();
});
assertPhaseWithoutStateTransition(ctx, testInstance, Stabilizing.class);
ctx.passMaxDelayTriggerTimeout();
withDesiredChange(ctx, testInstance);
assertPhaseWithoutStateTransition(ctx, testInstance, Stabilizing.class);
ctx.passMaxDelayTriggerTimeout();
assertFinalStateTransitionHappened(ctx, testInstance);
}
@Test
void testNoResourcesChangeInCooldownPhase() {
final TestingStateTransitionManagerContext ctx =
TestingStateTransitionManagerContext.stableContext();
final DefaultStateTransitionManager testInstance = ctx.createTestInstanceInCooldownPhase();
changeWithoutPhaseMove(ctx, testInstance, Cooldown.class);
triggerWithoutPhaseMove(ctx, testInstance, Cooldown.class);
ctx.transitionOutOfCooldownPhase();
assertPhaseWithoutStateTransition(ctx, testInstance, Idling.class);
}
@Test
void testNoResourcesChangeInIdlingPhase() {
final TestingStateTransitionManagerContext ctx =
TestingStateTransitionManagerContext.stableContext();
final DefaultStateTransitionManager testInstance =
ctx.createTestInstanceThatPassedCooldownPhase();
changeWithoutPhaseMove(ctx, testInstance, Idling.class);
triggerWithoutPhaseMove(ctx, testInstance, Idling.class);
}
@Test
void testSufficientResourcesInStabilizingPhaseAfterMaxTriggerDelay() {
final TestingStateTransitionManagerContext ctx =
TestingStateTransitionManagerContext.stableContext()
.withSufficientResources()
.withMaxTriggerDelay(Duration.ofSeconds(10));
final DefaultStateTransitionManager testInstance =
ctx.createTestInstance(
manager -> {
manager.onChange();
ctx.transitionOutOfCooldownPhase();
});
assertPhaseWithoutStateTransition(ctx, testInstance, Stabilizing.class);
ctx.passMaxDelayTriggerTimeout();
assertPhaseWithoutStateTransition(ctx, testInstance, Stabilizing.class);
}
@Test
void testSufficientResourcesInStabilizedPhaseAfterMaxTriggerDelay() {
final TestingStateTransitionManagerContext ctx =
TestingStateTransitionManagerContext.stableContext().withSufficientResources();
final DefaultStateTransitionManager testInstance =
ctx.createTestInstanceInStabilizedPhase();
assertPhaseWithoutStateTransition(ctx, testInstance, Stabilized.class);
ctx.passMaxDelayTriggerTimeout();
assertFinalStateTransitionHappened(ctx, testInstance);
}
@Test
void testSufficientChangeInCooldownPhase() {
final TestingStateTransitionManagerContext ctx =
TestingStateTransitionManagerContext.stableContext().withSufficientResources();
final DefaultStateTransitionManager testInstance = ctx.createTestInstanceInCooldownPhase();
changeWithoutPhaseMove(ctx, testInstance, Cooldown.class);
triggerWithoutPhaseMove(ctx, testInstance, Cooldown.class);
ctx.transitionOutOfCooldownPhase();
triggerWithoutPhaseMove(ctx, testInstance, Stabilizing.class);
ctx.passResourceStabilizationTimeout();
testInstance.onTrigger();
assertFinalStateTransitionHappened(ctx, testInstance);
}
@Test
void testSufficientChangeInIdlingPhase() {
final TestingStateTransitionManagerContext ctx =
TestingStateTransitionManagerContext.stableContext().withSufficientResources();
final DefaultStateTransitionManager testInstance =
ctx.createTestInstanceThatPassedCooldownPhase();
assertPhaseWithoutStateTransition(ctx, testInstance, Idling.class);
testInstance.onChange();
triggerWithoutPhaseMove(ctx, testInstance, Stabilizing.class);
ctx.passResourceStabilizationTimeout();
testInstance.onTrigger();
assertFinalStateTransitionHappened(ctx, testInstance);
}
@Test
void testSufficientChangeInStabilizedPhase() {
final TestingStateTransitionManagerContext ctx =
TestingStateTransitionManagerContext.stableContext().withSufficientResources();
final DefaultStateTransitionManager testInstance =
ctx.createTestInstanceInStabilizedPhase();
assertPhaseWithoutStateTransition(ctx, testInstance, Stabilized.class);
testInstance.onTrigger();
assertFinalStateTransitionHappened(ctx, testInstance);
}
@Test
void testSufficientChangeWithSubsequentDesiredChangeInStabilizingPhase() {
final TestingStateTransitionManagerContext ctx =
TestingStateTransitionManagerContext.stableContext().withSufficientResources();
final DefaultStateTransitionManager testInstance =
ctx.createTestInstanceThatPassedCooldownPhase();
assertPhaseWithoutStateTransition(ctx, testInstance, Idling.class);
testInstance.onChange();
triggerWithoutPhaseMove(ctx, testInstance, Stabilizing.class);
withDesiredChange(ctx, testInstance);
assertPhaseWithoutStateTransition(ctx, testInstance, Stabilizing.class);
testInstance.onTrigger();
assertFinalStateTransitionHappened(ctx, testInstance);
}
@Test
void testRevokedChangeInStabilizingPhaseWithSubsequentSufficientChangeInStabilizedPhase() {
final TestingStateTransitionManagerContext ctx =
TestingStateTransitionManagerContext.stableContext().withSufficientResources();
final DefaultStateTransitionManager testInstance =
ctx.createTestInstanceThatPassedCooldownPhase();
assertPhaseWithoutStateTransition(ctx, testInstance, Idling.class);
testInstance.onChange();
triggerWithoutPhaseMove(ctx, testInstance, Stabilizing.class);
ctx.withRevokeResources();
changeWithoutPhaseMove(ctx, testInstance, Stabilizing.class);
triggerWithoutPhaseMove(ctx, testInstance, Stabilizing.class);
ctx.passResourceStabilizationTimeout();
assertPhaseWithoutStateTransition(ctx, testInstance, Stabilized.class);
withSufficientChange(ctx, testInstance);
assertPhaseWithoutStateTransition(ctx, testInstance, Stabilized.class);
testInstance.onTrigger();
assertFinalStateTransitionHappened(ctx, testInstance);
}
@Test
void testRevokedChangeInStabilizedPhase() {
final TestingStateTransitionManagerContext ctx =
TestingStateTransitionManagerContext.stableContext().withSufficientResources();
final DefaultStateTransitionManager testInstance =
ctx.createTestInstanceInStabilizedPhase();
assertPhaseWithoutStateTransition(ctx, testInstance, Stabilized.class);
ctx.withRevokeResources();
testInstance.onTrigger();
assertPhaseWithoutStateTransition(ctx, testInstance, Idling.class);
}
@Test
void testScheduledTaskBeingIgnoredAfterStateChanged() {
final TestingStateTransitionManagerContext ctx =
TestingStateTransitionManagerContext.stableContext();
final DefaultStateTransitionManager testInstance =
ctx.createTestInstanceInStabilizedPhase();
final AtomicBoolean callbackCalled = new AtomicBoolean();
testInstance.scheduleFromNow(
() -> callbackCalled.set(true), Duration.ZERO, new TestPhase());
ctx.triggerOutdatedTasks();
assertThat(callbackCalled).isFalse();
}
private static | DefaultStateTransitionManagerTest |
java | apache__rocketmq | filter/src/main/java/org/apache/rocketmq/filter/parser/TokenMgrError.java | {
"start": 1158,
"end": 5851
} | class ____.
*/
private static final long serialVersionUID = 1L;
/*
* Ordinals for various reasons why an Error of this type can be thrown.
*/
/**
* Lexical error occurred.
*/
static final int LEXICAL_ERROR = 0;
/**
* An attempt was made to create a second instance of a static token manager.
*/
static final int STATIC_LEXER_ERROR = 1;
/**
* Tried to change to an invalid lexical state.
*/
static final int INVALID_LEXICAL_STATE = 2;
/**
* Detected (and bailed out of) an infinite loop in the token manager.
*/
static final int LOOP_DETECTED = 3;
/**
* Indicates the reason why the exception is thrown. It will have
* one of the above 4 values.
*/
int errorCode;
/**
* Replaces unprintable characters by their escaped (or unicode escaped)
* equivalents in the given string
*/
protected static final String addEscapes(String str) {
StringBuilder retval = new StringBuilder();
char ch;
for (int i = 0; i < str.length(); i++) {
switch (str.charAt(i)) {
case 0:
continue;
case '\b':
retval.append("\\b");
continue;
case '\t':
retval.append("\\t");
continue;
case '\n':
retval.append("\\n");
continue;
case '\f':
retval.append("\\f");
continue;
case '\r':
retval.append("\\r");
continue;
case '\"':
retval.append("\\\"");
continue;
case '\'':
retval.append("\\\'");
continue;
case '\\':
retval.append("\\\\");
continue;
default:
if ((ch = str.charAt(i)) < 0x20 || ch > 0x7e) {
String s = "0000" + Integer.toString(ch, 16);
retval.append("\\u" + s.substring(s.length() - 4, s.length()));
} else {
retval.append(ch);
}
continue;
}
}
return retval.toString();
}
/**
* Returns a detailed message for the Error when it is thrown by the
* token manager to indicate a lexical error.
* Parameters :
* eofSeen : indicates if EOF caused the lexical error
* curLexState : lexical state in which this error occurred
* errorLine : line number when the error occurred
* errorColumn : column number when the error occurred
* errorAfter : prefix that was seen before this error occurred
* curchar : the offending character
* Note: You can customize the lexical error message by modifying this method.
*/
protected static String LexicalError(boolean eofSeen, int lexState, int errorLine, int errorColumn,
String errorAfter, char curChar) {
return "Lexical error at line " +
errorLine + ", column " +
errorColumn + ". Encountered: " +
(eofSeen ?
"<EOF> " :
("\"" + addEscapes(String.valueOf(curChar)) + "\"") + " (" + (int) curChar + "), ") +
"after : \"" + addEscapes(errorAfter) + "\"";
}
/**
* You can also modify the body of this method to customize your error messages.
* For example, cases like LOOP_DETECTED and INVALID_LEXICAL_STATE are not
* of end-users concern, so you can return something like :
* <p/>
* "Internal Error : Please file a bug report .... "
* <p/>
* from this method for such cases in the release version of your parser.
*/
@Override
public String getMessage() {
return super.getMessage();
}
/*
* Constructors of various flavors follow.
*/
/**
* No arg constructor.
*/
public TokenMgrError() {
}
/**
* Constructor with message and reason.
*/
public TokenMgrError(String message, int reason) {
super(message);
errorCode = reason;
}
/**
* Full Constructor.
*/
public TokenMgrError(boolean eofSeen, int lexState, int errorLine, int errorColumn, String errorAfter, char curChar,
int reason) {
this(LexicalError(eofSeen, lexState, errorLine, errorColumn, errorAfter, curChar), reason);
}
}
/* JavaCC - OriginalChecksum=de79709675790dcbad2e0d728aa630d1 (do not edit this line) */
| changes |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/collections/MapKeyToOneInEmbeddedIdTest.java | {
"start": 2968,
"end": 3356
} | class ____ implements Serializable {
@ManyToOne
private EntityC entity;
private Integer code;
public EntityBID() {
}
public EntityBID(EntityC entity, Integer code) {
this.entity = entity;
this.code = code;
}
public EntityC getEntity() {
return entity;
}
public Integer getCode() {
return code;
}
}
@Entity( name = "EntityB" )
public static | EntityBID |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/test/checkpointing/CheckpointStoreITCase.java | {
"start": 2979,
"end": 5001
} | class ____ extends TestLogger {
private static final Configuration CONFIGURATION =
new Configuration()
.set(
HighAvailabilityOptions.HA_MODE,
BlockingHighAvailabilityServiceFactory.class.getName());
@ClassRule
public static final MiniClusterWithClientResource CLUSTER =
new MiniClusterWithClientResource(
new MiniClusterResourceConfiguration.Builder()
.setConfiguration(CONFIGURATION)
.build());
@Before
public void setUp() {
BlockingHighAvailabilityServiceFactory.reset();
FailingMapper.reset();
}
@Test
public void testJobClientRemainsResponsiveDuringCompletedCheckpointStoreRecovery()
throws Exception {
final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.enableCheckpointing(10);
RestartStrategyUtils.configureFixedDelayRestartStrategy(
env, 2 /* failure on processing + on recovery */, 0L);
env.fromSource(
new EmitUntilSource(() -> FailingMapper.failedAndProcessed),
WatermarkStrategy.noWatermarks(),
"EmitUntilSourceV2")
.map(new FailingMapper())
.sinkTo(new DiscardingSink<>());
final JobClient jobClient = env.executeAsync();
BlockingHighAvailabilityServiceFactory.fetchRemoteCheckpointsStart.await();
for (int i = 0; i < 10; i++) {
final JobStatus jobStatus = jobClient.getJobStatus().get();
assertEquals(JobStatus.INITIALIZING, jobStatus);
}
BlockingHighAvailabilityServiceFactory.fetchRemoteCheckpointsFinished.countDown();
// Await for job to finish.
jobClient.getJobExecutionResult().get();
checkState(FailingMapper.failedAndProcessed);
}
private static | CheckpointStoreITCase |
java | quarkusio__quarkus | test-framework/junit5/src/test/java/io/quarkus/test/junit/util/QuarkusTestProfileAwareClassOrdererTest.java | {
"start": 14488,
"end": 14798
} | class ____ an actual annotation since the orderer will have to do the meta-check directly
// because ClassDescriptor does not offer any details whether an annotation is directly annotated or meta-annotated
@WithTestResource(value = Manager3.class, scope = TestResourceScope.GLOBAL)
private static | needs |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/script/field/IpDocValuesField.java | {
"start": 4272,
"end": 5578
} | class ____ implements ScriptDocValues.Supplier<InetAddress> {
private final SortedSetDocValues in;
private long[] ords = new long[0];
private int count;
public SortedSetIpSupplier(SortedSetDocValues in) {
this.in = in;
}
@Override
public void setNextDocId(int docId) throws IOException {
count = 0;
if (in.advanceExact(docId)) {
for (int i = 0; i < in.docValueCount(); i++) {
long ord = in.nextOrd();
ords = ArrayUtil.grow(ords, count + 1);
ords[count++] = ord;
}
}
}
@Override
public InetAddress getInternal(int index) {
try {
BytesRef encoded = in.lookupOrd(ords[index]);
return InetAddressPoint.decode(Arrays.copyOfRange(encoded.bytes, encoded.offset, encoded.offset + encoded.length));
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Override
public int size() {
return count;
}
}
/** Used if we do not have global ordinals, such as in the IP runtime field see: {@link IpScriptFieldData} */
protected static | SortedSetIpSupplier |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/lookup/StrSubstitutor.java | {
"start": 2089,
"end": 3130
} | class ____ the following pattern: First an instance is created
* and initialized with the map that contains the values for the available variables.
* If a prefix and/or suffix for variables should be used other than the default ones,
* the appropriate settings can be performed. After that the <code>replace()</code>
* method can be called passing in the source text for interpolation. In the returned
* text all variable references (as long as their values are known) will be resolved.
* The following example demonstrates this:
* </p>
* <pre>
* Map valuesMap = new HashMap<>();
* valuesMap.put("animal", "quick brown fox");
* valuesMap.put("target", "lazy dog");
* String templateString = "The ${animal} jumped over the ${target}.";
* StrSubstitutor sub = new StrSubstitutor(valuesMap);
* String resolvedString = sub.replace(templateString);
* </pre>
* <p>yielding:</p>
* <pre>
* The quick brown fox jumped over the lazy dog.
* </pre>
* <p>
* Also, this | follows |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/data/LobTestEntity.java | {
"start": 468,
"end": 2695
} | class ____ {
@Id
@GeneratedValue
private Integer id;
@Lob
@Audited
private String stringLob;
@Lob
@Audited
private byte[] byteLob;
@Lob
@Audited
private char[] charLob;
@NotAudited
private String data;
public LobTestEntity() {
}
public LobTestEntity(String stringLob, byte[] byteLob, char[] charLob) {
this.stringLob = stringLob;
this.byteLob = byteLob;
this.charLob = charLob;
}
public LobTestEntity(Integer id, String stringLob, byte[] byteLob, char[] charLob) {
this.id = id;
this.stringLob = stringLob;
this.byteLob = byteLob;
this.charLob = charLob;
}
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getStringLob() {
return stringLob;
}
public void setStringLob(String stringLob) {
this.stringLob = stringLob;
}
public byte[] getByteLob() {
return byteLob;
}
public void setByteLob(byte[] byteLob) {
this.byteLob = byteLob;
}
public char[] getCharLob() {
return charLob;
}
public void setCharLob(char[] charLob) {
this.charLob = charLob;
}
public String getData() {
return data;
}
public void setData(String data) {
this.data = data;
}
public boolean equals(Object o) {
if ( this == o ) {
return true;
}
if ( !(o instanceof LobTestEntity) ) {
return false;
}
LobTestEntity that = (LobTestEntity) o;
if ( !Arrays.equals( byteLob, that.byteLob ) ) {
return false;
}
if ( !Arrays.equals( charLob, that.charLob ) ) {
return false;
}
if ( id != null ? !id.equals( that.id ) : that.id != null ) {
return false;
}
if ( stringLob != null ? !stringLob.equals( that.stringLob ) : that.stringLob != null ) {
return false;
}
if ( data != null ? !data.equals( that.data ) : that.data != null ) {
return false;
}
return true;
}
public int hashCode() {
int result;
result = (id != null ? id.hashCode() : 0);
result = 31 * result + (stringLob != null ? stringLob.hashCode() : 0);
result = 31 * result + (byteLob != null ? Arrays.hashCode( byteLob ) : 0);
result = 31 * result + (charLob != null ? Arrays.hashCode( charLob ) : 0);
result = 31 * result + (data != null ? data.hashCode() : 0);
return result;
}
}
| LobTestEntity |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNSToSwitchMappingWithDependency.java | {
"start": 1162,
"end": 2682
} | interface ____ extends DNSToSwitchMapping {
/**
* Get a list of dependent DNS-names for a given DNS-name/IP-address.
* Dependent DNS-names fall into the same fault domain which must be
* taken into account when placing replicas. This is intended to be used for
* cross node group dependencies when node groups are not sufficient to
* distinguish data nodes by fault domains. In practice, this is needed when
* a compute server runs VMs which use shared storage (as opposite to
* directly attached storage). In this case data nodes fall in two different
* fault domains. One fault domain is defined by a compute server and
* the other is defined by storage. With node groups we can group data nodes
* either by server fault domain or by storage fault domain. However one of
* the fault domains cannot be handled and there we need to define cross node
* group dependencies. These dependencies are applied in block placement
* polices which ensure that no two replicas will be on two dependent nodes.
* @param name - host name or IP address of a data node. Input host name
* parameter must take a value of dfs.datanode.hostname config value if this
* config property is set. Otherwise FQDN of the data node is used.
* @return list of dependent host names. If dfs.datanode.hostname config
* property is set, then its value must be returned.
* Otherwise, FQDN is returned.
*/
public List<String> getDependency(String name);
}
| DNSToSwitchMappingWithDependency |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/query/options/CacheModeTests.java | {
"start": 524,
"end": 793
} | class ____ {
@Test
public void testNullCacheMode(SessionFactoryScope scope) {
// tests passing null as CacheMode
scope.inTransaction( (session) -> {
session.createQuery( "select c from Contact c" )
.setCacheMode( null )
.list();
});
}
}
| CacheModeTests |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.