language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | apache__logging-log4j2 | log4j-1.2-api/src/main/java/org/apache/log4j/xml/DOMConfigurator.java | {
"start": 2031,
"end": 2734
} | class ____ initialize the log4j environment using a DOM tree.
*
* <p>
* The DTD is specified in <a href="doc-files/log4j.dtd"><b>log4j.dtd</b></a>.
*
* <p>
* Sometimes it is useful to see how log4j is reading configuration files. You can enable log4j internal logging by
* defining the <b>log4j.debug</b> variable on the java command line. Alternatively, set the <code>debug</code>
* attribute in the <code>log4j:configuration</code> element. As in
*
* <pre>
* <log4j:configuration <b>debug="true"</b> xmlns:log4j="http://jakarta.apache.org/log4j/">
* ...
* </log4j:configuration>
* </pre>
*
* <p>
* There are sample XML files included in the package.
*
* @since 0.8.3
*/
public | to |
java | redisson__redisson | redisson/src/main/java/org/redisson/connection/decoder/MapNativeAllDecoder.java | {
"start": 906,
"end": 1880
} | class ____ implements MultiDecoder<Map<Object, Object>> {
private final List<Object> args;
private final Class<?> valueClass;
public MapNativeAllDecoder(List<Object> args, Class<?> valueClass) {
this.args = args;
this.valueClass = valueClass;
}
@Override
public Map<Object, Object> decode(List<Object> parts, State state) {
if (parts.isEmpty()) {
return new HashMap<>();
}
Map<Object, Object> result = new LinkedHashMap<>(parts.size());
for (int index = 0; index < parts.size(); index++) {
Long value = (Long) parts.get(index);
if (value == -2 && valueClass != Long.class) {
continue;
}
if (valueClass == Boolean.class) {
result.put(args.get(index), value == 1);
} else {
result.put(args.get(index), value);
}
}
return result;
}
}
| MapNativeAllDecoder |
java | apache__logging-log4j2 | log4j-mongodb/src/main/java/org/apache/logging/log4j/mongodb/MongoDbProvider.java | {
"start": 1832,
"end": 2623
} | class ____ extends MongoDb4Provider.Builder<Builder> {
@Override
public MongoDb4Provider build() {
// Don't issue MongoDb4Provider's WARN event.
return newMongoDb4Provider();
}
}
/**
* There are no instances.
*/
private MongoDbProvider() {
// empty.
}
/**
* @deprecated since 2.25.0 use the typesafe {@link #newMongoDbBuilder()} method.
*/
@Deprecated
@SuppressWarnings("unchecked")
public static <B extends MongoDb4Provider.Builder<B>> B newBuilder() {
return (B) newMongoDbBuilder();
}
/**
* Creates a builder for the MongoDB plugin
*/
@PluginBuilderFactory
public static Builder newMongoDbBuilder() {
return new Builder();
}
}
| Builder |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/adaptive/WaitingForResourcesTest.java | {
"start": 1832,
"end": 11518
} | class ____ {
private static final Logger LOG = LoggerFactory.getLogger(WaitingForResourcesTest.class);
private static final Duration DISABLED_RESOURCE_WAIT_TIMEOUT = Duration.ofSeconds(-1);
@RegisterExtension private MockContext ctx = new MockContext();
/** WaitingForResources is transitioning to Executing if there are enough resources. */
@Test
void testTransitionToCreatingExecutionGraph() {
final AtomicBoolean onTriggerCalled = new AtomicBoolean();
final Function<StateTransitionManager.Context, StateTransitionManager>
stateTransitionManagerFactory =
context ->
new TestingStateTransitionManager(
() -> {
assertThat(context.hasDesiredResources()).isTrue();
assertThat(context.hasSufficientResources()).isTrue();
context.transitionToSubsequentState();
},
() -> onTriggerCalled.set(true));
ctx.setHasDesiredResources(() -> true);
ctx.setHasSufficientResources(() -> true);
ctx.setExpectCreatingExecutionGraph();
new WaitingForResources(
ctx, LOG, DISABLED_RESOURCE_WAIT_TIMEOUT, stateTransitionManagerFactory);
ctx.runScheduledTasks();
assertThat(onTriggerCalled.get()).isTrue();
}
@Test
void testNotEnoughResources() {
final AtomicBoolean onChangeCalled = new AtomicBoolean();
final AtomicBoolean onTriggerCalled = new AtomicBoolean();
final Function<StateTransitionManager.Context, StateTransitionManager>
stateTransitionManagerFactory =
context ->
new TestingStateTransitionManager(
() -> {
onChangeCalled.set(true);
assertThat(context.hasDesiredResources()).isFalse();
assertThat(context.hasSufficientResources()).isFalse();
},
() -> onTriggerCalled.set(true));
ctx.setHasDesiredResources(() -> false);
ctx.setHasSufficientResources(() -> false);
WaitingForResources wfr =
new WaitingForResources(
ctx, LOG, DISABLED_RESOURCE_WAIT_TIMEOUT, stateTransitionManagerFactory);
ctx.runScheduledTasks();
// we expect no state transition.
assertThat(ctx.hasStateTransition()).isFalse();
assertThat(onChangeCalled.get()).isTrue();
assertThat(onTriggerCalled.get()).isTrue();
}
@Test
void testNotifyNewResourcesAvailable() {
final AtomicInteger callsCounter = new AtomicInteger();
final Function<StateTransitionManager.Context, StateTransitionManager>
stateTransitionManagerFactory =
context ->
TestingStateTransitionManager.withOnChangeEventOnly(
() -> {
if (callsCounter.incrementAndGet() == 0) {
// initially, not enough resources
assertThat(context.hasDesiredResources()).isFalse();
assertThat(context.hasSufficientResources())
.isFalse();
}
if (context.hasDesiredResources()
&& context.hasSufficientResources()) {
context.transitionToSubsequentState();
}
});
// initially, not enough resources
ctx.setHasDesiredResources(() -> false);
ctx.setHasSufficientResources(() -> false);
WaitingForResources wfr =
new WaitingForResources(
ctx, LOG, DISABLED_RESOURCE_WAIT_TIMEOUT, stateTransitionManagerFactory);
ctx.runScheduledTasks();
// make resources available
ctx.setHasDesiredResources(() -> true);
ctx.setHasSufficientResources(() -> true);
ctx.setExpectCreatingExecutionGraph();
wfr.onNewResourcesAvailable(); // .. and notify
}
@Test
void testSchedulingWithSufficientResources() {
final Function<StateTransitionManager.Context, StateTransitionManager>
stateTransitionManagerFactory =
context ->
TestingStateTransitionManager.withOnChangeEventOnly(
() -> {
assertThat(context.hasDesiredResources()).isFalse();
if (context.hasSufficientResources()) {
context.transitionToSubsequentState();
}
});
WaitingForResources wfr =
new WaitingForResources(
ctx, LOG, DISABLED_RESOURCE_WAIT_TIMEOUT, stateTransitionManagerFactory);
ctx.runScheduledTasks();
// we expect no state transition.
assertThat(ctx.hasStateTransition()).isFalse();
ctx.setHasDesiredResources(() -> false);
ctx.setHasSufficientResources(() -> true);
ctx.setExpectCreatingExecutionGraph();
wfr.onNewResourcesAvailable();
}
@Test
void testNoStateTransitionOnNoResourceTimeout() {
ctx.setHasDesiredResources(() -> false);
ctx.setHasSufficientResources(() -> false);
WaitingForResources wfr =
new WaitingForResources(
ctx,
LOG,
DISABLED_RESOURCE_WAIT_TIMEOUT,
context -> TestingStateTransitionManager.withNoOp());
ctx.runScheduledTasks();
assertThat(ctx.hasStateTransition()).isFalse();
}
@Test
void testStateTransitionOnResourceTimeout() {
WaitingForResources wfr =
new WaitingForResources(
ctx,
LOG,
Duration.ZERO,
context -> TestingStateTransitionManager.withNoOp());
ctx.setExpectCreatingExecutionGraph();
ctx.runScheduledTasks();
}
@Test
void testInternalRunScheduledTasks_correctExecutionOrder() {
AtomicBoolean firstRun = new AtomicBoolean(false);
AtomicBoolean secondRun = new AtomicBoolean(false);
AtomicBoolean thirdRun = new AtomicBoolean(false);
Runnable runFirstBecauseOfLowDelay = () -> firstRun.set(true);
Runnable runSecondBecauseOfScheduleOrder =
() -> {
assertThat(firstRun).as("order violated").isTrue();
secondRun.set(true);
};
Runnable runLastBecauseOfHighDelay =
() -> {
assertThat(secondRun).as("order violated").isTrue();
thirdRun.set(true);
};
ctx.runIfState(
new AdaptiveSchedulerTest.DummyState(ctx),
runLastBecauseOfHighDelay,
Duration.ofMillis(999));
ctx.runIfState(
new AdaptiveSchedulerTest.DummyState(ctx),
runFirstBecauseOfLowDelay,
Duration.ZERO);
ctx.runIfState(
new AdaptiveSchedulerTest.DummyState(ctx),
runSecondBecauseOfScheduleOrder,
Duration.ZERO);
ctx.runScheduledTasks();
assertThat(thirdRun).isTrue();
}
@Test
void testInternalRunScheduledTasks_tasksAreRemovedAfterExecution() {
AtomicBoolean executed = new AtomicBoolean(false);
Runnable executeOnce =
() -> {
assertThat(executed).as("Multiple executions").isFalse();
executed.set(true);
};
ctx.runIfState(new AdaptiveSchedulerTest.DummyState(ctx), executeOnce, Duration.ZERO);
// execute at least twice
ctx.runScheduledTasks();
ctx.runScheduledTasks();
assertThat(executed).isTrue();
}
@Test
void testInternalRunScheduledTasks_upperBoundRespected() {
Runnable executeNever = () -> fail("Not expected");
ctx.runIfState(
new AdaptiveSchedulerTest.DummyState(ctx), executeNever, Duration.ofMillis(10));
ctx.runScheduledTasks(4);
}
@Test
void testInternalRunScheduledTasks_scheduleTaskFromRunnable() {
final State state = new AdaptiveSchedulerTest.DummyState(ctx);
AtomicBoolean executed = new AtomicBoolean(false);
ctx.runIfState(
state,
() -> {
// schedule something
ctx.runIfState(state, () -> executed.set(true), Duration.ofMillis(4));
},
Duration.ZERO);
// choose time that includes inner execution as well
ctx.runScheduledTasks(10);
assertThat(executed).isTrue();
}
private static | WaitingForResourcesTest |
java | playframework__playframework | core/play-guice/src/main/java/play/inject/guice/GuiceBuilder.java | {
"start": 721,
"end": 1694
} | class ____<
Self, Delegate extends play.api.inject.guice.GuiceBuilder<Delegate>> {
protected Delegate delegate;
protected GuiceBuilder(Delegate delegate) {
this.delegate = delegate;
}
/**
* Set the environment.
*
* @param env the environment to configure into this application
* @return a copy of this builder with the new environment
*/
public final Self in(Environment env) {
return newBuilder(delegate.in(env.asScala()));
}
/**
* Set the environment path.
*
* @param path the path to configure
* @return a copy of this builder with the new path
*/
public final Self in(File path) {
return newBuilder(delegate.in(path));
}
/**
* Set the environment mode.
*
* @param mode the mode to configure
* @return a copy of this build configured with this mode
*/
public final Self in(Mode mode) {
return newBuilder(delegate.in(mode.asScala()));
}
/**
* Set the environment | GuiceBuilder |
java | FasterXML__jackson-core | src/test/java/tools/jackson/core/unittest/json/async/AsyncInvalidCharsTest.java | {
"start": 643,
"end": 4497
} | class ____ extends AsyncTestBase
{
private final JsonFactory JSON_F = new JsonFactory();
@Test
void utf8BOMHandling() throws Exception
{
_testUtf8BOMHandling(0, 99);
_testUtf8BOMHandling(0, 5);
_testUtf8BOMHandling(0, 3);
_testUtf8BOMHandling(0, 2);
_testUtf8BOMHandling(0, 1);
_testUtf8BOMHandling(2, 99);
_testUtf8BOMHandling(2, 1);
}
private void _testUtf8BOMHandling(int offset, int readSize) throws Exception
{
_testUTF8BomOk(offset, readSize);
_testUTF8BomFail(offset, readSize, 1,
"Unexpected byte 0x5b following 0xEF; should get 0xBB as second byte");
_testUTF8BomFail(offset, readSize, 2,
"Unexpected byte 0x5b following 0xEF 0xBB; should get 0xBF as third byte");
}
private void _testUTF8BomOk(int offset, int readSize) throws Exception
{
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
// first, write BOM:
bytes.write(0xEF);
bytes.write(0xBB);
bytes.write(0xBF);
bytes.write("[ 1 ]".getBytes("UTF-8"));
byte[] doc = bytes.toByteArray();
AsyncReaderWrapper p = asyncForBytes(JSON_F, readSize, doc, offset);
assertEquals(JsonToken.START_ARRAY, p.nextToken());
// should also have skipped first 3 bytes of BOM; but do we have offset available?
/* Alas, due to [core#111], we have to omit BOM in calculations
* as we do not know what the offset is due to -- may need to revisit, if this
* discrepancy becomes an issue. For now it just means that BOM is considered
* "out of stream" (not part of input).
*/
TokenStreamLocation loc = p.parser().currentTokenLocation();
// so if BOM was consider in-stream (part of input), this should expect 3:
// (NOTE: this is location for START_ARRAY token, now)
assertEquals(-1, loc.getCharOffset());
// !!! TODO: fix location handling
/*
assertEquals(0, loc.getByteOffset());
assertEquals(1, loc.getLineNr());
assertEquals(1, loc.getColumnNr());
*/
assertEquals(JsonToken.VALUE_NUMBER_INT, p.nextToken());
assertEquals(JsonToken.END_ARRAY, p.nextToken());
p.close();
}
private void _testUTF8BomFail(int offset, int readSize,
int okBytes, String verify) throws Exception
{
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
bytes.write(0xEF);
if (okBytes > 1) {
bytes.write(0xBB);
}
bytes.write("[ 1 ]".getBytes("UTF-8"));
byte[] doc = bytes.toByteArray();
try (AsyncReaderWrapper p = asyncForBytes(JSON_F, readSize, doc, offset)) {
assertEquals(JsonToken.START_ARRAY, p.nextToken());
fail("Should not pass");
} catch (StreamReadException e) {
verifyException(e, verify);
}
}
@Test
void handlingOfInvalidSpace() throws Exception
{
_testHandlingOfInvalidSpace(0, 99);
_testHandlingOfInvalidSpace(0, 3);
_testHandlingOfInvalidSpace(0, 1);
_testHandlingOfInvalidSpace(1, 99);
_testHandlingOfInvalidSpace(2, 1);
}
private void _testHandlingOfInvalidSpace(int offset, int readSize) throws Exception
{
final String doc = "{ \u0008 \"a\":1}";
AsyncReaderWrapper p = asyncForBytes(JSON_F, readSize, _jsonDoc(doc), offset);
assertToken(JsonToken.START_OBJECT, p.nextToken());
try {
p.nextToken();
fail("Should have failed");
} catch (StreamReadException e) {
verifyException(e, "Illegal character");
// and correct error code
verifyException(e, "code 8");
}
p.close();
}
}
| AsyncInvalidCharsTest |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregator.java | {
"start": 1017,
"end": 1179
} | class ____ {
/**
* Parse the {@link PipelineAggregationBuilder} from a {@link XContentParser}.
*/
@FunctionalInterface
public | PipelineAggregator |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/repositories/SnapshotShardContext.java | {
"start": 1552,
"end": 5650
} | class ____ extends DelegatingActionListener<ShardSnapshotResult, ShardSnapshotResult> {
private final Store store;
private final MapperService mapperService;
private final SnapshotId snapshotId;
private final IndexId indexId;
private final SnapshotIndexCommit commitRef;
@Nullable
private final String shardStateIdentifier;
private final IndexShardSnapshotStatus snapshotStatus;
private final IndexVersion repositoryMetaVersion;
private final long snapshotStartTime;
/**
* @param store store to be snapshotted
* @param mapperService the shards mapper service
* @param snapshotId snapshot id
* @param indexId id for the index being snapshotted
* @param commitRef commit point reference
* @param shardStateIdentifier a unique identifier of the state of the shard that is stored with the shard's snapshot and used
* to detect if the shard has changed between snapshots. If {@code null} is passed as the identifier
* snapshotting will be done by inspecting the physical files referenced by {@code snapshotIndexCommit}
* @param snapshotStatus snapshot status
* @param repositoryMetaVersion version of the updated repository metadata to write
* @param snapshotStartTime start time of the snapshot found in
* {@link org.elasticsearch.cluster.SnapshotsInProgress.Entry#startTime()}
* @param listener listener invoked on completion
*/
public SnapshotShardContext(
Store store,
MapperService mapperService,
SnapshotId snapshotId,
IndexId indexId,
SnapshotIndexCommit commitRef,
@Nullable String shardStateIdentifier,
IndexShardSnapshotStatus snapshotStatus,
IndexVersion repositoryMetaVersion,
final long snapshotStartTime,
ActionListener<ShardSnapshotResult> listener
) {
super(new SubscribableListener<>());
addListener(commitRef.closingBefore(listener));
this.store = store;
this.mapperService = mapperService;
this.snapshotId = snapshotId;
this.indexId = indexId;
this.commitRef = commitRef;
this.shardStateIdentifier = shardStateIdentifier;
this.snapshotStatus = snapshotStatus;
this.repositoryMetaVersion = repositoryMetaVersion;
this.snapshotStartTime = snapshotStartTime;
}
public Store store() {
return store;
}
public MapperService mapperService() {
return mapperService;
}
public SnapshotId snapshotId() {
return snapshotId;
}
public IndexId indexId() {
return indexId;
}
public IndexCommit indexCommit() {
return commitRef.indexCommit();
}
@Nullable
public String stateIdentifier() {
return shardStateIdentifier;
}
public IndexShardSnapshotStatus status() {
return snapshotStatus;
}
public IndexVersion getRepositoryMetaVersion() {
return repositoryMetaVersion;
}
public long snapshotStartTime() {
return snapshotStartTime;
}
@Override
public void onResponse(ShardSnapshotResult result) {
delegate.onResponse(result);
}
public Releasable withCommitRef() {
snapshotStatus.ensureNotAborted(); // check this first to avoid acquiring a ref when aborted even if refs are available
if (commitRef.tryIncRef()) {
return Releasables.releaseOnce(commitRef::decRef);
} else {
snapshotStatus.ensureNotAborted();
assert false : "commit ref closed early in state " + snapshotStatus;
throw new IndexShardSnapshotFailedException(store.shardId(), "Store got closed concurrently");
}
}
public void addListener(ActionListener<ShardSnapshotResult> listener) {
((SubscribableListener<ShardSnapshotResult>) this.delegate).addListener(listener);
}
}
| SnapshotShardContext |
java | quarkusio__quarkus | integration-tests/oidc-code-flow/src/main/java/io/quarkus/it/keycloak/SessionExpiredOidcRedirectFilter.java | {
"start": 616,
"end": 1706
} | class ____ implements OidcRedirectFilter {
@Override
public void filter(OidcRedirectContext context) {
if (!"tenant-refresh".equals(context.oidcTenantConfig().tenantId.get())) {
throw new RuntimeException("Invalid tenant id");
}
if (!context.redirectUri().contains("/session-expired-page")) {
throw new RuntimeException("Invalid redirect URI");
}
AuthorizationCodeTokens tokens = context.routingContext().get(AuthorizationCodeTokens.class.getName());
String userName = OidcCommonUtils.decodeJwtContent(tokens.getIdToken()).getString(Claims.preferred_username.name());
String jwe = Jwt.preferredUserName(userName).jwe()
.encryptWithSecret(context.oidcTenantConfig().credentials.secret.get());
OidcUtils.createCookie(context.routingContext(), context.oidcTenantConfig(), "session_expired",
jwe + "|" + context.oidcTenantConfig().tenantId.get(), 10);
context.additionalQueryParams().add("session-expired", "true");
}
}
| SessionExpiredOidcRedirectFilter |
java | netty__netty | transport-classes-io_uring/src/main/java/io/netty/channel/uring/IoUringIoHandlerConfig.java | {
"start": 1176,
"end": 3992
} | class ____ the following configurable options:
* </p>
*
* <table border="1" cellspacing="0" cellpadding="6">
* <caption>Available Configuration Options</caption>
* <thead>
* <tr>
* <th>Setter Method</th>
* <th>Description</th>
* </tr>
* </thead>
* <tbody>
* <tr>
* <td>{@link IoUringIoHandlerConfig#setRingSize}</td>
* <td>Sets the size of the submission queue for the io_uring instance.
* <br>
* If you want to submit a large number of io_uring requests at once,
* it is recommended to properly configure this option.
* The default value is 4096, which is sufficient for most scenarios.
* </td>
* </tr>
* <tr>
* <td>{@link IoUringIoHandlerConfig#setMaxBoundedWorker}</td>
* <td>Defines the maximum number of bounded io_uring worker threads.
* <br>
* If you extend io_uring-related file operations based on Netty,
* it is recommended to properly configure this option.
* For more details, refer to the
* <a href="https://man7.org/linux/man-pages/man3/io_uring_register_iowq_max_workers.3.html>
* manual.
* </a>
* </td>
* </tr>
* <tr>
* <td>{@link IoUringIoHandlerConfig#setMaxUnboundedWorker}</td>
* <td>Defines the maximum number of unbounded io_uring worker threads.
* <br>
* If you use FileRegion to perform `sendfile` operations in io_uring,
* it is recommended to properly configure this option as otherwise you might
* end up with an <a href="https://github.com/netty/netty/issues/15125>unexpected number of kernel threads</a>.
* </td>
* </tr>
* <tr>
* <td>{@link IoUringIoHandlerConfig#setCqSize}</td>
* <td>Sets the size of the completionQueue queue for the io_uring instance.
* <br>
* If your current kernel supports some multishot variants
* (such as IORING_RECV_MULTISHOT, IORING_ACCEPT_MULTISHOT) or IORING_RECVSEND_BUNDLE,
* and you want to handle more CQEs in a single syscall
* it is recommended to properly configure this option.
* The default value is twice the ring size, which is sufficient for most scenarios.
* </td>
* </tr>
* <tr>
* <td>{@link IoUringIoHandlerConfig#setBufferRingConfig}</td>
* <td>
* Adds a buffer ring configuration to the list of buffer ring configurations.
* It will be used to register the buffer ring for the io_uring instance.
* </td>
* </tr>
* <tr>
* <td>{@link IoUringIoHandlerConfig#setSingleIssuer}</td>
* <td>
* Enable or disable the use of {@code IORING_SETUP_SINGLE_ISSUER}.
* </td>
* </tr>
* </tbody>
* </table>
*/
public final | provides |
java | spring-projects__spring-security | kerberos/kerberos-core/src/main/java/org/springframework/security/kerberos/authentication/sun/SunJaasKerberosTicketValidator.java | {
"start": 7297,
"end": 8263
} | class ____ implements PrivilegedExceptionAction<KerberosTicketValidation> {
byte[] kerberosTicket;
private KerberosMultitierValidateAction(byte[] kerberosTicket) {
this.kerberosTicket = kerberosTicket;
}
@Override
public KerberosTicketValidation run() throws Exception {
byte[] responseToken = new byte[0];
GSSManager manager = GSSManager.getInstance();
GSSContext context = manager.createContext((GSSCredential) null);
while (!context.isEstablished()) {
context.acceptSecContext(this.kerberosTicket, 0, this.kerberosTicket.length);
}
Subject subject = GSSUtil.createSubject(context.getSrcName(), context.getDelegCred());
KerberosTicketValidation result = new KerberosTicketValidation(context.getSrcName().toString(), subject,
responseToken, context);
if (!SunJaasKerberosTicketValidator.this.holdOnToGSSContext) {
context.dispose();
}
return result;
}
}
/**
* This | KerberosMultitierValidateAction |
java | grpc__grpc-java | core/src/main/java/io/grpc/internal/CallTracer.java | {
"start": 845,
"end": 2075
} | class ____ {
private final TimeProvider timeProvider;
private final LongCounter callsStarted = LongCounterFactory.create();
private final LongCounter callsSucceeded = LongCounterFactory.create();
private final LongCounter callsFailed = LongCounterFactory.create();
private volatile long lastCallStartedNanos;
CallTracer(TimeProvider timeProvider) {
this.timeProvider = timeProvider;
}
public void reportCallStarted() {
callsStarted.add(1);
lastCallStartedNanos = timeProvider.currentTimeNanos();
}
public void reportCallEnded(boolean success) {
if (success) {
callsSucceeded.add(1);
} else {
callsFailed.add(1);
}
}
void updateBuilder(ChannelStats.Builder builder) {
builder
.setCallsStarted(callsStarted.value())
.setCallsSucceeded(callsSucceeded.value())
.setCallsFailed(callsFailed.value())
.setLastCallStartedNanos(lastCallStartedNanos);
}
void updateBuilder(ServerStats.Builder builder) {
builder
.setCallsStarted(callsStarted.value())
.setCallsSucceeded(callsSucceeded.value())
.setCallsFailed(callsFailed.value())
.setLastCallStartedNanos(lastCallStartedNanos);
}
public | CallTracer |
java | apache__hadoop | hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java | {
"start": 1936,
"end": 8520
} | class ____ implements WasbAuthorizerInterface {
public static final Logger LOG = LoggerFactory
.getLogger(RemoteWasbAuthorizerImpl.class);
private static final ObjectReader RESPONSE_READER = new ObjectMapper()
.readerFor(RemoteWasbAuthorizerResponse.class);
/**
* Configuration parameter name expected in the Configuration object to
* provide the urls of the remote service instances. {@value}
*/
public static final String KEY_REMOTE_AUTH_SERVICE_URLS =
"fs.azure.authorization.remote.service.urls";
/**
* Authorization operation OP name in the remote service {@value}
*/
private static final String CHECK_AUTHORIZATION_OP = "CHECK_AUTHORIZATION";
/**
* Query parameter specifying the access operation type. {@value}
*/
private static final String ACCESS_OPERATION_QUERY_PARAM_NAME =
"operation_type";
/**
* Query parameter specifying the wasb absolute path. {@value}
*/
private static final String WASB_ABSOLUTE_PATH_QUERY_PARAM_NAME =
"wasb_absolute_path";
/**
* Query parameter name for sending owner of the specific resource {@value}
*/
private static final String WASB_RESOURCE_OWNER_QUERY_PARAM_NAME =
"wasb_resource_owner";
/**
* Authorization Remote http client retry policy enabled configuration key. {@value}
*/
private static final String AUTHORIZER_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY =
"fs.azure.authorizer.http.retry.policy.enabled";
/**
* Authorization Remote http client retry policy spec. {@value}
*/
private static final String AUTHORIZER_HTTP_CLIENT_RETRY_POLICY_SPEC_SPEC =
"fs.azure.authorizer.http.retry.policy.spec";
/**
* Authorization Remote http client retry policy spec default value. {@value}
*/
private static final String AUTHORIZER_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT =
"10,3,100,2";
/**
* Authorization caching period
*/
private static final String AUTHORIZATION_CACHEENTRY_EXPIRY_PERIOD =
"fs.azure.authorization.cacheentry.expiry.period";
private WasbRemoteCallHelper remoteCallHelper = null;
private boolean isKerberosSupportEnabled;
private boolean isSpnegoTokenCacheEnabled;
private RetryPolicy retryPolicy;
private String[] commaSeparatedUrls = null;
private CachingAuthorizer<CachedAuthorizerEntry, Boolean> cache;
@VisibleForTesting public void updateWasbRemoteCallHelper(
WasbRemoteCallHelper helper) {
this.remoteCallHelper = helper;
}
@Override
public void init(Configuration conf)
throws IOException {
LOG.debug("Initializing RemoteWasbAuthorizerImpl instance");
this.isKerberosSupportEnabled =
conf.getBoolean(Constants.AZURE_KERBEROS_SUPPORT_PROPERTY_NAME, false);
this.isSpnegoTokenCacheEnabled =
conf.getBoolean(Constants.AZURE_ENABLE_SPNEGO_TOKEN_CACHE, true);
this.commaSeparatedUrls =
conf.getTrimmedStrings(KEY_REMOTE_AUTH_SERVICE_URLS);
if (this.commaSeparatedUrls == null
|| this.commaSeparatedUrls.length <= 0) {
throw new IOException(KEY_REMOTE_AUTH_SERVICE_URLS + " config not set"
+ " in configuration.");
}
this.retryPolicy = RetryUtils.getMultipleLinearRandomRetry(conf,
AUTHORIZER_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY, true,
AUTHORIZER_HTTP_CLIENT_RETRY_POLICY_SPEC_SPEC,
AUTHORIZER_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT);
if (isKerberosSupportEnabled && UserGroupInformation.isSecurityEnabled()) {
this.remoteCallHelper = new SecureWasbRemoteCallHelper(retryPolicy, false,
isSpnegoTokenCacheEnabled);
} else {
this.remoteCallHelper = new WasbRemoteCallHelper(retryPolicy);
}
this.cache = new CachingAuthorizer<>(
conf.getTimeDuration(AUTHORIZATION_CACHEENTRY_EXPIRY_PERIOD, 5L, TimeUnit.MINUTES), "AUTHORIZATION"
);
this.cache.init(conf);
}
@Override
public boolean authorize(String wasbAbsolutePath, String accessType, String resourceOwner)
throws IOException {
/* Make an exception for the internal -RenamePending files */
if (wasbAbsolutePath.endsWith(NativeAzureFileSystem.FolderRenamePending.SUFFIX)) {
return true;
}
CachedAuthorizerEntry cacheKey = new CachedAuthorizerEntry(wasbAbsolutePath, accessType, resourceOwner);
Boolean cacheresult = cache.get(cacheKey);
if (cacheresult != null) {
return cacheresult;
}
boolean authorizeresult = authorizeInternal(wasbAbsolutePath, accessType, resourceOwner);
cache.put(cacheKey, authorizeresult);
return authorizeresult;
}
private boolean authorizeInternal(String wasbAbsolutePath, String accessType, String resourceOwner)
throws IOException {
try {
final URIBuilder uriBuilder = new URIBuilder();
uriBuilder.setPath("/" + CHECK_AUTHORIZATION_OP);
uriBuilder
.addParameter(WASB_ABSOLUTE_PATH_QUERY_PARAM_NAME, wasbAbsolutePath);
uriBuilder.addParameter(ACCESS_OPERATION_QUERY_PARAM_NAME, accessType);
if (resourceOwner != null && StringUtils.isNotEmpty(resourceOwner)) {
uriBuilder.addParameter(WASB_RESOURCE_OWNER_QUERY_PARAM_NAME,
resourceOwner);
}
String responseBody = remoteCallHelper
.makeRemoteRequest(commaSeparatedUrls, uriBuilder.getPath(),
uriBuilder.getQueryParams(), HttpGet.METHOD_NAME);
RemoteWasbAuthorizerResponse authorizerResponse = RESPONSE_READER
.readValue(responseBody);
if (authorizerResponse == null) {
throw new WasbAuthorizationException(
"RemoteWasbAuthorizerResponse object null from remote call");
} else if (authorizerResponse.getResponseCode()
== REMOTE_CALL_SUCCESS_CODE) {
return authorizerResponse.getAuthorizationResult();
} else {
throw new WasbAuthorizationException(
"Remote authorization" + " service encountered an error "
+ authorizerResponse.getResponseMessage());
}
} catch (WasbRemoteCallException | JsonParseException | JsonMappingException ex) {
throw new WasbAuthorizationException(ex);
}
}
}
/**
* POJO representing the response expected from a remote
* authorization service.
* The remote service is expected to return the authorization
* response in the following JSON format
* {
* "responseCode" : 0 or non-zero <int>,
* "responseMessage" : relevant message of failure <String>
* "authorizationResult" : authorization result <boolean>
* true - if auhorization allowed
* false - otherwise.
* }
*/
| RemoteWasbAuthorizerImpl |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/ClassUtils.java | {
"start": 25635,
"end": 25754
} | class ____
* @throws NullPointerException if the className is null
* @throws ClassNotFoundException if the | loader |
java | spring-projects__spring-boot | smoke-test/spring-boot-smoke-test-ant/src/test/java/smoketest/ant/SampleAntApplicationIT.java | {
"start": 1059,
"end": 1548
} | class ____ {
@Test
void runJar() throws Exception {
File libs = new File("build/ant/libs");
Process process = new JavaExecutable().processBuilder("-jar", "spring-boot-smoke-test-ant.jar")
.directory(libs)
.start();
process.waitFor(5, TimeUnit.MINUTES);
assertThat(process.exitValue()).isZero();
String output = FileCopyUtils.copyToString(new InputStreamReader(process.getInputStream()));
assertThat(output).contains("Spring Boot Ant Example");
}
}
| SampleAntApplicationIT |
java | apache__camel | components/camel-dataset/src/main/java/org/apache/camel/component/dataset/ListDataSet.java | {
"start": 1081,
"end": 2259
} | class ____ extends DataSetSupport {
private List<Object> defaultBodies;
public ListDataSet() {
super(0);
}
public ListDataSet(List<Object> defaultBodies) {
this.defaultBodies = defaultBodies;
setSize(defaultBodies.size());
}
// Properties
//-------------------------------------------------------------------------
public List<Object> getDefaultBodies() {
if (defaultBodies == null) {
defaultBodies = new LinkedList<>();
}
return defaultBodies;
}
public void setDefaultBodies(List<Object> defaultBodies) {
this.defaultBodies = defaultBodies;
setSize(defaultBodies.size());
}
// Implementation methods
//-------------------------------------------------------------------------
/**
* Creates the message body for a given message. If the messageIndex is greater than the size of the list, use the
* modulus.
*/
@Override
protected Object createMessageBody(long messageIndex) {
int listIndex = (int) (messageIndex % getDefaultBodies().size());
return getDefaultBodies().get(listIndex);
}
}
| ListDataSet |
java | apache__kafka | coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/MockCoordinatorLoader.java | {
"start": 1032,
"end": 2029
} | class ____ implements CoordinatorLoader<String> {
private final LoadSummary summary;
private final List<Long> lastWrittenOffsets;
private final List<Long> lastCommittedOffsets;
public MockCoordinatorLoader(
LoadSummary summary,
List<Long> lastWrittenOffsets,
List<Long> lastCommittedOffsets
) {
this.summary = summary;
this.lastWrittenOffsets = lastWrittenOffsets;
this.lastCommittedOffsets = lastCommittedOffsets;
}
public MockCoordinatorLoader() {
this(null, List.of(), List.of());
}
@Override
public CompletableFuture<LoadSummary> load(
TopicPartition tp,
CoordinatorPlayback<String> replayable
) {
lastWrittenOffsets.forEach(replayable::updateLastWrittenOffset);
lastCommittedOffsets.forEach(replayable::updateLastCommittedOffset);
return CompletableFuture.completedFuture(summary);
}
@Override
public void close() {}
}
| MockCoordinatorLoader |
java | google__dagger | dagger-runtime/main/java/dagger/MapKey.java | {
"start": 1709,
"end": 1890
} | class ____ {
* {@literal @}Provides
* {@literal @}IntoMap
* {@literal @}SomeEnumKey(SomeEnum.FOO)
* Integer provideFooValue() {
* return 2;
* }
* }
*
* | SomeModule |
java | apache__kafka | trogdor/src/main/java/org/apache/kafka/trogdor/workload/ProduceBenchWorker.java | {
"start": 2482,
"end": 3863
} | class ____ implements TaskWorker {
private static final Logger log = LoggerFactory.getLogger(ProduceBenchWorker.class);
private static final int THROTTLE_PERIOD_MS = 100;
private final String id;
private final ProduceBenchSpec spec;
private final AtomicBoolean running = new AtomicBoolean(false);
private ScheduledExecutorService executor;
private WorkerStatusTracker status;
private KafkaFutureImpl<String> doneFuture;
public ProduceBenchWorker(String id, ProduceBenchSpec spec) {
this.id = id;
this.spec = spec;
}
@Override
public void start(Platform platform, WorkerStatusTracker status,
KafkaFutureImpl<String> doneFuture) {
if (!running.compareAndSet(false, true)) {
throw new IllegalStateException("ProducerBenchWorker is already running.");
}
log.info("{}: Activating ProduceBenchWorker with {}", id, spec);
// Create an executor with 2 threads. We need the second thread so
// that the StatusUpdater can run in parallel with SendRecords.
this.executor = Executors.newScheduledThreadPool(2,
ThreadUtils.createThreadFactory("ProduceBenchWorkerThread%d", false));
this.status = status;
this.doneFuture = doneFuture;
executor.submit(new Prepare());
}
public | ProduceBenchWorker |
java | apache__kafka | connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSourceTaskContext.java | {
"start": 1160,
"end": 2411
} | class ____ implements SourceTaskContext {
private final OffsetStorageReader reader;
private final ConnectorTaskId id;
private final ClusterConfigState configState;
private final WorkerTransactionContext transactionContext;
private final PluginMetrics pluginMetrics;
public WorkerSourceTaskContext(OffsetStorageReader reader,
ConnectorTaskId id,
ClusterConfigState configState,
WorkerTransactionContext transactionContext,
PluginMetrics pluginMetrics) {
this.reader = reader;
this.id = id;
this.configState = configState;
this.transactionContext = transactionContext;
this.pluginMetrics = pluginMetrics;
}
@Override
public Map<String, String> configs() {
return configState.taskConfig(id);
}
@Override
public OffsetStorageReader offsetStorageReader() {
return reader;
}
@Override
public WorkerTransactionContext transactionContext() {
return transactionContext;
}
@Override
public PluginMetrics pluginMetrics() {
return pluginMetrics;
}
}
| WorkerSourceTaskContext |
java | apache__rocketmq | store/src/test/java/org/apache/rocketmq/store/ha/HAServerTest.java | {
"start": 1931,
"end": 12304
} | class ____ {
private DefaultMessageStore defaultMessageStore;
private MessageStoreConfig storeConfig;
private HAService haService;
private Random random = new Random();
private List<HAClient> haClientList = new ArrayList<>();
@Before
public void setUp() throws Exception {
this.storeConfig = new MessageStoreConfig();
this.storeConfig.setHaListenPort(9000 + random.nextInt(1000));
this.storeConfig.setHaSendHeartbeatInterval(10);
this.defaultMessageStore = mockMessageStore();
this.haService = new DefaultHAService();
this.haService.init(defaultMessageStore);
this.haService.start();
}
@After
public void tearDown() {
tearDownAllHAClient();
await().atMost(Duration.ofMinutes(1)).until(new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
return HAServerTest.this.haService.getConnectionCount().get() == 0;
}
});
this.haService.shutdown();
}
@Test
public void testConnectionList_OneHAClient() throws IOException {
setUpOneHAClient();
await().atMost(Duration.ofMinutes(1)).until(new Callable<Boolean>() {
@Override
public Boolean call() {
return HAServerTest.this.haService.getConnectionCount().get() == 1;
}
});
}
@Test
public void testConnectionList_MultipleHAClient() throws IOException {
setUpOneHAClient();
setUpOneHAClient();
setUpOneHAClient();
await().atMost(Duration.ofMinutes(1)).until(new Callable<Boolean>() {
@Override
public Boolean call() {
return HAServerTest.this.haService.getConnectionCount().get() == 3;
}
});
tearDownOneHAClient();
await().atMost(Duration.ofMinutes(1)).until(new Callable<Boolean>() {
@Override
public Boolean call() {
return HAServerTest.this.haService.getConnectionCount().get() == 2;
}
});
}
@Test
public void inSyncReplicasNums() throws IOException, RocksDBException {
DefaultMessageStore messageStore = mockMessageStore();
doReturn(123L).when(messageStore).getMaxPhyOffset();
doReturn(123L).when(messageStore).getMasterFlushedOffset();
setUpOneHAClient(messageStore);
messageStore = mockMessageStore();
doReturn(124L).when(messageStore).getMaxPhyOffset();
doReturn(124L).when(messageStore).getMasterFlushedOffset();
setUpOneHAClient(messageStore);
messageStore = mockMessageStore();
doReturn(123L).when(messageStore).getMaxPhyOffset();
doReturn(123L).when(messageStore).getMasterFlushedOffset();
setUpOneHAClient(messageStore);
messageStore = mockMessageStore();
doReturn(125L).when(messageStore).getMaxPhyOffset();
doReturn(125L).when(messageStore).getMasterFlushedOffset();
setUpOneHAClient(messageStore);
final int haSlaveFallbehindMax = this.defaultMessageStore.getMessageStoreConfig().getHaMaxGapNotInSync();
await().atMost(Duration.ofMinutes(1)).until(new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
return HAServerTest.this.haService.inSyncReplicasNums(haSlaveFallbehindMax) == 5;
}
});
assertThat(HAServerTest.this.haService.inSyncReplicasNums(123L + haSlaveFallbehindMax)).isEqualTo(3);
assertThat(HAServerTest.this.haService.inSyncReplicasNums(124L + haSlaveFallbehindMax)).isEqualTo(2);
assertThat(HAServerTest.this.haService.inSyncReplicasNums(125L + haSlaveFallbehindMax)).isEqualTo(1);
}
@Test
public void isSlaveOK() throws IOException, RocksDBException {
DefaultMessageStore messageStore = mockMessageStore();
doReturn(123L).when(messageStore).getMaxPhyOffset();
doReturn(123L).when(messageStore).getMasterFlushedOffset();
setUpOneHAClient(messageStore);
messageStore = mockMessageStore();
doReturn(124L).when(messageStore).getMaxPhyOffset();
doReturn(124L).when(messageStore).getMasterFlushedOffset();
setUpOneHAClient(messageStore);
final int haSlaveFallbehindMax = this.defaultMessageStore.getMessageStoreConfig().getHaMaxGapNotInSync();
await().atMost(Duration.ofMinutes(1)).until(new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
return HAServerTest.this.haService.isSlaveOK(haSlaveFallbehindMax + 123);
}
});
assertThat(HAServerTest.this.haService.isSlaveOK(122L + haSlaveFallbehindMax)).isTrue();
assertThat(HAServerTest.this.haService.isSlaveOK(124L + haSlaveFallbehindMax)).isFalse();
}
@Test
public void putRequest_SingleAck()
throws IOException, ExecutionException, InterruptedException, TimeoutException, RocksDBException {
CommitLog.GroupCommitRequest request = new CommitLog.GroupCommitRequest(124, 4000, 1);
this.haService.putRequest(request);
assertThat(request.future().get()).isEqualTo(PutMessageStatus.FLUSH_SLAVE_TIMEOUT);
DefaultMessageStore messageStore = mockMessageStore();
doReturn(124L).when(messageStore).getMaxPhyOffset();
doReturn(124L).when(messageStore).getMasterFlushedOffset();
setUpOneHAClient(messageStore);
request = new CommitLog.GroupCommitRequest(124, 4000, 1);
this.haService.putRequest(request);
assertThat(request.future().get()).isEqualTo(PutMessageStatus.PUT_OK);
}
@Test
public void putRequest_MultipleAckAndRequests()
throws IOException, ExecutionException, InterruptedException, RocksDBException {
CommitLog.GroupCommitRequest oneAck = new CommitLog.GroupCommitRequest(124, 4000, 2);
this.haService.putRequest(oneAck);
CommitLog.GroupCommitRequest twoAck = new CommitLog.GroupCommitRequest(124, 4000, 3);
this.haService.putRequest(twoAck);
DefaultMessageStore messageStore = mockMessageStore();
doReturn(125L).when(messageStore).getMaxPhyOffset();
doReturn(125L).when(messageStore).getMasterFlushedOffset();
setUpOneHAClient(messageStore);
assertThat(oneAck.future().get()).isEqualTo(PutMessageStatus.PUT_OK);
assertThat(twoAck.future().get()).isEqualTo(PutMessageStatus.FLUSH_SLAVE_TIMEOUT);
messageStore = mockMessageStore();
doReturn(128L).when(messageStore).getMaxPhyOffset();
doReturn(128L).when(messageStore).getMasterFlushedOffset();
setUpOneHAClient(messageStore);
twoAck = new CommitLog.GroupCommitRequest(124, 4000, 3);
this.haService.putRequest(twoAck);
assertThat(twoAck.future().get()).isEqualTo(PutMessageStatus.PUT_OK);
}
@Test
public void getPush2SlaveMaxOffset() throws IOException, RocksDBException {
DefaultMessageStore messageStore = mockMessageStore();
doReturn(123L).when(messageStore).getMaxPhyOffset();
doReturn(123L).when(messageStore).getMasterFlushedOffset();
setUpOneHAClient(messageStore);
messageStore = mockMessageStore();
doReturn(124L).when(messageStore).getMaxPhyOffset();
doReturn(124L).when(messageStore).getMasterFlushedOffset();
setUpOneHAClient(messageStore);
messageStore = mockMessageStore();
doReturn(125L).when(messageStore).getMaxPhyOffset();
doReturn(125L).when(messageStore).getMasterFlushedOffset();
setUpOneHAClient(messageStore);
await().atMost(Duration.ofMinutes(1)).until(new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
return HAServerTest.this.haService.getPush2SlaveMaxOffset().get() == 125L;
}
});
}
private void setUpOneHAClient(DefaultMessageStore defaultMessageStore) throws IOException {
HAClient haClient = new DefaultHAClient(defaultMessageStore);
haClient.updateHaMasterAddress("127.0.0.1:" + this.storeConfig.getHaListenPort());
haClient.start();
this.haClientList.add(haClient);
}
private void setUpOneHAClient() throws IOException {
HAClient haClient = new DefaultHAClient(this.defaultMessageStore);
haClient.updateHaMasterAddress("127.0.0.1:" + this.storeConfig.getHaListenPort());
haClient.start();
this.haClientList.add(haClient);
}
private DefaultMessageStore mockMessageStore() throws IOException, RocksDBException {
DefaultMessageStore messageStore = mock(DefaultMessageStore.class);
BrokerConfig brokerConfig = mock(BrokerConfig.class);
doReturn(true).when(brokerConfig).isInBrokerContainer();
doReturn("mock").when(brokerConfig).getIdentifier();
doReturn(brokerConfig).when(messageStore).getBrokerConfig();
doReturn(new SystemClock()).when(messageStore).getSystemClock();
doAnswer(invocation -> System.currentTimeMillis()).when(messageStore).now();
doReturn(this.storeConfig).when(messageStore).getMessageStoreConfig();
doReturn(new BrokerConfig()).when(messageStore).getBrokerConfig();
doReturn(true).when(messageStore).isOffsetAligned(anyLong());
// doReturn(new PutMessageResult(PutMessageStatus.PUT_OK, new AppendMessageResult(AppendMessageStatus.PUT_OK))).when(messageStore).sendMsgBack(anyLong());
doReturn(true).when(messageStore).truncateFiles(anyLong());
DefaultMessageStore masterStore = mock(DefaultMessageStore.class);
doReturn(Long.MAX_VALUE).when(masterStore).getFlushedWhere();
doReturn(masterStore).when(messageStore).getMasterStoreInProcess();
CommitLog commitLog = new CommitLog(messageStore);
doReturn(commitLog).when(messageStore).getCommitLog();
return messageStore;
}
private void tearDownOneHAClient() {
final HAClient haClient = this.haClientList.remove(0);
haClient.shutdown();
}
private void tearDownAllHAClient() {
for (final HAClient client : this.haClientList) {
client.shutdown();
}
this.haClientList.clear();
}
}
| HAServerTest |
java | quarkusio__quarkus | test-framework/junit5-component/src/main/java/io/quarkus/test/component/QuarkusComponentTestExtension.java | {
"start": 9134,
"end": 22058
} | class ____ before any @Nested test classes run
// Therefore we need to discard the existing container here
cleanup(context);
}
initContainer(context);
startContainer(context, Lifecycle.PER_CLASS);
LOG.debugf("beforeAll: %s ms", TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));
}
@Override
public void afterAll(ExtensionContext context) throws Exception {
long start = System.nanoTime();
stopContainer(context, Lifecycle.PER_CLASS);
cleanup(context);
LOG.debugf("afterAll: %s ms", TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));
}
@Override
public void beforeEach(ExtensionContext context) throws Exception {
long start = System.nanoTime();
startContainer(context, Lifecycle.PER_METHOD);
if (getContainerState(context) == ContainerState.STARTED) {
// Activate the request context
Arc.container().requestContext().activate();
}
LOG.debugf("beforeEach: %s ms", TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));
}
@Override
public void afterEach(ExtensionContext context) throws Exception {
long start = System.nanoTime();
if (getContainerState(context) == ContainerState.STARTED) {
// Terminate the request context
Arc.container().requestContext().terminate();
// Destroy @Dependent beans injected as test method parameters correctly
destroyDependentTestMethodParams(context);
}
// Stop the container if Lifecycle.PER_METHOD is used
stopContainer(context, Lifecycle.PER_METHOD);
LOG.debugf("afterEach: %s ms", TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));
}
@Override
public void postProcessTestInstance(Object testInstance, ExtensionContext context) throws Exception {
long start = System.nanoTime();
store(context).put(KEY_TEST_INSTANCE, testInstance);
LOG.debugf("postProcessTestInstance: %s ms", TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));
}
static final Predicate<Parameter> BUILTIN_PARAMETER = new Predicate<Parameter>() {
@Override
public boolean test(Parameter parameter) {
if (parameter.isAnnotationPresent(TempDir.class)) {
return true;
}
java.lang.reflect.Type type = parameter.getParameterizedType();
return type.equals(TestInfo.class) || type.equals(RepetitionInfo.class) || type.equals(TestReporter.class);
}
};
@Override
public boolean supportsParameter(ParameterContext parameterContext, ExtensionContext extensionContext)
throws ParameterResolutionException {
if (
// Target is empty for constructor or static method
parameterContext.getTarget().isPresent()
// Only test methods are supported
&& isTestMethod(parameterContext.getDeclaringExecutable())
// A method/param annotated with @SkipInject is never supported
&& !parameterContext.isAnnotated(SkipInject.class)
&& !parameterContext.getDeclaringExecutable().isAnnotationPresent(SkipInject.class)
// A param annotated with @org.mockito.Mock is never supported
&& !parameterContext.isAnnotated(Mock.class)
// Skip params covered by built-in extensions
&& !BUILTIN_PARAMETER.test(parameterContext.getParameter())) {
BeanManager beanManager = Arc.container().beanManager();
java.lang.reflect.Type requiredType = parameterContext.getParameter().getParameterizedType();
Annotation[] qualifiers = getQualifiers(parameterContext.getAnnotatedElement(), beanManager);
if (isListAllInjectionPoint(requiredType, qualifiers, parameterContext.getParameter())) {
return true;
} else {
try {
Bean<?> bean = beanManager.resolve(beanManager.getBeans(requiredType, qualifiers));
if (bean == null) {
String msg = String.format("No matching bean found for the type [%s] and qualifiers %s",
requiredType, Arrays.toString(qualifiers));
if (parameterContext.isAnnotated(InjectMock.class) || qualifiers.length > 0) {
throw new IllegalStateException(msg);
} else {
LOG.info(msg + " - consider annotating the parameter with @SkipInject");
return false;
}
}
return true;
} catch (AmbiguousResolutionException e) {
String msg = String.format(
"Multiple matching beans found for the type [%s] and qualifiers %s\n\t- if this parameter should not be resolved by CDI then use @SkipInject\n\t- found beans: %s",
requiredType, Arrays.toString(qualifiers), e.getMessage());
throw new IllegalStateException(msg);
}
}
}
return false;
}
@Override
public Object resolveParameter(ParameterContext parameterContext, ExtensionContext context)
throws ParameterResolutionException {
@SuppressWarnings("unchecked")
List<Object> injectedParams = store(context).get(KEY_INJECTED_PARAMS, List.class);
ArcContainer container = Arc.container();
BeanManager beanManager = container.beanManager();
java.lang.reflect.Type requiredType = parameterContext.getParameter().getParameterizedType();
Annotation[] qualifiers = getQualifiers(parameterContext.getAnnotatedElement(), beanManager);
if (Instance.class.isAssignableFrom(parameterContext.getParameter().getType())) {
InstanceImpl<?> instance = InstanceImpl.forGlobalEntrypoint(getFirstActualTypeArgument(requiredType),
Set.of(qualifiers));
injectedParams.add(instance);
return instance;
} else if (isListAllInjectionPoint(requiredType, qualifiers, parameterContext.getParameter())) {
// Special handling for @Inject @All List<>
Collection<InstanceHandle<?>> unsetHandles = new ArrayList<>();
Object ret = handleListAll(requiredType, qualifiers, container, unsetHandles);
unsetHandles.forEach(injectedParams::add);
return ret;
} else {
InstanceHandle<?> handle = container.instance(requiredType, qualifiers);
injectedParams.add(handle);
return handle.get();
}
}
private void destroyDependentTestMethodParams(ExtensionContext context) {
@SuppressWarnings("unchecked")
List<Object> injectedParams = store(context).get(KEY_INJECTED_PARAMS, List.class);
for (Object param : injectedParams) {
if (param instanceof InstanceHandle) {
@SuppressWarnings("resource")
InstanceHandle<?> handle = (InstanceHandle<?>) param;
if (handle.getBean() != null && handle.getBean().getScope().equals(Dependent.class)) {
try {
handle.destroy();
} catch (Exception e) {
LOG.errorf(e, "Unable to destroy the injected %s", handle.getBean());
}
}
} else if (param instanceof InstanceImpl) {
InstanceImpl<?> instance = (InstanceImpl<?>) param;
instance.destroy();
}
}
injectedParams.clear();
}
private void initContainer(ExtensionContext context) {
if (getContainerState(context) != ContainerState.UNINITIALIZED) {
return;
}
QuarkusComponentTestConfiguration testClassConfiguration = baseConfiguration
.update(context.getRequiredTestClass());
store(context).put(KEY_TEST_CLASS_CONFIG, testClassConfiguration);
ClassLoader oldTccl = Thread.currentThread().getContextClassLoader();
Class<?> testClass = context.getRequiredTestClass();
ClassLoader testCl = testClass.getClassLoader();
Thread.currentThread().setContextClassLoader(testCl);
if (testCl instanceof QuarkusComponentTestClassLoader componentCl) {
Map<String, Set<String>> configMappings = componentCl.getConfigMappings();
if (!configMappings.isEmpty()) {
Set<ConfigClass> mappings = new HashSet<>();
for (Entry<String, Set<String>> e : configMappings.entrySet()) {
for (String mapping : e.getValue()) {
mappings.add(configClass(ConfigMappingBeanCreator.tryLoad(mapping), e.getKey()));
}
}
store(context).put(KEY_CONFIG_MAPPINGS, mappings);
}
try {
InterceptorMethodCreator.register(context, componentCl.getInterceptorMethods());
} catch (Exception e) {
throw new IllegalStateException("Unable to register interceptor methods", e);
}
buildFailure.set((Throwable) componentCl.getBuildFailure());
}
for (MockBeanConfiguratorImpl<?> mockBeanConfigurator : testClassConfiguration.mockConfigurators) {
MockBeanCreator.registerCreate(testClass.getName(), cast(mockBeanConfigurator.create));
}
if (buildFailure.get() == null) {
store(context).put(KEY_OLD_TCCL, oldTccl);
setContainerState(context, ContainerState.INITIALIZED);
} else {
setContainerState(context, ContainerState.BUILD_FAILED);
}
}
@SuppressWarnings("unchecked")
private void cleanup(ExtensionContext context) {
if (getContainerState(context).requiresCleanup()) {
ClassLoader oldTccl = store(context).get(KEY_OLD_TCCL, ClassLoader.class);
if (oldTccl != null) {
Thread.currentThread().setContextClassLoader(oldTccl);
}
store(context).remove(KEY_OLD_TCCL);
store(context).remove(KEY_CONFIG_MAPPINGS);
Set<Path> generatedResources = store(context).get(KEY_GENERATED_RESOURCES, Set.class);
if (generatedResources != null) {
for (Path path : generatedResources) {
try {
LOG.debugf("Delete generated %s", path);
Files.deleteIfExists(path);
} catch (IOException e) {
LOG.errorf("Unable to delete the generated resource %s: ", path, e.getMessage());
}
}
}
store(context).remove(KEY_GENERATED_RESOURCES);
setContainerState(context, ContainerState.UNINITIALIZED);
}
}
@SuppressWarnings("unchecked")
private void stopContainer(ExtensionContext context, Lifecycle testInstanceLifecycle) throws Exception {
if (testInstanceLifecycle.equals(context.getTestInstanceLifecycle().orElse(Lifecycle.PER_METHOD))
&& getContainerState(context) == ContainerState.STARTED) {
for (FieldInjector fieldInjector : (List<FieldInjector>) store(context)
.get(KEY_INJECTED_FIELDS, List.class)) {
fieldInjector.unset();
}
try {
Arc.shutdown();
} catch (Exception e) {
LOG.error("An error occured during ArC shutdown: " + e);
}
MockBeanCreator.clear();
ConfigBeanCreator.clear();
InterceptorMethodCreator.clear();
store(context).remove(KEY_CONTAINER_STATE);
SmallRyeConfig config = store(context).get(KEY_CONFIG, SmallRyeConfig.class);
ConfigProviderResolver.instance().releaseConfig(config);
ConfigProviderResolver oldConfigProviderResolver = store(context).get(KEY_OLD_CONFIG_PROVIDER_RESOLVER,
ConfigProviderResolver.class);
ConfigProviderResolver.setInstance(oldConfigProviderResolver);
setContainerState(context, ContainerState.STOPPED);
QuarkusComponentTestConfiguration configuration = store(context).get(KEY_TEST_CLASS_CONFIG,
QuarkusComponentTestConfiguration.class);
if (configuration.hasCallbacks()) {
AfterStopContext afterStopContext = new AfterStopContextImpl(context.getRequiredTestClass());
for (QuarkusComponentTestCallbacks callbacks : configuration.callbacks) {
callbacks.afterStop(afterStopContext);
}
}
}
}
| but |
java | apache__hadoop | hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClosedFS.java | {
"start": 1470,
"end": 3718
} | class ____ extends AbstractS3ATestBase {
private Path root = new Path("/");
@BeforeEach
@Override
public void setup() throws Exception {
super.setup();
root = getFileSystem().makeQualified(new Path("/"));
getFileSystem().close();
}
@AfterEach
@Override
public void teardown() {
// no op, as the FS is closed
}
private static final Set<String> THREAD_SET =
listInitialThreadsForLifecycleChecks();
@AfterAll
public static void checkForThreadLeakage() {
Assertions.assertThat(getCurrentThreadNames())
.describedAs("The threads at the end of the test run")
.isSubsetOf(THREAD_SET);
}
@Test
public void testClosedGetFileStatus() throws Exception {
intercept(IOException.class, E_FS_CLOSED,
() -> getFileSystem().getFileStatus(root));
}
@Test
public void testClosedListStatus() throws Exception {
intercept(IOException.class, E_FS_CLOSED,
() -> getFileSystem().listStatus(root));
}
@Test
public void testClosedListFile() throws Exception {
intercept(IOException.class, E_FS_CLOSED,
() -> getFileSystem().listFiles(root, false));
}
@Test
public void testClosedListLocatedStatus() throws Exception {
intercept(IOException.class, E_FS_CLOSED,
() -> getFileSystem().listLocatedStatus(root));
}
@Test
public void testClosedCreate() throws Exception {
intercept(IOException.class, E_FS_CLOSED,
() -> getFileSystem().create(path("to-create")).close());
}
@Test
public void testClosedDelete() throws Exception {
intercept(IOException.class, E_FS_CLOSED,
() -> getFileSystem().delete(path("to-delete"), false));
}
@Test
public void testClosedOpen() throws Exception {
intercept(IOException.class, E_FS_CLOSED,
() -> getFileSystem().open(path("to-open")));
}
@Test
public void testClosedInstrumentation() throws Exception {
// no metrics
Assertions.assertThat(S3AInstrumentation.hasMetricSystem())
.describedAs("S3AInstrumentation.hasMetricSystem()")
.isFalse();
Assertions.assertThat(getFileSystem().getIOStatistics())
.describedAs("iostatistics of %s", getFileSystem())
.isNotNull();
}
}
| ITestS3AClosedFS |
java | apache__commons-lang | src/test/java/org/apache/commons/lang3/function/FailableTest.java | {
"start": 96200,
"end": 96703
} | interface ____ properly defined to throw any exception using the top level generic types
* Object and Throwable.
*/
@Test
void testThrows_FailableLongFunction_Throwable() {
assertThrows(IOException.class, () -> new FailableLongFunction<Object, Throwable>() {
@Override
public Object apply(final long input) throws Throwable {
throw new IOException("test");
}
}.apply(0));
}
/**
* Tests that our failable | is |
java | spring-projects__spring-boot | module/spring-boot-liquibase/src/test/java/org/springframework/boot/liquibase/autoconfigure/LiquibaseAutoConfigurationTests.java | {
"start": 29503,
"end": 30309
} | class ____ extends org.h2.Driver {
}
@WithResource(name = "db/changelog/db.changelog-master.yaml", content = """
databaseChangeLog:
- changeSet:
id: 1
author: marceloverdijk
changes:
- createTable:
tableName: customer
columns:
- column:
name: id
type: int
autoIncrement: true
constraints:
primaryKey: true
nullable: false
- column:
name: name
type: varchar(50)
constraints:
nullable: false
""")
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.METHOD)
@ | CustomH2Driver |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/jackson/MutableThreadContextStackDeserializer.java | {
"start": 1322,
"end": 1963
} | class ____ extends StdDeserializer<MutableThreadContextStack> {
private static final long serialVersionUID = 1L;
MutableThreadContextStackDeserializer() {
super(MutableThreadContextStack.class);
}
@Override
public MutableThreadContextStack deserialize(final JsonParser jp, final DeserializationContext ctxt)
throws IOException, JsonProcessingException {
final List<String> list = jp.readValueAs(
new TypeReference<List<String>>() {
// empty
});
return new MutableThreadContextStack(list);
}
}
| MutableThreadContextStackDeserializer |
java | quarkusio__quarkus | extensions/security/deployment/src/test/java/io/quarkus/security/test/permissionsallowed/AbstractMethodLevelPermissionsAllowedTest.java | {
"start": 30671,
"end": 31344
} | interface ____ {
String write();
String read();
Uni<String> writeNonBlocking();
Uni<String> readNonBlocking();
void prohibited();
Uni<Void> prohibitedNonBlocking();
String multiple();
Uni<String> multipleNonBlocking();
String multipleActions();
Uni<String> multipleNonBlockingActions();
String combination();
Uni<String> combinationNonBlockingActions();
String combination2();
Uni<String> combination2NonBlockingActions();
User inclusive();
Uni<User> inclusiveNonBlocking();
}
public | PermissionsAllowedNameAndActionsOnlyBeanI |
java | spring-projects__spring-boot | module/spring-boot-cache/src/test/java/org/springframework/boot/cache/autoconfigure/CacheAutoConfigurationTests.java | {
"start": 41961,
"end": 42175
} | class ____ {
@Bean
HazelcastInstance customHazelcastInstance() {
return mock(HazelcastInstance.class);
}
}
@Configuration(proxyBeanMethods = false)
@EnableCaching
static | HazelcastCustomHazelcastInstance |
java | apache__dubbo | dubbo-cluster/src/main/java/org/apache/dubbo/rpc/cluster/configurator/parser/model/ConfiguratorConfig.java | {
"start": 956,
"end": 3309
} | class ____ {
public static final String MATCH_CONDITION = "MATCH_CONDITION";
public static final String SCOPE_SERVICE = "service";
public static final String SCOPE_APPLICATION = "application";
public static final String CONFIG_VERSION_KEY = "configVersion";
public static final String SCOPE_KEY = "scope";
public static final String CONFIG_KEY = "key";
public static final String ENABLED_KEY = "enabled";
public static final String CONFIGS_KEY = "configs";
private String configVersion;
private String scope;
private String key;
private Boolean enabled = true;
private List<ConfigItem> configs;
@SuppressWarnings("unchecked")
public static ConfiguratorConfig parseFromMap(Map<String, Object> map) {
ConfiguratorConfig configuratorConfig = new ConfiguratorConfig();
configuratorConfig.setConfigVersion((String) map.get(CONFIG_VERSION_KEY));
configuratorConfig.setScope((String) map.get(SCOPE_KEY));
configuratorConfig.setKey((String) map.get(CONFIG_KEY));
Object enabled = map.get(ENABLED_KEY);
if (enabled != null) {
configuratorConfig.setEnabled(Boolean.parseBoolean(enabled.toString()));
}
Object configs = map.get(CONFIGS_KEY);
if (configs != null && List.class.isAssignableFrom(configs.getClass())) {
configuratorConfig.setConfigs(((List<Map<String, Object>>) configs)
.stream().map(ConfigItem::parseFromMap).collect(Collectors.toList()));
}
return configuratorConfig;
}
public String getConfigVersion() {
return configVersion;
}
public void setConfigVersion(String configVersion) {
this.configVersion = configVersion;
}
public String getScope() {
return scope;
}
public void setScope(String scope) {
this.scope = scope;
}
public String getKey() {
return key;
}
public void setKey(String key) {
this.key = key;
}
public Boolean getEnabled() {
return enabled;
}
public void setEnabled(Boolean enabled) {
this.enabled = enabled;
}
public List<ConfigItem> getConfigs() {
return configs;
}
public void setConfigs(List<ConfigItem> configs) {
this.configs = configs;
}
}
| ConfiguratorConfig |
java | spring-projects__spring-security | core/src/main/java/org/springframework/security/authorization/AuthoritiesAuthorizationManager.java | {
"start": 1407,
"end": 3245
} | class ____ implements AuthorizationManager<Collection<String>> {
private RoleHierarchy roleHierarchy = new NullRoleHierarchy();
/**
* Sets the {@link RoleHierarchy} to be used. Default is {@link NullRoleHierarchy}.
* Cannot be null.
* @param roleHierarchy the {@link RoleHierarchy} to use
*/
public void setRoleHierarchy(RoleHierarchy roleHierarchy) {
Assert.notNull(roleHierarchy, "roleHierarchy cannot be null");
this.roleHierarchy = roleHierarchy;
}
/**
* Determines if the current user is authorized by evaluating if the
* {@link Authentication} contains any of specified authorities.
* @param authentication the {@link Supplier} of the {@link Authentication} to check
* @param authorities the collection of authority strings to check
* @return an {@link AuthorityAuthorizationDecision}
*/
@Override
public AuthorizationResult authorize(Supplier<? extends @Nullable Authentication> authentication,
Collection<String> authorities) {
boolean granted = isGranted(authentication.get(), authorities);
return new AuthorityAuthorizationDecision(granted, AuthorityUtils.createAuthorityList(authorities));
}
private boolean isGranted(Authentication authentication, Collection<String> authorities) {
return authentication != null && isAuthorized(authentication, authorities);
}
private boolean isAuthorized(Authentication authentication, Collection<String> authorities) {
for (GrantedAuthority grantedAuthority : getGrantedAuthorities(authentication)) {
if (authorities.contains(grantedAuthority.getAuthority())) {
return true;
}
}
return false;
}
private Collection<? extends GrantedAuthority> getGrantedAuthorities(Authentication authentication) {
return this.roleHierarchy.getReachableGrantedAuthorities(authentication.getAuthorities());
}
}
| AuthoritiesAuthorizationManager |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableOnErrorReturnTest.java | {
"start": 6374,
"end": 9359
} | class ____ implements Publisher<String> {
final String[] values;
Thread t;
TestFlowable(String... values) {
this.values = values;
}
@Override
public void subscribe(final Subscriber<? super String> subscriber) {
subscriber.onSubscribe(new BooleanSubscription());
System.out.println("TestFlowable subscribed to ...");
t = new Thread(new Runnable() {
@Override
public void run() {
try {
System.out.println("running TestFlowable thread");
for (String s : values) {
System.out.println("TestFlowable onNext: " + s);
subscriber.onNext(s);
}
throw new RuntimeException("Forced Failure");
} catch (Throwable e) {
subscriber.onError(e);
}
}
});
System.out.println("starting TestFlowable thread");
t.start();
System.out.println("done starting TestFlowable thread");
}
}
@Test
public void normalBackpressure() {
TestSubscriber<Integer> ts = TestSubscriber.create(0);
PublishProcessor<Integer> pp = PublishProcessor.create();
pp.onErrorReturn(new Function<Throwable, Integer>() {
@Override
public Integer apply(Throwable e) {
return 3;
}
}).subscribe(ts);
ts.request(2);
pp.onNext(1);
pp.onNext(2);
pp.onError(new TestException("Forced failure"));
ts.assertValues(1, 2);
ts.assertNoErrors();
ts.assertNotComplete();
ts.request(2);
ts.assertValues(1, 2, 3);
ts.assertNoErrors();
ts.assertComplete();
}
@Test
public void returnItem() {
Flowable.error(new TestException())
.onErrorReturnItem(1)
.test()
.assertResult(1);
}
@Test
public void dispose() {
TestHelper.checkDisposed(Flowable.just(1).onErrorReturnItem(1));
}
@Test
public void doubleOnSubscribe() {
TestHelper.checkDoubleOnSubscribeFlowable(new Function<Flowable<Object>, Flowable<Object>>() {
@Override
public Flowable<Object> apply(Flowable<Object> f) throws Exception {
return f.onErrorReturnItem(1);
}
});
}
@Test
public void doubleOnError() {
new Flowable<Integer>() {
@Override
protected void subscribeActual(Subscriber<? super Integer> s) {
s.onSubscribe(new BooleanSubscription());
s.onError(new TestException());
s.onError(new TestException());
}
}
.onErrorReturnItem(1)
.test()
.assertResult(1);
}
}
| TestFlowable |
java | apache__commons-lang | src/test/java/org/apache/commons/lang3/ValidateTest.java | {
"start": 38487,
"end": 39740
} | class ____ {
@Test
void shouldNotThrowExceptionForNonEmptyCollection() {
Validate.noNullElements(Collections.singleton("a"));
}
@Test
void shouldReturnSameInstance() {
final Set<String> col = Collections.singleton("a");
assertSame(col, Validate.noNullElements(col));
}
@Test
void shouldThrowIllegalArgumentExceptionWithDefaultMessageForCollectionWithNullElement() {
final IllegalArgumentException ex = assertIllegalArgumentException(
() -> Validate.noNullElements(Collections.singleton(null)));
assertEquals("The validated collection contains null element at index: 0", ex.getMessage());
}
@Test
void shouldThrowNullPointerExceptionWithDefaultMessageForNullCollection() {
final NullPointerException ex = assertNullPointerException(() -> Validate.noNullElements((Collection<?>) null));
assertEquals("iterable", ex.getMessage());
}
}
}
}
@Nested
final | WithoutMessage |
java | apache__rocketmq | broker/src/test/java/org/apache/rocketmq/broker/offset/RocksdbTransferOffsetAndCqTest.java | {
"start": 2394,
"end": 7730
} | class ____ {
private final String basePath = Paths.get(System.getProperty("user.home"),
"unit-test-store", UUID.randomUUID().toString().substring(0, 16).toUpperCase()).toString();
private final String topic = "topic";
private final String group = "group";
private final String clientHost = "clientHost";
private final int queueId = 1;
private RocksDBConsumerOffsetManager rocksdbConsumerOffsetManager;
private ConsumerOffsetManager consumerOffsetManager;
private DefaultMessageStore defaultMessageStore;
@Mock
private BrokerController brokerController;
@Before
public void init() throws IOException {
if (notToBeExecuted()) {
return;
}
BrokerConfig brokerConfig = new BrokerConfig();
brokerConfig.setConsumerOffsetUpdateVersionStep(10);
MessageStoreConfig messageStoreConfig = new MessageStoreConfig();
messageStoreConfig.setStorePathRootDir(basePath);
messageStoreConfig.setRocksdbCQDoubleWriteEnable(true);
Mockito.lenient().when(brokerController.getBrokerConfig()).thenReturn(brokerConfig);
Mockito.lenient().when(brokerController.getMessageStoreConfig()).thenReturn(messageStoreConfig);
defaultMessageStore = new DefaultMessageStore(messageStoreConfig, new BrokerStatsManager("for-test", true), null,
brokerConfig, new ConcurrentHashMap<>());
defaultMessageStore.loadCheckPoint();
consumerOffsetManager = new ConsumerOffsetManager(brokerController);
consumerOffsetManager.load();
rocksdbConsumerOffsetManager = new RocksDBConsumerOffsetManager(brokerController);
}
@Test
public void testTransferOffset() {
if (notToBeExecuted()) {
return;
}
for (int i = 0; i < 200; i++) {
consumerOffsetManager.commitOffset(clientHost, group, topic, queueId, i);
}
ConcurrentMap<String, ConcurrentMap<Integer, Long>> offsetTable = consumerOffsetManager.getOffsetTable();
ConcurrentMap<Integer, Long> map = offsetTable.get(topic + "@" + group);
Assert.assertTrue(MapUtils.isNotEmpty(map));
Long offset = map.get(queueId);
Assert.assertEquals(199L, (long) offset);
long offsetDataVersion = consumerOffsetManager.getDataVersion().getCounter().get();
Assert.assertEquals(20L, offsetDataVersion);
consumerOffsetManager.persist();
boolean loadResult = rocksdbConsumerOffsetManager.load();
Assert.assertTrue(loadResult);
ConcurrentMap<String, ConcurrentMap<Integer, Long>> rocksdbOffsetTable = rocksdbConsumerOffsetManager.getOffsetTable();
ConcurrentMap<Integer, Long> rocksdbMap = rocksdbOffsetTable.get(topic + "@" + group);
Assert.assertTrue(MapUtils.isNotEmpty(rocksdbMap));
Long aLong1 = rocksdbMap.get(queueId);
Assert.assertEquals(199L, (long) aLong1);
long rocksdbOffset = rocksdbConsumerOffsetManager.getDataVersion().getCounter().get();
Assert.assertEquals(21L, rocksdbOffset);
}
@Test
public void testRocksdbCqWrite() throws RocksDBException {
if (notToBeExecuted()) {
return;
}
long startTimestamp = System.currentTimeMillis();
ConsumeQueueStoreInterface combineConsumeQueueStore = defaultMessageStore.getQueueStore();
Assert.assertTrue(combineConsumeQueueStore instanceof CombineConsumeQueueStore);
combineConsumeQueueStore.load();
combineConsumeQueueStore.recover(false);
combineConsumeQueueStore.start();
RocksDBConsumeQueueStore rocksDBConsumeQueueStore = ((CombineConsumeQueueStore) combineConsumeQueueStore).getRocksDBConsumeQueueStore();
ConsumeQueueStore consumeQueueStore = ((CombineConsumeQueueStore) combineConsumeQueueStore).getConsumeQueueStore();
for (int i = 0; i < 200; i++) {
DispatchRequest request = new DispatchRequest(topic, queueId, i, 200, 0, System.currentTimeMillis(), i, "", "", 0, 0, new HashMap<>());
combineConsumeQueueStore.putMessagePositionInfoWrapper(request);
}
ConsumeQueueInterface rocksdbCq = rocksDBConsumeQueueStore.findOrCreateConsumeQueue(topic, queueId);
ConsumeQueueInterface fileCq = consumeQueueStore.findOrCreateConsumeQueue(topic, queueId);
Awaitility.await()
.pollInterval(100, TimeUnit.MILLISECONDS)
.atMost(3, TimeUnit.SECONDS)
.until(() -> rocksdbCq.getMaxOffsetInQueue() == 200);
Pair<CqUnit, Long> unit = rocksdbCq.getCqUnitAndStoreTime(100);
Pair<CqUnit, Long> unit1 = fileCq.getCqUnitAndStoreTime(100);
Assert.assertEquals(unit.getObject1().getPos(), unit1.getObject1().getPos());
CheckRocksdbCqWriteResult result = ((CombineConsumeQueueStore) combineConsumeQueueStore).doCheckCqWriteProgress(topic, startTimestamp, StoreType.DEFAULT, StoreType.DEFAULT_ROCKSDB);
Assert.assertEquals(CheckRocksdbCqWriteResult.CheckStatus.CHECK_OK.getValue(), result.getCheckStatus());
}
// /**
// * No need to skip macOS platform.
// * @return true if some platform is NOT a good fit for this test case.
// */
private boolean notToBeExecuted() {
return MixAll.isMac();
}
}
| RocksdbTransferOffsetAndCqTest |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/functions/casting/IntervalToStringCastRule.java | {
"start": 1555,
"end": 2469
} | class ____ extends AbstractCharacterFamilyTargetRule<Object> {
static final IntervalToStringCastRule INSTANCE = new IntervalToStringCastRule();
private IntervalToStringCastRule() {
super(
CastRulePredicate.builder()
.input(LogicalTypeFamily.INTERVAL)
.target(STRING_TYPE)
.build());
}
@Override
public String generateStringExpression(
CodeGeneratorCastRule.Context context,
String inputTerm,
LogicalType inputLogicalType,
LogicalType targetLogicalType) {
final Method method =
inputLogicalType.is(LogicalTypeRoot.INTERVAL_YEAR_MONTH)
? INTERVAL_YEAR_MONTH_TO_STRING()
: INTERVAL_DAY_TIME_TO_STRING();
return staticCall(method, inputTerm);
}
}
| IntervalToStringCastRule |
java | apache__camel | components/camel-jsonpath/src/test/java/org/apache/camel/jsonpath/JsonPathSimpleTransformTest.java | {
"start": 1035,
"end": 2132
} | class ____ extends CamelTestSupport {
private static String EXPECTED = """
{
"roll": 123,
"years": 42,
"fullname": "scott"
}""";
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start")
.transform().simple("""
{
"roll": ${jsonpath($.id)},
"years": ${jsonpath($.age)},
"fullname": "${jsonpath($.name)}"
}""")
.to("mock:result");
}
};
}
@Test
public void testTransform() throws Exception {
getMockEndpoint("mock:result").expectedBodiesReceived(EXPECTED);
template.sendBody("direct:start", "{\"id\": 123, \"age\": 42, \"name\": \"scott\"}");
MockEndpoint.assertIsSatisfied(context);
}
}
| JsonPathSimpleTransformTest |
java | apache__rocketmq | store/src/main/java/org/apache/rocketmq/store/ha/autoswitch/AutoSwitchHAClient.java | {
"start": 1906,
"end": 20120
} | class ____ extends ServiceThread implements HAClient {
/**
* Handshake header buffer size. Schema: state ordinal + Two flags + slaveBrokerId. Format:
*
* <pre>
* ┌──────────────────┬───────────────┐
* │isSyncFromLastFile│ isAsyncLearner│
* │ (2bytes) │ (2bytes) │
* └──────────────────┴───────────────┘
* \ /
* \ /
* ╲ /
* ╲ /
* ┌───────────────────────┬───────────────────────┬───────────────────────┐
* │ current state │ Flags │ slaveBrokerId │
* │ (4bytes) │ (4bytes) │ (8bytes) │
* ├───────────────────────┴───────────────────────┴───────────────────────┤
* │ │
* │ HANDSHAKE Header │
* </pre>
* <p>
* Flag: isSyncFromLastFile(short), isAsyncLearner(short)... we can add more flags in the future if needed
*/
public static final int HANDSHAKE_HEADER_SIZE = 4 + 4 + 8;
/**
* Header + slaveAddress, Format:
* <pre>
* ┌──────────────────┬───────────────┐
* │isSyncFromLastFile│ isAsyncLearner│
* │ (2bytes) │ (2bytes) │
* └──────────────────┴───────────────┘
* \ /
* \ /
* ╲ /
* ╲ /
* ┌───────────────────────┬───────────────────────┬───────────────────────┬───────────────────────────────┐
* │ current state │ Flags │ slaveAddressLength │ slaveAddress │
* │ (4bytes) │ (4bytes) │ (4bytes) │ (50bytes) │
* ├───────────────────────┴───────────────────────┴───────────────────────┼───────────────────────────────┤
* │ │ │
* │ HANDSHAKE Header │ body │
* </pre>
*/
@Deprecated
public static final int HANDSHAKE_SIZE = HANDSHAKE_HEADER_SIZE + 50;
/**
* Transfer header buffer size. Schema: state ordinal + maxOffset. Format:
* <pre>
* ┌───────────────────────┬───────────────────────┐
* │ current state │ maxOffset │
* │ (4bytes) │ (8bytes) │
* ├───────────────────────┴───────────────────────┤
* │ │
* │ TRANSFER Header │
* </pre>
*/
public static final int TRANSFER_HEADER_SIZE = 4 + 8;
public static final int MIN_HEADER_SIZE = Math.min(HANDSHAKE_HEADER_SIZE, TRANSFER_HEADER_SIZE);
private static final Logger LOGGER = LoggerFactory.getLogger(LoggerName.STORE_LOGGER_NAME);
private static final int READ_MAX_BUFFER_SIZE = 1024 * 1024 * 4;
private final AtomicReference<String> masterHaAddress = new AtomicReference<>();
private final AtomicReference<String> masterAddress = new AtomicReference<>();
private final ByteBuffer handshakeHeaderBuffer = ByteBuffer.allocate(HANDSHAKE_HEADER_SIZE);
private final ByteBuffer transferHeaderBuffer = ByteBuffer.allocate(TRANSFER_HEADER_SIZE);
private final AutoSwitchHAService haService;
private final ByteBuffer byteBufferRead = ByteBuffer.allocate(READ_MAX_BUFFER_SIZE);
private final DefaultMessageStore messageStore;
private final EpochFileCache epochCache;
private final Long brokerId;
private SocketChannel socketChannel;
private Selector selector;
private AbstractHAReader haReader;
private HAWriter haWriter;
private FlowMonitor flowMonitor;
/**
* last time that slave reads date from master.
*/
private long lastReadTimestamp;
/**
* last time that slave reports offset to master.
*/
private long lastWriteTimestamp;
private long currentReportedOffset;
private int processPosition;
private volatile HAConnectionState currentState;
/**
* Current epoch
*/
private volatile int currentReceivedEpoch;
public AutoSwitchHAClient(AutoSwitchHAService haService, DefaultMessageStore defaultMessageStore,
EpochFileCache epochCache, Long brokerId) throws IOException {
this.haService = haService;
this.messageStore = defaultMessageStore;
this.epochCache = epochCache;
this.brokerId = brokerId;
init();
}
public void init() throws IOException {
this.selector = NetworkUtil.openSelector();
this.flowMonitor = new FlowMonitor(this.messageStore.getMessageStoreConfig());
this.haReader = new HAClientReader();
haReader.registerHook(readSize -> {
if (readSize > 0) {
AutoSwitchHAClient.this.flowMonitor.addByteCountTransferred(readSize);
lastReadTimestamp = System.currentTimeMillis();
}
});
this.haWriter = new HAWriter();
haWriter.registerHook(writeSize -> {
if (writeSize > 0) {
lastWriteTimestamp = System.currentTimeMillis();
}
});
changeCurrentState(HAConnectionState.READY);
this.currentReceivedEpoch = -1;
this.currentReportedOffset = 0;
this.processPosition = 0;
this.lastReadTimestamp = System.currentTimeMillis();
this.lastWriteTimestamp = System.currentTimeMillis();
}
public void reOpen() throws IOException {
shutdown();
init();
}
@Override
public String getServiceName() {
if (haService.getDefaultMessageStore().getBrokerConfig().isInBrokerContainer()) {
return haService.getDefaultMessageStore().getBrokerIdentity().getIdentifier() + AutoSwitchHAClient.class.getSimpleName();
}
return AutoSwitchHAClient.class.getSimpleName();
}
@Override
public void updateMasterAddress(String newAddress) {
String currentAddr = this.masterAddress.get();
if (!StringUtils.equals(newAddress, currentAddr) && masterAddress.compareAndSet(currentAddr, newAddress)) {
LOGGER.info("update master address, OLD: " + currentAddr + " NEW: " + newAddress);
}
}
@Override
public void updateHaMasterAddress(String newAddress) {
String currentAddr = this.masterHaAddress.get();
if (!StringUtils.equals(newAddress, currentAddr) && masterHaAddress.compareAndSet(currentAddr, newAddress)) {
LOGGER.info("update master ha address, OLD: " + currentAddr + " NEW: " + newAddress);
wakeup();
}
}
@Override
public String getMasterAddress() {
return this.masterAddress.get();
}
@Override
public String getHaMasterAddress() {
return this.masterHaAddress.get();
}
@Override
public long getLastReadTimestamp() {
return this.lastReadTimestamp;
}
@Override
public long getLastWriteTimestamp() {
return this.lastWriteTimestamp;
}
@Override
public HAConnectionState getCurrentState() {
return this.currentState;
}
@Override
public void changeCurrentState(HAConnectionState haConnectionState) {
LOGGER.info("change state to {}", haConnectionState);
this.currentState = haConnectionState;
}
public void closeMasterAndWait() {
this.closeMaster();
this.waitForRunning(1000 * 5);
}
@Override
public void closeMaster() {
if (null != this.socketChannel) {
try {
SelectionKey sk = this.socketChannel.keyFor(this.selector);
if (sk != null) {
sk.cancel();
}
this.socketChannel.close();
this.socketChannel = null;
LOGGER.info("AutoSwitchHAClient close connection with master {}", this.masterHaAddress.get());
this.changeCurrentState(HAConnectionState.READY);
} catch (IOException e) {
LOGGER.warn("CloseMaster exception. ", e);
}
this.lastReadTimestamp = 0;
this.processPosition = 0;
this.byteBufferRead.position(0);
this.byteBufferRead.limit(READ_MAX_BUFFER_SIZE);
}
}
@Override
public long getTransferredByteInSecond() {
return this.flowMonitor.getTransferredByteInSecond();
}
@Override
public void shutdown() {
changeCurrentState(HAConnectionState.SHUTDOWN);
// Shutdown thread firstly
this.flowMonitor.shutdown();
super.shutdown();
closeMaster();
try {
this.selector.close();
} catch (IOException e) {
LOGGER.warn("Close the selector of AutoSwitchHAClient error, ", e);
}
}
private boolean isTimeToReportOffset() {
long interval = this.messageStore.now() - this.lastWriteTimestamp;
return interval > this.messageStore.getMessageStoreConfig().getHaSendHeartbeatInterval();
}
private boolean sendHandshakeHeader() throws IOException {
this.handshakeHeaderBuffer.position(0);
this.handshakeHeaderBuffer.limit(HANDSHAKE_HEADER_SIZE);
// Original state
this.handshakeHeaderBuffer.putInt(HAConnectionState.HANDSHAKE.ordinal());
// IsSyncFromLastFile
short isSyncFromLastFile = this.haService.getDefaultMessageStore().getMessageStoreConfig().isSyncFromLastFile() ? (short) 1 : (short) 0;
this.handshakeHeaderBuffer.putShort(isSyncFromLastFile);
// IsAsyncLearner role
short isAsyncLearner = this.haService.getDefaultMessageStore().getMessageStoreConfig().isAsyncLearner() ? (short) 1 : (short) 0;
this.handshakeHeaderBuffer.putShort(isAsyncLearner);
// Slave brokerId
this.handshakeHeaderBuffer.putLong(this.brokerId);
this.handshakeHeaderBuffer.flip();
return this.haWriter.write(this.socketChannel, this.handshakeHeaderBuffer);
}
private void handshakeWithMaster() throws IOException {
boolean result = this.sendHandshakeHeader();
if (!result) {
closeMasterAndWait();
}
this.selector.select(5000);
result = this.haReader.read(this.socketChannel, this.byteBufferRead);
if (!result) {
closeMasterAndWait();
}
}
private boolean reportSlaveOffset(HAConnectionState currentState, final long offsetToReport) throws IOException {
this.transferHeaderBuffer.position(0);
this.transferHeaderBuffer.limit(TRANSFER_HEADER_SIZE);
this.transferHeaderBuffer.putInt(currentState.ordinal());
this.transferHeaderBuffer.putLong(offsetToReport);
this.transferHeaderBuffer.flip();
return this.haWriter.write(this.socketChannel, this.transferHeaderBuffer);
}
private boolean reportSlaveMaxOffset(HAConnectionState currentState) throws IOException {
boolean result = true;
final long maxPhyOffset = this.messageStore.getMaxPhyOffset();
if (maxPhyOffset > this.currentReportedOffset) {
this.currentReportedOffset = maxPhyOffset;
result = reportSlaveOffset(currentState, this.currentReportedOffset);
}
return result;
}
public boolean connectMaster() throws IOException {
if (null == this.socketChannel) {
String addr = this.masterHaAddress.get();
if (StringUtils.isNotEmpty(addr)) {
SocketAddress socketAddress = NetworkUtil.string2SocketAddress(addr);
this.socketChannel = RemotingHelper.connect(socketAddress);
if (this.socketChannel != null) {
this.socketChannel.register(this.selector, SelectionKey.OP_READ);
LOGGER.info("AutoSwitchHAClient connect to master {}", addr);
changeCurrentState(HAConnectionState.HANDSHAKE);
}
}
this.currentReportedOffset = this.messageStore.getMaxPhyOffset();
this.lastReadTimestamp = System.currentTimeMillis();
}
return this.socketChannel != null;
}
private boolean transferFromMaster() throws IOException {
boolean result;
if (isTimeToReportOffset()) {
LOGGER.info("Slave report current offset {}", this.currentReportedOffset);
result = reportSlaveOffset(HAConnectionState.TRANSFER, this.currentReportedOffset);
if (!result) {
return false;
}
}
this.selector.select(1000);
result = this.haReader.read(this.socketChannel, this.byteBufferRead);
if (!result) {
return false;
}
return this.reportSlaveMaxOffset(HAConnectionState.TRANSFER);
}
@Override
public void run() {
LOGGER.info(this.getServiceName() + " service started");
this.flowMonitor.start();
while (!this.isStopped()) {
try {
switch (this.currentState) {
case SHUTDOWN:
this.flowMonitor.shutdown(true);
return;
case READY:
// Truncate invalid msg first
final long truncateOffset = AutoSwitchHAClient.this.haService.truncateInvalidMsg();
if (truncateOffset >= 0) {
AutoSwitchHAClient.this.epochCache.truncateSuffixByOffset(truncateOffset);
}
if (!connectMaster()) {
LOGGER.warn("AutoSwitchHAClient connect to master {} failed", this.masterHaAddress.get());
waitForRunning(1000 * 5);
}
continue;
case HANDSHAKE:
handshakeWithMaster();
continue;
case TRANSFER:
if (!transferFromMaster()) {
closeMasterAndWait();
continue;
}
break;
case SUSPEND:
default:
waitForRunning(1000 * 5);
continue;
}
long interval = this.messageStore.now() - this.lastReadTimestamp;
if (interval > this.messageStore.getMessageStoreConfig().getHaHousekeepingInterval()) {
LOGGER.warn("AutoSwitchHAClient, housekeeping, found this connection[" + this.masterHaAddress
+ "] expired, " + interval);
closeMaster();
LOGGER.warn("AutoSwitchHAClient, master not response some time, so close connection");
}
} catch (Exception e) {
LOGGER.warn(this.getServiceName() + " service has exception. ", e);
closeMasterAndWait();
}
}
this.flowMonitor.shutdown(true);
LOGGER.info(this.getServiceName() + " service end");
}
/**
* Compare the master and slave's epoch file, find consistent point, do truncate.
*/
private boolean doTruncate(List<EpochEntry> masterEpochEntries, long masterEndOffset) throws Exception {
if (this.epochCache.getEntrySize() == 0) {
// If epochMap is empty, means the broker is a new replicas
LOGGER.info("Slave local epochCache is empty, skip truncate log");
changeCurrentState(HAConnectionState.TRANSFER);
this.currentReportedOffset = 0;
} else {
final EpochFileCache masterEpochCache = new EpochFileCache();
masterEpochCache.initCacheFromEntries(masterEpochEntries);
masterEpochCache.setLastEpochEntryEndOffset(masterEndOffset);
final List<EpochEntry> localEpochEntries = this.epochCache.getAllEntries();
final EpochFileCache localEpochCache = new EpochFileCache();
localEpochCache.initCacheFromEntries(localEpochEntries);
localEpochCache.setLastEpochEntryEndOffset(this.messageStore.getMaxPhyOffset());
LOGGER.info("master epoch entries is {}", masterEpochCache.getAllEntries());
LOGGER.info("local epoch entries is {}", localEpochEntries);
final long truncateOffset = localEpochCache.findConsistentPoint(masterEpochCache);
LOGGER.info("truncateOffset is {}", truncateOffset);
if (truncateOffset < 0) {
// If truncateOffset < 0, means we can't find a consistent point
LOGGER.error("Failed to find a consistent point between masterEpoch:{} and slaveEpoch:{}", masterEpochEntries, localEpochEntries);
return false;
}
if (!this.messageStore.truncateFiles(truncateOffset)) {
LOGGER.error("Failed to truncate slave log to {}", truncateOffset);
return false;
}
this.epochCache.truncateSuffixByOffset(truncateOffset);
LOGGER.info("Truncate slave log to {} success, change to transfer state", truncateOffset);
changeCurrentState(HAConnectionState.TRANSFER);
this.currentReportedOffset = truncateOffset;
}
if (!reportSlaveMaxOffset(HAConnectionState.TRANSFER)) {
LOGGER.error("AutoSwitchHAClient report max offset to master failed");
return false;
}
return true;
}
| AutoSwitchHAClient |
java | apache__hadoop | hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/commit/mapred/JobSuite.java | {
"start": 2224,
"end": 9310
} | class ____ extends BaseJobSuite {
private final JobContext jobContext;
private final TaskAttemptContext taskAttemptContext;
private final Committer committer;
private JobSuite(FileSystem fs, JobConf conf,
TaskAttemptID taskAttemptId, int appAttemptId, Path outputPath)
throws IOException {
setFs(fs);
// Initialize the job instance.
setJob(Job.getInstance(conf));
job().setJobID(JobID.forName(CommitUtils.buildJobId(conf, taskAttemptId.getJobID())));
this.jobContext = createJobContext(conf, taskAttemptId);
this.taskAttemptContext = createTaskAttemptContext(conf, taskAttemptId, appAttemptId);
setJobId(CommitUtils.buildJobId(jobContext));
// Set job output directory.
FileOutputFormat.setOutputPath(conf, outputPath);
setOutputPath(outputPath);
setObjectStorage(ObjectStorageFactory.create(outputPath.toUri().getScheme(),
outputPath.toUri().getAuthority(), conf));
// Initialize committer.
this.committer = new Committer();
this.committer.setupTask(taskAttemptContext);
}
public static JobSuite create(Configuration conf, TaskAttemptID taskAttemptId, Path outDir)
throws IOException {
FileSystem fs = outDir.getFileSystem(conf);
return new JobSuite(fs, new JobConf(conf), taskAttemptId, DEFAULT_APP_ATTEMPT_ID, outDir);
}
public static TaskAttemptID createTaskAttemptId(String trimmedJobId, int attemptId) {
String attempt = String.format("attempt_%s_m_000000_%d", trimmedJobId, attemptId);
return TaskAttemptID.forName(attempt);
}
public static JobContext createJobContext(JobConf jobConf, TaskAttemptID taskAttemptId) {
return new JobContextImpl(jobConf, taskAttemptId.getJobID());
}
public static TaskAttemptContext createTaskAttemptContext(
JobConf jobConf, TaskAttemptID taskAttemptId, int appAttemptId) throws IOException {
// Set the key values for job configuration.
jobConf.set(MRJobConfig.TASK_ATTEMPT_ID, taskAttemptId.toString());
jobConf.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID, appAttemptId);
jobConf.set("mapred.output.committer.class",
Committer.class.getName()); // 2x and 3x newApiCommitter=false.
return new TaskAttemptContextImpl(jobConf, taskAttemptId);
}
public void setupJob() throws IOException {
committer.setupJob(jobContext);
}
public void setupTask() throws IOException {
committer.setupTask(taskAttemptContext);
}
// This method simulates the scenario that the job may set up task with a different
// taskAttemptContext, e.g., for a spark job.
public void setupTask(TaskAttemptContext taskAttemptCxt) throws IOException {
committer.setupTask(taskAttemptCxt);
}
public void writeOutput() throws Exception {
writeOutput(taskAttemptContext);
}
// This method simulates the scenario that the job may set up task with a different
// taskAttemptContext, e.g., for a spark job.
public void writeOutput(TaskAttemptContext taskAttemptCxt) throws Exception {
RecordWriter<Object, Object> writer = new TextOutputFormat<>().getRecordWriter(fs(),
taskAttemptCxt.getJobConf(),
CommitUtils.buildJobId(taskAttemptCxt),
taskAttemptCxt.getProgressible());
NullWritable nullKey = NullWritable.get();
NullWritable nullVal = NullWritable.get();
Object[] keys = new Object[]{KEY_1, nullKey, null, nullKey, null, KEY_2};
Object[] vals = new Object[]{VAL_1, nullVal, null, null, nullVal, VAL_2};
try {
assertEquals(keys.length, vals.length);
for (int i = 0; i < keys.length; i++) {
writer.write(keys[i], vals[i]);
}
} finally {
writer.close(Reporter.NULL);
}
}
public boolean needsTaskCommit() throws IOException {
return committer.needsTaskCommit(taskAttemptContext);
}
public void commitTask() throws IOException {
committer.commitTask(taskAttemptContext);
}
// This method simulates the scenario that the job may set up task with a different
// taskAttemptContext, e.g., for a spark job.
public void commitTask(TaskAttemptContext taskAttemptCxt) throws IOException {
committer.commitTask(taskAttemptCxt);
}
public void abortTask() throws IOException {
committer.abortTask(taskAttemptContext);
}
public void commitJob() throws IOException {
committer.commitJob(jobContext);
}
@Override
public Path magicPartPath() {
return new Path(committer.getWorkPath(), committer.jobId());
}
@Override
public Path magicPendingSetPath() {
return CommitUtils.magicTaskPendingSetPath(taskAttemptContext, outputPath());
}
public TaskAttemptContext taskAttemptContext() {
return taskAttemptContext;
}
public Committer committer() {
return committer;
}
@Override
public void assertNoTaskAttemptPath() throws IOException {
Path path = CommitUtils.magicTaskAttemptBasePath(taskAttemptContext, outputPath());
assertFalse(fs().exists(path), "Task attempt path should be not existing");
String pathToKey = ObjectUtils.pathToKey(path);
assertNull(storage().head(pathToKey), "Should have no task attempt path key");
}
@Override
protected boolean skipTests() {
return storage().bucket().isDirectory();
}
@Override
public void assertSuccessMarker() throws IOException {
Path succPath = CommitUtils.successMarker(outputPath());
assertTrue(fs().exists(succPath), String.format("%s should be exists", succPath));
SuccessData successData = SuccessData.deserialize(CommitUtils.load(fs(), succPath));
assertEquals(SuccessData.class.getName(), successData.name());
assertTrue(successData.success());
assertEquals(NetUtils.getHostname(), successData.hostname());
assertEquals(CommitUtils.COMMITTER_NAME, successData.committer());
assertEquals(
String.format("Task committer %s", taskAttemptContext.getTaskAttemptID()),
successData.description());
assertEquals(job().getJobID().toString(), successData.jobId());
assertEquals(1, successData.filenames().size());
assertEquals(destPartKey(), successData.filenames().get(0));
}
@Override
public void assertSummaryReport(Path reportDir) throws IOException {
Path reportPath = CommitUtils.summaryReport(reportDir, job().getJobID().toString());
assertTrue(fs().exists(reportPath), String.format("%s should be exists", reportPath));
SuccessData reportData = SuccessData.deserialize(CommitUtils.load(fs(), reportPath));
assertEquals(SuccessData.class.getName(), reportData.name());
assertTrue(reportData.success());
assertEquals(NetUtils.getHostname(), reportData.hostname());
assertEquals(CommitUtils.COMMITTER_NAME, reportData.committer());
assertEquals(String.format("Task committer %s", taskAttemptContext.getTaskAttemptID()),
reportData.description());
assertEquals(job().getJobID().toString(), reportData.jobId());
assertEquals(1, reportData.filenames().size());
assertEquals(destPartKey(), reportData.filenames().get(0));
assertEquals("clean", reportData.diagnostics().get("stage"));
}
}
| JobSuite |
java | junit-team__junit5 | documentation/src/test/java/example/defaultmethods/ComparableContract.java | {
"start": 533,
"end": 1177
} | interface ____<T extends Comparable<T>> extends Testable<T> {
T createSmallerValue();
@Test
default void returnsZeroWhenComparedToItself() {
T value = createValue();
assertEquals(0, value.compareTo(value));
}
@Test
default void returnsPositiveNumberWhenComparedToSmallerValue() {
T value = createValue();
T smallerValue = createSmallerValue();
assertTrue(value.compareTo(smallerValue) > 0);
}
@Test
default void returnsNegativeNumberWhenComparedToLargerValue() {
T value = createValue();
T smallerValue = createSmallerValue();
assertTrue(smallerValue.compareTo(value) < 0);
}
}
// end::user_guide[]
| ComparableContract |
java | qos-ch__slf4j | slf4j-api/src/main/java/org/slf4j/spi/LoggerFactoryBinder.java | {
"start": 1745,
"end": 2265
} | class ____ bind to.
*/
public ILoggerFactory getLoggerFactory();
/**
* The String form of the {@link ILoggerFactory} object that this
* <code>LoggerFactoryBinder</code> instance is <em>intended</em> to return.
*
* <p>This method allows the developer to interrogate this binder's intention
* which may be different from the {@link ILoggerFactory} instance it is able to
* yield in practice. The discrepancy should only occur in case of errors.
*
* @return the | should |
java | quarkusio__quarkus | integration-tests/grpc-test-random-port/src/test/java/io/quarkus/grpc/examples/hello/RandomPortSeparateServerTlsTestBase.java | {
"start": 212,
"end": 908
} | class ____ implements QuarkusTestProfile {
@Override
public Map<String, String> getConfigOverrides() {
return Map.of(
"quarkus.grpc.server.plain-text", "false",
"quarkus.grpc.server.test-port", "0",
"quarkus.grpc.server.ssl.certificate", "tls/server.pem",
"quarkus.grpc.server.ssl.key", "tls/server.key",
"quarkus.grpc.clients.hello.host", "localhost",
"quarkus.grpc.clients.hello.ssl.trust-store", "tls/ca.pem");
}
}
@Override
protected String serverPortProperty() {
return "quarkus.grpc.server.test-port";
}
}
| Profile |
java | elastic__elasticsearch | x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/QuarterProcessor.java | {
"start": 627,
"end": 1886
} | class ____ extends BaseDateTimeProcessor {
public static final String NAME = "q";
private static final DateTimeFormatter QUARTER_FORMAT = DateTimeFormatter.ofPattern("q", Locale.ROOT);
public QuarterProcessor(ZoneId zoneId) {
super(zoneId);
}
public QuarterProcessor(StreamInput in) throws IOException {
super(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {}
@Override
public String getWriteableName() {
return NAME;
}
@Override
public Object doProcess(ZonedDateTime zdt) {
return quarter(zdt);
}
public static Integer quarter(ZonedDateTime dateTime, String tzId) {
return quarter(dateTime.withZoneSameInstant(ZoneId.of(tzId)));
}
static Integer quarter(ZonedDateTime zdt) {
return Integer.valueOf(zdt.format(QUARTER_FORMAT));
}
@Override
public int hashCode() {
return Objects.hash(zoneId());
}
@Override
public boolean equals(Object obj) {
if (obj == null || obj.getClass() != getClass()) {
return false;
}
DateTimeProcessor other = (DateTimeProcessor) obj;
return Objects.equals(zoneId(), other.zoneId());
}
}
| QuarterProcessor |
java | quarkusio__quarkus | extensions/mailer/deployment/src/test/java/io/quarkus/mailer/MailTemplateLocationTest.java | {
"start": 1016,
"end": 1295
} | class ____ {
@Inject
@Location("confirmation")
MailTemplate confirmationMailTemplate;
Uni<Void> send() {
return confirmationMailTemplate.to("quarkus@quarkus.io").subject("Test").data("name", "Foo").send();
}
}
}
| MailTemplates |
java | processing__processing4 | java/test/processing/mode/java/CodeFolderRuntimePathFactoryTest.java | {
"start": 1044,
"end": 2025
} | class ____ {
private RuntimePathBuilder.RuntimePathFactoryStrategy factory;
private JavaMode testMode;
private List<ImportStatement> testImports;
private Sketch testSketch;
private List<String> classpath;
@Before
public void setUp() throws Exception {
RuntimePathBuilder builder = new RuntimePathBuilder();
factory = builder::buildCodeFolderPath;
testMode = RuntimePathFactoryTestUtil.createTestJavaMode();
testImports = RuntimePathFactoryTestUtil.createTestImports();
testSketch = RuntimePathFactoryTestUtil.createTestSketch();
classpath = factory.buildClasspath(testMode, testImports, testSketch);
}
@Test
public void testBuildClasspathSize() {
assertEquals(2, classpath.size());
}
@Test
public void testBuildClasspathValues() {
assertEquals("testdir" + File.separator + "file1.jar", classpath.get(0));
assertEquals("testdir" + File.separator + "file3.zip", classpath.get(1));
}
} | CodeFolderRuntimePathFactoryTest |
java | apache__camel | core/camel-support/src/main/java/org/apache/camel/support/http/RestUtil.java | {
"start": 926,
"end": 1994
} | class ____ {
/**
* Used for validating incoming REST calls whether Camel can process according to consumes/produces and
* Accept/Content-Type headers.
*/
public static boolean isValidOrAcceptedContentType(String valid, String target) {
if (valid == null || target == null) {
return true;
}
// Any MIME type
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept#Directives
if (target.contains("*/*")) {
return true;
}
// content-type is before optional charset
target = StringHelper.before(target, ";", target);
valid = valid.toLowerCase(Locale.ENGLISH);
target = target.toLowerCase(Locale.ENGLISH);
if (valid.contains(target)) {
return true;
}
// try each part of the target
for (String part : target.split(",")) {
part = part.trim();
if (valid.contains(part)) {
return true;
}
}
return false;
}
}
| RestUtil |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/providers/ParamConverterTest.java | {
"start": 2794,
"end": 3139
} | class ____ implements ParamConverter<UUID> {
@Override
public UUID fromString(String value) {
return UUID.fromString(STATIC_UUID);
}
@Override
public String toString(UUID value) {
return value.toString();
}
}
}
}
| UUIDParamConverter |
java | apache__dubbo | dubbo-common/src/test/java/org/apache/dubbo/rpc/model/HelloReply.java | {
"start": 889,
"end": 1452
} | class ____ extends com.google.protobuf.GeneratedMessageV3 {
@Override
protected FieldAccessorTable internalGetFieldAccessorTable() {
return null;
}
@Override
protected Message.Builder newBuilderForType(BuilderParent builderParent) {
return null;
}
@Override
public Message.Builder newBuilderForType() {
return null;
}
@Override
public Message.Builder toBuilder() {
return null;
}
@Override
public Message getDefaultInstanceForType() {
return null;
}
}
| HelloReply |
java | google__dagger | dagger-compiler/main/java/dagger/internal/codegen/javac/JavacPluginProcessingEnvironment.java | {
"start": 1744,
"end": 3102
} | class ____ implements ProcessingEnvironment {
private final Elements elements;
private final Types types;
private final Filer filer = new ThrowingFiler();
private final Messager messager = new NoopMessager();
JavacPluginProcessingEnvironment(Elements elements, Types types) {
this.elements = elements;
this.types = types;
}
@Override
public Elements getElementUtils() {
return elements;
}
@Override
public Types getTypeUtils() {
return types;
}
@Override
public Filer getFiler() {
return filer;
}
@Override
public Locale getLocale() {
// Null means there's no locale in effect
return null;
}
@Override
public Messager getMessager() {
return messager;
}
@Override
public ImmutableMap<String, String> getOptions() {
// TODO(erichang): You can technically parse options out of the context, but it is internal
// implementation and unclear that any of the tools will ever be passing an option.
return ImmutableMap.of();
}
@Override
public SourceVersion getSourceVersion() {
// This source version doesn't really matter because it is saying what version generated code
// should adhere to, which there shouldn't be any because the Filer doesn't work.
return SourceVersion.latestSupported();
}
private static final | JavacPluginProcessingEnvironment |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/ObjectEqualsForPrimitivesTest.java | {
"start": 1283,
"end": 1696
} | class ____ {
private static boolean doTest(Integer a, Integer b) {
return Objects.equals(a, b);
}
}
""")
.expectUnchanged()
.doTest();
}
@Test
public void boxedAndPrimitive() {
refactoringHelper
.addInputLines(
"Test.java",
"""
import java.util.Objects;
public | Test |
java | junit-team__junit5 | junit-platform-engine/src/main/java/org/junit/platform/engine/support/descriptor/MethodSource.java | {
"start": 1360,
"end": 1526
} | class ____ implements TestSource {
@Serial
private static final long serialVersionUID = 1L;
/**
* Create a new {@code MethodSource} using the supplied | MethodSource |
java | spring-projects__spring-boot | module/spring-boot-webflux/src/main/java/org/springframework/boot/webflux/actuate/web/mappings/DispatcherHandlersMappingDescriptionProvider.java | {
"start": 5843,
"end": 6552
} | class ____
implements HandlerMappingDescriptionProvider<AbstractUrlHandlerMapping> {
@Override
public Class<AbstractUrlHandlerMapping> getMappingClass() {
return AbstractUrlHandlerMapping.class;
}
@Override
public List<DispatcherHandlerMappingDescription> describe(AbstractUrlHandlerMapping handlerMapping) {
return handlerMapping.getHandlerMap().entrySet().stream().map(this::describe).toList();
}
private DispatcherHandlerMappingDescription describe(Entry<PathPattern, Object> mapping) {
return new DispatcherHandlerMappingDescription(mapping.getKey().getPatternString(),
mapping.getValue().toString(), null);
}
}
private static final | UrlHandlerMappingDescriptionProvider |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/superclass/auditedAtSuperclassLevel/auditAllSubclass/MappedSubclassingAllAuditedTest.java | {
"start": 1243,
"end": 3652
} | class ____ {
private Integer id2_1;
private Integer id1_1;
@BeforeClassTemplate
public void initData(EntityManagerFactoryScope scope) {
// Revision 1
scope.inTransaction( em -> {
NotAuditedSubclassEntity nas = new NotAuditedSubclassEntity( "nae", "super str", "not audited str" );
em.persist( nas );
AuditedAllSubclassEntity ae = new AuditedAllSubclassEntity( "ae", "super str", "audited str" );
em.persist( ae );
id1_1 = ae.getId();
id2_1 = nas.getId();
} );
// Revision 2
scope.inTransaction( em -> {
AuditedAllSubclassEntity ae = em.find( AuditedAllSubclassEntity.class, id1_1 );
ae.setStr( "ae new" );
ae.setSubAuditedStr( "audited str new" );
NotAuditedSubclassEntity nas = em.find( NotAuditedSubclassEntity.class, id2_1 );
nas.setStr( "nae new" );
nas.setNotAuditedStr( "not aud str new" );
} );
}
@Test
public void testRevisionsCountsForAudited(EntityManagerFactoryScope scope) {
scope.inEntityManager( em -> {
assertEquals( Arrays.asList( 1, 2 ),
AuditReaderFactory.get( em ).getRevisions( AuditedAllSubclassEntity.class, id1_1 ) );
} );
}
@Test
public void testRevisionsCountsForNotAudited(EntityManagerFactoryScope scope) {
scope.inEntityManager( em -> {
assertThrows( NotAuditedException.class, () -> {
AuditReaderFactory.get( em ).getRevisions( NotAuditedSubclassEntity.class, id2_1 );
} );
} );
}
@Test
public void testHistoryOfAudited(EntityManagerFactoryScope scope) {
AuditedAllSubclassEntity ver1 = new AuditedAllSubclassEntity( id1_1, "ae", "super str", "audited str" );
AuditedAllSubclassEntity ver2 = new AuditedAllSubclassEntity( id1_1, "ae new", "super str", "audited str new" );
scope.inEntityManager( em -> {
AuditedAllSubclassEntity rev1 = AuditReaderFactory.get( em ).find( AuditedAllSubclassEntity.class, id1_1, 1 );
AuditedAllSubclassEntity rev2 = AuditReaderFactory.get( em ).find( AuditedAllSubclassEntity.class, id1_1, 2 );
assertNotNull( rev1.getOtherStr() );
assertNotNull( rev2.getOtherStr() );
assertEquals( ver1, rev1 );
assertEquals( ver2, rev2 );
} );
}
@Test
public void testHistoryOfNotAudited(EntityManagerFactoryScope scope) {
scope.inEntityManager( em -> {
assertThrows( NotAuditedException.class, () -> {
AuditReaderFactory.get( em ).find( NotAuditedSubclassEntity.class, id2_1, 1 );
} );
} );
}
}
| MappedSubclassingAllAuditedTest |
java | google__dagger | dagger-compiler/main/java/dagger/internal/codegen/validation/ComponentCreatorValidator.java | {
"start": 2497,
"end": 4826
} | class ____ implements ClearableCache {
private final Map<XTypeElement, ValidationReport> reports = new HashMap<>();
private final MethodSignatureFormatter methodSignatureFormatter;
private final DaggerSuperficialValidation superficialValidation;
@Inject
ComponentCreatorValidator(
MethodSignatureFormatter methodSignatureFormatter,
DaggerSuperficialValidation superficialValidation) {
this.methodSignatureFormatter = methodSignatureFormatter;
this.superficialValidation = superficialValidation;
}
@Override
public void clearCache() {
reports.clear();
}
/** Validates that the given {@code type} is potentially a valid component creator type. */
public ValidationReport validate(XTypeElement type) {
return reentrantComputeIfAbsent(reports, type, this::validateUncached);
}
private ValidationReport validateUncached(XTypeElement type) {
ValidationReport.Builder report = ValidationReport.about(type);
ImmutableSet<ComponentCreatorAnnotation> creatorAnnotations = getCreatorAnnotations(type);
if (!validateOnlyOneCreatorAnnotation(creatorAnnotations, report)) {
return report.build();
}
// Note: there's more validation in ComponentDescriptorValidator:
// - to make sure the setter methods/factory parameters mirror the deps
// - to make sure each type or key is set by only one method or parameter
ElementValidator validator =
new ElementValidator(type, report, getOnlyElement(creatorAnnotations));
return validator.validate();
}
private boolean validateOnlyOneCreatorAnnotation(
ImmutableSet<ComponentCreatorAnnotation> creatorAnnotations,
ValidationReport.Builder report) {
// creatorAnnotations should never be empty because this should only ever be called for
// types that have been found to have some creator annotation
if (creatorAnnotations.size() > 1) {
String error =
"May not have more than one component Factory or Builder annotation on a type"
+ ": found "
+ creatorAnnotations;
report.addError(error);
return false;
}
return true;
}
/**
* Validator for a single {@link XTypeElement} that is annotated with a {@code Builder} or {@code
* Factory} annotation.
*/
private final | ComponentCreatorValidator |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/boot/database/qualfiedTableNaming/DefaultCatalogAndSchemaTest.java | {
"start": 44287,
"end": 44484
} | class ____ {
public static final String NAME = "EntityWithHbmXmlImplicitFileLevelQualifiers";
private Long id;
private String basic;
}
public static | EntityWithHbmXmlImplicitFileLevelQualifiers |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/ingest/SamplingServiceRawDocumentTests.java | {
"start": 808,
"end": 1975
} | class ____ extends AbstractWireSerializingTestCase<RawDocument> {
@Override
protected Writeable.Reader<RawDocument> instanceReader() {
return RawDocument::new;
}
@Override
protected RawDocument createTestInstance() {
return new RawDocument(randomIdentifier(), randomByteArrayOfLength(randomIntBetween(10, 1000)), randomFrom(XContentType.values()));
}
@Override
protected RawDocument mutateInstance(RawDocument instance) throws IOException {
String indexName = instance.indexName();
byte[] source = instance.source();
XContentType xContentType = instance.contentType();
switch (between(0, 2)) {
case 0 -> indexName = randomValueOtherThan(indexName, ESTestCase::randomIdentifier);
case 1 -> source = randomByteArrayOfLength(randomIntBetween(100, 1000));
case 2 -> xContentType = randomValueOtherThan(xContentType, () -> randomFrom(XContentType.values()));
default -> throw new IllegalArgumentException("Should never get here");
}
return new RawDocument(indexName, source, xContentType);
}
}
| SamplingServiceRawDocumentTests |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/ModDoublesEvaluator.java | {
"start": 5193,
"end": 5944
} | class ____ implements EvalOperator.ExpressionEvaluator.Factory {
private final Source source;
private final EvalOperator.ExpressionEvaluator.Factory lhs;
private final EvalOperator.ExpressionEvaluator.Factory rhs;
public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory lhs,
EvalOperator.ExpressionEvaluator.Factory rhs) {
this.source = source;
this.lhs = lhs;
this.rhs = rhs;
}
@Override
public ModDoublesEvaluator get(DriverContext context) {
return new ModDoublesEvaluator(source, lhs.get(context), rhs.get(context), context);
}
@Override
public String toString() {
return "ModDoublesEvaluator[" + "lhs=" + lhs + ", rhs=" + rhs + "]";
}
}
}
| Factory |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/entitynonentity/EntityNonEntityTest.java | {
"start": 836,
"end": 2899
} | class ____ {
@AfterEach
public void tearDown(SessionFactoryScope scope) {
scope.inTransaction(
session ->
scope.getSessionFactory().getSchemaManager().truncateMappedObjects()
);
}
@Test
public void testMix(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
GSM gsm = new GSM();
gsm.brand = "Sony";
gsm.frequency = 900;
gsm.isNumeric = true;
gsm.number = 2;
gsm.species = "human";
session.persist( gsm );
session.getTransaction().commit();
session.clear();
session.beginTransaction();
gsm = session.get( GSM.class, gsm.id );
assertThat( gsm.number )
.describedAs( "top mapped superclass" )
.isEqualTo( 2 );
assertThat( gsm.species )
.describedAs( "non entity between mapped superclass and entity" )
.isNull();
assertThat( gsm.isNumeric )
.describedAs( "mapped superclass under entity" )
.isTrue();
assertThat( gsm.brand )
.describedAs( "non entity under entity" )
.isNull();
assertThat( gsm.frequency )
.describedAs( "leaf entity" )
.isEqualTo( 900 );
session.remove( gsm );
}
);
}
@Test
@JiraKey(value = "HHH-9856")
public void testGetAndFindNonEntityThrowsIllegalArgumentException(SessionFactoryScope scope) {
assertThrows( UnknownEntityTypeException.class, () -> {
scope.getSessionFactory().getMappingMetamodel().findEntityDescriptor( Cellular.class );
scope.getSessionFactory().getMappingMetamodel().getEntityDescriptor( Cellular.class );
} );
assertThrows( UnknownEntityTypeException.class, () ->
scope.getSessionFactory().getMappingMetamodel().getMappingMetamodel()
.getEntityDescriptor( Cellular.class.getName() )
);
assertThrows( UnknownEntityTypeException.class, () -> scope.inTransaction(
session ->
session.get( Cellular.class, 1 )
) );
assertThrows( UnknownEntityTypeException.class, () -> scope.inTransaction(
session ->
session.get( Cellular.class.getName(), 1 )
) );
}
}
| EntityNonEntityTest |
java | apache__camel | components/camel-atmosphere-websocket/src/test/java/org/apache/camel/component/atmosphere/websocket/WebsocketCamelRouterWithInitParamTestSupport.java | {
"start": 1282,
"end": 2656
} | class ____ extends CamelTestSupport {
protected static final int PORT = AvailablePortFinder.getNextAvailable();
// This test needs to run with its own lifecycle management, so we cannot use extensions
protected JettyEmbeddedService service;
@BeforeEach
void setupJetty() {
final JettyConfiguration.WebSocketContextHandlerConfiguration.ServletConfiguration<CamelWebSocketServlet> servletConfiguration
= new JettyConfiguration.WebSocketContextHandlerConfiguration.ServletConfiguration<>(
new CamelWebSocketServlet(),
JettyConfiguration.WebSocketContextHandlerConfiguration.ServletConfiguration.ROOT_PATH_SPEC,
"CamelWsServlet");
servletConfiguration.addInitParameter("events", "true");
final JettyConfiguration jettyConfiguration = JettyConfigurationBuilder
.emptyTemplate()
.withPort(PORT)
.withContextPath(JettyConfiguration.ROOT_CONTEXT_PATH)
.withWebSocketConfiguration().addServletConfiguration(servletConfiguration).build()
.build();
service = new JettyEmbeddedService(jettyConfiguration);
service.initialize();
}
@AfterEach
void tearDownJetty() {
service.shutdown();
}
}
| WebsocketCamelRouterWithInitParamTestSupport |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/core/io/buffer/LimitedDataBufferList.java | {
"start": 1616,
"end": 4083
} | class ____ extends ArrayList<DataBuffer> {
private final int maxByteCount;
private int byteCount;
public LimitedDataBufferList(int maxByteCount) {
this.maxByteCount = maxByteCount;
}
@Override
public boolean add(DataBuffer buffer) {
updateCount(buffer.readableByteCount());
return super.add(buffer);
}
@Override
public void add(int index, DataBuffer buffer) {
super.add(index, buffer);
updateCount(buffer.readableByteCount());
}
@Override
public boolean addAll(Collection<? extends DataBuffer> collection) {
boolean result = super.addAll(collection);
collection.forEach(buffer -> updateCount(buffer.readableByteCount()));
return result;
}
@Override
public boolean addAll(int index, Collection<? extends DataBuffer> collection) {
boolean result = super.addAll(index, collection);
collection.forEach(buffer -> updateCount(buffer.readableByteCount()));
return result;
}
private void updateCount(int bytesToAdd) {
if (this.maxByteCount < 0) {
return;
}
if (bytesToAdd > Integer.MAX_VALUE - this.byteCount) {
raiseLimitException();
}
else {
this.byteCount += bytesToAdd;
if (this.byteCount > this.maxByteCount) {
raiseLimitException();
}
}
}
private void raiseLimitException() {
// Do not release here, it's likely done via doOnDiscard
throw new DataBufferLimitException(
"Exceeded limit on max bytes to buffer : " + this.maxByteCount);
}
@Override
public DataBuffer remove(int index) {
throw new UnsupportedOperationException();
}
@Override
public boolean remove(Object o) {
throw new UnsupportedOperationException();
}
@Override
protected void removeRange(int fromIndex, int toIndex) {
throw new UnsupportedOperationException();
}
@Override
public boolean removeAll(Collection<?> c) {
throw new UnsupportedOperationException();
}
@Override
public boolean removeIf(Predicate<? super DataBuffer> filter) {
throw new UnsupportedOperationException();
}
@Override
public DataBuffer set(int index, DataBuffer element) {
throw new UnsupportedOperationException();
}
@Override
public void clear() {
this.byteCount = 0;
super.clear();
}
/**
* Shortcut to {@link DataBufferUtils#release release} all data buffers and
* then {@link #clear()}.
*/
public void releaseAndClear() {
forEach(buf -> {
try {
DataBufferUtils.release(buf);
}
catch (Throwable ex) {
// Keep going..
}
});
clear();
}
}
| LimitedDataBufferList |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/transport/RemoteClusterPortSettingsTests.java | {
"start": 1234,
"end": 10656
} | class ____ extends ESTestCase {
/**
* Tests that, if Remote Cluster Security 2.0 is enabled, we reject any configuration of that profile
* via the profile settings.
*/
public void testRemoteClusterProfileCannotBeUsedWhenRcs2IsEnabled() {
List<Setting.AffixSetting<?>> transportProfileSettings = List.of(
TransportSettings.TCP_KEEP_ALIVE_PROFILE,
TransportSettings.TCP_KEEP_IDLE_PROFILE,
TransportSettings.TCP_KEEP_INTERVAL_PROFILE,
TransportSettings.TCP_KEEP_COUNT_PROFILE,
TransportSettings.TCP_NO_DELAY_PROFILE,
TransportSettings.TCP_REUSE_ADDRESS_PROFILE,
TransportSettings.TCP_SEND_BUFFER_SIZE_PROFILE,
TransportSettings.TCP_RECEIVE_BUFFER_SIZE_PROFILE,
TransportSettings.BIND_HOST_PROFILE,
TransportSettings.PUBLISH_HOST_PROFILE,
TransportSettings.PORT_PROFILE,
TransportSettings.PUBLISH_PORT_PROFILE
);
for (Setting.AffixSetting<?> profileSetting : transportProfileSettings) {
Settings testSettings = Settings.builder()
.put(REMOTE_CLUSTER_SERVER_ENABLED.getKey(), true)
// We can just stick a random value in, even if it doesn't match the type - that validation happens at a different layer
.put(profileSetting.getConcreteSettingForNamespace(REMOTE_CLUSTER_PROFILE).getKey(), randomAlphaOfLength(5))
.build();
final IllegalArgumentException e = expectThrows(
IllegalArgumentException.class,
() -> RemoteClusterPortSettings.buildRemoteAccessProfileSettings(testSettings)
);
assertThat(
e.getMessage(),
containsString(
"Remote Access settings should not be configured using the [_remote_cluster] profile. "
+ "Use the [remote_cluster.] settings instead."
)
);
}
}
public void testPortSettingsConstruction() {
String hostValue = NetworkAddress.format(randomIp(true));
Settings.Builder testSettingsBuilder = Settings.builder()
.put(REMOTE_CLUSTER_SERVER_ENABLED.getKey(), true)
.put(randomFrom(RemoteClusterPortSettings.HOST, TransportSettings.BIND_HOST, TransportSettings.HOST).getKey(), hostValue);
boolean publishHostSet = randomBoolean();
String publishHostValue = publishHostSet ? NetworkAddress.format(randomIp(true)) : hostValue;
if (publishHostSet) {
testSettingsBuilder.put(RemoteClusterPortSettings.PUBLISH_HOST.getKey(), publishHostValue);
}
boolean bindHostSet = randomBoolean();
String bindHostValue = bindHostSet ? NetworkAddress.format(randomIp(true)) : hostValue;
if (bindHostSet) {
testSettingsBuilder.put(RemoteClusterPortSettings.BIND_HOST.getKey(), bindHostValue);
}
boolean portSet = randomBoolean();
int portValue = portSet ? randomInt(65535) : RemoteClusterPortSettings.PORT.getDefault(Settings.EMPTY);
if (portSet) {
testSettingsBuilder.put(RemoteClusterPortSettings.PORT.getKey(), portValue);
}
boolean publishPortSet = randomBoolean();
int publishPortValue = publishPortSet ? randomInt(65535) : -1; // Publish port is handled a bit weird in ProfileSettings
if (publishPortSet) {
testSettingsBuilder.put(RemoteClusterPortSettings.PUBLISH_PORT.getKey(), publishPortValue);
}
boolean keepAliveSet = randomBoolean();
boolean keepAliveValue = keepAliveSet ? randomBoolean() : NetworkService.TCP_KEEP_ALIVE.getDefault(Settings.EMPTY);
if (keepAliveSet) {
testSettingsBuilder.put(
randomFrom(RemoteClusterPortSettings.TCP_KEEP_ALIVE, TransportSettings.TCP_KEEP_ALIVE, NetworkService.TCP_KEEP_ALIVE)
.getKey(),
keepAliveValue
);
}
boolean keepIdleSet = randomBoolean();
int keepIdleValue = keepIdleSet ? randomInt(300) : NetworkService.TCP_KEEP_IDLE.getDefault(Settings.EMPTY);
if (keepIdleSet) {
testSettingsBuilder.put(
randomFrom(RemoteClusterPortSettings.TCP_KEEP_IDLE, TransportSettings.TCP_KEEP_IDLE, NetworkService.TCP_KEEP_IDLE).getKey(),
keepIdleValue
);
}
boolean keepIntervalSet = randomBoolean();
int keepIntervalValue = keepIntervalSet ? randomInt(300) : NetworkService.TCP_KEEP_INTERVAL.getDefault(Settings.EMPTY);
if (keepIntervalSet) {
testSettingsBuilder.put(
randomFrom(
RemoteClusterPortSettings.TCP_KEEP_INTERVAL,
TransportSettings.TCP_KEEP_INTERVAL,
NetworkService.TCP_KEEP_INTERVAL
).getKey(),
keepIntervalValue
);
}
boolean keepCountSet = randomBoolean();
int keepCountValue = keepCountSet ? randomInt(1000000) : NetworkService.TCP_KEEP_COUNT.getDefault(Settings.EMPTY);
if (keepCountSet) {
testSettingsBuilder.put(
randomFrom(RemoteClusterPortSettings.TCP_KEEP_COUNT, TransportSettings.TCP_KEEP_COUNT, NetworkService.TCP_KEEP_COUNT)
.getKey(),
keepCountValue
);
}
boolean noDelaySet = randomBoolean();
boolean noDelayValue = noDelaySet ? randomBoolean() : NetworkService.TCP_NO_DELAY.getDefault(Settings.EMPTY);
if (noDelaySet) {
testSettingsBuilder.put(
randomFrom(RemoteClusterPortSettings.TCP_NO_DELAY, TransportSettings.TCP_NO_DELAY).getKey(),
noDelayValue
);
}
boolean reuseAddressSet = randomBoolean();
boolean reuseAddressValue = reuseAddressSet ? randomBoolean() : NetworkService.TCP_REUSE_ADDRESS.getDefault(Settings.EMPTY);
if (reuseAddressSet) {
testSettingsBuilder.put(
randomFrom(
RemoteClusterPortSettings.TCP_REUSE_ADDRESS,
TransportSettings.TCP_REUSE_ADDRESS,
NetworkService.TCP_REUSE_ADDRESS
).getKey(),
reuseAddressValue
);
}
boolean sendBufferSizeSet = randomBoolean();
int sendBufSizeBytes = randomInt(10_000_000);
ByteSizeValue sendBufferSizeValue = sendBufferSizeSet
? ByteSizeValue.ofBytes(sendBufSizeBytes)
: NetworkService.TCP_SEND_BUFFER_SIZE.getDefault(Settings.EMPTY);
if (sendBufferSizeSet) {
testSettingsBuilder.put(
randomFrom(
RemoteClusterPortSettings.TCP_SEND_BUFFER_SIZE,
TransportSettings.TCP_SEND_BUFFER_SIZE,
NetworkService.TCP_SEND_BUFFER_SIZE
).getKey(),
sendBufferSizeValue
);
}
boolean receiveBufferSizeSet = randomBoolean();
int rcvBufSizeBytes = randomInt(10_000_000);
ByteSizeValue receiveBufferSizeValue = receiveBufferSizeSet
? ByteSizeValue.ofBytes(rcvBufSizeBytes)
: NetworkService.TCP_RECEIVE_BUFFER_SIZE.getDefault(Settings.EMPTY);
if (receiveBufferSizeSet) {
testSettingsBuilder.put(
randomFrom(
RemoteClusterPortSettings.TCP_RECEIVE_BUFFER_SIZE,
TransportSettings.TCP_RECEIVE_BUFFER_SIZE,
NetworkService.TCP_RECEIVE_BUFFER_SIZE
).getKey(),
receiveBufferSizeValue
);
}
Settings testSettings = testSettingsBuilder.build();
TcpTransport.ProfileSettings profileSettings = RemoteClusterPortSettings.buildRemoteAccessProfileSettings(testSettings);
assertThat(profileSettings.profileName, equalTo(REMOTE_CLUSTER_PROFILE));
assertThat(profileSettings.bindHosts, contains(bindHostValue));
assertThat(profileSettings.publishHosts, contains(publishHostValue));
assertThat(profileSettings.portOrRange, equalTo(Integer.toString(portValue)));
assertThat(profileSettings.publishPort, equalTo(publishPortValue));
assertThat(profileSettings.tcpNoDelay, equalTo(noDelayValue));
assertThat(profileSettings.tcpKeepAlive, equalTo(keepAliveValue));
assertThat(profileSettings.tcpKeepIdle, equalTo(keepIdleValue));
assertThat(profileSettings.tcpKeepInterval, equalTo(keepIntervalValue));
assertThat(profileSettings.tcpKeepCount, equalTo(keepCountValue));
assertThat(profileSettings.reuseAddress, equalTo(reuseAddressValue));
assertThat(profileSettings.sendBufferSize, equalTo(sendBufferSizeValue));
assertThat(profileSettings.receiveBufferSize, equalTo(receiveBufferSizeValue));
assertThat(profileSettings.isDefaultProfile, equalTo(false));
}
public void testRemoteClusterPortDefaultValue() {
assertThat(RemoteClusterPortSettings.PORT.getDefault(Settings.EMPTY), equalTo(9443));
assertThat(
RemoteClusterPortSettings.PORT.getDefault(Settings.builder().put(DiscoveryNode.STATELESS_ENABLED_SETTING_NAME, true).build()),
equalTo(9400)
);
}
}
| RemoteClusterPortSettingsTests |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/cfg/HikariCPSettings.java | {
"start": 165,
"end": 3659
} | interface ____ {
/**
* A setting prefix used to indicate settings that target the {@code hibernate-hikaricp} integration.
*/
String HIKARI_CONFIG_PREFIX = "hibernate.hikari";
/**
* The maximum size of the connection pool.
* <p>
* The default is 10.
*/
String HIKARI_MAX_SIZE = HIKARI_CONFIG_PREFIX + ".maximumPoolSize";
/**
* The minimum number of idle connections to try and maintain in the pool.
* <p>
* The default is the same as {@link HikariCPSettings#HIKARI_MAX_SIZE}.
*/
String HIKARI_MIN_IDLE_SIZE = HIKARI_CONFIG_PREFIX + ".minimumIdle";
/**
* The maximum amount of time a connection can live, after which it is evicted.
* <p>
* The default is 1800000 milliseconds (30 minutes).
*/
String HIKARI_MAX_LIFETIME = HIKARI_CONFIG_PREFIX + ".maxLifetime";
/**
* The maximum amount of time a connection can remain out of the pool, after
* which it is reported as a leak.
* <p>
* The default is 0 milliseconds, resulting in no checks for connection leaks.
*/
String HIKARI_LEAK_TIMEOUT = HIKARI_CONFIG_PREFIX + ".leakDetectionThreshold";
/**
* The maximum amount of time a connection can remain idle, after which it is evicted.
* <p>
* The default is 600000 milliseconds (10 minutes).
*/
String HIKARI_IDLE_TIMEOUT = HIKARI_CONFIG_PREFIX + ".idleTimeout";
/**
* The maximum amount of time a thread can wait for a connection, after which an
* exception is thrown instead.
* <p>
* The default is 30000 milliseconds (30 seconds).
*/
String HIKARI_ACQUISITION_TIMEOUT = HIKARI_CONFIG_PREFIX + ".connectionTimeout";
/**
* The maximum amount of time that a connection will be tested for aliveness. Must
* be lower than {@link HikariCPSettings#HIKARI_ACQUISITION_TIMEOUT}.
* <p>
* The default is 5000 milliseconds (5 seconds).
*/
String HIKARI_VALIDATION_TIMEOUT = HIKARI_CONFIG_PREFIX + ".validationTimeout";
/**
* The maximum amount of time the application thread can wait to attempt to acquire
* an initial connection. Applied after {@link HikariCPSettings#HIKARI_ACQUISITION_TIMEOUT}.
* <p>
* The default is 1 millisecond.
*/
String HIKARI_INITIALIZATION_TIMEOUT = HIKARI_CONFIG_PREFIX + ".initializationFailTimeout";
/**
* How often connections will attempt to be kept alive to prevent a timeout.
* <p>
* The default is 0 milliseconds, resulting in no keep-alive behaviour.
*/
String HIKARI_KEEPALIVE_TIME = HIKARI_CONFIG_PREFIX + ".keepaliveTime";
/**
* An SQL command to be executed when a connection is created.
*/
String HIKARI_INITIAL_SQL = HIKARI_CONFIG_PREFIX + ".connectionInitSql";
/**
* A user-defined name for the pool that appears in logging.
* <p>
* The default is auto-generated.
*/
String HIKARI_POOL_NAME = HIKARI_CONFIG_PREFIX + ".poolName";
/**
* If {@code true}, connections obtained from the pool are in read-only mode
* by default.
* <p>
* Some databases do not support read-only mode while some will provide query
* optimizations when a connection is in read-only mode.
* <p>
* The default is {@code false}.
*/
String HIKARI_READ_ONLY = HIKARI_CONFIG_PREFIX + ".readOnly";
/**
* If {@code true}, internal pool queries (such as keep-alives) will be isolated
* in their own transaction.
* <p>
* Only applies if {@link AvailableSettings#AUTOCOMMIT} is disabled.
* <p>
* The default is {@code false}.
*/
String HIKARI_ISOLATE_INTERNAL_QUERIES = HIKARI_CONFIG_PREFIX + ".isolateInternalQueries";
}
| HikariCPSettings |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/StringBuilderInitWithCharTest.java | {
"start": 2240,
"end": 2491
} | class ____ {
{
new StringBuilder("a");
new StringBuilder(5);
new StringBuilder();
}
}\
""")
.doTest();
}
}
| StringBuilderInitWithCharNegativeCases |
java | spring-projects__spring-framework | spring-beans/src/test/java/org/springframework/beans/factory/support/QualifierAnnotationAutowireBeanFactoryTests.java | {
"start": 1518,
"end": 11854
} | class ____ {
private static final String JUERGEN = "juergen";
private static final String MARK = "mark";
private final DefaultListableBeanFactory lbf = new DefaultListableBeanFactory();
@Test
void testAutowireCandidateDefaultWithIrrelevantDescriptor() throws Exception {
ConstructorArgumentValues cavs = new ConstructorArgumentValues();
cavs.addGenericArgumentValue(JUERGEN);
RootBeanDefinition rbd = new RootBeanDefinition(Person.class, cavs, null);
lbf.registerBeanDefinition(JUERGEN, rbd);
assertThat(lbf.isAutowireCandidate(JUERGEN, null)).isTrue();
assertThat(lbf.isAutowireCandidate(JUERGEN,
new DependencyDescriptor(Person.class.getDeclaredField("name"), false))).isTrue();
assertThat(lbf.isAutowireCandidate(JUERGEN,
new DependencyDescriptor(Person.class.getDeclaredField("name"), true))).isTrue();
}
@Test
void testAutowireCandidateExplicitlyFalseWithIrrelevantDescriptor() throws Exception {
ConstructorArgumentValues cavs = new ConstructorArgumentValues();
cavs.addGenericArgumentValue(JUERGEN);
RootBeanDefinition rbd = new RootBeanDefinition(Person.class, cavs, null);
rbd.setAutowireCandidate(false);
lbf.registerBeanDefinition(JUERGEN, rbd);
assertThat(lbf.isAutowireCandidate(JUERGEN, null)).isFalse();
assertThat(lbf.isAutowireCandidate(JUERGEN,
new DependencyDescriptor(Person.class.getDeclaredField("name"), false))).isFalse();
assertThat(lbf.isAutowireCandidate(JUERGEN,
new DependencyDescriptor(Person.class.getDeclaredField("name"), true))).isFalse();
}
@Test
void testAutowireCandidateWithFieldDescriptor() throws Exception {
lbf.setAutowireCandidateResolver(new QualifierAnnotationAutowireCandidateResolver());
ConstructorArgumentValues cavs1 = new ConstructorArgumentValues();
cavs1.addGenericArgumentValue(JUERGEN);
RootBeanDefinition person1 = new RootBeanDefinition(Person.class, cavs1, null);
person1.addQualifier(new AutowireCandidateQualifier(TestQualifier.class));
lbf.registerBeanDefinition(JUERGEN, person1);
ConstructorArgumentValues cavs2 = new ConstructorArgumentValues();
cavs2.addGenericArgumentValue(MARK);
RootBeanDefinition person2 = new RootBeanDefinition(Person.class, cavs2, null);
lbf.registerBeanDefinition(MARK, person2);
DependencyDescriptor qualifiedDescriptor = new DependencyDescriptor(
QualifiedTestBean.class.getDeclaredField("qualified"), false);
DependencyDescriptor nonqualifiedDescriptor = new DependencyDescriptor(
QualifiedTestBean.class.getDeclaredField("nonqualified"), false);
assertThat(lbf.isAutowireCandidate(JUERGEN, nonqualifiedDescriptor)).isTrue();
assertThat(lbf.isAutowireCandidate(JUERGEN, qualifiedDescriptor)).isTrue();
assertThat(lbf.isAutowireCandidate(MARK, nonqualifiedDescriptor)).isTrue();
assertThat(lbf.isAutowireCandidate(MARK, qualifiedDescriptor)).isFalse();
}
@Test
void testAutowireCandidateExplicitlyFalseWithFieldDescriptor() throws Exception {
ConstructorArgumentValues cavs = new ConstructorArgumentValues();
cavs.addGenericArgumentValue(JUERGEN);
RootBeanDefinition person = new RootBeanDefinition(Person.class, cavs, null);
person.setAutowireCandidate(false);
person.addQualifier(new AutowireCandidateQualifier(TestQualifier.class));
lbf.registerBeanDefinition(JUERGEN, person);
DependencyDescriptor qualifiedDescriptor = new DependencyDescriptor(
QualifiedTestBean.class.getDeclaredField("qualified"), false);
DependencyDescriptor nonqualifiedDescriptor = new DependencyDescriptor(
QualifiedTestBean.class.getDeclaredField("nonqualified"), false);
assertThat(lbf.isAutowireCandidate(JUERGEN, null)).isFalse();
assertThat(lbf.isAutowireCandidate(JUERGEN, nonqualifiedDescriptor)).isFalse();
assertThat(lbf.isAutowireCandidate(JUERGEN, qualifiedDescriptor)).isFalse();
}
@Test
void testAutowireCandidateWithShortClassName() throws Exception {
ConstructorArgumentValues cavs = new ConstructorArgumentValues();
cavs.addGenericArgumentValue(JUERGEN);
RootBeanDefinition person = new RootBeanDefinition(Person.class, cavs, null);
person.addQualifier(new AutowireCandidateQualifier(ClassUtils.getShortName(TestQualifier.class)));
lbf.registerBeanDefinition(JUERGEN, person);
DependencyDescriptor qualifiedDescriptor = new DependencyDescriptor(
QualifiedTestBean.class.getDeclaredField("qualified"), false);
DependencyDescriptor nonqualifiedDescriptor = new DependencyDescriptor(
QualifiedTestBean.class.getDeclaredField("nonqualified"), false);
assertThat(lbf.isAutowireCandidate(JUERGEN, null)).isTrue();
assertThat(lbf.isAutowireCandidate(JUERGEN, nonqualifiedDescriptor)).isTrue();
assertThat(lbf.isAutowireCandidate(JUERGEN, qualifiedDescriptor)).isTrue();
}
@Test
void testAutowireCandidateWithConstructorDescriptor() throws Exception {
lbf.setAutowireCandidateResolver(new QualifierAnnotationAutowireCandidateResolver());
ConstructorArgumentValues cavs1 = new ConstructorArgumentValues();
cavs1.addGenericArgumentValue(JUERGEN);
RootBeanDefinition person1 = new RootBeanDefinition(Person.class, cavs1, null);
person1.addQualifier(new AutowireCandidateQualifier(TestQualifier.class));
lbf.registerBeanDefinition(JUERGEN, person1);
ConstructorArgumentValues cavs2 = new ConstructorArgumentValues();
cavs2.addGenericArgumentValue(MARK);
RootBeanDefinition person2 = new RootBeanDefinition(Person.class, cavs2, null);
lbf.registerBeanDefinition(MARK, person2);
MethodParameter param = new MethodParameter(QualifiedTestBean.class.getDeclaredConstructor(Person.class), 0);
DependencyDescriptor qualifiedDescriptor = new DependencyDescriptor(param, false);
param.initParameterNameDiscovery(new DefaultParameterNameDiscoverer());
assertThat(param.getParameterName()).isEqualTo("tpb");
assertThat(lbf.isAutowireCandidate(JUERGEN, qualifiedDescriptor)).isTrue();
assertThat(lbf.isAutowireCandidate(MARK, qualifiedDescriptor)).isFalse();
}
@Test
void testAutowireCandidateWithMethodDescriptor() throws Exception {
lbf.setAutowireCandidateResolver(new QualifierAnnotationAutowireCandidateResolver());
ConstructorArgumentValues cavs1 = new ConstructorArgumentValues();
cavs1.addGenericArgumentValue(JUERGEN);
RootBeanDefinition person1 = new RootBeanDefinition(Person.class, cavs1, null);
person1.addQualifier(new AutowireCandidateQualifier(TestQualifier.class));
lbf.registerBeanDefinition(JUERGEN, person1);
ConstructorArgumentValues cavs2 = new ConstructorArgumentValues();
cavs2.addGenericArgumentValue(MARK);
RootBeanDefinition person2 = new RootBeanDefinition(Person.class, cavs2, null);
lbf.registerBeanDefinition(MARK, person2);
MethodParameter qualifiedParam =
new MethodParameter(QualifiedTestBean.class.getDeclaredMethod("autowireQualified", Person.class), 0);
MethodParameter nonqualifiedParam =
new MethodParameter(QualifiedTestBean.class.getDeclaredMethod("autowireNonqualified", Person.class), 0);
DependencyDescriptor qualifiedDescriptor = new DependencyDescriptor(qualifiedParam, false);
DependencyDescriptor nonqualifiedDescriptor = new DependencyDescriptor(nonqualifiedParam, false);
qualifiedParam.initParameterNameDiscovery(new DefaultParameterNameDiscoverer());
nonqualifiedParam.initParameterNameDiscovery(new DefaultParameterNameDiscoverer());
assertThat(qualifiedParam.getParameterName()).isEqualTo("tpb");
assertThat(nonqualifiedParam.getParameterName()).isEqualTo("tpb");
assertThat(lbf.isAutowireCandidate(JUERGEN, nonqualifiedDescriptor)).isTrue();
assertThat(lbf.isAutowireCandidate(JUERGEN, qualifiedDescriptor)).isTrue();
assertThat(lbf.isAutowireCandidate(MARK, nonqualifiedDescriptor)).isTrue();
assertThat(lbf.isAutowireCandidate(MARK, qualifiedDescriptor)).isFalse();
}
@Test
void testAutowireCandidateWithMultipleCandidatesDescriptor() throws Exception {
ConstructorArgumentValues cavs1 = new ConstructorArgumentValues();
cavs1.addGenericArgumentValue(JUERGEN);
RootBeanDefinition person1 = new RootBeanDefinition(Person.class, cavs1, null);
person1.addQualifier(new AutowireCandidateQualifier(TestQualifier.class));
lbf.registerBeanDefinition(JUERGEN, person1);
ConstructorArgumentValues cavs2 = new ConstructorArgumentValues();
cavs2.addGenericArgumentValue(MARK);
RootBeanDefinition person2 = new RootBeanDefinition(Person.class, cavs2, null);
person2.addQualifier(new AutowireCandidateQualifier(TestQualifier.class));
lbf.registerBeanDefinition(MARK, person2);
DependencyDescriptor qualifiedDescriptor = new DependencyDescriptor(
new MethodParameter(QualifiedTestBean.class.getDeclaredConstructor(Person.class), 0),
false);
assertThat(lbf.isAutowireCandidate(JUERGEN, qualifiedDescriptor)).isTrue();
assertThat(lbf.isAutowireCandidate(MARK, qualifiedDescriptor)).isTrue();
}
@Test
void autowireBeanByTypeWithQualifierPrecedence() throws Exception {
lbf.setAutowireCandidateResolver(new QualifierAnnotationAutowireCandidateResolver());
RootBeanDefinition bd = new RootBeanDefinition(TestBean.class);
RootBeanDefinition bd2 = new RootBeanDefinition(TestBean.class);
lbf.registerBeanDefinition("testBean", bd);
lbf.registerBeanDefinition("spouse", bd2);
lbf.registerAlias("test", "testBean");
assertThat(lbf.resolveDependency(new DependencyDescriptor(getClass().getDeclaredField("testBean"), true), null))
.isSameAs(lbf.getBean("spouse"));
}
@Test
void autowireBeanByTypeWithQualifierPrecedenceInAncestor() throws Exception {
DefaultListableBeanFactory parent = new DefaultListableBeanFactory();
parent.setAutowireCandidateResolver(new QualifierAnnotationAutowireCandidateResolver());
RootBeanDefinition bd = new RootBeanDefinition(TestBean.class);
RootBeanDefinition bd2 = new RootBeanDefinition(TestBean.class);
parent.registerBeanDefinition("test", bd);
parent.registerBeanDefinition("spouse", bd2);
parent.registerAlias("test", "testBean");
DefaultListableBeanFactory lbf = new DefaultListableBeanFactory(parent);
lbf.setAutowireCandidateResolver(new QualifierAnnotationAutowireCandidateResolver());
assertThat(lbf.resolveDependency(new DependencyDescriptor(getClass().getDeclaredField("testBean"), true), null))
.isSameAs(lbf.getBean("spouse"));
}
@SuppressWarnings("unused")
private static | QualifierAnnotationAutowireBeanFactoryTests |
java | elastic__elasticsearch | libs/x-content/src/main/java/org/elasticsearch/xcontent/ConstructingObjectParser.java | {
"start": 23148,
"end": 23373
} | class ____ {
/**
* Array of constructor args to be passed to the {@link ConstructingObjectParser#builder}.
*/
private final Object[] constructorArgs;
/**
* The parser this | Target |
java | quarkusio__quarkus | extensions/redis-client/deployment/src/test/java/io/quarkus/redis/deployment/client/preloading/MultipleFilesForDefaultClientImportPreloadingTest.java | {
"start": 718,
"end": 2559
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest unitTest = new QuarkusUnitTest()
.setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class)
.addAsResource(new StringAsset(
"quarkus.redis.hosts=${quarkus.redis.tr}\n" +
"quarkus.redis.load-script=import/my-import.redis, sample.redis"),
"application.properties")
.addAsResource(new File("src/test/resources/imports/import.redis"), "import/my-import.redis")
.addAsResource(new File("src/test/resources/imports/sample.redis"), "sample.redis"));
@Inject
RedisDataSource ds;
@Test
void verifyImport() {
var keys = ds.key();
var values = ds.value(String.class);
var hashes = ds.hash(String.class);
assertThat(keys.keys("*")).containsExactlyInAnyOrder("foo", "bar", "key1", "key2", "key3",
"key4", "space:key", "counter", "key");
assertThat(hashes.hgetall("foo")).containsOnly(entry("field1", "abc"), entry("field2", "123"));
assertThat(hashes.hgetall("bar")).containsOnly(entry("field1", "abc def"), entry("field2", "123 456 "));
assertThat(values.get("key1")).isEqualTo("A value using \"double-quotes\"");
assertThat(values.get("key2")).isEqualTo("A value using 'single-quotes'");
assertThat(values.get("key3")).isEqualTo("A value using a single single ' quote");
assertThat(values.get("key4")).isEqualTo("A value using a single double \" quote");
assertThat(values.get("key")).isEqualTo("value");
assertThat(values.get("space:key")).isEqualTo("another value");
assertThat(values.get("counter")).isEqualTo("1");
}
}
| MultipleFilesForDefaultClientImportPreloadingTest |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/interceptors/context/AsyncContinuationTest.java | {
"start": 865,
"end": 1724
} | class ____ {
@RegisterExtension
public ArcTestContainer container = new ArcTestContainer(Simple.class, SimpleBean.class,
AlphaInterceptor.class, BravoInterceptor.class, CharlieInterceptor.class);
private static ExecutorService executor;
@BeforeAll
static void init() {
executor = Executors.newFixedThreadPool(1);
}
@AfterAll
static void tearDown() {
executor.shutdownNow();
}
@Test
public void testAsyncExecution() throws IOException, InterruptedException {
BravoInterceptor.reset();
assertEquals("A1:dummy:A2", Arc.container().instance(SimpleBean.class).get().foo());
assertTrue(BravoInterceptor.latch.await(3, TimeUnit.SECONDS));
assertEquals("C1:ok:C2", BravoInterceptor.asyncResult);
}
@Simple
@Singleton
static | AsyncContinuationTest |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/scheduling/annotation/ScheduledAnnotationBeanPostProcessorTests.java | {
"start": 38119,
"end": 38178
} | class ____ extends FixedRatesBaseBean {
}
| FixedRatesSubBean |
java | alibaba__nacos | plugin/control/src/main/java/com/alibaba/nacos/plugin/control/tps/DefaultTpsControlManager.java | {
"start": 1189,
"end": 3346
} | class ____ extends TpsControlManager {
/**
* point name -> tps barrier.
*/
protected final Map<String, TpsBarrier> points = new ConcurrentHashMap<>(16);
/**
* point name -> tps control rule.
*/
protected final Map<String, TpsControlRule> rules = new ConcurrentHashMap<>(16);
public DefaultTpsControlManager() {
}
/**
* apple tps rule.
*
* @param pointName pointName.
*/
public synchronized void registerTpsPoint(String pointName) {
if (!points.containsKey(pointName)) {
points.put(pointName, tpsBarrierCreator.createTpsBarrier(pointName));
if (rules.containsKey(pointName)) {
points.get(pointName).applyRule(rules.get(pointName));
} else {
initTpsRule(pointName);
}
}
Loggers.CONTROL
.warn("Tps point for {} registered, But tps control manager is no limit implementation.", pointName);
}
/**
* apple tps rule.
*
* @param pointName pointName.
* @param rule rule.
*/
public synchronized void applyTpsRule(String pointName, TpsControlRule rule) {
if (rule == null) {
rules.remove(pointName);
} else {
rules.put(pointName, rule);
}
if (points.containsKey(pointName)) {
points.get(pointName).applyRule(rule);
}
Loggers.CONTROL.warn("Tps rule for point name {} updated, But tps control manager is no limit implementation.",
pointName);
}
public Map<String, TpsBarrier> getPoints() {
return points;
}
public Map<String, TpsControlRule> getRules() {
return rules;
}
/**
* check tps result.
*
* @param tpsRequest TpsRequest.
* @return check current tps is allowed.
*/
public TpsCheckResponse check(TpsCheckRequest tpsRequest) {
return new TpsCheckResponse(true, TpsResultCode.CHECK_SKIP, "skip");
}
@Override
public String getName() {
return "noLimit";
}
}
| DefaultTpsControlManager |
java | apache__camel | components/camel-micrometer/src/main/java/org/apache/camel/component/micrometer/eventnotifier/MicrometerRouteEventNotifierNamingStrategy.java | {
"start": 1796,
"end": 3687
} | interface ____ {
Predicate<Meter.Id> EVENT_NOTIFIERS
= id -> KIND_ROUTE.equals(id.getTag(KIND));
/**
* Default naming strategy that uses micrometer naming convention.
*/
MicrometerRouteEventNotifierNamingStrategy DEFAULT = new MicrometerRouteEventNotifierNamingStrategy() {
@Override
public String getRouteAddedName() {
return DEFAULT_CAMEL_ROUTES_ADDED;
}
@Override
public String getRouteRunningName() {
return DEFAULT_CAMEL_ROUTES_RUNNING;
}
@Override
public String getRouteReloadedName() {
return DEFAULT_CAMEL_ROUTES_RELOADED;
}
};
/**
* Naming strategy that uses the classic/legacy naming style (camelCase)
*/
MicrometerRouteEventNotifierNamingStrategy LEGACY = new MicrometerRouteEventNotifierNamingStrategy() {
@Override
public String getRouteAddedName() {
return formatName(DEFAULT_CAMEL_ROUTES_ADDED);
}
@Override
public String getRouteRunningName() {
return formatName(DEFAULT_CAMEL_ROUTES_RUNNING);
}
@Override
public String getRouteReloadedName() {
return formatName(DEFAULT_CAMEL_ROUTES_RELOADED);
}
@Override
public String formatName(String name) {
return MicrometerUtils.legacyName(name);
}
};
default String formatName(String name) {
return name;
}
String getRouteAddedName();
String getRouteRunningName();
String getRouteReloadedName();
default Tags getTags(CamelContext camelContext) {
return Tags.of(
KIND, KIND_ROUTE,
CAMEL_CONTEXT_TAG, camelContext.getName(),
EVENT_TYPE_TAG, RouteEvent.class.getSimpleName());
}
}
| MicrometerRouteEventNotifierNamingStrategy |
java | elastic__elasticsearch | modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java | {
"start": 14323,
"end": 16003
} | class ____ implements ClusterStateApplier {
private static final String GCS_SETTING_PREFIX = "gcs.";
private final Settings nodeGcsSettings;
// A map of projectId to clients holder. Adding to and removing from the map happen only in the applier thread.
private final Map<ProjectId, PerProjectClientsHolder> perProjectClientsHolders;
private final ClusterClientsHolder clusterClientsHolder;
GoogleCloudStorageClientsManager(Settings nodeSettings, boolean supportsMultipleProjects) {
this.nodeGcsSettings = Settings.builder()
.put(nodeSettings.getByPrefix(GCS_SETTING_PREFIX), false) // not rely on any cluster scoped secrets
.normalizePrefix(GCS_SETTING_PREFIX)
.build();
if (supportsMultipleProjects) {
this.perProjectClientsHolders = ConcurrentCollections.newConcurrentMap();
} else {
this.perProjectClientsHolders = null;
}
this.clusterClientsHolder = new ClusterClientsHolder();
}
@Override
public void applyClusterState(ClusterChangedEvent event) {
assert perProjectClientsHolders != null;
final Map<ProjectId, ProjectMetadata> currentProjects = event.state().metadata().projects();
final var updatedPerProjectClients = new HashMap<ProjectId, PerProjectClientsHolder>();
for (var project : currentProjects.values()) {
// Skip the default project, it is tracked separately with clusterClientsHolder and
// updated differently with the ReloadablePlugin | GoogleCloudStorageClientsManager |
java | junit-team__junit5 | junit-platform-engine/src/main/java/org/junit/platform/engine/discovery/DiscoverySelectors.java | {
"start": 30595,
"end": 30733
} | class ____.
*
* <p>The parameter type names {@code String} is typically a comma-separated
* list of atomic types, fully qualified | loader |
java | qos-ch__slf4j | slf4j-api/src/main/java/org/slf4j/Logger.java | {
"start": 1954,
"end": 2229
} | interface ____ the main user entry point of SLF4J API.
* It is expected that logging takes place through concrete implementations
* of this interface.
*
* <H3>Typical usage pattern:</H3>
* <pre>
* import org.slf4j.Logger;
* import org.slf4j.LoggerFactory;
*
* public | is |
java | netty__netty | codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHeadersFrame.java | {
"start": 722,
"end": 1493
} | interface ____ extends SpdyStreamFrame {
/**
* Returns {@code true} if this header block is invalid.
* A RST_STREAM frame with code PROTOCOL_ERROR should be sent.
*/
boolean isInvalid();
/**
* Marks this header block as invalid.
*/
SpdyHeadersFrame setInvalid();
/**
* Returns {@code true} if this header block has been truncated due to
* length restrictions.
*/
boolean isTruncated();
/**
* Mark this header block as truncated.
*/
SpdyHeadersFrame setTruncated();
/**
* Returns the {@link SpdyHeaders}.
*/
SpdyHeaders headers();
@Override
SpdyHeadersFrame setStreamId(int streamID);
@Override
SpdyHeadersFrame setLast(boolean last);
}
| SpdyHeadersFrame |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/rpc/RpcEndpointTest.java | {
"start": 18394,
"end": 19172
} | class ____ implements MainThreadExecutable {
private final Consumer<Runnable> scheduleRunAsyncConsumer;
private TestMainThreadExecutable(Consumer<Runnable> scheduleRunAsyncConsumer) {
this.scheduleRunAsyncConsumer = scheduleRunAsyncConsumer;
}
@Override
public void runAsync(Runnable runnable) {
scheduleRunAsyncConsumer.accept(runnable);
}
@Override
public <V> CompletableFuture<V> callAsync(Callable<V> callable, Duration callTimeout) {
throw new UnsupportedOperationException();
}
@Override
public void scheduleRunAsync(Runnable runnable, long delay) {
scheduleRunAsyncConsumer.accept(runnable);
}
}
}
| TestMainThreadExecutable |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_1900/Issue1903.java | {
"start": 1443,
"end": 1974
} | class ____ implements InvocationHandler {
Map<String, Object> map = new HashMap<String, Object>();
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
String name = method.getName().substring(3);
String first = String.valueOf(name.charAt(0));
name = name.replaceFirst(first, first.toLowerCase());
return map.get(name);
}
public void add(String key, Object val){
map.put(key, val);
}
}
}
| MapHandler |
java | spring-projects__spring-framework | spring-test/src/main/java/org/springframework/test/context/NestedTestConfiguration.java | {
"start": 7203,
"end": 7393
} | class ____
* <em>override</em> configuration from its
* {@linkplain Class#getEnclosingClass() enclosing class}.
*/
OVERRIDE;
/**
* Get the {@code EnclosingConfiguration} | should |
java | apache__camel | components/camel-aws/camel-aws2-sts/src/main/java/org/apache/camel/component/aws2/sts/client/STS2InternalClient.java | {
"start": 918,
"end": 1125
} | interface ____ {
/**
* Returns an sts client after a factory method determines which one to return.
*
* @return StsClient stsClient
*/
StsClient getStsClient();
}
| STS2InternalClient |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/concurrent/Computable.java | {
"start": 1414,
"end": 1808
} | interface ____<I, O> {
/**
* This method carries out the given operation with the provided argument.
*
* @param arg
* the argument for the calculation
* @return the result of the calculation
* @throws InterruptedException
* thrown if the calculation is interrupted
*/
O compute(I arg) throws InterruptedException;
}
| Computable |
java | apache__camel | components/camel-sjms2/src/test/java/org/apache/camel/component/sjms2/support/Jms2TestSupport.java | {
"start": 1822,
"end": 4845
} | class ____ extends CamelTestSupport {
protected final Logger log = LoggerFactory.getLogger(getClass());
@Produce
protected ProducerTemplate template;
private Connection connection;
private Session session;
private DestinationCreationStrategy destinationCreationStrategy = new DefaultDestinationCreationStrategy();
@AfterEach
public void closeSessions() throws JMSException {
log.info("Closing JMS Session");
if (getSession() != null) {
getSession().close();
setSession(null);
}
log.info("Closing JMS Connection");
if (connection != null) {
connection.stop();
connection = null;
}
}
/*
* @see org.apache.camel.test.junit5.CamelTestSupport#createCamelContext()
* @return
* @throws Exception
*/
@Override
protected CamelContext createCamelContext() throws Exception {
CamelContext camelContext = super.createCamelContext();
ConnectionFactory connectionFactory = getConnectionFactory();
connection = connectionFactory.createConnection();
connection.start();
session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
Sjms2Component component = new Sjms2Component();
component.setConnectionFactory(connectionFactory);
camelContext.addComponent("sjms2", component);
return camelContext;
}
protected static ConnectionFactory getConnectionFactory(String serviceAddress) throws Exception {
final String protocol = System.getProperty("protocol", "CORE").toUpperCase();
//Currently AMQP and HORENTQ don't operate in exactly the same way on artemis as OPENWIRE
//and CORE so its not possible to write protocol agnostic tests but in the future releases
//of artemis we may be able test against them in an agnostic way.
switch (protocol) {
case "OPENWIRE":
return new ActiveMQConnectionFactory(serviceAddress);
default:
return ActiveMQJMSClient.createConnectionFactory(serviceAddress, "test");
}
}
protected abstract ConnectionFactory getConnectionFactory() throws Exception;
public void setSession(Session session) {
this.session = session;
}
public Session getSession() {
return session;
}
public MessageConsumer createQueueConsumer(String destination) throws Exception {
return new Jms2ObjectFactory().createMessageConsumer(session,
destinationCreationStrategy.createDestination(session, destination, false), null, false, null, true, false);
}
public MessageConsumer createTopicConsumer(String destination, String messageSelector) throws Exception {
return new Jms2ObjectFactory().createMessageConsumer(session,
destinationCreationStrategy.createDestination(session, destination, true), messageSelector, true, null, true,
false);
}
}
| Jms2TestSupport |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/id/generators/pkg/PackageLevelGeneratorTest.java | {
"start": 1852,
"end": 1999
} | class ____ {
@Id
@GeneratedValue
long id;
}
@Entity(name = "EntityWithDefaultedPackageGenerator3")
static | EntityWithDefaultedPackageGenerator2 |
java | apache__camel | components/camel-rss/src/test/java/org/apache/camel/component/rss/RssEntrySortTest.java | {
"start": 1276,
"end": 2568
} | class ____ extends CamelTestSupport {
@Test
public void testSortedEntries() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:sorted");
mock.expectsAscending(ExpressionBuilder.beanExpression("myBean?method=getPubDate"));
mock.expectedMessageCount(10);
mock.setResultWaitTime(15000L);
mock.assertIsSatisfied();
}
@Test
public void testUnSortedEntries() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:unsorted");
mock.expectsAscending(ExpressionBuilder.beanExpression("myBean?method=getPubDate"));
mock.expectedMessageCount(10);
mock.setResultWaitTime(2000L);
mock.assertIsNotSatisfied(2000L);
}
@Override
protected void bindToRegistry(Registry registry) {
registry.bind("myBean", new MyBean());
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from("rss:file:src/test/data/rss20.xml?splitEntries=true&sortEntries=true&delay=50").to("mock:sorted");
from("rss:file:src/test/data/rss20.xml?splitEntries=true&sortEntries=false&delay=50").to("mock:unsorted");
}
};
}
public static | RssEntrySortTest |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java | {
"start": 5123,
"end": 26173
} | class ____ implements ClusterStateTaskListener {
private final UpdateSettingsClusterStateUpdateRequest request;
private final ActionListener<AcknowledgedResponse> listener;
private UpdateSettingsTask(UpdateSettingsClusterStateUpdateRequest request, ActionListener<AcknowledgedResponse> listener) {
this.request = request;
this.listener = listener;
}
private ClusterStateAckListener getAckListener(AllocationActionMultiListener<AcknowledgedResponse> multiListener) {
return new ClusterStateAckListener() {
@Override
public boolean mustAck(DiscoveryNode discoveryNode) {
return true;
}
@Override
public void onAllNodesAcked() {
multiListener.delay(listener).onResponse(AcknowledgedResponse.of(true));
}
@Override
public void onAckFailure(Exception e) {
multiListener.delay(listener).onFailure(e);
}
@Override
public void onAckTimeout() {
multiListener.delay(listener).onResponse(AcknowledgedResponse.of(false));
}
@Override
public TimeValue ackTimeout() {
return request.ackTimeout();
}
};
}
@Override
public void onFailure(Exception e) {
listener.onFailure(e);
}
ClusterState execute(ClusterState currentState) {
final Settings normalizedSettings = Settings.builder()
.put(request.settings())
.normalizePrefix(IndexMetadata.INDEX_SETTING_PREFIX)
.build();
Settings.Builder settingsForClosedIndices = Settings.builder();
Settings.Builder settingsForOpenIndices = Settings.builder();
final Set<String> skippedSettings = new HashSet<>();
indexScopedSettings.validate(
normalizedSettings.filter(s -> Regex.isSimpleMatchPattern(s) == false), // don't validate wildcards
false, // don't validate values here we check it below never allow to change the number of shards
true
); // validate internal or private index settings
for (String key : normalizedSettings.keySet()) {
Setting<?> setting = indexScopedSettings.get(key);
boolean isWildcard = setting == null && Regex.isSimpleMatchPattern(key);
assert setting != null // we already validated the normalized settings
|| (isWildcard && normalizedSettings.hasValue(key) == false)
: "unknown setting: " + key + " isWildcard: " + isWildcard + " hasValue: " + normalizedSettings.hasValue(key);
settingsForClosedIndices.copy(key, normalizedSettings);
if (isWildcard || setting.isDynamic()) {
settingsForOpenIndices.copy(key, normalizedSettings);
} else {
skippedSettings.add(key);
}
}
final Settings closedSettings = settingsForClosedIndices.build();
final Settings openSettings = settingsForOpenIndices.build();
final boolean preserveExisting = request.onExisting() == UpdateSettingsClusterStateUpdateRequest.OnExisting.PRESERVE;
final var currentRoutingTable = currentState.routingTable(request.projectId());
RoutingTable.Builder routingTableBuilder = null;
final Metadata currentMetadata = currentState.metadata();
final ProjectMetadata currentProject = currentMetadata.getProject(request.projectId());
ProjectMetadata.Builder metadataBuilder = ProjectMetadata.builder(currentProject);
final var minSupportedIndexVersion = currentState.nodes().getMinSupportedIndexVersion();
// allow to change any settings to a closed index, and only allow dynamic settings to be changed
// on an open index
Set<Index> openIndices = new HashSet<>();
Set<Index> closedIndices = new HashSet<>();
Set<Index> readOnlyIndices = null;
final String[] actualIndices = new String[request.indices().length];
for (int i = 0; i < request.indices().length; i++) {
Index index = request.indices()[i];
actualIndices[i] = index.getName();
final IndexMetadata metadata = currentProject.getIndexSafe(index);
if (metadata.getState() == IndexMetadata.State.OPEN) {
openIndices.add(index);
} else {
closedIndices.add(index);
}
if (metadata.getCompatibilityVersion().before(minSupportedIndexVersion)) {
if (readOnlyIndices == null) {
readOnlyIndices = new HashSet<>();
}
readOnlyIndices.add(index);
}
}
if (skippedSettings.isEmpty() == false && openIndices.isEmpty() == false) {
if (request.onStaticSetting() == UpdateSettingsClusterStateUpdateRequest.OnStaticSetting.REOPEN_INDICES) {
// We have non-dynamic settings and open indices. We will unassign all of the shards in these indices so that the new
// changed settings are applied when the shards are re-assigned.
routingTableBuilder = RoutingTable.builder(allocationService.getShardRoutingRoleStrategy(), currentRoutingTable);
for (Index index : new HashSet<>(openIndices)) {
// We only want to take on the expense of reopening all shards for an index if the setting is really changing
Settings existingSettings = currentProject.index(index).getSettings();
boolean needToReopenIndex = false;
for (String setting : skippedSettings) {
String newValue = request.settings().get(setting);
if (Objects.equals(newValue, existingSettings.get(setting)) == false) {
needToReopenIndex = true;
break;
}
}
if (needToReopenIndex) {
List<ShardRouting> shardRoutingList = currentRoutingTable.allShards(index.getName());
IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index);
for (ShardRouting shardRouting : shardRoutingList) {
if (ShardRoutingState.UNASSIGNED.equals(shardRouting.state()) == false) {
indexRoutingTableBuilder.addShard(
shardRouting.moveToUnassigned(
new UnassignedInfo(
UnassignedInfo.Reason.INDEX_REOPENED,
"Unassigning shards to update static settings"
)
)
);
} else {
indexRoutingTableBuilder.addShard(shardRouting);
}
}
routingTableBuilder.add(indexRoutingTableBuilder.build());
openIndices.remove(index);
closedIndices.add(index);
}
}
} else {
throw new IllegalArgumentException(
String.format(
Locale.ROOT,
"Can't update non dynamic settings [%s] for open indices %s unless the `reopen` query parameter is set to "
+ "true. Alternatively, close the indices, apply the settings changes, and reopen the indices",
skippedSettings,
openIndices
)
);
}
}
if (IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.exists(openSettings)) {
final int updatedNumberOfReplicas = IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(openSettings);
if (preserveExisting == false) {
// Verify that this won't take us over the cluster shard limit.
shardLimitValidator.validateShardLimitOnReplicaUpdate(
currentState.nodes(),
currentMetadata,
request.indices(),
updatedNumberOfReplicas
);
/*
* We do not update the in-sync allocation IDs as they will be removed upon the first index operation
* which makes these copies stale.
*
* TODO: should we update the in-sync allocation IDs once the data is deleted by the node?
*/
if (routingTableBuilder == null) {
routingTableBuilder = RoutingTable.builder(allocationService.getShardRoutingRoleStrategy(), currentRoutingTable);
}
routingTableBuilder.updateNumberOfReplicas(updatedNumberOfReplicas, actualIndices);
metadataBuilder.updateNumberOfReplicas(updatedNumberOfReplicas, actualIndices);
logger.info("updating number_of_replicas to [{}] for indices {}", updatedNumberOfReplicas, actualIndices);
}
}
updateIndexSettings(
openIndices,
metadataBuilder,
(index, indexSettings) -> indexScopedSettings.updateDynamicSettings(
openSettings,
indexSettings,
Settings.builder(),
index.getName()
),
preserveExisting,
indexScopedSettings
);
updateIndexSettings(
closedIndices,
metadataBuilder,
(index, indexSettings) -> indexScopedSettings.updateSettings(
closedSettings,
indexSettings,
Settings.builder(),
index.getName()
),
preserveExisting,
indexScopedSettings
);
if (IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.exists(normalizedSettings)
|| IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.exists(normalizedSettings)) {
for (String index : actualIndices) {
final Settings settings = metadataBuilder.get(index).getSettings();
MetadataCreateIndexService.validateTranslogRetentionSettings(settings);
MetadataCreateIndexService.validateStoreTypeSetting(settings);
}
}
boolean changed = false;
// increment settings versions
for (final String index : actualIndices) {
if (same(currentProject.index(index).getSettings(), metadataBuilder.get(index).getSettings()) == false) {
changed = true;
final IndexMetadata.Builder builder = IndexMetadata.builder(metadataBuilder.get(index));
builder.settingsVersion(1 + builder.settingsVersion());
metadataBuilder.put(builder);
}
}
final Function<String, Boolean> verifiedReadOnly = indexName -> VERIFIED_READ_ONLY_SETTING.get(
currentProject.index(indexName).getSettings()
);
final ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
boolean changedBlocks = false;
for (IndexMetadata.APIBlock block : IndexMetadata.APIBlock.values()) {
changedBlocks |= maybeUpdateClusterBlock(
request.projectId(),
actualIndices,
blocks,
block.block,
block.setting,
openSettings,
metadataBuilder,
verifiedReadOnly
);
}
changed |= changedBlocks;
if (changed == false) {
return currentState;
}
ClusterState updatedState = ClusterState.builder(currentState)
.putProjectMetadata(metadataBuilder)
.putRoutingTable(request.projectId(), routingTableBuilder == null ? currentRoutingTable : routingTableBuilder.build())
.blocks(changedBlocks ? blocks.build() : currentState.blocks())
.build();
try {
final var updatedProject = updatedState.metadata().getProject(request.projectId());
for (Index index : openIndices) {
final IndexMetadata currentIndexMetadata = currentProject.getIndexSafe(index);
final IndexMetadata updatedIndexMetadata = updatedProject.getIndexSafe(index);
indicesService.verifyIndexMetadata(currentIndexMetadata, updatedIndexMetadata);
}
for (Index index : closedIndices) {
final IndexMetadata currentIndexMetadata = currentProject.getIndexSafe(index);
final IndexMetadata updatedIndexMetadata = updatedProject.getIndexSafe(index);
// Verifies that the current index settings can be updated with the updated dynamic settings.
indicesService.verifyIndexMetadata(currentIndexMetadata, updatedIndexMetadata);
// Now check that we can create the index with the updated settings (dynamic and non-dynamic).
// This step is mandatory since we allow to update non-dynamic settings on closed indices.
indicesService.verifyIndexMetadata(updatedIndexMetadata, updatedIndexMetadata);
}
verifyReadOnlyIndices(request.projectId(), readOnlyIndices, updatedState.blocks());
} catch (IOException ex) {
throw ExceptionsHelper.convertToElastic(ex);
}
return updatedState;
}
        // Used as the task description in cluster state update logging; delegates to the request,
        // which already renders the target indices and settings.
        @Override
        public String toString() {
            return request.toString();
        }
}
public void updateSettings(final UpdateSettingsClusterStateUpdateRequest request, final ActionListener<AcknowledgedResponse> listener) {
taskQueue.submitTask(
"update-settings " + Arrays.toString(request.indices()),
new UpdateSettingsTask(request, listener),
request.masterNodeTimeout()
);
}
public static void updateIndexSettings(
Set<Index> indices,
ProjectMetadata.Builder metadataBuilder,
BiFunction<Index, Settings.Builder, Boolean> settingUpdater,
Boolean preserveExisting,
IndexScopedSettings indexScopedSettings
) {
for (Index index : indices) {
IndexMetadata indexMetadata = metadataBuilder.getSafe(index);
// We validate the settings for removed deprecated settings, since we have the indexMetadata now.
indexScopedSettings.validate(indexMetadata.getSettings(), true, true, true);
Settings.Builder indexSettings = Settings.builder().put(indexMetadata.getSettings());
if (settingUpdater.apply(index, indexSettings)) {
if (preserveExisting) {
indexSettings.put(indexMetadata.getSettings());
}
/*
* The setting index.number_of_replicas is special; we require that this setting has a value
* in the index. When creating the index, we ensure this by explicitly providing a value for
* the setting to the default (one) if there is a not value provided on the source of the
* index creation. A user can update this setting though, including updating it to null,
* indicating that they want to use the default value. In this case, we again have to
* provide an explicit value for the setting to the default (one).
*/
if (IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.exists(indexSettings) == false) {
indexSettings.put(
IndexMetadata.SETTING_NUMBER_OF_REPLICAS,
IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(Settings.EMPTY)
);
}
Settings finalSettings = indexSettings.build();
indexScopedSettings.validate(finalSettings.filter(k -> indexScopedSettings.isPrivateSetting(k) == false), true);
metadataBuilder.put(IndexMetadata.builder(indexMetadata).settings(finalSettings));
}
}
}
/**
* Verifies that read-only compatible indices always have a write block.
*
* @param projectId the project ID
* @param readOnlyIndices the read-only compatible indices
* @param blocks the updated cluster state blocks
*/
private static void verifyReadOnlyIndices(ProjectId projectId, @Nullable Set<Index> readOnlyIndices, ClusterBlocks blocks) {
if (readOnlyIndices != null) {
for (Index readOnlyIndex : readOnlyIndices) {
if (blocks.hasIndexBlockLevel(projectId, readOnlyIndex.getName(), ClusterBlockLevel.WRITE) == false) {
throw new IllegalArgumentException(
String.format(Locale.ROOT, "Can't remove the write block on read-only compatible index %s", readOnlyIndex)
);
}
}
}
}
    /**
     * Adds or removes {@code block} on the given indices, but only iff {@code setting} is present in
     * {@code openSettings} (absent means "leave the block as it is"). When a write-level block is
     * added or removed, the {@code VERIFIED_READ_ONLY_SETTING} marker on the index metadata is kept
     * in sync with the block state.
     *
     * @param verifiedReadOnlyBeforeBlockChanges yields the pre-update verified-read-only flag for an index name
     * @return {@code true} if any cluster block or index metadata was changed
     */
    private static boolean maybeUpdateClusterBlock(
        ProjectId projectId,
        String[] actualIndices,
        ClusterBlocks.Builder blocks,
        ClusterBlock block,
        Setting<Boolean> setting,
        Settings openSettings,
        ProjectMetadata.Builder metadataBuilder,
        Function<String, Boolean> verifiedReadOnlyBeforeBlockChanges
    ) {
        boolean changed = false;
        if (setting.exists(openSettings)) {
            // The setting's boolean value decides whether the block is added (true) or removed (false).
            final boolean updateBlock = setting.get(openSettings);
            for (String index : actualIndices) {
                if (updateBlock) {
                    if (blocks.hasIndexBlock(projectId, index, block) == false) {
                        blocks.addIndexBlock(projectId, index, block);
                        changed = true;
                        if (block.contains(ClusterBlockLevel.WRITE)) {
                            // Adding a write block: if the index was already verified read-only before
                            // this change, persist that marker in its settings.
                            var isVerifiedReadOnly = verifiedReadOnlyBeforeBlockChanges.apply(index);
                            if (isVerifiedReadOnly) {
                                var indexMetadata = metadataBuilder.get(index);
                                metadataBuilder.put(
                                    IndexMetadata.builder(indexMetadata)
                                        .settings(
                                            Settings.builder()
                                                .put(indexMetadata.getSettings())
                                                .put(VERIFIED_READ_ONLY_SETTING.getKey(), true)
                                        )
                                );
                            }
                        }
                    }
                } else {
                    if (blocks.hasIndexBlock(projectId, index, block)) {
                        blocks.removeIndexBlock(projectId, index, block);
                        changed = true;
                        if (block.contains(ClusterBlockLevel.WRITE)) {
                            // Removing a write block: once no write-level block of any kind remains on
                            // the index, the verified read-only marker is stale and must be dropped.
                            if (blocks.hasIndexBlockLevel(projectId, index, ClusterBlockLevel.WRITE) == false) {
                                var indexMetadata = metadataBuilder.get(index);
                                var indexSettings = Settings.builder().put(indexMetadata.getSettings());
                                indexSettings.remove(VERIFIED_READ_ONLY_SETTING.getKey());
                                metadataBuilder.put(IndexMetadata.builder(indexMetadata).settings(indexSettings));
                            }
                        }
                    }
                }
            }
        }
        return changed;
    }
}
| UpdateSettingsTask |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/merge/CustomMapMerge5237Test.java | {
"start": 664,
"end": 744
} | class ____<K, V> extends HashMap<K, V> implements MyMap<K, V> {}
static | MapImpl |
java | apache__kafka | share-coordinator/src/test/java/org/apache/kafka/coordinator/share/PersisterStateBatchCombinerTest.java | {
"start": 1226,
"end": 2779
} | class ____ {
final String testName;
final List<PersisterStateBatch> batchesSoFar;
final List<PersisterStateBatch> newBatches;
final List<PersisterStateBatch> expectedResult;
final long startOffset;
final boolean shouldRun;
BatchTestHolder(
String testName,
List<PersisterStateBatch> batchesSoFar,
List<PersisterStateBatch> newBatches,
List<PersisterStateBatch> expectedResult,
long startOffset
) {
this(testName, batchesSoFar, newBatches, expectedResult, startOffset, false);
}
BatchTestHolder(
String testName,
List<PersisterStateBatch> batchesSoFar,
List<PersisterStateBatch> newBatches,
List<PersisterStateBatch> expectedResult,
long startOffset,
boolean shouldRun
) {
this.testName = testName;
this.batchesSoFar = batchesSoFar;
this.newBatches = newBatches;
this.expectedResult = expectedResult;
this.startOffset = startOffset;
this.shouldRun = shouldRun;
}
static List<PersisterStateBatch> singleBatch(
long firstOffset,
long lastOffset,
int deliveryState,
int deliveryCount
) {
return List.of(
new PersisterStateBatch(firstOffset, lastOffset, (byte) deliveryState, (short) deliveryCount)
);
}
static | BatchTestHolder |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/resequencer/ResequencerBatchOrderTest.java | {
"start": 1213,
"end": 2886
} | class ____ extends ContextTestSupport {
private static final Logger LOG = LoggerFactory.getLogger(ResequencerBatchOrderTest.class);
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from("direct:start").resequence(body()).batch().size(2).timeout(50).to("mock:result");
}
};
}
@Test
public void testResequencerBatch() throws Exception {
for (int i = 0; i < 100; i++) {
testIteration(i);
}
}
private void testIteration(int i) throws Exception {
MockEndpoint me = context.getEndpoint("mock:result", MockEndpoint.class);
me.reset();
me.expectedMessageCount(4);
LOG.info("Run #{}", i);
template.sendBody("direct:start", "4");
template.sendBody("direct:start", "1");
template.sendBody("direct:start", "3");
template.sendBody("direct:start", "2");
assertMockEndpointsSatisfied();
// because the order can change a bit depending when the resequencer
// trigger cut-off
// then the order can be a bit different
String a = me.getExchanges().get(0).getIn().getBody(String.class);
String b = me.getExchanges().get(1).getIn().getBody(String.class);
String c = me.getExchanges().get(2).getIn().getBody(String.class);
String d = me.getExchanges().get(3).getIn().getBody(String.class);
String line = a + b + c + d;
LOG.info("Order: {}", line);
assertTrue("1423".equals(line) || "1234".equals(line), "Line was " + line);
}
}
| ResequencerBatchOrderTest |
java | apache__camel | components/camel-as2/camel-as2-api/src/main/java/org/apache/camel/component/as2/api/entity/ApplicationEDIX12Entity.java | {
"start": 993,
"end": 1469
} | class ____ extends ApplicationEntity {
public ApplicationEDIX12Entity(byte[] content, String charset, String contentTransferEncoding,
boolean isMainBody, String filename) {
super(content, ContentType.create(AS2MediaType.APPLICATION_EDI_X12, charset), contentTransferEncoding, isMainBody,
filename);
}
@Override
public void close() throws IOException {
// do nothing
}
}
| ApplicationEDIX12Entity |
java | elastic__elasticsearch | distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerProcessTests.java | {
"start": 2444,
"end": 3030
} | class ____ extends ESTestCase {
private static final ExecutorService mockJvmProcessExecutor = Executors.newSingleThreadExecutor();
final MockTerminal terminal = MockTerminal.create();
protected final Map<String, String> sysprops = new HashMap<>();
protected final Map<String, String> envVars = new HashMap<>();
Path esHomeDir;
Path logsDir;
Settings.Builder nodeSettings;
ProcessValidator processValidator;
MainMethod mainCallback;
Runnable forceStopCallback;
MockElasticsearchProcess process;
SecureSettings secrets;
| ServerProcessTests |
java | google__dagger | hilt-android/main/java/dagger/hilt/android/lifecycle/RetainedLifecycle.java | {
"start": 833,
"end": 1456
} | interface ____ {
/**
* Adds a new {@link OnClearedListener} for receiving a callback when the lifecycle is cleared.
*
* @param listener The listener that should be added.
*/
@MainThread
void addOnClearedListener(@NonNull OnClearedListener listener);
/**
* Removes a {@link OnClearedListener} previously added via {@link
* #addOnClearedListener(OnClearedListener)}.
*
* @param listener The listener that should be removed.
*/
@MainThread
void removeOnClearedListener(@NonNull OnClearedListener listener);
/** Listener for when the retained lifecycle is cleared. */
| RetainedLifecycle |
java | elastic__elasticsearch | x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/vector/MagnitudeTests.java | {
"start": 1119,
"end": 2868
} | class ____ extends AbstractVectorTestCase {
public MagnitudeTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
this.testCase = testCaseSupplier.get();
}
@ParametersFactory
public static Iterable<Object[]> parameters() {
return scalarParameters(Magnitude.class.getSimpleName(), Magnitude.SCALAR_FUNCTION);
}
protected EsqlCapabilities.Cap capability() {
return EsqlCapabilities.Cap.MAGNITUDE_SCALAR_VECTOR_FUNCTION;
}
@Override
protected Expression build(Source source, List<Expression> args) {
return new Magnitude(source, args.get(0));
}
@Before
public void checkCapability() {
assumeTrue("Scalar function is not enabled", capability().isEnabled());
}
protected static Iterable<Object[]> scalarParameters(String className, Magnitude.ScalarEvaluatorFunction scalarFunction) {
final String evaluatorName = className + "Evaluator" + "[child=Attribute[channel=0]]";
List<TestCaseSupplier> suppliers = new ArrayList<>();
// Basic test with a dense vector.
suppliers.add(new TestCaseSupplier(List.of(DENSE_VECTOR), () -> {
int dimensions = between(64, 128);
List<Float> input = randomDenseVector(dimensions);
float[] array = listToFloatArray(input);
double expected = scalarFunction.calculateScalar(array);
return new TestCaseSupplier.TestCase(
List.of(new TestCaseSupplier.TypedData(input, DENSE_VECTOR, "vector")),
evaluatorName,
DOUBLE,
equalTo(expected)
);
}));
return parameterSuppliersFromTypedData(suppliers);
}
}
| MagnitudeTests |
java | apache__dubbo | dubbo-remoting/dubbo-remoting-api/src/main/java/org/apache/dubbo/remoting/exchange/support/ReplierDispatcher.java | {
"start": 1098,
"end": 2657
} | class ____ implements Replier<Object> {
private final Replier<?> defaultReplier;
private final Map<Class<?>, Replier<?>> repliers = new ConcurrentHashMap<>();
public ReplierDispatcher() {
this(null, null);
}
public ReplierDispatcher(Replier<?> defaultReplier) {
this(defaultReplier, null);
}
public ReplierDispatcher(Replier<?> defaultReplier, Map<Class<?>, Replier<?>> repliers) {
this.defaultReplier = defaultReplier;
if (CollectionUtils.isNotEmptyMap(repliers)) {
this.repliers.putAll(repliers);
}
}
public <T> ReplierDispatcher addReplier(Class<T> type, Replier<T> replier) {
repliers.put(type, replier);
return this;
}
public <T> ReplierDispatcher removeReplier(Class<T> type) {
repliers.remove(type);
return this;
}
private Replier<?> getReplier(Class<?> type) {
for (Map.Entry<Class<?>, Replier<?>> entry : repliers.entrySet()) {
if (entry.getKey().isAssignableFrom(type)) {
return entry.getValue();
}
}
if (defaultReplier != null) {
return defaultReplier;
}
throw new IllegalStateException("Replier not found, Unsupported message object: " + type);
}
@Override
@SuppressWarnings({"unchecked", "rawtypes"})
public Object reply(ExchangeChannel channel, Object request) throws RemotingException {
return ((Replier) getReplier(request.getClass())).reply(channel, request);
}
}
| ReplierDispatcher |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/http/codec/json/Jackson2CodecSupport.java | {
"start": 2223,
"end": 3251
} | class ____ {
/**
* The key for the hint to specify a "JSON View" for encoding or decoding
* with the value expected to be a {@link Class}.
* @see <a href="https://www.baeldung.com/jackson-json-view-annotation">Jackson JSON Views</a>
*/
public static final String JSON_VIEW_HINT = Jackson2CodecSupport.class.getName() + ".jsonView";
/**
* The key for the hint to access the actual ResolvableType passed into
* {@link org.springframework.http.codec.HttpMessageReader#read(ResolvableType, ResolvableType, ServerHttpRequest, ServerHttpResponse, Map)}
* (server-side only). Currently set when the method argument has generics because
* in case of reactive types, use of {@code ResolvableType.getGeneric()} means no
* MethodParameter source and no knowledge of the containing class.
*/
static final String ACTUAL_TYPE_HINT = Jackson2CodecSupport.class.getName() + ".actualType";
private static final String JSON_VIEW_HINT_ERROR =
"@JsonView only supported for write hints with exactly 1 | Jackson2CodecSupport |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_2674/Issue2674Test.java | {
"start": 748,
"end": 1432
} | class ____ {
@ProcessorTest
@ExpectedCompilationOutcome(
value = CompilationResult.FAILED,
diagnostics = {
@Diagnostic(type = ErroneousSourceTargetMapping.class,
kind = Kind.ERROR,
line = 20,
message = "@BeforeMapping can only be applied to an implemented method."),
@Diagnostic(type = ErroneousSourceTargetMapping.class,
kind = Kind.ERROR,
line = 23,
message = "@AfterMapping can only be applied to an implemented method.")
}
)
public void shouldRaiseErrorIfThereIsNoAfterOrBeforeMethodImplementation() {
}
}
| Issue2674Test |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/ControlBusEndpointBuilderFactory.java | {
"start": 6890,
"end": 9396
} | interface ____
extends
EndpointProducerBuilder {
default ControlBusEndpointBuilder basic() {
return (ControlBusEndpointBuilder) this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedControlBusEndpointBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedControlBusEndpointBuilder lazyStartProducer(String lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
}
public | AdvancedControlBusEndpointBuilder |
java | apache__flink | flink-table/flink-sql-gateway/src/main/java/org/apache/flink/table/gateway/rest/header/session/TriggerSessionHeartbeatHeaders.java | {
"start": 1447,
"end": 2920
} | class ____
implements SqlGatewayMessageHeaders<
EmptyRequestBody, EmptyResponseBody, SessionMessageParameters> {
private static final TriggerSessionHeartbeatHeaders INSTANCE =
new TriggerSessionHeartbeatHeaders();
private static final String URL =
"/sessions/:" + SessionHandleIdPathParameter.KEY + "/heartbeat";
@Override
public Class<EmptyResponseBody> getResponseClass() {
return EmptyResponseBody.class;
}
@Override
public HttpResponseStatus getResponseStatusCode() {
return HttpResponseStatus.OK;
}
@Override
public String getDescription() {
return "Trigger heartbeat to tell the server that the client is active, and to keep the session alive as long as configured timeout value.";
}
@Override
public Class<EmptyRequestBody> getRequestClass() {
return EmptyRequestBody.class;
}
@Override
public SessionMessageParameters getUnresolvedMessageParameters() {
return new SessionMessageParameters();
}
@Override
public HttpMethodWrapper getHttpMethod() {
return HttpMethodWrapper.POST;
}
@Override
public String getTargetRestEndpointURL() {
return URL;
}
public static TriggerSessionHeartbeatHeaders getInstance() {
return INSTANCE;
}
@Override
public String operationId() {
return "triggerSession";
}
}
| TriggerSessionHeartbeatHeaders |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/resource/basic/ContextAndInstanceTest.java | {
"start": 4035,
"end": 4156
} | class ____ extends SummaryGenerator {
@Context
HttpHeaders headers;
}
public | GermanSummaryGenerator |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/NatsComponentBuilderFactory.java | {
"start": 7523,
"end": 8906
} | class ____
extends AbstractComponentBuilder<NatsComponent>
implements NatsComponentBuilder {
@Override
protected NatsComponent buildConcreteComponent() {
return new NatsComponent();
}
@Override
protected boolean setPropertyOnComponent(
Component component,
String name,
Object value) {
switch (name) {
case "servers": ((NatsComponent) component).setServers((java.lang.String) value); return true;
case "verbose": ((NatsComponent) component).setVerbose((boolean) value); return true;
case "bridgeErrorHandler": ((NatsComponent) component).setBridgeErrorHandler((boolean) value); return true;
case "lazyStartProducer": ((NatsComponent) component).setLazyStartProducer((boolean) value); return true;
case "autowiredEnabled": ((NatsComponent) component).setAutowiredEnabled((boolean) value); return true;
case "headerFilterStrategy": ((NatsComponent) component).setHeaderFilterStrategy((org.apache.camel.spi.HeaderFilterStrategy) value); return true;
case "useGlobalSslContextParameters": ((NatsComponent) component).setUseGlobalSslContextParameters((boolean) value); return true;
default: return false;
}
}
}
} | NatsComponentBuilderImpl |
java | micronaut-projects__micronaut-core | http-client/src/main/java/io/micronaut/http/client/netty/StreamWriter.java | {
"start": 1308,
"end": 1487
} | class ____ used to write from a {@link StreamingNettyByteBody} to a channel with appropriate
* backpressure control.
*
* @author Jonas Konrad
* @since 4.7.0
*/
@Internal
final | is |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/contextual/ContextualSerializationTest.java | {
"start": 5342,
"end": 11541
} | class ____
extends ValueSerializer<String>
{
protected String desc;
public AccumulatingContextual() { this(""); }
public AccumulatingContextual(String newDesc) {
desc = newDesc;
}
@Override
public void serialize(String value, JsonGenerator g, SerializationContext provider)
{
g.writeString(desc+"/"+value);
}
@Override
public ValueSerializer<?> createContextual(SerializationContext prov, BeanProperty property)
{
if (property == null) {
return new AccumulatingContextual(desc+"/ROOT");
}
return new AccumulatingContextual(desc+"/"+property.getName());
}
}
/*
/**********************************************************
/* Unit tests
/**********************************************************
*/
// Test to verify that contextual serializer can make use of property
// (method, field) annotations.
@Test
public void testMethodAnnotations() throws Exception
{
SimpleModule module = new SimpleModule("test", Version.unknownVersion());
module.addSerializer(String.class, new AnnotatedContextualSerializer());
ObjectMapper mapper = jsonMapperBuilder()
.addModule(module)
.build();
assertEquals("{\"value\":\"see:foobar\"}", mapper.writeValueAsString(new ContextualBean("foobar")));
}
// Test to verify that contextual serializer can also use annotations
// for enclosing class.
@Test
public void testClassAnnotations() throws Exception
{
SimpleModule module = new SimpleModule("test", Version.unknownVersion());
module.addSerializer(String.class, new AnnotatedContextualSerializer());
ObjectMapper mapper = jsonMapperBuilder()
.addModule(module)
.build();
assertEquals("{\"value\":\"Voila->xyz\"}", mapper.writeValueAsString(new BeanWithClassConfig("xyz")));
}
@Test
public void testWrappedBean() throws Exception
{
SimpleModule module = new SimpleModule("test", Version.unknownVersion());
module.addSerializer(String.class, new AnnotatedContextualSerializer());
ObjectMapper mapper = jsonMapperBuilder()
.addModule(module)
.build();
assertEquals("{\"wrapped\":{\"value\":\"see:xyz\"}}", mapper.writeValueAsString(new ContextualBeanWrapper("xyz")));
}
// Serializer should get passed property context even if contained in an array.
@Test
public void testMethodAnnotationInArray() throws Exception
{
SimpleModule module = new SimpleModule("test", Version.unknownVersion());
module.addSerializer(String.class, new AnnotatedContextualSerializer());
ObjectMapper mapper = jsonMapperBuilder()
.addModule(module)
.build();
ContextualArrayBean beans = new ContextualArrayBean("123");
assertEquals("{\"beans\":[\"array->123\"]}", mapper.writeValueAsString(beans));
}
// Serializer should get passed property context even if contained in a Collection.
@Test
public void testMethodAnnotationInList() throws Exception
{
SimpleModule module = new SimpleModule("test", Version.unknownVersion());
module.addSerializer(String.class, new AnnotatedContextualSerializer());
ObjectMapper mapper = jsonMapperBuilder()
.addModule(module)
.build();
ContextualListBean beans = new ContextualListBean("abc");
assertEquals("{\"beans\":[\"list->abc\"]}", mapper.writeValueAsString(beans));
}
// Serializer should get passed property context even if contained in a Collection.
@Test
public void testMethodAnnotationInMap() throws Exception
{
SimpleModule module = new SimpleModule("test", Version.unknownVersion());
module.addSerializer(String.class, new AnnotatedContextualSerializer());
ObjectMapper mapper = jsonMapperBuilder()
.addModule(module)
.build();
ContextualMapBean map = new ContextualMapBean();
map.beans.put("first", "In Map");
assertEquals("{\"beans\":{\"first\":\"map->In Map\"}}", mapper.writeValueAsString(map));
}
@Test
public void testContextualViaAnnotation() throws Exception
{
ObjectMapper mapper = newJsonMapper();
AnnotatedContextualBean bean = new AnnotatedContextualBean("abc");
assertEquals("{\"value\":\"prefix->abc\"}", mapper.writeValueAsString(bean));
}
@Test
public void testResolveOnContextual() throws Exception
{
SimpleModule module = new SimpleModule("test", Version.unknownVersion());
module.addSerializer(String.class, new ContextualAndResolvable());
ObjectMapper mapper = jsonMapperBuilder()
.addModule(module)
.build();
assertEquals(q("contextual=1,resolved=1"), mapper.writeValueAsString("abc"));
// also: should NOT be called again
assertEquals(q("contextual=1,resolved=1"), mapper.writeValueAsString("foo"));
}
@Test
public void testContextualArrayElement() throws Exception
{
ObjectMapper mapper = newJsonMapper();
ContextualArrayElementBean beans = new ContextualArrayElementBean("456");
assertEquals("{\"beans\":[\"elem->456\"]}", mapper.writeValueAsString(beans));
}
// Test to verify aspects of [databind#2429]
@Test
public void testRootContextualization2429() throws Exception
{
ObjectMapper mapper = jsonMapperBuilder()
.disable(JsonWriteFeature.ESCAPE_FORWARD_SLASHES)
.addModule(new SimpleModule("test", Version.unknownVersion())
.addSerializer(String.class, new AccumulatingContextual()))
.build();
assertEquals(q("/ROOT/foo"), mapper.writeValueAsString("foo"));
assertEquals(q("/ROOT/bar"), mapper.writeValueAsString("bar"));
assertEquals(q("/ROOT/3"), mapper.writeValueAsString("3"));
}
}
| AccumulatingContextual |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/sink/abilities/SupportsRowLevelDelete.java | {
"start": 2182,
"end": 2771
} | interface ____ {
/**
* Applies row-level delete with {@link RowLevelModificationScanContext}, and return a {@link
* RowLevelDeleteInfo}.
*
* @param context the context passed by table source which implement {@link
* SupportsRowLevelModificationScan}. It'll be null if the table source doesn't implement
* it.
*/
RowLevelDeleteInfo applyRowLevelDelete(@Nullable RowLevelModificationScanContext context);
/** The information that guides the planner on how to rewrite the delete statement. */
@PublicEvolving
| SupportsRowLevelDelete |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParser.java | {
"start": 109176,
"end": 111320
} | class ____ extends ParserRuleContext {
public StringContext string() {
return getRuleContext(StringContext.class,0);
}
public ParameterContext parameter() {
return getRuleContext(ParameterContext.class,0);
}
@SuppressWarnings("this-escape")
public StringOrParameterContext(ParserRuleContext parent, int invokingState) {
super(parent, invokingState);
}
@Override public int getRuleIndex() { return RULE_stringOrParameter; }
@Override
public void enterRule(ParseTreeListener listener) {
if ( listener instanceof EsqlBaseParserListener ) ((EsqlBaseParserListener)listener).enterStringOrParameter(this);
}
@Override
public void exitRule(ParseTreeListener listener) {
if ( listener instanceof EsqlBaseParserListener ) ((EsqlBaseParserListener)listener).exitStringOrParameter(this);
}
@Override
public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
if ( visitor instanceof EsqlBaseParserVisitor ) return ((EsqlBaseParserVisitor<? extends T>)visitor).visitStringOrParameter(this);
else return visitor.visitChildren(this);
}
}
public final StringOrParameterContext stringOrParameter() throws RecognitionException {
StringOrParameterContext _localctx = new StringOrParameterContext(_ctx, getState());
enterRule(_localctx, 74, RULE_stringOrParameter);
try {
setState(450);
_errHandler.sync(this);
switch (_input.LA(1)) {
case QUOTED_STRING:
enterOuterAlt(_localctx, 1);
{
setState(448);
string();
}
break;
case PARAM:
case NAMED_OR_POSITIONAL_PARAM:
enterOuterAlt(_localctx, 2);
{
setState(449);
parameter();
}
break;
default:
throw new NoViableAltException(this);
}
}
catch (RecognitionException re) {
_localctx.exception = re;
_errHandler.reportError(this, re);
_errHandler.recover(this, re);
}
finally {
exitRule();
}
return _localctx;
}
@SuppressWarnings("CheckReturnValue")
public static | StringOrParameterContext |
java | quarkusio__quarkus | extensions/spring-web/resteasy-reactive/tests/src/test/java/io/quarkus/spring/web/requestparam/RequestParamController.java | {
"start": 3079,
"end": 3338
} | class ____ {
@ExceptionHandler(WebApplicationException.class)
public ResponseEntity<Object> handleException(Exception ex) {
return new ResponseEntity<>(ex.getMessage(), HttpStatus.BAD_REQUEST);
}
}
}
| RestExceptionHandler |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.