language stringclasses 1
value | repo stringclasses 60
values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | spring-projects__spring-boot | buildpack/spring-boot-buildpack-platform/src/main/java/org/springframework/boot/buildpack/platform/docker/ExportedImageTar.java | {
"start": 10074,
"end": 10641
} | class ____ extends LayerArchiveFactory {
private final Set<String> layers;
ManifestLayerArchiveFactory(ImageArchiveManifest manifest) {
this.layers = manifest.getEntries()
.stream()
.flatMap((entry) -> entry.getLayers().stream())
.collect(Collectors.toUnmodifiableSet());
}
@Override
@Nullable TarArchive getLayerArchive(TarArchiveInputStream tar, TarArchiveEntry entry) {
if (!this.layers.contains(entry.getName())) {
return null;
}
return TarArchive.fromInputStream(tar, Compression.NONE);
}
}
}
| ManifestLayerArchiveFactory |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamIterationHead.java | {
"start": 1738,
"end": 5361
} | class ____<OUT> extends OneInputStreamTask<OUT, OUT> {
private static final Logger LOG = LoggerFactory.getLogger(StreamIterationHead.class);
private RecordWriterOutput<OUT>[] streamOutputs;
private final BlockingQueue<StreamRecord<OUT>> dataChannel;
private final String brokerID;
private final long iterationWaitTime;
private final boolean shouldWait;
public StreamIterationHead(Environment env) throws Exception {
super(env);
final String iterationId = getConfiguration().getIterationId();
if (iterationId == null || iterationId.length() == 0) {
throw new FlinkRuntimeException("Missing iteration ID in the task configuration");
}
this.dataChannel = new ArrayBlockingQueue<>(1);
this.brokerID =
createBrokerIdString(
getEnvironment().getJobID(),
iterationId,
getEnvironment().getTaskInfo().getIndexOfThisSubtask());
this.iterationWaitTime = getConfiguration().getIterationWaitTime();
this.shouldWait = iterationWaitTime > 0;
}
// ------------------------------------------------------------------------
@Override
protected void processInput(MailboxDefaultAction.Controller controller) throws Exception {
StreamRecord<OUT> nextRecord =
shouldWait
? dataChannel.poll(iterationWaitTime, TimeUnit.MILLISECONDS)
: dataChannel.take();
if (nextRecord != null) {
for (RecordWriterOutput<OUT> output : streamOutputs) {
output.collect(nextRecord);
}
} else {
controller.suspendDefaultAction();
mailboxProcessor.suspend();
}
}
// ------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public void init() {
// offer the queue for the tail
BlockingQueueBroker.INSTANCE.handIn(brokerID, dataChannel);
LOG.info("Iteration head {} added feedback queue under {}", getName(), brokerID);
this.streamOutputs = (RecordWriterOutput<OUT>[]) getStreamOutputs();
// If timestamps are enabled we make sure to remove cyclic watermark dependencies
if (isSerializingTimestamps()) {
for (RecordWriterOutput<OUT> output : streamOutputs) {
output.emitWatermark(new Watermark(Long.MAX_VALUE));
}
}
}
@Override
protected void cleanUpInternal() {
// make sure that we remove the queue from the broker, to prevent a resource leak
BlockingQueueBroker.INSTANCE.remove(brokerID);
LOG.info("Iteration head {} removed feedback queue under {}", getName(), brokerID);
}
// ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
/**
* Creates the identification string with which head and tail task find the shared blocking
* queue for the back channel. The identification string is unique per parallel head/tail pair
* per iteration per job.
*
* @param jid The job ID.
* @param iterationID The id of the iteration in the job.
* @param subtaskIndex The parallel subtask number
* @return The identification string.
*/
public static String createBrokerIdString(JobID jid, String iterationID, int subtaskIndex) {
return jid + "-" + iterationID + "-" + subtaskIndex;
}
}
| StreamIterationHead |
java | apache__hadoop | hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/DelegationTokenIOException.java | {
"start": 1057,
"end": 1229
} | class ____ extends IOException {
private static final long serialVersionUID = 599813827985340023L;
/** Error: delegation token/token identifier | DelegationTokenIOException |
java | eclipse-vertx__vert.x | vertx-core/src/main/java/io/vertx/core/AbstractVerticle.java | {
"start": 603,
"end": 708
} | class ____ not deprecated, however we encourage instead to use {@link VerticleBase}
*
* An abstract base | is |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/dynamic/support/ReflectionUtils.java | {
"start": 13013,
"end": 13082
} | interface ____ on each field in the hierarchy.
*/
public | invoked |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/SearchHighlightContext.java | {
"start": 1821,
"end": 4678
} | class ____ {
// Field options that default to null or -1 are often set to their real default in HighlighterParseElement#parse
private int fragmentCharSize = -1;
private int numberOfFragments = -1;
private int fragmentOffset = -1;
private String encoder;
private String[] preTags;
private String[] postTags;
private Boolean scoreOrdered;
private Boolean highlightFilter;
private Boolean requireFieldMatch;
private Integer maxAnalyzedOffset;
private String highlighterType;
private String fragmenter;
private BoundaryScannerType boundaryScannerType;
private int boundaryMaxScan = -1;
private char[] boundaryChars = null;
private Locale boundaryScannerLocale;
private Query highlightQuery;
private int noMatchSize = -1;
private Set<String> matchedFields;
private Map<String, Object> options;
private int phraseLimit = -1;
public int fragmentCharSize() {
return fragmentCharSize;
}
public int numberOfFragments() {
return numberOfFragments;
}
public int fragmentOffset() {
return fragmentOffset;
}
public String encoder() {
return encoder;
}
public String[] preTags() {
return preTags;
}
public String[] postTags() {
return postTags;
}
public Boolean scoreOrdered() {
return scoreOrdered;
}
public Boolean highlightFilter() {
return highlightFilter;
}
public Boolean requireFieldMatch() {
return requireFieldMatch;
}
public Integer maxAnalyzedOffset() {
return maxAnalyzedOffset;
}
public String highlighterType() {
return highlighterType;
}
public String fragmenter() {
return fragmenter;
}
public BoundaryScannerType boundaryScannerType() {
return boundaryScannerType;
}
public int boundaryMaxScan() {
return boundaryMaxScan;
}
public char[] boundaryChars() {
return boundaryChars;
}
public Locale boundaryScannerLocale() {
return boundaryScannerLocale;
}
public Query highlightQuery() {
return highlightQuery;
}
public int noMatchSize() {
return noMatchSize;
}
public int phraseLimit() {
return phraseLimit;
}
public Set<String> matchedFields() {
return matchedFields;
}
public Map<String, Object> options() {
return options;
}
static | FieldOptions |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java | {
"start": 83661,
"end": 85608
} | class ____ implements RecoveryTargetHandler {
@Override
public void prepareForTranslogOperations(int totalTranslogOps, ActionListener<Void> listener) {}
@Override
public void finalizeRecovery(long globalCheckpoint, long trimAboveSeqNo, ActionListener<Void> listener) {}
@Override
public void handoffPrimaryContext(ReplicationTracker.PrimaryContext primaryContext, ActionListener<Void> listener) {}
@Override
public void indexTranslogOperations(
final List<Translog.Operation> operations,
final int totalTranslogOps,
final long timestamp,
final long msu,
final RetentionLeases retentionLeases,
final long mappingVersion,
final ActionListener<Long> listener
) {}
@Override
public void receiveFileInfo(
List<String> phase1FileNames,
List<Long> phase1FileSizes,
List<String> phase1ExistingFileNames,
List<Long> phase1ExistingFileSizes,
int totalTranslogOps,
ActionListener<Void> listener
) {
}
@Override
public void cleanFiles(
int totalTranslogOps,
long globalCheckpoint,
Store.MetadataSnapshot sourceMetadata,
ActionListener<Void> listener
) {}
@Override
public void restoreFileFromSnapshot(
String repository,
IndexId indexId,
BlobStoreIndexShardSnapshot.FileInfo snapshotFile,
ActionListener<Void> listener
) {}
@Override
public void writeFileChunk(
StoreFileMetadata fileMetadata,
long position,
ReleasableBytesReference content,
boolean lastChunk,
int totalTranslogOps,
ActionListener<Void> listener
) {}
}
| TestRecoveryTargetHandler |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/component/bean/BeanHandlerMethodTest.java | {
"start": 4458,
"end": 4578
} | interface ____ {
@Handler
String hello(@Body String hi);
}
public abstract static | MyBaseInterface |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/node/NumberNodes1770Test.java | {
"start": 547,
"end": 3452
} | class ____ extends DatabindTestUtil
{
// For to [databind#1770] (broken due to fix for #1028): `JsonNodeDeserializer`
// would coerce ok but does `parser.isNaN()` check which ends up parsing
// as Double, gets `POSITIVE_INFINITY` and returns `true`: this results in
// `DoubleNode` being used even tho `BigDecimal` could fit the number.
@Test
public void testBigDecimalCoercion() throws Exception
{
final String value = "7976931348623157e309";
final JsonNode jsonNode = newJsonMapper().reader()
.with(DeserializationFeature.USE_BIG_DECIMAL_FOR_FLOATS)
.readTree(value);
assertTrue(jsonNode.isBigDecimal(), "Expected DecimalNode, got: "+jsonNode.getClass().getName()+": "+jsonNode);
assertEquals(new BigDecimal(value), jsonNode.decimalValue());
}
@Test
public void testBigDecimalCoercionInf() throws Exception
{
final String value = "+INF";
JsonFactory factory = JsonFactory.builder()
.enable(JsonReadFeature.ALLOW_NON_NUMERIC_NUMBERS)
.build();
final JsonNode jsonNode = new JsonMapper(factory).reader()
.with(DeserializationFeature.USE_BIG_DECIMAL_FOR_FLOATS)
.readTree(value);
assertTrue(jsonNode.isDouble(), "Expected DoubleNode, got: "+jsonNode.getClass().getName()+": "+jsonNode);
assertEquals(Double.POSITIVE_INFINITY, jsonNode.doubleValue());
}
// [databind#4194]: should be able to, by configuration, fail coercing NaN to BigDecimal
@Test
public void testBigDecimalCoercionNaN() throws Exception
{
JsonNode n = _tryBigDecimalCoercionNaNWithOption(false);
if (!n.isDouble()) {
fail("Expected DoubleNode, got: "+n.getClass().getName());
}
assertEquals(Double.NaN, n.doubleValue());
try {
n = _tryBigDecimalCoercionNaNWithOption(true);
fail("Should not pass without allowing coercion: produced JsonNode of type "
+n.getClass().getName());
} catch (InvalidFormatException e) {
verifyException(e, "Cannot convert NaN");
}
}
private JsonNode _tryBigDecimalCoercionNaNWithOption(boolean isEnabled) throws Exception
{
JsonFactory factory = JsonFactory.builder()
.enable(JsonReadFeature.ALLOW_NON_NUMERIC_NUMBERS)
.build();
final ObjectReader reader = new JsonMapper(factory)
.reader()
.with(DeserializationFeature.USE_BIG_DECIMAL_FOR_FLOATS);
final String value = "NaN";
// depending on option
return isEnabled
? reader.with(JsonNodeFeature.FAIL_ON_NAN_TO_BIG_DECIMAL_COERCION).readTree(value)
: reader.without(JsonNodeFeature.FAIL_ON_NAN_TO_BIG_DECIMAL_COERCION).readTree(value);
}
}
| NumberNodes1770Test |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/internal/operators/single/SingleStartWithTest.java | {
"start": 784,
"end": 3075
} | class ____ {
@Test
public void justCompletableComplete() {
Single.just(1)
.startWith(Completable.complete())
.test()
.assertResult(1);
}
@Test
public void justCompletableError() {
Single.just(1)
.startWith(Completable.error(new TestException()))
.test()
.assertFailure(TestException.class);
}
@Test
public void justSingleJust() {
Single.just(1)
.startWith(Single.just(0))
.test()
.assertResult(0, 1);
}
@Test
public void justSingleError() {
Single.just(1)
.startWith(Single.error(new TestException()))
.test()
.assertFailure(TestException.class);
}
@Test
public void justMaybeJust() {
Single.just(1)
.startWith(Maybe.just(0))
.test()
.assertResult(0, 1);
}
@Test
public void justMaybeEmpty() {
Single.just(1)
.startWith(Maybe.empty())
.test()
.assertResult(1);
}
@Test
public void justMaybeError() {
Single.just(1)
.startWith(Maybe.error(new TestException()))
.test()
.assertFailure(TestException.class);
}
@Test
public void justObservableJust() {
Single.just(1)
.startWith(Observable.just(-1, 0))
.test()
.assertResult(-1, 0, 1);
}
@Test
public void justObservableEmpty() {
Single.just(1)
.startWith(Observable.empty())
.test()
.assertResult(1);
}
@Test
public void justObservableError() {
Single.just(1)
.startWith(Observable.error(new TestException()))
.test()
.assertFailure(TestException.class);
}
@Test
public void justFlowableJust() {
Single.just(1)
.startWith(Flowable.just(-1, 0))
.test()
.assertResult(-1, 0, 1);
}
@Test
public void justFlowableEmpty() {
Single.just(1)
.startWith(Observable.empty())
.test()
.assertResult(1);
}
@Test
public void justFlowableError() {
Single.just(1)
.startWith(Flowable.error(new TestException()))
.test()
.assertFailure(TestException.class);
}
}
| SingleStartWithTest |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java | {
"start": 52216,
"end": 55166
} | enum ____ {
// currently we support only one auth per method, but eventually a
// subtype is needed to differentiate, ex. if digest is token or ldap
SIMPLE(AuthMethod.SIMPLE,
HadoopConfiguration.SIMPLE_CONFIG_NAME),
KERBEROS(AuthMethod.KERBEROS,
HadoopConfiguration.KERBEROS_CONFIG_NAME),
TOKEN(AuthMethod.TOKEN),
CERTIFICATE(null),
KERBEROS_SSL(null),
PROXY(null);
private final AuthMethod authMethod;
private final String loginAppName;
private AuthenticationMethod(AuthMethod authMethod) {
this(authMethod, null);
}
private AuthenticationMethod(AuthMethod authMethod, String loginAppName) {
this.authMethod = authMethod;
this.loginAppName = loginAppName;
}
public AuthMethod getAuthMethod() {
return authMethod;
}
String getLoginAppName() {
if (loginAppName == null) {
throw new UnsupportedOperationException(
this + " login authentication is not supported");
}
return loginAppName;
}
public static AuthenticationMethod valueOf(AuthMethod authMethod) {
for (AuthenticationMethod value : values()) {
if (value.getAuthMethod() == authMethod) {
return value;
}
}
throw new IllegalArgumentException(
"no authentication method for " + authMethod);
}
};
/**
* Create a proxy user using username of the effective user and the ugi of the
* real user.
* @param user user.
* @param realUser realUser.
* @return proxyUser ugi
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public static UserGroupInformation createProxyUser(String user,
UserGroupInformation realUser) {
if (user == null || user.isEmpty()) {
throw new IllegalArgumentException("Null user");
}
if (realUser == null) {
throw new IllegalArgumentException("Null real user");
}
Subject subject = new Subject();
Set<Principal> principals = subject.getPrincipals();
principals.add(new User(user, AuthenticationMethod.PROXY, null));
principals.add(new RealUser(realUser));
return new UserGroupInformation(subject);
}
/**
* get RealUser (vs. EffectiveUser)
* @return realUser running over proxy user
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public UserGroupInformation getRealUser() {
for (RealUser p: subject.getPrincipals(RealUser.class)) {
return p.getRealUser();
}
return null;
}
/**
* If this is a proxy user, get the real user. Otherwise, return
* this user.
* @param user the user to check
* @return the real user or self
*/
public static UserGroupInformation getRealUserOrSelf(UserGroupInformation user) {
if (user == null) {
return null;
}
UserGroupInformation real = user.getRealUser();
return real != null ? real : user;
}
/**
* This | AuthenticationMethod |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/DefaultCharsetTest.java | {
"start": 16876,
"end": 17304
} | class ____ {
void f(File file) throws Exception {
var fileWriter = new FileWriter(file);
}
}
""")
.addOutputLines(
"out/Test.java",
"""
import static java.nio.charset.StandardCharsets.UTF_8;
import java.io.File;
import java.io.FileWriter;
import java.nio.file.Files;
| Test |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/config/plugins/util/PluginBuilder.java | {
"start": 2275,
"end": 2408
} | class ____ instantiate and configure a Plugin object using a PluginFactory method or PluginBuilderFactory
* builder class.
*/
public | to |
java | hibernate__hibernate-orm | tooling/metamodel-generator/src/test/java/org/hibernate/processor/test/embeddable/nested/field/NestedEmbeddableTest.java | {
"start": 415,
"end": 805
} | class ____ {
@Test
@WithClasses({ Author.class, Address.class, Postcode.class })
void testCorrectAccessTypeUsedForEmbeddable() {
assertAttributeTypeInMetaModelFor(
Address.class,
"city",
String.class,
"city should be String"
);
assertAttributeTypeInMetaModelFor(
Postcode.class,
"zip",
String.class,
"zip should be String"
);
}
}
| NestedEmbeddableTest |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/support/spring/DruidNativeJdbcExtractor.java | {
"start": 766,
"end": 1621
} | class ____ extends NativeJdbcExtractorAdapter {
protected Connection doGetNativeConnection(Connection con) throws SQLException {
return (Connection) con.unwrap(Connection.class);
}
public Statement getNativeStatement(Statement stmt) throws SQLException {
return (Statement) stmt.unwrap(Statement.class);
}
public PreparedStatement getNativePreparedStatement(PreparedStatement ps) throws SQLException {
return (PreparedStatement) ps.unwrap(PreparedStatement.class);
}
public CallableStatement getNativeCallableStatement(CallableStatement cs) throws SQLException {
return (CallableStatement) cs.unwrap(CallableStatement.class);
}
public ResultSet getNativeResultSet(ResultSet rs) throws SQLException {
return (ResultSet) rs.unwrap(ResultSet.class);
}
}
| DruidNativeJdbcExtractor |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/offsettime/OffsetTimeAssert_isAfter_Test.java | {
"start": 1138,
"end": 3726
} | class ____ extends OffsetTimeAssertBaseTest {
@Test
void test_isAfter_assertion() {
// WHEN
assertThat(AFTER).isAfter(REFERENCE);
assertThat(AFTER).isAfter(REFERENCE.toString());
// THEN
verify_that_isAfter_assertion_fails_and_throws_AssertionError(REFERENCE, REFERENCE);
verify_that_isAfter_assertion_fails_and_throws_AssertionError(BEFORE, REFERENCE);
}
@Test
void test_isAfter_assertion_error_message() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> assertThat(parse("03:00:05.123Z")).isAfter(parse("03:00:05.123456789Z")))
.withMessage(format("%n" +
"Expecting actual:%n" +
" 03:00:05.123Z%n" +
"to be strictly after:%n" +
" 03:00:05.123456789Z%n"));
}
@Test
void should_fail_if_actual_is_null() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> {
OffsetTime actual = null;
assertThat(actual).isAfter(OffsetTime.now());
}).withMessage(actualIsNull());
}
@Test
void should_fail_if_offsetTime_parameter_is_null() {
assertThatIllegalArgumentException().isThrownBy(() -> assertThat(OffsetTime.now()).isAfter((OffsetTime) null))
.withMessage("The OffsetTime to compare actual with should not be null");
}
@Test
void should_fail_if_offsetTime_as_string_parameter_is_null() {
assertThatIllegalArgumentException().isThrownBy(() -> assertThat(OffsetTime.now()).isAfter((String) null))
.withMessage("The String representing the OffsetTime to compare actual with should not be null");
}
private static void verify_that_isAfter_assertion_fails_and_throws_AssertionError(OffsetTime timeToCheck,
OffsetTime reference) {
try {
assertThat(timeToCheck).isAfter(reference);
} catch (AssertionError e) {
// AssertionError was expected, test same assertion with String based parameter
try {
assertThat(timeToCheck).isAfter(reference.toString());
} catch (AssertionError e2) {
// AssertionError was expected (again)
return;
}
}
fail("Should have thrown AssertionError");
}
}
| OffsetTimeAssert_isAfter_Test |
java | apache__rocketmq | broker/src/main/java/org/apache/rocketmq/broker/transaction/TransactionalMessageService.java | {
"start": 1158,
"end": 3198
} | interface ____ {
/**
* Process prepare message, in common, we should put this message to storage service.
*
* @param messageInner Prepare(Half) message.
* @return Prepare message storage result.
*/
PutMessageResult prepareMessage(MessageExtBrokerInner messageInner);
/**
* Process prepare message in async manner, we should put this message to storage service
*
* @param messageInner Prepare(Half) message.
* @return CompletableFuture of put result, will be completed at put success(flush and replica done)
*/
CompletableFuture<PutMessageResult> asyncPrepareMessage(MessageExtBrokerInner messageInner);
/**
* Delete prepare message when this message has been committed or rolled back.
*
* @param messageExt
*/
boolean deletePrepareMessage(MessageExt messageExt);
/**
* Invoked to process commit prepare message.
*
* @param requestHeader Commit message request header.
* @return Operate result contains prepare message and relative error code.
*/
OperationResult commitMessage(EndTransactionRequestHeader requestHeader);
/**
* Invoked to roll back prepare message.
*
* @param requestHeader Prepare message request header.
* @return Operate result contains prepare message and relative error code.
*/
OperationResult rollbackMessage(EndTransactionRequestHeader requestHeader);
/**
* Traverse uncommitted/unroll back half message and send check back request to producer to obtain transaction
* status.
*
* @param transactionTimeout The minimum time of the transactional message to be checked firstly, one message only
* exceed this time interval that can be checked.
* @param transactionCheckMax The maximum number of times the message was checked, if exceed this value, this
* message will be discarded.
* @param listener When the message is considered to be checked or discarded, the relative method of this | TransactionalMessageService |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/TypeParameterQualifierTest.java | {
"start": 867,
"end": 1198
} | class ____ {
private final CompilationTestHelper compilationHelper =
CompilationTestHelper.newInstance(TypeParameterQualifier.class, getClass());
@Test
public void positive() {
compilationHelper
.addSourceLines(
"Foo.java",
// force a line break
" | TypeParameterQualifierTest |
java | apache__rocketmq | tools/src/main/java/org/apache/rocketmq/tools/command/broker/GetColdDataFlowCtrInfoSubCommand.java | {
"start": 1809,
"end": 5649
} | class ____ implements SubCommand {
SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
@Override
public String commandName() {
return "getColdDataFlowCtrInfo";
}
@Override
public String commandDesc() {
return "Get cold data flow ctr info.";
}
@Override
public Options buildCommandlineOptions(final Options options) {
Option opt = new Option("b", "brokerAddr", true, "get from which broker");
opt.setRequired(false);
options.addOption(opt);
opt = new Option("c", "clusterName", true, "get from which cluster");
opt.setRequired(false);
options.addOption(opt);
return options;
}
@Override
public void execute(final CommandLine commandLine, final Options options, final RPCHook rpcHook)
throws SubCommandException {
DefaultMQAdminExt defaultMQAdminExt = new DefaultMQAdminExt(rpcHook);
defaultMQAdminExt.setInstanceName(Long.toString(System.currentTimeMillis()));
try {
if (commandLine.hasOption('b')) {
String brokerAddr = commandLine.getOptionValue('b').trim();
defaultMQAdminExt.start();
getAndPrint(defaultMQAdminExt, String.format("============%s============\n", brokerAddr), brokerAddr);
} else if (commandLine.hasOption('c')) {
String clusterName = commandLine.getOptionValue('c').trim();
defaultMQAdminExt.start();
Map<String, List<String>> masterAndSlaveMap = CommandUtil.fetchMasterAndSlaveDistinguish(defaultMQAdminExt, clusterName);
for (String masterAddr : masterAndSlaveMap.keySet()) {
getAndPrint(defaultMQAdminExt, String.format("============Master: %s============\n", masterAddr), masterAddr);
for (String slaveAddr : masterAndSlaveMap.get(masterAddr)) {
getAndPrint(defaultMQAdminExt, String.format("============My Master: %s=====Slave: %s============\n", masterAddr, slaveAddr), slaveAddr);
}
}
}
} catch (Exception e) {
throw new SubCommandException(this.getClass().getSimpleName() + " command failed", e);
} finally {
defaultMQAdminExt.shutdown();
}
}
protected void getAndPrint(final MQAdminExt defaultMQAdminExt, final String printPrefix, final String addr)
throws InterruptedException, RemotingConnectException,
UnsupportedEncodingException, RemotingTimeoutException,
MQBrokerException, RemotingSendRequestException {
System.out.print(" " + printPrefix);
String rstStr = defaultMQAdminExt.getColdDataFlowCtrInfo(addr);
if (rstStr == null) {
System.out.printf("Broker[%s] has no cold ctr table !\n", addr);
return;
}
JSONObject jsonObject = JSON.parseObject(rstStr);
Map<String, JSONObject> runtimeTable = (Map<String, JSONObject>)jsonObject.get("runtimeTable");
runtimeTable.entrySet().stream().forEach(i -> {
JSONObject value = i.getValue();
Date lastColdReadTimeMillsDate = new Date(Long.parseLong(String.valueOf(value.get("lastColdReadTimeMills"))));
value.put("lastColdReadTimeFormat", sdf.format(lastColdReadTimeMillsDate));
value.remove("lastColdReadTimeMills");
Date createTimeMillsDate = new Date(Long.parseLong(String.valueOf(value.get("createTimeMills"))));
value.put("createTimeFormat", sdf.format(createTimeMillsDate));
value.remove("createTimeMills");
});
String formatStr = JSON.toJSONString(jsonObject, true);
System.out.printf(formatStr);
System.out.printf("%n");
}
}
| GetColdDataFlowCtrInfoSubCommand |
java | apache__camel | dsl/camel-yaml-dsl/camel-yaml-dsl-deserializers/src/generated/java/org/apache/camel/dsl/yaml/deserializers/ModelDeserializers.java | {
"start": 936663,
"end": 939832
} | class ____ extends YamlDeserializerBase<RestsDefinition> {
public RestsDefinitionDeserializer() {
super(RestsDefinition.class);
}
@Override
protected RestsDefinition newInstance() {
return new RestsDefinition();
}
@Override
protected boolean setProperty(RestsDefinition target, String propertyKey,
String propertyName, Node node) {
propertyKey = org.apache.camel.util.StringHelper.dashToCamelCase(propertyKey);
switch(propertyKey) {
case "rest": {
java.util.List<org.apache.camel.model.rest.RestDefinition> val = asFlatList(node, org.apache.camel.model.rest.RestDefinition.class);
target.setRests(val);
break;
}
case "id": {
String val = asText(node);
target.setId(val);
break;
}
case "description": {
String val = asText(node);
target.setDescription(val);
break;
}
case "note": {
String val = asText(node);
target.setNote(val);
break;
}
default: {
return false;
}
}
return true;
}
}
@YamlType(
nodes = "resumable",
types = org.apache.camel.model.ResumableDefinition.class,
order = org.apache.camel.dsl.yaml.common.YamlDeserializerResolver.ORDER_LOWEST - 1,
displayName = "Resumable",
description = "Resume EIP to support resuming processing from last known offset.",
deprecated = false,
properties = {
@YamlProperty(name = "description", type = "string", description = "Sets the description of this node", displayName = "Description"),
@YamlProperty(name = "disabled", type = "boolean", defaultValue = "false", description = "Disables this EIP from the route.", displayName = "Disabled"),
@YamlProperty(name = "id", type = "string", description = "Sets the id of this node", displayName = "Id"),
@YamlProperty(name = "intermittent", type = "boolean", defaultValue = "false", description = "Sets whether the offsets will be intermittently present or whether they must be present in every exchange", displayName = "Intermittent"),
@YamlProperty(name = "loggingLevel", type = "enum:TRACE,DEBUG,INFO,WARN,ERROR,OFF", defaultValue = "ERROR", description = "The logging level to use in case of failures.", displayName = "Logging Level"),
@YamlProperty(name = "note", type = "string", description = "Sets the note of this node", displayName = "Note"),
@YamlProperty(name = "resumeStrategy", type = "string", required = true, description = "Sets the resume strategy to use", displayName = "Resume Strategy")
}
)
public static | RestsDefinitionDeserializer |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/erroneous/ambiguousmapping/ErroneousWithAmbiguousMethodsMapper.java | {
"start": 745,
"end": 806
} | class ____ {
public BranchDTO branch;
}
| TrunkDTO |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/state/PlaceholderStreamStateHandle.java | {
"start": 1225,
"end": 1324
} | class ____ used in the referenced states of {@link
* IncrementalRemoteKeyedStateHandle}.
*/
public | is |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/ext/javatime/ser/LocalDateSerTest.java | {
"start": 1163,
"end": 1231
} | class ____
extends DateTimeTestBase
{
final static | LocalDateSerTest |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/ActionListenerImplementations.java | {
"start": 5938,
"end": 7048
} | class ____<T> extends DelegatingActionListener<T, T> {
private final BiConsumer<ActionListener<T>, Exception> bc;
DelegatingResponseActionListener(ActionListener<T> delegate, BiConsumer<ActionListener<T>, Exception> bc) {
super(delegate);
this.bc = bc;
}
@Override
public void onResponse(T t) {
delegate.onResponse(t);
}
private void acceptException(Exception e) {
bc.accept(delegate, e);
}
@Override
public void onFailure(Exception e) {
safeAcceptException(this::acceptException, e);
}
@Override
public String toString() {
return super.toString() + "/" + bc;
}
}
/**
* Replaces the onResponse handling of a given ActionListener with a lambda that receives both the original listener and a response.
* This is useful when a listener is needed to do some additional work with a response before passing a response on to the original
* listener.
*/
static final | DelegatingResponseActionListener |
java | apache__logging-log4j2 | log4j-api/src/main/java/org/apache/logging/log4j/util/LoaderUtil.java | {
"start": 9467,
"end": 10021
} | class ____ for any other reason
* @see #loadClass(String)
* @since 2.22.0
*/
public static Class<?> loadClassUnchecked(final String className) {
try {
return loadClass(className);
} catch (final ClassNotFoundException e) {
final NoClassDefFoundError error = new NoClassDefFoundError(e.getMessage());
error.initCause(e);
throw error;
}
}
/**
* Loads and instantiates a Class using the default constructor.
*
* @param <T> the type of the | fails |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/state/metrics/LatencyTrackingStateConfig.java | {
"start": 1177,
"end": 1992
} | class ____ extends MetricsTrackingStateConfig {
LatencyTrackingStateConfig(
MetricGroup metricGroup,
boolean enabled,
int sampleInterval,
int historySize,
boolean stateNameAsVariable) {
super(metricGroup, enabled, sampleInterval, historySize, stateNameAsVariable);
if (enabled) {
Preconditions.checkNotNull(
metricGroup, "Metric group cannot be null if latency tracking is enabled.");
Preconditions.checkArgument(sampleInterval >= 1);
}
}
public static LatencyTrackingStateConfig disabled() {
return newBuilder().setEnabled(false).build();
}
public static Builder newBuilder() {
return new Builder();
}
public static | LatencyTrackingStateConfig |
java | quarkusio__quarkus | integration-tests/reactive-messaging-pulsar/src/test/java/io/quarkus/it/pulsar/PulsarConnectorIT.java | {
"start": 116,
"end": 173
} | class ____ extends PulsarConnectorTest {
}
| PulsarConnectorIT |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/pool/basic/PoolableCallableStatementTest.java | {
"start": 1219,
"end": 52115
} | class ____ extends TestCase {
private DruidDataSource dataSource = new DruidDataSource();
protected DruidPooledConnection conn;
protected MockCallableStatement raw;
protected DruidPooledCallableStatement stmt;
protected void setUp() throws Exception {
MockConnection mockConn = new MockConnection();
DruidConnectionHolder connHolder = new DruidConnectionHolder(dataSource, mockConn, 0);
conn = new DruidPooledConnection(connHolder);
raw = new MockCallableStatement(null, null);
stmt = new DruidPooledCallableStatement(conn, new PreparedStatementHolder(new PreparedStatementKey("", null,
null, 0, 0,
0), raw)) {
protected SQLException checkException(Throwable error) throws SQLException {
if (error instanceof SQLException) {
return (SQLException) error;
}
return new SQLException(error);
}
};
assertEquals(0, raw.getOutParameters().size());
stmt.registerOutParameter(1, Types.INTEGER);
assertEquals(1, raw.getOutParameters().size());
stmt.registerOutParameter(2, Types.DECIMAL, 10);
assertEquals(2, raw.getOutParameters().size());
}
public void test_executeQuery_large() throws Exception {
for (int i = 0; i < 1000 * 1000; ++i) {
ResultSet rs = stmt.executeQuery();
rs.close();
}
}
public void test_basic() throws Exception {
assertEquals(raw, stmt.getCallableStatementRaw());
}
@SuppressWarnings("deprecation")
public void test_callableStmt() throws Exception {
assertTrue(stmt.wasNull() == false);
stmt.getString(1);
assertTrue(stmt.wasNull());
stmt.getBoolean(1);
stmt.getByte(1);
stmt.getShort(1);
stmt.getInt(1);
stmt.getLong(1);
stmt.getFloat(1);
stmt.getDouble(1);
stmt.getBigDecimal(1);
stmt.getBigDecimal(1, 1);
stmt.getBytes(1);
stmt.getDate(1);
stmt.getTime(1);
stmt.getTimestamp(1);
stmt.getObject(1);
stmt.getRef(1);
stmt.getBlob(1);
stmt.getString("1");
stmt.getBoolean("1");
stmt.getByte("1");
stmt.getShort("1");
stmt.getInt("1");
stmt.getLong("1");
stmt.getFloat("1");
stmt.getDouble("1");
stmt.getBigDecimal("1");
stmt.getBytes("1");
stmt.getDate("1");
stmt.getTime("1");
stmt.getTimestamp("1");
stmt.getObject("1");
stmt.getRef("1");
stmt.getBlob("1");
}
public void test_getByLabel_error() {
{
SQLException error = null;
try {
stmt.getTimestamp(0);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
// ////////////////
{
SQLException error = null;
try {
stmt.getString("0");
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getBoolean("0");
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getByte("0");
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getShort("0");
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getInt("0");
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getLong("0");
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getFloat("0");
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getDouble("0");
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getBigDecimal("0");
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getBytes("0");
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getDate("0");
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getTime("0");
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getTimestamp("0");
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getObject("0");
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getRef("0");
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getBlob("0");
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
@SuppressWarnings("deprecation")
public void test_get_error() {
{
SQLException error = null;
try {
stmt.getString(0);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getBoolean(0);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getByte(0);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getShort(0);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getInt(0);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getLong(0);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getFloat(0);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getDouble(0);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getBigDecimal(0);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getBigDecimal(0, 1);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getBytes(0);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getDate(0);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getTime(0);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getObject(0);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getRef(0);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getBlob(0);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_getNClob() throws Exception {
stmt.getNClob(1);
stmt.getNClob("1");
{
SQLException error = null;
try {
stmt.getNClob(0);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getNClob("0");
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_getNString() throws Exception {
stmt.getNString(1);
stmt.getNString("1");
{
SQLException error = null;
try {
stmt.getNString(0);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getNString("0");
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_getNCharacterStream() throws Exception {
stmt.getNCharacterStream(1);
stmt.getNCharacterStream("1");
{
SQLException error = null;
try {
stmt.getNCharacterStream(0);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getNCharacterStream("0");
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_getCharacterStream() throws Exception {
stmt.getCharacterStream(1);
stmt.getCharacterStream("1");
{
SQLException error = null;
try {
stmt.getCharacterStream(0);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getCharacterStream("0");
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_updateCharacterStream_2() throws Exception {
stmt.setCharacterStream(1, (Reader) null, 1L);
stmt.setCharacterStream("1", (Reader) null, 1L);
{
SQLException error = null;
try {
stmt.setCharacterStream("0", (Reader) null, 1L);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setCharacterStream(0, (Reader) null, 1L);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setNClob() throws Exception {
stmt.setNClob(1, (Reader) null);
stmt.setNClob("1", (Reader) null);
{
SQLException error = null;
try {
stmt.setNClob("0", (Reader) null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setNClob(0, (Reader) null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setClob() throws Exception {
stmt.setClob(1, (Reader) null);
stmt.setClob("1", (Reader) null);
{
SQLException error = null;
try {
stmt.setClob("0", (Reader) null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setClob(0, (Reader) null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setNCharacterStream() throws Exception {
stmt.setNCharacterStream(1, (Reader) null);
stmt.setNCharacterStream("1", (Reader) null);
{
SQLException error = null;
try {
stmt.setNCharacterStream("0", (Reader) null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setNCharacterStream(0, (Reader) null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setCharacterStream() throws Exception {
stmt.setCharacterStream(1, (Reader) null);
stmt.setCharacterStream("1", (Reader) null);
{
SQLException error = null;
try {
stmt.setCharacterStream("0", (Reader) null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setCharacterStream(0, (Reader) null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setBinaryStream() throws Exception {
stmt.setBinaryStream(1, (InputStream) null);
stmt.setBinaryStream("1", (InputStream) null);
{
SQLException error = null;
try {
stmt.setBinaryStream("0", (InputStream) null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setBinaryStream(0, (InputStream) null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setBinaryStream_1() throws Exception {
stmt.setBinaryStream(1, (InputStream) null, 1);
stmt.setBinaryStream("1", (InputStream) null, 1);
{
SQLException error = null;
try {
stmt.setBinaryStream("0", (InputStream) null, 1);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setBinaryStream(0, (InputStream) null, 1);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setBinaryStream_2() throws Exception {
stmt.setBinaryStream(1, (InputStream) null, 1L);
stmt.setBinaryStream("1", (InputStream) null, 1L);
{
SQLException error = null;
try {
stmt.setBinaryStream("0", (InputStream) null, 1L);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setBinaryStream(0, (InputStream) null, 1L);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setAsciiStream() throws Exception {
stmt.setAsciiStream(1, (InputStream) null);
stmt.setAsciiStream("1", (InputStream) null);
{
SQLException error = null;
try {
stmt.setAsciiStream("0", (InputStream) null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setAsciiStream(0, (InputStream) null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setBlob() throws Exception {
stmt.setBlob(1, (InputStream) null);
stmt.setBlob("1", (InputStream) null);
{
SQLException error = null;
try {
stmt.setBlob("0", (InputStream) null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setBlob(0, (InputStream) null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setClob_1() throws Exception {
stmt.setClob(1, (Clob) null);
stmt.setClob("1", (Clob) null);
{
SQLException error = null;
try {
stmt.setClob("0", (Clob) null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setClob(0, (Clob) null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setClob_2() throws Exception {
stmt.setClob(1, (Reader) null, 1L);
stmt.setClob("1", (Reader) null, 1L);
{
SQLException error = null;
try {
stmt.setClob("0", (Reader) null, 1L);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setClob(0, (Reader) null, 1L);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setAsciiStream_1() throws Exception {
stmt.setAsciiStream(1, (InputStream) null, 1L);
stmt.setAsciiStream("1", (InputStream) null, 1L);
{
SQLException error = null;
try {
stmt.setAsciiStream("0", (InputStream) null, 1L);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setAsciiStream(0, (InputStream) null, 1L);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setNull() throws Exception {
stmt.setNull(1, Types.INTEGER, "Int");
stmt.setNull("1", Types.INTEGER, "Int");
{
SQLException error = null;
try {
stmt.setNull("0", Types.INTEGER, "Int");
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setNull(0, Types.INTEGER, "Int");
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setTimestamp() throws Exception {
stmt.setTimestamp(1, (Timestamp) null);
stmt.setTimestamp("1", (Timestamp) null);
{
SQLException error = null;
try {
stmt.setTimestamp("0", (Timestamp) null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setTimestamp(0, (Timestamp) null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setTimestamp_1() throws Exception {
stmt.setTimestamp(1, (Timestamp) null, null);
stmt.setTimestamp("1", (Timestamp) null, null);
{
SQLException error = null;
try {
stmt.setTimestamp("0", (Timestamp) null, null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setTimestamp(0, (Timestamp) null, null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setTime() throws Exception {
stmt.setTime(1, (Time) null);
stmt.setTime("1", (Time) null);
{
SQLException error = null;
try {
stmt.setTime("0", (Time) null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setTime(0, (Time) null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setTime_1() throws Exception {
stmt.setTime(1, (Time) null, null);
stmt.setTime("1", (Time) null, null);
{
SQLException error = null;
try {
stmt.setTime("0", (Time) null, null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setTime(0, (Time) null, null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setDate() throws Exception {
stmt.setDate(1, (Date) null);
stmt.setDate("1", (Date) null);
{
SQLException error = null;
try {
stmt.setDate("0", (Date) null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setDate(0, (Date) null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setDate_1() throws Exception {
stmt.setDate(1, (Date) null, null);
stmt.setDate("1", (Date) null, null);
{
SQLException error = null;
try {
stmt.setDate("0", (Date) null, null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setDate(0, (Date) null, null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setCharacterStream_1() throws Exception {
stmt.setCharacterStream(1, (Reader) null, 1);
stmt.setCharacterStream("1", (Reader) null, 1);
{
SQLException error = null;
try {
stmt.setCharacterStream("0", (Reader) null, 1);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setCharacterStream(0, (Reader) null, 1);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setObject() throws Exception {
stmt.setObject(1, null);
stmt.setObject("1", null);
{
SQLException error = null;
try {
stmt.setObject("0", null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setObject(0, null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setObject_1() throws Exception {
stmt.setObject(1, null, Types.INTEGER);
stmt.setObject("1", null, Types.INTEGER);
{
SQLException error = null;
try {
stmt.setObject("0", null, Types.INTEGER);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setObject(0, null, Types.INTEGER);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setObject_2() throws Exception {
stmt.setObject(1, null, Types.INTEGER, 2);
stmt.setObject("1", null, Types.INTEGER, 2);
{
SQLException error = null;
try {
stmt.setObject("0", null, Types.INTEGER, 2);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setObject(0, null, Types.INTEGER, 2);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setAsciiStream_2() throws Exception {
stmt.setAsciiStream(1, (InputStream) null, 1);
stmt.setAsciiStream("1", (InputStream) null, 1);
{
SQLException error = null;
try {
stmt.setAsciiStream("0", (InputStream) null, 1);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setAsciiStream(0, (InputStream) null, 1);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setBytes() throws Exception {
stmt.setBytes(1, null);
stmt.setBytes("1", null);
{
SQLException error = null;
try {
stmt.setBytes("0", null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setBytes(0, null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setString() throws Exception {
stmt.setString(1, null);
stmt.setString("1", null);
{
SQLException error = null;
try {
stmt.setString("0", null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setString(0, null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setBigDecimal() throws Exception {
stmt.setBigDecimal(1, null);
stmt.setBigDecimal("1", null);
{
SQLException error = null;
try {
stmt.setBigDecimal("0", null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setBigDecimal(0, null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setDouble() throws Exception {
stmt.setDouble(1, 1.0D);
stmt.setDouble("1", 1.0D);
{
SQLException error = null;
try {
stmt.setDouble("0", 1.0D);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setDouble(0, 1.0D);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setFloat() throws Exception {
stmt.setFloat(1, 1.0F);
stmt.setFloat("1", 1.0F);
{
SQLException error = null;
try {
stmt.setFloat("0", 1.0F);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setFloat(0, 1.0F);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setLong() throws Exception {
stmt.setLong(1, 2);
stmt.setLong("1", 2);
{
SQLException error = null;
try {
stmt.setLong("0", 2);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setLong(0, 2);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setInt() throws Exception {
stmt.setInt(1, 2);
stmt.setInt("1", 2);
{
SQLException error = null;
try {
stmt.setInt("0", 2);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setInt(0, 2);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setShort() throws Exception {
stmt.setShort(1, Short.MAX_VALUE);
stmt.setShort("1", Short.MAX_VALUE);
{
SQLException error = null;
try {
stmt.setShort("0", Short.MAX_VALUE);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setShort(0, Short.MAX_VALUE);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setByte() throws Exception {
stmt.setByte(1, Byte.MAX_VALUE);
stmt.setByte("1", Byte.MAX_VALUE);
{
SQLException error = null;
try {
stmt.setByte("0", Byte.MAX_VALUE);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setByte(0, Byte.MAX_VALUE);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_getClob() throws Exception {
stmt.getClob(1);
stmt.getClob("1");
{
SQLException error = null;
try {
stmt.getClob(0);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getClob("0");
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_getArray() throws Exception {
stmt.getArray(1);
stmt.getArray("1");
{
SQLException error = null;
try {
stmt.getArray(0);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getArray("0");
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_getDate() throws Exception {
stmt.getDate(1);
stmt.getDate("1");
{
SQLException error = null;
try {
stmt.getDate(0);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getDate("0");
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_getDate_1() throws Exception {
stmt.getDate(1, null);
stmt.getDate("1", null);
{
SQLException error = null;
try {
stmt.getDate(0, null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getDate("0", null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_getTime() throws Exception {
stmt.getTime(1);
stmt.getTime("1");
{
SQLException error = null;
try {
stmt.getTime(0);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getTime("0");
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_getTime_1() throws Exception {
stmt.getTime(1, null);
stmt.getTime("1", null);
{
SQLException error = null;
try {
stmt.getTime(0, null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getTime("0", null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_getTimestamp() throws Exception {
stmt.getTimestamp(1);
stmt.getTimestamp("1");
{
SQLException error = null;
try {
stmt.getTimestamp(0);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getTimestamp("0");
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_getTimestamp_1() throws Exception {
stmt.getTimestamp(1, null);
stmt.getTimestamp("1", null);
{
SQLException error = null;
try {
stmt.getTimestamp(0, null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getTimestamp("0", null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_getURL() throws Exception {
stmt.getURL(1);
stmt.getURL("1");
{
SQLException error = null;
try {
stmt.getURL(0);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getURL("0");
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setBlob_1() throws Exception {
stmt.setBlob(1, (Blob) null);
stmt.setBlob("1", (Blob) null);
{
SQLException error = null;
try {
stmt.setBlob("0", (Blob) null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setBlob(0, (Blob) null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setSQLXML() throws Exception {
stmt.setSQLXML(1, (SQLXML) null);
stmt.setSQLXML("1", (SQLXML) null);
{
SQLException error = null;
try {
stmt.setSQLXML("0", (SQLXML) null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setSQLXML(0, (SQLXML) null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_getSQLXML() throws Exception {
stmt.getSQLXML(1);
stmt.getSQLXML("1");
{
SQLException error = null;
try {
stmt.getSQLXML(0);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getSQLXML("0");
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setBlob_2() throws Exception {
stmt.setBlob(1, (InputStream) null, 1L);
stmt.setBlob("1", (InputStream) null, 1L);
{
SQLException error = null;
try {
stmt.setBlob("0", (InputStream) null, 1L);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setBlob(0, (InputStream) null, 1L);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setNClob_1() throws Exception {
stmt.setNClob(1, (Reader) null, 1L);
stmt.setNClob("1", (Reader) null, 1L);
{
SQLException error = null;
try {
stmt.setNClob("0", (Reader) null, 1L);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setNClob(0, (Reader) null, 1L);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setNCharacterStream_1() throws Exception {
stmt.setNCharacterStream(1, (Reader) null, 1L);
stmt.setNCharacterStream("1", (Reader) null, 1L);
{
SQLException error = null;
try {
stmt.setNCharacterStream("0", (Reader) null, 1L);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setNCharacterStream(0, (Reader) null, 1L);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setRowId() throws Exception {
stmt.setRowId(1, (RowId) null);
stmt.setRowId("1", (RowId) null);
{
SQLException error = null;
try {
stmt.setRowId("0", (RowId) null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setRowId(0, (RowId) null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_getRowId() throws Exception {
stmt.getRowId(1);
stmt.getRowId("1");
{
SQLException error = null;
try {
stmt.getRowId(0);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getRowId("0");
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setNString() throws Exception {
stmt.setNString(1, (String) null);
stmt.setNString("1", (String) null);
{
SQLException error = null;
try {
stmt.setNString("0", (String) null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setNString(0, (String) null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_getObject() throws Exception {
stmt.getObject(1, (java.util.Map) null);
stmt.getObject("1", (java.util.Map) null);
{
SQLException error = null;
try {
stmt.getObject(0, (java.util.Map) null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.getObject("0", (java.util.Map) null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setBoolean() throws Exception {
stmt.setBoolean(1, true);
stmt.setBoolean("1", true);
{
SQLException error = null;
try {
stmt.setBoolean("0", true);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setBoolean(0, true);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setURL() throws Exception {
stmt.setURL(1, null);
stmt.setURL("1", null);
{
SQLException error = null;
try {
stmt.setURL("0", null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setURL(0, null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setNClob_2() throws Exception {
stmt.setNClob(1, (NClob) null);
stmt.setNClob("1", (NClob) null);
{
SQLException error = null;
try {
stmt.setNClob("0", (NClob) null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setNClob(0, (NClob) null);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_setNull_1() throws Exception {
stmt.setNull(1, Types.INTEGER);
stmt.setNull("1", Types.INTEGER);
{
SQLException error = null;
try {
stmt.setNull("0", Types.INTEGER);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.setNull(0, Types.INTEGER);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_registerOutParameter() throws Exception {
stmt.registerOutParameter(1, Types.INTEGER, "Int");
stmt.registerOutParameter("1", Types.INTEGER, "Int");
{
SQLException error = null;
try {
stmt.registerOutParameter("0", Types.INTEGER, "Int");
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.registerOutParameter(0, Types.INTEGER, "Int");
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_registerOutParameter_1() throws Exception {
stmt.registerOutParameter(1, Types.INTEGER, 2);
stmt.registerOutParameter("1", Types.INTEGER, 2);
{
SQLException error = null;
try {
stmt.registerOutParameter("0", Types.INTEGER, 2);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.registerOutParameter(0, Types.INTEGER, 2);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
public void test_registerOutParameter_2() throws Exception {
stmt.registerOutParameter(1, Types.INTEGER);
stmt.registerOutParameter("1", Types.INTEGER);
{
SQLException error = null;
try {
stmt.registerOutParameter("0", Types.INTEGER);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
{
SQLException error = null;
try {
stmt.registerOutParameter(0, Types.INTEGER);
} catch (SQLException ex) {
error = ex;
}
assertNotNull(error);
}
}
}
| PoolableCallableStatementTest |
java | apache__flink | flink-core-api/src/main/java/org/apache/flink/api/java/tuple/builder/Tuple22Builder.java | {
"start": 1268,
"end": 2111
} | class ____ {@link Tuple22}.
*
* @param <T0> The type of field 0
* @param <T1> The type of field 1
* @param <T2> The type of field 2
* @param <T3> The type of field 3
* @param <T4> The type of field 4
* @param <T5> The type of field 5
* @param <T6> The type of field 6
* @param <T7> The type of field 7
* @param <T8> The type of field 8
* @param <T9> The type of field 9
* @param <T10> The type of field 10
* @param <T11> The type of field 11
* @param <T12> The type of field 12
* @param <T13> The type of field 13
* @param <T14> The type of field 14
* @param <T15> The type of field 15
* @param <T16> The type of field 16
* @param <T17> The type of field 17
* @param <T18> The type of field 18
* @param <T19> The type of field 19
* @param <T20> The type of field 20
* @param <T21> The type of field 21
*/
@Public
public | for |
java | mockito__mockito | mockito-core/src/main/java/org/mockito/internal/stubbing/answers/AnswerFunctionalInterfaces.java | {
"start": 861,
"end": 954
} | class ____ {
/**
* Hide constructor to avoid instantiation of | AnswerFunctionalInterfaces |
java | google__guice | extensions/struts2/test/com/google/inject/struts2/Struts2FactoryTest.java | {
"start": 1153,
"end": 1263
} | class ____ extends TestCase {
private static final Date TODAY = new Date();
public static | Struts2FactoryTest |
java | apache__dubbo | dubbo-remoting/dubbo-remoting-zookeeper-curator5/src/test/java/org/apache/dubbo/remoting/zookeeper/curator5/Curator5ZookeeperClientTest.java | {
"start": 3575,
"end": 33503
} | class ____ {
private static Curator5ZookeeperClient curatorClient;
private static int zookeeperServerMockPort1;
private static String zookeeperConnectionAddress1;
private static MockedStatic<CuratorFrameworkFactory> curatorFrameworkFactoryMockedStatic;
CuratorFrameworkFactory.Builder spyBuilder = CuratorFrameworkFactory.builder();
private CuratorFramework mockCuratorFramework;
private CreateBuilder mockCreateBuilder;
private ExistsBuilder mockExistsBuilder;
private GetChildrenBuilder mockGetChildrenBuilder;
private DeleteBuilder mockDeleteBuilder;
private GetDataBuilder mockGetDataBuilder;
private SetDataBuilder mockSetDataBuilder;
private CuratorZookeeperClient mockCuratorZookeeperClient;
private WatcherRemoveCuratorFramework mockWatcherRemoveCuratorFramework;
private Answer<String> createAnswer;
@BeforeAll
public static void setUp() throws Exception {
zookeeperServerMockPort1 = 2181;
zookeeperConnectionAddress1 = "zookeeper://localhost:" + zookeeperServerMockPort1;
// mock begin
// create mock bean begin
CuratorFrameworkFactory.Builder realBuilder = CuratorFrameworkFactory.builder();
CuratorFrameworkFactory.Builder spyBuilder = spy(realBuilder);
curatorFrameworkFactoryMockedStatic = mockStatic(CuratorFrameworkFactory.class);
curatorFrameworkFactoryMockedStatic
.when(CuratorFrameworkFactory::builder)
.thenReturn(spyBuilder);
}
@BeforeEach
public void init() throws Exception {
mockCreateBuilder = mock(CreateBuilder.class);
mockExistsBuilder = mock(ExistsBuilder.class);
mockDeleteBuilder = mock(DeleteBuilder.class);
mockCuratorFramework = mock(CuratorFramework.class);
mockGetChildrenBuilder = mock(GetChildrenBuilder.class);
mockGetDataBuilder = mock(GetDataBuilder.class);
mockCuratorZookeeperClient = mock(CuratorZookeeperClient.class);
mockWatcherRemoveCuratorFramework = mock(WatcherRemoveCuratorFramework.class);
mockSetDataBuilder = mock(SetDataBuilder.class);
doReturn(mockCuratorFramework).when(spyBuilder).build();
when(mockCuratorFramework.blockUntilConnected(anyInt(), any())).thenReturn(true);
when(mockCuratorFramework.getConnectionStateListenable()).thenReturn(StandardListenerManager.standard());
when(mockCuratorFramework.create()).thenReturn(mockCreateBuilder);
when(mockCuratorFramework.checkExists()).thenReturn(mockExistsBuilder);
when(mockCuratorFramework.getChildren()).thenReturn(mockGetChildrenBuilder);
when(mockCuratorFramework.getZookeeperClient()).thenReturn(mockCuratorZookeeperClient);
when(mockCuratorFramework.newWatcherRemoveCuratorFramework()).thenReturn(mockWatcherRemoveCuratorFramework);
when(mockCuratorZookeeperClient.isConnected()).thenReturn(true);
when(mockCuratorFramework.delete()).thenReturn(mockDeleteBuilder);
when(mockCreateBuilder.withMode(any())).thenReturn(mockCreateBuilder);
when(mockDeleteBuilder.deletingChildrenIfNeeded()).thenReturn(mockDeleteBuilder);
when(mockDeleteBuilder.forPath(any())).then((Answer<Void>) invocationOnMock -> null);
when(mockCuratorFramework.getData()).thenReturn(mockGetDataBuilder);
when(mockCuratorFramework.setData()).thenReturn(mockSetDataBuilder);
when(mockSetDataBuilder.withVersion(anyInt())).thenReturn(mockSetDataBuilder);
List<String> paths = new ArrayList<>();
createAnswer = invocationOnMock -> {
String param = invocationOnMock.getArgument(0);
if (paths.contains(param)) {
throw new NodeExistsException("node existed: " + param);
}
paths.add(invocationOnMock.getArgument(0));
return invocationOnMock.getArgument(0);
};
when(mockCreateBuilder.forPath(anyString())).thenAnswer(createAnswer);
when(mockCreateBuilder.forPath(anyString(), any())).thenAnswer(createAnswer);
when(mockExistsBuilder.forPath(anyString())).thenAnswer(i -> {
if (paths.contains(i.getArgument(0))) {
return new Stat();
}
return null;
});
when(mockDeleteBuilder.forPath(anyString())).thenAnswer(i -> {
if (paths.contains(i.getArgument(0))) {
paths.remove(i.getArgument(0));
}
return null;
});
curatorClient = new Curator5ZookeeperClient(
URL.valueOf(zookeeperConnectionAddress1 + "/org.apache.dubbo.registry.RegistryService"));
}
@Test
void testCheckExists() {
String path = "/dubbo/org.apache.dubbo.demo.DemoService/providers";
curatorClient.create(path, false, true);
assertThat(curatorClient.checkExists(path), is(true));
assertThat(curatorClient.checkExists(path + "/noneexits"), is(false));
}
@Test
void testChildrenPath() throws Exception {
when(mockGetChildrenBuilder.forPath(any())).thenReturn(Lists.newArrayList("provider1", "provider2"));
String path = "/dubbo/org.apache.dubbo.demo.DemoService/providers";
curatorClient.create(path, false, true);
curatorClient.create(path + "/provider1", false, true);
curatorClient.create(path + "/provider2", false, true);
List<String> children = curatorClient.getChildren(path);
assertThat(children.size(), is(2));
}
@Test
@Timeout(value = 2)
public void testChildrenListener() throws Exception {
String path = "/dubbo/org.apache.dubbo.demo.DemoListenerService/providers";
curatorClient.create(path, false, true);
final CountDownLatch countDownLatch = new CountDownLatch(1);
when(mockGetChildrenBuilder.usingWatcher(any(CuratorWatcher.class))).thenReturn(mockGetChildrenBuilder);
when(mockGetChildrenBuilder.forPath(any())).thenReturn(Lists.newArrayList("providers"));
CuratorWatcherImpl watcher = new CuratorWatcherImpl() {
@Override
public void process(WatchedEvent watchedEvent) {
countDownLatch.countDown();
}
};
curatorClient.addTargetChildListener(path, watcher);
watcher.process(new WatchedEvent(Event.EventType.NodeDeleted, KeeperState.Closed, "providers"));
curatorClient.createPersistent(path + "/provider1", true);
countDownLatch.await();
}
@Test
void testWithInvalidServer() throws InterruptedException {
when(mockCuratorFramework.blockUntilConnected(anyInt(), any())).thenReturn(false);
Assertions.assertThrows(IllegalStateException.class, () -> {
curatorClient = new Curator5ZookeeperClient(URL.valueOf("zookeeper://127.0.0.1:1/service?timeout=1000"));
curatorClient.create("/testPath", true, true);
});
}
@Test
void testWithInvalidServerWithoutCheck() throws InterruptedException {
when(mockCuratorFramework.blockUntilConnected(anyInt(), any())).thenReturn(false);
URL url = URL.valueOf("zookeeper://127.0.0.1:1/service").addParameter(CHECK_KEY, false);
Assertions.assertDoesNotThrow(() -> {
curatorClient = new Curator5ZookeeperClient(url);
curatorClient.create("/testPath", true, true);
});
}
@Test
void testRemoveChildrenListener() throws Exception {
ChildListener childListener = mock(ChildListener.class);
when(mockGetChildrenBuilder.usingWatcher(any(CuratorWatcher.class))).thenReturn(mockGetChildrenBuilder);
when(mockGetChildrenBuilder.forPath(any())).thenReturn(Lists.newArrayList("children"));
curatorClient.addChildListener("/children", childListener);
curatorClient.removeChildListener("/children", childListener);
}
@Test
void testCreateExistingPath() {
curatorClient.create("/pathOne", false, true);
curatorClient.create("/pathOne", false, true);
}
@Test
void testConnectedStatus() {
curatorClient.createEphemeral("/testPath", true);
boolean connected = curatorClient.isConnected();
assertThat(connected, is(true));
}
@Test
void testCreateContent4Persistent() throws Exception {
String path = "/curatorTest4CrContent/content.data";
String content = "createContentTest";
curatorClient.delete(path);
assertThat(curatorClient.checkExists(path), is(false));
assertNull(curatorClient.getContent(path));
when(mockGetDataBuilder.forPath(any())).then(invocationOnMock -> content.getBytes());
curatorClient.createOrUpdate(path, content, false);
assertThat(curatorClient.checkExists(path), is(true));
assertEquals(curatorClient.getContent(path), content);
}
@Test
void testCreateContent4Temp() throws Exception {
String path = "/curatorTest4CrContent/content.data";
String content = "createContentTest";
curatorClient.delete(path);
assertThat(curatorClient.checkExists(path), is(false));
assertNull(curatorClient.getContent(path));
when(mockGetDataBuilder.forPath(any())).then(invocationOnMock -> content.getBytes());
curatorClient.createOrUpdate(path, content, true);
assertThat(curatorClient.checkExists(path), is(true));
assertEquals(curatorClient.getContent(path), content);
}
@Test
void testCreatePersistentFailed() {
String path = "/dubbo/test/path";
curatorClient.delete(path);
curatorClient.create(path, false, true);
Assertions.assertTrue(curatorClient.checkExists(path));
curatorClient.createPersistent(path, true);
Assertions.assertTrue(curatorClient.checkExists(path));
curatorClient.createPersistent(path, true);
Assertions.assertTrue(curatorClient.checkExists(path));
Assertions.assertThrows(IllegalStateException.class, () -> {
curatorClient.createPersistent(path, false);
});
Assertions.assertTrue(curatorClient.checkExists(path));
}
@Test
void testCreateEphemeralFailed() {
String path = "/dubbo/test/path";
curatorClient.delete(path);
curatorClient.create(path, true, true);
Assertions.assertTrue(curatorClient.checkExists(path));
curatorClient.createEphemeral(path, true);
Assertions.assertTrue(curatorClient.checkExists(path));
curatorClient.createEphemeral(path, true);
Assertions.assertTrue(curatorClient.checkExists(path));
Assertions.assertThrows(IllegalStateException.class, () -> {
curatorClient.createEphemeral(path, false);
});
Assertions.assertTrue(curatorClient.checkExists(path));
}
@Test
void testAddTargetDataListener() throws Exception {
String listenerPath = "/dubbo/service.name/configuration";
String path = listenerPath + "/dat/data";
String value = "vav";
curatorClient.createOrUpdate(path + "/d.json", value, true);
when(mockGetDataBuilder.forPath(any())).then(invocationOnMock -> value.getBytes());
String valueFromCache = curatorClient.getContent(path + "/d.json");
Assertions.assertEquals(value, valueFromCache);
final AtomicInteger atomicInteger = new AtomicInteger(0);
NodeCache mockNodeCache = mock(NodeCache.class);
MockedConstruction<NodeCache> mockedConstruction =
mockConstructionWithAnswer(NodeCache.class, invocationOnMock -> invocationOnMock
.getMethod()
.invoke(mockNodeCache, invocationOnMock.getArguments()));
when(mockNodeCache.getListenable()).thenReturn(StandardListenerManager.standard());
Curator5ZookeeperClient.NodeCacheListenerImpl nodeCacheListener =
new Curator5ZookeeperClient.NodeCacheListenerImpl() {
@Override
public void nodeChanged() {
atomicInteger.incrementAndGet();
}
};
curatorClient.addTargetDataListener(path + "/d.json", nodeCacheListener);
valueFromCache = curatorClient.getContent(path + "/d.json");
Assertions.assertNotNull(valueFromCache);
int currentCount1 = atomicInteger.get();
when(mockSetDataBuilder.forPath(any(), any())).then(invocationOnMock -> {
nodeCacheListener.nodeChanged();
return null;
});
curatorClient.getClient().setData().forPath(path + "/d.json", "foo".getBytes());
await().until(() -> atomicInteger.get() > currentCount1);
int currentCount2 = atomicInteger.get();
curatorClient.getClient().setData().forPath(path + "/d.json", "bar".getBytes());
await().until(() -> atomicInteger.get() > currentCount2);
int currentCount3 = atomicInteger.get();
when(mockDeleteBuilder.forPath(any())).then(invocationOnMock -> {
nodeCacheListener.nodeChanged();
return null;
});
curatorClient.delete(path + "/d.json");
when(mockGetDataBuilder.forPath(any())).thenReturn(null);
valueFromCache = curatorClient.getContent(path + "/d.json");
Assertions.assertNull(valueFromCache);
await().until(() -> atomicInteger.get() > currentCount3);
mockedConstruction.close();
}
@Test
void testPersistentCas1() throws Exception {
// test create failed when others create success
String path = "/dubbo/mapping/org.apache.dubbo.demo.DemoService";
AtomicReference<Runnable> runnable = new AtomicReference<>();
Curator5ZookeeperClient curatorClient =
new Curator5ZookeeperClient(
URL.valueOf(zookeeperConnectionAddress1 + "/org.apache.dubbo.registry.RegistryService")) {
@Override
protected void createPersistent(String path, String data, boolean faultTolerant) {
if (runnable.get() != null) {
runnable.get().run();
}
super.createPersistent(path, data, faultTolerant);
}
@Override
protected void update(String path, String data, int version) {
if (runnable.get() != null) {
runnable.get().run();
}
super.update(path, data, version);
}
};
curatorClient.delete(path);
runnable.set(() -> {
try {
mockCuratorFramework.create().forPath(path, "version x".getBytes(StandardCharsets.UTF_8));
} catch (Exception e) {
throw new RuntimeException(e);
}
});
when(mockGetDataBuilder.forPath(any())).then(invocationOnMock -> "version x".getBytes());
when(mockCreateBuilder.forPath(any())).then(invocationOnMock -> {
String value;
try {
value = createAnswer.answer(invocationOnMock);
} catch (Exception e) {
throw e;
}
try {
runnable.get().run();
} catch (Exception ignored) {
}
return value;
});
Assertions.assertThrows(
IllegalStateException.class, () -> curatorClient.createOrUpdate(path, "version 1", false, 0));
Assertions.assertEquals("version x", curatorClient.getContent(path));
mockCuratorFramework.setData().forPath(path, "version 1".getBytes(StandardCharsets.UTF_8));
when(mockGetDataBuilder.storingStatIn(any())).thenReturn(new WatchPathable<byte[]>() {
@Override
public byte[] forPath(String s) throws Exception {
return mockGetDataBuilder.forPath(s);
}
@Override
public Pathable<byte[]> watched() {
return null;
}
@Override
public Pathable<byte[]> usingWatcher(Watcher watcher) {
return null;
}
@Override
public Pathable<byte[]> usingWatcher(CuratorWatcher curatorWatcher) {
return null;
}
});
ConfigItem configItem = curatorClient.getConfigItem(path);
runnable.set(() -> {
try {
mockCuratorFramework.setData().forPath(path, "version x".getBytes(StandardCharsets.UTF_8));
} catch (Exception e) {
throw new RuntimeException(e);
}
});
when(mockSetDataBuilder.forPath(any(), any())).thenThrow(new IllegalStateException());
int version1 = ((Stat) configItem.getTicket()).getVersion();
Assertions.assertThrows(
IllegalStateException.class, () -> curatorClient.createOrUpdate(path, "version 2", false, version1));
when(mockGetDataBuilder.forPath(any())).then(invocationOnMock -> "version x".getBytes());
Assertions.assertEquals("version x", curatorClient.getContent(path));
runnable.set(null);
configItem = curatorClient.getConfigItem(path);
int version2 = ((Stat) configItem.getTicket()).getVersion();
doReturn(null).when(mockSetDataBuilder).forPath(any(), any());
curatorClient.createOrUpdate(path, "version 2", false, version2);
when(mockGetDataBuilder.forPath(any())).then(invocationOnMock -> "version 2".getBytes());
Assertions.assertEquals("version 2", curatorClient.getContent(path));
curatorClient.close();
}
@Test
void testPersistentCas2() throws Exception {
// test update failed when others create success
String path = "/dubbo/mapping/org.apache.dubbo.demo.DemoService";
Curator5ZookeeperClient curatorClient = new Curator5ZookeeperClient(
URL.valueOf(zookeeperConnectionAddress1 + "/org.apache.dubbo.registry.RegistryService"));
curatorClient.delete(path);
curatorClient.createOrUpdate(path, "version x", false);
Assertions.assertThrows(
IllegalStateException.class, () -> curatorClient.createOrUpdate(path, "version 1", false, null));
when(mockGetDataBuilder.forPath(any())).then(invocationOnMock -> "version x".getBytes());
Assertions.assertEquals("version x", curatorClient.getContent(path));
curatorClient.close();
}
@Test
void testPersistentNonVersion() throws Exception {
String path = "/dubbo/metadata/org.apache.dubbo.demo.DemoService";
AtomicReference<Runnable> runnable = new AtomicReference<>();
Curator5ZookeeperClient curatorClient =
new Curator5ZookeeperClient(
URL.valueOf(zookeeperConnectionAddress1 + "/org.apache.dubbo.registry.RegistryService")) {
@Override
protected void createPersistent(String path, String data, boolean faultTolerant) {
if (runnable.get() != null) {
runnable.get().run();
}
super.createPersistent(path, data, faultTolerant);
}
@Override
protected void update(String path, String data, int version) {
if (runnable.get() != null) {
runnable.get().run();
}
super.update(path, data, version);
}
};
curatorClient.delete(path);
when(mockGetDataBuilder.forPath(any())).then(invocationOnMock -> "version x".getBytes());
when(mockCreateBuilder.forPath(any())).then(invocationOnMock -> {
String value;
try {
value = createAnswer.answer(invocationOnMock);
} catch (Exception e) {
throw e;
}
try {
runnable.get().run();
} catch (Exception ignored) {
}
return value;
});
when(mockGetDataBuilder.forPath(any())).then(invocationOnMock -> "version 0".getBytes());
curatorClient.createOrUpdate(path, "version 0", false);
Assertions.assertEquals("version 0", curatorClient.getContent(path));
curatorClient.delete(path);
runnable.set(() -> {
try {
mockCuratorFramework.create().forPath(path, "version x".getBytes(StandardCharsets.UTF_8));
} catch (Exception e) {
throw new RuntimeException(e);
}
});
when(mockGetDataBuilder.forPath(any())).then(invocationOnMock -> "version 1".getBytes());
curatorClient.createOrUpdate(path, "version 1", false);
Assertions.assertEquals("version 1", curatorClient.getContent(path));
runnable.set(() -> {
try {
mockCuratorFramework.setData().forPath(path, "version x".getBytes(StandardCharsets.UTF_8));
} catch (Exception e) {
throw new RuntimeException(e);
}
});
curatorClient.createOrUpdate(path, "version 2", false);
when(mockGetDataBuilder.forPath(any())).then(invocationOnMock -> "version 2".getBytes());
Assertions.assertEquals("version 2", curatorClient.getContent(path));
runnable.set(null);
curatorClient.createOrUpdate(path, "version 3", false);
when(mockGetDataBuilder.forPath(any())).then(invocationOnMock -> "version 3".getBytes());
Assertions.assertEquals("version 3", curatorClient.getContent(path));
curatorClient.close();
}
@Test
void testEphemeralCas1() throws Exception {
// test create failed when others create success
String path = "/dubbo/mapping/org.apache.dubbo.demo.DemoService";
AtomicReference<Runnable> runnable = new AtomicReference<>();
Curator5ZookeeperClient curatorClient =
new Curator5ZookeeperClient(
URL.valueOf(zookeeperConnectionAddress1 + "/org.apache.dubbo.registry.RegistryService")) {
@Override
protected void createEphemeral(String path, String data, boolean faultTolerant) {
if (runnable.get() != null) {
runnable.get().run();
}
super.createPersistent(path, data, faultTolerant);
}
@Override
protected void update(String path, String data, int version) {
if (runnable.get() != null) {
runnable.get().run();
}
super.update(path, data, version);
}
};
curatorClient.delete(path);
when(mockCreateBuilder.forPath(any())).then(invocationOnMock -> {
String value;
try {
value = createAnswer.answer(invocationOnMock);
} catch (Exception e) {
throw e;
}
try {
runnable.get().run();
} catch (Exception ignored) {
}
return value;
});
runnable.set(() -> {
try {
mockCuratorFramework.create().forPath(path, "version x".getBytes(StandardCharsets.UTF_8));
} catch (Exception e) {
throw new RuntimeException(e);
}
});
Assertions.assertThrows(
IllegalStateException.class, () -> curatorClient.createOrUpdate(path, "version 1", true, 0));
when(mockGetDataBuilder.forPath(any())).then(invocationOnMock -> "version x".getBytes());
Assertions.assertEquals("version x", curatorClient.getContent(path));
mockCuratorFramework.setData().forPath(path, "version 1".getBytes(StandardCharsets.UTF_8));
when(mockGetDataBuilder.storingStatIn(any())).thenReturn(new WatchPathable<byte[]>() {
@Override
public byte[] forPath(String s) throws Exception {
return mockGetDataBuilder.forPath(s);
}
@Override
public Pathable<byte[]> watched() {
return null;
}
@Override
public Pathable<byte[]> usingWatcher(Watcher watcher) {
return null;
}
@Override
public Pathable<byte[]> usingWatcher(CuratorWatcher curatorWatcher) {
return null;
}
});
ConfigItem configItem = curatorClient.getConfigItem(path);
runnable.set(() -> {
try {
mockCuratorFramework.setData().forPath(path, "version x".getBytes(StandardCharsets.UTF_8));
} catch (Exception e) {
throw new RuntimeException(e);
}
});
int version1 = ((Stat) configItem.getTicket()).getVersion();
when(mockSetDataBuilder.forPath(any(), any())).thenThrow(new IllegalStateException());
Assertions.assertThrows(
IllegalStateException.class, () -> curatorClient.createOrUpdate(path, "version 2", true, version1));
when(mockGetDataBuilder.forPath(any())).then(invocationOnMock -> "version x".getBytes());
Assertions.assertEquals("version x", curatorClient.getContent(path));
runnable.set(null);
configItem = curatorClient.getConfigItem(path);
int version2 = ((Stat) configItem.getTicket()).getVersion();
doReturn(null).when(mockSetDataBuilder).forPath(any(), any());
curatorClient.createOrUpdate(path, "version 2", true, version2);
when(mockGetDataBuilder.forPath(any())).then(invocationOnMock -> "version 2".getBytes());
Assertions.assertEquals("version 2", curatorClient.getContent(path));
curatorClient.close();
}
@Test
void testEphemeralCas2() throws Exception {
// test update failed when others create success
String path = "/dubbo/mapping/org.apache.dubbo.demo.DemoService";
Curator5ZookeeperClient curatorClient = new Curator5ZookeeperClient(
URL.valueOf(zookeeperConnectionAddress1 + "/org.apache.dubbo.registry.RegistryService"));
curatorClient.delete(path);
curatorClient.createOrUpdate(path, "version x", true);
Assertions.assertThrows(
IllegalStateException.class, () -> curatorClient.createOrUpdate(path, "version 1", true, null));
when(mockGetDataBuilder.forPath(any())).then(invocationOnMock -> "version x".getBytes());
Assertions.assertEquals("version x", curatorClient.getContent(path));
curatorClient.close();
}
@Test
void testEphemeralNonVersion() throws Exception {
String path = "/dubbo/metadata/org.apache.dubbo.demo.DemoService";
AtomicReference<Runnable> runnable = new AtomicReference<>();
Curator5ZookeeperClient curatorClient =
new Curator5ZookeeperClient(
URL.valueOf(zookeeperConnectionAddress1 + "/org.apache.dubbo.registry.RegistryService")) {
@Override
protected void createPersistent(String path, String data, boolean faultTolerant) {
if (runnable.get() != null) {
runnable.get().run();
}
super.createPersistent(path, data, faultTolerant);
}
@Override
protected void update(String path, String data, int version) {
if (runnable.get() != null) {
runnable.get().run();
}
super.update(path, data, version);
}
};
curatorClient.delete(path);
curatorClient.createOrUpdate(path, "version 0", true);
when(mockGetDataBuilder.forPath(any())).then(invocationOnMock -> "version 0".getBytes());
Assertions.assertEquals("version 0", curatorClient.getContent(path));
curatorClient.delete(path);
runnable.set(() -> {
try {
mockCuratorFramework.create().forPath(path, "version x".getBytes(StandardCharsets.UTF_8));
} catch (Exception e) {
throw new RuntimeException(e);
}
});
curatorClient.createOrUpdate(path, "version 1", true);
when(mockGetDataBuilder.forPath(any())).then(invocationOnMock -> "version 1".getBytes());
Assertions.assertEquals("version 1", curatorClient.getContent(path));
runnable.set(() -> {
try {
mockCuratorFramework.setData().forPath(path, "version x".getBytes(StandardCharsets.UTF_8));
} catch (Exception e) {
throw new RuntimeException(e);
}
});
curatorClient.createOrUpdate(path, "version 2", true);
when(mockGetDataBuilder.forPath(any())).then(invocationOnMock -> "version 2".getBytes());
Assertions.assertEquals("version 2", curatorClient.getContent(path));
runnable.set(null);
curatorClient.createOrUpdate(path, "version 3", true);
when(mockGetDataBuilder.forPath(any())).then(invocationOnMock -> "version 3".getBytes());
Assertions.assertEquals("version 3", curatorClient.getContent(path));
curatorClient.close();
}
@AfterAll
public static void testWithStoppedServer() {
curatorFrameworkFactoryMockedStatic.close();
curatorClient.close();
}
}
| Curator5ZookeeperClientTest |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java | {
"start": 9962,
"end": 39908
} | class ____ implements SettingUpdater<Settings> {
final Predicate<String> loggerPredicate = Loggers.LOG_LEVEL_SETTING::match;
private final Settings settings;
LoggingSettingUpdater(Settings settings) {
this.settings = settings;
}
@Override
public boolean hasChanged(Settings current, Settings previous) {
return current.filter(loggerPredicate).equals(previous.filter(loggerPredicate)) == false;
}
@Override
public Settings getValue(Settings current, Settings previous) {
Settings.Builder builder = Settings.builder();
builder.put(current.filter(loggerPredicate));
for (String key : previous.keySet()) {
if (loggerPredicate.test(key) && builder.keys().contains(key) == false) {
if (Loggers.LOG_LEVEL_SETTING.getConcreteSetting(key).exists(settings) == false) {
builder.putNull(key);
} else {
builder.put(key, Loggers.LOG_LEVEL_SETTING.getConcreteSetting(key).get(settings).toString());
}
}
}
return builder.build();
}
@Override
public void apply(Settings value, Settings current, Settings previous) {
for (String key : value.keySet()) {
assert loggerPredicate.test(key);
String component = key.substring("logger.".length());
if ("level".equals(component)) {
continue;
}
if ("_root".equals(component)) {
final String rootLevel = value.get(key);
if (rootLevel == null) {
Loggers.setLevel(LogManager.getRootLogger(), Loggers.LOG_DEFAULT_LEVEL_SETTING.get(settings));
} else {
Loggers.setLevel(LogManager.getRootLogger(), rootLevel);
}
} else {
Loggers.setLevel(LogManager.getLogger(component), value.get(key));
}
}
}
}
public static final Set<Setting<?>> BUILT_IN_CLUSTER_SETTINGS = Set.of(
AllocationBalancingRoundSummaryService.ENABLE_BALANCER_ROUND_SUMMARIES_SETTING,
AllocationBalancingRoundSummaryService.BALANCER_ROUND_SUMMARIES_LOG_INTERVAL_SETTING,
AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING,
AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING,
BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING,
BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING,
BalancedShardsAllocator.WRITE_LOAD_BALANCE_FACTOR_SETTING,
BalancedShardsAllocator.DISK_USAGE_BALANCE_FACTOR_SETTING,
BalancedShardsAllocator.THRESHOLD_SETTING,
DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_DECREASE_SHARDS_COOLDOWN,
DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_INCREASE_SHARDS_COOLDOWN,
DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING,
DataStreamAutoShardingService.CLUSTER_AUTO_SHARDING_MAX_WRITE_THREADS,
DataStreamAutoShardingService.CLUSTER_AUTO_SHARDING_MIN_WRITE_THREADS,
DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_INCREASE_SHARDS_LOAD_METRIC,
DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_DECREASE_SHARDS_LOAD_METRIC,
DesiredBalanceComputer.PROGRESS_LOG_INTERVAL_SETTING,
DesiredBalanceComputer.MAX_BALANCE_COMPUTATION_TIME_DURING_INDEX_CREATION_SETTING,
DesiredBalanceReconciler.UNDESIRED_ALLOCATIONS_LOG_INTERVAL_SETTING,
DesiredBalanceReconciler.UNDESIRED_ALLOCATIONS_LOG_THRESHOLD_SETTING,
UndesiredAllocationsTracker.UNDESIRED_ALLOCATION_DURATION_LOG_THRESHOLD_SETTING,
UndesiredAllocationsTracker.UNDESIRED_ALLOCATION_DURATION_LOG_INTERVAL_SETTING,
UndesiredAllocationsTracker.MAX_UNDESIRED_ALLOCATIONS_TO_TRACK,
BreakerSettings.CIRCUIT_BREAKER_LIMIT_SETTING,
BreakerSettings.CIRCUIT_BREAKER_OVERHEAD_SETTING,
BreakerSettings.CIRCUIT_BREAKER_TYPE,
ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING,
ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING,
ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_FROZEN_REBALANCE_SETTING,
EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING,
EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING,
FsRepository.REPOSITORIES_CHUNK_SIZE_SETTING,
FsRepository.REPOSITORIES_LOCATION_SETTING,
IndicesQueryCache.INDICES_CACHE_QUERY_SIZE_SETTING,
IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING,
IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING,
IndicesService.INDICES_ID_FIELD_DATA_ENABLED_SETTING,
IndicesService.WRITE_DANGLING_INDICES_INFO_SETTING,
MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING,
MappingUpdatedAction.INDICES_MAX_IN_FLIGHT_UPDATES_SETTING,
Metadata.SETTING_READ_ONLY_SETTING,
Metadata.SETTING_READ_ONLY_ALLOW_DELETE_SETTING,
ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE,
IncrementalBulkService.INCREMENTAL_BULK,
RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING,
RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING,
RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING,
RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING,
RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING,
RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING,
RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_RETRY_TIMEOUT_SETTING,
RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_FILE_CHUNKS_SETTING,
RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_OPERATIONS_SETTING,
RecoverySettings.INDICES_RECOVERY_USE_SNAPSHOTS_SETTING,
RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS,
RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS_PER_NODE,
RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE,
RecoverySettings.NODE_BANDWIDTH_RECOVERY_FACTOR_READ_SETTING,
RecoverySettings.NODE_BANDWIDTH_RECOVERY_FACTOR_WRITE_SETTING,
RecoverySettings.NODE_BANDWIDTH_RECOVERY_OPERATOR_FACTOR_SETTING,
RecoverySettings.NODE_BANDWIDTH_RECOVERY_OPERATOR_FACTOR_READ_SETTING,
RecoverySettings.NODE_BANDWIDTH_RECOVERY_OPERATOR_FACTOR_WRITE_SETTING,
RecoverySettings.NODE_BANDWIDTH_RECOVERY_OPERATOR_FACTOR_MAX_OVERCOMMIT_SETTING,
RecoverySettings.NODE_BANDWIDTH_RECOVERY_DISK_WRITE_SETTING,
RecoverySettings.NODE_BANDWIDTH_RECOVERY_DISK_READ_SETTING,
RecoverySettings.NODE_BANDWIDTH_RECOVERY_NETWORK_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING,
DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING,
DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_MAX_HEADROOM_SETTING,
DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING,
DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_MAX_HEADROOM_SETTING,
DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING,
DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_MAX_HEADROOM_SETTING,
DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_FROZEN_WATERMARK_SETTING,
DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_FROZEN_MAX_HEADROOM_SETTING,
DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING,
DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING,
InternalClusterInfoService.CLUSTER_ROUTING_ALLOCATION_ESTIMATED_HEAP_THRESHOLD_DECIDER_ENABLED,
SameShardAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING,
InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING,
InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING,
InternalSnapshotsInfoService.INTERNAL_SNAPSHOT_INFO_MAX_CONCURRENT_FETCHES_SETTING,
DestructiveOperations.REQUIRES_NAME_SETTING,
NoMasterBlockService.NO_MASTER_BLOCK_SETTING,
GatewayService.EXPECTED_DATA_NODES_SETTING,
GatewayService.RECOVER_AFTER_DATA_NODES_SETTING,
GatewayService.RECOVER_AFTER_TIME_SETTING,
PersistedClusterStateService.SLOW_WRITE_LOGGING_THRESHOLD,
PersistedClusterStateService.DOCUMENT_PAGE_SIZE,
NetworkModule.HTTP_DEFAULT_TYPE_SETTING,
NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING,
NetworkModule.HTTP_TYPE_SETTING,
NetworkModule.TRANSPORT_TYPE_SETTING,
HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS,
HttpTransportSettings.SETTING_CORS_ENABLED,
HttpTransportSettings.SETTING_CORS_MAX_AGE,
HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN,
HttpTransportSettings.SETTING_HTTP_HOST,
HttpTransportSettings.SETTING_HTTP_PUBLISH_HOST,
HttpTransportSettings.SETTING_HTTP_BIND_HOST,
HttpTransportSettings.SETTING_HTTP_PORT,
HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT,
HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS,
HttpTransportSettings.SETTING_HTTP_COMPRESSION,
HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL,
HttpTransportSettings.SETTING_CORS_ALLOW_METHODS,
HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS,
HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED,
HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH,
HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE,
HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE,
HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_COUNT,
HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_SIZE,
HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH,
HttpTransportSettings.SETTING_HTTP_SERVER_SHUTDOWN_GRACE_PERIOD,
HttpTransportSettings.SETTING_HTTP_SERVER_SHUTDOWN_POLL_PERIOD,
HttpTransportSettings.SETTING_HTTP_READ_TIMEOUT,
HttpTransportSettings.SETTING_HTTP_RESET_COOKIES,
HttpTransportSettings.SETTING_HTTP_TCP_NO_DELAY,
HttpTransportSettings.SETTING_HTTP_TCP_KEEP_ALIVE,
HttpTransportSettings.SETTING_HTTP_TCP_KEEP_IDLE,
HttpTransportSettings.SETTING_HTTP_TCP_KEEP_INTERVAL,
HttpTransportSettings.SETTING_HTTP_TCP_KEEP_COUNT,
HttpTransportSettings.SETTING_HTTP_TCP_REUSE_ADDRESS,
HttpTransportSettings.SETTING_HTTP_TCP_SEND_BUFFER_SIZE,
HttpTransportSettings.SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE,
HttpTransportSettings.SETTING_HTTP_TRACE_LOG_INCLUDE,
HttpTransportSettings.SETTING_HTTP_TRACE_LOG_EXCLUDE,
HttpTransportSettings.SETTING_HTTP_CLIENT_STATS_ENABLED,
HttpTransportSettings.SETTING_HTTP_CLIENT_STATS_MAX_CLOSED_CHANNEL_AGE,
HttpTransportSettings.SETTING_HTTP_CLIENT_STATS_MAX_CLOSED_CHANNEL_COUNT,
HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING,
HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING,
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING,
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING,
HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING,
HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_OVERHEAD_SETTING,
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING,
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING,
IndexModule.NODE_STORE_ALLOW_MMAP,
IndexSettings.NODE_DEFAULT_REFRESH_INTERVAL_SETTING,
ClusterApplierService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING,
ClusterApplierService.CLUSTER_SERVICE_SLOW_TASK_THREAD_DUMP_TIMEOUT_SETTING,
ClusterApplierService.CLUSTER_APPLIER_THREAD_WATCHDOG_INTERVAL,
ClusterApplierService.CLUSTER_APPLIER_THREAD_WATCHDOG_QUIET_TIME,
ClusterService.USER_DEFINED_METADATA,
MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING,
MasterService.MASTER_SERVICE_STARVATION_LOGGING_THRESHOLD_SETTING,
SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING,
SearchService.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS,
TransportSearchAction.SHARD_COUNT_LIMIT_SETTING,
TransportSearchAction.DEFAULT_PRE_FILTER_SHARD_SIZE,
RemoteClusterSettings.REMOTE_CLUSTER_SKIP_UNAVAILABLE,
RemoteClusterSettings.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING,
RemoteClusterSettings.REMOTE_NODE_ATTRIBUTE,
RemoteClusterSettings.REMOTE_CLUSTER_PING_SCHEDULE,
RemoteClusterSettings.REMOTE_CLUSTER_COMPRESS,
RemoteClusterSettings.REMOTE_CLUSTER_COMPRESSION_SCHEME,
RemoteClusterSettings.REMOTE_CONNECTION_MODE,
ProxyConnectionStrategySettings.PROXY_ADDRESS,
ProxyConnectionStrategySettings.REMOTE_SOCKET_CONNECTIONS,
ProxyConnectionStrategySettings.SERVER_NAME,
SniffConnectionStrategySettings.REMOTE_CLUSTERS_PROXY,
SniffConnectionStrategySettings.REMOTE_CLUSTER_SEEDS,
SniffConnectionStrategySettings.REMOTE_CONNECTIONS_PER_CLUSTER,
SniffConnectionStrategySettings.REMOTE_NODE_CONNECTIONS,
TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING,
ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING,
SnapshotShutdownProgressTracker.SNAPSHOT_PROGRESS_DURING_SHUTDOWN_LOG_INTERVAL_SETTING,
NodeConnectionsService.CLUSTER_NODE_RECONNECT_INTERVAL_SETTING,
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING,
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING,
TransportReplicationAction.REPLICATION_INITIAL_RETRY_BACKOFF_BOUND,
TransportReplicationAction.REPLICATION_RETRY_TIMEOUT,
TransportSettings.HOST,
TransportSettings.PUBLISH_HOST,
TransportSettings.PUBLISH_HOST_PROFILE,
TransportSettings.BIND_HOST,
TransportSettings.BIND_HOST_PROFILE,
TransportSettings.PORT,
TransportSettings.PORT_PROFILE,
TransportSettings.PUBLISH_PORT,
TransportSettings.PUBLISH_PORT_PROFILE,
TransportSettings.TRANSPORT_COMPRESS,
TransportSettings.TRANSPORT_COMPRESSION_SCHEME,
TransportSettings.PING_SCHEDULE,
TransportSettings.CONNECT_TIMEOUT,
TransportSettings.DEFAULT_FEATURES_SETTING,
TransportSettings.TCP_NO_DELAY,
TransportSettings.TCP_NO_DELAY_PROFILE,
TransportSettings.TCP_KEEP_ALIVE,
TransportSettings.TCP_KEEP_ALIVE_PROFILE,
TransportSettings.TCP_KEEP_IDLE,
TransportSettings.TCP_KEEP_IDLE_PROFILE,
TransportSettings.TCP_KEEP_INTERVAL,
TransportSettings.TCP_KEEP_INTERVAL_PROFILE,
TransportSettings.TCP_KEEP_COUNT,
TransportSettings.TCP_KEEP_COUNT_PROFILE,
TransportSettings.TCP_REUSE_ADDRESS,
TransportSettings.TCP_REUSE_ADDRESS_PROFILE,
TransportSettings.TCP_SEND_BUFFER_SIZE,
TransportSettings.TCP_SEND_BUFFER_SIZE_PROFILE,
TransportSettings.TCP_RECEIVE_BUFFER_SIZE,
TransportSettings.TCP_RECEIVE_BUFFER_SIZE_PROFILE,
TransportSettings.CONNECTIONS_PER_NODE_RECOVERY,
TransportSettings.CONNECTIONS_PER_NODE_BULK,
TransportSettings.CONNECTIONS_PER_NODE_REG,
TransportSettings.CONNECTIONS_PER_NODE_STATE,
TransportSettings.CONNECTIONS_PER_NODE_PING,
TransportSettings.TRACE_LOG_EXCLUDE_SETTING,
TransportSettings.TRACE_LOG_INCLUDE_SETTING,
TransportSettings.SLOW_OPERATION_THRESHOLD_SETTING,
TransportSettings.RST_ON_CLOSE,
NetworkService.NETWORK_SERVER,
NetworkService.GLOBAL_NETWORK_HOST_SETTING,
NetworkService.GLOBAL_NETWORK_BIND_HOST_SETTING,
NetworkService.GLOBAL_NETWORK_PUBLISH_HOST_SETTING,
NetworkService.TCP_NO_DELAY,
NetworkService.TCP_KEEP_ALIVE,
NetworkService.TCP_KEEP_IDLE,
NetworkService.TCP_KEEP_INTERVAL,
NetworkService.TCP_KEEP_COUNT,
NetworkService.TCP_REUSE_ADDRESS,
NetworkService.TCP_SEND_BUFFER_SIZE,
NetworkService.TCP_RECEIVE_BUFFER_SIZE,
ThreadWatchdog.NETWORK_THREAD_WATCHDOG_INTERVAL,
ThreadWatchdog.NETWORK_THREAD_WATCHDOG_QUIET_TIME,
IndexSettings.QUERY_STRING_ANALYZE_WILDCARD,
IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD,
ScriptService.SCRIPT_CACHE_SIZE_SETTING,
ScriptService.SCRIPT_CACHE_EXPIRE_SETTING,
ScriptService.SCRIPT_DISABLE_MAX_COMPILATIONS_RATE_SETTING,
ScriptService.SCRIPT_GENERAL_CACHE_EXPIRE_SETTING,
ScriptService.SCRIPT_GENERAL_CACHE_SIZE_SETTING,
ScriptService.SCRIPT_GENERAL_MAX_COMPILATIONS_RATE_SETTING,
ScriptService.SCRIPT_MAX_COMPILATIONS_RATE_SETTING,
ScriptService.SCRIPT_MAX_SIZE_IN_BYTES,
ScriptService.TYPES_ALLOWED_SETTING,
ScriptService.CONTEXTS_ALLOWED_SETTING,
IndicesService.INDICES_CACHE_CLEAN_INTERVAL_SETTING,
IndicesFieldDataCache.INDICES_FIELDDATA_CACHE_SIZE_KEY,
IndicesFieldDataCache.INDICES_FIELDDATA_CACHE_EXPIRE,
IndicesRequestCache.INDICES_CACHE_QUERY_SIZE,
IndicesRequestCache.INDICES_CACHE_QUERY_EXPIRE,
HunspellService.HUNSPELL_LAZY_LOAD,
HunspellService.HUNSPELL_IGNORE_CASE,
HunspellService.HUNSPELL_DICTIONARY_OPTIONS,
IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT,
Environment.PATH_DATA_SETTING,
Environment.PATH_HOME_SETTING,
Environment.PATH_LOGS_SETTING,
Environment.PATH_REPO_SETTING,
Environment.PATH_SHARED_DATA_SETTING,
NodeEnvironment.NODE_ID_SEED_SETTING,
Node.INITIAL_STATE_TIMEOUT_SETTING,
ShutdownPrepareService.MAXIMUM_SHUTDOWN_TIMEOUT_SETTING,
ShutdownPrepareService.MAXIMUM_REINDEXING_TIMEOUT_SETTING,
DiscoveryModule.DISCOVERY_TYPE_SETTING,
DiscoveryModule.DISCOVERY_SEED_PROVIDERS_SETTING,
DiscoveryModule.ELECTION_STRATEGY_SETTING,
SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING,
SeedHostsResolver.DISCOVERY_SEED_RESOLVER_MAX_CONCURRENT_RESOLVERS_SETTING,
SeedHostsResolver.DISCOVERY_SEED_RESOLVER_TIMEOUT_SETTING,
SearchService.DEFAULT_KEEPALIVE_SETTING,
SearchService.KEEPALIVE_INTERVAL_SETTING,
SearchService.MAX_KEEPALIVE_SETTING,
SearchService.ALLOW_EXPENSIVE_QUERIES,
SearchService.CCS_VERSION_CHECK_SETTING,
SearchService.CCS_COLLECT_TELEMETRY,
SearchService.BATCHED_QUERY_PHASE,
SearchService.PREWARMING_THRESHOLD_THREADPOOL_SIZE_FACTOR_POOL_SIZE,
MultiBucketConsumerService.MAX_BUCKET_SETTING,
SearchService.LOW_LEVEL_CANCELLATION_SETTING,
SearchService.MAX_OPEN_SCROLL_CONTEXT,
SearchService.ENABLE_REWRITE_AGGS_TO_FILTER_BY_FILTER,
SearchService.MAX_ASYNC_SEARCH_RESPONSE_SIZE_SETTING,
Node.WRITE_PORTS_FILE_SETTING,
Node.NODE_EXTERNAL_ID_SETTING,
Node.NODE_NAME_SETTING,
Node.NODE_ATTRIBUTES,
NodeRoleSettings.NODE_ROLES_SETTING,
AutoCreateIndex.AUTO_CREATE_INDEX_SETTING,
BaseRestHandler.MULTI_ALLOW_EXPLICIT_INDEX,
ClusterName.CLUSTER_NAME_SETTING,
ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING,
EsExecutors.NODE_PROCESSORS_SETTING,
ThreadContext.DEFAULT_HEADERS_SETTING,
Loggers.LOG_DEFAULT_LEVEL_SETTING,
Loggers.LOG_LEVEL_SETTING,
NodeEnvironment.ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING,
OsService.REFRESH_INTERVAL_SETTING,
ProcessService.REFRESH_INTERVAL_SETTING,
JvmService.REFRESH_INTERVAL_SETTING,
FsService.REFRESH_INTERVAL_SETTING,
JvmGcMonitorService.ENABLED_SETTING,
JvmGcMonitorService.REFRESH_INTERVAL_SETTING,
JvmGcMonitorService.GC_SETTING,
JvmGcMonitorService.GC_OVERHEAD_WARN_SETTING,
JvmGcMonitorService.GC_OVERHEAD_INFO_SETTING,
JvmGcMonitorService.GC_OVERHEAD_DEBUG_SETTING,
PageCacheRecycler.LIMIT_HEAP_SETTING,
PageCacheRecycler.WEIGHT_BYTES_SETTING,
PageCacheRecycler.WEIGHT_INT_SETTING,
PageCacheRecycler.WEIGHT_LONG_SETTING,
PageCacheRecycler.WEIGHT_OBJECTS_SETTING,
PageCacheRecycler.TYPE_SETTING,
PluginsService.MANDATORY_SETTING,
BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING,
BootstrapSettings.MEMORY_LOCK_SETTING,
BootstrapSettings.CTRLHANDLER_SETTING,
KeyStoreWrapper.SEED_SETTING,
IndexingMemoryController.INDEX_BUFFER_SIZE_SETTING,
IndexingMemoryController.MIN_INDEX_BUFFER_SIZE_SETTING,
IndexingMemoryController.MAX_INDEX_BUFFER_SIZE_SETTING,
IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING,
IndexingMemoryController.SHARD_MEMORY_INTERVAL_TIME_SETTING,
IndexingMemoryController.PAUSE_INDEXING_ON_THROTTLE,
ResourceWatcherService.ENABLED,
ResourceWatcherService.RELOAD_INTERVAL_HIGH,
ResourceWatcherService.RELOAD_INTERVAL_MEDIUM,
ResourceWatcherService.RELOAD_INTERVAL_LOW,
SearchModule.INDICES_MAX_CLAUSE_COUNT_SETTING,
SearchModule.INDICES_MAX_NESTED_DEPTH_SETTING,
SearchModule.SCRIPTED_METRICS_AGG_ONLY_ALLOWED_SCRIPTS,
SearchModule.SCRIPTED_METRICS_AGG_ALLOWED_INLINE_SCRIPTS,
SearchModule.SCRIPTED_METRICS_AGG_ALLOWED_STORED_SCRIPTS,
SearchService.SEARCH_WORKER_THREADS_ENABLED,
SearchService.QUERY_PHASE_PARALLEL_COLLECTION_ENABLED,
SearchService.MEMORY_ACCOUNTING_BUFFER_SIZE,
ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING,
ThreadPool.LATE_TIME_INTERVAL_WARN_THRESHOLD_SETTING,
ThreadPool.SLOW_SCHEDULER_TASK_WARN_THRESHOLD_SETTING,
ThreadPool.WRITE_THREAD_POOLS_EWMA_ALPHA_SETTING,
FastVectorHighlighter.SETTING_TV_HIGHLIGHT_MULTI_VALUE,
Node.BREAKER_TYPE_KEY,
OperationRouting.USE_ADAPTIVE_REPLICA_SELECTION_SETTING,
IndexGraveyard.SETTING_MAX_TOMBSTONES,
PersistentTasksClusterService.CLUSTER_TASKS_ALLOCATION_RECHECK_INTERVAL_SETTING,
EnableAssignmentDecider.CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING,
PeerFinder.DISCOVERY_FIND_PEERS_INTERVAL_SETTING,
PeerFinder.DISCOVERY_REQUEST_PEERS_TIMEOUT_SETTING,
ClusterFormationFailureHelper.DISCOVERY_CLUSTER_FORMATION_WARNING_TIMEOUT_SETTING,
ElectionSchedulerFactory.ELECTION_INITIAL_TIMEOUT_SETTING,
ElectionSchedulerFactory.ELECTION_BACK_OFF_TIME_SETTING,
ElectionSchedulerFactory.ELECTION_MAX_TIMEOUT_SETTING,
ElectionSchedulerFactory.ELECTION_DURATION_SETTING,
Coordinator.PUBLISH_TIMEOUT_SETTING,
Coordinator.PUBLISH_INFO_TIMEOUT_SETTING,
Coordinator.SINGLE_NODE_CLUSTER_SEED_HOSTS_CHECK_INTERVAL_SETTING,
JoinValidationService.JOIN_VALIDATION_CACHE_TIMEOUT_SETTING,
FollowersChecker.FOLLOWER_CHECK_TIMEOUT_SETTING,
FollowersChecker.FOLLOWER_CHECK_INTERVAL_SETTING,
FollowersChecker.FOLLOWER_CHECK_RETRY_COUNT_SETTING,
LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING,
LeaderChecker.LEADER_CHECK_INTERVAL_SETTING,
LeaderChecker.LEADER_CHECK_RETRY_COUNT_SETTING,
Reconfigurator.CLUSTER_AUTO_SHRINK_VOTING_CONFIGURATION,
TransportAddVotingConfigExclusionsAction.MAXIMUM_VOTING_CONFIG_EXCLUSIONS_SETTING,
ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING,
ClusterBootstrapService.UNCONFIGURED_BOOTSTRAP_TIMEOUT_SETTING,
LagDetector.CLUSTER_FOLLOWER_LAG_TIMEOUT_SETTING,
HandshakingTransportAddressConnector.PROBE_CONNECT_TIMEOUT_SETTING,
HandshakingTransportAddressConnector.PROBE_HANDSHAKE_TIMEOUT_SETTING,
SnapshotsService.MAX_CONCURRENT_SNAPSHOT_OPERATIONS_SETTING,
RestoreService.REFRESH_REPO_UUID_ON_RESTORE_SETTING,
FsHealthService.ENABLED_SETTING,
FsHealthService.REFRESH_INTERVAL_SETTING,
FsHealthService.SLOW_PATH_LOGGING_THRESHOLD_SETTING,
IndexingPressure.MAX_INDEXING_BYTES,
IndexingPressure.MAX_COORDINATING_BYTES,
IndexingPressure.MAX_OPERATION_SIZE,
IndexingPressure.MAX_PRIMARY_BYTES,
IndexingPressure.MAX_REPLICA_BYTES,
IndexingPressure.SPLIT_BULK_THRESHOLD,
IndexingPressure.SPLIT_BULK_HIGH_WATERMARK,
IndexingPressure.SPLIT_BULK_HIGH_WATERMARK_SIZE,
IndexingPressure.SPLIT_BULK_LOW_WATERMARK,
IndexingPressure.SPLIT_BULK_LOW_WATERMARK_SIZE,
ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE_FROZEN,
DataTier.ENFORCE_DEFAULT_TIER_PREFERENCE_SETTING,
CoordinationDiagnosticsService.IDENTITY_CHANGES_THRESHOLD_SETTING,
CoordinationDiagnosticsService.NO_MASTER_TRANSITIONS_THRESHOLD_SETTING,
CoordinationDiagnosticsService.NODE_HAS_MASTER_LOOKUP_TIMEFRAME_SETTING,
MasterHistory.MAX_HISTORY_AGE_SETTING,
ReadinessService.PORT,
HealthNodeTaskExecutor.ENABLED_SETTING,
LocalHealthMonitor.POLL_INTERVAL_SETTING,
TransportHealthNodeAction.HEALTH_NODE_TRANSPORT_ACTION_TIMEOUT,
SimulatePipelineTransportAction.INGEST_NODE_TRANSPORT_ACTION_TIMEOUT,
WriteAckDelay.WRITE_ACK_DELAY_INTERVAL,
WriteAckDelay.WRITE_ACK_DELAY_RANDOMNESS_BOUND,
RemoteClusterSettings.REMOTE_CLUSTER_CREDENTIALS,
RemoteClusterPortSettings.REMOTE_CLUSTER_SERVER_ENABLED,
RemoteClusterPortSettings.HOST,
RemoteClusterPortSettings.PUBLISH_HOST,
RemoteClusterPortSettings.BIND_HOST,
RemoteClusterPortSettings.PORT,
RemoteClusterPortSettings.PUBLISH_PORT,
RemoteClusterPortSettings.TCP_KEEP_ALIVE,
RemoteClusterPortSettings.TCP_KEEP_IDLE,
RemoteClusterPortSettings.TCP_KEEP_INTERVAL,
RemoteClusterPortSettings.TCP_KEEP_COUNT,
RemoteClusterPortSettings.TCP_NO_DELAY,
RemoteClusterPortSettings.TCP_REUSE_ADDRESS,
RemoteClusterPortSettings.TCP_SEND_BUFFER_SIZE,
RemoteClusterPortSettings.MAX_REQUEST_HEADER_SIZE,
HealthPeriodicLogger.POLL_INTERVAL_SETTING,
HealthPeriodicLogger.ENABLED_SETTING,
HealthPeriodicLogger.OUTPUT_MODE_SETTING,
DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING,
IndicesClusterStateService.SHARD_LOCK_RETRY_INTERVAL_SETTING,
IndicesClusterStateService.SHARD_LOCK_RETRY_TIMEOUT_SETTING,
IndicesClusterStateService.CONCURRENT_SHARD_CLOSE_LIMIT,
IngestSettings.GROK_WATCHDOG_INTERVAL,
IngestSettings.GROK_WATCHDOG_MAX_EXECUTION_TIME,
TDigestExecutionHint.SETTING,
MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT_SETTING,
MergePolicyConfig.DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT_SETTING,
ThreadPoolMergeScheduler.USE_THREAD_POOL_MERGE_SCHEDULER_SETTING,
ThreadPoolMergeExecutorService.INDICES_MERGE_DISK_HIGH_WATERMARK_SETTING,
ThreadPoolMergeExecutorService.INDICES_MERGE_DISK_HIGH_MAX_HEADROOM_SETTING,
ThreadPoolMergeExecutorService.INDICES_MERGE_DISK_CHECK_INTERVAL_SETTING,
TransportService.ENABLE_STACK_OVERFLOW_AVOIDANCE,
DataStreamGlobalRetentionSettings.DATA_STREAMS_DEFAULT_RETENTION_SETTING,
DataStreamGlobalRetentionSettings.DATA_STREAMS_MAX_RETENTION_SETTING,
DataStreamGlobalRetentionSettings.FAILURE_STORE_DEFAULT_RETENTION_SETTING,
ShardsAvailabilityHealthIndicatorService.REPLICA_UNASSIGNED_BUFFER_TIME,
DataStreamFailureStoreSettings.DATA_STREAM_FAILURE_STORED_ENABLED_SETTING,
IndexingStatsSettings.RECENT_WRITE_LOAD_HALF_LIFE_SETTING,
SearchStatsSettings.RECENT_READ_LOAD_HALF_LIFE_SETTING,
TransportGetAllocationStatsAction.CACHE_TTL_SETTING,
WriteLoadConstraintSettings.WRITE_LOAD_DECIDER_ENABLED_SETTING,
WriteLoadConstraintSettings.WRITE_LOAD_DECIDER_HIGH_UTILIZATION_THRESHOLD_SETTING,
WriteLoadConstraintSettings.WRITE_LOAD_DECIDER_HIGH_UTILIZATION_DURATION_SETTING,
WriteLoadConstraintSettings.WRITE_LOAD_DECIDER_QUEUE_LATENCY_THRESHOLD_SETTING,
WriteLoadConstraintSettings.WRITE_LOAD_DECIDER_REROUTE_INTERVAL_SETTING,
IndexBalanceConstraintSettings.INDEX_BALANCE_DECIDER_ENABLED_SETTING,
IndexBalanceConstraintSettings.INDEX_BALANCE_DECIDER_EXCESS_SHARDS,
WriteLoadConstraintSettings.WRITE_LOAD_DECIDER_MINIMUM_LOGGING_INTERVAL,
SamplingService.TTL_POLL_INTERVAL_SETTING,
BlobStoreRepository.MAX_HEAP_SIZE_FOR_SNAPSHOT_DELETION_SETTING,
ShardsCapacityHealthIndicatorService.SETTING_SHARD_CAPACITY_UNHEALTHY_THRESHOLD_YELLOW,
ShardsCapacityHealthIndicatorService.SETTING_SHARD_CAPACITY_UNHEALTHY_THRESHOLD_RED
);
}
| LoggingSettingUpdater |
java | apache__kafka | group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupConfigTest.java | {
"start": 1431,
"end": 14972
} | class ____ {
private static final int OFFSET_METADATA_MAX_SIZE = 4096;
private static final long OFFSETS_RETENTION_CHECK_INTERVAL_MS = 1000L;
private static final int OFFSETS_RETENTION_MINUTES = 24 * 60;
private static final boolean SHARE_GROUP_ENABLE = true;
private static final int SHARE_GROUP_PARTITION_MAX_RECORD_LOCKS = 200;
private static final int SHARE_GROUP_DELIVERY_COUNT_LIMIT = 5;
private static final int SHARE_GROUP_RECORD_LOCK_DURATION_MS = 30000;
private static final int SHARE_GROUP_MIN_RECORD_LOCK_DURATION_MS = 15000;
private static final int SHARE_GROUP_MAX_RECORD_LOCK_DURATION_MS = 60000;
/**
 * Every group config must reject values that cannot be parsed for its type:
 * numeric configs reject non-numbers and fractional strings, enumerated
 * string configs reject unknown names.
 */
@Test
public void testFromPropsInvalid() {
    GroupConfig.configNames().forEach(name -> {
        // Millisecond configs sharing the same invalid-value set.
        boolean millisConfig = GroupConfig.CONSUMER_SESSION_TIMEOUT_MS_CONFIG.equals(name)
            || GroupConfig.CONSUMER_HEARTBEAT_INTERVAL_MS_CONFIG.equals(name)
            || GroupConfig.SHARE_SESSION_TIMEOUT_MS_CONFIG.equals(name)
            || GroupConfig.SHARE_HEARTBEAT_INTERVAL_MS_CONFIG.equals(name)
            || GroupConfig.SHARE_RECORD_LOCK_DURATION_MS_CONFIG.equals(name);
        // Configs taking an enumerated string value.
        boolean enumConfig = GroupConfig.SHARE_AUTO_OFFSET_RESET_CONFIG.equals(name)
            || GroupConfig.SHARE_ISOLATION_LEVEL_CONFIG.equals(name);
        // Streams numeric configs that reject non-numbers and fractions.
        boolean streamsNumericConfig = GroupConfig.STREAMS_HEARTBEAT_INTERVAL_MS_CONFIG.equals(name)
            || GroupConfig.STREAMS_NUM_STANDBY_REPLICAS_CONFIG.equals(name)
            || GroupConfig.STREAMS_SESSION_TIMEOUT_MS_CONFIG.equals(name);

        if (millisConfig) {
            assertPropertyInvalid(name, "not_a_number", "-0.1", "1.2");
        } else if (enumConfig) {
            assertPropertyInvalid(name, "hello", "1.0");
        } else if (streamsNumericConfig) {
            assertPropertyInvalid(name, "not_a_number", "1.0");
        } else if (GroupConfig.STREAMS_INITIAL_REBALANCE_DELAY_MS_CONFIG.equals(name)) {
            assertPropertyInvalid(name, "not_a_number", "-1", "1.0");
        } else {
            assertPropertyInvalid(name, "not_a_number", "-0.1");
        }
    });
}
/**
 * Asserts that constructing a {@code GroupConfig} fails for each of the
 * given invalid values of the named property.
 */
private void assertPropertyInvalid(String name, Object... values) {
    for (Object invalidValue : values) {
        Properties props = new Properties();
        props.setProperty(name, invalidValue.toString());
        assertThrows(Exception.class, () -> new GroupConfig(props));
    }
}
/**
 * Valid values for the share auto-offset-reset config: "latest", "earliest"
 * and "by_duration:&lt;ISO-8601 duration&gt;". Each case starts from a fresh
 * valid config so the cases are independent (the original reused the props
 * mutated by the "earliest" case for the "by_duration" case, inconsistent
 * with the sibling cases and with testValidShareIsolationLevelValues).
 */
@Test
public void testValidShareAutoOffsetResetValues() {
    Properties props = createValidGroupConfig();
    // Check for value "latest"
    props.put(GroupConfig.SHARE_AUTO_OFFSET_RESET_CONFIG, "latest");
    doTestValidProps(props);

    props = createValidGroupConfig();
    // Check for value "earliest"
    props.put(GroupConfig.SHARE_AUTO_OFFSET_RESET_CONFIG, "earliest");
    doTestValidProps(props);

    props = createValidGroupConfig();
    // Check for value "by_duration" with an ISO-8601 duration
    props.put(GroupConfig.SHARE_AUTO_OFFSET_RESET_CONFIG, "by_duration:PT10S");
    doTestValidProps(props);
}
@Test
public void testValidShareIsolationLevelValues() {
    // Check for value "read_committed"
    Properties props = createValidGroupConfig();
    props.put(GroupConfig.SHARE_ISOLATION_LEVEL_CONFIG, "read_committed");
    doTestValidProps(props);
    // Check for value "read_uncommitted"
    props = createValidGroupConfig();
    props.put(GroupConfig.SHARE_ISOLATION_LEVEL_CONFIG, "read_uncommitted");
    doTestValidProps(props);
}
/**
 * Out-of-range numeric values raise InvalidConfigurationException (range
 * validation); malformed enumerated values raise ConfigException (parse
 * validation). Each case rebuilds a fresh valid config before mutating one key.
 */
@Test
public void testInvalidProps() {
    Properties props = createValidGroupConfig();
    // Check for invalid consumerSessionTimeoutMs, < MIN
    props.put(GroupConfig.CONSUMER_SESSION_TIMEOUT_MS_CONFIG, "1");
    doTestInvalidProps(props, InvalidConfigurationException.class);
    props = createValidGroupConfig();
    // Check for invalid consumerSessionTimeoutMs, > MAX
    props.put(GroupConfig.CONSUMER_SESSION_TIMEOUT_MS_CONFIG, "70000");
    doTestInvalidProps(props, InvalidConfigurationException.class);
    props = createValidGroupConfig();
    // Check for invalid consumerHeartbeatIntervalMs, < MIN
    props.put(GroupConfig.CONSUMER_HEARTBEAT_INTERVAL_MS_CONFIG, "1");
    doTestInvalidProps(props, InvalidConfigurationException.class);
    props = createValidGroupConfig();
    // Check for invalid consumerHeartbeatIntervalMs, > MAX
    props.put(GroupConfig.CONSUMER_HEARTBEAT_INTERVAL_MS_CONFIG, "70000");
    doTestInvalidProps(props, InvalidConfigurationException.class);
    props = createValidGroupConfig();
    // Check for invalid shareSessionTimeoutMs, < MIN
    props.put(GroupConfig.SHARE_SESSION_TIMEOUT_MS_CONFIG, "1");
    doTestInvalidProps(props, InvalidConfigurationException.class);
    props = createValidGroupConfig();
    // Check for invalid shareSessionTimeoutMs, > MAX
    props.put(GroupConfig.SHARE_SESSION_TIMEOUT_MS_CONFIG, "70000");
    doTestInvalidProps(props, InvalidConfigurationException.class);
    props = createValidGroupConfig();
    // Check for invalid shareHeartbeatIntervalMs, < MIN
    props.put(GroupConfig.SHARE_HEARTBEAT_INTERVAL_MS_CONFIG, "1");
    doTestInvalidProps(props, InvalidConfigurationException.class);
    props = createValidGroupConfig();
    // Check for invalid shareHeartbeatIntervalMs, > MAX
    props.put(GroupConfig.SHARE_HEARTBEAT_INTERVAL_MS_CONFIG, "70000");
    doTestInvalidProps(props, InvalidConfigurationException.class);
    props = createValidGroupConfig();
    // Check for invalid shareRecordLockDurationMs, < MIN
    props.put(GroupConfig.SHARE_RECORD_LOCK_DURATION_MS_CONFIG, "10000");
    doTestInvalidProps(props, InvalidConfigurationException.class);
    props = createValidGroupConfig();
    // Check for invalid shareRecordLockDurationMs, > MAX
    props.put(GroupConfig.SHARE_RECORD_LOCK_DURATION_MS_CONFIG, "70000");
    doTestInvalidProps(props, InvalidConfigurationException.class);
    props = createValidGroupConfig();
    // Check for invalid shareAutoOffsetReset
    props.put(GroupConfig.SHARE_AUTO_OFFSET_RESET_CONFIG, "hello");
    doTestInvalidProps(props, ConfigException.class);
    // Check for invalid shareAutoOffsetReset, by_duration without duration
    props.put(GroupConfig.SHARE_AUTO_OFFSET_RESET_CONFIG, "by_duration");
    doTestInvalidProps(props, ConfigException.class);
    // Check for invalid shareAutoOffsetReset, by_duration with negative duration
    props.put(GroupConfig.SHARE_AUTO_OFFSET_RESET_CONFIG, "by_duration:-PT10S");
    doTestInvalidProps(props, ConfigException.class);
    // Check for invalid shareAutoOffsetReset, by_duration with invalid duration
    props.put(GroupConfig.SHARE_AUTO_OFFSET_RESET_CONFIG, "by_duration:invalid");
    doTestInvalidProps(props, ConfigException.class);
    props = createValidGroupConfig();
    // Check for invalid streamsSessionTimeoutMs, < MIN
    props.put(GroupConfig.STREAMS_SESSION_TIMEOUT_MS_CONFIG, "1");
    doTestInvalidProps(props, InvalidConfigurationException.class);
    props = createValidGroupConfig();
    // Check for invalid streamsSessionTimeoutMs, > MAX
    props.put(GroupConfig.STREAMS_SESSION_TIMEOUT_MS_CONFIG, "70000");
    doTestInvalidProps(props, InvalidConfigurationException.class);
    props = createValidGroupConfig();
    // Check for invalid streamsHeartbeatIntervalMs, < MIN
    props.put(GroupConfig.STREAMS_HEARTBEAT_INTERVAL_MS_CONFIG, "1000");
    doTestInvalidProps(props, InvalidConfigurationException.class);
    props = createValidGroupConfig();
    // Check for invalid streamsHeartbeatIntervalMs, > MAX
    props.put(GroupConfig.STREAMS_HEARTBEAT_INTERVAL_MS_CONFIG, "70000");
    doTestInvalidProps(props, InvalidConfigurationException.class);
    props = createValidGroupConfig();
    // Check for invalid shareIsolationLevel (misspelled "read_committed").
    props.put(GroupConfig.SHARE_ISOLATION_LEVEL_CONFIG, "read_commit");
    doTestInvalidProps(props, ConfigException.class);
    props = createValidGroupConfig();
    // Check for invalid shareIsolationLevel (misspelled "read_uncommitted").
    props.put(GroupConfig.SHARE_ISOLATION_LEVEL_CONFIG, "read_uncommit");
    doTestInvalidProps(props, ConfigException.class);
}
private void doTestInvalidProps(Properties props, Class<? extends Exception> exceptionClassName) {
assertThrows(exceptionClassName, () -> GroupConfig.validate(props, createGroupCoordinatorConfig(), createShareGroupConfig()));
}
private void doTestValidProps(Properties props) {
assertDoesNotThrow(() -> GroupConfig.validate(props, createGroupCoordinatorConfig(), createShareGroupConfig()));
}
@Test
public void testFromPropsWithDefaultValue() {
Map<String, String> defaultValue = new HashMap<>();
defaultValue.put(GroupConfig.CONSUMER_SESSION_TIMEOUT_MS_CONFIG, "10");
defaultValue.put(GroupConfig.CONSUMER_HEARTBEAT_INTERVAL_MS_CONFIG, "10");
defaultValue.put(GroupConfig.SHARE_SESSION_TIMEOUT_MS_CONFIG, "10");
defaultValue.put(GroupConfig.SHARE_HEARTBEAT_INTERVAL_MS_CONFIG, "10");
defaultValue.put(GroupConfig.SHARE_RECORD_LOCK_DURATION_MS_CONFIG, "2000");
defaultValue.put(GroupConfig.SHARE_AUTO_OFFSET_RESET_CONFIG, "latest");
defaultValue.put(GroupConfig.SHARE_ISOLATION_LEVEL_CONFIG, "read_uncommitted");
defaultValue.put(GroupConfig.STREAMS_HEARTBEAT_INTERVAL_MS_CONFIG, "10");
defaultValue.put(GroupConfig.STREAMS_SESSION_TIMEOUT_MS_CONFIG, "2000");
defaultValue.put(GroupConfig.STREAMS_NUM_STANDBY_REPLICAS_CONFIG, "1");
defaultValue.put(GroupConfig.STREAMS_INITIAL_REBALANCE_DELAY_MS_CONFIG, "3000");
Properties props = new Properties();
props.put(GroupConfig.CONSUMER_SESSION_TIMEOUT_MS_CONFIG, "20");
GroupConfig config = GroupConfig.fromProps(defaultValue, props);
assertEquals(10, config.getInt(GroupConfig.CONSUMER_HEARTBEAT_INTERVAL_MS_CONFIG));
assertEquals(20, config.getInt(GroupConfig.CONSUMER_SESSION_TIMEOUT_MS_CONFIG));
assertEquals(10, config.getInt(GroupConfig.SHARE_HEARTBEAT_INTERVAL_MS_CONFIG));
assertEquals(10, config.getInt(GroupConfig.SHARE_SESSION_TIMEOUT_MS_CONFIG));
assertEquals(2000, config.getInt(GroupConfig.SHARE_RECORD_LOCK_DURATION_MS_CONFIG));
assertEquals("latest", config.getString(GroupConfig.SHARE_AUTO_OFFSET_RESET_CONFIG));
assertEquals("read_uncommitted", config.getString(GroupConfig.SHARE_ISOLATION_LEVEL_CONFIG));
assertEquals(10, config.getInt(GroupConfig.STREAMS_HEARTBEAT_INTERVAL_MS_CONFIG));
assertEquals(2000, config.getInt(GroupConfig.STREAMS_SESSION_TIMEOUT_MS_CONFIG));
assertEquals(1, config.getInt(GroupConfig.STREAMS_NUM_STANDBY_REPLICAS_CONFIG));
assertEquals(3000, config.getInt(GroupConfig.STREAMS_INITIAL_REBALANCE_DELAY_MS_CONFIG));
}
@Test
public void testInvalidConfigName() {
Properties props = new Properties();
props.put(GroupConfig.CONSUMER_SESSION_TIMEOUT_MS_CONFIG, "10");
props.put("invalid.config.name", "10");
assertThrows(InvalidConfigurationException.class, () -> GroupConfig.validate(props, createGroupCoordinatorConfig(), createShareGroupConfig()));
}
private Properties createValidGroupConfig() {
Properties props = new Properties();
props.put(GroupConfig.CONSUMER_SESSION_TIMEOUT_MS_CONFIG, "45000");
props.put(GroupConfig.CONSUMER_HEARTBEAT_INTERVAL_MS_CONFIG, "5000");
props.put(GroupConfig.SHARE_SESSION_TIMEOUT_MS_CONFIG, "45000");
props.put(GroupConfig.SHARE_HEARTBEAT_INTERVAL_MS_CONFIG, "5000");
props.put(GroupConfig.SHARE_RECORD_LOCK_DURATION_MS_CONFIG, "30000");
props.put(GroupConfig.SHARE_AUTO_OFFSET_RESET_CONFIG, "latest");
props.put(GroupConfig.SHARE_ISOLATION_LEVEL_CONFIG, "read_uncommitted");
props.put(GroupConfig.STREAMS_SESSION_TIMEOUT_MS_CONFIG, "50000");
props.put(GroupConfig.STREAMS_HEARTBEAT_INTERVAL_MS_CONFIG, "6000");
props.put(GroupConfig.STREAMS_NUM_STANDBY_REPLICAS_CONFIG, "1");
props.put(GroupConfig.STREAMS_INITIAL_REBALANCE_DELAY_MS_CONFIG, "3000");
return props;
}
private GroupCoordinatorConfig createGroupCoordinatorConfig() {
return GroupCoordinatorConfigTest.createGroupCoordinatorConfig(OFFSET_METADATA_MAX_SIZE, OFFSETS_RETENTION_CHECK_INTERVAL_MS, OFFSETS_RETENTION_MINUTES);
}
private ShareGroupConfig createShareGroupConfig() {
return ShareGroupConfigTest.createShareGroupConfig(SHARE_GROUP_ENABLE, SHARE_GROUP_PARTITION_MAX_RECORD_LOCKS, SHARE_GROUP_DELIVERY_COUNT_LIMIT,
SHARE_GROUP_RECORD_LOCK_DURATION_MS, SHARE_GROUP_MIN_RECORD_LOCK_DURATION_MS, SHARE_GROUP_MAX_RECORD_LOCK_DURATION_MS);
}
}
| GroupConfigTest |
java | quarkusio__quarkus | extensions/panache/panache-common/deployment/src/main/java/io/quarkus/panache/common/deployment/PanacheRepositoryEnhancer.java | {
"start": 247,
"end": 907
} | class ____ implements BiFunction<String, ClassVisitor, ClassVisitor> {
protected final IndexView indexView;
public PanacheRepositoryEnhancer(IndexView index) {
this.indexView = index;
}
@Override
public abstract ClassVisitor apply(String className, ClassVisitor outputClassVisitor);
public boolean skipRepository(ClassInfo classInfo) {
// we don't want to add methods to abstract/generic entities/repositories: they get added to bottom types
// which can't be either
return Modifier.isAbstract(classInfo.flags())
|| !classInfo.typeParameters().isEmpty();
}
}
| PanacheRepositoryEnhancer |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/mapper/SourceLoader.java | {
"start": 1959,
"end": 2747
} | interface ____ {
/**
* Load the {@code _source} for a document.
* @param storedFields a loader for stored fields
* @param docId the doc to load
*/
Source source(LeafStoredFieldLoader storedFields, int docId) throws IOException;
/**
* Write the {@code _source} for a document in the provided {@link XContentBuilder}.
* @param storedFields a loader for stored fields
* @param docId the doc to load
* @param b the builder to write the xcontent
*/
void write(LeafStoredFieldLoader storedFields, int docId, XContentBuilder b) throws IOException;
}
/**
* Load {@code _source} from a stored field.
*/
SourceLoader FROM_STORED_SOURCE = new Stored(null);
| Leaf |
java | google__dagger | javatests/dagger/functional/cycle/CycleTest.java | {
"start": 1225,
"end": 4068
} | class ____ {
@Test
public void providerIndirectionSelfCycle() {
SelfCycleComponent selfCycleComponent = DaggerCycles_SelfCycleComponent.create();
S s = selfCycleComponent.s();
assertThat(s.sProvider.get()).isNotNull();
}
@Test
public void providerIndirectionCycle() {
CycleComponent cycleComponent = DaggerCycles_CycleComponent.create();
A a = cycleComponent.a();
C c = cycleComponent.c();
assertThat(c.aProvider.get()).isNotNull();
assertThat(a.b.c.aProvider.get()).isNotNull();
assertThat(a.e.d.b.c.aProvider.get()).isNotNull();
}
@Test
public void lazyIndirectionSelfCycle() {
SelfCycleComponent selfCycleComponent = DaggerCycles_SelfCycleComponent.create();
S s = selfCycleComponent.s();
assertThat(s.sLazy.get()).isNotNull();
}
@Test
public void lazyIndirectionCycle() {
CycleComponent cycleComponent = DaggerCycles_CycleComponent.create();
A a = cycleComponent.a();
C c = cycleComponent.c();
assertThat(c.aLazy.get()).isNotNull();
assertThat(a.b.c.aLazy.get()).isNotNull();
assertThat(a.e.d.b.c.aLazy.get()).isNotNull();
}
@Test
public void subcomponentIndirectionCycle() {
ChildCycleComponent childCycleComponent = DaggerCycles_CycleComponent.create().child();
A a = childCycleComponent.a();
assertThat(a.b.c.aProvider.get()).isNotNull();
assertThat(a.e.d.b.c.aProvider.get()).isNotNull();
}
@Test
public void providerMapIndirectionCycle() {
CycleMapComponent cycleMapComponent = DaggerCycles_CycleMapComponent.create();
assertThat(cycleMapComponent.y()).isNotNull();
assertThat(cycleMapComponent.y().mapOfProvidersOfX).containsKey("X");
assertThat(cycleMapComponent.y().mapOfProvidersOfX.get("X")).isNotNull();
assertThat(cycleMapComponent.y().mapOfProvidersOfX.get("X").get()).isNotNull();
assertThat(cycleMapComponent.y().mapOfProvidersOfX.get("X").get().y).isNotNull();
assertThat(cycleMapComponent.y().mapOfProvidersOfX).hasSize(1);
assertThat(cycleMapComponent.y().mapOfProvidersOfY).containsKey("Y");
assertThat(cycleMapComponent.y().mapOfProvidersOfY.get("Y")).isNotNull();
assertThat(cycleMapComponent.y().mapOfProvidersOfY.get("Y").get()).isNotNull();
assertThat(cycleMapComponent.y().mapOfProvidersOfY.get("Y").get().mapOfProvidersOfX).hasSize(1);
assertThat(cycleMapComponent.y().mapOfProvidersOfY.get("Y").get().mapOfProvidersOfY).hasSize(1);
assertThat(cycleMapComponent.y().mapOfProvidersOfY).hasSize(1);
}
/**
* Tests that a cycle where a {@code @Binds} binding depends on a binding that has to be deferred
* works.
*/
@Test
public void cycleWithDeferredBinds() {
BindsCycleComponent bindsCycleComponent = DaggerCycles_BindsCycleComponent.create();
assertThat(bindsCycleComponent.bar()).isNotNull();
}
}
| CycleTest |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/configuration/interfaces/BootstrapWithInterfaceTests.java | {
"start": 1055,
"end": 1229
} | class ____ implements BootstrapWithTestInterface {
@Autowired
String foo;
@Test
void injectedBean() {
assertThat(foo).isEqualTo("foo");
}
}
| BootstrapWithInterfaceTests |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestCapacityOverTimePolicy.java | {
"start": 1497,
"end": 1626
} | class ____ the {@code CapacityOvertimePolicy} sharing policy.
*/
@NotThreadSafe
@SuppressWarnings("VisibilityModifier")
public | tests |
java | apache__maven | impl/maven-impl/src/test/java/org/apache/maven/impl/di/SessionScopeTest.java | {
"start": 1574,
"end": 1649
} | class ____ extends Base implements C {}
@Typed({C.class}) // explicit | Impl |
java | spring-projects__spring-security | oauth2/oauth2-client/src/test/java/org/springframework/security/oauth2/client/userinfo/DefaultOAuth2UserServiceTests.java | {
"start": 2743,
"end": 20587
} | class ____ {
private ClientRegistration.Builder clientRegistrationBuilder;
private OAuth2AccessToken accessToken;
private DefaultOAuth2UserService userService = new DefaultOAuth2UserService();
private MockWebServer server;
@BeforeEach
public void setup() throws Exception {
this.server = new MockWebServer();
this.server.start();
// @formatter:off
this.clientRegistrationBuilder = TestClientRegistrations.clientRegistration()
.userInfoUri(null)
.userNameAttributeName(null);
// @formatter:on
this.accessToken = TestOAuth2AccessTokens.noScopes();
}
@AfterEach
public void cleanup() throws Exception {
this.server.shutdown();
}
@Test
public void setRequestEntityConverterWhenNullThenThrowIllegalArgumentException() {
assertThatIllegalArgumentException().isThrownBy(() -> this.userService.setRequestEntityConverter(null));
}
@Test
public void setRestOperationsWhenNullThenThrowIllegalArgumentException() {
assertThatIllegalArgumentException().isThrownBy(() -> this.userService.setRestOperations(null));
}
@Test
public void loadUserWhenUserRequestIsNullThenThrowIllegalArgumentException() {
assertThatIllegalArgumentException().isThrownBy(() -> this.userService.loadUser(null));
}
@Test
public void loadUserWhenUserInfoUriIsNullThenThrowOAuth2AuthenticationException() {
ClientRegistration clientRegistration = this.clientRegistrationBuilder.build();
assertThatExceptionOfType(OAuth2AuthenticationException.class)
.isThrownBy(() -> this.userService.loadUser(new OAuth2UserRequest(clientRegistration, this.accessToken)))
.withMessageContaining("missing_user_info_uri");
}
@Test
public void loadUserWhenUserNameAttributeNameIsNullThenThrowOAuth2AuthenticationException() {
// @formatter:off
ClientRegistration clientRegistration = this.clientRegistrationBuilder
.userInfoUri("https://provider.com/user")
.build();
// @formatter:on
assertThatExceptionOfType(OAuth2AuthenticationException.class)
.isThrownBy(() -> this.userService.loadUser(new OAuth2UserRequest(clientRegistration, this.accessToken)))
.withMessageContaining("missing_user_name_attribute");
}
@Test
public void loadUserWhenUserInfoSuccessResponseThenReturnUser() {
// @formatter:off
String userInfoResponse = "{\n"
+ " \"user-name\": \"user1\",\n"
+ " \"first-name\": \"first\",\n"
+ " \"last-name\": \"last\",\n"
+ " \"middle-name\": \"middle\",\n"
+ " \"address\": \"address\",\n"
+ " \"email\": \"user1@example.com\"\n"
+ "}\n";
// @formatter:on
this.server.enqueue(jsonResponse(userInfoResponse));
String userInfoUri = this.server.url("/user").toString();
ClientRegistration clientRegistration = this.clientRegistrationBuilder.userInfoUri(userInfoUri)
.userInfoAuthenticationMethod(AuthenticationMethod.HEADER)
.userNameAttributeName("user-name")
.build();
OAuth2User user = this.userService.loadUser(new OAuth2UserRequest(clientRegistration, this.accessToken));
assertThat(user.getName()).isEqualTo("user1");
assertThat(user.getAttributes()).hasSize(6);
assertThat((String) user.getAttribute("user-name")).isEqualTo("user1");
assertThat((String) user.getAttribute("first-name")).isEqualTo("first");
assertThat((String) user.getAttribute("last-name")).isEqualTo("last");
assertThat((String) user.getAttribute("middle-name")).isEqualTo("middle");
assertThat((String) user.getAttribute("address")).isEqualTo("address");
assertThat((String) user.getAttribute("email")).isEqualTo("user1@example.com");
assertThat(user.getAuthorities()).hasSize(1);
assertThat(user.getAuthorities().iterator().next()).isInstanceOf(OAuth2UserAuthority.class);
OAuth2UserAuthority userAuthority = (OAuth2UserAuthority) user.getAuthorities().iterator().next();
assertThat(userAuthority.getAuthority()).isEqualTo("OAUTH2_USER");
assertThat(userAuthority.getAttributes()).isEqualTo(user.getAttributes());
assertThat(userAuthority.getUserNameAttributeName()).isEqualTo("user-name");
}
@Test
public void loadUserWhenNestedUserInfoSuccessThenReturnUser() {
// @formatter:off
String userInfoResponse = "{\n"
+ " \"user\": {\"user-name\": \"user1\"},\n"
+ " \"first-name\": \"first\",\n"
+ " \"last-name\": \"last\",\n"
+ " \"middle-name\": \"middle\",\n"
+ " \"address\": \"address\",\n"
+ " \"email\": \"user1@example.com\"\n"
+ "}\n";
// @formatter:on
this.server.enqueue(jsonResponse(userInfoResponse));
String userInfoUri = this.server.url("/user").toString();
ClientRegistration clientRegistration = this.clientRegistrationBuilder.userInfoUri(userInfoUri)
.userInfoAuthenticationMethod(AuthenticationMethod.HEADER)
.userNameAttributeName("user-name")
.build();
DefaultOAuth2UserService userService = new DefaultOAuth2UserService();
userService.setAttributesConverter((request) -> (attributes) -> {
Map<String, Object> user = (Map<String, Object>) attributes.get("user");
attributes.put("user-name", user.get("user-name"));
return attributes;
});
OAuth2User user = userService.loadUser(new OAuth2UserRequest(clientRegistration, this.accessToken));
assertThat(user.getName()).isEqualTo("user1");
assertThat(user.getAttributes()).hasSize(7);
assertThat(((Map<?, ?>) user.getAttribute("user")).get("user-name")).isEqualTo("user1");
assertThat((String) user.getAttribute("first-name")).isEqualTo("first");
assertThat((String) user.getAttribute("last-name")).isEqualTo("last");
assertThat((String) user.getAttribute("middle-name")).isEqualTo("middle");
assertThat((String) user.getAttribute("address")).isEqualTo("address");
assertThat((String) user.getAttribute("email")).isEqualTo("user1@example.com");
assertThat(user.getAuthorities()).hasSize(1);
assertThat(user.getAuthorities().iterator().next()).isInstanceOf(OAuth2UserAuthority.class);
OAuth2UserAuthority userAuthority = (OAuth2UserAuthority) user.getAuthorities().iterator().next();
assertThat(userAuthority.getAuthority()).isEqualTo("OAUTH2_USER");
assertThat(userAuthority.getAttributes()).isEqualTo(user.getAttributes());
assertThat(userAuthority.getUserNameAttributeName()).isEqualTo("user-name");
}
@Test
public void loadUserWhenUserInfoSuccessResponseInvalidThenThrowOAuth2AuthenticationException() {
// @formatter:off
String userInfoResponse = "{\n"
+ " \"user-name\": \"user1\",\n"
+ " \"first-name\": \"first\",\n"
+ " \"last-name\": \"last\",\n"
+ " \"middle-name\": \"middle\",\n"
+ " \"address\": \"address\",\n"
+ " \"email\": \"user1@example.com\"\n";
// "}\n"; // Make the JSON invalid/malformed
// @formatter:on
this.server.enqueue(jsonResponse(userInfoResponse));
String userInfoUri = this.server.url("/user").toString();
ClientRegistration clientRegistration = this.clientRegistrationBuilder.userInfoUri(userInfoUri)
.userInfoAuthenticationMethod(AuthenticationMethod.HEADER)
.userNameAttributeName("user-name")
.build();
assertThatExceptionOfType(OAuth2AuthenticationException.class)
.isThrownBy(() -> this.userService.loadUser(new OAuth2UserRequest(clientRegistration, this.accessToken)))
.withMessageContaining(
"[invalid_user_info_response] An error occurred while attempting to retrieve the UserInfo Resource");
}
@Test
public void loadUserWhenUserInfoErrorResponseWwwAuthenticateHeaderThenThrowOAuth2AuthenticationException() {
String wwwAuthenticateHeader = "Bearer realm=\"auth-realm\" error=\"insufficient_scope\" error_description=\"The access token expired\"";
MockResponse response = new MockResponse();
response.setHeader(HttpHeaders.WWW_AUTHENTICATE, wwwAuthenticateHeader);
response.setResponseCode(400);
this.server.enqueue(response);
String userInfoUri = this.server.url("/user").toString();
ClientRegistration clientRegistration = this.clientRegistrationBuilder.userInfoUri(userInfoUri)
.userInfoAuthenticationMethod(AuthenticationMethod.HEADER)
.userNameAttributeName("user-name")
.build();
assertThatExceptionOfType(OAuth2AuthenticationException.class)
.isThrownBy(() -> this.userService.loadUser(new OAuth2UserRequest(clientRegistration, this.accessToken)))
.withMessageContaining(
"[invalid_user_info_response] An error occurred while attempting to retrieve the UserInfo Resource")
.withMessageContaining("Error Code: insufficient_scope, Error Description: The access token expired");
}
@Test
public void loadUserWhenUserInfoErrorResponseThenThrowOAuth2AuthenticationException() {
// @formatter:off
String userInfoErrorResponse = "{\n"
+ " \"error\": \"invalid_token\"\n"
+ "}\n";
// @formatter:on
this.server.enqueue(jsonResponse(userInfoErrorResponse).setResponseCode(400));
String userInfoUri = this.server.url("/user").toString();
ClientRegistration clientRegistration = this.clientRegistrationBuilder.userInfoUri(userInfoUri)
.userInfoAuthenticationMethod(AuthenticationMethod.HEADER)
.userNameAttributeName("user-name")
.build();
assertThatExceptionOfType(OAuth2AuthenticationException.class)
.isThrownBy(() -> this.userService.loadUser(new OAuth2UserRequest(clientRegistration, this.accessToken)))
.withMessageContaining(
"[invalid_user_info_response] An error occurred while attempting to retrieve the UserInfo Resource")
.withMessageContaining("Error Code: invalid_token");
}
@Test
public void loadUserWhenServerErrorThenThrowOAuth2AuthenticationException() {
this.server.enqueue(new MockResponse().setResponseCode(500));
String userInfoUri = this.server.url("/user").toString();
ClientRegistration clientRegistration = this.clientRegistrationBuilder.userInfoUri(userInfoUri)
.userInfoAuthenticationMethod(AuthenticationMethod.HEADER)
.userNameAttributeName("user-name")
.build();
assertThatExceptionOfType(OAuth2AuthenticationException.class)
.isThrownBy(() -> this.userService.loadUser(new OAuth2UserRequest(clientRegistration, this.accessToken)))
.withMessageContaining(
"[invalid_user_info_response] An error occurred while attempting to retrieve the UserInfo Resource: 500 Server Error");
}
@Test
public void loadUserWhenUserInfoUriInvalidThenThrowOAuth2AuthenticationException() {
String userInfoUri = "https://invalid-provider.com/user";
ClientRegistration clientRegistration = this.clientRegistrationBuilder.userInfoUri(userInfoUri)
.userInfoAuthenticationMethod(AuthenticationMethod.HEADER)
.userNameAttributeName("user-name")
.build();
assertThatExceptionOfType(OAuth2AuthenticationException.class)
.isThrownBy(() -> this.userService.loadUser(new OAuth2UserRequest(clientRegistration, this.accessToken)))
.withMessageContaining(
"[invalid_user_info_response] An error occurred while attempting to retrieve the UserInfo Resource");
}
// gh-5294
@Test
public void loadUserWhenUserInfoSuccessResponseThenAcceptHeaderJson() throws Exception {
// @formatter:off
String userInfoResponse = "{\n"
+ " \"user-name\": \"user1\",\n"
+ " \"first-name\": \"first\",\n"
+ " \"last-name\": \"last\",\n"
+ " \"middle-name\": \"middle\",\n"
+ " \"address\": \"address\",\n"
+ " \"email\": \"user1@example.com\"\n"
+ "}\n";
// @formatter:on
this.server.enqueue(jsonResponse(userInfoResponse));
String userInfoUri = this.server.url("/user").toString();
ClientRegistration clientRegistration = this.clientRegistrationBuilder.userInfoUri(userInfoUri)
.userInfoAuthenticationMethod(AuthenticationMethod.HEADER)
.userNameAttributeName("user-name")
.build();
this.userService.loadUser(new OAuth2UserRequest(clientRegistration, this.accessToken));
assertThat(this.server.takeRequest(1, TimeUnit.SECONDS).getHeader(HttpHeaders.ACCEPT))
.isEqualTo(MediaType.APPLICATION_JSON_VALUE);
}
// gh-5500
@Test
public void loadUserWhenAuthenticationMethodHeaderSuccessResponseThenHttpMethodGet() throws Exception {
// @formatter:off
String userInfoResponse = "{\n"
+ " \"user-name\": \"user1\",\n"
+ " \"first-name\": \"first\",\n"
+ " \"last-name\": \"last\",\n"
+ " \"middle-name\": \"middle\",\n"
+ " \"address\": \"address\",\n"
+ " \"email\": \"user1@example.com\"\n"
+ "}\n";
// @formatter:on
this.server.enqueue(jsonResponse(userInfoResponse));
String userInfoUri = this.server.url("/user").toString();
ClientRegistration clientRegistration = this.clientRegistrationBuilder.userInfoUri(userInfoUri)
.userInfoAuthenticationMethod(AuthenticationMethod.HEADER)
.userNameAttributeName("user-name")
.build();
this.userService.loadUser(new OAuth2UserRequest(clientRegistration, this.accessToken));
RecordedRequest request = this.server.takeRequest();
assertThat(request.getMethod()).isEqualTo(HttpMethod.GET.name());
assertThat(request.getHeader(HttpHeaders.ACCEPT)).isEqualTo(MediaType.APPLICATION_JSON_VALUE);
assertThat(request.getHeader(HttpHeaders.AUTHORIZATION))
.isEqualTo("Bearer " + this.accessToken.getTokenValue());
}
// gh-5500
@Test
public void loadUserWhenAuthenticationMethodFormSuccessResponseThenHttpMethodPost() throws Exception {
// @formatter:off
String userInfoResponse = "{\n"
+ " \"user-name\": \"user1\",\n"
+ " \"first-name\": \"first\",\n"
+ " \"last-name\": \"last\",\n"
+ " \"middle-name\": \"middle\",\n"
+ " \"address\": \"address\",\n"
+ " \"email\": \"user1@example.com\"\n"
+ "}\n";
// @formatter:on
this.server.enqueue(jsonResponse(userInfoResponse));
String userInfoUri = this.server.url("/user").toString();
ClientRegistration clientRegistration = this.clientRegistrationBuilder.userInfoUri(userInfoUri)
.userInfoAuthenticationMethod(AuthenticationMethod.FORM)
.userNameAttributeName("user-name")
.build();
this.userService.loadUser(new OAuth2UserRequest(clientRegistration, this.accessToken));
RecordedRequest request = this.server.takeRequest();
assertThat(request.getMethod()).isEqualTo(HttpMethod.POST.name());
assertThat(request.getHeader(HttpHeaders.ACCEPT)).isEqualTo(MediaType.APPLICATION_JSON_VALUE);
assertThat(request.getHeader(HttpHeaders.CONTENT_TYPE)).contains(MediaType.APPLICATION_FORM_URLENCODED_VALUE);
assertThat(request.getBody().readUtf8()).isEqualTo("access_token=" + this.accessToken.getTokenValue());
}
@Test
public void loadUserWhenTokenContainsScopesThenIndividualScopeAuthorities() {
Map<String, Object> body = new HashMap<>();
body.put("id", "id");
DefaultOAuth2UserService userService = withMockResponse(body);
OAuth2UserRequest request = new OAuth2UserRequest(TestClientRegistrations.clientRegistration().build(),
TestOAuth2AccessTokens.scopes("message:read", "message:write"));
OAuth2User user = userService.loadUser(request);
assertThat(user.getAuthorities()).hasSize(3);
Iterator<? extends GrantedAuthority> authorities = user.getAuthorities().iterator();
assertThat(authorities.next()).isInstanceOf(OAuth2UserAuthority.class);
assertThat(authorities.next()).isEqualTo(new SimpleGrantedAuthority("SCOPE_message:read"));
assertThat(authorities.next()).isEqualTo(new SimpleGrantedAuthority("SCOPE_message:write"));
}
@Test
public void loadUserWhenTokenDoesNotContainScopesThenNoScopeAuthorities() {
Map<String, Object> body = new HashMap<>();
body.put("id", "id");
DefaultOAuth2UserService userService = withMockResponse(body);
OAuth2UserRequest request = new OAuth2UserRequest(TestClientRegistrations.clientRegistration().build(),
TestOAuth2AccessTokens.noScopes());
OAuth2User user = userService.loadUser(request);
assertThat(user.getAuthorities()).hasSize(1);
Iterator<? extends GrantedAuthority> authorities = user.getAuthorities().iterator();
assertThat(authorities.next()).isInstanceOf(OAuth2UserAuthority.class);
}
// gh-8764
@Test
public void loadUserWhenUserInfoSuccessResponseInvalidContentTypeThenThrowOAuth2AuthenticationException() {
String userInfoUri = this.server.url("/user").toString();
MockResponse response = new MockResponse();
response.setHeader(HttpHeaders.CONTENT_TYPE, MediaType.TEXT_PLAIN_VALUE);
response.setBody("invalid content type");
this.server.enqueue(response);
ClientRegistration clientRegistration = this.clientRegistrationBuilder.userInfoUri(userInfoUri)
.userInfoAuthenticationMethod(AuthenticationMethod.HEADER)
.userNameAttributeName("user-name")
.build();
assertThatExceptionOfType(OAuth2AuthenticationException.class)
.isThrownBy(() -> this.userService.loadUser(new OAuth2UserRequest(clientRegistration, this.accessToken)))
.withMessageContaining(
"[invalid_user_info_response] An error occurred while attempting to retrieve the UserInfo Resource "
+ "from '" + userInfoUri + "': response contains invalid content type 'text/plain'.");
}
@Test
public void setAttributesConverterWhenNullThenException() {
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(() -> this.userService.setAttributesConverter(null));
}
private DefaultOAuth2UserService withMockResponse(Map<String, Object> response) {
ResponseEntity<Map<String, Object>> responseEntity = new ResponseEntity<>(response, HttpStatus.OK);
Converter<OAuth2UserRequest, RequestEntity<?>> requestEntityConverter = mock(Converter.class);
RestOperations rest = mock(RestOperations.class);
given(rest.exchange(nullable(RequestEntity.class), any(ParameterizedTypeReference.class)))
.willReturn(responseEntity);
DefaultOAuth2UserService userService = new DefaultOAuth2UserService();
userService.setRequestEntityConverter(requestEntityConverter);
userService.setRestOperations(rest);
return userService;
}
private MockResponse jsonResponse(String json) {
return new MockResponse().setHeader(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON_VALUE).setBody(json);
}
}
| DefaultOAuth2UserServiceTests |
java | apache__camel | components/camel-openstack/src/generated/java/org/apache/camel/component/openstack/neutron/NeutronComponentConfigurer.java | {
"start": 744,
"end": 2315
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
NeutronComponent target = (NeutronComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": target.setAutowiredEnabled(property(camelContext, boolean.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": return boolean.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
NeutronComponent target = (NeutronComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": return target.isAutowiredEnabled();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
default: return null;
}
}
}
| NeutronComponentConfigurer |
java | apache__flink | flink-architecture-tests/flink-architecture-tests-production/src/main/java/org/apache/flink/architecture/rules/ConnectorRules.java | {
"start": 1644,
"end": 4328
} | class ____ {
private static final String[] CONNECTOR_PACKAGES = {
"org.apache.flink.connector..", "org.apache.flink.streaming.connectors.."
};
private static final String[] UTIL_PACKAGES = {"org.apache.flink.util.."};
private static DescribedPredicate<JavaClass>
areFlinkClassesThatResideOutsideOfConnectorPackagesAndArePublic() {
return JavaClass.Predicates.resideInAPackage("org.apache.flink..")
.and(JavaClass.Predicates.resideOutsideOfPackages(CONNECTOR_PACKAGES))
.and(
areDirectlyAnnotatedWithAtLeastOneOf(Public.class, PublicEvolving.class)
.or(areEnclosedInPublicClasses()))
.as(
"are flink classes that reside outside of connector packages and that are public",
Joiner.on("', '").join(CONNECTOR_PACKAGES));
}
private static DescribedPredicate<JavaClass> areEnclosedInPublicClasses() {
return JavaClass.Predicates.belongTo(
areDirectlyAnnotatedWithAtLeastOneOf(Public.class, PublicEvolving.class))
.as("are enclosed in public classes");
}
@ArchTest
@ArchTag(value = "org.apache.flink.testutils.junit.FailsOnJava11")
@ArchTag(value = "org.apache.flink.testutils.junit.FailsOnJava17")
public static final ArchRule CONNECTOR_CLASSES_ONLY_DEPEND_ON_PUBLIC_API =
freeze(
javaClassesThat(resideInAnyPackage(CONNECTOR_PACKAGES))
.and()
.areNotAnnotatedWith(Deprecated.class)
.should()
.onlyDependOnClassesThat(
areFlinkClassesThatResideOutsideOfConnectorPackagesAndArePublic()
.or(
JavaClass.Predicates.resideOutsideOfPackages(
"org.apache.flink.."))
.or(
JavaClass.Predicates.resideInAnyPackage(
CONNECTOR_PACKAGES))
.or(
JavaClass.Predicates.resideInAnyPackage(
UTIL_PACKAGES)))
.as(
"Connector production code must depend only on public API when outside of connector packages"));
}
| ConnectorRules |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java | {
"start": 4222,
"end": 4580
} | class ____ {
private SerialNumberManager.StringTable stringTable;
private final ArrayList<INodeReference> refList = Lists.newArrayList();
public SerialNumberManager.StringTable getStringTable() {
return stringTable;
}
public ArrayList<INodeReference> getRefList() {
return refList;
}
}
public static final | LoaderContext |
java | google__auto | value/src/it/functional/src/test/java/com/google/auto/value/AutoValueTest.java | {
"start": 16646,
"end": 17474
} | class ____ extends NonPublicSuper {
abstract String subString();
abstract int subInt();
static NonPublicSub create(Object superObject, String subString, int subInt) {
return new AutoValue_AutoValueTest_NonPublicSub(superObject, subString, subInt);
}
}
@Test
public void testNonPublicInheritedGetters() throws Exception {
NonPublicSub instance = NonPublicSub.create("blim", "blam", 1729);
assertEquals("blim", instance.superObject());
assertEquals("blam", instance.subString());
assertEquals(1729, instance.subInt());
assertEquals(instance, instance);
assertEqualsNullIsFalse(instance);
}
@SuppressWarnings("ObjectEqualsNull")
private void assertEqualsNullIsFalse(Object instance) {
assertFalse(instance.equals(null));
}
@AutoValue
abstract static | NonPublicSub |
java | google__error-prone | core/src/test/java/com/google/errorprone/fixes/SuggestedFixesTest.java | {
"start": 22303,
"end": 22666
} | class ____ {
<SomeAnnotation> @some.pkg.SomeAnnotation Void foo() {
return null;
}
}
""")
.doTest();
}
/** A test check that replaces all methods' return types with a given type. */
@BugPattern(summary = "Change the method return type", severity = ERROR)
public static | AddAnnotation |
java | apache__camel | components/camel-huawei/camel-huaweicloud-iam/src/test/java/org/apache/camel/component/huaweicloud/iam/GetUserTest.java | {
"start": 1318,
"end": 3137
} | class ____ extends CamelTestSupport {
private static final Logger LOG = LoggerFactory.getLogger(GetUserTest.class.getName());
TestConfiguration testConfiguration = new TestConfiguration();
@BindToRegistry("iamClient")
IAMMockClient mockClient = new IAMMockClient(null);
@BindToRegistry("serviceKeys")
ServiceKeys serviceKeys = new ServiceKeys(
testConfiguration.getProperty("accessKey"),
testConfiguration.getProperty("secretKey"));
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:get_user")
.setProperty("CamelHwCloudIamOperation", constant("getUser"))
.setProperty("CamelHwCloudIamUserId", constant(testConfiguration.getProperty("userId")))
.to("hwcloud-iam:?" +
"region=" + testConfiguration.getProperty("region") +
"&ignoreSslVerification=true" +
"&iamClient=#iamClient" +
"&serviceKeys=#serviceKeys")
.log("Get user successful")
.to("mock:get_user_result");
}
};
}
@Test
public void testGetUser() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:get_user_result");
mock.expectedMinimumMessageCount(1);
template.sendBody("direct:get_user", "sample_body");
Exchange responseExchange = mock.getExchanges().get(0);
mock.assertIsSatisfied();
assertEquals("{\"domainId\":\"138\",\"name\":\"User 15\",\"email\":\"user15@email.com\"}",
responseExchange.getIn().getBody(String.class));
}
}
| GetUserTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/batchfetch/BatchFetchBootstrapTest.java | {
"start": 1635,
"end": 1891
} | class ____ extends DatabaseEntity {
private Set<JafSid> members = new LinkedHashSet<>();
@ManyToMany
public Set<JafSid> getMembers() {
return members;
}
public void setMembers(Set<JafSid> members) {
this.members = members;
}
}
}
| UserGroup |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/ignore/AnimalDto.java | {
"start": 194,
"end": 1228
} | class ____ {
//CHECKSTYLE:OFF
public Integer publicAge;
public String publicColor;
//CHECKSTYLE:ON
private String name;
private Integer size;
private Integer age;
private String color;
public AnimalDto() {
}
public AnimalDto(String name, Integer size, Integer age, String color) {
this.name = name;
this.size = size;
this.publicAge = age;
this.age = age;
this.publicColor = color;
this.color = color;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Integer getSize() {
return size;
}
public void setSize(Integer size) {
this.size = size;
}
public Integer getAge() {
return age;
}
public void setAge(Integer age) {
this.age = age;
}
public String getColor() {
return color;
}
public void setColor(String color) {
this.color = color;
}
}
| AnimalDto |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverContext.java | {
"start": 2529,
"end": 7552
} | class ____ {
// Working set. Only the thread executing the driver will update this set.
Set<Releasable> workingSet = Collections.newSetFromMap(new IdentityHashMap<>());
private final AtomicReference<Snapshot> snapshot = new AtomicReference<>();
private final BigArrays bigArrays;
private final BlockFactory blockFactory;
private final AsyncActions asyncActions = new AsyncActions();
private final WarningsMode warningsMode;
private final @Nullable String driverDescription;
private Runnable earlyTerminationChecker = () -> {};
public DriverContext(BigArrays bigArrays, BlockFactory blockFactory) {
this(bigArrays, blockFactory, null, WarningsMode.COLLECT);
}
public DriverContext(BigArrays bigArrays, BlockFactory blockFactory, String description) {
this(bigArrays, blockFactory, description, WarningsMode.COLLECT);
}
private DriverContext(BigArrays bigArrays, BlockFactory blockFactory, @Nullable String description, WarningsMode warningsMode) {
Objects.requireNonNull(bigArrays);
Objects.requireNonNull(blockFactory);
this.bigArrays = bigArrays;
this.blockFactory = blockFactory;
this.driverDescription = description;
this.warningsMode = warningsMode;
}
public BigArrays bigArrays() {
return bigArrays;
}
/**
* The {@link CircuitBreaker} to use to track memory.
*/
public CircuitBreaker breaker() {
return blockFactory.breaker();
}
public BlockFactory blockFactory() {
return blockFactory;
}
/** See {@link Driver#shortDescription}. */
@Nullable
public String driverDescription() {
return driverDescription;
}
/** A snapshot of the driver context. */
public record Snapshot(Set<Releasable> releasables) implements Releasable {
@Override
public void close() {
Releasables.close(releasables);
}
}
/**
* Adds a releasable to this context. Releasables are identified by Object identity.
* @return true if the releasable was added, otherwise false (if already present)
*/
public boolean addReleasable(Releasable releasable) {
return workingSet.add(releasable);
}
/**
* Removes a releasable from this context. Releasables are identified by Object identity.
* @return true if the releasable was removed, otherwise false (if not present)
*/
public boolean removeReleasable(Releasable releasable) {
return workingSet.remove(releasable);
}
/**
* Retrieves the snapshot of the driver context after it has been finished.
* @return the snapshot
*/
public Snapshot getSnapshot() {
ensureFinished();
// should be called by the DriverRunner
return snapshot.get();
}
/**
* Tells whether this context is finished. Can be invoked from any thread.
*/
public boolean isFinished() {
return snapshot.get() != null;
}
/**
* Finishes this context. Further mutating operations should not be performed.
*/
public void finish() {
if (isFinished()) {
return;
}
// must be called by the thread executing the driver.
// no more updates to this context.
asyncActions.finish();
var itr = workingSet.iterator();
workingSet = null;
Set<Releasable> releasableSet = Collections.newSetFromMap(new IdentityHashMap<>());
while (itr.hasNext()) {
var r = itr.next();
releasableSet.add(r);
itr.remove();
}
snapshot.compareAndSet(null, new Snapshot(releasableSet));
}
private void ensureFinished() {
if (isFinished() == false) {
throw new IllegalStateException("not finished");
}
}
public void waitForAsyncActions(ActionListener<Void> listener) {
asyncActions.addListener(listener);
}
public void addAsyncAction() {
asyncActions.addInstance();
}
public void removeAsyncAction() {
asyncActions.removeInstance();
}
/**
* Checks if the Driver associated with this DriverContext has been cancelled or early terminated.
*/
public void checkForEarlyTermination() {
earlyTerminationChecker.run();
}
/**
* Initializes the early termination or cancellation checker for this DriverContext.
* This method should be called when associating this DriverContext with a driver.
*/
public void initializeEarlyTerminationChecker(Runnable checker) {
this.earlyTerminationChecker = checker;
}
/**
* Evaluators should use this function to decide their warning behavior.
* @return an appropriate {@link WarningsMode}
*/
public WarningsMode warningsMode() {
return warningsMode;
}
/**
* Indicates the behavior Evaluators of this context should use for reporting warnings
*/
public | DriverContext |
java | spring-projects__spring-framework | spring-test/src/main/java/org/springframework/test/context/junit/jupiter/SpringExtension.java | {
"start": 9882,
"end": 10748
} | class ____ times.
Store store = context.getStore(AUTOWIRED_VALIDATION_NAMESPACE);
String errorMessage = store.computeIfAbsent(context.getRequiredTestClass(), testClass -> {
Method[] methodsWithErrors =
ReflectionUtils.getUniqueDeclaredMethods(testClass, autowiredTestOrLifecycleMethodFilter);
return (methodsWithErrors.length == 0 ? NO_VIOLATIONS_DETECTED :
String.format(
"Test methods and test lifecycle methods must not be annotated with @Autowired. " +
"You should instead annotate individual method parameters with @Autowired, " +
"@Qualifier, or @Value. Offending methods in test class %s: %s",
testClass.getName(), Arrays.toString(methodsWithErrors)));
}, String.class);
if (!errorMessage.isEmpty()) {
throw new IllegalStateException(errorMessage);
}
}
/**
* Validate that the test | multiple |
java | spring-projects__spring-boot | module/spring-boot-actuator-autoconfigure/src/main/java/org/springframework/boot/actuate/autoconfigure/beans/BeansEndpointAutoConfiguration.java | {
"start": 1414,
"end": 1624
} | class ____ {
@Bean
@ConditionalOnMissingBean
BeansEndpoint beansEndpoint(ConfigurableApplicationContext applicationContext) {
return new BeansEndpoint(applicationContext);
}
}
| BeansEndpointAutoConfiguration |
java | apache__avro | lang/java/mapred/src/main/java/org/apache/avro/mapred/AvroMultipleInputs.java | {
"start": 4123,
"end": 4896
} | class ____ use for this path
*/
private static void addInputPath(JobConf conf, Path path, Schema inputSchema) {
String schemaMapping = path.toString() + ";" + toBase64(inputSchema.toString());
String schemas = conf.get(SCHEMA_KEY);
conf.set(SCHEMA_KEY, schemas == null ? schemaMapping : schemas + "," + schemaMapping);
conf.setInputFormat(DelegatingInputFormat.class);
}
/**
* Add a {@link Path} with a custom {@link Schema} and {@link AvroMapper} to the
* list of inputs for the map-reduce job.
*
* @param conf The configuration of the job
* @param path {@link Path} to be added to the list of inputs for the job
* @param inputSchema {@link Schema} to use for this path
* @param mapperClass {@link AvroMapper} | to |
java | elastic__elasticsearch | x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/FuseIT.java | {
"start": 845,
"end": 9999
} | class ____ extends AbstractEsqlIntegTestCase {
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return List.of(EsqlPluginWithEnterpriseOrTrialLicense.class);
}
@Before
public void setupIndex() {
createAndPopulateIndex();
}
public void testFuseWithRrf() throws Exception {
var query = """
FROM test METADATA _score, _id, _index
| WHERE id > 2
| FORK
( WHERE content:"fox" | SORT _score, _id DESC )
( WHERE content:"dog" | SORT _score, _id DESC )
| FUSE
| SORT _score DESC, _id, _index
| EVAL _fork = mv_sort(_fork)
| EVAL _score = round(_score, 4)
| KEEP id, content, _score, _fork
""";
try (var resp = run(query)) {
assertColumnNames(resp.columns(), List.of("id", "content", "_score", "_fork"));
assertColumnTypes(resp.columns(), List.of("integer", "keyword", "double", "keyword"));
assertThat(getValuesList(resp.values()).size(), equalTo(3));
Iterable<Iterable<Object>> expectedValues = List.of(
List.of(6, "The quick brown fox jumps over the lazy dog", 0.0325, List.of("fork1", "fork2")),
List.of(4, "The dog is brown but this document is very very long", 0.0164, "fork2"),
List.of(3, "This dog is really brown", 0.0159, "fork2")
);
assertValues(resp.values(), expectedValues);
}
}
public void testFuseRrfWithWeights() {
var query = """
FROM test METADATA _score, _id, _index
| WHERE id > 2
| FORK
( WHERE content:"fox" | SORT _score, _id DESC )
( WHERE content:"dog" | SORT _score, _id DESC )
| FUSE RRF WITH {"weights": { "fork1": 0.4, "fork2": 0.6}}
| SORT _score DESC, _id, _index
| EVAL _fork = mv_sort(_fork)
| EVAL _score = round(_score, 4)
| KEEP id, content, _score, _fork
""";
try (var resp = run(query)) {
assertColumnNames(resp.columns(), List.of("id", "content", "_score", "_fork"));
assertColumnTypes(resp.columns(), List.of("integer", "keyword", "double", "keyword"));
assertThat(getValuesList(resp.values()).size(), equalTo(3));
Iterable<Iterable<Object>> expectedValues = List.of(
List.of(6, "The quick brown fox jumps over the lazy dog", 0.0162, List.of("fork1", "fork2")),
List.of(4, "The dog is brown but this document is very very long", 0.0098, "fork2"),
List.of(3, "This dog is really brown", 0.0095, "fork2")
);
assertValues(resp.values(), expectedValues);
}
}
public void testFuseRrfWithWeightsAndRankConstant() {
var query = """
FROM test METADATA _score, _id, _index
| WHERE id > 2
| FORK
( WHERE content:"fox" | SORT _score, _id DESC )
( WHERE content:"dog" | SORT _score, _id DESC )
| FUSE RRF WITH {"weights": { "fork1": 0.4, "fork2": 0.6}, "rank_constant": 55 }
| SORT _score DESC, _id, _index
| EVAL _fork = mv_sort(_fork)
| EVAL _score = round(_score, 4)
| KEEP id, content, _score, _fork
""";
try (var resp = run(query)) {
assertColumnNames(resp.columns(), List.of("id", "content", "_score", "_fork"));
assertColumnTypes(resp.columns(), List.of("integer", "keyword", "double", "keyword"));
assertThat(getValuesList(resp.values()).size(), equalTo(3));
Iterable<Iterable<Object>> expectedValues = List.of(
List.of(6, "The quick brown fox jumps over the lazy dog", 0.0177, List.of("fork1", "fork2")),
List.of(4, "The dog is brown but this document is very very long", 0.0107, "fork2"),
List.of(3, "This dog is really brown", 0.0103, "fork2")
);
assertValues(resp.values(), expectedValues);
}
}
public void testFuseSimpleLinear() {
var query = """
FROM test METADATA _score, _id, _index
| WHERE id > 2
| FORK
( WHERE content:"fox" | SORT _score, _id DESC )
( WHERE content:"dog" | SORT _score, _id DESC )
| FUSE linear
| SORT _score DESC
| EVAL _fork = mv_sort(_fork)
| EVAL _score = round(_score, 4)
| KEEP id, content, _score, _fork
""";
try (var resp = run(query)) {
assertColumnNames(resp.columns(), List.of("id", "content", "_score", "_fork"));
assertColumnTypes(resp.columns(), List.of("integer", "keyword", "double", "keyword"));
assertThat(getValuesList(resp.values()).size(), equalTo(3));
Iterable<Iterable<Object>> expectedValues = List.of(
List.of(6, "The quick brown fox jumps over the lazy dog", 1.3025, List.of("fork1", "fork2")),
List.of(3, "This dog is really brown", 0.4963, "fork2"),
List.of(4, "The dog is brown but this document is very very long", 0.3536, "fork2")
);
assertValues(resp.values(), expectedValues);
}
}
public void testFuseLinearWithWeightsAndNormalizer() {
assumeTrue("requires FUSE_L2_NORM capability", EsqlCapabilities.Cap.FUSE_L2_NORM.isEnabled());
var query = """
FROM test METADATA _score, _id, _index
| WHERE id > 2
| FORK
( WHERE content:"fox" | SORT _score, _id DESC )
( WHERE content:"dog" | SORT _score, _id DESC )
| FUSE LINEAR WITH {"weights": { "fork1": 0.4, "fork2": 0.6}, "normalizer": "l2_norm"}
| SORT _score DESC
| EVAL _fork = mv_sort(_fork)
| EVAL _score = round(_score, 4)
| KEEP id, content, _score, _fork
""";
try (var resp = run(query)) {
assertColumnNames(resp.columns(), List.of("id", "content", "_score", "_fork"));
assertColumnTypes(resp.columns(), List.of("integer", "keyword", "double", "keyword"));
assertThat(getValuesList(resp.values()).size(), equalTo(3));
Iterable<Iterable<Object>> expectedValues = List.of(
List.of(6, "The quick brown fox jumps over the lazy dog", 0.7241, List.of("fork1", "fork2")),
List.of(3, "This dog is really brown", 0.4112, "fork2"),
List.of(4, "The dog is brown but this document is very very long", 0.293, "fork2")
);
assertValues(resp.values(), expectedValues);
}
}
public void testFuseWithSingleFork() {
for (Fuse.FuseType type : Fuse.FuseType.values()) {
var query = """
FROM test METADATA _score, _id, _index
| WHERE id > 2
| FORK
( WHERE content:"fox" | SORT _score, _id DESC )
| FUSE
""" + type.name() + """
| SORT _score DESC, _id, _index
| EVAL _fork = mv_sort(_fork)
| EVAL _score = round(_score, 4)
| KEEP id, content, _fork
""";
try (var resp = run(query)) {
assertColumnNames(resp.columns(), List.of("id", "content", "_fork"));
assertColumnTypes(resp.columns(), List.of("integer", "keyword", "keyword"));
assertThat(getValuesList(resp.values()).size(), equalTo(1));
Iterable<Iterable<Object>> expectedValues = List.of(List.of(6, "The quick brown fox jumps over the lazy dog", "fork1"));
assertValues(resp.values(), expectedValues);
}
}
}
private void createAndPopulateIndex() {
var indexName = "test";
var client = client().admin().indices();
var CreateRequest = client.prepareCreate(indexName)
.setSettings(Settings.builder().put("index.number_of_shards", 1))
.setMapping("id", "type=integer", "content", "type=text");
assertAcked(CreateRequest);
client().prepareBulk()
.add(new IndexRequest(indexName).id("1").source("id", 1, "content", "This is a brown fox"))
.add(new IndexRequest(indexName).id("2").source("id", 2, "content", "This is a brown dog"))
.add(new IndexRequest(indexName).id("3").source("id", 3, "content", "This dog is really brown"))
.add(new IndexRequest(indexName).id("4").source("id", 4, "content", "The dog is brown but this document is very very long"))
.add(new IndexRequest(indexName).id("5").source("id", 5, "content", "There is also a white cat"))
.add(new IndexRequest(indexName).id("6").source("id", 6, "content", "The quick brown fox jumps over the lazy dog"))
.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
.get();
ensureYellow(indexName);
}
}
| FuseIT |
java | spring-projects__spring-boot | core/spring-boot-test/src/main/java/org/springframework/boot/test/json/JsonContentAssert.java | {
"start": 26545,
"end": 30092
} | class ____ to load the resource
* @param compareMode the compare mode used when checking
* @return {@code this} assertion object
* @throws AssertionError if the actual JSON value is equal to the given one
*/
public JsonContentAssert isNotEqualToJson(String path, Class<?> resourceLoadClass, JSONCompareMode compareMode) {
String expectedJson = this.loader.getJson(path, resourceLoadClass);
return assertNotPassed(compare(expectedJson, compareMode));
}
/**
* Verifies that the actual value is not equal to the specified JSON bytes.
* @param expected the expected JSON bytes
* @param compareMode the compare mode used when checking
* @return {@code this} assertion object
* @throws AssertionError if the actual JSON value is equal to the given one
*/
public JsonContentAssert isNotEqualToJson(byte[] expected, JSONCompareMode compareMode) {
String expectedJson = this.loader.getJson(expected);
return assertNotPassed(compare(expectedJson, compareMode));
}
/**
* Verifies that the actual value is not equal to the specified JSON file.
* @param expected a file containing the expected JSON
* @param compareMode the compare mode used when checking
* @return {@code this} assertion object
* @throws AssertionError if the actual JSON value is equal to the given one
*/
public JsonContentAssert isNotEqualToJson(File expected, JSONCompareMode compareMode) {
String expectedJson = this.loader.getJson(expected);
return assertNotPassed(compare(expectedJson, compareMode));
}
/**
* Verifies that the actual value is not equal to the specified JSON input stream.
* @param expected an input stream containing the expected JSON
* @param compareMode the compare mode used when checking
* @return {@code this} assertion object
* @throws AssertionError if the actual JSON value is equal to the given one
*/
public JsonContentAssert isNotEqualToJson(InputStream expected, JSONCompareMode compareMode) {
String expectedJson = this.loader.getJson(expected);
return assertNotPassed(compare(expectedJson, compareMode));
}
/**
* Verifies that the actual value is not equal to the specified JSON resource.
* @param expected a resource containing the expected JSON
* @param compareMode the compare mode used when checking
* @return {@code this} assertion object
* @throws AssertionError if the actual JSON value is equal to the given one
*/
public JsonContentAssert isNotEqualToJson(Resource expected, JSONCompareMode compareMode) {
String expectedJson = this.loader.getJson(expected);
return assertNotPassed(compare(expectedJson, compareMode));
}
/**
* Verifies that the actual value is not equal to the specified JSON. The
* {@code expected} value can contain the JSON itself or, if it ends with
* {@code .json}, the name of a resource to be loaded using {@code resourceLoadClass}.
* @param expected the expected JSON or the name of a resource containing the expected
* JSON
* @param comparator the comparator used when checking
* @return {@code this} assertion object
* @throws AssertionError if the actual JSON value is equal to the given one
*/
public JsonContentAssert isNotEqualToJson(CharSequence expected, JSONComparator comparator) {
String expectedJson = this.loader.getJson(expected);
return assertNotPassed(compare(expectedJson, comparator));
}
/**
* Verifies that the actual value is not equal to the specified JSON resource.
* @param path the name of a resource containing the expected JSON
* @param resourceLoadClass the source | used |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/engine/TestInstanceLifecycleTests.java | {
"start": 49149,
"end": 49488
} | class ____ {
ClassTemplateWithDefaultLifecycleTestCase() {
incrementInstanceCount(ClassTemplateWithDefaultLifecycleTestCase.class);
}
@Test
void test1() {
}
@Test
void test2() {
}
}
@ClassTemplate
@ExtendWith(Twice.class)
@ExtendWith(InstanceTrackingExtension.class)
static | ClassTemplateWithDefaultLifecycleTestCase |
java | apache__camel | components/camel-webhook/src/test/java/org/apache/camel/component/webhook/WebhookHttpBindingTest.java | {
"start": 1311,
"end": 3082
} | class ____ extends WebhookTestBase {
@Test
public void testWrapper() {
String result = template.requestBody("netty-http:http://localhost:" + port
+ WebhookConfiguration.computeDefaultPath("wb-delegate://xx"),
"", String.class);
assertEquals("msg: webhook", result);
result = template.requestBodyAndHeader("netty-http:http://localhost:" + port
+ WebhookConfiguration.computeDefaultPath("wb-delegate://xx"),
"", Exchange.HTTP_METHOD, "PUT", String.class);
assertEquals("msg: webhook", result);
}
@Test
public void testGetError() {
assertThrows(CamelExecutionException.class,
() -> template.requestBodyAndHeader("netty-http:http://localhost:" + port, "",
Exchange.HTTP_METHOD, "GET", String.class));
}
@Override
protected void bindToRegistry(Registry registry) {
registry.bind("wb-delegate-component", new TestComponent(endpoint -> {
endpoint.setWebhookHandler(proc -> ex -> {
ex.getMessage().setBody("webhook");
proc.process(ex);
});
endpoint.setWebhookMethods(() -> Arrays.asList("POST", "PUT"));
}));
}
@Override
protected RoutesBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
restConfiguration()
.host("0.0.0.0")
.port(port);
from("webhook:wb-delegate://xx")
.transform(body().prepend("msg: "));
}
};
}
}
| WebhookHttpBindingTest |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/KubernetesNodesEndpointBuilderFactory.java | {
"start": 23112,
"end": 33354
} | interface ____
extends
EndpointProducerBuilder {
default AdvancedKubernetesNodesEndpointProducerBuilder advanced() {
return (AdvancedKubernetesNodesEndpointProducerBuilder) this;
}
/**
* The Kubernetes API Version to use.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param apiVersion the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointProducerBuilder apiVersion(String apiVersion) {
doSetProperty("apiVersion", apiVersion);
return this;
}
/**
* The dns domain, used for ServiceCall EIP.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param dnsDomain the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointProducerBuilder dnsDomain(String dnsDomain) {
doSetProperty("dnsDomain", dnsDomain);
return this;
}
/**
* Default KubernetesClient to use if provided.
*
* The option is a:
* <code>io.fabric8.kubernetes.client.KubernetesClient</code> type.
*
* Group: common
*
* @param kubernetesClient the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointProducerBuilder kubernetesClient(io.fabric8.kubernetes.client.KubernetesClient kubernetesClient) {
doSetProperty("kubernetesClient", kubernetesClient);
return this;
}
/**
* Default KubernetesClient to use if provided.
*
* The option will be converted to a
* <code>io.fabric8.kubernetes.client.KubernetesClient</code> type.
*
* Group: common
*
* @param kubernetesClient the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointProducerBuilder kubernetesClient(String kubernetesClient) {
doSetProperty("kubernetesClient", kubernetesClient);
return this;
}
/**
* The namespace.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param namespace the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointProducerBuilder namespace(String namespace) {
doSetProperty("namespace", namespace);
return this;
}
/**
* The port name, used for ServiceCall EIP.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param portName the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointProducerBuilder portName(String portName) {
doSetProperty("portName", portName);
return this;
}
/**
* The port protocol, used for ServiceCall EIP.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: tcp
* Group: common
*
* @param portProtocol the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointProducerBuilder portProtocol(String portProtocol) {
doSetProperty("portProtocol", portProtocol);
return this;
}
/**
* Producer operation to do on Kubernetes.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param operation the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointProducerBuilder operation(String operation) {
doSetProperty("operation", operation);
return this;
}
/**
* The CA Cert Data.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param caCertData the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointProducerBuilder caCertData(String caCertData) {
doSetProperty("caCertData", caCertData);
return this;
}
/**
* The CA Cert File.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param caCertFile the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointProducerBuilder caCertFile(String caCertFile) {
doSetProperty("caCertFile", caCertFile);
return this;
}
/**
* The Client Cert Data.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param clientCertData the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointProducerBuilder clientCertData(String clientCertData) {
doSetProperty("clientCertData", clientCertData);
return this;
}
/**
* The Client Cert File.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param clientCertFile the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointProducerBuilder clientCertFile(String clientCertFile) {
doSetProperty("clientCertFile", clientCertFile);
return this;
}
/**
* The Key Algorithm used by the client.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param clientKeyAlgo the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointProducerBuilder clientKeyAlgo(String clientKeyAlgo) {
doSetProperty("clientKeyAlgo", clientKeyAlgo);
return this;
}
/**
* The Client Key data.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param clientKeyData the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointProducerBuilder clientKeyData(String clientKeyData) {
doSetProperty("clientKeyData", clientKeyData);
return this;
}
/**
* The Client Key file.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param clientKeyFile the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointProducerBuilder clientKeyFile(String clientKeyFile) {
doSetProperty("clientKeyFile", clientKeyFile);
return this;
}
/**
* The Client Key Passphrase.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param clientKeyPassphrase the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointProducerBuilder clientKeyPassphrase(String clientKeyPassphrase) {
doSetProperty("clientKeyPassphrase", clientKeyPassphrase);
return this;
}
/**
* The Auth Token.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param oauthToken the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointProducerBuilder oauthToken(String oauthToken) {
doSetProperty("oauthToken", oauthToken);
return this;
}
/**
* Password to connect to Kubernetes.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param password the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointProducerBuilder password(String password) {
doSetProperty("password", password);
return this;
}
/**
* Define if the certs we used are trusted anyway or not.
*
* The option is a: <code>java.lang.Boolean</code> type.
*
* Default: false
* Group: security
*
* @param trustCerts the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointProducerBuilder trustCerts(Boolean trustCerts) {
doSetProperty("trustCerts", trustCerts);
return this;
}
/**
* Define if the certs we used are trusted anyway or not.
*
* The option will be converted to a <code>java.lang.Boolean</code>
* type.
*
* Default: false
* Group: security
*
* @param trustCerts the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointProducerBuilder trustCerts(String trustCerts) {
doSetProperty("trustCerts", trustCerts);
return this;
}
/**
* Username to connect to Kubernetes.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param username the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointProducerBuilder username(String username) {
doSetProperty("username", username);
return this;
}
}
/**
* Advanced builder for endpoint producers for the Kubernetes Nodes component.
*/
public | KubernetesNodesEndpointProducerBuilder |
java | quarkusio__quarkus | independent-projects/arc/processor/src/main/java/io/quarkus/arc/processor/bcextensions/MethodInfoImpl.java | {
"start": 653,
"end": 1884
} | class ____ extends DeclarationInfoImpl<org.jboss.jandex.MethodInfo> implements MethodInfo {
MethodInfoImpl(org.jboss.jandex.IndexView jandexIndex, org.jboss.jandex.MutableAnnotationOverlay annotationOverlay,
org.jboss.jandex.MethodInfo jandexDeclaration) {
super(jandexIndex, annotationOverlay, jandexDeclaration);
}
@Override
public String name() {
if (isConstructor()) {
return jandexDeclaration.declaringClass().name().toString();
}
return jandexDeclaration.name();
}
@Override
public List<ParameterInfo> parameters() {
List<ParameterInfo> result = new ArrayList<>(jandexDeclaration.parametersCount());
for (org.jboss.jandex.MethodParameterInfo jandexParameter : jandexDeclaration.parameters()) {
result.add(new ParameterInfoImpl(jandexIndex, annotationOverlay, jandexParameter));
}
return result;
}
@Override
public Type returnType() {
if (isConstructor()) {
// Jandex returns a void type as a return type of a constructor,
// but it has the correct (type use) annotations
//
// so we just copy those annotations to a | MethodInfoImpl |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/connectors/ExternalDynamicSink.java | {
"start": 2049,
"end": 6325
} | class ____ implements DynamicTableSink, SupportsWritingMetadata {
private static final String EXTERNAL_DATASTREAM_TRANSFORMATION = "external-datastream";
private static final String ROWTIME_METADATA_KEY = "rowtime";
private static final DataType ROWTIME_METADATA_DATA_TYPE = DataTypes.TIMESTAMP_LTZ(3).notNull();
private final @Nullable ChangelogMode changelogMode;
private final DataType physicalDataType;
// mutable attributes
private boolean consumeRowtimeMetadata;
ExternalDynamicSink(@Nullable ChangelogMode changelogMode, DataType physicalDataType) {
this.changelogMode = changelogMode;
this.physicalDataType = physicalDataType;
}
@Override
public ChangelogMode getChangelogMode(ChangelogMode requestedMode) {
if (changelogMode == null) {
return requestedMode;
}
return changelogMode;
}
@Override
public SinkRuntimeProvider getSinkRuntimeProvider(Context context) {
final DynamicTableSink.DataStructureConverter physicalConverter =
context.createDataStructureConverter(physicalDataType);
return (TransformationSinkProvider)
transformationContext -> {
final Transformation<RowData> input =
transformationContext.getInputTransformation();
final LogicalType physicalType = physicalDataType.getLogicalType();
final RowData.FieldGetter atomicFieldGetter;
if (LogicalTypeChecks.isCompositeType(physicalType)) {
atomicFieldGetter = null;
} else {
atomicFieldGetter = RowData.createFieldGetter(physicalType, 0);
}
TransformationMetadata transformationMeta =
transformationContext
.generateUid(EXTERNAL_DATASTREAM_TRANSFORMATION)
.map(
uid ->
new TransformationMetadata(
uid,
generateOperatorName(),
generateOperatorDesc()))
.orElseGet(
() ->
new TransformationMetadata(
generateOperatorName(),
generateOperatorDesc()));
return ExecNodeUtil.createOneInputTransformation(
input,
transformationMeta,
new OutputConversionOperator(
atomicFieldGetter,
physicalConverter,
transformationContext.getRowtimeIndex(),
consumeRowtimeMetadata),
ExternalTypeInfo.of(physicalDataType),
input.getParallelism(),
false);
};
}
private String generateOperatorName() {
return "TableToDataStream";
}
private String generateOperatorDesc() {
return String.format(
"TableToDataStream(type=%s, rowtime=%s)",
physicalDataType.toString(), consumeRowtimeMetadata);
}
@Override
public DynamicTableSink copy() {
return new ExternalDynamicSink(changelogMode, physicalDataType);
}
@Override
public String asSummaryString() {
return generateOperatorName();
}
@Override
public Map<String, DataType> listWritableMetadata() {
return Collections.singletonMap(ROWTIME_METADATA_KEY, ROWTIME_METADATA_DATA_TYPE);
}
@Override
public void applyWritableMetadata(List<String> metadataKeys, DataType consumedDataType) {
consumeRowtimeMetadata = metadataKeys.contains(ROWTIME_METADATA_KEY);
}
}
| ExternalDynamicSink |
java | apache__camel | core/camel-management-api/src/main/java/org/apache/camel/api/management/mbean/ManagedSetVariableMBean.java | {
"start": 916,
"end": 1314
} | interface ____ extends ManagedProcessorMBean {
@ManagedAttribute(description = "Name of variable to set a new value")
String getVariableName();
@ManagedAttribute(description = "The language for the expression")
String getExpressionLanguage();
@ManagedAttribute(description = "Expression to return the value of the variable")
String getExpression();
}
| ManagedSetVariableMBean |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFile.java | {
"start": 43288,
"end": 57471
} | class ____ implements Closeable {
// The underlying TFile reader.
final Reader reader;
// current block (null if reaching end)
private BlockReader blkReader;
Location beginLocation;
Location endLocation;
Location currentLocation;
// flag to ensure value is only examined once.
boolean valueChecked = false;
// reusable buffer for keys.
final byte[] keyBuffer;
// length of key, -1 means key is invalid.
int klen = -1;
static final int MAX_VAL_TRANSFER_BUF_SIZE = 128 * 1024;
BytesWritable valTransferBuffer;
DataInputBuffer keyDataInputStream;
ChunkDecoder valueBufferInputStream;
DataInputStream valueDataInputStream;
// vlen == -1 if unknown.
int vlen;
/**
* Constructor
*
* @param reader
* The TFile reader object.
* @param offBegin
* Begin byte-offset of the scan.
* @param offEnd
* End byte-offset of the scan.
* @throws IOException
*
* The offsets will be rounded to the beginning of a compressed
* block whose offset is greater than or equal to the specified
* offset.
*/
protected Scanner(Reader reader, long offBegin, long offEnd)
throws IOException {
this(reader, reader.getLocationNear(offBegin), reader
.getLocationNear(offEnd));
}
/**
* Constructor
*
* @param reader
* The TFile reader object.
* @param begin
* Begin location of the scan.
* @param end
* End location of the scan.
* @throws IOException
*/
Scanner(Reader reader, Location begin, Location end) throws IOException {
this.reader = reader;
// ensure the TFile index is loaded throughout the life of scanner.
reader.checkTFileDataIndex();
beginLocation = begin;
endLocation = end;
valTransferBuffer = new BytesWritable();
// TODO: remember the longest key in a TFile, and use it to replace
// MAX_KEY_SIZE.
keyBuffer = new byte[MAX_KEY_SIZE];
keyDataInputStream = new DataInputBuffer();
valueBufferInputStream = new ChunkDecoder();
valueDataInputStream = new DataInputStream(valueBufferInputStream);
if (beginLocation.compareTo(endLocation) >= 0) {
currentLocation = new Location(endLocation);
} else {
currentLocation = new Location(0, 0);
initBlock(beginLocation.getBlockIndex());
inBlockAdvance(beginLocation.getRecordIndex());
}
}
/**
* Constructor
*
* @param reader
* The TFile reader object.
* @param beginKey
* Begin key of the scan. If null, scan from the first
* <K, V> entry of the TFile.
* @param endKey
* End key of the scan. If null, scan up to the last <K, V>
* entry of the TFile.
* @throws IOException raised on errors performing I/O.
*/
protected Scanner(Reader reader, RawComparable beginKey,
RawComparable endKey) throws IOException {
this(reader, (beginKey == null) ? reader.begin() : reader
.getBlockContainsKey(beginKey, false), reader.end());
if (beginKey != null) {
inBlockAdvance(beginKey, false);
beginLocation.set(currentLocation);
}
if (endKey != null) {
seekTo(endKey, false);
endLocation.set(currentLocation);
seekTo(beginLocation);
}
}
/**
* Move the cursor to the first entry whose key is greater than or equal
* to the input key. Synonymous to seekTo(key, 0, key.length). The entry
* returned by the previous entry() call will be invalid.
*
* @param key
* The input key
* @return true if we find an equal key.
* @throws IOException raised on errors performing I/O.
*/
public boolean seekTo(byte[] key) throws IOException {
return seekTo(key, 0, key.length);
}
/**
* Move the cursor to the first entry whose key is greater than or equal
* to the input key. The entry returned by the previous entry() call will
* be invalid.
*
* @param key
* The input key
* @param keyOffset
* offset in the key buffer.
* @param keyLen
* key buffer length.
* @return true if we find an equal key; false otherwise.
* @throws IOException raised on errors performing I/O.
*/
public boolean seekTo(byte[] key, int keyOffset, int keyLen)
throws IOException {
return seekTo(new ByteArray(key, keyOffset, keyLen), false);
}
private boolean seekTo(RawComparable key, boolean beyond)
throws IOException {
Location l = reader.getBlockContainsKey(key, beyond);
if (l.compareTo(beginLocation) < 0) {
l = beginLocation;
} else if (l.compareTo(endLocation) >= 0) {
seekTo(endLocation);
return false;
}
// check if what we are seeking is in the later part of the current
// block.
if (atEnd() || (l.getBlockIndex() != currentLocation.getBlockIndex())
|| (compareCursorKeyTo(key) >= 0)) {
// sorry, we must seek to a different location first.
seekTo(l);
}
return inBlockAdvance(key, beyond);
}
/**
* Move the cursor to the new location. The entry returned by the previous
* entry() call will be invalid.
*
* @param l
* new cursor location. It must fall between the begin and end
* location of the scanner.
* @throws IOException
*/
private void seekTo(Location l) throws IOException {
if (l.compareTo(beginLocation) < 0) {
throw new IllegalArgumentException(
"Attempt to seek before the begin location.");
}
if (l.compareTo(endLocation) > 0) {
throw new IllegalArgumentException(
"Attempt to seek after the end location.");
}
if (l.compareTo(endLocation) == 0) {
parkCursorAtEnd();
return;
}
if (l.getBlockIndex() != currentLocation.getBlockIndex()) {
// going to a totally different block
initBlock(l.getBlockIndex());
} else {
if (valueChecked) {
// may temporarily go beyond the last record in the block (in which
// case the next if loop will always be true).
inBlockAdvance(1);
}
if (l.getRecordIndex() < currentLocation.getRecordIndex()) {
initBlock(l.getBlockIndex());
}
}
inBlockAdvance(l.getRecordIndex() - currentLocation.getRecordIndex());
return;
}
/**
* Rewind to the first entry in the scanner. The entry returned by the
* previous entry() call will be invalid.
*
* @throws IOException raised on errors performing I/O.
*/
public void rewind() throws IOException {
seekTo(beginLocation);
}
/**
* Seek to the end of the scanner. The entry returned by the previous
* entry() call will be invalid.
*
* @throws IOException raised on errors performing I/O.
*/
public void seekToEnd() throws IOException {
parkCursorAtEnd();
}
/**
* Move the cursor to the first entry whose key is greater than or equal
* to the input key. Synonymous to lowerBound(key, 0, key.length). The
* entry returned by the previous entry() call will be invalid.
*
* @param key
* The input key
* @throws IOException raised on errors performing I/O.
*/
public void lowerBound(byte[] key) throws IOException {
lowerBound(key, 0, key.length);
}
/**
* Move the cursor to the first entry whose key is greater than or equal
* to the input key. The entry returned by the previous entry() call will
* be invalid.
*
* @param key
* The input key
* @param keyOffset
* offset in the key buffer.
* @param keyLen
* key buffer length.
* @throws IOException raised on errors performing I/O.
*/
public void lowerBound(byte[] key, int keyOffset, int keyLen)
throws IOException {
seekTo(new ByteArray(key, keyOffset, keyLen), false);
}
/**
* Move the cursor to the first entry whose key is strictly greater than
* the input key. Synonymous to upperBound(key, 0, key.length). The entry
* returned by the previous entry() call will be invalid.
*
* @param key
* The input key
* @throws IOException raised on errors performing I/O.
*/
public void upperBound(byte[] key) throws IOException {
upperBound(key, 0, key.length);
}
/**
* Move the cursor to the first entry whose key is strictly greater than
* the input key. The entry returned by the previous entry() call will be
* invalid.
*
* @param key
* The input key
* @param keyOffset
* offset in the key buffer.
* @param keyLen
* key buffer length.
* @throws IOException raised on errors performing I/O.
*/
public void upperBound(byte[] key, int keyOffset, int keyLen)
throws IOException {
seekTo(new ByteArray(key, keyOffset, keyLen), true);
}
/**
* Move the cursor to the next key-value pair. The entry returned by the
* previous entry() call will be invalid.
*
* @return true if the cursor successfully moves. False when cursor is
* already at the end location and cannot be advanced.
* @throws IOException raised on errors performing I/O.
*/
public boolean advance() throws IOException {
if (atEnd()) {
return false;
}
int curBid = currentLocation.getBlockIndex();
long curRid = currentLocation.getRecordIndex();
long entriesInBlock = reader.getBlockEntryCount(curBid);
if (curRid + 1 >= entriesInBlock) {
if (endLocation.compareTo(curBid + 1, 0) <= 0) {
// last entry in TFile.
parkCursorAtEnd();
} else {
// last entry in Block.
initBlock(curBid + 1);
}
} else {
inBlockAdvance(1);
}
return true;
}
/**
* Load a compressed block for reading. Expecting blockIndex is valid.
*
* @throws IOException
*/
private void initBlock(int blockIndex) throws IOException {
klen = -1;
if (blkReader != null) {
try {
blkReader.close();
} finally {
blkReader = null;
}
}
blkReader = reader.getBlockReader(blockIndex);
currentLocation.set(blockIndex, 0);
}
private void parkCursorAtEnd() throws IOException {
klen = -1;
currentLocation.set(endLocation);
if (blkReader != null) {
try {
blkReader.close();
} finally {
blkReader = null;
}
}
}
/**
* Close the scanner. Release all resources. The behavior of using the
* scanner after calling close is not defined. The entry returned by the
* previous entry() call will be invalid.
*/
@Override
public void close() throws IOException {
parkCursorAtEnd();
}
/**
* Is cursor at the end location?
*
* @return true if the cursor is at the end location.
*/
public boolean atEnd() {
return (currentLocation.compareTo(endLocation) >= 0);
}
/**
* check whether we have already successfully obtained the key. It also
* initializes the valueInputStream.
*/
void checkKey() throws IOException {
if (klen >= 0) return;
if (atEnd()) {
throw new EOFException("No key-value to read");
}
klen = -1;
vlen = -1;
valueChecked = false;
klen = Utils.readVInt(blkReader);
blkReader.readFully(keyBuffer, 0, klen);
valueBufferInputStream.reset(blkReader);
if (valueBufferInputStream.isLastChunk()) {
vlen = valueBufferInputStream.getRemain();
}
}
/**
* Get an entry to access the key and value.
*
* @return The Entry object to access the key and value.
* @throws IOException raised on errors performing I/O.
*/
public Entry entry() throws IOException {
checkKey();
return new Entry();
}
/**
* Get the RecordNum corresponding to the entry pointed by the cursor.
* @return The RecordNum corresponding to the entry pointed by the cursor.
* @throws IOException raised on errors performing I/O.
*/
public long getRecordNum() throws IOException {
return reader.getRecordNumByLocation(currentLocation);
}
/**
* Internal API. Comparing the key at cursor to user-specified key.
*
* @param other
* user-specified key.
* @return negative if key at cursor is smaller than user key; 0 if equal;
* and positive if key at cursor greater than user key.
* @throws IOException
*/
int compareCursorKeyTo(RawComparable other) throws IOException {
checkKey();
return reader.compareKeys(keyBuffer, 0, klen, other.buffer(), other
.offset(), other.size());
}
/**
* Entry to a <Key, Value> pair.
*/
public | Scanner |
java | spring-projects__spring-framework | spring-messaging/src/main/java/org/springframework/messaging/simp/user/SimpSubscription.java | {
"start": 789,
"end": 1061
} | interface ____ {
/**
* Return the id associated of the subscription.
*/
String getId();
/**
* Return the session of the subscription.
*/
SimpSession getSession();
/**
* Return the subscription's destination.
*/
String getDestination();
}
| SimpSubscription |
java | junit-team__junit5 | platform-tests/src/test/java/org/junit/platform/commons/util/AnnotationUtilsTests.java | {
"start": 27446,
"end": 27522
} | class ____ implements TaggedInterface {
}
@Smoke
static | TaggedInterfaceClass |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/util/concurrent/Queues.java | {
"start": 6895,
"end": 8898
} | class ____<T> extends AtomicReference<@Nullable T> implements Queue<T> {
@Override
public boolean add(T t) {
while (!offer(t));
return true;
}
@Override
public boolean addAll(Collection<? extends T> c) {
return false;
}
@Override
public void clear() {
set(null);
}
@Override
public boolean contains(Object o) {
return Objects.equals(get(), o);
}
@Override
public boolean containsAll(Collection<?> c) {
return false;
}
@Override
public @Nullable T element() {
return get();
}
@Override
public boolean isEmpty() {
return get() == null;
}
@Override
public Iterator<T> iterator() {
return new QueueIterator<>(this);
}
@Override
public boolean offer(T t) {
if (get() != null) {
return false;
}
lazySet(t);
return true;
}
@Override
public @Nullable T peek() {
return get();
}
@Override
public @Nullable T poll() {
T v = get();
if (v != null) {
lazySet(null);
}
return v;
}
@Override
public @Nullable T remove() {
return getAndSet(null);
}
@Override
public boolean remove(Object o) {
return false;
}
@Override
public boolean removeAll(Collection<?> c) {
return false;
}
@Override
public boolean retainAll(Collection<?> c) {
return false;
}
@Override
public int size() {
return get() == null ? 0 : 1;
}
@Override
public Object[] toArray() {
T t = get();
if (t == null) {
return new Object[0];
}
return new Object[]{t};
}
@Override
@SuppressWarnings("unchecked")
@NullUnmarked
public <T1> T1[] toArray(T1[] a) {
int size = size();
if (a.length < size) {
a = (T1[]) java.lang.reflect.Array.newInstance(
a.getClass().getComponentType(), size);
}
if (size == 1) {
a[0] = (T1) get();
}
if (a.length > size) {
a[size] = null;
}
return a;
}
private static final long serialVersionUID = -6079491923525372331L;
}
static final | OneQueue |
java | apache__maven | its/core-it-suite/src/test/resources/mng-5805-pkg-type-mojo-configuration2/mng5805-plugin/src/main/java/org/apache/maven/its/mng5805/plugin/TestMojo.java | {
"start": 1096,
"end": 1546
} | class ____ extends AbstractMojo {
/**
*/
@Parameter(defaultValue = "org.apache.maven.its.mng5805.DoesNotExist")
private String className;
public void execute() throws MojoExecutionException {
getLog().info("CLASS_NAME=" + className);
try {
Class.forName(className);
} catch (ClassNotFoundException e) {
throw new MojoExecutionException(e.getMessage(), e);
}
}
}
| TestMojo |
java | netty__netty | transport-classes-io_uring/src/main/java/io/netty/channel/uring/IoUringDatagramChannel.java | {
"start": 14092,
"end": 28402
} | class ____ implements ChannelOutboundBuffer.MessageProcessor {
private int written;
@Override
public boolean processMessage(Object msg) {
if (scheduleWrite(msg, written == 0)) {
written++;
return true;
}
return false;
}
int write(ChannelOutboundBuffer in) {
written = 0;
try {
in.forEachFlushedMessage(this);
} catch (Exception e) {
// This should never happen as our processMessage(...) never throws.
throw new IllegalStateException(e);
}
return written;
}
}
@Override
protected void readComplete0(byte op, int res, int flags, short data, int outstanding) {
assert outstanding != -1 : "multi-shot not implemented yet";
final IoUringRecvByteAllocatorHandle allocHandle = recvBufAllocHandle();
final ChannelPipeline pipeline = pipeline();
ByteBuf byteBuf = this.readBuffer;
assert byteBuf != null;
try {
recvmsgComplete(pipeline, allocHandle, byteBuf, res, flags, data, outstanding);
} catch (Throwable t) {
Throwable e = (connected && t instanceof NativeIoException) ?
translateForConnected((NativeIoException) t) : t;
pipeline.fireExceptionCaught(e);
}
}
private void recvmsgComplete(ChannelPipeline pipeline, IoUringRecvByteAllocatorHandle allocHandle,
ByteBuf byteBuf, int res, int flags, int idx, int outstanding)
throws IOException {
MsgHdrMemory hdr = recvmsgHdrs.hdr(idx);
if (res < 0) {
if (res != Native.ERRNO_ECANCELED_NEGATIVE) {
// If res is negative we should pass it to ioResult(...) which will either throw
// or convert it to 0 if we could not read because the socket was not readable.
allocHandle.lastBytesRead(ioResult("io_uring recvmsg", res));
}
} else {
allocHandle.lastBytesRead(res);
if (hdr.hasPort(IoUringDatagramChannel.this)) {
allocHandle.incMessagesRead(1);
DatagramPacket packet = hdr.get(
IoUringDatagramChannel.this, registration().attachment(), byteBuf, res);
pipeline.fireChannelRead(packet);
}
}
// Reset the id as this read was completed and so don't need to be cancelled later.
recvmsgHdrs.setId(idx, MsgHdrMemoryArray.NO_ID);
if (outstanding == 0) {
// There are no outstanding completion events, release the readBuffer and see if we need to schedule
// another one or if the user will do it.
this.readBuffer.release();
this.readBuffer = null;
recvmsgHdrs.clear();
if (res != Native.ERRNO_ECANCELED_NEGATIVE) {
if (allocHandle.lastBytesRead() > 0 &&
allocHandle.continueReading(UncheckedBooleanSupplier.TRUE_SUPPLIER) &&
// If IORING_CQE_F_SOCK_NONEMPTY is supported we should check for it first before
// trying to schedule a read. If it's supported and not part of the flags we know for sure
// that the next read (which would be using Native.MSG_DONTWAIT) will complete without
// be able to read any data. This is useless work and we can skip it.
(!IoUring.isCqeFSockNonEmptySupported() ||
(flags & Native.IORING_CQE_F_SOCK_NONEMPTY) != 0)) {
// Let's schedule another read.
scheduleRead(false);
} else {
// the read was completed with EAGAIN.
allocHandle.readComplete();
pipeline.fireChannelReadComplete();
}
}
}
}
@Override
protected int scheduleRead0(boolean first, boolean socketIsEmpty) {
final IoUringRecvByteAllocatorHandle allocHandle = recvBufAllocHandle();
ByteBuf byteBuf = allocHandle.allocate(alloc());
assert readBuffer == null;
readBuffer = byteBuf;
int writable = byteBuf.writableBytes();
allocHandle.attemptedBytesRead(writable);
int datagramSize = ((IoUringDatagramChannelConfig) config()).getMaxDatagramPayloadSize();
int numDatagram = datagramSize == 0 ? 1 : Math.max(1, byteBuf.writableBytes() / datagramSize);
int scheduled = scheduleRecvmsg(byteBuf, numDatagram, datagramSize);
if (scheduled == 0) {
// We could not schedule any recvmmsg so we need to release the buffer as there will be no
// completion event.
readBuffer = null;
byteBuf.release();
}
return scheduled;
}
private int scheduleRecvmsg(ByteBuf byteBuf, int numDatagram, int datagramSize) {
int writable = byteBuf.writableBytes();
long bufferAddress = IoUring.memoryAddress(byteBuf) + byteBuf.writerIndex();
if (numDatagram <= 1) {
return scheduleRecvmsg0(bufferAddress, writable, true) ? 1 : 0;
}
int i = 0;
// Add multiple IORING_OP_RECVMSG to the submission queue. This basically emulates recvmmsg(...)
for (; i < numDatagram && writable >= datagramSize; i++) {
if (!scheduleRecvmsg0(bufferAddress, datagramSize, i == 0)) {
break;
}
bufferAddress += datagramSize;
writable -= datagramSize;
}
return i;
}
private boolean scheduleRecvmsg0(long bufferAddress, int bufferLength, boolean first) {
MsgHdrMemory msgHdrMemory = recvmsgHdrs.nextHdr();
if (msgHdrMemory == null) {
// We can not continue reading before we did not submit the recvmsg(s) and received the results.
return false;
}
msgHdrMemory.set(socket, null, bufferAddress, bufferLength, (short) 0);
int fd = fd().intValue();
int msgFlags = first ? 0 : Native.MSG_DONTWAIT;
IoRegistration registration = registration();
// We always use idx here so we can detect if no idx was used by checking if data < 0 in
// readComplete0(...)
IoUringIoOps ops = IoUringIoOps.newRecvmsg(
fd, (byte) 0, msgFlags, msgHdrMemory.address(), msgHdrMemory.idx());
long id = registration.submit(ops);
if (id == 0) {
// Submission failed we don't used the MsgHdrMemory and so should give it back.
recvmsgHdrs.restoreNextHdr(msgHdrMemory);
return false;
}
recvmsgHdrs.setId(msgHdrMemory.idx(), id);
return true;
}
@Override
boolean writeComplete0(byte op, int res, int flags, short data, int outstanding) {
ChannelOutboundBuffer outboundBuffer = outboundBuffer();
// Reset the id as this write was completed and so don't need to be cancelled later.
sendmsgHdrs.setId(data, MsgHdrMemoryArray.NO_ID);
sendmsgResArray[data] = res;
// Store the result so we can handle it as soon as we have no outstanding writes anymore.
if (outstanding == 0) {
// All writes are done as part of a batch. Let's remove these from the ChannelOutboundBuffer
boolean writtenSomething = false;
int numWritten = sendmsgHdrs.length();
sendmsgHdrs.clear();
for (int i = 0; i < numWritten; i++) {
writtenSomething |= removeFromOutboundBuffer(
outboundBuffer, sendmsgResArray[i], "io_uring sendmsg");
}
return writtenSomething;
}
return true;
}
private boolean removeFromOutboundBuffer(ChannelOutboundBuffer outboundBuffer, int res, String errormsg) {
if (res >= 0) {
// When using Datagram we should consider the message written as long as res is not negative.
return outboundBuffer.remove();
}
if (res == Native.ERRNO_ECANCELED_NEGATIVE) {
return false;
}
try {
return ioResult(errormsg, res) != 0;
} catch (Throwable cause) {
return outboundBuffer.remove(cause);
}
}
@Override
void connectComplete(byte op, int res, int flags, short data) {
if (res >= 0) {
connected = true;
}
super.connectComplete(op, res, flags, data);
}
@Override
protected int scheduleWriteMultiple(ChannelOutboundBuffer in) {
return writeProcessor.write(in);
}
@Override
protected int scheduleWriteSingle(Object msg) {
return scheduleWrite(msg, true) ? 1 : 0;
}
private boolean scheduleWrite(Object msg, boolean first) {
final ByteBuf data;
final InetSocketAddress remoteAddress;
final int segmentSize;
if (msg instanceof AddressedEnvelope) {
@SuppressWarnings("unchecked")
AddressedEnvelope<ByteBuf, InetSocketAddress> envelope =
(AddressedEnvelope<ByteBuf, InetSocketAddress>) msg;
data = envelope.content();
remoteAddress = envelope.recipient();
if (msg instanceof SegmentedDatagramPacket) {
segmentSize = ((SegmentedDatagramPacket) msg).segmentSize();
} else {
segmentSize = 0;
}
} else {
data = (ByteBuf) msg;
remoteAddress = (InetSocketAddress) remoteAddress();
segmentSize = 0;
}
long bufferAddress = IoUring.memoryAddress(data);
return scheduleSendmsg(remoteAddress, bufferAddress, data.readableBytes(), segmentSize, first);
}
private boolean scheduleSendmsg(InetSocketAddress remoteAddress, long bufferAddress,
int bufferLength, int segmentSize, boolean first) {
MsgHdrMemory hdr = sendmsgHdrs.nextHdr();
if (hdr == null) {
// There is no MsgHdrMemory left to use. We need to submit and wait for the writes to complete
// before we can write again.
return false;
}
hdr.set(socket, remoteAddress, bufferAddress, bufferLength, (short) segmentSize);
int fd = fd().intValue();
int msgFlags = first ? 0 : Native.MSG_DONTWAIT;
IoRegistration registration = registration();
IoUringIoOps ops = IoUringIoOps.newSendmsg(fd, (byte) 0, msgFlags, hdr.address(), hdr.idx());
long id = registration.submit(ops);
if (id == 0) {
// Submission failed we don't used the MsgHdrMemory and so should give it back.
sendmsgHdrs.restoreNextHdr(hdr);
return false;
}
sendmsgHdrs.setId(hdr.idx(), id);
return true;
}
@Override
public void unregistered() {
super.unregistered();
sendmsgHdrs.release();
recvmsgHdrs.release();
}
}
private static IOException translateForConnected(NativeIoException e) {
// We need to correctly translate connect errors to match NIO behaviour.
if (e.expectedErr() == Errors.ERROR_ECONNREFUSED_NEGATIVE) {
PortUnreachableException error = new PortUnreachableException(e.getMessage());
error.initCause(e);
return error;
}
return e;
}
/**
* Returns {@code true} if the usage of {@link io.netty.channel.unix.SegmentedDatagramPacket} is supported.
*
* @return {@code true} if supported, {@code false} otherwise.
*/
public static boolean isSegmentedDatagramPacketSupported() {
return IoUring.isAvailable();
}
@Override
protected void cancelOutstandingReads(IoRegistration registration, int numOutstandingReads) {
if (numOutstandingReads > 0) {
int canceled = cancel(registration, Native.IORING_OP_RECVMSG, recvmsgHdrs);
assert canceled == numOutstandingReads;
}
}
@Override
protected void cancelOutstandingWrites(IoRegistration registration, int numOutstandingWrites) {
if (numOutstandingWrites > 0) {
int canceled = cancel(registration, Native.IORING_OP_SENDMSG, sendmsgHdrs);
assert canceled == numOutstandingWrites;
}
}
private int cancel(IoRegistration registration, byte op, MsgHdrMemoryArray array) {
int cancelled = 0;
for (int idx = 0; idx < array.length(); idx++) {
long id = array.id(idx);
if (id == MsgHdrMemoryArray.NO_ID) {
continue;
}
// Let's try to cancel outstanding op as these might be submitted and waiting for data
// (via fastpoll).
IoUringIoOps ops = IoUringIoOps.newAsyncCancel((byte) 0, id, op);
registration.submit(ops);
cancelled++;
}
return cancelled;
}
@Override
protected boolean socketIsEmpty(int flags) {
return IoUring.isCqeFSockNonEmptySupported() && (flags & Native.IORING_CQE_F_SOCK_NONEMPTY) == 0;
}
@Override
boolean isPollInFirst() {
return false;
}
}
| WriteProcessor |
java | apache__thrift | lib/javame/src/org/apache/thrift/meta_data/FieldValueMetaData.java | {
"start": 1807,
"end": 2116
} | class ____ {
public final byte type;
public FieldValueMetaData(byte type){
this.type = type;
}
public boolean isStruct() {
return type == TType.STRUCT;
}
public boolean isContainer() {
return type == TType.LIST || type == TType.MAP || type == TType.SET;
}
}
| FieldValueMetaData |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/pool/ha/selector/NamedDataSourceSelector.java | {
"start": 883,
"end": 2771
} | class ____ implements DataSourceSelector {
public static final String DEFAULT_NAME = "default";
private HighAvailableDataSource highAvailableDataSource;
private ThreadLocal<String> targetDataSourceName = new ThreadLocal<String>();
private String defaultName = DEFAULT_NAME;
public NamedDataSourceSelector(HighAvailableDataSource highAvailableDataSource) {
this.highAvailableDataSource = highAvailableDataSource;
}
@Override
public void init() {
}
@Override
public void destroy() {
}
@Override
public String getName() {
return DataSourceSelectorEnum.BY_NAME.getName();
}
@Override
public DataSource get() {
if (highAvailableDataSource == null) {
return null;
}
Map<String, DataSource> dataSourceMap = highAvailableDataSource.getAvailableDataSourceMap();
if (dataSourceMap == null || dataSourceMap.isEmpty()) {
return null;
}
if (dataSourceMap.size() == 1) {
for (DataSource v : dataSourceMap.values()) {
return v;
}
}
String name = getTarget();
if (name == null) {
if (dataSourceMap.get(getDefaultName()) != null) {
return dataSourceMap.get(getDefaultName());
}
} else {
return dataSourceMap.get(name);
}
return null;
}
@Override
public void setTarget(String name) {
targetDataSourceName.set(name);
}
public String getTarget() {
return targetDataSourceName.get();
}
public void resetDataSourceName() {
targetDataSourceName.remove();
}
public String getDefaultName() {
return defaultName;
}
public void setDefaultName(String defaultName) {
this.defaultName = defaultName;
}
}
| NamedDataSourceSelector |
java | dropwizard__dropwizard | dropwizard-testing/src/test/java/io/dropwizard/testing/junit5/DropwizardAppExtensionResetConfigOverrideTest.java | {
"start": 368,
"end": 1686
} | class ____ {
private final DropwizardAppExtension<TestConfiguration> dropwizardAppExtension = new DropwizardAppExtension<>(
TestApplication.class,
"test-config.yaml",
new ResourceConfigurationSourceProvider(),
"app-rule-reset",
config("app-rule-reset", "message", "A new way to say Hooray!"));
@Test
void test2() throws Exception {
dropwizardAppExtension.before();
assertThat(System.getProperty("app-rule-reset.message")).isEqualTo("A new way to say Hooray!");
assertThat(System.getProperty("app-rule-reset.extra")).isNull();
dropwizardAppExtension.after();
System.setProperty("app-rule-reset.extra", "Some extra system property");
dropwizardAppExtension.before();
assertThat(System.getProperty("app-rule-reset.message")).isEqualTo("A new way to say Hooray!");
assertThat(System.getProperty("app-rule-reset.extra")).isEqualTo("Some extra system property");
dropwizardAppExtension.after();
assertThat(System.getProperty("app-rule-reset.message")).isNull();
assertThat(System.getProperty("app-rule-reset.extra")).isEqualTo("Some extra system property");
System.clearProperty("app-rule-reset.extra");
}
}
| DropwizardAppExtensionResetConfigOverrideTest |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/ser/filter/JsonIncludeTest.java | {
"start": 4239,
"end": 4444
} | class ____ extends NonDefault<Calendar> {
public NonDefaultCalendar(Calendar v) { super(v); }
}
// [databind#1327]
@JsonInclude(JsonInclude.Include.NON_EMPTY)
static | NonDefaultCalendar |
java | google__auto | factory/src/main/java/com/google/auto/factory/processor/Elements2.java | {
"start": 2705,
"end": 3100
} | interface ____.");
}
TypeMirror subExecutableTypeMirror =
types.asMemberOf(MoreTypes.asDeclared(subTypeMirror), executableElement);
if (!subExecutableTypeMirror.getKind().equals(TypeKind.EXECUTABLE)) {
throw new IllegalStateException("Expected subExecutableTypeMirror to be an executable type.");
}
return MoreTypes.asExecutable(subExecutableTypeMirror);
}
}
| type |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/HadoopPlatform.java | {
"start": 1625,
"end": 3234
} | class ____ extends Platform {
private static final Logger LOG =
LoggerFactory.getLogger(HadoopPlatform.class);
public HadoopPlatform() throws IOException {
}
@Override
public void init() throws IOException {
registerKey(NullWritable.class.getName(), NullWritableSerializer.class);
registerKey(Text.class.getName(), TextSerializer.class);
registerKey(LongWritable.class.getName(), LongWritableSerializer.class);
registerKey(IntWritable.class.getName(), IntWritableSerializer.class);
registerKey(Writable.class.getName(), DefaultSerializer.class);
registerKey(BytesWritable.class.getName(), BytesWritableSerializer.class);
registerKey(BooleanWritable.class.getName(), BoolWritableSerializer.class);
registerKey(ByteWritable.class.getName(), ByteWritableSerializer.class);
registerKey(FloatWritable.class.getName(), FloatWritableSerializer.class);
registerKey(DoubleWritable.class.getName(), DoubleWritableSerializer.class);
registerKey(VIntWritable.class.getName(), VIntWritableSerializer.class);
registerKey(VLongWritable.class.getName(), VLongWritableSerializer.class);
LOG.info("Hadoop platform inited");
}
@Override
public boolean support(String keyClassName, INativeSerializer<?> serializer, JobConf job) {
if (keyClassNames.contains(keyClassName)
&& serializer instanceof INativeComparable) {
return true;
} else {
return false;
}
}
@Override
public boolean define(Class<?> comparatorClass) {
return false;
}
@Override
public String name() {
return "Hadoop";
}
}
| HadoopPlatform |
java | apache__camel | components/camel-ftp/src/test/java/org/apache/camel/component/file/remote/integration/FromFtpUseListFalseIT.java | {
"start": 1330,
"end": 2958
} | class ____ extends FtpServerTestSupport {
private String getFtpUrl() {
return "ftp://admin@localhost:{{ftp.server.port}}/nolist/?password=admin"
+ "&stepwise=false&useList=false&ignoreFileNotFoundOrPermissionError=true&fileName=report.txt&delete=true";
}
@BeforeEach
public void prepareFtpServer() throws Exception {
// prepares the FTP Server by creating a file on the server that we want
// to unit
// test that we can pool and store as a local file
Endpoint endpoint
= context.getEndpoint("ftp://admin@localhost:{{ftp.server.port}}/nolist/?password=admin&binary=false");
Exchange exchange = endpoint.createExchange();
exchange.getIn().setBody("Hello World from FTPServer");
exchange.getIn().setHeader(Exchange.FILE_NAME, "report.txt");
Producer producer = endpoint.createProducer();
producer.start();
producer.process(exchange);
producer.stop();
}
@Test
public void testUseListFalse() {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("Hello World from FTPServer");
// just allow to poll a few more times, but we should only get the file
// once
await().atMost(2, TimeUnit.SECONDS)
.untilAsserted(() -> mock.assertIsSatisfied());
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from(getFtpUrl()).to("mock:result");
}
};
}
}
| FromFtpUseListFalseIT |
java | apache__camel | components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/spring/config/RouteRefMultipleRefsTest.java | {
"start": 1086,
"end": 1708
} | class ____ extends SpringTestSupport {
@Test
public void testRouteRefOutside() throws Exception {
getMockEndpoint("mock:result").expectedMessageCount(1);
getMockEndpoint("mock:foo").expectedMessageCount(1);
template.sendBody("direct:start", "Hello World");
template.sendBody("direct:foo", "Bye World");
assertMockEndpointsSatisfied();
}
@Override
protected AbstractXmlApplicationContext createApplicationContext() {
return new ClassPathXmlApplicationContext("org/apache/camel/spring/config/RouteRefMultipleRefsTest.xml");
}
}
| RouteRefMultipleRefsTest |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java | {
"start": 26568,
"end": 27107
} | class ____ implements KeySerializer<Integer> {
public static final IntKeySerializer INSTANCE = new IntKeySerializer();
@Override
public void writeKey(Integer key, StreamOutput out) throws IOException {
out.writeInt(key);
}
@Override
public Integer readKey(StreamInput in) throws IOException {
return in.readInt();
}
}
/**
* Serializes Integer keys of a map as a VInt. Requires keys to be positive.
*/
private static final | IntKeySerializer |
java | micronaut-projects__micronaut-core | function/src/main/java/io/micronaut/function/executor/FunctionInitializer.java | {
"start": 1318,
"end": 5031
} | class ____ extends AbstractExecutor {
protected final boolean closeContext;
private FunctionExitHandler functionExitHandler = new DefaultFunctionExitHandler();
/**
* Constructor.
*/
public FunctionInitializer() {
ApplicationContext applicationContext = buildApplicationContext(null);
startThis(applicationContext);
injectThis(applicationContext);
MutableAnnotationMetadata annotationMetadata = new MutableAnnotationMetadata();
// the runtime registered bean should be lower priority than the existing bean
// used for dependency injecting the instance
annotationMetadata.addDeclaredAnnotation(Secondary.class.getName(), Collections.emptyMap());
applicationContext.registerBeanDefinition(
RuntimeBeanDefinition.builder(this)
.annotationMetadata(annotationMetadata)
.build()
);
this.closeContext = true;
}
/**
* Start a function for an existing {@link ApplicationContext}.
*
* @param applicationContext The application context
*/
protected FunctionInitializer(ApplicationContext applicationContext) {
this(applicationContext, true);
}
/**
* Start a function for an existing {@link ApplicationContext}.
*
* @param applicationContext The application context
* @param inject inject this into the application flag
*/
protected FunctionInitializer(ApplicationContext applicationContext, boolean inject) {
this.applicationContext = applicationContext;
this.closeContext = false;
if (inject) {
injectThis(applicationContext);
}
}
@Override
@Internal
public void close() {
if (closeContext && applicationContext != null) {
applicationContext.close();
}
}
/**
* This method is designed to be called when using the {@link FunctionInitializer} from a static Application main method.
*
* @param args The arguments passed to main
* @param supplier The function that executes this function
*/
public void run(String[] args, Function<ParseContext, ?> supplier) {
ApplicationContext applicationContext = this.applicationContext;
this.functionExitHandler = applicationContext.findBean(FunctionExitHandler.class).orElse(this.functionExitHandler);
ParseContext context = new ParseContext(args);
try {
Object result = supplier.apply(context);
if (result != null) {
LocalFunctionRegistry bean = applicationContext.getBean(LocalFunctionRegistry.class);
StreamFunctionExecutor.encode(applicationContext.getEnvironment(), bean, result.getClass(), result, System.out);
functionExitHandler.exitWithSuccess();
}
} catch (Exception e) {
functionExitHandler.exitWithError(e, context.debug);
}
}
/**
* Start this environment.
*
* @param applicationContext The application context
*/
protected void startThis(ApplicationContext applicationContext) {
startEnvironment(applicationContext);
}
/**
* Injects this instance.
*
* @param applicationContext The {@link ApplicationContext}
*/
protected void injectThis(ApplicationContext applicationContext) {
if (applicationContext != null) {
applicationContext.inject(this);
}
}
/**
* The parse context supplied from the {@link #run(String[], Function)} method. Consumers can use the {@link #get(Class)} method to obtain the data is the desired type.
*/
public | FunctionInitializer |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/LagInfo.java | {
"start": 1110,
"end": 3222
} | class ____ {
private final long currentOffsetPosition;
private final long endOffsetPosition;
private final long offsetLag;
LagInfo(final long currentOffsetPosition, final long endOffsetPosition) {
this.currentOffsetPosition = currentOffsetPosition;
this.endOffsetPosition = endOffsetPosition;
this.offsetLag = Math.max(0, endOffsetPosition - currentOffsetPosition);
}
/**
* Get the current maximum offset on the store partition's changelog topic, that has been successfully written into
* the store partition's state store.
*
* @return current consume offset for standby/restoring store partitions & simply end offset for active store partition replicas
*/
public long currentOffsetPosition() {
return this.currentOffsetPosition;
}
/**
* Get the end offset position for this store partition's changelog topic on the Kafka brokers.
*
* @return last offset written to the changelog topic partition
*/
public long endOffsetPosition() {
return this.endOffsetPosition;
}
/**
* Get the measured lag between current and end offset positions, for this store partition replica
*
* @return lag as measured by message offsets
*/
public long offsetLag() {
return this.offsetLag;
}
@Override
public boolean equals(final Object obj) {
if (!(obj instanceof LagInfo)) {
return false;
}
final LagInfo other = (LagInfo) obj;
return currentOffsetPosition == other.currentOffsetPosition
&& endOffsetPosition == other.endOffsetPosition
&& this.offsetLag == other.offsetLag;
}
@Override
public int hashCode() {
return Objects.hash(currentOffsetPosition, endOffsetPosition, offsetLag);
}
@Override
public String toString() {
return "LagInfo {" +
" currentOffsetPosition=" + currentOffsetPosition +
", endOffsetPosition=" + endOffsetPosition +
", offsetLag=" + offsetLag +
'}';
}
}
| LagInfo |
java | google__dagger | javatests/dagger/internal/codegen/ModuleFactoryGeneratorTest.java | {
"start": 64453,
"end": 64683
} | interface ____ {}");
Source fooImplFile =
CompilerTests.javaSource(
"test.FooImpl",
"package test;",
"",
"import javax.inject.Inject;",
"",
"final | Foo |
java | mapstruct__mapstruct | processor/src/main/java/org/mapstruct/ap/internal/model/source/builtin/LocalDateTimeToXmlGregorianCalendar.java | {
"start": 624,
"end": 1471
} | class ____ extends AbstractToXmlGregorianCalendar {
private final Parameter parameter;
private final Set<Type> importTypes;
public LocalDateTimeToXmlGregorianCalendar(TypeFactory typeFactory) {
super( typeFactory );
this.parameter = new Parameter( "localDateTime", typeFactory.getType( LocalDateTime.class ) );
this.importTypes = asSet(
parameter.getType(),
typeFactory.getType( XmlConstants.JAVAX_XML_DATATYPE_CONSTANTS ),
typeFactory.getType( ChronoField.class )
);
}
@Override
public Set<Type> getImportTypes() {
Set<Type> result = super.getImportTypes();
result.addAll( importTypes );
return result;
}
@Override
public Parameter getParameter() {
return parameter;
}
}
| LocalDateTimeToXmlGregorianCalendar |
java | bumptech__glide | integration/sqljournaldiskcache/src/main/java/com/bumptech/glide/integration/sqljournaldiskcache/JournaledLruDiskCache.java | {
"start": 1005,
"end": 16722
} | class ____ {
private static final String TAG = "DiskCache";
private static final String CANARY_FILE_NAME = "cache_canary";
// You must restart the app after enabling these logs for the change to take affect.
// We cache isLoggable to avoid the performance hit of checking repeatedly.
private static final boolean LOG_WARN = Log.isLoggable(TAG, Log.WARN);
private static final boolean LOG_VERBOSE = Log.isLoggable(TAG, Log.VERBOSE);
// The fraction of the maximum byte size of the cache we will allow the cache to go over before
// triggering an eviction.
private static final float DEFAULT_EVICTION_SLOP_MULTIPLIER = 0.05f;
// The number of items we will queue to update the date modified time of in batches.
private static final int DEFAULT_UPDATE_MODIFIED_TIME_BATCH_SIZE = 20;
static final String TEMP_FILE_INDICATOR = ".tmp";
private final File cacheDirectory;
private final FileSystem fileSystem;
private final Journal journal;
// We use this File to determine if the system has wiped out our cache directory, which it may do
// at any time. If the File is not present, then either we've never opened the cache for the given
// directory before, or the cache was wiped.
private final File canaryFile;
private final EvictionManager evictionManager;
private final RecoveryManager recoveryManager;
private final EntryCache entries = new EntryCache();
private volatile boolean isOpen;
/**
* @param cacheDirectory The directory in which the cache should store its files (Warning: the
* cache will delete all Files in the given directory. The directory should not be used to
* store any other content).
* @param maximumSizeBytes The target maximum size in bytes. The cache size may briefly exceed
* this size by up to around 25mb depending on the size, thread scheduling, and the number of
* failed requests.
*/
JournaledLruDiskCache(
File cacheDirectory,
DiskCacheDbHelper diskCacheDbHelper,
long maximumSizeBytes,
long staleEvictionThresholdMs,
Clock clock) {
this(
cacheDirectory,
diskCacheDbHelper,
new FileSystem() {},
maximumSizeBytes,
getBackgroundLooper(),
DEFAULT_EVICTION_SLOP_MULTIPLIER,
DEFAULT_UPDATE_MODIFIED_TIME_BATCH_SIZE,
staleEvictionThresholdMs,
clock);
}
@VisibleForTesting
JournaledLruDiskCache(
File cacheDirectory,
DiskCacheDbHelper diskCacheDbHelper,
FileSystem fileSystem,
long maximumSizeBytes,
Looper workLooper,
float slopMultiplier,
int updateModifiedTimeBatchSize,
long staleEvictionThresholdMs,
Clock clock) {
Preconditions.checkArgument(
updateModifiedTimeBatchSize >= 1, "updated modified time batch size must be >= 1");
this.cacheDirectory = cacheDirectory;
this.fileSystem = fileSystem;
journal = new Journal(diskCacheDbHelper, workLooper, updateModifiedTimeBatchSize, clock);
canaryFile = new File(cacheDirectory, CANARY_FILE_NAME);
evictionManager =
new EvictionManager(
this,
cacheDirectory,
fileSystem,
journal,
workLooper,
maximumSizeBytes,
slopMultiplier,
staleEvictionThresholdMs,
clock);
recoveryManager = new RecoveryManager(this, cacheDirectory, journal, workLooper);
}
private static Looper getBackgroundLooper() {
HandlerThread workThread =
new HandlerThread("disk_cache_journal", Process.THREAD_PRIORITY_BACKGROUND);
workThread.start();
return workThread.getLooper();
}
@SuppressWarnings("checkstyle:UnnecessaryParentheses") // Readability
private void openIfNotOpen() {
if (!isOpen) {
synchronized (this) {
if (!isOpen) {
boolean createdDirectory =
cacheDirectory.mkdirs() || (cacheDirectory.exists() && cacheDirectory.isDirectory());
if (!createdDirectory) {
throw new IllegalStateException("Failed to create cache directory: " + cacheDirectory);
}
journal.open();
isOpen = true;
recoveryManager.triggerRecovery();
}
}
}
}
// TODO(judds): rather than polling, we should use Android's FileObserver.
private void verifyCanaryOrClear() {
if (fileSystem.exists(canaryFile)) {
return;
}
synchronized (this) {
if (fileSystem.exists(canaryFile)) {
return;
}
if (LOG_WARN) {
Log.w(TAG, "Failed to find canary file, clearing disk cache");
}
clear();
}
}
private void touchCanaryFile() {
try {
if (!fileSystem.createNewFile(canaryFile) && LOG_WARN) {
Log.w(TAG, "Failed to create new canary file");
}
} catch (IOException e) {
if (LOG_WARN) {
Log.w(TAG, "Threw creating canary", e);
}
}
}
long getCurrentSizeBytes() {
return journal.getCurrentSizeBytes();
}
/**
* Makes a best effort attempt to delete all Files and clear the journal.
*
* <p>In progress writes may still complete and/or leave behind partial data.
*/
public synchronized void clear() {
if (LOG_WARN) {
Log.w(TAG, "Clearing cache and deleting all entries!");
}
fileSystem.deleteAll(cacheDirectory);
journal.clear();
isOpen = false;
entries.clear();
openIfNotOpen();
touchCanaryFile();
}
/**
* Attempts to delete any content currently in the cache for the given key.
*
* <p>If no entry for the given key is found, this method will silently fail. If an entry is
* found, it is possible the File deletion will fail and be re-attempted in the future.
*/
public void delete(String key) {
delete(Collections.singletonList(key));
}
List<String> delete(List<String> keys) {
journal.markPendingDelete(keys);
List<String> successfullyDeleted = new ArrayList<>(keys.size());
for (String key : keys) {
EntryCache.Entry entry = entries.get(key);
entry.acquireWriteLock();
try {
File file = getCacheFile(key);
if (fileSystem.delete(file)) {
successfullyDeleted.add(key);
} else if (LOG_WARN) {
Log.w(TAG, "Failed to delete file: " + file);
}
entry.setNotPresent();
} finally {
entry.releaseWriteLock();
}
}
journal.delete(successfullyDeleted);
return successfullyDeleted;
}
/**
* Returns a File committed previously for the given key, or {@code null} if no such File exists.
*
* <p>If a write is in progress but not yet committed for the given key, this method will return
* {@code null} immediately, just as if the key were simply not present.
*/
public File get(String key) {
long startTime = getLogTime();
openIfNotOpen();
final File result;
EntryCache.Entry entry = entries.get(key);
entry.acquireReadLock();
try {
if (entry.isStateKnown()) {
result = entry.isPresent() ? entry.getFile() : null;
} else {
File cacheFile = getCacheFile(key);
if (fileSystem.exists(cacheFile)) {
entry.setPresent(cacheFile);
result = cacheFile;
} else {
entry.setNotPresent();
result = null;
}
}
if (result != null) {
journal.get(key);
}
if (LOG_VERBOSE) {
Log.v(TAG, "Completed get in: " + getElapsedTime(startTime) + ", key: " + key);
}
} finally {
entry.releaseReadLock();
}
return result;
}
/**
* Starts a put for the given key and returns a temporary {@link File} to which the caller can
* write data, or {@code null} if an edit is already in progress for the given Key, or if a
* committed entry already exists for the given key.
*
* <p>Callers should call {@link #commitPut(String, File)} with the given key and the {@link File}
* returned from this method after they finish writing data to make the data they have written
* available to calls to {@link #get(String)}. If an error occurs while writing data, callers can
* omit calling {@link #commitPut(String, File)} and use {@link #abortPutIfNotCommitted(String,
* File)} to cleanup any partial {@link File Files}.
*
* <p>Callers must call {@link #abortPutIfNotCommitted(String, File)} regardless of whether or not
* their write succeeds. The expected pattern is as follows:
*
* <pre>{@code
* File tempFile = cache.beginPut(key);
* try {
* if (tempFile != null && writeToFile(someData, tempFile)) {
* cache.commitPut(key, tempFile);
* }
* } finally {
* cache.abortIfNotCommitted(key, tempFile);
* }
* }</pre>
*
* <p>Until the caller calls {@link #abortPutIfNotCommitted(String, File)}, a lock is held that
* will block future calls to this method for the given key.
*
* <p>The returned {@link File} may contain partial data if a previous write to this key failed.
* Callers should not assume it is safe to append to the File without first clearing it.
*/
@Nullable
public File beginPut(String key) {
long startTime = getLogTime();
openIfNotOpen();
verifyCanaryOrClear();
EntryCache.Entry entry = entries.get(key);
entry.acquireWriteLock();
File permanentFile = getCacheFile(key);
if (fileSystem.exists(permanentFile)) {
return null;
}
File result = getTempFile(key);
if (LOG_VERBOSE) {
Log.v(TAG, "Completed begin put in: " + getElapsedTime(startTime) + ", key: " + key);
}
return result;
}
/**
* Updates the size of the cache based on the data in the given temporary file and renames the
* given temporary File to its permanent equivalent and makes it available to calls from {@link
* #get(String)}.
*
* <p>The given {@link File} must be a {@link File} returned from {@link #get(String)} for the
* given key. No validation is performed to verify either that the given {@link File} is a
* legitimate temporary file from this cache or that the given {@link File} matches the given key.
*
* <p>It is possible this commit may fail silently, there is no guarantee that the data in the
* given {@link File} will actually be available from {@link #get(String)}} when this method
* completes. In practice commits should fail rarely unless insufficient storage is available or
* the cache's directory or files are manipulated by a third party.
*
* <p>If the commit does fail, it will do so in one of two ways:
*
* <ul>
* <li>Prior to or while writing the entry to the journal
* <li>After writing the entry to the journal prior to or while renaming the temporary file to
* the permanent file.
* </ul>
*
* If the commit fails prior to writing the entry to the journal, the dangling temporary File will
* be found during recovery and deleted. If the commit fails after writing the entry to the
* journal, the temporary file will be found during recovery and deleted and the corresponding
* journal entry will also be deleted. The absence of a temporary File for a given key is assumed
* to mean that either no entry exists, or the entry is committed and may be read.
*
* @throws IllegalStateException If this method wasn't preceded by a call to {@link
* #beginPut(String)} for the given key.
*/
public void commitPut(String key, File temp) {
long startTime = getLogTime();
long totalBytesAdded = fileSystem.length(temp);
journal.put(key, totalBytesAdded);
if (LOG_VERBOSE) {
Log.v(TAG, "Completed insertIntoDb in: " + getElapsedTime(startTime));
}
long startRenameTime = getLogTime();
File permanentFile = getCacheFile(key);
boolean isRenameSuccessful = fileSystem.rename(temp, permanentFile);
// If we fail to rename the file, we will try to recover in our next recovery phase.
if (isRenameSuccessful) {
if (LOG_VERBOSE) {
Log.v(TAG, "Successfully renamed in: " + getElapsedTime(startRenameTime));
}
EntryCache.Entry entry = entries.get(key);
entry.setPresent(permanentFile);
} else if (LOG_WARN) {
Log.w(TAG, "Failed to rename file" + ", from: " + temp + ", to: " + permanentFile);
}
evictionManager.maybeScheduleEviction();
if (LOG_VERBOSE) {
Log.v(
TAG,
"Completed commitPut in: "
+ getElapsedTime(startTime)
+ ", current size: "
+ journal.getCurrentSizeBytes()
+ ", key: "
+ key);
}
}
/**
* Releases the write lock for the given key and, if the write was not committed, cleans up the
* given temporary File and the corresponding journal entry for the given Key.
*
* <p>A write is assumed to have not been committed if the given temporary File still exists.
*/
public void abortPutIfNotCommitted(String key, File temp) {
try {
// If the temporary File still exists, we haven't committed. If it doesn't exist, we either
// didn't start writing and have nothing to roll back, or we finished writing and finished
// the rename so the edit is committed.
if (temp != null && fileSystem.delete(temp)) {
journal.abortPut(key);
EntryCache.Entry entry = entries.get(key);
entry.setUnknown();
}
} finally {
EntryCache.Entry entry = entries.get(key);
entry.releaseWriteLock();
}
}
void recoverPartialWrite(File temp) {
String key = keyFromFile(temp);
EntryCache.Entry entry = entries.get(key);
entry.acquireWriteLock();
try {
// Try to delete the temporary file, if it fails, we will try again in the next recovery
// phase.
boolean deleted = temp.delete();
if (!deleted) {
if (LOG_WARN) {
Log.w(TAG, "Failed to cleanup in progress write: " + temp);
}
// The write lock prevents us from directly racing with an in progress write. However when
// the write lock is released, we will get to run. If the write completed successfully,
// the
// temp file will no longer exist, but the entry will. We do not want to delete the entry
// just because we happened to try to run recovery during the write.
return;
}
delete(key);
} finally {
entry.releaseWriteLock();
}
}
private String keyFromFile(File file) {
String name = file.getName();
final String key;
if (name.endsWith(TEMP_FILE_INDICATOR)) {
key = name.substring(0, name.length() - TEMP_FILE_INDICATOR.length());
} else {
key = name;
}
return key;
}
/**
* Sets the maximum size of the cache to a new size in bytes.
*
* <p>Must be called on a background thread.
*
* <p>The EvictionManager manages the sizing of the cache. Decreasing the size may schedule an
* eviction if the current cache size exceeds newMaximumSizeBytes. Evictions will be scheduled and
* executed asynchronously. Therefore, the eviction will happen based on the latest maximum cache
* size, not the maximum size at scheduling.
*/
public void setMaximumSizeBytes(long newMaximumSizeBytes) {
evictionManager.setMaximumSizeBytes(newMaximumSizeBytes);
}
private File getCacheFile(String key) {
return new File(cacheDirectory, key);
}
private File getTempFile(String key) {
return new File(cacheDirectory, key + TEMP_FILE_INDICATOR);
}
private static long getLogTime() {
return System.currentTimeMillis();
}
private static long getElapsedTime(long startTime) {
return getLogTime() - startTime;
}
}
| JournaledLruDiskCache |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/bean/override/mockito/MockitoBeanOverrideHandlerTests.java | {
"start": 8765,
"end": 8870
} | class ____ {
}
@MockitoBean(name = "beanToMock", types = String.class)
static | ClassLevelStringMockByName1 |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/rest/action/ingest/RestPutPipelineAction.java | {
"start": 1463,
"end": 3253
} | class ____ extends BaseRestHandler {
@Override
public List<Route> routes() {
return List.of(new Route(PUT, "/_ingest/pipeline/{id}"));
}
@Override
public String getName() {
return "ingest_put_pipeline_action";
}
@Override
public RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException {
Integer ifVersion = null;
if (restRequest.hasParam("if_version")) {
String versionString = restRequest.param("if_version");
try {
ifVersion = Integer.parseInt(versionString);
} catch (NumberFormatException e) {
throw new IllegalArgumentException(
String.format(Locale.ROOT, "invalid value [%s] specified for [if_version]. must be an integer value", versionString)
);
}
}
Tuple<XContentType, ReleasableBytesReference> sourceTuple = restRequest.contentOrSourceParam();
var content = sourceTuple.v2();
final var request = new PutPipelineRequest(
getMasterNodeTimeout(restRequest),
getAckTimeout(restRequest),
restRequest.param("id"),
content,
sourceTuple.v1(),
ifVersion
);
return channel -> client.execute(
PutPipelineTransportAction.TYPE,
request,
ActionListener.withRef(new RestToXContentListener<>(channel), content)
);
}
@Override
public Set<String> supportedCapabilities() {
// pipeline_tracking info: `{created,modified}_date` system properties defined within pipeline definition.
return Set.of("pipeline_tracking_info", "field_access_pattern.flexible");
}
}
| RestPutPipelineAction |
java | apache__maven | compat/maven-model-builder/src/main/java/org/apache/maven/model/interpolation/StringSearchModelInterpolator.java | {
"start": 10297,
"end": 11649
} | class ____ {
final Field field;
CacheField(Field field) {
this.field = field;
field.setAccessible(true);
}
void interpolate(Object target, InterpolateObjectAction interpolateObjectAction) {
try {
doInterpolate(target, interpolateObjectAction);
} catch (IllegalArgumentException e) {
interpolateObjectAction.problems.add(new ModelProblemCollectorRequest(Severity.ERROR, Version.BASE)
.setMessage("Failed to interpolate field3: " + field + " on class: "
+ field.getType().getName())
.setException(e)); // TODO Not entirely the same message
} catch (IllegalAccessException e) {
interpolateObjectAction.problems.add(new ModelProblemCollectorRequest(Severity.ERROR, Version.BASE)
.setMessage("Failed to interpolate field4: " + field + " on class: "
+ field.getType().getName())
.setException(e));
}
}
abstract void doInterpolate(Object target, InterpolateObjectAction ctx) throws IllegalAccessException;
}
static final | CacheField |
java | resilience4j__resilience4j | resilience4j-rxjava2/src/main/java/io/github/resilience4j/ratelimiter/operator/FlowableRateLimiter.java | {
"start": 1173,
"end": 2189
} | class ____<T> extends Flowable<T> {
private final RateLimiter rateLimiter;
private final Publisher<T> upstream;
FlowableRateLimiter(Publisher<T> upstream, RateLimiter rateLimiter) {
this.rateLimiter = requireNonNull(rateLimiter);
this.upstream = Objects.requireNonNull(upstream, "source is null");
}
@Override
protected void subscribeActual(Subscriber<? super T> downstream) {
long waitDuration = rateLimiter.reservePermission();
if (waitDuration >= 0) {
if (waitDuration > 0) {
Completable.timer(waitDuration, TimeUnit.NANOSECONDS)
.subscribe(() -> upstream.subscribe(new RateLimiterSubscriber(downstream)));
} else {
upstream.subscribe(new RateLimiterSubscriber(downstream));
}
} else {
downstream.onSubscribe(EmptySubscription.INSTANCE);
downstream.onError(createRequestNotPermitted(rateLimiter));
}
}
| FlowableRateLimiter |
java | quarkusio__quarkus | extensions/hibernate-orm/deployment/src/test/java/io/quarkus/hibernate/orm/enhancer/HibernateEntityEnhancerMissingEmbeddableAnnotationTest.java | {
"start": 2868,
"end": 3365
} | class ____ {
private String string;
public EmbeddableMissingAnnotation() {
}
public EmbeddableMissingAnnotation(String string) {
this.string = string;
}
public String getString() {
return string;
}
public void setString(String string) {
this.string = string;
}
}
@Embeddable
public static | EmbeddableMissingAnnotation |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/common/breaker/PreallocatedCircuitBreakerServiceTests.java | {
"start": 1218,
"end": 6751
} | class ____ extends ESTestCase {
public void testUseNotPreallocated() {
HierarchyCircuitBreakerService real = real();
try (PreallocatedCircuitBreakerService preallocated = preallocateRequest(real, 1024)) {
CircuitBreaker b = preallocated.getBreaker(CircuitBreaker.REQUEST);
b.addEstimateBytesAndMaybeBreak(100, "test");
b.addWithoutBreaking(-100);
}
assertThat(real.getBreaker(CircuitBreaker.REQUEST).getUsed(), equalTo(0L));
}
public void testUseLessThanPreallocated() {
HierarchyCircuitBreakerService real = real();
try (PreallocatedCircuitBreakerService preallocated = preallocateRequest(real, 1024)) {
CircuitBreaker b = preallocated.getBreaker(CircuitBreaker.REQUEST);
b.addEstimateBytesAndMaybeBreak(100, "test");
b.addWithoutBreaking(-100);
}
assertThat(real.getBreaker(CircuitBreaker.REQUEST).getUsed(), equalTo(0L));
}
public void testCloseIsIdempotent() {
HierarchyCircuitBreakerService real = real();
try (PreallocatedCircuitBreakerService preallocated = preallocateRequest(real, 1024)) {
CircuitBreaker b = preallocated.getBreaker(CircuitBreaker.REQUEST);
b.addEstimateBytesAndMaybeBreak(100, "test");
b.addWithoutBreaking(-100);
preallocated.close();
assertThat(real.getBreaker(CircuitBreaker.REQUEST).getUsed(), equalTo(0L));
} // Closes again which should do nothing
assertThat(real.getBreaker(CircuitBreaker.REQUEST).getUsed(), equalTo(0L));
}
public void testUseMoreThanPreallocated() {
HierarchyCircuitBreakerService real = real();
try (PreallocatedCircuitBreakerService preallocated = preallocateRequest(real, 1024)) {
CircuitBreaker b = preallocated.getBreaker(CircuitBreaker.REQUEST);
b.addEstimateBytesAndMaybeBreak(2048, "test");
b.addWithoutBreaking(-2048);
}
assertThat(real.getBreaker(CircuitBreaker.REQUEST).getUsed(), equalTo(0L));
}
public void testPreallocateMoreThanRemains() {
HierarchyCircuitBreakerService real = real();
long limit = real.getBreaker(CircuitBreaker.REQUEST).getLimit();
Exception e = expectThrows(CircuitBreakingException.class, () -> preallocateRequest(real, limit + 1024));
assertThat(e.getMessage(), startsWith("[request] Data too large, data for [preallocate[test]] would be ["));
}
public void testRandom() {
HierarchyCircuitBreakerService real = real();
CircuitBreaker realBreaker = real.getBreaker(CircuitBreaker.REQUEST);
long preallocatedBytes = randomLongBetween(1, (long) (realBreaker.getLimit() * .8));
try (PreallocatedCircuitBreakerService preallocated = preallocateRequest(real, preallocatedBytes)) {
CircuitBreaker b = preallocated.getBreaker(CircuitBreaker.REQUEST);
boolean usedPreallocated = false;
long current = 0;
for (int i = 0; i < 10000; i++) {
if (current >= preallocatedBytes) {
usedPreallocated = true;
}
if (usedPreallocated) {
assertThat(realBreaker.getUsed(), equalTo(current));
} else {
assertThat(realBreaker.getUsed(), equalTo(preallocatedBytes));
}
if (current > 0 && randomBoolean()) {
long delta = randomLongBetween(-Math.min(current, realBreaker.getLimit() / 100), 0);
b.addWithoutBreaking(delta);
current += delta;
continue;
}
long delta = randomLongBetween(0, realBreaker.getLimit() / 100);
if (randomBoolean()) {
b.addWithoutBreaking(delta);
current += delta;
continue;
}
if (current + delta < realBreaker.getLimit()) {
b.addEstimateBytesAndMaybeBreak(delta, "test");
current += delta;
continue;
}
Exception e = expectThrows(CircuitBreakingException.class, () -> b.addEstimateBytesAndMaybeBreak(delta, "test"));
assertThat(e.getMessage(), startsWith("[request] Data too large, data for [test] would be ["));
}
b.addWithoutBreaking(-current);
}
assertThat(real.getBreaker(CircuitBreaker.REQUEST).getUsed(), equalTo(0L));
}
private HierarchyCircuitBreakerService real() {
return new HierarchyCircuitBreakerService(
CircuitBreakerMetrics.NOOP,
Settings.builder()
// Pin the limit to something that'll totally fit in the heap we use for the tests
.put(REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "100mb")
// Disable the real memory checking because it causes other tests to interfere with this one.
.put(USE_REAL_MEMORY_USAGE_SETTING.getKey(), false)
.build(),
List.of(),
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
);
}
private PreallocatedCircuitBreakerService preallocateRequest(CircuitBreakerService real, long bytes) {
return new PreallocatedCircuitBreakerService(real, CircuitBreaker.REQUEST, bytes, "test");
}
}
| PreallocatedCircuitBreakerServiceTests |
java | spring-projects__spring-security | config/src/main/java/org/springframework/security/config/annotation/rsocket/RSocketSecurity.java | {
"start": 14785,
"end": 16223
} | class ____ {
private final PayloadExchangeMatcher matcher;
private Access(PayloadExchangeMatcher matcher) {
this.matcher = matcher;
}
public AuthorizePayloadsSpec authenticated() {
return access(AuthenticatedReactiveAuthorizationManager.authenticated());
}
public AuthorizePayloadsSpec hasAuthority(String authority) {
return access(AuthorityReactiveAuthorizationManager.hasAuthority(authority));
}
public AuthorizePayloadsSpec hasRole(String role) {
return access(AuthorityReactiveAuthorizationManager.hasRole(role));
}
public AuthorizePayloadsSpec hasAnyRole(String... roles) {
return access(AuthorityReactiveAuthorizationManager.hasAnyRole(roles));
}
public AuthorizePayloadsSpec permitAll() {
return access((a, ctx) -> Mono.just(new AuthorizationDecision(true)));
}
public AuthorizePayloadsSpec hasAnyAuthority(String... authorities) {
return access(AuthorityReactiveAuthorizationManager.hasAnyAuthority(authorities));
}
public AuthorizePayloadsSpec access(
ReactiveAuthorizationManager<PayloadExchangeAuthorizationContext> authorization) {
AuthorizePayloadsSpec.this.authzBuilder
.add(new PayloadExchangeMatcherEntry<>(this.matcher, authorization));
return AuthorizePayloadsSpec.this;
}
public AuthorizePayloadsSpec denyAll() {
return access((a, ctx) -> Mono.just(new AuthorizationDecision(false)));
}
}
}
}
| Access |
java | elastic__elasticsearch | test/framework/src/main/java/org/elasticsearch/cluster/service/FakeThreadPoolMasterService.java | {
"start": 1411,
"end": 4413
} | class ____ extends MasterService {
private final Consumer<Runnable> taskExecutor;
private final ThreadContext threadContext;
public FakeThreadPoolMasterService(String nodeName, ThreadPool threadPool, Consumer<Runnable> taskExecutor) {
this(
Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), nodeName).build(),
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
threadPool,
taskExecutor
);
}
private FakeThreadPoolMasterService(
Settings settings,
ClusterSettings clusterSettings,
ThreadPool threadPool,
Consumer<Runnable> taskExecutor
) {
super(settings, clusterSettings, threadPool, new TaskManager(settings, threadPool, Set.of()));
this.taskExecutor = taskExecutor;
this.threadContext = threadPool.getThreadContext();
}
@Override
protected ExecutorService createThreadPoolExecutor() {
return new StoppableExecutorServiceWrapper(EsExecutors.DIRECT_EXECUTOR_SERVICE) {
@Override
public void execute(Runnable command) {
taskExecutor.accept(threadContext.preserveContext(command));
}
@Override
public String toString() {
return "FakeThreadPoolMasterService executor";
}
};
}
@Override
public ClusterState.Builder incrementVersion(ClusterState clusterState) {
// generate cluster UUID deterministically for repeatable tests
return ClusterState.builder(clusterState).incrementVersion().stateUUID(UUIDs.randomBase64UUID(random()));
}
@Override
protected void publish(
ClusterStatePublicationEvent clusterStatePublicationEvent,
AckListener ackListener,
ActionListener<Void> publicationListener
) {
// allow to fork the publication to add a little extra room for concurrent activity here
taskExecutor.accept(threadPool.getThreadContext().preserveContext(new Runnable() {
@Override
public void run() {
FakeThreadPoolMasterService.super.publish(clusterStatePublicationEvent, wrapAckListener(ackListener), publicationListener);
}
@Override
public String toString() {
return "publish change of cluster state from version ["
+ clusterStatePublicationEvent.getOldState().version()
+ "] in term ["
+ clusterStatePublicationEvent.getOldState().term()
+ "] to version ["
+ clusterStatePublicationEvent.getNewState().version()
+ "] in term ["
+ clusterStatePublicationEvent.getNewState().term()
+ "]";
}
}));
}
protected AckListener wrapAckListener(AckListener ackListener) {
return ackListener;
}
}
| FakeThreadPoolMasterService |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterServiceTestBase.java | {
"start": 4190,
"end": 15111
} | class ____ {
private static final Logger LOG = LoggerFactory
.getLogger(ApplicationMasterServiceTestBase.class);
static final int GB = 1024;
static final String CUSTOM_RES = "res_1";
static final String DEFAULT_HOST = "127.0.0.1";
static final String DEFAULT_PORT = "1234";
protected static YarnConfiguration conf;
protected abstract YarnConfiguration createYarnConfig();
protected abstract Resource getResourceUsageForQueue(ResourceManager rm,
String queue);
protected abstract String getDefaultQueueName();
Map<String, ResourceInformation> initializeMandatoryResources() {
Map<String, ResourceInformation> riMap = new HashMap<>();
ResourceInformation memory = ResourceInformation.newInstance(
ResourceInformation.MEMORY_MB.getName(),
ResourceInformation.MEMORY_MB.getUnits(),
YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB);
ResourceInformation vcores = ResourceInformation.newInstance(
ResourceInformation.VCORES.getName(),
ResourceInformation.VCORES.getUnits(),
YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES,
DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);
riMap.put(ResourceInformation.MEMORY_URI, memory);
riMap.put(ResourceInformation.VCORES_URI, vcores);
return riMap;
}
private void requestResources(MockAM am, long memory, int vCores,
Map<String, Integer> customResources) throws Exception {
Map<String, String> convertedCustomResources =
ResourceTypesTestHelper.convertCustomResources(customResources);
am.allocate(Collections.singletonList(ResourceRequest.newBuilder()
.capability(ResourceTypesTestHelper.newResource(
memory, vCores, convertedCustomResources))
.numContainers(1)
.resourceName("*")
.build()), null);
}
@BeforeEach
public void setup() {
conf = new YarnConfiguration();
conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class,
ResourceScheduler.class);
}
@Test
@Timeout(value = 3000)
public void testRMIdentifierOnContainerAllocation() throws Exception {
MockRM rm = new MockRM(conf);
rm.start();
// Register node1
MockNM nm1 = rm.registerNode(DEFAULT_HOST + ":" + DEFAULT_PORT, 6 * GB);
// Submit an application
RMApp app1 = MockRMAppSubmitter.submitWithMemory(2048, rm);
// kick the scheduling
nm1.nodeHeartbeat(true);
RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
am1.registerAppAttempt();
am1.addRequests(new String[] {DEFAULT_HOST}, GB, 1, 1);
AllocateResponse alloc1Response = am1.schedule(); // send the request
// kick the scheduler
nm1.nodeHeartbeat(true);
while (alloc1Response.getAllocatedContainers().size() < 1) {
LOG.info("Waiting for containers to be created for app 1...");
sleep(1000);
alloc1Response = am1.schedule();
}
// assert RMIdentifier is set properly in allocated containers
Container allocatedContainer =
alloc1Response.getAllocatedContainers().get(0);
ContainerTokenIdentifier tokenId =
BuilderUtils.newContainerTokenIdentifier(allocatedContainer
.getContainerToken());
assertEquals(MockRM.getClusterTimeStamp(),
tokenId.getRMIdentifier());
rm.stop();
}
@Test
@Timeout(value = 3000)
public void testAllocateResponseIdOverflow() throws Exception {
MockRM rm = new MockRM(conf);
try {
rm.start();
// Register node1
MockNM nm1 = rm.registerNode(DEFAULT_HOST + ":" + DEFAULT_PORT, 6 * GB);
// Submit an application
RMApp app1 = MockRMAppSubmitter.submitWithMemory(2048, rm);
// kick off the scheduling
nm1.nodeHeartbeat(true);
RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
am1.registerAppAttempt();
// Set the last responseId to be Integer.MAX_VALUE
assertTrue(am1.setApplicationLastResponseId(Integer.MAX_VALUE));
// Both allocate should succeed
am1.schedule(); // send allocate with responseId = Integer.MAX_VALUE
assertEquals(0, am1.getResponseId());
am1.schedule(); // send allocate with responseId = 0
assertEquals(1, am1.getResponseId());
} finally {
rm.stop();
}
}
@Test
@Timeout(value = 600)
public void testInvalidContainerReleaseRequest() throws Exception {
MockRM rm = new MockRM(conf);
try {
rm.start();
// Register node1
MockNM nm1 = rm.registerNode(DEFAULT_HOST + ":" + DEFAULT_PORT, 6 * GB);
// Submit an application
RMApp app1 = MockRMAppSubmitter.submitWithMemory(1024, rm);
// kick the scheduling
nm1.nodeHeartbeat(true);
RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
am1.registerAppAttempt();
am1.addRequests(new String[] {DEFAULT_HOST}, GB, 1, 1);
AllocateResponse alloc1Response = am1.schedule(); // send the request
// kick the scheduler
nm1.nodeHeartbeat(true);
while (alloc1Response.getAllocatedContainers().size() < 1) {
LOG.info("Waiting for containers to be created for app 1...");
sleep(1000);
alloc1Response = am1.schedule();
}
assertTrue(alloc1Response.getAllocatedContainers().size() > 0);
RMApp app2 = MockRMAppSubmitter.submitWithMemory(1024, rm);
nm1.nodeHeartbeat(true);
RMAppAttempt attempt2 = app2.getCurrentAppAttempt();
MockAM am2 = rm.sendAMLaunched(attempt2.getAppAttemptId());
am2.registerAppAttempt();
// Now trying to release container allocated for app1 -> appAttempt1.
ContainerId cId = alloc1Response.getAllocatedContainers().get(0).getId();
am2.addContainerToBeReleased(cId);
try {
am2.schedule();
fail("Exception was expected!!");
} catch (InvalidContainerReleaseException e) {
StringBuilder sb = new StringBuilder("Cannot release container : ");
sb.append(cId.toString());
sb.append(" not belonging to this application attempt : ");
sb.append(attempt2.getAppAttemptId().toString());
assertTrue(e.getMessage().contains(sb.toString()));
}
} finally {
rm.stop();
}
}
@Test
@Timeout(value = 1200)
public void testProgressFilter() throws Exception{
MockRM rm = new MockRM(conf);
rm.start();
// Register node1
MockNM nm1 = rm.registerNode(DEFAULT_HOST + ":" + DEFAULT_PORT, 6 * GB);
// Submit an application
RMApp app1 = MockRMAppSubmitter.submitWithMemory(2048, rm);
nm1.nodeHeartbeat(true);
RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
am1.registerAppAttempt();
AllocateRequestPBImpl allocateRequest = new AllocateRequestPBImpl();
List<ContainerId> release = new ArrayList<>();
List<ResourceRequest> ask = new ArrayList<>();
allocateRequest.setReleaseList(release);
allocateRequest.setAskList(ask);
allocateRequest.setProgress(Float.POSITIVE_INFINITY);
am1.allocate(allocateRequest);
while(attempt1.getProgress()!=1){
LOG.info("Waiting for allocate event to be handled ...");
sleep(100);
}
allocateRequest.setProgress(Float.NaN);
am1.allocate(allocateRequest);
while(attempt1.getProgress()!=0){
LOG.info("Waiting for allocate event to be handled ...");
sleep(100);
}
allocateRequest.setProgress((float)9);
am1.allocate(allocateRequest);
while(attempt1.getProgress()!=1){
LOG.info("Waiting for allocate event to be handled ...");
sleep(100);
}
allocateRequest.setProgress(Float.NEGATIVE_INFINITY);
am1.allocate(allocateRequest);
while(attempt1.getProgress()!=0){
LOG.info("Waiting for allocate event to be handled ...");
sleep(100);
}
allocateRequest.setProgress((float)0.5);
am1.allocate(allocateRequest);
while(attempt1.getProgress()!=0.5){
LOG.info("Waiting for allocate event to be handled ...");
sleep(100);
}
allocateRequest.setProgress((float)-1);
am1.allocate(allocateRequest);
while(attempt1.getProgress()!=0){
LOG.info("Waiting for allocate event to be handled ...");
sleep(100);
}
}
@Test
@Timeout(value = 1200)
public void testFinishApplicationMasterBeforeRegistering() throws Exception {
MockRM rm = new MockRM(conf);
try {
rm.start();
// Register node1
MockNM nm1 = rm.registerNode(DEFAULT_HOST + ":" + DEFAULT_PORT, 6 * GB);
// Submit an application
RMApp app1 = MockRMAppSubmitter.submitWithMemory(2048, rm);
MockAM am1 = MockRM.launchAM(app1, rm, nm1);
FinishApplicationMasterRequest req =
FinishApplicationMasterRequest.newInstance(
FinalApplicationStatus.FAILED, "", "");
try {
am1.unregisterAppAttempt(req, false);
fail("ApplicationMasterNotRegisteredException should be thrown");
} catch (ApplicationMasterNotRegisteredException e) {
assertNotNull(e);
assertNotNull(e.getMessage());
assertTrue(e.getMessage().contains(
"Application Master is trying to unregister before registering for:"
));
} catch (Exception e) {
fail("ApplicationMasterNotRegisteredException should be thrown");
}
am1.registerAppAttempt();
am1.unregisterAppAttempt(req, false);
rm.waitForState(am1.getApplicationAttemptId(),
RMAppAttemptState.FINISHING);
} finally {
rm.stop();
}
}
@Test
@Timeout(value = 1200)
public void testRepeatedFinishApplicationMaster() throws Exception {
CountingDispatcher dispatcher = new CountingDispatcher();
MockRM rm = new MockRM(conf) {
@Override
protected Dispatcher createDispatcher() {
return dispatcher;
}
};
try {
rm.start();
// Register node1
MockNM nm1 = rm.registerNode(DEFAULT_HOST + ":" + DEFAULT_PORT, 6 * GB);
// Submit an application
RMApp app1 = MockRMAppSubmitter.submit(rm,
MockRMAppSubmissionData.Builder.createWithMemory(2048, rm).build());
MockAM am1 = MockRM.launchAM(app1, rm, nm1);
am1.registerAppAttempt();
FinishApplicationMasterRequest req = FinishApplicationMasterRequest
.newInstance(FinalApplicationStatus.FAILED, "", "");
for (int i = 0; i < 10; i++) {
am1.unregisterAppAttempt(req, false);
}
rm.drainEvents();
assertEquals(1, dispatcher.getEventCount(),
"Expecting only one event");
} finally {
rm.stop();
}
}
static | ApplicationMasterServiceTestBase |
java | spring-projects__spring-security | docs/src/test/java/org/springframework/security/docs/reactive/configuration/customizerbeanordering/CustomizerBeanOrderingConfiguration.java | {
"start": 1363,
"end": 2822
} | class ____ {
// tag::sample[]
@Bean // <4>
SecurityWebFilterChain springSecurity(ServerHttpSecurity http) {
// @formatter:off
http
.authorizeExchange((exchange) -> exchange
.anyExchange().authenticated()
);
return http.build();
// @formatter:on
}
@Bean
@Order(Ordered.LOWEST_PRECEDENCE) // <2>
Customizer<ServerHttpSecurity> userAuthorization() {
// @formatter:off
return (http) -> http
.authorizeExchange((exchange) -> exchange
.pathMatchers("/users/**").hasRole("USER")
);
// @formatter:on
}
@Bean
@Order(Ordered.HIGHEST_PRECEDENCE) // <1>
Customizer<ServerHttpSecurity> adminAuthorization() {
// @formatter:off
return (http) -> http
.authorizeExchange((exchange) -> exchange
.pathMatchers("/admins/**").hasRole("ADMIN")
);
// @formatter:on
}
// <3>
@Bean
Customizer<ServerHttpSecurity.HeaderSpec> contentSecurityPolicy() {
// @formatter:off
return (headers) -> headers
.contentSecurityPolicy((csp) -> csp
.policyDirectives("object-src 'none'")
);
// @formatter:on
}
@Bean
Customizer<ServerHttpSecurity.HeaderSpec> contentTypeOptions() {
// @formatter:off
return (headers) -> headers
.contentTypeOptions(Customizer.withDefaults());
// @formatter:on
}
@Bean
Customizer<ServerHttpSecurity.HttpsRedirectSpec> httpsRedirect() {
// @formatter:off
return Customizer.withDefaults();
// @formatter:on
}
// end::sample[]
}
| CustomizerBeanOrderingConfiguration |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/JobControl.java | {
"start": 1621,
"end": 1836
} | class ____ APIs for the client app to add a job to the group
* and to get the jobs in the group in different states. When a job is
* added, an ID unique to the group is assigned to the job.
*
* This | provides |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/internal/RealNumbers.java | {
"start": 1099,
"end": 1216
} | class ____ reusable assertions for real numbers (float and double).
*
* @author Joel Costigliola
*/
public abstract | of |
java | redisson__redisson | redisson/src/main/java/org/redisson/api/listener/ListTrimListener.java | {
"start": 906,
"end": 1093
} | interface ____ extends ObjectListener {
/**
* Invoked on list trimming event
*
* @param name - name of object
*/
void onListTrim(String name);
}
| ListTrimListener |
java | spring-projects__spring-boot | module/spring-boot-health/src/main/java/org/springframework/boot/health/actuate/endpoint/ReactiveHealthEndpointWebExtension.java | {
"start": 2093,
"end": 5516
} | class ____
extends HealthEndpointSupport<Mono<? extends Health>, Mono<? extends HealthDescriptor>> {
/**
* Create a new {@link ReactiveHealthEndpointWebExtension} instance.
* @param registry the health contributor registry
* @param fallbackRegistry the fallback registry or {@code null}
* @param groups the health endpoint groups
* @param slowContributorLoggingThreshold duration after which slow health indicator
* logging should occur
*/
public ReactiveHealthEndpointWebExtension(ReactiveHealthContributorRegistry registry,
@Nullable HealthContributorRegistry fallbackRegistry, HealthEndpointGroups groups,
@Nullable Duration slowContributorLoggingThreshold) {
super(Contributor.reactive(registry, fallbackRegistry), groups, slowContributorLoggingThreshold);
}
@ReadOperation
public Mono<WebEndpointResponse<? extends HealthDescriptor>> health(ApiVersion apiVersion,
@Nullable WebServerNamespace serverNamespace, SecurityContext securityContext) {
return health(apiVersion, serverNamespace, securityContext, false, EMPTY_PATH);
}
@ReadOperation
public Mono<WebEndpointResponse<? extends HealthDescriptor>> health(ApiVersion apiVersion,
@Nullable WebServerNamespace serverNamespace, SecurityContext securityContext,
@Selector(match = Match.ALL_REMAINING) String... path) {
return health(apiVersion, serverNamespace, securityContext, false, path);
}
public Mono<WebEndpointResponse<? extends HealthDescriptor>> health(ApiVersion apiVersion,
@Nullable WebServerNamespace serverNamespace, SecurityContext securityContext, boolean showAll,
String... path) {
Result<Mono<? extends HealthDescriptor>> result = getResult(apiVersion, serverNamespace, securityContext,
showAll, path);
if (result == null) {
return (Arrays.equals(path, EMPTY_PATH))
? Mono.just(new WebEndpointResponse<>(IndicatedHealthDescriptor.UP, WebEndpointResponse.STATUS_OK))
: Mono.just(new WebEndpointResponse<>(WebEndpointResponse.STATUS_NOT_FOUND));
}
HealthEndpointGroup group = result.group();
return result.descriptor().map((health) -> {
int statusCode = group.getHttpCodeStatusMapper().getStatusCode(health.getStatus());
return new WebEndpointResponse<>(health, statusCode);
});
}
@Override
protected Mono<? extends HealthDescriptor> aggregateDescriptors(ApiVersion apiVersion,
Map<String, Mono<? extends HealthDescriptor>> contributions, StatusAggregator statusAggregator,
boolean showComponents, @Nullable Set<String> groupNames) {
return Flux.fromIterable(contributions.entrySet())
.flatMap(NamedHealthDescriptor::create)
.collectMap(NamedHealthDescriptor::name, NamedHealthDescriptor::descriptor)
.map((components) -> this.getCompositeDescriptor(apiVersion, components, statusAggregator, showComponents,
groupNames));
}
/**
* A named {@link HealthDescriptor}.
*/
private record NamedHealthDescriptor(String name, HealthDescriptor descriptor) {
static Mono<NamedHealthDescriptor> create(Map.Entry<String, Mono<? extends HealthDescriptor>> entry) {
Mono<String> name = Mono.just(entry.getKey());
Mono<? extends HealthDescriptor> health = entry.getValue();
return Mono.zip(NamedHealthDescriptor::ofPair, name, health);
}
private static NamedHealthDescriptor ofPair(Object... pair) {
return new NamedHealthDescriptor((String) pair[0], (HealthDescriptor) pair[1]);
}
}
}
| ReactiveHealthEndpointWebExtension |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/ignore/IgnorePropertyTest.java | {
"start": 812,
"end": 3233
} | class ____ {
@ProcessorTest
@IssueKey("72")
public void shouldNotPropagateIgnoredPropertyGivenViaTargetAttribute() {
Animal animal = new Animal( "Bruno", 100, 23, "black" );
AnimalDto animalDto = AnimalMapper.INSTANCE.animalToDto( animal );
assertThat( animalDto ).isNotNull();
assertThat( animalDto.getName() ).isEqualTo( "Bruno" );
assertThat( animalDto.getSize() ).isEqualTo( 100 );
assertThat( animalDto.getAge() ).isNull();
assertThat( animalDto.publicAge ).isNull();
assertThat( animalDto.getColor() ).isNull();
assertThat( animalDto.publicColor ).isNull();
}
@ProcessorTest
@IssueKey("1392")
public void shouldIgnoreAllTargetPropertiesWithNoUnmappedTargetWarnings() {
Animal animal = new Animal( "Bruno", 100, 23, "black" );
AnimalDto animalDto = AnimalMapper.INSTANCE.animalToDtoIgnoreAll( animal );
assertThat( animalDto ).isNotNull();
assertThat( animalDto.getName() ).isNull();
assertThat( animalDto.getSize() ).isNull();
assertThat( animalDto.getAge() ).isNull();
assertThat( animalDto.publicAge ).isNull();
assertThat( animalDto.getColor() ).isNull();
assertThat( animalDto.publicColor ).isNull();
}
@ProcessorTest
@IssueKey("337")
public void propertyIsIgnoredInReverseMappingWhenSourceIsAlsoSpecifiedICWIgnore() {
AnimalDto animalDto = new AnimalDto( "Bruno", 100, 23, "black" );
Animal animal = AnimalMapper.INSTANCE.animalDtoToAnimal( animalDto );
assertThat( animal ).isNotNull();
assertThat( animalDto.getName() ).isEqualTo( "Bruno" );
assertThat( animalDto.getSize() ).isEqualTo( 100 );
assertThat( animal.getColour() ).isNull();
assertThat( animal.publicColour ).isNull();
}
@ProcessorTest
@IssueKey("833")
@WithClasses({Preditor.class, PreditorDto.class, ErroneousTargetHasNoWriteAccessorMapper.class})
@ExpectedCompilationOutcome(
value = CompilationResult.FAILED,
diagnostics = {
@Diagnostic(type = ErroneousTargetHasNoWriteAccessorMapper.class,
kind = Kind.ERROR,
line = 22,
message = "Property \"hasClaws\" has no write accessor in PreditorDto.")
}
)
public void shouldGiveErrorOnMappingForReadOnlyProp() {
}
}
| IgnorePropertyTest |
java | apache__logging-log4j2 | log4j-core-test/src/test/java/org/apache/logging/log4j/core/pattern/EqualsReplacementConverterTest.java | {
"start": 1307,
"end": 4225
} | class ____ {
private static final String TEST_MESSAGE = "This is a test";
@Test
void testMarkerReplacement() {
testReplacement("%marker", Strings.EMPTY);
}
@Test
void testMarkerSimpleNameReplacement() {
testReplacement("%markerSimpleName", Strings.EMPTY);
}
@Test
void testLoggerNameReplacement() {
testReplacement("%logger", "[" + EqualsReplacementConverterTest.class.getName() + "]");
}
@Test
void testMarkerReplacementWithMessage() {
testReplacement(TEST_MESSAGE, new String[] {"[%marker]", "[]", "%msg"});
}
private void testReplacement(final String tag, final String expectedValue) {
final String[] options = new String[] {"[" + tag + "]", "[]", expectedValue};
testReplacement(expectedValue, options);
}
private void testReplacement(final String expectedValue, final String[] options) {
final LogEvent event = Log4jLogEvent.newBuilder() //
.setLoggerName(EqualsReplacementConverterTest.class.getName()) //
.setLevel(Level.DEBUG) //
.setMessage(new SimpleMessage(TEST_MESSAGE)) //
.build();
final StringBuilder sb = new StringBuilder();
final LoggerContext ctx = LoggerContext.getContext();
final EqualsReplacementConverter converter =
EqualsReplacementConverter.newInstance(ctx.getConfiguration(), options);
assertNotNull(converter);
converter.format(event, sb);
assertEquals(expectedValue, sb.toString());
}
@Test
void testParseSubstitutionWithPattern() {
testParseSubstitution("%msg", TEST_MESSAGE);
}
@Test
void testParseSubstitutionWithoutPattern() {
final String substitution = "test";
testParseSubstitution(substitution, substitution);
}
@Test
void testParseSubstitutionEmpty() {
testParseSubstitution("", "");
}
@Test
void testParseSubstitutionWithWhiteSpaces() {
testParseSubstitution(" ", " ");
}
private void testParseSubstitution(final String substitution, final String expected) {
final LogEvent event = Log4jLogEvent.newBuilder()
.setLoggerName(EqualsReplacementConverterTest.class.getName())
.setLevel(Level.DEBUG)
.setMessage(new SimpleMessage(TEST_MESSAGE))
.build();
final LoggerContext ctx = LoggerContext.getContext();
final EqualsReplacementConverter converter = EqualsReplacementConverter.newInstance(
ctx.getConfiguration(), new String[] {"[%marker]", "[]", substitution});
final StringBuilder sb = new StringBuilder();
assertNotNull(converter);
converter.parseSubstitution(event, sb);
final String actual = sb.toString();
assertEquals(expected, actual);
}
}
| EqualsReplacementConverterTest |
java | apache__camel | components/camel-infinispan/camel-infinispan/src/test/java/org/apache/camel/component/infinispan/remote/cluster/AbstractInfinispanRemoteClusteredIT.java | {
"start": 2427,
"end": 2701
} | class ____ {
@RegisterExtension
public static InfinispanService service = InfinispanServiceFactory.createSingletonInfinispanService();
private RemoteCacheManager cacheContainer;
private final String viewName = "myView";
| AbstractInfinispanRemoteClusteredIT |
java | micronaut-projects__micronaut-core | management/src/test/java/io/micronaut/management/health/indicator/service/ServiceReadyIndicatorTest.java | {
"start": 570,
"end": 1495
} | class ____ {
@Test
void serviceReadyHealthIndicatorViaConfiguration() {
Consumer<ApplicationContext> healthBeansConsumer = context -> {
assertTrue(context.containsBean(HealthEndpoint.class));
assertTrue(context.containsBean(DefaultHealthAggregator.class));
};
Map<String, Object> configuration = Map.of("endpoints.health.service-ready-indicator-enabled", StringUtils.FALSE);
try (ApplicationContext context = ApplicationContext.run(configuration)) {
healthBeansConsumer.accept(context);
assertFalse(context.containsBean(ServiceReadyHealthIndicator.class));
}
// enabled by default
try (ApplicationContext context = ApplicationContext.run()) {
healthBeansConsumer.accept(context);
assertTrue(context.containsBean(ServiceReadyHealthIndicator.class));
}
}
}
| ServiceReadyIndicatorTest |
java | apache__flink | flink-core/src/test/java/org/apache/flink/util/TernaryBooleanTest.java | {
"start": 1162,
"end": 2758
} | class ____ {
@Test
void testWithDefault() {
assertThat(TRUE.getOrDefault(true)).isTrue();
assertThat(TRUE.getOrDefault(false)).isTrue();
assertThat(FALSE.getOrDefault(true)).isFalse();
assertThat(FALSE.getOrDefault(false)).isFalse();
assertThat(UNDEFINED.getOrDefault(true)).isTrue();
assertThat(UNDEFINED.getOrDefault(false)).isFalse();
}
@Test
void testResolveUndefined() {
assertThat(TRUE.resolveUndefined(true)).isEqualTo(TRUE);
assertThat(TRUE.resolveUndefined(false)).isEqualTo(TRUE);
assertThat(FALSE.resolveUndefined(true)).isEqualTo(FALSE);
assertThat(FALSE.resolveUndefined(false)).isEqualTo(FALSE);
assertThat(UNDEFINED.resolveUndefined(true)).isEqualTo(TRUE);
assertThat(UNDEFINED.resolveUndefined(false)).isEqualTo(FALSE);
}
@Test
void testToBoolean() {
assertThat(TRUE.getAsBoolean()).isSameAs(Boolean.TRUE);
assertThat(FALSE.getAsBoolean()).isSameAs(Boolean.FALSE);
assertThat(UNDEFINED.getAsBoolean()).isNull();
}
@Test
void testFromBoolean() {
assertThat(TernaryBoolean.fromBoolean(true)).isEqualTo(TRUE);
assertThat(TernaryBoolean.fromBoolean(false)).isEqualTo(FALSE);
}
@Test
void testFromBoxedBoolean() {
assertThat(TernaryBoolean.fromBoxedBoolean(Boolean.TRUE)).isEqualTo(TRUE);
assertThat(TernaryBoolean.fromBoxedBoolean(Boolean.FALSE)).isEqualTo(FALSE);
assertThat(TernaryBoolean.fromBoxedBoolean(null)).isEqualTo(UNDEFINED);
}
}
| TernaryBooleanTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.